diff --git a/R-package/DESCRIPTION b/R-package/DESCRIPTION index 7e5d968670f6..951724aa591e 100644 --- a/R-package/DESCRIPTION +++ b/R-package/DESCRIPTION @@ -4,7 +4,7 @@ Title: Extreme Gradient Boosting Version: 0.4-3 Date: 2015-08-01 Author: Tianqi Chen , Tong He , - Michael Benesty + Michael Benesty , Vadim Khotilovich Maintainer: Tong He Description: Extreme Gradient Boosting, which is an efficient implementation of gradient boosting framework. This package is its R interface. The package diff --git a/R-package/NAMESPACE b/R-package/NAMESPACE index d61e9ddae630..44f1d5c3cba6 100644 --- a/R-package/NAMESPACE +++ b/R-package/NAMESPACE @@ -12,8 +12,16 @@ S3method(slice,xgb.DMatrix) export("xgb.attr<-") export("xgb.attributes<-") export("xgb.parameters<-") +export(cb.cv.predict) +export(cb.early.stop) +export(cb.evaluation.log) +export(cb.print.evaluation) +export(cb.reset.parameters) +export(cb.save.model) export(getinfo) +export(print.xgb.Booster) export(print.xgb.DMatrix) +export(print.xgb.cv.synchronous) export(setinfo) export(slice) export(xgb.DMatrix) @@ -44,13 +52,13 @@ importFrom(Matrix,sparseVector) importFrom(data.table,":=") importFrom(data.table,as.data.table) importFrom(data.table,data.table) -importFrom(data.table,fread) importFrom(data.table,rbindlist) importFrom(data.table,setnames) importFrom(magrittr,"%>%") importFrom(stringr,str_detect) importFrom(stringr,str_extract) -importFrom(stringr,str_extract_all) importFrom(stringr,str_match) importFrom(stringr,str_replace) +importFrom(stringr,str_replace_all) importFrom(stringr,str_split) +useDynLib(xgboost) diff --git a/R-package/R/callbacks.R b/R-package/R/callbacks.R new file mode 100644 index 000000000000..cffcec31f0ac --- /dev/null +++ b/R-package/R/callbacks.R @@ -0,0 +1,605 @@ +#' Callback closures for booster training. +#' +#' These are used to perform various service tasks either during boosting iterations or at the end. +#' This approach helps to modularize many of these tasks without bloating the main training methods, +#' and it offers a flexible way to customize the training process. +#' +#' @details +#' By default, a callback function is run after each boosting iteration. +#' An R-attribute \code{is_pre_iteration} could be set for a callback to define a pre-iteration function. +#' +#' When a callback function has a \code{finalize} parameter, its finalizer part will also be run after +#' the boosting is completed. +#' +#' WARNING: side-effects!!! Be aware that these callback functions access and modify things in +#' the environment from which they are called, which is a fairly uncommon thing to do in R. +#' +#' To write a custom callback closure, make sure you first understand the main concepts about R environments. +#' Check either R documentation on \code{\link[base]{environment}} or the +#' \href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R" +#' book by Hadley Wickham. Further, the best option is to read the code of some of the existing callbacks - +#' choose ones that do something similar to what you want to achieve. Also, you would need to get familiar +#' with the objects available inside of the \code{xgb.train} and \code{xgb.cv} internal environments.
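To make the closure pattern described above concrete, here is a minimal sketch of what a custom callback could look like. It is illustrative only and not part of this patch: the name cb.timer and the timing_log field are hypothetical, and the sketch assumes only the iteration counter documented above plus the 'name'/'call' attribute conventions used by the cb.* callbacks.

# hypothetical example: record a timestamp at every boosting iteration
cb.timer <- function() {
  times <- NULL  # closure state accumulated across iterations
  callback <- function(env = parent.frame(), finalize = FALSE) {
    if (finalize) {
      # when boosting is done, expose the collected timestamps to the caller's frame
      env$timing_log <- times
      return(invisible(NULL))
    }
    times <<- c(times, Sys.time())
  }
  attr(callback, 'call') <- match.call()
  attr(callback, 'name') <- 'cb.timer'
  callback
}

Such a closure could then be passed through the callbacks argument of xgb.train or xgb.cv.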
+#' +#' @seealso +#' \code{\link{cb.print.evaluation}}, +#' \code{\link{cb.evaluation.log}}, +#' \code{\link{cb.reset.parameters}}, +#' \code{\link{cb.early.stop}}, +#' \code{\link{cb.save.model}}, +#' \code{\link{cb.cv.predict}}, +#' \code{\link{xgb.train}}, +#' \code{\link{xgb.cv}} +#' +#' @name callbacks NULL + +# +# Callbacks ------------------------------------------------------------------- +# + +#' Callback closure for printing the result of evaluation +#' +#' @param period results would be printed every \code{period} iterations +#' +#' @details +#' The callback function prints the result of evaluation at every \code{period} iterations. +#' The initial and the last iteration's evaluations are always printed. +#' +#' Callback function expects the following values to be set in its calling frame: +#' \code{bst_evaluation} (also \code{bst_evaluation_err} when available), +#' \code{iteration}, +#' \code{begin_iteration}, +#' \code{end_iteration}. +#' +#' @seealso +#' \code{\link{callbacks}} +#' +#' @export +cb.print.evaluation <- function(period=1) { + + callback <- function(env = parent.frame()) { + if (length(env$bst_evaluation) == 0 || + period == 0 || + NVL(env$rank, 0) != 0 ) + return() + + i <- env$iteration + if ((i-1) %% period == 0 || + i == env$begin_iteration || + i == env$end_iteration) { + msg <- format.eval.string(i, env$bst_evaluation, env$bst_evaluation_err) + cat(msg, '\n') + } + } + attr(callback, 'call') <- match.call() + attr(callback, 'name') <- 'cb.print.evaluation' + callback +} + + +#' Callback closure for logging the evaluation history +#' +#' @details +#' This callback function appends the current iteration evaluation results \code{bst_evaluation} +#' available in the calling parent frame to the \code{evaluation_log} list in that frame. +#' +#' The finalizer callback (called with \code{finalize = TRUE} at the end) converts +#' the \code{evaluation_log} list into a final data.table. +#' +#' The iteration evaluation result \code{bst_evaluation} must be a named numeric vector. +#' +#' Note: in the column names of the final data.table, the dash '-' character is replaced with +#' the underscore '_' in order to make the column names more like regular R identifiers. +#' +#' Callback function expects the following values to be set in its calling frame: +#' \code{evaluation_log}, +#' \code{bst_evaluation}, +#' \code{iteration}. +#' +#' @seealso +#' \code{\link{callbacks}} +#' +#' @export +cb.evaluation.log <- function() { + + mnames <- NULL + + init <- function(env) { + if (!is.list(env$evaluation_log)) + stop("'evaluation_log' has to be a list") + mnames <<- names(env$bst_evaluation) + if (is.null(mnames) || any(mnames == "")) + stop("bst_evaluation must have non-empty names") + + mnames <<- gsub('-', '_', names(env$bst_evaluation)) + if(!is.null(env$bst_evaluation_err)) + mnames <<- c(paste0(mnames, '_mean'), paste0(mnames, '_std')) + } + + finalizer <- function(env) { + env$evaluation_log <- as.data.table(t(simplify2array(env$evaluation_log))) + setnames(env$evaluation_log, c('iter', mnames)) + + if(!is.null(env$bst_evaluation_err)) { + # rearrange col order from _mean,_mean,...,_std,_std,... + # to be _mean,_std,_mean,_std,...
+ len <- length(mnames) + means <- mnames[1:(len/2)] + stds <- mnames[(len/2 + 1):len] + cnames <- numeric(len) + cnames[c(TRUE, FALSE)] <- means + cnames[c(FALSE, TRUE)] <- stds + env$evaluation_log <- env$evaluation_log[, c('iter', cnames), with=FALSE] + } + } + + callback <- function(env = parent.frame(), finalize = FALSE) { + if (is.null(mnames)) + init(env) + + if (finalize) + return(finalizer(env)) + + ev <- env$bst_evaluation + if(!is.null(env$bst_evaluation_err)) + ev <- c(ev, env$bst_evaluation_err) + env$evaluation_log <- c(env$evaluation_log, + list(c(iter = env$iteration, ev))) + } + attr(callback, 'call') <- match.call() + attr(callback, 'name') <- 'cb.evaluation.log' + callback +} + +#' Callback closure for resetting the booster's parameters at each iteration. +#' +#' @param new_params a list where each element corresponds to a parameter that needs to be reset. +#' Each element's value must be either a vector of values of length \code{nrounds} +#' to be set at each iteration, +#' or a function of two parameters \code{learning_rates(iteration, nrounds)} +#' which returns a new parameter value by using the current iteration number +#' and the total number of boosting rounds. +#' +#' @details +#' This is a "pre-iteration" callback function used to reset the booster's parameters +#' at the beginning of each iteration. +#' +#' Note that when training is resumed from some previous model, and a function is used to +#' reset a parameter value, the \code{nrounds} argument in this function would be the +#' number of boosting rounds in the current training. +#' +#' Callback function expects the following values to be set in its calling frame: +#' \code{bst} or \code{bst_folds}, +#' \code{iteration}, +#' \code{begin_iteration}, +#' \code{end_iteration}. +#' +#' @seealso +#' \code{\link{callbacks}} +#' +#' @export +cb.reset.parameters <- function(new_params) { + + if (typeof(new_params) != "list") + stop("'new_params' must be a list") + pnames <- gsub("\\.", "_", names(new_params)) + nrounds <- NULL + + # run some checks at the beginning + init <- function(env) { + nrounds <<- env$end_iteration - env$begin_iteration + 1 + + if (is.null(env$bst) && is.null(env$bst_folds)) + stop("Parent frame has neither 'bst' nor 'bst_folds'") + + # Some parameters are not allowed to be changed, + # since changing them would simply wreak havoc + not_allowed <- pnames %in% + c('num_class', 'num_output_group', 'size_leaf_vector', 'updater_seq') + if (any(not_allowed)) + stop('Parameters ', paste(pnames[not_allowed]), " cannot be changed during boosting.") + + for (n in pnames) { + p <- new_params[[n]] + if (is.function(p)) { + if (length(formals(p)) != 2) + stop("Parameter '", n, "' is a function but not of two arguments") + } else if (is.numeric(p) || is.character(p)) { + if (length(p) != nrounds) + stop("Length of '", n, "' has to be equal to 'nrounds'") + } else { + stop("Parameter '", n, "' is not a function or a vector") + } + } + } + + callback <- function(env = parent.frame()) { + if (is.null(nrounds)) + init(env) + + i <- env$iteration + pars <- lapply(new_params, function(p) { + if (is.function(p)) + return(p(i, nrounds)) + p[i] + }) + + if (!is.null(env$bst)) { + xgb.parameters(env$bst$handle) <- pars + } else { + for (fd in env$bst_folds) + xgb.parameters(fd$bst$handle) <- pars + } + } + attr(callback, 'is_pre_iteration') <- TRUE + attr(callback, 'call') <- match.call() + attr(callback, 'name') <- 'cb.reset.parameters' + callback +} + + +#' Callback closure to activate early stopping.
+#' +#' @param stopping_rounds The number of rounds with no improvement in +#' the evaluation metric in order to stop the training. +#' @param maximize whether to maximize the evaluation metric +#' @param metric_name the name of an evaluation column to use as a criteria for early +#' stopping. If not set, the last column would be used. +#' Let's say the test data in \code{watchlist} was labelled as \code{dtest}, +#' and one wants to use the AUC in test data for early stopping regardless of where +#' it is in the \code{watchlist}, then one of the following would need to be set: +#' \code{metric_name='dtest-auc'} or \code{metric_name='dtest_auc'}. +#' All dash '-' characters in metric names are considered equivalent to '_'. +#' @param verbose whether to print the early stopping information. +#' +#' @details +#' This callback function determines the condition for early stopping +#' by setting the \code{stop_condition = TRUE} flag in its calling frame. +#' +#' The following additional fields are assigned to the model's R object: +#' \itemize{ +#' \item \code{best_score} the evaluation score at the best iteration +#' \item \code{best_iteration} at which boosting iteration the best score has occurred (1-based index) +#' \item \code{best_ntreelimit} to use with the \code{ntreelimit} parameter in \code{predict}. +#' It differs from \code{best_iteration} in multiclass or random forest settings. +#' } +#' +#' The Same values are also stored as xgb-attributes: +#' \itemize{ +#' \item \code{best_iteration} is stored as a 0-based iteration index (for interoperability of binary models) +#' \item \code{best_msg} message string is also stored. +#' } +#' +#' At least one data element is required in the evaluation watchlist for early stopping to work. +#' +#' Callback function expects the following values to be set in its calling frame: +#' \code{stop_condition}, +#' \code{bst_evaluation}, +#' \code{rank}, +#' \code{bst} (or \code{bst_folds} and \code{basket}), +#' \code{iteration}, +#' \code{begin_iteration}, +#' \code{end_iteration}, +#' \code{num_parallel_tree}. +#' +#' @seealso +#' \code{\link{callbacks}}, +#' \code{\link{xgb.attr}} +#' +#' @export +cb.early.stop <- function(stopping_rounds, maximize=FALSE, + metric_name=NULL, verbose=TRUE) { + # state variables + best_iteration <- -1 + best_ntreelimit <- -1 + best_score <- Inf + best_msg <- NULL + metric_idx <- 1 + + init <- function(env) { + if (length(env$bst_evaluation) == 0) + stop("For early stopping, watchlist must have at least one element") + + eval_names <- gsub('-', '_', names(env$bst_evaluation)) + if (!is.null(metric_name)) { + metric_idx <<- which(gsub('-', '_', metric_name) == eval_names) + if (length(metric_idx) == 0) + stop("'metric_name' for early stopping is not one of the following:\n", + paste(eval_names, collapse=' '), '\n') + } + if (is.null(metric_name) && + length(env$bst_evaluation) > 1) { + metric_idx <<- length(eval_names) + if (verbose) + cat('Multiple eval metrics are present. 
Will use ', + eval_names[metric_idx], ' for early stopping.\n', sep = '') + } + + metric_name <<- eval_names[metric_idx] + + # maximixe is usually NULL when not set in xgb.train and built-in metrics + if (is.null(maximize)) + maximize <<- ifelse(grepl('(_auc|_map|_ndcg)', metric_name), TRUE, FALSE) + + if (verbose && NVL(env$rank, 0) == 0) + cat("Will train until ", metric_name, " hasn't improved in ", + stopping_rounds, " rounds.\n\n", sep = '') + + best_iteration <<- 1 + if (maximize) best_score <<- -Inf + + env$stop_condition <- FALSE + + if (!is.null(env$bst)) { + if (class(env$bst) != 'xgb.Booster') + stop("'bst' in the parent frame must be an 'xgb.Booster'") + if (!is.null(best_score <- xgb.attr(env$bst$handle, 'best_score'))) { + best_score <<- as.numeric(best_score) + best_iteration <<- as.numeric(xgb.attr(env$bst$handle, 'best_iteration')) + 1 + best_msg <<- as.numeric(xgb.attr(env$bst$handle, 'best_msg')) + } else { + xgb.attributes(env$bst$handle) <- list(best_iteration = best_iteration - 1, + best_score = best_score) + } + } else if (is.null(env$bst_folds) || is.null(env$basket)) { + stop("Parent frame has neither 'bst' nor ('bst_folds' and 'basket')") + } + } + + finalizer <- function(env) { + if (!is.null(env$bst)) { + attr_best_score = as.numeric(xgb.attr(env$bst$handle, 'best_score')) + if (best_score != attr_best_score) + stop("Inconsistent 'best_score' values between the closure state: ", best_score, + " and the xgb.attr: ", attr_best_score) + env$bst$best_iteration = best_iteration + env$bst$best_ntreelimit = best_ntreelimit + env$bst$best_score = best_score + } else { + env$basket$best_iteration <- best_iteration + env$basket$best_ntreelimit <- best_ntreelimit + } + } + + callback <- function(env = parent.frame(), finalize = FALSE) { + if (best_iteration < 0) + init(env) + + if (finalize) + return(finalizer(env)) + + i <- env$iteration + score = env$bst_evaluation[metric_idx] + + if (( maximize && score > best_score) || + (!maximize && score < best_score)) { + + best_msg <<- format.eval.string(i, env$bst_evaluation, env$bst_evaluation_err) + best_score <<- score + best_iteration <<- i + best_ntreelimit <<- best_iteration * env$num_parallel_tree + # save the property to attributes, so they will occur in checkpoint + if (!is.null(env$bst)) { + xgb.attributes(env$bst) <- list( + best_iteration = best_iteration - 1, # convert to 0-based index + best_score = best_score, + best_msg = best_msg, + best_ntreelimit = best_ntreelimit) + } + } else if (i - best_iteration >= stopping_rounds) { + env$stop_condition <- TRUE + env$end_iteration <- i + if (verbose && NVL(env$rank, 0) == 0) + cat("Stopping. Best iteration:\n", best_msg, "\n\n", sep = '') + } + } + attr(callback, 'call') <- match.call() + attr(callback, 'name') <- 'cb.early.stop' + callback +} + + +#' Callback closure for saving a model file. +#' +#' @param save_period save the model to disk after every +#' \code{save_period} iterations; 0 means save the model at the end. +#' @param save_name the name or path for the saved model file. +#' It can contain a \code{\link[base]{sprintf}} formatting specifier +#' to include the integer iteration number in the file name. +#' E.g., with \code{save_name} = 'xgboost_%04d.model', +#' the file saved at iteration 50 would be named "xgboost_0050.model". +#' +#' @details +#' This callback function allows to save an xgb-model file, either periodically after each \code{save_period}'s or at the end. 
+#' +#' Callback function expects the following values to be set in its calling frame: +#' \code{bst}, +#' \code{iteration}, +#' \code{begin_iteration}, +#' \code{end_iteration}. +#' +#' @seealso +#' \code{\link{callbacks}} +#' +#' @export +cb.save.model <- function(save_period = 0, save_name = "xgboost.model") { + + if (save_period < 0) + stop("'save_period' cannot be negative") + + callback <- function(env = parent.frame()) { + if (is.null(env$bst)) + stop("'save_model' callback requires the 'bst' booster object in its calling frame") + + if ((save_period > 0 && (env$iteration - env$begin_iteration) %% save_period == 0) || + (save_period == 0 && env$iteration == env$end_iteration)) + xgb.save(env$bst, sprintf(save_name, env$iteration)) + } + attr(callback, 'call') <- match.call() + attr(callback, 'name') <- 'cb.save.model' + callback +} + + +#' Callback closure for returning cross-validation based predictions. +#' +#' @param save_models a flag for whether to save the folds' models. +#' +#' @details +#' This callback function saves predictions for all of the test folds, +#' and also allows saving the folds' models. +#' +#' It is a "finalizer" callback and it uses early stopping information whenever it is available, +#' thus it must be run after the early stopping callback if early stopping is used. +#' +#' Callback function expects the following values to be set in its calling frame: +#' \code{bst_folds}, +#' \code{basket}, +#' \code{data}, +#' \code{end_iteration}, +#' \code{num_parallel_tree}, +#' \code{num_class}. +#' +#' @return +#' Predictions are returned inside of the \code{pred} element, which is either a vector or a matrix, +#' depending on the number of prediction outputs per data row. The order of predictions corresponds +#' to the order of rows in the original dataset. Note that when a custom \code{folds} list is +#' provided in \code{xgb.cv}, the predictions would only be returned properly when this list is a +#' non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions would not be +#' meaningful when user-provided folds have overlapping indices as in, e.g., random sampling splits. +#' When some of the indices in the training dataset are not included in the user-provided \code{folds}, +#' their prediction value would be \code{NA}.
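To illustrate how the out-of-fold predictions described above are typically obtained, here is a brief usage sketch. It is illustrative rather than part of the patch, and it assumes the agaricus data bundled with the package and the xgb.cv interface introduced later in this changeset; setting prediction = TRUE engages the cb.cv.predict callback automatically.

data(agaricus.train, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
cv <- xgb.cv(params = list(objective = "binary:logistic", max_depth = 2, eta = 1),
             data = dtrain, nrounds = 3, nfold = 5, prediction = TRUE)
# out-of-fold predictions, aligned with the rows of dtrain
str(cv$pred)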
+#' +#' @seealso +#' \code{\link{callbacks}} +#' +#' @export +cb.cv.predict <- function(save_models = FALSE) { + + finalizer <- function(env) { + if (is.null(env$basket) || is.null(env$bst_folds)) + stop("'cb.cv.predict' callback requires 'basket' and 'bst_folds' lists in its calling frame") + + N <- nrow(env$data) + pred <- ifelse(env$num_class > 1, + matrix(NA_real_, N, env$num_class), + rep(NA_real_, N)) + + ntreelimit <- NVL(env$basket$best_ntreelimit, + env$end_iteration * env$num_parallel_tree) + for (fd in env$bst_folds) { + pr <- predict(fd$bst, fd$watchlist[[2]], ntreelimit = ntreelimit, reshape = TRUE) + if (is.matrix(pred)) { + pred[fd$index,] <- pr + } else { + pred[fd$index] <- pr + } + } + env$basket$pred <- pred + if (save_models) { + env$basket$models <- lapply(env$bst_folds, function(fd) { + xgb.attr(fd$bst, 'niter') <- env$end_iteration - 1 + xgb.Booster.check(xgb.handleToBooster(fd$bst), saveraw = TRUE) + }) + } + } + + callback <- function(env = parent.frame(), finalize = FALSE) { + if (finalize) + return(finalizer(env)) + } + attr(callback, 'call') <- match.call() + attr(callback, 'name') <- 'cb.cv.predict' + callback +} + + +# +# Internal utility functions for callbacks ------------------------------------ +# + +# Format the evaluation metric string +format.eval.string <- function(iter, eval_res, eval_err=NULL) { + if (length(eval_res) == 0) + stop('no evaluation results') + enames <- names(eval_res) + if (is.null(enames)) + stop('evaluation results must have names') + iter <- sprintf('[%d]\t', iter) + if (!is.null(eval_err)) { + if (length(eval_res) != length(eval_err)) + stop('eval_res & eval_err lengths mismatch') + res <- paste0(sprintf("%s:%f+%f", enames, eval_res, eval_err), collapse='\t') + } else { + res <- paste0(sprintf("%s:%f", enames, eval_res), collapse='\t') + } + return(paste0(iter, res)) +} + +# Extract callback names from the list of callbacks +callback.names <- function(cb_list) { + unlist(lapply(cb_list, function(x) attr(x, 'name'))) +} + +# Extract callback calls from the list of callbacks +callback.calls <- function(cb_list) { + unlist(lapply(cb_list, function(x) attr(x, 'call'))) +} + +# Add a callback cb to the list and make sure that +# cb.early.stop and cb.cv.predict are at the end of the list +# with cb.cv.predict being the last (when present) +add.cb <- function(cb_list, cb) { + cb_list <- c(cb_list, cb) + names(cb_list) <- callback.names(cb_list) + if ('cb.early.stop' %in% names(cb_list)) { + cb_list <- c(cb_list, cb_list['cb.early.stop']) + # this removes only the first one + cb_list['cb.early.stop'] <- NULL + } + if ('cb.cv.predict' %in% names(cb_list)) { + cb_list <- c(cb_list, cb_list['cb.cv.predict']) + cb_list['cb.cv.predict'] <- NULL + } + cb_list +} + +# Sort callbacks list into categories +categorize.callbacks <- function(cb_list) { + list( + pre_iter = Filter(function(x) { + pre <- attr(x, 'is_pre_iteration') + !is.null(pre) && pre + }, cb_list), + post_iter = Filter(function(x) { + pre <- attr(x, 'is_pre_iteration') + is.null(pre) || !pre + }, cb_list), + finalize = Filter(function(x) { + 'finalize' %in% names(formals(x)) + }, cb_list) + ) +} + +# Check whether all callback functions with names given by 'query_names' are present in the 'cb_list'. 
+has.callbacks <- function(cb_list, query_names) { + if (length(cb_list) < length(query_names)) + return(FALSE) + if (!is.list(cb_list) || + any(sapply(cb_list, class) != 'function')) { + stop('`cb_list` must be a list of callback functions') + } + cb_names <- callback.names(cb_list) + if (!is.character(cb_names) || + length(cb_names) != length(cb_list) || + any(cb_names == "")) { + stop('All callbacks in the `cb_list` must have a non-empty `name` attribute') + } + if (!is.character(query_names) || + length(query_names) == 0 || + any(query_names == "")) { + stop('query_names must be a non-empty vector of non-empty character names') + } + return(all(query_names %in% cb_names)) +} diff --git a/R-package/R/utils.R b/R-package/R/utils.R index 514826b3c414..fb6cf92372df 100644 --- a/R-package/R/utils.R +++ b/R-package/R/utils.R @@ -1,184 +1,224 @@ -#' @importClassesFrom Matrix dgCMatrix dgeMatrix -#' @import methods +# +# This file is for the low-level reusable utility functions +# that are not supposed to be visible to a user. +# -# depends on matrix -.onLoad <- function(libname, pkgname) { - library.dynam("xgboost", pkgname, libname) -} -.onUnload <- function(libpath) { - library.dynam.unload("xgboost", libpath) +# +# General helper utilities ---------------------------------------------------- +# + +# SQL-style NVL shortcut. +NVL <- function(x, val) { + if (is.null(x)) + return(val) + if (is.vector(x)) { + x[is.na(x)] <- val + return(x) + } + if (typeof(x) == 'closure') + return(x) + stop('unsupported type of x for NVL') } -## ----the following are low level iterative functions, not needed if -## you do not want to use them --------------------------------------- +# +# Low-level functions for boosting -------------------------------------------- +# -# iteratively update booster with customized statistics -xgb.iter.boost <- function(booster, dtrain, gpair) { - if (class(booster) != "xgb.Booster.handle") { - stop("xgb.iter.update: first argument must be type xgb.Booster.handle") +# Merges booster params with whatever is provided in ... +# plus runs some checks +check.booster.params <- function(params, ...) { + if (typeof(params) != "list") + stop("params must be a list") + + # in R interface, allow for '.' instead of '_' in parameter names + names(params) <- gsub("\\.", "_", names(params)) + + # merge parameters from the params and the dots-expansion + dot_params <- list(...) + names(dot_params) <- gsub("\\.", "_", names(dot_params)) + if (length(intersect(names(params), + names(dot_params))) > 0) + stop("Same parameters in 'params' and in the call are not allowed. Please check your 'params' list.") + params <- c(params, dot_params) + + # providing a parameter multiple times only makes sense for 'eval_metric' + name_freqs <- table(names(params)) + multi_names <- setdiff(names(name_freqs[name_freqs > 1]), 'eval_metric') + if (length(multi_names) > 0) { + warning("The following parameters were provided multiple times:\n\t", + paste(multi_names, collapse=', '), "\n Only the last value for each of them will be used.\n") + # While xgboost itself would choose the last value for a multi-parameter, + # do some clean-up here because multi-parameters could be used further in R code, and R would + # pick the 1st (not the last) value when multiple elements with the same name are present in a list.
+ for (n in multi_names) { + del_idx <- which(n == names(params)) + del_idx <- del_idx[-length(del_idx)] + params[[del_idx]] <- NULL + } } - if (class(dtrain) != "xgb.DMatrix") { - stop("xgb.iter.update: second argument must be type xgb.DMatrix") + + # for multiclass, expect num_class to be set + if (typeof(params[['objective']]) == "character" && + substr(NVL(params[['objective']], 'x'), 1, 6) == 'multi:') { + if (as.numeric(NVL(params[['num_class']], 0)) < 2) + stop("'num_class' > 1 parameter must be set for multiclass classification") + } + + return(params) +} + + +# Performs some checks related to custom objective function. +# WARNING: has side-effects and can modify 'params' and 'obj' in its calling frame +check.custom.obj <- function(env = parent.frame()) { + if (!is.null(env$params[['objective']]) && !is.null(env$obj)) + stop("Setting objectives in 'params' and 'obj' at the same time is not allowed") + + if (!is.null(env$obj) && typeof(env$obj) != 'closure') + stop("'obj' must be a function") + + # handle the case when custom objective function was provided through params + if (!is.null(env$params[['objective']]) && + typeof(env$params$objective) == 'closure') { + env$obj <- env$params$objective + p <- env$params + p$objective <- NULL + env$params <- p } - .Call("XGBoosterBoostOneIter_R", booster, dtrain, gpair$grad, gpair$hess, PACKAGE = "xgboost") - return(TRUE) } -# iteratively update booster with dtrain +# Performs some checks related to custom evaluation function. +# WARNING: has side-effects and can modify 'params' and 'feval' in its calling frame +check.custom.eval <- function(env = parent.frame()) { + if (!is.null(env$params[['eval_metric']]) && !is.null(env$feval)) + stop("Setting evaluation metrics in 'params' and 'feval' at the same time is not allowed") + + if (!is.null(env$feval) && typeof(env$feval) != 'closure') + stop("'feval' must be a function") + + if (!is.null(env$feval) && is.null(env$maximize)) + stop("Please set 'maximize' to indicate whether the metric needs to be maximized or not") + + # handle a situation when custom eval function was provided through params + if (!is.null(env$params[['eval_metric']]) && + typeof(env$params$eval_metric) == 'closure') { + env$feval <- env$params$eval_metric + p <- env$params + p[ which(names(p) == 'eval_metric') ] <- NULL + env$params <- p + } +} + + +# Update booster with dtrain for an iteration xgb.iter.update <- function(booster, dtrain, iter, obj = NULL) { if (class(booster) != "xgb.Booster.handle") { - stop("xgb.iter.update: first argument must be type xgb.Booster.handle") + stop("first argument type must be xgb.Booster.handle") } if (class(dtrain) != "xgb.DMatrix") { - stop("xgb.iter.update: second argument must be type xgb.DMatrix") + stop("second argument type must be xgb.DMatrix") } if (is.null(obj)) { .Call("XGBoosterUpdateOneIter_R", booster, as.integer(iter), dtrain, PACKAGE = "xgboost") - } else { + } else { pred <- predict(booster, dtrain) gpair <- obj(pred, dtrain) - succ <- xgb.iter.boost(booster, dtrain, gpair) + .Call("XGBoosterBoostOneIter_R", booster, dtrain, gpair$grad, gpair$hess, PACKAGE = "xgboost") } return(TRUE) } -# iteratively evaluate one iteration -xgb.iter.eval <- function(booster, watchlist, iter, feval = NULL, prediction = FALSE) { - if (class(booster) != "xgb.Booster.handle") { - stop("xgb.eval: first argument must be type xgb.Booster") - } - if (typeof(watchlist) != "list") { - stop("xgb.eval: only accepts list of DMatrix as watchlist") - } - for (w in watchlist) { - if (class(w) != 
"xgb.DMatrix") { - stop("xgb.eval: watch list can only contain xgb.DMatrix") - } - } - if (length(watchlist) != 0) { - if (is.null(feval)) { - evnames <- list() - for (i in 1:length(watchlist)) { - w <- watchlist[i] - if (length(names(w)) == 0) { - stop("xgb.eval: name tag must be presented for every elements in watchlist") - } - evnames <- append(evnames, names(w)) - } - msg <- .Call("XGBoosterEvalOneIter_R", booster, as.integer(iter), watchlist, - evnames, PACKAGE = "xgboost") - } else { - msg <- paste("[", iter, "]", sep="") - for (j in 1:length(watchlist)) { - w <- watchlist[j] - if (length(names(w)) == 0) { - stop("xgb.eval: name tag must be presented for every elements in watchlist") - } - preds <- predict(booster, w[[1]]) - ret <- feval(preds, w[[1]]) - msg <- paste(msg, "\t", names(w), "-", ret$metric, ":", ret$value, sep="") - } - } + +# Evaluate one iteration. +# Returns a named vector of evaluation metrics +# with the names in a 'datasetname-metricname' format. +xgb.iter.eval <- function(booster, watchlist, iter, feval = NULL) { + if (class(booster) != "xgb.Booster.handle") + stop("first argument type must be xgb.Booster.handle") + + if (length(watchlist) == 0) + return(NULL) + + evnames <- names(watchlist) + if (is.null(feval)) { + msg <- .Call("XGBoosterEvalOneIter_R", booster, as.integer(iter), watchlist, + as.list(evnames), PACKAGE = "xgboost") + msg <- str_split(msg, '(\\s+|:|\\s+)')[[1]][-1] + res <- as.numeric(msg[c(FALSE,TRUE)]) # even indices are the values + names(res) <- msg[c(TRUE,FALSE)] # odds are the names } else { - msg <- "" - } - if (prediction){ - preds <- predict(booster,watchlist[[2]]) - return(list(msg,preds)) + res <- sapply(seq_along(watchlist), function(j) { + w <- watchlist[[j]] + preds <- predict(booster, w) # predict using all trees + eval_res <- feval(preds, w) + out <- eval_res$value + names(out) <- paste0(evnames[j], "-", eval_res$metric) + out + }) } - return(msg) + return(res) } -#------------------------------------------ -# helper functions for cross validation + +# +# Helper functions for cross validation --------------------------------------- # -xgb.cv.mknfold <- function(dall, nfold, param, stratified, folds) { - if (nfold <= 1) { - stop("nfold must be bigger than 1") + +# Generates random (stratified if needed) CV folds +generate.cv.folds <- function(nfold, nrows, stratified, label, params) { + + # cannot do it for rank + if (exists('objective', where=params) && + is.character(params$objective) && + strtrim(params$objective, 5) == 'rank:') { + stop("\n\tAutomatic generation of CV-folds is not implemented for ranking!\n", + "\tConsider providing pre-computed CV-folds through the 'folds=' parameter.\n") } - if(is.null(folds)) { - if (exists('objective', where=param) && is.character(param$objective) && - strtrim(param[['objective']], 5) == 'rank:') { - stop("\tAutomatic creation of CV-folds is not implemented for ranking!\n", - "\tConsider providing pre-computed CV-folds through the folds parameter.") - } - y <- getinfo(dall, 'label') - randidx <- sample(1 : nrow(dall)) - if (stratified & length(y) == length(randidx)) { - y <- y[randidx] - # - # WARNING: some heuristic logic is employed to identify classification setting! - # - # For classification, need to convert y labels to factor before making the folds, - # and then do stratification by factor levels. - # For regression, leave y numeric and do stratification by quantiles. 
- if (exists('objective', where=param) && is.character(param$objective)) { - # If 'objective' provided in params, assume that y is a classification label - # unless objective is reg:linear - if (param[['objective']] != 'reg:linear') y <- factor(y) - } else { - # If no 'objective' given in params, it means that user either wants to use - # the default 'reg:linear' objective or has provided a custom obj function. - # Here, assume classification setting when y has 5 or less unique values: - if (length(unique(y)) <= 5) y <- factor(y) - } - folds <- xgb.createFolds(y, nfold) + # shuffle + rnd_idx <- sample(1:nrows) + if (stratified && + length(label) == length(rnd_idx)) { + y <- label[rnd_idx] + # WARNING: some heuristic logic is employed to identify classification setting! + # - For classification, need to convert y labels to factor before making the folds, + # and then do stratification by factor levels. + # - For regression, leave y numeric and do stratification by quantiles. + if (exists('objective', where=params) && + is.character(params$objective)) { + # If 'objective' provided in params, assume that y is a classification label + # unless objective is reg:linear + if (params$objective != 'reg:linear') + y <- factor(y) } else { - # make simple non-stratified folds - kstep <- length(randidx) %/% nfold - folds <- list() - for (i in 1:(nfold - 1)) { - folds[[i]] <- randidx[1:kstep] - randidx <- setdiff(randidx, folds[[i]]) - } - folds[[nfold]] <- randidx - } - } - ret <- list() - for (k in 1:nfold) { - dtest <- slice(dall, folds[[k]]) - didx <- c() - for (i in 1:nfold) { - if (i != k) { - didx <- append(didx, folds[[i]]) - } - } - dtrain <- slice(dall, didx) - bst <- xgb.Booster(param, list(dtrain, dtest)) - watchlist <- list(train=dtrain, test=dtest) - ret[[k]] <- list(dtrain=dtrain, booster=bst, watchlist=watchlist, index=folds[[k]]) - } - return (ret) -} - -xgb.cv.aggcv <- function(res, showsd = TRUE) { - header <- res[[1]] - ret <- header[1] - for (i in 2:length(header)) { - kv <- strsplit(header[i], ":")[[1]] - ret <- paste(ret, "\t", kv[1], ":", sep="") - stats <- c() - stats[1] <- as.numeric(kv[2]) - for (j in 2:length(res)) { - tkv <- strsplit(res[[j]][i], ":")[[1]] - stats[j] <- as.numeric(tkv[2]) + # If no 'objective' given in params, it means that user either wants to use + # the default 'reg:linear' objective or has provided a custom obj function. + # Here, assume classification setting when y has 5 or less unique values: + if (length(unique(y)) <= 5) + y <- factor(y) } - ret <- paste(ret, sprintf("%f", mean(stats)), sep="") - if (showsd) { - ret <- paste(ret, sprintf("+%f", stats::sd(stats)), sep="") + folds <- xgb.createFolds(y, nfold) + } else { + # make simple non-stratified folds + kstep <- length(rnd_idx) %/% nfold + folds <- list() + for (i in 1:(nfold - 1)) { + folds[[i]] <- rnd_idx[1:kstep] + rnd_idx <- rnd_idx[-(1:kstep)] } + folds[[nfold]] <- rnd_idx } - return (ret) + return(folds) } -# Shamelessly copied from caret::createFolds -# and simplified by always returning an unnamed list of test indices +# Creates CV folds stratified by the values of y. +# It was borrowed from caret::createFolds and simplified +# by always returning an unnamed list of fold indices. xgb.createFolds <- function(y, k = 10) { - if(is.numeric(y)) { + if (is.numeric(y)) { ## Group the numeric data based on their magnitudes ## and sample within those groups. 
@@ -197,7 +237,7 @@ xgb.createFolds <- function(y, k = 10) include.lowest = TRUE) } - if(k < length(y)) { + if (k < length(y)) { ## reset levels so that the possible levels and ## the levels in the vector are the same y <- factor(as.character(y)) @@ -207,19 +247,83 @@ xgb.createFolds <- function(y, k = 10) ## For each class, balance the fold allocation as far ## as possible, then resample the remainder. ## The final assignment of folds is also randomized. - for(i in 1:length(numInClass)) { + for (i in 1:length(numInClass)) { ## create a vector of integers from 1:k as many times as possible without ## going over the number of samples in the class. Note that if the number ## of samples in a class is less than k, nothing is producd here. seqVector <- rep(1:k, numInClass[i] %/% k) ## add enough random integers to get length(seqVector) == numInClass[i] - if(numInClass[i] %% k > 0) seqVector <- c(seqVector, sample(1:k, numInClass[i] %% k)) + if (numInClass[i] %% k > 0) seqVector <- c(seqVector, sample(1:k, numInClass[i] %% k)) ## shuffle the integers for fold assignment and assign to this classes's data foldVector[which(y == dimnames(numInClass)$y[i])] <- sample(seqVector) } - } else foldVector <- seq(along = y) + } else { + foldVector <- seq(along = y) + } out <- split(seq(along = y), foldVector) names(out) <- NULL out } + + +# +# Deprectaion notice utilities ------------------------------------------------ +# + +#' Deprecation notices. +#' +#' At this time, some of the parameter names were changed in order to make the code style more uniform. +#' The deprecated parameters would be removed in the next release. +#' +#' To see all the current deprecated and new parameters, check the \code{xgboost:::depr_par_lut} table. +#' +#' A deprecation warning is shown when any of the deprecated parameters is used in a call. +#' An additional warning is shown when there was a partial match to a deprecated parameter +#' (as R is able to partially match parameter names). +#' +#' @name xgboost-deprecated +NULL + +# Lookup table for the deprecated parameters bookkeeping +depr_par_lut <- matrix(c( + 'print.every.n', 'print_every_n', + 'early.stop.round', 'early_stopping_rounds', + 'training.data', 'data', + 'with.stats', 'with_stats', + 'numberOfClusters', 'n_clusters', + 'features.keep', 'features_keep', + 'plot.height','plot_height', + 'plot.width','plot_width', + 'dummy', 'DUMMY' +), ncol=2, byrow = TRUE) +colnames(depr_par_lut) <- c('old', 'new') + +# Checks the dot-parameters for deprecated names +# (including partial matching), gives a deprecation warning, +# and sets new parameters to the old parameters' values within its parent frame. +# WARNING: has side-effects +check.deprecation <- function(..., env = parent.frame()) { + pars <- list(...) + # exact and partial matches + all_match <- pmatch(names(pars), depr_par_lut[,1]) + # indices of matched pars' names + idx_pars <- which(!is.na(all_match)) + if (length(idx_pars) == 0) return() + # indices of matched LUT rows + idx_lut <- all_match[idx_pars] + # which of idx_lut were the exact matches? 
+ ex_match <- depr_par_lut[idx_lut,1] %in% names(pars) + for (i in seq_along(idx_pars)) { + pars_par <- names(pars)[idx_pars[i]] + old_par <- depr_par_lut[idx_lut[i], 1] + new_par <- depr_par_lut[idx_lut[i], 2] + if (!ex_match[i]) { + warning("'", pars_par, "' was partially matched to '", old_par,"'") + } + .Deprecated(new_par, old=old_par, package = 'xgboost') + if (new_par != 'NULL') { + eval(parse(text = paste(new_par, '<-', pars[[pars_par]])), envir = env) + } + } +} diff --git a/R-package/R/xgb.Booster.R b/R-package/R/xgb.Booster.R index 3005d3ea6d18..34a88dca6a83 100644 --- a/R-package/R/xgb.Booster.R +++ b/R-package/R/xgb.Booster.R @@ -1,22 +1,22 @@ # Construct a Booster from cachelist # internal utility function xgb.Booster <- function(params = list(), cachelist = list(), modelfile = NULL) { - if (typeof(cachelist) != "list") { + if (typeof(cachelist) != "list" || + any(sapply(cachelist, class) != 'xgb.DMatrix')) { stop("xgb.Booster only accepts list of DMatrix as cachelist") } - for (dm in cachelist) { - if (class(dm) != "xgb.DMatrix") { - stop("xgb.Booster only accepts list of DMatrix as cachelist") - } - } + handle <- .Call("XGBoosterCreate_R", cachelist, PACKAGE = "xgboost") if (!is.null(modelfile)) { if (typeof(modelfile) == "character") { .Call("XGBoosterLoadModel_R", handle, modelfile, PACKAGE = "xgboost") } else if (typeof(modelfile) == "raw") { .Call("XGBoosterLoadModelFromRaw_R", handle, modelfile, PACKAGE = "xgboost") + } else if (class(modelfile) == "xgb.Booster") { + modelfile <- xgb.Booster.check(modelfile, saveraw=TRUE) + .Call("XGBoosterLoadModelFromRaw_R", handle, modelfile$raw, PACKAGE = "xgboost") } else { - stop("modelfile must be character or raw vector") + stop("modelfile must be either character filename, or raw booster dump, or xgb.Booster object") } } class(handle) <- "xgb.Booster.handle" @@ -28,8 +28,7 @@ xgb.Booster <- function(params = list(), cachelist = list(), modelfile = NULL) { # Convert xgb.Booster.handle to xgb.Booster # internal utility function -xgb.handleToBooster <- function(handle, raw = NULL) -{ +xgb.handleToBooster <- function(handle, raw = NULL) { bst <- list(handle = handle, raw = raw) class(bst) <- "xgb.Booster" return(bst) @@ -43,7 +42,7 @@ xgb.get.handle <- function(object) { xgb.Booster.handle = object, stop("argument must be of either xgb.Booster or xgb.Booster.handle class") ) - if (is.null(handle) | .Call("XGCheckNullPtr_R", handle, PACKAGE="xgboost")) { + if (is.null(handle) || .Call("XGCheckNullPtr_R", handle, PACKAGE="xgboost")) { stop("invalid xgb.Booster.handle") } handle @@ -51,8 +50,10 @@ xgb.get.handle <- function(object) { # Check whether an xgb.Booster object is complete # internal utility function -xgb.Booster.check <- function(bst, saveraw = TRUE) -{ +xgb.Booster.check <- function(bst, saveraw = TRUE) { + if (class(bst) != "xgb.Booster") + stop("argument type must be xgb.Booster") + isnull <- is.null(bst$handle) if (!isnull) { isnull <- .Call("XGCheckNullPtr_R", bst$handle, PACKAGE="xgboost") @@ -66,80 +67,145 @@ xgb.Booster.check <- function(bst, saveraw = TRUE) return(bst) } + #' Predict method for eXtreme Gradient Boosting model #' #' Predicted values based on either xgboost model or model handle object. #' #' @param object Object of class \code{xgb.Booster} or \code{xgb.Booster.handle} -#' @param newdata takes \code{matrix}, \code{dgCMatrix}, local data file or -#' \code{xgb.DMatrix}. -#' @param missing Missing is only used when input is dense matrix, pick a float -#' value that represents missing value. 
Sometime a data use 0 or other extreme value to represents missing values. -#' @param outputmargin whether the prediction should be shown in the original -#' value of sum of functions, when outputmargin=TRUE, the prediction is -#' untransformed margin value. In logistic regression, outputmargin=T will -#' output value before logistic transformation. -#' @param ntreelimit limit number of trees used in prediction, this parameter is -#' only valid for gbtree, but not for gblinear. set it to be value bigger -#' than 0. It will use all trees by default. -#' @param predleaf whether predict leaf index instead. If set to TRUE, the output will be a matrix object. -#' @param ... Parameters pass to \code{predict.xgb.Booster} +#' @param newdata takes \code{matrix}, \code{dgCMatrix}, local data file or \code{xgb.DMatrix}. +#' @param missing Missing is only used when input is dense matrix. Pick a float value that represents +#' missing values in data (e.g., sometimes 0 or some other extreme value is used). +#' @param outputmargin whether the prediction should be returned in the form of the original untransformed +#' sum of predictions from boosting iterations' results. E.g., setting \code{outputmargin=TRUE} for +#' logistic regression would result in predictions for log-odds instead of probabilities. +#' @param ntreelimit limit the number of the model's trees or boosting iterations used in prediction (see Details). +#' It will use all the trees by default (\code{NULL} value). +#' @param predleaf whether predict leaf index instead. +#' @param reshape whether to reshape the vector of predictions to a matrix form when there are several +#' prediction outputs per case. This option has no effect when \code{predleaf = TRUE}. +#' @param ... Parameters passed to \code{predict.xgb.Booster} +#' #' @details -#' The option \code{ntreelimit} purpose is to let the user train a model with lots -#' of trees but use only the first trees for prediction to avoid overfitting -#' (without having to train a new model with less trees). +#' Note that \code{ntreelimit} is not necessarily equal to the number of boosting iterations +#' and it is not necessarily equal to the number of trees in a model. +#' E.g., in a random forest-like model, \code{ntreelimit} would limit the number of trees. +#' For multiclass classification, there are multiple trees per iteration, +#' but \code{ntreelimit} limits the number of boosting iterations. +#' +#' Also note that \code{ntreelimit} would currently do nothing for predictions from gblinear, +#' since gblinear doesn't keep its boosting history. +#' +#' One possible practical application of the \code{predleaf} option is to use the model +#' as a generator of new features which capture non-linearity and interactions, +#' e.g., as implemented in \code{\link{xgb.create.features}}. +#' +#' @return +#' For regression or binary classification, it returns a vector of length \code{nrows(newdata)}. +#' For multiclass classification, either a \code{num_class * nrows(newdata)} vector or +#' a \code{(nrows(newdata), num_class)} dimension matrix is returned, depending on +#' the \code{reshape} value. +#' +#' When \code{predleaf = TRUE}, the output is a matrix object with the +#' number of columns corresponding to the number of trees. #' -#' The option \code{predleaf} purpose is inspired from §3.1 of the paper -#' \code{Practical Lessons from Predicting Clicks on Ads at Facebook}. -#' The idea is to use the model as a generator of new features which capture non linear link -#' from original features.
+#' @seealso +#' \code{\link{xgb.train}}. #' #' @examples +#' ## binary classification: +#' #' data(agaricus.train, package='xgboost') #' data(agaricus.test, package='xgboost') #' train <- agaricus.train #' test <- agaricus.test #' -#' bst <- xgboost(data = train$data, label = train$label, max.depth = 2, -#' eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2, +#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") +#' # use all trees by default #' pred <- predict(bst, test$data) +#' # use only the 1st tree +#' pred <- predict(bst, test$data, ntreelimit = 1) +#' +#' +#' ## multiclass classification in iris dataset: +#' +#' lb <- as.numeric(iris$Species) - 1 +#' num_class <- 3 +#' set.seed(11) +#' bst <- xgboost(data = as.matrix(iris[, -5]), label = lb, +#' max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10, subsample = 0.5, +#' objective = "multi:softprob", num_class = num_class) +#' # predict for softmax returns num_class probability numbers per case: +#' pred <- predict(bst, as.matrix(iris[, -5])) +#' str(pred) +#' # reshape it to a num_class-columns matrix +#' pred <- matrix(pred, ncol=num_class, byrow=TRUE) +#' # convert the probabilities to softmax labels +#' pred_labels <- max.col(pred) - 1 +#' # the following should result in the same error as seen in the last iteration +#' sum(pred_labels != lb)/length(lb) +#' +#' # compare that to the predictions from softmax: +#' set.seed(11) +#' bst <- xgboost(data = as.matrix(iris[, -5]), label = lb, +#' max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10, subsample = 0.5, +#' objective = "multi:softmax", num_class = num_class) +#' pred <- predict(bst, as.matrix(iris[, -5])) +#' str(pred) +#' all.equal(pred, pred_labels) +#' # prediction from using only 5 iterations should result +#' # in the same error as seen in iteration 5: +#' pred5 <- predict(bst, as.matrix(iris[, -5]), ntreelimit=5) +#' sum(pred5 != lb)/length(lb) +#' +#' +#' ## random forest-like model of 25 trees for binary classification: +#' +#' set.seed(11) +#' bst <- xgboost(data = train$data, label = train$label, max_depth = 5, +#' nthread = 2, nrounds = 1, objective = "binary:logistic", +#' num_parallel_tree = 25, subsample = 0.6, colsample_bytree = 0.1) +#' # Inspect the prediction error vs number of trees: +#' lb <- test$label +#' dtest <- xgb.DMatrix(test$data, label=lb) +#' err <- sapply(1:25, function(n) { +#' pred <- predict(bst, dtest, ntreelimit=n) +#' sum((pred > 0.5) != lb)/length(lb) +#' }) +#' plot(err, type='l', ylim=c(0,0.1), xlab='#trees') +#' #' @rdname predict.xgb.Booster #' @export predict.xgb.Booster <- function(object, newdata, missing = NA, - outputmargin = FALSE, ntreelimit = NULL, predleaf = FALSE) { - if (class(object) != "xgb.Booster"){ - stop("predict: model in prediction must be of class xgb.Booster") - } else { - object <- xgb.Booster.check(object, saveraw = FALSE) - } - if (class(newdata) != "xgb.DMatrix") { + outputmargin = FALSE, ntreelimit = NULL, predleaf = FALSE, reshape = FALSE) { + + object <- xgb.Booster.check(object, saveraw = FALSE) + if (class(newdata) != "xgb.DMatrix") newdata <- xgb.DMatrix(newdata, missing = missing) - } - if (is.null(ntreelimit)) { - ntreelimit <- 0 - } else { - if (ntreelimit < 1){ - stop("predict: ntreelimit must be equal to or greater than 1") - } - } - option <- 0 - if (outputmargin) { - option <- option + 1 - } - if (predleaf) { - option <- option + 2 - } - ret <- .Call("XGBoosterPredict_R", object$handle, newdata, 
as.integer(option), + if (is.null(ntreelimit)) + ntreelimit <- NVL(object$best_ntreelimit, 0) + if (ntreelimit < 0) + stop("ntreelimit cannot be negative") + + option <- 0L + 1L * as.logical(outputmargin) + 2L * as.logical(predleaf) + + ret <- .Call("XGBoosterPredict_R", object$handle, newdata, option[1], as.integer(ntreelimit), PACKAGE = "xgboost") + + if (length(ret) %% nrow(newdata) != 0) + stop("prediction length ", length(ret)," is not multiple of nrows(newdata) ", nrow(newdata)) + npred_per_case <- length(ret) / nrow(newdata) + if (predleaf){ - len <- getinfo(newdata, "nrow") - if (length(ret) == len){ - ret <- matrix(ret,ncol = 1) - } else { - ret <- matrix(ret, ncol = len) - ret <- t(ret) - } + len <- nrow(newdata) + ret <- if (length(ret) == len) { + matrix(ret, ncol = 1) + } else { + t(matrix(ret, ncol = len)) + } + } else if (reshape && npred_per_case > 1) { + ret <- matrix(ret, ncol = length(ret) / nrow(newdata), byrow = TRUE) } return(ret) } @@ -183,9 +249,13 @@ predict.xgb.Booster.handle <- function(object, ...) { #' #' The attribute setters would usually work more efficiently for \code{xgb.Booster.handle} #' than for \code{xgb.Booster}, since only just a handle (pointer) would need to be copied. +#' That would only matter if attributes need to be set many times. +#' Note, however, that when feeding a handle of an \code{xgb.Booster} object to the attribute setters, +#' the raw model cache of an \code{xgb.Booster} object would not be automatically updated, +#' and it would be user's responsibility to call \code{xgb.save.raw} to update it. #' #' The \code{xgb.attributes<-} setter either updates the existing or adds one or several attributes, -#' but doesn't delete the existing attributes which don't have their names in \code{names(attributes)}. +#' but it doesn't delete the other existing attributes. #' #' @return #' \code{xgb.attr} returns either a string value of an attribute @@ -198,8 +268,8 @@ predict.xgb.Booster.handle <- function(object, ...) { #' data(agaricus.train, package='xgboost') #' train <- agaricus.train #' -#' bst <- xgboost(data = train$data, label = train$label, max.depth = 2, -#' eta = 1, nthread = 2, nround = 2, objective = "binary:logistic") +#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2, +#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") #' #' xgb.attr(bst, "my_attribute") <- "my attribute value" #' print(xgb.attr(bst, "my_attribute")) @@ -293,8 +363,8 @@ xgb.attributes <- function(object) { #' data(agaricus.train, package='xgboost') #' train <- agaricus.train #' -#' bst <- xgboost(data = train$data, label = train$label, max.depth = 2, -#' eta = 1, nthread = 2, nround = 2, objective = "binary:logistic") +#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2, +#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") #' #' xgb.parameters(bst) <- list(eta = 0.1) #' @@ -317,3 +387,99 @@ xgb.attributes <- function(object) { } object } + +# Extract # of trees in a model +# TODO: either add a getter to C-interface, or simply set an 'ntree' attribute after each iteration +# internal utility function +xgb.ntree <- function(bst) { + length(grep('^booster', xgb.dump(bst))) +} + + +#' Print xgb.Booster +#' +#' Print information about xgb.Booster. +#' +#' @param x an xgb.Booster object +#' @param verbose whether to print detailed data (e.g., attribute values) +#' @param ... 
not currently used +#' +#' @examples +#' data(agaricus.train, package='xgboost') +#' train <- agaricus.train +#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2, +#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") +#' attr(bst, 'myattr') <- 'memo' +#' +#' print(bst) +#' print(bst, verbose=TRUE) +#' +#' @export +print.xgb.Booster <- function(x, verbose=FALSE, ...) { + cat('##### xgb.Booster\n') + + if (is.null(x$handle) || .Call("XGCheckNullPtr_R", x$handle, PACKAGE="xgboost")) { + cat("handle is invalid\n") + return(x) + } + + cat('raw: ') + if (!is.null(x$raw)) { + cat(format(object.size(x$raw), units="auto"), '\n') + } else { + cat('NULL\n') + } + if (!is.null(x$call)) { + cat('call:\n ') + print(x$call) + } + + if (!is.null(x$params)) { + cat('params (as set within xgb.train):\n') + cat( ' ', + paste(names(x$params), + paste0('"', unlist(x$params), '"'), + sep=' = ', collapse=', '), '\n', sep='') + } + # TODO: need an interface to access all the xgboosts parameters + + attrs <- xgb.attributes(x) + if (length(attrs) > 0) { + cat('xgb.attributes:\n') + if (verbose) { + cat( paste(paste0(' ',names(attrs)), + paste0('"', unlist(attrs), '"'), + sep=' = ', collapse='\n'), '\n', sep='') + } else { + cat(' ', paste(names(attrs), collapse=', '), '\n', sep='') + } + } + + if (!is.null(x$callbacks) && length(x$callbacks) > 0) { + cat('callbacks:\n') + lapply(callback.calls(x$callbacks), function(x) { + cat(' ') + print(x) + }) + } + + cat('niter: ', x$niter, '\n', sep='') + # TODO: uncomment when faster xgb.ntree is implemented + #cat('ntree: ', xgb.ntree(x), '\n', sep='') + + for (n in setdiff(names(x), c('handle', 'raw', 'call', 'params', 'callbacks','evaluation_log','niter'))) { + if (is.atomic(x[[n]])) { + cat(n, ': ', x[[n]], '\n', sep='') + } else { + cat(n, ':\n\t', sep='') + print(x[[n]]) + } + } + + if (!is.null(x$evaluation_log)) { + cat('evaluation_log:\n') + print(x$evaluation_log, row.names = FALSE, topn = 2) + } + + invisible(x) +} diff --git a/R-package/R/xgb.DMatrix.R b/R-package/R/xgb.DMatrix.R index c5f4fed72576..36f7bfd1a03f 100644 --- a/R-package/R/xgb.DMatrix.R +++ b/R-package/R/xgb.DMatrix.R @@ -187,18 +187,18 @@ getinfo <- function(object, ...) UseMethod("getinfo") #' @rdname getinfo #' @export getinfo.xgb.DMatrix <- function(object, name) { - if (typeof(name) != "character") { - stop("getinfo: name must be character") - } - if (name != "label" && name != "weight" && - name != "base_margin" && name != "nrow") { - stop(paste("getinfo: unknown info name", name)) + if (typeof(name) != "character" || + length(name) != 1 || + !name %in% c('label', 'weight', 'base_margin', 'nrow')) { + stop("getinfo: name must one of the following\n", + " 'label', 'weight', 'base_margin', 'nrow'") } if (name != "nrow"){ ret <- .Call("XGDMatrixGetInfo_R", object, name, PACKAGE = "xgboost") } else { ret <- nrow(object) } + if (length(ret) == 0) return(NULL) return(ret) } diff --git a/R-package/R/xgb.DMatrix.save.R b/R-package/R/xgb.DMatrix.save.R index 63a0be6919bf..9ceec801a97a 100644 --- a/R-package/R/xgb.DMatrix.save.R +++ b/R-package/R/xgb.DMatrix.save.R @@ -2,8 +2,8 @@ #' #' Save xgb.DMatrix object to binary file #' -#' @param DMatrix the DMatrix object -#' @param fname the name of the binary file. +#' @param dmatrix the \code{xgb.DMatrix} object +#' @param fname the name of the file to write. 
#' #' @examples #' data(agaricus.train, package='xgboost') @@ -12,15 +12,12 @@ #' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data') #' dtrain <- xgb.DMatrix('xgb.DMatrix.data') #' @export -xgb.DMatrix.save <- function(DMatrix, fname) { - if (typeof(fname) != "character") { - stop("xgb.save: fname must be character") - } - if (class(DMatrix) == "xgb.DMatrix") { - .Call("XGDMatrixSaveBinary_R", DMatrix, fname, as.integer(FALSE), - PACKAGE = "xgboost") - return(TRUE) - } - stop("xgb.DMatrix.save: the input must be xgb.DMatrix") - return(FALSE) +xgb.DMatrix.save <- function(dmatrix, fname) { + if (typeof(fname) != "character") + stop("fname must be character") + if (class(dmatrix) != "xgb.DMatrix") + stop("the input data must be xgb.DMatrix") + + .Call("XGDMatrixSaveBinary_R", dmatrix, fname, 0L, PACKAGE = "xgboost") + return(TRUE) } diff --git a/R-package/R/xgb.create.features.R b/R-package/R/xgb.create.features.R index bd913a81c1de..1e5ae75a1cd2 100644 --- a/R-package/R/xgb.create.features.R +++ b/R-package/R/xgb.create.features.R @@ -2,12 +2,9 @@ #' #' May improve the learning by adding new features to the training data based on the decision trees from a previously learned model. #' -#' @importFrom magrittr %>% -#' @importFrom Matrix cBind -#' @importFrom Matrix sparse.model.matrix -#' #' @param model decision tree boosting model learned on the original data -#' @param training.data original data (usually provided as a \code{dgCMatrix} matrix) +#' @param data original data (usually provided as a \code{dgCMatrix} matrix) +#' @param ... currently not used #' #' @return \code{dgCMatrix} matrix including both the original data and the new features. #' @@ -54,7 +51,7 @@ #' dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label) #' dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label) #' -#' param <- list(max.depth=2, eta=1, silent=1, objective='binary:logistic') +#' param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic') #' nround = 4 #' #' bst = xgb.train(params = param, data = dtrain, nrounds = nround, nthread = 2) @@ -79,13 +76,14 @@ #' cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now", accuracy.after, "!\n")) #' #' @export -xgb.create.features <- function(model, training.data){ - pred_with_leaf = predict(model, training.data, predleaf = TRUE) +xgb.create.features <- function(model, data, ...){ + check.deprecation(...) + pred_with_leaf = predict(model, data, predleaf = TRUE) cols <- list() for(i in 1:length(trees)){ # max is not the real max but it s not important for the purpose of adding features - leaf.id <- sort(unique(pred_with_leaf[,i])) - cols[[i]] <- factor(x = pred_with_leaf[,i], level = leaf.id) + leaf_id <- sort(unique(pred_with_leaf[,i])) + cols[[i]] <- factor(x = pred_with_leaf[,i], level = leaf_id) } - cBind(training.data, sparse.model.matrix( ~ . -1, as.data.frame(cols))) -} \ No newline at end of file + cBind(data, sparse.model.matrix( ~ . 
-1, as.data.frame(cols))) +} diff --git a/R-package/R/xgb.cv.R b/R-package/R/xgb.cv.R index c61cdbc5b172..e3a84cec01c7 100644 --- a/R-package/R/xgb.cv.R +++ b/R-package/R/xgb.cv.R @@ -2,17 +2,6 @@ #' #' The cross valudation function of xgboost #' -#' @importFrom data.table data.table -#' @importFrom data.table as.data.table -#' @importFrom magrittr %>% -#' @importFrom data.table := -#' @importFrom data.table rbindlist -#' @importFrom stringr str_extract_all -#' @importFrom stringr str_extract -#' @importFrom stringr str_split -#' @importFrom stringr str_replace -#' @importFrom stringr str_match -#' #' @param params the list of parameters. Commonly used ones are: #' \itemize{ #' \item \code{objective} objective function, common ones are @@ -21,21 +10,23 @@ #' \item \code{binary:logistic} logistic regression for classification #' } #' \item \code{eta} step size of each boosting step -#' \item \code{max.depth} maximum depth of the tree +#' \item \code{max_depth} maximum depth of the tree #' \item \code{nthread} number of thread used in training, if not set, all threads are used #' } #' -#' See \link{xgb.train} for further details. +#' See \code{\link{xgb.train}} for further details. #' See also demo/ for walkthrough example in R. #' @param data takes an \code{xgb.DMatrix} or \code{Matrix} as the input. #' @param nrounds the max number of iterations #' @param nfold the original dataset is randomly partitioned into \code{nfold} equal size subsamples. -#' @param label option field, when data is \code{Matrix} -#' @param missing Missing is only used when input is dense matrix, pick a float -#' value that represents missing value. Sometime a data use 0 or other extreme value to represents missing values. -#' @param prediction A logical value indicating whether to return the prediction vector. -#' @param showsd \code{boolean}, whether show standard deviation of cross validation -#' @param metrics, list of evaluation metrics to be used in corss validation, +#' @param label vector of response values. Should be provided only when data is \code{DMatrix}. +#' @param missing is only used when input is a dense matrix. By default is set to NA, which means +#' that NA values should be considered as 'missing' by the algorithm. +#' Sometimes, 0 or other extreme value might be used to represent missing values. +#' @param prediction A logical value indicating whether to return the test fold predictions +#' from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callback. +#' @param showsd \code{boolean}, whether to show standard deviation of cross validation +#' @param metrics, list of evaluation metrics to be used in cross validation, #' when it is not specified, the evaluation metric is chosen according to objective function. #' Possible options are: #' \itemize{ @@ -46,32 +37,33 @@ #' \item \code{merror} Exact matching error, used to evaluate multi-class classification #' } #' @param obj customized objective function. Returns gradient and second order -#' gradient with given prediction and dtrain. +#' gradient with given prediction and dtrain. #' @param feval custimized evaluation function. Returns -#' \code{list(metric='metric-name', value='metric-value')} with given -#' prediction and dtrain. -#' @param stratified \code{boolean} whether sampling of folds should be stratified by the values of labels in \code{data} -#' @param folds \code{list} provides a possibility of using a list of pre-defined CV folds (each element must be a vector of fold's indices). 
-#' If folds are supplied, the nfold and stratified parameters would be ignored. +#' \code{list(metric='metric-name', value='metric-value')} with given +#' prediction and dtrain. +#' @param stratified a \code{boolean} indicating whether sampling of folds should be stratified +#' by the values of outcome labels. +#' @param folds \code{list} provides a possibility to use a list of pre-defined CV folds +#' (each element must be a vector of test fold's indices). When folds are supplied, +#' the \code{nfold} and \code{stratified} parameters are ignored. #' @param verbose \code{boolean}, print the statistics during the process -#' @param print.every.n Print every N progress messages when \code{verbose>0}. Default is 1 which means all messages are printed. -#' @param early.stop.round If \code{NULL}, the early stopping function is not triggered. -#' If set to an integer \code{k}, training with a validation set will stop if the performance -#' keeps getting worse consecutively for \code{k} rounds. -#' @param maximize If \code{feval} and \code{early.stop.round} are set, then \code{maximize} must be set as well. -#' \code{maximize=TRUE} means the larger the evaluation score the better. -#' +#' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}. +#' Default is 1 which means all messages are printed. This parameter is passed to the +#' \code{\link{cb.print.evaluation}} callback. +#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered. +#' If set to an integer \code{k}, training with a validation set will stop if the performance +#' doesn't improve for \code{k} rounds. +#' Setting this parameter engages the \code{\link{cb.early.stop}} callback. +#' @param maximize If \code{feval} and \code{early_stopping_rounds} are set, +#' then this parameter must be set as well. +#' When it is \code{TRUE}, it means the larger the evaluation score the better. +#' This parameter is passed to the \code{\link{cb.early.stop}} callback. +#' @param callbacks a list of callback functions to perform various task during boosting. +#' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the +#' parameters' values. User can provide either existing or their own callback methods in order +#' to customize the training process. #' @param ... other parameters to pass to \code{params}. #' -#' @return -#' If \code{prediction = TRUE}, a list with the following elements is returned: -#' \itemize{ -#' \item \code{dt} a \code{data.table} with each mean and standard deviation stat for training set and test set -#' \item \code{pred} an array or matrix (for multiclass classification) with predictions for each CV-fold for the model having been trained on the data in all other folds. -#' } -#' -#' If \code{prediction = FALSE}, just a \code{data.table} with each mean and standard deviation stat for training set and test set is returned. -#' #' @details #' The original sample is randomly partitioned into \code{nfold} equal size subsamples. #' @@ -83,168 +75,227 @@ #' #' Adapted from \url{http://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29#k-fold_cross-validation} #' +#' @return +#' An object of class \code{xgb.cv.synchronous} with the following elements: +#' \itemize{ +#' \item \code{call} a function call. +#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not +#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback. 
+#' \item \code{callbacks} callback functions that were either automatically assigned or +#' explicitly passed. +#' \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the +#' first column corresponding to iteration number and the rest corresponding to the +#' CV-based evaluation means and standard deviations for the training and test CV-sets. +#' It is created by the \code{\link{cb.evaluation.log}} callback. +#' \item \code{niter} number of boosting iterations. +#' \item \code{folds} the list of CV folds' indices - either those passed through the \code{folds} +#' parameter or randomly generated. +#' \item \code{best_iteration} iteration number with the best evaluation metric value +#' (only available with early stopping). +#' \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration, +#' which could further be used in \code{predict} method +#' (only available with early stopping). +#' \item \code{pred} CV prediction values available when \code{prediction} is set. +#' It is either a vector or a matrix (see \code{\link{cb.cv.predict}}). +#' \item \code{models} a list of the CV folds' models. It is only available with the explicit +#' setting of the \code{cb.cv.predict(save_models = TRUE)} callback. +#' } +#' #' @examples #' data(agaricus.train, package='xgboost') #' dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) -#' history <- xgb.cv(data = dtrain, nround=3, nthread = 2, nfold = 5, metrics=list("rmse","auc"), -#' max.depth =3, eta = 1, objective = "binary:logistic") -#' print(history) +#' cv <- xgb.cv(data = dtrain, nrounds = 3, nthread = 2, nfold = 5, metrics = list("rmse","auc"), +#' max_depth = 3, eta = 1, objective = "binary:logistic") +#' print(cv) +#' print(cv, verbose=TRUE) +#' #' @export xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing = NA, prediction = FALSE, showsd = TRUE, metrics=list(), - obj = NULL, feval = NULL, stratified = TRUE, folds = NULL, verbose = T, print.every.n=1L, - early.stop.round = NULL, maximize = NULL, ...) { - if (typeof(params) != "list") { - stop("xgb.cv: first argument params must be list") - } - if(!is.null(folds)) { - if(class(folds) != "list" | length(folds) < 2) { - stop("folds must be a list with 2 or more elements that are vectors of indices for each CV-fold") - } - nfold <- length(folds) - } - if (nfold <= 1) { - stop("nfold must be bigger than 1") - } - dtrain <- xgb.get.DMatrix(data, label, missing) - dot.params <- list(...) - nms.params <- names(params) - nms.dot.params <- names(dot.params) - if (length(intersect(nms.params,nms.dot.params)) > 0) - stop("Duplicated defined term in parameters. Please check your list of params.") - params <- append(params, dot.params) - params <- append(params, list(silent=1)) - for (mc in metrics) { - params <- append(params, list("eval_metric"=mc)) - } + obj = NULL, feval = NULL, stratified = TRUE, folds = NULL, + verbose = TRUE, print_every_n=1L, + early_stopping_rounds = NULL, maximize = NULL, callbacks = list(), ...)
{ - # customized objective and evaluation metric interface - if (!is.null(params$objective) && !is.null(obj)) - stop("xgb.cv: cannot assign two different objectives") - if (!is.null(params$objective)) - if (class(params$objective) == 'function') { - obj <- params$objective - params[['objective']] <- NULL - } - # if (!is.null(params$eval_metric) && !is.null(feval)) - # stop("xgb.cv: cannot assign two different evaluation metrics") - if (!is.null(params$eval_metric)) - if (class(params$eval_metric) == 'function') { - feval <- params$eval_metric - params[['eval_metric']] <- NULL - } + check.deprecation(...) + + params <- check.booster.params(params, ...) + # TODO: should we deprecate the redundant 'metrics' parameter? + for (m in metrics) + params <- c(params, list("eval_metric" = m)) + + check.custom.obj() + check.custom.eval() - # Early Stopping - if (!is.null(early.stop.round)){ - if (!is.null(feval) && is.null(maximize)) - stop('Please set maximize to note whether the model is maximizing the evaluation or not.') - if (is.null(maximize) && is.null(params$eval_metric)) - stop('Please set maximize to note whether the model is maximizing the evaluation or not.') - if (is.null(maximize)) - { - if (params$eval_metric %in% c('rmse','logloss','error','merror','mlogloss')) { - maximize <- FALSE - } else { - maximize <- TRUE - } - } + #if (is.null(params[['eval_metric']]) && is.null(feval)) + # stop("Either 'eval_metric' or 'feval' must be provided for CV") + + # Labels + if (class(data) == 'xgb.DMatrix') + labels <- getinfo(data, 'label') + if (is.null(labels)) + stop("Labels must be provided for CV either through xgb.DMatrix, or through 'label=' when 'data' is matrix") + + # CV folds + if(!is.null(folds)) { + if(class(folds) != "list" || length(folds) < 2) + stop("'folds' must be a list with 2 or more elements that are vectors of indices for each CV-fold") + nfold <- length(folds) + } else { + if (nfold <= 1) + stop("'nfold' must be > 1") + folds <- generate.cv.folds(nfold, nrow(data), stratified, label, params) + } + + # Potential TODO: sequential CV + #if (strategy == 'sequential') + # stop('Sequential CV strategy is not yet implemented') - if (maximize) { - bestScore <- 0 - } else { - bestScore <- Inf - } - bestInd <- 0 - earlyStopflag <- FALSE + # verbosity & evaluation printing callback: + params <- c(params, list(silent = 1)) + print_every_n <- max( as.integer(print_every_n), 1L) + if (!has.callbacks(callbacks, 'cb.print.evaluation') && verbose) { + callbacks <- add.cb(callbacks, cb.print.evaluation(print_every_n)) + } + # evaluation log callback: always is on in CV + evaluation_log <- list() + if (!has.callbacks(callbacks, 'cb.evaluation.log')) { + callbacks <- add.cb(callbacks, cb.evaluation.log()) + } + # Early stopping callback + stop_condition <- FALSE + if (!is.null(early_stopping_rounds) && + !has.callbacks(callbacks, 'cb.early.stop')) { + callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds, + maximize=maximize, verbose=verbose)) + } + # CV-predictions callback + if (prediction && + !has.callbacks(callbacks, 'cb.cv.predict')) { + callbacks <- add.cb(callbacks, cb.cv.predict(save_model=FALSE)) + } + # Sort the callbacks into categories + cb <- categorize.callbacks(callbacks) - if (length(metrics) > 1) - warning('Only the first metric is used for early stopping process.') - } + + # create the booster-folds + dall <- xgb.get.DMatrix(data, label, missing) + bst_folds <- lapply(1:length(folds), function(k) { + dtest <- slice(dall, folds[[k]]) + dtrain <- slice(dall, 
unlist(folds[-k])) + bst <- xgb.Booster(params, list(dtrain, dtest)) + list(dtrain=dtrain, bst=bst, watchlist=list(train=dtrain, test=dtest), index=folds[[k]]) + }) + # a "basket" to collect some results from callbacks + basket <- list() - xgb_folds <- xgb.cv.mknfold(dtrain, nfold, params, stratified, folds) - obj_type <- params[['objective']] - mat_pred <- FALSE - if (!is.null(obj_type) && obj_type == 'multi:softprob') - { - num_class <- params[['num_class']] - if (is.null(num_class)) - stop('must set num_class to use softmax') - predictValues <- matrix(0, nrow(dtrain), num_class) - mat_pred <- TRUE - } - else - predictValues <- rep(0, nrow(dtrain)) - history <- c() - print.every.n <- max(as.integer(print.every.n), 1L) - for (i in 1:nrounds) { - msg <- list() - for (k in 1:nfold) { - fd <- xgb_folds[[k]] - succ <- xgb.iter.update(fd$booster, fd$dtrain, i - 1, obj) - msg[[k]] <- xgb.iter.eval(fd$booster, fd$watchlist, i - 1, feval) %>% str_split("\t") %>% .[[1]] - } - ret <- xgb.cv.aggcv(msg, showsd) - history <- c(history, ret) - if(verbose) - if (0 == (i - 1L) %% print.every.n) - cat(ret, "\n", sep="") + # extract parameters that can affect the relationship b/w #trees and #iterations + num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1) + num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1) - # early_Stopping - if (!is.null(early.stop.round)){ - score <- strsplit(ret,'\\s+')[[1]][1 + length(metrics) + 2] - score <- strsplit(score,'\\+|:')[[1]][[2]] - score <- as.numeric(score) - if ( (maximize && score > bestScore) || (!maximize && score < bestScore)) { - bestScore <- score - bestInd <- i - 1 - } else { - if (i - bestInd > early.stop.round) { - earlyStopflag <- TRUE - cat('Stopping. Best iteration:', bestInd, '\n') - break - } - } - } - } + # those are fixed for CV (no training continuation) + begin_iteration <- 1 + end_iteration <- nrounds + + # synchronous CV boosting: run CV folds' models within each iteration + for (iteration in begin_iteration:end_iteration) { + + for (f in cb$pre_iter) f() + + msg <- lapply(bst_folds, function(fd) { + xgb.iter.update(fd$bst, fd$dtrain, iteration - 1, obj) + xgb.iter.eval(fd$bst, fd$watchlist, iteration - 1, feval) + }) + msg <- simplify2array(msg) + bst_evaluation <- rowMeans(msg) + bst_evaluation_err <- sqrt(rowMeans(msg^2) - bst_evaluation^2) + + for (f in cb$post_iter) f() + + if (stop_condition) break + } + for (f in cb$finalize) f(finalize=TRUE) - if (prediction) { - for (k in 1:nfold) { - fd <- xgb_folds[[k]] - if (!is.null(early.stop.round) && earlyStopflag) { - res <- xgb.iter.eval(fd$booster, fd$watchlist, bestInd, feval, prediction) - } else { - res <- xgb.iter.eval(fd$booster, fd$watchlist, nrounds - 1, feval, prediction) - } - if (mat_pred) { - pred_mat <- matrix(res[[2]],num_class,length(fd$index)) - predictValues[fd$index,] <- t(pred_mat) - } else { - predictValues[fd$index] <- res[[2]] - } - } - } + # the CV result + ret <- list( + call = match.call(), + params = params, + callbacks = callbacks, + evaluation_log = evaluation_log, + niter = end_iteration, + folds = folds + ) + ret <- c(ret, basket) - colnames <- str_split(string = history[1], pattern = "\t")[[1]] %>% .[2:length(.)] %>% str_extract(".*:") %>% str_replace(":","") %>% str_replace("-", ".") - colnamesMean <- paste(colnames, "mean") - if(showsd) colnamesStd <- paste(colnames, "std") + class(ret) <- 'xgb.cv.synchronous' + invisible(ret) +} - colnames <- c() - if(showsd) for(i in 1:length(colnamesMean)) colnames <- c(colnames, 
colnamesMean[i], colnamesStd[i]) - else colnames <- colnamesMean - type <- rep(x = "numeric", times = length(colnames)) - dt <- utils::read.table(text = "", colClasses = type, col.names = colnames) %>% as.data.table - split <- str_split(string = history, pattern = "\t") - for(line in split) dt <- line[2:length(line)] %>% str_extract_all(pattern = "\\d*\\.+\\d*") %>% unlist %>% as.numeric %>% as.list %>% {rbindlist( list( dt, .), use.names = F, fill = F)} +#' Print xgb.cv result +#' +#' Prints formatted results of \code{xgb.cv}. +#' +#' @param x an \code{xgb.cv.synchronous} object +#' @param verbose whether to print detailed data +#' @param ... passed to \code{data.table.print} +#' +#' @details +#' When not verbose, it would only print the evaluation results, +#' including the best iteration (when available). +#' +#' @examples +#' data(agaricus.train, package='xgboost') +#' train <- agaricus.train +#' cv <- xgb.cv(data = train$data, label = train$label, nfold = 5, max_depth = 2, +#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") +#' print(cv) +#' print(cv, verbose=TRUE) +#' +#' @rdname print.xgb.cv +#' @export +print.xgb.cv.synchronous <- function(x, verbose=FALSE, ...) { + cat('##### xgb.cv ', length(x$folds), '-folds\n', sep='') + + if (verbose) { + if (!is.null(x$call)) { + cat('call:\n ') + print(x$call) + } + if (!is.null(x$params)) { + cat('params (as set within xgb.cv):\n') + cat( ' ', + paste(names(x$params), + paste0('"', unlist(x$params), '"'), + sep=' = ', collapse=', '), '\n', sep='') + } + if (!is.null(x$callbacks) && length(x$callbacks) > 0) { + cat('callbacks:\n') + lapply(callback.calls(x$callbacks), function(x) { + cat(' ') + print(x) + }) + } + + for (n in c('niter', 'best_iteration', 'best_ntreelimit')) { + if (is.null(x[[n]])) + next + cat(n, ': ', x[[n]], '\n', sep='') + } - if (prediction) { - return( list( dt = dt,pred = predictValues)) + if (!is.null(x$pred)) { + cat('pred:\n') + str(x$pred) } - return(dt) -} + } -# Avoid error messages during CRAN check. -# The reason is that these variables are never declared -# They are mainly column names inferred by Data.table... -globalVariables(".") + if (verbose) + cat('evaluation_log:\n') + print(x$evaluation_log, row.names = FALSE, ...) + + if (!is.null(x$best_iteration)) { + cat('Best iteration:\n') + print(x$evaluation_log[x$best_iteration], row.names = FALSE, ...) + } + invisible(x) +} diff --git a/R-package/R/xgb.dump.R b/R-package/R/xgb.dump.R index b39359abd5b0..ce8c8696eb68 100644 --- a/R-package/R/xgb.dump.R +++ b/R-package/R/xgb.dump.R @@ -2,11 +2,6 @@ #' #' Save a xgboost model to text file. Could be parsed later. #' -#' @importFrom magrittr %>% -#' @importFrom stringr str_replace -#' @importFrom data.table fread -#' @importFrom data.table := -#' @importFrom data.table setnames #' @param model the model object. #' @param fname the name of the text file where to save the model text dump. If not provided or set to \code{NULL} the function will return the model as a \code{character} vector. #' @param fmap feature map file representing the type of feature. @@ -15,10 +10,11 @@ #' See demo/ for walkthrough example in R, and #' \url{https://github.com/dmlc/xgboost/blob/master/demo/data/featmap.txt} #' for example Format. 
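Stepping back to the synchronous CV loop a few hunks above: at every iteration each fold returns a named evaluation vector, and the across-fold mean and standard deviation are obtained from the first two moments. A self-contained sketch of that aggregation with made-up per-fold numbers (the expressions mirror the ones in the new xgb.cv body):

# hypothetical evaluation results from three folds at one iteration
msg <- list(c(train_error = 0.046, test_error = 0.055),
            c(train_error = 0.048, test_error = 0.061),
            c(train_error = 0.044, test_error = 0.052))

msg <- simplify2array(msg)                                      # metrics in rows, folds in columns
bst_evaluation     <- rowMeans(msg)                             # across-fold means
bst_evaluation_err <- sqrt(rowMeans(msg^2) - bst_evaluation^2)  # population std deviation across folds
bst_evaluation      # reported as the iteration's mean result
bst_evaluation_err  # shown alongside the means when showsd = TRUE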
-#' @param with.stats whether dump statistics of splits +#' @param with_stats whether dump statistics of splits #' When this option is on, the model dump comes with two additional statistics: #' gain is the approximate loss function gain we get in each split; #' cover is the sum of second order gradient in each node. +#' @param ... currently not used #' #' @return #' if fname is not provided or set to \code{NULL} the function will return the model as a \code{character} vector. Otherwise it will return \code{TRUE}. @@ -28,43 +24,36 @@ #' data(agaricus.test, package='xgboost') #' train <- agaricus.train #' test <- agaricus.test -#' bst <- xgboost(data = train$data, label = train$label, max.depth = 2, -#' eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2, +#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") #' # save the model in file 'xgb.model.dump' -#' xgb.dump(bst, 'xgb.model.dump', with.stats = TRUE) +#' xgb.dump(bst, 'xgb.model.dump', with_stats = TRUE) #' #' # print the model without saving it to a file #' print(xgb.dump(bst)) #' @export -xgb.dump <- function(model = NULL, fname = NULL, fmap = "", with.stats=FALSE) { - if (class(model) != "xgb.Booster") { - stop("model: argument must be type xgb.Booster") - } else { - model <- xgb.Booster.check(model) - } - if (!(class(fname) %in% c("character", "NULL") && length(fname) <= 1)) { - stop("fname: argument must be type character (when provided)") - } - if (!(class(fmap) %in% c("character", "NULL") && length(fname) <= 1)) { - stop("fmap: argument must be type character (when provided)") - } +xgb.dump <- function(model = NULL, fname = NULL, fmap = "", with_stats=FALSE, ...) { + check.deprecation(...) + if (class(model) != "xgb.Booster") + stop("model: argument must be of type xgb.Booster") + if (!(class(fname) %in% c("character", "NULL") && length(fname) <= 1)) + stop("fname: argument must be of type character (when provided)") + if (!(class(fmap) %in% c("character", "NULL") && length(fmap) <= 1)) + stop("fmap: argument must be of type character (when provided)") + + model <- xgb.Booster.check(model) + model_dump <- .Call("XGBoosterDumpModel_R", model$handle, fmap, as.integer(with_stats), PACKAGE = "xgboost") - longString <- .Call("XGBoosterDumpModel_R", model$handle, fmap, as.integer(with.stats), PACKAGE = "xgboost") - - dt <- fread(paste(longString, collapse = ""), sep = "\n", header = F) - - setnames(dt, "Lines") - - if(is.null(fname)) { - result <- dt[Lines != "0"][, Lines := str_replace(Lines, "^\t+", "")][Lines != ""][, paste(Lines)] - return(result) + if (is.null(fname)) + model_dump <- str_replace_all(model_dump, '\t', '') + + model_dump <- unlist(str_split(model_dump, '\n')) + model_dump <- grep('(^$|^0$)', model_dump, invert = TRUE, value = TRUE) + + if (is.null(fname)) { + return(model_dump) } else { - result <- dt[Lines != "0"][Lines != ""][, paste(Lines)] %>% writeLines(fname) + writeLines(model_dump, fname) return(TRUE) } } - -# Avoid error messages during CRAN check. -# The reason is that these variables are never declared -# They are mainly column names inferred by Data.table... -globalVariables(c("Lines", ".")) diff --git a/R-package/R/xgb.importance.R b/R-package/R/xgb.importance.R index 50a7af5cb67c..0219d562c118 100644 --- a/R-package/R/xgb.importance.R +++ b/R-package/R/xgb.importance.R @@ -2,14 +2,6 @@ #' #' Create a \code{data.table} of the most important features of a model. 
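The rewritten xgb.dump body above replaces the fread/data.table pipeline with plain string clean-up; the same three steps can be tried on a small hand-written dump string (the dump text below is invented, and stringr is assumed to be attached):

library(stringr)

raw_dump <- "booster[0]\n0:[f28<2e-06] yes=1,no=2,missing=1\n\t1:leaf=0.4\n\t2:leaf=-0.4\n\n0"

no_tabs <- str_replace_all(raw_dump, '\t', '')                   # drop indentation tabs (text output only)
lines   <- unlist(str_split(no_tabs, '\n'))                      # one element per dump line
lines   <- grep('(^$|^0$)', lines, invert = TRUE, value = TRUE)  # drop empty lines and the trailing "0"
lines
# "booster[0]"  "0:[f28<2e-06] yes=1,no=2,missing=1"  "1:leaf=0.4"  "2:leaf=-0.4"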
#' -#' @importFrom data.table data.table -#' @importFrom data.table setnames -#' @importFrom data.table := -#' @importFrom magrittr %>% -#' @importFrom Matrix colSums -#' @importFrom Matrix cBind -#' @importFrom Matrix sparseVector -#' #' @param feature_names names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}. #' @param model generated by the \code{xgb.train} function. #' @param data the dataset used for the training step. Will be used with \code{label} parameter for co-occurence computation. More information in \code{Detail} part. This parameter is optional. @@ -46,14 +38,13 @@ #' @examples #' data(agaricus.train, package='xgboost') #' -#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max.depth = 2, -#' eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, +#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic") #' -#' # agaricus.train$data@@Dimnames[[2]] represents the column names of the sparse matrix. -#' xgb.importance(agaricus.train$data@@Dimnames[[2]], model = bst) +#' xgb.importance(colnames(agaricus.train$data), model = bst) #' #' # Same thing with co-occurence computation this time -#' xgb.importance(agaricus.train$data@@Dimnames[[2]], model = bst, data = agaricus.train$data, label = agaricus.train$label) +#' xgb.importance(colnames(agaricus.train$data), model = bst, data = agaricus.train$data, label = agaricus.train$label) #' #' @export xgb.importance <- function(feature_names = NULL, model = NULL, data = NULL, label = NULL, target = function(x) ( (x + label) == 2)){ @@ -84,7 +75,7 @@ xgb.importance <- function(feature_names = NULL, model = NULL, data = NULL, labe data.table(Feature = feature_names, Weight = weights) } - model.text.dump <- xgb.dump(model = model, with.stats = T) + model.text.dump <- xgb.dump(model = model, with_stats = T) if(model.text.dump[2] == "bias:"){ result <- model.text.dump %>% linearDump(feature_names, .) diff --git a/R-package/R/xgb.load.R b/R-package/R/xgb.load.R index 03d6a4842a9e..96c3c9d80327 100644 --- a/R-package/R/xgb.load.R +++ b/R-package/R/xgb.load.R @@ -9,8 +9,8 @@ #' data(agaricus.test, package='xgboost') #' train <- agaricus.train #' test <- agaricus.test -#' bst <- xgboost(data = train$data, label = train$label, max.depth = 2, -#' eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2, +#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic") #' xgb.save(bst, 'xgb.model') #' bst <- xgb.load('xgb.model') #' pred <- predict(bst, test$data) @@ -26,6 +26,6 @@ xgb.load <- function(modelfile) { } else { bst <- xgb.handleToBooster(handle, NULL) } - bst <- xgb.Booster.check(bst) + bst <- xgb.Booster.check(bst, saveraw = TRUE) return(bst) } diff --git a/R-package/R/xgb.model.dt.tree.R b/R-package/R/xgb.model.dt.tree.R index 96d7575262ee..bc6566a49aa3 100644 --- a/R-package/R/xgb.model.dt.tree.R +++ b/R-package/R/xgb.model.dt.tree.R @@ -2,16 +2,11 @@ #' #' Parse a boosted tree model text dump into a \code{data.table} structure. #' -#' @importFrom data.table data.table -#' @importFrom data.table := -#' @importFrom magrittr %>% -#' @importFrom stringr str_match -#' #' @param feature_names character vector of feature names. 
If the model already #' contains feature names, this argument should be \code{NULL} (default value) #' @param model object of class \code{xgb.Booster} #' @param text \code{character} vector previously generated by the \code{xgb.dump} -#' function (where parameter \code{with.stats = TRUE} should have been set). +#' function (where parameter \code{with_stats = TRUE} should have been set). #' @param n_first_tree limit the parsing to the \code{n} first trees. #' If set to \code{NULL}, all trees of the model are parsed. #' @@ -40,8 +35,8 @@ #' #' data(agaricus.train, package='xgboost') #' -#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max.depth = 2, -#' eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, +#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic") #' #' (dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst)) #' @@ -71,12 +66,12 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL, } if(is.null(text)){ - text <- xgb.dump(model = model, with.stats = T) + text <- xgb.dump(model = model, with_stats = T) } position <- which(!is.na(str_match(text, "booster"))) - addTreeId <- function(x, i) paste(i,x,sep = "-") + add.tree.id <- function(x, i) paste(i, x, sep = "-") anynumber_regex <- "[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?" @@ -88,7 +83,7 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL, td <- td[Tree <= n_first_tree & !grepl('^booster', t)] td[, Node := str_match(t, "(\\d+):")[,2] %>% as.numeric ] - td[, ID := addTreeId(Node, Tree)] + td[, ID := add.tree.id(Node, Tree)] td[, isLeaf := !is.na(str_match(t, "leaf"))] # parse branch lines @@ -97,7 +92,7 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL, "gain=(", anynumber_regex, "),cover=(", anynumber_regex, ")") # skip some indices with spurious capture groups from anynumber_regex xtr <- str_match(t, rx)[, c(2,3,5,6,7,8,10)] - xtr[, 3:5] <- addTreeId(xtr[, 3:5], Tree) + xtr[, 3:5] <- add.tree.id(xtr[, 3:5], Tree) lapply(1:ncol(xtr), function(i) xtr[,i]) }] # assign feature_names when available @@ -124,4 +119,4 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL, # Avoid error messages during CRAN check. # The reason is that these variables are never declared # They are mainly column names inferred by Data.table... -globalVariables(c("Tree", "Node", "ID", "Feature", "t", "isLeaf",".SD", ".SDcols")) \ No newline at end of file +globalVariables(c("Tree", "Node", "ID", "Feature", "t", "isLeaf",".SD", ".SDcols")) diff --git a/R-package/R/xgb.plot.deepness.R b/R-package/R/xgb.plot.deepness.R index 0efd783acd60..2c4d71a7dccb 100644 --- a/R-package/R/xgb.plot.deepness.R +++ b/R-package/R/xgb.plot.deepness.R @@ -2,7 +2,6 @@ #' #' Plot multiple graph aligned by rows and columns. 
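For reference on the renaming above, add.tree.id only prefixes node numbers with the tree index so that node IDs stay unique across all trees of a model; a toy stand-alone version:

# same one-liner as the renamed internal helper
add.tree.id <- function(x, i) paste(i, x, sep = "-")

add.tree.id(0:3, 2)
# "2-0" "2-1" "2-2" "2-3"   (tree 2, nodes 0..3)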
#' -#' @importFrom data.table data.table #' @param cols number of columns #' @return NULL multiplot <- function(..., cols = 1) { @@ -42,18 +41,18 @@ edge.parser <- function(element) { #' Extract path from root to leaf from data.table #' @param dt.tree data.table containing the nodes and edges of the trees -get.paths.to.leaf <- function(dt.tree) { +get.paths.to.leaf <- function(dt_tree) { dt.not.leaf.edges <- - dt.tree[Feature != "Leaf",.(ID, Yes, Tree)] %>% list(dt.tree[Feature != "Leaf",.(ID, No, Tree)]) %>% rbindlist(use.names = F) + dt_tree[Feature != "Leaf",.(ID, Yes, Tree)] %>% list(dt_tree[Feature != "Leaf",.(ID, No, Tree)]) %>% rbindlist(use.names = F) - trees <- dt.tree[,unique(Tree)] + trees <- dt_tree[,unique(Tree)] paths <- list() for (tree in trees) { graph <- igraph::graph_from_data_frame(dt.not.leaf.edges[Tree == tree]) paths.tmp <- - igraph::shortest_paths(graph, from = paste0(tree, "-0"), to = dt.tree[Tree == tree & + igraph::shortest_paths(graph, from = paste0(tree, "-0"), to = dt_tree[Tree == tree & Feature == "Leaf", c(ID)]) paths <- c(paths, paths.tmp$vpath) } @@ -64,11 +63,6 @@ get.paths.to.leaf <- function(dt.tree) { #' #' Generate a graph to plot the distribution of deepness among trees. #' -#' @importFrom data.table data.table -#' @importFrom data.table rbindlist -#' @importFrom data.table setnames -#' @importFrom data.table := -#' @importFrom magrittr %>% #' @param model dump generated by the \code{xgb.train} function. #' #' @return Two graphs showing the distribution of the model deepness. @@ -78,7 +72,7 @@ get.paths.to.leaf <- function(dt.tree) { #' by tree deepness level. #' #' The purpose of this function is to help the user to find the best trade-off to set -#' the \code{max.depth} and \code{min_child_weight} parameters according to the bias / variance trade-off. +#' the \code{max_depth} and \code{min_child_weight} parameters according to the bias / variance trade-off. #' #' See \link{xgb.train} for more information about these parameters. #' @@ -94,8 +88,8 @@ get.paths.to.leaf <- function(dt.tree) { #' @examples #' data(agaricus.train, package='xgboost') #' -#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max.depth = 15, -#' eta = 1, nthread = 2, nround = 30, objective = "binary:logistic", +#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 15, +#' eta = 1, nthread = 2, nrounds = 30, objective = "binary:logistic", #' min_child_weight = 50) #' #' xgb.plot.deepness(model = bst) diff --git a/R-package/R/xgb.plot.importance.R b/R-package/R/xgb.plot.importance.R index 1fcd7c01438d..8a3f2c82f2e7 100644 --- a/R-package/R/xgb.plot.importance.R +++ b/R-package/R/xgb.plot.importance.R @@ -2,9 +2,9 @@ #' #' Read a data.table containing feature importance details and plot it (for both GLM and Trees). #' -#' @importFrom magrittr %>% #' @param importance_matrix a \code{data.table} returned by the \code{xgb.importance} function. -#' @param numberOfClusters a \code{numeric} vector containing the min and the max range of the possible number of clusters of bars. +#' @param n_clusters a \code{numeric} vector containing the min and the max range of the possible number of clusters of bars. +#' @param ... currently not used #' #' @return A \code{ggplot2} bar graph representing each feature by a horizontal bar. Longer is the bar, more important is the feature. Features are classified by importance and clustered by importance. The group is represented through the color of the bar. 
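To make the renamed n_clusters argument concrete: the importance bars are grouped by running one-dimensional k-means (Ckmeans.1d.dp) on the per-feature Gain values, searching over the supplied range of cluster counts. A hedged sketch with invented Gain values (Ckmeans.1d.dp assumed to be installed):

library(Ckmeans.1d.dp)

gain <- c(0.55, 0.20, 0.12, 0.06, 0.04, 0.02, 0.01)  # hypothetical per-feature Gain values
cl <- Ckmeans.1d.dp(gain, k = c(1, 10))              # search 1..10 clusters, mirroring the default n_clusters = c(1:10)
cl$cluster                                           # cluster id per feature, used as the bar colour group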
#' @@ -20,16 +20,16 @@ #' #(labels = outcome column which will be learned). #' #Each column of the sparse Matrix is a feature in one hot encoding format. #' -#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max.depth = 2, -#' eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, +#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") #' -#' #agaricus.train$data@@Dimnames[[2]] represents the column names of the sparse matrix. -#' importance_matrix <- xgb.importance(agaricus.train$data@@Dimnames[[2]], model = bst) +#' importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst) #' xgb.plot.importance(importance_matrix) #' #' @export xgb.plot.importance <- - function(importance_matrix = NULL, numberOfClusters = c(1:10)) { + function(importance_matrix = NULL, n_clusters = c(1:10), ...) { + check.deprecation(...) if (!"data.table" %in% class(importance_matrix)) { stop("importance_matrix: Should be a data.table.") } @@ -53,7 +53,7 @@ xgb.plot.importance <- importance_matrix[, .(Gain.or.Weight = sum(get(y.axe.name))), by = Feature] clusters <- - suppressWarnings(Ckmeans.1d.dp::Ckmeans.1d.dp(importance_matrix[,Gain.or.Weight], numberOfClusters)) + suppressWarnings(Ckmeans.1d.dp::Ckmeans.1d.dp(importance_matrix[,Gain.or.Weight], n_clusters)) importance_matrix[,"Cluster":= clusters$cluster %>% as.character] plot <- diff --git a/R-package/R/xgb.plot.multi.trees.R b/R-package/R/xgb.plot.multi.trees.R index c61cb8cd4daf..0b7c9320ebaf 100644 --- a/R-package/R/xgb.plot.multi.trees.R +++ b/R-package/R/xgb.plot.multi.trees.R @@ -2,19 +2,12 @@ #' #' Visualization of the ensemble of trees as a single collective unit. #' -#' @importFrom data.table data.table -#' @importFrom data.table rbindlist -#' @importFrom data.table setnames -#' @importFrom data.table := -#' @importFrom magrittr %>% -#' @importFrom stringr str_detect -#' @importFrom stringr str_extract -#' #' @param model dump generated by the \code{xgb.train} function. #' @param feature_names names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}. -#' @param features.keep number of features to keep in each position of the multi trees. -#' @param plot.width width in pixels of the graph to produce -#' @param plot.height height in pixels of the graph to produce +#' @param features_keep number of features to keep in each position of the multi trees. +#' @param plot_width width in pixels of the graph to produce +#' @param plot_height height in pixels of the graph to produce +#' @param ... currently not used #' #' @return Two graphs showing the distribution of the model deepness. #' @@ -34,7 +27,7 @@ #' Moreover, the trees tend to reuse the same features. #' #' The function will project each tree on one, and keep for each position the -#' \code{features.keep} first features (based on Gain per feature measure). +#' \code{features_keep} first features (based on Gain per feature measure). 
#' #' This function is inspired by this blog post: #' \url{https://wellecks.wordpress.com/2015/02/21/peering-into-the-black-box-visualizing-lambdamart/} @@ -42,15 +35,16 @@ #' @examples #' data(agaricus.train, package='xgboost') #' -#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max.depth = 15, -#' eta = 1, nthread = 2, nround = 30, objective = "binary:logistic", +#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 15, +#' eta = 1, nthread = 2, nrounds = 30, objective = "binary:logistic", #' min_child_weight = 50) #' -#' p <- xgb.plot.multi.trees(model = bst, feature_names = agaricus.train$data@Dimnames[[2]], features.keep = 3) +#' p <- xgb.plot.multi.trees(model = bst, feature_names = colnames(agaricus.train$data), features_keep = 3) #' print(p) #' #' @export -xgb.plot.multi.trees <- function(model, feature_names = NULL, features.keep = 5, plot.width = NULL, plot.height = NULL){ +xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5, plot_width = NULL, plot_height = NULL, ...){ + check.deprecation(...) tree.matrix <- xgb.model.dt.tree(feature_names = feature_names, model = model) # first number of the path represents the tree, then the following numbers are related to the path to follow @@ -80,7 +74,7 @@ xgb.plot.multi.trees <- function(model, feature_names = NULL, features.keep = 5, tree.matrix[,`:=`(abs.node.position=remove.tree(abs.node.position), Yes=remove.tree(Yes), No=remove.tree(No))] - nodes.dt <- tree.matrix[,.(Quality = sum(Quality)),by = .(abs.node.position, Feature)][,.(Text =paste0(Feature[1:min(length(Feature), features.keep)], " (", Quality[1:min(length(Quality), features.keep)], ")") %>% paste0(collapse = "\n")), by=abs.node.position] + nodes.dt <- tree.matrix[,.(Quality = sum(Quality)),by = .(abs.node.position, Feature)][,.(Text =paste0(Feature[1:min(length(Feature), features_keep)], " (", Quality[1:min(length(Quality), features_keep)], ")") %>% paste0(collapse = "\n")), by=abs.node.position] edges.dt <- tree.matrix[Feature != "Leaf",.(abs.node.position, Yes)] %>% list(tree.matrix[Feature != "Leaf",.(abs.node.position, No)]) %>% rbindlist() %>% setnames(c("From", "To")) %>% .[,.N,.(From, To)] %>% .[,N:=NULL] nodes <- DiagrammeR::create_nodes(nodes = nodes.dt[,abs.node.position], @@ -104,11 +98,11 @@ xgb.plot.multi.trees <- function(model, feature_names = NULL, features.keep = 5, edges_df = edges, graph_attrs = "rankdir = LR") - DiagrammeR::render_graph(graph, width = plot.width, height = plot.height) + DiagrammeR::render_graph(graph, width = plot_width, height = plot_height) } globalVariables( c( "Feature", "no.nodes.abs.pos", "ID", "Yes", "No", "Tree", "yes.nodes.abs.pos", "abs.node.position" ) -) \ No newline at end of file +) diff --git a/R-package/R/xgb.plot.tree.R b/R-package/R/xgb.plot.tree.R index 3d9d55c9f3a5..e1cc72d9fa04 100644 --- a/R-package/R/xgb.plot.tree.R +++ b/R-package/R/xgb.plot.tree.R @@ -2,14 +2,12 @@ #' #' Read a tree model text dump and plot the model. #' -#' @importFrom data.table data.table -#' @importFrom data.table := -#' @importFrom magrittr %>% #' @param feature_names names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}. #' @param model generated by the \code{xgb.train} function. Avoid the creation of a dump file. #' @param n_first_tree limit the plot to the n first trees. 
If \code{NULL}, all trees of the model are plotted. Performance can be low for huge models. -#' @param plot.width the width of the diagram in pixels. -#' @param plot.height the height of the diagram in pixels. +#' @param plot_width the width of the diagram in pixels. +#' @param plot_height the height of the diagram in pixels. +#' @param ... currently not used. #' #' @return A \code{DiagrammeR} of the model. #' @@ -28,15 +26,14 @@ #' @examples #' data(agaricus.train, package='xgboost') #' -#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max.depth = 2, -#' eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, +#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic") #' -#' # agaricus.train$data@@Dimnames[[2]] represents the column names of the sparse matrix. -#' xgb.plot.tree(feature_names = agaricus.train$data@@Dimnames[[2]], model = bst) +#' xgb.plot.tree(feature_names = colnames(agaricus.train$data), model = bst) #' #' @export -xgb.plot.tree <- function(feature_names = NULL, model = NULL, n_first_tree = NULL, plot.width = NULL, plot.height = NULL){ - +xgb.plot.tree <- function(feature_names = NULL, model = NULL, n_first_tree = NULL, plot_width = NULL, plot_height = NULL, ...){ + check.deprecation(...) if (class(model) != "xgb.Booster") { stop("model: Has to be an object of class xgb.Booster model generaged by the xgb.train function.") } @@ -75,7 +72,7 @@ xgb.plot.tree <- function(feature_names = NULL, model = NULL, n_first_tree = NUL edges_df = edges, graph_attrs = "rankdir = LR") - DiagrammeR::render_graph(graph, width = plot.width, height = plot.height) + DiagrammeR::render_graph(graph, width = plot_width, height = plot_height) } # Avoid error messages during CRAN check. diff --git a/R-package/R/xgb.save.R b/R-package/R/xgb.save.R index 7d595ddc6128..5b2421b7fabd 100644 --- a/R-package/R/xgb.save.R +++ b/R-package/R/xgb.save.R @@ -3,29 +3,25 @@ #' Save xgboost model from xgboost or xgb.train #' #' @param model the model object. -#' @param fname the name of the binary file. +#' @param fname the name of the file to write. #' #' @examples #' data(agaricus.train, package='xgboost') #' data(agaricus.test, package='xgboost') #' train <- agaricus.train #' test <- agaricus.test -#' bst <- xgboost(data = train$data, label = train$label, max.depth = 2, -#' eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2, +#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic") #' xgb.save(bst, 'xgb.model') #' bst <- xgb.load('xgb.model') #' pred <- predict(bst, test$data) #' @export xgb.save <- function(model, fname) { - if (typeof(fname) != "character") { - stop("xgb.save: fname must be character") - } - if (class(model) == "xgb.Booster") { - model <- xgb.Booster.check(model) - .Call("XGBoosterSaveModel_R", model$handle, fname, PACKAGE = "xgboost") - return(TRUE) - } - stop("xgb.save: the input must be xgb.Booster. Use xgb.DMatrix.save to save - xgb.DMatrix object.") - return(FALSE) + if (typeof(fname) != "character") + stop("fname must be character") + if (class(model) != "xgb.Booster") + stop("the input must be xgb.Booster. 
Use xgb.DMatrix.save to save xgb.DMatrix object.") + + .Call("XGBoosterSaveModel_R", model$handle, fname, PACKAGE = "xgboost") + return(TRUE) } diff --git a/R-package/R/xgb.save.raw.R b/R-package/R/xgb.save.raw.R index e61303addfe2..1743b67d761e 100644 --- a/R-package/R/xgb.save.raw.R +++ b/R-package/R/xgb.save.raw.R @@ -10,20 +10,14 @@ #' data(agaricus.test, package='xgboost') #' train <- agaricus.train #' test <- agaricus.test -#' bst <- xgboost(data = train$data, label = train$label, max.depth = 2, -#' eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2, +#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic") #' raw <- xgb.save.raw(bst) #' bst <- xgb.load(raw) #' pred <- predict(bst, test$data) +#' #' @export xgb.save.raw <- function(model) { - if (class(model) == "xgb.Booster"){ - model <- model$handle - } - if (class(model) == "xgb.Booster.handle") { - raw <- .Call("XGBoosterModelToRaw_R", model, PACKAGE = "xgboost") - return(raw) - } - stop("xgb.raw: the input must be xgb.Booster.handle. Use xgb.DMatrix.save to save - xgb.DMatrix object.") + model <- xgb.get.handle(model) + .Call("XGBoosterModelToRaw_R", model, PACKAGE = "xgboost") } diff --git a/R-package/R/xgb.train.R b/R-package/R/xgb.train.R index 3868ddf89513..022e02246a61 100644 --- a/R-package/R/xgb.train.R +++ b/R-package/R/xgb.train.R @@ -1,8 +1,10 @@ #' eXtreme Gradient Boosting Training #' -#' An advanced interface for training xgboost model. Look at \code{\link{xgboost}} function for a simpler interface. +#' \code{xgb.train} is an advanced interface for training an xgboost model. The \code{xgboost} function provides a simpler interface. #' #' @param params the list of parameters. +#' The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}. +#' Below is a shorter summary: #' #' 1. General Parameters #' @@ -43,68 +45,139 @@ #' \item \code{binary:logistic} logistic regression for binary classification. Output probability. #' \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation. #' \item \code{num_class} set the number of classes. To use only with multiclass objectives. -#' \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class}. -#' \item \code{multi:softprob} same as softmax, but output a vector of ndata * nclass, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class. +#' \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}. +#' \item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class. #' \item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss. #' } #' \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5 #' \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. 
Default: metric will be assigned according to objective(rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section. #' } #' -#' @param data takes an \code{xgb.DMatrix} as the input. +#' @param data input dataset. \code{xgb.train} takes only an \code{xgb.DMatrix} as the input. +#' \code{xgboost}, in addition, also accepts \code{matrix}, \code{dgCMatrix}, or local data file. #' @param nrounds the max number of iterations #' @param watchlist what information should be printed when \code{verbose=1} or -#' \code{verbose=2}. Watchlist is used to specify validation set monitoring -#' during training. For example user can specify -#' watchlist=list(validation1=mat1, validation2=mat2) to watch -#' the performance of each round's model on mat1 and mat2 +#' \code{verbose=2}. Watchlist is used to specify validation set monitoring +#' during training. For example user can specify +#' watchlist=list(validation1=mat1, validation2=mat2) to watch +#' the performance of each round's model on mat1 and mat2 #' #' @param obj customized objective function. Returns gradient and second order -#' gradient with given prediction and dtrain, +#' gradient with given prediction and dtrain. #' @param feval custimized evaluation function. Returns -#' \code{list(metric='metric-name', value='metric-value')} with given -#' prediction and dtrain, +#' \code{list(metric='metric-name', value='metric-value')} with given +#' prediction and dtrain. #' @param verbose If 0, xgboost will stay silent. If 1, xgboost will print -#' information of performance. If 2, xgboost will print information of both -#' @param print.every.n Print every N progress messages when \code{verbose>0}. Default is 1 which means all messages are printed. -#' @param early.stop.round If \code{NULL}, the early stopping function is not triggered. -#' If set to an integer \code{k}, training with a validation set will stop if the performance -#' keeps getting worse consecutively for \code{k} rounds. -#' @param maximize If \code{feval} and \code{early.stop.round} are set, then \code{maximize} must be set as well. -#' \code{maximize=TRUE} means the larger the evaluation score the better. -#' @param save_period save the model to the disk in every \code{save_period} rounds, 0 means no such action. +#' information of performance. If 2, xgboost will print some additional information. +#' Setting \code{verbose > 0} automatically engages the \code{\link{cb.evaluation.log}} and +#' \code{\link{cb.print.evaluation}} callback functions. +#' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}. +#' Default is 1 which means all messages are printed. This parameter is passed to the +#' \code{\link{cb.print.evaluation}} callback. +#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered. +#' If set to an integer \code{k}, training with a validation set will stop if the performance +#' doesn't improve for \code{k} rounds. +#' Setting this parameter engages the \code{\link{cb.early.stop}} callback. +#' @param maximize If \code{feval} and \code{early_stopping_rounds} are set, +#' then this parameter must be set as well. +#' When it is \code{TRUE}, it means the larger the evaluation score the better. +#' This parameter is passed to the \code{\link{cb.early.stop}} callback. +#' @param save_period when it is non-NULL, model is saved to disk after every \code{save_period} rounds, +#' 0 means save at the end. 
The saving is handled by the \code{\link{cb.save.model}} callback. #' @param save_name the name or path for periodically saved model file. +#' @param xgb_model a previously built model to continue the training from. +#' Could be either an object of class \code{xgb.Booster}, or its raw data, or the name of a +#' file with a previously saved model. +#' @param callbacks a list of callback functions to perform various tasks during boosting. +#' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the +#' parameters' values. User can provide either existing or their own callback methods in order +#' to customize the training process. #' @param ... other parameters to pass to \code{params}. +#' @param label vector of response values. Should not be provided when data is +#' a local data file name or an \code{xgb.DMatrix}. +#' @param missing by default is set to NA, which means that NA values should be considered as 'missing' +#' by the algorithm. Sometimes, 0 or other extreme value might be used to represent missing values. +#' This parameter is only used when input is a dense matrix. +#' @param weight a vector indicating the weight for each row of the input. #' #' @details -#' This is the training function for \code{xgboost}. +#' These are the training functions for \code{xgboost}. #' -#' It supports advanced features such as \code{watchlist}, customized objective function (\code{feval}), -#' therefore it is more flexible than \code{\link{xgboost}} function. +#' The \code{xgb.train} interface supports advanced features such as \code{watchlist}, +#' customized objective and evaluation metric functions, therefore it is more flexible +#' than the \code{\link{xgboost}} interface. #' #' Parallelization is automatically enabled if \code{OpenMP} is present. #' Number of threads can also be manually specified via \code{nthread} parameter. #' -#' \code{eval_metric} parameter (not listed above) is set automatically by Xgboost but can be overriden by parameter. Below is provided the list of different metric optimized by Xgboost to help you to understand how it works inside or to use them with the \code{watchlist} parameter. +#' The evaluation metric is chosen automatically by Xgboost (according to the objective) +#' when the \code{eval_metric} parameter is not provided. +#' User may set one or several \code{eval_metric} parameters. +#' Note that when using a customized metric, only this single metric can be used. +#' The following is the list of built-in metrics for which Xgboost provides optimized implementation: #' \itemize{ #' \item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error} #' \item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood} #' \item \code{mlogloss} multiclass logloss. \url{https://www.kaggle.com/wiki/MultiClassLogLoss} -#' \item \code{error} Binary classification error rate. It is calculated as \code{(wrong cases) / (all cases)}. For the predictions, the evaluation will regard the instances with prediction value larger than 0.5 as positive instances, and the others as negative instances. -#' \item \code{merror} Multiclass classification error rate. It is calculated as \code{(wrong cases) / (all cases)}. +#' \item \code{error} Binary classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}. +#' By default, it uses the 0.5 threshold for predicted values to define negative and positive instances. +#' Different threshold (e.g., 0.)
could be specified as "error@0." +#' \item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}. #' \item \code{auc} Area under the curve. \url{http://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation. #' \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG} #' } -#' -#' Full list of parameters is available in the Wiki \url{https://github.com/dmlc/xgboost/wiki/Parameters}. #' -#' This function only accepts an \code{\link{xgb.DMatrix}} object as the input. +#' The following callbacks are automatically created when certain parameters are set: +#' \itemize{ +#' \item \code{cb.print.evaluation} is turned on when \code{verbose > 0}; +#' and the \code{print_every_n} parameter is passed to it. +#' \item \code{cb.evaluation.log} is on when \code{verbose > 0} and \code{watchlist} is present. +#' \item \code{cb.early.stop}: when \code{early_stopping_rounds} is set. +#' \item \code{cb.save.model}: when \code{save_period > 0} is set. +#' } +#' +#' @return +#' An object of class \code{xgb.Booster} with the following elements: +#' \itemize{ +#' \item \code{handle} a handle (pointer) to the xgboost model in memory. +#' \item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type. +#' \item \code{niter} number of boosting iterations. +#' \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the +#' first column corresponding to iteration number and the rest corresponding to evaluation +#' metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback. +#' \item \code{call} a function call. +#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not +#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback. +#' \item \code{callbacks} callback functions that were either automatically assigned or +#' explicitly passed. +#' \item \code{best_iteration} iteration number with the best evaluation metric value +#' (only available with early stopping). +#' \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration, +#' which could further be used in \code{predict} method +#' (only available with early stopping). +#' \item \code{best_score} the best evaluation metric value during early stopping (only available with early stopping).
+#' } +#' +#' @seealso +#' \code{\link{callbacks}}, +#' \code{\link{predict.xgb.Booster}}, +#' \code{\link{xgb.cv}} #' #' @examples #' data(agaricus.train, package='xgboost') +#' data(agaricus.test, package='xgboost') +#' #' dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) -#' dtest <- dtrain +#' dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label) #' watchlist <- list(eval = dtest, train = dtrain) +#' +#' ## A simple xgb.train example: +#' param <- list(max_depth = 2, eta = 1, silent = 1, +#' objective = "binary:logistic", eval_metric = "auc") +#' bst <- xgb.train(param, dtrain, nthread = 2, nrounds = 2, watchlist) +#' +#' ## An xgb.train example where custom objective and evaluation metric are used: #' logregobj <- function(preds, dtrain) { #' labels <- getinfo(dtrain, "label") #' preds <- 1/(1 + exp(-preds)) @@ -117,126 +190,145 @@ #' err <- as.numeric(sum(labels != (preds > 0)))/length(labels) #' return(list(metric = "error", value = err)) #' } -#' param <- list(max.depth = 2, eta = 1, silent = 1, objective=logregobj,eval_metric=evalerror) -#' bst <- xgb.train(param, dtrain, nthread = 2, nround = 2, watchlist) +#' bst <- xgb.train(param, dtrain, nthread = 2, nrounds = 2, watchlist) +#' +#' ## An xgb.train example of using variable learning rates at each iteration: +#' my_etas <- list(eta = c(0.5, 0.1)) +#' bst <- xgb.train(param, dtrain, nthread = 2, nrounds = 2, watchlist, +#' callbacks = list(cb.reset.parameters(my_etas))) +#' +#' ## Explicit use of the cb.evaluation.log callback allows to run +#' ## xgb.train silently but still store the evaluation results: +#' bst <- xgb.train(param, dtrain, nthread = 2, nrounds = 2, watchlist, +#' verbose = 0, callbacks = list(cb.evaluation.log())) +#' print(bst$evaluation_log) +#' +#' ## An 'xgboost' interface example: +#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, +#' max_depth = 2, eta = 1, nthread = 2, nrounds = 2, +#' objective = "binary:logistic") +#' pred <- predict(bst, agaricus.test$data) +#' +#' @rdname xgb.train #' @export -xgb.train <- function(params=list(), data, nrounds, watchlist = list(), - obj = NULL, feval = NULL, verbose = 1, print.every.n=1L, - early.stop.round = NULL, maximize = NULL, - save_period = 0, save_name = "xgboost.model", ...) { +xgb.train <- function(params = list(), data, nrounds, watchlist = list(), + obj = NULL, feval = NULL, verbose = 1, print_every_n=1L, + early_stopping_rounds = NULL, maximize = NULL, + save_period = NULL, save_name = "xgboost.model", + xgb_model = NULL, callbacks = list(), ...) { + + check.deprecation(...) + + params <- check.booster.params(params, ...) 
+ + check.custom.obj() + check.custom.eval() + + # data & watchlist checks dtrain <- data - if (typeof(params) != "list") { - stop("xgb.train: first argument params must be list") + if (class(dtrain) != "xgb.DMatrix") + stop("second argument dtrain must be xgb.DMatrix") + if (length(watchlist) > 0) { + if (typeof(watchlist) != "list" || + !all(sapply(watchlist, class) == "xgb.DMatrix")) + stop("watchlist must be a list of xgb.DMatrix elements") + evnames <- names(watchlist) + if (is.null(evnames) || any(evnames == "")) + stop("each element of the watchlist must have a name tag") + } + + # evaluation printing callback + params <- c(params, list(silent = ifelse(verbose > 1, 0, 1))) + print_every_n <- max( as.integer(print_every_n), 1L) + if (!has.callbacks(callbacks, 'cb.print.evaluation') && verbose) { + callbacks <- add.cb(callbacks, cb.print.evaluation(print_every_n)) } - if (class(dtrain) != "xgb.DMatrix") { - stop("xgb.train: second argument dtrain must be xgb.DMatrix") + # evaluation log callback: it is automatically enabled only when verbose > 0 + evaluation_log <- list() + if (verbose > 0 && + !has.callbacks(callbacks, 'cb.evaluation.log') && + length(watchlist) > 0) { + callbacks <- add.cb(callbacks, cb.evaluation.log()) } - if (verbose > 1) { - params <- append(params, list(silent = 0)) - } else { - params <- append(params, list(silent = 1)) + # Model saving callback + if (!is.null(save_period) && + !has.callbacks(callbacks, 'cb.save.model')) { + callbacks <- add.cb(callbacks, cb.save.model(save_period, save_name)) } - if (length(watchlist) != 0 && verbose == 0) { - warning('watchlist is provided but verbose=0, no evaluation information will be printed') + # Early stopping callback + stop_condition <- FALSE + if (!is.null(early_stopping_rounds) && + !has.callbacks(callbacks, 'cb.early.stop')) { + callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds, + maximize=maximize, verbose=verbose)) } + # Sort the callbacks into categories + cb <- categorize.callbacks(callbacks) - fit.call <- match.call() - dot.params <- list(...) - nms.params <- names(params) - nms.dot.params <- names(dot.params) - if (length(intersect(nms.params,nms.dot.params)) > 0) - stop("Duplicated term in parameters. 
Please check your list of params.") - params <- append(params, dot.params) + + # Construct a booster (either a new one or load from xgb_model) + handle <- xgb.Booster(params, append(watchlist, dtrain), xgb_model) + bst <- xgb.handleToBooster(handle) - # customized objective and evaluation metric interface - if (!is.null(params$objective) && !is.null(obj)) - stop("xgb.train: cannot assign two different objectives") - if (!is.null(params$objective)) - if (class(params$objective) == 'function') { - obj <- params$objective - params$objective <- NULL - } - if (!is.null(params$eval_metric) && !is.null(feval)) - stop("xgb.train: cannot assign two different evaluation metrics") - if (!is.null(params$eval_metric)) - if (class(params$eval_metric) == 'function') { - feval <- params$eval_metric - params$eval_metric <- NULL - } + # extract parameters that can affect the relationship b/w #trees and #iterations + num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1) + num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1) - # Early stopping - if (!is.null(early.stop.round)){ - if (!is.null(feval) && is.null(maximize)) - stop('Please set maximize to note whether the model is maximizing the evaluation or not.') - if (length(watchlist) == 0) - stop('For early stopping you need at least one set in watchlist.') - if (is.null(maximize) && is.null(params$eval_metric)) - stop('Please set maximize to note whether the model is maximizing the evaluation or not.') - if (is.null(maximize)) - { - if (params$eval_metric %in% c('rmse','logloss','error','merror','mlogloss')) { - maximize <- FALSE - } else { - maximize <- TRUE - } + # When the 'xgb_model' was set, find out how many boosting iterations it has + niter_skip <- 0 + if (!is.null(xgb_model)) { + niter_skip <- as.numeric(xgb.attr(bst, 'niter')) + 1 + if (length(niter_skip) == 0) { + niter_skip <- xgb.ntree(bst) %/% (num_parallel_tree * num_class) } + } - if (maximize) { - bestScore <- 0 - } else { - bestScore <- Inf - } - bestInd <- 0 - earlyStopflag = FALSE + # TODO: distributed code + rank <- 0 + + begin_iteration <- niter_skip + 1 + end_iteration <- niter_skip + nrounds + + # the main loop for boosting iterations + for (iteration in begin_iteration:end_iteration) { + + for (f in cb$pre_iter) f() + + xgb.iter.update(bst$handle, dtrain, iteration - 1, obj) + + bst_evaluation <- numeric(0) + if (length(watchlist) > 0) + bst_evaluation <- xgb.iter.eval(bst$handle, watchlist, iteration - 1, feval) + + xgb.attr(bst$handle, 'niter') <- iteration - 1 - if (length(watchlist) > 1) - warning('Only the first data set in watchlist is used for early stopping process.') - } + for (f in cb$post_iter) f() - handle <- xgb.Booster(params, append(watchlist, dtrain)) - bst <- xgb.handleToBooster(handle) - print.every.n <- max( as.integer(print.every.n), 1L) - for (i in 1:nrounds) { - succ <- xgb.iter.update(bst$handle, dtrain, i - 1, obj) - if (length(watchlist) != 0) { - msg <- xgb.iter.eval(bst$handle, watchlist, i - 1, feval) - if (0 == ( (i - 1) %% print.every.n)) - cat(paste(msg, "\n", sep = "")) - if (!is.null(early.stop.round)) - { - score <- strsplit(msg,':|\\s+')[[1]][3] - score <- as.numeric(score) - if ( (maximize && score > bestScore) || (!maximize && score < bestScore)) { - bestScore <- score - bestInd <- i - 1 - } else { - earlyStopflag = TRUE - if (i - bestInd > early.stop.round) { - cat('Stopping. 
Best iteration:', bestInd, '\n') - break - } - } - } - } - if (save_period > 0) { - if (i %% save_period == 0) { - xgb.save(bst, save_name) - } - } + if (stop_condition) break } - bst <- xgb.Booster.check(bst) + for (f in cb$finalize) f(finalize=TRUE) + + bst <- xgb.Booster.check(bst, saveraw = TRUE) + + # store the total number of boosting iterations + bst$niter = end_iteration - if (!is.null(early.stop.round)) { - bst$bestScore <- bestScore - bst$bestInd <- bestInd - if (!is.null(params$num_parallel_tree)) { - bst$best_ntreelimit <- (bst$bestInd + 1) * params$num_parallel_tree - } else { - bst$best_ntreelimit <- bst$bestInd + 1 + # store the evaluation results + if (length(evaluation_log) > 0 && + nrow(evaluation_log) > 0) { + # include the previous compatible history when available + if (class(xgb_model) == 'xgb.Booster' && + !is.null(xgb_model$evaluation_log) && + all.equal(colnames(evaluation_log), + colnames(xgb_model$evaluation_log))) { + evaluation_log <- rbindlist(list(xgb_model$evaluation_log, evaluation_log)) } + bst$evaluation_log <- evaluation_log } - attr(bst, "call") <- fit.call - attr(bst, "params") <- params + bst$call <- match.call() + bst$params <- params + bst$callbacks <- callbacks + return(bst) } diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index 92637bb434c0..e70be0e5c742 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -1,83 +1,28 @@ -#' eXtreme Gradient Boosting (Tree) library -#' -#' A simple interface for training xgboost model. Look at \code{\link{xgb.train}} function for a more advanced interface. -#' -#' @param data takes \code{matrix}, \code{dgCMatrix}, local data file or -#' \code{xgb.DMatrix}. -#' @param label the response variable. User should not set this field, -#' if data is local data file or \code{xgb.DMatrix}. -#' @param params the list of parameters. -#' -#' Commonly used ones are: -#' \itemize{ -#' \item \code{objective} objective function, common ones are -#' \itemize{ -#' \item \code{reg:linear} linear regression -#' \item \code{binary:logistic} logistic regression for classification -#' } -#' \item \code{eta} step size of each boosting step -#' \item \code{max.depth} maximum depth of the tree -#' \item \code{nthread} number of thread used in training, if not set, all threads are used -#' } -#' -#' Look at \code{\link{xgb.train}} for a more complete list of parameters or \url{https://github.com/dmlc/xgboost/wiki/Parameters} for the full list. -#' -#' See also \code{demo/} for walkthrough example in R. -#' -#' @param nrounds the max number of iterations -#' @param verbose If 0, xgboost will stay silent. If 1, xgboost will print -#' information of performance. If 2, xgboost will print information of both -#' performance and construction progress information -#' @param print.every.n Print every N progress messages when \code{verbose>0}. Default is 1 which means all messages are printed. -#' @param missing Missing is only used when input is dense matrix, pick a float -#' value that represents missing value. Sometimes a data use 0 or other extreme value to represents missing values. -#' @param weight a vector indicating the weight for each row of the input. -#' @param early.stop.round If \code{NULL}, the early stopping function is not triggered. -#' If set to an integer \code{k}, training with a validation set will stop if the performance -#' keeps getting worse consecutively for \code{k} rounds. -#' @param maximize If \code{feval} and \code{early.stop.round} are set, then \code{maximize} must be set as well. 
-#' \code{maximize=TRUE} means the larger the evaluation score the better. -#' @param save_period save the model to the disk in every \code{save_period} rounds, 0 means no such action. -#' @param save_name the name or path for periodically saved model file. -#' @param ... other parameters to pass to \code{params}. -#' -#' @details -#' This is the modeling function for Xgboost. -#' -#' Parallelization is automatically enabled if \code{OpenMP} is present. -#' -#' Number of threads can also be manually specified via \code{nthread} parameter. -#' -#' @examples -#' data(agaricus.train, package='xgboost') -#' data(agaricus.test, package='xgboost') -#' train <- agaricus.train -#' test <- agaricus.test -#' bst <- xgboost(data = train$data, label = train$label, max.depth = 2, -#' eta = 1, nthread = 2, nround = 2, objective = "binary:logistic") -#' pred <- predict(bst, test$data) -#' +# Simple interface for training an xgboost model. +# Its documentation is combined with xgb.train. +# +#' @rdname xgb.train #' @export xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL, params = list(), nrounds, - verbose = 1, print.every.n = 1L, early.stop.round = NULL, - maximize = NULL, save_period = 0, save_name = "xgboost.model", ...) { - dtrain <- xgb.get.DMatrix(data, label, missing, weight) - - params <- append(params, list(...)) + verbose = 1, print_every_n = 1L, + early_stopping_rounds = NULL, maximize = NULL, + save_period = 0, save_name = "xgboost.model", + xgb_model = NULL, callbacks = list(), ...) { - if (verbose > 0) { - watchlist <- list(train = dtrain) - } else { - watchlist <- list() - } + dtrain <- xgb.get.DMatrix(data, label, missing, weight) - bst <- xgb.train(params, dtrain, nrounds, watchlist, verbose = verbose, print.every.n=print.every.n, - early.stop.round = early.stop.round, maximize = maximize, - save_period = save_period, save_name = save_name) + watchlist <- list() + if (verbose > 0) + watchlist$train = dtrain + bst <- xgb.train(params, dtrain, nrounds, watchlist, verbose = verbose, print_every_n=print_every_n, + early_stopping_rounds = early_stopping_rounds, maximize = maximize, + save_period = save_period, save_name = save_name, + xgb_model = xgb_model, callbacks = callbacks, ...) 
return(bst) } + #' Training part from Mushroom Data Set #' #' This data set is originally from the Mushroom data set, @@ -131,3 +76,26 @@ NULL #' @format A list containing a label vector, and a dgCMatrix object with 1611 #' rows and 126 variables NULL + +# Various imports +#' @importClassesFrom Matrix dgCMatrix dgeMatrix +#' @importFrom Matrix cBind +#' @importFrom Matrix colSums +#' @importFrom Matrix sparse.model.matrix +#' @importFrom Matrix sparseVector +#' @importFrom data.table data.table +#' @importFrom data.table as.data.table +#' @importFrom data.table := +#' @importFrom data.table rbindlist +#' @importFrom data.table setnames +#' @importFrom magrittr %>% +#' @importFrom stringr str_detect +#' @importFrom stringr str_extract +#' @importFrom stringr str_match +#' @importFrom stringr str_replace +#' @importFrom stringr str_replace_all +#' @importFrom stringr str_split +#' +#' @import methods +#' @useDynLib xgboost +NULL diff --git a/R-package/demo/basic_walkthrough.R b/R-package/demo/basic_walkthrough.R index ece168a04d77..f53a83805a2e 100644 --- a/R-package/demo/basic_walkthrough.R +++ b/R-package/demo/basic_walkthrough.R @@ -1,7 +1,8 @@ require(xgboost) require(methods) + # we load in the agaricus dataset -# In this example, we are aiming to predict whether a mushroom can be eaten +# In this example, we are aiming to predict whether a mushroom is edible data(agaricus.train, package='xgboost') data(agaricus.test, package='xgboost') train <- agaricus.train @@ -15,33 +16,33 @@ class(train$data) # note: we are putting in sparse matrix here, xgboost naturally handles sparse input # use sparse matrix when your feature is sparse(e.g. when you are using one-hot encoding vector) print("Training xgboost with sparseMatrix") -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, nround = 2, +bst <- xgboost(data = train$data, label = train$label, max_depth = 2, eta = 1, nrounds = 2, nthread = 2, objective = "binary:logistic") # alternatively, you can put in dense matrix, i.e. 
basic R-matrix print("Training xgboost with Matrix") -bst <- xgboost(data = as.matrix(train$data), label = train$label, max.depth = 2, eta = 1, nround = 2, +bst <- xgboost(data = as.matrix(train$data), label = train$label, max_depth = 2, eta = 1, nrounds = 2, nthread = 2, objective = "binary:logistic") # you can also put in xgb.DMatrix object, which stores label, data and other meta datas needed for advanced features print("Training xgboost with xgb.DMatrix") dtrain <- xgb.DMatrix(data = train$data, label = train$label) -bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nround = 2, nthread = 2, +bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, nthread = 2, objective = "binary:logistic") # Verbose = 0,1,2 print("Train xgboost with verbose 0, no message") -bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nround = 2, +bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, nthread = 2, objective = "binary:logistic", verbose = 0) print("Train xgboost with verbose 1, print evaluation metric") -bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nround = 2, +bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, nthread = 2, objective = "binary:logistic", verbose = 1) print("Train xgboost with verbose 2, also print information about tree") -bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nround = 2, +bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, nthread = 2, objective = "binary:logistic", verbose = 2) # you can also specify data as file path to a LibSVM format input # since we do not have this file with us, the following line is just for illustration -# bst <- xgboost(data = 'agaricus.train.svm', max.depth = 2, eta = 1, nround = 2,objective = "binary:logistic") +# bst <- xgboost(data = 'agaricus.train.svm', max_depth = 2, eta = 1, nrounds = 2,objective = "binary:logistic") #--------------------basic prediction using xgboost-------------- # you can do prediction using the following line @@ -77,19 +78,19 @@ watchlist <- list(train=dtrain, test=dtest) # to train with watchlist, use xgb.train, which contains more advanced features # watchlist allows us to monitor the evaluation result on all data in the list print("Train xgboost using xgb.train with watchlist") -bst <- xgb.train(data=dtrain, max.depth=2, eta=1, nround=2, watchlist=watchlist, +bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nrounds=2, watchlist=watchlist, nthread = 2, objective = "binary:logistic") # we can change evaluation metrics, or use multiple evaluation metrics print("train xgboost using xgb.train with watchlist, watch logloss and error") -bst <- xgb.train(data=dtrain, max.depth=2, eta=1, nround=2, watchlist=watchlist, - eval.metric = "error", eval.metric = "logloss", +bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nrounds=2, watchlist=watchlist, + eval_metric = "error", eval_metric = "logloss", nthread = 2, objective = "binary:logistic") # xgb.DMatrix can also be saved using xgb.DMatrix.save xgb.DMatrix.save(dtrain, "dtrain.buffer") # to load it in, simply call xgb.DMatrix dtrain2 <- xgb.DMatrix("dtrain.buffer") -bst <- xgb.train(data=dtrain2, max.depth=2, eta=1, nround=2, watchlist=watchlist, +bst <- xgb.train(data=dtrain2, max_depth=2, eta=1, nrounds=2, watchlist=watchlist, nthread = 2, objective = "binary:logistic") # information can be extracted from xgb.DMatrix using getinfo label = getinfo(dtest, "label") @@ -98,11 +99,11 @@ err <- as.numeric(sum(as.integer(pred > 0.5) != label))/length(label) print(paste("test-error=", err)) # You can 
dump the tree you learned using xgb.dump into a text file -xgb.dump(bst, "dump.raw.txt", with.stats = T) +xgb.dump(bst, "dump.raw.txt", with_stats = T) # Finally, you can check which features are the most important. print("Most important features (look at column Gain):") -imp_matrix <- xgb.importance(feature_names = train$data@Dimnames[[2]], model = bst) +imp_matrix <- xgb.importance(feature_names = colnames(train$data), model = bst) print(imp_matrix) # Feature importance bar plot by gain diff --git a/R-package/demo/boost_from_prediction.R b/R-package/demo/boost_from_prediction.R index 7fa7d8545de4..17656507f260 100644 --- a/R-package/demo/boost_from_prediction.R +++ b/R-package/demo/boost_from_prediction.R @@ -11,8 +11,8 @@ watchlist <- list(eval = dtest, train = dtrain) # print('start running example to start from a initial prediction') # train xgboost for 1 round -param <- list(max.depth=2,eta=1,nthread = 2, silent=1,objective='binary:logistic') -bst <- xgb.train( param, dtrain, 1, watchlist ) +param <- list(max_depth=2, eta=1, nthread = 2, silent=1, objective='binary:logistic') +bst <- xgb.train(param, dtrain, 1, watchlist) # Note: we need the margin value instead of transformed prediction in set_base_margin # do predict with output_margin=TRUE, will always give you margin values before logistic transformation ptrain <- predict(bst, dtrain, outputmargin=TRUE) diff --git a/R-package/demo/create_sparse_matrix.R b/R-package/demo/create_sparse_matrix.R index 7a8dfaa82532..6069f33d47fc 100644 --- a/R-package/demo/create_sparse_matrix.R +++ b/R-package/demo/create_sparse_matrix.R @@ -65,11 +65,10 @@ output_vector = df[,Y:=0][Improved == "Marked",Y:=1][,Y] # Following is the same process as other demo cat("Learning...\n") -bst <- xgboost(data = sparse_matrix, label = output_vector, max.depth = 9, - eta = 1, nthread = 2, nround = 10,objective = "binary:logistic") +bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 9, + eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic") -# sparse_matrix@Dimnames[[2]] represents the column names of the sparse matrix. -importance <- xgb.importance(feature_names = sparse_matrix@Dimnames[[2]], model = bst) +importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst) print(importance) # According to the matrix below, the most important feature in this dataset to predict if the treatment will work is the Age. The second most important feature is having received a placebo or not. The sex is third. Then we see our generated features (AgeDiscret). We can see that their contribution is very low (Gain column). 
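Not part of the changeset itself, but since the demo above ends with the gain-based importance table, a one-line visual follow-up may help connect it to the plotting function whose signature is updated later in this diff. This is a minimal sketch that assumes the sparse_matrix, bst and importance objects created in the create_sparse_matrix.R demo above; the n_clusters value is just an illustrative choice.

# plot the importance table computed above; bars are grouped into clusters of similar gain
library(xgboost)
xgb.plot.importance(importance_matrix = importance, n_clusters = c(1:5))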
diff --git a/R-package/demo/cross_validation.R b/R-package/demo/cross_validation.R index 5d748f6797c9..652076165bdd 100644 --- a/R-package/demo/cross_validation.R +++ b/R-package/demo/cross_validation.R @@ -6,7 +6,7 @@ dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label) nround <- 2 -param <- list(max.depth=2,eta=1,silent=1,nthread = 2, objective='binary:logistic') +param <- list(max_depth=2, eta=1, silent=1, nthread=2, objective='binary:logistic') cat('running cross validation\n') # do cross validation, this will print result out as @@ -19,7 +19,7 @@ cat('running cross validation, disable standard deviation display\n') # [iteration] metric_name:mean_value+std_value # std_value is standard deviation of the metric xgb.cv(param, dtrain, nround, nfold=5, - metrics={'error'}, showsd = FALSE) + metrics='error', showsd = FALSE) ### # you can also do cross validation with cutomized loss function @@ -40,12 +40,12 @@ evalerror <- function(preds, dtrain) { return(list(metric = "error", value = err)) } -param <- list(max.depth=2,eta=1,silent=1, +param <- list(max_depth=2, eta=1, silent=1, objective = logregobj, eval_metric = evalerror) # train with customized objective xgb.cv(params = param, data = dtrain, nrounds = nround, nfold = 5) # do cross validation with prediction values for each fold res <- xgb.cv(params = param, data = dtrain, nrounds = nround, nfold = 5, prediction = TRUE) -res$dt +res$evaluation_log length(res$pred) diff --git a/R-package/demo/custom_objective.R b/R-package/demo/custom_objective.R index 7234ead869a3..3bbb40cca29f 100644 --- a/R-package/demo/custom_objective.R +++ b/R-package/demo/custom_objective.R @@ -33,7 +33,7 @@ evalerror <- function(preds, dtrain) { return(list(metric = "error", value = err)) } -param <- list(max.depth=2, eta=1, nthread = 2, silent=1, +param <- list(max_depth=2, eta=1, nthread = 2, silent=1, objective=logregobj, eval_metric=evalerror) print ('start training with user customized objective') # training with customized objective, we can also do step by step training @@ -57,7 +57,7 @@ logregobjattr <- function(preds, dtrain) { hess <- preds * (1 - preds) return(list(grad = grad, hess = hess)) } -param <- list(max.depth=2, eta=1, nthread = 2, silent=1, +param <- list(max_depth=2, eta=1, nthread = 2, silent=1, objective=logregobjattr, eval_metric=evalerror) print ('start training with user customized objective, with additional attributes in DMatrix') # training with customized objective, we can also do step by step training diff --git a/R-package/demo/early_stopping.R b/R-package/demo/early_stopping.R index aa74aa2eeac5..08342d0f1f5c 100644 --- a/R-package/demo/early_stopping.R +++ b/R-package/demo/early_stopping.R @@ -7,7 +7,7 @@ dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label) # note: for customized objective function, we leave objective as default # note: what we are getting is margin value in prediction # you must know what you are doing -param <- list(max.depth=2,eta=1,nthread = 2, silent=1) +param <- list(max_depth=2, eta=1, nthread = 2, silent=1) watchlist <- list(eval = dtest) num_round <- 20 # user define objective function, given prediction, return gradient and second order gradient @@ -34,7 +34,7 @@ print ('start training with early Stopping setting') bst <- xgb.train(param, dtrain, num_round, watchlist, objective = logregobj, eval_metric = evalerror, maximize = FALSE, - early.stop.round = 3) + early_stopping_rounds = 3) bst <- 
xgb.cv(param, dtrain, num_round, nfold = 5, objective = logregobj, eval_metric = evalerror, - maximize = FALSE, early.stop.round = 3) + maximize = FALSE, early_stopping_rounds = 3) diff --git a/R-package/demo/predict_first_ntree.R b/R-package/demo/predict_first_ntree.R index 422201b0a0a2..c8119c594c5c 100644 --- a/R-package/demo/predict_first_ntree.R +++ b/R-package/demo/predict_first_ntree.R @@ -5,7 +5,7 @@ data(agaricus.test, package='xgboost') dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label) -param <- list(max.depth=2,eta=1,silent=1,objective='binary:logistic') +param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic') watchlist <- list(eval = dtest, train = dtrain) nround = 2 diff --git a/R-package/demo/predict_leaf_indices.R b/R-package/demo/predict_leaf_indices.R index fc87befb7abc..9aaa1a9ab7d8 100644 --- a/R-package/demo/predict_leaf_indices.R +++ b/R-package/demo/predict_leaf_indices.R @@ -10,7 +10,7 @@ data(agaricus.test, package='xgboost') dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label) dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label) -param <- list(max.depth=2, eta=1, silent=1, objective='binary:logistic') +param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic') nround = 4 # training the model for two rounds diff --git a/R-package/man/callbacks.Rd b/R-package/man/callbacks.Rd new file mode 100644 index 000000000000..d49f104f2a5e --- /dev/null +++ b/R-package/man/callbacks.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/callbacks.R +\name{callbacks} +\alias{callbacks} +\title{Callback closures for booster training.} +\description{ +These are used to perform various service tasks either during boosting iterations or at the end. +This approach helps to modularize many such tasks without bloating the main training methods, +and it offers a way to customize the training process. +} +\details{ +By default, a callback function is run after each boosting iteration. +An R-attribute \code{is_pre_iteration} could be set for a callback to define a pre-iteration function. + +When a callback function has \code{finalize} parameter, its finalizer part will also be run after +the boosting is completed. + +WARNING: side-effects!!! Be aware that these callback functions access and modify things in +the environment from which they are called, which is a fairly uncommon thing to do in R. + +To write a custom callback closure, make sure you first understand the main concepts about R environments. +Check either R documentation on \code{\link[base]{environment}} or the +\href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R" +book by Hadley Wickham. Further, the best option is to read the code of some of the existing callbacks - +choose ones that do something similar to what you want to achieve. Also, you would need to get familiar +with the objects available inside of the \code{xgb.train} and \code{xgb.cv} internal environments. 
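To make the advice above a bit more concrete, here is a rough sketch of the closure pattern it describes. It is not part of this changeset, the cb.demo.counter name is invented for illustration, and it only touches objects that the callback documentation says are available in the calling frame (iteration, bst_evaluation); the 'call' and 'name' attributes follow the convention used by the exported callbacks.

# a minimal custom callback: counts its invocations and echoes the first evaluation metric
cb.demo.counter <- function() {
  n_calls <- 0
  callback <- function(env = parent.frame()) {
    n_calls <<- n_calls + 1   # state is kept in the closure's own environment
    if (length(env$bst_evaluation) > 0)
      cat("iter", env$iteration, ":",
          names(env$bst_evaluation)[1], "=", env$bst_evaluation[1], "\n")
  }
  attr(callback, 'call') <- match.call()
  attr(callback, 'name') <- 'cb.demo.counter'
  callback
}
# it would then be passed as, e.g.:
#   xgb.train(param, dtrain, nrounds, watchlist, callbacks = list(cb.demo.counter()))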
+} +\seealso{ +\code{\link{cb.print.evaluation}}, +\code{\link{cb.evaluation.log}}, +\code{\link{cb.reset.parameters}}, +\code{\link{cb.early.stop}}, +\code{\link{cb.save.model}}, +\code{\link{cb.cv.predict}}, +\code{\link{xgb.train}}, +\code{\link{xgb.cv}} +} + diff --git a/R-package/man/cb.cv.predict.Rd b/R-package/man/cb.cv.predict.Rd new file mode 100644 index 000000000000..34e9f813ee70 --- /dev/null +++ b/R-package/man/cb.cv.predict.Rd @@ -0,0 +1,43 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/callbacks.R +\name{cb.cv.predict} +\alias{cb.cv.predict} +\title{Callback closure for returning cross-validation based predictions.} +\usage{ +cb.cv.predict(save_models = FALSE) +} +\arguments{ +\item{save_models}{a flag for whether to save the folds' models.} +} +\value{ +Predictions are returned inside of the \code{pred} element, which is either a vector or a matrix, +depending on the number of prediction outputs per data row. The order of predictions corresponds +to the order of rows in the original dataset. Note that when a custom \code{folds} list is +provided in \code{xgb.cv}, the predictions would only be returned properly when this list is a +non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions would not be +meaningful when user-provided folds have overlapping indices as in, e.g., random sampling splits. +When some of the indices in the training dataset are not included into user-provided \code{folds}, +their prediction value would be \code{NA}. +} +\description{ +Callback closure for returning cross-validation based predictions. +} +\details{ +This callback function saves predictions for all of the test folds, +and also allows to save the folds' models. + +It is a "finalizer" callback and it uses early stopping information whenever it is available, +thus it must be run after the early stopping callback if the early stopping is used. + +Callback function expects the following values to be set in its calling frame: +\code{bst_folds}, +\code{basket}, +\code{data}, +\code{end_iteration}, +\code{num_parallel_tree}, +\code{num_class}. +} +\seealso{ +\code{\link{callbacks}} +} + diff --git a/R-package/man/cb.early.stop.Rd b/R-package/man/cb.early.stop.Rd new file mode 100644 index 000000000000..eec30d7b5771 --- /dev/null +++ b/R-package/man/cb.early.stop.Rd @@ -0,0 +1,63 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/callbacks.R +\name{cb.early.stop} +\alias{cb.early.stop} +\title{Callback closure to activate the early stopping.} +\usage{ +cb.early.stop(stopping_rounds, maximize = FALSE, metric_name = NULL, + verbose = TRUE) +} +\arguments{ +\item{stopping_rounds}{The number of rounds with no improvement in +the evaluation metric in order to stop the training.} + +\item{maximize}{whether to maximize the evaluation metric} + +\item{metric_name}{the name of an evaluation column to use as a criterion for early +stopping. If not set, the last column would be used. +Let's say the test data in \code{watchlist} was labelled as \code{dtest}, +and one wants to use the AUC in test data for early stopping regardless of where +it is in the \code{watchlist}, then one of the following would need to be set: +\code{metric_name='dtest-auc'} or \code{metric_name='dtest_auc'}. +All dash '-' characters in metric names are considered equivalent to '_'.} + +\item{verbose}{whether to print the early stopping information.} +} +\description{ +Callback closure to activate the early stopping. 
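As a hedged illustration of the metric_name argument documented above (not taken from this changeset; the agaricus objects and the number of rounds are assumptions), early stopping can be driven by the test AUC even when the test set is not the last entry in the watchlist:

library(xgboost)
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest  <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
param <- list(max_depth = 2, eta = 0.3, silent = 1,
              objective = "binary:logistic", eval_metric = "auc")
# 'train' comes after 'dtest' here, so without metric_name the train AUC would be used
bst <- xgb.train(param, dtrain, nrounds = 50,
                 watchlist = list(dtest = dtest, train = dtrain),
                 callbacks = list(cb.early.stop(stopping_rounds = 3, maximize = TRUE,
                                                metric_name = 'dtest_auc')))
bst$best_iteration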
+} +\details{ +This callback function determines the condition for early stopping +by setting the \code{stop_condition = TRUE} flag in its calling frame. + +The following additional fields are assigned to the model's R object: +\itemize{ +\item \code{best_score} the evaluation score at the best iteration +\item \code{best_iteration} at which boosting iteration the best score has occurred (1-based index) +\item \code{best_ntreelimit} to use with the \code{ntreelimit} parameter in \code{predict}. + It differs from \code{best_iteration} in multiclass or random forest settings. +} + +The same values are also stored as xgb-attributes: +\itemize{ +\item \code{best_iteration} is stored as a 0-based iteration index (for interoperability of binary models) +\item \code{best_msg} message string is also stored. +} + +At least one data element is required in the evaluation watchlist for early stopping to work. + +Callback function expects the following values to be set in its calling frame: +\code{stop_condition}, +\code{bst_evaluation}, +\code{rank}, +\code{bst} (or \code{bst_folds} and \code{basket}), +\code{iteration}, +\code{begin_iteration}, +\code{end_iteration}, +\code{num_parallel_tree}. +} +\seealso{ +\code{\link{callbacks}}, +\code{\link{xgb.attr}} +} + diff --git a/R-package/man/cb.evaluation.log.Rd b/R-package/man/cb.evaluation.log.Rd new file mode 100644 index 000000000000..a71b7f8d38c8 --- /dev/null +++ b/R-package/man/cb.evaluation.log.Rd @@ -0,0 +1,32 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/callbacks.R +\name{cb.evaluation.log} +\alias{cb.evaluation.log} +\title{Callback closure for logging the evaluation history} +\usage{ +cb.evaluation.log() +} +\description{ +Callback closure for logging the evaluation history +} +\details{ +This callback function appends the current iteration evaluation results \code{bst_evaluation} +available in the calling parent frame to the \code{evaluation_log} list in a calling frame. + +The finalizer callback (called with \code{finalize = TRUE} in the end) converts +the \code{evaluation_log} list into a final data.table. + +The iteration evaluation result \code{bst_evaluation} must be a named numeric vector. + +Note: in the column names of the final data.table, the dash '-' character is replaced with +the underscore '_' in order to make the column names more like regular R identifiers. + +Callback function expects the following values to be set in its calling frame: +\code{evaluation_log}, +\code{bst_evaluation}, +\code{iteration}. +} +\seealso{ +\code{\link{callbacks}} +} + diff --git a/R-package/man/cb.print.evaluation.Rd b/R-package/man/cb.print.evaluation.Rd new file mode 100644 index 000000000000..aec57fe2d653 --- /dev/null +++ b/R-package/man/cb.print.evaluation.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/callbacks.R +\name{cb.print.evaluation} +\alias{cb.print.evaluation} +\title{Callback closure for printing the result of evaluation} +\usage{ +cb.print.evaluation(period = 1) +} +\arguments{ +\item{period}{results would be printed every number of periods} +} +\description{ +Callback closure for printing the result of evaluation +} +\details{ +The callback function prints the result of evaluation at every \code{period} iterations. +The initial and the last iteration's evaluations are always printed. 
+ +Callback function expects the following values to be set in its calling frame: +\code{bst_evaluation} (also \code{bst_evaluation_err} when available), +\code{iteration}, +\code{begin_iteration}, +\code{end_iteration}. +} +\seealso{ +\code{\link{callbacks}} +} + diff --git a/R-package/man/cb.reset.parameters.Rd b/R-package/man/cb.reset.parameters.Rd new file mode 100644 index 000000000000..24965c81504b --- /dev/null +++ b/R-package/man/cb.reset.parameters.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/callbacks.R +\name{cb.reset.parameters} +\alias{cb.reset.parameters} +\title{Callback closure for resetting the booster's parameters at each iteration.} +\usage{ +cb.reset.parameters(new_params) +} +\arguments{ +\item{new_params}{a list where each element corresponds to a parameter that needs to be reset. +Each element's value must be either a vector of values of length \code{nrounds} +to be set at each iteration, +or a function of two parameters \code{learning_rates(iteration, nrounds)} +which returns a new parameter value by using the current iteration number +and the total number of boosting rounds.} +} +\description{ +Callback closure for resetting the booster's parameters at each iteration. +} +\details{ +This is a "pre-iteration" callback function used to reset booster's parameters +at the beginning of each iteration. + +Note that when training is resumed from some previous model, and a function is used to +reset a parameter value, the \code{nrounds} argument in this function would be +the number of boosting rounds in the current training. + +Callback function expects the following values to be set in its calling frame: +\code{bst} or \code{bst_folds}, +\code{iteration}, +\code{begin_iteration}, +\code{end_iteration}. +} +\seealso{ +\code{\link{callbacks}} +} + diff --git a/R-package/man/cb.save.model.Rd b/R-package/man/cb.save.model.Rd new file mode 100644 index 000000000000..eef9b6295b66 --- /dev/null +++ b/R-package/man/cb.save.model.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/callbacks.R +\name{cb.save.model} +\alias{cb.save.model} +\title{Callback closure for saving a model file.} +\usage{ +cb.save.model(save_period = 0, save_name = "xgboost.model") +} +\arguments{ +\item{save_period}{save the model to disk after every +\code{save_period} iterations; 0 means save the model at the end.} + +\item{save_name}{the name or path for the saved model file. +It can contain a \code{\link[base]{sprintf}} formatting specifier +to include the integer iteration number in the file name. +E.g., with \code{save_name} = 'xgboost_%04d.model', +the file saved at iteration 50 would be named "xgboost_0050.model".} +} +\description{ +Callback closure for saving a model file. +} +\details{ +This callback function allows to save an xgb-model file, either periodically after every \code{save_period} iterations or at the end. + +Callback function expects the following values to be set in its calling frame: +\code{bst}, +\code{iteration}, +\code{begin_iteration}, +\code{end_iteration}. 
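A compact sketch combining the two callbacks documented above (not from the changeset; the agaricus data, the halving eta schedule, and the file-name pattern are illustrative assumptions):

library(xgboost)
data(agaricus.train, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
param <- list(max_depth = 2, silent = 1, objective = "binary:logistic")
# eta is recomputed before every iteration from (iteration, nrounds)
my_eta <- function(iteration, nrounds) 0.5 / 2^(iteration - 1)
bst <- xgb.train(param, dtrain, nrounds = 4, watchlist = list(train = dtrain),
                 callbacks = list(cb.reset.parameters(list(eta = my_eta)),
                                  # with save_period = 2, files xgboost_0002.model and
                                  # xgboost_0004.model would be written
                                  cb.save.model(save_period = 2,
                                                save_name = "xgboost_%04d.model")))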
+} +\seealso{ +\code{\link{callbacks}} +} + diff --git a/R-package/man/get.paths.to.leaf.Rd b/R-package/man/get.paths.to.leaf.Rd index 1fdcfd5d7121..8b19ae6d8512 100644 --- a/R-package/man/get.paths.to.leaf.Rd +++ b/R-package/man/get.paths.to.leaf.Rd @@ -4,7 +4,7 @@ \alias{get.paths.to.leaf} \title{Extract path from root to leaf from data.table} \usage{ -get.paths.to.leaf(dt.tree) +get.paths.to.leaf(dt_tree) } \arguments{ \item{dt.tree}{data.table containing the nodes and edges of the trees} diff --git a/R-package/man/predict.xgb.Booster.Rd b/R-package/man/predict.xgb.Booster.Rd index 504037937a6f..2dc537112d5e 100644 --- a/R-package/man/predict.xgb.Booster.Rd +++ b/R-package/man/predict.xgb.Booster.Rd @@ -6,53 +6,124 @@ \title{Predict method for eXtreme Gradient Boosting model} \usage{ \method{predict}{xgb.Booster}(object, newdata, missing = NA, - outputmargin = FALSE, ntreelimit = NULL, predleaf = FALSE) + outputmargin = FALSE, ntreelimit = NULL, predleaf = FALSE, + reshape = FALSE) \method{predict}{xgb.Booster.handle}(object, ...) } \arguments{ \item{object}{Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}} -\item{newdata}{takes \code{matrix}, \code{dgCMatrix}, local data file or -\code{xgb.DMatrix}.} +\item{newdata}{takes \code{matrix}, \code{dgCMatrix}, local data file or \code{xgb.DMatrix}.} -\item{missing}{Missing is only used when input is dense matrix, pick a float -value that represents missing value. Sometime a data use 0 or other extreme value to represents missing values.} +\item{missing}{Missing is only used when input is dense matrix. Pick a float value that represents +missing values in data (e.g., sometimes 0 or some other extreme value is used).} -\item{outputmargin}{whether the prediction should be shown in the original -value of sum of functions, when outputmargin=TRUE, the prediction is -untransformed margin value. In logistic regression, outputmargin=T will -output value before logistic transformation.} +\item{outputmargin}{whether the prediction should be returned in the form of original untransformed +sum of predictions from boosting iterations' results. E.g., setting \code{outputmargin=TRUE} for +logistic regression would result in predictions for log-odds instead of probabilities.} -\item{ntreelimit}{limit number of trees used in prediction, this parameter is -only valid for gbtree, but not for gblinear. set it to be value bigger -than 0. It will use all trees by default.} +\item{ntreelimit}{limit the number of model's trees or boosting iterations used in prediction (see Details). +It will use all the trees by default (\code{NULL} value).} -\item{predleaf}{whether predict leaf index instead. If set to TRUE, the output will be a matrix object.} +\item{predleaf}{whether predict leaf index instead.} -\item{...}{Parameters pass to \code{predict.xgb.Booster}} +\item{reshape}{whether to reshape the vector of predictions to a matrix form when there are several +prediction outputs per case. This option has no effect when \code{predleaf = TRUE}.} + +\item{...}{Parameters passed to \code{predict.xgb.Booster}} +} +\value{ +For regression or binary classification, it returns a vector of length \code{nrows(newdata)}. +For multiclass classification, either a \code{num_class * nrows(newdata)} vector or +a \code{(nrows(newdata), num_class)} dimension matrix is returned, depending on +the \code{reshape} value. + +When \code{predleaf = TRUE}, the output is a matrix object with the +number of columns corresponding to the number of trees. 
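The value description above mentions the new reshape argument, while the examples further below reshape the softprob output by hand; here is a brief sketch of the reshape = TRUE form (the iris-based model mirrors the multiclass example below and is an assumption of this sketch):

library(xgboost)
lb <- as.numeric(iris$Species) - 1
bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
               max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10,
               objective = "multi:softprob", num_class = 3)
# reshape = TRUE returns an nrow(newdata) x num_class matrix instead of a long vector
pred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE)
dim(pred)                      # 150 rows, 3 class-probability columns
pred_labels <- max.col(pred) - 1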
} \description{ Predicted values based on either xgboost model or model handle object. } \details{ -The option \code{ntreelimit} purpose is to let the user train a model with lots -of trees but use only the first trees for prediction to avoid overfitting -(without having to train a new model with less trees). - -The option \code{predleaf} purpose is inspired from §3.1 of the paper -\code{Practical Lessons from Predicting Clicks on Ads at Facebook}. -The idea is to use the model as a generator of new features which capture non linear link -from original features. +Note that \code{ntreelimit} is not necessarily equal to the number of boosting iterations +and it is not necessarily equal to the number of trees in a model. +E.g., in a random forest-like model, \code{ntreelimit} would limit the number of trees. +But for multiclass classification, there are multiple trees per iteration, +yet \code{ntreelimit} limits the number of boosting iterations. + +Also note that \code{ntreelimit} would currently do nothing for predictions from gblinear, +since gblinear doesn't keep its boosting history. + +One possible practical application of the \code{predleaf} option is to use the model +as a generator of new features which capture non-linearity and interactions, +e.g., as implemented in \code{\link{xgb.create.features}}. } \examples{ +## binary classification: + data(agaricus.train, package='xgboost') data(agaricus.test, package='xgboost') train <- agaricus.train test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, - eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +bst <- xgboost(data = train$data, label = train$label, max_depth = 2, + eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") +# use all trees by default pred <- predict(bst, test$data) +# use only the 1st tree +pred <- predict(bst, test$data, ntreelimit = 1) + + +## multiclass classification in iris dataset: + +lb <- as.numeric(iris$Species) - 1 +num_class <- 3 +set.seed(11) +bst <- xgboost(data = as.matrix(iris[, -5]), label = lb, + max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10, subsample = 0.5, + objective = "multi:softprob", num_class = num_class) +# predict for softmax returns num_class probability numbers per case: +pred <- predict(bst, as.matrix(iris[, -5])) +str(pred) +# reshape it to a num_class-columns matrix +pred <- matrix(pred, ncol=num_class, byrow=TRUE) +# convert the probabilities to softmax labels +pred_labels <- max.col(pred) - 1 +# the following should result in the same error as seen in the last iteration +sum(pred_labels != lb)/length(lb) + +# compare that to the predictions from softmax: +set.seed(11) +bst <- xgboost(data = as.matrix(iris[, -5]), label = lb, + max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10, subsample = 0.5, + objective = "multi:softmax", num_class = num_class) +pred <- predict(bst, as.matrix(iris[, -5])) +str(pred) +all.equal(pred, pred_labels) +# prediction from using only 5 iterations should result +# in the same error as seen in iteration 5: +pred5 <- predict(bst, as.matrix(iris[, -5]), ntreelimit=5) +sum(pred5 != lb)/length(lb) + + +## random forest-like model of 25 trees for binary classification: + +set.seed(11) +bst <- xgboost(data = train$data, label = train$label, max_depth = 5, + nthread = 2, nrounds = 1, objective = "binary:logistic", + num_parallel_tree = 25, subsample = 0.6, colsample_bytree = 0.1) +# Inspect the prediction error vs number of trees: +lb <- test$label +dtest <- xgb.DMatrix(test$data, label=lb) 
+err <- sapply(1:25, function(n) { + pred <- predict(bst, dtest, ntreelimit=n) + sum((pred > 0.5) != lb)/length(lb) +}) +plot(err, type='l', ylim=c(0,0.1), xlab='#trees') + +} +\seealso{ +\code{\link{xgb.train}}. } diff --git a/R-package/man/print.xgb.Booster.Rd b/R-package/man/print.xgb.Booster.Rd new file mode 100644 index 000000000000..7f13c328caaa --- /dev/null +++ b/R-package/man/print.xgb.Booster.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/xgb.Booster.R +\name{print.xgb.Booster} +\alias{print.xgb.Booster} +\title{Print xgb.Booster} +\usage{ +print.xgb.Booster(x, verbose = FALSE, ...) +} +\arguments{ +\item{x}{an xgb.Booster object} + +\item{verbose}{whether to print detailed data (e.g., attribute values)} + +\item{...}{not currently used} +} +\description{ +Print information about xgb.Booster. +} +\examples{ +data(agaricus.train, package='xgboost') +train <- agaricus.train +bst <- xgboost(data = train$data, label = train$label, max_depth = 2, + eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") +attr(bst, 'myattr') <- 'memo' + +print(bst) +print(bst, verbose=TRUE) + +} + diff --git a/R-package/man/print.xgb.cv.Rd b/R-package/man/print.xgb.cv.Rd new file mode 100644 index 000000000000..cfe8878c6cc6 --- /dev/null +++ b/R-package/man/print.xgb.cv.Rd @@ -0,0 +1,32 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/xgb.cv.R +\name{print.xgb.cv.synchronous} +\alias{print.xgb.cv.synchronous} +\title{Print xgb.cv result} +\usage{ +print.xgb.cv.synchronous(x, verbose = FALSE, ...) +} +\arguments{ +\item{x}{an \code{xgb.cv.synchronous} object} + +\item{verbose}{whether to print detailed data} + +\item{...}{passed to \code{data.table.print}} +} +\description{ +Prints formatted results of \code{xgb.cv}. +} +\details{ +When not verbose, it would only print the evaluation results, +including the best iteration (when available). +} +\examples{ +data(agaricus.train, package='xgboost') +train <- agaricus.train +cv <- xgb.cv(data = train$data, label = train$label, nfold = 5, max_depth = 2, + eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") +print(cv) +print(cv, verbose=TRUE) + +} + diff --git a/R-package/man/xgb.DMatrix.save.Rd b/R-package/man/xgb.DMatrix.save.Rd index 78348c3faa6d..9b0e835bede0 100644 --- a/R-package/man/xgb.DMatrix.save.Rd +++ b/R-package/man/xgb.DMatrix.save.Rd @@ -4,12 +4,12 @@ \alias{xgb.DMatrix.save} \title{Save xgb.DMatrix object to binary file} \usage{ -xgb.DMatrix.save(DMatrix, fname) +xgb.DMatrix.save(dmatrix, fname) } \arguments{ -\item{DMatrix}{the DMatrix object} +\item{dmatrix}{the \code{xgb.DMatrix} object} -\item{fname}{the name of the binary file.} +\item{fname}{the name of the file to write.} } \description{ Save xgb.DMatrix object to binary file diff --git a/R-package/man/xgb.attr.Rd b/R-package/man/xgb.attr.Rd index 3429da958786..e8992e71467a 100644 --- a/R-package/man/xgb.attr.Rd +++ b/R-package/man/xgb.attr.Rd @@ -52,16 +52,20 @@ Use \code{\link{`xgb.parameters<-`}} to set or change model parameters. The attribute setters would usually work more efficiently for \code{xgb.Booster.handle} than for \code{xgb.Booster}, since only just a handle (pointer) would need to be copied. +That would only matter if attributes need to be set many times. 
+Note, however, that when feeding a handle of an \code{xgb.Booster} object to the attribute setters, +the raw model cache of an \code{xgb.Booster} object would not be automatically updated, +and it would be the user's responsibility to call \code{xgb.save.raw} to update it. The \code{xgb.attributes<-} setter either updates the existing or adds one or several attributes, -but doesn't delete the existing attributes which don't have their names in \code{names(attributes)}. +but it doesn't delete the other existing attributes. } \examples{ data(agaricus.train, package='xgboost') train <- agaricus.train -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, - eta = 1, nthread = 2, nround = 2, objective = "binary:logistic") +bst <- xgboost(data = train$data, label = train$label, max_depth = 2, + eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") xgb.attr(bst, "my_attribute") <- "my attribute value" print(xgb.attr(bst, "my_attribute")) diff --git a/R-package/man/xgb.create.features.Rd b/R-package/man/xgb.create.features.Rd index cab2ab654dd8..4d1b8a1526d1 100644 --- a/R-package/man/xgb.create.features.Rd +++ b/R-package/man/xgb.create.features.Rd @@ -4,12 +4,14 @@ \alias{xgb.create.features} \title{Create new features from a previously learned model} \usage{ -xgb.create.features(model, training.data) +xgb.create.features(model, data, ...) } \arguments{ \item{model}{decision tree boosting model learned on the original data} -\item{training.data}{original data (usually provided as a \code{dgCMatrix} matrix)} +\item{data}{original data (usually provided as a \code{dgCMatrix} matrix)} + +\item{...}{currently not used} } \value{ \code{dgCMatrix} matrix including both the original data and the new features. @@ -60,7 +62,7 @@ data(agaricus.test, package='xgboost') dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label) dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label) -param <- list(max.depth=2, eta=1, silent=1, objective='binary:logistic') +param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic') nround = 4 bst = xgb.train(params = param, data = dtrain, nrounds = nround, nthread = 2) diff --git a/R-package/man/xgb.cv.Rd b/R-package/man/xgb.cv.Rd index f3a1fcfd1916..954702d3450c 100644 --- a/R-package/man/xgb.cv.Rd +++ b/R-package/man/xgb.cv.Rd @@ -6,8 +6,9 @@ \usage{ xgb.cv(params = list(), data, nrounds, nfold, label = NULL, missing = NA, prediction = FALSE, showsd = TRUE, metrics = list(), obj = NULL, - feval = NULL, stratified = TRUE, folds = NULL, verbose = T, - print.every.n = 1L, early.stop.round = NULL, maximize = NULL, ...) + feval = NULL, stratified = TRUE, folds = NULL, verbose = TRUE, + print_every_n = 1L, early_stopping_rounds = NULL, maximize = NULL, + callbacks = list(), ...) } \arguments{ \item{params}{the list of parameters. Commonly used ones are: @@ -18,11 +19,11 @@ xgb.cv(params = list(), data, nrounds, nfold, label = NULL, missing = NA, \item \code{binary:logistic} logistic regression for classification } \item \code{eta} step size of each boosting step - \item \code{max.depth} maximum depth of the tree + \item \code{max_depth} maximum depth of the tree \item \code{nthread} number of thread used in training, if not set, all threads are used } - See \link{xgb.train} for further details. + See \code{\link{xgb.train}} for further details. 
See also demo/ for walkthrough example in R.} \item{data}{takes an \code{xgb.DMatrix} or \code{Matrix} as the input.} @@ -31,16 +32,18 @@ xgb.cv(params = list(), data, nrounds, nfold, label = NULL, missing = NA, \item{nfold}{the original dataset is randomly partitioned into \code{nfold} equal size subsamples.} -\item{label}{option field, when data is \code{Matrix}} +\item{label}{vector of response values. Should be provided only when data is not an \code{xgb.DMatrix}.} -\item{missing}{Missing is only used when input is dense matrix, pick a float -value that represents missing value. Sometime a data use 0 or other extreme value to represents missing values.} +\item{missing}{is only used when input is a dense matrix. By default it is set to NA, which means +that NA values should be considered as 'missing' by the algorithm. +Sometimes, 0 or other extreme value might be used to represent missing values.} -\item{prediction}{A logical value indicating whether to return the prediction vector.} +\item{prediction}{A logical value indicating whether to return the test fold predictions +from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callback.} -\item{showsd}{\code{boolean}, whether show standard deviation of cross validation} +\item{showsd}{\code{boolean}, whether to show standard deviation of cross validation} -\item{metrics, }{list of evaluation metrics to be used in corss validation, +\item{metrics, }{list of evaluation metrics to be used in cross validation, when it is not specified, the evaluation metric is chosen according to objective function. Possible options are: \itemize{ @@ -58,32 +61,61 @@ gradient with given prediction and dtrain.} \code{list(metric='metric-name', value='metric-value')} with given prediction and dtrain.} -\item{stratified}{\code{boolean} whether sampling of folds should be stratified by the values of labels in \code{data}} +\item{stratified}{a \code{boolean} indicating whether sampling of folds should be stratified +by the values of outcome labels.} -\item{folds}{\code{list} provides a possibility of using a list of pre-defined CV folds (each element must be a vector of fold's indices). -If folds are supplied, the nfold and stratified parameters would be ignored.} +\item{folds}{\code{list} provides a possibility to use a list of pre-defined CV folds +(each element must be a vector of test fold's indices). When folds are supplied, +the \code{nfold} and \code{stratified} parameters are ignored.} \item{verbose}{\code{boolean}, print the statistics during the process} -\item{print.every.n}{Print every N progress messages when \code{verbose>0}. Default is 1 which means all messages are printed.} +\item{print_every_n}{Print each n-th iteration evaluation messages when \code{verbose>0}. +Default is 1 which means all messages are printed. This parameter is passed to the +\code{\link{cb.print.evaluation}} callback.} -\item{early.stop.round}{If \code{NULL}, the early stopping function is not triggered. +\item{early_stopping_rounds}{If \code{NULL}, the early stopping function is not triggered. If set to an integer \code{k}, training with a validation set will stop if the performance -keeps getting worse consecutively for \code{k} rounds.} +doesn't improve for \code{k} rounds. +Setting this parameter engages the \code{\link{cb.early.stop}} callback.} -\item{maximize}{If \code{feval} and \code{early.stop.round} are set, then \code{maximize} must be set as well. 
-\code{maximize=TRUE} means the larger the evaluation score the better.} +\item{maximize}{If \code{feval} and \code{early_stopping_rounds} are set, +then this parameter must be set as well. +When it is \code{TRUE}, it means the larger the evaluation score the better. +This parameter is passed to the \code{\link{cb.early.stop}} callback.} + +\item{callbacks}{a list of callback functions to perform various tasks during boosting. +See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the +parameters' values. User can provide either existing or their own callback methods in order +to customize the training process.} \item{...}{other parameters to pass to \code{params}.} } \value{ -If \code{prediction = TRUE}, a list with the following elements is returned: +An object of class \code{xgb.cv.synchronous} with the following elements: \itemize{ - \item \code{dt} a \code{data.table} with each mean and standard deviation stat for training set and test set - \item \code{pred} an array or matrix (for multiclass classification) with predictions for each CV-fold for the model having been trained on the data in all other folds. + \item \code{call} a function call. + \item \code{params} parameters that were passed to the xgboost library. Note that it does not + capture parameters changed by the \code{\link{cb.reset.parameters}} callback. + \item \code{callbacks} callback functions that were either automatically assigned or + explicitly passed. + \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the + first column corresponding to iteration number and the rest corresponding to the + CV-based evaluation means and standard deviations for the training and test CV-sets. + It is created by the \code{\link{cb.evaluation.log}} callback. + \item \code{niter} number of boosting iterations. + \item \code{folds} the list of CV folds' indices - either those passed through the \code{folds} + parameter or randomly generated. + \item \code{best_iteration} iteration number with the best evaluation metric value + (only available with early stopping). + \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration, + which could further be used in \code{predict} method + (only available with early stopping). + \item \code{pred} CV prediction values available when \code{prediction} is set. + It is either a vector or a matrix (see \code{\link{cb.cv.predict}}). + \item \code{models} a list of the CV folds' models. It is only available with the explicit + setting of the \code{cb.cv.predict(save_models = TRUE)} callback. } - -If \code{prediction = FALSE}, just a \code{data.table} with each mean and standard deviation stat for training set and test set is returned. 
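To connect the pred and models elements listed above with the callbacks that produce them, a short sketch (agaricus data assumed; per the description above, cb.cv.predict(save_models = TRUE) has to be passed explicitly to keep the fold models):

library(xgboost)
data(agaricus.train, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
cv <- xgb.cv(params = list(max_depth = 2, eta = 1, silent = 1,
                           objective = "binary:logistic"),
             data = dtrain, nrounds = 3, nfold = 5,
             callbacks = list(cb.cv.predict(save_models = TRUE)))
str(cv$pred)        # out-of-fold predictions, in the original row order
length(cv$models)   # one xgb.Booster per CV fold
cv$evaluation_log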
} \description{ The cross valudation function of xgboost @@ -102,8 +134,10 @@ Adapted from \url{http://en.wikipedia.org/wiki/Cross-validation_\%28statistics\% \examples{ data(agaricus.train, package='xgboost') dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) -history <- xgb.cv(data = dtrain, nround=3, nthread = 2, nfold = 5, metrics=list("rmse","auc"), - max.depth =3, eta = 1, objective = "binary:logistic") -print(history) +cv <- xgb.cv(data = dtrain, nrounds = 3, nthread = 2, nfold = 5, metrics = list("rmse","auc"), + max_depth = 3, eta = 1, objective = "binary:logistic") +print(cv) +print(cv, verbose=TRUE) + } diff --git a/R-package/man/xgb.dump.Rd b/R-package/man/xgb.dump.Rd index cafa8ac14019..efbf8b62982b 100644 --- a/R-package/man/xgb.dump.Rd +++ b/R-package/man/xgb.dump.Rd @@ -4,7 +4,7 @@ \alias{xgb.dump} \title{Save xgboost model to text file} \usage{ -xgb.dump(model = NULL, fname = NULL, fmap = "", with.stats = FALSE) +xgb.dump(model = NULL, fname = NULL, fmap = "", with_stats = FALSE, ...) } \arguments{ \item{model}{the model object.} @@ -18,10 +18,12 @@ See demo/ for walkthrough example in R, and \url{https://github.com/dmlc/xgboost/blob/master/demo/data/featmap.txt} for example Format.} -\item{with.stats}{whether dump statistics of splits +\item{with_stats}{whether dump statistics of splits When this option is on, the model dump comes with two additional statistics: gain is the approximate loss function gain we get in each split; cover is the sum of second order gradient in each node.} + +\item{...}{currently not used} } \value{ if fname is not provided or set to \code{NULL} the function will return the model as a \code{character} vector. Otherwise it will return \code{TRUE}. @@ -34,10 +36,10 @@ data(agaricus.train, package='xgboost') data(agaricus.test, package='xgboost') train <- agaricus.train test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, - eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +bst <- xgboost(data = train$data, label = train$label, max_depth = 2, + eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") # save the model in file 'xgb.model.dump' -xgb.dump(bst, 'xgb.model.dump', with.stats = TRUE) +xgb.dump(bst, 'xgb.model.dump', with_stats = TRUE) # print the model without saving it to a file print(xgb.dump(bst)) diff --git a/R-package/man/xgb.importance.Rd b/R-package/man/xgb.importance.Rd index f30f8149adcd..10258a07dbe8 100644 --- a/R-package/man/xgb.importance.Rd +++ b/R-package/man/xgb.importance.Rd @@ -52,14 +52,13 @@ If you need to remember one thing only: until you want to leave us early, don't \examples{ data(agaricus.train, package='xgboost') -bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max.depth = 2, - eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, + eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic") -# agaricus.train$data@Dimnames[[2]] represents the column names of the sparse matrix. 
-xgb.importance(agaricus.train$data@Dimnames[[2]], model = bst) +xgb.importance(colnames(agaricus.train$data), model = bst) # Same thing with co-occurence computation this time -xgb.importance(agaricus.train$data@Dimnames[[2]], model = bst, data = agaricus.train$data, label = agaricus.train$label) +xgb.importance(colnames(agaricus.train$data), model = bst, data = agaricus.train$data, label = agaricus.train$label) } diff --git a/R-package/man/xgb.load.Rd b/R-package/man/xgb.load.Rd index 92576ad95bbb..1499df2d4f20 100644 --- a/R-package/man/xgb.load.Rd +++ b/R-package/man/xgb.load.Rd @@ -17,8 +17,8 @@ data(agaricus.train, package='xgboost') data(agaricus.test, package='xgboost') train <- agaricus.train test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, - eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +bst <- xgboost(data = train$data, label = train$label, max_depth = 2, + eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic") xgb.save(bst, 'xgb.model') bst <- xgb.load('xgb.model') pred <- predict(bst, test$data) diff --git a/R-package/man/xgb.model.dt.tree.Rd b/R-package/man/xgb.model.dt.tree.Rd index 6b5193bc09a0..8c839be200c8 100644 --- a/R-package/man/xgb.model.dt.tree.Rd +++ b/R-package/man/xgb.model.dt.tree.Rd @@ -14,7 +14,7 @@ contains feature names, this argument should be \code{NULL} (default value)} \item{model}{object of class \code{xgb.Booster}} \item{text}{\code{character} vector previously generated by the \code{xgb.dump} -function (where parameter \code{with.stats = TRUE} should have been set).} +function (where parameter \code{with_stats = TRUE} should have been set).} \item{n_first_tree}{limit the parsing to the \code{n} first trees. If set to \code{NULL}, all trees of the model are parsed.} @@ -47,8 +47,8 @@ Parse a boosted tree model text dump into a \code{data.table} structure. data(agaricus.train, package='xgboost') -bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max.depth = 2, - eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, + eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic") (dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst)) diff --git a/R-package/man/xgb.parameters.Rd b/R-package/man/xgb.parameters.Rd index e531b5668220..3df866816862 100644 --- a/R-package/man/xgb.parameters.Rd +++ b/R-package/man/xgb.parameters.Rd @@ -23,8 +23,8 @@ than for \code{xgb.Booster}, since only just a handle would need to be copied. data(agaricus.train, package='xgboost') train <- agaricus.train -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, - eta = 1, nthread = 2, nround = 2, objective = "binary:logistic") +bst <- xgboost(data = train$data, label = train$label, max_depth = 2, + eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") xgb.parameters(bst) <- list(eta = 0.1) diff --git a/R-package/man/xgb.plot.deepness.Rd b/R-package/man/xgb.plot.deepness.Rd index e11a7495eab6..71d43ec8f361 100644 --- a/R-package/man/xgb.plot.deepness.Rd +++ b/R-package/man/xgb.plot.deepness.Rd @@ -20,7 +20,7 @@ Display both the number of \code{leaf} and the distribution of \code{weighted ob by tree deepness level. The purpose of this function is to help the user to find the best trade-off to set -the \code{max.depth} and \code{min_child_weight} parameters according to the bias / variance trade-off. 
+the \code{max_depth} and \code{min_child_weight} parameters according to the bias / variance trade-off. See \link{xgb.train} for more information about these parameters. @@ -36,8 +36,8 @@ This function is inspired by the blog post \url{http://aysent.github.io/2015/11/ \examples{ data(agaricus.train, package='xgboost') -bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max.depth = 15, - eta = 1, nthread = 2, nround = 30, objective = "binary:logistic", +bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 15, + eta = 1, nthread = 2, nrounds = 30, objective = "binary:logistic", min_child_weight = 50) xgb.plot.deepness(model = bst) diff --git a/R-package/man/xgb.plot.importance.Rd b/R-package/man/xgb.plot.importance.Rd index 2f9d5651dfef..0f7ec588a4b1 100644 --- a/R-package/man/xgb.plot.importance.Rd +++ b/R-package/man/xgb.plot.importance.Rd @@ -4,12 +4,14 @@ \alias{xgb.plot.importance} \title{Plot feature importance bar graph} \usage{ -xgb.plot.importance(importance_matrix = NULL, numberOfClusters = c(1:10)) +xgb.plot.importance(importance_matrix = NULL, n_clusters = c(1:10), ...) } \arguments{ \item{importance_matrix}{a \code{data.table} returned by the \code{xgb.importance} function.} -\item{numberOfClusters}{a \code{numeric} vector containing the min and the max range of the possible number of clusters of bars.} +\item{n_clusters}{a \code{numeric} vector containing the min and the max range of the possible number of clusters of bars.} + +\item{...}{currently not used} } \value{ A \code{ggplot2} bar graph representing each feature by a horizontal bar. Longer is the bar, more important is the feature. Features are classified by importance and clustered by importance. The group is represented through the color of the bar. @@ -29,11 +31,10 @@ data(agaricus.train, package='xgboost') #(labels = outcome column which will be learned). #Each column of the sparse Matrix is a feature in one hot encoding format. -bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max.depth = 2, - eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, + eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") -#agaricus.train$data@Dimnames[[2]] represents the column names of the sparse matrix. -importance_matrix <- xgb.importance(agaricus.train$data@Dimnames[[2]], model = bst) +importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst) xgb.plot.importance(importance_matrix) } diff --git a/R-package/man/xgb.plot.multi.trees.Rd b/R-package/man/xgb.plot.multi.trees.Rd index 4d97c58b40c1..c7186ce92c0a 100644 --- a/R-package/man/xgb.plot.multi.trees.Rd +++ b/R-package/man/xgb.plot.multi.trees.Rd @@ -4,19 +4,21 @@ \alias{xgb.plot.multi.trees} \title{Project all trees on one tree and plot it} \usage{ -xgb.plot.multi.trees(model, feature_names = NULL, features.keep = 5, - plot.width = NULL, plot.height = NULL) +xgb.plot.multi.trees(model, feature_names = NULL, features_keep = 5, + plot_width = NULL, plot_height = NULL, ...) } \arguments{ \item{model}{dump generated by the \code{xgb.train} function.} \item{feature_names}{names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). 
If model dump already contains feature names, this argument should be \code{NULL}.} -\item{features.keep}{number of features to keep in each position of the multi trees.} +\item{features_keep}{number of features to keep in each position of the multi trees.} -\item{plot.width}{width in pixels of the graph to produce} +\item{plot_width}{width in pixels of the graph to produce} -\item{plot.height}{height in pixels of the graph to produce} +\item{plot_height}{height in pixels of the graph to produce} + +\item{...}{currently not used} } \value{ Two graphs showing the distribution of the model deepness. @@ -39,7 +41,7 @@ its deepness (therefore in a boosting model, all trees have the same shape). Moreover, the trees tend to reuse the same features. The function will project each tree on one, and keep for each position the -\code{features.keep} first features (based on Gain per feature measure). +\code{features_keep} first features (based on Gain per feature measure). This function is inspired by this blog post: \url{https://wellecks.wordpress.com/2015/02/21/peering-into-the-black-box-visualizing-lambdamart/} @@ -47,11 +49,11 @@ This function is inspired by this blog post: \examples{ data(agaricus.train, package='xgboost') -bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max.depth = 15, - eta = 1, nthread = 2, nround = 30, objective = "binary:logistic", +bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 15, + eta = 1, nthread = 2, nrounds = 30, objective = "binary:logistic", min_child_weight = 50) -p <- xgb.plot.multi.trees(model = bst, feature_names = agaricus.train$data@Dimnames[[2]], features.keep = 3) +p <- xgb.plot.multi.trees(model = bst, feature_names = colnames(agaricus.train$data), features_keep = 3) print(p) } diff --git a/R-package/man/xgb.plot.tree.Rd b/R-package/man/xgb.plot.tree.Rd index c087059e0301..3620699bdeca 100644 --- a/R-package/man/xgb.plot.tree.Rd +++ b/R-package/man/xgb.plot.tree.Rd @@ -5,7 +5,7 @@ \title{Plot a boosted tree model} \usage{ xgb.plot.tree(feature_names = NULL, model = NULL, n_first_tree = NULL, - plot.width = NULL, plot.height = NULL) + plot_width = NULL, plot_height = NULL, ...) } \arguments{ \item{feature_names}{names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.} @@ -14,9 +14,11 @@ xgb.plot.tree(feature_names = NULL, model = NULL, n_first_tree = NULL, \item{n_first_tree}{limit the plot to the n first trees. If \code{NULL}, all trees of the model are plotted. Performance can be low for huge models.} -\item{plot.width}{the width of the diagram in pixels.} +\item{plot_width}{the width of the diagram in pixels.} -\item{plot.height}{the height of the diagram in pixels.} +\item{plot_height}{the height of the diagram in pixels.} + +\item{...}{currently not used.} } \value{ A \code{DiagrammeR} of the model. @@ -38,11 +40,10 @@ The function uses \href{http://www.graphviz.org/}{GraphViz} library for that pur \examples{ data(agaricus.train, package='xgboost') -bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max.depth = 2, - eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, + eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic") -# agaricus.train$data@Dimnames[[2]] represents the column names of the sparse matrix. 
-xgb.plot.tree(feature_names = agaricus.train$data@Dimnames[[2]], model = bst) +xgb.plot.tree(feature_names = colnames(agaricus.train$data), model = bst) } diff --git a/R-package/man/xgb.save.Rd b/R-package/man/xgb.save.Rd index db335105c859..85acdecd055d 100644 --- a/R-package/man/xgb.save.Rd +++ b/R-package/man/xgb.save.Rd @@ -9,7 +9,7 @@ xgb.save(model, fname) \arguments{ \item{model}{the model object.} -\item{fname}{the name of the binary file.} +\item{fname}{the name of the file to write.} } \description{ Save xgboost model from xgboost or xgb.train @@ -19,8 +19,8 @@ data(agaricus.train, package='xgboost') data(agaricus.test, package='xgboost') train <- agaricus.train test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, - eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +bst <- xgboost(data = train$data, label = train$label, max_depth = 2, + eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic") xgb.save(bst, 'xgb.model') bst <- xgb.load('xgb.model') pred <- predict(bst, test$data) diff --git a/R-package/man/xgb.save.raw.Rd b/R-package/man/xgb.save.raw.Rd index 1e9f4a4dbb04..7f808529e8c4 100644 --- a/R-package/man/xgb.save.raw.Rd +++ b/R-package/man/xgb.save.raw.Rd @@ -18,10 +18,11 @@ data(agaricus.train, package='xgboost') data(agaricus.test, package='xgboost') train <- agaricus.train test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, - eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") +bst <- xgboost(data = train$data, label = train$label, max_depth = 2, + eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic") raw <- xgb.save.raw(bst) bst <- xgb.load(raw) pred <- predict(bst, test$data) + } diff --git a/R-package/man/xgb.train.Rd b/R-package/man/xgb.train.Rd index 7f7ae49627ef..a06e21b7b133 100644 --- a/R-package/man/xgb.train.Rd +++ b/R-package/man/xgb.train.Rd @@ -1,16 +1,24 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.train.R +% Please edit documentation in R/xgb.train.R, R/xgboost.R \name{xgb.train} \alias{xgb.train} +\alias{xgboost} \title{eXtreme Gradient Boosting Training} \usage{ xgb.train(params = list(), data, nrounds, watchlist = list(), obj = NULL, - feval = NULL, verbose = 1, print.every.n = 1L, - early.stop.round = NULL, maximize = NULL, save_period = 0, - save_name = "xgboost.model", ...) + feval = NULL, verbose = 1, print_every_n = 1L, + early_stopping_rounds = NULL, maximize = NULL, save_period = NULL, + save_name = "xgboost.model", xgb_model = NULL, callbacks = list(), ...) + +xgboost(data = NULL, label = NULL, missing = NA, weight = NULL, + params = list(), nrounds, verbose = 1, print_every_n = 1L, + early_stopping_rounds = NULL, maximize = NULL, save_period = 0, + save_name = "xgboost.model", xgb_model = NULL, callbacks = list(), ...) } \arguments{ \item{params}{the list of parameters. + The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}. + Below is a shorter summary: 1. General Parameters @@ -51,81 +59,152 @@ xgb.train(params = list(), data, nrounds, watchlist = list(), obj = NULL, \item \code{binary:logistic} logistic regression for binary classification. Output probability. \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation. \item \code{num_class} set the number of classes. To use only with multiclass objectives. 
- \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class}.
- \item \code{multi:softprob} same as softmax, but output a vector of ndata * nclass, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
+ \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
+ \item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
 \item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
 }
 \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
 \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: metric will be assigned according to objective(rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section.
}}

-\item{data}{takes an \code{xgb.DMatrix} as the input.}
+\item{data}{input dataset. \code{xgb.train} takes only an \code{xgb.DMatrix} as the input.
+\code{xgboost}, in addition, also accepts \code{matrix}, \code{dgCMatrix}, or a local data file.}

\item{nrounds}{the max number of iterations}

\item{watchlist}{what information should be printed when \code{verbose=1} or
\code{verbose=2}. Watchlist is used to specify validation set monitoring during training. For example user can specify
- watchlist=list(validation1=mat1, validation2=mat2) to watch
- the performance of each round's model on mat1 and mat2}
+watchlist=list(validation1=mat1, validation2=mat2) to watch
+the performance of each round's model on mat1 and mat2}

\item{obj}{customized objective function. Returns gradient and second order
-gradient with given prediction and dtrain,}
+gradient with given prediction and dtrain.}

\item{feval}{custimized evaluation function. Returns
\code{list(metric='metric-name', value='metric-value')} with given
-prediction and dtrain,}
+prediction and dtrain.}

\item{verbose}{If 0, xgboost will stay silent. If 1, xgboost will print
-information of performance. If 2, xgboost will print information of both}
+information of performance. If 2, xgboost will print some additional information.
+Setting \code{verbose > 0} automatically engages the \code{\link{cb.evaluation.log}} and
+\code{\link{cb.print.evaluation}} callback functions.}

-\item{print.every.n}{Print every N progress messages when \code{verbose>0}. Default is 1 which means all messages are printed.}
+\item{print_every_n}{Print evaluation messages at every n-th iteration when \code{verbose>0}.
+Default is 1 which means all messages are printed. This parameter is passed to the
+\code{\link{cb.print.evaluation}} callback.}

-\item{early.stop.round}{If \code{NULL}, the early stopping function is not triggered.
+\item{early_stopping_rounds}{If \code{NULL}, the early stopping function is not triggered.
If set to an integer \code{k}, training with a validation set will stop if the performance
-keeps getting worse consecutively for \code{k} rounds.
+doesn't improve for \code{k} rounds.
+Setting this parameter engages the \code{\link{cb.early.stop}} callback.}

-\item{maximize}{If \code{feval} and \code{early.stop.round} are set, then \code{maximize} must be set as well.
-\code{maximize=TRUE} means the larger the evaluation score the better.}
+\item{maximize}{If \code{feval} and \code{early_stopping_rounds} are set,
+then this parameter must be set as well.
+When it is \code{TRUE}, it means the larger the evaluation score the better.
+This parameter is passed to the \code{\link{cb.early.stop}} callback.}

-\item{save_period}{save the model to the disk in every \code{save_period} rounds, 0 means no such action.}
+\item{save_period}{when it is non-NULL, the model is saved to disk after every \code{save_period} rounds,
+0 means save at the end. The saving is handled by the \code{\link{cb.save.model}} callback.}

\item{save_name}{the name or path for periodically saved model file.}

+\item{xgb_model}{a previously built model to continue the training from.
+Could be either an object of class \code{xgb.Booster}, or its raw data, or the name of a
+file with a previously saved model.}
+
+\item{callbacks}{a list of callback functions to perform various tasks during boosting.
+See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
+parameters' values. User can provide either existing or their own callback methods in order
+to customize the training process.}
+
\item{...}{other parameters to pass to \code{params}.}
+
+\item{label}{vector of response values. Should not be provided when data is
+a local data file name or an \code{xgb.DMatrix}.}
+
+\item{missing}{by default it is set to NA, which means that NA values should be considered as 'missing'
+by the algorithm. Sometimes, 0 or other extreme value might be used to represent missing values.
+This parameter is only used when input is a dense matrix.}
+
+\item{weight}{a vector indicating the weight for each row of the input.}
+}
+\value{
+An object of class \code{xgb.Booster} with the following elements:
+\itemize{
+ \item \code{handle} a handle (pointer) to the xgboost model in memory.
+ \item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type.
+ \item \code{niter} number of boosting iterations.
+ \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
+ first column corresponding to iteration number and the rest corresponding to evaluation
+ metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback.
+ \item \code{call} a function call.
+ \item \code{params} parameters that were passed to the xgboost library. Note that it does not
+ capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
+ \item \code{callbacks} callback functions that were either automatically assigned or
+ explicitly passed.
+ \item \code{best_iteration} iteration number with the best evaluation metric value
+ (only available with early stopping).
+ \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
+ which could further be used in the \code{predict} method
+ (only available with early stopping).
+ \item \code{best_score} the best evaluation metric value during early stopping
+ (only available with early stopping).
+}
}
\description{
-An advanced interface for training xgboost model. Look at \code{\link{xgboost}} function for a simpler interface.
+\code{xgb.train} is an advanced interface for training an xgboost model. The \code{xgboost} function provides a simpler interface.
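For illustration, a rough sketch of training continuation via \code{xgb_model} and of a few of the returned elements described above, assuming the agaricus \code{dtrain} and \code{watchlist} defined in the examples below:

param <- list(objective = "binary:logistic", max_depth = 2, eta = 1, nthread = 2)
bst1 <- xgb.train(param, dtrain, nrounds = 2, watchlist)
# continue boosting from the previous model; an xgb.Booster object, its $raw data,
# or the name of a file with a saved model can all be given as xgb_model
bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, xgb_model = bst1)
bst2$niter            # number of boosting iterations
bst2$evaluation_log   # per-iteration watchlist metrics (cb.evaluation.log)
bst2$params           # parameters as passed to the xgboost library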
}
\details{
-This is the training function for \code{xgboost}.
+These are the training functions for \code{xgboost}.

-It supports advanced features such as \code{watchlist}, customized objective function (\code{feval}),
-therefore it is more flexible than \code{\link{xgboost}} function.
+The \code{xgb.train} interface supports advanced features such as \code{watchlist},
+customized objective and evaluation metric functions, therefore it is more flexible
+than the \code{\link{xgboost}} interface.

Parallelization is automatically enabled if \code{OpenMP} is present.
Number of threads can also be manually specified via \code{nthread} parameter.

-\code{eval_metric} parameter (not listed above) is set automatically by Xgboost but can be overriden by parameter. Below is provided the list of different metric optimized by Xgboost to help you to understand how it works inside or to use them with the \code{watchlist} parameter.
+The evaluation metric is chosen automatically by Xgboost (according to the objective)
+when the \code{eval_metric} parameter is not provided.
+User may set one or several \code{eval_metric} parameters.
+Note that when using a customized metric, only this single metric can be used.
+The following is the list of built-in metrics for which Xgboost provides an optimized implementation:
\itemize{
   \item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
   \item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
   \item \code{mlogloss} multiclass logloss. \url{https://www.kaggle.com/wiki/MultiClassLogLoss}
-  \item \code{error} Binary classification error rate. It is calculated as \code{(wrong cases) / (all cases)}. For the predictions, the evaluation will regard the instances with prediction value larger than 0.5 as positive instances, and the others as negative instances.
-  \item \code{merror} Multiclass classification error rate. It is calculated as \code{(wrong cases) / (all cases)}.
+  \item \code{error} Binary classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
+        By default, it uses the 0.5 threshold for predicted values to define negative and positive instances.
+        A different threshold (e.g., 0.) could be specified as "error@0."
+  \item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
   \item \code{auc} Area under the curve. \url{http://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.
   \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG}
}
-
-Full list of parameters is available in the Wiki \url{https://github.com/dmlc/xgboost/wiki/Parameters}.
-This function only accepts an \code{\link{xgb.DMatrix}} object as the input.
+The following callbacks are automatically created when certain parameters are set:
+\itemize{
+  \item \code{cb.print.evaluation} is turned on when \code{verbose > 0};
+        and the \code{print_every_n} parameter is passed to it.
+  \item \code{cb.evaluation.log} is on when \code{verbose > 0} and \code{watchlist} is present.
+  \item \code{cb.early.stop}: when \code{early_stopping_rounds} is set.
+  \item \code{cb.save.model}: when \code{save_period > 0} is set.
+}
}
\examples{
data(agaricus.train, package='xgboost')
+data(agaricus.test, package='xgboost')
+
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
-dtest <- dtrain
+dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
watchlist <- list(eval = dtest, train = dtrain)
+
+## A simple xgb.train example:
+param <- list(max_depth = 2, eta = 1, silent = 1,
+              objective = "binary:logistic", eval_metric = "auc")
+bst <- xgb.train(param, dtrain, nthread = 2, nrounds = 2, watchlist)
+
+## An xgb.train example where custom objective and evaluation metric are used:
logregobj <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  preds <- 1/(1 + exp(-preds))
@@ -138,7 +217,29 @@ evalerror <- function(preds, dtrain) {
  err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
  return(list(metric = "error", value = err))
}
-param <- list(max.depth = 2, eta = 1, silent = 1, objective=logregobj,eval_metric=evalerror)
-bst <- xgb.train(param, dtrain, nthread = 2, nround = 2, watchlist)
+bst <- xgb.train(param, dtrain, nthread = 2, nrounds = 2, watchlist)
+
+## An xgb.train example of using variable learning rates at each iteration:
+my_etas <- list(eta = c(0.5, 0.1))
+bst <- xgb.train(param, dtrain, nthread = 2, nrounds = 2, watchlist,
+                 callbacks = list(cb.reset.parameters(my_etas)))
+
+## Explicit use of the cb.evaluation.log callback makes it possible to run
+## xgb.train silently but still store the evaluation results:
+bst <- xgb.train(param, dtrain, nthread = 2, nrounds = 2, watchlist,
+                 verbose = 0, callbacks = list(cb.evaluation.log()))
+print(bst$evaluation_log)
+
+## An 'xgboost' interface example:
+bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
+               max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
+               objective = "binary:logistic")
+pred <- predict(bst, agaricus.test$data)
+
+}
+\seealso{
+\code{\link{callbacks}},
+\code{\link{predict.xgb.Booster}},
+\code{\link{xgb.cv}}
}
diff --git a/R-package/man/xgboost-deprecated.Rd b/R-package/man/xgboost-deprecated.Rd
new file mode 100644
index 000000000000..2cb5462126ed
--- /dev/null
+++ b/R-package/man/xgboost-deprecated.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/utils.R
+\name{xgboost-deprecated}
+\alias{xgboost-deprecated}
+\title{Deprecation notices.}
+\description{
+At this time, some of the parameter names were changed in order to make the code style more uniform.
+The deprecated parameters will be removed in the next release.
+}
+\details{
+To see all the current deprecated and new parameters, check the \code{xgboost:::depr_par_lut} table.
+
+A deprecation warning is shown when any of the deprecated parameters is used in a call.
+An additional warning is shown when there was a partial match to a deprecated parameter
+(as R is able to partially match parameter names).
+}
+
diff --git a/R-package/man/xgboost.Rd b/R-package/man/xgboost.Rd
deleted file mode 100644
index e31e5da43058..000000000000
--- a/R-package/man/xgboost.Rd
+++ /dev/null
@@ -1,83 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/xgboost.R
-\name{xgboost}
-\alias{xgboost}
-\title{eXtreme Gradient Boosting (Tree) library}
-\usage{
-xgboost(data = NULL, label = NULL, missing = NA, weight = NULL,
-  params = list(), nrounds, verbose = 1, print.every.n = 1L,
-  early.stop.round = NULL, maximize = NULL, save_period = 0,
-  save_name = "xgboost.model", ...)
-} -\arguments{ -\item{data}{takes \code{matrix}, \code{dgCMatrix}, local data file or -\code{xgb.DMatrix}.} - -\item{label}{the response variable. User should not set this field, -if data is local data file or \code{xgb.DMatrix}.} - -\item{missing}{Missing is only used when input is dense matrix, pick a float -value that represents missing value. Sometimes a data use 0 or other extreme value to represents missing values.} - -\item{weight}{a vector indicating the weight for each row of the input.} - -\item{params}{the list of parameters. - -Commonly used ones are: -\itemize{ - \item \code{objective} objective function, common ones are - \itemize{ - \item \code{reg:linear} linear regression - \item \code{binary:logistic} logistic regression for classification - } - \item \code{eta} step size of each boosting step - \item \code{max.depth} maximum depth of the tree - \item \code{nthread} number of thread used in training, if not set, all threads are used -} - - Look at \code{\link{xgb.train}} for a more complete list of parameters or \url{https://github.com/dmlc/xgboost/wiki/Parameters} for the full list. - - See also \code{demo/} for walkthrough example in R.} - -\item{nrounds}{the max number of iterations} - -\item{verbose}{If 0, xgboost will stay silent. If 1, xgboost will print -information of performance. If 2, xgboost will print information of both -performance and construction progress information} - -\item{print.every.n}{Print every N progress messages when \code{verbose>0}. Default is 1 which means all messages are printed.} - -\item{early.stop.round}{If \code{NULL}, the early stopping function is not triggered. -If set to an integer \code{k}, training with a validation set will stop if the performance -keeps getting worse consecutively for \code{k} rounds.} - -\item{maximize}{If \code{feval} and \code{early.stop.round} are set, then \code{maximize} must be set as well. -\code{maximize=TRUE} means the larger the evaluation score the better.} - -\item{save_period}{save the model to the disk in every \code{save_period} rounds, 0 means no such action.} - -\item{save_name}{the name or path for periodically saved model file.} - -\item{...}{other parameters to pass to \code{params}.} -} -\description{ -A simple interface for training xgboost model. Look at \code{\link{xgb.train}} function for a more advanced interface. -} -\details{ -This is the modeling function for Xgboost. - -Parallelization is automatically enabled if \code{OpenMP} is present. - -Number of threads can also be manually specified via \code{nthread} parameter. 
-} -\examples{ -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') -train <- agaricus.train -test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, - eta = 1, nthread = 2, nround = 2, objective = "binary:logistic") -pred <- predict(bst, test$data) - -} - diff --git a/R-package/tests/testthat/test_basic.R b/R-package/tests/testthat/test_basic.R index 34d47103f0e5..b962235d0aad 100644 --- a/R-package/tests/testthat/test_basic.R +++ b/R-package/tests/testthat/test_basic.R @@ -8,29 +8,175 @@ train <- agaricus.train test <- agaricus.test set.seed(1994) -test_that("train and predict", { - bst <- xgboost(data = train$data, label = train$label, max.depth = 2, - eta = 1, nthread = 2, nround = 2, objective = "binary:logistic") +test_that("train and predict binary classification", { + nrounds = 2 + expect_output( + bst <- xgboost(data = train$data, label = train$label, max_depth = 2, + eta = 1, nthread = 2, nrounds = nrounds, objective = "binary:logistic") + , "train-error") + expect_equal(class(bst), "xgb.Booster") + expect_equal(bst$niter, nrounds) + expect_false(is.null(bst$evaluation_log)) + expect_equal(nrow(bst$evaluation_log), nrounds) + expect_lt(bst$evaluation_log[, min(train_error)], 0.03) + pred <- predict(bst, test$data) - expect_equal(length(pred), 1611) + expect_length(pred, 1611) + + pred1 <- predict(bst, train$data, ntreelimit = 1) + expect_length(pred1, 6513) + err_pred1 <- sum((pred1 > 0.5) != train$label)/length(train$label) + err_log <- bst$evaluation_log[1, train_error] + expect_lt(abs(err_pred1 - err_log), 10e-6) }) -test_that("early stopping", { - res <- xgb.cv(data = train$data, label = train$label, max.depth = 2, nfold = 5, - eta = 0.3, nthread = 2, nround = 20, objective = "binary:logistic", - early.stop.round = 3, maximize = FALSE) - expect_true(nrow(res) < 20) - bst <- xgboost(data = train$data, label = train$label, max.depth = 2, - eta = 0.3, nthread = 2, nround = 20, objective = "binary:logistic", - early.stop.round = 3, maximize = FALSE) - pred <- predict(bst, test$data) - expect_equal(length(pred), 1611) +test_that("train and predict softprob", { + lb <- as.numeric(iris$Species) - 1 + set.seed(11) + expect_output( + bst <- xgboost(data = as.matrix(iris[, -5]), label = lb, + max_depth = 3, eta = 0.5, nthread = 2, nrounds = 5, + objective = "multi:softprob", num_class=3) + , "train-merror") + expect_false(is.null(bst$evaluation_log)) + expect_lt(bst$evaluation_log[, min(train_merror)], 0.025) + expect_equal(bst$niter * 3, xgb.ntree(bst)) + pred <- predict(bst, as.matrix(iris[, -5])) + expect_length(pred, nrow(iris) * 3) + # row sums add up to total probability of 1: + expect_equal(rowSums(matrix(pred, ncol=3, byrow=TRUE)), rep(1, nrow(iris)), tolerance = 1e-7) + # manually calculate error at the last iteration: + mpred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE) + expect_equal(as.numeric(t(mpred)), pred) + pred_labels <- max.col(mpred) - 1 + err <- sum(pred_labels != lb)/length(lb) + expect_equal(bst$evaluation_log[5, train_merror], err, tolerance = 5e-6) + # manually calculate error at the 1st iteration: + mpred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE, ntreelimit = 1) + pred_labels <- max.col(mpred) - 1 + err <- sum(pred_labels != lb)/length(lb) + expect_equal(bst$evaluation_log[1, train_merror], err, tolerance = 5e-6) }) -test_that("save_period", { - bst <- xgboost(data = train$data, label = train$label, max.depth = 2, - eta = 0.3, nthread = 2, nround = 20, objective = 
"binary:logistic", - save_period = 10, save_name = "xgb.model") - pred <- predict(bst, test$data) - expect_equal(length(pred), 1611) +test_that("train and predict softmax", { + lb <- as.numeric(iris$Species) - 1 + set.seed(11) + expect_output( + bst <- xgboost(data = as.matrix(iris[, -5]), label = lb, + max_depth = 3, eta = 0.5, nthread = 2, nrounds = 5, + objective = "multi:softmax", num_class=3) + , "train-merror") + expect_false(is.null(bst$evaluation_log)) + expect_lt(bst$evaluation_log[, min(train_merror)], 0.025) + expect_equal(bst$niter * 3, xgb.ntree(bst)) + + pred <- predict(bst, as.matrix(iris[, -5])) + expect_length(pred, nrow(iris)) + err <- sum(pred != lb)/length(lb) + expect_equal(bst$evaluation_log[5, train_merror], err, tolerance = 5e-6) +}) + +test_that("train and predict RF", { + set.seed(11) + lb <- train$label + # single iteration + bst <- xgboost(data = train$data, label = lb, max_depth = 5, + nthread = 2, nrounds = 1, objective = "binary:logistic", + num_parallel_tree = 20, subsample = 0.6, colsample_bytree = 0.1) + expect_equal(bst$niter, 1) + expect_equal(xgb.ntree(bst), 20) + + pred <- predict(bst, train$data) + pred_err <- sum((pred > 0.5) != lb)/length(lb) + expect_lt(abs(bst$evaluation_log[1, train_error] - pred_err), 10e-6) + #expect_lt(pred_err, 0.03) + + pred <- predict(bst, train$data, ntreelimit = 20) + pred_err_20 <- sum((pred > 0.5) != lb)/length(lb) + expect_equal(pred_err_20, pred_err) + + #pred <- predict(bst, train$data, ntreelimit = 1) + #pred_err_1 <- sum((pred > 0.5) != lb)/length(lb) + #expect_lt(pred_err, pred_err_1) + #expect_lt(pred_err, 0.08) +}) + +test_that("train and predict RF with softprob", { + lb <- as.numeric(iris$Species) - 1 + nrounds <- 15 + set.seed(11) + bst <- xgboost(data = as.matrix(iris[, -5]), label = lb, + max_depth = 3, eta = 0.9, nthread = 2, nrounds = nrounds, + objective = "multi:softprob", num_class=3, + num_parallel_tree = 4, subsample = 0.5, colsample_bytree = 0.5) + expect_equal(bst$niter, 15) + expect_equal(xgb.ntree(bst), 15*3*4) + # predict for all iterations: + pred <- predict(bst, as.matrix(iris[, -5]), reshape=TRUE) + expect_equal(dim(pred), c(nrow(iris), 3)) + pred_labels <- max.col(pred) - 1 + err <- sum(pred_labels != lb)/length(lb) + expect_equal(bst$evaluation_log[nrounds, train_merror], err, tolerance = 5e-6) + # predict for 7 iterations and adjust for 4 parallel trees per iteration + pred <- predict(bst, as.matrix(iris[, -5]), reshape=TRUE, ntreelimit = 7 * 4) + err <- sum((max.col(pred) - 1) != lb)/length(lb) + expect_equal(bst$evaluation_log[7, train_merror], err, tolerance = 5e-6) +}) + +test_that("use of multiple eval metrics works", { + expect_output( + bst <- xgboost(data = train$data, label = train$label, max_depth = 2, + eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", + eval_metric = 'error', eval_metric = 'auc', eval_metric = "logloss") + , "train-error.*train-auc.*train-logloss") + expect_false(is.null(bst$evaluation_log)) + expect_equal(dim(bst$evaluation_log), c(2, 4)) + expect_equal(colnames(bst$evaluation_log), c("iter", "train_error", "train_auc", "train_logloss")) +}) + + +test_that("training continuation works", { + dtrain <- xgb.DMatrix(train$data, label = train$label) + watchlist = list(train=dtrain) + param <- list(objective = "binary:logistic", max_depth = 2, eta = 1, nthread = 2) + + # for the reference, use 4 iterations at once: + set.seed(11) + bst <- xgb.train(param, dtrain, nrounds = 4, watchlist) + # first two iterations: + set.seed(11) + bst1 <- 
xgb.train(param, dtrain, nrounds = 2, watchlist) + # continue for two more: + bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, xgb_model = bst1) + expect_equal(bst$raw, bst2$raw) + expect_false(is.null(bst2$evaluation_log)) + expect_equal(dim(bst2$evaluation_log), c(4, 2)) + expect_equal(bst2$evaluation_log, bst$evaluation_log) + # test continuing from raw model data + bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, xgb_model = bst1$raw) + expect_equal(bst$raw, bst2$raw) + expect_equal(dim(bst2$evaluation_log), c(2, 2)) + # test continuing from a model in file + xgb.save(bst1, "xgboost.model") + bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, xgb_model = "xgboost.model") + expect_equal(bst$raw, bst2$raw) + expect_equal(dim(bst2$evaluation_log), c(2, 2)) +}) + + +test_that("xgb.cv works", { + set.seed(11) + cv <- xgb.cv(data = train$data, label = train$label, max_depth = 2, nfold = 5, + eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic", + verbose=TRUE) + expect_is(cv, 'xgb.cv.synchronous') + expect_false(is.null(cv$evaluation_log)) + expect_lt(cv$evaluation_log[, min(test_error_mean)], 0.03) + expect_lt(cv$evaluation_log[, min(test_error_std)], 0.004) + expect_equal(cv$niter, 2) + expect_false(is.null(cv$folds) && is.list(cv$folds)) + expect_length(cv$folds, 5) + expect_false(is.null(cv$params) && is.list(cv$params)) + expect_false(is.null(cv$callbacks)) + expect_false(is.null(cv$call)) }) diff --git a/R-package/tests/testthat/test_callbacks.R b/R-package/tests/testthat/test_callbacks.R new file mode 100644 index 000000000000..003eb1069ee9 --- /dev/null +++ b/R-package/tests/testthat/test_callbacks.R @@ -0,0 +1,281 @@ +# More specific testing of callbacks + +require(xgboost) +require(data.table) + +context("callbacks") + +data(agaricus.train, package='xgboost') +data(agaricus.test, package='xgboost') +train <- agaricus.train +test <- agaricus.test + +# add some label noise for early stopping tests +add.noise <- function(label, frac) { + inoise <- sample(length(label), length(label) * frac) + label[inoise] <- !label[inoise] + label +} +set.seed(11) +ltrain <- add.noise(train$label, 0.2) +ltest <- add.noise(test$label, 0.2) +dtrain <- xgb.DMatrix(train$data, label = ltrain) +dtest <- xgb.DMatrix(test$data, label = ltest) +watchlist = list(train=dtrain, test=dtest) + + +err <- function(label, pr) sum((pr > 0.5) != label)/length(label) + +param <- list(objective = "binary:logistic", max_depth = 2, nthread = 2) + + +test_that("cb.print.evaluation works as expected", { + + bst_evaluation <- c('train-auc'=0.9, 'test-auc'=0.8) + bst_evaluation_err <- NULL + begin_iteration <- 1 + end_iteration <- 7 + + f0 <- cb.print.evaluation(period=0) + f1 <- cb.print.evaluation(period=1) + f5 <- cb.print.evaluation(period=5) + + expect_false(is.null(attr(f1, 'call'))) + expect_equal(attr(f1, 'name'), 'cb.print.evaluation') + + iteration <- 1 + expect_silent(f0()) + expect_output(f1(), "\\[1\\]\ttrain-auc:0.900000\ttest-auc:0.800000") + expect_output(f5(), "\\[1\\]\ttrain-auc:0.900000\ttest-auc:0.800000") + expect_null(f1()) + + iteration <- 2 + expect_output(f1(), "\\[2\\]\ttrain-auc:0.900000\ttest-auc:0.800000") + expect_silent(f5()) + + iteration <- 7 + expect_output(f1(), "\\[7\\]\ttrain-auc:0.900000\ttest-auc:0.800000") + expect_output(f5(), "\\[7\\]\ttrain-auc:0.900000\ttest-auc:0.800000") + + bst_evaluation_err <- c('train-auc'=0.1, 'test-auc'=0.2) + expect_output(f1(), "\\[7\\]\ttrain-auc:0.900000\\+0.100000\ttest-auc:0.800000\\+0.200000") +}) + 
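# For illustration only: a minimal custom callback closure in the spirit of the
# tests above. The name cb.track.iterations and what it records are made up here;
# like the built-in callbacks, it reads variables from the calling frame and has a
# finalizer part, so it could be passed to xgb.train via callbacks = list(...).
cb.track.iterations <- function() {
  iters <- integer(0)
  callback <- function(env = parent.frame(), finalize = FALSE) {
    if (finalize) {
      # finalizer part: runs once after boosting is completed
      message("iterations seen: ", paste(iters, collapse = ", "))
      return()
    }
    # record the current iteration number from the training environment
    iters <<- c(iters, env$iteration)
  }
  attr(callback, 'call') <- match.call()
  attr(callback, 'name') <- 'cb.track.iterations'
  callback
}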
+test_that("cb.evaluation.log works as expected", { + + bst_evaluation <- c('train-auc'=0.9, 'test-auc'=0.8) + bst_evaluation_err <- NULL + + evaluation_log <- list() + f <- cb.evaluation.log() + + expect_false(is.null(attr(f, 'call'))) + expect_equal(attr(f, 'name'), 'cb.evaluation.log') + + iteration <- 1 + expect_silent(f()) + expect_equal(evaluation_log, + list(c(iter=1, bst_evaluation))) + iteration <- 2 + expect_silent(f()) + expect_equal(evaluation_log, + list(c(iter=1, bst_evaluation), c(iter=2, bst_evaluation))) + expect_silent(f(finalize = TRUE)) + expect_equal(evaluation_log, + data.table(iter=1:2, train_auc=c(0.9,0.9), test_auc=c(0.8,0.8))) + + bst_evaluation_err <- c('train-auc'=0.1, 'test-auc'=0.2) + evaluation_log <- list() + f <- cb.evaluation.log() + + iteration <- 1 + expect_silent(f()) + expect_equal(evaluation_log, + list(c(iter=1, c(bst_evaluation, bst_evaluation_err)))) + iteration <- 2 + expect_silent(f()) + expect_equal(evaluation_log, + list(c(iter=1, c(bst_evaluation, bst_evaluation_err)), + c(iter=2, c(bst_evaluation, bst_evaluation_err)))) + expect_silent(f(finalize = TRUE)) + expect_equal(evaluation_log, + data.table(iter=1:2, + train_auc_mean=c(0.9,0.9), train_auc_std=c(0.1,0.1), + test_auc_mean=c(0.8,0.8), test_auc_std=c(0.2,0.2))) +}) + + +param <- list(objective = "binary:logistic", max_depth = 4, nthread = 2) + +test_that("cb.reset.parameters works as expected", { + + # fixed eta + set.seed(111) + bst0 <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 0.9) + expect_false(is.null(bst0$evaluation_log)) + expect_false(is.null(bst0$evaluation_log$train_error)) + + # same eta but re-set as a vector parameter in the callback + set.seed(111) + my_par <- list(eta = c(0.9, 0.9)) + bst1 <- xgb.train(param, dtrain, nrounds = 2, watchlist, + callbacks = list(cb.reset.parameters(my_par))) + expect_false(is.null(bst1$evaluation_log$train_error)) + expect_equal(bst0$evaluation_log$train_error, + bst1$evaluation_log$train_error) + + # same eta but re-set via a function in the callback + set.seed(111) + my_par <- list(eta = function(itr, itr_end) 0.9) + bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, + callbacks = list(cb.reset.parameters(my_par))) + expect_false(is.null(bst2$evaluation_log$train_error)) + expect_equal(bst0$evaluation_log$train_error, + bst2$evaluation_log$train_error) + + # different eta re-set as a vector parameter in the callback + set.seed(111) + my_par <- list(eta = c(0.6, 0.5)) + bst3 <- xgb.train(param, dtrain, nrounds = 2, watchlist, + callbacks = list(cb.reset.parameters(my_par))) + expect_false(is.null(bst3$evaluation_log$train_error)) + expect_false(all(bst0$evaluation_log$train_error == bst3$evaluation_log$train_error)) + + # resetting multiple parameters at the same time runs with no error + my_par <- list(eta = c(1., 0.5), gamma = c(1, 2), max_depth = c(4, 8)) + expect_error( + bst4 <- xgb.train(param, dtrain, nrounds = 2, watchlist, + callbacks = list(cb.reset.parameters(my_par))) + , NA) # NA = no error + + # expect no learning with 0 learning rate + my_par <- list(eta = c(0., 0.)) + bstX <- xgb.train(param, dtrain, nrounds = 2, watchlist, + callbacks = list(cb.reset.parameters(my_par))) + expect_false(is.null(bstX$evaluation_log$train_error)) + er <- unique(bstX$evaluation_log$train_error) + expect_length(er, 1) + expect_gt(er, 0.4) +}) + +test_that("cb.save.model works as expected", { + files <- c('xgboost_01.model', 'xgboost_02.model', 'xgboost.model') + for (f in files) if (file.exists(f)) file.remove(f) + + bst <- 
xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1, + save_period = 1, save_name = "xgboost_%02d.model") + expect_true(file.exists('xgboost_01.model')) + expect_true(file.exists('xgboost_02.model')) + b1 <- xgb.load('xgboost_01.model') + expect_equal(xgb.ntree(b1), 1) + b2 <- xgb.load('xgboost_02.model') + expect_equal(xgb.ntree(b2), 2) + expect_equal(bst$raw, b2$raw) + + # save_period = 0 saves the last iteration's model + bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1, save_period = 0) + expect_true(file.exists('xgboost.model')) + b2 <- xgb.load('xgboost.model') + expect_equal(bst$raw, b2$raw) + + for (f in files) if (file.exists(f)) file.remove(f) +}) + +test_that("can store evaluation_log without printing", { + expect_silent( + bst <- xgb.train(param, dtrain, nrounds = 10, watchlist, eta = 1, + verbose = 0, callbacks = list(cb.evaluation.log())) + ) + expect_false(is.null(bst$evaluation_log)) + expect_false(is.null(bst$evaluation_log$train_error)) + expect_lt(bst$evaluation_log[, min(train_error)], 0.2) +}) + +test_that("early stopping xgb.train works", { + set.seed(11) + expect_output( + bst <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.3, + early_stopping_rounds = 3, maximize = FALSE) + , "Stopping. Best iteration") + expect_false(is.null(bst$best_iteration)) + expect_lt(bst$best_iteration, 19) + expect_equal(bst$best_iteration, bst$best_ntreelimit) + + pred <- predict(bst, dtest) + expect_equal(length(pred), 1611) + err_pred <- err(ltest, pred) + err_log <- bst$evaluation_log[bst$best_iteration, test_error] + expect_equal(err_log, err_pred, tolerance = 5e-6) +}) + +test_that("early stopping using a specific metric works", { + set.seed(11) + expect_output( + bst <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.6, + eval_metric="logloss", eval_metric="auc", + callbacks = list(cb.early.stop(stopping_rounds = 3, maximize = FALSE, + metric_name = 'test_logloss'))) + , "Stopping. Best iteration") + expect_false(is.null(bst$best_iteration)) + expect_lt(bst$best_iteration, 19) + expect_equal(bst$best_iteration, bst$best_ntreelimit) + + pred <- predict(bst, dtest, ntreelimit = bst$best_ntreelimit) + expect_equal(length(pred), 1611) + logloss_pred <- sum(-ltest * log(pred) - (1 - ltest) * log(1 - pred)) / length(ltest) + logloss_log <- bst$evaluation_log[bst$best_iteration, test_logloss] + expect_equal(logloss_log, logloss_pred, tolerance = 5e-6) +}) + +test_that("early stopping xgb.cv works", { + set.seed(11) + expect_output( + cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.3, nrounds = 20, + early_stopping_rounds = 3, maximize = FALSE) + , "Stopping. 
Best iteration") + expect_false(is.null(cv$best_iteration)) + expect_lt(cv$best_iteration, 19) + expect_equal(cv$best_iteration, cv$best_ntreelimit) + # the best error is min error: + expect_true(cv$evaluation_log[, test_error_mean[cv$best_iteration] == min(test_error_mean)]) +}) + +test_that("prediction in xgb.cv works", { + set.seed(11) + nrounds = 4 + cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.5, nrounds = nrounds, prediction = TRUE) + expect_false(is.null(cv$evaluation_log)) + expect_false(is.null(cv$pred)) + expect_length(cv$pred, nrow(train$data)) + err_pred <- mean( sapply(cv$folds, function(f) mean(err(ltrain[f], cv$pred[f]))) ) + err_log <- cv$evaluation_log[nrounds, test_error_mean] + expect_equal(err_pred, err_log, tolerance = 1e-6) + + # save CV models + set.seed(11) + cvx <- xgb.cv(param, dtrain, nfold = 5, eta = 0.5, nrounds = nrounds, prediction = TRUE, + callbacks = list(cb.cv.predict(save_models = TRUE))) + expect_equal(cv$evaluation_log, cvx$evaluation_log) + expect_length(cvx$models, 5) + expect_true(all(sapply(cvx$models, class) == 'xgb.Booster')) +}) + +test_that("prediction in early-stopping xgb.cv works", { + set.seed(1) + expect_output( + cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.1, nrounds = 20, + early_stopping_rounds = 5, maximize = FALSE, prediction=TRUE) + , "Stopping. Best iteration") + + expect_false(is.null(cv$best_iteration)) + expect_lt(cv$best_iteration, 19) + expect_false(is.null(cv$evaluation_log)) + expect_false(is.null(cv$pred)) + expect_length(cv$pred, nrow(train$data)) + + err_pred <- mean( sapply(cv$folds, function(f) mean(err(ltrain[f], cv$pred[f]))) ) + err_log <- cv$evaluation_log[cv$best_iteration, test_error_mean] + expect_equal(err_pred, err_log, tolerance = 1e-6) + err_log_last <- cv$evaluation_log[cv$niter, test_error_mean] + expect_gt(abs(err_pred - err_log_last), 1e-4) +}) diff --git a/R-package/tests/testthat/test_custom_objective.R b/R-package/tests/testthat/test_custom_objective.R index 7407246c643f..ba0595c95f6d 100644 --- a/R-package/tests/testthat/test_custom_objective.R +++ b/R-package/tests/testthat/test_custom_objective.R @@ -2,35 +2,50 @@ context('Test models with custom objective') require(xgboost) +set.seed(1994) + data(agaricus.train, package='xgboost') data(agaricus.test, package='xgboost') dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label) +watchlist <- list(eval = dtest, train = dtrain) + +logregobj <- function(preds, dtrain) { + labels <- getinfo(dtrain, "label") + preds <- 1 / (1 + exp(-preds)) + grad <- preds - labels + hess <- preds * (1 - preds) + return(list(grad = grad, hess = hess)) +} + +evalerror <- function(preds, dtrain) { + labels <- getinfo(dtrain, "label") + err <- as.numeric(sum(labels != (preds > 0))) / length(labels) + return(list(metric = "error", value = err)) +} + +param <- list(max_depth=2, eta=1, nthread = 2, + objective=logregobj, eval_metric=evalerror) +num_round <- 2 test_that("custom objective works", { - - watchlist <- list(eval = dtest, train = dtrain) - num_round <- 2 - - logregobj <- function(preds, dtrain) { - labels <- getinfo(dtrain, "label") - preds <- 1 / (1 + exp(-preds)) - grad <- preds - labels - hess <- preds * (1 - preds) - return(list(grad = grad, hess = hess)) - } - evalerror <- function(preds, dtrain) { - labels <- getinfo(dtrain, "label") - err <- as.numeric(sum(labels != (preds > 0))) / length(labels) - return(list(metric = "error", value = err)) - } - - param <- 
list(max.depth=2, eta=1, nthread = 2, silent=1, - objective=logregobj, eval_metric=evalerror) - bst <- xgb.train(param, dtrain, num_round, watchlist) expect_equal(class(bst), "xgb.Booster") - expect_equal(length(bst$raw), 1064) + expect_equal(length(bst$raw), 1094) + expect_false(is.null(bst$evaluation_log)) + expect_false(is.null(bst$evaluation_log$eval_error)) + expect_lt(bst$evaluation_log[num_round, eval_error], 0.03) +}) + +test_that("custom objective in CV works", { + cv <- xgb.cv(param, dtrain, num_round, nfold=10, verbose=FALSE) + expect_false(is.null(cv$evaluation_log)) + expect_equal(dim(cv$evaluation_log), c(2, 5)) + expect_lt(cv$evaluation_log[num_round, test_error_mean], 0.03) +}) + +test_that("custom objective using DMatrix attr works", { + attr(dtrain, 'label') <- getinfo(dtrain, 'label') logregobjattr <- function(preds, dtrain) { @@ -40,9 +55,8 @@ test_that("custom objective works", { hess <- preds * (1 - preds) return(list(grad = grad, hess = hess)) } - param <- list(max.depth=2, eta=1, nthread = 2, silent = 1, - objective = logregobjattr, eval_metric = evalerror) + param$objective = logregobjattr bst <- xgb.train(param, dtrain, num_round, watchlist) expect_equal(class(bst), "xgb.Booster") - expect_equal(length(bst$raw), 1064) + expect_equal(length(bst$raw), 1094) }) diff --git a/R-package/tests/testthat/test_helpers.R b/R-package/tests/testthat/test_helpers.R index 10af643b8c1e..64ffd7f58886 100644 --- a/R-package/tests/testthat/test_helpers.R +++ b/R-package/tests/testthat/test_helpers.R @@ -7,25 +7,27 @@ require(vcd) set.seed(1982) data(Arthritis) -data(agaricus.train, package='xgboost') df <- data.table(Arthritis, keep.rownames = F) df[,AgeDiscret := as.factor(round(Age / 10,0))] df[,AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))] df[,ID := NULL] sparse_matrix <- sparse.model.matrix(Improved~.-1, data = df) -output_vector <- df[,Y := 0][Improved == "Marked",Y := 1][,Y] -bst.Tree <- xgboost(data = sparse_matrix, label = output_vector, max.depth = 9, - eta = 1, nthread = 2, nround = 10, objective = "binary:logistic", booster = "gbtree") +label <- df[, ifelse(Improved == "Marked", 1, 0)] -bst.GLM <- xgboost(data = sparse_matrix, label = output_vector, - eta = 1, nthread = 2, nround = 10, objective = "binary:logistic", booster = "gblinear") +bst.Tree <- xgboost(data = sparse_matrix, label = label, max_depth = 9, + eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic", booster = "gbtree") -feature.names <- colnames(agaricus.train$data) +bst.GLM <- xgboost(data = sparse_matrix, label = label, + eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic", booster = "gblinear") + +feature.names <- colnames(sparse_matrix) test_that("xgb.dump works", { - capture.output(print(xgb.dump(bst.Tree))) - capture.output(print(xgb.dump(bst.GLM))) - expect_true(xgb.dump(bst.Tree, 'xgb.model.dump', with.stats = T)) + expect_length(xgb.dump(bst.Tree), 172) + expect_length(xgb.dump(bst.GLM), 14) + expect_true(xgb.dump(bst.Tree, 'xgb.model.dump', with_stats = T)) + expect_true(file.exists('xgb.model.dump')) + expect_gt(file.size('xgb.model.dump'), 8000) }) test_that("xgb-attribute functionality", { @@ -33,12 +35,15 @@ test_that("xgb-attribute functionality", { list.val <- list(my_attr=val, a=123, b='ok') list.ch <- list.val[order(names(list.val))] list.ch <- lapply(list.ch, as.character) + # note: iter is 0-index in xgb attributes + list.default <- list(niter = "9") + list.ch <- c(list.ch, list.default) # proper input: expect_error(xgb.attr(bst.Tree, NULL)) 
expect_error(xgb.attr(val, val)) # set & get: expect_null(xgb.attr(bst.Tree, "asdf")) - expect_null(xgb.attributes(bst.Tree)) # initially, expect no attributes + expect_equal(xgb.attributes(bst.Tree), list.default) xgb.attr(bst.Tree, "my_attr") <- val expect_equal(xgb.attr(bst.Tree, "my_attr"), val) xgb.attributes(bst.Tree) <- list.val @@ -51,8 +56,10 @@ test_that("xgb-attribute functionality", { # deletion: xgb.attr(bst, "my_attr") <- NULL expect_null(xgb.attr(bst, "my_attr")) - expect_equal(xgb.attributes(bst), list.ch[c("a", "b")]) + expect_equal(xgb.attributes(bst), list.ch[c("a", "b", "niter")]) xgb.attributes(bst) <- list(a=NULL, b=NULL) + expect_equal(xgb.attributes(bst), list.default) + xgb.attributes(bst) <- list(niter=NULL) expect_null(xgb.attributes(bst)) }) @@ -61,19 +68,19 @@ test_that("xgb.model.dt.tree works with and without feature names", { dt.tree <- xgb.model.dt.tree(feature_names = feature.names, model = bst.Tree) expect_equal(names.dt.trees, names(dt.tree)) expect_equal(dim(dt.tree), c(162, 10)) - xgb.model.dt.tree(model = bst.Tree) + expect_output(str(xgb.model.dt.tree(model = bst.Tree)), 'Feature.*\\"3\\"') }) test_that("xgb.importance works with and without feature names", { - importance.Tree <- xgb.importance(feature_names = sparse_matrix@Dimnames[[2]], model = bst.Tree) + importance.Tree <- xgb.importance(feature_names = feature.names, model = bst.Tree) expect_equal(dim(importance.Tree), c(7, 4)) expect_equal(colnames(importance.Tree), c("Feature", "Gain", "Cover", "Frequency")) - xgb.importance(model = bst.Tree) + expect_output(str(xgb.importance(model = bst.Tree)), 'Feature.*\\"3\\"') xgb.plot.importance(importance_matrix = importance.Tree) }) test_that("xgb.importance works with GLM model", { - importance.GLM <- xgb.importance(feature_names = sparse_matrix@Dimnames[[2]], model = bst.GLM) + importance.GLM <- xgb.importance(feature_names = feature.names, model = bst.GLM) expect_equal(dim(importance.GLM), c(10, 2)) expect_equal(colnames(importance.GLM), c("Feature", "Weight")) xgb.importance(model = bst.GLM) @@ -86,10 +93,27 @@ test_that("xgb.plot.tree works with and without feature names", { }) test_that("xgb.plot.multi.trees works with and without feature names", { - xgb.plot.multi.trees(model = bst.Tree, feature_names = feature.names, features.keep = 3) - xgb.plot.multi.trees(model = bst.Tree, features.keep = 3) + xgb.plot.multi.trees(model = bst.Tree, feature_names = feature.names, features_keep = 3) + xgb.plot.multi.trees(model = bst.Tree, features_keep = 3) }) test_that("xgb.plot.deepness works", { xgb.plot.deepness(model = bst.Tree) }) + +test_that("check.deprecation works", { + ttt <- function(a = NNULL, DUMMY=NULL, ...) { + check.deprecation(...) 
+ as.list((environment())) + } + res <- ttt(a = 1, DUMMY = 2, z = 3) + expect_equal(res, list(a = 1, DUMMY = 2)) + expect_warning( + res <- ttt(a = 1, dummy = 22, z = 3) + , "\'dummy\' is deprecated") + expect_equal(res, list(a = 1, DUMMY = 22)) + expect_warning( + res <- ttt(a = 1, dumm = 22, z = 3) + , "\'dumm\' was partially matched to \'dummy\'") + expect_equal(res, list(a = 1, DUMMY = 22)) +}) diff --git a/R-package/tests/testthat/test_parameter_exposure.R b/R-package/tests/testthat/test_parameter_exposure.R index 769059b76ae0..1a0dcb39f549 100644 --- a/R-package/tests/testthat/test_parameter_exposure.R +++ b/R-package/tests/testthat/test_parameter_exposure.R @@ -9,24 +9,22 @@ dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label) bst <- xgboost(data = dtrain, - max.depth = 2, + max_depth = 2, eta = 1, - nround = 10, + nrounds = 10, nthread = 1, verbose = 0, objective = "binary:logistic") test_that("call is exposed to R", { - model_call <- attr(bst, "call") - expect_is(model_call, "call") + expect_false(is.null(bst$call)) + expect_is(bst$call, "call") }) test_that("params is exposed to R", { - model_params <- attr(bst, "params") - + model_params <- bst$params expect_is(model_params, "list") - expect_equal(model_params$eta, 1) - expect_equal(model_params$max.depth, 2) + expect_equal(model_params$max_depth, 2) expect_equal(model_params$objective, "binary:logistic") }) diff --git a/R-package/tests/testthat/test_poisson_regression.R b/R-package/tests/testthat/test_poisson_regression.R index 5473d930fc9a..a48f2fc4785f 100644 --- a/R-package/tests/testthat/test_poisson_regression.R +++ b/R-package/tests/testthat/test_poisson_regression.R @@ -5,10 +5,10 @@ set.seed(1994) test_that("poisson regression works", { data(mtcars) - bst <- xgboost(data = as.matrix(mtcars[,-11]),label = mtcars[,11], - objective = 'count:poisson', nrounds=5) + bst <- xgboost(data = as.matrix(mtcars[,-11]), label = mtcars[,11], + objective = 'count:poisson', nrounds=10, verbose=0) expect_equal(class(bst), "xgb.Booster") - pred <- predict(bst,as.matrix(mtcars[, -11])) + pred <- predict(bst, as.matrix(mtcars[, -11])) expect_equal(length(pred), 32) - expect_less_than(sqrt(mean( (pred - mtcars[,11]) ^ 2)), 2.5) + expect_lt(sqrt(mean( (pred - mtcars[,11])^2 )), 1.2) }) diff --git a/R-package/vignettes/discoverYourData.Rmd b/R-package/vignettes/discoverYourData.Rmd index ed5d696d40fd..25fff9d3eebb 100644 --- a/R-package/vignettes/discoverYourData.Rmd +++ b/R-package/vignettes/discoverYourData.Rmd @@ -168,8 +168,8 @@ Build the model The code below is very usual. For more information, you can look at the documentation of `xgboost` function (or at the vignette [Xgboost presentation](https://github.com/dmlc/xgboost/blob/master/R-package/vignettes/xgboostPresentation.Rmd)). ```{r} -bst <- xgboost(data = sparse_matrix, label = output_vector, max.depth = 4, - eta = 1, nthread = 2, nround = 10,objective = "binary:logistic") +bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 4, + eta = 1, nthread = 2, nrounds = 10,objective = "binary:logistic") ``` @@ -179,7 +179,7 @@ A model which fits too well may [overfit](http://en.wikipedia.org/wiki/Overfitti > Here you can see the numbers decrease until line 7 and then increase. > -> It probably means we are overfitting. To fix that I should reduce the number of rounds to `nround = 4`. 
I will let things like that because I don't really care for the purpose of this example :-) +> It probably means we are overfitting. To fix that I should reduce the number of rounds to `nrounds = 4`. I will let things like that because I don't really care for the purpose of this example :-) Feature importance ------------------ @@ -189,10 +189,10 @@ Feature importance ### Build the feature importance data.table -In the code below, `sparse_matrix@Dimnames[[2]]` represents the column names of the sparse matrix. These names are the original values of the features (remember, each binary column == one value of one *categorical* feature). +Remember, each binary column corresponds to a single value of one of *categorical* features. ```{r} -importance <- xgb.importance(feature_names = sparse_matrix@Dimnames[[2]], model = bst) +importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst) head(importance) ``` @@ -215,7 +215,7 @@ One simple solution is to count the co-occurrences of a feature and a class of t For that purpose we will execute the same function as above but using two more parameters, `data` and `label`. ```{r} -importanceRaw <- xgb.importance(feature_names = sparse_matrix@Dimnames[[2]], model = bst, data = sparse_matrix, label = output_vector) +importanceRaw <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst, data = sparse_matrix, label = output_vector) # Cleaning for better display importanceClean <- importanceRaw[,`:=`(Cover=NULL, Frequency=NULL)] @@ -328,12 +328,12 @@ train <- agaricus.train test <- agaricus.test #Random Forest™ - 1000 trees -bst <- xgboost(data = train$data, label = train$label, max.depth = 4, num_parallel_tree = 1000, subsample = 0.5, colsample_bytree =0.5, nround = 1, objective = "binary:logistic") +bst <- xgboost(data = train$data, label = train$label, max_depth = 4, num_parallel_tree = 1000, subsample = 0.5, colsample_bytree =0.5, nrounds = 1, objective = "binary:logistic") #Boosting - 3 rounds -bst <- xgboost(data = train$data, label = train$label, max.depth = 4, nround = 3, objective = "binary:logistic") +bst <- xgboost(data = train$data, label = train$label, max_depth = 4, nrounds = 3, objective = "binary:logistic") ``` > Note that the parameter `round` is set to `1`. -> [**Random Forests™**](https://www.stat.berkeley.edu/~breiman/RandomForests/cc_papers.htm) is a trademark of Leo Breiman and Adele Cutler and is licensed exclusively to Salford Systems for the commercial release of the software. \ No newline at end of file +> [**Random Forests™**](https://www.stat.berkeley.edu/~breiman/RandomForests/cc_papers.htm) is a trademark of Leo Breiman and Adele Cutler and is licensed exclusively to Salford Systems for the commercial release of the software. 
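Since the vignette now derives feature names with `colnames()` instead of reaching into `@Dimnames`, here is a minimal sketch of that importance workflow. It is a stand-alone version run on the bundled agaricus data rather than the vignette's Arthritis-based `sparse_matrix`/`output_vector` objects, and it uses the underscore-style argument names (`max_depth`, `nrounds`) adopted in this change.

```r
library(xgboost)
data(agaricus.train, package = "xgboost")
train <- agaricus.train

bst <- xgboost(data = train$data, label = train$label, max_depth = 4,
               eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic")

# colnames() works directly on the sparse dgCMatrix: one column per one-hot feature value
importance <- xgb.importance(feature_names = colnames(train$data), model = bst)
head(importance)                                    # Feature, Gain, Cover, Frequency
xgb.plot.importance(importance_matrix = importance)
```

The same call works unchanged for the Random Forest-style model shown above (`num_parallel_tree` with `nrounds = 1`), since importance is aggregated over all trees in the booster.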
diff --git a/R-package/vignettes/xgboost.Rnw b/R-package/vignettes/xgboost.Rnw index dcd0d88b0393..d6d6361b11bd 100644 --- a/R-package/vignettes/xgboost.Rnw +++ b/R-package/vignettes/xgboost.Rnw @@ -84,8 +84,8 @@ data(agaricus.train, package='xgboost') data(agaricus.test, package='xgboost') train <- agaricus.train test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, - nround = 2, objective = "binary:logistic") +bst <- xgboost(data = train$data, label = train$label, max_depth = 2, eta = 1, + nrounds = 2, objective = "binary:logistic") xgb.save(bst, 'model.save') bst = xgb.load('model.save') pred <- predict(bst, test$data) @@ -162,9 +162,9 @@ evalerror <- function(preds, dtrain) { dtest <- xgb.DMatrix(test$data, label = test$label) watchlist <- list(eval = dtest, train = dtrain) -param <- list(max.depth = 2, eta = 1, silent = 1) +param <- list(max_depth = 2, eta = 1, silent = 1) -bst <- xgb.train(param, dtrain, nround = 2, watchlist, logregobj, evalerror) +bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, logregobj, evalerror) @ The gradient and second order gradient is required for the output of customized diff --git a/R-package/vignettes/xgboostPresentation.Rmd b/R-package/vignettes/xgboostPresentation.Rmd index 61ab2f083275..b0eb9effeabf 100644 --- a/R-package/vignettes/xgboostPresentation.Rmd +++ b/R-package/vignettes/xgboostPresentation.Rmd @@ -147,12 +147,12 @@ In a *sparse* matrix, cells containing `0` are not stored in memory. Therefore, We will train decision tree model using the following parameters: * `objective = "binary:logistic"`: we will train a binary classification model ; -* `max.deph = 2`: the trees won't be deep, because our case is very simple ; +* `max_depth = 2`: the trees won't be deep, because our case is very simple ; * `nthread = 2`: the number of cpu threads we are going to use; -* `nround = 2`: there will be two passes on the data, the second one will enhance the model by further reducing the difference between ground truth and prediction. +* `nrounds = 2`: there will be two passes on the data, the second one will enhance the model by further reducing the difference between ground truth and prediction. ```{r trainingSparse, message=F, warning=F} -bstSparse <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, nthread = 2, nround = 2, objective = "binary:logistic") +bstSparse <- xgboost(data = train$data, label = train$label, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") ``` > More complex the relationship between your features and your `label` is, more passes you need. @@ -164,7 +164,7 @@ bstSparse <- xgboost(data = train$data, label = train$label, max.depth = 2, eta Alternatively, you can put your dataset in a *dense* matrix, i.e. a basic **R** matrix. 
```{r trainingDense, message=F, warning=F} -bstDense <- xgboost(data = as.matrix(train$data), label = train$label, max.depth = 2, eta = 1, nthread = 2, nround = 2, objective = "binary:logistic") +bstDense <- xgboost(data = as.matrix(train$data), label = train$label, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") ``` ##### xgb.DMatrix @@ -173,7 +173,7 @@ bstDense <- xgboost(data = as.matrix(train$data), label = train$label, max.depth ```{r trainingDmatrix, message=F, warning=F} dtrain <- xgb.DMatrix(data = train$data, label = train$label) -bstDMatrix <- xgboost(data = dtrain, max.depth = 2, eta = 1, nthread = 2, nround = 2, objective = "binary:logistic") +bstDMatrix <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") ``` ##### Verbose option @@ -184,17 +184,17 @@ One of the simplest way to see the training progress is to set the `verbose` opt ```{r trainingVerbose0, message=T, warning=F} # verbose = 0, no message -bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nthread = 2, nround = 2, objective = "binary:logistic", verbose = 0) +bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 0) ``` ```{r trainingVerbose1, message=T, warning=F} # verbose = 1, print evaluation metric -bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nthread = 2, nround = 2, objective = "binary:logistic", verbose = 1) +bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 1) ``` ```{r trainingVerbose2, message=T, warning=F} # verbose = 2, also print information about tree -bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nthread = 2, nround = 2, objective = "binary:logistic", verbose = 2) +bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 2) ``` ## Basic prediction using XGBoost @@ -287,10 +287,10 @@ For the purpose of this example, we use `watchlist` parameter. It is a list of ` ```{r watchlist, message=F, warning=F} watchlist <- list(train=dtrain, test=dtest) -bst <- xgb.train(data=dtrain, max.depth=2, eta=1, nthread = 2, nround=2, watchlist=watchlist, objective = "binary:logistic") +bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, objective = "binary:logistic") ``` -**XGBoost** has computed at each round the same average error metric than seen above (we set `nround` to 2, that is why we have two lines). Obviously, the `train-error` number is related to the training dataset (the one the algorithm learns from) and the `test-error` number to the test dataset. +**XGBoost** has computed at each round the same average error metric than seen above (we set `nrounds` to 2, that is why we have two lines). Obviously, the `train-error` number is related to the training dataset (the one the algorithm learns from) and the `test-error` number to the test dataset. Both training and test error related metrics are very similar, and in some way, it makes sense: what we have learned from the training dataset matches the observations from the test dataset. @@ -299,10 +299,10 @@ If with your own dataset you have not such results, you should think about how y For a better understanding of the learning progression, you may want to have some specific metric or even use multiple evaluation metrics. 
```{r watchlist2, message=F, warning=F} -bst <- xgb.train(data=dtrain, max.depth=2, eta=1, nthread = 2, nround=2, watchlist=watchlist, eval.metric = "error", eval.metric = "logloss", objective = "binary:logistic") +bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, eval_metric = "error", eval_metric = "logloss", objective = "binary:logistic") ``` -> `eval.metric` allows us to monitor two new metrics for each round, `logloss` and `error`. +> `eval_metric` allows us to monitor two new metrics for each round, `logloss` and `error`. ### Linear boosting @@ -310,7 +310,7 @@ bst <- xgb.train(data=dtrain, max.depth=2, eta=1, nthread = 2, nround=2, watchli Until now, all the learnings we have performed were based on boosting trees. **XGBoost** implements a second algorithm, based on linear boosting. The only difference with previous command is `booster = "gblinear"` parameter (and removing `eta` parameter). ```{r linearBoosting, message=F, warning=F} -bst <- xgb.train(data=dtrain, booster = "gblinear", max.depth=2, nthread = 2, nround=2, watchlist=watchlist, eval.metric = "error", eval.metric = "logloss", objective = "binary:logistic") +bst <- xgb.train(data=dtrain, booster = "gblinear", max_depth=2, nthread = 2, nrounds=2, watchlist=watchlist, eval_metric = "error", eval_metric = "logloss", objective = "binary:logistic") ``` In this specific case, *linear boosting* gets sligtly better performance metrics than decision trees based algorithm. @@ -328,7 +328,7 @@ Like saving models, `xgb.DMatrix` object (which groups both dataset and outcome) xgb.DMatrix.save(dtrain, "dtrain.buffer") # to load it in, simply call xgb.DMatrix dtrain2 <- xgb.DMatrix("dtrain.buffer") -bst <- xgb.train(data=dtrain2, max.depth=2, eta=1, nthread = 2, nround=2, watchlist=watchlist, objective = "binary:logistic") +bst <- xgb.train(data=dtrain2, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, objective = "binary:logistic") ``` ```{r DMatrixDel, include=FALSE} @@ -363,7 +363,7 @@ xgb.plot.importance(importance_matrix = importance_matrix) You can dump the tree you learned using `xgb.dump` into a text file. ```{r dump, message=T, warning=F} -xgb.dump(bst, with.stats = T) +xgb.dump(bst, with_stats = T) ``` You can plot the trees from your model using ```xgb.plot.tree``
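To round off the renamed `with_stats` argument and the `xgb.plot.tree` call the vignette closes with, here is a minimal sketch of dumping and plotting a small model (agaricus data again; rendering the plot assumes the DiagrammeR package is installed).

```r
library(xgboost)
data(agaricus.train, package = "xgboost")
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)

bst <- xgb.train(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
                 watchlist = list(train = dtrain), objective = "binary:logistic")

# text dump of every tree; with_stats adds gain and cover statistics to each split
dump <- xgb.dump(bst, with_stats = TRUE)
head(dump)

# render the trees (a DiagrammeR htmlwidget in an interactive session)
xgb.plot.tree(model = bst)
```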