diff --git a/DESCRIPTION b/DESCRIPTION
index f66547be..c31f451e 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
 Package: stacks
 Title: Tidy Model Stacking
-Version: 0.2.0.9000
+Version: 0.2.1
 Authors@R: c(
     person(given = "Simon",
            family = "Couch",
diff --git a/NEWS.md b/NEWS.md
index 1d739fd7..20837ead 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,29 +1,20 @@
-# stacks
-
-## v0.2.0.9000
-
-To be released as 0.2.1
-
-* Various bug fixes and improvements to documentation.
-
-### Bug fixes
+# v0.2.1
 
 * Updates for importing workflow sets that use the `add_variables()`
   preprocessor.
-
 * Plot fixes for cases where coefficients are negative.
-
 * Performance and member plots now show the effect of multiple mixture values.
+* Package diagrams now have alt text.
 
-## v0.2.0
+# v0.2.0
 
-### Breaking changes
+## Breaking changes
 
 This release of the package changes some elements of the internal structure
 of model stacks. As such, model stacks stored as saved objects will need to
 be regenerated before predicting, plotting, printing, etc.
 
-### New features
+## New features
 
 * The package now supports elastic net models as a meta-learner via
   the `mixture` argument to `blend_predictions`.
@@ -34,7 +25,7 @@ be regenerated before predicting, plotting, printing, etc.
 * Objects tuned with racing methods from the {finetune} package can now be
   added as candidate members.
 
-### Bug fixes
+## Bug fixes
 
 * Fixed bug in determining member hyperparameters during member fitting
   when using non-RMSE/ROC AUC metrics.
@@ -43,7 +34,7 @@ be regenerated before predicting, plotting, printing, etc.
   are not valid column names and use `make.names` for associated candidate
   members.
 
-### Miscellaneous improvements
+## Miscellaneous improvements
 
 * Drop {digest} dependency in favor of {tune}/{rsample} "fingerprinting"
   to check consistency of resamples.
@@ -55,6 +46,6 @@ be regenerated before predicting, plotting, printing, etc.
   more informative.
 * Various improvements to documentation.
 
-### v0.1.0
+# v0.1.0
 
 Initial release!
diff --git a/R/add_candidates.R b/R/add_candidates.R
index df63c9f6..e7958987 100644
--- a/R/add_candidates.R
+++ b/R/add_candidates.R
@@ -184,7 +184,7 @@ add_candidates.default <- function(data_stack, candidates, name, ...) {
 .set_mode_ <- function(stack, candidates, name) {
   wf_spec <-
     attr(candidates, "workflow") %>%
-    workflows::pull_workflow_spec()
+    workflows::extract_spec_parsnip()
 
   new_mode <- wf_spec$mode
   old_mode <- attr(stack, "mode")
@@ -363,7 +363,7 @@ update_stack_data <- function(stack, new_data) {
 stack_workflow <- function(x) {
   res <-
     workflows::workflow() %>%
-    workflows::add_model(workflows::pull_workflow_spec(x))
+    workflows::add_model(workflows::extract_spec_parsnip(x))
 
   pre <- workflows::pull_workflow_preprocessor(x)
 
diff --git a/R/print.R b/R/print.R
index 93082162..aad3d913 100644
--- a/R/print.R
+++ b/R/print.R
@@ -84,7 +84,7 @@ top_coefs <- function(x, penalty = x$penalty$penalty, n = 10) {
   sub_models <- purrr::map_dfr(x$cols_map, ~ tibble::tibble(terms = .x), .id = "model_name")
 
   model_types <-
-    purrr::map(x$model_defs, workflows::pull_workflow_spec) %>%
+    purrr::map(x$model_defs, workflows::extract_spec_parsnip) %>%
     purrr::map_dfr(~ tibble::tibble(model_type = class(.x)[1]), .id = "model_name")
 
   res <- dplyr::left_join(betas, sub_models, by = "terms") %>%
diff --git a/README.Rmd b/README.Rmd
index fefbe08f..0b7108c6 100644
--- a/README.Rmd
+++ b/README.Rmd
@@ -43,7 +43,7 @@ stacks is generalized with respect to:
 * Cross-validation scheme: Any resampling algorithm implemented in [rsample](https://rsample.tidymodels.org/) or adjacent packages is fair game for resampling data for use in training a model stack.
 * Error metric: Any metric function implemented in [yardstick](https://yardstick.tidymodels.org/) or adjacent packages is fair game for evaluating model stacks and their members. That package provides some infrastructure for creating your own metric functions as well!
 
-stacks uses a regularized linear model to combine predictions from ensemble members, though this model type is only one of many possible learning algorithms that could be used to fit a stacked ensemble model. For implementations of additional ensemble learning algorithms, check out [h2o](http://docs.h2o.ai/h2o/latest-stable/h2o-r/docs/reference/h2o.stackedEnsemble.html) and [SuperLearner](https://CRAN.R-project.org/package=SuperLearner).
+stacks uses a regularized linear model to combine predictions from ensemble members, though this model type is only one of many possible learning algorithms that could be used to fit a stacked ensemble model. For implementations of additional ensemble learning algorithms, check out [h2o](https://docs.h2o.ai/h2o/latest-stable/h2o-r/docs/reference/h2o.stackedEnsemble.html) and [SuperLearner](https://CRAN.R-project.org/package=SuperLearner).
 
 Rather than diving right into the implementation, we'll focus here on how the pieces fit together, conceptually, in building an ensemble with `stacks`. See the `basics` vignette for an example of the API in action!
 
diff --git a/README.md b/README.md
index a0e467c3..ee41d509 100644
--- a/README.md
+++ b/README.md
@@ -69,7 +69,7 @@ ensemble members, though this model type is only one of many possible
 learning algorithms that could be used to fit a stacked ensemble model.
 For implementations of additional ensemble learning algorithms, check
 out
-[h2o](http://docs.h2o.ai/h2o/latest-stable/h2o-r/docs/reference/h2o.stackedEnsemble.html)
+[h2o](https://docs.h2o.ai/h2o/latest-stable/h2o-r/docs/reference/h2o.stackedEnsemble.html)
 and [SuperLearner](https://CRAN.R-project.org/package=SuperLearner).
 
 Rather than diving right into the implementation, we’ll focus here on
diff --git a/cran-comments.md b/cran-comments.md
index a8e1b223..1ad9e821 100644
--- a/cran-comments.md
+++ b/cran-comments.md
@@ -1,4 +1,4 @@
-# stacks 0.2.0
+# stacks 0.2.1
 
 ## Test environments
 
diff --git a/docs/404.html b/docs/404.html
index 703aa1fc..8629860e 100644
--- a/docs/404.html
+++ b/docs/404.html
@@ -1,86 +1,43 @@
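
The substantive code change in this patch is the swap from `workflows::pull_workflow_spec()` to its newer equivalent, `workflows::extract_spec_parsnip()`, in the three R files above. The following is a minimal sketch of what that rename means in use; it is not part of the patch, the toy `linear_reg()` workflow and `mpg ~ .` formula are purely illustrative, and it assumes a workflows version recent enough to provide `extract_spec_parsnip()`.

```r
library(magrittr)    # for %>%
library(parsnip)
library(workflows)

# A toy workflow, purely for illustration
wf <- workflow() %>%
  add_formula(mpg ~ .) %>%
  add_model(linear_reg() %>% set_engine("lm"))

# Newer extractor, adopted throughout this patch
spec_new <- extract_spec_parsnip(wf)

# Older extractor the patch replaces (soft-deprecated; may warn)
spec_old <- pull_workflow_spec(wf)

# Both should return the same parsnip model specification
identical(spec_new, spec_old)
```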