From 790b4c1cba3616b016d2da6e2be5d58c372238f1 Mon Sep 17 00:00:00 2001
From: Isaac Kinley <isaac.kinley@gmail.com>
Date: Mon, 18 Nov 2024 21:39:25 -0500
Subject: [PATCH] Updates based on output of gp()

---
 DESCRIPTION               |  3 ++-
 R/generics.R              | 16 ++++++++--------
 R/td_bcnm.R               |  8 ++++----
 R/td_ddm.R                |  4 ++--
 R/td_fn.R                 |  2 +-
 R/utils.R                 | 10 +++++-----
 README.Rmd                |  2 +-
 man/coef.td_bclm.Rd       |  2 +-
 man/kirby_consistency.Rd  |  2 +-
 man/plot.td_um.Rd         |  4 ++--
 man/tempodisco-package.Rd |  3 ++-
 11 files changed, 29 insertions(+), 27 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index 384ecd7..4fa08da 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
 Package: tempodisco
 Type: Package
-Title: Temporal discounting models
+Title: Temporal Discounting Models
 Description: A package for working with temporal discounting data, designed for behavioural researchers to simplify data cleaning/scoring and model fitting. The package implements widely used methods such as computing indifference points from adjusting amount task (Frye et al., 2016, <doi:10.3791/53584>), testing for non-systematic discounting per the criteria of Johnson & Bickel (2008, <doi:10.1037/1064-1297.16.3.264>), scoring questionnaires according to the methods of Kirby et al. (1999, <doi:10.1037//0096-3445.128.1.78>) and Wileyto et al (2004, <doi:10.3758/BF03195548>), Bayesian model selection using a range of discount functions (Franck et al., 2015, <doi:10.1002/jeab.128>), drift diffusion models of discounting (Peters & D'Esposito, 2020, <doi:10.1371/journal.pcbi.1007615>), and model-agnostic measures of discounting such as area under the curve (Myerson et al., 2001, <doi:10.1901/jeab.2001.76-235>) and ED50 (Yoon & Higgins, 2008, <doi:10.1016/j.drugalcdep.2007.12.011>).
 Authors@R: person("Isaac", "Kinley", email = "isaac.kinley@gmail.com", role = c("aut", "cre"))
@@ -23,3 +23,4 @@ Suggests:
 Config/testthat/edition: 3
 VignetteBuilder: knitr
 URL: https://kinleyid.github.io/tempodisco/
+BugReports: https://github.com/kinleyid/tempodisco/issues
diff --git a/R/generics.R b/R/generics.R
index 2d952d3..3d348c1 100644
--- a/R/generics.R
+++ b/R/generics.R
@@ -316,7 +316,7 @@ coef.td_bcnm <- function(object, ...) {object$optim$par}
 #' @family linear binary choice model functions
 #' @return A named vector of coefficients
 #' @export
-coef.td_bclm <- function(object, df_par = T, ...) {
+coef.td_bclm <- function(object, df_par = TRUE, ...) {
   if (df_par) {
     # In terms of discount function parameters
     p <- object$coefficients
@@ -528,10 +528,10 @@ deviance.td_ddm <- function(mod, ...) return(-2*logLik.td_ddm(mod))
 #' @export
 plot.td_um <- function(x,
                        type = c('summary', 'endpoints', 'link', 'rt'),
-                       legend = T,
+                       legend = TRUE,
                        p_lines = NULL,
                        p_tol = 0.001,
-                       verbose = T,
+                       verbose = TRUE,
                        del = NULL,
                        val_del = NULL,
                        confint = 0.95,
@@ -548,7 +548,7 @@ plot.td_um <- function(x,
     min_del <- min(data$del)
     plotting_delays <- seq(min_del, max_del, length.out = 1000)
     if (is.null(val_del) & ('val_del' %in% names(x$data))) {
-      val_del = mean(x$data$val_del)
+      val_del <- mean(x$data$val_del)
       if (verbose) {
        cat(sprintf('Plotting indifference curve for val_del = %s (mean of val_del from data used to fit model). 
Override this behaviour by setting the `val_del` argument to plot() or set verbose = F to suppress this message.\n', val_del))
       }
     }
@@ -588,11 +588,11 @@
 
     # Split the grid by delay
     # Using split() with a numerical index is faster than calling tapply() or similar
-    split_idx <- rep(1:length(plotting_delays), each = length(val_imm_cands))
+    split_idx <- rep(seq_along(plotting_delays), each = length(val_imm_cands))
     subgrid_list <- split(grid, split_idx)
     for (p in p_lines) {
       # Get the val_imm producing (close to) the desired p at each delay
-      val_imm <- sapply(subgrid_list, function(subgrid) {
+      val_imm <- vapply(subgrid_list, function(subgrid) {
         if (max(subgrid$p) < p | min(subgrid$p) > p) {
           return(NA)
         } else {
@@ -640,7 +640,7 @@ plot.td_um <- function(x,
 
       # Plot of psychometric curve
       if (is.null(val_del)) {
-        val_del = mean(x$data$val_del)
+        val_del <- mean(x$data$val_del)
         if (x$config$gamma_scale %def% 'none' != 'none') {
           if (verbose) {
             cat(sprintf('gamma parameter (steepness of psychometric curve curve) is scaled by val_del.\nThus, the curve will have different steepness for a different value of val_del.\nDefaulting to val_del = %s (mean of val_del from data used to fit model).\nUse the `val_del` argument to specify a custom value or use verbose = F to suppress this message.\n', val_del))
@@ -762,7 +762,7 @@ plot.td_um <- function(x,
       # Plot confidence interval
       conf_extremum <- (1 - confint)/2
       for (p in c(conf_extremum, 1 - conf_extremum)) {
-        bounds <- sapply(plotting_linpreds, function(drift) {
+        bounds <- vapply(plotting_linpreds, function(drift) {
          RWiener::qwiener(p = p, delta = drift,
                           alpha = cf['alpha'], tau = cf['tau'], beta = cf['beta'],
                           resp = 'both')
diff --git a/R/td_bcnm.R b/R/td_bcnm.R
index d931dc4..12e9a06 100644
--- a/R/td_bcnm.R
+++ b/R/td_bcnm.R
@@ -79,12 +79,12 @@ td_bcnm <- function(
       config$transform <- 'identity'
     }
   } else if (choice_rule == 'power') {
-    config$noise_dist = 'logis'
-    config$gamma_scale = 'none'
+    config$noise_dist <- 'logis'
+    config$gamma_scale <- 'none'
     if (fixed_ends) {
-      config$transform = 'noise_dist_quantile'
+      config$transform <- 'noise_dist_quantile'
     } else {
-      config$transform = 'log'
+      config$transform <- 'log'
     }
   }
diff --git a/R/td_ddm.R b/R/td_ddm.R
index 2482946..0f266df 100644
--- a/R/td_ddm.R
+++ b/R/td_ddm.R
@@ -257,8 +257,8 @@ get_prob_func_ddm <- function(discount_function, drift_transform) {
   # Compute densities
   q <- data$rt
   resp <- ifelse(data$imm_chosen, 'upper', 'lower')
-  d <- sapply(
-    1:nrow(data),
+  d <- vapply(
+    seq_len(nrow(data)),
     function(i) {
       RWiener::dwiener(q = data$rt[i], delta = drift[i], resp = resp[i],
                        alpha = par['alpha'], tau = par['tau'], beta = par['beta'])
diff --git a/R/td_fn.R b/R/td_fn.R
index 9cfea70..d574ad4 100644
--- a/R/td_fn.R
+++ b/R/td_fn.R
@@ -99,7 +99,7 @@ td_fn <- function(predefined = c('hyperbolic',
         return(interp_result$y)
       }
     }
-    yout <- sapply(xout, get_yout)
+    yout <- vapply(xout, get_yout, numeric(1))
     return(yout)
   }
 )
diff --git a/R/utils.R b/R/utils.R
index 06efe4e..29529ac 100644
--- a/R/utils.R
+++ b/R/utils.R
@@ -106,7 +106,7 @@ run_optimization <- function(fn, par_starts, par_lims, optim_args, silent = F) {
   # Try each combination of parameter starting values
   best_value <- Inf
   best_optimized <- list()
-  for (combo_idx in 1:nrow(par_start_combos)) {
+  for (combo_idx in seq_len(nrow(par_start_combos))) {
     try( # Optimization may fail
       {
         args <- c(
@@ -206,7 +206,7 @@ kirby_score <- function(data, discount_function = c('hyperbolic', 'exponential')
   }
   most_consistent <- 
    which(data$consistency == max_consistency)
-  cands <- sapply(most_consistent, function(cand) {
+  cands <- vapply(most_consistent, function(cand) {
     geomean(data$k[(cand-1) : cand])
   })
   if (length(cands) > 1) {
@@ -229,7 +229,7 @@ kirby_score <- function(data, discount_function = c('hyperbolic', 'exponential')
 #' Compute consistency score
 #'
-#' Compute the consistency score per the method of \href{https://doi.org/10.1037//0096-3445.128.1.78}{Kirby et al. (1999)}. This is described in detail in \href{https://doi.org/10.1007/s40614-016-0070-9}{Kaplan et al. (2016)}, where it's suggested that a consistency score below 0.75 might be a sign of inattentive responding.
+#' Compute the consistency score per the method of Kirby et al. (1999; \doi{10.1037//0096-3445.128.1.78}). This is described in detail in \href{https://doi.org/10.1007/s40614-016-0070-9}{Kaplan et al. (2016)}, where it's suggested that a consistency score below 0.75 might be a sign of inattentive responding.
 #' @param data Responses to score.
 #' @param discount_function Should \eqn{k} values be computed according to the hyperbolic or exponential discount function? The original method uses the hyperbolic, but in principle the exponential is also possible.
 #' @return A consistency score
@@ -258,7 +258,7 @@ kirby_preproc <- function(data, discount_function = c('hyperbolic', 'exponential
   data <- data[order(data$k), ]
-  data$consistency <- sapply(1:nrow(data), function(idx) {
+  data$consistency <- vapply(seq_len(nrow(data)), function(idx) {
     mean(c(data$imm_chosen[0:(idx-1)],
            !data$imm_chosen[(idx):(nrow(data)+1)]),
          na.rm = T)
@@ -317,7 +317,7 @@ delwise_consistencies <- function(data) {
   rows <- by(data, data$del, function(sdf) {
     # Get candidate indifference points
     cand_indiffs <- filter(c(0, sort(sdf$val_rel), 1), rep(0.5, 2))[1:(nrow(sdf) + 1)]
-    consistencies <- sapply(cand_indiffs, function(ci) {
+    consistencies <- vapply(cand_indiffs, function(ci) {
       mean(
         c(!sdf[sdf$val_rel <= ci, 'imm_chosen'],
           sdf[sdf$val_rel >= ci, 'imm_chosen'])
diff --git a/README.Rmd b/README.Rmd
index 2267233..9ef4d42 100644
--- a/README.Rmd
+++ b/README.Rmd
@@ -17,7 +17,7 @@ knitr::opts_chunk$set(
 )
 ```
 
 [![R-CMD-check](https://github.com/kinleyid/tempodisco/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/kinleyid/tempodisco/actions/workflows/R-CMD-check.yaml)
-[![codecov](https://codecov.io/github/kinleyid/tempodisco/graph/badge.svg?token=CCQXS3SNGB)](https://codecov.io/github/kinleyid/tempodisco)
+[![codecov](https://codecov.io/github/kinleyid/tempodisco/graph/badge.svg?token=CCQXS3SNGB)](https://app.codecov.io/github/kinleyid/tempodisco)
 
 `tempodisco` is an R package for behavioural researchers working with delay discounting data (also known as temporal discounting intertemporal choice data). It is intended to simplify common tasks such as scoring responses (e.g. computing indifference points from an adjusting amounts procedure, computing the "area under the curve", or computing $k$ values as in the Monetary Choice Questionnaire; [Kirby et al., 1999](https://doi.org/10.1037//0096-3445.128.1.78)), identifying poor-quality data (e.g. non-systematic responding and failed attention checks), modelling choice data using multiple discount functions (e.g. hyperbolic, exponential, etc.---see below), and modelling reaction times using drift diffusion models. 
diff --git a/man/coef.td_bclm.Rd b/man/coef.td_bclm.Rd
index bf4c486..45dad37 100644
--- a/man/coef.td_bclm.Rd
+++ b/man/coef.td_bclm.Rd
@@ -4,7 +4,7 @@
 \alias{coef.td_bclm}
 \title{Extract model coefficients}
 \usage{
-\method{coef}{td_bclm}(object, df_par = T, ...)
+\method{coef}{td_bclm}(object, df_par = TRUE, ...)
 }
 \arguments{
 \item{object}{An object of class \code{td_bcnm}}
diff --git a/man/kirby_consistency.Rd b/man/kirby_consistency.Rd
index 9ba9815..3c3dd1c 100644
--- a/man/kirby_consistency.Rd
+++ b/man/kirby_consistency.Rd
@@ -15,7 +15,7 @@ kirby_consistency(data, discount_function = c("hyperbolic", "exponential"))
 A consistency score
 }
 \description{
-Compute the consistency score per the method of \href{https://doi.org/10.1037//0096-3445.128.1.78}{Kirby et al. (1999)}. This is described in detail in \href{https://doi.org/10.1007/s40614-016-0070-9}{Kaplan et al. (2016)}, where it's suggested that a consistency score below 0.75 might be a sign of inattentive responding.
+Compute the consistency score per the method of Kirby et al. (1999; \doi{10.1037//0096-3445.128.1.78}). This is described in detail in \href{https://doi.org/10.1007/s40614-016-0070-9}{Kaplan et al. (2016)}, where it's suggested that a consistency score below 0.75 might be a sign of inattentive responding.
 }
 \examples{
 \dontrun{
diff --git a/man/plot.td_um.Rd b/man/plot.td_um.Rd
index 280d690..8969e3b 100644
--- a/man/plot.td_um.Rd
+++ b/man/plot.td_um.Rd
@@ -7,10 +7,10 @@
 \method{plot}{td_um}(
   x,
   type = c("summary", "endpoints", "link", "rt"),
-  legend = T,
+  legend = TRUE,
   p_lines = NULL,
   p_tol = 0.001,
-  verbose = T,
+  verbose = TRUE,
   del = NULL,
   val_del = NULL,
   confint = 0.95,
diff --git a/man/tempodisco-package.Rd b/man/tempodisco-package.Rd
index bebf0b0..032e83e 100644
--- a/man/tempodisco-package.Rd
+++ b/man/tempodisco-package.Rd
@@ -4,7 +4,7 @@
 \name{tempodisco-package}
 \alias{tempodisco}
 \alias{tempodisco-package}
-\title{tempodisco: Temporal discounting models}
+\title{tempodisco: Temporal Discounting Models}
 \description{
 A package for working with temporal discounting data, designed for behavioural researchers to simplify data cleaning/scoring and model fitting. The package implements widely used methods such as computing indifference points from adjusting amount task (Frye et al., 2016, \doi{10.3791/53584}), testing for non-systematic discounting per the criteria of Johnson & Bickel (2008, \doi{10.1037/1064-1297.16.3.264}), scoring questionnaires according to the methods of Kirby et al. (1999, \doi{10.1037//0096-3445.128.1.78}) and Wileyto et al (2004, \doi{10.3758/BF03195548}), Bayesian model selection using a range of discount functions (Franck et al., 2015, \doi{10.1002/jeab.128}), drift diffusion models of discounting (Peters & D'Esposito, 2020, \doi{10.1371/journal.pcbi.1007615}), and model-agnostic measures of discounting such as area under the curve (Myerson et al., 2001, \doi{10.1901/jeab.2001.76-235}) and ED50 (Yoon & Higgins, 2008, \doi{10.1016/j.drugalcdep.2007.12.011}).
 }
@@ -12,6 +12,7 @@ A package for working with temporal discounting data, designed for behavioural r
 Useful links:
 \itemize{
   \item \url{https://kinleyid.github.io/tempodisco/}
+  \item Report bugs at \url{https://github.com/kinleyid/tempodisco/issues}
 }
 }
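
The sapply()-to-vapply() and 1:n-to-seq_len()/seq_along() conversions above follow the base-R pattern sketched below. The snippet is illustrative only and uses toy data rather than anything from the package. The key detail is that vapply() requires a FUN.VALUE template giving the type and length of each element of the result; several hunks above end before the closing arguments of the converted calls, which is where that template belongs. seq_len()/seq_along() avoid the descending 1:0 sequence that 1:length(x) produces when x is empty.

    # Illustrative sketch (toy data, not package code)
    xs <- c(2, 4, 8)

    # seq_along(xs) is integer(0) when xs is empty; 1:length(xs) would be c(1, 0)
    idx <- seq_along(xs)

    # vapply() needs FUN.VALUE, a template for each element of the result,
    # so malformed output errors instead of being silently simplified
    halves <- vapply(idx, function(i) xs[i] / 2, FUN.VALUE = numeric(1))

    stopifnot(identical(halves, c(1, 2, 4)))

If the per-element output can vary in length, FUN.VALUE cannot describe it and lapply() is the usual fallback.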