From 5907ccbf3e828a6b2a2e9a815f723d344e59711c Mon Sep 17 00:00:00 2001
From: David Widmann
Date: Wed, 17 Feb 2021 10:00:53 +0100
Subject: [PATCH 1/3] Reexport LogExpFunctions

---
 Project.toml      |   4 +-
 src/StatsFuns.jl  |  54 +++----
 src/basicfuns.jl  | 351 ----------------------------------------------
 src/constants.jl  |   6 -
 test/basicfuns.jl | 157 ---------------------
 test/runtests.jl  |   2 +-
 6 files changed, 32 insertions(+), 542 deletions(-)
 delete mode 100644 src/basicfuns.jl
 delete mode 100644 test/basicfuns.jl

diff --git a/Project.toml b/Project.toml
index 1aa3070..5b56620 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,12 +1,14 @@
 name = "StatsFuns"
 uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c"
-version = "0.9.6"
+version = "0.9.7"

 [deps]
+LogExpFunctions = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
 Rmath = "79098fc4-a85e-5d69-aa6a-4863f24498fa"
 SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b"

 [compat]
+LogExpFunctions = "0.2"
 Rmath = "0.4, 0.5, 0.6"
 SpecialFunctions = "0.8, 0.9, 0.10, 1.0"
 julia = "1"
diff --git a/src/StatsFuns.jl b/src/StatsFuns.jl
index 922316d..0a279cd 100644
--- a/src/StatsFuns.jl
+++ b/src/StatsFuns.jl
@@ -24,31 +24,6 @@ export
     sqrthalfπ,    # √(π / 2)
     invsqrt2,     # 1 / √2
     invsqrt2π,    # 1 / √2π
-    loghalf,      # log(1 / 2)
-    logtwo,       # log(2)
-    logπ,         # log(π)
-    log2π,        # log(2π)
-    log4π,        # log(4π)
-
-    # basicfuns
-    xlogx,        # x * log(x) for x > 0, or 0 when x == 0
-    xlogy,        # x * log(y) for x > 0, or 0 when x == 0
-    logistic,     # 1 / (1 + exp(-x))
-    logit,        # log(x / (1 - x))
-    log1psq,      # log(1 + x^2)
-    log1pexp,     # log(1 + exp(x))
-    log1mexp,     # log(1 - exp(x))
-    log2mexp,     # log(2 - exp(x))
-    logexpm1,     # log(exp(x) - 1)
-    softplus,     # alias of log1pexp
-    invsoftplus,  # alias of logexpm1
-    log1pmx,      # log(1 + x) - x
-    logmxp1,      # log(x) - x + 1
-    logaddexp,    # log(exp(x) + exp(y))
-    logsubexp,    # log(abs(e^x - e^y))
-    logsumexp,    # log(sum(exp(x)))
-    softmax,      # exp(x_i) / sum(exp(x)), for i
-    softmax!,     # inplace softmax

     # distrs/beta
     betapdf,      # pdf of beta distribution
@@ -233,10 +208,37 @@ export
     logmvbeta,    # logarithm of multivariate beta function
     lstirling_asym

+# reexports
+using LogExpFunctions
+using LogExpFunctions: loghalf, logtwo, logπ, log2π, log4π, softplus, invsoftplus
+
+export
+    loghalf,      # log(1 / 2)
+    logtwo,       # log(2)
+    logπ,         # log(π)
+    log2π,        # log(2π)
+    log4π,        # log(4π)
+    xlogx,        # x * log(x) for x > 0, or 0 when x == 0
+    xlogy,        # x * log(y) for x > 0, or 0 when x == 0
+    logistic,     # 1 / (1 + exp(-x))
+    logit,        # log(x / (1 - x))
+    log1psq,      # log(1 + x^2)
+    log1pexp,     # log(1 + exp(x))
+    log1mexp,     # log(1 - exp(x))
+    log2mexp,     # log(2 - exp(x))
+    logexpm1,     # log(exp(x) - 1)
+    softplus,     # alias of log1pexp
+    invsoftplus,  # alias of logexpm1
+    log1pmx,      # log(1 + x) - x
+    logmxp1,      # log(x) - x + 1
+    logaddexp,    # log(exp(x) + exp(y))
+    logsubexp,    # log(abs(e^x - e^y))
+    logsumexp,    # log(sum(exp(x)))
+    softmax,      # exp(x_i) / sum(exp(x)), for i
+    softmax!      # inplace softmax

 ## source files

 include("constants.jl")
-include("basicfuns.jl")
 include("misc.jl")
 include("rmath.jl")
diff --git a/src/basicfuns.jl b/src/basicfuns.jl
deleted file mode 100644
index 0834812..0000000
--- a/src/basicfuns.jl
+++ /dev/null
@@ -1,351 +0,0 @@
-# common facilities
-
-# scalar functions
-"""
-    xlogx(x::Number)
-
-Compute `x * log(x)`, returning zero if `x` is zero.
-
-```jldoctest
-julia> StatsFuns.xlogx(0)
-0.0
-```
-"""
-function xlogx(x::Number)
-    result = x * log(x)
-    ifelse(iszero(x), zero(result), result)
-end
-
-"""
-    xlogy(x::Number, y::Number)
-
-Compute `x * log(y)`, returning zero if `x` is zero.
-
-```jldoctest
-julia> StatsFuns.xlogy(0, 0)
-0.0
-```
-"""
-function xlogy(x::Number, y::Number)
-    result = x * log(y)
-    ifelse(iszero(x) && !isnan(y), zero(result), result)
-end
-
-# The following bounds are precomputed versions of the following abstract
-# function, but the implicit interface for AbstractFloat doesn't uniformly
-# enforce that all floating point types implement nextfloat and prevfloat.
-# @inline function _logistic_bounds(x::AbstractFloat)
-#     (
-#         logit(nextfloat(zero(float(x)))),
-#         logit(prevfloat(one(float(x)))),
-#     )
-# end
-
-@inline _logistic_bounds(x::Float16) = (Float16(-16.64), Float16(7.625))
-@inline _logistic_bounds(x::Float32) = (-103.27893f0, 16.635532f0)
-@inline _logistic_bounds(x::Float64) = (-744.4400719213812, 36.7368005696771)
-
-"""
-    logistic(x::Real)
-
-The [logistic](https://en.wikipedia.org/wiki/Logistic_function) sigmoid function mapping a real number to a value in the interval [0, 1],
-
-```math
-\\sigma(x) = \\frac{1}{e^{-x} + 1} = \\frac{e^x}{1 + e^x}.
-```
-
-Its inverse is the [`logit`](@ref) function.
-"""
-logistic(x::Real) = inv(exp(-x) + one(x))
-
-function logistic(x::Union{Float16, Float32, Float64})
-    e = exp(x)
-    lower, upper = _logistic_bounds(x)
-    ifelse(
-        x < lower,
-        zero(x),
-        ifelse(
-            x > upper,
-            one(x),
-            e / (one(x) + e)
-        )
-    )
-end
-
-"""
-    logit(x::Real)
-
-The [logit](https://en.wikipedia.org/wiki/Logit) or log-odds transformation,
-
-```math
-\\log\\left(\\frac{x}{1-x}\\right), \\quad \\text{where } 0 < x < 1.
-```
-
-Its inverse is the [`logistic`](@ref) function.
-"""
-logit(x::Real) = log(x / (one(x) - x))
-
-"""
-    log1psq(x::Real)
-
-Return `log(1 + x^2)` evaluated carefully for `abs(x)` very small or very large.
-"""
-log1psq(x::Real) = log1p(abs2(x))
-function log1psq(x::Union{Float32,Float64})
-    ax = abs(x)
-    ax < maxintfloat(x) ? log1p(abs2(ax)) : 2 * log(ax)
-end
-
-"""
-    log1pexp(x::Real)
-
-Return `log(1 + exp(x))` evaluated carefully for largish `x`.
-
-This is also called the ["softplus"](https://en.wikipedia.org/wiki/Rectifier_(neural_networks))
-transformation, being a smooth approximation to `max(0, x)`. Its inverse is [`logexpm1`](@ref).
-"""
-log1pexp(x::Real) = x < 18.0 ? log1p(exp(x)) : x < 33.3 ? x + exp(-x) : oftype(exp(-x), x)
-log1pexp(x::Float32) = x < 9.0f0 ? log1p(exp(x)) : x < 16.0f0 ? x + exp(-x) : oftype(exp(-x), x)
-
-"""
-    log1mexp(x::Real)
-
-Return `log(1 - exp(x))`.
-
-See:
-  * Martin Maechler (2012), "Accurately Computing log(1 − exp(− |a|))",
-    http://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf
-
-Note: unlike Maechler (2012), there is no negation inside the parentheses.
-"""
-log1mexp(x::Real) = x < loghalf ? log1p(-exp(x)) : log(-expm1(x))
-
-"""
-    log2mexp(x::Real)
-
-Return `log(2 - exp(x))`, evaluated as `log1p(-expm1(x))`.
-"""
-log2mexp(x::Real) = log1p(-expm1(x))
-
-"""
-    logexpm1(x::Real)
-
-Return `log(exp(x) - 1)`, the "invsoftplus" function.
-It is the inverse of [`log1pexp`](@ref) (aka "softplus").
-"""
-logexpm1(x::Real) = x <= 18.0 ? log(expm1(x)) : x <= 33.3 ? x - exp(-x) : oftype(exp(-x), x)
-logexpm1(x::Float32) = x <= 9f0 ? log(expm1(x)) : x <= 16f0 ? x - exp(-x) : oftype(exp(-x), x)
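-
-# Editorial note on the cutoffs above (added for clarity, not in the original file):
-# past the first threshold, log1p(exp(x)) and log(expm1(x)) agree with x + exp(-x)
-# and x - exp(-x), respectively, to floating-point precision; past the second,
-# exp(-x) is smaller than the spacing between floats near x, so the result is x itself.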
-
-const softplus = log1pexp
-const invsoftplus = logexpm1
-
-"""
-    log1pmx(x::Float64)
-
-Return `log(1 + x) - x`.
-
-Uses a naive calculation or range reduction outside the kernel range. Accurate to within ~2 ulps for all `x`.
-"""
-function log1pmx(x::Float64)
-    if !(-0.7 < x < 0.9)
-        return log1p(x) - x
-    elseif x > 0.315
-        u = (x-0.5)/1.5
-        return _log1pmx_ker(u) - 9.45348918918356180e-2 - 0.5*u
-    elseif x > -0.227
-        return _log1pmx_ker(x)
-    elseif x > -0.4
-        u = (x+0.25)/0.75
-        return _log1pmx_ker(u) - 3.76820724517809274e-2 + 0.25*u
-    elseif x > -0.6
-        u = (x+0.5)*2.0
-        return _log1pmx_ker(u) - 1.93147180559945309e-1 + 0.5*u
-    else
-        u = (x+0.625)/0.375
-        return _log1pmx_ker(u) - 3.55829253011726237e-1 + 0.625*u
-    end
-end
-
-"""
-    logmxp1(x::Float64)
-
-Return `log(x) - x + 1` carefully evaluated.
-"""
-function logmxp1(x::Float64)
-    if x <= 0.3
-        return (log(x) + 1.0) - x
-    elseif x <= 0.4
-        u = (x-0.375)/0.375
-        return _log1pmx_ker(u) - 3.55829253011726237e-1 + 0.625*u
-    elseif x <= 0.6
-        u = 2.0*(x-0.5)
-        return _log1pmx_ker(u) - 1.93147180559945309e-1 + 0.5*u
-    else
-        return log1pmx(x - 1.0)
-    end
-end
-
-# The kernel of log1pmx
-# Accuracy within ~2 ulps for -0.227 < x < 0.315
-function _log1pmx_ker(x::Float64)
-    r = x/(x+2.0)
-    t = r*r
-    w = @horner(t,
-                6.66666666666666667e-1, # 2/3
-                4.00000000000000000e-1, # 2/5
-                2.85714285714285714e-1, # 2/7
-                2.22222222222222222e-1, # 2/9
-                1.81818181818181818e-1, # 2/11
-                1.53846153846153846e-1, # 2/13
-                1.33333333333333333e-1, # 2/15
-                1.17647058823529412e-1) # 2/17
-    hxsq = 0.5*x*x
-    r*(hxsq+w*t)-hxsq
-end
-
-"""
-    logaddexp(x::Real, y::Real)
-
-Return `log(exp(x) + exp(y))`, avoiding intermediate overflow/underflow, and handling non-finite values.
-"""
-function logaddexp(x::Real, y::Real)
-    # ensure Δ = 0 if x = y = ±Inf
-    Δ = ifelse(x == y, zero(x - y), abs(x - y))
-    max(x, y) + log1pexp(-Δ)
-end
-
-Base.@deprecate logsumexp(x::Real, y::Real) logaddexp(x, y)
-
-"""
-    logsubexp(x, y)
-
-Return `log(abs(e^x - e^y))`, preserving numerical accuracy.
-"""
-logsubexp(x::Real, y::Real) = max(x, y) + log1mexp(-abs(x - y))
-
-"""
-    logsumexp(X)
-
-Compute `log(sum(exp, X))` in a numerically stable way that avoids intermediate over- and
-underflow.
-
-`X` should be an iterator of real numbers. The result is computed using a single pass over
-the data.
-
-# References
-
-[Sebastian Nowozin: Streaming Log-sum-exp Computation.](http://www.nowozin.net/sebastian/blog/streaming-log-sum-exp-computation.html)
-"""
-logsumexp(X) = _logsumexp_onepass(X)
-
-"""
-    logsumexp(X::AbstractArray{<:Real}; dims=:)
-
-Compute `log.(sum(exp.(X); dims=dims))` in a numerically stable way that avoids
-intermediate over- and underflow.
-
-The result is computed using a single pass over the data.
-
-# References
-
-[Sebastian Nowozin: Streaming Log-sum-exp Computation.](http://www.nowozin.net/sebastian/blog/streaming-log-sum-exp-computation.html)
-"""
-logsumexp(X::AbstractArray{<:Real}; dims=:) = _logsumexp(X, dims)
-
-_logsumexp(X::AbstractArray{<:Real}, ::Colon) = _logsumexp_onepass(X)
-function _logsumexp(X::AbstractArray{<:Real}, dims)
-    # Do not use log(zero(eltype(X))) directly to avoid issues with ForwardDiff (#82)
-    FT = float(eltype(X))
-    xmax_r = reduce(_logsumexp_onepass_op, X; dims=dims, init=(FT(-Inf), zero(FT)))
-    return @. first(xmax_r) + log1p(last(xmax_r))
-end
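-
-# Editorial note (added for clarity, not in the original file): the invariant of the
-# one-pass reduction below is that the accumulator is a pair (xmax, r) with
-# r = sum(exp.(xs .- xmax)) - 1 over the elements reduced so far, so the final
-# log-sum-exp value is xmax + log1p(r).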
-
-function _logsumexp_onepass(X)
-    # fallback for empty collections
-    isempty(X) && return log(sum(X))
-    return _logsumexp_onepass_result(_logsumexp_onepass_reduce(X, Base.IteratorEltype(X)))
-end
-
-# function barrier for reductions with single element and without initial element
-_logsumexp_onepass_result(x) = float(x)
-_logsumexp_onepass_result((xmax, r)::Tuple) = xmax + log1p(r)
-
-# iterables with known element type
-function _logsumexp_onepass_reduce(X, ::Base.HasEltype)
-    # do not perform type computations if element type is abstract
-    T = eltype(X)
-    isconcretetype(T) || return _logsumexp_onepass_reduce(X, Base.EltypeUnknown())
-
-    FT = float(T)
-    return reduce(_logsumexp_onepass_op, X; init=(FT(-Inf), zero(FT)))
-end
-
-# iterables without known element type
-_logsumexp_onepass_reduce(X, ::Base.EltypeUnknown) = reduce(_logsumexp_onepass_op, X)
-
-## Reductions for one-pass algorithm: avoid expensive multiplications if numbers are reduced
-
-# reduce two numbers
-function _logsumexp_onepass_op(x1, x2)
-    a = x1 == x2 ? zero(x1 - x2) : -abs(x1 - x2)
-    xmax = x1 > x2 ? oftype(a, x1) : oftype(a, x2)
-    r = exp(a)
-    return xmax, r
-end
-
-# reduce a number and a partial sum
-function _logsumexp_onepass_op(x, (xmax, r)::Tuple)
-    a = x == xmax ? zero(x - xmax) : -abs(x - xmax)
-    if x > xmax
-        _xmax = oftype(a, x)
-        _r = (r + one(r)) * exp(a)
-    else
-        _xmax = oftype(a, xmax)
-        _r = r + exp(a)
-    end
-    return _xmax, _r
-end
-_logsumexp_onepass_op(xmax_r::Tuple, x) = _logsumexp_onepass_op(x, xmax_r)
-
-# reduce two partial sums
-function _logsumexp_onepass_op((xmax1, r1)::Tuple, (xmax2, r2)::Tuple)
-    a = xmax1 == xmax2 ? zero(xmax1 - xmax2) : -abs(xmax1 - xmax2)
-    if xmax1 > xmax2
-        xmax = oftype(a, xmax1)
-        r = r1 + (r2 + one(r2)) * exp(a)
-    else
-        xmax = oftype(a, xmax2)
-        r = r2 + (r1 + one(r1)) * exp(a)
-    end
-    return xmax, r
-end
-
-"""
-    softmax!(r::AbstractArray, x::AbstractArray)
-
-Overwrite `r` with the `softmax` (or _normalized exponential_) transformation of `x`.
-
-That is, `r` is overwritten with `exp.(x)`, normalized to sum to 1.
-
-See the [Wikipedia entry](https://en.wikipedia.org/wiki/Softmax_function).
-"""
-function softmax!(r::AbstractArray{R}, x::AbstractArray{T}) where {R<:AbstractFloat,T<:Real}
-    n = length(x)
-    length(r) == n || throw(DimensionMismatch("Inconsistent array lengths."))
-    u = maximum(x)
-    s = 0.
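-    # Editorial note (added for clarity, not in the original file): subtracting the
-    # maximum `u` before exponentiating keeps every term in (0, 1], so the
-    # accumulation below cannot overflow.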
-    @inbounds for i = 1:n
-        s += (r[i] = exp(x[i] - u))
-    end
-    invs = convert(R, inv(s))
-    @inbounds for i = 1:n
-        r[i] *= invs
-    end
-    r
-end
-
-"""
-    softmax(x::AbstractArray{<:Real})
-
-Return the [softmax transformation](https://en.wikipedia.org/wiki/Softmax_function) applied to `x`.
-"""
-softmax!(x::AbstractArray{<:AbstractFloat}) = softmax!(x, x)
-softmax(x::AbstractArray{<:Real}) = softmax!(similar(x, Float64), x)
diff --git a/src/constants.jl b/src/constants.jl
index 03284dc..6688dfa 100644
--- a/src/constants.jl
+++ b/src/constants.jl
@@ -20,9 +20,3 @@
 @irrational invsqrt2 0.7071067811865475244 inv(big(sqrt2))
 @irrational invsqrt2π 0.3989422804014326779 inv(big(sqrt2π))
-
-@irrational loghalf -0.6931471805599453094 log(big(0.5))
-@irrational logtwo 0.6931471805599453094 log(big(2.))
-@irrational logπ 1.1447298858494001741 log(big(π))
-@irrational log2π 1.8378770664093454836 log(big(2.)*π)
-@irrational log4π 2.5310242469692907930 log(big(4.)*π)
diff --git a/test/basicfuns.jl b/test/basicfuns.jl
deleted file mode 100644
index aff76fa..0000000
--- a/test/basicfuns.jl
+++ /dev/null
@@ -1,157 +0,0 @@
-using StatsFuns, Test
-
-@testset "xlogx & xlogy" begin
-    @test iszero(xlogx(0))
-    @test xlogx(2) ≈ 2.0 * log(2.0)
-    @test_throws DomainError xlogx(-1)
-    @test isnan(xlogx(NaN))
-
-    @test iszero(xlogy(0, 1))
-    @test xlogy(2, 3) ≈ 2.0 * log(3.0)
-    @test_throws DomainError xlogy(1, -1)
-    @test isnan(xlogy(NaN, 2))
-    @test isnan(xlogy(2, NaN))
-    @test isnan(xlogy(0, NaN))
-
-    # Since we allow complex/negative values, test for them. See comments in:
-    # https://github.com/JuliaStats/StatsFuns.jl/pull/95
-
-    @test xlogx(1 + im) == (1 + im) * log(1 + im)
-    @test isnan(xlogx(NaN + im))
-    @test isnan(xlogx(1 + NaN * im))
-
-    @test xlogy(-2, 3) == -xlogy(2, 3)
-    @test xlogy(1 + im, 3) == (1 + im) * log(3)
-    @test xlogy(1 + im, 2 + im) == (1 + im) * log(2 + im)
-    @test isnan(xlogy(1 + NaN * im, -1 + im))
-    @test isnan(xlogy(0, -1 + NaN * im))
-    @test isnan(xlogy(Inf + im * NaN, 1))
-    @test isnan(xlogy(0 + im * 0, NaN))
-    @test iszero(xlogy(0 + im * 0, 0 + im * Inf))
-end
-
-@testset "logistic & logit" begin
-    @test logistic(2) ≈ 1.0 / (1.0 + exp(-2.0))
-    @test logistic(-750.0) === 0.0
-    @test logistic(-740.0) > 0.0
-    @test logistic(+36.0) < 1.0
-    @test logistic(+750.0) === 1.0
-    @test iszero(logit(0.5))
-    @test logit(logistic(2)) ≈ 2.0
-end
-
-@testset "log1psq" begin
-    @test iszero(log1psq(0.0))
-    @test log1psq(1.0) ≈ log1p(1.0)
-    @test log1psq(2.0) ≈ log1p(4.0)
-end
-
-# log1pexp, log1mexp, log2mexp & logexpm1
-
-@testset "log1pexp" begin
-    @test log1pexp(2.0) ≈ log(1.0 + exp(2.0))
-    @test log1pexp(-2.0) ≈ log(1.0 + exp(-2.0))
-    @test log1pexp(10000) ≈ 10000.0
-    @test log1pexp(-10000) ≈ 0.0
-
-    @test log1pexp(2f0) ≈ log(1f0 + exp(2f0))
-    @test log1pexp(-2f0) ≈ log(1f0 + exp(-2f0))
-    @test log1pexp(10000f0) ≈ 10000f0
-    @test log1pexp(-10000f0) ≈ 0f0
-end
-
-@testset "log1mexp" begin
-    @test log1mexp(-1.0) ≈ log1p(- exp(-1.0))
-    @test log1mexp(-10.0) ≈ log1p(- exp(-10.0))
-end
-
-@testset "log2mexp" begin
-    @test log2mexp(0.0) ≈ 0.0
-    @test log2mexp(-1.0) ≈ log(2.0 - exp(-1.0))
-end
-
-@testset "logexpm1" begin
-    @test logexpm1(2.0) ≈ log(exp(2.0) - 1.0)
-    @test logexpm1(log1pexp(2.0)) ≈ 2.0
-    @test logexpm1(log1pexp(-2.0)) ≈ -2.0
-
-    @test logexpm1(2f0) ≈ log(exp(2f0) - 1f0)
-    @test logexpm1(log1pexp(2f0)) ≈ 2f0
-    @test logexpm1(log1pexp(-2f0)) ≈ -2f0
-end
-
-@testset "log1pmx" begin
-    @test iszero(log1pmx(0.0))
-    @test log1pmx(1.0) ≈ log(2.0) - 1.0
-    @test log1pmx(2.0) ≈ log(3.0) - 2.0
-end
-
-@testset "logmxp1" begin
-    @test iszero(logmxp1(1.0))
-    @test logmxp1(2.0) ≈ log(2.0) - 1.0
-    @test logmxp1(3.0) ≈ log(3.0) - 2.0
-end
-
-@testset "logsumexp" begin
-    @test logaddexp(2.0, 3.0) ≈ log(exp(2.0) + exp(3.0))
-    @test logaddexp(10002, 10003) ≈ 10000 + logaddexp(2.0, 3.0)
-
-    @test @inferred(logsumexp([1.0])) == 1.0
-    @test @inferred(logsumexp((x for x in [1.0]))) == 1.0
-    @test @inferred(logsumexp([1.0, 2.0, 3.0])) ≈ 3.40760596444438
-    @test @inferred(logsumexp((1.0, 2.0, 3.0))) ≈ 3.40760596444438
-    @test logsumexp([1.0, 2.0, 3.0] .+ 1000.) ≈ 1003.40760596444438
-
-    @test @inferred(logsumexp([[1.0, 2.0, 3.0] [1.0, 2.0, 3.0] .+ 1000.]; dims=1)) ≈ [3.40760596444438 1003.40760596444438]
-    @test @inferred(logsumexp([[1.0 2.0 3.0]; [1.0 2.0 3.0] .+ 1000.]; dims=2)) ≈ [3.40760596444438, 1003.40760596444438]
-    @test @inferred(logsumexp([[1.0, 2.0, 3.0] [1.0, 2.0, 3.0] .+ 1000.]; dims=[1,2])) ≈ [1003.4076059644444]
-
-    # check underflow
-    @test logsumexp([1e-20, log(1e-20)]) ≈ 2e-20
-
-    let cases = [([-Inf, -Inf], -Inf),       # correct handling of all -Inf
-                 ([-Inf, -Inf32], -Inf),     # promotion
-                 ([-Inf32, -Inf32], -Inf32), # Float32
-                 ([-Inf, Inf], Inf),
-                 ([-Inf, 9.0], 9.0),
-                 ([Inf, 9.0], Inf),
-                 ([0, 0], log(2.0))]         # non-float arguments
-        for (arguments, result) in cases
-            @test logaddexp(arguments...) ≡ result
-            @test logsumexp(arguments) ≡ result
-        end
-    end
-
-    @test isnan(logsubexp(Inf, Inf))
-    @test isnan(logsubexp(-Inf, -Inf))
-    @test logsubexp(Inf, 9.0) ≡ Inf
-    @test logsubexp(-Inf, 9.0) ≡ 9.0
-    @test logsubexp(1f2, 1f2) ≡ -Inf32
-    @test logsubexp(0, 0) ≡ -Inf
-    @test logsubexp(3, 2) ≈ 2.541324854612918108978
-
-    # NaN propagation
-    @test isnan(logaddexp(NaN, 9.0))
-    @test isnan(logaddexp(NaN, Inf))
-    @test isnan(logaddexp(NaN, -Inf))
-
-    @test isnan(logsubexp(NaN, 9.0))
-    @test isnan(logsubexp(NaN, Inf))
-    @test isnan(logsubexp(NaN, -Inf))
-
-    @test isnan(logsumexp([NaN, 9.0]))
-    @test isnan(logsumexp([NaN, Inf]))
-    @test isnan(logsumexp([NaN, -Inf]))
-
-    # logsumexp with general iterables (issue #63)
-    xs = range(-500, stop = 10, length = 1000)
-    @test @inferred(logsumexp(x for x in xs)) == logsumexp(xs)
-end
-
-@testset "softmax" begin
-    x = [1.0, 2.0, 3.0]
-    r = exp.(x) ./ sum(exp.(x))
-    @test softmax([1.0, 2.0, 3.0]) ≈ r
-    softmax!(x)
-    @test x ≈ r
-end
diff --git a/test/runtests.jl b/test/runtests.jl
index b549ad5..88c7c4d 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,4 +1,4 @@
-tests = ["basicfuns", "rmath", "generic", "misc"]
+tests = ["rmath", "generic", "misc"]

 for t in tests
     fp = "$t.jl"

From 0e3f426b4c043273ef9035c0af6786206aa7ea5f Mon Sep 17 00:00:00 2001
From: David Widmann
Date: Mon, 19 Apr 2021 17:00:51 +0200
Subject: [PATCH 2/3] Remove explicit imports

---
 src/StatsFuns.jl | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/StatsFuns.jl b/src/StatsFuns.jl
index 0a279cd..d7469ac 100644
--- a/src/StatsFuns.jl
+++ b/src/StatsFuns.jl
@@ -210,7 +210,6 @@ export

 # reexports
 using LogExpFunctions
-using LogExpFunctions: loghalf, logtwo, logπ, log2π, log4π, softplus, invsoftplus

 export
     loghalf,      # log(1 / 2)

From 8ad19f25ee41beca3393a5a79e0378546e3ef2f6 Mon Sep 17 00:00:00 2001
From: David Widmann
Date: Mon, 19 Apr 2021 17:37:33 +0200
Subject: [PATCH 3/3] Bump version

---
 Project.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Project.toml b/Project.toml
index 125c73c..57aba32 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,6 +1,6 @@
 name = "StatsFuns"
 uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c"
-version = "0.9.7"
+version = "0.9.8"

 [deps]
 LogExpFunctions = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
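
A minimal usage sketch of the reexported interface (not part of the patches; it assumes StatsFuns 0.9.7 or later with LogExpFunctions installed as declared above). After this series, downstream code keeps calling these functions through StatsFuns unchanged, even though they now live in LogExpFunctions; the expected values in the comments come from the docstrings and tests in the deleted files:

```julia
using StatsFuns

logistic(0.0)                        # 0.5; inverse of logit
logit(0.5)                           # 0.0
softplus(2.0) == log1pexp(2.0)       # true; softplus is an alias of log1pexp
logaddexp(10002.0, 10003.0)          # ≈ 10000 + logaddexp(2.0, 3.0), no overflow
logsumexp([1.0, 2.0, 3.0] .+ 1000)   # ≈ 1003.40760596444438
sum(softmax([1.0, 2.0, 3.0]))        # ≈ 1.0
```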