diff --git a/NEWS.md b/NEWS.md index d09f779206d899..0a80847360a6c2 100644 --- a/NEWS.md +++ b/NEWS.md @@ -872,6 +872,8 @@ Deprecated or removed * The functions `eigs` and `svds` have been moved to the `IterativeEigensolvers` standard library module ([#24714]). + * Sparse array functionality has moved to the `SparseArrays` standard library ([#25249]). + * `@printf` and `@sprintf` have been moved to the `Printf` standard library ([#23929],[#25056]). * `isnumber` has been deprecated in favor of `isnumeric`, `is_assigned_char` diff --git a/base/asyncmap.jl b/base/asyncmap.jl index 9fc681d4ca71eb..6b66799f06dbbc 100644 --- a/base/asyncmap.jl +++ b/base/asyncmap.jl @@ -260,13 +260,6 @@ function asyncmap(f, b::BitArray; kwargs...) return b2 end -# TODO: Optimize for sparse arrays -# For now process as regular arrays and convert back -function asyncmap(f, s::AbstractSparseArray...; kwargs...) - sa = map(Array, s) - return sparse(asyncmap(f, sa...; kwargs...)) -end - mutable struct AsyncCollector f results diff --git a/base/deprecated.jl b/base/deprecated.jl index 3b828b81ecaada..94ee6b38481315 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -469,8 +469,6 @@ import .LinAlg: lufact, lufact!, qrfact, qrfact!, cholfact, cholfact! @deprecate cholfact!(A::AbstractMatrix, ::Type{Val{false}}) cholfact!(A, Val(false)) @deprecate cholfact!(A::AbstractMatrix, ::Type{Val{true}}; tol = 0.0) cholfact!(A, Val(true); tol = tol) @deprecate cat(::Type{Val{N}}, A::AbstractArray...) where {N} cat(Val(N), A...) -@deprecate cat(::Type{Val{N}}, A::SparseArrays._SparseConcatGroup...) where {N} cat(Val(N), A...) -@deprecate cat(::Type{Val{N}}, A::SparseArrays._DenseConcatGroup...) where {N} cat(Val(N), A...) @deprecate cat_t(::Type{Val{N}}, ::Type{T}, A, B) where {N,T} cat_t(Val(N), T, A, B) false @deprecate reshape(A::AbstractArray, ::Type{Val{N}}) where {N} reshape(A, Val(N)) @@ -505,6 +503,7 @@ function OverflowError() end # PR #22703 +import .LinAlg: Bidiagonal @deprecate Bidiagonal(dv::AbstractVector, ev::AbstractVector, isupper::Bool) Bidiagonal(dv, ev, ifelse(isupper, :U, :L)) @deprecate Bidiagonal(dv::AbstractVector, ev::AbstractVector, uplo::Char) Bidiagonal(dv, ev, ifelse(uplo == 'U', :U, :L)) @deprecate Bidiagonal(A::AbstractMatrix, isupper::Bool) Bidiagonal(A, ifelse(isupper, :U, :L)) @@ -522,25 +521,6 @@ end # remove parse-with-chains-warn and bitshift-warn # update precedence table in doc/src/manual/mathematical-operations.md -# deprecate remaining vectorized methods over SparseVectors (zero-preserving) -for op in (:floor, :ceil, :trunc, :round, - :log1p, :expm1, :sinpi, - :sin, :tan, :sind, :tand, - :asin, :atan, :asind, :atand, - :sinh, :tanh, :asinh, :atanh) - @eval import .Math: $op - @eval @deprecate ($op)(x::AbstractSparseVector{<:Number,<:Integer}) ($op).(x) -end -# deprecate remaining vectorized methods over SparseVectors (not-zero-preserving) -for op in (:exp, :exp2, :exp10, :log, :log2, :log10, - :cos, :cosd, :acos, :cosh, :cospi, - :csc, :cscd, :acot, :csch, :acsch, - :cot, :cotd, :acosd, :coth, - :sec, :secd, :acotd, :sech, :asech) - @eval import .Math: $op - @eval @deprecate ($op)(x::AbstractSparseVector{<:Number,<:Integer}) ($op).(x) -end - # PR #22182 @deprecate is_apple Sys.isapple @deprecate is_bsd Sys.isbsd @@ -573,6 +553,7 @@ end # PR #22925 # also uncomment constructor tests in test/linalg/bidiag.jl +import .LinAlg: Bidiagonal function Bidiagonal(dv::AbstractVector{T}, ev::AbstractVector{S}, uplo::Symbol) where {T,S} depwarn(string("`Bidiagonal(dv::AbstractVector{T}, 
ev::AbstractVector{S}, uplo::Symbol) where {T, S}`", " is deprecated, manually convert both vectors to the same type instead."), :Bidiagonal) @@ -582,6 +563,7 @@ end # PR #23035 # also uncomment constructor tests in test/linalg/tridiag.jl +import .LinAlg: SymTridiagonal function SymTridiagonal(dv::AbstractVector{T}, ev::AbstractVector{S}) where {T,S} depwarn(string("`SymTridiagonal(dv::AbstractVector{T}, ev::AbstractVector{S}) ", "where {T, S}` is deprecated, convert both vectors to the same type instead."), :SymTridiagonal) @@ -591,6 +573,7 @@ end # PR #23154 # also uncomment constructor tests in test/linalg/tridiag.jl +import .LinAlg: Tridiagonal function Tridiagonal(dl::AbstractVector{Tl}, d::AbstractVector{Td}, du::AbstractVector{Tu}) where {Tl,Td,Tu} depwarn(string("`Tridiagonal(dl::AbstractVector{Tl}, d::AbstractVector{Td}, du::AbstractVector{Tu}) ", "where {Tl, Td, Tu}` is deprecated, convert all vectors to the same type instead."), :Tridiagonal) @@ -710,11 +693,8 @@ end # PR #23066 @deprecate cfunction(f, r, a::Tuple) cfunction(f, r, Tuple{a...}) -# PR 23341 -import .LinAlg: diagm -@deprecate diagm(A::SparseMatrixCSC) sparse(Diagonal(sparsevec(A))) - # PR #23373 +import .LinAlg: diagm @deprecate diagm(A::BitMatrix) BitMatrix(Diagonal(vec(A))) # PR 23341 @@ -828,33 +808,6 @@ end @deprecate contains(eq::Function, itr, x) any(y->eq(y,x), itr) -# PR #23757 -import .SparseArrays.spdiagm -@deprecate spdiagm(x::AbstractVector) sparse(Diagonal(x)) -function spdiagm(x::AbstractVector, d::Number) - depwarn(string("`spdiagm(x::AbstractVector, d::Number)` is deprecated, use ", - "`spdiagm(d => x)` instead, which now returns a square matrix. To preserve the old ", - "behaviour, use `sparse(SparseArrays.spdiagm_internal(d => x)...)`"), :spdiagm) - I, J, V = SparseArrays.spdiagm_internal(d => x) - return sparse(I, J, V) -end -function spdiagm(x, d) - depwarn(string("`spdiagm((x1, x2, ...), (d1, d2, ...))` is deprecated, use ", - "`spdiagm(d1 => x1, d2 => x2, ...)` instead, which now returns a square matrix. ", - "To preserve the old behaviour, use ", - "`sparse(SparseArrays.spdiagm_internal(d1 => x1, d2 => x2, ...)...)`"), :spdiagm) - I, J, V = SparseArrays.spdiagm_internal((d[i] => x[i] for i in 1:length(x))...) - return sparse(I, J, V) -end -function spdiagm(x, d, m::Integer, n::Integer) - depwarn(string("`spdiagm((x1, x2, ...), (d1, d2, ...), m, n)` is deprecated, use ", - "`spdiagm(d1 => x1, d2 => x2, ...)` instead, which now returns a square matrix. ", - "To specify a non-square matrix and preserve the old behaviour, use ", - "`I, J, V = SparseArrays.spdiagm_internal(d1 => x1, d2 => x2, ...); sparse(I, J, V, m, n)`"), :spdiagm) - I, J, V = SparseArrays.spdiagm_internal((d[i] => x[i] for i in 1:length(x))...) - return sparse(I, J, V, m, n) -end - # deprecate zeros(D::Diagonal[, opts...]) function zeros(D::Diagonal) depwarn(string("`zeros(D::Diagonal)` is deprecated, use ", @@ -987,7 +940,6 @@ function eye(::Type{Diagonal{T}}, n::Int) where T return Diagonal{T}(I, n) end @eval Base.LinAlg import Base.eye -# @eval Base.SparseArrays import Base.eye # SparseArrays has an eye for things cholmod export tic, toq, toc @@ -1021,8 +973,6 @@ function toc() return t end -@eval Base.SparseArrays @deprecate sparse(s::UniformScaling, m::Integer) sparse(s, m, m) - # A[I...] .= with scalar indices should modify the element at A[I...] function Broadcast.dotview(A::AbstractArray, args::Number...) depwarn("the behavior of `A[I...] .= X` with scalar indices will change in the future. Use `A[I...] 
= X` instead.", :broadcast!) @@ -1100,12 +1050,6 @@ end # PR #25030 @eval LinAlg @deprecate fillslots! fillstored! false -# PR #25037 -@eval SparseArrays @deprecate spones(A::SparseMatrixCSC) LinAlg.fillstored!(copy(A), 1) -@eval SparseArrays @deprecate spones(A::SparseVector) LinAlg.fillstored!(copy(A), 1) -using .SparseArrays.spones -export spones - function diagm(v::BitVector) depwarn(string("`diagm(v::BitVector)` is deprecated, use `diagm(0 => v)` or ", "`BitMatrix(Diagonal(v))` instead."), :diagm) @@ -1154,19 +1098,6 @@ function full(A::Union{Diagonal,Bidiagonal,Tridiagonal,SymTridiagonal}) return Matrix(A) end -# full for sparse arrays -function full(S::Union{SparseVector,SparseMatrixCSC}) - (arrtypestr, desttypestr) = - isa(S, SparseVector) ? ("SparseVector", "Vector") : - isa(S, SparseMatrixCSC) ? ("SparseMatrixCSC", "Matrix") : - error("should not be reachable!") - depwarn(string( - "`full(S::$(arrtypestr))` (and `full` in general) has been deprecated. ", - "To replace `full(S::$(arrtypestr))`, consider `$(desttypestr)(S)` or, ", - "if that option is too narrow, `Array(S)`."), :full) - return Array(S) -end - # full for factorizations function full(F::Union{LinAlg.LU,LinAlg.LQ,LinAlg.QR,LinAlg.QRPivoted,LinAlg.QRCompactWY, LinAlg.SVD,LinAlg.LDLt,LinAlg.Schur,LinAlg.Eigen,LinAlg.Hessenberg, @@ -1279,7 +1210,6 @@ end # issue #22849 @deprecate reinterpret(::Type{T}, a::Array{S}, dims::NTuple{N,Int}) where {T, S, N} reshape(reinterpret(T, vec(a)), dims) -@deprecate reinterpret(::Type{T}, a::SparseMatrixCSC{S}, dims::NTuple{N,Int}) where {T, S, N} reinterpret(T, reshape(a, dims)) @deprecate reinterpret(::Type{T}, a::ReinterpretArray{S}, dims::NTuple{N,Int}) where {T, S, N} reshape(reinterpret(T, vec(a)), dims) # issue #24006 @@ -1316,55 +1246,6 @@ end # deprecate bits to bitstring (#24263, #24281) @deprecate bits bitstring -# deprecate speye -export speye -function speye(n::Integer) - depwarn(string("`speye(n::Integer)` has been deprecated in favor of `I`, `sparse`, and ", - "`SparseMatrixCSC` constructor methods. For a direct replacement, consider ", - "`sparse(1.0I, n, n)`, `SparseMatrixCSC(1.0I, n, n)`, or `SparseMatrixCSC{Float64}(I, n, n)`. ", - "If `Float64` element type is not necessary, consider the shorter `sparse(I, n, n)` ", - "or `SparseMatrixCSC(I, n, n)` (with default `eltype(I)` of `Bool`)."), :speye) - return sparse(1.0I, n, n) -end -function speye(m::Integer, n::Integer) - depwarn(string("`speye(m::Integer, n::Integer)` has been deprecated in favor of `I`, ", - "`sparse`, and `SparseMatrixCSC` constructor methods. For a direct ", - "replacement, consider `sparse(1.0I, m, n)`, `SparseMatrixCSC(1.0I, m, n)`, ", - "or `SparseMatrixCSC{Float64}(I, m, n)`. If `Float64` element type is not ", - " necessary, consider the shorter `sparse(I, m, n)` or `SparseMatrixCSC(I, m, n)` ", - "(with default `eltype(I)` of `Bool`)."), :speye) - return sparse(1.0I, m, n) -end -function speye(::Type{T}, n::Integer) where T - depwarn(string("`speye(T, n::Integer)` has been deprecated in favor of `I`, `sparse`, and ", - "`SparseMatrixCSC` constructor methods. For a direct replacement, consider ", - "`sparse(T(1)I, n, n)` if `T` is concrete or `SparseMatrixCSC{T}(I, n, n)` ", - "if `T` is either concrete or abstract. 
If element type `T` is not necessary, ", - "consider the shorter `sparse(I, n, n)` or `SparseMatrixCSC(I, n, n)` ", - "(with default `eltype(I)` of `Bool`)."), :speye) - return SparseMatrixCSC{T}(I, n, n) -end -function speye(::Type{T}, m::Integer, n::Integer) where T - depwarn(string("`speye(T, m::Integer, n::Integer)` has been deprecated in favor of `I`, ", - "`sparse`, and `SparseMatrixCSC` constructor methods. For a direct ", - "replacement, consider `sparse(T(1)I, m, n)` if `T` is concrete or ", - "`SparseMatrixCSC{T}(I, m, n)` if `T` is either concrete or abstract. ", - "If element type `T` is not necessary, consider the shorter ", - "`sparse(I, m, n)` or `SparseMatrixCSC(I, m, n)` (with default `eltype(I)` ", - "of `Bool`)."), :speye) - return SparseMatrixCSC{T}(I, m, n) -end -function speye(S::SparseMatrixCSC{T}) where T - depwarn(string("`speye(S::SparseMatrixCSC{T})` has been deprecated in favor of `I`, ", - "`sparse`, and `SparseMatrixCSC` constructor methods. For a direct ", - "replacement, consider `sparse(T(1)I, size(S)...)` if `T` is concrete or ", - "`SparseMatrixCSC{eltype(S)}(I, size(S))` if `T` is either concrete or abstract. ", - "If preserving element type `T` is not necessary, consider the shorter ", - "`sparse(I, size(S)...)` or `SparseMatrixCSC(I, size(S))` (with default ", - "`eltype(I)` of `Bool`)."), :speye) - return SparseMatrixCSC{T}(I, m, n) -end - # issue #24167 @deprecate EnvHash EnvDict @@ -2338,87 +2219,6 @@ end @deprecate A_mul_Bc(A::AbstractVecOrMat{T}, R::AbstractRotation{S}) where {T,S} (*)(A, adjoint(R)) end -# former imports into SparseArrays -@eval Base.SparseArrays begin - import Base: A_mul_B!, Ac_mul_B, Ac_mul_B!, At_mul_B, At_mul_B! - import Base: A_mul_Bc, A_mul_Bt, Ac_mul_Bc, At_mul_Bt - import Base: At_ldiv_B, Ac_ldiv_B, A_ldiv_B! - import Base.LinAlg: At_ldiv_B!, Ac_ldiv_B!, A_rdiv_B!, A_rdiv_Bc!, mul!, ldiv!, rdiv! -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] 
methods from base/sparse/linalg.jl, to deprecate -@eval Base.SparseArrays begin - using Base.LinAlg: Adjoint, Transpose - @deprecate Ac_ldiv_B(A::SparseMatrixCSC, B::RowVector) (\)(adjoint(A), B) - @deprecate At_ldiv_B(A::SparseMatrixCSC, B::RowVector) (\)(transpose(A), B) - @deprecate Ac_ldiv_B(A::SparseMatrixCSC, B::AbstractVecOrMat) (\)(adjoint(A), B) - @deprecate At_ldiv_B(A::SparseMatrixCSC, B::AbstractVecOrMat) (\)(transpose(A), B) - @deprecate A_rdiv_Bc!(A::SparseMatrixCSC{T}, D::Diagonal{T}) where {T} rdiv!(A, adjoint(D)) - @deprecate A_rdiv_Bt!(A::SparseMatrixCSC{T}, D::Diagonal{T}) where {T} rdiv!(A, transpose(D)) - @deprecate A_rdiv_B!(A::SparseMatrixCSC{T}, D::Diagonal{T}) where {T} rdiv!(A, D) - @deprecate A_ldiv_B!(L::LowerTriangular{T,<:SparseMatrixCSCUnion{T}}, B::StridedVecOrMat) where {T} ldiv!(L, B) - @deprecate A_ldiv_B!(U::UpperTriangular{T,<:SparseMatrixCSCUnion{T}}, B::StridedVecOrMat) where {T} ldiv!(U, B) - @deprecate A_mul_Bt(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(A, transpose(B)) - @deprecate A_mul_Bc(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(A, adjoint(B)) - @deprecate At_mul_B(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(transpose(A), B) - @deprecate Ac_mul_B(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(adjoint(A), B) - @deprecate At_mul_Bt(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(transpose(A), transpose(B)) - @deprecate Ac_mul_Bc(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(adjoint(A), adjoint(B)) - @deprecate A_mul_B!(C::StridedVecOrMat, A::SparseMatrixCSC, B::StridedVecOrMat) mul!(C, A, B) - @deprecate Ac_mul_B!(C::StridedVecOrMat, A::SparseMatrixCSC, B::StridedVecOrMat) mul!(C, adjoint(A), B) - @deprecate At_mul_B!(C::StridedVecOrMat, A::SparseMatrixCSC, B::StridedVecOrMat) mul!(C, transpose(A), B) - @deprecate A_mul_B!(α::Number, A::SparseMatrixCSC, B::StridedVecOrMat, β::Number, C::StridedVecOrMat) mul!(α, A, B, β, C) - @deprecate A_mul_B(A::SparseMatrixCSC{TA,S}, x::StridedVector{Tx}) where {TA,S,Tx} (*)(A, x) - @deprecate A_mul_B(A::SparseMatrixCSC{TA,S}, B::StridedMatrix{Tx}) where {TA,S,Tx} (*)(A, B) - @deprecate Ac_mul_B!(α::Number, A::SparseMatrixCSC, B::StridedVecOrMat, β::Number, C::StridedVecOrMat) mul!(α, adjoint(A), B, β, C) - @deprecate Ac_mul_B(A::SparseMatrixCSC{TA,S}, x::StridedVector{Tx}) where {TA,S,Tx} (*)(adjoint(A), x) - @deprecate Ac_mul_B(A::SparseMatrixCSC{TA,S}, B::StridedMatrix{Tx}) where {TA,S,Tx} (*)(adjoint(A), B) - @deprecate At_mul_B!(α::Number, A::SparseMatrixCSC, B::StridedVecOrMat, β::Number, C::StridedVecOrMat) mul!(α, transpose(A), B, β, C) - @deprecate At_mul_B(A::SparseMatrixCSC{TA,S}, x::StridedVector{Tx}) where {TA,S,Tx} (*)(transpose(A), x) - @deprecate At_mul_B(A::SparseMatrixCSC{TA,S}, B::StridedMatrix{Tx}) where {TA,S,Tx} (*)(transpose(A), B) - @deprecate A_mul_Bt(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(A, transpose(B)) - @deprecate A_mul_Bc(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(A, adjoint(B)) - @deprecate At_mul_B(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(transpose(A), B) - @deprecate Ac_mul_B(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(adjoint(A),B) - @deprecate At_mul_Bt(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} 
(*)(transpose(A), transpose(B)) - @deprecate Ac_mul_Bc(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(adjoint(A), adjoint(B)) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/sparse/sparsevector.jl, to deprecate -for isunittri in (true, false), islowertri in (true, false) - unitstr = isunittri ? "Unit" : "" - halfstr = islowertri ? "Lower" : "Upper" - tritype = :(Base.LinAlg.$(Symbol(unitstr, halfstr, "Triangular"))) - @eval Base.SparseArrays begin - using Base.LinAlg: Adjoint, Transpose - @deprecate At_ldiv_B(A::$tritype{TA,<:AbstractMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(transpose(A), b) - @deprecate At_ldiv_B(A::$tritype{TA,<:StridedMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(transpose(A), b) - @deprecate At_ldiv_B(A::$tritype, b::SparseVector) (\)(transpose(A), b) - @deprecate Ac_ldiv_B(A::$tritype{TA,<:AbstractMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(adjoint(A), b) - @deprecate Ac_ldiv_B(A::$tritype{TA,<:StridedMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(adjoint(A), b) - @deprecate Ac_ldiv_B(A::$tritype, b::SparseVector) (\)(adjoint(A), b) - @deprecate A_ldiv_B!(A::$tritype{<:Any,<:StridedMatrix}, b::SparseVector) ldiv!(A, b) - @deprecate At_ldiv_B!(A::$tritype{<:Any,<:StridedMatrix}, b::SparseVector) ldiv!(transpose(A), b) - @deprecate Ac_ldiv_B!(A::$tritype{<:Any,<:StridedMatrix}, b::SparseVector) ldiv!(adjoint(A), b) - end -end -@eval Base.SparseArrays begin - using Base.LinAlg: Adjoint, Transpose - @deprecate Ac_mul_B(A::SparseMatrixCSC, x::AbstractSparseVector) (*)(adjoint(A), x) - @deprecate At_mul_B(A::SparseMatrixCSC, x::AbstractSparseVector) (*)(transpose(A), x) - @deprecate Ac_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, adjoint(A), x, β, y) - @deprecate Ac_mul_B!(y::StridedVector{Ty}, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, adjoint(A), x) - @deprecate At_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, transpose(A), x, β, y) - @deprecate At_mul_B!(y::StridedVector{Ty}, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, transpose(A), x) - @deprecate A_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, A, x, β, y) - @deprecate A_mul_B!(y::StridedVector{Ty}, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, A, x) - @deprecate At_mul_B!(α::Number, A::StridedMatrix, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, transpose(A), x, β, y) - @deprecate At_mul_B!(y::StridedVector{Ty}, A::StridedMatrix, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, transpose(A), x) - @deprecate At_mul_B(A::StridedMatrix{Ta}, x::AbstractSparseVector{Tx}) where {Ta,Tx} (*)(transpose(A), x) - @deprecate A_mul_B!(α::Number, A::StridedMatrix, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, A, x, β, y) - @deprecate A_mul_B!(y::StridedVector{Ty}, A::StridedMatrix, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, A, x) -end - # methods involving RowVector from base/linalg/bidiag.jl, to deprecate @eval Base.LinAlg begin @@ -2439,13 +2239,6 @@ end *(D::Diagonal, adjrowvec::Adjoint{<:Any,<:RowVector}) = (rowvec = adjrowvec.parent; D*rvadjoint(rowvec)) end -# methods involving RowVector from base/sparse/linalg.jl, to deprecate -@eval Base.SparseArrays begin - \(::SparseMatrixCSC, ::RowVector) = 
throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) - \(::Adjoint{<:Any,<:SparseMatrixCSC}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) - \(::Transpose{<:Any,<:SparseMatrixCSC}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) -end - # methods involving RowVector from base/linalg/qr.jl, to deprecate @eval Base.LinAlg begin *(rowvec::RowVector, adjB::Adjoint{<:Any,<:AbstractQ}) = (B = adjB.parent; rvadjoint(B*rvadjoint(rowvec))) @@ -2504,11 +2297,6 @@ end \(A::Transpose{<:Any,<:Factorization{<:Real}}, B::RowVector) = transpose(A.parent) \ B end -# methods involving RowVector from base/sparse/higherorderfns.jl, to deprecate -@eval Base.SparseArrays.HigherOrderFns begin - BroadcastStyle(::Type{<:Base.RowVector{T,<:Vector}}) where T = Broadcast.MatrixStyle() -end - # methods involving RowVector from base/linalg/symmetric.jl, to deprecate @eval Base.LinAlg begin *(A::RowVector, transB::Transpose{<:Any,<:RealHermSymComplexSym}) = A * transB.parent @@ -2795,6 +2583,31 @@ end @deprecate_moved sum_kbn "KahanSummation" @deprecate_moved cumsum_kbn "KahanSummation" +# PR #25249: SparseArrays to stdlib +## the Base.SparseArrays module itself and exported types are deprecated in base/sysimg.jl +## functions that were re-exported from Base +@deprecate_moved nonzeros "SparseArrays" true true +@deprecate_moved permute "SparseArrays" true true +@deprecate_moved blkdiag "SparseArrays" true true +@deprecate_moved dropzeros "SparseArrays" true true +@deprecate_moved dropzeros! "SparseArrays" true true +@deprecate_moved issparse "SparseArrays" true true +@deprecate_moved sparse "SparseArrays" true true +@deprecate_moved sparsevec "SparseArrays" true true +@deprecate_moved spdiagm "SparseArrays" true true +@deprecate_moved sprand "SparseArrays" true true +@deprecate_moved sprandn "SparseArrays" true true +@deprecate_moved spzeros "SparseArrays" true true +@deprecate_moved rowvals "SparseArrays" true true +@deprecate_moved nzrange "SparseArrays" true true +@deprecate_moved nnz "SparseArrays" true true +## functions that were exported from Base.SparseArrays but not from Base +@deprecate_moved droptol!
"SparseArrays" false true +## deprecated functions that are moved to stdlib/SparseArrays/src/deprecated.jl +@deprecate_moved spones "SparseArrays" true true +@deprecate_moved speye "SparseArrays" true true + + # PR #25021 @deprecate_moved normalize_string "Unicode" true true @deprecate_moved graphemes "Unicode" true true diff --git a/base/exports.jl b/base/exports.jl index bbc505393a62e8..740e544042b0eb 100644 --- a/base/exports.jl +++ b/base/exports.jl @@ -434,7 +434,6 @@ export minimum, minmax, ndims, - nonzeros, ones, parent, parentindices, @@ -442,7 +441,6 @@ export partialsort!, partialsortperm, partialsortperm!, - permute, permute!, permutedims, permutedims!, @@ -513,7 +511,6 @@ export # linear algebra bkfact!, bkfact, - blkdiag, chol, cholfact!, cholfact, @@ -594,10 +591,6 @@ export ⋅, ×, -# sparse - dropzeros, - dropzeros!, - # bitarrays falses, flipbits!, @@ -1183,22 +1176,4 @@ export @goto, @view, @views, - @static, - -# SparseArrays module re-exports - SparseArrays, - AbstractSparseArray, - AbstractSparseMatrix, - AbstractSparseVector, - SparseMatrixCSC, - SparseVector, - issparse, - sparse, - sparsevec, - spdiagm, - sprand, - sprandn, - spzeros, - rowvals, - nzrange, - nnz + @static diff --git a/base/linalg/bidiag.jl b/base/linalg/bidiag.jl index 34860fefde0986..dbab48db775cf9 100644 --- a/base/linalg/bidiag.jl +++ b/base/linalg/bidiag.jl @@ -178,7 +178,8 @@ broadcast(::typeof(big), B::Bidiagonal) = Bidiagonal(big.(B.dv), big.(B.ev), B.u # On the other hand, similar(B, [neweltype,] shape...) should yield a sparse matrix. # The first method below effects the former, and the second the latter. similar(B::Bidiagonal, ::Type{T}) where {T} = Bidiagonal(similar(B.dv, T), similar(B.ev, T), B.uplo) -similar(B::Bidiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) +# The method below is moved to SparseArrays for now +# similar(B::Bidiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) ################### diff --git a/base/linalg/dense.jl b/base/linalg/dense.jl index 74e54947810341..76109f7fd63e87 100644 --- a/base/linalg/dense.jl +++ b/base/linalg/dense.jl @@ -303,8 +303,6 @@ Vector `kv.second` will be placed on the `kv.first` diagonal. versions with fast arithmetic, see [`Diagonal`](@ref), [`Bidiagonal`](@ref) [`Tridiagonal`](@ref) and [`SymTridiagonal`](@ref). -See also: [`spdiagm`](@ref) - # Examples ```jldoctest julia> diagm(1 => [1,2,3]) diff --git a/base/linalg/diagonal.jl b/base/linalg/diagonal.jl index c33b00076f7d14..80ac284f2d12e4 100644 --- a/base/linalg/diagonal.jl +++ b/base/linalg/diagonal.jl @@ -59,7 +59,8 @@ Array(D::Diagonal) = Matrix(D) # On the other hand, similar(D, [neweltype,] shape...) should yield a sparse matrix. # The first method below effects the former, and the second the latter. similar(D::Diagonal, ::Type{T}) where {T} = Diagonal(similar(D.diag, T)) -similar(D::Diagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) +# The method below is moved to SparseArrays for now +# similar(D::Diagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) 
copyto!(D1::Diagonal, D2::Diagonal) = (copyto!(D1.diag, D2.diag); D1) diff --git a/base/linalg/lapack.jl b/base/linalg/lapack.jl index 29bf3838773369..c39210eacf353d 100644 --- a/base/linalg/lapack.jl +++ b/base/linalg/lapack.jl @@ -3759,7 +3759,7 @@ for (stev, stebz, stegr, stein, elty) in chklapackerror(info[]) if any(ifail .!= 0) # TODO: better error message / type - error("failed to converge eigenvectors:\n$(nonzeros(ifail))") + error("failed to converge eigenvectors:\n$(find(x->x != 0, ifail))") end z end diff --git a/base/linalg/tridiag.jl b/base/linalg/tridiag.jl index 383abe2c22001b..f83c8a60833656 100644 --- a/base/linalg/tridiag.jl +++ b/base/linalg/tridiag.jl @@ -111,7 +111,8 @@ end # On the other hand, similar(S, [neweltype,] shape...) should yield a sparse matrix. # The first method below effects the former, and the second the latter. similar(S::SymTridiagonal, ::Type{T}) where {T} = SymTridiagonal(similar(S.dv, T), similar(S.ev, T)) -similar(S::SymTridiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) +# The method below is moved to SparseArrays for now +# similar(S::SymTridiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) #Elementary operations broadcast(::typeof(abs), M::SymTridiagonal) = SymTridiagonal(abs.(M.dv), abs.(M.ev)) @@ -497,7 +498,8 @@ Array(M::Tridiagonal) = Matrix(M) # On the other hand, similar(M, [neweltype,] shape...) should yield a sparse matrix. # The first method below effects the former, and the second the latter. similar(M::Tridiagonal, ::Type{T}) where {T} = Tridiagonal(similar(M.dl, T), similar(M.d, T), similar(M.du, T)) -similar(M::Tridiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) +# The method below is moved to SparseArrays for now +# similar(M::Tridiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) 
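These dims-taking `similar` fallbacks for `Bidiagonal`, `Diagonal`, `SymTridiagonal` and `Tridiagonal` are re-added later in this patch inside `stdlib/SparseArrays/src/SparseArrays.jl`, so the sparse-producing behaviour only exists once the stdlib is loaded. A minimal sketch of the intended behaviour, assuming `using SparseArrays` on a build that includes this patch:

```julia
using SparseArrays

D = Diagonal([1.0, 2.0, 3.0])
similar(D, Float64)          # keeps the structured type: a Diagonal{Float64,...}
similar(D, Float64, (4, 4))  # explicit dims fall back to spzeros: a 4×4 SparseMatrixCSC
```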
# Operations on Tridiagonal matrices copyto!(dest::Tridiagonal, src::Tridiagonal) = (copyto!(dest.dl, src.dl); copyto!(dest.d, src.d); copyto!(dest.du, src.du); dest) diff --git a/base/sysimg.jl b/base/sysimg.jl index e8b57aaa39a506..ed1274125e71a3 100644 --- a/base/sysimg.jl +++ b/base/sysimg.jl @@ -427,10 +427,6 @@ include("libgit2/libgit2.jl") # package manager include("pkg/pkg.jl") -# sparse matrices, vectors, and sparse linear algebra -include("sparse/sparse.jl") -using .SparseArrays - include("asyncmap.jl") # worker threads @@ -499,6 +495,7 @@ Base.require(:IterativeEigensolvers) Base.require(:Mmap) Base.require(:Profile) Base.require(:SharedArrays) +Base.require(:SparseArrays) Base.require(:SuiteSparse) Base.require(:Test) Base.require(:Unicode) @@ -512,6 +509,19 @@ Base.require(:Future) @deprecate_binding Profile root_module(:Profile) true ", run `using Profile` instead" @deprecate_binding Dates root_module(:Dates) true ", run `using Dates` instead" @deprecate_binding Distributed root_module(:Distributed) true ", run `using Distributed` instead" + + # PR #25249 + @deprecate_binding SparseArrays root_module(:SparseArrays) true ", run `using SparseArrays` instead" + @deprecate_binding(AbstractSparseArray, root_module(:SparseArrays).AbstractSparseArray, true, + ", run `using SparseArrays` to load sparse array functionality") + @deprecate_binding(AbstractSparseMatrix, root_module(:SparseArrays).AbstractSparseMatrix, true, + ", run `using SparseArrays` to load sparse array functionality") + @deprecate_binding(AbstractSparseVector, root_module(:SparseArrays).AbstractSparseVector, true, + ", run `using SparseArrays` to load sparse array functionality") + @deprecate_binding(SparseMatrixCSC, root_module(:SparseArrays).SparseMatrixCSC, true, + ", run `using SparseArrays` to load sparse array functionality") + @deprecate_binding(SparseVector, root_module(:SparseArrays).SparseVector, true, + ", run `using SparseArrays` to load sparse array functionality") end empty!(LOAD_PATH) diff --git a/doc/src/base/arrays.md b/doc/src/base/arrays.md index c3f1efca5d5ea6..b2a1074ab520d3 100644 --- a/doc/src/base/arrays.md +++ b/doc/src/base/arrays.md @@ -188,30 +188,3 @@ and can be converted to/from the latter via `Array(bitarray)` and `BitArray(arra ```@docs Base.flipbits! ``` - -## [Sparse Vectors and Matrices](@id stdlib-sparse-arrays) - -Sparse vectors and matrices largely support the same set of operations as their dense counterparts. -The following functions are specific to sparse arrays. 
- -```@docs -Base.SparseArrays.SparseVector -Base.SparseArrays.SparseMatrixCSC -Base.SparseArrays.sparse -Base.SparseArrays.sparsevec -Base.SparseArrays.issparse -Base.SparseArrays.nnz -Base.SparseArrays.spzeros -Base.SparseArrays.spdiagm -Base.SparseArrays.sprand -Base.SparseArrays.sprandn -Base.SparseArrays.nonzeros -Base.SparseArrays.rowvals -Base.SparseArrays.nzrange -Base.SparseArrays.dropzeros!(::SparseMatrixCSC, ::Bool) -Base.SparseArrays.dropzeros(::SparseMatrixCSC, ::Bool) -Base.SparseArrays.dropzeros!(::SparseVector, ::Bool) -Base.SparseArrays.dropzeros(::SparseVector, ::Bool) -Base.SparseArrays.permute -Base.permute!{Tv, Ti, Tp <: Integer, Tq <: Integer}(::SparseMatrixCSC{Tv,Ti}, ::SparseMatrixCSC{Tv,Ti}, ::AbstractArray{Tp,1}, ::AbstractArray{Tq,1}) -``` diff --git a/doc/src/base/base.md b/doc/src/base/base.md index 82e4a2d55ba6e2..ba0136630a83b7 100644 --- a/doc/src/base/base.md +++ b/doc/src/base/base.md @@ -98,7 +98,6 @@ Base.Markdown Base.Meta Base.Pkg Base.Serializer -Base.SparseArrays Base.StackTraces Base.Sys Base.Threads diff --git a/doc/src/base/index.md b/doc/src/base/index.md index 5356aaf26ffa0f..7dc8a9fff2879e 100644 --- a/doc/src/base/index.md +++ b/doc/src/base/index.md @@ -31,5 +31,6 @@ * [Memory-mapped I/O](@ref) * [Base64](@ref) * [File Events](@ref lib-filewatching) + * [Sparse Arrays](@ref) * [Iterative Eigensolvers](@ref lib-itereigen) * [Printf](@ref) diff --git a/doc/src/base/linalg.md b/doc/src/base/linalg.md index a1b3ae0e292d5b..e6d500201aba2f 100644 --- a/doc/src/base/linalg.md +++ b/doc/src/base/linalg.md @@ -91,7 +91,6 @@ Base.LinAlg.pinv Base.LinAlg.nullspace Base.repmat Base.kron -Base.SparseArrays.blkdiag Base.LinAlg.linreg Base.LinAlg.exp(::StridedMatrix{<:Base.LinAlg.BlasFloat}) Base.LinAlg.log(::StridedMatrix) diff --git a/doc/src/index.md b/doc/src/index.md index 43e8bd9f9578ee..b1028b90017a8c 100644 --- a/doc/src/index.md +++ b/doc/src/index.md @@ -81,6 +81,7 @@ * [Delimited Files](@ref) * [Distributed Computing](@ref) * [File Events](@ref lib-filewatching) + * [Sparse Arrays](@ref) * [Iterative Eigensolvers](@ref lib-itereigen) * [Memory-mapped I/O](@ref) * [Printf](@ref) diff --git a/doc/src/manual/arrays.md b/doc/src/manual/arrays.md index 1d187a3d769b0e..d9071cf0eb2745 100644 --- a/doc/src/manual/arrays.md +++ b/doc/src/manual/arrays.md @@ -762,195 +762,3 @@ julia> r -1.58553 -0.921517 0.0 0.866567 ``` - -## Sparse Vectors and Matrices - -Julia has built-in support for sparse vectors and -[sparse matrices](https://en.wikipedia.org/wiki/Sparse_matrix). Sparse arrays are arrays -that contain enough zeros that storing them in a special data structure leads to savings -in space and execution time, compared to dense arrays. - -### [Compressed Sparse Column (CSC) Sparse Matrix Storage](@id man-csc) - -In Julia, sparse matrices are stored in the [Compressed Sparse Column (CSC) format](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_column_.28CSC_or_CCS.29). -Julia sparse matrices have the type [`SparseMatrixCSC{Tv,Ti}`](@ref), where `Tv` is the -type of the stored values, and `Ti` is the integer type for storing column pointers and -row indices. 
The internal representation of `SparseMatrixCSC` is as follows: - -```julia -struct SparseMatrixCSC{Tv,Ti<:Integer} <: AbstractSparseMatrix{Tv,Ti} - m::Int # Number of rows - n::Int # Number of columns - colptr::Vector{Ti} # Column i is in colptr[i]:(colptr[i+1]-1) - rowval::Vector{Ti} # Row indices of stored values - nzval::Vector{Tv} # Stored values, typically nonzeros -end -``` - -The compressed sparse column storage makes it easy and quick to access the elements in the column -of a sparse matrix, whereas accessing the sparse matrix by rows is considerably slower. Operations -such as insertion of previously unstored entries one at a time in the CSC structure tend to be slow. This is -because all elements of the sparse matrix that are beyond the point of insertion have to be moved -one place over. - -All operations on sparse matrices are carefully implemented to exploit the CSC data structure -for performance, and to avoid expensive operations. - -If you have data in CSC format from a different application or library, and wish to import it -in Julia, make sure that you use 1-based indexing. The row indices in every column need to be -sorted. If your `SparseMatrixCSC` object contains unsorted row indices, one quick way to sort -them is by doing a double transpose. - -In some applications, it is convenient to store explicit zero values in a `SparseMatrixCSC`. These -*are* accepted by functions in `Base` (but there is no guarantee that they will be preserved in -mutating operations). Such explicitly stored zeros are treated as structural nonzeros by many -routines. The [`nnz`](@ref) function returns the number of elements explicitly stored in the -sparse data structure, including structural nonzeros. In order to count the exact number of -numerical nonzeros, use [`count(!iszero, x)`](@ref), which inspects every stored element of a sparse -matrix. [`dropzeros`](@ref), and the in-place [`dropzeros!`](@ref), can be used to -remove stored zeros from the sparse matrix. - -```jldoctest -julia> A = sparse([1, 2, 3], [1, 2, 3], [0, 2, 0]) -3×3 SparseMatrixCSC{Int64,Int64} with 3 stored entries: - [1, 1] = 0 - [2, 2] = 2 - [3, 3] = 0 - -julia> dropzeros(A) -3×3 SparseMatrixCSC{Int64,Int64} with 1 stored entry: - [2, 2] = 2 -``` - -### Sparse Vector Storage - -Sparse vectors are stored in a close analog to compressed sparse column format for sparse -matrices. In Julia, sparse vectors have the type [`SparseVector{Tv,Ti}`](@ref) where `Tv` -is the type of the stored values and `Ti` the integer type for the indices. The internal -representation is as follows: - -```julia -struct SparseVector{Tv,Ti<:Integer} <: AbstractSparseVector{Tv,Ti} - n::Int # Length of the sparse vector - nzind::Vector{Ti} # Indices of stored values - nzval::Vector{Tv} # Stored values, typically nonzeros -end -``` - -As for [`SparseMatrixCSC`](@ref), the `SparseVector` type can also contain explicitly -stored zeros. (See [Sparse Matrix Storage](@ref man-csc).). - -### Sparse Vector and Matrix Constructors - -The simplest way to create a sparse array is to use a function equivalent to the [`zeros`](@ref) -function that Julia provides for working with dense arrays. To produce a -sparse array instead, you can use the same name with an `sp` prefix: - -```jldoctest -julia> spzeros(3) -3-element SparseVector{Float64,Int64} with 0 stored entries -``` - -The [`sparse`](@ref) function is often a handy way to construct sparse arrays. 
For -example, to construct a sparse matrix we can input a vector `I` of row indices, a vector -`J` of column indices, and a vector `V` of stored values (this is also known as the -[COO (coordinate) format](https://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_.28COO.29)). -`sparse(I,J,V)` then constructs a sparse matrix such that `S[I[k], J[k]] = V[k]`. The -equivalent sparse vector constructor is [`sparsevec`](@ref), which takes the (row) index -vector `I` and the vector `V` with the stored values and constructs a sparse vector `R` -such that `R[I[k]] = V[k]`. - -```jldoctest sparse_function -julia> I = [1, 4, 3, 5]; J = [4, 7, 18, 9]; V = [1, 2, -5, 3]; - -julia> S = sparse(I,J,V) -5×18 SparseMatrixCSC{Int64,Int64} with 4 stored entries: - [1 , 4] = 1 - [4 , 7] = 2 - [5 , 9] = 3 - [3 , 18] = -5 - -julia> R = sparsevec(I,V) -5-element SparseVector{Int64,Int64} with 4 stored entries: - [1] = 1 - [3] = -5 - [4] = 2 - [5] = 3 -``` - -The inverse of the [`sparse`](@ref) and [`sparsevec`](@ref) functions is -[`findnz`](@ref), which retrieves the inputs used to create the sparse array. -There is also a [`findn`](@ref) function which only returns the index vectors. - -```jldoctest sparse_function -julia> findnz(S) -([1, 4, 5, 3], [4, 7, 9, 18], [1, 2, 3, -5]) - -julia> findn(S) -([1, 4, 5, 3], [4, 7, 9, 18]) - -julia> findnz(R) -([1, 3, 4, 5], [1, -5, 2, 3]) - -julia> find(!iszero, R) -4-element Array{Int64,1}: - 1 - 3 - 4 - 5 -``` - -Another way to create a sparse array is to convert a dense array into a sparse array using -the [`sparse`](@ref) function: - -```jldoctest -julia> sparse(Matrix(1.0I, 5, 5)) -5×5 SparseMatrixCSC{Float64,Int64} with 5 stored entries: - [1, 1] = 1.0 - [2, 2] = 1.0 - [3, 3] = 1.0 - [4, 4] = 1.0 - [5, 5] = 1.0 - -julia> sparse([1.0, 0.0, 1.0]) -3-element SparseVector{Float64,Int64} with 2 stored entries: - [1] = 1.0 - [3] = 1.0 -``` - -You can go in the other direction using the [`Array`](@ref) constructor. The [`issparse`](@ref) -function can be used to query if a matrix is sparse. - -```jldoctest -julia> issparse(spzeros(5)) -true -``` - -### Sparse matrix operations - -Arithmetic operations on sparse matrices also work as they do on dense matrices. Indexing of, -assignment into, and concatenation of sparse matrices work in the same way as dense matrices. -Indexing operations, especially assignment, are expensive, when carried out one element at a time. -In many cases it may be better to convert the sparse matrix into `(I,J,V)` format using [`findnz`](@ref), -manipulate the values or the structure in the dense vectors `(I,J,V)`, and then reconstruct -the sparse matrix. - -### Correspondence of dense and sparse methods - -The following table gives a correspondence between built-in methods on sparse matrices and their -corresponding methods on dense matrix types. In general, methods that generate sparse matrices -differ from their dense counterparts in that the resulting matrix follows the same sparsity pattern -as a given sparse matrix `S`, or that the resulting sparse matrix has density `d`, i.e. each matrix -element has a probability `d` of being non-zero. - -Details can be found in the [Sparse Vectors and Matrices](@ref stdlib-sparse-arrays) -section of the Julia Base reference. 
- -| Sparse | Dense | Description | -|:-------------------------- |:---------------------- |:--------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [`spzeros(m,n)`](@ref) | [`zeros(m,n)`](@ref) | Creates a *m*-by-*n* matrix of zeros. ([`spzeros(m,n)`](@ref) is empty.) | -| [`sparse(I, n, n)`](@ref) | [`Matrix(I,n,n)`](@ref)| Creates a *n*-by-*n* identity matrix. | -| [`Array(S)`](@ref) | [`sparse(A)`](@ref) | Interconverts between dense and sparse formats. | -| [`sprand(m,n,d)`](@ref) | [`rand(m,n)`](@ref) | Creates a *m*-by-*n* random matrix (of density *d*) with iid non-zero elements distributed uniformly on the half-open interval ``[0, 1)``. | -| [`sprandn(m,n,d)`](@ref) | [`randn(m,n)`](@ref) | Creates a *m*-by-*n* random matrix (of density *d*) with iid non-zero elements distributed according to the standard normal (Gaussian) distribution. | -| [`sprandn(m,n,d,X)`](@ref) | [`randn(m,n,X)`](@ref) | Creates a *m*-by-*n* random matrix (of density *d*) with iid non-zero elements distributed according to the *X* distribution. (Requires the `Distributions` package.) | diff --git a/stdlib/Future/test/runtests.jl b/stdlib/Future/test/runtests.jl index 4233dc0fad0bdb..820f4bdd3cedd7 100644 --- a/stdlib/Future/test/runtests.jl +++ b/stdlib/Future/test/runtests.jl @@ -2,6 +2,7 @@ using Test using Future +using SparseArrays @testset "Future.copy! for AbstractSet" begin for S = (Set, BitSet) diff --git a/stdlib/IterativeEigensolvers/test/runtests.jl b/stdlib/IterativeEigensolvers/test/runtests.jl index 06b094e03f84fc..6f9516d295f243 100644 --- a/stdlib/IterativeEigensolvers/test/runtests.jl +++ b/stdlib/IterativeEigensolvers/test/runtests.jl @@ -1,7 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license using IterativeEigensolvers -using Test +using Test, SparseArrays @testset "eigs" begin srand(1234) diff --git a/stdlib/SparseArrays/docs/src/index.md b/stdlib/SparseArrays/docs/src/index.md new file mode 100644 index 00000000000000..95e4b4f3bc4426 --- /dev/null +++ b/stdlib/SparseArrays/docs/src/index.md @@ -0,0 +1,216 @@ +# Sparse Arrays + +Julia has support for sparse vectors and [sparse matrices](https://en.wikipedia.org/wiki/Sparse_matrix) +in the `SparseArrays` stdlib module. Sparse arrays are arrays that contain enough zeros +that storing them in a special data structure leads to savings in space and execution time, +compared to dense arrays. + +## [Compressed Sparse Column (CSC) Sparse Matrix Storage](@id man-csc) + +In Julia, sparse matrices are stored in the [Compressed Sparse Column (CSC) format](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_column_.28CSC_or_CCS.29). +Julia sparse matrices have the type [`SparseMatrixCSC{Tv,Ti}`](@ref), where `Tv` is the +type of the stored values, and `Ti` is the integer type for storing column pointers and +row indices. The internal representation of `SparseMatrixCSC` is as follows: + +```julia +struct SparseMatrixCSC{Tv,Ti<:Integer} <: AbstractSparseMatrix{Tv,Ti} + m::Int # Number of rows + n::Int # Number of columns + colptr::Vector{Ti} # Column i is in colptr[i]:(colptr[i+1]-1) + rowval::Vector{Ti} # Row indices of stored values + nzval::Vector{Tv} # Stored values, typically nonzeros +end +``` + +The compressed sparse column storage makes it easy and quick to access the elements in the column +of a sparse matrix, whereas accessing the sparse matrix by rows is considerably slower. 
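As a rough illustration of that access-cost asymmetry (a sketch only, assuming `SparseArrays` is loaded; the matrix size, density, and index are arbitrary):

```julia
using SparseArrays

S = sprand(10_000, 10_000, 1e-3)
S[:, 17]   # cheap: the column is one contiguous colptr-delimited block of rowval/nzval
S[17, :]   # considerably slower: every column has to be searched for row 17
```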
Operations +such as insertion of previously unstored entries one at a time in the CSC structure tend to be slow. This is +because all elements of the sparse matrix that are beyond the point of insertion have to be moved +one place over. + +All operations on sparse matrices are carefully implemented to exploit the CSC data structure +for performance, and to avoid expensive operations. + +If you have data in CSC format from a different application or library, and wish to import it +in Julia, make sure that you use 1-based indexing. The row indices in every column need to be +sorted. If your `SparseMatrixCSC` object contains unsorted row indices, one quick way to sort +them is by doing a double transpose. + +In some applications, it is convenient to store explicit zero values in a `SparseMatrixCSC`. These +*are* accepted by functions in `Base` (but there is no guarantee that they will be preserved in +mutating operations). Such explicitly stored zeros are treated as structural nonzeros by many +routines. The [`nnz`](@ref) function returns the number of elements explicitly stored in the +sparse data structure, including structural nonzeros. In order to count the exact number of +numerical nonzeros, use [`count(!iszero, x)`](@ref), which inspects every stored element of a sparse +matrix. [`dropzeros`](@ref), and the in-place [`dropzeros!`](@ref), can be used to +remove stored zeros from the sparse matrix. + +```jldoctest +julia> A = sparse([1, 2, 3], [1, 2, 3], [0, 2, 0]) +3×3 SparseMatrixCSC{Int64,Int64} with 3 stored entries: + [1, 1] = 0 + [2, 2] = 2 + [3, 3] = 0 + +julia> dropzeros(A) +3×3 SparseMatrixCSC{Int64,Int64} with 1 stored entry: + [2, 2] = 2 +``` + +## Sparse Vector Storage + +Sparse vectors are stored in a close analog to compressed sparse column format for sparse +matrices. In Julia, sparse vectors have the type [`SparseVector{Tv,Ti}`](@ref) where `Tv` +is the type of the stored values and `Ti` the integer type for the indices. The internal +representation is as follows: + +```julia +struct SparseVector{Tv,Ti<:Integer} <: AbstractSparseVector{Tv,Ti} + n::Int # Length of the sparse vector + nzind::Vector{Ti} # Indices of stored values + nzval::Vector{Tv} # Stored values, typically nonzeros +end +``` + +As for [`SparseMatrixCSC`](@ref), the `SparseVector` type can also contain explicitly +stored zeros. (See [Sparse Matrix Storage](@ref man-csc).). + +## Sparse Vector and Matrix Constructors + +The simplest way to create a sparse array is to use a function equivalent to the [`zeros`](@ref) +function that Julia provides for working with dense arrays. To produce a +sparse array instead, you can use the same name with an `sp` prefix: + +```jldoctest +julia> spzeros(3) +3-element SparseVector{Float64,Int64} with 0 stored entries +``` + +The [`sparse`](@ref) function is often a handy way to construct sparse arrays. For +example, to construct a sparse matrix we can input a vector `I` of row indices, a vector +`J` of column indices, and a vector `V` of stored values (this is also known as the +[COO (coordinate) format](https://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_.28COO.29)). +`sparse(I,J,V)` then constructs a sparse matrix such that `S[I[k], J[k]] = V[k]`. The +equivalent sparse vector constructor is [`sparsevec`](@ref), which takes the (row) index +vector `I` and the vector `V` with the stored values and constructs a sparse vector `R` +such that `R[I[k]] = V[k]`. 
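The CSC import note above suggests a double transpose to sort unsorted row indices; an alternative, sketched here with the `findnz`/`sparse` pair described in this section and a small stand-in matrix, is to rebuild the matrix from its coordinate triplet, which re-sorts and re-compresses the storage:

```julia
using SparseArrays

S = sparse([1, 3, 2], [1, 1, 2], [5.0, 6.0, 7.0])  # stand-in for an imported CSC matrix
I, J, V = findnz(S)              # stored entries in coordinate (COO) form
S = sparse(I, J, V, size(S)...)  # rebuilding sorts row indices within each column
```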
+ +```jldoctest sparse_function +julia> I = [1, 4, 3, 5]; J = [4, 7, 18, 9]; V = [1, 2, -5, 3]; + +julia> S = sparse(I,J,V) +5×18 SparseMatrixCSC{Int64,Int64} with 4 stored entries: + [1 , 4] = 1 + [4 , 7] = 2 + [5 , 9] = 3 + [3 , 18] = -5 + +julia> R = sparsevec(I,V) +5-element SparseVector{Int64,Int64} with 4 stored entries: + [1] = 1 + [3] = -5 + [4] = 2 + [5] = 3 +``` + +The inverse of the [`sparse`](@ref) and [`sparsevec`](@ref) functions is +[`findnz`](@ref), which retrieves the inputs used to create the sparse array. +There is also a [`findn`](@ref) function which only returns the index vectors. + +```jldoctest sparse_function +julia> findnz(S) +([1, 4, 5, 3], [4, 7, 9, 18], [1, 2, 3, -5]) + +julia> findn(S) +([1, 4, 5, 3], [4, 7, 9, 18]) + +julia> findnz(R) +([1, 3, 4, 5], [1, -5, 2, 3]) + +julia> find(!iszero, R) +4-element Array{Int64,1}: + 1 + 3 + 4 + 5 +``` + +Another way to create a sparse array is to convert a dense array into a sparse array using +the [`sparse`](@ref) function: + +```jldoctest +julia> sparse(Matrix(1.0I, 5, 5)) +5×5 SparseMatrixCSC{Float64,Int64} with 5 stored entries: + [1, 1] = 1.0 + [2, 2] = 1.0 + [3, 3] = 1.0 + [4, 4] = 1.0 + [5, 5] = 1.0 + +julia> sparse([1.0, 0.0, 1.0]) +3-element SparseVector{Float64,Int64} with 2 stored entries: + [1] = 1.0 + [3] = 1.0 +``` + +You can go in the other direction using the [`Array`](@ref) constructor. The [`issparse`](@ref) +function can be used to query if a matrix is sparse. + +```jldoctest +julia> issparse(spzeros(5)) +true +``` + +## Sparse matrix operations + +Arithmetic operations on sparse matrices also work as they do on dense matrices. Indexing of, +assignment into, and concatenation of sparse matrices work in the same way as dense matrices. +Indexing operations, especially assignment, are expensive, when carried out one element at a time. +In many cases it may be better to convert the sparse matrix into `(I,J,V)` format using [`findnz`](@ref), +manipulate the values or the structure in the dense vectors `(I,J,V)`, and then reconstruct +the sparse matrix. + +## Correspondence of dense and sparse methods + +The following table gives a correspondence between built-in methods on sparse matrices and their +corresponding methods on dense matrix types. In general, methods that generate sparse matrices +differ from their dense counterparts in that the resulting matrix follows the same sparsity pattern +as a given sparse matrix `S`, or that the resulting sparse matrix has density `d`, i.e. each matrix +element has a probability `d` of being non-zero. + +Details can be found in the [Sparse Vectors and Matrices](@ref stdlib-sparse-arrays) +section of the standard library reference. + +| Sparse | Dense | Description | +|:-------------------------- |:---------------------- |:--------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [`spzeros(m,n)`](@ref) | [`zeros(m,n)`](@ref) | Creates a *m*-by-*n* matrix of zeros. ([`spzeros(m,n)`](@ref) is empty.) | +| [`sparse(I, n, n)`](@ref) | [`Matrix(I,n,n)`](@ref)| Creates a *n*-by-*n* identity matrix. | +| [`Array(S)`](@ref) | [`sparse(A)`](@ref) | Interconverts between dense and sparse formats. | +| [`sprand(m,n,d)`](@ref) | [`rand(m,n)`](@ref) | Creates a *m*-by-*n* random matrix (of density *d*) with iid non-zero elements distributed uniformly on the half-open interval ``[0, 1)``. 
| +| [`sprandn(m,n,d)`](@ref) | [`randn(m,n)`](@ref) | Creates a *m*-by-*n* random matrix (of density *d*) with iid non-zero elements distributed according to the standard normal (Gaussian) distribution. | +| [`sprandn(m,n,d,X)`](@ref) | [`randn(m,n,X)`](@ref) | Creates a *m*-by-*n* random matrix (of density *d*) with iid non-zero elements distributed according to the *X* distribution. (Requires the `Distributions` package.) | + +# [Sparse Arrays](@id stdlib-sparse-arrays) + +```@docs +SparseArrays.SparseVector +SparseArrays.SparseMatrixCSC +SparseArrays.sparse +SparseArrays.sparsevec +SparseArrays.issparse +SparseArrays.nnz +SparseArrays.spzeros +SparseArrays.spdiagm +SparseArrays.blkdiag +SparseArrays.sprand +SparseArrays.sprandn +SparseArrays.nonzeros +SparseArrays.rowvals +SparseArrays.nzrange +SparseArrays.dropzeros!(::SparseMatrixCSC, ::Bool) +SparseArrays.dropzeros(::SparseMatrixCSC, ::Bool) +SparseArrays.dropzeros!(::SparseVector, ::Bool) +SparseArrays.dropzeros(::SparseVector, ::Bool) +SparseArrays.permute +permute!{Tv, Ti, Tp <: Integer, Tq <: Integer}(::SparseMatrixCSC{Tv,Ti}, ::SparseMatrixCSC{Tv,Ti}, ::AbstractArray{Tp,1}, ::AbstractArray{Tq,1}) +``` diff --git a/base/sparse/sparse.jl b/stdlib/SparseArrays/src/SparseArrays.jl similarity index 75% rename from base/sparse/sparse.jl rename to stdlib/SparseArrays/src/SparseArrays.jl index 28ca964b7bdc05..ab00566065773f 100644 --- a/base/sparse/sparse.jl +++ b/stdlib/SparseArrays/src/SparseArrays.jl @@ -1,5 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +__precompile__(true) + """ Support for sparse arrays. Provides `AbstractSparseArray` and subtypes. """ @@ -36,5 +38,14 @@ include("sparsematrix.jl") include("sparsevector.jl") include("higherorderfns.jl") include("linalg.jl") +include("deprecated.jl") + + +# temporarily moved here and commented out from from base/linalg/diagonal.jl, base/linalg/tridiag.jl +# and base/linalg/bidiag.jl due to their usage of spzeros +similar(B::Bidiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) +similar(D::Diagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) +similar(S::SymTridiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) +similar(M::Tridiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) end diff --git a/base/sparse/abstractsparse.jl b/stdlib/SparseArrays/src/abstractsparse.jl similarity index 100% rename from base/sparse/abstractsparse.jl rename to stdlib/SparseArrays/src/abstractsparse.jl diff --git a/stdlib/SparseArrays/src/deprecated.jl b/stdlib/SparseArrays/src/deprecated.jl new file mode 100644 index 00000000000000..8cae8ed0d544f0 --- /dev/null +++ b/stdlib/SparseArrays/src/deprecated.jl @@ -0,0 +1,229 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + +using Base: @deprecate, depwarn + +# BEGIN 0.7 deprecations + +# PR #22475 +import Base: cat +@deprecate cat(::Type{Val{N}}, A::_SparseConcatGroup...) where {N} cat(Val(N), A...) +@deprecate cat(::Type{Val{N}}, A::_DenseConcatGroup...) where {N} cat(Val(N), A...) 
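For concreteness, a hedged illustration (not part of the patch itself) of the call-site rewrite these two `cat` deprecations imply, using `spzeros` only as a convenient sparse concatenand:

```julia
using SparseArrays

A = spzeros(2, 2)
# old, now-deprecated form passed the type:      cat(Val{2}, A, A)
# replacement suggested by the rules above passes the value instead:
B = cat(Val(2), A, A)   # 2×4 sparse result, concatenated along dimension 2
```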
+ +# deprecate remaining vectorized methods over SparseVectors (zero-preserving) +for op in (:floor, :ceil, :trunc, :round, + :log1p, :expm1, :sinpi, + :sin, :tan, :sind, :tand, + :asin, :atan, :asind, :atand, + :sinh, :tanh, :asinh, :atanh) + @eval import Base.Math: $op + @eval @deprecate ($op)(x::AbstractSparseVector{<:Number,<:Integer}) ($op).(x) +end +# deprecate remaining vectorized methods over SparseVectors (not-zero-preserving) +for op in (:exp, :exp2, :exp10, :log, :log2, :log10, + :cos, :cosd, :acos, :cosh, :cospi, + :csc, :cscd, :acot, :csch, :acsch, + :cot, :cotd, :acosd, :coth, + :sec, :secd, :acotd, :sech, :asech) + @eval import Base.Math: $op + @eval @deprecate ($op)(x::AbstractSparseVector{<:Number,<:Integer}) ($op).(x) +end + +# PR 23341 +import Base.LinAlg: diagm +@deprecate diagm(A::SparseMatrixCSC) sparse(Diagonal(sparsevec(A))) + +# PR #23757 +@deprecate spdiagm(x::AbstractVector) sparse(Diagonal(x)) +function spdiagm(x::AbstractVector, d::Number) + depwarn(string("`spdiagm(x::AbstractVector, d::Number)` is deprecated, use ", + "`spdiagm(d => x)` instead, which now returns a square matrix. To preserve the old ", + "behaviour, use `sparse(SparseArrays.spdiagm_internal(d => x)...)`"), :spdiagm) + I, J, V = spdiagm_internal(d => x) + return sparse(I, J, V) +end +function spdiagm(x, d) + depwarn(string("`spdiagm((x1, x2, ...), (d1, d2, ...))` is deprecated, use ", + "`spdiagm(d1 => x1, d2 => x2, ...)` instead, which now returns a square matrix. ", + "To preserve the old behaviour, use ", + "`sparse(SparseArrays.spdiagm_internal(d1 => x1, d2 => x2, ...)...)`"), :spdiagm) + I, J, V = spdiagm_internal((d[i] => x[i] for i in 1:length(x))...) + return sparse(I, J, V) +end +function spdiagm(x, d, m::Integer, n::Integer) + depwarn(string("`spdiagm((x1, x2, ...), (d1, d2, ...), m, n)` is deprecated, use ", + "`spdiagm(d1 => x1, d2 => x2, ...)` instead, which now returns a square matrix. ", + "To specify a non-square matrix and preserve the old behaviour, use ", + "`I, J, V = SparseArrays.spdiagm_internal(d1 => x1, d2 => x2, ...); sparse(I, J, V, m, n)`"), :spdiagm) + I, J, V = spdiagm_internal((d[i] => x[i] for i in 1:length(x))...) + return sparse(I, J, V, m, n) +end + +@deprecate sparse(s::UniformScaling, m::Integer) sparse(s, m, m) + +# PR #25037 +@deprecate spones(A::SparseMatrixCSC) LinAlg.fillstored!(copy(A), 1) +@deprecate spones(A::SparseVector) LinAlg.fillstored!(copy(A), 1) +export spones + +# full for sparse arrays +import Base: full +function full(S::Union{SparseVector,SparseMatrixCSC}) + (arrtypestr, desttypestr) = + isa(S, SparseVector) ? ("SparseVector", "Vector") : + isa(S, SparseMatrixCSC) ? ("SparseMatrixCSC", "Matrix") : + error("should not be reachable!") + depwarn(string( + "`full(S::$(arrtypestr))` (and `full` in general) has been deprecated. ", + "To replace `full(S::$(arrtypestr))`, consider `$(desttypestr)(S)` or, ", + "if that option is too narrow, `Array(S)`."), :full) + return Array(S) +end + +# issue #22849 +import Base: reinterpret +@deprecate reinterpret(::Type{T}, a::SparseMatrixCSC{S}, dims::NTuple{N,Int}) where {T, S, N} reinterpret(T, reshape(a, dims)) + +# deprecate speye +export speye +function speye(n::Integer) + depwarn(string("`speye(n::Integer)` has been deprecated in favor of `I`, `sparse`, and ", + "`SparseMatrixCSC` constructor methods. For a direct replacement, consider ", + "`sparse(1.0I, n, n)`, `SparseMatrixCSC(1.0I, n, n)`, or `SparseMatrixCSC{Float64}(I, n, n)`. 
", + "If `Float64` element type is not necessary, consider the shorter `sparse(I, n, n)` ", + "or `SparseMatrixCSC(I, n, n)` (with default `eltype(I)` of `Bool`)."), :speye) + return sparse(1.0I, n, n) +end +function speye(m::Integer, n::Integer) + depwarn(string("`speye(m::Integer, n::Integer)` has been deprecated in favor of `I`, ", + "`sparse`, and `SparseMatrixCSC` constructor methods. For a direct ", + "replacement, consider `sparse(1.0I, m, n)`, `SparseMatrixCSC(1.0I, m, n)`, ", + "or `SparseMatrixCSC{Float64}(I, m, n)`. If `Float64` element type is not ", + " necessary, consider the shorter `sparse(I, m, n)` or `SparseMatrixCSC(I, m, n)` ", + "(with default `eltype(I)` of `Bool`)."), :speye) + return sparse(1.0I, m, n) +end +function speye(::Type{T}, n::Integer) where T + depwarn(string("`speye(T, n::Integer)` has been deprecated in favor of `I`, `sparse`, and ", + "`SparseMatrixCSC` constructor methods. For a direct replacement, consider ", + "`sparse(T(1)I, n, n)` if `T` is concrete or `SparseMatrixCSC{T}(I, n, n)` ", + "if `T` is either concrete or abstract. If element type `T` is not necessary, ", + "consider the shorter `sparse(I, n, n)` or `SparseMatrixCSC(I, n, n)` ", + "(with default `eltype(I)` of `Bool`)."), :speye) + return SparseMatrixCSC{T}(I, n, n) +end +function speye(::Type{T}, m::Integer, n::Integer) where T + depwarn(string("`speye(T, m::Integer, n::Integer)` has been deprecated in favor of `I`, ", + "`sparse`, and `SparseMatrixCSC` constructor methods. For a direct ", + "replacement, consider `sparse(T(1)I, m, n)` if `T` is concrete or ", + "`SparseMatrixCSC{T}(I, m, n)` if `T` is either concrete or abstract. ", + "If element type `T` is not necessary, consider the shorter ", + "`sparse(I, m, n)` or `SparseMatrixCSC(I, m, n)` (with default `eltype(I)` ", + "of `Bool`)."), :speye) + return SparseMatrixCSC{T}(I, m, n) +end +function speye(S::SparseMatrixCSC{T}) where T + depwarn(string("`speye(S::SparseMatrixCSC{T})` has been deprecated in favor of `I`, ", + "`sparse`, and `SparseMatrixCSC` constructor methods. For a direct ", + "replacement, consider `sparse(T(1)I, size(S)...)` if `T` is concrete or ", + "`SparseMatrixCSC{eltype(S)}(I, size(S))` if `T` is either concrete or abstract. ", + "If preserving element type `T` is not necessary, consider the shorter ", + "`sparse(I, size(S)...)` or `SparseMatrixCSC(I, size(S))` (with default ", + "`eltype(I)` of `Bool`)."), :speye) + return SparseMatrixCSC{T}(I, size(S)...) +end + +# former imports into SparseArrays +import Base: A_mul_B!, Ac_mul_B, Ac_mul_B!, At_mul_B, At_mul_B! +import Base: A_mul_Bc, A_mul_Bt, Ac_mul_Bc, At_mul_Bt +import Base: At_ldiv_B, Ac_ldiv_B, A_ldiv_B! +import Base.LinAlg: At_ldiv_B!, Ac_ldiv_B!, A_rdiv_B!, A_rdiv_Bc!, mul!, ldiv!, rdiv! + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] 
methods from base/sparse/linalg.jl, to deprecate +using Base.LinAlg: Adjoint, Transpose +@deprecate Ac_ldiv_B(A::SparseMatrixCSC, B::RowVector) (\)(adjoint(A), B) +@deprecate At_ldiv_B(A::SparseMatrixCSC, B::RowVector) (\)(transpose(A), B) +@deprecate Ac_ldiv_B(A::SparseMatrixCSC, B::AbstractVecOrMat) (\)(adjoint(A), B) +@deprecate At_ldiv_B(A::SparseMatrixCSC, B::AbstractVecOrMat) (\)(transpose(A), B) +@deprecate A_rdiv_Bc!(A::SparseMatrixCSC{T}, D::Diagonal{T}) where {T} rdiv!(A, adjoint(D)) +@deprecate A_rdiv_Bt!(A::SparseMatrixCSC{T}, D::Diagonal{T}) where {T} rdiv!(A, transpose(D)) +@deprecate A_rdiv_B!(A::SparseMatrixCSC{T}, D::Diagonal{T}) where {T} rdiv!(A, D) +@deprecate A_ldiv_B!(L::LowerTriangular{T,<:SparseMatrixCSCUnion{T}}, B::StridedVecOrMat) where {T} ldiv!(L, B) +@deprecate A_ldiv_B!(U::UpperTriangular{T,<:SparseMatrixCSCUnion{T}}, B::StridedVecOrMat) where {T} ldiv!(U, B) +@deprecate A_mul_Bt(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(A, transpose(B)) +@deprecate A_mul_Bc(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(A, adjoint(B)) +@deprecate At_mul_B(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(transpose(A), B) +@deprecate Ac_mul_B(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(adjoint(A), B) +@deprecate At_mul_Bt(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(transpose(A), transpose(B)) +@deprecate Ac_mul_Bc(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(adjoint(A), adjoint(B)) +@deprecate A_mul_B!(C::StridedVecOrMat, A::SparseMatrixCSC, B::StridedVecOrMat) mul!(C, A, B) +@deprecate Ac_mul_B!(C::StridedVecOrMat, A::SparseMatrixCSC, B::StridedVecOrMat) mul!(C, adjoint(A), B) +@deprecate At_mul_B!(C::StridedVecOrMat, A::SparseMatrixCSC, B::StridedVecOrMat) mul!(C, transpose(A), B) +@deprecate A_mul_B!(α::Number, A::SparseMatrixCSC, B::StridedVecOrMat, β::Number, C::StridedVecOrMat) mul!(α, A, B, β, C) +@deprecate A_mul_B(A::SparseMatrixCSC{TA,S}, x::StridedVector{Tx}) where {TA,S,Tx} (*)(A, x) +@deprecate A_mul_B(A::SparseMatrixCSC{TA,S}, B::StridedMatrix{Tx}) where {TA,S,Tx} (*)(A, B) +@deprecate Ac_mul_B!(α::Number, A::SparseMatrixCSC, B::StridedVecOrMat, β::Number, C::StridedVecOrMat) mul!(α, adjoint(A), B, β, C) +@deprecate Ac_mul_B(A::SparseMatrixCSC{TA,S}, x::StridedVector{Tx}) where {TA,S,Tx} (*)(adjoint(A), x) +@deprecate Ac_mul_B(A::SparseMatrixCSC{TA,S}, B::StridedMatrix{Tx}) where {TA,S,Tx} (*)(adjoint(A), B) +@deprecate At_mul_B!(α::Number, A::SparseMatrixCSC, B::StridedVecOrMat, β::Number, C::StridedVecOrMat) mul!(α, transpose(A), B, β, C) +@deprecate At_mul_B(A::SparseMatrixCSC{TA,S}, x::StridedVector{Tx}) where {TA,S,Tx} (*)(transpose(A), x) +@deprecate At_mul_B(A::SparseMatrixCSC{TA,S}, B::StridedMatrix{Tx}) where {TA,S,Tx} (*)(transpose(A), B) +@deprecate A_mul_Bt(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(A, transpose(B)) +@deprecate A_mul_Bc(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(A, adjoint(B)) +@deprecate At_mul_B(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(transpose(A), B) +@deprecate Ac_mul_B(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(adjoint(A),B) +@deprecate At_mul_Bt(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(transpose(A), transpose(B)) +@deprecate 
Ac_mul_Bc(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(adjoint(A), adjoint(B)) + +# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/sparse/sparsevector.jl, to deprecate +for isunittri in (true, false), islowertri in (true, false) + unitstr = isunittri ? "Unit" : "" + halfstr = islowertri ? "Lower" : "Upper" + tritype = :(Base.LinAlg.$(Symbol(unitstr, halfstr, "Triangular"))) + @eval #=Base.SparseArrays=# begin + using Base.LinAlg: Adjoint, Transpose + @deprecate At_ldiv_B(A::$tritype{TA,<:AbstractMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(transpose(A), b) + @deprecate At_ldiv_B(A::$tritype{TA,<:StridedMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(transpose(A), b) + @deprecate At_ldiv_B(A::$tritype, b::SparseVector) (\)(transpose(A), b) + @deprecate Ac_ldiv_B(A::$tritype{TA,<:AbstractMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(adjoint(A), b) + @deprecate Ac_ldiv_B(A::$tritype{TA,<:StridedMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(adjoint(A), b) + @deprecate Ac_ldiv_B(A::$tritype, b::SparseVector) (\)(adjoint(A), b) + @deprecate A_ldiv_B!(A::$tritype{<:Any,<:StridedMatrix}, b::SparseVector) ldiv!(A, b) + @deprecate At_ldiv_B!(A::$tritype{<:Any,<:StridedMatrix}, b::SparseVector) ldiv!(transpose(A), b) + @deprecate Ac_ldiv_B!(A::$tritype{<:Any,<:StridedMatrix}, b::SparseVector) ldiv!(adjoint(A), b) + end +end + +using Base.LinAlg: Adjoint, Transpose +@deprecate Ac_mul_B(A::SparseMatrixCSC, x::AbstractSparseVector) (*)(adjoint(A), x) +@deprecate At_mul_B(A::SparseMatrixCSC, x::AbstractSparseVector) (*)(transpose(A), x) +@deprecate Ac_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, adjoint(A), x, β, y) +@deprecate Ac_mul_B!(y::StridedVector{Ty}, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, adjoint(A), x) +@deprecate At_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, transpose(A), x, β, y) +@deprecate At_mul_B!(y::StridedVector{Ty}, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, transpose(A), x) +@deprecate A_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, A, x, β, y) +@deprecate A_mul_B!(y::StridedVector{Ty}, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, A, x) +@deprecate At_mul_B!(α::Number, A::StridedMatrix, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, transpose(A), x, β, y) +@deprecate At_mul_B!(y::StridedVector{Ty}, A::StridedMatrix, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, transpose(A), x) +@deprecate At_mul_B(A::StridedMatrix{Ta}, x::AbstractSparseVector{Tx}) where {Ta,Tx} (*)(transpose(A), x) +@deprecate A_mul_B!(α::Number, A::StridedMatrix, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, A, x, β, y) +@deprecate A_mul_B!(y::StridedVector{Ty}, A::StridedMatrix, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, A, x) + +# methods involving RowVector from base/sparse/linalg.jl, to deprecate +\(::SparseMatrixCSC, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) +\(::Adjoint{<:Any,<:SparseMatrixCSC}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) +\(::Transpose{<:Any,<:SparseMatrixCSC}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) + +# methods involving RowVector from 
base/sparse/higherorderfns.jl, to deprecate +@eval SparseArrays.HigherOrderFns begin + BroadcastStyle(::Type{<:Base.RowVector{T,<:Vector}}) where T = Broadcast.MatrixStyle() +end + +import Base: asyncmap +@deprecate asyncmap(f, s::AbstractSparseArray...; kwargs...) sparse(asyncmap(f, map(Array, s)...; kwargs...)) + + +# END 0.7 deprecations + +# BEGIN 1.0 deprecations + +# END 1.0 deprecations diff --git a/base/sparse/higherorderfns.jl b/stdlib/SparseArrays/src/higherorderfns.jl similarity index 100% rename from base/sparse/higherorderfns.jl rename to stdlib/SparseArrays/src/higherorderfns.jl diff --git a/base/sparse/linalg.jl b/stdlib/SparseArrays/src/linalg.jl similarity index 100% rename from base/sparse/linalg.jl rename to stdlib/SparseArrays/src/linalg.jl diff --git a/base/sparse/sparsematrix.jl b/stdlib/SparseArrays/src/sparsematrix.jl similarity index 100% rename from base/sparse/sparsematrix.jl rename to stdlib/SparseArrays/src/sparsematrix.jl diff --git a/base/sparse/sparsevector.jl b/stdlib/SparseArrays/src/sparsevector.jl similarity index 99% rename from base/sparse/sparsevector.jl rename to stdlib/SparseArrays/src/sparsevector.jl index 26c3288bef47e3..5008f0f8c70b13 100644 --- a/base/sparse/sparsevector.jl +++ b/stdlib/SparseArrays/src/sparsevector.jl @@ -896,7 +896,7 @@ vec(x::AbstractSparseVector) = x copy(x::AbstractSparseVector) = SparseVector(length(x), copy(nonzeroinds(x)), copy(nonzeros(x))) -function reinterpret(::Type{T}, x::AbstractSparseVector{Tv}) where {T,Tv} +function Base.reinterpret(::Type{T}, x::AbstractSparseVector{Tv}) where {T,Tv} sizeof(T) == sizeof(Tv) || throw(ArgumentError("reinterpret of sparse vectors only supports element types of the same size.")) SparseVector(length(x), copy(nonzeroinds(x)), reinterpret(T, nonzeros(x))) diff --git a/test/sparse/higherorderfns.jl b/stdlib/SparseArrays/test/higherorderfns.jl similarity index 100% rename from test/sparse/higherorderfns.jl rename to stdlib/SparseArrays/test/higherorderfns.jl diff --git a/stdlib/SparseArrays/test/runtests.jl b/stdlib/SparseArrays/test/runtests.jl new file mode 100644 index 00000000000000..e761a2fd34567c --- /dev/null +++ b/stdlib/SparseArrays/test/runtests.jl @@ -0,0 +1,7 @@ +# This file is a part of Julia. 
License is MIT: https://julialang.org/license + +using Test, SparseArrays + +include("higherorderfns.jl") +include("sparse.jl") +include("sparsevector.jl") diff --git a/test/sparse/sparse.jl b/stdlib/SparseArrays/test/sparse.jl similarity index 90% rename from test/sparse/sparse.jl rename to stdlib/SparseArrays/test/sparse.jl index 00bc183992f7d5..5de30ff9798a62 100644 --- a/test/sparse/sparse.jl +++ b/stdlib/SparseArrays/test/sparse.jl @@ -23,7 +23,7 @@ end end @testset "indtype" begin - @test Base.SparseArrays.indtype(sparse(Int8[1,1],Int8[1,1],[1,1])) == Int8 + @test SparseArrays.indtype(sparse(Int8[1,1],Int8[1,1],[1,1])) == Int8 end @testset "sparse matrix construction" begin @@ -303,8 +303,8 @@ end a = sprand(10, 5, 0.7) b = sprand(5, 15, 0.3) @test maximum(abs.(a*b - Array(a)*Array(b))) < 100*eps() - @test maximum(abs.(Base.SparseArrays.spmatmul(a,b,sortindices=:sortcols) - Array(a)*Array(b))) < 100*eps() - @test maximum(abs.(Base.SparseArrays.spmatmul(a,b,sortindices=:doubletranspose) - Array(a)*Array(b))) < 100*eps() + @test maximum(abs.(SparseArrays.spmatmul(a,b,sortindices=:sortcols) - Array(a)*Array(b))) < 100*eps() + @test maximum(abs.(SparseArrays.spmatmul(a,b,sortindices=:doubletranspose) - Array(a)*Array(b))) < 100*eps() @test Array(kron(a,b)) == kron(Array(a), Array(b)) @test Array(kron(Array(a),b)) == kron(Array(a), Array(b)) @test Array(kron(a,Array(b))) == kron(Array(a), Array(b)) @@ -929,47 +929,47 @@ end @test nnz(A) == 19 # Test argument bounds checking for dropstored!(A, i, j) - @test_throws BoundsError Base.SparseArrays.dropstored!(A, 0, 1) - @test_throws BoundsError Base.SparseArrays.dropstored!(A, 1, 0) - @test_throws BoundsError Base.SparseArrays.dropstored!(A, 1, 11) - @test_throws BoundsError Base.SparseArrays.dropstored!(A, 11, 1) + @test_throws BoundsError SparseArrays.dropstored!(A, 0, 1) + @test_throws BoundsError SparseArrays.dropstored!(A, 1, 0) + @test_throws BoundsError SparseArrays.dropstored!(A, 1, 11) + @test_throws BoundsError SparseArrays.dropstored!(A, 11, 1) # Test argument bounds checking for dropstored!(A, I, J) - @test_throws BoundsError Base.SparseArrays.dropstored!(A, 0:1, 1:1) - @test_throws BoundsError Base.SparseArrays.dropstored!(A, 1:1, 0:1) - @test_throws BoundsError Base.SparseArrays.dropstored!(A, 10:11, 1:1) - @test_throws BoundsError Base.SparseArrays.dropstored!(A, 1:1, 10:11) + @test_throws BoundsError SparseArrays.dropstored!(A, 0:1, 1:1) + @test_throws BoundsError SparseArrays.dropstored!(A, 1:1, 0:1) + @test_throws BoundsError SparseArrays.dropstored!(A, 10:11, 1:1) + @test_throws BoundsError SparseArrays.dropstored!(A, 1:1, 10:11) # Test behavior of dropstored!(A, i, j) # --> Test dropping a single stored entry - Base.SparseArrays.dropstored!(A, 1, 2) + SparseArrays.dropstored!(A, 1, 2) @test nnz(A) == 18 # --> Test dropping a single nonstored entry - Base.SparseArrays.dropstored!(A, 2, 1) + SparseArrays.dropstored!(A, 2, 1) @test nnz(A) == 18 # Test behavior of dropstored!(A, I, J) and derivs. 
# --> Test dropping a single row including stored and nonstored entries - Base.SparseArrays.dropstored!(A, 1, :) + SparseArrays.dropstored!(A, 1, :) @test nnz(A) == 9 # --> Test dropping a single column including stored and nonstored entries - Base.SparseArrays.dropstored!(A, :, 2) + SparseArrays.dropstored!(A, :, 2) @test nnz(A) == 0 # --> Introduce nonzeros in rows one and two and columns two and three A[1:2,:] = 1 A[:,2:3] = 2 @test nnz(A) == 36 # --> Test dropping multiple rows containing stored and nonstored entries - Base.SparseArrays.dropstored!(A, 1:3, :) + SparseArrays.dropstored!(A, 1:3, :) @test nnz(A) == 14 # --> Test dropping multiple columns containing stored and nonstored entries - Base.SparseArrays.dropstored!(A, :, 2:4) + SparseArrays.dropstored!(A, :, 2:4) @test nnz(A) == 0 # --> Introduce nonzeros in every other row A[1:2:9, :] = 1 @test nnz(A) == 50 # --> Test dropping a block of the matrix towards the upper left - Base.SparseArrays.dropstored!(A, 2:5, 2:5) + SparseArrays.dropstored!(A, 2:5, 2:5) @test nnz(A) == 42 end @@ -1048,110 +1048,112 @@ end @test iA === iS === nothing end -# findmin/findmax/minumum/maximum +@testset "findmin/findmax/minumum/maximum" begin + A = sparse([1.0 5.0 6.0; + 5.0 2.0 4.0]) + for (tup, rval, rind) in [((1,), [1.0 2.0 4.0], [CartesianIndex(1,1) CartesianIndex(2,2) CartesianIndex(2,3)]), + ((2,), reshape([1.0,2.0], 2, 1), reshape([CartesianIndex(1,1),CartesianIndex(2,2)], 2, 1)), + ((1,2), fill(1.0,1,1),fill(CartesianIndex(1,1),1,1))] + @test findmin(A, tup) == (rval, rind) + end -A = sparse([1.0 5.0 6.0; - 5.0 2.0 4.0]) -for (tup, rval, rind) in [((1,), [1.0 2.0 4.0], [CartesianIndex(1,1) CartesianIndex(2,2) CartesianIndex(2,3)]), - ((2,), reshape([1.0,2.0], 2, 1), reshape([CartesianIndex(1,1),CartesianIndex(2,2)], 2, 1)), - ((1,2), fill(1.0,1,1),fill(CartesianIndex(1,1),1,1))] - @test findmin(A, tup) == (rval, rind) -end + for (tup, rval, rind) in [((1,), [5.0 5.0 6.0], [CartesianIndex(2,1) CartesianIndex(1,2) CartesianIndex(1,3)]), + ((2,), reshape([6.0,5.0], 2, 1), reshape([CartesianIndex(1,3),CartesianIndex(2,1)], 2, 1)), + ((1,2), fill(6.0,1,1),fill(CartesianIndex(1,3),1,1))] + @test findmax(A, tup) == (rval, rind) + end -for (tup, rval, rind) in [((1,), [5.0 5.0 6.0], [CartesianIndex(2,1) CartesianIndex(1,2) CartesianIndex(1,3)]), - ((2,), reshape([6.0,5.0], 2, 1), reshape([CartesianIndex(1,3),CartesianIndex(2,1)], 2, 1)), - ((1,2), fill(6.0,1,1),fill(CartesianIndex(1,3),1,1))] - @test findmax(A, tup) == (rval, rind) -end + #issue 23209 -#issue 23209 + A = sparse([1.0 5.0 6.0; + NaN 2.0 4.0]) + for (tup, rval, rind) in [((1,), [NaN 2.0 4.0], [CartesianIndex(2,1) CartesianIndex(2,2) CartesianIndex(2,3)]), + ((2,), reshape([1.0, NaN], 2, 1), reshape([CartesianIndex(1,1),CartesianIndex(2,1)], 2, 1)), + ((1,2), fill(NaN,1,1),fill(CartesianIndex(2,1),1,1))] + @test isequal(findmin(A, tup), (rval, rind)) + end -A = sparse([1.0 5.0 6.0; - NaN 2.0 4.0]) -for (tup, rval, rind) in [((1,), [NaN 2.0 4.0], [CartesianIndex(2,1) CartesianIndex(2,2) CartesianIndex(2,3)]), - ((2,), reshape([1.0, NaN], 2, 1), reshape([CartesianIndex(1,1),CartesianIndex(2,1)], 2, 1)), - ((1,2), fill(NaN,1,1),fill(CartesianIndex(2,1),1,1))] - @test isequal(findmin(A, tup), (rval, rind)) -end + for (tup, rval, rind) in [((1,), [NaN 5.0 6.0], [CartesianIndex(2,1) CartesianIndex(1,2) CartesianIndex(1,3)]), + ((2,), reshape([6.0, NaN], 2, 1), reshape([CartesianIndex(1,3),CartesianIndex(2,1)], 2, 1)), + ((1,2), fill(NaN,1,1),fill(CartesianIndex(2,1),1,1))] + @test 
isequal(findmax(A, tup), (rval, rind)) + end -for (tup, rval, rind) in [((1,), [NaN 5.0 6.0], [CartesianIndex(2,1) CartesianIndex(1,2) CartesianIndex(1,3)]), - ((2,), reshape([6.0, NaN], 2, 1), reshape([CartesianIndex(1,3),CartesianIndex(2,1)], 2, 1)), - ((1,2), fill(NaN,1,1),fill(CartesianIndex(2,1),1,1))] - @test isequal(findmax(A, tup), (rval, rind)) -end + A = sparse([1.0 NaN 6.0; + NaN 2.0 4.0]) + for (tup, rval, rind) in [((1,), [NaN NaN 4.0], [CartesianIndex(2,1) CartesianIndex(1,2) CartesianIndex(2,3)]), + ((2,), reshape([NaN, NaN], 2, 1), reshape([CartesianIndex(1,2),CartesianIndex(2,1)], 2, 1)), + ((1,2), fill(NaN,1,1),fill(CartesianIndex(2,1),1,1))] + @test isequal(findmin(A, tup), (rval, rind)) + end -A = sparse([1.0 NaN 6.0; - NaN 2.0 4.0]) -for (tup, rval, rind) in [((1,), [NaN NaN 4.0], [CartesianIndex(2,1) CartesianIndex(1,2) CartesianIndex(2,3)]), - ((2,), reshape([NaN, NaN], 2, 1), reshape([CartesianIndex(1,2),CartesianIndex(2,1)], 2, 1)), - ((1,2), fill(NaN,1,1),fill(CartesianIndex(2,1),1,1))] - @test isequal(findmin(A, tup), (rval, rind)) -end + for (tup, rval, rind) in [((1,), [NaN NaN 6.0], [CartesianIndex(2,1) CartesianIndex(1,2) CartesianIndex(1,3)]), + ((2,), reshape([NaN, NaN], 2, 1), reshape([CartesianIndex(1,2),CartesianIndex(2,1)], 2, 1)), + ((1,2), fill(NaN,1,1),fill(CartesianIndex(2,1),1,1))] + @test isequal(findmax(A, tup), (rval, rind)) + end -for (tup, rval, rind) in [((1,), [NaN NaN 6.0], [CartesianIndex(2,1) CartesianIndex(1,2) CartesianIndex(1,3)]), - ((2,), reshape([NaN, NaN], 2, 1), reshape([CartesianIndex(1,2),CartesianIndex(2,1)], 2, 1)), - ((1,2), fill(NaN,1,1),fill(CartesianIndex(2,1),1,1))] - @test isequal(findmax(A, tup), (rval, rind)) -end + A = sparse([Inf -Inf Inf -Inf; + Inf Inf -Inf -Inf]) + for (tup, rval, rind) in [((1,), [Inf -Inf -Inf -Inf], [CartesianIndex(1,1) CartesianIndex(1,2) CartesianIndex(2,3) CartesianIndex(1,4)]), + ((2,), reshape([-Inf -Inf], 2, 1), reshape([CartesianIndex(1,2),CartesianIndex(2,3)], 2, 1)), + ((1,2), fill(-Inf,1,1),fill(CartesianIndex(1,2),1,1))] + @test isequal(findmin(A, tup), (rval, rind)) + end -A = sparse([Inf -Inf Inf -Inf; - Inf Inf -Inf -Inf]) -for (tup, rval, rind) in [((1,), [Inf -Inf -Inf -Inf], [CartesianIndex(1,1) CartesianIndex(1,2) CartesianIndex(2,3) CartesianIndex(1,4)]), - ((2,), reshape([-Inf -Inf], 2, 1), reshape([CartesianIndex(1,2),CartesianIndex(2,3)], 2, 1)), - ((1,2), fill(-Inf,1,1),fill(CartesianIndex(1,2),1,1))] - @test isequal(findmin(A, tup), (rval, rind)) -end + for (tup, rval, rind) in [((1,), [Inf Inf Inf -Inf], [CartesianIndex(1,1) CartesianIndex(2,2) CartesianIndex(1,3) CartesianIndex(1,4)]), + ((2,), reshape([Inf Inf], 2, 1), reshape([CartesianIndex(1,1),CartesianIndex(2,1)], 2, 1)), + ((1,2), fill(Inf,1,1),fill(CartesianIndex(1,1),1,1))] + @test isequal(findmax(A, tup), (rval, rind)) + end -for (tup, rval, rind) in [((1,), [Inf Inf Inf -Inf], [CartesianIndex(1,1) CartesianIndex(2,2) CartesianIndex(1,3) CartesianIndex(1,4)]), - ((2,), reshape([Inf Inf], 2, 1), reshape([CartesianIndex(1,1),CartesianIndex(2,1)], 2, 1)), - ((1,2), fill(Inf,1,1),fill(CartesianIndex(1,1),1,1))] - @test isequal(findmax(A, tup), (rval, rind)) -end + A = sparse([BigInt(10)]) + for (tup, rval, rind) in [((2,), [BigInt(10)], [1])] + @test isequal(findmin(A, tup), (rval, rind)) + end -A = sparse([BigInt(10)]) -for (tup, rval, rind) in [((2,), [BigInt(10)], [1])] - @test isequal(findmin(A, tup), (rval, rind)) -end + for (tup, rval, rind) in [((2,), [BigInt(10)], [1])] + @test isequal(findmax(A, tup), 
(rval, rind)) + end -for (tup, rval, rind) in [((2,), [BigInt(10)], [1])] - @test isequal(findmax(A, tup), (rval, rind)) -end + A = sparse([BigInt(-10)]) + for (tup, rval, rind) in [((2,), [BigInt(-10)], [1])] + @test isequal(findmin(A, tup), (rval, rind)) + end -A = sparse([BigInt(-10)]) -for (tup, rval, rind) in [((2,), [BigInt(-10)], [1])] - @test isequal(findmin(A, tup), (rval, rind)) -end + for (tup, rval, rind) in [((2,), [BigInt(-10)], [1])] + @test isequal(findmax(A, tup), (rval, rind)) + end -for (tup, rval, rind) in [((2,), [BigInt(-10)], [1])] - @test isequal(findmax(A, tup), (rval, rind)) -end + A = sparse([BigInt(10) BigInt(-10)]) + for (tup, rval, rind) in [((2,), reshape([BigInt(-10)], 1, 1), reshape([CartesianIndex(1,2)], 1, 1))] + @test isequal(findmin(A, tup), (rval, rind)) + end -A = sparse([BigInt(10) BigInt(-10)]) -for (tup, rval, rind) in [((2,), reshape([BigInt(-10)], 1, 1), reshape([CartesianIndex(1,2)], 1, 1))] - @test isequal(findmin(A, tup), (rval, rind)) -end + for (tup, rval, rind) in [((2,), reshape([BigInt(10)], 1, 1), reshape([CartesianIndex(1,1)], 1, 1))] + @test isequal(findmax(A, tup), (rval, rind)) + end -for (tup, rval, rind) in [((2,), reshape([BigInt(10)], 1, 1), reshape([CartesianIndex(1,1)], 1, 1))] - @test isequal(findmax(A, tup), (rval, rind)) + A = sparse(["a", "b"]) + @test_throws MethodError findmin(A, 1) end -A = sparse(["a", "b"]) -@test_throws MethodError findmin(A, 1) - # Support the case when user defined `zero` and `isless` for non-numerical type struct CustomType x::String end Base.zero(::Type{CustomType}) = CustomType("") Base.isless(x::CustomType, y::CustomType) = isless(x.x, y.x) -A = sparse([CustomType("a"), CustomType("b")]) +@testset "findmin/findmax for non-numerical type" begin + A = sparse([CustomType("a"), CustomType("b")]) -for (tup, rval, rind) in [((1,), [CustomType("a")], [1])] - @test isequal(findmin(A, tup), (rval, rind)) -end + for (tup, rval, rind) in [((1,), [CustomType("a")], [1])] + @test isequal(findmin(A, tup), (rval, rind)) + end -for (tup, rval, rind) in [((1,), [CustomType("b")], [2])] - @test isequal(findmax(A, tup), (rval, rind)) + for (tup, rval, rind) in [((1,), [CustomType("b")], [2])] + @test isequal(findmax(A, tup), (rval, rind)) + end end @testset "findn" begin @@ -1190,9 +1192,9 @@ function test_getindex_algs(A::SparseMatrixCSC{Tv,Ti}, I::AbstractVector, J::Abs ((minj < 1) || (maxj > n)) && BoundsError() end - (alg == 0) ? Base.SparseArrays.getindex_I_sorted_bsearch_A(A, I, J) : - (alg == 1) ? Base.SparseArrays.getindex_I_sorted_bsearch_I(A, I, J) : - Base.SparseArrays.getindex_I_sorted_linear(A, I, J) + (alg == 0) ? SparseArrays.getindex_I_sorted_bsearch_A(A, I, J) : + (alg == 1) ? 
SparseArrays.getindex_I_sorted_bsearch_I(A, I, J) : + SparseArrays.getindex_I_sorted_linear(A, I, J) end @testset "test_getindex_algs" begin @@ -1396,8 +1398,8 @@ end local A = guardsrand(1234321) do triu(sprand(10, 10, 0.2)) end - @test Base.droptol!(A, 0.01).colptr == [1,1,1,2,2,3,4,6,6,7,9] - @test isequal(Base.droptol!(sparse([1], [1], [1]), 1), SparseMatrixCSC(1, 1, Int[1, 1], Int[], Int[])) + @test SparseArrays.droptol!(A, 0.01).colptr == [1,1,1,2,2,3,4,6,6,7,9] + @test isequal(SparseArrays.droptol!(sparse([1], [1], [1]), 1), SparseMatrixCSC(1, 1, Int[1, 1], Int[], Int[])) end @testset "dropzeros[!]" begin @@ -1487,10 +1489,10 @@ end @testset "expandptr" begin local A = sparse(1.0I, 5, 5) - @test Base.SparseArrays.expandptr(A.colptr) == 1:5 + @test SparseArrays.expandptr(A.colptr) == 1:5 A[1,2] = 1 - @test Base.SparseArrays.expandptr(A.colptr) == [1; 2; 2; 3; 4; 5] - @test_throws ArgumentError Base.SparseArrays.expandptr([2; 3]) + @test SparseArrays.expandptr(A.colptr) == [1; 2; 2; 3; 4; 5] + @test_throws ArgumentError SparseArrays.expandptr([2; 3]) end @testset "triu/tril" begin @@ -1697,13 +1699,13 @@ end Ari = ceil.(Int64, 100*Ar) if Base.USE_GPL_LIBS # NOTE: normestinv is probabilistic, so requires a fixed seed (set above in srand(1234)) - @test Base.SparseArrays.normestinv(Ac,3) ≈ norm(inv(Array(Ac)),1) atol=1e-4 - @test Base.SparseArrays.normestinv(Aci,3) ≈ norm(inv(Array(Aci)),1) atol=1e-4 - @test Base.SparseArrays.normestinv(Ar) ≈ norm(inv(Array(Ar)),1) atol=1e-4 - @test_throws ArgumentError Base.SparseArrays.normestinv(Ac,0) - @test_throws ArgumentError Base.SparseArrays.normestinv(Ac,21) + @test SparseArrays.normestinv(Ac,3) ≈ norm(inv(Array(Ac)),1) atol=1e-4 + @test SparseArrays.normestinv(Aci,3) ≈ norm(inv(Array(Aci)),1) atol=1e-4 + @test SparseArrays.normestinv(Ar) ≈ norm(inv(Array(Ar)),1) atol=1e-4 + @test_throws ArgumentError SparseArrays.normestinv(Ac,0) + @test_throws ArgumentError SparseArrays.normestinv(Ac,21) end - @test_throws DimensionMismatch Base.SparseArrays.normestinv(sprand(3,5,.9)) + @test_throws DimensionMismatch SparseArrays.normestinv(sprand(3,5,.9)) end @testset "issue #13008" begin @@ -1852,7 +1854,7 @@ end # Test temporary fix for issue #16548 in PR #16979. Somewhat brittle. Expect to remove with `\` revisions. 
@testset "issue #16548" begin ms = methods(\, (SparseMatrixCSC, AbstractVecOrMat)).ms - @test all(m -> m.module == Base.SparseArrays, ms) + @test all(m -> m.module == SparseArrays, ms) end @testset "row indexing a SparseMatrixCSC with non-Int integer type" begin @@ -1912,7 +1914,7 @@ end # Check calling of unary minus method specialized for SparseMatrixCSCs @testset "issue #19503" begin - @test which(-, (SparseMatrixCSC,)).module == Base.SparseArrays + @test which(-, (SparseMatrixCSC,)).module == SparseArrays end @testset "issue #14398" begin @@ -1921,16 +1923,16 @@ end @testset "dropstored issue #20513" begin x = sparse(rand(3,3)) - Base.SparseArrays.dropstored!(x, 1, 1) + SparseArrays.dropstored!(x, 1, 1) @test x[1, 1] == 0.0 @test x.colptr == [1, 3, 6, 9] - Base.SparseArrays.dropstored!(x, 2, 1) + SparseArrays.dropstored!(x, 2, 1) @test x.colptr == [1, 2, 5, 8] @test x[2, 1] == 0.0 - Base.SparseArrays.dropstored!(x, 2, 2) + SparseArrays.dropstored!(x, 2, 2) @test x.colptr == [1, 2, 4, 7] @test x[2, 2] == 0.0 - Base.SparseArrays.dropstored!(x, 2, 3) + SparseArrays.dropstored!(x, 2, 3) @test x.colptr == [1, 2, 4, 6] @test x[2, 3] == 0.0 end @@ -1949,47 +1951,47 @@ end @testset "show" begin io = IOBuffer() show(io, MIME"text/plain"(), sparse(Int64[1], Int64[1], [1.0])) - @test String(take!(io)) == "1×1 SparseMatrixCSC{Float64,Int64} with 1 stored entry:\n [1, 1] = 1.0" + @test String(take!(io)) == "1×1 SparseArrays.SparseMatrixCSC{Float64,Int64} with 1 stored entry:\n [1, 1] = 1.0" show(io, MIME"text/plain"(), spzeros(Float32, Int64, 2, 2)) - @test String(take!(io)) == "2×2 SparseMatrixCSC{Float32,Int64} with 0 stored entries" + @test String(take!(io)) == "2×2 SparseArrays.SparseMatrixCSC{Float32,Int64} with 0 stored entries" ioc = IOContext(io, :displaysize => (5, 80), :limit => true) show(ioc, MIME"text/plain"(), sparse(Int64[1], Int64[1], [1.0])) - @test String(take!(io)) == "1×1 SparseMatrixCSC{Float64,Int64} with 1 stored entry:\n [1, 1] = 1.0" + @test String(take!(io)) == "1×1 SparseArrays.SparseMatrixCSC{Float64,Int64} with 1 stored entry:\n [1, 1] = 1.0" show(ioc, MIME"text/plain"(), sparse(Int64[1, 1], Int64[1, 2], [1.0, 2.0])) - @test String(take!(io)) == "1×2 SparseMatrixCSC{Float64,Int64} with 2 stored entries:\n ⋮" + @test String(take!(io)) == "1×2 SparseArrays.SparseMatrixCSC{Float64,Int64} with 2 stored entries:\n ⋮" # even number of rows ioc = IOContext(io, :displaysize => (8, 80), :limit => true) show(ioc, MIME"text/plain"(), sparse(Int64[1,2,3,4], Int64[1,1,2,2], [1.0,2.0,3.0,4.0])) - @test String(take!(io)) == string("4×2 SparseMatrixCSC{Float64,Int64} with 4 stored entries:\n [1, 1]", + @test String(take!(io)) == string("4×2 SparseArrays.SparseMatrixCSC{Float64,Int64} with 4 stored entries:\n [1, 1]", " = 1.0\n [2, 1] = 2.0\n [3, 2] = 3.0\n [4, 2] = 4.0") show(ioc, MIME"text/plain"(), sparse(Int64[1,2,3,4,5], Int64[1,1,2,2,3], [1.0,2.0,3.0,4.0,5.0])) - @test String(take!(io)) == string("5×3 SparseMatrixCSC{Float64,Int64} with 5 stored entries:\n [1, 1]", + @test String(take!(io)) == string("5×3 SparseArrays.SparseMatrixCSC{Float64,Int64} with 5 stored entries:\n [1, 1]", " = 1.0\n ⋮\n [5, 3] = 5.0") show(ioc, MIME"text/plain"(), sparse(fill(1.,5,3))) - @test String(take!(io)) == string("5×3 SparseMatrixCSC{Float64,$Int} with 15 stored entries:\n [1, 1]", + @test String(take!(io)) == string("5×3 SparseArrays.SparseMatrixCSC{Float64,$Int} with 15 stored entries:\n [1, 1]", " = 1.0\n ⋮\n [5, 3] = 1.0") # odd number of rows ioc = IOContext(io, :displaysize => (9, 80), 
:limit => true) show(ioc, MIME"text/plain"(), sparse(Int64[1,2,3,4,5], Int64[1,1,2,2,3], [1.0,2.0,3.0,4.0,5.0])) - @test String(take!(io)) == string("5×3 SparseMatrixCSC{Float64,Int64} with 5 stored entries:\n [1, 1]", + @test String(take!(io)) == string("5×3 SparseArrays.SparseMatrixCSC{Float64,Int64} with 5 stored entries:\n [1, 1]", " = 1.0\n [2, 1] = 2.0\n [3, 2] = 3.0\n [4, 2] = 4.0\n [5, 3] = 5.0") show(ioc, MIME"text/plain"(), sparse(Int64[1,2,3,4,5,6], Int64[1,1,2,2,3,3], [1.0,2.0,3.0,4.0,5.0,6.0])) - @test String(take!(io)) == string("6×3 SparseMatrixCSC{Float64,Int64} with 6 stored entries:\n [1, 1]", + @test String(take!(io)) == string("6×3 SparseArrays.SparseMatrixCSC{Float64,Int64} with 6 stored entries:\n [1, 1]", " = 1.0\n [2, 1] = 2.0\n ⋮\n [5, 3] = 5.0\n [6, 3] = 6.0") show(ioc, MIME"text/plain"(), sparse(fill(1.,6,3))) - @test String(take!(io)) == string("6×3 SparseMatrixCSC{Float64,$Int} with 18 stored entries:\n [1, 1]", + @test String(take!(io)) == string("6×3 SparseArrays.SparseMatrixCSC{Float64,$Int} with 18 stored entries:\n [1, 1]", " = 1.0\n [2, 1] = 1.0\n ⋮\n [5, 3] = 1.0\n [6, 3] = 1.0") ioc = IOContext(io, :displaysize => (9, 80)) show(ioc, MIME"text/plain"(), sparse(Int64[1,2,3,4,5,6], Int64[1,1,2,2,3,3], [1.0,2.0,3.0,4.0,5.0,6.0])) - @test String(take!(io)) == string("6×3 SparseMatrixCSC{Float64,Int64} with 6 stored entries:\n [1, 1] = 1.0\n", + @test String(take!(io)) == string("6×3 SparseArrays.SparseMatrixCSC{Float64,Int64} with 6 stored entries:\n [1, 1] = 1.0\n", " [2, 1] = 2.0\n [3, 2] = 3.0\n [4, 2] = 4.0\n [5, 3] = 5.0\n [6, 3] = 6.0") end @@ -2079,7 +2081,7 @@ end a = sparse(rand(3,3) .+ 0.1) b = similar(a, Float32, Int32) c = similar(b, Float32, Int32) - Base.SparseArrays.dropstored!(b, 1, 1) + SparseArrays.dropstored!(b, 1, 1) @test length(c.rowval) == 9 @test length(c.nzval) == 9 end diff --git a/test/sparse/sparsevector.jl b/stdlib/SparseArrays/test/sparsevector.jl similarity index 98% rename from test/sparse/sparsevector.jl rename to stdlib/SparseArrays/test/sparsevector.jl index 721bcf6f6f54d2..5558e466e2c90d 100644 --- a/test/sparse/sparsevector.jl +++ b/stdlib/SparseArrays/test/sparsevector.jl @@ -260,13 +260,13 @@ end @testset "dropstored!" 
begin x = SparseVector(10, [2, 7, 9], [2.0, 7.0, 9.0]) # Test argument bounds checking for dropstored!(x, i) - @test_throws BoundsError Base.SparseArrays.dropstored!(x, 0) - @test_throws BoundsError Base.SparseArrays.dropstored!(x, 11) + @test_throws BoundsError SparseArrays.dropstored!(x, 0) + @test_throws BoundsError SparseArrays.dropstored!(x, 11) # Test behavior of dropstored!(x, i) # --> Test dropping a single stored entry - @test Base.SparseArrays.dropstored!(x, 2) == SparseVector(10, [7, 9], [7.0, 9.0]) + @test SparseArrays.dropstored!(x, 2) == SparseVector(10, [7, 9], [7.0, 9.0]) # --> Test dropping a single nonstored entry - @test Base.SparseArrays.dropstored!(x, 5) == SparseVector(10, [7, 9], [7.0, 9.0]) + @test SparseArrays.dropstored!(x, 5) == SparseVector(10, [7, 9], [7.0, 9.0]) end @testset "find and findnz" begin @@ -680,7 +680,7 @@ end @test spresvec == op.(densevec) @test all(!iszero, spresvec.nzval) resvaltype = typeof(op(zero(eltype(spvec)))) - resindtype = Base.SparseArrays.indtype(spvec) + resindtype = SparseArrays.indtype(spvec) @test isa(spresvec, SparseVector{resvaltype,resindtype}) end end @@ -696,7 +696,7 @@ end spresvec = op.(spvec) @test spresvec == op.(densevec) resvaltype = typeof(op(zero(eltype(spvec)))) - resindtype = Base.SparseArrays.indtype(spvec) + resindtype = SparseArrays.indtype(spvec) @test isa(spresvec, SparseVector{resvaltype,resindtype}) end end @@ -1020,17 +1020,17 @@ end @testset "fkeep!" begin x = sparsevec(1:7, [3., 2., -1., 1., -2., -3., 3.], 7) # droptol - xdrop = Base.droptol!(copy(x), 1.5) + xdrop = SparseArrays.droptol!(copy(x), 1.5) @test exact_equal(xdrop, SparseVector(7, [1, 2, 5, 6, 7], [3., 2., -2., -3., 3.])) - Base.droptol!(xdrop, 2.5) + SparseArrays.droptol!(xdrop, 2.5) @test exact_equal(xdrop, SparseVector(7, [1, 6, 7], [3., -3., 3.])) - Base.droptol!(xdrop, 3.) + SparseArrays.droptol!(xdrop, 3.) @test exact_equal(xdrop, SparseVector(7, Int[], Float64[])) xdrop = copy(x) # This will keep index 1, 3, 4, 7 in xdrop f_drop(i, x) = (abs(x) == 1.) || (i in [1, 7]) - Base.SparseArrays.fkeep!(xdrop, f_drop) + SparseArrays.fkeep!(xdrop, f_drop) @test exact_equal(xdrop, SparseVector(7, [1, 3, 4, 7], [3., -1., 1., 3.])) end @testset "dropzeros[!]" begin @@ -1063,7 +1063,7 @@ end # original dropzeros! 
test xdrop = sparsevec(1:7, [3., 2., -1., 1., -2., -3., 3.], 7) xdrop.nzval[[2, 4, 6]] = 0.0 - Base.SparseArrays.dropzeros!(xdrop) + SparseArrays.dropzeros!(xdrop) @test exact_equal(xdrop, SparseVector(7, [1, 3, 5, 7], [3, -1., -2., 3.])) end end @@ -1161,9 +1161,9 @@ mutable struct t20488 end @testset "show" begin io = IOBuffer() show(io, MIME"text/plain"(), sparsevec(Int64[1], [1.0])) - @test String(take!(io)) == "1-element SparseVector{Float64,Int64} with 1 stored entry:\n [1] = 1.0" + @test String(take!(io)) == "1-element SparseArrays.SparseVector{Float64,Int64} with 1 stored entry:\n [1] = 1.0" show(io, MIME"text/plain"(), spzeros(Float64, Int64, 2)) - @test String(take!(io)) == "2-element SparseVector{Float64,Int64} with 0 stored entries" + @test String(take!(io)) == "2-element SparseArrays.SparseVector{Float64,Int64} with 0 stored entries" show(io, similar(sparsevec(rand(3) .+ 0.1), t20488)) @test String(take!(io)) == " [1] = #undef\n [2] = #undef\n [3] = #undef" end diff --git a/stdlib/SuiteSparse/src/cholmod.jl b/stdlib/SuiteSparse/src/cholmod.jl index 183010b62dc98f..8d65ceafc014e2 100644 --- a/stdlib/SuiteSparse/src/cholmod.jl +++ b/stdlib/SuiteSparse/src/cholmod.jl @@ -9,7 +9,7 @@ import Base.LinAlg: (\), cholfact, cholfact!, det, diag, ishermitian, isposdef, issuccess, issymmetric, ldltfact, ldltfact!, logdet -using ..SparseArrays +using SparseArrays using Base.Printf.@printf export @@ -17,7 +17,7 @@ export Factor, Sparse -import ..SparseArrays: AbstractSparseMatrix, SparseMatrixCSC, indtype, sparse, spzeros, nnz +import SparseArrays: AbstractSparseMatrix, SparseMatrixCSC, indtype, sparse, spzeros, nnz import ..increment, ..increment!, ..decrement, ..decrement! diff --git a/stdlib/SuiteSparse/src/spqr.jl b/stdlib/SuiteSparse/src/spqr.jl index e844a66a9688e3..acd3d37bbd2c7d 100644 --- a/stdlib/SuiteSparse/src/spqr.jl +++ b/stdlib/SuiteSparse/src/spqr.jl @@ -21,7 +21,7 @@ const ORDERING_BESTAMD = Int32(9) # try COLAMD and AMD; pick best# # tried. If there is a high fill-in with AMD then try METIS(A'A) and take # the best of AMD and METIS. METIS is not tried if it isn't installed. -using ..SparseArrays: SparseMatrixCSC +using SparseArrays: SparseMatrixCSC using ..SuiteSparse.CHOLMOD using ..SuiteSparse.CHOLMOD: change_stype!, free! diff --git a/stdlib/SuiteSparse/src/umfpack.jl b/stdlib/SuiteSparse/src/umfpack.jl index 3647445da3098f..371f1dc5d91d7b 100644 --- a/stdlib/SuiteSparse/src/umfpack.jl +++ b/stdlib/SuiteSparse/src/umfpack.jl @@ -7,8 +7,8 @@ export UmfpackLU import Base: (\), findnz, getproperty, show, size import Base.LinAlg: Factorization, det, lufact, ldiv! -using ..SparseArrays -import ..SparseArrays: nnz +using SparseArrays +import SparseArrays: nnz import ..increment, ..increment!, ..decrement, ..decrement! diff --git a/stdlib/SuiteSparse/test/runtests.jl b/stdlib/SuiteSparse/test/runtests.jl index f27f76068991a7..4cf807452f474d 100644 --- a/stdlib/SuiteSparse/test/runtests.jl +++ b/stdlib/SuiteSparse/test/runtests.jl @@ -1,7 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license using Test -using SuiteSparse +using SuiteSparse, SparseArrays if Base.USE_GPL_LIBS include("umfpack.jl") diff --git a/test/abstractarray.jl b/test/abstractarray.jl index 0fc998af8b13cf..9a6b4c1fd65e68 100644 --- a/test/abstractarray.jl +++ b/test/abstractarray.jl @@ -1,5 +1,7 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license +using SparseArrays + A = rand(5,4,3) @testset "Bounds checking" begin @test checkbounds(Bool, A, 1, 1, 1) == true diff --git a/test/ambiguous.jl b/test/ambiguous.jl index b9532891ca5588..d47d0082e5c03f 100644 --- a/test/ambiguous.jl +++ b/test/ambiguous.jl @@ -9,6 +9,8 @@ ambig(x::Int, y::Int) = 4 ambig(x::Number, y) = 5 # END OF LINE NUMBER SENSITIVITY +using SparseArrays + # For curmod_* include("testenv.jl") @@ -278,9 +280,9 @@ end pop!(need_to_handle_undef_sparam, which(Base.LinAlg.promote_leaf_eltypes, (Union{AbstractArray{T}, Tuple{Vararg{T}}} where T<:Number,))) pop!(need_to_handle_undef_sparam, which(Base.LinAlg.promote_leaf_eltypes, (Union{AbstractArray{T}, Tuple{Vararg{T}}} where T<:(AbstractArray{<:Number}),))) - pop!(need_to_handle_undef_sparam, which(Base.SparseArrays._absspvec_vcat, (AbstractSparseArray{Tv, Ti, 1} where {Tv, Ti},))) - pop!(need_to_handle_undef_sparam, which(Base.SparseArrays._absspvec_hcat, (AbstractSparseArray{Tv, Ti, 1} where {Tv, Ti},))) - pop!(need_to_handle_undef_sparam, which(Base.cat, (Any, Base.SparseArrays._TypedDenseConcatGroup{T} where T))) + # pop!(need_to_handle_undef_sparam, which(SparseArrays._absspvec_vcat, (AbstractSparseArray{Tv, Ti, 1} where {Tv, Ti},))) + # pop!(need_to_handle_undef_sparam, which(SparseArrays._absspvec_hcat, (AbstractSparseArray{Tv, Ti, 1} where {Tv, Ti},))) + pop!(need_to_handle_undef_sparam, which(Base.cat, (Any, SparseArrays._TypedDenseConcatGroup{T} where T))) pop!(need_to_handle_undef_sparam, which(Base.float, Tuple{AbstractArray{Union{Missing, T},N} where {T, N}})) pop!(need_to_handle_undef_sparam, which(Base.convert, Tuple{Type{Union{Missing, T}} where T, Any})) pop!(need_to_handle_undef_sparam, which(Base.promote_rule, Tuple{Type{Union{Missing, S}} where S, Type{T} where T})) diff --git a/test/arrayops.jl b/test/arrayops.jl index 79a91b9a46405e..10afe8788b0b36 100644 --- a/test/arrayops.jl +++ b/test/arrayops.jl @@ -3,6 +3,7 @@ # Array test isdefined(Main, :TestHelpers) || @eval Main include("TestHelpers.jl") using Main.TestHelpers.OAs +using SparseArrays @testset "basics" begin @test length([1, 2, 3]) == 3 diff --git a/test/choosetests.jl b/test/choosetests.jl index 21b8585bcbbd21..eca802ee99d281 100644 --- a/test/choosetests.jl +++ b/test/choosetests.jl @@ -36,7 +36,7 @@ function choosetests(choices = []) "char", "strings", "triplequote", "unicode", "intrinsics", "dict", "hashing", "iobuffer", "staged", "offsetarray", "arrayops", "tuple", "reduce", "reducedim", "random", "abstractarray", - "intfuncs", "simdloop", "vecelement", "sparse", + "intfuncs", "simdloop", "vecelement", "bitarray", "copy", "math", "fastmath", "functional", "iterators", "operators", "path", "ccall", "parse", "loading", "bigint", "bigfloat", "sorting", "statistics", "spawn", "backtrace", @@ -101,15 +101,6 @@ function choosetests(choices = []) prepend!(tests, stringtests) end - sparsetests = ["sparse/sparse", "sparse/sparsevector", "sparse/higherorderfns"] - if "sparse" in skip_tests - filter!(x -> (x != "sparse" && !(x in sparsetests)), tests) - elseif "sparse" in tests - # specifically selected case - filter!(x -> x != "sparse", tests) - prepend!(tests, sparsetests) - end - # do subarray before sparse but after linalg if "subarray" in skip_tests filter!(x -> x != "subarray", tests) diff --git a/test/compile.jl b/test/compile.jl index 2fc3702c2885e8..d4fa1e4a5b46e4 100644 --- a/test/compile.jl +++ b/test/compile.jl @@ -220,7 +220,7 @@ try Dict(s => Base.module_uuid(Base.root_module(s)) for s in 
[:Base64, :CRC32c, :Dates, :DelimitedFiles, :FileWatching, :Future, :IterativeEigensolvers, :Logging, :Mmap, :Printf, :Profile, :SharedArrays, - :SuiteSparse, :Test, :Unicode, :Distributed])) + :SparseArrays, :SuiteSparse, :Test, :Unicode, :Distributed])) @test discard_module.(deps) == deps1 @test current_task()(0x01, 0x4000, 0x30031234) == 2 diff --git a/test/core.jl b/test/core.jl index 7b470ec4113a08..aeb3c509670714 100644 --- a/test/core.jl +++ b/test/core.jl @@ -3,6 +3,8 @@ # test core language features const Bottom = Union{} +using SparseArrays + # For curmod_* include("testenv.jl") diff --git a/test/generic_map_tests.jl b/test/generic_map_tests.jl index 6de24a5f5f42f3..5c79ea08f909b9 100644 --- a/test/generic_map_tests.jl +++ b/test/generic_map_tests.jl @@ -76,7 +76,6 @@ end function run_map_equivalence_tests(mapf) testmap_equivalence(mapf, identity, (1,2,3,4)) - testmap_equivalence(mapf, x->x>0 ? 1.0 : 0.0, sparse(sparse(1.0I, 5, 5))) testmap_equivalence(mapf, (x,y,z)->x+y+z, 1,2,3) testmap_equivalence(mapf, x->x ? false : true, BitMatrix(uninitialized, 10,10)) testmap_equivalence(mapf, x->"foobar", BitMatrix(uninitialized, 10,10)) diff --git a/test/hashing.jl b/test/hashing.jl index b414c05c52ceb0..afb020127e6387 100644 --- a/test/hashing.jl +++ b/test/hashing.jl @@ -1,5 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +using SparseArrays + types = Any[ Bool, Int8, UInt8, Int16, UInt16, Int32, UInt32, Int64, UInt64, Float32, Float64, diff --git a/test/linalg/adjtrans.jl b/test/linalg/adjtrans.jl index c84a551a84c332..8db9afb903e21d 100644 --- a/test/linalg/adjtrans.jl +++ b/test/linalg/adjtrans.jl @@ -4,6 +4,7 @@ using Test using Base.LinAlg: Adjoint, Transpose +using SparseArrays @testset "Adjoint and Transpose inner constructor basics" begin intvec, intmat = [1, 2], [1 2; 3 4] diff --git a/test/linalg/bidiag.jl b/test/linalg/bidiag.jl index 017aa2cdcdef0f..b1eedd0b9ff7ad 100644 --- a/test/linalg/bidiag.jl +++ b/test/linalg/bidiag.jl @@ -3,6 +3,7 @@ using Test using Base.LinAlg: mul! import Base.LinAlg: BlasReal, BlasFloat +using SparseArrays n = 10 #Size of test matrix srand(1) diff --git a/test/linalg/diagonal.jl b/test/linalg/diagonal.jl index 70e14dfbea6af7..9d5aa25e6b84cf 100644 --- a/test/linalg/diagonal.jl +++ b/test/linalg/diagonal.jl @@ -3,6 +3,7 @@ using Test using Base.LinAlg: mul!, ldiv!, rdiv! import Base.LinAlg: BlasFloat, BlasComplex, SingularException +using SparseArrays n=12 #Size of matrix problem to test srand(1) diff --git a/test/linalg/special.jl b/test/linalg/special.jl index 1c50bde7a24d6f..85239901e0379d 100644 --- a/test/linalg/special.jl +++ b/test/linalg/special.jl @@ -1,6 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license using Test +using SparseArrays using Base.LinAlg: mul! diff --git a/test/linalg/symmetric.jl b/test/linalg/symmetric.jl index 92fa6bde38c303..9a59addca3e617 100644 --- a/test/linalg/symmetric.jl +++ b/test/linalg/symmetric.jl @@ -1,6 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license using Test +using SparseArrays srand(101) diff --git a/test/linalg/triangular.jl b/test/linalg/triangular.jl index f532fe7468234e..91f046d8c44637 100644 --- a/test/linalg/triangular.jl +++ b/test/linalg/triangular.jl @@ -5,6 +5,7 @@ using Test using Base.LinAlg: BlasFloat, errorbounds, full!, naivesub!, transpose!, UnitUpperTriangular, UnitLowerTriangular, mul!, rdiv! 
+using SparseArrays debug && println("Triangular matrices") diff --git a/test/linalg/tridiag.jl b/test/linalg/tridiag.jl index 2fd6b4a4ca5a0e..7c09384d2eff35 100644 --- a/test/linalg/tridiag.jl +++ b/test/linalg/tridiag.jl @@ -1,5 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +using SparseArrays + #Test equivalence of eigenvectors/singular vectors taking into account possible phase (sign) differences function test_approx_eq_vecs(a::StridedVecOrMat{S}, b::StridedVecOrMat{T}, error=nothing) where {S<:Real,T<:Real} n = size(a, 1) diff --git a/test/linalg/uniformscaling.jl b/test/linalg/uniformscaling.jl index b163144ab7b993..ed4472dc18b1f7 100644 --- a/test/linalg/uniformscaling.jl +++ b/test/linalg/uniformscaling.jl @@ -1,6 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license using Test +using SparseArrays srand(123) diff --git a/test/random.jl b/test/random.jl index e2ca41ef438877..5a2443ef4933c0 100644 --- a/test/random.jl +++ b/test/random.jl @@ -2,6 +2,7 @@ isdefined(Main, :TestHelpers) || @eval Main include(joinpath(dirname(@__FILE__), "TestHelpers.jl")) using Main.TestHelpers.OAs +using SparseArrays using Base.Random.dSFMT using Base.Random: Sampler, SamplerRangeFast, SamplerRangeInt, MT_CACHE_F, MT_CACHE_I diff --git a/test/show.jl b/test/show.jl index 8403706b7342a2..4ccbe595fce6d3 100644 --- a/test/show.jl +++ b/test/show.jl @@ -1,5 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +using SparseArrays + # For curmod_* include("testenv.jl")
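Editor's note (illustrative, not part of the patch): the deprecations added above name a direct replacement for each removed sparse helper, and the test changes show the namespace moving from `Base.SparseArrays` to the top-level `SparseArrays` module. The sketch below just collects those replacement calls into one small runnable snippet for the patch's Julia version; the sizes, values, and variable names are assumptions chosen for the example, not taken from the patch.

    using SparseArrays   # `I` is exported from Base (via Base.LinAlg) at this point in 0.7-DEV;
                         # on later releases it comes from the LinearAlgebra stdlib instead

    v = [1.0, 2.0, 3.0]

    # speye(3) / speye(Float64, 3)      ->  sparse(1.0I, 3, 3) or SparseMatrixCSC{Float64}(I, 3, 3)
    E  = sparse(1.0I, 3, 3)
    Ef = SparseMatrixCSC{Float64}(I, 3, 3)

    # spdiagm(v) / spdiagm(v, 1)        ->  the Pair form, which now returns a square matrix
    D0 = spdiagm(0 => v)
    D1 = spdiagm(1 => v)

    # full(S)                           ->  Array(S) (or Matrix(S) / Vector(S))
    M = Array(D0)

    # Base.SparseArrays.dropstored!     ->  SparseArrays.dropstored! (still unexported, new home)
    SparseArrays.dropstored!(D0, 1, 1)

    # Ac_mul_B(A, x) / At_ldiv_B(A, b)  ->  plain adjoint/transpose call forms
    x  = sparsevec([2.0, 0.0, 1.0])
    b  = [1.0, 2.0, 3.0]
    y1 = adjoint(E) * x
    y2 = transpose(E) \ b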