From 6773f2de078f3a56741a4a3d8c6807622a43fa5e Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Fri, 18 May 2018 11:31:07 -0700 Subject: [PATCH 01/23] Deprecate lufact to lu. --- NEWS.md | 5 ++ stdlib/LinearAlgebra/docs/src/index.md | 1 - stdlib/LinearAlgebra/src/LinearAlgebra.jl | 1 - stdlib/LinearAlgebra/src/dense.jl | 12 ++-- stdlib/LinearAlgebra/src/deprecated.jl | 8 +++ stdlib/LinearAlgebra/src/factorization.jl | 2 +- stdlib/LinearAlgebra/src/generic.jl | 6 +- stdlib/LinearAlgebra/src/lu.jl | 76 ++++++++++------------- stdlib/LinearAlgebra/src/symmetric.jl | 6 +- stdlib/LinearAlgebra/test/generic.jl | 4 +- stdlib/LinearAlgebra/test/lapack.jl | 2 +- stdlib/LinearAlgebra/test/lu.jl | 32 +++++----- stdlib/LinearAlgebra/test/symmetric.jl | 2 +- stdlib/LinearAlgebra/test/triangular.jl | 14 ++--- stdlib/SparseArrays/src/linalg.jl | 7 +-- stdlib/SparseArrays/test/sparse.jl | 3 +- stdlib/SuiteSparse/src/cholmod.jl | 6 +- stdlib/SuiteSparse/src/deprecated.jl | 9 +++ stdlib/SuiteSparse/src/umfpack.jl | 26 ++++---- stdlib/SuiteSparse/test/umfpack.jl | 26 ++++---- 20 files changed, 129 insertions(+), 119 deletions(-) diff --git a/NEWS.md b/NEWS.md index a0932388e326f..cd7873ccf01a6 100644 --- a/NEWS.md +++ b/NEWS.md @@ -227,6 +227,9 @@ This section lists changes that do not have deprecation warnings. * `readuntil` now does *not* include the delimiter in its result, matching the behavior of `readline`. Pass `keep=true` to get the old behavior ([#25633]). + * `lu` methods now return decomposition objects such as `LU` rather than + tuples of arrays or tuples of numbers ([#27159]). + * `countlines` now always counts the last non-empty line even if it does not end with EOL, matching the behavior of `eachline` and `readlines` ([#25845]). @@ -688,6 +691,8 @@ Deprecated or removed * The keyword `immutable` is fully deprecated to `struct`, and `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). + * `lufact` has been deprecated to `lu` ([#27159]). + * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. Instead, reshape the array or add trailing indices so the dimensionality and number of indices diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 8ccffe52ee8de..ed7bbef38db51 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -312,7 +312,6 @@ LinearAlgebra.LowerTriangular LinearAlgebra.UpperTriangular LinearAlgebra.UniformScaling LinearAlgebra.lu -LinearAlgebra.lufact LinearAlgebra.lufact! LinearAlgebra.chol LinearAlgebra.cholfact diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index eeef2b912be53..b11c223c87db2 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -112,7 +112,6 @@ export lowrankupdate, lowrankupdate!, lu, - lufact, lufact!, lyap, mul!, diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 101b6c7230e63..8d84cc2e4ea9e 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -721,7 +721,7 @@ function inv(A::StridedMatrix{T}) where T elseif istril(AA) Ai = tril!(parent(inv(LowerTriangular(AA)))) else - Ai = inv!(lufact(AA)) + Ai = inv!(lu(AA)) Ai = convert(typeof(parent(Ai)), Ai) end return Ai @@ -1118,9 +1118,9 @@ systems. For example: `A=factorize(A); x=A\\b; y=A\\C`. 
| Triangular | Triangular | | Diagonal | Diagonal | | Bidiagonal | Bidiagonal | -| Tridiagonal | LU (see [`lufact`](@ref)) | +| Tridiagonal | LU (see [`lu`](@ref)) | | Symmetric real tridiagonal | LDLt (see [`ldltfact`](@ref)) | -| General square | LU (see [`lufact`](@ref)) | +| General square | LU (see [`lu`](@ref)) | | General non-square | QR (see [`qrfact`](@ref)) | If `factorize` is called on a Hermitian positive-definite matrix, for instance, then `factorize` @@ -1201,7 +1201,7 @@ function factorize(A::StridedMatrix{T}) where T return ldltfact!(SymTridiagonal(diag(A), diag(A, -1))) end end - return lufact(Tridiagonal(diag(A, -1), diag(A), diag(A, 1))) + return lu(Tridiagonal(diag(A, -1), diag(A), diag(A, 1))) end end if utri @@ -1218,7 +1218,7 @@ function factorize(A::StridedMatrix{T}) where T if sym return factorize(Symmetric(A)) end - return lufact(A) + return lu(A) end qrfact(A, Val(true)) end @@ -1367,7 +1367,7 @@ function cond(A::AbstractMatrix, p::Real=2) end throw(ArgumentError("p-norm must be 1, 2 or Inf, got $p")) end -_cond1Inf(A::StridedMatrix{<:BlasFloat}, p::Real) = _cond1Inf(lufact(A), p, norm(A, p)) +_cond1Inf(A::StridedMatrix{<:BlasFloat}, p::Real) = _cond1Inf(lu(A), p, norm(A, p)) _cond1Inf(A::AbstractMatrix, p::Real) = norm(A, p)*norm(inv(A), p) ## Lyapunov and Sylvester equation diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index f7cb520809415..7a9472daf400e 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1260,3 +1260,11 @@ end @deprecate scale!(C::AbstractMatrix, a::AbstractVector, B::AbstractMatrix) mul!(C, Diagonal(a), B) Base.@deprecate_binding trace tr + +# deprecate lufact to lu +export lufact +@deprecate(lufact(S::LU), lu(S)) +@deprecate(lufact(x::Number), lu(x)) +@deprecate(lufact(A::AbstractMatrix{T}) where T, lu(A)) +@deprecate(lufact(A::AbstractMatrix{T}, pivot::Union{Val{false}, Val{true}}) where T, lu(A, pivot)) +@deprecate(lufact(A::Union{AbstractMatrix{T}, AbstractMatrix{Complex{T}}}, pivot::Union{Val{false}, Val{true}} = Val(true)) where {T<:AbstractFloat}, lu(A, pivot)) diff --git a/stdlib/LinearAlgebra/src/factorization.jl b/stdlib/LinearAlgebra/src/factorization.jl index d08a55e4d11b5..b0674ae64f683 100644 --- a/stdlib/LinearAlgebra/src/factorization.jl +++ b/stdlib/LinearAlgebra/src/factorization.jl @@ -27,7 +27,7 @@ julia> F = cholfact([1 0; 0 1]); julia> LinearAlgebra.issuccess(F) true -julia> F = lufact([1 0; 0 0]); +julia> F = lu([1 0; 0 0]); julia> LinearAlgebra.issuccess(F) false diff --git a/stdlib/LinearAlgebra/src/generic.jl b/stdlib/LinearAlgebra/src/generic.jl index 2ecd4220095b4..43ab58bbd4522 100644 --- a/stdlib/LinearAlgebra/src/generic.jl +++ b/stdlib/LinearAlgebra/src/generic.jl @@ -862,7 +862,7 @@ function (\)(A::AbstractMatrix, B::AbstractVecOrMat) if istriu(A) return UpperTriangular(A) \ B end - return lufact(A) \ B + return lu(A) \ B end return qrfact(A,Val(true)) \ B end @@ -1270,7 +1270,7 @@ function det(A::AbstractMatrix{T}) where T S = typeof((one(T)*zero(T) + zero(T))/one(T)) return convert(S, det(UpperTriangular(A))) end - return det(lufact(A)) + return det(lu(A)) end det(x::Number) = x @@ -1305,7 +1305,7 @@ julia> logabsdet(B) (0.6931471805599453, 1.0) ``` """ -logabsdet(A::AbstractMatrix) = logabsdet(lufact(A)) +logabsdet(A::AbstractMatrix) = logabsdet(lu(A)) """ logdet(M) diff --git a/stdlib/LinearAlgebra/src/lu.jl b/stdlib/LinearAlgebra/src/lu.jl index 34b6efe660940..cfdbee76376a4 100644 --- 
a/stdlib/LinearAlgebra/src/lu.jl +++ b/stdlib/LinearAlgebra/src/lu.jl @@ -11,6 +11,20 @@ struct LU{T,S<:AbstractMatrix} <: Factorization{T} end LU(factors::AbstractMatrix{T}, ipiv::Vector{BlasInt}, info::BlasInt) where {T} = LU{T,typeof(factors)}(factors, ipiv, info) +# iteration for destructuring into components +Base.iterate(S::LU) = (S.L, Val(:U)) +Base.iterate(S::LU, ::Val{:U}) = (S.U, Val(:p)) +Base.iterate(S::LU, ::Val{:p}) = (S.p, Val(:done)) +Base.iterate(S::LU, ::Val{:done}) = nothing + +# indexing for destructuring into components +@inline function Base.getindex(S::LU, i::Integer) + i == 1 ? (return S.L) : + i == 2 ? (return S.U) : + i == 3 ? (return S.p) : + throw(BoundsError(S, i)) +end + adjoint(F::LU) = Adjoint(F) transpose(F::LU) = Transpose(F) @@ -30,7 +44,7 @@ end """ lufact!(A, pivot=Val(true)) -> LU -`lufact!` is the same as [`lufact`](@ref), but saves space by overwriting the +`lufact!` is the same as [`lu`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. An [`InexactError`](@ref) exception is thrown if the factorization produces a number not representable by the element type of `A`, e.g. for integer types. @@ -114,13 +128,14 @@ function generic_lufact!(A::StridedMatrix{T}, ::Val{Pivot} = Val(true)) where {T end # floating point types doesn't have to be promoted for LU, but should default to pivoting -lufact(A::Union{AbstractMatrix{T}, AbstractMatrix{Complex{T}}}, - pivot::Union{Val{false}, Val{true}} = Val(true)) where {T<:AbstractFloat} = - lufact!(copy(A), pivot) +function lu(A::Union{AbstractMatrix{T}, AbstractMatrix{Complex{T}}}, + pivot::Union{Val{false}, Val{true}} = Val(true)) where {T<:AbstractFloat} + lufact!(copy(A), pivot) +end # for all other types we must promote to a type which is stable under division """ - lufact(A, pivot=Val(true)) -> F::LU + lu(A, pivot=Val(true)) -> F::LU Compute the LU factorization of `A`. @@ -129,7 +144,7 @@ type `T` supporting `+`, `-`, `*` and `/`, the return type is `LU{T,S{T}}`. If pivoting is chosen (default) the element type should also support `abs` and `<`. -The individual components of the factorization `F` can be accessed by indexing: +The individual components of the factorization `F` can be accessed via `getproperty`: | Component | Description | |:----------|:------------------------------------| @@ -138,6 +153,8 @@ The individual components of the factorization `F` can be accessed by indexing: | `F.p` | (right) permutation `Vector` | | `F.P` | (right) permutation `Matrix` | +Iterating the factorization produces the components `F.L`, `F.U`, and `F.p`. 
+ The relationship between `F` and `A` is `F.L*F.U == A[F.p, :]` @@ -161,7 +178,7 @@ julia> A = [4 3; 6 3] 4 3 6 3 -julia> F = lufact(A) +julia> F = lu(A) LU{Float64,Array{Float64,2}} L factor: 2×2 Array{Float64,2}: @@ -174,16 +191,21 @@ U factor: julia> F.L * F.U == A[F.p, :] true + +julia> l, u, p = lu(A); # destructuring via iteration + +julia> l == F.L && u == F.U && p == F.p +true ``` """ -function lufact(A::AbstractMatrix{T}, pivot::Union{Val{false}, Val{true}}) where T +function lu(A::AbstractMatrix{T}, pivot::Union{Val{false}, Val{true}}) where T S = typeof(zero(T)/one(T)) AA = similar(A, S) copyto!(AA, A) lufact!(AA, pivot) end # We can't assume an ordered field so we first try without pivoting -function lufact(A::AbstractMatrix{T}) where T +function lu(A::AbstractMatrix{T}) where T S = typeof(zero(T)/one(T)) AA = similar(A, S) copyto!(AA, A) @@ -197,38 +219,8 @@ function lufact(A::AbstractMatrix{T}) where T end end -lufact(x::Number) = LU(fill(x, 1, 1), BlasInt[1], x == 0 ? one(BlasInt) : zero(BlasInt)) -lufact(F::LU) = F - -lu(x::Number) = (one(x), x, 1) - -""" - lu(A, pivot=Val(true)) -> L, U, p - -Compute the LU factorization of `A`, such that `A[p,:] = L*U`. -By default, pivoting is used. This can be overridden by passing -`Val(false)` for the second argument. - -See also [`lufact`](@ref). - -# Examples -```jldoctest -julia> A = [4. 3.; 6. 3.] -2×2 Array{Float64,2}: - 4.0 3.0 - 6.0 3.0 - -julia> L, U, p = lu(A) -([1.0 0.0; 0.666667 1.0], [6.0 3.0; 0.0 1.0], [2, 1]) - -julia> A[p, :] == L * U -true -``` -""" -function lu(A::AbstractMatrix, pivot::Union{Val{false}, Val{true}} = Val(true)) - F = lufact(A, pivot) - F.L, F.U, F.p -end +lu(S::LU) = S +lu(x::Number) = LU(fill(x, 1, 1), BlasInt[1], x == 0 ? one(BlasInt) : zero(BlasInt)) function LU{T}(F::LU) where T M = convert(AbstractMatrix{T}, F.factors) @@ -459,7 +451,7 @@ function lufact!(A::Tridiagonal{T,V}, pivot::Union{Val{false}, Val{true}} = Val( LU{T,Tridiagonal{T,V}}(B, ipiv, convert(BlasInt, info)) end -factorize(A::Tridiagonal) = lufact(A) +factorize(A::Tridiagonal) = lu(A) function getproperty(F::LU{T,Tridiagonal{T,V}}, d::Symbol) where {T,V} m, n = size(F) diff --git a/stdlib/LinearAlgebra/src/symmetric.jl b/stdlib/LinearAlgebra/src/symmetric.jl index 4c2f5e44ff507..b4c69c85ef47d 100644 --- a/stdlib/LinearAlgebra/src/symmetric.jl +++ b/stdlib/LinearAlgebra/src/symmetric.jl @@ -442,7 +442,7 @@ function factorize(A::HermOrSym{T}) where T if TT <: BlasFloat return bkfact(A) else # fallback - return lufact(A) + return lu(A) end end @@ -453,11 +453,11 @@ det(A::Symmetric) = det(factorize(A)) \(A::HermOrSym{<:Any,<:StridedMatrix}, B::AbstractVector) = \(factorize(A), B) # Bunch-Kaufman solves can not utilize BLAS-3 for multiple right hand sides # so using LU is faster for AbstractMatrix right hand side -\(A::HermOrSym{<:Any,<:StridedMatrix}, B::AbstractMatrix) = \(lufact(A), B) +\(A::HermOrSym{<:Any,<:StridedMatrix}, B::AbstractMatrix) = \(lu(A), B) function _inv(A::HermOrSym) n = checksquare(A) - B = inv!(lufact(A)) + B = inv!(lu(A)) conjugate = isa(A, Hermitian) # symmetrize if A.uplo == 'U' # add to upper triangle diff --git a/stdlib/LinearAlgebra/test/generic.jl b/stdlib/LinearAlgebra/test/generic.jl index 05118849e9a8a..6108a563bbb29 100644 --- a/stdlib/LinearAlgebra/test/generic.jl +++ b/stdlib/LinearAlgebra/test/generic.jl @@ -348,13 +348,13 @@ LinearAlgebra.Transpose(a::ModInt{n}) where {n} = transpose(a) A = [ModInt{2}(1) ModInt{2}(0); ModInt{2}(1) ModInt{2}(1)] b = [ModInt{2}(1), ModInt{2}(0)] - @test A*(lufact(A, 
Val(false))\b) == b + @test A*(lu(A, Val(false))\b) == b # Needed for pivoting: Base.abs(a::ModInt{n}) where {n} = a Base.:<(a::ModInt{n}, b::ModInt{n}) where {n} = a.k < b.k - @test A*(lufact(A, Val(true))\b) == b + @test A*(lu(A, Val(true))\b) == b end @testset "fallback throws properly for AbstractArrays with dimension > 2" begin diff --git a/stdlib/LinearAlgebra/test/lapack.jl b/stdlib/LinearAlgebra/test/lapack.jl index 2eefa52b69acc..642827da3082a 100644 --- a/stdlib/LinearAlgebra/test/lapack.jl +++ b/stdlib/LinearAlgebra/test/lapack.jl @@ -266,7 +266,7 @@ end @test_throws DimensionMismatch LAPACK.gttrs!('N', x11, d, du, x9, y10, b) @test_throws DimensionMismatch LAPACK.gttrs!('N', dl, d, x11, x9, y10, b) @test_throws DimensionMismatch LAPACK.gttrs!('N', dl, d, du, x9, y10, x11) - A = lufact(Tridiagonal(dl,d,du)) + A = lu(Tridiagonal(dl,d,du)) b = rand(elty,10,5) c = copy(b) dl,d,du,du2,ipiv = LAPACK.gttrf!(dl,d,du) diff --git a/stdlib/LinearAlgebra/test/lu.jl b/stdlib/LinearAlgebra/test/lu.jl index bf6de1cb38127..96eab7b13fc88 100644 --- a/stdlib/LinearAlgebra/test/lu.jl +++ b/stdlib/LinearAlgebra/test/lu.jl @@ -42,8 +42,8 @@ dimg = randn(n)/2 if eltya <: BlasFloat @testset "LU factorization for Number" begin num = rand(eltya) - @test lu(num) == (one(eltya),num,1) - @test convert(Array, lufact(num)) ≈ eltya[num] + @test (lu(num)...,) == (hcat(one(eltya)), hcat(num), [1]) + @test convert(Array, lu(num)) ≈ eltya[num] end @testset "Balancing in eigenvector calculations" begin A = convert(Matrix{eltya}, [ 3.0 -2.0 -0.9 2*eps(real(one(eltya))); @@ -58,7 +58,7 @@ dimg = randn(n)/2 end end @testset "Singular LU" begin - lua = lufact(zeros(eltya, 3, 3)) + lua = lu(zeros(eltya, 3, 3)) @test !LinearAlgebra.issuccess(lua) @test sprint((t, s) -> show(t, "text/plain", s), lua) == "Failed factorization of type $(typeof(lua))" end @@ -85,9 +85,9 @@ dimg = randn(n)/2 end κd = cond(Array(d),1) @testset "Tridiagonal LU" begin - lud = lufact(d) + lud = lu(d) @test LinearAlgebra.issuccess(lud) - @test lufact(lud) == lud + @test lu(lud) == lud @test_throws ErrorException lud.Z @test lud.L*lud.U ≈ lud.P*Array(d) @test lud.L*lud.U ≈ Array(d)[lud.p,:] @@ -173,22 +173,22 @@ dimg = randn(n)/2 du[1] = zero(eltya) dl[1] = zero(eltya) zT = Tridiagonal(dl,dd,du) - @test !LinearAlgebra.issuccess(lufact(zT)) + @test !LinearAlgebra.issuccess(lu(zT)) end end @testset "Thin LU" begin - lua = @inferred lufact(a[:,1:n1]) + lua = @inferred lu(a[:,1:n1]) @test lua.L*lua.U ≈ lua.P*a[:,1:n1] end @testset "Fat LU" begin - lua = lufact(a[1:n1,:]) + lua = lu(a[1:n1,:]) @test lua.L*lua.U ≈ lua.P*a[1:n1,:] end end @testset "LU of Symmetric/Hermitian" begin for HS in (Hermitian(a'a), Symmetric(a'a)) - luhs = lufact(HS) + luhs = lu(HS) @test luhs.L*luhs.U ≈ luhs.P*Matrix(HS) end end @@ -198,8 +198,8 @@ end srand(3) a = Tridiagonal(rand(9),rand(10),rand(9)) fa = Array(a) - falu = lufact(fa) - alu = lufact(a) + falu = lu(fa) + alu = lu(a) falu = convert(typeof(falu),alu) @test AbstractArray(alu) == fa end @@ -208,7 +208,7 @@ end ## Integrate in general tests when more linear algebra is implemented in julia a = convert(Matrix{Rational{BigInt}}, rand(1:10//1,n,n))/n b = rand(1:10,n,2) - @inferred lufact(a) + @inferred lu(a) lua = factorize(a) l,u,p = lua.L, lua.U, lua.p @test l*u ≈ a[p,:] @@ -242,12 +242,12 @@ end end @testset "Issue 21453" begin - @test_throws ArgumentError LinearAlgebra._cond1Inf(lufact(randn(5,5)), 2, 2.0) + @test_throws ArgumentError LinearAlgebra._cond1Inf(lu(randn(5,5)), 2, 2.0) end @testset "REPL printing" 
begin bf = IOBuffer() - show(bf, "text/plain", lufact(Matrix(I, 4, 4))) + show(bf, "text/plain", lu(Matrix(I, 4, 4))) seekstart(bf) @test String(take!(bf)) == """ LinearAlgebra.LU{Float64,Array{Float64,2}} @@ -266,9 +266,9 @@ U factor: end @testset "propertynames" begin - names = sort!(collect(string.(Base.propertynames(lufact(rand(3,3)))))) + names = sort!(collect(string.(Base.propertynames(lu(rand(3,3)))))) @test names == ["L", "P", "U", "p"] - allnames = sort!(collect(string.(Base.propertynames(lufact(rand(3,3)), true)))) + allnames = sort!(collect(string.(Base.propertynames(lu(rand(3,3)), true)))) @test allnames == ["L", "P", "U", "factors", "info", "ipiv", "p"] end diff --git a/stdlib/LinearAlgebra/test/symmetric.jl b/stdlib/LinearAlgebra/test/symmetric.jl index 2a6a279a397a0..5ff60262510a6 100644 --- a/stdlib/LinearAlgebra/test/symmetric.jl +++ b/stdlib/LinearAlgebra/test/symmetric.jl @@ -196,7 +196,7 @@ end end if eltya <: LinearAlgebra.BlasComplex @testset "inverse edge case with complex Hermitian" begin - # Hermitian matrix, where inv(lufact(A)) generates non-real diagonal elements + # Hermitian matrix, where inv(lu(A)) generates non-real diagonal elements for T in (ComplexF32, ComplexF64) A = T[0.650488+0.0im 0.826686+0.667447im; 0.826686-0.667447im 1.81707+0.0im] H = Hermitian(A) diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index 50b94cc6f7764..884d94418d1c0 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -225,19 +225,19 @@ for elty1 in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFlo @test 0.5\A1 == 0.5\Matrix(A1) # inversion - @test inv(A1) ≈ inv(lufact(Matrix(A1))) + @test inv(A1) ≈ inv(lu(Matrix(A1))) inv(Matrix(A1)) # issue #11298 @test isa(inv(A1), t1) # make sure the call to LAPACK works right if elty1 <: BlasFloat - @test LinearAlgebra.inv!(copy(A1)) ≈ inv(lufact(Matrix(A1))) + @test LinearAlgebra.inv!(copy(A1)) ≈ inv(lu(Matrix(A1))) end # Determinant - @test det(A1) ≈ det(lufact(Matrix(A1))) atol=sqrt(eps(real(float(one(elty1)))))*n*n - @test logdet(A1) ≈ logdet(lufact(Matrix(A1))) atol=sqrt(eps(real(float(one(elty1)))))*n*n + @test det(A1) ≈ det(lu(Matrix(A1))) atol=sqrt(eps(real(float(one(elty1)))))*n*n + @test logdet(A1) ≈ logdet(lu(Matrix(A1))) atol=sqrt(eps(real(float(one(elty1)))))*n*n lada, ladb = logabsdet(A1) - flada, fladb = logabsdet(lufact(Matrix(A1))) + flada, fladb = logabsdet(lu(Matrix(A1))) @test lada ≈ flada atol=sqrt(eps(real(float(one(elty1)))))*n*n @test ladb ≈ fladb atol=sqrt(eps(real(float(one(elty1)))))*n*n @@ -425,7 +425,7 @@ for eltya in (Float32, Float64, ComplexF32, ComplexF64, BigFloat, Int) debug && println("\ntype of A: ", eltya, " type of b: ", eltyb, "\n") debug && println("Solve upper triangular system") - Atri = UpperTriangular(lufact(A).U) |> t -> eltya <: Complex && eltyb <: Real ? real(t) : t # Here the triangular matrix can't be too badly conditioned + Atri = UpperTriangular(lu(A).U) |> t -> eltya <: Complex && eltyb <: Real ? real(t) : t # Here the triangular matrix can't be too badly conditioned b = convert(Matrix{eltyb}, Matrix(Atri)*fill(1., n, 2)) x = Matrix(Atri) \ b @@ -453,7 +453,7 @@ for eltya in (Float32, Float64, ComplexF32, ComplexF64, BigFloat, Int) end debug && println("Solve lower triangular system") - Atri = UpperTriangular(lufact(A).U) |> t -> eltya <: Complex && eltyb <: Real ? 
real(t) : t # Here the triangular matrix can't be too badly conditioned + Atri = UpperTriangular(lu(A).U) |> t -> eltya <: Complex && eltyb <: Real ? real(t) : t # Here the triangular matrix can't be too badly conditioned b = convert(Matrix{eltyb}, Matrix(Atri)*fill(1., n, 2)) x = Matrix(Atri)\b diff --git a/stdlib/SparseArrays/src/linalg.jl b/stdlib/SparseArrays/src/linalg.jl index c304d3c3821fa..7306c9af4216f 100644 --- a/stdlib/SparseArrays/src/linalg.jl +++ b/stdlib/SparseArrays/src/linalg.jl @@ -937,7 +937,7 @@ function \(A::SparseMatrixCSC, B::AbstractVecOrMat) if ishermitian(A) return \(Hermitian(A), B) end - return \(lufact(A), B) + return \(lu(A), B) else return \(qrfact(A), B) end @@ -960,7 +960,7 @@ for (xformtype, xformop) in ((:Adjoint, :adjoint), (:Transpose, :transpose)) if ishermitian(A) return \($xformop(Hermitian(A)), B) end - return \($xformop(lufact(A)), B) + return \($xformop(lu(A)), B) else return \($xformop(qrfact(A)), B) end @@ -983,7 +983,7 @@ function factorize(A::SparseMatrixCSC) if ishermitian(A) return factorize(Hermitian(A)) end - return lufact(A) + return lu(A) else return qrfact(A) end @@ -1009,7 +1009,6 @@ function factorize(A::LinearAlgebra.RealHermSymComplexHerm{Float64,<:SparseMatri end chol(A::SparseMatrixCSC) = error("Use cholfact() instead of chol() for sparse matrices.") -lu(A::SparseMatrixCSC) = error("Use lufact() instead of lu() for sparse matrices.") eig(A::SparseMatrixCSC) = error("Use IterativeEigensolvers.eigs() instead of eig() for sparse matrices.") function Base.cov(X::SparseMatrixCSC; dims::Int=1, corrected::Bool=true) diff --git a/stdlib/SparseArrays/test/sparse.jl b/stdlib/SparseArrays/test/sparse.jl index 46f39b27a59d7..dadda8ce1790a 100644 --- a/stdlib/SparseArrays/test/sparse.jl +++ b/stdlib/SparseArrays/test/sparse.jl @@ -1320,7 +1320,7 @@ end @testset "explicit zeros" begin if Base.USE_GPL_LIBS a = SparseMatrixCSC(2, 2, [1, 3, 5], [1, 2, 1, 2], [1.0, 0.0, 0.0, 1.0]) - @test lufact(a)\[2.0, 3.0] ≈ [2.0, 3.0] + @test lu(a)\[2.0, 3.0] ≈ [2.0, 3.0] @test cholfact(a)\[2.0, 3.0] ≈ [2.0, 3.0] end end @@ -1780,7 +1780,6 @@ end C, b = A[:, 1:4], fill(1., size(A, 1)) @test !Base.USE_GPL_LIBS || factorize(C)\b ≈ Array(C)\b @test_throws ErrorException chol(A) - @test_throws ErrorException lu(A) @test_throws ErrorException eig(A) @test_throws ErrorException inv(A) end diff --git a/stdlib/SuiteSparse/src/cholmod.jl b/stdlib/SuiteSparse/src/cholmod.jl index 6f7963a209690..1255f420b8add 100644 --- a/stdlib/SuiteSparse/src/cholmod.jl +++ b/stdlib/SuiteSparse/src/cholmod.jl @@ -774,7 +774,7 @@ function solve(sys::Integer, F::Factor{Tv}, B::Dense{Tv}) where Tv<:VTypes if s.is_ll == 1 throw(LinearAlgebra.PosDefException(s.minor)) else - throw(ArgumentError("factorized matrix has one or more zero pivots. Try using lufact instead.")) + throw(ArgumentError("factorized matrix has one or more zero pivots. 
Try using `lu` instead.")) end end Dense(ccall((@cholmod_name("solve", SuiteSparse_long),:libcholmod), Ptr{C_Dense{Tv}}, @@ -1720,7 +1720,7 @@ function \(A::RealHermSymComplexHermF64SSL, B::StridedVecOrMat) if issuccess(F) return \(F, B) else - return \(lufact(SparseMatrixCSC{eltype(A), SuiteSparse_long}(A)), B) + return \(lu(SparseMatrixCSC{eltype(A), SuiteSparse_long}(A)), B) end end end @@ -1734,7 +1734,7 @@ function \(adjA::Adjoint{<:Any,<:RealHermSymComplexHermF64SSL}, B::StridedVecOrM if issuccess(F) return \(adjoint(F), B) else - return \(adjoint(lufact(SparseMatrixCSC{eltype(A), SuiteSparse_long}(A))), B) + return \(adjoint(lu(SparseMatrixCSC{eltype(A), SuiteSparse_long}(A))), B) end end end diff --git a/stdlib/SuiteSparse/src/deprecated.jl b/stdlib/SuiteSparse/src/deprecated.jl index 14bdd94e7c2ac..f5e5e5e157b7c 100644 --- a/stdlib/SuiteSparse/src/deprecated.jl +++ b/stdlib/SuiteSparse/src/deprecated.jl @@ -46,3 +46,12 @@ end LinearAlgebra.A_mul_B!(A::StridedMatrix, Q::QRSparseQ) = LinearAlgebra.mul!(A, Q) LinearAlgebra.A_mul_B!(Q::QRSparseQ, A::StridedVecOrMat) = LinearAlgebra.mul!(Q, A) end + +# deprecate lufact to lu +@eval SuiteSparse.UMFPACK begin + @deprecate(lufact(A::SparseMatrixCSC), lu(A)) + @deprecate(lufact(S::SparseMatrixCSC{<:UMFVTypes,<:UMFITypes}), lu(S)) + @deprecate(lufact(A::SparseMatrixCSC{<:Union{Float16,Float32},Ti}) where {Ti<:UMFITypes}, lu(A)) + @deprecate(lufact(A::SparseMatrixCSC{<:Union{ComplexF16,ComplexF32},Ti}) where {Ti<:UMFITypes}, lu(A)) + @deprecate(lufact(A::Union{SparseMatrixCSC{T},SparseMatrixCSC{Complex{T}}}) where {T<:AbstractFloat}, lu(A)) +end diff --git a/stdlib/SuiteSparse/src/umfpack.jl b/stdlib/SuiteSparse/src/umfpack.jl index 8b4f82a60b3e2..587de41e14fdd 100644 --- a/stdlib/SuiteSparse/src/umfpack.jl +++ b/stdlib/SuiteSparse/src/umfpack.jl @@ -6,7 +6,7 @@ export UmfpackLU import Base: (\), getproperty, show, size using LinearAlgebra -import LinearAlgebra: Factorization, det, lufact, ldiv! +import LinearAlgebra: Factorization, det, lu, ldiv! using SparseArrays import SparseArrays: nnz @@ -108,7 +108,7 @@ Base.adjoint(F::UmfpackLU) = Adjoint(F) Base.transpose(F::UmfpackLU) = Transpose(F) """ - lufact(A::SparseMatrixCSC) -> F::UmfpackLU + lu(A::SparseMatrixCSC) -> F::UmfpackLU Compute the LU factorization of a sparse matrix `A`. @@ -138,12 +138,12 @@ The relation between `F` and `A` is - [`det`](@ref) !!! note - `lufact(A::SparseMatrixCSC)` uses the UMFPACK library that is part of + `lu(A::SparseMatrixCSC)` uses the UMFPACK library that is part of SuiteSparse. As this library only supports sparse matrices with [`Float64`](@ref) or - `ComplexF64` elements, `lufact` converts `A` into a copy that is of type + `ComplexF64` elements, `lu` converts `A` into a copy that is of type `SparseMatrixCSC{Float64}` or `SparseMatrixCSC{ComplexF64}` as appropriate. """ -function lufact(S::SparseMatrixCSC{<:UMFVTypes,<:UMFITypes}) +function lu(S::SparseMatrixCSC{<:UMFVTypes,<:UMFITypes}) zerobased = S.colptr[1] == 0 res = UmfpackLU(C_NULL, C_NULL, S.m, S.n, zerobased ? 
copy(S.colptr) : decrement(S.colptr), @@ -152,16 +152,16 @@ function lufact(S::SparseMatrixCSC{<:UMFVTypes,<:UMFITypes}) finalizer(umfpack_free_symbolic, res) umfpack_numeric!(res) end -lufact(A::SparseMatrixCSC{<:Union{Float16,Float32},Ti}) where {Ti<:UMFITypes} = - lufact(convert(SparseMatrixCSC{Float64,Ti}, A)) -lufact(A::SparseMatrixCSC{<:Union{ComplexF16,ComplexF32},Ti}) where {Ti<:UMFITypes} = - lufact(convert(SparseMatrixCSC{ComplexF64,Ti}, A)) -lufact(A::Union{SparseMatrixCSC{T},SparseMatrixCSC{Complex{T}}}) where {T<:AbstractFloat} = +lu(A::SparseMatrixCSC{<:Union{Float16,Float32},Ti}) where {Ti<:UMFITypes} = + lu(convert(SparseMatrixCSC{Float64,Ti}, A)) +lu(A::SparseMatrixCSC{<:Union{ComplexF16,ComplexF32},Ti}) where {Ti<:UMFITypes} = + lu(convert(SparseMatrixCSC{ComplexF64,Ti}, A)) +lu(A::Union{SparseMatrixCSC{T},SparseMatrixCSC{Complex{T}}}) where {T<:AbstractFloat} = throw(ArgumentError(string("matrix type ", typeof(A), "not supported. ", - "Try lufact(convert(SparseMatrixCSC{Float64/ComplexF64,Int}, A)) for ", - "sparse floating point LU using UMFPACK or lufact(Array(A)) for generic ", + "Try lu(convert(SparseMatrixCSC{Float64/ComplexF64,Int}, A)) for ", + "sparse floating point LU using UMFPACK or lu(Array(A)) for generic ", "dense LU."))) -lufact(A::SparseMatrixCSC) = lufact(float(A)) +lu(A::SparseMatrixCSC) = lu(float(A)) size(F::UmfpackLU) = (F.m, F.n) diff --git a/stdlib/SuiteSparse/test/umfpack.jl b/stdlib/SuiteSparse/test/umfpack.jl index a1be43d13dfc2..dcb74fc281629 100644 --- a/stdlib/SuiteSparse/test/umfpack.jl +++ b/stdlib/SuiteSparse/test/umfpack.jl @@ -18,7 +18,7 @@ # We might be able to support two index sizes one day for Ti in Base.uniontypes(SuiteSparse.UMFPACK.UMFITypes) A = convert(SparseMatrixCSC{Tv,Ti}, A0) - lua = lufact(A) + lua = lu(A) @test nnz(lua) == 18 @test_throws ErrorException lua.Z L,U,p,q,Rs = lua.:(:) @@ -77,7 +77,7 @@ for Ti in Base.uniontypes(SuiteSparse.UMFPACK.UMFITypes) Ac = convert(SparseMatrixCSC{ComplexF64,Ti}, Ac0) x = fill(1.0 + im, size(Ac,1)) - lua = lufact(Ac) + lua = lu(Ac) L,U,p,q,Rs = lua.:(:) @test (Diagonal(Rs) * Ac)[p,q] ≈ L * U b = Ac*x @@ -92,7 +92,7 @@ @testset "Rectangular cases" for elty in (Float64, ComplexF64) for (m, n) in ((10,5), (5, 10)) A = sparse([1:min(m,n); rand(1:m, 10)], [1:min(m,n); rand(1:n, 10)], elty == Float64 ? 
randn(min(m, n) + 10) : complex.(randn(min(m, n) + 10), randn(min(m, n) + 10))) - F = lufact(A) + F = lu(A) L, U, p, q, Rs = F.:(:) @test (Diagonal(Rs) * A)[p,q] ≈ L * U end @@ -100,13 +100,13 @@ @testset "Issue #4523 - complex sparse \\" begin A, b = sparse((1.0 + im)I, 2, 2), fill(1., 2) - @test A * (lufact(A)\b) ≈ b + @test A * (lu(A)\b) ≈ b @test det(sparse([1,3,3,1], [1,1,3,3], [1,1,1,1])) == 0 end @testset "UMFPACK_ERROR_n_nonpositive" begin - @test_throws ArgumentError lufact(sparse(Int[], Int[], Float64[], 5, 0)) + @test_throws ArgumentError lu(sparse(Int[], Int[], Float64[], 5, 0)) end @testset "Issue #15099" for (Tin, Tout) in ( @@ -119,7 +119,7 @@ (Int, Float64), ) - F = lufact(sparse(fill(Tin(1), 1, 1))) + F = lu(sparse(fill(Tin(1), 1, 1))) L = sparse(fill(Tout(1), 1, 1)) @test F.p == F.q == [1] @test F.Rs == [1.0] @@ -128,12 +128,12 @@ end @testset "BigFloat not supported" for T in (BigFloat, Complex{BigFloat}) - @test_throws ArgumentError lufact(sparse(fill(T(1), 1, 1))) + @test_throws ArgumentError lu(sparse(fill(T(1), 1, 1))) end @testset "size(::UmfpackLU)" begin m = n = 1 - F = lufact(sparse(fill(1., m, n))) + F = lu(sparse(fill(1., m, n))) @test size(F) == (m, n) @test size(F, 1) == m @test size(F, 2) == n @@ -143,15 +143,15 @@ @testset "Test aliasing" begin a = rand(5) - @test_throws ArgumentError SuiteSparse.UMFPACK.solve!(a, lufact(sparse(1.0I, 5, 5)), a, SuiteSparse.UMFPACK.UMFPACK_A) + @test_throws ArgumentError SuiteSparse.UMFPACK.solve!(a, lu(sparse(1.0I, 5, 5)), a, SuiteSparse.UMFPACK.UMFPACK_A) aa = complex(a) - @test_throws ArgumentError SuiteSparse.UMFPACK.solve!(aa, lufact(sparse((1.0im)I, 5, 5)), aa, SuiteSparse.UMFPACK.UMFPACK_A) + @test_throws ArgumentError SuiteSparse.UMFPACK.solve!(aa, lu(sparse((1.0im)I, 5, 5)), aa, SuiteSparse.UMFPACK.UMFPACK_A) end - @testset "Issues #18246,18244 - lufact sparse pivot" begin + @testset "Issues #18246,18244 - lu sparse pivot" begin A = sparse(1.0I, 4, 4) A[1:2,1:2] = [-.01 -200; 200 .001] - F = lufact(A) + F = lu(A) @test F.p == [3 ; 4 ; 2 ; 1] end @@ -161,7 +161,7 @@ A = N*I + sprand(N, N, p) X = zeros(Complex{Float64}, N, N) B = complex.(rand(N, N), rand(N, N)) - luA, lufA = lufact(A), lufact(Array(A)) + luA, lufA = lu(A), lu(Array(A)) @test LinearAlgebra.ldiv!(copy(X), luA, B) ≈ LinearAlgebra.ldiv!(copy(X), lufA, B) @test LinearAlgebra.ldiv!(copy(X), adjoint(luA), B) ≈ LinearAlgebra.ldiv!(copy(X), adjoint(lufA), B) @test LinearAlgebra.ldiv!(copy(X), transpose(luA), B) ≈ LinearAlgebra.ldiv!(copy(X), transpose(lufA), B) From 8cc18fbc6167021fa58690031d36e5acff98766d Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Fri, 18 May 2018 20:58:31 -0700 Subject: [PATCH 02/23] Deprecate schurfact to schur. --- NEWS.md | 6 +- stdlib/LinearAlgebra/docs/src/index.md | 1 - stdlib/LinearAlgebra/src/LinearAlgebra.jl | 1 - stdlib/LinearAlgebra/src/dense.jl | 22 ++-- stdlib/LinearAlgebra/src/deprecated.jl | 7 ++ stdlib/LinearAlgebra/src/schur.jl | 119 +++++++++++----------- stdlib/LinearAlgebra/test/schur.jl | 8 +- 7 files changed, 86 insertions(+), 78 deletions(-) diff --git a/NEWS.md b/NEWS.md index cd7873ccf01a6..820b9c388c263 100644 --- a/NEWS.md +++ b/NEWS.md @@ -230,6 +230,9 @@ This section lists changes that do not have deprecation warnings. * `lu` methods now return decomposition objects such as `LU` rather than tuples of arrays or tuples of numbers ([#27159]). + * `schur` methods now return decomposition objects such as `Schur` and + `GeneralizedSchur` rather than tuples of arrays ([#27159]). 
+ * `countlines` now always counts the last non-empty line even if it does not end with EOL, matching the behavior of `eachline` and `readlines` ([#25845]). @@ -691,7 +694,8 @@ Deprecated or removed * The keyword `immutable` is fully deprecated to `struct`, and `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). - * `lufact` has been deprecated to `lu` ([#27159]). + * `lufact` and `schurfact` have respectively been deprecated to + `lu` and `schur` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index ed7bbef38db51..9141f874617cf 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -344,7 +344,6 @@ LinearAlgebra.eigfact LinearAlgebra.eigfact! LinearAlgebra.hessfact LinearAlgebra.hessfact! -LinearAlgebra.schurfact LinearAlgebra.schurfact! LinearAlgebra.schur LinearAlgebra.ordschur diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index b11c223c87db2..734006ebe2de6 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -134,7 +134,6 @@ export rdiv!, schur, schurfact!, - schurfact, svd, svdfact!, svdfact, diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 8d84cc2e4ea9e..77baee243711c 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -638,12 +638,12 @@ function log(A::StridedMatrix) return triu!(parent(log(UpperTriangular(complex(A))))) else if isreal(A) - SchurF = schurfact(real(A)) + SchurF = schur(real(A)) else - SchurF = schurfact(A) + SchurF = schur(A) end if !istriu(SchurF.T) - SchurS = schurfact(complex(SchurF.T)) + SchurS = schur(complex(SchurF.T)) logT = SchurS.Z * log(UpperTriangular(SchurS.T)) * SchurS.Z' return SchurF.Z * logT * SchurF.Z' else @@ -692,7 +692,7 @@ function sqrt(A::StridedMatrix{<:Real}) if istriu(A) return triu!(parent(sqrt(UpperTriangular(A)))) else - SchurF = schurfact(complex(A)) + SchurF = schur(complex(A)) R = triu!(parent(sqrt(UpperTriangular(SchurF.T)))) # unwrapping unnecessary? return SchurF.vectors * R * SchurF.vectors' end @@ -706,7 +706,7 @@ function sqrt(A::StridedMatrix{<:Complex}) if istriu(A) return triu!(parent(sqrt(UpperTriangular(A)))) else - SchurF = schurfact(A) + SchurF = schur(A) R = triu!(parent(sqrt(UpperTriangular(SchurF.T)))) # unwrapping unnecessary? return SchurF.vectors * R * SchurF.vectors' end @@ -944,7 +944,7 @@ function acos(A::AbstractMatrix) acosHermA = acos(Hermitian(A)) return isa(acosHermA, Hermitian) ? copytri!(parent(acosHermA), 'U', true) : parent(acosHermA) end - SchurF = schurfact(complex(A)) + SchurF = schur(complex(A)) U = UpperTriangular(SchurF.T) R = triu!(parent(-im * log(U + im * sqrt(I - U^2)))) return SchurF.Z * R * SchurF.Z' @@ -975,7 +975,7 @@ function asin(A::AbstractMatrix) asinHermA = asin(Hermitian(A)) return isa(asinHermA, Hermitian) ? 
copytri!(parent(asinHermA), 'U', true) : parent(asinHermA)
     end
-    SchurF = schurfact(complex(A))
+    SchurF = schur(complex(A))
     U = UpperTriangular(SchurF.T)
     R = triu!(parent(-im * log(im * U + sqrt(I - U^2))))
     return SchurF.Z * R * SchurF.Z'
@@ -1005,7 +1005,7 @@ function atan(A::AbstractMatrix)
     if ishermitian(A)
         return copytri!(parent(atan(Hermitian(A))), 'U', true)
     end
-    SchurF = schurfact(complex(A))
+    SchurF = schur(complex(A))
     U = im * UpperTriangular(SchurF.T)
     R = triu!(parent(log((I + U) / (I - U)) / 2im))
     return SchurF.Z * R * SchurF.Z'
@@ -1024,7 +1024,7 @@ function acosh(A::AbstractMatrix)
         acoshHermA = acosh(Hermitian(A))
         return isa(acoshHermA, Hermitian) ? copytri!(parent(acoshHermA), 'U', true) : parent(acoshHermA)
     end
-    SchurF = schurfact(complex(A))
+    SchurF = schur(complex(A))
     U = UpperTriangular(SchurF.T)
     R = triu!(parent(log(U + sqrt(U - I) * sqrt(U + I))))
     return SchurF.Z * R * SchurF.Z'
@@ -1042,7 +1042,7 @@ function asinh(A::AbstractMatrix)
     if ishermitian(A)
         return copytri!(parent(asinh(Hermitian(A))), 'U', true)
     end
-    SchurF = schurfact(complex(A))
+    SchurF = schur(complex(A))
     U = UpperTriangular(SchurF.T)
     R = triu!(parent(log(U + sqrt(I + U^2))))
     return SchurF.Z * R * SchurF.Z'
@@ -1060,7 +1060,7 @@ function atanh(A::AbstractMatrix)
     if ishermitian(A)
         return copytri!(parent(atanh(Hermitian(A))), 'U', true)
     end
-    SchurF = schurfact(complex(A))
+    SchurF = schur(complex(A))
     U = UpperTriangular(SchurF.T)
     R = triu!(parent(log((I + U) / (I - U)) / 2))
     return SchurF.Z * R * SchurF.Z'
diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl
index 7a9472daf400e..4e8a2d49f0c51 100644
--- a/stdlib/LinearAlgebra/src/deprecated.jl
+++ b/stdlib/LinearAlgebra/src/deprecated.jl
@@ -1268,3 +1268,10 @@ export lufact
 @deprecate(lufact(A::AbstractMatrix{T}) where T, lu(A))
 @deprecate(lufact(A::AbstractMatrix{T}, pivot::Union{Val{false}, Val{true}}) where T, lu(A, pivot))
 @deprecate(lufact(A::Union{AbstractMatrix{T}, AbstractMatrix{Complex{T}}}, pivot::Union{Val{false}, Val{true}} = Val(true)) where {T<:AbstractFloat}, lu(A, pivot))
+
+# deprecate schurfact to schur
+export schurfact
+@deprecate(schurfact(A::StridedMatrix{<:BlasFloat}), schur(A))
+@deprecate(schurfact(A::StridedMatrix{T}) where T, schur(A))
+@deprecate(schurfact(A::StridedMatrix{T},B::StridedMatrix{T}) where {T<:BlasFloat}, schur(A, B))
+@deprecate(schurfact(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB}, schur(A, B))
diff --git a/stdlib/LinearAlgebra/src/schur.jl b/stdlib/LinearAlgebra/src/schur.jl
index ec4595351bfe2..51abb54e50a0b 100644
--- a/stdlib/LinearAlgebra/src/schur.jl
+++ b/stdlib/LinearAlgebra/src/schur.jl
@@ -9,10 +9,24 @@ struct Schur{Ty,S<:AbstractMatrix} <: Factorization{Ty}
 end
 Schur(T::AbstractMatrix{Ty}, Z::AbstractMatrix{Ty}, values::Vector) where {Ty} =
     Schur{Ty, typeof(T)}(T, Z, values)
 
+# iteration for destructuring into components
+Base.iterate(S::Schur) = (S.T, Val(:Z))
+Base.iterate(S::Schur, ::Val{:Z}) = (S.Z, Val(:values))
+Base.iterate(S::Schur, ::Val{:values}) = (S.values, Val(:done))
+Base.iterate(S::Schur, ::Val{:done}) = nothing
+
+# indexing for destructuring into components
+@inline function Base.getindex(S::Schur, i::Integer)
+    i == 1 ? (return S.T) :
+    i == 2 ? (return S.Z) :
+    i == 3 ? (return S.values) :
+        throw(BoundsError(S, i))
+end
+
 """
     schurfact!(A::StridedMatrix) -> F::Schur
 
-Same as [`schurfact`](@ref) but uses the input argument `A` as workspace.
+Same as [`schur`](@ref) but uses the input argument `A` as workspace.
# Examples ```jldoctest @@ -45,13 +59,15 @@ julia> A schurfact!(A::StridedMatrix{<:BlasFloat}) = Schur(LinearAlgebra.LAPACK.gees!('V', A)...) """ - schurfact(A::StridedMatrix) -> F::Schur + schur(A::StridedMatrix) -> F::Schur Computes the Schur factorization of the matrix `A`. The (quasi) triangular Schur factor can be obtained from the `Schur` object `F` with either `F.Schur` or `F.T` and the orthogonal/unitary Schur vectors can be obtained with `F.vectors` or `F.Z` such that `A = F.vectors * F.Schur * F.vectors'`. The eigenvalues of `A` can be obtained with `F.values`. +Iterating the decomposition produces the components `F.T`, `F.Z`, and `F.values`. + # Examples ```jldoctest julia> A = [5. 7.; -2. -4.] @@ -59,7 +75,7 @@ julia> A = [5. 7.; -2. -4.] 5.0 7.0 -2.0 -4.0 -julia> F = schurfact(A) +julia> F = schur(A) Schur{Float64,Array{Float64,2}} T factor: 2×2 Array{Float64,2}: @@ -78,10 +94,21 @@ julia> F.vectors * F.Schur * F.vectors' 2×2 Array{Float64,2}: 5.0 7.0 -2.0 -4.0 + +julia> t, z, vals = F; # destructuring via iteration + +julia> t == F.T && z == F.Z && vals == F.values +true ``` """ -schurfact(A::StridedMatrix{<:BlasFloat}) = schurfact!(copy(A)) -schurfact(A::StridedMatrix{T}) where T = schurfact!(copy_oftype(A, eigtype(T))) +schur(A::StridedMatrix{<:BlasFloat}) = schurfact!(copy(A)) +schur(A::StridedMatrix{T}) where T = schurfact!(copy_oftype(A, eigtype(T))) + +schur(A::Symmetric) = schur(copyto!(similar(parent(A)), A)) +schur(A::Hermitian) = schur(copyto!(similar(parent(A)), A)) +schur(A::UpperTriangular) = schur(copyto!(similar(parent(A)), A)) +schur(A::LowerTriangular) = schur(copyto!(similar(parent(A)), A)) +schur(A::Tridiagonal) = schur(Matrix(A)) function getproperty(F::Schur, d::Symbol) if d == :Schur @@ -106,47 +133,6 @@ function show(io::IO, mime::MIME{Symbol("text/plain")}, F::Schur) show(io, mime, F.values) end -""" - schur(A::StridedMatrix) -> T::Matrix, Z::Matrix, λ::Vector - -Computes the Schur factorization of the matrix `A`. The methods return the (quasi) -triangular Schur factor `T` and the orthogonal/unitary Schur vectors `Z` such that -`A = Z * T * Z'`. The eigenvalues of `A` are returned in the vector `λ`. - -See [`schurfact`](@ref). - -# Examples -```jldoctest -julia> A = [5. 7.; -2. -4.] 
-2×2 Array{Float64,2}: - 5.0 7.0 - -2.0 -4.0 - -julia> T, Z, lambda = schur(A) -([3.0 9.0; 0.0 -2.0], [0.961524 0.274721; -0.274721 0.961524], [3.0, -2.0]) - -julia> Z * Z' -2×2 Array{Float64,2}: - 1.0 0.0 - 0.0 1.0 - -julia> Z * T * Z' -2×2 Array{Float64,2}: - 5.0 7.0 - -2.0 -4.0 -``` -""" -function schur(A::StridedMatrix) - SchurF = schurfact(A) - SchurF.T, SchurF.Z, SchurF.values -end -schur(A::Symmetric) = schur(copyto!(similar(parent(A)), A)) -schur(A::Hermitian) = schur(copyto!(similar(parent(A)), A)) -schur(A::UpperTriangular) = schur(copyto!(similar(parent(A)), A)) -schur(A::LowerTriangular) = schur(copyto!(similar(parent(A)), A)) -schur(A::Tridiagonal) = schur(Matrix(A)) - - """ ordschur!(F::Schur, select::Union{Vector{Bool},BitVector}) -> F::Schur @@ -209,16 +195,36 @@ function GeneralizedSchur(S::AbstractMatrix{Ty}, T::AbstractMatrix{Ty}, alpha::V GeneralizedSchur{Ty, typeof(S)}(S, T, alpha, beta, Q, Z) end +# iteration for destructuring into components +Base.iterate(S::GeneralizedSchur) = (S.S, Val(:T)) +Base.iterate(S::GeneralizedSchur, ::Val{:T}) = (S.T, Val(:Q)) +Base.iterate(S::GeneralizedSchur, ::Val{:Q}) = (S.Q, Val(:Z)) +Base.iterate(S::GeneralizedSchur, ::Val{:Z}) = (S.Z, Val(:α)) +Base.iterate(S::GeneralizedSchur, ::Val{:α}) = (S.α, Val(:β)) +Base.iterate(S::GeneralizedSchur, ::Val{:β}) = (S.β, Val(:done)) +Base.iterate(S::GeneralizedSchur, ::Val{:done}) = nothing + +# indexing for destructuring into components +@inline function Base.getindex(S::GeneralizedSchur, i::Integer) + i == 1 ? (return S.S) : + i == 2 ? (return S.T) : + i == 3 ? (return S.Q) : + i == 4 ? (return S.Z) : + i == 5 ? (return S.α) : + i == 6 ? (return S.β) : + throw(BoundsError(S, i)) +end + """ schurfact!(A::StridedMatrix, B::StridedMatrix) -> F::GeneralizedSchur -Same as [`schurfact`](@ref) but uses the input matrices `A` and `B` as workspace. +Same as [`schur`](@ref) but uses the input matrices `A` and `B` as workspace. """ schurfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = GeneralizedSchur(LinearAlgebra.LAPACK.gges!('V', 'V', A, B)...) """ - schurfact(A::StridedMatrix, B::StridedMatrix) -> F::GeneralizedSchur + schur(A::StridedMatrix, B::StridedMatrix) -> F::GeneralizedSchur Computes the Generalized Schur (or QZ) factorization of the matrices `A` and `B`. The (quasi) triangular Schur factors can be obtained from the `Schur` object `F` with `F.S` @@ -226,9 +232,12 @@ and `F.T`, the left unitary/orthogonal Schur vectors can be obtained with `F.lef `F.Q` and the right unitary/orthogonal Schur vectors can be obtained with `F.right` or `F.Z` such that `A=F.left*F.S*F.right'` and `B=F.left*F.T*F.right'`. The generalized eigenvalues of `A` and `B` can be obtained with `F.α./F.β`. + +Iterating the decomposition produces the components `F.S`, `F.T`, `F.Q`, `F.Z`, +`F.α`, and `F.β`. """ -schurfact(A::StridedMatrix{T},B::StridedMatrix{T}) where {T<:BlasFloat} = schurfact!(copy(A),copy(B)) -function schurfact(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB} +schur(A::StridedMatrix{T},B::StridedMatrix{T}) where {T<:BlasFloat} = schurfact!(copy(A),copy(B)) +function schur(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB} S = promote_type(eigtype(TA), TB) return schurfact!(copy_oftype(A, S), copy_oftype(B, S)) end @@ -300,16 +309,6 @@ end Base.propertynames(F::GeneralizedSchur) = (:values, :left, :right, fieldnames(typeof(F))...) 
-""" - schur(A::StridedMatrix, B::StridedMatrix) -> S::StridedMatrix, T::StridedMatrix, Q::StridedMatrix, Z::StridedMatrix, α::Vector, β::Vector - -See [`schurfact`](@ref). -""" -function schur(A::StridedMatrix, B::StridedMatrix) - SchurF = schurfact(A, B) - SchurF.S, SchurF.T, SchurF.Q, SchurF.Z, SchurF.α, SchurF.β -end - function show(io::IO, mime::MIME{Symbol("text/plain")}, F::GeneralizedSchur) println(io, summary(F)) println(io, "S factor:") diff --git a/stdlib/LinearAlgebra/test/schur.jl b/stdlib/LinearAlgebra/test/schur.jl index 52ba3957fb505..6299e9da71a1d 100644 --- a/stdlib/LinearAlgebra/test/schur.jl +++ b/stdlib/LinearAlgebra/test/schur.jl @@ -27,7 +27,7 @@ aimg = randn(n,n)/2 ε = εa = eps(abs(float(one(eltya)))) d,v = eig(a) - f = schurfact(a) + f = schur(a) @test f.vectors*f.Schur*f.vectors' ≈ a @test sort(real(f.values)) ≈ sort(real(d)) @test sort(imag(f.values)) ≈ sort(imag(d)) @@ -55,7 +55,7 @@ aimg = randn(n,n)/2 # use asym for real schur to enforce tridiag structure # avoiding partly selection of conj. eigenvalues ordschura = eltya <: Complex ? a : asym - S = schurfact(ordschura) + S = schur(ordschura) select = bitrand(n) O = ordschur(S, select) sum(select) != 0 && @test S.values[findall(select)] ≈ O.values[1:sum(select)] @@ -75,7 +75,7 @@ aimg = randn(n,n)/2 a2_sf = view(a, n1+1:n2, n1+1:n2) end @testset "Generalized Schur" begin - f = schurfact(a1_sf, a2_sf) + f = schur(a1_sf, a2_sf) @test f.Q*f.S*f.Z' ≈ a1_sf @test f.Q*f.T*f.Z' ≈ a2_sf @test istriu(f.S) || eltype(a)<:Real @@ -92,7 +92,7 @@ aimg = randn(n,n)/2 @test fstring == "$(summary(f))\nS factor:\n$sstring\nT factor:\n$(tstring)\nQ factor:\n$(qstring)\nZ factor:\n$(zstring)\nα:\n$αstring\nβ:\n$βstring" end @testset "Reorder Generalized Schur" begin - NS = schurfact(a1_sf, a2_sf) + NS = schur(a1_sf, a2_sf) # Currently just testing with selecting gen eig values < 1 select = abs2.(NS.values) .< 1 m = sum(select) From a9306ef2c8dbb90b8b8eca5cd7f32858941f36a6 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sat, 19 May 2018 10:41:22 -0700 Subject: [PATCH 03/23] Deprecate lqfact to lq. --- NEWS.md | 7 +- stdlib/LinearAlgebra/docs/src/index.md | 1 - stdlib/LinearAlgebra/src/LinearAlgebra.jl | 1 - stdlib/LinearAlgebra/src/deprecated.jl | 5 ++ stdlib/LinearAlgebra/src/lq.jl | 79 +++++++++++++---------- stdlib/LinearAlgebra/test/lq.jl | 59 ++++------------- 6 files changed, 67 insertions(+), 85 deletions(-) diff --git a/NEWS.md b/NEWS.md index 820b9c388c263..c422ac4460f42 100644 --- a/NEWS.md +++ b/NEWS.md @@ -233,6 +233,9 @@ This section lists changes that do not have deprecation warnings. * `schur` methods now return decomposition objects such as `Schur` and `GeneralizedSchur` rather than tuples of arrays ([#27159]). + * `lq` methods now return decomposition objects such as `LQ` + rather than tuples of arrays ([#27159]). + * `countlines` now always counts the last non-empty line even if it does not end with EOL, matching the behavior of `eachline` and `readlines` ([#25845]). @@ -694,8 +697,8 @@ Deprecated or removed * The keyword `immutable` is fully deprecated to `struct`, and `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). - * `lufact` and `schurfact` have respectively been deprecated to - `lu` and `schur` ([#27159]). + * `lufact`, `schurfact`, and `lqfact` have respectively been + deprecated to `lu`, `schur`, and `lq` ([#27159]). 
* Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 9141f874617cf..28a1c2fc24597 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -330,7 +330,6 @@ LinearAlgebra.QR LinearAlgebra.QRCompactWY LinearAlgebra.QRPivoted LinearAlgebra.lqfact! -LinearAlgebra.lqfact LinearAlgebra.lq LinearAlgebra.bkfact LinearAlgebra.bkfact! diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 734006ebe2de6..05ee8ce25847d 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -129,7 +129,6 @@ export qrfact, lq, lqfact!, - lqfact, rank, rdiv!, schur, diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 4e8a2d49f0c51..7e3f5569963fe 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1275,3 +1275,8 @@ export schurfact @deprecate(schurfact(A::StridedMatrix{T}) where T, schur(A)) @deprecate(schurfact(A::StridedMatrix{T},B::StridedMatrix{T}) where {T<:BlasFloat}, schur(A)) @deprecate(schurfact(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB}, schur(A)) + +# deprecate lqfact to lq +export lqfact +@deprecate lqfact(A::StridedMatrix{<:BlasFloat}) lq(A) +@deprecate lqfact(x::Number) lq(x) diff --git a/stdlib/LinearAlgebra/src/lq.jl b/stdlib/LinearAlgebra/src/lq.jl index 1e875454e07f1..87a33b5b19f17 100644 --- a/stdlib/LinearAlgebra/src/lq.jl +++ b/stdlib/LinearAlgebra/src/lq.jl @@ -7,16 +7,28 @@ struct LQ{T,S<:AbstractMatrix} <: Factorization{T} τ::Vector{T} LQ{T,S}(factors::AbstractMatrix{T}, τ::Vector{T}) where {T,S<:AbstractMatrix} = new(factors, τ) end +LQ(factors::AbstractMatrix{T}, τ::Vector{T}) where {T} = LQ{T,typeof(factors)}(factors, τ) + +# iteration for destructuring into components +Base.iterate(S::LQ) = (S.L, Val(:Q)) +Base.iterate(S::LQ, ::Val{:Q}) = (S.Q, Val(:done)) +Base.iterate(S::LQ, ::Val{:done}) = nothing + +# indexing for destructuring into components +@inline function Base.getindex(S::LQ, i::Integer) + i == 1 ? (return S.L) : + i == 2 ? (return S.Q) : + throw(BoundsError(S, i)) +end struct LQPackedQ{T,S<:AbstractMatrix} <: AbstractMatrix{T} factors::Matrix{T} τ::Vector{T} LQPackedQ{T,S}(factors::AbstractMatrix{T}, τ::Vector{T}) where {T,S<:AbstractMatrix} = new(factors, τ) end - -LQ(factors::AbstractMatrix{T}, τ::Vector{T}) where {T} = LQ{T,typeof(factors)}(factors, τ) LQPackedQ(factors::AbstractMatrix{T}, τ::Vector{T}) where {T} = LQPackedQ{T,typeof(factors)}(factors, τ) + """ lqfact!(A) -> LQ @@ -25,40 +37,41 @@ matrix as a workspace. See also [`lq`](@ref). """ lqfact!(A::StridedMatrix{<:BlasFloat}) = LQ(LAPACK.gelqf!(A)...) """ - lqfact(A) -> LQ + lq(A) -> S::LQ -Compute the LQ factorization of `A`. See also [`lq`](@ref). -""" -lqfact(A::StridedMatrix{<:BlasFloat}) = lqfact!(copy(A)) -lqfact(x::Number) = lqfact(fill(x,1,1)) +Compute the LQ decomposition of `A`. The decomposition's lower triangular +component can be obtained from the `LQ` object `S` via `S.L`, and the +orthogonal/unitary component via `S.Q`, such that `A ≈ S.L*S.Q`. +Iterating the decomposition produces the components `S.L` and `S.Q`. + +The LQ decomposition is the QR decomposition of `transpose(A)`. + +# Examples +```jldoctest +julia> A = [5. 
7.; -2. -4.] +2×2 Array{Float64,2}: + 5.0 7.0 + -2.0 -4.0 + +julia> S = lq(A) +LQ{Float64,Array{Float64,2}} with factors L and Q: +[-8.60233 0.0; 4.41741 -0.697486] +[-0.581238 -0.813733; -0.813733 0.581238] + +julia> S.L * S.Q +2×2 Array{Float64,2}: + 5.0 7.0 + -2.0 -4.0 + +julia> l, q = S; # destructuring via iteration + +julia> l == S.L && q == S.Q +true +``` """ - lq(A; full = false) -> L, Q - -Perform an LQ factorization of `A` such that `A = L*Q`. The default (`full = false`) -computes a factorization with possibly-rectangular `L` and `Q`, commonly the "thin" -factorization. The LQ factorization is the QR factorization of `transpose(A)`. If the explicit, -full/square form of `Q` is requested via `full = true`, `L` is not extended with zeros. - -!!! note - While in QR factorization the "thin" factorization is so named due to yielding - either a square or "tall"/"thin" rectangular factor `Q`, in LQ factorization the - "thin" factorization somewhat confusingly produces either a square or "short"/"wide" - rectangular factor `Q`. "Thin" factorizations more broadly are also - referred to as "reduced" factorizatons. -""" -function lq(A::Union{Number,AbstractMatrix}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) - # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 - if thin != nothing - Base.depwarn(string("the `thin` keyword argument in `lq(A; thin = $(thin))` has ", - "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g. `lq(A; full = $(!thin))`."), :lq) - full::Bool = !thin - end - F = lqfact(A) - L, Q = F.L, F.Q - return L, !full ? Array(Q) : lmul!(Q, Matrix{eltype(Q)}(I, size(Q.factors, 2), size(Q.factors, 2))) -end +lq(A::StridedMatrix{<:BlasFloat}) = lqfact!(copy(A)) +lq(x::Number) = lq(fill(x,1,1)) copy(A::LQ) = LQ(copy(A.factors), copy(A.τ)) diff --git a/stdlib/LinearAlgebra/test/lq.jl b/stdlib/LinearAlgebra/test/lq.jl index cfa25a2ba87ab..ddee110703fdd 100644 --- a/stdlib/LinearAlgebra/test/lq.jl +++ b/stdlib/LinearAlgebra/test/lq.jl @@ -38,14 +38,14 @@ rectangularQ(Q::LinearAlgebra.LQPackedQ) = convert(Array, Q) α = rand(eltya) aα = fill(α,1,1) - @test lqfact(α).L*lqfact(α).Q ≈ lqfact(aα).L*lqfact(aα).Q - @test lq(α)[1]*lq(α)[2] ≈ lqfact(aα).L*lqfact(aα).Q - @test abs(lqfact(α).Q[1,1]) ≈ one(eltya) + @test lq(α).L*lq(α).Q ≈ lq(aα).L*lq(aα).Q + @test lq(α)[1]*lq(α)[2] ≈ lq(aα).L*lq(aα).Q + @test abs(lq(α).Q[1,1]) ≈ one(eltya) tab = promote_type(eltya,eltyb) for i = 1:2 let a = i == 1 ? a : view(a, 1:n - 1, 1:n - 1), b = i == 1 ? b : view(b, 1:n - 1), n = i == 1 ? n : n - 1 - lqa = lqfact(a) + lqa = lq(a) l,q = lqa.L, lqa.Q qra = qrfact(a) @testset "Basic ops" begin @@ -93,7 +93,7 @@ rectangularQ(Q::LinearAlgebra.LQPackedQ) = convert(Array, Q) end @testset "Matmul with LQ factorizations" begin - lqa = lqfact(a[:,1:n1]) + lqa = lq(a[:,1:n1]) l,q = lqa.L, lqa.Q @test rectangularQ(q)*rectangularQ(q)' ≈ Matrix(I, n1, n1) @test squareQ(q)'*squareQ(q) ≈ Matrix(I, n1, n1) @@ -105,43 +105,6 @@ rectangularQ(Q::LinearAlgebra.LQPackedQ) = convert(Array, Q) end end -@testset "correct form of Q from lq(...) 
(#23729)" begin - # where the original matrix (say A) is square or has more rows than columns, - # then A's factorization's triangular factor (say L) should have the same shape - # as A independent of factorization form ("full", "reduced"/"thin"), and A's factorization's - # orthogonal factor (say Q) should be a square matrix of order of A's number of - # columns independent of factorization form ("full", "reduced"/"thin"), and L and Q - # should have multiplication-compatible shapes. - local m, n = 4, 2 - A = randn(m, n) - for full in (false, true) - L, Q = lq(A, full = full) - @test size(L) == (m, n) - @test size(Q) == (n, n) - @test isapprox(A, L*Q) - end - # where the original matrix has strictly fewer rows than columns ... - m, n = 2, 4 - A = randn(m, n) - # ... then, for a rectangular/"thin" factorization of A, L should be a square matrix - # of order of A's number of rows, Q should have the same shape as A, - # and L and Q should have multiplication-compatible shapes - Lrect, Qrect = lq(A, full = false) - @test size(Lrect) == (m, m) - @test size(Qrect) == (m, n) - @test isapprox(A, Lrect * Qrect) - # ... and, for a full factorization of A, L should have the - # same shape as A, Q should be a square matrix of order of A's number of columns, - # and L and Q should have multiplication-compatible shape. but instead the L returned - # has no zero-padding on the right / is L for the rectangular/"thin" factorization, - # so for L and Q to have multiplication-compatible shapes, L must be zero-padded - # to have the shape of A. - Lfull, Qfull = lq(A, full = true) - @test size(Lfull) == (m, m) - @test size(Qfull) == (n, n) - @test isapprox(A, [Lfull zeros(m, n - m)] * Qfull) -end - @testset "getindex on LQPackedQ (#23733)" begin local m, n function getqs(F::LinearAlgebra.LQ) @@ -152,14 +115,14 @@ end end m, n = 3, 3 # reduced Q 3-by-3, full Q 3-by-3 - implicitQ, explicitQ = getqs(lqfact(randn(m, n))) + implicitQ, explicitQ = getqs(lq(randn(m, n))) @test implicitQ[1, 1] == explicitQ[1, 1] @test implicitQ[m, 1] == explicitQ[m, 1] @test implicitQ[1, n] == explicitQ[1, n] @test implicitQ[m, n] == explicitQ[m, n] m, n = 3, 4 # reduced Q 3-by-4, full Q 4-by-4 - implicitQ, explicitQ = getqs(lqfact(randn(m, n))) + implicitQ, explicitQ = getqs(lq(randn(m, n))) @test implicitQ[1, 1] == explicitQ[1, 1] @test implicitQ[m, 1] == explicitQ[m, 1] @test implicitQ[1, n] == explicitQ[1, n] @@ -168,7 +131,7 @@ end @test implicitQ[m+1, n] == explicitQ[m+1, n] m, n = 4, 3 # reduced Q 3-by-3, full Q 3-by-3 - implicitQ, explicitQ = getqs(lqfact(randn(m, n))) + implicitQ, explicitQ = getqs(lq(randn(m, n))) @test implicitQ[1, 1] == explicitQ[1, 1] @test implicitQ[n, 1] == explicitQ[n, 1] @test implicitQ[1, n] == explicitQ[1, n] @@ -181,7 +144,7 @@ end ((3, 3), 3), # A 3-by-3 => full/square Q 3-by-3 ((3, 4), 4), # A 3-by-4 => full/square Q 4-by-4 ((4, 3), 3) )# A 4-by-3 => full/square Q 3-by-3 - @test size(lqfact(randn(mA, nA)).Q) == (nQ, nQ) + @test size(lq(randn(mA, nA)).Q) == (nQ, nQ) end end @@ -195,7 +158,7 @@ end # A_mul_B*(C, Q) (Ac_mul_B*(C, Q)) operations should work for # *-by-n (n-by-*) C, which we test below via n-by-n C for (mA, nA) in ((3, 3), (3, 4), (4, 3)) - implicitQ, explicitQ = getqs(lqfact(randn(mA, nA))) + implicitQ, explicitQ = getqs(lq(randn(mA, nA))) C = randn(nA, nA) @test *(C, implicitQ) ≈ *(C, explicitQ) @test *(C, adjoint(implicitQ)) ≈ *(C, adjoint(explicitQ)) @@ -212,7 +175,7 @@ end # hence we need also test *-by-m C with # A*_mul_B(C, Q) ops, as below via m-by-m C. 
mA, nA = 3, 4 - implicitQ, explicitQ = getqs(lqfact(randn(mA, nA))) + implicitQ, explicitQ = getqs(lq(randn(mA, nA))) C = randn(mA, mA) zeroextCright = hcat(C, zeros(eltype(C), mA)) zeroextCdown = vcat(C, zeros(eltype(C), (1, mA))) From ac57d3e5fe06d926f43aa7d330d7594fed0e4021 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sat, 19 May 2018 12:03:47 -0700 Subject: [PATCH 04/23] Deprecate qrfact to qr. --- NEWS.md | 7 +- doc/src/manual/arrays.md | 48 ++--- .../src/IterativeEigensolvers.jl | 4 +- stdlib/IterativeEigensolvers/test/runtests.jl | 4 +- stdlib/LinearAlgebra/docs/src/index.md | 2 - stdlib/LinearAlgebra/src/LinearAlgebra.jl | 1 - stdlib/LinearAlgebra/src/dense.jl | 4 +- stdlib/LinearAlgebra/src/deprecated.jl | 7 + stdlib/LinearAlgebra/src/generic.jl | 2 +- stdlib/LinearAlgebra/src/qr.jl | 166 +++++++----------- stdlib/LinearAlgebra/test/diagonal.jl | 4 +- stdlib/LinearAlgebra/test/lq.jl | 2 +- stdlib/LinearAlgebra/test/qr.jl | 40 ++--- stdlib/LinearAlgebra/test/special.jl | 4 +- stdlib/SparseArrays/src/linalg.jl | 6 +- stdlib/SuiteSparse/src/deprecated.jl | 7 + stdlib/SuiteSparse/src/spqr.jl | 12 +- stdlib/SuiteSparse/test/spqr.jl | 8 +- test/bitarray.jl | 3 +- 19 files changed, 148 insertions(+), 183 deletions(-) diff --git a/NEWS.md b/NEWS.md index c422ac4460f42..6fdf76a7a1d93 100644 --- a/NEWS.md +++ b/NEWS.md @@ -236,6 +236,9 @@ This section lists changes that do not have deprecation warnings. * `lq` methods now return decomposition objects such as `LQ` rather than tuples of arrays ([#27159]). + * `qr` methods now return decomposition objects such as `QR`, `QRPivoted`, + and `QRCompactWY` rather than tuples of arrays ([#27159]). + * `countlines` now always counts the last non-empty line even if it does not end with EOL, matching the behavior of `eachline` and `readlines` ([#25845]). @@ -697,8 +700,8 @@ Deprecated or removed * The keyword `immutable` is fully deprecated to `struct`, and `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). - * `lufact`, `schurfact`, and `lqfact` have respectively been - deprecated to `lu`, `schur`, and `lq` ([#27159]). + * `lufact`, `schurfact`, `lqfact`, and `qrfact` have respectively + been deprecated to `lu`, `schur`, `lq`, and `qr` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/doc/src/manual/arrays.md b/doc/src/manual/arrays.md index 994fc5f2d69b6..521d160443b12 100644 --- a/doc/src/manual/arrays.md +++ b/doc/src/manual/arrays.md @@ -746,37 +746,37 @@ creating any temporaries, and by calling the appropriate LAPACK function with th dimension size and stride parameters. 
```julia-repl -julia> a = rand(10,10) +julia> a = rand(10, 10) 10×10 Array{Float64,2}: - 0.561255 0.226678 0.203391 0.308912 … 0.750307 0.235023 0.217964 - 0.718915 0.537192 0.556946 0.996234 0.666232 0.509423 0.660788 - 0.493501 0.0565622 0.118392 0.493498 0.262048 0.940693 0.252965 - 0.0470779 0.736979 0.264822 0.228787 0.161441 0.897023 0.567641 - 0.343935 0.32327 0.795673 0.452242 0.468819 0.628507 0.511528 - 0.935597 0.991511 0.571297 0.74485 … 0.84589 0.178834 0.284413 - 0.160706 0.672252 0.133158 0.65554 0.371826 0.770628 0.0531208 - 0.306617 0.836126 0.301198 0.0224702 0.39344 0.0370205 0.536062 - 0.890947 0.168877 0.32002 0.486136 0.096078 0.172048 0.77672 - 0.507762 0.573567 0.220124 0.165816 0.211049 0.433277 0.539476 + 0.517515 0.0348206 0.749042 0.0979679 … 0.75984 0.950481 0.579513 + 0.901092 0.873479 0.134533 0.0697848 0.0586695 0.193254 0.726898 + 0.976808 0.0901881 0.208332 0.920358 0.288535 0.705941 0.337137 + 0.657127 0.0317896 0.772837 0.534457 0.0966037 0.700694 0.675999 + 0.471777 0.144969 0.0718405 0.0827916 0.527233 0.173132 0.694304 + 0.160872 0.455168 0.489254 0.827851 … 0.62226 0.0995456 0.946522 + 0.291857 0.769492 0.68043 0.629461 0.727558 0.910796 0.834837 + 0.775774 0.700731 0.700177 0.0126213 0.00822304 0.327502 0.955181 + 0.9715 0.64354 0.848441 0.241474 0.591611 0.792573 0.194357 + 0.646596 0.575456 0.0995212 0.038517 0.709233 0.477657 0.0507231 julia> b = view(a, 2:2:8,2:2:4) -4×2 SubArray{Float64,2,Array{Float64,2},Tuple{StepRange{Int64,Int64},StepRange{Int64,Int64}},false}: - 0.537192 0.996234 - 0.736979 0.228787 - 0.991511 0.74485 - 0.836126 0.0224702 +4×2 view(::Array{Float64,2}, 2:2:8, 2:2:4) with eltype Float64: + 0.873479 0.0697848 + 0.0317896 0.534457 + 0.455168 0.827851 + 0.700731 0.0126213 -julia> (q,r) = qr(b); +julia> (q, r) = qr(b); julia> q -4×2 Array{Float64,2}: - -0.338809 0.78934 - -0.464815 -0.230274 - -0.625349 0.194538 - -0.527347 -0.534856 +4×4 LinearAlgebra.QRCompactWYQ{Float64,Array{Float64,2}}: + -0.722358 0.227524 -0.247784 -0.604181 + -0.0262896 -0.575919 -0.804227 0.144377 + -0.376419 -0.75072 0.540177 -0.0541979 + -0.579497 0.230151 -0.00552346 0.781782 julia> r 2×2 Array{Float64,2}: - -1.58553 -0.921517 - 0.0 0.866567 + -1.20921 -0.383393 + 0.0 -0.910506 ``` diff --git a/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl b/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl index d21bab552441a..9b527c6112453 100644 --- a/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl +++ b/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl @@ -317,10 +317,10 @@ function _svds(X; nsv::Int = 6, ritzvec::Bool = true, tol::Float64 = 0.0, maxite # left_sv = sqrt(2) * ex[2][ 1:size(X,1), ind ] .* sign.(ex[1][ind]') if size(X, 1) >= size(X, 2) V = ex[2] - U = qr(rmul!(X*V, Diagonal(inv.(svals))))[1] + U = Array(qr(rmul!(X*V, Diagonal(inv.(svals)))).Q) else U = ex[2] - V = qr(rmul!(X'U, Diagonal(inv.(svals))))[1] + V = Array(qr(rmul!(X'U, Diagonal(inv.(svals)))).Q) end # right_sv = sqrt(2) * ex[2][ size(X,1)+1:end, ind ] diff --git a/stdlib/IterativeEigensolvers/test/runtests.jl b/stdlib/IterativeEigensolvers/test/runtests.jl index 6cbad717dec5d..348b7cff1f5d4 100644 --- a/stdlib/IterativeEigensolvers/test/runtests.jl +++ b/stdlib/IterativeEigensolvers/test/runtests.jl @@ -152,8 +152,8 @@ LinearAlgebra.A_mul_B!(rho2::StridedVector{T},Phi::CPM{T},rho::StridedVector{T}) let # Generate random isometry - (Q,R) = qr(randn(100,50)) - Q = reshape(Q,(50,2,50)) + (Q, R) = qr(randn(100, 50)) + Q = reshape(Array(Q), (50, 2, 50)) # 
Construct trace-preserving completely positive map from this Phi = CPM(copy(Q)) (d,v,nconv,numiter,numop,resid) = eigs(Phi,nev=1,which=:LM) diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 28a1c2fc24597..1985e43770760 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -323,8 +323,6 @@ LinearAlgebra.lowrankdowndate! LinearAlgebra.ldltfact LinearAlgebra.ldltfact! LinearAlgebra.qr -LinearAlgebra.qr! -LinearAlgebra.qrfact LinearAlgebra.qrfact! LinearAlgebra.QR LinearAlgebra.QRCompactWY diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 05ee8ce25847d..d33dc5aa2eb07 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -126,7 +126,6 @@ export pinv, qr, qrfact!, - qrfact, lq, lqfact!, rank, diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 77baee243711c..b26a1a1eac410 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -1121,7 +1121,7 @@ systems. For example: `A=factorize(A); x=A\\b; y=A\\C`. | Tridiagonal | LU (see [`lu`](@ref)) | | Symmetric real tridiagonal | LDLt (see [`ldltfact`](@ref)) | | General square | LU (see [`lu`](@ref)) | -| General non-square | QR (see [`qrfact`](@ref)) | +| General non-square | QR (see [`qr`](@ref)) | If `factorize` is called on a Hermitian positive-definite matrix, for instance, then `factorize` will return a Cholesky factorization. @@ -1220,7 +1220,7 @@ function factorize(A::StridedMatrix{T}) where T end return lu(A) end - qrfact(A, Val(true)) + qr(A, Val(true)) end factorize(A::Adjoint) = adjoint(factorize(parent(A))) factorize(A::Transpose) = transpose(factorize(parent(A))) diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 7e3f5569963fe..53de57643d298 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1280,3 +1280,10 @@ export schurfact export lqfact @deprecate lqfact(A::StridedMatrix{<:BlasFloat}) lq(A) @deprecate lqfact(x::Number) lq(x) + +# deprecate qrfact to qr +export qrfact +@deprecate(qrfact(x::Number), qr(x)) +@deprecate(qrfact(v::AbstractVector), qr(v)) +@deprecate(qrfact(A::AbstractMatrix{T}) where T, qr(A)) +@deprecate(qrfact(A::AbstractMatrix{T}, arg) where T, qr(A, arg)) diff --git a/stdlib/LinearAlgebra/src/generic.jl b/stdlib/LinearAlgebra/src/generic.jl index 43ab58bbd4522..8c22d377cb716 100644 --- a/stdlib/LinearAlgebra/src/generic.jl +++ b/stdlib/LinearAlgebra/src/generic.jl @@ -864,7 +864,7 @@ function (\)(A::AbstractMatrix, B::AbstractVecOrMat) end return lu(A) \ B end - return qrfact(A,Val(true)) \ B + return qr(A,Val(true)) \ B end (\)(a::AbstractVector, b::AbstractArray) = pinv(a) * b diff --git a/stdlib/LinearAlgebra/src/qr.jl b/stdlib/LinearAlgebra/src/qr.jl index 2e7f0eb437402..9374a3fa594c4 100644 --- a/stdlib/LinearAlgebra/src/qr.jl +++ b/stdlib/LinearAlgebra/src/qr.jl @@ -5,7 +5,7 @@ QR <: Factorization A QR matrix factorization stored in a packed format, typically obtained from -[`qrfact`](@ref). If ``A`` is an `m`×`n` matrix, then +[`qr`](@ref). If ``A`` is an `m`×`n` matrix, then ```math A = Q R @@ -19,6 +19,8 @@ and coefficients ``\\tau_i`` where: Q = \\prod_{i=1}^{\\min(m,n)} (I - \\tau_i v_i v_i^T). ``` +Iterating the decomposition produces the components `Q` and `R`. + The object has two fields: * `factors` is an `m`×`n` matrix. 
@@ -39,12 +41,25 @@ struct QR{T,S<:AbstractMatrix} <: Factorization{T} end QR(factors::AbstractMatrix{T}, τ::Vector{T}) where {T} = QR{T,typeof(factors)}(factors, τ) +# iteration for destructuring into components +Base.iterate(S::QR) = (S.Q, Val(:R)) +Base.iterate(S::QR, ::Val{:R}) = (S.R, Val(:done)) +Base.iterate(S::QR, ::Val{:done}) = nothing + +# indexing for destructuring into components +@inline function Base.getindex(S::QR, i::Integer) + i == 1 ? (return S.Q) : + i == 2 ? (return S.R) : + throw(BoundsError(S, i)) +end + + # Note. For QRCompactWY factorization without pivoting, the WY representation based method introduced in LAPACK 3.4 """ QRCompactWY <: Factorization A QR matrix factorization stored in a compact blocked format, typically obtained from -[`qrfact`](@ref). If ``A`` is an `m`×`n` matrix, then +[`qr`](@ref). If ``A`` is an `m`×`n` matrix, then ```math A = Q R @@ -62,6 +77,8 @@ Q = \\prod_{i=1}^{\\min(m,n)} (I - \\tau_i v_i v_i^T) = I - V T V^T such that ``v_i`` is the ``i``th column of ``V``, and ``\tau_i`` is the ``i``th diagonal element of ``T``. +Iterating the decomposition produces the components `Q` and `R`. + The object has two fields: * `factors`, as in the [`QR`](@ref) type, is an `m`×`n` matrix. @@ -92,11 +109,23 @@ struct QRCompactWY{S,M<:AbstractMatrix} <: Factorization{S} end QRCompactWY(factors::AbstractMatrix{S}, T::AbstractMatrix{S}) where {S} = QRCompactWY{S,typeof(factors)}(factors, T) +# iteration for destructuring into components +Base.iterate(S::QRCompactWY) = (S.Q, Val(:R)) +Base.iterate(S::QRCompactWY, ::Val{:R}) = (S.R, Val(:done)) +Base.iterate(S::QRCompactWY, ::Val{:done}) = nothing + +# indexing for destructuring into components +@inline function Base.getindex(S::QRCompactWY, i::Integer) + i == 1 ? (return S.Q) : + i == 2 ? (return S.R) : + throw(BoundsError(S, i)) +end + """ QRPivoted <: Factorization A QR matrix factorization with column pivoting in a packed format, typically obtained from -[`qrfact`](@ref). If ``A`` is an `m`×`n` matrix, then +[`qr`](@ref). If ``A`` is an `m`×`n` matrix, then ```math A P = Q R @@ -109,6 +138,8 @@ upper triangular. The matrix ``Q`` is stored as a sequence of Householder reflec Q = \\prod_{i=1}^{\\min(m,n)} (I - \\tau_i v_i v_i^T). ``` +Iterating the decomposition produces the components `Q`, `R`, and `p`. + The object has three fields: * `factors` is an `m`×`n` matrix. @@ -133,6 +164,21 @@ end QRPivoted(factors::AbstractMatrix{T}, τ::Vector{T}, jpvt::Vector{BlasInt}) where {T} = QRPivoted{T,typeof(factors)}(factors, τ, jpvt) +# iteration for destructuring into components +Base.iterate(S::QRPivoted) = (S.Q, Val(:R)) +Base.iterate(S::QRPivoted, ::Val{:R}) = (S.R, Val(:p)) +Base.iterate(S::QRPivoted, ::Val{:p}) = (S.p, Val(:done)) +Base.iterate(S::QRPivoted, ::Val{:done}) = nothing + +# indexing for destructuring into components +@inline function Base.getindex(S::QRPivoted, i::Integer) + i == 1 ? (return S.Q) : + i == 2 ? (return S.R) : + i == 3 ? (return S.p) : + throw(BoundsError(S, i)) +end + + function qrfactUnblocked!(A::AbstractMatrix{T}) where {T} m, n = size(A) τ = zeros(T, min(m,n)) @@ -203,7 +249,7 @@ qrfact!(A::StridedMatrix{<:BlasFloat}) = qrfact!(A, Val(false)) """ qrfact!(A, pivot=Val(false)) -`qrfact!` is the same as [`qrfact`](@ref) when `A` is a subtype of +`qrfact!` is the same as [`qr`](@ref) when `A` is a subtype of `StridedMatrix`, but saves space by overwriting the input `A`, instead of creating a copy. 
An [`InexactError`](@ref) exception is thrown if the factorization produces a number not representable by the element type of `A`, e.g. for integer types. @@ -244,7 +290,7 @@ qrfact!(A::StridedMatrix) = qrfact!(A, Val(false)) _qreltype(::Type{T}) where T = typeof(zero(T)/sqrt(abs2(one(T)))) """ - qrfact(A, pivot=Val(false)) -> F + qr(A, pivot=Val(false)) -> F Compute the QR factorization of the matrix `A`: an orthogonal (or unitary if `A` is complex-valued) matrix `Q`, and an upper triangular matrix `R` such that @@ -269,6 +315,8 @@ The individual components of the factorization `F` can be accessed by indexing w - `F.p`: the permutation vector of the pivot ([`QRPivoted`](@ref) only) - `F.P`: the permutation matrix of the pivot ([`QRPivoted`](@ref) only) +Iterating the decomposition produces the components `Q`, `R`, and if extant `p`. + The following functions are available for the `QR` objects: [`inv`](@ref), [`size`](@ref), and [`\\`](@ref). When `A` is rectangular, `\\` will return a least squares solution and if the solution is not unique, the one with smallest norm is returned. @@ -285,7 +333,7 @@ julia> A = [3.0 -6.0; 4.0 -8.0; 0.0 1.0] 4.0 -8.0 0.0 1.0 -julia> F = qrfact(A) +julia> F = qr(A) LinearAlgebra.QRCompactWY{Float64,Array{Float64,2}} Q factor: 3×3 LinearAlgebra.QRCompactWYQ{Float64,Array{Float64,2}}: @@ -302,119 +350,23 @@ true ``` !!! note - `qrfact` returns multiple types because LAPACK uses several representations + `qr` returns multiple types because LAPACK uses several representations that minimize the memory storage requirements of products of Householder elementary reflectors, so that the `Q` and `R` matrices can be stored compactly rather as two separate dense matrices. """ -function qrfact(A::AbstractMatrix{T}, arg) where T +function qr(A::AbstractMatrix{T}, arg) where T AA = similar(A, _qreltype(T), size(A)) copyto!(AA, A) return qrfact!(AA, arg) end -function qrfact(A::AbstractMatrix{T}) where T +function qr(A::AbstractMatrix{T}) where T AA = similar(A, _qreltype(T), size(A)) copyto!(AA, A) return qrfact!(AA) end -qrfact(x::Number) = qrfact(fill(x,1,1)) - -""" - qr(A, pivot=Val(false); full::Bool = false) -> Q, R, [p] - -Compute the (pivoted) QR factorization of `A` such that either `A = Q*R` or `A[:,p] = Q*R`. -Also see [`qrfact`](@ref). -The default is to compute a "thin" factorization. Note that `R` is not -extended with zeros when a full/square orthogonal factor `Q` is requested (via `full = true`). -""" -function qr(A::Union{Number,AbstractMatrix}, pivot::Union{Val{false},Val{true}} = Val(false); - full::Bool = false, thin::Union{Bool,Nothing} = nothing) - # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 - if thin != nothing - Base.depwarn(string("the `thin` keyword argument in `qr(A, pivot; thin = $(thin))` has ", - "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g. `qr(A, pivot; full = $(!thin))`."), :qr) - full::Bool = !thin - end - return _qr(A, pivot, full = full) -end -function _qr(A::Union{Number,AbstractMatrix}, ::Val{false}; full::Bool = false) - F = qrfact(A, Val(false)) - Q, R = F.Q, F.R - sQf1 = size(Q.factors, 1) - return (!full ? Array(Q) : lmul!(Q, Matrix{eltype(Q)}(I, sQf1, sQf1))), R -end -function _qr(A::Union{Number, AbstractMatrix}, ::Val{true}; full::Bool = false) - F = qrfact(A, Val(true)) - Q, R, p = F.Q, F.R, F.p - sQf1 = size(Q.factors, 1) - return (!full ? 
Array(Q) : lmul!(Q, Matrix{eltype(Q)}(I, sQf1, sQf1))), R, p -end - -""" - qr(v::AbstractVector) -> w, r - -Computes the polar decomposition of a vector. -Returns `w`, a unit vector in the direction of `v`, and -`r`, the norm of `v`. - -See also [`normalize`](@ref), [`normalize!`](@ref), -and [`LinearAlgebra.qr!`](@ref). - -# Examples -```jldoctest -julia> v = [1; 2] -2-element Array{Int64,1}: - 1 - 2 - -julia> w, r = qr(v) -([0.447214, 0.894427], 2.23606797749979) - -julia> w*r == v -true -``` -""" -function qr(v::AbstractVector) - nrm = norm(v) - if !isempty(v) - vv = copy_oftype(v, typeof(v[1]/nrm)) - return __normalize!(vv, nrm), nrm - else - T = typeof(zero(eltype(v))/nrm) - return T[], oneunit(T) - end -end - -""" - LinearAlgebra.qr!(v::AbstractVector) -> w, r - -Computes the polar decomposition of a vector. Instead of returning a new vector -as `qr(v::AbstractVector)`, this function mutates the input vector `v` in place. -Returns `w`, a unit vector in the direction of `v` (this is a mutation of `v`), -and `r`, the norm of `v`. - -See also [`normalize`](@ref), [`normalize!`](@ref), -and [`qr`](@ref). - -# Examples -```jldoctest -julia> v = [1.; 2.] -2-element Array{Float64,1}: - 1.0 - 2.0 - -julia> w, r = LinearAlgebra.qr!(v) -([0.447214, 0.894427], 2.23606797749979) - -julia> w === v -true -``` -""" -function qr!(v::AbstractVector) - nrm = norm(v) - __normalize!(v, nrm), nrm -end +qr(x::Number) = qr(fill(x,1,1)) +qr(v::AbstractVector) = qr(reshape(v, (length(v), 1))) # Conversions QR{T}(A::QR) where {T} = QR(convert(AbstractMatrix{T}, A.factors), convert(Vector{T}, A.τ)) diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl index 06a5d78ba2c84..caabdd291eca3 100644 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -360,9 +360,9 @@ end @testset "multiplication of QR Q-factor and Diagonal (#16615 spot test)" begin D = Diagonal(randn(5)) - Q = qrfact(randn(5, 5)).Q + Q = qr(randn(5, 5)).Q @test D * Q' == Array(D) * Q' - Q = qrfact(randn(5, 5), Val(true)).Q + Q = qr(randn(5, 5), Val(true)).Q @test_throws ArgumentError lmul!(Q, D) end diff --git a/stdlib/LinearAlgebra/test/lq.jl b/stdlib/LinearAlgebra/test/lq.jl index ddee110703fdd..f8ab28755d06b 100644 --- a/stdlib/LinearAlgebra/test/lq.jl +++ b/stdlib/LinearAlgebra/test/lq.jl @@ -47,7 +47,7 @@ rectangularQ(Q::LinearAlgebra.LQPackedQ) = convert(Array, Q) let a = i == 1 ? a : view(a, 1:n - 1, 1:n - 1), b = i == 1 ? b : view(b, 1:n - 1), n = i == 1 ? 
n : n - 1 lqa = lq(a) l,q = lqa.L, lqa.Q - qra = qrfact(a) + qra = qr(a) @testset "Basic ops" begin @test size(lqa,1) == size(a,1) @test size(lqa,3) == 1 diff --git a/stdlib/LinearAlgebra/test/qr.jl b/stdlib/LinearAlgebra/test/qr.jl index 5bd73c4ccd429..ebfa56888671c 100644 --- a/stdlib/LinearAlgebra/test/qr.jl +++ b/stdlib/LinearAlgebra/test/qr.jl @@ -40,15 +40,15 @@ rectangularQ(Q::LinearAlgebra.AbstractQ) = convert(Array, Q) @testset "QR decomposition of a Number" begin α = rand(eltyb) aα = fill(α, 1, 1) - @test qrfact(α).Q * qrfact(α).R ≈ qrfact(aα).Q * qrfact(aα).R - @test abs(qrfact(α).Q[1,1]) ≈ one(eltyb) + @test qr(α).Q * qr(α).R ≈ qr(aα).Q * qr(aα).R + @test abs(qr(α).Q[1,1]) ≈ one(eltyb) end for (a, b) in ((raw_a, raw_b), (view(raw_a, 1:n-1, 1:n-1), view(raw_b, 1:n-1, 1))) a_1 = size(a, 1) @testset "QR decomposition (without pivoting)" begin - qra = @inferred qrfact(a) + qra = @inferred qr(a) @inferred qr(a) q, r = qra.Q, qra.R @test_throws ErrorException qra.Z @@ -73,7 +73,7 @@ rectangularQ(Q::LinearAlgebra.AbstractQ) = convert(Array, Q) @test qrstring == "$(summary(qra))\nQ factor:\n$qstring\nR factor:\n$rstring" end @testset "Thin QR decomposition (without pivoting)" begin - qra = @inferred qrfact(a[:, 1:n1], Val(false)) + qra = @inferred qr(a[:, 1:n1], Val(false)) @inferred qr(a[:, 1:n1], Val(false)) q,r = qra.Q, qra.R @test_throws ErrorException qra.Z @@ -91,7 +91,6 @@ rectangularQ(Q::LinearAlgebra.AbstractQ) = convert(Array, Q) end end @testset "(Automatic) Fat (pivoted) QR decomposition" begin - @inferred qrfact(a, Val(true)) @inferred qr(a, Val(true)) qrpa = factorize(a[1:n1,:]) @@ -150,7 +149,7 @@ rectangularQ(Q::LinearAlgebra.AbstractQ) = convert(Array, Q) @test_throws DimensionMismatch LinearAlgebra.lmul!(q,zeros(eltya,n1+1)) @test_throws DimensionMismatch LinearAlgebra.lmul!(adjoint(q), zeros(eltya,n1+1)) - qra = qrfact(a[:,1:n1], Val(false)) + qra = qr(a[:,1:n1], Val(false)) q, r = qra.Q, qra.R @test rmul!(copy(squareQ(q)'), q) ≈ Matrix(I, n, n) @test_throws DimensionMismatch rmul!(Matrix{eltya}(I, n+1, n+1),q) @@ -164,17 +163,17 @@ rectangularQ(Q::LinearAlgebra.AbstractQ) = convert(Array, Q) end @testset "transpose errors" begin - @test_throws MethodError transpose(qrfact(randn(3,3))) - @test_throws MethodError adjoint(qrfact(randn(3,3))) - @test_throws MethodError transpose(qrfact(randn(3,3), Val(false))) - @test_throws MethodError adjoint(qrfact(randn(3,3), Val(false))) - @test_throws MethodError transpose(qrfact(big.(randn(3,3)))) - @test_throws MethodError adjoint(qrfact(big.(randn(3,3)))) + @test_throws MethodError transpose(qr(randn(3,3))) + @test_throws MethodError adjoint(qr(randn(3,3))) + @test_throws MethodError transpose(qr(randn(3,3), Val(false))) + @test_throws MethodError adjoint(qr(randn(3,3), Val(false))) + @test_throws MethodError transpose(qr(big.(randn(3,3)))) + @test_throws MethodError adjoint(qr(big.(randn(3,3)))) end @testset "Issue 7304" begin A = [-√.5 -√.5; -√.5 √.5] - Q = rectangularQ(qrfact(A).Q) + Q = rectangularQ(qr(A).Q) @test vecnorm(A-Q) < eps() end @@ -184,15 +183,16 @@ end for T in (Tr, Complex{Tr}) v = convert(Vector{T}, vr) nv, nm = qr(v) - @test norm(nv - [0.6, 0.8], Inf) < eps(Tr) - @test nm == 5.0 + @test norm(nv - [-0.6 -0.8; -0.8 0.6], Inf) < eps(Tr) + @test nm == fill(-5.0, 1, 1) end end end @testset "QR on Ints" begin - @test qr(Int[]) == (Int[],1) - @test LinearAlgebra.qr!(Int[1]) == (Int[1],1) + # not sure what to do about this edge case now that we build decompositions + # for qr(...), so for now just commenting this out 
+ # @test qr(Int[]) == (Int[],1) B = rand(7,2) @test (1:7)\B ≈ Vector(1:7)\B @@ -206,7 +206,7 @@ end A = zeros(1, 2) B = zeros(1, 1) @test A \ B == zeros(2, 1) - @test qrfact(A, Val(true)) \ B == zeros(2, 1) + @test qr(A, Val(true)) \ B == zeros(2, 1) end @testset "Issue 24107" begin @@ -224,8 +224,8 @@ end Ac = copy(A') b = randn(3) c = randn(2) - @test A \b ≈ ldiv!(c, qrfact(A ), b) - @test Ac\c ≈ ldiv!(b, qrfact(Ac, Val(true)), c) + @test A \b ≈ ldiv!(c, qr(A ), b) + @test Ac\c ≈ ldiv!(b, qr(Ac, Val(true)), c) end end # module TestQR diff --git a/stdlib/LinearAlgebra/test/special.jl b/stdlib/LinearAlgebra/test/special.jl index cc04f9421fb1f..fbf63203f8ed8 100644 --- a/stdlib/LinearAlgebra/test/special.jl +++ b/stdlib/LinearAlgebra/test/special.jl @@ -116,10 +116,10 @@ end a = rand(n,n) atri = typ(a) b = rand(n,n) - qrb = qrfact(b,Val(true)) + qrb = qr(b,Val(true)) @test *(atri, adjoint(qrb.Q)) ≈ Matrix(atri) * qrb.Q' @test rmul!(copy(atri), adjoint(qrb.Q)) ≈ Matrix(atri) * qrb.Q' - qrb = qrfact(b,Val(false)) + qrb = qr(b,Val(false)) @test *(atri, adjoint(qrb.Q)) ≈ Matrix(atri) * qrb.Q' @test rmul!(copy(atri), adjoint(qrb.Q)) ≈ Matrix(atri) * qrb.Q' end diff --git a/stdlib/SparseArrays/src/linalg.jl b/stdlib/SparseArrays/src/linalg.jl index 7306c9af4216f..fbd3e5b4ce3eb 100644 --- a/stdlib/SparseArrays/src/linalg.jl +++ b/stdlib/SparseArrays/src/linalg.jl @@ -939,7 +939,7 @@ function \(A::SparseMatrixCSC, B::AbstractVecOrMat) end return \(lu(A), B) else - return \(qrfact(A), B) + return \(qr(A), B) end end for (xformtype, xformop) in ((:Adjoint, :adjoint), (:Transpose, :transpose)) @@ -962,7 +962,7 @@ for (xformtype, xformop) in ((:Adjoint, :adjoint), (:Transpose, :transpose)) end return \($xformop(lu(A)), B) else - return \($xformop(qrfact(A)), B) + return \($xformop(qr(A)), B) end end end @@ -985,7 +985,7 @@ function factorize(A::SparseMatrixCSC) end return lu(A) else - return qrfact(A) + return qr(A) end end diff --git a/stdlib/SuiteSparse/src/deprecated.jl b/stdlib/SuiteSparse/src/deprecated.jl index f5e5e5e157b7c..2576e419ded98 100644 --- a/stdlib/SuiteSparse/src/deprecated.jl +++ b/stdlib/SuiteSparse/src/deprecated.jl @@ -55,3 +55,10 @@ end @deprecate(lufact(A::SparseMatrixCSC{<:Union{ComplexF16,ComplexF32},Ti}) where {Ti<:UMFITypes}, lu(A)) @deprecate(lufact(A::Union{SparseMatrixCSC{T},SparseMatrixCSC{Complex{T}}}) where {T<:AbstractFloat}, lu(A)) end + +# deprecate qrfact to qr +@eval SuiteSparse.SPQR begin + import LinearAlgebra: qrfact + @deprecate(qrfact(A::SparseMatrixCSC{Tv}; tol = _default_tol(A)) where {Tv<:Union{ComplexF64,Float64}}, qr(A; tol=tol)) + @deprecate(qrfact(A::SparseMatrixCSC; tol = _default_tol(A)), qr(A; tol=tol)) +end diff --git a/stdlib/SuiteSparse/src/spqr.jl b/stdlib/SuiteSparse/src/spqr.jl index 05467cdc4bef0..5059f1d75a587 100644 --- a/stdlib/SuiteSparse/src/spqr.jl +++ b/stdlib/SuiteSparse/src/spqr.jl @@ -136,7 +136,7 @@ Base.size(Q::QRSparseQ) = (size(Q.factors, 1), size(Q.factors, 1)) _default_tol(A::SparseMatrixCSC) = 20*sum(size(A))*eps(real(eltype(A)))*maximum(norm(view(A, :, i)) for i in 1:size(A, 2)) -function LinearAlgebra.qrfact(A::SparseMatrixCSC{Tv}; tol = _default_tol(A)) where {Tv <: CHOLMOD.VTypes} +function LinearAlgebra.qr(A::SparseMatrixCSC{Tv}; tol = _default_tol(A)) where {Tv <: CHOLMOD.VTypes} R = Ref{Ptr{CHOLMOD.C_Sparse{Tv}}}() E = Ref{Ptr{CHOLMOD.SuiteSparse_long}}() H = Ref{Ptr{CHOLMOD.C_Sparse{Tv}}}() @@ -156,7 +156,7 @@ function LinearAlgebra.qrfact(A::SparseMatrixCSC{Tv}; tol = _default_tol(A)) whe end """ - qrfact(A) -> 
QRSparse + qr(A) -> QRSparse Compute the `QR` factorization of a sparse matrix `A`. Fill-reducing row and column permutations are used such that `F.R = F.Q'*A[F.prow,F.pcol]`. The main application of this type is to @@ -171,7 +171,7 @@ julia> A = sparse([1,2,3,4], [1,1,2,2], [1.0,1.0,1.0,1.0]) [3, 2] = 1.0 [4, 2] = 1.0 -julia> qrfact(A) +julia> qr(A) Base.SparseArrays.SPQR.QRSparse{Float64,Int64} Q factor: 4×4 Base.SparseArrays.SPQR.QRSparseQ{Float64,Int64}: @@ -195,8 +195,6 @@ Column permutation: 2 ``` """ -LinearAlgebra.qrfact(A::SparseMatrixCSC; tol = _default_tol(A)) = qrfact(A, Val{true}, tol = tol) - LinearAlgebra.qr(A::SparseMatrixCSC; tol = _default_tol(A)) = qr(A, Val{true}, tol = tol) function LinearAlgebra.lmul!(Q::QRSparseQ, A::StridedVecOrMat) @@ -270,7 +268,7 @@ Extract factors of a QRSparse factorization. Possible values of `d` are # Examples ```jldoctest -julia> F = qrfact(sparse([1,3,2,3,4], [1,1,2,3,4], [1.0,2.0,3.0,4.0,5.0])); +julia> F = qr(sparse([1,3,2,3,4], [1,1,2,3,4], [1.0,2.0,3.0,4.0,5.0])); julia> F.Q 4×4 Base.SparseArrays.SPQR.QRSparseQ{Float64,Int64}: @@ -407,7 +405,7 @@ julia> A = sparse([1,2,4], [1,1,1], [1.0,1.0,1.0], 4, 2) [2, 1] = 1.0 [4, 1] = 1.0 -julia> qrfact(A)\\fill(1.0, 4) +julia> qr(A)\\fill(1.0, 4) 2-element Array{Float64,1}: 1.0 0.0 diff --git a/stdlib/SuiteSparse/test/spqr.jl b/stdlib/SuiteSparse/test/spqr.jl index 38dbfcd856e14..5880c0a8a0dd6 100644 --- a/stdlib/SuiteSparse/test/spqr.jl +++ b/stdlib/SuiteSparse/test/spqr.jl @@ -8,7 +8,7 @@ using LinearAlgebra: rmul!, lmul!, Adjoint, Transpose m, n = 100, 10 nn = 100 -@test size(qrfact(sprandn(m, n, 0.1)).Q) == (m, m) +@test size(qr(sprandn(m, n, 0.1)).Q) == (m, m) @testset "element type of A: $eltyA" for eltyA in (Float64, Complex{Float64}) if eltyA <: Real @@ -17,7 +17,7 @@ nn = 100 A = sparse([1:n; rand(1:m, nn - n)], [1:n; rand(1:n, nn - n)], complex.(randn(nn), randn(nn)), m, n) end - F = qrfact(A) + F = qr(A) @test size(F) == (m,n) @test size(F, 1) == m @test size(F, 2) == n @@ -67,7 +67,7 @@ nn = 100 end # Make sure that conversion to Sparse doesn't use SuiteSparse's symmetric flag - @test qrfact(SparseMatrixCSC{eltyA}(I, 5, 5)) \ fill(eltyA(1), 5) == fill(1, 5) + @test qr(SparseMatrixCSC{eltyA}(I, 5, 5)) \ fill(eltyA(1), 5) == fill(1, 5) end @testset "basic solution of rank deficient ls" begin @@ -83,7 +83,7 @@ end @testset "Issue 26368" begin A = sparse([0.0 1 0 0; 0 0 0 0]) - F = qrfact(A) + F = qr(A) @test F.Q*F.R == A[F.prow,F.pcol] end diff --git a/test/bitarray.jl b/test/bitarray.jl index cc1b537df7e7f..c21467e7906ae 100644 --- a/test/bitarray.jl +++ b/test/bitarray.jl @@ -1468,7 +1468,8 @@ timesofar("cat") b1 = bitrand(n1, n1) @check_bit_operation svd(b1) - @check_bit_operation qr(b1) + @test ((qrb1, qrb1A) = (qr(b1), qr(Array(b1))); + qrb1.Q == qrb1A.Q && qrb1.R == qrb1A.R) b1 = bitrand(v1) @check_bit_operation diagm(0 => b1) BitMatrix From 528de424fbfc4985a4cc01c177e6a7f2fa079173 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 13:58:21 -0700 Subject: [PATCH 05/23] Deprecate ldltfact to ldlt. 
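
The renamed function is used exactly as `ldltfact` was; only the name changes.
A minimal usage sketch (values borrowed from the updated `ldlt` docstring
below; illustrative only):

```julia
using LinearAlgebra
S = SymTridiagonal([3., 4., 5.], [1., 2.])
F = ldlt(S)            # previously ldltfact(S); returns an LDLt factorization
x = F \ [6., 7., 8.]   # solve S*x = b via the factorization
```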
--- NEWS.md | 4 +- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/dense.jl | 4 +- stdlib/LinearAlgebra/src/deprecated.jl | 4 ++ stdlib/LinearAlgebra/src/ldlt.jl | 12 +++--- stdlib/LinearAlgebra/src/tridiag.jl | 2 +- stdlib/SuiteSparse/src/cholmod.jl | 14 +++--- stdlib/SuiteSparse/src/deprecated.jl | 12 ++++++ stdlib/SuiteSparse/test/cholmod.jl | 52 +++++++++++------------ 10 files changed, 62 insertions(+), 46 deletions(-) diff --git a/NEWS.md b/NEWS.md index 6fdf76a7a1d93..ee9f178973422 100644 --- a/NEWS.md +++ b/NEWS.md @@ -700,8 +700,8 @@ Deprecated or removed * The keyword `immutable` is fully deprecated to `struct`, and `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). - * `lufact`, `schurfact`, `lqfact`, and `qrfact` have respectively - been deprecated to `lu`, `schur`, `lq`, and `qr` ([#27159]). + * `lufact`, `schurfact`, `lqfact`, `qrfact`, and `ldltfact` have respectively + been deprecated to `lu`, `schur`, `lq`, `qr`, and `ldlt` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 1985e43770760..4b1c776df3d1e 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -320,7 +320,7 @@ LinearAlgebra.lowrankupdate LinearAlgebra.lowrankdowndate LinearAlgebra.lowrankupdate! LinearAlgebra.lowrankdowndate! -LinearAlgebra.ldltfact +LinearAlgebra.ldlt LinearAlgebra.ldltfact! LinearAlgebra.qr LinearAlgebra.qrfact! diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index d33dc5aa2eb07..876f03556de28 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -103,7 +103,7 @@ export kron, ldiv!, ldltfact!, - ldltfact, + ldlt, linreg, logabsdet, logdet, diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index b26a1a1eac410..4147b2a452be5 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -1114,12 +1114,12 @@ systems. For example: `A=factorize(A); x=A\\b; y=A\\C`. 
|:---------------------------|:-----------------------------------------------| | Positive-definite | Cholesky (see [`cholfact`](@ref)) | | Dense Symmetric/Hermitian | Bunch-Kaufman (see [`bkfact`](@ref)) | -| Sparse Symmetric/Hermitian | LDLt (see [`ldltfact`](@ref)) | +| Sparse Symmetric/Hermitian | LDLt (see [`ldlt`](@ref)) | | Triangular | Triangular | | Diagonal | Diagonal | | Bidiagonal | Bidiagonal | | Tridiagonal | LU (see [`lu`](@ref)) | -| Symmetric real tridiagonal | LDLt (see [`ldltfact`](@ref)) | +| Symmetric real tridiagonal | LDLt (see [`ldlt`](@ref)) | | General square | LU (see [`lu`](@ref)) | | General non-square | QR (see [`qr`](@ref)) | diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 53de57643d298..0f5454ee35005 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1287,3 +1287,7 @@ export qrfact @deprecate(qrfact(v::AbstractVector), qr(v)) @deprecate(qrfact(A::AbstractMatrix{T}) where T, qr(A)) @deprecate(qrfact(A::AbstractMatrix{T}, arg) where T, qr(A, arg)) + +# deprecate ldltfact to ldlt +export ldltfact +@deprecate(ldltfact(M::SymTridiagonal{T}) where T, ldlt(M)) diff --git a/stdlib/LinearAlgebra/src/ldlt.jl b/stdlib/LinearAlgebra/src/ldlt.jl index 7db241411102f..86a9ca0c1105c 100644 --- a/stdlib/LinearAlgebra/src/ldlt.jl +++ b/stdlib/LinearAlgebra/src/ldlt.jl @@ -19,7 +19,7 @@ Factorization{T}(F::LDLt{S,U}) where {T,S,U} = LDLt{T,U}(F) """ ldltfact!(S::SymTridiagonal) -> LDLt -Same as [`ldltfact`](@ref), but saves space by overwriting the input `S`, instead of creating a copy. +Same as [`ldlt`](@ref), but saves space by overwriting the input `S`, instead of creating a copy. # Examples ```jldoctest @@ -53,11 +53,11 @@ function ldltfact!(S::SymTridiagonal{T,V}) where {T<:Real,V} end """ - ldltfact(S::SymTridiagonal) -> LDLt + ldlt(S::SymTridiagonal) -> LDLt Compute an `LDLt` factorization of the real symmetric tridiagonal matrix `S` such that `S = L*Diagonal(d)*L'` where `L` is a unit lower triangular matrix and `d` is a vector. The main use of an `LDLt` -factorization `F = ldltfact(S)` is to solve the linear system of equations `Sx = b` with `F\\b`. +factorization `F = ldlt(S)` is to solve the linear system of equations `Sx = b` with `F\\b`. # Examples ```jldoctest @@ -67,7 +67,7 @@ julia> S = SymTridiagonal([3., 4., 5.], [1., 2.]) 1.0 4.0 2.0 ⋅ 2.0 5.0 -julia> ldltS = ldltfact(S); +julia> ldltS = ldlt(S); julia> b = [6., 7., 8.]; @@ -84,12 +84,12 @@ julia> S \\ b 1.3488372093023255 ``` """ -function ldltfact(M::SymTridiagonal{T}) where T +function ldlt(M::SymTridiagonal{T}) where T S = typeof(zero(T)/one(T)) return S == T ? ldltfact!(copy(M)) : ldltfact!(SymTridiagonal{S}(M)) end -factorize(S::SymTridiagonal) = ldltfact(S) +factorize(S::SymTridiagonal) = ldlt(S) function ldiv!(S::LDLt{T,M}, B::AbstractVecOrMat{T}) where {T,M<:SymTridiagonal{T}} n, nrhs = size(B, 1), size(B, 2) diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl index c0a040e66bf8b..d2d298a784082 100644 --- a/stdlib/LinearAlgebra/src/tridiag.jl +++ b/stdlib/LinearAlgebra/src/tridiag.jl @@ -181,7 +181,7 @@ function mul!(C::StridedVecOrMat, S::SymTridiagonal, B::StridedVecOrMat) return C end -(\)(T::SymTridiagonal, B::StridedVecOrMat) = ldltfact(T)\B +(\)(T::SymTridiagonal, B::StridedVecOrMat) = ldlt(T)\B eigfact!(A::SymTridiagonal{<:BlasReal}) = Eigen(LAPACK.stegr!('V', A.dv, A.ev)...) 
eigfact(A::SymTridiagonal{T}) where T = eigfact!(copy_oftype(A, eigtype(T))) diff --git a/stdlib/SuiteSparse/src/cholmod.jl b/stdlib/SuiteSparse/src/cholmod.jl index 1255f420b8add..6533034ef5e9f 100644 --- a/stdlib/SuiteSparse/src/cholmod.jl +++ b/stdlib/SuiteSparse/src/cholmod.jl @@ -8,7 +8,7 @@ import Base: (*), convert, copy, eltype, getindex, getproperty, show, size, using LinearAlgebra import LinearAlgebra: (\), cholfact, cholfact!, det, diag, ishermitian, isposdef, - issuccess, issymmetric, ldltfact, ldltfact!, logdet + issuccess, issymmetric, ldlt, ldltfact!, logdet using SparseArrays import Libdl @@ -1477,7 +1477,7 @@ Compute the ``LDL'`` factorization of `A`, reusing the symbolic factorization `F view of a `SparseMatrixCSC`. Note that even if `A` doesn't have the type tag, it must still be symmetric or Hermitian. -See also [`ldltfact`](@ref). +See also [`ldlt`](@ref). !!! note This method uses the CHOLMOD library from SuiteSparse, which only supports @@ -1493,7 +1493,7 @@ ldltfact!(F::Factor, A::Union{SparseMatrixCSC{T}, shift = 0.0) where {T<:Real} = ldltfact!(F, Sparse(A), shift = shift) -function ldltfact(A::Sparse; shift::Real=0.0, +function ldlt(A::Sparse; shift::Real=0.0, perm::AbstractVector{SuiteSparse_long}=SuiteSparse_long[]) cm = defaults(common_struct) @@ -1514,13 +1514,13 @@ function ldltfact(A::Sparse; shift::Real=0.0, end """ - ldltfact(A; shift = 0.0, perm=Int[]) -> CHOLMOD.Factor + ldlt(A; shift = 0.0, perm=Int[]) -> CHOLMOD.Factor Compute the ``LDL'`` factorization of a sparse matrix `A`. `A` must be a [`SparseMatrixCSC`](@ref) or a [`Symmetric`](@ref)/[`Hermitian`](@ref) view of a `SparseMatrixCSC`. Note that even if `A` doesn't have the type tag, it must still be symmetric or Hermitian. -A fill-reducing permutation is used. `F = ldltfact(A)` is most frequently +A fill-reducing permutation is used. `F = ldlt(A)` is most frequently used to solve systems of equations `A*x = b` with `F\\b`. The returned factorization object `F` also supports the methods [`diag`](@ref), [`det`](@ref), [`logdet`](@ref), and [`inv`](@ref). @@ -1547,11 +1547,11 @@ it should be a permutation of `1:size(A,1)` giving the ordering to use Many other functions from CHOLMOD are wrapped but not exported from the `Base.SparseArrays.CHOLMOD` module. """ -ldltfact(A::Union{SparseMatrixCSC{T},SparseMatrixCSC{Complex{T}}, +ldlt(A::Union{SparseMatrixCSC{T},SparseMatrixCSC{Complex{T}}, Symmetric{T,SparseMatrixCSC{T,SuiteSparse_long}}, Hermitian{Complex{T},SparseMatrixCSC{Complex{T},SuiteSparse_long}}, Hermitian{T,SparseMatrixCSC{T,SuiteSparse_long}}}; - kws...) where {T<:Real} = ldltfact(Sparse(A); kws...) + kws...) where {T<:Real} = ldlt(Sparse(A); kws...) 
## Rank updates diff --git a/stdlib/SuiteSparse/src/deprecated.jl b/stdlib/SuiteSparse/src/deprecated.jl index 2576e419ded98..889e7d4bc391a 100644 --- a/stdlib/SuiteSparse/src/deprecated.jl +++ b/stdlib/SuiteSparse/src/deprecated.jl @@ -62,3 +62,15 @@ end @deprecate(qrfact(A::SparseMatrixCSC{Tv}; tol = _default_tol(A)) where {Tv<:Union{ComplexF64,Float64}}, qr(A; tol=tol)) @deprecate(qrfact(A::SparseMatrixCSC; tol = _default_tol(A)), qr(A; tol=tol)) end + +# deprecate ldltfact to ldlt +@eval SuiteSparse.CHOLMOD begin + import LinearAlgebra: ldltfact + @deprecate(ldltfact(A::Sparse; shift::Real=0.0, perm::AbstractVector{SuiteSparse_long}=SuiteSparse_long[]), ldlt(A; shift=shift, perm=perm)) + @deprecate(ldltfact(A::Union{SparseMatrixCSC{T},SparseMatrixCSC{Complex{T}}, + Symmetric{T,SparseMatrixCSC{T,SuiteSparse_long}}, + Hermitian{Complex{T},SparseMatrixCSC{Complex{T},SuiteSparse_long}}, + Hermitian{T,SparseMatrixCSC{T,SuiteSparse_long}}}; + kws...) where {T<:Real}, + ldlt(A; kws...)) +end diff --git a/stdlib/SuiteSparse/test/cholmod.jl b/stdlib/SuiteSparse/test/cholmod.jl index 3dac1c92eb255..47c9dd2f792f5 100644 --- a/stdlib/SuiteSparse/test/cholmod.jl +++ b/stdlib/SuiteSparse/test/cholmod.jl @@ -107,11 +107,11 @@ srand(123) x = fill(1., n) b = A*x - chma = ldltfact(A) # LDL' form + chma = ldlt(A) # LDL' form @test CHOLMOD.isvalid(chma) @test unsafe_load(pointer(chma)).is_ll == 0 # check that it is in fact an LDLt @test chma\b ≈ x - @test nnz(ldltfact(A, perm=1:size(A,1))) > nnz(chma) + @test nnz(ldlt(A, perm=1:size(A,1))) > nnz(chma) @test size(chma) == size(A) chmal = CHOLMOD.FactorComponent(chma, :L) @test size(chmal) == size(A) @@ -373,19 +373,19 @@ end @test_throws ArgumentError cholfact(A1) @test_throws ArgumentError cholfact(A1) @test_throws ArgumentError cholfact(A1, shift=1.0) - @test_throws ArgumentError ldltfact(A1) - @test_throws ArgumentError ldltfact(A1, shift=1.0) + @test_throws ArgumentError ldlt(A1) + @test_throws ArgumentError ldlt(A1, shift=1.0) C = A1 + copy(adjoint(A1)) λmaxC = eigmax(Array(C)) b = fill(1., size(A1, 1)) @test_throws LinearAlgebra.PosDefException cholfact(C - 2λmaxC*I)\b @test_throws LinearAlgebra.PosDefException cholfact(C, shift=-2λmaxC)\b - @test_throws ArgumentError ldltfact(C - C[1,1]*I)\b - @test_throws ArgumentError ldltfact(C, shift=-real(C[1,1]))\b + @test_throws ArgumentError ldlt(C - C[1,1]*I)\b + @test_throws ArgumentError ldlt(C, shift=-real(C[1,1]))\b @test !isposdef(cholfact(C - 2λmaxC*I)) @test !isposdef(cholfact(C, shift=-2λmaxC)) - @test !LinearAlgebra.issuccess(ldltfact(C - C[1,1]*I)) - @test !LinearAlgebra.issuccess(ldltfact(C, shift=-real(C[1,1]))) + @test !LinearAlgebra.issuccess(ldlt(C - C[1,1]*I)) + @test !LinearAlgebra.issuccess(ldlt(C, shift=-real(C[1,1]))) F = cholfact(A1pd) tmp = IOBuffer() show(tmp, F) @@ -406,7 +406,7 @@ end Ftmp = Ftmp'Ftmp + I @test logdet(cholfact(Ftmp)) ≈ logdet(Array(Ftmp)) end - @test logdet(ldltfact(A1pd)) ≈ logdet(Array(A1pd)) + @test logdet(ldlt(A1pd)) ≈ logdet(Array(A1pd)) @test isposdef(A1pd) @test !isposdef(A1) @test !isposdef(A1 + copy(A1') |> t -> t - 2eigmax(Array(t))*I) @@ -417,9 +417,9 @@ end F1 = CHOLMOD.Sparse(cholfact(Symmetric(A1pd, :L), shift=2)) F2 = CHOLMOD.Sparse(cholfact(A1pd, shift=2)) @test F1 == F2 - @test CHOLMOD.Sparse(ldltfact(Symmetric(A1pd, :L))) == CHOLMOD.Sparse(ldltfact(A1pd)) - F1 = CHOLMOD.Sparse(ldltfact(Symmetric(A1pd, :L), shift=2)) - F2 = CHOLMOD.Sparse(ldltfact(A1pd, shift=2)) + @test CHOLMOD.Sparse(ldlt(Symmetric(A1pd, :L))) == CHOLMOD.Sparse(ldlt(A1pd)) + F1 = 
CHOLMOD.Sparse(ldlt(Symmetric(A1pd, :L), shift=2)) + F2 = CHOLMOD.Sparse(ldlt(A1pd, shift=2)) @test F1 == F2 else @test !CHOLMOD.issymmetric(Sparse(A1pd, 0)) @@ -428,9 +428,9 @@ end F1 = CHOLMOD.Sparse(cholfact(Hermitian(A1pd, :L), shift=2)) F2 = CHOLMOD.Sparse(cholfact(A1pd, shift=2)) @test F1 == F2 - @test CHOLMOD.Sparse(ldltfact(Hermitian(A1pd, :L))) == CHOLMOD.Sparse(ldltfact(A1pd)) - F1 = CHOLMOD.Sparse(ldltfact(Hermitian(A1pd, :L), shift=2)) - F2 = CHOLMOD.Sparse(ldltfact(A1pd, shift=2)) + @test CHOLMOD.Sparse(ldlt(Hermitian(A1pd, :L))) == CHOLMOD.Sparse(ldlt(A1pd)) + F1 = CHOLMOD.Sparse(ldlt(Hermitian(A1pd, :L), shift=2)) + F2 = CHOLMOD.Sparse(ldlt(A1pd, shift=2)) @test F1 == F2 end @@ -448,11 +448,11 @@ end @test isa(CHOLMOD.Sparse(F), CHOLMOD.Sparse{elty}) @test CHOLMOD.Sparse(cholfact!(copy(F), A1pd, shift=2.0)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality - F = ldltfact(A1pd) + F = ldlt(A1pd) @test isa(CHOLMOD.Sparse(F), CHOLMOD.Sparse{elty}) @test CHOLMOD.Sparse(ldltfact!(copy(F), A1pd)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality - F = ldltfact(A1pdSparse, shift=2) + F = ldlt(A1pdSparse, shift=2) @test isa(CHOLMOD.Sparse(F), CHOLMOD.Sparse{elty}) @test CHOLMOD.Sparse(ldltfact!(copy(F), A1pd, shift=2.0)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality @@ -554,8 +554,8 @@ end @test_throws CHOLMOD.CHOLMODException Fs.DUPt end - @testset "ldltfact, no permutation" begin - Fs = ldltfact(As, perm=[1:3;]) + @testset "ldlt, no permutation" begin + Fs = ldlt(As, perm=[1:3;]) @test Fs.p == [1:3;] @test sparse(Fs.LD) ≈ LDf @test sparse(Fs) ≈ As @@ -583,13 +583,13 @@ end @test Fs.DUP\b ≈ L_f'\(D_f\b) end - @testset "ldltfact, with permutation" begin - Fs = ldltfact(As, perm=p) + @testset "ldlt, with permutation" begin + Fs = ldlt(As, perm=p) @test Fs.p == p @test sparse(Fs) ≈ As b = rand(3) Asp = As[p,p] - LDp = sparse(ldltfact(Asp, perm=[1,2,3]).LD) + LDp = sparse(ldlt(Asp, perm=[1,2,3]).LD) # LDp = sparse(Fs.LD) Lp, dp = SuiteSparse.CHOLMOD.getLd!(copy(LDp)) Dp = sparse(Diagonal(dp)) @@ -618,7 +618,7 @@ end @testset "Element promotion and type inference" begin @inferred cholfact(As)\fill(1, size(As, 1)) - @inferred ldltfact(As)\fill(1, size(As, 1)) + @inferred ldlt(As)\fill(1, size(As, 1)) end end @@ -690,12 +690,12 @@ end @test cholfact(A)\B ≈ A\B end -@testset "Make sure that ldltfact performs an LDLt (Issue #19032)" begin +@testset "Make sure that ldlt performs an LDLt (Issue #19032)" begin m, n = 400, 500 A = sprandn(m, n, .2) M = [I copy(A'); A -I] b = M * fill(1., m+n) - F = ldltfact(M) + F = ldlt(M) s = unsafe_load(pointer(F)) @test s.is_super == 0 @test F\b ≈ fill(1., m+n) @@ -769,7 +769,7 @@ end AtA = A'*A C0 = [1., 2., 0, 0, 0] # Test both cholfact and LDLt with and without automatic permutations - for F in (cholfact(AtA), cholfact(AtA, perm=1:5), ldltfact(AtA), ldltfact(AtA, perm=1:5)) + for F in (cholfact(AtA), cholfact(AtA, perm=1:5), ldlt(AtA), ldlt(AtA, perm=1:5)) local F x0 = F\(b = fill(1., 5)) #Test both sparse/dense and vectors/matrices From 62b13375edf9501c1cc40381147eae3582a7473d Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 16:27:25 -0700 Subject: [PATCH 06/23] Deprecate lufact! to lu!. 
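
The in-place factorization is now spelled `lu!`; behavior is unchanged.
A minimal usage sketch (values borrowed from the updated `lu!` docstring
below; illustrative only):

```julia
using LinearAlgebra
A = [4. 3.; 6. 3.]
F = lu!(copy(A))         # previously lufact!(copy(A)); overwrites its argument
F.L * F.U ≈ A[F.p, :]    # true: with partial pivoting, P*A == L*U
```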
--- NEWS.md | 2 ++ stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 8 +++---- stdlib/LinearAlgebra/src/deprecated.jl | 8 +++++++ stdlib/LinearAlgebra/src/lu.jl | 26 +++++++++++------------ stdlib/LinearAlgebra/src/tridiag.jl | 2 +- 6 files changed, 29 insertions(+), 19 deletions(-) diff --git a/NEWS.md b/NEWS.md index ee9f178973422..12b12cc1dd309 100644 --- a/NEWS.md +++ b/NEWS.md @@ -703,6 +703,8 @@ Deprecated or removed * `lufact`, `schurfact`, `lqfact`, `qrfact`, and `ldltfact` have respectively been deprecated to `lu`, `schur`, `lq`, `qr`, and `ldlt` ([#27159]). + * `lufact!` has been deprecated to `lu!` ([#27159]). + * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. Instead, reshape the array or add trailing indices so the dimensionality and number of indices diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 4b1c776df3d1e..57e3e4740c432 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -312,7 +312,7 @@ LinearAlgebra.LowerTriangular LinearAlgebra.UpperTriangular LinearAlgebra.UniformScaling LinearAlgebra.lu -LinearAlgebra.lufact! +LinearAlgebra.lu! LinearAlgebra.chol LinearAlgebra.cholfact LinearAlgebra.cholfact! diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 876f03556de28..a0c12b6ec3cfe 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -112,7 +112,7 @@ export lowrankupdate, lowrankupdate!, lu, - lufact!, + lu!, lyap, mul!, lmul!, @@ -253,7 +253,7 @@ Compute `A \\ B` in-place and store the result in `Y`, returning the result. The argument `A` should *not* be a matrix. Rather, instead of matrices it should be a factorization object (e.g. produced by [`factorize`](@ref) or [`cholfact`](@ref)). The reason for this is that factorization itself is both expensive and typically allocates memory -(although it can also be done in-place via, e.g., [`lufact!`](@ref)), +(although it can also be done in-place via, e.g., [`lu!`](@ref)), and performance-critical situations requiring `ldiv!` usually also require fine-grained control over the factorization of `A`. """ @@ -267,7 +267,7 @@ Compute `A \\ B` in-place and overwriting `B` to store the result. The argument `A` should *not* be a matrix. Rather, instead of matrices it should be a factorization object (e.g. produced by [`factorize`](@ref) or [`cholfact`](@ref)). The reason for this is that factorization itself is both expensive and typically allocates memory -(although it can also be done in-place via, e.g., [`lufact!`](@ref)), +(although it can also be done in-place via, e.g., [`lu!`](@ref)), and performance-critical situations requiring `ldiv!` usually also require fine-grained control over the factorization of `A`. """ @@ -282,7 +282,7 @@ Compute `A / B` in-place and overwriting `A` to store the result. The argument `B` should *not* be a matrix. Rather, instead of matrices it should be a factorization object (e.g. produced by [`factorize`](@ref) or [`cholfact`](@ref)). 
 The reason for this is that factorization itself is both expensive and typically allocates memory
-(although it can also be done in-place via, e.g., [`lufact!`](@ref)),
+(although it can also be done in-place via, e.g., [`lu!`](@ref)),
 and performance-critical situations requiring `rdiv!` usually also require fine-grained control
 over the factorization of `B`.
 """
diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl
index 0f5454ee35005..656789bd4f2f6 100644
--- a/stdlib/LinearAlgebra/src/deprecated.jl
+++ b/stdlib/LinearAlgebra/src/deprecated.jl
@@ -33,6 +33,7 @@ function bkfact!(A::StridedMatrix, uplo::Symbol, symmetric::Bool = issymmetric(A
     return bkfact!(symmetric ? Symmetric(A, uplo) : Hermitian(A, uplo), rook)
 end
 
+export lufact!
 @deprecate sqrtm(A::UpperTriangular{T},::Type{Val{realmatrix}}) where {T,realmatrix} sqrtm(A, Val(realmatrix))
 @deprecate lufact(A::AbstractMatrix, ::Type{Val{false}}) lufact(A, Val(false))
 @deprecate lufact(A::AbstractMatrix, ::Type{Val{true}}) lufact(A, Val(true))
@@ -1291,3 +1292,10 @@ export qrfact
 # deprecate ldltfact to ldlt
 export ldltfact
 @deprecate(ldltfact(M::SymTridiagonal{T}) where T, ldlt(M))
+
+# deprecate lufact! to lu!
+# lufact! exported in a deprecation above
+@deprecate(lufact!(A::StridedMatrix{T}, pivot::Union{Val{false}, Val{true}} = Val(true)) where T<:BlasFloat, lu!(A, pivot))
+@deprecate(lufact!(A::HermOrSym, pivot::Union{Val{false}, Val{true}} = Val(true)), lu!(A, pivot))
+@deprecate(lufact!(A::StridedMatrix, pivot::Union{Val{false}, Val{true}} = Val(true)), lu!(A, pivot))
+@deprecate(lufact!(A::Tridiagonal{T,V}, pivot::Union{Val{false}, Val{true}} = Val(true)) where {T,V}, lu!(A, pivot))
diff --git a/stdlib/LinearAlgebra/src/lu.jl b/stdlib/LinearAlgebra/src/lu.jl
index cfdbee76376a4..f041130230290 100644
--- a/stdlib/LinearAlgebra/src/lu.jl
+++ b/stdlib/LinearAlgebra/src/lu.jl
@@ -29,22 +29,22 @@ adjoint(F::LU) = Adjoint(F)
 transpose(F::LU) = Transpose(F)
 
 # StridedMatrix
-function lufact!(A::StridedMatrix{T}, pivot::Union{Val{false}, Val{true}} = Val(true)) where T<:BlasFloat
+function lu!(A::StridedMatrix{T}, pivot::Union{Val{false}, Val{true}} = Val(true)) where T<:BlasFloat
     if pivot === Val(false)
         return generic_lufact!(A, pivot)
     end
     lpt = LAPACK.getrf!(A)
     return LU{T,typeof(A)}(lpt[1], lpt[2], lpt[3])
 end
-function lufact!(A::HermOrSym, pivot::Union{Val{false}, Val{true}} = Val(true))
+function lu!(A::HermOrSym, pivot::Union{Val{false}, Val{true}} = Val(true))
     copytri!(A.data, A.uplo, isa(A, Hermitian))
-    lufact!(A.data, pivot)
+    lu!(A.data, pivot)
 end
 
 """
-    lufact!(A, pivot=Val(true)) -> LU
+    lu!(A, pivot=Val(true)) -> LU
 
-`lufact!` is the same as [`lu`](@ref), but saves space by overwriting the
+`lu!` is the same as [`lu`](@ref), but saves space by overwriting the
 input `A`, instead of creating a copy. An [`InexactError`](@ref)
 exception is thrown if the factorization produces a number not representable
 by the element type of `A`, e.g. for integer types.
 
 # Examples
 ```jldoctest
 julia> A = [4. 3.; 6. 3.]
 2×2 Array{Float64,2}:
  4.0  3.0
  6.0  3.0
 
-julia> F = lufact!(A)
+julia> F = lu!(A)
 LU{Float64,Array{Float64,2}}
 L factor:
 2×2 Array{Float64,2}:
  1.0       0.0
  0.666667  1.0
 U factor:
 2×2 Array{Float64,2}:
  6.0  3.0
  0.0  1.0
 
 julia> iA = [4 3; 6 3]
 2×2 Array{Int64,2}:
  4  3
  6  3
 
-julia> lufact!(iA)
+julia> lu!(iA)
 ERROR: InexactError: Int64(Int64, 0.6666666666666666)
 Stacktrace:
 [...]
``` """ -lufact!(A::StridedMatrix, pivot::Union{Val{false}, Val{true}} = Val(true)) = generic_lufact!(A, pivot) +lu!(A::StridedMatrix, pivot::Union{Val{false}, Val{true}} = Val(true)) = generic_lufact!(A, pivot) function generic_lufact!(A::StridedMatrix{T}, ::Val{Pivot} = Val(true)) where {T,Pivot} m, n = size(A) minmn = min(m,n) @@ -130,7 +130,7 @@ end # floating point types doesn't have to be promoted for LU, but should default to pivoting function lu(A::Union{AbstractMatrix{T}, AbstractMatrix{Complex{T}}}, pivot::Union{Val{false}, Val{true}} = Val(true)) where {T<:AbstractFloat} - lufact!(copy(A), pivot) + lu!(copy(A), pivot) end # for all other types we must promote to a type which is stable under division @@ -202,20 +202,20 @@ function lu(A::AbstractMatrix{T}, pivot::Union{Val{false}, Val{true}}) where T S = typeof(zero(T)/one(T)) AA = similar(A, S) copyto!(AA, A) - lufact!(AA, pivot) + lu!(AA, pivot) end # We can't assume an ordered field so we first try without pivoting function lu(A::AbstractMatrix{T}) where T S = typeof(zero(T)/one(T)) AA = similar(A, S) copyto!(AA, A) - F = lufact!(AA, Val(false)) + F = lu!(AA, Val(false)) if issuccess(F) return F else AA = similar(A, S) copyto!(AA, A) - return lufact!(AA, Val(true)) + return lu!(AA, Val(true)) end end @@ -385,7 +385,7 @@ end # Tridiagonal # See dgttrf.f -function lufact!(A::Tridiagonal{T,V}, pivot::Union{Val{false}, Val{true}} = Val(true)) where {T,V} +function lu!(A::Tridiagonal{T,V}, pivot::Union{Val{false}, Val{true}} = Val(true)) where {T,V} n = size(A, 1) info = 0 ipiv = Vector{BlasInt}(undef, n) diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl index d2d298a784082..2be5dbd7ad9d5 100644 --- a/stdlib/LinearAlgebra/src/tridiag.jl +++ b/stdlib/LinearAlgebra/src/tridiag.jl @@ -399,7 +399,7 @@ struct Tridiagonal{T,V<:AbstractVector{T}} <: AbstractMatrix{T} end new{T,V}(dl, d, du) end - # constructor used in lufact! + # constructor used in lu! function Tridiagonal{T,V}(dl::V, d::V, du::V, du2::V) where {T,V<:AbstractVector{T}} new{T,V}(dl, d, du, du2) end From c8b4ac9a23af94e77deb350f95d12fba44759f4a Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 16:43:31 -0700 Subject: [PATCH 07/23] Deprecate schurfact! to schur!. --- NEWS.md | 3 ++- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 5 +++++ stdlib/LinearAlgebra/src/schur.jl | 18 +++++++++--------- 5 files changed, 18 insertions(+), 12 deletions(-) diff --git a/NEWS.md b/NEWS.md index 12b12cc1dd309..9a59cf171add3 100644 --- a/NEWS.md +++ b/NEWS.md @@ -703,7 +703,8 @@ Deprecated or removed * `lufact`, `schurfact`, `lqfact`, `qrfact`, and `ldltfact` have respectively been deprecated to `lu`, `schur`, `lq`, `qr`, and `ldlt` ([#27159]). - * `lufact!` has been deprecated to `lu!` ([#27159]). + * `lufact!` and `schurfact!` have respectively been deprecated to + `lu!` and `schur!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 57e3e4740c432..05355aa42ff8f 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -341,7 +341,7 @@ LinearAlgebra.eigfact LinearAlgebra.eigfact! LinearAlgebra.hessfact LinearAlgebra.hessfact! -LinearAlgebra.schurfact! 
+LinearAlgebra.schur! LinearAlgebra.schur LinearAlgebra.ordschur LinearAlgebra.ordschur! diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index a0c12b6ec3cfe..30dadb480835a 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -131,7 +131,7 @@ export rank, rdiv!, schur, - schurfact!, + schur!, svd, svdfact!, svdfact, diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 656789bd4f2f6..ca09b873640bf 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1299,3 +1299,8 @@ export ldltfact @deprecate(lufact!(A::HermOrSym, pivot::Union{Val{false}, Val{true}} = Val(true)), lu!(A, pivot)) @deprecate(lufact!(A::StridedMatrix, pivot::Union{Val{false}, Val{true}} = Val(true)), lu!(A, pivot)) @deprecate(lufact!(A::Tridiagonal{T,V}, pivot::Union{Val{false}, Val{true}} = Val(true)) where {T,V}, lu!(A, pivot)) + +# deprecate schurfact! to schur! +export schurfact! +@deprecate(schurfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat}, schur!(A, B)) +@deprecate(schurfact!(A::StridedMatrix{<:BlasFloat}), schur!(A)) diff --git a/stdlib/LinearAlgebra/src/schur.jl b/stdlib/LinearAlgebra/src/schur.jl index 51abb54e50a0b..f7e435cd4edbe 100644 --- a/stdlib/LinearAlgebra/src/schur.jl +++ b/stdlib/LinearAlgebra/src/schur.jl @@ -24,7 +24,7 @@ Base.iterate(S::Schur, ::Val{:done}) = nothing end """ - schurfact!(A::StridedMatrix) -> F::Schur + schur!(A::StridedMatrix) -> F::Schur Same as [`schur`](@ref) but uses the input argument `A` as workspace. @@ -35,7 +35,7 @@ julia> A = [5. 7.; -2. -4.] 5.0 7.0 -2.0 -4.0 -julia> F = schurfact!(A) +julia> F = schur!(A) Schur{Float64,Array{Float64,2}} T factor: 2×2 Array{Float64,2}: @@ -56,7 +56,7 @@ julia> A 0.0 -2.0 ``` """ -schurfact!(A::StridedMatrix{<:BlasFloat}) = Schur(LinearAlgebra.LAPACK.gees!('V', A)...) +schur!(A::StridedMatrix{<:BlasFloat}) = Schur(LinearAlgebra.LAPACK.gees!('V', A)...) """ schur(A::StridedMatrix) -> F::Schur @@ -101,8 +101,8 @@ julia> t == F.T && z == F.Z && vals == F.values true ``` """ -schur(A::StridedMatrix{<:BlasFloat}) = schurfact!(copy(A)) -schur(A::StridedMatrix{T}) where T = schurfact!(copy_oftype(A, eigtype(T))) +schur(A::StridedMatrix{<:BlasFloat}) = schur!(copy(A)) +schur(A::StridedMatrix{T}) where T = schur!(copy_oftype(A, eigtype(T))) schur(A::Symmetric) = schur(copyto!(similar(parent(A)), A)) schur(A::Hermitian) = schur(copyto!(similar(parent(A)), A)) @@ -216,11 +216,11 @@ Base.iterate(S::GeneralizedSchur, ::Val{:done}) = nothing end """ - schurfact!(A::StridedMatrix, B::StridedMatrix) -> F::GeneralizedSchur + schur!(A::StridedMatrix, B::StridedMatrix) -> F::GeneralizedSchur Same as [`schur`](@ref) but uses the input matrices `A` and `B` as workspace. """ -schurfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = +schur!(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = GeneralizedSchur(LinearAlgebra.LAPACK.gges!('V', 'V', A, B)...) """ @@ -236,10 +236,10 @@ generalized eigenvalues of `A` and `B` can be obtained with `F.α./F.β`. Iterating the decomposition produces the components `F.S`, `F.T`, `F.Q`, `F.Z`, `F.α`, and `F.β`. 
""" -schur(A::StridedMatrix{T},B::StridedMatrix{T}) where {T<:BlasFloat} = schurfact!(copy(A),copy(B)) +schur(A::StridedMatrix{T},B::StridedMatrix{T}) where {T<:BlasFloat} = schur!(copy(A),copy(B)) function schur(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB} S = promote_type(eigtype(TA), TB) - return schurfact!(copy_oftype(A, S), copy_oftype(B, S)) + return schur!(copy_oftype(A, S), copy_oftype(B, S)) end """ From 099bb321f790ac954671f0cebe64f7b4096dda56 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 16:48:49 -0700 Subject: [PATCH 08/23] Deprecate lqfact! to lq!. --- NEWS.md | 4 ++-- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 4 ++++ stdlib/LinearAlgebra/src/lq.jl | 6 +++--- 5 files changed, 11 insertions(+), 7 deletions(-) diff --git a/NEWS.md b/NEWS.md index 9a59cf171add3..9a58da5c64843 100644 --- a/NEWS.md +++ b/NEWS.md @@ -703,8 +703,8 @@ Deprecated or removed * `lufact`, `schurfact`, `lqfact`, `qrfact`, and `ldltfact` have respectively been deprecated to `lu`, `schur`, `lq`, `qr`, and `ldlt` ([#27159]). - * `lufact!` and `schurfact!` have respectively been deprecated to - `lu!` and `schur!` ([#27159]). + * `lufact!`, `schurfact!`, and `lqfact!` have respectively + been deprecated to `lu!`, `schur!`, and `lq!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 05355aa42ff8f..ad40bc17cb37b 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -327,7 +327,7 @@ LinearAlgebra.qrfact! LinearAlgebra.QR LinearAlgebra.QRCompactWY LinearAlgebra.QRPivoted -LinearAlgebra.lqfact! +LinearAlgebra.lq! LinearAlgebra.lq LinearAlgebra.bkfact LinearAlgebra.bkfact! diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 30dadb480835a..d6a7d8c0383fd 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -127,7 +127,7 @@ export qr, qrfact!, lq, - lqfact!, + lq!, rank, rdiv!, schur, diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index ca09b873640bf..a3e58e36cc94f 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1304,3 +1304,7 @@ export ldltfact export schurfact! @deprecate(schurfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat}, schur!(A, B)) @deprecate(schurfact!(A::StridedMatrix{<:BlasFloat}), schur!(A)) + +# deprecate lqfact! to lq! +export lqfact! +@deprecate(lqfact!(A::StridedMatrix{<:BlasFloat}), lq!(A)) diff --git a/stdlib/LinearAlgebra/src/lq.jl b/stdlib/LinearAlgebra/src/lq.jl index 87a33b5b19f17..10ca990f027cb 100644 --- a/stdlib/LinearAlgebra/src/lq.jl +++ b/stdlib/LinearAlgebra/src/lq.jl @@ -30,12 +30,12 @@ LQPackedQ(factors::AbstractMatrix{T}, τ::Vector{T}) where {T} = LQPackedQ{T,typ """ - lqfact!(A) -> LQ + lq!(A) -> LQ Compute the LQ factorization of `A`, using the input matrix as a workspace. See also [`lq`](@ref). """ -lqfact!(A::StridedMatrix{<:BlasFloat}) = LQ(LAPACK.gelqf!(A)...) +lq!(A::StridedMatrix{<:BlasFloat}) = LQ(LAPACK.gelqf!(A)...) 
""" lq(A) -> S::LQ @@ -70,7 +70,7 @@ julia> l == S.L && q == S.Q true ``` """ -lq(A::StridedMatrix{<:BlasFloat}) = lqfact!(copy(A)) +lq(A::StridedMatrix{<:BlasFloat}) = lq!(copy(A)) lq(x::Number) = lq(fill(x,1,1)) copy(A::LQ) = LQ(copy(A.factors), copy(A.τ)) From 23c8ff53a8407230559cc2703774d8660b4445d6 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 16:57:55 -0700 Subject: [PATCH 09/23] Deprecate qrfact! to qr!. --- NEWS.md | 4 ++-- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 9 +++++++++ stdlib/LinearAlgebra/src/qr.jl | 24 +++++++++++------------ stdlib/LinearAlgebra/test/qr.jl | 2 +- 6 files changed, 26 insertions(+), 17 deletions(-) diff --git a/NEWS.md b/NEWS.md index 9a58da5c64843..c6af79d45e43c 100644 --- a/NEWS.md +++ b/NEWS.md @@ -703,8 +703,8 @@ Deprecated or removed * `lufact`, `schurfact`, `lqfact`, `qrfact`, and `ldltfact` have respectively been deprecated to `lu`, `schur`, `lq`, `qr`, and `ldlt` ([#27159]). - * `lufact!`, `schurfact!`, and `lqfact!` have respectively - been deprecated to `lu!`, `schur!`, and `lq!` ([#27159]). + * `lufact!`, `schurfact!`, `lqfact!`, and `qrfact!` have + respectively been deprecated to `lu!`, `schur!`, `lq!`, and `qr!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index ad40bc17cb37b..13ccb0f730297 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -323,7 +323,7 @@ LinearAlgebra.lowrankdowndate! LinearAlgebra.ldlt LinearAlgebra.ldltfact! LinearAlgebra.qr -LinearAlgebra.qrfact! +LinearAlgebra.qr! LinearAlgebra.QR LinearAlgebra.QRCompactWY LinearAlgebra.QRPivoted diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index d6a7d8c0383fd..1c3b9d7149d04 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -125,7 +125,7 @@ export ordschur, pinv, qr, - qrfact!, + qr!, lq, lq!, rank, diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index a3e58e36cc94f..8581dafba8be3 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1308,3 +1308,12 @@ export schurfact! # deprecate lqfact! to lq! export lqfact! @deprecate(lqfact!(A::StridedMatrix{<:BlasFloat}), lq!(A)) + +# deprecate qrfact! to qr! +export qrfact! +@deprecate(qrfact!(A::StridedMatrix{<:BlasFloat}, ::Val{false}), qr!(A, Val(false))) +@deprecate(qrfact!(A::StridedMatrix{<:BlasFloat}, ::Val{true}), qr!(A, Val(true))) +@deprecate(qrfact!(A::StridedMatrix{<:BlasFloat}), qr!(A)) +@deprecate(qrfact!(A::StridedMatrix, ::Val{false}), qr!(A, Val(false))) +@deprecate(qrfact!(A::StridedMatrix, ::Val{true}), qr!(A, Val(true))) +@deprecate(qrfact!(A::StridedMatrix), qr!(A)) diff --git a/stdlib/LinearAlgebra/src/qr.jl b/stdlib/LinearAlgebra/src/qr.jl index 9374a3fa594c4..90e5ae7d21c91 100644 --- a/stdlib/LinearAlgebra/src/qr.jl +++ b/stdlib/LinearAlgebra/src/qr.jl @@ -240,16 +240,16 @@ function qrfactPivotedUnblocked!(A::StridedMatrix) end # LAPACK version -qrfact!(A::StridedMatrix{<:BlasFloat}, ::Val{false}) = QRCompactWY(LAPACK.geqrt!(A, min(min(size(A)...), 36))...) 
-qrfact!(A::StridedMatrix{<:BlasFloat}, ::Val{true}) = QRPivoted(LAPACK.geqp3!(A)...) -qrfact!(A::StridedMatrix{<:BlasFloat}) = qrfact!(A, Val(false)) +qr!(A::StridedMatrix{<:BlasFloat}, ::Val{false}) = QRCompactWY(LAPACK.geqrt!(A, min(min(size(A)...), 36))...) +qr!(A::StridedMatrix{<:BlasFloat}, ::Val{true}) = QRPivoted(LAPACK.geqp3!(A)...) +qr!(A::StridedMatrix{<:BlasFloat}) = qr!(A, Val(false)) # Generic fallbacks """ - qrfact!(A, pivot=Val(false)) + qr!(A, pivot=Val(false)) -`qrfact!` is the same as [`qr`](@ref) when `A` is a subtype of +`qr!` is the same as [`qr`](@ref) when `A` is a subtype of `StridedMatrix`, but saves space by overwriting the input `A`, instead of creating a copy. An [`InexactError`](@ref) exception is thrown if the factorization produces a number not representable by the element type of `A`, e.g. for integer types. @@ -261,7 +261,7 @@ julia> a = [1. 2.; 3. 4.] 1.0 2.0 3.0 4.0 -julia> qrfact!(a) +julia> qr!(a) LinearAlgebra.QRCompactWY{Float64,Array{Float64,2}} Q factor: 2×2 LinearAlgebra.QRCompactWYQ{Float64,Array{Float64,2}}: @@ -277,15 +277,15 @@ julia> a = [1 2; 3 4] 1 2 3 4 -julia> qrfact!(a) +julia> qr!(a) ERROR: InexactError: Int64(Int64, -3.1622776601683795) Stacktrace: [...] ``` """ -qrfact!(A::StridedMatrix, ::Val{false}) = qrfactUnblocked!(A) -qrfact!(A::StridedMatrix, ::Val{true}) = qrfactPivotedUnblocked!(A) -qrfact!(A::StridedMatrix) = qrfact!(A, Val(false)) +qr!(A::StridedMatrix, ::Val{false}) = qrfactUnblocked!(A) +qr!(A::StridedMatrix, ::Val{true}) = qrfactPivotedUnblocked!(A) +qr!(A::StridedMatrix) = qr!(A, Val(false)) _qreltype(::Type{T}) where T = typeof(zero(T)/sqrt(abs2(one(T)))) @@ -358,12 +358,12 @@ true function qr(A::AbstractMatrix{T}, arg) where T AA = similar(A, _qreltype(T), size(A)) copyto!(AA, A) - return qrfact!(AA, arg) + return qr!(AA, arg) end function qr(A::AbstractMatrix{T}) where T AA = similar(A, _qreltype(T), size(A)) copyto!(AA, A) - return qrfact!(AA) + return qr!(AA) end qr(x::Number) = qr(fill(x,1,1)) qr(v::AbstractVector) = qr(reshape(v, (length(v), 1))) diff --git a/stdlib/LinearAlgebra/test/qr.jl b/stdlib/LinearAlgebra/test/qr.jl index ebfa56888671c..2f9f0d763cbc3 100644 --- a/stdlib/LinearAlgebra/test/qr.jl +++ b/stdlib/LinearAlgebra/test/qr.jl @@ -65,7 +65,7 @@ rectangularQ(Q::LinearAlgebra.AbstractQ) = convert(Array, Q) if eltya != Int @test Matrix{eltyb}(I, a_1, a_1)*q ≈ convert(AbstractMatrix{tab}, q) ac = copy(a) - @test qrfact!(a[:, 1:5])\b == qrfact!(view(ac, :, 1:5))\b + @test qr!(a[:, 1:5])\b == qr!(view(ac, :, 1:5))\b end qrstring = sprint((t, s) -> show(t, "text/plain", s), qra) rstring = sprint((t, s) -> show(t, "text/plain", s), r) From a659ee96032fde1f3c675a35de15dd3794b4a2f7 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 17:31:12 -0700 Subject: [PATCH 10/23] Deprecate ldltfact! to ldlt!. 
--- NEWS.md | 5 +++-- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/dense.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 4 ++++ stdlib/LinearAlgebra/src/ldlt.jl | 8 ++++---- stdlib/SparseArrays/src/linalg.jl | 4 ++-- stdlib/SuiteSparse/src/cholmod.jl | 16 ++++++++-------- stdlib/SuiteSparse/src/deprecated.jl | 13 +++++++++++++ stdlib/SuiteSparse/test/cholmod.jl | 10 +++++----- 10 files changed, 42 insertions(+), 24 deletions(-) diff --git a/NEWS.md b/NEWS.md index c6af79d45e43c..503245fc34a41 100644 --- a/NEWS.md +++ b/NEWS.md @@ -703,8 +703,9 @@ Deprecated or removed * `lufact`, `schurfact`, `lqfact`, `qrfact`, and `ldltfact` have respectively been deprecated to `lu`, `schur`, `lq`, `qr`, and `ldlt` ([#27159]). - * `lufact!`, `schurfact!`, `lqfact!`, and `qrfact!` have - respectively been deprecated to `lu!`, `schur!`, `lq!`, and `qr!` ([#27159]). + * `lufact!`, `schurfact!`, `lqfact!`, `qrfact!`, + and `ldltfact!` have respectively been deprecated to `lu!`, + `schur!`, `lq!`, `qr!`, and `ldlt!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 13ccb0f730297..0417e3f8979a0 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -321,7 +321,7 @@ LinearAlgebra.lowrankdowndate LinearAlgebra.lowrankupdate! LinearAlgebra.lowrankdowndate! LinearAlgebra.ldlt -LinearAlgebra.ldltfact! +LinearAlgebra.ldlt! LinearAlgebra.qr LinearAlgebra.qr! LinearAlgebra.QR diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 1c3b9d7149d04..f5b1ef4b066ca 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -102,7 +102,7 @@ export istriu, kron, ldiv!, - ldltfact!, + ldlt!, ldlt, linreg, logabsdet, diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 4147b2a452be5..b5eb10e7ca975 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -1198,7 +1198,7 @@ function factorize(A::StridedMatrix{T}) where T if utri1 if (herm & (T <: Complex)) | sym try - return ldltfact!(SymTridiagonal(diag(A), diag(A, -1))) + return ldlt!(SymTridiagonal(diag(A), diag(A, -1))) end end return lu(Tridiagonal(diag(A, -1), diag(A), diag(A, 1))) diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 8581dafba8be3..30c601c343780 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1317,3 +1317,7 @@ export qrfact! @deprecate(qrfact!(A::StridedMatrix, ::Val{false}), qr!(A, Val(false))) @deprecate(qrfact!(A::StridedMatrix, ::Val{true}), qr!(A, Val(true))) @deprecate(qrfact!(A::StridedMatrix), qr!(A)) + +# deprecate ldltfact! to ldlt! +export ldltfact! 
+@deprecate(ldltfact!(S::SymTridiagonal{T,V}) where {T<:Real,V}, ldlt!(S)) diff --git a/stdlib/LinearAlgebra/src/ldlt.jl b/stdlib/LinearAlgebra/src/ldlt.jl index 86a9ca0c1105c..c97e4910014e4 100644 --- a/stdlib/LinearAlgebra/src/ldlt.jl +++ b/stdlib/LinearAlgebra/src/ldlt.jl @@ -17,7 +17,7 @@ Factorization{T}(F::LDLt{S,U}) where {T,S,U} = LDLt{T,U}(F) # SymTridiagonal """ - ldltfact!(S::SymTridiagonal) -> LDLt + ldlt!(S::SymTridiagonal) -> LDLt Same as [`ldlt`](@ref), but saves space by overwriting the input `S`, instead of creating a copy. @@ -29,7 +29,7 @@ julia> S = SymTridiagonal([3., 4., 5.], [1., 2.]) 1.0 4.0 2.0 ⋅ 2.0 5.0 -julia> ldltS = ldltfact!(S); +julia> ldltS = ldlt!(S); julia> ldltS === S false @@ -41,7 +41,7 @@ julia> S ⋅ 0.545455 3.90909 ``` """ -function ldltfact!(S::SymTridiagonal{T,V}) where {T<:Real,V} +function ldlt!(S::SymTridiagonal{T,V}) where {T<:Real,V} n = size(S,1) d = S.dv e = S.ev @@ -86,7 +86,7 @@ julia> S \\ b """ function ldlt(M::SymTridiagonal{T}) where T S = typeof(zero(T)/one(T)) - return S == T ? ldltfact!(copy(M)) : ldltfact!(SymTridiagonal{S}(M)) + return S == T ? ldlt!(copy(M)) : ldlt!(SymTridiagonal{S}(M)) end factorize(S::SymTridiagonal) = ldlt(S) diff --git a/stdlib/SparseArrays/src/linalg.jl b/stdlib/SparseArrays/src/linalg.jl index fbd3e5b4ce3eb..1a956b7378683 100644 --- a/stdlib/SparseArrays/src/linalg.jl +++ b/stdlib/SparseArrays/src/linalg.jl @@ -994,7 +994,7 @@ end # if LinearAlgebra.issuccess(F) # return F # else -# ldltfact!(F, A) +# ldlt!(F, A) # return F # end # end @@ -1003,7 +1003,7 @@ function factorize(A::LinearAlgebra.RealHermSymComplexHerm{Float64,<:SparseMatri if LinearAlgebra.issuccess(F) return F else - ldltfact!(F, A) + ldlt!(F, A) return F end end diff --git a/stdlib/SuiteSparse/src/cholmod.jl b/stdlib/SuiteSparse/src/cholmod.jl index 6533034ef5e9f..12eff4884e3b0 100644 --- a/stdlib/SuiteSparse/src/cholmod.jl +++ b/stdlib/SuiteSparse/src/cholmod.jl @@ -8,7 +8,7 @@ import Base: (*), convert, copy, eltype, getindex, getproperty, show, size, using LinearAlgebra import LinearAlgebra: (\), cholfact, cholfact!, det, diag, ishermitian, isposdef, - issuccess, issymmetric, ldlt, ldltfact!, logdet + issuccess, issymmetric, ldlt, ldlt!, logdet using SparseArrays import Libdl @@ -1456,7 +1456,7 @@ cholfact(A::Union{SparseMatrixCSC{T}, SparseMatrixCSC{Complex{T}}, kws...) where {T<:Real} = cholfact(Sparse(A); kws...) -function ldltfact!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv +function ldlt!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv cm = defaults(common_struct) set_print_level(cm, 0) @@ -1470,7 +1470,7 @@ function ldltfact!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv end """ - ldltfact!(F::Factor, A; shift = 0.0) -> CHOLMOD.Factor + ldlt!(F::Factor, A; shift = 0.0) -> CHOLMOD.Factor Compute the ``LDL'`` factorization of `A`, reusing the symbolic factorization `F`. `A` must be a [`SparseMatrixCSC`](@ref) or a [`Symmetric`](@ref)/[`Hermitian`](@ref) @@ -1485,13 +1485,13 @@ See also [`ldlt`](@ref). be converted to `SparseMatrixCSC{Float64}` or `SparseMatrixCSC{ComplexF64}` as appropriate. 
""" -ldltfact!(F::Factor, A::Union{SparseMatrixCSC{T}, +ldlt!(F::Factor, A::Union{SparseMatrixCSC{T}, SparseMatrixCSC{Complex{T}}, Symmetric{T,SparseMatrixCSC{T,SuiteSparse_long}}, Hermitian{Complex{T},SparseMatrixCSC{Complex{T},SuiteSparse_long}}, Hermitian{T,SparseMatrixCSC{T,SuiteSparse_long}}}; shift = 0.0) where {T<:Real} = - ldltfact!(F, Sparse(A), shift = shift) + ldlt!(F, Sparse(A), shift = shift) function ldlt(A::Sparse; shift::Real=0.0, perm::AbstractVector{SuiteSparse_long}=SuiteSparse_long[]) @@ -1508,7 +1508,7 @@ function ldlt(A::Sparse; shift::Real=0.0, F = fact_(A, cm; perm = perm) # Compute the numerical factorization - ldltfact!(F, A; shift = shift) + ldlt!(F, A; shift = shift) return F end @@ -1716,7 +1716,7 @@ function \(A::RealHermSymComplexHermF64SSL, B::StridedVecOrMat) if issuccess(F) return \(F, B) else - ldltfact!(F, A) + ldlt!(F, A) if issuccess(F) return \(F, B) else @@ -1730,7 +1730,7 @@ function \(adjA::Adjoint{<:Any,<:RealHermSymComplexHermF64SSL}, B::StridedVecOrM if issuccess(F) return \(adjoint(F), B) else - ldltfact!(F, A) + ldlt!(F, A) if issuccess(F) return \(adjoint(F), B) else diff --git a/stdlib/SuiteSparse/src/deprecated.jl b/stdlib/SuiteSparse/src/deprecated.jl index 889e7d4bc391a..a0aa42f233405 100644 --- a/stdlib/SuiteSparse/src/deprecated.jl +++ b/stdlib/SuiteSparse/src/deprecated.jl @@ -74,3 +74,16 @@ end kws...) where {T<:Real}, ldlt(A; kws...)) end + +# deprecate ldltfact! to ldlt! +@eval SuiteSparse.CHOLMOD begin + import LinearAlgebra: ldltfact! + @deprecate(ldltfact!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv, ldlt!(F, A; shift=shift)) + @deprecate(ldltfact!(F::Factor, A::Union{SparseMatrixCSC{T}, + SparseMatrixCSC{Complex{T}}, + Symmetric{T,SparseMatrixCSC{T,SuiteSparse_long}}, + Hermitian{Complex{T},SparseMatrixCSC{Complex{T},SuiteSparse_long}}, + Hermitian{T,SparseMatrixCSC{T,SuiteSparse_long}}}; + shift = 0.0) where {T<:Real}, + ldlt!(F, A; shift=shift)) +end diff --git a/stdlib/SuiteSparse/test/cholmod.jl b/stdlib/SuiteSparse/test/cholmod.jl index 47c9dd2f792f5..53681315f6608 100644 --- a/stdlib/SuiteSparse/test/cholmod.jl +++ b/stdlib/SuiteSparse/test/cholmod.jl @@ -434,7 +434,7 @@ end @test F1 == F2 end - ### cholfact!/ldltfact! + ### cholfact!/ldlt! 
F = cholfact(A1pd) CHOLMOD.change_factor!(elty, false, false, true, true, F) @test unsafe_load(pointer(F)).is_ll == 0 @@ -450,11 +450,11 @@ end F = ldlt(A1pd) @test isa(CHOLMOD.Sparse(F), CHOLMOD.Sparse{elty}) - @test CHOLMOD.Sparse(ldltfact!(copy(F), A1pd)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality + @test CHOLMOD.Sparse(ldlt!(copy(F), A1pd)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality F = ldlt(A1pdSparse, shift=2) @test isa(CHOLMOD.Sparse(F), CHOLMOD.Sparse{elty}) - @test CHOLMOD.Sparse(ldltfact!(copy(F), A1pd, shift=2.0)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality + @test CHOLMOD.Sparse(ldlt!(copy(F), A1pd, shift=2.0)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality @test isa(CHOLMOD.factor_to_sparse!(F), CHOLMOD.Sparse) @test_throws CHOLMOD.CHOLMODException CHOLMOD.factor_to_sparse!(F) @@ -701,7 +701,7 @@ end @test F\b ≈ fill(1., m+n) F2 = cholfact(M) @test !LinearAlgebra.issuccess(F2) - ldltfact!(F2, M) + ldlt!(F2, M) @test LinearAlgebra.issuccess(F2) @test F2\b ≈ fill(1., m+n) end @@ -809,7 +809,7 @@ end A[3, 3] = -1 F = cholfact(A) @test !LinearAlgebra.issuccess(F) - @test LinearAlgebra.issuccess(ldltfact!(F, A)) + @test LinearAlgebra.issuccess(ldlt!(F, A)) A[3, 3] = 1 @test A[:, 3:-1:1]\fill(1., 3) == [1, 1, 1] end From 1084fb4fd803cbc27ea822dba1ecee071ebcb4bc Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Sun, 20 May 2018 17:51:10 -0700 Subject: [PATCH 11/23] Deprecate svdfact! to svd!. --- NEWS.md | 6 +++--- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/bidiag.jl | 8 +++---- stdlib/LinearAlgebra/src/deprecated.jl | 9 +++++++- stdlib/LinearAlgebra/src/svd.jl | 26 +++++++++++------------ stdlib/LinearAlgebra/src/triangular.jl | 2 +- stdlib/LinearAlgebra/test/bidiag.jl | 2 +- stdlib/LinearAlgebra/test/svd.jl | 2 +- stdlib/LinearAlgebra/test/triangular.jl | 2 +- 10 files changed, 34 insertions(+), 27 deletions(-) diff --git a/NEWS.md b/NEWS.md index 503245fc34a41..b29e9761dadbc 100644 --- a/NEWS.md +++ b/NEWS.md @@ -703,9 +703,9 @@ Deprecated or removed * `lufact`, `schurfact`, `lqfact`, `qrfact`, and `ldltfact` have respectively been deprecated to `lu`, `schur`, `lq`, `qr`, and `ldlt` ([#27159]). - * `lufact!`, `schurfact!`, `lqfact!`, `qrfact!`, - and `ldltfact!` have respectively been deprecated to `lu!`, - `schur!`, `lq!`, `qr!`, and `ldlt!` ([#27159]). + * `lufact!`, `schurfact!`, `lqfact!`, `qrfact!`, `ldltfact!`, and `svdfact!` + have respectively been deprecated to `lu!`, `schur!`, `lq!`, `qr!`, `ldlt!`, + and `svd!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 0417e3f8979a0..f00b40c47184b 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -346,7 +346,7 @@ LinearAlgebra.schur LinearAlgebra.ordschur LinearAlgebra.ordschur! LinearAlgebra.svdfact -LinearAlgebra.svdfact! +LinearAlgebra.svd! LinearAlgebra.svd LinearAlgebra.svdvals LinearAlgebra.svdvals! 
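For downstream code this change is a mechanical rename from `svdfact!` to `svd!`; a minimal before/after sketch, with an illustrative matrix:

```julia
using LinearAlgebra

A = [1.0 2.0; 3.0 4.0]

# Previously: F = svdfact!(copy(A))   (the old name now goes through a deprecation)
F = svd!(copy(A))                      # overwrites its argument, hence the copy
@assert F.U * Diagonal(F.S) * F.Vt ≈ A
```
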
diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index f5b1ef4b066ca..5e98c00c59ecc 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -133,7 +133,7 @@ export schur, schur!, svd, - svdfact!, + svd!, svdfact, svdvals!, svdvals, diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index 9d2d617b78023..27bc6c0165ce1 100644 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -188,12 +188,12 @@ similar(B::Bidiagonal, ::Type{T}) where {T} = Bidiagonal(similar(B.dv, T), simil #Singular values svdvals!(M::Bidiagonal{<:BlasReal}) = LAPACK.bdsdc!(M.uplo, 'N', M.dv, M.ev)[1] -function svdfact!(M::Bidiagonal{<:BlasReal}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) +function svd!(M::Bidiagonal{<:BlasReal}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 if thin != nothing - Base.depwarn(string("the `thin` keyword argument in `svdfact!(A; thin = $(thin))` has ", + Base.depwarn(string("the `thin` keyword argument in `svd!(A; thin = $(thin))` has ", "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g. `svdfact!(A; full = $(!thin))`."), :svdfact!) + "e.g. `svd!(A; full = $(!thin))`."), :svd!) full::Bool = !thin end d, e, U, Vt, Q, iQ = LAPACK.bdsdc!(M.uplo, 'I', M.dv, M.ev) @@ -207,7 +207,7 @@ function svdfact(M::Bidiagonal; full::Bool = false, thin::Union{Bool,Nothing} = "e.g. `svdfact(A; full = $(!thin))`."), :svdfact) full::Bool = !thin end - return svdfact!(copy(M), full = full) + return svd!(copy(M), full = full) end #################### diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 30c601c343780..2f9ca88bd9f0e 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1,4 +1,4 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license + # This file is a part of Julia. License is MIT: https://julialang.org/license using Base: @deprecate, depwarn @@ -1321,3 +1321,10 @@ export qrfact! # deprecate ldltfact! to ldlt! export ldltfact! @deprecate(ldltfact!(S::SymTridiagonal{T,V}) where {T<:Real,V}, ldlt!(S)) + +# deprecate svdfact! to svd! +export svdfact! +@deprecate(svdfact!(M::Bidiagonal{<:BlasReal}; full::Bool = false, thin::Union{Bool,Nothing} = nothing), svd!(M; full=full, thin=thin)) +@deprecate(svdfact!(A::StridedMatrix{T}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) where T<:BlasFloat, svd!(A; full=full, thin=thin)) +@deprecate(svdfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasFloat, svd!(A, B)) +@deprecate(svdfact!(A::AbstractTriangular), svd!(A)) diff --git a/stdlib/LinearAlgebra/src/svd.jl b/stdlib/LinearAlgebra/src/svd.jl index 0a2070633b5c3..5c85faa058ade 100644 --- a/stdlib/LinearAlgebra/src/svd.jl +++ b/stdlib/LinearAlgebra/src/svd.jl @@ -11,9 +11,9 @@ end SVD(U::AbstractArray{T}, S::Vector{Tr}, Vt::AbstractArray{T}) where {T,Tr} = SVD{T,Tr,typeof(U)}(U, S, Vt) """ - svdfact!(A; full::Bool = false) -> SVD + svd!(A; full::Bool = false) -> SVD -`svdfact!` is the same as [`svdfact`](@ref), but saves space by +`svd!` is the same as [`svdfact`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. # Examples @@ -25,7 +25,7 @@ julia> A = [1. 0. 0. 0. 2.; 0. 0. 3. 0. 0.; 0. 0. 0. 0. 0.; 0. 2. 0. 0. 0.] 
0.0 0.0 0.0 0.0 0.0 0.0 2.0 0.0 0.0 0.0 -julia> F = svdfact!(A); +julia> F = svd!(A); julia> F.U * Diagonal(F.S) * F.Vt 4×5 Array{Float64,2}: @@ -42,12 +42,12 @@ julia> A 0.0 0.0 -2.0 0.0 0.0 ``` """ -function svdfact!(A::StridedMatrix{T}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) where T<:BlasFloat +function svd!(A::StridedMatrix{T}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) where T<:BlasFloat # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 if thin != nothing - Base.depwarn(string("the `thin` keyword argument in `svdfact!(A; thin = $(thin))` has ", + Base.depwarn(string("the `thin` keyword argument in `svd!(A; thin = $(thin))` has ", "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g. `svdfact!(A; full = $(!thin))`."), :svdfact!) + "e.g. `svd!(A; full = $(!thin))`."), :svd!) full::Bool = !thin end m,n = size(A) @@ -102,7 +102,7 @@ function svdfact(A::StridedVecOrMat{T}; full::Bool = false, thin::Union{Bool,Not "e.g. `svdfact(A; full = $(!thin))`."), :svdfact) full::Bool = !thin end - svdfact!(copy_oftype(A, eigtype(T)), full = full) + svd!(copy_oftype(A, eigtype(T)), full = full) end function svdfact(x::Number; full::Bool = false, thin::Union{Bool,Nothing} = nothing) # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 @@ -279,9 +279,9 @@ function GeneralizedSVD(U::AbstractMatrix{T}, V::AbstractMatrix{T}, Q::AbstractM end """ - svdfact!(A, B) -> GeneralizedSVD + svd!(A, B) -> GeneralizedSVD -`svdfact!` is the same as [`svdfact`](@ref), but modifies the arguments +`svd!` is the same as [`svdfact`](@ref), but modifies the arguments `A` and `B` in-place, instead of making copies. # Examples @@ -296,7 +296,7 @@ julia> B = [0. 1.; 1. 0.] 
0.0 1.0 1.0 0.0 -julia> F = svdfact!(A, B); +julia> F = svd!(A, B); julia> F.U*F.D1*F.R0*F.Q' 2×2 Array{Float64,2}: @@ -319,7 +319,7 @@ julia> B 0.0 -1.0 ``` """ -function svdfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasFloat +function svd!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasFloat # xggsvd3 replaced xggsvd in LAPACK 3.6.0 if LAPACK.version() < v"3.6.0" U, V, Q, a, b, k, l, R = LAPACK.ggsvd!('U', 'V', 'Q', A, B) @@ -328,7 +328,7 @@ function svdfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasFloat end GeneralizedSVD(U, V, Q, a, b, Int(k), Int(l), R) end -svdfact(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = svdfact!(copy(A),copy(B)) +svdfact(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = svd!(copy(A),copy(B)) """ svdfact(A, B) -> GeneralizedSVD @@ -381,7 +381,7 @@ julia> F.V*F.D2*F.R0*F.Q' """ function svdfact(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB} S = promote_type(eigtype(TA),TB) - return svdfact!(copy_oftype(A, S), copy_oftype(B, S)) + return svd!(copy_oftype(A, S), copy_oftype(B, S)) end # This method can be heavily optimized but it is probably not critical # and might introduce bugs or inconsistencies relative to the 1x1 matrix diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index c6ef222cacc7b..bb1ce1b4968e8 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -2435,7 +2435,7 @@ end eigfact(A::AbstractTriangular) = Eigen(eigvals(A), eigvecs(A)) # Generic singular systems -for func in (:svd, :svdfact, :svdfact!, :svdvals) +for func in (:svd, :svdfact, :svd!, :svdvals) @eval begin ($func)(A::AbstractTriangular) = ($func)(copyto!(similar(parent(A)), A)) end diff --git a/stdlib/LinearAlgebra/test/bidiag.jl b/stdlib/LinearAlgebra/test/bidiag.jl index a096ffd43c27f..39c8d4cf31258 100644 --- a/stdlib/LinearAlgebra/test/bidiag.jl +++ b/stdlib/LinearAlgebra/test/bidiag.jl @@ -242,7 +242,7 @@ srand(1) @testset "Singular systems" begin if (elty <: BlasReal) - @test AbstractArray(svdfact(T)) ≈ AbstractArray(svdfact!(copy(Tfull))) + @test AbstractArray(svdfact(T)) ≈ AbstractArray(svd!(copy(Tfull))) @test svdvals(Tfull) ≈ svdvals(T) u1, d1, v1 = svd(Tfull) u2, d2, v2 = svd(T) diff --git a/stdlib/LinearAlgebra/test/svd.jl b/stdlib/LinearAlgebra/test/svd.jl index 3af0a4f2e9e04..1210ad5a31b44 100644 --- a/stdlib/LinearAlgebra/test/svd.jl +++ b/stdlib/LinearAlgebra/test/svd.jl @@ -63,7 +63,7 @@ a2img = randn(n,n)/2 @test usv\b ≈ a\b if eltya <: BlasFloat - svdz = svdfact!(Matrix{eltya}(undef,0,0)) + svdz = svd!(Matrix{eltya}(undef,0,0)) @test svdz.U ≈ Matrix{eltya}(I, 0, 0) @test svdz.S ≈ real(zeros(eltya,0)) @test svdz.Vt ≈ Matrix{eltya}(I, 0, 0) diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index 884d94418d1c0..e7a87b30e3bcb 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -266,7 +266,7 @@ for elty1 in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFlo if !(elty1 in (BigFloat, Complex{BigFloat})) # Not implemented yet svd(A1) svdfact(A1) - elty1 <: BlasFloat && svdfact!(copy(A1)) + elty1 <: BlasFloat && svd!(copy(A1)) svdvals(A1) end From 0eda8e4eb79b8788280dfd967d1278e4a18fa6ea Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Mon, 21 May 2018 10:51:46 -0700 Subject: [PATCH 12/23] Deprecate svdfact to svd. 
--- NEWS.md | 7 +- doc/src/manual/parallel-computing.md | 4 +- doc/src/manual/performance-tips.md | 4 +- stdlib/Distributed/test/distributed_exec.jl | 4 +- stdlib/IterativeEigensolvers/test/runtests.jl | 16 +- stdlib/LinearAlgebra/docs/src/index.md | 3 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 1 - stdlib/LinearAlgebra/src/bidiag.jl | 6 +- stdlib/LinearAlgebra/src/dense.jl | 4 +- stdlib/LinearAlgebra/src/deprecated.jl | 12 ++ stdlib/LinearAlgebra/src/diagonal.jl | 6 +- stdlib/LinearAlgebra/src/svd.jl | 179 ++++++------------ stdlib/LinearAlgebra/src/triangular.jl | 2 +- stdlib/LinearAlgebra/test/bidiag.jl | 4 +- stdlib/LinearAlgebra/test/diagonal.jl | 2 +- stdlib/LinearAlgebra/test/svd.jl | 22 +-- stdlib/LinearAlgebra/test/triangular.jl | 1 - test/bitarray.jl | 3 +- 18 files changed, 114 insertions(+), 166 deletions(-) diff --git a/NEWS.md b/NEWS.md index b29e9761dadbc..8a36a8208bbc8 100644 --- a/NEWS.md +++ b/NEWS.md @@ -239,6 +239,9 @@ This section lists changes that do not have deprecation warnings. * `qr` methods now return decomposition objects such as `QR`, `QRPivoted`, and `QRCompactWY` rather than tuples of arrays ([#27159]). + * `svd` methods now return decomposition objects such as `SVD` and + `GeneralizedSVD` rather than tuples of arrays or tuples of numbers ([#27159]). + * `countlines` now always counts the last non-empty line even if it does not end with EOL, matching the behavior of `eachline` and `readlines` ([#25845]). @@ -700,8 +703,8 @@ Deprecated or removed * The keyword `immutable` is fully deprecated to `struct`, and `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). - * `lufact`, `schurfact`, `lqfact`, `qrfact`, and `ldltfact` have respectively - been deprecated to `lu`, `schur`, `lq`, `qr`, and `ldlt` ([#27159]). + * `lufact`, `schurfact`, `lqfact`, `qrfact`, `ldltfact`, and `svdfact` have + respectively been deprecated to `lu`, `schur`, `lq`, `qr`, `ldlt`, and `svd` ([#27159]). 
* `lufact!`, `schurfact!`, `lqfact!`, `qrfact!`, `ldltfact!`, and `svdfact!` have respectively been deprecated to `lu!`, `schur!`, `lq!`, `qr!`, `ldlt!`, diff --git a/doc/src/manual/parallel-computing.md b/doc/src/manual/parallel-computing.md index 76bb41af49448..bcfb77374d884 100644 --- a/doc/src/manual/parallel-computing.md +++ b/doc/src/manual/parallel-computing.md @@ -457,7 +457,7 @@ we could compute the singular values of several large random matrices in paralle ```julia-repl julia> M = Matrix{Float64}[rand(1000,1000) for i = 1:10]; -julia> pmap(svd, M); +julia> pmap(svdvals, M); ``` Julia's [`pmap`](@ref) is designed for the case where each function call does a large amount @@ -486,7 +486,7 @@ As an example, consider computing the singular values of matrices of different s ```julia-repl julia> M = Matrix{Float64}[rand(800,800), rand(600,600), rand(800,800), rand(600,600)]; -julia> pmap(svd, M); +julia> pmap(svdvals, M); ``` If one process handles both 800×800 matrices and another handles both 600×600 matrices, we will diff --git a/doc/src/manual/performance-tips.md b/doc/src/manual/performance-tips.md index ff8ecd570ef48..1ecac2d6baade 100644 --- a/doc/src/manual/performance-tips.md +++ b/doc/src/manual/performance-tips.md @@ -544,7 +544,7 @@ function norm(A) if isa(A, Vector) return sqrt(real(dot(A,A))) elseif isa(A, Matrix) - return maximum(svd(A)[2]) + return maximum(svdvals(A)) else error("norm: invalid argument") end @@ -555,7 +555,7 @@ This can be written more concisely and efficiently as: ```julia norm(x::Vector) = sqrt(real(dot(x,x))) -norm(A::Matrix) = maximum(svd(A)[2]) +norm(A::Matrix) = maximum(svdvals(A)) ``` ## Write "type-stable" functions diff --git a/stdlib/Distributed/test/distributed_exec.jl b/stdlib/Distributed/test/distributed_exec.jl index f073bc2ff9e74..9fb9a5e45fa63 100644 --- a/stdlib/Distributed/test/distributed_exec.jl +++ b/stdlib/Distributed/test/distributed_exec.jl @@ -576,8 +576,8 @@ end n = 10 as = [rand(4,4) for i in 1:n] bs = deepcopy(as) -cs = collect(Distributed.pgenerate(x->(sleep(rand()*0.1); svdfact(x)), bs)) -svdas = map(svdfact, as) +cs = collect(Distributed.pgenerate(x->(sleep(rand()*0.1); svd(x)), bs)) +svdas = map(svd, as) for i in 1:n @test cs[i].U ≈ svdas[i].U @test cs[i].S ≈ svdas[i].S diff --git a/stdlib/IterativeEigensolvers/test/runtests.jl b/stdlib/IterativeEigensolvers/test/runtests.jl index 348b7cff1f5d4..3df8626edcf44 100644 --- a/stdlib/IterativeEigensolvers/test/runtests.jl +++ b/stdlib/IterativeEigensolvers/test/runtests.jl @@ -189,16 +189,16 @@ end S2 = svd(Array(A)) ## singular values match: - @test S1[1].S ≈ S2[2][1:2] + @test S1[1].S ≈ S2.S[1:2] @testset "singular vectors" begin ## 1st left singular vector s1_left = sign(S1[1].U[3,1]) * S1[1].U[:,1] - s2_left = sign(S2[1][3,1]) * S2[1][:,1] + s2_left = sign(S2.U[3,1]) * S2.U[:,1] @test s1_left ≈ s2_left ## 1st right singular vector s1_right = sign(S1[1].V[3,1]) * S1[1].V[:,1] - s2_right = sign(S2[3][3,1]) * S2[3][:,1] + s2_right = sign(S2.V[3,1]) * S2.V[:,1] @test s1_right ≈ s2_right end # Issue number 10329 @@ -213,7 +213,7 @@ end end @testset "passing guess for Krylov vectors" begin S1 = svds(A, nsv = 2, v0=rand(eltype(A),size(A,2))) - @test S1[1].S ≈ S2[2][1:2] + @test S1[1].S ≈ S2.S[1:2] end @test_throws ArgumentError svds(A,nsv=0) @@ -251,21 +251,21 @@ end S2 = svd(Array(A)) ## singular values match: - @test S1[1].S ≈ S2[2][1:2] + @test S1[1].S ≈ S2.S[1:2] @testset "singular vectors" begin ## left singular vectors s1_left = abs.(S1[1].U[:,1:2]) - s2_left = 
abs.(S2[1][:,1:2]) + s2_left = abs.(S2.U[:,1:2]) @test s1_left ≈ s2_left ## right singular vectors s1_right = abs.(S1[1].V[:,1:2]) - s2_right = abs.(S2[3][:,1:2]) + s2_right = abs.(S2.V[:,1:2]) @test s1_right ≈ s2_right end @testset "passing guess for Krylov vectors" begin S1 = svds(A, nsv = 2, v0=rand(eltype(A),size(A,2))) - @test S1[1].S ≈ S2[2][1:2] + @test S1[1].S ≈ S2.S[1:2] end @test_throws ArgumentError svds(A,nsv=0) diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index f00b40c47184b..5468447d95c4f 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -345,9 +345,8 @@ LinearAlgebra.schur! LinearAlgebra.schur LinearAlgebra.ordschur LinearAlgebra.ordschur! -LinearAlgebra.svdfact -LinearAlgebra.svd! LinearAlgebra.svd +LinearAlgebra.svd! LinearAlgebra.svdvals LinearAlgebra.svdvals! LinearAlgebra.Givens diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 5e98c00c59ecc..2a8c897bb5bb0 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -134,7 +134,6 @@ export schur!, svd, svd!, - svdfact, svdvals!, svdvals, sylvester, diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index 27bc6c0165ce1..4a1f4dc5aa8d1 100644 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -199,12 +199,12 @@ function svd!(M::Bidiagonal{<:BlasReal}; full::Bool = false, thin::Union{Bool,No d, e, U, Vt, Q, iQ = LAPACK.bdsdc!(M.uplo, 'I', M.dv, M.ev) SVD(U, d, Vt) end -function svdfact(M::Bidiagonal; full::Bool = false, thin::Union{Bool,Nothing} = nothing) +function svd(M::Bidiagonal; full::Bool = false, thin::Union{Bool,Nothing} = nothing) # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 if thin != nothing - Base.depwarn(string("the `thin` keyword argument in `svdfact(A; thin = $(thin))` has ", + Base.depwarn(string("the `thin` keyword argument in `svd(A; thin = $(thin))` has ", "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g. `svdfact(A; full = $(!thin))`."), :svdfact) + "e.g. `svd(A; full = $(!thin))`."), :svd) full::Bool = !thin end return svd!(copy(M), full = full) diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index b5eb10e7ca975..4f54fa0b01552 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -1292,7 +1292,7 @@ function pinv(A::StridedMatrix{T}, tol::Real) where T return B end end - SVD = svdfact(A, full = false) + SVD = svd(A, full = false) Stype = eltype(SVD.S) Sinv = zeros(Stype, length(SVD.S)) index = SVD.S .> tol*maximum(SVD.S) @@ -1344,7 +1344,7 @@ julia> nullspace(M, 2) function nullspace(A::StridedMatrix, tol::Real = min(size(A)...)*eps(real(float(one(eltype(A)))))) m, n = size(A) (m == 0 || n == 0) && return Matrix{T}(I, n, n) - SVD = svdfact(A, full=true) + SVD = svd(A, full=true) indstart = sum(SVD.S .> SVD.S[1]*tol) + 1 return copy(SVD.Vt[indstart:end,:]') end diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 2f9ca88bd9f0e..b7efba272a73a 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1328,3 +1328,15 @@ export svdfact! 
@deprecate(svdfact!(A::StridedMatrix{T}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) where T<:BlasFloat, svd!(A; full=full, thin=thin)) @deprecate(svdfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasFloat, svd!(A, B)) @deprecate(svdfact!(A::AbstractTriangular), svd!(A)) + +# deprecate svdfact to svd +export svdfact +@deprecate(svdfact(D::Diagonal), svd(D)) +@deprecate(svdfact(A::StridedVecOrMat{T}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) where T, svd(A; full=full, thin=thin)) +@deprecate(svdfact(x::Number; full::Bool = false, thin::Union{Bool,Nothing} = nothing), svd(x; full=full, thin=thin)) +@deprecate(svdfact(x::Integer; full::Bool = false, thin::Union{Bool,Nothing} = nothing), svd(x; full=full, thin=thin)) +@deprecate(svdfact(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat}, svd(A, B)) +@deprecate(svdfact(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB}, svd(A, B)) +@deprecate(svdfact(x::Number, y::Number), svd(x, y)) +@deprecate(svdfact(M::Bidiagonal; full::Bool = false, thin::Union{Bool,Nothing} = nothing), svd(M; full=full, thin=thin)) +@deprecate(svdfact(A::AbstractTriangular), svd(A)) diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl index 95406701e3048..39e0c8451c119 100644 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ b/stdlib/LinearAlgebra/src/diagonal.jl @@ -462,11 +462,7 @@ function svd(D::Diagonal{<:Number}) Up = hcat([U[:,i] for i = 1:length(D.diag)][piv]...) V = Diagonal(fill!(similar(D.diag), one(eltype(D.diag)))) Vp = hcat([V[:,i] for i = 1:length(D.diag)][piv]...) - return (Up, S[piv], Vp) -end -function svdfact(D::Diagonal) - U, s, V = svd(D) - SVD(U, s, copy(V')) + return SVD(Up, S[piv], copy(Vp')) end # dismabiguation methods: * of Diagonal and Adj/Trans AbsVec diff --git a/stdlib/LinearAlgebra/src/svd.jl b/stdlib/LinearAlgebra/src/svd.jl index 5c85faa058ade..51b986a67c7d2 100644 --- a/stdlib/LinearAlgebra/src/svd.jl +++ b/stdlib/LinearAlgebra/src/svd.jl @@ -10,10 +10,24 @@ struct SVD{T,Tr,M<:AbstractArray} <: Factorization{T} end SVD(U::AbstractArray{T}, S::Vector{Tr}, Vt::AbstractArray{T}) where {T,Tr} = SVD{T,Tr,typeof(U)}(U, S, Vt) +# iteration for destructuring into components +Base.iterate(S::SVD) = (S.U, Val(:S)) +Base.iterate(S::SVD, ::Val{:S}) = (S.S, Val(:V)) +Base.iterate(S::SVD, ::Val{:V}) = (S.V, Val(:done)) +Base.iterate(S::SVD, ::Val{:done}) = nothing + +# # indexing for destructuring into components +@inline function Base.getindex(S::SVD, i::Integer) + i == 1 ? (return S.U) : + i == 2 ? (return S.S) : + i == 3 ? (return S.V) : + throw(BoundsError(S, i)) +end + """ svd!(A; full::Bool = false) -> SVD -`svd!` is the same as [`svdfact`](@ref), but saves space by +`svd!` is the same as [`svd`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. # Examples @@ -60,7 +74,7 @@ function svd!(A::StridedMatrix{T}; full::Bool = false, thin::Union{Bool,Nothing} end """ - svdfact(A; full::Bool = false) -> SVD + svd(A; full::Bool = false) -> SVD Compute the singular value decomposition (SVD) of `A` and return an `SVD` object. @@ -69,6 +83,8 @@ Compute the singular value decomposition (SVD) of `A` and return an `SVD` object The algorithm produces `Vt` and hence `Vt` is more efficient to extract than `V`. The singular values in `S` are sorted in descending order. +Iterating the decomposition produces the components `U`, `S`, and `V`. + If `full = false` (default), a "thin" SVD is returned. 
For a ``M \\times N`` matrix `A`, in the full factorization `U` is `M \\times M` and `V` is `N \\times N`, while in the thin factorization `U` is `M @@ -84,7 +100,7 @@ julia> A = [1. 0. 0. 0. 2.; 0. 0. 3. 0. 0.; 0. 0. 0. 0. 0.; 0. 2. 0. 0. 0.] 0.0 0.0 0.0 0.0 0.0 0.0 2.0 0.0 0.0 0.0 -julia> F = svdfact(A); +julia> F = svd(A); julia> F.U * Diagonal(F.S) * F.Vt 4×5 Array{Float64,2}: @@ -94,84 +110,27 @@ julia> F.U * Diagonal(F.S) * F.Vt 0.0 2.0 0.0 0.0 0.0 ``` """ -function svdfact(A::StridedVecOrMat{T}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) where T +function svd(A::StridedVecOrMat{T}; full::Bool = false, thin::Union{Bool,Nothing} = nothing) where T # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 if thin != nothing - Base.depwarn(string("the `thin` keyword argument in `svdfact(A; thin = $(thin))` has ", + Base.depwarn(string("the `thin` keyword argument in `svd(A; thin = $(thin))` has ", "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g. `svdfact(A; full = $(!thin))`."), :svdfact) + "e.g. `svd(A; full = $(!thin))`."), :svd) full::Bool = !thin end svd!(copy_oftype(A, eigtype(T)), full = full) end -function svdfact(x::Number; full::Bool = false, thin::Union{Bool,Nothing} = nothing) - # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 - if thin != nothing - Base.depwarn(string("the `thin` keyword argument in `svdfact(A; thin = $(thin))` has ", - "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g. `svdfact(A; full = $(!thin))`."), :svdfact) - full::Bool = !thin - end - return SVD(x == 0 ? fill(one(x), 1, 1) : fill(x/abs(x), 1, 1), [abs(x)], fill(one(x), 1, 1)) -end -function svdfact(x::Integer; full::Bool = false, thin::Union{Bool,Nothing} = nothing) - # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 - if thin != nothing - Base.depwarn(string("the `thin` keyword argument in `svdfact(A; thin = $(thin))` has ", - "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g. `svdfact(A; full = $(!thin))`."), :svdfact) - full::Bool = !thin - end - return svdfact(float(x), full = full) -end - -""" - svd(A; full::Bool = false) -> U, S, V - -Computes the SVD of `A`, returning `U`, vector `S`, and `V` such that -`A == U * Diagonal(S) * V'`. The singular values in `S` are sorted in descending order. - -If `full = false` (default), a "thin" SVD is returned. For a ``M -\\times N`` matrix `A`, in the full factorization `U` is `M \\times M` -and `V` is `N \\times N`, while in the thin factorization `U` is `M -\\times K` and `V` is `N \\times K`, where `K = \\min(M,N)` is the -number of singular values. - -`svd` is a wrapper around [`svdfact`](@ref), extracting all parts -of the `SVD` factorization to a tuple. Direct use of `svdfact` is therefore more -efficient. - -# Examples -```jldoctest -julia> A = [1. 0. 0. 0. 2.; 0. 0. 3. 0. 0.; 0. 0. 0. 0. 0.; 0. 2. 0. 0. 0.] 
-4×5 Array{Float64,2}: - 1.0 0.0 0.0 0.0 2.0 - 0.0 0.0 3.0 0.0 0.0 - 0.0 0.0 0.0 0.0 0.0 - 0.0 2.0 0.0 0.0 0.0 - -julia> U, S, V = svd(A); - -julia> U * Diagonal(S) * V' -4×5 Array{Float64,2}: - 1.0 0.0 0.0 0.0 2.0 - 0.0 0.0 3.0 0.0 0.0 - 0.0 0.0 0.0 0.0 0.0 - 0.0 2.0 0.0 0.0 0.0 -``` -""" -function svd(A::AbstractArray; full::Bool = false, thin::Union{Bool,Nothing} = nothing) +function svd(x::Number; full::Bool = false, thin::Union{Bool,Nothing} = nothing) # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 if thin != nothing Base.depwarn(string("the `thin` keyword argument in `svd(A; thin = $(thin))` has ", "been deprecated in favor of `full`, which has the opposite meaning, ", - "e.g `svd(A; full = $(!thin))`."), :svd) + "e.g. `svd(A; full = $(!thin))`."), :svd) full::Bool = !thin end - F = svdfact(A, full = full) - F.U, F.S, copy(F.Vt') + return SVD(x == 0 ? fill(one(x), 1, 1) : fill(x/abs(x), 1, 1), [abs(x)], fill(one(x), 1, 1)) end -function svd(x::Number; full::Bool = false, thin::Union{Bool,Nothing} = nothing) +function svd(x::Integer; full::Bool = false, thin::Union{Bool,Nothing} = nothing) # DEPRECATION TODO: remove deprecated thin argument and associated logic after 0.7 if thin != nothing Base.depwarn(string("the `thin` keyword argument in `svd(A; thin = $(thin))` has ", @@ -179,7 +138,7 @@ function svd(x::Number; full::Bool = false, thin::Union{Bool,Nothing} = nothing) "e.g. `svd(A; full = $(!thin))`."), :svd) full::Bool = !thin end - return first.(svd(fill(x, 1, 1))) + return svd(float(x), full = full) end function getproperty(F::SVD, d::Symbol) @@ -197,7 +156,7 @@ Base.propertynames(F::SVD, private::Bool=false) = svdvals!(A) Return the singular values of `A`, saving space by overwriting the input. -See also [`svdvals`](@ref) and [`svdfact`](@ref). +See also [`svdvals`](@ref) and [`svd`](@ref). # Examples ```jldoctest @@ -278,10 +237,30 @@ function GeneralizedSVD(U::AbstractMatrix{T}, V::AbstractMatrix{T}, Q::AbstractM GeneralizedSVD{T,typeof(U)}(U, V, Q, a, b, k, l, R) end +# iteration for destructuring into components +Base.iterate(S::GeneralizedSVD) = (S.U, Val(:V)) +Base.iterate(S::GeneralizedSVD, ::Val{:V}) = (S.V, Val(:Q)) +Base.iterate(S::GeneralizedSVD, ::Val{:Q}) = (S.Q, Val(:D1)) +Base.iterate(S::GeneralizedSVD, ::Val{:D1}) = (S.D1, Val(:D2)) +Base.iterate(S::GeneralizedSVD, ::Val{:D2}) = (S.D2, Val(:R0)) +Base.iterate(S::GeneralizedSVD, ::Val{:R0}) = (S.R0, Val(:done)) +Base.iterate(S::GeneralizedSVD, ::Val{:done}) = nothing + +# indexing for destructuring into components +@inline function Base.getindex(S::GeneralizedSVD, i::Integer) + i == 1 ? (return S.U) : + i == 2 ? (return S.V) : + i == 3 ? (return S.Q) : + i == 4 ? (return S.D1) : + i == 5 ? (return S.D2) : + i == 6 ? (return S.R0) : + throw(BoundsError(S, i)) +end + """ svd!(A, B) -> GeneralizedSVD -`svd!` is the same as [`svdfact`](@ref), but modifies the arguments +`svd!` is the same as [`svd`](@ref), but modifies the arguments `A` and `B` in-place, instead of making copies. 
# Examples @@ -328,10 +307,10 @@ function svd!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasFloat end GeneralizedSVD(U, V, Q, a, b, Int(k), Int(l), R) end -svdfact(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = svd!(copy(A),copy(B)) +svd(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = svd!(copy(A),copy(B)) """ - svdfact(A, B) -> GeneralizedSVD + svd(A, B) -> GeneralizedSVD Compute the generalized SVD of `A` and `B`, returning a `GeneralizedSVD` factorization object `F`, such that `A = F.U*F.D1*F.R0*F.Q'` and `B = F.V*F.D2*F.R0*F.Q'`. @@ -341,13 +320,15 @@ For an M-by-N matrix `A` and P-by-N matrix `B`, - `U` is a M-by-M orthogonal matrix, - `V` is a P-by-P orthogonal matrix, - `Q` is a N-by-N orthogonal matrix, -- `R0` is a (K+L)-by-N matrix whose rightmost (K+L)-by-(K+L) block is - nonsingular upper block triangular, - `D1` is a M-by-(K+L) diagonal matrix with 1s in the first K entries, - `D2` is a P-by-(K+L) matrix whose top right L-by-L block is diagonal, +- `R0` is a (K+L)-by-N matrix whose rightmost (K+L)-by-(K+L) block is + nonsingular upper block triangular, `K+L` is the effective numerical rank of the matrix `[A; B]`. +Iterating the decomposition produces the components `U`, `V`, `Q`, `D1`, `D2`, and `R0`. + The entries of `F.D1` and `F.D2` are related, as explained in the LAPACK documentation for the [generalized SVD](http://www.netlib.org/lapack/lug/node36.html) and the @@ -366,7 +347,7 @@ julia> B = [0. 1.; 1. 0.] 0.0 1.0 1.0 0.0 -julia> F = svdfact(A, B); +julia> F = svd(A, B); julia> F.U*F.D1*F.R0*F.Q' 2×2 Array{Float64,2}: @@ -379,54 +360,14 @@ julia> F.V*F.D2*F.R0*F.Q' 1.0 0.0 ``` """ -function svdfact(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB} +function svd(A::StridedMatrix{TA}, B::StridedMatrix{TB}) where {TA,TB} S = promote_type(eigtype(TA),TB) return svd!(copy_oftype(A, S), copy_oftype(B, S)) end # This method can be heavily optimized but it is probably not critical # and might introduce bugs or inconsistencies relative to the 1x1 matrix # version -svdfact(x::Number, y::Number) = svdfact(fill(x, 1, 1), fill(y, 1, 1)) - -""" - svd(A, B) -> U, V, Q, D1, D2, R0 - -Wrapper around [`svdfact`](@ref) extracting all parts of the -factorization to a tuple. Direct use of -`svdfact` is therefore generally more efficient. The function returns the generalized SVD of -`A` and `B`, returning `U`, `V`, `Q`, `D1`, `D2`, and `R0` such that `A = U*D1*R0*Q'` and `B = -V*D2*R0*Q'`. - -# Examples -```jldoctest -julia> A = [1. 0.; 0. -1.] -2×2 Array{Float64,2}: - 1.0 0.0 - 0.0 -1.0 - -julia> B = [0. 1.; 1. 0.] -2×2 Array{Float64,2}: - 0.0 1.0 - 1.0 0.0 - -julia> U, V, Q, D1, D2, R0 = svd(A, B); - -julia> U*D1*R0*Q' -2×2 Array{Float64,2}: - 1.0 0.0 - 0.0 -1.0 - -julia> V*D2*R0*Q' -2×2 Array{Float64,2}: - 0.0 1.0 - 1.0 0.0 -``` -""" -function svd(A::AbstractMatrix, B::AbstractMatrix) - F = svdfact(A, B) - F.U, F.V, F.Q, F.D1, F.D2, F.R0 -end -svd(x::Number, y::Number) = first.(svd(fill(x, 1, 1), fill(y, 1, 1))) +svd(x::Number, y::Number) = svd(fill(x, 1, 1), fill(y, 1, 1)) @inline function getproperty(F::GeneralizedSVD{T}, d::Symbol) where T Fa = getfield(F, :a) @@ -476,7 +417,7 @@ Base.propertynames(F::GeneralizedSVD) = Return the generalized singular values from the generalized singular value decomposition of `A` and `B`, saving space by overwriting `A` and `B`. -See also [`svdfact`](@ref) and [`svdvals`](@ref). +See also [`svd`](@ref) and [`svdvals`](@ref). 
# Examples ```jldoctest @@ -521,7 +462,7 @@ svdvals(A::StridedMatrix{T},B::StridedMatrix{T}) where {T<:BlasFloat} = svdvals! svdvals(A, B) Return the generalized singular values from the generalized singular value -decomposition of `A` and `B`. See also [`svdfact`](@ref). +decomposition of `A` and `B`. See also [`svd`](@ref). # Examples ```jldoctest diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index bb1ce1b4968e8..d77082425d394 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -2435,7 +2435,7 @@ end eigfact(A::AbstractTriangular) = Eigen(eigvals(A), eigvecs(A)) # Generic singular systems -for func in (:svd, :svdfact, :svd!, :svdvals) +for func in (:svd, :svd!, :svdvals) @eval begin ($func)(A::AbstractTriangular) = ($func)(copyto!(similar(parent(A)), A)) end diff --git a/stdlib/LinearAlgebra/test/bidiag.jl b/stdlib/LinearAlgebra/test/bidiag.jl index 39c8d4cf31258..1f4fa2d41b637 100644 --- a/stdlib/LinearAlgebra/test/bidiag.jl +++ b/stdlib/LinearAlgebra/test/bidiag.jl @@ -242,14 +242,14 @@ srand(1) @testset "Singular systems" begin if (elty <: BlasReal) - @test AbstractArray(svdfact(T)) ≈ AbstractArray(svd!(copy(Tfull))) + @test AbstractArray(svd(T)) ≈ AbstractArray(svd!(copy(Tfull))) @test svdvals(Tfull) ≈ svdvals(T) u1, d1, v1 = svd(Tfull) u2, d2, v2 = svd(T) @test d1 ≈ d2 if elty <: Real test_approx_eq_modphase(u1, u2) - test_approx_eq_modphase(v1, v2) + test_approx_eq_modphase(copy(v1), copy(v2)) end @test 0 ≈ vecnorm(u2*Diagonal(d2)*v2'-Tfull) atol=n*max(n^2*eps(relty),vecnorm(u1*Diagonal(d1)*v1'-Tfull)) @inferred svdvals(T) diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl index caabdd291eca3..651e14b819115 100644 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -265,7 +265,7 @@ srand(1) U, s, V = svd(D) @test (U*Diagonal(s))*V' ≈ D @test svdvals(D) == s - @test svdfact(D).V == V + @test svd(D).V == V end end diff --git a/stdlib/LinearAlgebra/test/svd.jl b/stdlib/LinearAlgebra/test/svd.jl index 1210ad5a31b44..44047d35b523a 100644 --- a/stdlib/LinearAlgebra/test/svd.jl +++ b/stdlib/LinearAlgebra/test/svd.jl @@ -5,7 +5,7 @@ module TestSVD using Test, LinearAlgebra, Random using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted -@testset "Simple svdvals / svdfact tests" begin +@testset "Simple svdvals / svd tests" begin ≊(x,y) = isapprox(x,y,rtol=1e-15) m1 = [2 0; 0 0] @@ -15,8 +15,8 @@ using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted @test @inferred(svdvals(m2)) ≊ [2, 1] @test @inferred(svdvals(m2c)) ≊ [2, 1] - sf1 = svdfact(m1) - sf2 = svdfact(m2) + sf1 = svd(m1) + sf2 = svd(m2) @test sf1.S ≊ [2, 0] @test sf2.S ≊ [2, 1] # U & Vt are unitary @@ -52,7 +52,7 @@ a2img = randn(n,n)/2 for (a, a2) in ((aa, aa2), (view(aa, 1:n, 1:n), view(aa2, 1:n, 1:n))) ε = εa = eps(abs(float(one(eltya)))) - usv = svdfact(a) + usv = svd(a) @testset "singular value decomposition" begin @test usv.S === svdvals(usv) @test usv.U * (Diagonal(usv.S) * usv.Vt) ≈ a @@ -71,7 +71,7 @@ a2img = randn(n,n)/2 end @testset "Generalized svd" begin a_svd = a[1:n1, :] - gsvd = svdfact(a,a_svd) + gsvd = svd(a,a_svd) @test gsvd.U*gsvd.D1*gsvd.R*gsvd.Q' ≈ a @test gsvd.V*gsvd.D2*gsvd.R*gsvd.Q' ≈ a_svd @test usv.Vt' ≈ usv.V @@ -79,7 +79,7 @@ a2img = randn(n,n)/2 @test_throws ErrorException gsvd.Z @test gsvd.vals ≈ svdvals(a,a_svd) α = eltya == Int ? 
-1 : rand(eltya) - β = svdfact(α) + β = svd(α) @test β.S == [abs(α)] @test svdvals(α) == abs(α) u,v,q,d1,d2,r0 = svd(a,a_svd) @@ -93,7 +93,7 @@ a2img = randn(n,n)/2 #testing the other layout for D1 & D2 b = rand(eltya,n,2*n) c = rand(eltya,n,2*n) - gsvd = svdfact(b,c) + gsvd = svd(b,c) @test gsvd.U*gsvd.D1*gsvd.R*gsvd.Q' ≈ b @test gsvd.V*gsvd.D2*gsvd.R*gsvd.Q' ≈ c end @@ -101,18 +101,16 @@ a2img = randn(n,n)/2 if eltya <: LinearAlgebra.BlasReal @testset "Number input" begin x, y = randn(eltya, 2) - @test svdfact(x) == svdfact(fill(x, 1, 1)) + @test svd(x) == svd(fill(x, 1, 1)) @test svdvals(x) == first(svdvals(fill(x, 1, 1))) - @test svd(x) == first.(svd(fill(x, 1, 1))) - @test svdfact(x, y) == svdfact(fill(x, 1, 1), fill(y, 1, 1)) + @test svd(x, y) == svd(fill(x, 1, 1), fill(y, 1, 1)) @test svdvals(x, y) ≈ first(svdvals(fill(x, 1, 1), fill(y, 1, 1))) - @test svd(x, y) == first.(svd(fill(x, 1, 1), fill(y, 1, 1))) end end if eltya != Int @testset "isequal, ==, and hash" begin x, y = rand(eltya), convert(eltya, NaN) - Fx, Fy = svdfact(x), svdfact(y) + Fx, Fy = svd(x), svd(y) @test Fx == Fx @test !(Fy == Fy) @test isequal(Fy, Fy) diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index e7a87b30e3bcb..0043176673605 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -265,7 +265,6 @@ for elty1 in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFlo if !(elty1 in (BigFloat, Complex{BigFloat})) # Not implemented yet svd(A1) - svdfact(A1) elty1 <: BlasFloat && svd!(copy(A1)) svdvals(A1) end diff --git a/test/bitarray.jl b/test/bitarray.jl index c21467e7906ae..997ae0b1edc6a 100644 --- a/test/bitarray.jl +++ b/test/bitarray.jl @@ -1467,7 +1467,8 @@ timesofar("cat") @check_bit_operation diff(b1, dims=2) Matrix{Int} b1 = bitrand(n1, n1) - @check_bit_operation svd(b1) + @test ((svdb1, svdb1A) = (svd(b1), svd(Array(b1))); + svdb1.U == svdb1A.U && svdb1.S == svdb1A.S && svdb1.V == svdb1A.V) @test ((qrb1, qrb1A) = (qr(b1), qr(Array(b1))); qrb1.Q == qrb1A.Q && qrb1.R == qrb1A.R) From a7291184433f29c83e3f3ae41f1e6fec1f482a0e Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Tue, 22 May 2018 19:25:49 -0700 Subject: [PATCH 13/23] Deprecate bkfact to bunchkaufman. --- NEWS.md | 5 +-- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/bunchkaufman.jl | 44 +++++++++++++++++------ stdlib/LinearAlgebra/src/dense.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 7 +++- stdlib/LinearAlgebra/src/symmetric.jl | 2 +- stdlib/LinearAlgebra/test/bunchkaufman.jl | 20 +++++------ 8 files changed, 57 insertions(+), 27 deletions(-) diff --git a/NEWS.md b/NEWS.md index 8a36a8208bbc8..541df4f260fa2 100644 --- a/NEWS.md +++ b/NEWS.md @@ -703,8 +703,9 @@ Deprecated or removed * The keyword `immutable` is fully deprecated to `struct`, and `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). - * `lufact`, `schurfact`, `lqfact`, `qrfact`, `ldltfact`, and `svdfact` have - respectively been deprecated to `lu`, `schur`, `lq`, `qr`, `ldlt`, and `svd` ([#27159]). + * `lufact`, `schurfact`, `lqfact`, `qrfact`, `ldltfact`, `svdfact`, + and `bkfact` have respectively been deprecated to `lu`, `schur`, `lq`, + `qr`, `ldlt`, `svd`, and `bunchkaufman` ([#27159]). 
* `lufact!`, `schurfact!`, `lqfact!`, `qrfact!`, `ldltfact!`, and `svdfact!` have respectively been deprecated to `lu!`, `schur!`, `lq!`, `qr!`, `ldlt!`, diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 5468447d95c4f..7c0bbd66739da 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -329,7 +329,7 @@ LinearAlgebra.QRCompactWY LinearAlgebra.QRPivoted LinearAlgebra.lq! LinearAlgebra.lq -LinearAlgebra.bkfact +LinearAlgebra.bunchkaufman LinearAlgebra.bkfact! LinearAlgebra.eig LinearAlgebra.eigvals diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 2a8c897bb5bb0..c97692f7db552 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -63,7 +63,7 @@ export # Functions axpy!, axpby!, - bkfact, + bunchkaufman, bkfact!, chol, cholfact, diff --git a/stdlib/LinearAlgebra/src/bunchkaufman.jl b/stdlib/LinearAlgebra/src/bunchkaufman.jl index 666e50f73e267..56a257bf1d6b1 100644 --- a/stdlib/LinearAlgebra/src/bunchkaufman.jl +++ b/stdlib/LinearAlgebra/src/bunchkaufman.jl @@ -16,10 +16,17 @@ BunchKaufman(A::AbstractMatrix{T}, ipiv::Vector{BlasInt}, uplo::AbstractChar, sy rook::Bool, info::BlasInt) where {T} = BunchKaufman{T,typeof(A)}(A, ipiv, uplo, symmetric, rook, info) +# iteration for destructuring into components +Base.iterate(S::BunchKaufman) = (S.D, Val(:UL)) +Base.iterate(S::BunchKaufman, ::Val{:UL}) = (S.uplo == 'L' ? S.L : S.U, Val(:p)) +Base.iterate(S::BunchKaufman, ::Val{:p}) = (S.p, Val(:done)) +Base.iterate(S::BunchKaufman, ::Val{:done}) = nothing + + """ bkfact!(A, rook::Bool=false) -> BunchKaufman -`bkfact!` is the same as [`bkfact`](@ref), but saves space by overwriting the +`bkfact!` is the same as [`bunchkaufman`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. """ function bkfact!(A::RealHermSymComplexSym{T,S} where {T<:BlasReal,S<:StridedMatrix}, rook::Bool = false) @@ -41,15 +48,27 @@ function bkfact!(A::StridedMatrix{<:BlasFloat}, rook::Bool = false) end """ - bkfact(A, rook::Bool=false) -> BunchKaufman + bunchkaufman(A, rook::Bool=false) -> S::BunchKaufman + +Compute the Bunch-Kaufman [^Bunch1977] factorization of a `Symmetric` or +`Hermitian` matrix `A` as ``P'*U*D*U'*P`` or ``P'*L*D*L'*P``, depending on +which triangle is stored in `A`, and return a `BunchKaufman` object. +Note that if `A` is complex symmetric then `U'` and `L'` denote +the unconjugated transposes, i.e. `transpose(U)` and `transpose(L)`. -Compute the Bunch-Kaufman [^Bunch1977] factorization of a symmetric or Hermitian matrix `A` as ``P'*U*D*U'*P`` or ``P'*L*D*L'*P``, depending on which triangle is stored in `A`, and return a `BunchKaufman` object. Note that if `A` is complex symmetric then `U'` and `L'` denote the unconjugated transposes, i.e. `transpose(U)` and `transpose(L)`. +Iterating the decomposition produces the components `S.D`, `S.U` or `S.L` +as appropriate given `S.uplo`, and `S.p`. -If `rook` is `true`, rook pivoting is used. If `rook` is false, rook pivoting is not used. +If `rook` is `true`, rook pivoting is used. If `rook` is false, +rook pivoting is not used. -The following functions are available for `BunchKaufman` objects: [`size`](@ref), `\\`, [`inv`](@ref), [`issymmetric`](@ref), [`ishermitian`](@ref), [`getindex`](@ref). 
+The following functions are available for `BunchKaufman` objects: +[`size`](@ref), `\\`, [`inv`](@ref), [`issymmetric`](@ref), +[`ishermitian`](@ref), [`getindex`](@ref). -[^Bunch1977]: J R Bunch and L Kaufman, Some stable methods for calculating inertia and solving symmetric linear systems, Mathematics of Computation 31:137 (1977), 163-179. [url](http://www.ams.org/journals/mcom/1977-31-137/S0025-5718-1977-0428694-0/). +[^Bunch1977]: J R Bunch and L Kaufman, Some stable methods for calculating inertia +and solving symmetric linear systems, Mathematics of Computation 31:137 (1977), 163-179. +[url](http://www.ams.org/journals/mcom/1977-31-137/S0025-5718-1977-0428694-0/). # Examples ```jldoctest @@ -58,7 +77,7 @@ julia> A = [1 2; 2 3] 1 2 2 3 -julia> bkfact(A) +julia> S = bunchkaufman(A) BunchKaufman{Float64,Array{Float64,2}} D factor: 2×2 Tridiagonal{Float64,Array{Float64,1}}: @@ -72,9 +91,14 @@ permutation: 2-element Array{Int64,1}: 1 2 + +julia> d, u, p = S; # destructuring via iteration + +julia> d == S.D && u == S.U && p == S.p +true ``` """ -bkfact(A::AbstractMatrix{T}, rook::Bool=false) where {T} = +bunchkaufman(A::AbstractMatrix{T}, rook::Bool=false) where {T} = bkfact!(copy_oftype(A, typeof(sqrt(one(T)))), rook) convert(::Type{BunchKaufman{T}}, B::BunchKaufman{T}) where {T} = B @@ -134,7 +158,7 @@ julia> A = [1 2 3; 2 1 2; 3 2 1] 2 1 2 3 2 1 -julia> F = bkfact(Symmetric(A, :L)) +julia> F = bunchkaufman(Symmetric(A, :L)) BunchKaufman{Float64,Array{Float64,2}} D factor: 3×3 Tridiagonal{Float64,Array{Float64,1}}: @@ -158,7 +182,7 @@ julia> F.L*F.D*F.L' - A[F.p, F.p] 0.0 0.0 0.0 0.0 0.0 0.0 -julia> F = bkfact(Symmetric(A)); +julia> F = bunchkaufman(Symmetric(A)); julia> F.U*F.D*F.U' - F.P*A*F.P' 3×3 Array{Float64,2}: diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 4f54fa0b01552..5477e3d7693df 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -1113,7 +1113,7 @@ systems. For example: `A=factorize(A); x=A\\b; y=A\\C`. | Properties of `A` | type of factorization | |:---------------------------|:-----------------------------------------------| | Positive-definite | Cholesky (see [`cholfact`](@ref)) | -| Dense Symmetric/Hermitian | Bunch-Kaufman (see [`bkfact`](@ref)) | +| Dense Symmetric/Hermitian | Bunch-Kaufman (see [`bunchkaufman`](@ref)) | | Sparse Symmetric/Hermitian | LDLt (see [`ldlt`](@ref)) | | Triangular | Triangular | | Diagonal | Diagonal | diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index b7efba272a73a..5fa4ae8ce2e6c 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -19,7 +19,8 @@ using Base: @deprecate, depwarn @deprecate isposdef!(A::StridedMatrix, UL::Symbol) isposdef!(Hermitian(A, UL)) # bkfact -import .LinearAlgebra: bkfact, bkfact! +export bkfact +import .LinearAlgebra: bkfact! function bkfact(A::StridedMatrix, uplo::Symbol, symmetric::Bool = issymmetric(A), rook::Bool = false) depwarn(string("`bkfact` with uplo and symmetric arguments is deprecated, ", "use `bkfact($(symmetric ? 
"Symmetric(" : "Hermitian(")A, :$uplo))` instead."), @@ -1340,3 +1341,7 @@ export svdfact @deprecate(svdfact(x::Number, y::Number), svd(x, y)) @deprecate(svdfact(M::Bidiagonal; full::Bool = false, thin::Union{Bool,Nothing} = nothing), svd(M; full=full, thin=thin)) @deprecate(svdfact(A::AbstractTriangular), svd(A)) + +# deprecate bkfact to bunchkaufman +# bkfact exported in a deprecation above +@deprecate(bkfact(A::AbstractMatrix{T}, rook::Bool=false) where {T}, bunchkaufman(A, rook)) diff --git a/stdlib/LinearAlgebra/src/symmetric.jl b/stdlib/LinearAlgebra/src/symmetric.jl index b4c69c85ef47d..8967f0919a994 100644 --- a/stdlib/LinearAlgebra/src/symmetric.jl +++ b/stdlib/LinearAlgebra/src/symmetric.jl @@ -440,7 +440,7 @@ end function factorize(A::HermOrSym{T}) where T TT = typeof(sqrt(one(T))) if TT <: BlasFloat - return bkfact(A) + return bunchkaufman(A) else # fallback return lu(A) end diff --git a/stdlib/LinearAlgebra/test/bunchkaufman.jl b/stdlib/LinearAlgebra/test/bunchkaufman.jl index 78e4604fd496e..99338dc25bc81 100644 --- a/stdlib/LinearAlgebra/test/bunchkaufman.jl +++ b/stdlib/LinearAlgebra/test/bunchkaufman.jl @@ -38,7 +38,7 @@ bimg = randn(n,2)/2 @test isa(factorize(asym), LinearAlgebra.BunchKaufman) @test isa(factorize(aher), LinearAlgebra.BunchKaufman) @testset "$uplo Bunch-Kaufman factor of indefinite matrix" for uplo in (:L, :U) - bc1 = bkfact(Hermitian(aher, uplo)) + bc1 = bunchkaufman(Hermitian(aher, uplo)) @test LinearAlgebra.issuccess(bc1) @test logabsdet(bc1)[1] ≈ log(abs(det(bc1))) if eltya <: Real @@ -48,7 +48,7 @@ bimg = randn(n,2)/2 end @test inv(bc1)*aher ≈ Matrix(I, n, n) @testset for rook in (false, true) - @test inv(bkfact(Symmetric(transpose(a) + a, uplo), rook))*(transpose(a) + a) ≈ Matrix(I, n, n) + @test inv(bunchkaufman(Symmetric(transpose(a) + a, uplo), rook))*(transpose(a) + a) ≈ Matrix(I, n, n) if eltya <: BlasFloat # test also bkfact! without explicit type tag # no bkfact! method for Int ... yet @@ -58,7 +58,7 @@ bimg = randn(n,2)/2 @test size(bc1, 1) == size(bc1.LD, 1) @test size(bc1, 2) == size(bc1.LD, 2) if eltya <: BlasReal - @test_throws ArgumentError bkfact(a) + @test_throws ArgumentError bunchkaufman(a) end # Test extraction of factors if eltya <: Real @@ -66,7 +66,7 @@ bimg = randn(n,2)/2 @test getproperty(bc1, uplo)*bc1.D*getproperty(bc1, uplo)' ≈ bc1.P*aher*bc1.P' end - bc1 = bkfact(Symmetric(asym, uplo)) + bc1 = bunchkaufman(Symmetric(asym, uplo)) @test getproperty(bc1, uplo)*bc1.D*transpose(getproperty(bc1, uplo)) ≈ asym[bc1.p, bc1.p] @test getproperty(bc1, uplo)*bc1.D*transpose(getproperty(bc1, uplo)) ≈ bc1.P*asym*transpose(bc1.P) @test_throws ErrorException bc1.Z @@ -81,13 +81,13 @@ bimg = randn(n,2)/2 ε = max(εa,εb) @testset "$uplo Bunch-Kaufman factor of indefinite matrix" for uplo in (:L, :U) - bc1 = bkfact(Hermitian(aher, uplo)) + bc1 = bunchkaufman(Hermitian(aher, uplo)) @test aher*(bc1\b) ≈ b atol=1000ε end @testset "$uplo Bunch-Kaufman factors of a pos-def matrix" for uplo in (:U, :L) @testset "rook pivoting: $rook" for rook in (false, true) - bc2 = bkfact(Hermitian(apd, uplo), rook) + bc2 = bunchkaufman(Hermitian(apd, uplo), rook) @test LinearAlgebra.issuccess(bc2) bks = split(sprint(show, "text/plain", bc2), "\n") @test bks[1] == summary(bc2) @@ -114,7 +114,7 @@ bimg = randn(n,2)/2 for As in (As, view(As, 1:n, 1:n)) @testset "$uplo Bunch-Kaufman factors of a singular matrix" for uplo in (:L, :U) @testset for rook in (false, true) - F = bkfact(issymmetric(As) ? 
Symmetric(As, uplo) : Hermitian(As, uplo), rook) + F = bunchkaufman(issymmetric(As) ? Symmetric(As, uplo) : Hermitian(As, uplo), rook) @test !LinearAlgebra.issuccess(F) # test printing of this as well! bks = sprint(show, "text/plain", F) @@ -138,8 +138,8 @@ end @test F\v5 == F\v6[1:5] end -@test_throws DomainError logdet(bkfact([-1 -1; -1 1])) -@test logabsdet(bkfact([8 4; 4 2]))[1] == -Inf -@test isa(bkfact(Symmetric(ones(0,0))), BunchKaufman) # 0x0 matrix +@test_throws DomainError logdet(bunchkaufman([-1 -1; -1 1])) +@test logabsdet(bunchkaufman([8 4; 4 2]))[1] == -Inf +@test isa(bunchkaufman(Symmetric(ones(0,0))), BunchKaufman) # 0x0 matrix end # module TestBunchKaufman From cf08aeca6cf310f29f838e61ab649210a364e939 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Tue, 22 May 2018 19:38:15 -0700 Subject: [PATCH 14/23] Deprecate bkfact! to bunchkaufman!. --- NEWS.md | 6 +++--- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/bunchkaufman.jl | 16 ++++++++-------- stdlib/LinearAlgebra/src/deprecated.jl | 9 +++++++-- stdlib/LinearAlgebra/test/bunchkaufman.jl | 6 +++--- 6 files changed, 23 insertions(+), 18 deletions(-) diff --git a/NEWS.md b/NEWS.md index 541df4f260fa2..8f740af18c403 100644 --- a/NEWS.md +++ b/NEWS.md @@ -707,9 +707,9 @@ Deprecated or removed and `bkfact` have respectively been deprecated to `lu`, `schur`, `lq`, `qr`, `ldlt`, `svd`, and `bunchkaufman` ([#27159]). - * `lufact!`, `schurfact!`, `lqfact!`, `qrfact!`, `ldltfact!`, and `svdfact!` - have respectively been deprecated to `lu!`, `schur!`, `lq!`, `qr!`, `ldlt!`, - and `svd!` ([#27159]). + * `lufact!`, `schurfact!`, `lqfact!`, `qrfact!`, `ldltfact!`, `svdfact!`, + and `bkfact!` have respectively been deprecated to + `lu!`, `schur!`, `lq!`, `qr!`, `ldlt!`, `svd!`, and `bunchkaufman!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 7c0bbd66739da..73ebb6072a938 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -330,7 +330,7 @@ LinearAlgebra.QRPivoted LinearAlgebra.lq! LinearAlgebra.lq LinearAlgebra.bunchkaufman -LinearAlgebra.bkfact! +LinearAlgebra.bunchkaufman! LinearAlgebra.eig LinearAlgebra.eigvals LinearAlgebra.eigvals! diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index c97692f7db552..aa7e7d253a3c4 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -64,7 +64,7 @@ export axpy!, axpby!, bunchkaufman, - bkfact!, + bunchkaufman!, chol, cholfact, cholfact!, diff --git a/stdlib/LinearAlgebra/src/bunchkaufman.jl b/stdlib/LinearAlgebra/src/bunchkaufman.jl index 56a257bf1d6b1..d4b3f9d5482bf 100644 --- a/stdlib/LinearAlgebra/src/bunchkaufman.jl +++ b/stdlib/LinearAlgebra/src/bunchkaufman.jl @@ -24,24 +24,24 @@ Base.iterate(S::BunchKaufman, ::Val{:done}) = nothing """ - bkfact!(A, rook::Bool=false) -> BunchKaufman + bunchkaufman!(A, rook::Bool=false) -> BunchKaufman -`bkfact!` is the same as [`bunchkaufman`](@ref), but saves space by overwriting the +`bunchkaufman!` is the same as [`bunchkaufman`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. 
""" -function bkfact!(A::RealHermSymComplexSym{T,S} where {T<:BlasReal,S<:StridedMatrix}, rook::Bool = false) +function bunchkaufman!(A::RealHermSymComplexSym{T,S} where {T<:BlasReal,S<:StridedMatrix}, rook::Bool = false) LD, ipiv, info = rook ? LAPACK.sytrf_rook!(A.uplo, A.data) : LAPACK.sytrf!(A.uplo, A.data) BunchKaufman(LD, ipiv, A.uplo, true, rook, info) end -function bkfact!(A::Hermitian{T,S} where {T<:BlasComplex,S<:StridedMatrix{T}}, rook::Bool = false) +function bunchkaufman!(A::Hermitian{T,S} where {T<:BlasComplex,S<:StridedMatrix{T}}, rook::Bool = false) LD, ipiv, info = rook ? LAPACK.hetrf_rook!(A.uplo, A.data) : LAPACK.hetrf!(A.uplo, A.data) BunchKaufman(LD, ipiv, A.uplo, false, rook, info) end -function bkfact!(A::StridedMatrix{<:BlasFloat}, rook::Bool = false) +function bunchkaufman!(A::StridedMatrix{<:BlasFloat}, rook::Bool = false) if ishermitian(A) - return bkfact!(Hermitian(A), rook) + return bunchkaufman!(Hermitian(A), rook) elseif issymmetric(A) - return bkfact!(Symmetric(A), rook) + return bunchkaufman!(Symmetric(A), rook) else throw(ArgumentError("Bunch-Kaufman decomposition is only valid for symmetric or Hermitian matrices")) end @@ -99,7 +99,7 @@ true ``` """ bunchkaufman(A::AbstractMatrix{T}, rook::Bool=false) where {T} = - bkfact!(copy_oftype(A, typeof(sqrt(one(T)))), rook) + bunchkaufman!(copy_oftype(A, typeof(sqrt(one(T)))), rook) convert(::Type{BunchKaufman{T}}, B::BunchKaufman{T}) where {T} = B convert(::Type{BunchKaufman{T}}, B::BunchKaufman) where {T} = diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 5fa4ae8ce2e6c..aaa3831bcc24d 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -19,8 +19,7 @@ using Base: @deprecate, depwarn @deprecate isposdef!(A::StridedMatrix, UL::Symbol) isposdef!(Hermitian(A, UL)) # bkfact -export bkfact -import .LinearAlgebra: bkfact! +export bkfact, bkfact! function bkfact(A::StridedMatrix, uplo::Symbol, symmetric::Bool = issymmetric(A), rook::Bool = false) depwarn(string("`bkfact` with uplo and symmetric arguments is deprecated, ", "use `bkfact($(symmetric ? "Symmetric(" : "Hermitian(")A, :$uplo))` instead."), @@ -1345,3 +1344,9 @@ export svdfact # deprecate bkfact to bunchkaufman # bkfact exported in a deprecation above @deprecate(bkfact(A::AbstractMatrix{T}, rook::Bool=false) where {T}, bunchkaufman(A, rook)) + +# deprecate bkfact! to bunchkaufman! +export bkfact! +@deprecate(bkfact!(A::RealHermSymComplexSym{T,S} where {T<:BlasReal,S<:StridedMatrix}, rook::Bool = false), bunchkaufman!(A, rook)) +@deprecate(bkfact!(A::Hermitian{T,S} where {T<:BlasComplex,S<:StridedMatrix{T}}, rook::Bool = false), bunchkaufman!(A, rook)) +@deprecate(bkfact!(A::StridedMatrix{<:BlasFloat}, rook::Bool = false), bunchkaufman!(A, rook)) diff --git a/stdlib/LinearAlgebra/test/bunchkaufman.jl b/stdlib/LinearAlgebra/test/bunchkaufman.jl index 99338dc25bc81..8469196b7dc0e 100644 --- a/stdlib/LinearAlgebra/test/bunchkaufman.jl +++ b/stdlib/LinearAlgebra/test/bunchkaufman.jl @@ -50,9 +50,9 @@ bimg = randn(n,2)/2 @testset for rook in (false, true) @test inv(bunchkaufman(Symmetric(transpose(a) + a, uplo), rook))*(transpose(a) + a) ≈ Matrix(I, n, n) if eltya <: BlasFloat - # test also bkfact! without explicit type tag - # no bkfact! method for Int ... yet - @test inv(bkfact!(transpose(a) + a, rook))*(transpose(a) + a) ≈ Matrix(I, n, n) + # test also bunchkaufman! without explicit type tag + # no bunchkaufman! method for Int ... 
yet + @test inv(bunchkaufman!(transpose(a) + a, rook))*(transpose(a) + a) ≈ Matrix(I, n, n) end @test size(bc1) == size(bc1.LD) @test size(bc1, 1) == size(bc1.LD, 1) From 5797f884f822f0d4ff892d825f0696520419dc18 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Tue, 22 May 2018 19:45:31 -0700 Subject: [PATCH 15/23] Deprecate hessfact to hessenberg. --- NEWS.md | 5 +++-- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 5 +++++ stdlib/LinearAlgebra/src/hessenberg.jl | 22 ++++++++++++++++------ stdlib/LinearAlgebra/test/dense.jl | 2 +- stdlib/LinearAlgebra/test/givens.jl | 2 +- stdlib/LinearAlgebra/test/hessenberg.jl | 2 +- 8 files changed, 29 insertions(+), 13 deletions(-) diff --git a/NEWS.md b/NEWS.md index 8f740af18c403..d40296bd3ec25 100644 --- a/NEWS.md +++ b/NEWS.md @@ -704,8 +704,9 @@ Deprecated or removed `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). * `lufact`, `schurfact`, `lqfact`, `qrfact`, `ldltfact`, `svdfact`, - and `bkfact` have respectively been deprecated to `lu`, `schur`, `lq`, - `qr`, `ldlt`, `svd`, and `bunchkaufman` ([#27159]). + `bkfact`, and `hessfact` have respectively been deprecated to + `lu`, `schur`, `lq`, `qr`, `ldlt`, `svd`, `bunchkaufman`, + and `hessenberg` ([#27159]). * `lufact!`, `schurfact!`, `lqfact!`, `qrfact!`, `ldltfact!`, `svdfact!`, and `bkfact!` have respectively been deprecated to diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 73ebb6072a938..7b6011ff5f42e 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -339,7 +339,7 @@ LinearAlgebra.eigmin LinearAlgebra.eigvecs LinearAlgebra.eigfact LinearAlgebra.eigfact! -LinearAlgebra.hessfact +LinearAlgebra.hessenberg LinearAlgebra.hessfact! LinearAlgebra.schur! LinearAlgebra.schur diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index aa7e7d253a3c4..9cb532317e8a7 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -90,7 +90,7 @@ export eigvecs, factorize, givens, - hessfact, + hessenberg, hessfact!, isdiag, ishermitian, diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index aaa3831bcc24d..592b795e0880d 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1350,3 +1350,8 @@ export bkfact! @deprecate(bkfact!(A::RealHermSymComplexSym{T,S} where {T<:BlasReal,S<:StridedMatrix}, rook::Bool = false), bunchkaufman!(A, rook)) @deprecate(bkfact!(A::Hermitian{T,S} where {T<:BlasComplex,S<:StridedMatrix{T}}, rook::Bool = false), bunchkaufman!(A, rook)) @deprecate(bkfact!(A::StridedMatrix{<:BlasFloat}, rook::Bool = false), bunchkaufman!(A, rook)) + +# deprecate hessfact to hessenberg +export hessfact +@deprecate(hessfact(A::StridedMatrix{<:BlasFloat}), hessenberg(A)) +@deprecate(hessfact(A::StridedMatrix{T}) where T, hessenberg(A)) diff --git a/stdlib/LinearAlgebra/src/hessenberg.jl b/stdlib/LinearAlgebra/src/hessenberg.jl index d1014c2199bde..59ac3c5ef2775 100644 --- a/stdlib/LinearAlgebra/src/hessenberg.jl +++ b/stdlib/LinearAlgebra/src/hessenberg.jl @@ -7,22 +7,25 @@ struct Hessenberg{T,S<:AbstractMatrix} <: Factorization{T} new(factors, τ) end Hessenberg(factors::AbstractMatrix{T}, τ::Vector{T}) where {T} = Hessenberg{T,typeof(factors)}(factors, τ) - Hessenberg(A::StridedMatrix) = Hessenberg(LAPACK.gehrd!(A)...) 
+# iteration for destructuring into components +Base.iterate(S::Hessenberg) = (S.Q, Val(:H)) +Base.iterate(S::Hessenberg, ::Val{:H}) = (S.H, Val(:done)) +Base.iterate(S::Hessenberg, ::Val{:done}) = nothing """ hessfact!(A) -> Hessenberg -`hessfact!` is the same as [`hessfact`](@ref), but saves space by overwriting +`hessfact!` is the same as [`hessenberg`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. """ hessfact!(A::StridedMatrix{<:BlasFloat}) = Hessenberg(A) -hessfact(A::StridedMatrix{<:BlasFloat}) = hessfact!(copy(A)) +hessenberg(A::StridedMatrix{<:BlasFloat}) = hessfact!(copy(A)) """ - hessfact(A) -> Hessenberg + hessenberg(A) -> Hessenberg Compute the Hessenberg decomposition of `A` and return a `Hessenberg` object. If `F` is the factorization object, the unitary matrix can be accessed with `F.Q` and the Hessenberg @@ -30,6 +33,8 @@ matrix with `F.H`. When `Q` is extracted, the resulting type is the `HessenbergQ and may be converted to a regular matrix with [`convert(Array, _)`](@ref) (or `Array(_)` for short). +Iterating the decomposition produces the factors `F.Q` and `F.H`. + # Examples ```jldoctest julia> A = [4. 9. 7.; 4. 4. 1.; 4. 3. 2.] @@ -38,16 +43,21 @@ julia> A = [4. 9. 7.; 4. 4. 1.; 4. 3. 2.] 4.0 4.0 1.0 4.0 3.0 2.0 -julia> F = hessfact(A); +julia> F = hessenberg(A); julia> F.Q * F.H * F.Q' 3×3 Array{Float64,2}: 4.0 9.0 7.0 4.0 4.0 1.0 4.0 3.0 2.0 + +julia> q, h = F; # destructuring via iteration + +julia> q == F.Q && h == F.H +true ``` """ -hessfact(A::StridedMatrix{T}) where T = +hessenberg(A::StridedMatrix{T}) where T = hessfact!(copy_oftype(A, eigtype(T))) struct HessenbergQ{T,S<:AbstractMatrix} <: AbstractMatrix{T} diff --git a/stdlib/LinearAlgebra/test/dense.jl b/stdlib/LinearAlgebra/test/dense.jl index 9bbd914047462..2628165945936 100644 --- a/stdlib/LinearAlgebra/test/dense.jl +++ b/stdlib/LinearAlgebra/test/dense.jl @@ -382,7 +382,7 @@ end @test exp(A5) ≈ eA5 # Hessenberg - @test hessfact(A1).H ≈ convert(Matrix{elty}, + @test hessenberg(A1).H ≈ convert(Matrix{elty}, [4.000000000000000 -1.414213562373094 -1.414213562373095 -1.414213562373095 4.999999999999996 -0.000000000000000 0 -0.000000000000002 3.000000000000000]) diff --git a/stdlib/LinearAlgebra/test/givens.jl b/stdlib/LinearAlgebra/test/givens.jl index 752b0aac0ab29..25a11959812cc 100644 --- a/stdlib/LinearAlgebra/test/givens.jl +++ b/stdlib/LinearAlgebra/test/givens.jl @@ -37,7 +37,7 @@ using LinearAlgebra: rmul!, lmul! G, _ = givens(one(elty),zero(elty),11,12) @test_throws DimensionMismatch lmul!(G, A) @test_throws DimensionMismatch rmul!(A, adjoint(G)) - @test abs.(A) ≈ abs.(hessfact(Ac).H) + @test abs.(A) ≈ abs.(hessenberg(Ac).H) @test norm(R*Matrix{elty}(I, 10, 10)) ≈ one(elty) I10 = Matrix{elty}(I, 10, 10) diff --git a/stdlib/LinearAlgebra/test/hessenberg.jl b/stdlib/LinearAlgebra/test/hessenberg.jl index b649511a8a1df..51c85f10edb66 100644 --- a/stdlib/LinearAlgebra/test/hessenberg.jl +++ b/stdlib/LinearAlgebra/test/hessenberg.jl @@ -18,7 +18,7 @@ let n = 10 Areal) if eltya != BigFloat - H = hessfact(A) + H = hessenberg(A) @test size(H.Q, 1) == size(A, 1) @test size(H.Q, 2) == size(A, 2) @test size(H.Q) == size(A) From ab672a61f59415f562f5c5a7d9a8bd14a84be0fe Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Tue, 22 May 2018 19:51:03 -0700 Subject: [PATCH 16/23] Deprecate hessfact! to hessenberg!. 
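A minimal usage sketch for this pair of hessenberg patches (an illustration only, not part of the diff; it assumes the `hessenberg`/`hessenberg!` names and the `Hessenberg` iteration methods introduced above):

```julia
using LinearAlgebra

A = [4.0 9.0 7.0; 4.0 4.0 1.0; 4.0 3.0 2.0]

# Previously hessfact(A); the returned Hessenberg factorization is unchanged.
F = hessenberg(A)

# The new iterate methods allow destructuring into the unitary and Hessenberg factors.
q, h = F
q * h * q' ≈ A                 # true, up to roundoff

# Previously hessfact!(A); overwrites its argument, so pass a copy if A is still needed.
hessenberg!(copy(A))
```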
--- NEWS.md | 5 +++-- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 4 ++++ stdlib/LinearAlgebra/src/hessenberg.jl | 10 +++++----- 5 files changed, 14 insertions(+), 9 deletions(-) diff --git a/NEWS.md b/NEWS.md index d40296bd3ec25..72681e1ba2b7b 100644 --- a/NEWS.md +++ b/NEWS.md @@ -709,8 +709,9 @@ Deprecated or removed and `hessenberg` ([#27159]). * `lufact!`, `schurfact!`, `lqfact!`, `qrfact!`, `ldltfact!`, `svdfact!`, - and `bkfact!` have respectively been deprecated to - `lu!`, `schur!`, `lq!`, `qr!`, `ldlt!`, `svd!`, and `bunchkaufman!` ([#27159]). + `bkfact!`, and `hessfact!` have respectively been deprecated to + `lu!`, `schur!`, `lq!`, `qr!`, `ldlt!`, `svd!`, `bunchkaufman!`, + and `hessenberg!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 7b6011ff5f42e..9d55519cdd55d 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -340,7 +340,7 @@ LinearAlgebra.eigvecs LinearAlgebra.eigfact LinearAlgebra.eigfact! LinearAlgebra.hessenberg -LinearAlgebra.hessfact! +LinearAlgebra.hessenberg! LinearAlgebra.schur! LinearAlgebra.schur LinearAlgebra.ordschur diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 9cb532317e8a7..87e420480cdaa 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -91,7 +91,7 @@ export factorize, givens, hessenberg, - hessfact!, + hessenberg!, isdiag, ishermitian, isposdef, diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 592b795e0880d..f513d8d3af35e 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1355,3 +1355,7 @@ export bkfact! export hessfact @deprecate(hessfact(A::StridedMatrix{<:BlasFloat}), hessenberg(A)) @deprecate(hessfact(A::StridedMatrix{T}) where T, hessenberg(A)) + +# deprecate hessfact! to hessenberg! +export hessenberg! +@deprecate(hessfact!(A::StridedMatrix{<:BlasFloat}), hessenberg!(A)) diff --git a/stdlib/LinearAlgebra/src/hessenberg.jl b/stdlib/LinearAlgebra/src/hessenberg.jl index 59ac3c5ef2775..050462c19d55f 100644 --- a/stdlib/LinearAlgebra/src/hessenberg.jl +++ b/stdlib/LinearAlgebra/src/hessenberg.jl @@ -15,14 +15,14 @@ Base.iterate(S::Hessenberg, ::Val{:H}) = (S.H, Val(:done)) Base.iterate(S::Hessenberg, ::Val{:done}) = nothing """ - hessfact!(A) -> Hessenberg + hessenberg!(A) -> Hessenberg -`hessfact!` is the same as [`hessenberg`](@ref), but saves space by overwriting +`hessenberg!` is the same as [`hessenberg`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. 
""" -hessfact!(A::StridedMatrix{<:BlasFloat}) = Hessenberg(A) +hessenberg!(A::StridedMatrix{<:BlasFloat}) = Hessenberg(A) -hessenberg(A::StridedMatrix{<:BlasFloat}) = hessfact!(copy(A)) +hessenberg(A::StridedMatrix{<:BlasFloat}) = hessenberg!(copy(A)) """ hessenberg(A) -> Hessenberg @@ -58,7 +58,7 @@ true ``` """ hessenberg(A::StridedMatrix{T}) where T = - hessfact!(copy_oftype(A, eigtype(T))) + hessenberg!(copy_oftype(A, eigtype(T))) struct HessenbergQ{T,S<:AbstractMatrix} <: AbstractMatrix{T} factors::S From 0d3c2ea9fc88e385b97b4d9c8e16db4905a65986 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Tue, 22 May 2018 20:12:07 -0700 Subject: [PATCH 17/23] Deprecate eigfact to eigen. --- NEWS.md | 4 +- .../src/IterativeEigensolvers.jl | 2 +- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/bidiag.jl | 2 +- stdlib/LinearAlgebra/src/dense.jl | 18 ++--- stdlib/LinearAlgebra/src/deprecated.jl | 19 +++++ stdlib/LinearAlgebra/src/diagonal.jl | 2 +- stdlib/LinearAlgebra/src/eigen.jl | 70 +++++++++++++------ stdlib/LinearAlgebra/src/symmetric.jl | 48 +++++++------ stdlib/LinearAlgebra/src/triangular.jl | 2 +- stdlib/LinearAlgebra/src/tridiag.jl | 8 +-- stdlib/LinearAlgebra/test/dense.jl | 8 +-- stdlib/LinearAlgebra/test/diagonal.jl | 2 +- stdlib/LinearAlgebra/test/eigen.jl | 14 ++-- stdlib/LinearAlgebra/test/lapack.jl | 4 +- stdlib/LinearAlgebra/test/lu.jl | 4 +- stdlib/LinearAlgebra/test/symmetric.jl | 16 ++--- stdlib/LinearAlgebra/test/tridiag.jl | 8 +-- 19 files changed, 142 insertions(+), 93 deletions(-) diff --git a/NEWS.md b/NEWS.md index 72681e1ba2b7b..976628e69a0be 100644 --- a/NEWS.md +++ b/NEWS.md @@ -704,9 +704,9 @@ Deprecated or removed `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). * `lufact`, `schurfact`, `lqfact`, `qrfact`, `ldltfact`, `svdfact`, - `bkfact`, and `hessfact` have respectively been deprecated to + `bkfact`, `hessfact`, and `eigfact` have respectively been deprecated to `lu`, `schur`, `lq`, `qr`, `ldlt`, `svd`, `bunchkaufman`, - and `hessenberg` ([#27159]). + `hessenberg`, and `eigen` ([#27159]). * `lufact!`, `schurfact!`, `lqfact!`, `qrfact!`, `ldltfact!`, `svdfact!`, `bkfact!`, and `hessfact!` have respectively been deprecated to diff --git a/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl b/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl index 9b527c6112453..ca2f543a8dd8e 100644 --- a/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl +++ b/stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl @@ -78,7 +78,7 @@ function _eigs(A, B; sym = !iscmplx && issymmetric(A) && issymmetric(B) nevmax = sym ? n-1 : n-2 if nevmax <= 0 - throw(ArgumentError("input matrix A is too small. Use eigfact instead.")) + throw(ArgumentError("input matrix A is too small. Use eigen instead.")) end if nev > nevmax @warn "Adjusting nev from $nev to $nevmax" diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 9d55519cdd55d..5b081e7aeefdf 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -337,7 +337,7 @@ LinearAlgebra.eigvals! LinearAlgebra.eigmax LinearAlgebra.eigmin LinearAlgebra.eigvecs -LinearAlgebra.eigfact +LinearAlgebra.eigen LinearAlgebra.eigfact! LinearAlgebra.hessenberg LinearAlgebra.hessenberg! 
diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 87e420480cdaa..e68f3c2126676 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -81,7 +81,7 @@ export diagm, dot, eig, - eigfact, + eigen, eigfact!, eigmax, eigmin, diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index 4a1f4dc5aa8d1..4ec329b8439f6 100644 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -625,4 +625,4 @@ function eigvecs(M::Bidiagonal{T}) where T end Q #Actually Triangular end -eigfact(M::Bidiagonal) = Eigen(eigvals(M), eigvecs(M)) +eigen(M::Bidiagonal) = Eigen(eigvals(M), eigvecs(M)) diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 5477e3d7693df..312ef17fb3c0a 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -478,7 +478,7 @@ Compute the matrix exponential of `A`, defined by e^A = \\sum_{n=0}^{\\infty} \\frac{A^n}{n!}. ``` -For symmetric or Hermitian `A`, an eigendecomposition ([`eigfact`](@ref)) is +For symmetric or Hermitian `A`, an eigendecomposition ([`eigen`](@ref)) is used, otherwise the scaling and squaring algorithm (see [^H05]) is chosen. [^H05]: Nicholas J. Higham, "The squaring and scaling method for the matrix exponential revisited", SIAM Journal on Matrix Analysis and Applications, 26(4), 2005, 1179-1193. [doi:10.1137/090768539](https://doi.org/10.1137/090768539) @@ -602,7 +602,7 @@ the unique matrix ``X`` such that ``e^X = A`` and ``-\\pi < Im(\\lambda) < \\pi` the eigenvalues ``\\lambda`` of ``X``. If `A` has nonpositive eigenvalues, a nonprincipal matrix function is returned whenever possible. -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigfact`](@ref)) is +If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used, if `A` is triangular an improved version of the inverse scaling and squaring method is employed (see [^AH12] and [^AHR13]). For general matrices, the complex Schur form ([`schur`](@ref)) is computed and the triangular algorithm is used on the @@ -660,7 +660,7 @@ If `A` has no negative real eigenvalues, compute the principal matrix square roo that is the unique matrix ``X`` with eigenvalues having positive real part such that ``X^2 = A``. Otherwise, a nonprincipal square root is returned. -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigfact`](@ref)) is +If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to compute the square root. Otherwise, the square root is determined by means of the Björck-Hammarling method [^BH83], which computes the complex Schur form ([`schur`](@ref)) and then the complex square root of the triangular factor. @@ -732,7 +732,7 @@ end Compute the matrix cosine of a square matrix `A`. -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigfact`](@ref)) is used to +If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to compute the cosine. Otherwise, the cosine is determined by calling [`exp`](@ref). # Examples @@ -765,7 +765,7 @@ end Compute the matrix sine of a square matrix `A`. -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigfact`](@ref)) is used to +If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to compute the sine. Otherwise, the sine is determined by calling [`exp`](@ref). 
# Examples
@@ -851,7 +851,7 @@ end
Compute the matrix tangent of a square matrix `A`.
-If `A` is symmetric or Hermitian, its eigendecomposition ([`eigfact`](@ref)) is used to
+If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to
compute the tangent. Otherwise, the tangent is determined by calling [`exp`](@ref).
# Examples
@@ -924,7 +924,7 @@ end
Compute the inverse matrix cosine of a square matrix `A`.
-If `A` is symmetric or Hermitian, its eigendecomposition ([`eigfact`](@ref)) is used to
+If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to
compute the inverse cosine. Otherwise, the inverse cosine is determined by using [`log`](@ref) and [`sqrt`](@ref). For the theory and logarithmic formulas used to compute this function, see [^AH16_1].
@@ -955,7 +955,7 @@ end
Compute the inverse matrix sine of a square matrix `A`.
-If `A` is symmetric or Hermitian, its eigendecomposition ([`eigfact`](@ref)) is used to
+If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to
compute the inverse sine. Otherwise, the inverse sine is determined by using [`log`](@ref) and [`sqrt`](@ref). For the theory and logarithmic formulas used to compute this function, see [^AH16_2].
@@ -986,7 +986,7 @@ end
Compute the inverse matrix tangent of a square matrix `A`.
-If `A` is symmetric or Hermitian, its eigendecomposition ([`eigfact`](@ref)) is used to
+If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to
compute the inverse tangent. Otherwise, the inverse tangent is determined by using [`log`](@ref). For the theory and logarithmic formulas used to compute this function, see [^AH16_3].
diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl
index f513d8d3af35e..08208769bc9ea 100644
--- a/stdlib/LinearAlgebra/src/deprecated.jl
+++ b/stdlib/LinearAlgebra/src/deprecated.jl
@@ -1359,3 +1359,22 @@ export hessfact
# deprecate hessfact! to hessenberg!
export hessenberg!
@deprecate(hessfact!(A::StridedMatrix{<:BlasFloat}), hessenberg!(A))
+
+# deprecate eigfact to eigen
+export eigfact
+@deprecate(eigfact(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T, eigen(A; permute=permute, scale=scale))
+@deprecate(eigfact(x::Number), eigen(x))
+@deprecate(eigfact(A::AbstractMatrix{TA}, B::AbstractMatrix{TB}) where {TA,TB}, eigen(A, B))
+@deprecate(eigfact(A::Number, B::Number), eigen(A, B))
+
+@deprecate(eigfact(A::SymTridiagonal{T}) where T, eigen(A))
+@deprecate(eigfact(A::SymTridiagonal{T}, irange::UnitRange) where T, eigen(A, irange))
+@deprecate(eigfact(A::SymTridiagonal{T}, vl::Real, vu::Real) where T, eigen(A, vl, vu))
+
+@deprecate(eigfact(M::Bidiagonal), eigen(M))
+
+@deprecate(eigfact(A::RealHermSymComplexHerm), eigen(A))
+@deprecate(eigfact(A::RealHermSymComplexHerm, irange::UnitRange), eigen(A, irange))
+@deprecate(eigfact(A::RealHermSymComplexHerm, vl::Real, vh::Real), eigen(A, vl, vh))
+@deprecate(eigfact(A::AbstractTriangular), eigen(A))
+@deprecate(eigfact(D::Diagonal; permute::Bool=true, scale::Bool=true), eigen(D; permute=permute, scale=scale))
diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl
index 39e0c8451c119..5466c0c1266c8 100644
--- a/stdlib/LinearAlgebra/src/diagonal.jl
+++ b/stdlib/LinearAlgebra/src/diagonal.jl
@@ -445,7 +445,7 @@ end
eigvals(D::Diagonal{<:Number}) = D.diag
eigvals(D::Diagonal) = [eigvals(x) for x in D.diag] #For block matrices, etc.
eigvecs(D::Diagonal) = Matrix{eltype(D)}(I, size(D)) -function eigfact(D::Diagonal; permute::Bool=true, scale::Bool=true) +function eigen(D::Diagonal; permute::Bool=true, scale::Bool=true) if any(!isfinite, D.diag) throw(ArgumentError("matrix contains Infs or NaNs")) end diff --git a/stdlib/LinearAlgebra/src/eigen.jl b/stdlib/LinearAlgebra/src/eigen.jl index 56890060f13f2..07bc5bafa691d 100644 --- a/stdlib/LinearAlgebra/src/eigen.jl +++ b/stdlib/LinearAlgebra/src/eigen.jl @@ -20,12 +20,24 @@ end GeneralizedEigen(values::AbstractVector{V}, vectors::AbstractMatrix{T}) where {T,V} = GeneralizedEigen{T,V,typeof(vectors),typeof(values)}(values, vectors) +# iteration for destructuring into components +Base.iterate(S::Union{Eigen,GeneralizedEigen}) = (S.values, Val(:vectors)) +Base.iterate(S::Union{Eigen,GeneralizedEigen}, ::Val{:vectors}) = (S.vectors, Val(:done)) +Base.iterate(S::Union{Eigen,GeneralizedEigen}, ::Val{:done}) = nothing + +# indexing for destructuring into components +@inline function Base.getindex(S::Union{Eigen,GeneralizedEigen}, i::Integer) + i == 1 ? (return S.values) : + i == 2 ? (return S.vectors) : + throw(BoundsError(S, i)) +end + isposdef(A::Union{Eigen,GeneralizedEigen}) = isreal(A.values) && all(x -> x > 0, A.values) """ eigfact!(A, [B]) -Same as [`eigfact`](@ref), but saves space by overwriting the input `A` (and +Same as [`eigen`](@ref), but saves space by overwriting the input `A` (and `B`), instead of creating a copy. """ function eigfact!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T<:BlasReal @@ -59,12 +71,14 @@ function eigfact!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) whe end """ - eigfact(A; permute::Bool=true, scale::Bool=true) -> Eigen + eigen(A; permute::Bool=true, scale::Bool=true) -> Eigen Computes the eigenvalue decomposition of `A`, returning an `Eigen` factorization object `F` which contains the eigenvalues in `F.values` and the eigenvectors in the columns of the matrix `F.vectors`. (The `k`th eigenvector can be obtained from the slice `F.vectors[:, k]`.) +Iterating the decomposition produces the components `F.values` and `F.vectors`. + The following functions are available for `Eigen` objects: [`inv`](@ref), [`det`](@ref), and [`isposdef`](@ref). For general nonsymmetric matrices it is possible to specify how the matrix is balanced @@ -74,7 +88,7 @@ make rows and columns more equal in norm. 
The default is `true` for both options # Examples ```jldoctest -julia> F = eigfact([1.0 0.0 0.0; 0.0 3.0 0.0; 0.0 0.0 18.0]) +julia> F = eigen([1.0 0.0 0.0; 0.0 3.0 0.0; 0.0 0.0 18.0]) Eigen{Float64,Float64,Array{Float64,2},Array{Float64,1}} eigenvalues: 3-element Array{Float64,1}: @@ -98,17 +112,22 @@ julia> F.vectors 1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0 + +julia> vals, vecs = F; # destructuring via iteration + +julia> vals == F.values && vecs == F.vectors +true ``` """ -function eigfact(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T +function eigen(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T AA = copy_oftype(A, eigtype(T)) - isdiag(AA) && return eigfact(Diagonal(AA), permute = permute, scale = scale) + isdiag(AA) && return eigen(Diagonal(AA), permute = permute, scale = scale) return eigfact!(AA, permute = permute, scale = scale) end -eigfact(x::Number) = Eigen([x], fill(one(x), 1, 1)) +eigen(x::Number) = Eigen([x], fill(one(x), 1, 1)) function eig(A::Union{Number, StridedMatrix}; permute::Bool=true, scale::Bool=true) - F = eigfact(A, permute=permute, scale=scale) + F = eigen(A, permute=permute, scale=scale) F.values, F.vectors end @@ -118,7 +137,7 @@ end eig(A, permute::Bool=true, scale::Bool=true) -> D, V Computes eigenvalues (`D`) and eigenvectors (`V`) of `A`. -See [`eigfact`](@ref) for details on the +See [`eigen`](@ref) for details on the `irange`, `vl`, and `vu` arguments (for [`SymTridiagonal`](@ref), [`Hermitian`](@ref), and [`Symmetric`](@ref) matrices) @@ -131,11 +150,11 @@ julia> eig([1.0 0.0 0.0; 0.0 3.0 0.0; 0.0 0.0 18.0]) ([1.0, 3.0, 18.0], [1.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0]) ``` -`eig` is a wrapper around [`eigfact`](@ref), extracting all parts of the -factorization to a tuple; where possible, using [`eigfact`](@ref) is recommended. +`eig` is a wrapper around [`eigen`](@ref), extracting all parts of the +factorization to a tuple; where possible, using [`eigen`](@ref) is recommended. """ function eig(A::AbstractMatrix, args...) - F = eigfact(A, args...) + F = eigen(A, args...) F.values, F.vectors end @@ -144,7 +163,7 @@ end Return a matrix `M` whose columns are the eigenvectors of `A`. (The `k`th eigenvector can be obtained from the slice `M[:, k]`.) The `permute` and `scale` keywords are the same as -for [`eigfact`](@ref). +for [`eigen`](@ref). # Examples ```jldoctest @@ -156,7 +175,7 @@ julia> eigvecs([1.0 0.0 0.0; 0.0 3.0 0.0; 0.0 0.0 18.0]) ``` """ eigvecs(A::Union{Number, AbstractMatrix}; permute::Bool=true, scale::Bool=true) = - eigvecs(eigfact(A, permute=permute, scale=scale)) + eigvecs(eigen(A, permute=permute, scale=scale)) eigvecs(F::Union{Eigen, GeneralizedEigen}) = F.vectors eigvals(F::Union{Eigen, GeneralizedEigen}) = F.values @@ -351,13 +370,15 @@ function eigfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasComplex end """ - eigfact(A, B) -> GeneralizedEigen + eigen(A, B) -> GeneralizedEigen Computes the generalized eigenvalue decomposition of `A` and `B`, returning a `GeneralizedEigen` factorization object `F` which contains the generalized eigenvalues in `F.values` and the generalized eigenvectors in the columns of the matrix `F.vectors`. (The `k`th generalized eigenvector can be obtained from the slice `F.vectors[:, k]`.) +Iterating the decomposition produces the components `F.values` and `F.vectors`. 
+ # Examples ```jldoctest julia> A = [1 0; 0 -1] @@ -370,7 +391,7 @@ julia> B = [0 1; 1 0] 0 1 1 0 -julia> F = eigfact(A, B); +julia> F = eigen(A, B); julia> F.values 2-element Array{Complex{Float64},1}: @@ -381,22 +402,27 @@ julia> F.vectors 2×2 Array{Complex{Float64},2}: 0.0-1.0im 0.0+1.0im -1.0-0.0im -1.0+0.0im + +julia> vals, vecs = F; # destructuring via iteration + +julia> vals == F.values && vecs == F.vectors +true ``` """ -function eigfact(A::AbstractMatrix{TA}, B::AbstractMatrix{TB}) where {TA,TB} +function eigen(A::AbstractMatrix{TA}, B::AbstractMatrix{TB}) where {TA,TB} S = promote_type(eigtype(TA),TB) return eigfact!(copy_oftype(A, S), copy_oftype(B, S)) end -eigfact(A::Number, B::Number) = eigfact(fill(A,1,1), fill(B,1,1)) +eigen(A::Number, B::Number) = eigen(fill(A,1,1), fill(B,1,1)) """ eig(A, B) -> D, V Computes generalized eigenvalues (`D`) and vectors (`V`) of `A` with respect to `B`. -`eig` is a wrapper around [`eigfact`](@ref), extracting all parts of the -factorization to a tuple; where possible, using [`eigfact`](@ref) is recommended. +`eig` is a wrapper around [`eigen`](@ref), extracting all parts of the +factorization to a tuple; where possible, using [`eigen`](@ref) is recommended. # Examples ```jldoctest @@ -415,11 +441,11 @@ julia> eig(A, B) ``` """ function eig(A::AbstractMatrix, B::AbstractMatrix) - F = eigfact(A,B) + F = eigen(A,B) F.values, F.vectors end function eig(A::Number, B::Number) - F = eigfact(A,B) + F = eigen(A,B) F.values, F.vectors end @@ -524,7 +550,7 @@ julia> eigvecs(A, B) -1.0-0.0im -1.0+0.0im ``` """ -eigvecs(A::AbstractMatrix, B::AbstractMatrix) = eigvecs(eigfact(A, B)) +eigvecs(A::AbstractMatrix, B::AbstractMatrix) = eigvecs(eigen(A, B)) function show(io::IO, mime::MIME{Symbol("text/plain")}, F::Union{Eigen,GeneralizedEigen}) println(io, summary(F)) diff --git a/stdlib/LinearAlgebra/src/symmetric.jl b/stdlib/LinearAlgebra/src/symmetric.jl index 8967f0919a994..3a0bcf2a2871a 100644 --- a/stdlib/LinearAlgebra/src/symmetric.jl +++ b/stdlib/LinearAlgebra/src/symmetric.jl @@ -476,7 +476,7 @@ inv(A::Symmetric{<:Any,<:StridedMatrix}) = Symmetric(_inv(A), Symbol(A.uplo)) eigfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}) = Eigen(LAPACK.syevr!('V', 'A', A.uplo, A.data, 0.0, 0.0, 0, 0, -1.0)...) -function eigfact(A::RealHermSymComplexHerm) +function eigen(A::RealHermSymComplexHerm) T = eltype(A) S = eigtype(T) eigfact!(S != T ? convert(AbstractMatrix{S}, A) : copy(A)) @@ -485,11 +485,13 @@ end eigfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, irange::UnitRange) = Eigen(LAPACK.syevr!('V', 'I', A.uplo, A.data, 0.0, 0.0, irange.start, irange.stop, -1.0)...) """ - eigfact(A::Union{SymTridiagonal, Hermitian, Symmetric}, irange::UnitRange) -> Eigen + eigen(A::Union{SymTridiagonal, Hermitian, Symmetric}, irange::UnitRange) -> Eigen Computes the eigenvalue decomposition of `A`, returning an `Eigen` factorization object `F` -which contains the eigenvalues in `F[:values]` and the eigenvectors in the columns of the -matrix `F[:vectors]`. (The `k`th eigenvector can be obtained from the slice `F[:vectors][:, k]`.) +which contains the eigenvalues in `F.values` and the eigenvectors in the columns of the +matrix `F.vectors`. (The `k`th eigenvector can be obtained from the slice `F.vectors[:, k]`.) + +Iterating the decomposition produces the components `F.values` and `F.vectors`. The following functions are available for `Eigen` objects: [`inv`](@ref), [`det`](@ref), and [`isposdef`](@ref). 
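For the index-range method above and the value-interval method in the next hunk, a brief usage sketch (an illustration only, not part of the diff; both selections return truncated factorizations, as the docstrings note):

```julia
using LinearAlgebra

A = Symmetric([2.0 1.0 0.0; 1.0 2.0 1.0; 0.0 1.0 2.0])

# Previously eigfact(A, 1:2): the two smallest eigenpairs, selected by index.
F = eigen(A, 1:2)
F.values                       # ≈ [2 - √2, 2]

# Previously eigfact(A, vl, vu): eigenvalues found between vl and vu.
G = eigen(A, 0.0, 2.5)
G.values                       # the same two eigenvalues, selected by value
```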
@@ -499,7 +501,7 @@ The `UnitRange` `irange` specifies indices of the sorted eigenvalues to search f If `irange` is not `1:n`, where `n` is the dimension of `A`, then the returned factorization will be a *truncated* factorization. """ -function eigfact(A::RealHermSymComplexHerm, irange::UnitRange) +function eigen(A::RealHermSymComplexHerm, irange::UnitRange) T = eltype(A) S = eigtype(T) eigfact!(S != T ? convert(AbstractMatrix{S}, A) : copy(A), irange) @@ -509,11 +511,13 @@ eigfact!(A::RealHermSymComplexHerm{T,<:StridedMatrix}, vl::Real, vh::Real) where Eigen(LAPACK.syevr!('V', 'V', A.uplo, A.data, convert(T, vl), convert(T, vh), 0, 0, -1.0)...) """ - eigfact(A::Union{SymTridiagonal, Hermitian, Symmetric}, vl::Real, vu::Real) -> Eigen + eigen(A::Union{SymTridiagonal, Hermitian, Symmetric}, vl::Real, vu::Real) -> Eigen Computes the eigenvalue decomposition of `A`, returning an `Eigen` factorization object `F` -which contains the eigenvalues in `F[:values]` and the eigenvectors in the columns of the -matrix `F[:vectors]`. (The `k`th eigenvector can be obtained from the slice `F[:vectors][:, k]`.) +which contains the eigenvalues in `F.values` and the eigenvectors in the columns of the +matrix `F.vectors`. (The `k`th eigenvector can be obtained from the slice `F.vectors[:, k]`.) + +Iterating the decomposition produces the components `F.values` and `F.vectors`. The following functions are available for `Eigen` objects: [`inv`](@ref), [`det`](@ref), and [`isposdef`](@ref). @@ -523,7 +527,7 @@ The following functions are available for `Eigen` objects: [`inv`](@ref), [`det` If [`vl`, `vu`] does not contain all eigenvalues of `A`, then the returned factorization will be a *truncated* factorization. """ -function eigfact(A::RealHermSymComplexHerm, vl::Real, vh::Real) +function eigen(A::RealHermSymComplexHerm, vl::Real, vh::Real) T = eltype(A) S = eigtype(T) eigfact!(S != T ? convert(AbstractMatrix{S}, A) : copy(A), vl, vh) @@ -634,7 +638,7 @@ eigvals!(A::HermOrSym{T,S}, B::HermOrSym{T,S}) where {T<:BlasReal,S<:StridedMatr eigvals!(A::Hermitian{T,S}, B::Hermitian{T,S}) where {T<:BlasComplex,S<:StridedMatrix} = LAPACK.sygvd!(1, 'N', A.uplo, A.data, B.uplo == A.uplo ? 
B.data : copy(B.data'))[1] -eigvecs(A::HermOrSym) = eigvecs(eigfact(A)) +eigvecs(A::HermOrSym) = eigvecs(eigen(A)) function svdvals!(A::RealHermSymComplexHerm) vals = eigvals!(A) @@ -656,7 +660,7 @@ function sympow(A::Symmetric, p::Integer) end function ^(A::Symmetric{<:Real}, p::Real) isinteger(p) && return integerpow(A, p) - F = eigfact(A) + F = eigen(A) if all(λ -> λ ≥ 0, F.values) return Symmetric((F.vectors * Diagonal((F.values).^p)) * F.vectors') else @@ -680,7 +684,7 @@ function ^(A::Hermitian, p::Integer) end function ^(A::Hermitian{T}, p::Real) where T isinteger(p) && return integerpow(A, p) - F = eigfact(A) + F = eigen(A) if all(λ -> λ ≥ 0, F.values) retmat = (F.vectors * Diagonal((F.values).^p)) * F.vectors' if T <: Real @@ -699,12 +703,12 @@ end for func in (:exp, :cos, :sin, :tan, :cosh, :sinh, :tanh, :atan, :asinh, :atanh) @eval begin function ($func)(A::HermOrSym{<:Real}) - F = eigfact(A) + F = eigen(A) return Symmetric((F.vectors * Diagonal(($func).(F.values))) * F.vectors') end function ($func)(A::Hermitian{<:Complex}) n = checksquare(A) - F = eigfact(A) + F = eigen(A) retmat = (F.vectors * Diagonal(($func).(F.values))) * F.vectors' for i = 1:n retmat[i,i] = real(retmat[i,i]) @@ -717,7 +721,7 @@ end for func in (:acos, :asin) @eval begin function ($func)(A::HermOrSym{<:Real}) - F = eigfact(A) + F = eigen(A) if all(λ -> -1 ≤ λ ≤ 1, F.values) retmat = (F.vectors * Diagonal(($func).(F.values))) * F.vectors' else @@ -727,7 +731,7 @@ for func in (:acos, :asin) end function ($func)(A::Hermitian{<:Complex}) n = checksquare(A) - F = eigfact(A) + F = eigen(A) if all(λ -> -1 ≤ λ ≤ 1, F.values) retmat = (F.vectors * Diagonal(($func).(F.values))) * F.vectors' for i = 1:n @@ -742,7 +746,7 @@ for func in (:acos, :asin) end function acosh(A::HermOrSym{<:Real}) - F = eigfact(A) + F = eigen(A) if all(λ -> λ ≥ 1, F.values) retmat = (F.vectors * Diagonal(acosh.(F.values))) * F.vectors' else @@ -752,7 +756,7 @@ function acosh(A::HermOrSym{<:Real}) end function acosh(A::Hermitian{<:Complex}) n = checksquare(A) - F = eigfact(A) + F = eigen(A) if all(λ -> λ ≥ 1, F.values) retmat = (F.vectors * Diagonal(acosh.(F.values))) * F.vectors' for i = 1:n @@ -766,7 +770,7 @@ end function sincos(A::HermOrSym{<:Real}) n = checksquare(A) - F = eigfact(A) + F = eigen(A) S, C = Diagonal(similar(A, (n,))), Diagonal(similar(A, (n,))) for i in 1:n S.diag[i], C.diag[i] = sincos(F.values[i]) @@ -775,7 +779,7 @@ function sincos(A::HermOrSym{<:Real}) end function sincos(A::Hermitian{<:Complex}) n = checksquare(A) - F = eigfact(A) + F = eigen(A) S, C = Diagonal(similar(A, (n,))), Diagonal(similar(A, (n,))) for i in 1:n S.diag[i], C.diag[i] = sincos(F.values[i]) @@ -792,7 +796,7 @@ end for func in (:log, :sqrt) @eval begin function ($func)(A::HermOrSym{<:Real}) - F = eigfact(A) + F = eigen(A) if all(λ -> λ ≥ 0, F.values) retmat = (F.vectors * Diagonal(($func).(F.values))) * F.vectors' else @@ -803,7 +807,7 @@ for func in (:log, :sqrt) function ($func)(A::Hermitian{<:Complex}) n = checksquare(A) - F = eigfact(A) + F = eigen(A) if all(λ -> λ ≥ 0, F.values) retmat = (F.vectors * Diagonal(($func).(F.values))) * F.vectors' for i = 1:n diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index d77082425d394..9b0755cb2626d 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -2432,7 +2432,7 @@ function logabsdet(A::Union{UpperTriangular{T},LowerTriangular{T}}) where T return abs_det, sgn end -eigfact(A::AbstractTriangular) = 
Eigen(eigvals(A), eigvecs(A)) +eigen(A::AbstractTriangular) = Eigen(eigvals(A), eigvecs(A)) # Generic singular systems for func in (:svd, :svd!, :svdvals) diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl index 2be5dbd7ad9d5..6e5bd0baa3051 100644 --- a/stdlib/LinearAlgebra/src/tridiag.jl +++ b/stdlib/LinearAlgebra/src/tridiag.jl @@ -184,16 +184,16 @@ end (\)(T::SymTridiagonal, B::StridedVecOrMat) = ldlt(T)\B eigfact!(A::SymTridiagonal{<:BlasReal}) = Eigen(LAPACK.stegr!('V', A.dv, A.ev)...) -eigfact(A::SymTridiagonal{T}) where T = eigfact!(copy_oftype(A, eigtype(T))) +eigen(A::SymTridiagonal{T}) where T = eigfact!(copy_oftype(A, eigtype(T))) eigfact!(A::SymTridiagonal{<:BlasReal}, irange::UnitRange) = Eigen(LAPACK.stegr!('V', 'I', A.dv, A.ev, 0.0, 0.0, irange.start, irange.stop)...) -eigfact(A::SymTridiagonal{T}, irange::UnitRange) where T = +eigen(A::SymTridiagonal{T}, irange::UnitRange) where T = eigfact!(copy_oftype(A, eigtype(T)), irange) eigfact!(A::SymTridiagonal{<:BlasReal}, vl::Real, vu::Real) = Eigen(LAPACK.stegr!('V', 'V', A.dv, A.ev, vl, vu, 0, 0)...) -eigfact(A::SymTridiagonal{T}, vl::Real, vu::Real) where T = +eigen(A::SymTridiagonal{T}, vl::Real, vu::Real) where T = eigfact!(copy_oftype(A, eigtype(T)), vl, vu) eigvals!(A::SymTridiagonal{<:BlasReal}) = LAPACK.stev!('N', A.dv, A.ev)[1] @@ -214,7 +214,7 @@ eigmax(A::SymTridiagonal) = eigvals(A, size(A, 1):size(A, 1))[1] eigmin(A::SymTridiagonal) = eigvals(A, 1:1)[1] #Compute selected eigenvectors only corresponding to particular eigenvalues -eigvecs(A::SymTridiagonal) = eigfact(A).vectors +eigvecs(A::SymTridiagonal) = eigen(A).vectors """ eigvecs(A::SymTridiagonal[, eigvals]) -> Matrix diff --git a/stdlib/LinearAlgebra/test/dense.jl b/stdlib/LinearAlgebra/test/dense.jl index 2628165945936..2e809075c4688 100644 --- a/stdlib/LinearAlgebra/test/dense.jl +++ b/stdlib/LinearAlgebra/test/dense.jl @@ -581,19 +581,19 @@ end @test all(z -> (0 < real(z) < π || abs(real(z)) < abstol && imag(z) >= 0 || abs(real(z) - π) < abstol && imag(z) <= 0), - eigfact(acos(A)).values) + eigen(acos(A)).values) @test all(z -> (-π/2 < real(z) < π/2 || abs(real(z) + π/2) < abstol && imag(z) >= 0 || abs(real(z) - π/2) < abstol && imag(z) <= 0), - eigfact(asin(A)).values) + eigen(asin(A)).values) @test all(z -> (-π < imag(z) < π && real(z) > 0 || 0 <= imag(z) < π && abs(real(z)) < abstol || abs(imag(z) - π) < abstol && real(z) >= 0), - eigfact(acosh(A)).values) + eigen(acosh(A)).values) @test all(z -> (-π/2 < imag(z) < π/2 || abs(imag(z) + π/2) < abstol && real(z) <= 0 || abs(imag(z) - π/2) < abstol && real(z) <= 0), - eigfact(asinh(A)).values) + eigen(asinh(A)).values) end end end diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl index 651e14b819115..b3467621fcc10 100644 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -207,7 +207,7 @@ srand(1) @test factorize(D) == D @testset "Eigensystem" begin - eigD = eigfact(D) + eigD = eigen(D) @test Diagonal(eigD.values) ≈ D @test eigD.vectors == Matrix(I, size(D)) end diff --git a/stdlib/LinearAlgebra/test/eigen.jl b/stdlib/LinearAlgebra/test/eigen.jl index eec694ed7f475..c4e3c087e4b3d 100644 --- a/stdlib/LinearAlgebra/test/eigen.jl +++ b/stdlib/LinearAlgebra/test/eigen.jl @@ -37,7 +37,7 @@ aimg = randn(n,n)/2 for i in 1:size(a,2) @test a*v[:,i] ≈ d[i]*v[:,i] end - f = eigfact(a) + f = eigen(a) @test det(a) ≈ det(f) @test inv(a) ≈ inv(f) @test isposdef(a) == isposdef(f) @@ -45,7 +45,7 @@ aimg = 
randn(n,n)/2 @test eigvecs(f) === f.vectors @test Array(f) ≈ a - num_fact = eigfact(one(eltya)) + num_fact = eigen(one(eltya)) @test num_fact.values[1] == one(eltya) h = asym @test minimum(eigvals(h)) ≈ eigmin(h) @@ -61,7 +61,7 @@ aimg = randn(n,n)/2 asym_sg = view(asym, 1:n1, 1:n1) a_sg = view(a, 1:n, n1+1:n2) end - f = eigfact(asym_sg, a_sg'a_sg) + f = eigen(asym_sg, a_sg'a_sg) @test asym_sg*f.vectors ≈ (a_sg'a_sg*f.vectors) * Diagonal(f.values) @test f.values ≈ eigvals(asym_sg, a_sg'a_sg) @test prod(f.values) ≈ prod(eigvals(asym_sg/(a_sg'a_sg))) atol=200ε @@ -82,7 +82,7 @@ aimg = randn(n,n)/2 a1_nsg = view(a, 1:n1, 1:n1) a2_nsg = view(a, n1+1:n2, n1+1:n2) end - f = eigfact(a1_nsg, a2_nsg) + f = eigen(a1_nsg, a2_nsg) @test a1_nsg*f.vectors ≈ (a2_nsg*f.vectors) * Diagonal(f.values) @test f.values ≈ eigvals(a1_nsg, a2_nsg) @test prod(f.values) ≈ prod(eigvals(a1_nsg/a2_nsg)) atol=50000ε @@ -109,7 +109,7 @@ end # test a matrix larger than 140-by-140 for #14174 let aa = rand(200, 200) for a in (aa, view(aa, 1:n, 1:n)) - f = eigfact(a) + f = eigen(a) @test a ≈ f.vectors * Diagonal(f.values) / f.vectors end end @@ -124,8 +124,8 @@ end @testset "text/plain (REPL) printing of Eigen and GeneralizedEigen" begin A, B = randn(5,5), randn(5,5) - e = eigfact(A) - ge = eigfact(A, B) + e = eigen(A) + ge = eigen(A, B) valsstring = sprint((t, s) -> show(t, "text/plain", s), e.values) vecsstring = sprint((t, s) -> show(t, "text/plain", s), e.vectors) factstring = sprint((t, s) -> show(t, "text/plain", s), e) diff --git a/stdlib/LinearAlgebra/test/lapack.jl b/stdlib/LinearAlgebra/test/lapack.jl index 642827da3082a..9289a0b3efbc9 100644 --- a/stdlib/LinearAlgebra/test/lapack.jl +++ b/stdlib/LinearAlgebra/test/lapack.jl @@ -231,7 +231,7 @@ end @testset for elty in (ComplexF32, ComplexF64) A = rand(elty,10,10) Aw, Avl, Avr = LAPACK.geev!('N','V',copy(A)) - fA = eigfact(A) + fA = eigen(A) @test fA.values ≈ Aw @test fA.vectors ≈ Avr end @@ -660,7 +660,7 @@ end # Issue 14065 (and 14220) let A = [NaN NaN; NaN NaN] - @test_throws ArgumentError eigfact(A) + @test_throws ArgumentError eigen(A) end end # module TestLAPACK diff --git a/stdlib/LinearAlgebra/test/lu.jl b/stdlib/LinearAlgebra/test/lu.jl index 96eab7b13fc88..4b4353f6e4d36 100644 --- a/stdlib/LinearAlgebra/test/lu.jl +++ b/stdlib/LinearAlgebra/test/lu.jl @@ -50,10 +50,10 @@ dimg = randn(n)/2 -2.0 4.0 1.0 -eps(real(one(eltya))); -eps(real(one(eltya)))/4 eps(real(one(eltya)))/2 -1.0 0; -0.5 -0.5 0.1 1.0]) - F = eigfact(A, permute=false, scale=false) + F = eigen(A, permute=false, scale=false) eig(A, permute=false, scale=false) @test F.vectors*Diagonal(F.values)/F.vectors ≈ A - F = eigfact(A) + F = eigen(A) # @test norm(F.vectors*Diagonal(F.values)/F.vectors - A) > 0.01 end end diff --git a/stdlib/LinearAlgebra/test/symmetric.jl b/stdlib/LinearAlgebra/test/symmetric.jl index 5ff60262510a6..54172dfcb2af7 100644 --- a/stdlib/LinearAlgebra/test/symmetric.jl +++ b/stdlib/LinearAlgebra/test/symmetric.jl @@ -222,14 +222,14 @@ end @test asym*v[:,1] ≈ d[1]*v[:,1] @test v*Diagonal(d)*transpose(v) ≈ asym @test isequal(eigvals(asym[1]), eigvals(asym[1:1,1:1])) - @test abs.(eigfact(Symmetric(asym), 1:2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) + @test abs.(eigen(Symmetric(asym), 1:2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) eig(Symmetric(asym), 1:2) # same result, but checks that method works - @test abs.(eigfact(Symmetric(asym), d[1] - 1, (d[2] + d[3])/2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) + @test abs.(eigen(Symmetric(asym), d[1] - 1, (d[2] + d[3])/2).vectors'v[:,1:2]) ≈ Matrix(I, 
2, 2) eig(Symmetric(asym), d[1] - 1, (d[2] + d[3])/2) # same result, but checks that method works @test eigvals(Symmetric(asym), 1:2) ≈ d[1:2] @test eigvals(Symmetric(asym), d[1] - 1, (d[2] + d[3])/2) ≈ d[1:2] - # eigfact doesn't support Symmetric{Complex} - @test Matrix(eigfact(asym)) ≈ asym + # eigen doesn't support Symmetric{Complex} + @test Matrix(eigen(asym)) ≈ asym @test eigvecs(Symmetric(asym)) ≈ eigvecs(asym) end @@ -237,13 +237,13 @@ end @test aherm*v[:,1] ≈ d[1]*v[:,1] @test v*Diagonal(d)*v' ≈ aherm @test isequal(eigvals(aherm[1]), eigvals(aherm[1:1,1:1])) - @test abs.(eigfact(Hermitian(aherm), 1:2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) + @test abs.(eigen(Hermitian(aherm), 1:2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) eig(Hermitian(aherm), 1:2) # same result, but checks that method works - @test abs.(eigfact(Hermitian(aherm), d[1] - 1, (d[2] + d[3])/2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) + @test abs.(eigen(Hermitian(aherm), d[1] - 1, (d[2] + d[3])/2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) eig(Hermitian(aherm), d[1] - 1, (d[2] + d[3])/2) # same result, but checks that method works @test eigvals(Hermitian(aherm), 1:2) ≈ d[1:2] @test eigvals(Hermitian(aherm), d[1] - 1, (d[2] + d[3])/2) ≈ d[1:2] - @test Matrix(eigfact(aherm)) ≈ aherm + @test Matrix(eigen(aherm)) ≈ aherm @test eigvecs(Hermitian(aherm)) ≈ eigvecs(aherm) # relation to svdvals @@ -365,7 +365,7 @@ end end @testset "Issues #8057 and #8058. f=$f, A=$A" for f in - (eigfact, eigvals, eig), + (eigen, eigvals, eig), A in (Symmetric([0 1; 1 0]), Hermitian([0 im; -im 0])) @test_throws ArgumentError f(A, 3, 2) @test_throws ArgumentError f(A, 1:4) diff --git a/stdlib/LinearAlgebra/test/tridiag.jl b/stdlib/LinearAlgebra/test/tridiag.jl index 679ed2dfd20a0..903d09a02d27f 100644 --- a/stdlib/LinearAlgebra/test/tridiag.jl +++ b/stdlib/LinearAlgebra/test/tridiag.jl @@ -253,14 +253,14 @@ end test_approx_eq_vecs(v, evecs) end @testset "stegr! call with index range" begin - F = eigfact(SymTridiagonal(b, a),1:2) - fF = eigfact(Symmetric(Array(SymTridiagonal(b, a))),1:2) + F = eigen(SymTridiagonal(b, a),1:2) + fF = eigen(Symmetric(Array(SymTridiagonal(b, a))),1:2) test_approx_eq_modphase(F.vectors, fF.vectors) @test F.values ≈ fF.values end @testset "stegr! call with value range" begin - F = eigfact(SymTridiagonal(b, a),0.0,1.0) - fF = eigfact(Symmetric(Array(SymTridiagonal(b, a))),0.0,1.0) + F = eigen(SymTridiagonal(b, a),0.0,1.0) + fF = eigen(Symmetric(Array(SymTridiagonal(b, a))),0.0,1.0) test_approx_eq_modphase(F.vectors, fF.vectors) @test F.values ≈ fF.values end From ee57c76dff9db59722980e6209a7ad870e709bfe Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Tue, 22 May 2018 20:27:22 -0700 Subject: [PATCH 18/23] Deprecate eigfact! to eigen!. --- NEWS.md | 4 ++-- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 17 +++++++++++++++++ stdlib/LinearAlgebra/src/eigen.jl | 22 +++++++++++----------- stdlib/LinearAlgebra/src/symmetric.jl | 16 ++++++++-------- stdlib/LinearAlgebra/src/tridiag.jl | 12 ++++++------ 7 files changed, 46 insertions(+), 29 deletions(-) diff --git a/NEWS.md b/NEWS.md index 976628e69a0be..a5fd03a00ca72 100644 --- a/NEWS.md +++ b/NEWS.md @@ -709,9 +709,9 @@ Deprecated or removed `hessenberg`, and `eigen` ([#27159]). 
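For readers following along, a minimal before/after sketch of the non-mutating rename summarized in this NEWS entry (illustrative only — the matrix is made up, and any symmetric matrix would behave the same way); the mutating variant in the next entry follows the same pattern:

```julia
using LinearAlgebra

A = [2.0 1.0; 1.0 3.0]
F = eigen(A)                     # was: eigfact(A)
F.values                         # eigenvalues (ascending for symmetric input)
F.vectors                        # corresponding eigenvectors, one per column
A ≈ F.vectors * Diagonal(F.values) * inv(F.vectors)   # reconstructs A

G = eigen!(copy(A))              # was: eigfact!(copy(A)); overwrites its argument
```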
* `lufact!`, `schurfact!`, `lqfact!`, `qrfact!`, `ldltfact!`, `svdfact!`, - `bkfact!`, and `hessfact!` have respectively been deprecated to + `bkfact!`, `hessfact!`, and `eigfact!` have respectively been deprecated to `lu!`, `schur!`, `lq!`, `qr!`, `ldlt!`, `svd!`, `bunchkaufman!`, - and `hessenberg!` ([#27159]). + `hessenberg!`, and `eigen!` ([#27159]). * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 5b081e7aeefdf..d84575b77cd50 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -338,7 +338,7 @@ LinearAlgebra.eigmax LinearAlgebra.eigmin LinearAlgebra.eigvecs LinearAlgebra.eigen -LinearAlgebra.eigfact! +LinearAlgebra.eigen! LinearAlgebra.hessenberg LinearAlgebra.hessenberg! LinearAlgebra.schur! diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index e68f3c2126676..a6770d3c6e960 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -82,7 +82,7 @@ export dot, eig, eigen, - eigfact!, + eigen!, eigmax, eigmin, eigvals, diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index 08208769bc9ea..fea7008320ebb 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1378,3 +1378,20 @@ export eigfact @deprecate(eigfact(A::RealHermSymComplexHerm, vl::Real, vh::Real), eigen(A, vl, vh)) @deprecate(eigfact(A::AbstractTriangular), eigen(A)) @deprecate(eigfact(D::Diagonal; permute::Bool=true, scale::Bool=true), eigen(D; permute=permute, scale=scale)) + +# deprecate eigfact! to eigen! +export eigfact! 
+@deprecate(eigfact!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T<:BlasReal, eigen!(A; permute=permute, scale=scale)) +@deprecate(eigfact!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T<:BlasComplex, eigen!(A; permute=permute, scale=scale)) +@deprecate(eigfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasReal, eigen!(A, B)) +@deprecate(eigfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasComplex, eigen!(A, B)) + +@deprecate(eigfact!(A::SymTridiagonal{<:BlasReal}), eigen!(A)) +@deprecate(eigfact!(A::SymTridiagonal{<:BlasReal}, irange::UnitRange), eigen!(A, irange)) +@deprecate(eigfact!(A::SymTridiagonal{<:BlasReal}, vl::Real, vu::Real), eigen!(A, vl, vu)) + +@deprecate(eigfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}), eigen!(A)) +@deprecate(eigfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, irange::UnitRange), eigen!(A, irange)) +@deprecate(eigfact!(A::RealHermSymComplexHerm{T,<:StridedMatrix}, vl::Real, vh::Real) where {T<:BlasReal}, eigen!(A, vl, vh)) +@deprecate(eigfact!(A::HermOrSym{T,S}, B::HermOrSym{T,S}) where {T<:BlasReal,S<:StridedMatrix}, eigen!(A, B)) +@deprecate(eigfact!(A::Hermitian{T,S}, B::Hermitian{T,S}) where {T<:BlasComplex,S<:StridedMatrix}, eigen!(A, B)) diff --git a/stdlib/LinearAlgebra/src/eigen.jl b/stdlib/LinearAlgebra/src/eigen.jl index 07bc5bafa691d..d63c79ed90c43 100644 --- a/stdlib/LinearAlgebra/src/eigen.jl +++ b/stdlib/LinearAlgebra/src/eigen.jl @@ -35,15 +35,15 @@ end isposdef(A::Union{Eigen,GeneralizedEigen}) = isreal(A.values) && all(x -> x > 0, A.values) """ - eigfact!(A, [B]) + eigen!(A, [B]) Same as [`eigen`](@ref), but saves space by overwriting the input `A` (and `B`), instead of creating a copy. """ -function eigfact!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T<:BlasReal +function eigen!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T<:BlasReal n = size(A, 2) n == 0 && return Eigen(zeros(T, 0), zeros(T, 0, 0)) - issymmetric(A) && return eigfact!(Symmetric(A)) + issymmetric(A) && return eigen!(Symmetric(A)) A, WR, WI, VL, VR, _ = LAPACK.geevx!(permute ? (scale ? 'B' : 'P') : (scale ? 'S' : 'N'), 'N', 'V', 'N', A) iszero(WI) && return Eigen(WR, VR) evec = zeros(Complex{T}, n, n) @@ -63,10 +63,10 @@ function eigfact!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) whe return Eigen(complex.(WR, WI), evec) end -function eigfact!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T<:BlasComplex +function eigen!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T<:BlasComplex n = size(A, 2) n == 0 && return Eigen(zeros(T, 0), zeros(T, 0, 0)) - ishermitian(A) && return eigfact!(Hermitian(A)) + ishermitian(A) && return eigen!(Hermitian(A)) return Eigen(LAPACK.geevx!(permute ? (scale ? 'B' : 'P') : (scale ? 'S' : 'N'), 'N', 'V', 'N', A)[[2,4]]...) 
end @@ -122,7 +122,7 @@ true function eigen(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where T AA = copy_oftype(A, eigtype(T)) isdiag(AA) && return eigen(Diagonal(AA), permute = permute, scale = scale) - return eigfact!(AA, permute = permute, scale = scale) + return eigen!(AA, permute = permute, scale = scale) end eigen(x::Number) = Eigen([x], fill(one(x), 1, 1)) @@ -340,8 +340,8 @@ inv(A::Eigen) = A.vectors * inv(Diagonal(A.values)) / A.vectors det(A::Eigen) = prod(A.values) # Generalized eigenproblem -function eigfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasReal - issymmetric(A) && isposdef(B) && return eigfact!(Symmetric(A), Symmetric(B)) +function eigen!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasReal + issymmetric(A) && isposdef(B) && return eigen!(Symmetric(A), Symmetric(B)) n = size(A, 1) alphar, alphai, beta, _, vr = LAPACK.ggev!('N', 'V', A, B) iszero(alphai) && return GeneralizedEigen(alphar ./ beta, vr) @@ -363,8 +363,8 @@ function eigfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasReal return GeneralizedEigen(complex.(alphar, alphai)./beta, vecs) end -function eigfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasComplex - ishermitian(A) && isposdef(B) && return eigfact!(Hermitian(A), Hermitian(B)) +function eigen!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasComplex + ishermitian(A) && isposdef(B) && return eigen!(Hermitian(A), Hermitian(B)) alpha, beta, _, vr = LAPACK.ggev!('N', 'V', A, B) return GeneralizedEigen(alpha./beta, vr) end @@ -411,7 +411,7 @@ true """ function eigen(A::AbstractMatrix{TA}, B::AbstractMatrix{TB}) where {TA,TB} S = promote_type(eigtype(TA),TB) - return eigfact!(copy_oftype(A, S), copy_oftype(B, S)) + return eigen!(copy_oftype(A, S), copy_oftype(B, S)) end eigen(A::Number, B::Number) = eigen(fill(A,1,1), fill(B,1,1)) diff --git a/stdlib/LinearAlgebra/src/symmetric.jl b/stdlib/LinearAlgebra/src/symmetric.jl index 3a0bcf2a2871a..1760a16c08432 100644 --- a/stdlib/LinearAlgebra/src/symmetric.jl +++ b/stdlib/LinearAlgebra/src/symmetric.jl @@ -474,15 +474,15 @@ end inv(A::Hermitian{<:Any,<:StridedMatrix}) = Hermitian(_inv(A), Symbol(A.uplo)) inv(A::Symmetric{<:Any,<:StridedMatrix}) = Symmetric(_inv(A), Symbol(A.uplo)) -eigfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}) = Eigen(LAPACK.syevr!('V', 'A', A.uplo, A.data, 0.0, 0.0, 0, 0, -1.0)...) +eigen!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}) = Eigen(LAPACK.syevr!('V', 'A', A.uplo, A.data, 0.0, 0.0, 0, 0, -1.0)...) function eigen(A::RealHermSymComplexHerm) T = eltype(A) S = eigtype(T) - eigfact!(S != T ? convert(AbstractMatrix{S}, A) : copy(A)) + eigen!(S != T ? convert(AbstractMatrix{S}, A) : copy(A)) end -eigfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, irange::UnitRange) = Eigen(LAPACK.syevr!('V', 'I', A.uplo, A.data, 0.0, 0.0, irange.start, irange.stop, -1.0)...) +eigen!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, irange::UnitRange) = Eigen(LAPACK.syevr!('V', 'I', A.uplo, A.data, 0.0, 0.0, irange.start, irange.stop, -1.0)...) """ eigen(A::Union{SymTridiagonal, Hermitian, Symmetric}, irange::UnitRange) -> Eigen @@ -504,10 +504,10 @@ The `UnitRange` `irange` specifies indices of the sorted eigenvalues to search f function eigen(A::RealHermSymComplexHerm, irange::UnitRange) T = eltype(A) S = eigtype(T) - eigfact!(S != T ? convert(AbstractMatrix{S}, A) : copy(A), irange) + eigen!(S != T ? 
convert(AbstractMatrix{S}, A) : copy(A), irange) end -eigfact!(A::RealHermSymComplexHerm{T,<:StridedMatrix}, vl::Real, vh::Real) where {T<:BlasReal} = +eigen!(A::RealHermSymComplexHerm{T,<:StridedMatrix}, vl::Real, vh::Real) where {T<:BlasReal} = Eigen(LAPACK.syevr!('V', 'V', A.uplo, A.data, convert(T, vl), convert(T, vh), 0, 0, -1.0)...) """ @@ -530,7 +530,7 @@ The following functions are available for `Eigen` objects: [`inv`](@ref), [`det` function eigen(A::RealHermSymComplexHerm, vl::Real, vh::Real) T = eltype(A) S = eigtype(T) - eigfact!(S != T ? convert(AbstractMatrix{S}, A) : copy(A), vl, vh) + eigen!(S != T ? convert(AbstractMatrix{S}, A) : copy(A), vl, vh) end eigvals!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}) = @@ -624,11 +624,11 @@ end eigmax(A::RealHermSymComplexHerm{<:Real,<:StridedMatrix}) = eigvals(A, size(A, 1):size(A, 1))[1] eigmin(A::RealHermSymComplexHerm{<:Real,<:StridedMatrix}) = eigvals(A, 1:1)[1] -function eigfact!(A::HermOrSym{T,S}, B::HermOrSym{T,S}) where {T<:BlasReal,S<:StridedMatrix} +function eigen!(A::HermOrSym{T,S}, B::HermOrSym{T,S}) where {T<:BlasReal,S<:StridedMatrix} vals, vecs, _ = LAPACK.sygvd!(1, 'V', A.uplo, A.data, B.uplo == A.uplo ? B.data : copy(B.data')) GeneralizedEigen(vals, vecs) end -function eigfact!(A::Hermitian{T,S}, B::Hermitian{T,S}) where {T<:BlasComplex,S<:StridedMatrix} +function eigen!(A::Hermitian{T,S}, B::Hermitian{T,S}) where {T<:BlasComplex,S<:StridedMatrix} vals, vecs, _ = LAPACK.sygvd!(1, 'V', A.uplo, A.data, B.uplo == A.uplo ? B.data : copy(B.data')) GeneralizedEigen(vals, vecs) end diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl index 6e5bd0baa3051..a5e7a053184af 100644 --- a/stdlib/LinearAlgebra/src/tridiag.jl +++ b/stdlib/LinearAlgebra/src/tridiag.jl @@ -183,18 +183,18 @@ end (\)(T::SymTridiagonal, B::StridedVecOrMat) = ldlt(T)\B -eigfact!(A::SymTridiagonal{<:BlasReal}) = Eigen(LAPACK.stegr!('V', A.dv, A.ev)...) -eigen(A::SymTridiagonal{T}) where T = eigfact!(copy_oftype(A, eigtype(T))) +eigen!(A::SymTridiagonal{<:BlasReal}) = Eigen(LAPACK.stegr!('V', A.dv, A.ev)...) +eigen(A::SymTridiagonal{T}) where T = eigen!(copy_oftype(A, eigtype(T))) -eigfact!(A::SymTridiagonal{<:BlasReal}, irange::UnitRange) = +eigen!(A::SymTridiagonal{<:BlasReal}, irange::UnitRange) = Eigen(LAPACK.stegr!('V', 'I', A.dv, A.ev, 0.0, 0.0, irange.start, irange.stop)...) eigen(A::SymTridiagonal{T}, irange::UnitRange) where T = - eigfact!(copy_oftype(A, eigtype(T)), irange) + eigen!(copy_oftype(A, eigtype(T)), irange) -eigfact!(A::SymTridiagonal{<:BlasReal}, vl::Real, vu::Real) = +eigen!(A::SymTridiagonal{<:BlasReal}, vl::Real, vu::Real) = Eigen(LAPACK.stegr!('V', 'V', A.dv, A.ev, vl, vu, 0, 0)...) eigen(A::SymTridiagonal{T}, vl::Real, vu::Real) where T = - eigfact!(copy_oftype(A, eigtype(T)), vl, vu) + eigen!(copy_oftype(A, eigtype(T)), vl, vu) eigvals!(A::SymTridiagonal{<:BlasReal}) = LAPACK.stev!('N', A.dv, A.ev)[1] eigvals(A::SymTridiagonal{T}) where T = eigvals!(copy_oftype(A, eigtype(T))) From 8f2b195a77a1b5e827ddd874f227f4faf1b28884 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Tue, 22 May 2018 20:56:40 -0700 Subject: [PATCH 19/23] Deprecate cholfact to cholesky. 
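Before the diff itself, a rough sketch of what this rename means at the call site (illustrative only; `A` below is a small dense positive-definite matrix, and the pivoted form is shown with its `Val(true)` argument):

```julia
using LinearAlgebra

A = [4.0 12.0 -16.0; 12.0 37.0 -43.0; -16.0 -43.0 98.0]
C = cholesky(A)                  # was: cholfact(A)
C.U' * C.U ≈ A                   # U is the upper-triangular factor
C \ [1.0, 2.0, 3.0]              # solve A*x = b through the factorization

Cp = cholesky(A, Val(true))      # was: cholfact(A, Val(true)); pivoted variant
rank(Cp)                         # 3: this A has full rank
```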
--- NEWS.md | 6 +- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 8 +- stdlib/LinearAlgebra/src/cholesky.jl | 34 ++++----- stdlib/LinearAlgebra/src/dense.jl | 6 +- stdlib/LinearAlgebra/src/deprecated.jl | 7 ++ stdlib/LinearAlgebra/src/factorization.jl | 2 +- stdlib/LinearAlgebra/test/bunchkaufman.jl | 2 +- stdlib/LinearAlgebra/test/cholesky.jl | 42 +++++------ stdlib/SparseArrays/src/linalg.jl | 6 +- stdlib/SparseArrays/test/sparse.jl | 2 +- stdlib/SuiteSparse/src/cholmod.jl | 18 ++--- stdlib/SuiteSparse/src/deprecated.jl | 12 +++ stdlib/SuiteSparse/test/cholmod.jl | 90 +++++++++++------------ 14 files changed, 128 insertions(+), 109 deletions(-) diff --git a/NEWS.md b/NEWS.md index a5fd03a00ca72..fa94eee055a6d 100644 --- a/NEWS.md +++ b/NEWS.md @@ -704,9 +704,9 @@ Deprecated or removed `type` is fully deprecated to `mutable struct` ([#19157], [#20418]). * `lufact`, `schurfact`, `lqfact`, `qrfact`, `ldltfact`, `svdfact`, - `bkfact`, `hessfact`, and `eigfact` have respectively been deprecated to - `lu`, `schur`, `lq`, `qr`, `ldlt`, `svd`, `bunchkaufman`, - `hessenberg`, and `eigen` ([#27159]). + `bkfact`, `hessfact`, `eigfact`, and `cholfact` have respectively been + deprecated to `lu`, `schur`, `lq`, `qr`, `ldlt`, `svd`, `bunchkaufman`, + `hessenberg`, `eigen`, and `cholesky` ([#27159]). * `lufact!`, `schurfact!`, `lqfact!`, `qrfact!`, `ldltfact!`, `svdfact!`, `bkfact!`, `hessfact!`, and `eigfact!` have respectively been deprecated to diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index d84575b77cd50..bd759eed89ec9 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -314,7 +314,7 @@ LinearAlgebra.UniformScaling LinearAlgebra.lu LinearAlgebra.lu! LinearAlgebra.chol -LinearAlgebra.cholfact +LinearAlgebra.cholesky LinearAlgebra.cholfact! LinearAlgebra.lowrankupdate LinearAlgebra.lowrankdowndate diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index a6770d3c6e960..9ff0b1c0836f0 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -66,7 +66,7 @@ export bunchkaufman, bunchkaufman!, chol, - cholfact, + cholesky, cholfact!, cond, condskeel, @@ -250,7 +250,7 @@ end Compute `A \\ B` in-place and store the result in `Y`, returning the result. The argument `A` should *not* be a matrix. Rather, instead of matrices it should be a -factorization object (e.g. produced by [`factorize`](@ref) or [`cholfact`](@ref)). +factorization object (e.g. produced by [`factorize`](@ref) or [`cholesky`](@ref)). The reason for this is that factorization itself is both expensive and typically allocates memory (although it can also be done in-place via, e.g., [`lu!`](@ref)), and performance-critical situations requiring `ldiv!` usually also require fine-grained @@ -264,7 +264,7 @@ ldiv!(Y, A, B) Compute `A \\ B` in-place and overwriting `B` to store the result. The argument `A` should *not* be a matrix. Rather, instead of matrices it should be a -factorization object (e.g. produced by [`factorize`](@ref) or [`cholfact`](@ref)). +factorization object (e.g. produced by [`factorize`](@ref) or [`cholesky`](@ref)). 
The reason for this is that factorization itself is both expensive and typically allocates memory (although it can also be done in-place via, e.g., [`lu!`](@ref)), and performance-critical situations requiring `ldiv!` usually also require fine-grained @@ -279,7 +279,7 @@ ldiv!(A, B) Compute `A / B` in-place and overwriting `A` to store the result. The argument `B` should *not* be a matrix. Rather, instead of matrices it should be a -factorization object (e.g. produced by [`factorize`](@ref) or [`cholfact`](@ref)). +factorization object (e.g. produced by [`factorize`](@ref) or [`cholesky`](@ref)). The reason for this is that factorization itself is both expensive and typically allocates memory (although it can also be done in-place via, e.g., [`lu!`](@ref)), and performance-critical situations requiring `rdiv!` usually also require fine-grained diff --git a/stdlib/LinearAlgebra/src/cholesky.jl b/stdlib/LinearAlgebra/src/cholesky.jl index 0b4de8cc6e312..223a10d158a74 100644 --- a/stdlib/LinearAlgebra/src/cholesky.jl +++ b/stdlib/LinearAlgebra/src/cholesky.jl @@ -4,14 +4,14 @@ # Cholesky Factorization # ########################## -# The dispatch structure in the chol!, chol, cholfact, and cholfact! methods is a bit +# The dispatch structure in the chol!, chol, cholesky, and cholfact! methods is a bit # complicated and some explanation is therefore provided in the following # # In the methods below, LAPACK is called when possible, i.e. StridedMatrices with Float32, # Float64, Complex{Float32}, and Complex{Float64} element types. For other element or -# matrix types, the unblocked Julia implementation in _chol! is used. For cholfact +# matrix types, the unblocked Julia implementation in _chol! is used. For cholesky # and cholfact! pivoting is supported through a Val(Bool) argument. A type argument is -# necessary for type stability since the output of cholfact and cholfact! is either +# necessary for type stability since the output of cholesky and cholfact! is either # Cholesky or PivotedCholesky. The latter is only # supported for the four LAPACK element types. For other types, e.g. BigFloats Val(true) will # give an error. It is required that the input is Hermitian (including real symmetric) either @@ -21,7 +21,7 @@ # The internal structure is as follows # - _chol! returns the factor and info without checking positive definiteness # - chol/chol! returns the factor and checks for positive definiteness -# - cholfact/cholfact! returns Cholesky without checking positive definiteness +# - cholesky/cholfact! returns Cholesky without checking positive definiteness # FixMe? The dispatch below seems overly complicated. One simplification could be to # merge the two Cholesky types into one. It would remove the need for Val completely but @@ -218,7 +218,7 @@ end """ cholfact!(A, Val(false)) -> Cholesky -The same as [`cholfact`](@ref), but saves space by overwriting the input `A`, +The same as [`cholesky`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. An [`InexactError`](@ref) exception is thrown if the factorization produces a number not representable by the element type of `A`, e.g. for integer types. @@ -263,7 +263,7 @@ cholfact!(A::RealHermSymComplexHerm{<:Real}, ::Val{true}; tol = 0.0) = """ cholfact!(A, Val(true); tol = 0.0) -> CholeskyPivoted -The same as [`cholfact`](@ref), but saves space by overwriting the input `A`, +The same as [`cholesky`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. 
An [`InexactError`](@ref) exception is thrown if the factorization produces a number not representable by the element type of `A`, e.g. for integer types. @@ -278,11 +278,11 @@ function cholfact!(A::StridedMatrix, ::Val{true}; tol = 0.0) end end -# cholfact. Non-destructive methods for computing Cholesky factorization of real symmetric +# cholesky. Non-destructive methods for computing Cholesky factorization of real symmetric # or Hermitian matrix ## No pivoting (default) """ - cholfact(A, Val(false)) -> Cholesky + cholesky(A, Val(false)) -> Cholesky Compute the Cholesky factorization of a dense symmetric positive definite matrix `A` and return a `Cholesky` factorization. The matrix `A` can either be a [`Symmetric`](@ref) or [`Hermitian`](@ref) @@ -299,7 +299,7 @@ julia> A = [4. 12. -16.; 12. 37. -43.; -16. -43. 98.] 12.0 37.0 -43.0 -16.0 -43.0 98.0 -julia> C = cholfact(A) +julia> C = cholesky(A) Cholesky{Float64,Array{Float64,2}} U factor: 3×3 UpperTriangular{Float64,Array{Float64,2}}: @@ -323,13 +323,13 @@ julia> C.L * C.U == A true ``` """ -cholfact(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, +cholesky(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, ::Val{false}=Val(false)) = cholfact!(cholcopy(A)) ## With pivoting """ - cholfact(A, Val(true); tol = 0.0) -> CholeskyPivoted + cholesky(A, Val(true); tol = 0.0) -> CholeskyPivoted Compute the pivoted Cholesky factorization of a dense symmetric positive semi-definite matrix `A` and return a `CholeskyPivoted` factorization. The matrix `A` can either be a [`Symmetric`](@ref) @@ -340,11 +340,11 @@ The following functions are available for `PivotedCholesky` objects: The argument `tol` determines the tolerance for determining the rank. For negative values, the tolerance is the machine precision. """ -cholfact(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, +cholesky(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, ::Val{true}; tol = 0.0) = cholfact!(cholcopy(A), Val(true); tol = tol) ## Number -function cholfact(x::Number, uplo::Symbol=:U) +function cholesky(x::Number, uplo::Symbol=:U) C, info = _chol!(x, uplo) xf = fill(C, 1, 1) Cholesky(xf, uplo, info) @@ -557,7 +557,7 @@ rank(C::CholeskyPivoted) = C.rank lowrankupdate!(C::Cholesky, v::StridedVector) -> CC::Cholesky Update a Cholesky factorization `C` with the vector `v`. If `A = C.U'C.U` then -`CC = cholfact(C.U'C.U + v*v')` but the computation of `CC` only uses `O(n^2)` +`CC = cholesky(C.U'C.U + v*v')` but the computation of `CC` only uses `O(n^2)` operations. The input factorization `C` is updated in place such that on exit `C == CC`. The vector `v` is destroyed during the computation. """ @@ -603,7 +603,7 @@ end lowrankdowndate!(C::Cholesky, v::StridedVector) -> CC::Cholesky Downdate a Cholesky factorization `C` with the vector `v`. If `A = C.U'C.U` then -`CC = cholfact(C.U'C.U - v*v')` but the computation of `CC` only uses `O(n^2)` +`CC = cholesky(C.U'C.U - v*v')` but the computation of `CC` only uses `O(n^2)` operations. The input factorization `C` is updated in place such that on exit `C == CC`. The vector `v` is destroyed during the computation. """ @@ -656,7 +656,7 @@ end lowrankupdate(C::Cholesky, v::StridedVector) -> CC::Cholesky Update a Cholesky factorization `C` with the vector `v`. If `A = C.U'C.U` -then `CC = cholfact(C.U'C.U + v*v')` but the computation of `CC` only uses +then `CC = cholesky(C.U'C.U + v*v')` but the computation of `CC` only uses `O(n^2)` operations. 
""" lowrankupdate(C::Cholesky, v::StridedVector) = lowrankupdate!(copy(C), copy(v)) @@ -665,7 +665,7 @@ lowrankupdate(C::Cholesky, v::StridedVector) = lowrankupdate!(copy(C), copy(v)) lowrankdowndate(C::Cholesky, v::StridedVector) -> CC::Cholesky Downdate a Cholesky factorization `C` with the vector `v`. If `A = C.U'C.U` -then `CC = cholfact(C.U'C.U - v*v')` but the computation of `CC` only uses +then `CC = cholesky(C.U'C.U - v*v')` but the computation of `CC` only uses `O(n^2)` operations. """ lowrankdowndate(C::Cholesky, v::StridedVector) = lowrankdowndate!(copy(C), copy(v)) diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 312ef17fb3c0a..0420dfd86c51e 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -109,7 +109,7 @@ julia> isposdef(A) true ``` """ -isposdef(A::AbstractMatrix) = ishermitian(A) && isposdef(cholfact(Hermitian(A))) +isposdef(A::AbstractMatrix) = ishermitian(A) && isposdef(cholesky(Hermitian(A))) isposdef(x::Number) = imag(x)==0 && real(x) > 0 # the definition of strides for Array{T,N} is tuple() if N = 0, otherwise it is @@ -1112,7 +1112,7 @@ systems. For example: `A=factorize(A); x=A\\b; y=A\\C`. | Properties of `A` | type of factorization | |:---------------------------|:-----------------------------------------------| -| Positive-definite | Cholesky (see [`cholfact`](@ref)) | +| Positive-definite | Cholesky (see [`cholesky`](@ref)) | | Dense Symmetric/Hermitian | Bunch-Kaufman (see [`bunchkaufman`](@ref)) | | Sparse Symmetric/Hermitian | LDLt (see [`ldlt`](@ref)) | | Triangular | Triangular | @@ -1208,7 +1208,7 @@ function factorize(A::StridedMatrix{T}) where T return UpperTriangular(A) end if herm - cf = cholfact(A) + cf = cholesky(A) if cf.info == 0 return cf else diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index fea7008320ebb..f374af9b0094e 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -7,6 +7,7 @@ using Base: @deprecate, depwarn @deprecate cond(F::LinearAlgebra.LU, p::Integer) cond(convert(AbstractArray, F), p) # PR #22188 +export cholfact @deprecate cholfact!(A::StridedMatrix, uplo::Symbol, ::Type{Val{false}}) cholfact!(Hermitian(A, uplo), Val(false)) @deprecate cholfact!(A::StridedMatrix, uplo::Symbol) cholfact!(Hermitian(A, uplo)) @deprecate cholfact(A::StridedMatrix, uplo::Symbol, ::Type{Val{false}}) cholfact(Hermitian(A, uplo), Val(false)) @@ -1395,3 +1396,9 @@ export eigfact! 
@deprecate(eigfact!(A::RealHermSymComplexHerm{T,<:StridedMatrix}, vl::Real, vh::Real) where {T<:BlasReal}, eigen!(A, vl, vh)) @deprecate(eigfact!(A::HermOrSym{T,S}, B::HermOrSym{T,S}) where {T<:BlasReal,S<:StridedMatrix}, eigen!(A, B)) @deprecate(eigfact!(A::Hermitian{T,S}, B::Hermitian{T,S}) where {T<:BlasComplex,S<:StridedMatrix}, eigen!(A, B)) + +# deprecate cholfact to cholesky +# cholfact exported from deprecation above +@deprecate(cholfact(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, ::Val{false}=Val(false)), cholesky(A, Val(false))) +@deprecate(cholfact(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, ::Val{true}; tol = 0.0), cholesky(A, Val(true); tol=tol)) +@deprecate(cholfact(x::Number, uplo::Symbol=:U), cholesky(x, uplo)) diff --git a/stdlib/LinearAlgebra/src/factorization.jl b/stdlib/LinearAlgebra/src/factorization.jl index b0674ae64f683..9dadfc08cf9cd 100644 --- a/stdlib/LinearAlgebra/src/factorization.jl +++ b/stdlib/LinearAlgebra/src/factorization.jl @@ -22,7 +22,7 @@ end Test that a factorization of a matrix succeeded. ```jldoctest -julia> F = cholfact([1 0; 0 1]); +julia> F = cholesky([1 0; 0 1]); julia> LinearAlgebra.issuccess(F) true diff --git a/stdlib/LinearAlgebra/test/bunchkaufman.jl b/stdlib/LinearAlgebra/test/bunchkaufman.jl index 8469196b7dc0e..5bd345afe6c5d 100644 --- a/stdlib/LinearAlgebra/test/bunchkaufman.jl +++ b/stdlib/LinearAlgebra/test/bunchkaufman.jl @@ -132,7 +132,7 @@ end @testset "test example due to @timholy in PR 15354" begin A = rand(6,5); A = complex(A'*A) # to avoid calling the real-lhs-complex-rhs method - F = cholfact(A); + F = cholesky(A); v6 = rand(ComplexF64, 6) v5 = view(v6, 1:5) @test F\v5 == F\v6[1:5] diff --git a/stdlib/LinearAlgebra/test/cholesky.jl b/stdlib/LinearAlgebra/test/cholesky.jl index 08bacab8cf66e..1cadf5444b045 100644 --- a/stdlib/LinearAlgebra/test/cholesky.jl +++ b/stdlib/LinearAlgebra/test/cholesky.jl @@ -18,8 +18,8 @@ function unary_ops_tests(a, ca, tol; n=size(a, 1)) end function factor_recreation_tests(a_U, a_L) - c_U = cholfact(a_U) - c_L = cholfact(a_L) + c_U = cholesky(a_U) + c_L = cholesky(a_L) cl = chol(a_L) ls = c_L.L @test Array(c_U) ≈ Array(c_L) ≈ a_U @@ -55,7 +55,7 @@ end # Test of symmetric pos. def. 
strided matrix apd = a'*a - @inferred cholfact(apd) + @inferred cholesky(apd) @inferred chol(apd) capd = factorize(apd) r = capd.U @@ -67,7 +67,7 @@ end A = rand(eltya, 2, 3) @test_throws DimensionMismatch chol(A) @test_throws DimensionMismatch LinearAlgebra.chol!(A) - @test_throws DimensionMismatch cholfact(A) + @test_throws DimensionMismatch cholesky(A) @test_throws DimensionMismatch cholfact!(A) end @@ -88,16 +88,16 @@ end @inferred(logdet(capd)) apos = apd[1,1] # test chol(x::Number), needs x>0 - @test all(x -> x ≈ √apos, cholfact(apos).factors) + @test all(x -> x ≈ √apos, cholesky(apos).factors) @test_throws PosDefException chol(-one(eltya)) - # Test cholfact with Symmetric/Hermitian upper/lower + # Test cholesky with Symmetric/Hermitian upper/lower apds = Symmetric(apd) apdsL = Symmetric(apd, :L) apdh = Hermitian(apd) apdhL = Hermitian(apd, :L) if eltya <: Real - capds = cholfact(apds) + capds = cholesky(apds) unary_ops_tests(apds, capds, ε*κ*n) if eltya <: BlasReal capds = cholfact!(copy(apds)) @@ -106,7 +106,7 @@ end ulstring = sprint((t, s) -> show(t, "text/plain", s), capds.UL) @test sprint((t, s) -> show(t, "text/plain", s), capds) == "$(typeof(capds))\nU factor:\n$ulstring" else - capdh = cholfact(apdh) + capdh = cholesky(apdh) unary_ops_tests(apdh, capdh, ε*κ*n) capdh = cholfact!(copy(apdh)) unary_ops_tests(apdh, capdh, ε*κ*n) @@ -129,9 +129,9 @@ end #pivoted upper Cholesky if eltya != BigFloat - cz = cholfact(Hermitian(zeros(eltya,n,n)), Val(true)) + cz = cholesky(Hermitian(zeros(eltya,n,n)), Val(true)) @test_throws LinearAlgebra.RankDeficientException LinearAlgebra.chkfullrank(cz) - cpapd = cholfact(apdh, Val(true)) + cpapd = cholesky(apdh, Val(true)) unary_ops_tests(apdh, cpapd, ε*κ*n) @test rank(cpapd) == n @test all(diff(diag(real(cpapd.factors))).<=0.) # diagonal should be non-increasing @@ -155,18 +155,18 @@ end @test norm(a*(capd\(a'*b)) - b,1)/norm(b,1) <= ε*κ*n # Ad hoc, revisit if eltya != BigFloat && eltyb != BigFloat - lapd = cholfact(apdhL) + lapd = cholesky(apdhL) @test norm(apd * (lapd\b) - b)/norm(b) <= ε*κ*n @test norm(apd * (lapd\b[1:n]) - b[1:n])/norm(b[1:n]) <= ε*κ*n end if eltya != BigFloat && eltyb != BigFloat # Note! 
Need to implement pivoted Cholesky decomposition in julia - cpapd = cholfact(apdh, Val(true)) + cpapd = cholesky(apdh, Val(true)) @test norm(apd * (cpapd\b) - b)/norm(b) <= ε*κ*n # Ad hoc, revisit @test norm(apd * (cpapd\b[1:n]) - b[1:n])/norm(b[1:n]) <= ε*κ*n - lpapd = cholfact(apdhL, Val(true)) + lpapd = cholesky(apdhL, Val(true)) @test norm(apd * (lpapd\b) - b)/norm(b) <= ε*κ*n # Ad hoc, revisit @test norm(apd * (lpapd\b[1:n]) - b[1:n])/norm(b[1:n]) <= ε*κ*n @@ -176,7 +176,7 @@ end if eltya <: BlasFloat @testset "throw for non positive definite matrix" begin A = eltya[1 2; 2 1]; B = eltya[1, 1] - C = cholfact(A) + C = cholesky(A) @test !isposdef(C) @test !LinearAlgebra.issuccess(C) Cstr = sprint((t, s) -> show(t, "text/plain", s), C) @@ -194,8 +194,8 @@ end A = randn(5,5) end A = convert(Matrix{eltya}, A'A) - @test Matrix(cholfact(A).L) ≈ Matrix(invoke(LinearAlgebra._chol!, Tuple{AbstractMatrix, Type{LowerTriangular}}, copy(A), LowerTriangular)[1]) - @test Matrix(cholfact(A).U) ≈ Matrix(invoke(LinearAlgebra._chol!, Tuple{AbstractMatrix, Type{UpperTriangular}}, copy(A), UpperTriangular)[1]) + @test Matrix(cholesky(A).L) ≈ Matrix(invoke(LinearAlgebra._chol!, Tuple{AbstractMatrix, Type{LowerTriangular}}, copy(A), LowerTriangular)[1]) + @test Matrix(cholesky(A).U) ≈ Matrix(invoke(LinearAlgebra._chol!, Tuple{AbstractMatrix, Type{UpperTriangular}}, copy(A), UpperTriangular)[1]) end end end @@ -219,8 +219,8 @@ end AcA = A'*A BcB = AcA + v*v' BcB = (BcB + BcB')/2 - F = cholfact(Hermitian(AcA, uplo)) - G = cholfact(Hermitian(BcB, uplo)) + F = cholesky(Hermitian(AcA, uplo)) + G = cholesky(Hermitian(BcB, uplo)) @test Base.getproperty(LinearAlgebra.lowrankupdate(F, v), uplo) ≈ Base.getproperty(G, uplo) @test_throws DimensionMismatch LinearAlgebra.lowrankupdate(F, Vector{eltype(v)}(undef,length(v)+1)) @test Base.getproperty(LinearAlgebra.lowrankdowndate(G, v), uplo) ≈ Base.getproperty(F, uplo) @@ -228,7 +228,7 @@ end end end -@testset "issue #13243, unexpected nans in complex cholfact" begin +@testset "issue #13243, unexpected nans in complex cholesky" begin apd = [5.8525753f0 + 0.0f0im -0.79540455f0 + 0.7066077f0im 0.98274714f0 + 1.3824869f0im 2.619998f0 + 1.8532984f0im -1.8306153f0 - 1.2336911f0im 0.32275113f0 + 0.015575029f0im 2.1968813f0 + 1.0640624f0im 0.27894387f0 + 0.97911835f0im 3.0476584f0 + 0.18548489f0im 0.3842994f0 + 0.7050991f0im -0.79540455f0 - 0.7066077f0im 8.313246f0 + 0.0f0im -1.8076122f0 - 0.8882447f0im 0.47806996f0 + 0.48494184f0im 0.5096429f0 - 0.5395974f0im -0.7285097f0 - 0.10360408f0im -1.1760061f0 - 2.7146957f0im -0.4271084f0 + 0.042899966f0im -1.7228563f0 + 2.8335886f0im 1.8942566f0 + 0.6389735f0im 0.98274714f0 - 1.3824869f0im -1.8076122f0 + 0.8882447f0im 9.367975f0 + 0.0f0im -0.1838578f0 + 0.6468568f0im -1.8338387f0 + 0.7064959f0im 0.041852742f0 - 0.6556877f0im 2.5673025f0 + 1.9732997f0im -1.1148382f0 - 0.15693812f0im 2.4704504f0 - 1.0389464f0im 1.0858271f0 - 1.298006f0im @@ -249,7 +249,7 @@ end 0.25336108035924787 + 0.975317836492159im 0.0628393808469436 - 0.1253397353973715im 0.11192755545114 - 0.1603741874112385im 0.8439562576196216 + 1.0850814110398734im -1.0568488936791578 - 0.06025820467086475im 0.12696236014017806 - 0.09853584666755086im] - cholfact(Hermitian(apd, :L), Val(true)) \ b + cholesky(Hermitian(apd, :L), Val(true)) \ b r = factorize(apd).U E = abs.(apd - r'*r) ε = eps(abs(float(one(ComplexF32)))) @@ -263,7 +263,7 @@ end R = randn(5, 5) C = complex.(R, R) for A in (R, C) - @test !LinearAlgebra.issuccess(cholfact(A)) + @test 
!LinearAlgebra.issuccess(cholesky(A)) @test !LinearAlgebra.issuccess(cholfact!(copy(A))) @test_throws PosDefException chol(A) @test_throws PosDefException LinearAlgebra.chol!(copy(A)) diff --git a/stdlib/SparseArrays/src/linalg.jl b/stdlib/SparseArrays/src/linalg.jl index 1a956b7378683..36dba2f9c8061 100644 --- a/stdlib/SparseArrays/src/linalg.jl +++ b/stdlib/SparseArrays/src/linalg.jl @@ -990,7 +990,7 @@ function factorize(A::SparseMatrixCSC) end # function factorize(A::Symmetric{Float64,SparseMatrixCSC{Float64,Ti}}) where Ti -# F = cholfact(A) +# F = cholesky(A) # if LinearAlgebra.issuccess(F) # return F # else @@ -999,7 +999,7 @@ end # end # end function factorize(A::LinearAlgebra.RealHermSymComplexHerm{Float64,<:SparseMatrixCSC}) - F = cholfact(A) + F = cholesky(A) if LinearAlgebra.issuccess(F) return F else @@ -1008,7 +1008,7 @@ function factorize(A::LinearAlgebra.RealHermSymComplexHerm{Float64,<:SparseMatri end end -chol(A::SparseMatrixCSC) = error("Use cholfact() instead of chol() for sparse matrices.") +chol(A::SparseMatrixCSC) = error("Use cholesky() instead of chol() for sparse matrices.") eig(A::SparseMatrixCSC) = error("Use IterativeEigensolvers.eigs() instead of eig() for sparse matrices.") function Base.cov(X::SparseMatrixCSC; dims::Int=1, corrected::Bool=true) diff --git a/stdlib/SparseArrays/test/sparse.jl b/stdlib/SparseArrays/test/sparse.jl index dadda8ce1790a..f55f90497fba4 100644 --- a/stdlib/SparseArrays/test/sparse.jl +++ b/stdlib/SparseArrays/test/sparse.jl @@ -1321,7 +1321,7 @@ end if Base.USE_GPL_LIBS a = SparseMatrixCSC(2, 2, [1, 3, 5], [1, 2, 1, 2], [1.0, 0.0, 0.0, 1.0]) @test lu(a)\[2.0, 3.0] ≈ [2.0, 3.0] - @test cholfact(a)\[2.0, 3.0] ≈ [2.0, 3.0] + @test cholesky(a)\[2.0, 3.0] ≈ [2.0, 3.0] end end diff --git a/stdlib/SuiteSparse/src/cholmod.jl b/stdlib/SuiteSparse/src/cholmod.jl index 12eff4884e3b0..92dfa7f66997c 100644 --- a/stdlib/SuiteSparse/src/cholmod.jl +++ b/stdlib/SuiteSparse/src/cholmod.jl @@ -7,7 +7,7 @@ import Base: (*), convert, copy, eltype, getindex, getproperty, show, size, using LinearAlgebra import LinearAlgebra: (\), - cholfact, cholfact!, det, diag, ishermitian, isposdef, + cholesky, cholfact!, det, diag, ishermitian, isposdef, issuccess, issymmetric, ldlt, ldlt!, logdet using SparseArrays @@ -1385,7 +1385,7 @@ factorization `F`. `A` must be a [`SparseMatrixCSC`](@ref) or a [`Symmetric`](@r [`Hermitian`](@ref) view of a `SparseMatrixCSC`. Note that even if `A` doesn't have the type tag, it must still be symmetric or Hermitian. -See also [`cholfact`](@ref). +See also [`cholesky`](@ref). !!! note This method uses the CHOLMOD library from SuiteSparse, which only supports @@ -1401,7 +1401,7 @@ cholfact!(F::Factor, A::Union{SparseMatrixCSC{T}, shift = 0.0) where {T<:Real} = cholfact!(F, Sparse(A); shift = shift) -function cholfact(A::Sparse; shift::Real=0.0, +function cholesky(A::Sparse; shift::Real=0.0, perm::AbstractVector{SuiteSparse_long}=SuiteSparse_long[]) cm = defaults(common_struct) @@ -1417,14 +1417,14 @@ function cholfact(A::Sparse; shift::Real=0.0, end """ - cholfact(A; shift = 0.0, perm = Int[]) -> CHOLMOD.Factor + cholesky(A; shift = 0.0, perm = Int[]) -> CHOLMOD.Factor Compute the Cholesky factorization of a sparse positive definite matrix `A`. `A` must be a [`SparseMatrixCSC`](@ref) or a [`Symmetric`](@ref)/[`Hermitian`](@ref) view of a `SparseMatrixCSC`. Note that even if `A` doesn't have the type tag, it must still be symmetric or Hermitian. A fill-reducing permutation is used. 
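A compact usage sketch to accompany the sparse docstring above (illustrative only; the 3×3 matrix is made up, and any sparse symmetric positive-definite matrix works the same way):

```julia
using LinearAlgebra, SparseArrays, SuiteSparse

S = sparse([4.0 1.0 0.0; 1.0 4.0 1.0; 0.0 1.0 4.0])
F = cholesky(S)                  # was: cholfact(S); returns a CHOLMOD.Factor
x = F \ ones(3)                  # solve S*x = b using the sparse factorization
S * x ≈ ones(3)
```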
-`F = cholfact(A)` is most frequently used to solve systems of equations with `F\\b`, +`F = cholesky(A)` is most frequently used to solve systems of equations with `F\\b`, but also the methods [`diag`](@ref), [`det`](@ref), and [`logdet`](@ref) are defined for `F`. You can also extract individual factors from `F`, using `F.L`. @@ -1449,11 +1449,11 @@ it should be a permutation of `1:size(A,1)` giving the ordering to use Many other functions from CHOLMOD are wrapped but not exported from the `Base.SparseArrays.CHOLMOD` module. """ -cholfact(A::Union{SparseMatrixCSC{T}, SparseMatrixCSC{Complex{T}}, +cholesky(A::Union{SparseMatrixCSC{T}, SparseMatrixCSC{Complex{T}}, Symmetric{T,SparseMatrixCSC{T,SuiteSparse_long}}, Hermitian{Complex{T},SparseMatrixCSC{Complex{T},SuiteSparse_long}}, Hermitian{T,SparseMatrixCSC{T,SuiteSparse_long}}}; - kws...) where {T<:Real} = cholfact(Sparse(A); kws...) + kws...) where {T<:Real} = cholesky(Sparse(A); kws...) function ldlt!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv @@ -1712,7 +1712,7 @@ const RealHermSymComplexHermF64SSL = Union{ Hermitian{Float64,SparseMatrixCSC{Float64,SuiteSparse_long}}, Hermitian{Complex{Float64},SparseMatrixCSC{Complex{Float64},SuiteSparse_long}}} function \(A::RealHermSymComplexHermF64SSL, B::StridedVecOrMat) - F = cholfact(A) + F = cholesky(A) if issuccess(F) return \(F, B) else @@ -1726,7 +1726,7 @@ function \(A::RealHermSymComplexHermF64SSL, B::StridedVecOrMat) end function \(adjA::Adjoint{<:Any,<:RealHermSymComplexHermF64SSL}, B::StridedVecOrMat) A = adjA.parent - F = cholfact(A) + F = cholesky(A) if issuccess(F) return \(adjoint(F), B) else diff --git a/stdlib/SuiteSparse/src/deprecated.jl b/stdlib/SuiteSparse/src/deprecated.jl index a0aa42f233405..9396f26ff55ea 100644 --- a/stdlib/SuiteSparse/src/deprecated.jl +++ b/stdlib/SuiteSparse/src/deprecated.jl @@ -87,3 +87,15 @@ end shift = 0.0) where {T<:Real}, ldlt!(F, A; shift=shift)) end + +# deprecate cholfact to cholesky +@eval SuiteSparse.CHOLMOD begin + import LinearAlgebra: cholfact + @deprecate(cholfact(A::Sparse; shift::Real=0.0, perm::AbstractVector{SuiteSparse_long}=SuiteSparse_long[]), cholesky(A; shift=shift, perm=perm)) + @deprecate(cholfact(A::Union{SparseMatrixCSC{T}, SparseMatrixCSC{Complex{T}}, + Symmetric{T,SparseMatrixCSC{T,SuiteSparse_long}}, + Hermitian{Complex{T},SparseMatrixCSC{Complex{T},SuiteSparse_long}}, + Hermitian{T,SparseMatrixCSC{T,SuiteSparse_long}}}; + kws...) 
where {T<:Real}, + cholesky(A; kws...)) +end diff --git a/stdlib/SuiteSparse/test/cholmod.jl b/stdlib/SuiteSparse/test/cholmod.jl index 53681315f6608..545d9be2983e3 100644 --- a/stdlib/SuiteSparse/test/cholmod.jl +++ b/stdlib/SuiteSparse/test/cholmod.jl @@ -117,12 +117,12 @@ srand(123) @test size(chmal) == size(A) @test size(chmal, 1) == size(A, 1) - chma = cholfact(A) # LL' form + chma = cholesky(A) # LL' form @test CHOLMOD.isvalid(chma) @test unsafe_load(pointer(chma)).is_ll == 1 # check that it is in fact an LLt @test chma\b ≈ x @test nnz(chma) == 489 - @test nnz(cholfact(A, perm=1:size(A,1))) > nnz(chma) + @test nnz(cholesky(A, perm=1:size(A,1))) > nnz(chma) @test size(chma) == size(A) chmal = CHOLMOD.FactorComponent(chma, :L) @test size(chmal) == size(A) @@ -153,7 +153,7 @@ end 2.249,-1.0,2.279,1.4,-1.0,1.0,-1.0,1.0,1.0,1.0], 0) afiro2 = CHOLMOD.aat(afiro, CHOLMOD.SuiteSparse_long[0:50;], CHOLMOD.SuiteSparse_long(1)) CHOLMOD.change_stype!(afiro2, -1) - chmaf = cholfact(afiro2) + chmaf = cholesky(afiro2) y = afiro'*fill(1., size(afiro,1)) sol = chmaf\(afiro*y) # least squares solution @test CHOLMOD.isvalid(sol) @@ -370,23 +370,23 @@ end end # Factor - @test_throws ArgumentError cholfact(A1) - @test_throws ArgumentError cholfact(A1) - @test_throws ArgumentError cholfact(A1, shift=1.0) + @test_throws ArgumentError cholesky(A1) + @test_throws ArgumentError cholesky(A1) + @test_throws ArgumentError cholesky(A1, shift=1.0) @test_throws ArgumentError ldlt(A1) @test_throws ArgumentError ldlt(A1, shift=1.0) C = A1 + copy(adjoint(A1)) λmaxC = eigmax(Array(C)) b = fill(1., size(A1, 1)) - @test_throws LinearAlgebra.PosDefException cholfact(C - 2λmaxC*I)\b - @test_throws LinearAlgebra.PosDefException cholfact(C, shift=-2λmaxC)\b + @test_throws LinearAlgebra.PosDefException cholesky(C - 2λmaxC*I)\b + @test_throws LinearAlgebra.PosDefException cholesky(C, shift=-2λmaxC)\b @test_throws ArgumentError ldlt(C - C[1,1]*I)\b @test_throws ArgumentError ldlt(C, shift=-real(C[1,1]))\b - @test !isposdef(cholfact(C - 2λmaxC*I)) - @test !isposdef(cholfact(C, shift=-2λmaxC)) + @test !isposdef(cholesky(C - 2λmaxC*I)) + @test !isposdef(cholesky(C, shift=-2λmaxC)) @test !LinearAlgebra.issuccess(ldlt(C - C[1,1]*I)) @test !LinearAlgebra.issuccess(ldlt(C, shift=-real(C[1,1]))) - F = cholfact(A1pd) + F = cholesky(A1pd) tmp = IOBuffer() show(tmp, F) @test tmp.size > 0 @@ -404,7 +404,7 @@ end let # to test supernodal, we must use a larger matrix Ftmp = sprandn(100, 100, 0.1) Ftmp = Ftmp'Ftmp + I - @test logdet(cholfact(Ftmp)) ≈ logdet(Array(Ftmp)) + @test logdet(cholesky(Ftmp)) ≈ logdet(Array(Ftmp)) end @test logdet(ldlt(A1pd)) ≈ logdet(Array(A1pd)) @test isposdef(A1pd) @@ -413,9 +413,9 @@ end if elty <: Real @test CHOLMOD.issymmetric(Sparse(A1pd, 0)) - @test CHOLMOD.Sparse(cholfact(Symmetric(A1pd, :L))) == CHOLMOD.Sparse(cholfact(A1pd)) - F1 = CHOLMOD.Sparse(cholfact(Symmetric(A1pd, :L), shift=2)) - F2 = CHOLMOD.Sparse(cholfact(A1pd, shift=2)) + @test CHOLMOD.Sparse(cholesky(Symmetric(A1pd, :L))) == CHOLMOD.Sparse(cholesky(A1pd)) + F1 = CHOLMOD.Sparse(cholesky(Symmetric(A1pd, :L), shift=2)) + F2 = CHOLMOD.Sparse(cholesky(A1pd, shift=2)) @test F1 == F2 @test CHOLMOD.Sparse(ldlt(Symmetric(A1pd, :L))) == CHOLMOD.Sparse(ldlt(A1pd)) F1 = CHOLMOD.Sparse(ldlt(Symmetric(A1pd, :L), shift=2)) @@ -424,9 +424,9 @@ end else @test !CHOLMOD.issymmetric(Sparse(A1pd, 0)) @test CHOLMOD.ishermitian(Sparse(A1pd, 0)) - @test CHOLMOD.Sparse(cholfact(Hermitian(A1pd, :L))) == CHOLMOD.Sparse(cholfact(A1pd)) - F1 = 
CHOLMOD.Sparse(cholfact(Hermitian(A1pd, :L), shift=2)) - F2 = CHOLMOD.Sparse(cholfact(A1pd, shift=2)) + @test CHOLMOD.Sparse(cholesky(Hermitian(A1pd, :L))) == CHOLMOD.Sparse(cholesky(A1pd)) + F1 = CHOLMOD.Sparse(cholesky(Hermitian(A1pd, :L), shift=2)) + F2 = CHOLMOD.Sparse(cholesky(A1pd, shift=2)) @test F1 == F2 @test CHOLMOD.Sparse(ldlt(Hermitian(A1pd, :L))) == CHOLMOD.Sparse(ldlt(A1pd)) F1 = CHOLMOD.Sparse(ldlt(Hermitian(A1pd, :L), shift=2)) @@ -435,7 +435,7 @@ end end ### cholfact!/ldlt! - F = cholfact(A1pd) + F = cholesky(A1pd) CHOLMOD.change_factor!(elty, false, false, true, true, F) @test unsafe_load(pointer(F)).is_ll == 0 CHOLMOD.change_factor!(elty, true, false, true, true, F) @@ -444,7 +444,7 @@ end @test size(F, 3) == 1 @test_throws ArgumentError size(F, 0) - F = cholfact(A1pdSparse, shift=2) + F = cholesky(A1pdSparse, shift=2) @test isa(CHOLMOD.Sparse(F), CHOLMOD.Sparse{elty}) @test CHOLMOD.Sparse(cholfact!(copy(F), A1pd, shift=2.0)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality @@ -504,8 +504,8 @@ end p = [2,3,1] p_inv = [3,1,2] - @testset "cholfact, no permutation" begin - Fs = cholfact(As, perm=[1:3;]) + @testset "cholesky, no permutation" begin + Fs = cholesky(As, perm=[1:3;]) @test Fs.p == [1:3;] @test sparse(Fs.L) ≈ Lf @test sparse(Fs) ≈ As @@ -527,11 +527,11 @@ end @test_throws CHOLMOD.CHOLMODException Fs.DUPt end - @testset "cholfact, with permutation" begin - Fs = cholfact(As, perm=p) + @testset "cholesky, with permutation" begin + Fs = cholesky(As, perm=p) @test Fs.p == p Afp = Af[p,p] - Lfp = cholfact(Afp).L + Lfp = cholesky(Afp).L @test sparse(Fs.L) ≈ Lfp @test sparse(Fs) ≈ As b = rand(3) @@ -617,19 +617,19 @@ end end @testset "Element promotion and type inference" begin - @inferred cholfact(As)\fill(1, size(As, 1)) + @inferred cholesky(As)\fill(1, size(As, 1)) @inferred ldlt(As)\fill(1, size(As, 1)) end end @testset "Issue 11745 - row and column pointers were not sorted in sparse(Factor)" begin A = Float64[10 1 1 1; 1 10 0 0; 1 0 10 0; 1 0 0 10] - @test sparse(cholfact(sparse(A))) ≈ A + @test sparse(cholesky(sparse(A))) ≈ A end GC.gc() @testset "Issue 11747 - Wrong show method defined for FactorComponent" begin - v = cholfact(sparse(Float64[ 10 1 1 1; 1 10 0 0; 1 0 10 0; 1 0 0 10])).L + v = cholesky(sparse(Float64[ 10 1 1 1; 1 10 0 0; 1 0 10 0; 1 0 0 10])).L for s in (sprint(show, MIME("text/plain"), v), sprint(show, v)) @test occursin("method: simplicial", s) @test !occursin("#undef", s) @@ -637,7 +637,7 @@ GC.gc() end @testset "Issue 14076" begin - @test cholfact(sparse([1,2,3,4], [1,2,3,4], Float32[1,4,16,64]))\[1,4,16,64] == fill(1, 4) + @test cholesky(sparse([1,2,3,4], [1,2,3,4], Float32[1,4,16,64]))\[1,4,16,64] == fill(1, 4) end @testset "Issue 14134" begin @@ -650,7 +650,7 @@ end @test_throws ArgumentError size(Anew) @test_throws ArgumentError Anew[1] @test_throws ArgumentError Anew[2,1] - F = cholfact(A) + F = cholesky(A) serialize(b, F) seekstart(b) Fnew = deserialize(b) @@ -669,17 +669,17 @@ end @testset "Further issue with promotion #14894" begin x = fill(1., 5) - @test cholfact(sparse(Float16(1)I, 5, 5))\x == x - @test cholfact(Symmetric(sparse(Float16(1)I, 5, 5)))\x == x - @test cholfact(Hermitian(sparse(Complex{Float16}(1)I, 5, 5)))\x == x - @test_throws MethodError cholfact(sparse(BigFloat(1)I, 5, 5)) - @test_throws MethodError cholfact(Symmetric(sparse(BigFloat(1)I, 5, 5))) - @test_throws MethodError cholfact(Hermitian(sparse(Complex{BigFloat}(1)I, 5, 5))) + @test 
cholesky(sparse(Float16(1)I, 5, 5))\x == x + @test cholesky(Symmetric(sparse(Float16(1)I, 5, 5)))\x == x + @test cholesky(Hermitian(sparse(Complex{Float16}(1)I, 5, 5)))\x == x + @test_throws MethodError cholesky(sparse(BigFloat(1)I, 5, 5)) + @test_throws MethodError cholesky(Symmetric(sparse(BigFloat(1)I, 5, 5))) + @test_throws MethodError cholesky(Hermitian(sparse(Complex{BigFloat}(1)I, 5, 5))) end @testset "test \\ for Factor and StridedVecOrMat" begin x = rand(5) - A = cholfact(sparse(Diagonal(x.\1))) + A = cholesky(sparse(Diagonal(x.\1))) @test A\view(fill(1.,10),1:2:10) ≈ x @test A\view(Matrix(1.0I, 5, 5), :, :) ≈ Matrix(Diagonal(x)) end @@ -687,7 +687,7 @@ end @testset "Real factorization and complex rhs" begin A = sprandn(5, 5, 0.4) |> t -> t't + I B = complex.(randn(5, 2), randn(5, 2)) - @test cholfact(A)\B ≈ A\B + @test cholesky(A)\B ≈ A\B end @testset "Make sure that ldlt performs an LDLt (Issue #19032)" begin @@ -699,7 +699,7 @@ end s = unsafe_load(pointer(F)) @test s.is_super == 0 @test F\b ≈ fill(1., m+n) - F2 = cholfact(M) + F2 = cholesky(M) @test !LinearAlgebra.issuccess(F2) ldlt!(F2, M) @test LinearAlgebra.issuccess(F2) @@ -708,8 +708,8 @@ end @testset "Test that imaginary parts in Hermitian{T,SparseMatrixCSC{T}} are ignored" begin A = sparse([1,2,3,4,1], [1,2,3,4,2], [complex(2.0,1),2,2,2,1]) - Fs = cholfact(Hermitian(A)) - Fd = cholfact(Hermitian(Array(A))) + Fs = cholesky(Hermitian(A)) + Fd = cholesky(Hermitian(Array(A))) @test sparse(Fs) ≈ Hermitian(A) @test Fs\fill(1., 4) ≈ Fd\fill(1., 4) end @@ -768,8 +768,8 @@ end 1.02371, -0.502384, 1.10686, 0.262229, -1.6935, 0.525239]) AtA = A'*A C0 = [1., 2., 0, 0, 0] - # Test both cholfact and LDLt with and without automatic permutations - for F in (cholfact(AtA), cholfact(AtA, perm=1:5), ldlt(AtA), ldlt(AtA, perm=1:5)) + # Test both cholesky and LDLt with and without automatic permutations + for F in (cholesky(AtA), cholesky(AtA, perm=1:5), ldlt(AtA), ldlt(AtA, perm=1:5)) local F x0 = F\(b = fill(1., 5)) #Test both sparse/dense and vectors/matrices @@ -805,9 +805,9 @@ end @testset "Issue #22335" begin local A, F A = sparse(1.0I, 3, 3) - @test LinearAlgebra.issuccess(cholfact(A)) + @test LinearAlgebra.issuccess(cholesky(A)) A[3, 3] = -1 - F = cholfact(A) + F = cholesky(A) @test !LinearAlgebra.issuccess(F) @test LinearAlgebra.issuccess(ldlt!(F, A)) A[3, 3] = 1 From 6e5d700bec1984894ccda3eb6db7f1b94ef151ce Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Tue, 22 May 2018 21:13:01 -0700 Subject: [PATCH 20/23] Deprecate cholfact! to cholesky!. --- stdlib/LinearAlgebra/docs/src/index.md | 2 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 +- stdlib/LinearAlgebra/src/cholesky.jl | 34 +++++++++++------------ stdlib/LinearAlgebra/src/dense.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 10 ++++++- stdlib/LinearAlgebra/test/cholesky.jl | 16 +++++------ stdlib/LinearAlgebra/test/dense.jl | 2 +- stdlib/SuiteSparse/src/cholmod.jl | 12 ++++---- stdlib/SuiteSparse/src/deprecated.jl | 13 +++++++++ stdlib/SuiteSparse/test/cholmod.jl | 6 ++-- 10 files changed, 60 insertions(+), 39 deletions(-) diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index bd759eed89ec9..3a988193d3d81 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -315,7 +315,7 @@ LinearAlgebra.lu LinearAlgebra.lu! LinearAlgebra.chol LinearAlgebra.cholesky -LinearAlgebra.cholfact! +LinearAlgebra.cholesky! 
LinearAlgebra.lowrankupdate LinearAlgebra.lowrankdowndate LinearAlgebra.lowrankupdate! diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 9ff0b1c0836f0..3519202b8c428 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -67,7 +67,7 @@ export bunchkaufman!, chol, cholesky, - cholfact!, + cholesky!, cond, condskeel, copyto!, diff --git a/stdlib/LinearAlgebra/src/cholesky.jl b/stdlib/LinearAlgebra/src/cholesky.jl index 223a10d158a74..feff53ccc3c40 100644 --- a/stdlib/LinearAlgebra/src/cholesky.jl +++ b/stdlib/LinearAlgebra/src/cholesky.jl @@ -4,14 +4,14 @@ # Cholesky Factorization # ########################## -# The dispatch structure in the chol!, chol, cholesky, and cholfact! methods is a bit +# The dispatch structure in the chol!, chol, cholesky, and cholesky! methods is a bit # complicated and some explanation is therefore provided in the following # # In the methods below, LAPACK is called when possible, i.e. StridedMatrices with Float32, # Float64, Complex{Float32}, and Complex{Float64} element types. For other element or # matrix types, the unblocked Julia implementation in _chol! is used. For cholesky -# and cholfact! pivoting is supported through a Val(Bool) argument. A type argument is -# necessary for type stability since the output of cholesky and cholfact! is either +# and cholesky! pivoting is supported through a Val(Bool) argument. A type argument is +# necessary for type stability since the output of cholesky and cholesky! is either # Cholesky or PivotedCholesky. The latter is only # supported for the four LAPACK element types. For other types, e.g. BigFloats Val(true) will # give an error. It is required that the input is Hermitian (including real symmetric) either @@ -21,7 +21,7 @@ # The internal structure is as follows # - _chol! returns the factor and info without checking positive definiteness # - chol/chol! returns the factor and checks for positive definiteness -# - cholesky/cholfact! returns Cholesky without checking positive definiteness +# - cholesky/cholesky! returns Cholesky without checking positive definiteness # FixMe? The dispatch below seems overly complicated. One simplification could be to # merge the two Cholesky types into one. It would remove the need for Val completely but @@ -201,10 +201,10 @@ chol(x::Number, args...) = ((C, info) = _chol!(x, nothing); @assertposdef C info -# cholfact!. Destructive methods for computing Cholesky factorization of real symmetric +# cholesky!. Destructive methods for computing Cholesky factorization of real symmetric # or Hermitian matrix ## No pivoting (default) -function cholfact!(A::RealHermSymComplexHerm, ::Val{false}=Val(false)) +function cholesky!(A::RealHermSymComplexHerm, ::Val{false}=Val(false)) if A.uplo == 'U' CU, info = _chol!(A.data, UpperTriangular) Cholesky(CU.data, 'U', info) @@ -216,7 +216,7 @@ end ### for StridedMatrices, check that matrix is symmetric/Hermitian """ - cholfact!(A, Val(false)) -> Cholesky + cholesky!(A, Val(false)) -> Cholesky The same as [`cholesky`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. An [`InexactError`](@ref) exception is thrown if @@ -230,25 +230,25 @@ julia> A = [1 2; 2 50] 1 2 2 50 -julia> cholfact!(A) +julia> cholesky!(A) ERROR: InexactError: Int64(Int64, 6.782329983125268) Stacktrace: [...] 
``` """ -function cholfact!(A::StridedMatrix, ::Val{false}=Val(false)) +function cholesky!(A::StridedMatrix, ::Val{false}=Val(false)) checksquare(A) if !ishermitian(A) # return with info = -1 if not Hermitian return Cholesky(A, 'U', convert(BlasInt, -1)) else - return cholfact!(Hermitian(A), Val(false)) + return cholesky!(Hermitian(A), Val(false)) end end ## With pivoting ### BLAS/LAPACK element types -function cholfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, +function cholesky!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, ::Val{true}; tol = 0.0) AA, piv, rank, info = LAPACK.pstrf!(A.uplo, A.data, tol) return CholeskyPivoted{eltype(AA),typeof(AA)}(AA, A.uplo, piv, rank, tol, info) @@ -256,25 +256,25 @@ end ### Non BLAS/LAPACK element types (generic). Since generic fallback for pivoted Cholesky ### is not implemented yet we throw an error -cholfact!(A::RealHermSymComplexHerm{<:Real}, ::Val{true}; tol = 0.0) = +cholesky!(A::RealHermSymComplexHerm{<:Real}, ::Val{true}; tol = 0.0) = throw(ArgumentError("generic pivoted Cholesky factorization is not implemented yet")) ### for StridedMatrices, check that matrix is symmetric/Hermitian """ - cholfact!(A, Val(true); tol = 0.0) -> CholeskyPivoted + cholesky!(A, Val(true); tol = 0.0) -> CholeskyPivoted The same as [`cholesky`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. An [`InexactError`](@ref) exception is thrown if the factorization produces a number not representable by the element type of `A`, e.g. for integer types. """ -function cholfact!(A::StridedMatrix, ::Val{true}; tol = 0.0) +function cholesky!(A::StridedMatrix, ::Val{true}; tol = 0.0) checksquare(A) if !ishermitian(A) # return with info = -1 if not Hermitian return CholeskyPivoted(A, 'U', Vector{BlasInt}(),convert(BlasInt, 1), tol, convert(BlasInt, -1)) else - return cholfact!(Hermitian(A), Val(true); tol = tol) + return cholesky!(Hermitian(A), Val(true); tol = tol) end end @@ -324,7 +324,7 @@ true ``` """ cholesky(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, - ::Val{false}=Val(false)) = cholfact!(cholcopy(A)) + ::Val{false}=Val(false)) = cholesky!(cholcopy(A)) ## With pivoting @@ -341,7 +341,7 @@ The argument `tol` determines the tolerance for determining the rank. For negative values, the tolerance is the machine precision. """ cholesky(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, - ::Val{true}; tol = 0.0) = cholfact!(cholcopy(A), Val(true); tol = tol) + ::Val{true}; tol = 0.0) = cholesky!(cholcopy(A), Val(true); tol = tol) ## Number function cholesky(x::Number, uplo::Symbol=:U) diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 0420dfd86c51e..8583b4918b918 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -89,7 +89,7 @@ julia> A 2.0 6.78233 ``` """ -isposdef!(A::AbstractMatrix) = ishermitian(A) && isposdef(cholfact!(Hermitian(A))) +isposdef!(A::AbstractMatrix) = ishermitian(A) && isposdef(cholesky!(Hermitian(A))) """ isposdef(A) -> Bool diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index f374af9b0094e..e59db95719f7c 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -7,7 +7,7 @@ using Base: @deprecate, depwarn @deprecate cond(F::LinearAlgebra.LU, p::Integer) cond(convert(AbstractArray, F), p) # PR #22188 -export cholfact +export cholfact, cholfact! 
@deprecate cholfact!(A::StridedMatrix, uplo::Symbol, ::Type{Val{false}}) cholfact!(Hermitian(A, uplo), Val(false)) @deprecate cholfact!(A::StridedMatrix, uplo::Symbol) cholfact!(Hermitian(A, uplo)) @deprecate cholfact(A::StridedMatrix, uplo::Symbol, ::Type{Val{false}}) cholfact(Hermitian(A, uplo), Val(false)) @@ -1402,3 +1402,11 @@ export eigfact! @deprecate(cholfact(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, ::Val{false}=Val(false)), cholesky(A, Val(false))) @deprecate(cholfact(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, ::Val{true}; tol = 0.0), cholesky(A, Val(true); tol=tol)) @deprecate(cholfact(x::Number, uplo::Symbol=:U), cholesky(x, uplo)) + +# deprecate cholfact! to cholesky! +# cholfact! exported from deprecation above +@deprecate(cholfact!(A::RealHermSymComplexHerm, ::Val{false}=Val(false)), cholesky!(A, Val(false))) +@deprecate(cholfact!(A::StridedMatrix, ::Val{false}=Val(false)), cholesky!(A, Val(false))) +@deprecate(cholfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, ::Val{true}; tol = 0.0), cholesky!(A, Val(true); tol=tol)) +@deprecate(cholfact!(A::RealHermSymComplexHerm{<:Real}, ::Val{true}; tol = 0.0), cholesky!(A, Val(true); tol=tol)) +@deprecate(cholfact!(A::StridedMatrix, ::Val{true}; tol = 0.0), cholesky!(A, Val(true); tol=tol)) diff --git a/stdlib/LinearAlgebra/test/cholesky.jl b/stdlib/LinearAlgebra/test/cholesky.jl index 1cadf5444b045..90289dd9f523d 100644 --- a/stdlib/LinearAlgebra/test/cholesky.jl +++ b/stdlib/LinearAlgebra/test/cholesky.jl @@ -68,7 +68,7 @@ end @test_throws DimensionMismatch chol(A) @test_throws DimensionMismatch LinearAlgebra.chol!(A) @test_throws DimensionMismatch cholesky(A) - @test_throws DimensionMismatch cholfact!(A) + @test_throws DimensionMismatch cholesky!(A) end #Test error bound on reconstruction of matrix: LAWNS 14, Lemma 2.1 @@ -100,7 +100,7 @@ end capds = cholesky(apds) unary_ops_tests(apds, capds, ε*κ*n) if eltya <: BlasReal - capds = cholfact!(copy(apds)) + capds = cholesky!(copy(apds)) unary_ops_tests(apds, capds, ε*κ*n) end ulstring = sprint((t, s) -> show(t, "text/plain", s), capds.UL) @@ -108,9 +108,9 @@ end else capdh = cholesky(apdh) unary_ops_tests(apdh, capdh, ε*κ*n) - capdh = cholfact!(copy(apdh)) + capdh = cholesky!(copy(apdh)) unary_ops_tests(apdh, capdh, ε*κ*n) - capdh = cholfact!(copy(apd)) + capdh = cholesky!(copy(apd)) unary_ops_tests(apd, capdh, ε*κ*n) ulstring = sprint((t, s) -> show(t, "text/plain", s), capdh.UL) @test sprint((t, s) -> show(t, "text/plain", s), capdh) == "$(typeof(capdh))\nU factor:\n$ulstring" @@ -186,8 +186,8 @@ end @test_throws PosDefException logdet(C) end - # Test generic cholfact! - @testset "generic cholfact!" begin + # Test generic cholesky! + @testset "generic cholesky!" 
begin if eltya <: Complex A = complex.(randn(5,5), randn(5,5)) else @@ -264,14 +264,14 @@ end C = complex.(R, R) for A in (R, C) @test !LinearAlgebra.issuccess(cholesky(A)) - @test !LinearAlgebra.issuccess(cholfact!(copy(A))) + @test !LinearAlgebra.issuccess(cholesky!(copy(A))) @test_throws PosDefException chol(A) @test_throws PosDefException LinearAlgebra.chol!(copy(A)) end end @testset "fail for non-BLAS element types" begin - @test_throws ArgumentError cholfact!(Hermitian(rand(Float16, 5,5)), Val(true)) + @test_throws ArgumentError cholesky!(Hermitian(rand(Float16, 5,5)), Val(true)) end end # module TestCholesky diff --git a/stdlib/LinearAlgebra/test/dense.jl b/stdlib/LinearAlgebra/test/dense.jl index 2e809075c4688..be35e774ac8d9 100644 --- a/stdlib/LinearAlgebra/test/dense.jl +++ b/stdlib/LinearAlgebra/test/dense.jl @@ -47,7 +47,7 @@ bimg = randn(n,2)/2 @testset "Positive definiteness" begin @test !isposdef(ainit) @test isposdef(apd) - if eltya != Int # cannot perform cholfact! for Matrix{Int} + if eltya != Int # cannot perform cholesky! for Matrix{Int} @test !isposdef!(copy(ainit)) @test isposdef!(copy(apd)) end diff --git a/stdlib/SuiteSparse/src/cholmod.jl b/stdlib/SuiteSparse/src/cholmod.jl index 92dfa7f66997c..75769f32388bf 100644 --- a/stdlib/SuiteSparse/src/cholmod.jl +++ b/stdlib/SuiteSparse/src/cholmod.jl @@ -7,7 +7,7 @@ import Base: (*), convert, copy, eltype, getindex, getproperty, show, size, using LinearAlgebra import LinearAlgebra: (\), - cholesky, cholfact!, det, diag, ishermitian, isposdef, + cholesky, cholesky!, det, diag, ishermitian, isposdef, issuccess, issymmetric, ldlt, ldlt!, logdet using SparseArrays @@ -1367,7 +1367,7 @@ function fact_(A::Sparse{<:VTypes}, cm::Array{UInt8}; return F end -function cholfact!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv +function cholesky!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv # Makes it an LLt unsafe_store!(common_final_ll[], 1) @@ -1378,7 +1378,7 @@ function cholfact!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv end """ - cholfact!(F::Factor, A; shift = 0.0) -> CHOLMOD.Factor + cholesky!(F::Factor, A; shift = 0.0) -> CHOLMOD.Factor Compute the Cholesky (``LL'``) factorization of `A`, reusing the symbolic factorization `F`. `A` must be a [`SparseMatrixCSC`](@ref) or a [`Symmetric`](@ref)/ @@ -1393,13 +1393,13 @@ See also [`cholesky`](@ref). be converted to `SparseMatrixCSC{Float64}` or `SparseMatrixCSC{ComplexF64}` as appropriate. """ -cholfact!(F::Factor, A::Union{SparseMatrixCSC{T}, +cholesky!(F::Factor, A::Union{SparseMatrixCSC{T}, SparseMatrixCSC{Complex{T}}, Symmetric{T,SparseMatrixCSC{T,SuiteSparse_long}}, Hermitian{Complex{T},SparseMatrixCSC{Complex{T},SuiteSparse_long}}, Hermitian{T,SparseMatrixCSC{T,SuiteSparse_long}}}; shift = 0.0) where {T<:Real} = - cholfact!(F, Sparse(A); shift = shift) + cholesky!(F, Sparse(A); shift = shift) function cholesky(A::Sparse; shift::Real=0.0, perm::AbstractVector{SuiteSparse_long}=SuiteSparse_long[]) @@ -1411,7 +1411,7 @@ function cholesky(A::Sparse; shift::Real=0.0, F = fact_(A, cm; perm = perm) # Compute the numerical factorization - cholfact!(F, A; shift = shift) + cholesky!(F, A; shift = shift) return F end diff --git a/stdlib/SuiteSparse/src/deprecated.jl b/stdlib/SuiteSparse/src/deprecated.jl index 9396f26ff55ea..3fba73707e45f 100644 --- a/stdlib/SuiteSparse/src/deprecated.jl +++ b/stdlib/SuiteSparse/src/deprecated.jl @@ -99,3 +99,16 @@ end kws...) where {T<:Real}, cholesky(A; kws...)) end + +# deprecate cholfact! to cholesky! 
+@eval SuiteSparse.CHOLMOD begin + import LinearAlgebra: cholfact! + @deprecate(cholfact!(F::Factor{Tv}, A::Sparse{Tv}; shift::Real=0.0) where Tv, cholesky!(F, A; shift=shift)) + @deprecate(cholfact!(F::Factor, A::Union{SparseMatrixCSC{T}, + SparseMatrixCSC{Complex{T}}, + Symmetric{T,SparseMatrixCSC{T,SuiteSparse_long}}, + Hermitian{Complex{T},SparseMatrixCSC{Complex{T},SuiteSparse_long}}, + Hermitian{T,SparseMatrixCSC{T,SuiteSparse_long}}}; + shift = 0.0) where {T<:Real}, + cholesky!(F, A; shift=shift)) +end diff --git a/stdlib/SuiteSparse/test/cholmod.jl b/stdlib/SuiteSparse/test/cholmod.jl index 545d9be2983e3..eb4b8cac5a6ac 100644 --- a/stdlib/SuiteSparse/test/cholmod.jl +++ b/stdlib/SuiteSparse/test/cholmod.jl @@ -434,19 +434,19 @@ end @test F1 == F2 end - ### cholfact!/ldlt! + ### cholesky!/ldlt! F = cholesky(A1pd) CHOLMOD.change_factor!(elty, false, false, true, true, F) @test unsafe_load(pointer(F)).is_ll == 0 CHOLMOD.change_factor!(elty, true, false, true, true, F) - @test CHOLMOD.Sparse(cholfact!(copy(F), A1pd)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality + @test CHOLMOD.Sparse(cholesky!(copy(F), A1pd)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality @test size(F, 2) == 5 @test size(F, 3) == 1 @test_throws ArgumentError size(F, 0) F = cholesky(A1pdSparse, shift=2) @test isa(CHOLMOD.Sparse(F), CHOLMOD.Sparse{elty}) - @test CHOLMOD.Sparse(cholfact!(copy(F), A1pd, shift=2.0)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality + @test CHOLMOD.Sparse(cholesky!(copy(F), A1pd, shift=2.0)) ≈ CHOLMOD.Sparse(F) # surprisingly, this can cause small ulp size changes so we cannot test exact equality F = ldlt(A1pd) @test isa(CHOLMOD.Sparse(F), CHOLMOD.Sparse{elty}) From aa65388a45e8f7ddf62c9e0ad7f5c8802d578540 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Tue, 22 May 2018 21:44:36 -0700 Subject: [PATCH 21/23] Deprecate eig to eigen and destructuring via iteration. --- NEWS.md | 15 ++++ stdlib/LinearAlgebra/docs/src/index.md | 3 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 1 - stdlib/LinearAlgebra/src/deprecated.jl | 41 ++++++++++++++ stdlib/LinearAlgebra/src/eigen.jl | 65 ----------------------- stdlib/LinearAlgebra/src/triangular.jl | 2 +- stdlib/LinearAlgebra/test/bidiag.jl | 4 +- stdlib/LinearAlgebra/test/eigen.jl | 18 +++---- stdlib/LinearAlgebra/test/lu.jl | 1 - stdlib/LinearAlgebra/test/schur.jl | 2 +- stdlib/LinearAlgebra/test/symmetric.jl | 10 ++-- stdlib/LinearAlgebra/test/triangular.jl | 2 +- stdlib/LinearAlgebra/test/tridiag.jl | 10 ++-- stdlib/SparseArrays/src/SparseArrays.jl | 2 +- stdlib/SparseArrays/src/linalg.jl | 2 +- stdlib/SparseArrays/test/sparse.jl | 2 +- 16 files changed, 82 insertions(+), 98 deletions(-) diff --git a/NEWS.md b/NEWS.md index fa94eee055a6d..f4b309e0d3d71 100644 --- a/NEWS.md +++ b/NEWS.md @@ -713,6 +713,21 @@ Deprecated or removed `lu!`, `schur!`, `lq!`, `qr!`, `ldlt!`, `svd!`, `bunchkaufman!`, `hessenberg!`, and `eigen!` ([#27159]). + + * `eig(A[, args...])` has been deprecated in favor of `eigen(A[, args...])`. + Whereas the former returns a tuple of arrays, the latter returns an `Eigen` object. + So for a direct replacement, use `(eigen(A[, args...])...,)`.
But going forward, + consider using the direct result of `eigen(A[, args...])` instead, either + destructured into its components (`vals, vecs = eigen(A[, args...])`) or + as an `Eigen` object (`X = eigen(A[, args...])`) ([#26997], [#27159], [#27212]). + + * `eig(A::AbstractMatrix, B::AbstractMatrix)` and `eig(A::Number, B::Number)` + have been deprecated in favor of `eigen(A, B)`. Whereas the former each return + a tuple of arrays, the latter returns a `GeneralizedEigen` object. So for a direct + replacement, use `(eigen(A, B)...,)`. But going forward, consider using the + direct result of `eigen(A, B)` instead, either destructured into its components + (`vals, vecs = eigen(A, B)`), or as a `GeneralizedEigen` object + (`X = eigen(A, B)`) ([#26997], [#27159], [#27212]). + * Indexing into multidimensional arrays with more than one index but fewer indices than there are dimensions is no longer permitted when those trailing dimensions have lengths greater than 1. Instead, reshape the array or add trailing indices so the dimensionality and number of indices diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 3a988193d3d81..a78fdfa506e2e 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -198,7 +198,7 @@ Legend: ### Matrix factorizations -| Matrix type | LAPACK | [`eig`](@ref) | [`eigvals`](@ref) | [`eigvecs`](@ref) | [`svd`](@ref) | [`svdvals`](@ref) | +| Matrix type | LAPACK | [`eigen`](@ref) | [`eigvals`](@ref) | [`eigvecs`](@ref) | [`svd`](@ref) | [`svdvals`](@ref) | |:------------------------- |:------ |:------------- |:----------------- |:----------------- |:------------- |:----------------- | | [`Symmetric`](@ref) | SY | | ARI | | | | | [`Hermitian`](@ref) | HE | | ARI | | | | @@ -331,7 +331,6 @@ LinearAlgebra.lq! LinearAlgebra.lq LinearAlgebra.bunchkaufman LinearAlgebra.bunchkaufman! -LinearAlgebra.eig LinearAlgebra.eigvals LinearAlgebra.eigvals! LinearAlgebra.eigmax diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 3519202b8c428..3442861f3fc8c 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -80,7 +80,6 @@ export diagind, diagm, dot, - eig, eigen, eigen!, eigmax, diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index e59db95719f7c..bbe481cbf306b 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1410,3 +1410,44 @@ export eigfact! @deprecate(cholfact!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, ::Val{true}; tol = 0.0), cholesky!(A, Val(true); tol=tol)) @deprecate(cholfact!(A::RealHermSymComplexHerm{<:Real}, ::Val{true}; tol = 0.0), cholesky!(A, Val(true); tol=tol)) @deprecate(cholfact!(A::StridedMatrix, ::Val{true}; tol = 0.0), cholesky!(A, Val(true); tol=tol)) + +# deprecate eig in favor of eigen and destructuring via iteration +# deprecate eig(...) in favor of eigfact and factorization destructuring +export eig +function eig(A::Union{Number, StridedMatrix}; permute::Bool=true, scale::Bool=true) + depwarn(string("`eig(A[, permute, scale])` has been deprecated in favor of ", + "`eigen(A[, permute, scale])`. Whereas `eig(A[, permute, scale])` ", + "returns a tuple of arrays, `eigen(A[, permute, scale])` returns ", + "an `Eigen` object. So for a direct replacement, use ", + "`(eigen(A[, permute, scale])...,)`. 
But going forward, consider ", + "using the direct result of `eigen(A[, permute, scale])` instead, ", + "either destructured into its components ", + "(`vals, vecs = eigen(A[, permute, scale])`) ", + "or as an `Eigen` object (`X = eigen(A[, permute, scale])`)."), :eig) + return (eigen(A; permute=permute, scale=scale)...,) +end +function eig(A::AbstractMatrix, args...) + depwarn(string("`eig(A, args...)` has been deprecated in favor of ", + "`eigen(A, args...)`. Whereas `eig(A, args....)` ", + "returns a tuple of arrays, `eigen(A, args...)` returns ", + "an `Eigen` object. So for a direct replacement, use ", + "`(eigen(A, args...)...,)`. But going forward, consider ", + "using the direct result of `eigen(A, args...)` instead, ", + "either destructured into its components ", + "(`vals, vecs = eigen(A, args...)`) ", + "or as an `Eigen` object (`X = eigen(A, args...)`)."), :eig) + return (eigen(A, args...)...,) +end +eig(A::AbstractMatrix, B::AbstractMatrix) = _geneig(A, B) +eig(A::Number, B::Number) = _geneig(A, B) +function _geneig(A, B) + depwarn(string("`eig(A::AbstractMatrix, B::AbstractMatrix)` and ", + "`eig(A::Number, B::Number)` have been deprecated in favor of ", + "`eigen(A, B)`. Whereas the former each return a tuple of arrays, ", + "the latter returns a `GeneralizedEigen` object. So for a direct ", + "replacement, use `(eigen(A, B)...,)`. But going forward, consider ", + "using the direct result of `eigen(A, B)` instead, either ", + "destructured into its components (`vals, vecs = eigen(A, B)`), ", + "or as a `GeneralizedEigen` object (`X = eigen(A, B)`)."), :eig) + return (eigen(A, B)...,) +end diff --git a/stdlib/LinearAlgebra/src/eigen.jl b/stdlib/LinearAlgebra/src/eigen.jl index d63c79ed90c43..af40aa2082f92 100644 --- a/stdlib/LinearAlgebra/src/eigen.jl +++ b/stdlib/LinearAlgebra/src/eigen.jl @@ -126,38 +126,6 @@ function eigen(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where end eigen(x::Number) = Eigen([x], fill(one(x), 1, 1)) -function eig(A::Union{Number, StridedMatrix}; permute::Bool=true, scale::Bool=true) - F = eigen(A, permute=permute, scale=scale) - F.values, F.vectors -end - -""" - eig(A::Union{SymTridiagonal, Hermitian, Symmetric}, irange::UnitRange) -> D, V - eig(A::Union{SymTridiagonal, Hermitian, Symmetric}, vl::Real, vu::Real) -> D, V - eig(A, permute::Bool=true, scale::Bool=true) -> D, V - -Computes eigenvalues (`D`) and eigenvectors (`V`) of `A`. -See [`eigen`](@ref) for details on the -`irange`, `vl`, and `vu` arguments -(for [`SymTridiagonal`](@ref), [`Hermitian`](@ref), and -[`Symmetric`](@ref) matrices) -and the `permute` and `scale` keyword arguments. -The eigenvectors are returned columnwise. - -# Examples -```jldoctest -julia> eig([1.0 0.0 0.0; 0.0 3.0 0.0; 0.0 0.0 18.0]) -([1.0, 3.0, 18.0], [1.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0]) -``` - -`eig` is a wrapper around [`eigen`](@ref), extracting all parts of the -factorization to a tuple; where possible, using [`eigen`](@ref) is recommended. -""" -function eig(A::AbstractMatrix, args...) - F = eigen(A, args...) - F.values, F.vectors -end - """ eigvecs(A; permute::Bool=true, scale::Bool=true) -> Matrix @@ -416,39 +384,6 @@ end eigen(A::Number, B::Number) = eigen(fill(A,1,1), fill(B,1,1)) -""" - eig(A, B) -> D, V - -Computes generalized eigenvalues (`D`) and vectors (`V`) of `A` with respect to `B`. - -`eig` is a wrapper around [`eigen`](@ref), extracting all parts of the -factorization to a tuple; where possible, using [`eigen`](@ref) is recommended. 
- -# Examples -```jldoctest -julia> A = [1 0; 0 -1] -2×2 Array{Int64,2}: - 1 0 - 0 -1 - -julia> B = [0 1; 1 0] -2×2 Array{Int64,2}: - 0 1 - 1 0 - -julia> eig(A, B) -(Complex{Float64}[0.0+1.0im, 0.0-1.0im], Complex{Float64}[0.0-1.0im 0.0+1.0im; -1.0-0.0im -1.0+0.0im]) -``` -""" -function eig(A::AbstractMatrix, B::AbstractMatrix) - F = eigen(A,B) - F.values, F.vectors -end -function eig(A::Number, B::Number) - F = eigen(A,B) - F.values, F.vectors -end - """ eigvals!(A, B) -> values diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index 9b0755cb2626d..874d8f233747f 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -2156,7 +2156,7 @@ function log(A0::UpperTriangular{T}) where T<:BlasFloat R[i,i+1] = i / sqrt((2 * i)^2 - 1) R[i+1,i] = R[i,i+1] end - x,V = eig(R) + x,V = eigen(R) w = Vector{Float64}(undef, m) for i = 1:m x[i] = (x[i] + 1) / 2 diff --git a/stdlib/LinearAlgebra/test/bidiag.jl b/stdlib/LinearAlgebra/test/bidiag.jl index 1f4fa2d41b637..efbf5d3e437eb 100644 --- a/stdlib/LinearAlgebra/test/bidiag.jl +++ b/stdlib/LinearAlgebra/test/bidiag.jl @@ -231,8 +231,8 @@ srand(1) @testset "Eigensystems" begin if relty <: AbstractFloat - d1, v1 = eig(T) - d2, v2 = eig(map(elty<:Complex ? ComplexF64 : Float64,Tfull)) + d1, v1 = eigen(T) + d2, v2 = eigen(map(elty<:Complex ? ComplexF64 : Float64,Tfull)) @test (uplo == :U ? d1 : reverse(d1)) ≈ d2 if elty <: Real test_approx_eq_modphase(v1, uplo == :U ? v2 : v2[:,n:-1:1]) diff --git a/stdlib/LinearAlgebra/test/eigen.jl b/stdlib/LinearAlgebra/test/eigen.jl index c4e3c087e4b3d..7862ae8d78822 100644 --- a/stdlib/LinearAlgebra/test/eigen.jl +++ b/stdlib/LinearAlgebra/test/eigen.jl @@ -28,12 +28,12 @@ aimg = randn(n,n)/2 α = rand(eltya) β = rand(eltya) - eab = eig(α,β) - @test eab[1] == eigvals(fill(α,1,1),fill(β,1,1)) - @test eab[2] == eigvecs(fill(α,1,1),fill(β,1,1)) + eab = eigen(α,β) + @test eab.values == eigvals(fill(α,1,1),fill(β,1,1)) + @test eab.vectors == eigvecs(fill(α,1,1),fill(β,1,1)) @testset "non-symmetric eigen decomposition" begin - d, v = eig(a) + d, v = eigen(a) for i in 1:size(a,2) @test a*v[:,i] ≈ d[i]*v[:,i] end @@ -70,7 +70,7 @@ aimg = randn(n,n)/2 @test eigvecs(f) === f.vectors @test_throws ErrorException f.Z - d,v = eig(asym_sg, a_sg'a_sg) + d,v = eigen(asym_sg, a_sg'a_sg) @test d == f.values @test v == f.vectors end @@ -89,7 +89,7 @@ aimg = randn(n,n)/2 @test eigvecs(a1_nsg, a2_nsg) == f.vectors @test_throws ErrorException f.Z - d,v = eig(a1_nsg, a2_nsg) + d,v = eigen(a1_nsg, a2_nsg) @test d == f.values @test v == f.vectors end @@ -98,11 +98,11 @@ end @testset "eigenvalue computations with NaNs" begin for eltya in (NaN16, NaN32, NaN) - @test_throws(ArgumentError, eig(fill(eltya, 1, 1))) - @test_throws(ArgumentError, eig(fill(eltya, 2, 2))) + @test_throws(ArgumentError, eigen(fill(eltya, 1, 1))) + @test_throws(ArgumentError, eigen(fill(eltya, 2, 2))) test_matrix = rand(typeof(eltya),3,3) test_matrix[2,2] = eltya - @test_throws(ArgumentError, eig(test_matrix)) + @test_throws(ArgumentError, eigen(test_matrix)) end end diff --git a/stdlib/LinearAlgebra/test/lu.jl b/stdlib/LinearAlgebra/test/lu.jl index 4b4353f6e4d36..8bcfbe3e41046 100644 --- a/stdlib/LinearAlgebra/test/lu.jl +++ b/stdlib/LinearAlgebra/test/lu.jl @@ -51,7 +51,6 @@ dimg = randn(n)/2 -eps(real(one(eltya)))/4 eps(real(one(eltya)))/2 -1.0 0; -0.5 -0.5 0.1 1.0]) F = eigen(A, permute=false, scale=false) - eig(A, permute=false, scale=false) @test F.vectors*Diagonal(F.values)/F.vectors 
≈ A F = eigen(A) # @test norm(F.vectors*Diagonal(F.values)/F.vectors - A) > 0.01 diff --git a/stdlib/LinearAlgebra/test/schur.jl b/stdlib/LinearAlgebra/test/schur.jl index 6299e9da71a1d..e5d1efb868321 100644 --- a/stdlib/LinearAlgebra/test/schur.jl +++ b/stdlib/LinearAlgebra/test/schur.jl @@ -26,7 +26,7 @@ aimg = randn(n,n)/2 view(apd, 1:n, 1:n))) ε = εa = eps(abs(float(one(eltya)))) - d,v = eig(a) + d,v = eigen(a) f = schur(a) @test f.vectors*f.Schur*f.vectors' ≈ a @test sort(real(f.values)) ≈ sort(real(d)) diff --git a/stdlib/LinearAlgebra/test/symmetric.jl b/stdlib/LinearAlgebra/test/symmetric.jl index 54172dfcb2af7..54cf0ee6c74b1 100644 --- a/stdlib/LinearAlgebra/test/symmetric.jl +++ b/stdlib/LinearAlgebra/test/symmetric.jl @@ -218,14 +218,12 @@ end @testset "symmetric eigendecomposition" begin if eltya <: Real # the eigenvalues are only real and ordered for Hermitian matrices - d, v = eig(asym) + d, v = eigen(asym) @test asym*v[:,1] ≈ d[1]*v[:,1] @test v*Diagonal(d)*transpose(v) ≈ asym @test isequal(eigvals(asym[1]), eigvals(asym[1:1,1:1])) @test abs.(eigen(Symmetric(asym), 1:2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) - eig(Symmetric(asym), 1:2) # same result, but checks that method works @test abs.(eigen(Symmetric(asym), d[1] - 1, (d[2] + d[3])/2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) - eig(Symmetric(asym), d[1] - 1, (d[2] + d[3])/2) # same result, but checks that method works @test eigvals(Symmetric(asym), 1:2) ≈ d[1:2] @test eigvals(Symmetric(asym), d[1] - 1, (d[2] + d[3])/2) ≈ d[1:2] # eigen doesn't support Symmetric{Complex} @@ -233,14 +231,12 @@ end @test eigvecs(Symmetric(asym)) ≈ eigvecs(asym) end - d, v = eig(aherm) + d, v = eigen(aherm) @test aherm*v[:,1] ≈ d[1]*v[:,1] @test v*Diagonal(d)*v' ≈ aherm @test isequal(eigvals(aherm[1]), eigvals(aherm[1:1,1:1])) @test abs.(eigen(Hermitian(aherm), 1:2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) - eig(Hermitian(aherm), 1:2) # same result, but checks that method works @test abs.(eigen(Hermitian(aherm), d[1] - 1, (d[2] + d[3])/2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) - eig(Hermitian(aherm), d[1] - 1, (d[2] + d[3])/2) # same result, but checks that method works @test eigvals(Hermitian(aherm), 1:2) ≈ d[1:2] @test eigvals(Hermitian(aherm), d[1] - 1, (d[2] + d[3])/2) ≈ d[1:2] @test Matrix(eigen(aherm)) ≈ aherm @@ -365,7 +361,7 @@ end end @testset "Issues #8057 and #8058. f=$f, A=$A" for f in - (eigen, eigvals, eig), + (eigen, eigvals), A in (Symmetric([0 1; 1 0]), Hermitian([0 im; -im 0])) @test_throws ArgumentError f(A, 3, 2) @test_throws ArgumentError f(A, 1:4) diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index 0043176673605..89ad49d70329d 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -249,7 +249,7 @@ for elty1 in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFlo # eigenproblems if !(elty1 in (BigFloat, Complex{BigFloat})) # Not handled yet - vals, vecs = eig(A1) + vals, vecs = eigen(A1) if (t1 == UpperTriangular || t1 == LowerTriangular) && elty1 != Int # Cannot really handle degenerate eigen space and Int matrices will probably have repeated eigenvalues. 
@test vecs*diagm(0 => vals)/vecs ≈ A1 atol=sqrt(eps(float(real(one(vals[1])))))*(norm(A1,Inf)*n)^2 end diff --git a/stdlib/LinearAlgebra/test/tridiag.jl b/stdlib/LinearAlgebra/test/tridiag.jl index 903d09a02d27f..0573db496d2de 100644 --- a/stdlib/LinearAlgebra/test/tridiag.jl +++ b/stdlib/LinearAlgebra/test/tridiag.jl @@ -243,7 +243,7 @@ end w, iblock, isplit = LAPACK.stebz!('V', 'B', -infinity, infinity, 0, 0, zero, b, a) evecs = LAPACK.stein!(b, a, w) - (e, v) = eig(SymTridiagonal(b, a)) + (e, v) = eigen(SymTridiagonal(b, a)) @test e ≈ w test_approx_eq_vecs(v, evecs) end @@ -266,10 +266,10 @@ end end @testset "eigenvalues/eigenvectors of symmetric tridiagonal" begin if elty === Float32 || elty === Float64 - DT, VT = @inferred eig(A) - @inferred eig(A, 2:4) - @inferred eig(A, 1.0, 2.0) - D, Vecs = eig(fA) + DT, VT = @inferred eigen(A) + @inferred eigen(A, 2:4) + @inferred eigen(A, 1.0, 2.0) + D, Vecs = eigen(fA) @test DT ≈ D @test abs.(VT'Vecs) ≈ Matrix(elty(1)I, n, n) test_approx_eq_modphase(eigvecs(A), eigvecs(fA)) diff --git a/stdlib/SparseArrays/src/SparseArrays.jl b/stdlib/SparseArrays/src/SparseArrays.jl index 477598677a1b2..7ec270532ea0d 100644 --- a/stdlib/SparseArrays/src/SparseArrays.jl +++ b/stdlib/SparseArrays/src/SparseArrays.jl @@ -12,7 +12,7 @@ using Base.Sort: Forward using LinearAlgebra import Base: +, -, *, \, /, &, |, xor, == -import LinearAlgebra: mul!, ldiv!, rdiv!, chol, adjoint!, diag, dot, eig, +import LinearAlgebra: mul!, ldiv!, rdiv!, chol, adjoint!, diag, dot, eigen, issymmetric, istril, istriu, lu, tr, transpose!, tril!, triu!, vecnorm, cond, diagm, factorize, ishermitian, norm, lmul!, rmul!, tril, triu diff --git a/stdlib/SparseArrays/src/linalg.jl b/stdlib/SparseArrays/src/linalg.jl index 36dba2f9c8061..170232f7a463c 100644 --- a/stdlib/SparseArrays/src/linalg.jl +++ b/stdlib/SparseArrays/src/linalg.jl @@ -1009,7 +1009,7 @@ function factorize(A::LinearAlgebra.RealHermSymComplexHerm{Float64,<:SparseMatri end chol(A::SparseMatrixCSC) = error("Use cholesky() instead of chol() for sparse matrices.") -eig(A::SparseMatrixCSC) = error("Use IterativeEigensolvers.eigs() instead of eig() for sparse matrices.") +eigen(A::SparseMatrixCSC) = error("Use IterativeEigensolvers.eigs() instead of eigen() for sparse matrices.") function Base.cov(X::SparseMatrixCSC; dims::Int=1, corrected::Bool=true) vardim = dims diff --git a/stdlib/SparseArrays/test/sparse.jl b/stdlib/SparseArrays/test/sparse.jl index f55f90497fba4..fcface7889ab4 100644 --- a/stdlib/SparseArrays/test/sparse.jl +++ b/stdlib/SparseArrays/test/sparse.jl @@ -1780,7 +1780,7 @@ end C, b = A[:, 1:4], fill(1., size(A, 1)) @test !Base.USE_GPL_LIBS || factorize(C)\b ≈ Array(C)\b @test_throws ErrorException chol(A) - @test_throws ErrorException eig(A) + @test_throws ErrorException eigen(A) @test_throws ErrorException inv(A) end From 4873d9756ed224d16d237cea424a6f249dca5cdf Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Tue, 22 May 2018 22:05:53 -0700 Subject: [PATCH 22/23] Deprecate decomposition getindex methods in favor of property accessors. 
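For illustration only (not part of the diff below), a minimal sketch of how downstream code might migrate from indexing factorization objects to the property accessors and iteration destructuring this commit points to; the matrix `A` and the bound names are hypothetical:

```julia
using LinearAlgebra

A = [4.0 3.0; 6.0 3.0]
F = lu(A)

# Deprecated: indexing the factorization object.
# L, U, p = F[1], F[2], F[3]

# Preferred: property accessors ...
L, U, p = F.L, F.U, F.p

# ... or destructuring via iteration, enabled by the Base.iterate methods.
l, u, p = lu(A)
q, r = qr(A)
@assert l * u ≈ A[p, :]
```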
--- stdlib/IterativeEigensolvers/test/runtests.jl | 2 +- stdlib/LinearAlgebra/src/deprecated.jl | 125 +++++++++++++++++- stdlib/LinearAlgebra/src/eigen.jl | 7 - stdlib/LinearAlgebra/src/lq.jl | 7 - stdlib/LinearAlgebra/src/lu.jl | 8 -- stdlib/LinearAlgebra/src/qr.jl | 26 +--- stdlib/LinearAlgebra/src/schur.jl | 19 --- stdlib/LinearAlgebra/src/svd.jl | 19 --- stdlib/LinearAlgebra/test/generic.jl | 2 +- stdlib/LinearAlgebra/test/lq.jl | 1 - 10 files changed, 127 insertions(+), 89 deletions(-) diff --git a/stdlib/IterativeEigensolvers/test/runtests.jl b/stdlib/IterativeEigensolvers/test/runtests.jl index 3df8626edcf44..939a71a16e419 100644 --- a/stdlib/IterativeEigensolvers/test/runtests.jl +++ b/stdlib/IterativeEigensolvers/test/runtests.jl @@ -227,7 +227,7 @@ end for j in 2:i d[j] = d[1] end - A = qr(randn(rng, 20, 20))[1]*Diagonal(d)*qr(randn(rng, 20, 20))[1] + A = qr(randn(rng, 20, 20)).Q*Diagonal(d)*qr(randn(rng, 20, 20)).Q @testset "Number of singular values: $j" for j in 2:6 # Default size of subspace F = svds(A, nsv = j, v0 = v0) diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index bbe481cbf306b..73ee4fcaea14f 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -1,4 +1,4 @@ - # This file is a part of Julia. License is MIT: https://julialang.org/license +# This file is a part of Julia. License is MIT: https://julialang.org/license  using Base: @deprecate, depwarn  @@ -1451,3 +1451,126 @@ function _geneig(A, B) "or as a `GeneralizedEigen` object (`X = eigen(A, B)`)."), :eig) return (eigen(A, B)...,) end + +# deprecate transitional decomposition getindex methods out of the blocks +function Base.getindex(S::LU, i::Integer) + depwarn(string("decomposition functions (e.g. `lu`) now return decomposition ", + "objects (e.g. `LU`), and indexing such objects is deprecated. Instead ", + "extract components via their accessors (e.g. `X.L`, `X.U`, and `X.p` for ", + "`X::LU`), or destructure the decomposition via iteration ", + "(e.g. `l, u, p = X` for `X::LU`)."), :getindex) + i == 1 ? (return S.L) : + i == 2 ? (return S.U) : + i == 3 ? (return S.p) : + throw(BoundsError(S, i)) +end +function Base.getindex(S::Union{Eigen,GeneralizedEigen}, i::Integer) + depwarn(string("decomposition functions (e.g. `eig`) now return decomposition ", + "objects (e.g. `Eigen` and `GeneralizedEigen`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.values` and `X.vectors` for `X::Union{Eigen,GeneralizedEigen}`), ", + "or destructure the decomposition via iteration ", + "(e.g. `vals, vecs = X` for `X::Union{Eigen,GeneralizedEigen}`)."), :getindex) + i == 1 ? (return S.values) : + i == 2 ? (return S.vectors) : + throw(BoundsError(S, i)) +end +function Base.getindex(S::Schur, i::Integer) + depwarn(string("decomposition functions (e.g. `schur`) now return decomposition ", + "objects (e.g. `Schur`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.T`, `X.Z`, and `X.values` for `X::Schur`), ", + "or destructure the decomposition via iteration ", + "(e.g. `t, z, vals = X` for `X::Schur`)."), :getindex) + i == 1 ? (return S.T) : + i == 2 ? (return S.Z) : + i == 3 ? (return S.values) : + throw(BoundsError(S, i)) +end +function Base.getindex(S::GeneralizedSchur, i::Integer) + depwarn(string("decomposition functions (e.g. `schur`) now return decomposition ", + "objects (e.g. 
`GeneralizedSchur`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.S`, `X.T`, `X.Q`, `X.Z`, `X.α`, and `X.β` for `X::GeneralizedSchur`), ", + "or destructure the decomposition via iteration ", + "(e.g. `s, t, q, z, α, β = X` for `X::GeneralizedSchur`)."), :getindex) + i == 1 ? (return S.S) : + i == 2 ? (return S.T) : + i == 3 ? (return S.Q) : + i == 4 ? (return S.Z) : + i == 5 ? (return S.α) : + i == 6 ? (return S.β) : + throw(BoundsError(S, i)) +end +function Base.getindex(S::LQ, i::Integer) + depwarn(string("decomposition functions (e.g. `lq`) now return decomposition ", + "objects (e.g. `LQ`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.L` and `X.Q` for `X::LQ`), ", + "or destructure the decomposition via iteration ", + "(e.g. `l, q = X` for `X::LQ`)."), :getindex) + i == 1 ? (return S.L) : + i == 2 ? (return S.Q) : + throw(BoundsError(S, i)) +end +function Base.getindex(S::QR, i::Integer) + depwarn(string("decomposition functions (e.g. `qr`) now return decomposition ", + "objects (e.g. `QR`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.Q` and `X.R` for `X::QR`), ", + "or destructure the decomposition via iteration ", + "(e.g. `q, r = X` for `X::QR`)."), :getindex) + i == 1 ? (return S.Q) : + i == 2 ? (return S.R) : + throw(BoundsError(S, i)) +end +function Base.getindex(S::QRCompactWY, i::Integer) + depwarn(string("decomposition functions (e.g. `qr`) now return decomposition ", + "objects (e.g. `QRCompactWY`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.Q` and `X.R` for `X::QR`), ", + "or destructure the decomposition via iteration ", + "(e.g. `q, r = X` for `X::QR`)."), :getindex) + i == 1 ? (return S.Q) : + i == 2 ? (return S.R) : + throw(BoundsError(S, i)) +end +function Base.getindex(S::QRPivoted, i::Integer) + depwarn(string("decomposition functions (e.g. `qr`) now return decomposition ", + "objects (e.g. `QRPivoted`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.Q`, `X.R`, and `X.p` for `X::QRPivoted`), ", + "or destructure the decomposition via iteration ", + "(e.g. `q, r, p = X` for `X::QRPivoted`)."), :getindex) + i == 1 ? (return S.Q) : + i == 2 ? (return S.R) : + i == 3 ? (return S.p) : + throw(BoundsError(S, i)) +end +function Base.getindex(S::SVD, i::Integer) + depwarn(string("decomposition functions (e.g. `svd`) now return decomposition ", + "objects (e.g. `SVD`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.U`, `X.S`, and `X.V` for `X::SVD`), ", + "or destructure the decomposition via iteration ", + "(e.g. `u, s, v = X` for `X::SVD`)."), :getindex) + i == 1 ? (return S.U) : + i == 2 ? (return S.S) : + i == 3 ? (return S.V) : + throw(BoundsError(S, i)) +end +function Base.getindex(S::GeneralizedSVD, i::Integer) + depwarn(string("decomposition functions (e.g. `svd`) now return decomposition ", + "objects (e.g. `GeneralizedSVD`), and indexing such objects ", + "is deprecated. Instead extract components via their accessors ", + "(e.g. `X.U`, `X.V`, `X.Q`, `X.D1`, `X.D2`, and `X.R0` for `X::GeneralizedSVD`), ", + "or destructure the decomposition via iteration ", + "(e.g. `u, v, q, d1, d2, r0 = X` for `X::GeneralizedSVD`)."), :getindex) + i == 1 ? (return S.U) : + i == 2 ? 
(return S.V) : + i == 3 ? (return S.Q) : + i == 4 ? (return S.D1) : + i == 5 ? (return S.D2) : + i == 6 ? (return S.R0) : + throw(BoundsError(S, i)) +end diff --git a/stdlib/LinearAlgebra/src/eigen.jl b/stdlib/LinearAlgebra/src/eigen.jl index af40aa2082f92..37fed3fe3c940 100644 --- a/stdlib/LinearAlgebra/src/eigen.jl +++ b/stdlib/LinearAlgebra/src/eigen.jl @@ -25,13 +25,6 @@ Base.iterate(S::Union{Eigen,GeneralizedEigen}) = (S.values, Val(:vectors)) Base.iterate(S::Union{Eigen,GeneralizedEigen}, ::Val{:vectors}) = (S.vectors, Val(:done)) Base.iterate(S::Union{Eigen,GeneralizedEigen}, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::Union{Eigen,GeneralizedEigen}, i::Integer) - i == 1 ? (return S.values) : - i == 2 ? (return S.vectors) : - throw(BoundsError(S, i)) -end - isposdef(A::Union{Eigen,GeneralizedEigen}) = isreal(A.values) && all(x -> x > 0, A.values) """ diff --git a/stdlib/LinearAlgebra/src/lq.jl b/stdlib/LinearAlgebra/src/lq.jl index 10ca990f027cb..57fc6b6f5d8a0 100644 --- a/stdlib/LinearAlgebra/src/lq.jl +++ b/stdlib/LinearAlgebra/src/lq.jl @@ -14,13 +14,6 @@ Base.iterate(S::LQ) = (S.L, Val(:Q)) Base.iterate(S::LQ, ::Val{:Q}) = (S.Q, Val(:done)) Base.iterate(S::LQ, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::LQ, i::Integer) - i == 1 ? (return S.L) : - i == 2 ? (return S.Q) : - throw(BoundsError(S, i)) -end - struct LQPackedQ{T,S<:AbstractMatrix} <: AbstractMatrix{T} factors::Matrix{T} τ::Vector{T} diff --git a/stdlib/LinearAlgebra/src/lu.jl b/stdlib/LinearAlgebra/src/lu.jl index f041130230290..60e1987b3409c 100644 --- a/stdlib/LinearAlgebra/src/lu.jl +++ b/stdlib/LinearAlgebra/src/lu.jl @@ -17,14 +17,6 @@ Base.iterate(S::LU, ::Val{:U}) = (S.U, Val(:p)) Base.iterate(S::LU, ::Val{:p}) = (S.p, Val(:done)) Base.iterate(S::LU, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::LU, i::Integer) - i == 1 ? (return S.L) : - i == 2 ? (return S.U) : - i == 3 ? (return S.p) : - throw(BoundsError(S, i)) -end - adjoint(F::LU) = Adjoint(F) transpose(F::LU) = Transpose(F) diff --git a/stdlib/LinearAlgebra/src/qr.jl b/stdlib/LinearAlgebra/src/qr.jl index 90e5ae7d21c91..602c143230cfb 100644 --- a/stdlib/LinearAlgebra/src/qr.jl +++ b/stdlib/LinearAlgebra/src/qr.jl @@ -46,14 +46,6 @@ Base.iterate(S::QR) = (S.Q, Val(:R)) Base.iterate(S::QR, ::Val{:R}) = (S.R, Val(:done)) Base.iterate(S::QR, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::QR, i::Integer) - i == 1 ? (return S.Q) : - i == 2 ? (return S.R) : - throw(BoundsError(S, i)) -end - - # Note. For QRCompactWY factorization without pivoting, the WY representation based method introduced in LAPACK 3.4 """ QRCompactWY <: Factorization @@ -114,13 +106,6 @@ Base.iterate(S::QRCompactWY) = (S.Q, Val(:R)) Base.iterate(S::QRCompactWY, ::Val{:R}) = (S.R, Val(:done)) Base.iterate(S::QRCompactWY, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::QRCompactWY, i::Integer) - i == 1 ? (return S.Q) : - i == 2 ? (return S.R) : - throw(BoundsError(S, i)) -end - """ QRPivoted <: Factorization @@ -170,15 +155,6 @@ Base.iterate(S::QRPivoted, ::Val{:R}) = (S.R, Val(:p)) Base.iterate(S::QRPivoted, ::Val{:p}) = (S.p, Val(:done)) Base.iterate(S::QRPivoted, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::QRPivoted, i::Integer) - i == 1 ? 
(return S.Q) : - i == 2 ? (return S.R) : - i == 3 ? (return S.p) : - throw(BoundsError(S, i)) -end - - function qrfactUnblocked!(A::AbstractMatrix{T}) where {T} m, n = size(A) τ = zeros(T, min(m,n)) @@ -308,7 +284,7 @@ The returned object `F` stores the factorization in a packed format: - otherwise `F` is a [`QR`](@ref) object. -The individual components of the factorization `F` can be accessed by indexing with a symbol: +The individual components of the decomposition `F` can be retrieved via property accessors: - `F.Q`: the orthogonal/unitary matrix `Q` - `F.R`: the upper triangular matrix `R` diff --git a/stdlib/LinearAlgebra/src/schur.jl b/stdlib/LinearAlgebra/src/schur.jl index f7e435cd4edbe..c1b848c5408e5 100644 --- a/stdlib/LinearAlgebra/src/schur.jl +++ b/stdlib/LinearAlgebra/src/schur.jl @@ -15,14 +15,6 @@ Base.iterate(S::Schur, ::Val{:Z}) = (S.Z, Val(:values)) Base.iterate(S::Schur, ::Val{:values}) = (S.values, Val(:done)) Base.iterate(S::Schur, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::Schur, i::Integer) - i == 1 ? (return S.T) : - i == 2 ? (return S.Z) : - i == 3 ? (return S.values) : - throw(BoundsError(S, i)) -end - """ schur!(A::StridedMatrix) -> F::Schur @@ -204,17 +196,6 @@ Base.iterate(S::GeneralizedSchur, ::Val{:α}) = (S.α, Val(:β)) Base.iterate(S::GeneralizedSchur, ::Val{:β}) = (S.β, Val(:done)) Base.iterate(S::GeneralizedSchur, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::GeneralizedSchur, i::Integer) - i == 1 ? (return S.S) : - i == 2 ? (return S.T) : - i == 3 ? (return S.Q) : - i == 4 ? (return S.Z) : - i == 5 ? (return S.α) : - i == 6 ? (return S.β) : - throw(BoundsError(S, i)) -end - """ schur!(A::StridedMatrix, B::StridedMatrix) -> F::GeneralizedSchur diff --git a/stdlib/LinearAlgebra/src/svd.jl b/stdlib/LinearAlgebra/src/svd.jl index 51b986a67c7d2..8771f6d0fcc15 100644 --- a/stdlib/LinearAlgebra/src/svd.jl +++ b/stdlib/LinearAlgebra/src/svd.jl @@ -16,14 +16,6 @@ Base.iterate(S::SVD, ::Val{:S}) = (S.S, Val(:V)) Base.iterate(S::SVD, ::Val{:V}) = (S.V, Val(:done)) Base.iterate(S::SVD, ::Val{:done}) = nothing -# # indexing for destructuring into components -@inline function Base.getindex(S::SVD, i::Integer) - i == 1 ? (return S.U) : - i == 2 ? (return S.S) : - i == 3 ? (return S.V) : - throw(BoundsError(S, i)) -end - """ svd!(A; full::Bool = false) -> SVD @@ -246,17 +238,6 @@ Base.iterate(S::GeneralizedSVD, ::Val{:D2}) = (S.D2, Val(:R0)) Base.iterate(S::GeneralizedSVD, ::Val{:R0}) = (S.R0, Val(:done)) Base.iterate(S::GeneralizedSVD, ::Val{:done}) = nothing -# indexing for destructuring into components -@inline function Base.getindex(S::GeneralizedSVD, i::Integer) - i == 1 ? (return S.U) : - i == 2 ? (return S.V) : - i == 3 ? (return S.Q) : - i == 4 ? (return S.D1) : - i == 5 ? (return S.D2) : - i == 6 ? 
(return S.R0) : - throw(BoundsError(S, i)) -end - """ svd!(A, B) -> GeneralizedSVD diff --git a/stdlib/LinearAlgebra/test/generic.jl b/stdlib/LinearAlgebra/test/generic.jl index 6108a563bbb29..8bf054f7530b4 100644 --- a/stdlib/LinearAlgebra/test/generic.jl +++ b/stdlib/LinearAlgebra/test/generic.jl @@ -240,7 +240,7 @@ end @test rank(fill(0, 0, 0)) == 0 @test rank([1.0 0.0; 0.0 0.9],0.95) == 1 -@test qr(big.([0 1; 0 0]))[2] == [0 1; 0 0] +@test qr(big.([0 1; 0 0])).R == [0 1; 0 0] @test norm([2.4e-322, 4.4e-323]) ≈ 2.47e-322 @test norm([2.4e-322, 4.4e-323], 3) ≈ 2.4e-322 diff --git a/stdlib/LinearAlgebra/test/lq.jl b/stdlib/LinearAlgebra/test/lq.jl index f8ab28755d06b..d9a7f552de63e 100644 --- a/stdlib/LinearAlgebra/test/lq.jl +++ b/stdlib/LinearAlgebra/test/lq.jl @@ -39,7 +39,6 @@ rectangularQ(Q::LinearAlgebra.LQPackedQ) = convert(Array, Q) α = rand(eltya) aα = fill(α,1,1) @test lq(α).L*lq(α).Q ≈ lq(aα).L*lq(aα).Q - @test lq(α)[1]*lq(α)[2] ≈ lq(aα).L*lq(aα).Q @test abs(lq(α).Q[1,1]) ≈ one(eltya) tab = promote_type(eltya,eltyb) From edf5bd2a7ec073c6a77210d62691f96ad0dbf5c0 Mon Sep 17 00:00:00 2001 From: Sacha Verweij Date: Tue, 22 May 2018 22:08:40 -0700 Subject: [PATCH 23/23] Expand pull request numbers for some news items. --- NEWS.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/NEWS.md b/NEWS.md index f4b309e0d3d71..9119b6e6551d7 100644 --- a/NEWS.md +++ b/NEWS.md @@ -228,19 +228,19 @@ This section lists changes that do not have deprecation warnings. behavior of `readline`. Pass `keep=true` to get the old behavior ([#25633]). * `lu` methods now return decomposition objects such as `LU` rather than - tuples of arrays or tuples of numbers ([#27159]). + tuples of arrays or tuples of numbers ([#26997], [#27159], [#27212]). * `schur` methods now return decomposition objects such as `Schur` and - `GeneralizedSchur` rather than tuples of arrays ([#27159]). + `GeneralizedSchur` rather than tuples of arrays ([#26997], [#27159], [#27212]). * `lq` methods now return decomposition objects such as `LQ` - rather than tuples of arrays ([#27159]). + rather than tuples of arrays ([#26997], [#27159], [#27212]). * `qr` methods now return decomposition objects such as `QR`, `QRPivoted`, - and `QRCompactWY` rather than tuples of arrays ([#27159]). + and `QRCompactWY` rather than tuples of arrays ([#26997], [#27159], [#27212]). * `svd` methods now return decomposition objects such as `SVD` and - `GeneralizedSVD` rather than tuples of arrays or tuples of numbers ([#27159]). + `GeneralizedSVD` rather than tuples of arrays or tuples of numbers ([#26997], [#27159], [#27212]). * `countlines` now always counts the last non-empty line even if it does not end with EOL, matching the behavior of `eachline` and `readlines` ([#25845]). @@ -706,12 +706,12 @@ Deprecated or removed * `lufact`, `schurfact`, `lqfact`, `qrfact`, `ldltfact`, `svdfact`, `bkfact`, `hessfact`, `eigfact`, and `cholfact` have respectively been deprecated to `lu`, `schur`, `lq`, `qr`, `ldlt`, `svd`, `bunchkaufman`, - `hessenberg`, `eigen`, and `cholesky` ([#27159]). + `hessenberg`, `eigen`, and `cholesky` ([#26997], [#27159], [#27212]). * `lufact!`, `schurfact!`, `lqfact!`, `qrfact!`, `ldltfact!`, `svdfact!`, `bkfact!`, `hessfact!`, and `eigfact!` have respectively been deprecated to `lu!`, `schur!`, `lq!`, `qr!`, `ldlt!`, `svd!`, `bunchkaufman!`, - `hessenberg!`, and `eigen!` ([#27159]). + `hessenberg!`, and `eigen!` ([#26997], [#27159], [#27212]). 
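As a minimal illustrative sketch of the in-place renamings listed above (not part of the patch; the matrix `A` and the bindings are hypothetical, assuming a dense positive-definite input for the Cholesky case):

```julia
using LinearAlgebra

A = rand(4, 4)
F = lu!(copy(A))                    # was: lufact!(copy(A))
C = cholesky!(Hermitian(A'A + I))   # was: cholfact!(Hermitian(A'A + I))
```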
* `eig(A[, args...])` has been deprecated in favor of `eigen(A[, args...])`. Whereas the former returns a tuple of arrays, the latter returns an `Eigen` object.
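To make the migration described in this entry concrete, a minimal sketch (not part of the patch; the matrix `A` and the bound names are illustrative) of replacing `eig` with `eigen`:

```julia
using LinearAlgebra

A = [2.0 0.0; 0.0 3.0]

# Deprecated:         vals, vecs = eig(A)
# Direct replacement: vals, vecs = (eigen(A)...,)

# Going forward, use the Eigen object directly ...
F = eigen(A)
vals, vecs = F.values, F.vectors

# ... or destructure it via iteration.
vals, vecs = eigen(A)
@assert A * vecs[:, 1] ≈ vals[1] * vecs[:, 1]
```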