Merge pull request JuliaLang#9 from JuliaLang/master
update to 05c323d [ci skip]
tkelman committed Mar 6, 2014
2 parents: 45f02b6 + 05c323d · commit a320bad
Showing 28 changed files with 234 additions and 112 deletions.
12 changes: 11 additions & 1 deletion NEWS.md
@@ -143,14 +143,23 @@ Library improvements

* `condskeel` for Skeel condition numbers ([#5726]).

* norm(Matrix) no longer calculates vector norm when first dimension is one ([#5545]).
* `norm(::Matrix)` no longer calculates a vector norm when the first
dimension is one ([#5545]); it always uses the operator (induced)
matrix norm.

* New `vecnorm(itr, p=2)` function that computes the norm of
any iterable collection of numbers as if it were a vector of
the same length. This generalizes and replaces `normfro` ([#6057]),
and `norm` is now type-stable ([#6056]).

* Sparse linear algebra

* Faster sparse `kron` ([#4958]).

* `sparse(A) \ B` now supports a matrix `B` of right-hand sides ([#5196]).

* `eigs(A, sigma)` now uses shift-and-invert for nonzero shifts `sigma` and inverse iteration for `which="SM"`. If `sigma==nothing` (the new default), `eigs` computes ordinary (forward) iterations ([#5776]).

* Dense linear algebra for special matrix types

* Interconversions between the special matrix types `Diagonal`, `Bidiagonal`,
@@ -283,6 +292,7 @@ Deprecated or removed
[#5427]: https://github.com/JuliaLang/julia/pull/5427
[#5748]: https://github.com/JuliaLang/julia/issues/5748
[#5511]: https://github.com/JuliaLang/julia/issues/5511
[#5776]: https://github.com/JuliaLang/julia/issues/5776
[#5819]: https://github.com/JuliaLang/julia/issues/5819
[#4871]: https://github.com/JuliaLang/julia/issues/4871
[#4996]: https://github.com/JuliaLang/julia/issues/4996
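An illustrative sketch of the `norm`/`vecnorm` split described in the NEWS entry above (editor's note, not part of the diff; assumes a Julia build containing this change):

A = [1.0 2.0; 3.0 4.0]
vecnorm(A)       # ≈ 5.477 (sqrt(30)) — Frobenius norm of the entries, replacing normfro(A)
norm(A)          # ≈ 5.465 — induced (operator) 2-norm, i.e. the largest singular value
vecnorm(A, 1)    # 10.0 — sum of absolute entries
norm(A, 1)       # 6.0  — maximum absolute column sum

# A 1-row matrix is now also treated as an operator rather than silently as a vector:
norm([3.0 4.0], 1)     # 4.0 — induced 1-norm (previously this returned the vector norm 7.0)
vecnorm([3.0 4.0], 1)  # 7.0 — the old behaviour, now requested explicitly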
1 change: 1 addition & 0 deletions base/deprecated.jl
@@ -400,6 +400,7 @@ eval(Sys, :(@deprecate shlib_list dllist))
IntSet(xs::Integer...) = (s=IntSet(); for a in xs; push!(s,a); end; s)
Set{T<:Number}(xs::T...) = Set{T}(xs)

@deprecate normfro(A) vecnorm(A)

# 0.3 discontinued functions

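A minimal sketch of the deprecation path introduced here (editor's illustration; the warning text is approximate):

A = [1.0 2.0; 3.0 4.0]
normfro(A)   # warns that normfro is deprecated, then forwards to vecnorm(A) ≈ 5.477
vecnorm(A)   # the preferred spelling going forward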
16 changes: 0 additions & 16 deletions base/dict.jl
@@ -288,22 +288,6 @@ hash(a::AbstractArray{Bool}) = hash(bitpack(a))

hash(x::ANY) = object_id(x)

if WORD_SIZE == 64
hash(s::ByteString) =
ccall(:memhash, Uint64, (Ptr{Void}, Int), s.data, length(s.data))
hash(s::ByteString, seed::Union(Int,Uint)) =
ccall(:memhash_seed, Uint64, (Ptr{Void}, Int, Uint32),
s.data, length(s.data), uint32(seed))
else
hash(s::ByteString) =
ccall(:memhash32, Uint32, (Ptr{Void}, Int), s.data, length(s.data))
hash(s::ByteString, seed::Union(Int,Uint)) =
ccall(:memhash32_seed, Uint32, (Ptr{Void}, Int, Uint32),
s.data, length(s.data), uint32(seed))
end

hash(s::String) = hash(bytestring(s))

hash(x::Expr) = bitmix(hash(x.head),hash(x.args)+43)


2 changes: 1 addition & 1 deletion base/exports.jl
@@ -627,7 +627,6 @@ export
lufact!,
lufact,
norm,
normfro,
null,
peakflops,
pinv,
@@ -656,6 +655,7 @@ export
tril,
triu!,
triu,
vecnorm,

# sparse
etree,
2 changes: 1 addition & 1 deletion base/linalg.jl
@@ -85,7 +85,6 @@ export
lufact,
lufact!,
norm,
normfro,
null,
peakflops,
pinv,
@@ -114,6 +113,7 @@ export
triu,
tril!,
triu!,
vecnorm,

# Operators
\,
10 changes: 6 additions & 4 deletions base/linalg/arnoldi.jl
@@ -3,7 +3,7 @@ using .ARPACK
## eigs

function eigs(A;nev::Integer=6, ncv::Integer=20, which::ASCIIString="LM",
tol=0.0, maxiter::Integer=1000, sigma=0,v0::Vector=zeros((0,)),
tol=0.0, maxiter::Integer=1000, sigma=nothing, v0::Vector=zeros((0,)),
ritzvec::Bool=true, complexOP::Bool=false)
n = chksquare(A)
(n <= 6) && (nev = min(n-1, nev))
@@ -13,18 +13,20 @@ function eigs(A;nev::Integer=6, ncv::Integer=20, which::ASCIIString="LM",
T = eltype(A)
cmplx = T<:Complex
bmat = "I"

if !isempty(v0)
length(v0)==n || throw(DimensionMismatch(""))
eltype(v0)==T || error("Starting vector must have eltype $T")
else
v0=zeros(T,(0,))
end

if sigma == 0
if sigma===nothing # normal iteration
mode = 1
sigma = 0
linop(x) = A * x
else
C = lufact(A - sigma*eye(A))
else # inverse iteration with level shift
C = lufact(sigma==0 ? A : A - sigma*eye(A))
if cmplx
mode = 3
linop(x) = C\x
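A hedged usage sketch of the new `sigma` handling in `eigs` (editor's note; the shape of the returned tuple has varied between Julia versions, so results are captured whole rather than destructured):

A = diagm([1.0:20.0])                        # eigenvalues 1, 2, ..., 20

out_default = eigs(A; nev=2, which="LM")     # sigma=nothing (new default): ordinary forward iterations
out_shifted = eigs(A; nev=2, sigma=3.4)      # nonzero shift: shift-and-invert, converges near 3 and 4
out_inverse = eigs(A; nev=2, sigma=0)        # sigma=0: inverse iteration via lufact(A), no shifted factorization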
2 changes: 2 additions & 0 deletions base/linalg/blas.jl
@@ -169,6 +169,7 @@ for (fname, elty, ret_type) in ((:dnrm2_,:Float64,:Float64),
end
end
nrm2(x::StridedVector) = nrm2(length(x), x, stride(x,1))
nrm2(x::Array) = nrm2(length(x), pointer(x), 1)

## asum
for (fname, elty, ret_type) in ((:dasum_,:Float64,:Float64),
@@ -185,6 +186,7 @@ for (fname, elty, ret_type) in ((:dasum_,:Float64,:Float64),
end
end
asum(x::StridedVector) = asum(length(x), x, stride(x,1))
asum(x::Array) = asum(length(x), pointer(x), 1)

## axpy
for (fname, elty) in ((:daxpy_,:Float64),
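A tiny check of the new whole-array convenience methods (editor's sketch):

x = [3.0, -4.0]
Base.LinAlg.BLAS.nrm2(x)   # 5.0 — Euclidean norm via the underlying dnrm2_
Base.LinAlg.BLAS.asum(x)   # 7.0 — sum of absolute values via dasum_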
2 changes: 1 addition & 1 deletion base/linalg/cholmod.jl
@@ -386,7 +386,7 @@ function copy{Tv<:CHMVTypes}(B::CholmodDense{Tv})
(Ptr{c_CholmodDense{Tv}},Ptr{Uint8}), &B.c, cmn(Int32)))
end

function norm{Tv<:CHMVTypes}(D::CholmodDense{Tv},p::Number=1)
function norm{Tv<:CHMVTypes}(D::CholmodDense{Tv},p::Real=1)
ccall((:cholmod_norm_dense, :libcholmod), Float64,
(Ptr{c_CholmodDense{Tv}}, Cint, Ptr{Uint8}),
&D.c, p == 1 ? 1 :(p == Inf ? 1 : throw(ArgumentError("p must be 1 or Inf"))),cmn(Int32))
4 changes: 2 additions & 2 deletions base/linalg/dense.jl
@@ -18,11 +18,11 @@ function norm{T<:BlasFloat, TI<:Integer}(x::StridedVector{T}, rx::Union(Range1{T
BLAS.nrm2(length(rx), pointer(x)+(first(rx)-1)*sizeof(T), step(rx))
end

function norm{T<:BlasFloat}(x::StridedVector{T}, p::Number=2)
function vecnorm{T<:BlasFloat}(x::Union(Array{T},StridedVector{T}), p::Real=2)
length(x) == 0 && return zero(T)
p == 1 && T <: Real && return BLAS.asum(x)
p == 2 && return BLAS.nrm2(x)
invoke(norm, (AbstractVector, Number), x, p)
invoke(vecnorm, (Any, Real), x, p)
end

function triu!{T}(M::Matrix{T}, k::Integer)
126 changes: 83 additions & 43 deletions base/linalg/generic.jl
@@ -51,54 +51,97 @@ diag(A::AbstractVector) = error("use diagm instead of diag to construct a diagon

#diagm{T}(v::AbstractVecOrMat{T})

function normMinusInf(x::AbstractVector)
n = length(x)
n > 0 || return real(zero(eltype(x)))
a = abs(x[1])
@inbounds for i = 2:n
a = min(a, abs(x[i]))
# special cases of vecnorm; note that they don't need to handle isempty(x)
function vecnormMinusInf(x)
s = start(x)
(v, s) = next(x, s)
minabs = abs(v)
while !done(x, s)
(v, s) = next(x, s)
minabs = Base.scalarmin(minabs, abs(v))
end
return a
return float(minabs)
end
function normInf(x::AbstractVector)
a = real(zero(eltype(x)))
@inbounds for i = 1:length(x)
a = max(a, abs(x[i]))
function vecnormInf(x)
s = start(x)
(v, s) = next(x, s)
maxabs = abs(v)
while !done(x, s)
(v, s) = next(x, s)
maxabs = Base.scalarmax(maxabs, abs(v))
end
return a
return float(maxabs)
end
function norm1(x::AbstractVector)
a = real(zero(eltype(x)))
@inbounds for i = 1:length(x)
a += abs(x[i])
function vecnorm1(x)
s = start(x)
(v, s) = next(x, s)
av = float(abs(v))
T = typeof(av)
sum::promote_type(Float64, T) = av
while !done(x, s)
(v, s) = next(x, s)
sum += abs(v)
end
return a
return convert(T, sum)
end
function norm2(x::AbstractVector)
nrmInfInv = inv(norm(x,Inf))
isinf(nrmInfInv) && return zero(nrmInfInv)
a = abs2(x[1]*nrmInfInv)
@inbounds for i = 2:length(x)
a += abs2(x[i]*nrmInfInv)
function vecnorm2(x)
maxabs = vecnormInf(x)
maxabs == 0 && return maxabs
s = start(x)
(v, s) = next(x, s)
T = typeof(maxabs)
scale::promote_type(Float64, T) = 1/maxabs
y = abs(v)*scale
sum::promote_type(Float64, T) = y*y
while !done(x, s)
(v, s) = next(x, s)
y = abs(v)*scale
sum += y*y
end
return sqrt(a)/nrmInfInv
return convert(T, maxabs * sqrt(sum))
end
function normp(x::AbstractVector,p::Number)
absx = convert(Vector{typeof(sqrt(abs(x[1])))}, abs(x))
dx = maximum(absx)
dx == 0 && return zero(typeof(absx))
scale!(absx, 1/dx)
pp = convert(eltype(absx), p)
return dx*sum(absx.^pp)^inv(pp)
function vecnormp(x, p)
if p > 1 || p < 0 # need to rescale to avoid overflow/underflow
maxabs = vecnormInf(x)
maxabs == 0 && return maxabs
s = start(x)
(v, s) = next(x, s)
T = typeof(maxabs)
spp::promote_type(Float64, T) = p
scale::promote_type(Float64, T) = 1/maxabs
ssum::promote_type(Float64, T) = (abs(v)*scale)^spp
while !done(x, s)
(v, s) = next(x, s)
ssum += (abs(v)*scale)^spp
end
return convert(T, maxabs * ssum^inv(spp))
else # 0 < p < 1, no need for rescaling (but technically not a true norm)
s = start(x)
(v, s) = next(x, s)
av = float(abs(v))
T = typeof(av)
pp::promote_type(Float64, T) = p
sum::promote_type(Float64, T) = av^pp
while !done(x, s)
(v, s) = next(x, s)
sum += abs(v)^pp
end
return convert(T, sum^inv(pp))
end
end
function norm(x::AbstractVector, p::Number=2)
p == 0 && return countnz(x)
p == Inf && return normInf(x)
p == -Inf && return normMinusInf(x)
p == 1 && return norm1(x)
p == 2 && return norm2(x)
normp(x,p)
function vecnorm(itr, p::Real=2)
isempty(itr) && return float(real(zero(eltype(itr))))
p == 2 && return vecnorm2(itr)
p == 1 && return vecnorm1(itr)
p == Inf && return vecnormInf(itr)
p == 0 && return convert(typeof(float(real(zero(eltype(itr))))),
countnz(itr))
p == -Inf && return vecnormMinusInf(itr)
vecnormp(itr,p)
end
vecnorm(x::Number, p::Real=2) = p == 0 ? real(x==0 ? zero(x) : one(x)) : abs(x)

norm(x::AbstractVector, p::Real=2) = vecnorm(x, p)

function norm1{T}(A::AbstractMatrix{T})
m,n = size(A)
@@ -133,9 +176,9 @@ function normInf{T}(A::AbstractMatrix{T})
end
return nrm
end
function norm{T}(A::AbstractMatrix{T}, p::Number=2)
p == 1 && return norm1(A)
function norm{T}(A::AbstractMatrix{T}, p::Real=2)
p == 2 && return norm2(A)
p == 1 && return norm1(A)
p == Inf && return normInf(A)
throw(ArgumentError("invalid p-norm p=$p. Valid: 1, 2, Inf"))
end
@@ -146,9 +189,6 @@ function norm(x::Number, p=2)
float(abs(x))
end

normfro(A::AbstractMatrix) = norm(reshape(A, length(A)))
normfro(x::Number) = abs(x)

rank(A::AbstractMatrix, tol::Real) = sum(svdvals(A) .> tol)
function rank(A::AbstractMatrix)
m,n = size(A)
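Why the rescaling by `vecnormInf` inside `vecnorm2`/`vecnormp` matters — a short illustrative check (editor's note, not part of the diff):

x = [1e300, 1e300]
sqrt(sum(x.^2))   # Inf — the naive formula overflows in the intermediate sum of squares
vecnorm(x)        # ≈ 1.414e300 — finite; both the generic path above and the BLAS fast path compute with scaling

y = [1e-300, 1e-300]
sqrt(sum(y.^2))   # 0.0 — the squares underflow to zero
vecnorm(y)        # ≈ 1.414e-300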
4 changes: 2 additions & 2 deletions base/linalg/sparse.jl
@@ -457,9 +457,9 @@ end
diff(a::SparseMatrixCSC, dim::Integer)= dim==1 ? sparse_diff1(a) : sparse_diff2(a)

## norm and rank
normfro(A::SparseMatrixCSC) = norm(A.nzval,2)
vecnorm(A::SparseMatrixCSC, p::Real=2) = vecnorm(A.nzval, p)

function norm(A::SparseMatrixCSC,p::Number=1)
function norm(A::SparseMatrixCSC,p::Real=1)
m, n = size(A)
if m == 0 || n == 0 || isempty(A)
return real(zero(eltype(A)))
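An illustrative check of the sparse method above: `vecnorm(::SparseMatrixCSC)` only visits the stored values in `A.nzval` (editor's sketch):

S = sparse([1, 2], [1, 2], [3.0, -4.0], 1000, 1000)   # two stored entries in a 1000×1000 matrix
vecnorm(S)   # 5.0 — Frobenius norm computed from the stored values, no densification
norm(S, 1)   # 4.0 — induced 1-norm (maximum absolute column sum)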
2 changes: 1 addition & 1 deletion base/pkg/entry.jl
@@ -292,7 +292,7 @@ function pull_request(dir::String, commit::String="", url::String="")
branch = "pull-request/$(commit[1:8])"
info("Pushing changes as branch $branch")
Git.run(`push -q $fork $commit:refs/heads/$branch`, dir=dir)
pr_url = "$(response["html_url"])/compare/$branch?expand=1"
pr_url = "$(response["html_url"])/compare/$branch"
@osx? run(`open $pr_url`) : info("To create a pull-request open:\n\n $pr_url\n")
end

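For clarity, a sketch of the URL the revised line produces, using hypothetical repository and branch values (editor's illustration):

response = ["html_url" => "https://github.com/SomeUser/Example.jl"]   # hypothetical API response fragment
branch = "pull-request/45f02b6a"                                      # hypothetical branch name
pr_url = "$(response["html_url"])/compare/$branch"
# "https://github.com/SomeUser/Example.jl/compare/pull-request/45f02b6a" — no trailing "?expand=1"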
8 changes: 2 additions & 6 deletions base/quadgk.jl
@@ -42,10 +42,6 @@ immutable Segment
end
isless(i::Segment, j::Segment) = isless(i.E, j.E)

# use norm(A,1) for matrices since it is cheaper than norm(A) = norm(A,2),
# but only assume that norm(x) exists for more general vector spaces
cheapnorm(A::AbstractMatrix) = norm(A,1)
cheapnorm(x) = norm(x)

# Internal routine: approximately integrate f(x) over the interval (a,b)
# by evaluating the integration rule (x,w,gw). Return a Segment.
@@ -156,14 +152,14 @@

function quadgk{T<:FloatingPoint}(f, a::T,b::T,c::T...;
abstol=zero(T), reltol=sqrt(eps(T)),
maxevals=10^7, order=7, norm=cheapnorm)
maxevals=10^7, order=7, norm=vecnorm)
do_quadgk(f, [a, b, c...], order, T, abstol, reltol, maxevals, norm)
end

function quadgk{T<:FloatingPoint}(f, a::Complex{T},
b::Complex{T},c::Complex{T}...;
abstol=zero(T), reltol=sqrt(eps(T)),
maxevals=10^7, order=7, norm=cheapnorm)
maxevals=10^7, order=7, norm=vecnorm)
do_quadgk(f, [a, b, c...], order, T, abstol, reltol, maxevals, norm)
end

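A hedged usage sketch of the new `norm=vecnorm` default, which lets `quadgk` measure the error of array-valued integrands without a special-cased matrix norm (editor's illustration):

f(x) = [sin(x), cos(x)]             # vector-valued integrand
val, err = quadgk(f, 0.0, pi/2)     # val ≈ [1.0, 1.0]; err is estimated with vecnorm
val2, err2 = quadgk(sin, 0.0, pi)   # scalar integrands are unchanged: val2 ≈ 2.0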
5 changes: 2 additions & 3 deletions base/random.jl
@@ -83,11 +83,10 @@ function make_seed(n::Integer)
seed = Uint32[]
while true
push!(seed, n & 0xffffffff)
n2 = n >> 32
if n2 == 0 || n2 == n
n >>= 32
if n == 0
return seed
end
n = n2
end
end

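A standalone sketch mirroring the revised chunking loop above (editor's illustration; `split_seed` is a hypothetical name, and a nonnegative seed is assumed, as in `make_seed`'s callers):

function split_seed(n::Integer)
    seed = Uint32[]
    while true
        push!(seed, n & 0xffffffff)   # take the low 32 bits
        n >>= 32
        n == 0 && return seed         # stop once every bit has been consumed
    end
end

split_seed(0x1234567890abcdef)   # => [0x90abcdef, 0x12345678]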