diff --git a/benchmark/core.jl b/benchmark/core.jl
index c05646d04..a0e9e229a 100644
--- a/benchmark/core.jl
+++ b/benchmark/core.jl
@@ -7,7 +7,7 @@ function bench_iteredges(g::AbstractGraph)
 end
 
 function bench_has_edge(g::AbstractGraph)
-    srand(1)
+    Random.srand(1)
     nvg = nv(g)
     srcs = rand([1:nvg;], cld(nvg, 4))
    dsts = rand([1:nvg;], cld(nvg, 4))
diff --git a/src/LightGraphs.jl b/src/LightGraphs.jl
index 71cb134d8..6d8f4a03d 100644
--- a/src/LightGraphs.jl
+++ b/src/LightGraphs.jl
@@ -1,25 +1,25 @@
 __precompile__(true)
 
 module LightGraphs
 
+using SimpleTraits
+
 import CodecZlib
 import DataStructures
-
-using SimpleTraits
-using SharedArrays
-using SparseArrays
-using LinearAlgebra
-using IterativeEigensolvers
-using SharedArrays
-using Markdown
-using DelimitedFiles
+import SharedArrays
+import SparseArrays
+import LinearAlgebra
+import IterativeEigensolvers
+import SharedArrays
+import Markdown
+import DelimitedFiles
 
 import Base: write, ==, <, *, ≈, convert, isless, issubset, union, intersect,
     reverse, reverse!, isassigned, getindex, setindex!, show,
     print, copy, in, sum, size, eltype, length, ndims, transpose,
     ctranspose, join, start, next, done, eltype, get, Pair, Tuple, zero
-import Random: GLOBAL_RNG
+import Random
 import Distributed: @distributed, @sync
 import SparseArrays: sparse, blockdiag
-import LinearAlgebra: issymmetric, mul!
+import LinearAlgebra: issymmetric, mul!, Diagonal
 
 export
 # Interface
diff --git a/src/SimpleGraphs/simpledigraph.jl b/src/SimpleGraphs/simpledigraph.jl
index 1d6767997..159a9ed61 100644
--- a/src/SimpleGraphs/simpledigraph.jl
+++ b/src/SimpleGraphs/simpledigraph.jl
@@ -31,7 +31,7 @@ SimpleDiGraph(n::T) where T<:Integer = SimpleDiGraph{T}(n)
 SimpleDiGraph(::Type{T}) where T<:Integer = SimpleDiGraph{T}(zero(T))
 
 # sparse adjacency matrix constructor: SimpleDiGraph(adjmx)
-function SimpleDiGraph{T}(adjmx::SparseMatrixCSC{U}) where T<:Integer where U<:Real
+function SimpleDiGraph{T}(adjmx::SparseArrays.SparseMatrixCSC{U}) where T<:Integer where U<:Real
     dima, dimb = size(adjmx)
     isequal(dima, dimb) || throw(ArgumentError("Adjacency / distance matrices must be square"))
 
diff --git a/src/centrality/closeness.jl b/src/centrality/closeness.jl
index d4ea76cea..bdaa3c7d8 100644
--- a/src/centrality/closeness.jl
+++ b/src/centrality/closeness.jl
@@ -44,7 +44,7 @@ function parallel_closeness_centrality(
 
     n_v = Int(nv(g))
 
-    closeness = SharedVector{Float64}(n_v)
+    closeness = SharedArrays.SharedVector{Float64}(n_v)
 
     @sync @distributed for u in vertices(g)
         if degree(g, u) == 0     # no need to do Dijkstra here
@@ -63,5 +63,5 @@ function parallel_closeness_centrality(
             end
         end
     end
-    return sdata(closeness)
+    return SharedArrays.sdata(closeness)
 end
diff --git a/src/centrality/eigenvector.jl b/src/centrality/eigenvector.jl
index 191734450..936a50c03 100644
--- a/src/centrality/eigenvector.jl
+++ b/src/centrality/eigenvector.jl
@@ -24,4 +24,4 @@ eigenvector of the adjacency matrix \$\\mathbf{A}\$.
 - Mark E. J. Newman: Networks: An Introduction.
   Oxford University Press, USA, 2010, pp. 169.
 """
-eigenvector_centrality(g::AbstractGraph) = abs.(vec(eigs(adjacency_matrix(g), nev=1)[2]))::Vector{Float64}
+eigenvector_centrality(g::AbstractGraph) = abs.(vec(IterativeEigensolvers.eigs(adjacency_matrix(g), nev=1)[2]))::Vector{Float64}
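A note on the pattern behind every hunk in this patch: switching `using Module` to `import Module` keeps only the module's name in scope, so each stdlib call must now be written fully qualified. A minimal sketch of the difference (illustration only, not part of the patch):

```julia
import LinearAlgebra
import SparseArrays

A = SparseArrays.sprand(10, 10, 0.3)   # qualified call: works under `import`
LinearAlgebra.norm(ones(3))            # likewise
# norm(ones(3))                        # UndefVarError: `using LinearAlgebra` would be needed for the bare name
```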
diff --git a/src/centrality/katz.jl b/src/centrality/katz.jl
index 37bd15e23..11d9cc4f4 100644
--- a/src/centrality/katz.jl
+++ b/src/centrality/katz.jl
@@ -30,9 +30,9 @@ the centrality calculated for each node in `g`.
 function katz_centrality(g::AbstractGraph, α::Real=0.3)
     nvg = nv(g)
     v = ones(Float64, nvg)
-    spI = sparse(one(Float64) * I, nvg, nvg)
+    spI = sparse(one(Float64) * LinearAlgebra.I, nvg, nvg)
     A = adjacency_matrix(g, Bool; dir=:in)
     v = (spI - α * A) \ v
-    v /= norm(v)
+    v /= LinearAlgebra.norm(v)
     return v
 end
diff --git a/src/centrality/radiality.jl b/src/centrality/radiality.jl
index f381ca370..8cc4c775e 100644
--- a/src/centrality/radiality.jl
+++ b/src/centrality/radiality.jl
@@ -36,8 +36,8 @@ function parallel_radiality_centrality(g::AbstractGraph)::Vector{Float64}
     n_v = nv(g)
     vs = vertices(g)
     n = ne(g)
-    meandists = SharedVector{Float64}(Int(n_v))
-    maxdists = SharedVector{Float64}(Int(n_v))
+    meandists = SharedArrays.SharedVector{Float64}(Int(n_v))
+    maxdists = SharedArrays.SharedVector{Float64}(Int(n_v))
 
     @sync @distributed for i = 1:n_v
         d = dijkstra_shortest_paths(g, vs[i])
diff --git a/src/distance.jl b/src/distance.jl
index 39b764b0e..ecf06689f 100644
--- a/src/distance.jl
+++ b/src/distance.jl
@@ -77,11 +77,11 @@ function parallel_eccentricity(
     distmx::AbstractMatrix{T} = weights(g)
 ) where T <: Real
     vlen = length(vs)
-    eccs = SharedVector{T}(vlen)
+    eccs = SharedArrays.SharedVector{T}(vlen)
     @sync @distributed for i = 1:vlen
         eccs[i] = maximum(dijkstra_shortest_paths(g, vs[i], distmx).dists)
     end
-    d = sdata(eccs)
+    d = SharedArrays.sdata(eccs)
     maximum(d) == typemax(T) && warn("Infinite path length detected")
     return d
 end
diff --git a/src/edit_distance.jl b/src/edit_distance.jl
index fb774014e..4bd684185 100644
--- a/src/edit_distance.jl
+++ b/src/edit_distance.jl
@@ -119,7 +119,7 @@ vertex v ∈ G₂.
 `p=1`: the p value for p-norm calculation.
 """
 function MinkowskiCost(μ₁::AbstractVector, μ₂::AbstractVector; p::Real=1)
-    (u, v) -> norm(μ₁[u] - μ₂[v], p)
+    (u, v) -> LinearAlgebra.norm(μ₁[u] - μ₂[v], p)
 end
 
 """
@@ -132,5 +132,5 @@ Return value similar to `MinkowskiCost`, but ensure costs smaller than 2τ.
 `τ=1`: value specifying half of the upper limit of the Minkowski cost.
 """
 function BoundedMinkowskiCost(μ₁::AbstractVector, μ₂::AbstractVector; p::Real=1, τ::Real=1)
-    (u, v) -> 1 / (1 / (2τ) + exp(-norm(μ₁[u] - μ₂[v], p)))
+    (u, v) -> 1 / (1 / (2τ) + exp(-LinearAlgebra.norm(μ₁[u] - μ₂[v], p)))
 end
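For context on the two cost constructors just touched: each returns a closure over the feature vectors, which `edit_distance` can then call as its vertex-substitution cost. A usage sketch with made-up feature values:

```julia
import LinearAlgebra

μ₁ = [0.0, 1.0, 2.0]   # hypothetical features for vertices of G₁
μ₂ = [0.5, 1.5, 2.5]   # hypothetical features for vertices of G₂
# the closure MinkowskiCost(μ₁, μ₂; p=1) builds is equivalent to:
cost = (u, v) -> LinearAlgebra.norm(μ₁[u] - μ₂[v], 1)
cost(1, 2)             # abs(0.0 - 1.5) == 1.5
```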
diff --git a/src/generators/euclideangraphs.jl b/src/generators/euclideangraphs.jl
index 67df4d224..36f8ef856 100644
--- a/src/generators/euclideangraphs.jl
+++ b/src/generators/euclideangraphs.jl
@@ -8,7 +8,7 @@ a matrix with the points' positions.
 function euclidean_graph(N::Int, d::Int; L=1., seed = -1, kws...)
     rng = LightGraphs.getRNG(seed)
-    points = rmul!(rand(rng, d, N), L)
+    points = LinearAlgebra.rmul!(rand(rng, d, N), L)
     return (euclidean_graph(points; L=L, kws...)..., points)
 end
 
@@ -49,7 +49,7 @@ function euclidean_graph(points::Matrix;
         else
             throw(ArgumentError("$bc is not a valid boundary condition"))
         end
-        dist = norm(Δ, p)
+        dist = LinearAlgebra.norm(Δ, p)
         if dist < cutoff
             e = Edge(i, j)
             add_edge!(g, e)
diff --git a/src/generators/randgraphs.jl b/src/generators/randgraphs.jl
index b748b914b..a15d46a4d 100644
--- a/src/generators/randgraphs.jl
+++ b/src/generators/randgraphs.jl
@@ -195,9 +195,9 @@ function _suitable(edges::Set{Edge}, potential_edges::Dict{T,T}) where T<:Intege
     return false
 end
 
-_try_creation(n::Integer, k::Integer, rng::AbstractRNG) = _try_creation(n, fill(k, n), rng)
+_try_creation(n::Integer, k::Integer, rng::Random.AbstractRNG) = _try_creation(n, fill(k, n), rng)
 
-function _try_creation(n::T, k::Vector{T}, rng::AbstractRNG) where T<:Integer
+function _try_creation(n::T, k::Vector{T}, rng::Random.AbstractRNG) where T<:Integer
     edges = Set{Edge}()
     m = 0
     stubs = zeros(T, sum(k))
@@ -211,7 +211,7 @@ function _try_creation(n::T, k::Vector{T}, rng::AbstractRNG) where T<:Integer
 
     while !isempty(stubs)
         potential_edges = Dict{T,T}()
-        shuffle!(rng, stubs)
+        Random.shuffle!(rng, stubs)
         for i in 1:2:length(stubs)
             s1, s2 = stubs[i:(i + 1)]
             if (s1 > s2)
@@ -298,7 +298,7 @@ function barabasi_albert!(g::AbstractGraph, n::Integer, k::Integer; seed::Int=-1
     n0 == n && return g
 
     # seed random number generator
-    seed > 0 && srand(seed)
+    seed > 0 && Random.srand(seed)
 
     # add missing vertices
     sizehint!(g.fadjlist, n)
@@ -503,7 +503,7 @@ function static_scale_free(n::Integer, m::Integer, α_out::Real, α_in::Float64;
     fitness_out = _construct_fitness(n, α_out, finite_size_correction)
     fitness_in = _construct_fitness(n, α_in, finite_size_correction)
     # eliminate correlation
-    shuffle!(fitness_in)
+    Random.shuffle!(fitness_in)
     static_fitness_model(m, fitness_out, fitness_in, seed=seed)
 end
 
@@ -755,7 +755,7 @@ mutable struct StochasticBlockModel{T<:Integer,P<:Real}
     n::T
     nodemap::Array{T}
     affinities::Matrix{P}
-    rng::MersenneTwister
+    rng::Random.MersenneTwister
 end
 
 ==(sbm::StochasticBlockModel, other::StochasticBlockModel) =
@@ -789,7 +789,7 @@ and external probabilities `externalp`.
 function sbmaffinity(internalp::Vector{T}, externalp::Real, sizes::Vector{U}) where T<:Real where U<:Integer
     numblocks = length(sizes)
     numblocks == length(internalp) || throw(ArgumentError("Inconsistent input dimensions: internalp, sizes"))
-    B = diagm(0=>internalp) + externalp * (ones(numblocks, numblocks) - I)
+    B = LinearAlgebra.diagm(0=>internalp) + externalp * (ones(numblocks, numblocks) - LinearAlgebra.I)
     return B
 end
 
@@ -810,7 +810,7 @@ function StochasticBlockModel(internalp::Vector{T}, externalp::Real,
 end
 
 
-const biclique = ones(2, 2) - eye(2)
+const biclique = ones(2, 2) - Matrix{Float64}(LinearAlgebra.I, 2, 2)
 
 #TODO: this documentation needs work. sbromberger 20170326
 """
@@ -826,7 +826,7 @@ The blocks are connected with probability `between`.
 """
 function nearbipartiteaffinity(sizes::Vector{T}, between::Real, intra::Real) where T<:Integer
     numblocks = div(length(sizes), 2)
-    return kron(between * eye(numblocks), biclique) + eye(2numblocks) * intra
+    return kron(between * Matrix{Float64}(LinearAlgebra.I, numblocks, numblocks), biclique) + Matrix{Float64}(LinearAlgebra.I, 2*numblocks, 2*numblocks) * intra
 end
 
 #Return a generator for edges from a stochastic block model near-bipartite graph.
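The `sbmaffinity` hunk above preserves the construction itself: within-block probabilities go on the diagonal and the external probability everywhere else. Restated on made-up inputs:

```julia
import LinearAlgebra

internalp = [0.5, 0.4]     # hypothetical within-block probabilities
externalp = 0.1
B = LinearAlgebra.diagm(0 => internalp) + externalp * (ones(2, 2) - LinearAlgebra.I)
# B == [0.5 0.1; 0.1 0.4]: internal probabilities on the diagonal, external off it
```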
@@ -841,7 +841,7 @@ nearbipartiteSBM(sizes, between, inter, noise; seed::Int = -1) =
 
 Generate a stream of random pairs in `1:n` using random number generator `RNG`.
 """
-function random_pair(rng::AbstractRNG, n::Integer)
+function random_pair(rng::Random.AbstractRNG, n::Integer)
     f(ch) = begin
         while true
             put!(ch, Edge(rand(rng, 1:n), rand(rng, 1:n)))
@@ -938,10 +938,10 @@ function kronecker(SCALE, edgefactor, A=0.57, B=0.19, C=0.19)
         ij .+= 2^(ib - 1) .* (hcat(ii_bit, jj_bit))
     end
 
-    p = randperm(N)
+    p = Random.randperm(N)
     ij = p[ij]
 
-    p = randperm(M)
+    p = Random.randperm(M)
     ij = ij[p, :]
 
     g = SimpleDiGraph(N)
diff --git a/src/graphcut/normalized_cut.jl b/src/graphcut/normalized_cut.jl
index 8aa8b7328..8e9658be7 100644
--- a/src/graphcut/normalized_cut.jl
+++ b/src/graphcut/normalized_cut.jl
@@ -12,13 +12,13 @@ function _normalized_cut_cost(cut, W::AbstractMatrix, D)
     return cut_cost/sum(D*cut) + cut_cost/sum(D*(.~cut))
 end
 
-function _normalized_cut_cost(cut, W::SparseMatrixCSC, D)
+function _normalized_cut_cost(cut, W::SparseArrays.SparseMatrixCSC, D)
     cut_cost = 0
-    rows = rowvals(W)
-    vals = nonzeros(W)
+    rows = SparseArrays.rowvals(W)
+    vals = SparseArrays.nonzeros(W)
     n = size(W, 2)
     for i = 1:n
-        for j in nzrange(W, i)
+        for j in SparseArrays.nzrange(W, i)
             row = rows[j]
             if cut[i] != cut[row]
                 cut_cost += vals[j]/2
@@ -65,7 +65,7 @@ function _partition_weightmx(cut, W::AbstractMatrix)
     return (W1, W2, vmap1, vmap2)
 end
 
-function _partition_weightmx(cut, W::SparseMatrixCSC)
+function _partition_weightmx(cut, W::SparseArrays.SparseMatrixCSC)
     nv = length(cut)
     nv2 = sum(cut)
     nv1 = nv - nv2
@@ -86,13 +86,13 @@ function _partition_weightmx(cut, W::SparseMatrixCSC)
         end
     end
 
-    rows = rowvals(W)
-    vals = nonzeros(W)
+    rows = SparseArrays.rowvals(W)
+    vals = SparseArrays.nonzeros(W)
     I1 = Vector{Int}(); I2 = Vector{Int}()
     J1 = Vector{Int}(); J2 = Vector{Int}()
     V1 = Vector{Float64}(); V2 = Vector{Float64}()
     for i = 1:nv
-        for j in nzrange(W, i)
+        for j in SparseArrays.nzrange(W, i)
             row = rows[j]
             if cut[i] == cut[row] == false
                 push!(I1, newvid[i])
@@ -112,18 +112,18 @@ end
 
 function _recursive_normalized_cut(W, thres=thres, num_cuts=num_cuts)
     m, n = size(W)
-    D = Diagonal(vec(sum(W, dims=2)))
+    D = LinearAlgebra.Diagonal(vec(sum(W, dims=2)))
     m == 1 && return [1]
 
     #get eigenvector corresponding to second smallest eigenvalue
-    # v = eigs(D-W, D, nev=2, which=:SR)[2][:,2]
+    # v = IterativeEigensolvers.eigs(D-W, D, nev=2, which=:SR)[2][:,2]
     # At least some versions of ARPACK have a bug, this is a workaround
     invDroot = sqrt.(inv(D)) # equal to Cholesky factorization for diagonal D
     if n > 10
-        ret = eigs(invDroot'*(D-W)*invDroot, nev=2, which=:SR)[2][:,2]
+        ret = IterativeEigensolvers.eigs(invDroot'*(D-W)*invDroot, nev=2, which=:SR)[2][:,2]
     else
-        ret = eigfact(Matrix(invDroot'*(D-W)*invDroot)).vectors[:,2]
+        ret = LinearAlgebra.eigen(Matrix(invDroot'*(D-W)*invDroot)).vectors[:,2]
     end
     v = invDroot*ret
diff --git a/src/linalg/LinAlg.jl b/src/linalg/LinAlg.jl
index 63433333e..bb2238fdb 100644
--- a/src/linalg/LinAlg.jl
+++ b/src/linalg/LinAlg.jl
@@ -1,9 +1,9 @@
 module LinAlg
 
 using SimpleTraits
-using SparseArrays
-using LinearAlgebra
-using IterativeEigensolvers
+import SparseArrays
+import LinearAlgebra
+import IterativeEigensolvers
 
 using ..LightGraphs
 
 import LightGraphs: IsDirected, AbstractGraph, inneighbors,
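The `_recursive_normalized_cut` hunks above keep the ARPACK workaround: instead of the generalized problem (D − W)v = λDv, the code solves the equivalent symmetric problem D^(−1/2)(D − W)D^(−1/2)u = λu and maps back via v = D^(−1/2)u. A self-contained sketch of that algebra on a toy matrix:

```julia
import LinearAlgebra

W = [0.0 1.0; 1.0 0.0]                            # toy weight matrix
D = LinearAlgebra.Diagonal(vec(sum(W, dims=2)))
invDroot = sqrt.(inv(D))                          # D^(-1/2); exact for diagonal D
M = Matrix(invDroot' * (D - W) * invDroot)
ret = LinearAlgebra.eigen(M).vectors[:, 2]        # second-smallest eigenpair of M
v = invDroot * ret                                # eigenvector of the original problem
```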
SparseMatrix{T} = SparseMatrixCSC{T,Int64} +const SparseMatrix{T} = SparseArrays.SparseMatrixCSC{T,Int64} """ GraphMatrix{T} @@ -85,14 +85,14 @@ function AveragingAdjacency(adjmat::CombinatorialAdjacency) return AveragingAdjacency(adjmat, sf) end -perron(adjmat::NormalizedAdjacency) = sqrt.(adjmat.A.D) / norm(sqrt.(adjmat.A.D)) +perron(adjmat::NormalizedAdjacency) = sqrt.(adjmat.A.D) / LinearAlgebra.norm(sqrt.(adjmat.A.D)) struct PunchedAdjacency{T} <: Adjacency{T} A::NormalizedAdjacency{T} perron::Vector{T} end function PunchedAdjacency(adjmat::CombinatorialAdjacency) - perron = sqrt.(adjmat.D) / norm(sqrt.(adjmat.D)) + perron = sqrt.(adjmat.D) / LinearAlgebra.norm(sqrt.(adjmat.D)) return PunchedAdjacency(NormalizedAdjacency(adjmat), perron) end @@ -111,7 +111,7 @@ struct Noop end Base.broadcast(::typeof(*), ::Noop, x) = x -Diagonal(::Noop) = Noop() +LinearAlgebra.Diagonal(::Noop) = Noop() ==(g::GraphMatrix, h::GraphMatrix) = typeof(g) == typeof(h) && (g.A == h.A) @@ -197,7 +197,7 @@ convert(::Type{CombinatorialAdjacency}, adjmat::CombinatorialAdjacency) = adjmat function sparse(lapl::M) where M<:Laplacian adjmat = adjacency(lapl) A = sparse(adjmat) - L = sparse(Diagonal(diag(lapl))) - A + L = sparse(LinearAlgebra.Diagonal(diag(lapl))) - A return L end @@ -207,7 +207,7 @@ end function sparse(adjmat::Adjacency) A = sparse(adjmat.A) - return Diagonal(prescalefactor(adjmat)) * (A * Diagonal(postscalefactor(adjmat))) + return LinearAlgebra.Diagonal(prescalefactor(adjmat)) * (A * Diagonal(postscalefactor(adjmat))) end @@ -215,7 +215,7 @@ end function convert(::Type{SparseMatrix{T}}, lapl::Laplacian{T}) where T adjmat = adjacency(lapl) A = convert(SparseMatrix{T}, adjmat) - L = sparse(Diagonal(diag(lapl))) - A + L = sparse(LinearAlgebra.Diagonal(diag(lapl))) - A return L end @@ -236,7 +236,7 @@ diag(lapl::Laplacian) = ones(size(lapl)[2]) function *(adjmat::PunchedAdjacency{T}, x::AbstractVector{T}) where T<:Number y = adjmat.A * x - return y - dot(adjmat.perron, y) * adjmat.perron + return y - LinearAlgebra.dot(adjmat.perron, y) * adjmat.perron end function mul!(Y, A::Adjacency, B) @@ -245,10 +245,10 @@ function mul!(Y, A::Adjacency, B) # The last call to mul! 
@@ -197,7 +197,7 @@ convert(::Type{CombinatorialAdjacency}, adjmat::CombinatorialAdjacency) = adjmat
 
 function sparse(lapl::M) where M<:Laplacian
     adjmat = adjacency(lapl)
     A = sparse(adjmat)
-    L = sparse(Diagonal(diag(lapl))) - A
+    L = sparse(LinearAlgebra.Diagonal(diag(lapl))) - A
     return L
 end
@@ -207,7 +207,7 @@ end
 
 function sparse(adjmat::Adjacency)
     A = sparse(adjmat.A)
-    return Diagonal(prescalefactor(adjmat)) * (A * Diagonal(postscalefactor(adjmat)))
+    return LinearAlgebra.Diagonal(prescalefactor(adjmat)) * (A * LinearAlgebra.Diagonal(postscalefactor(adjmat)))
 end
 
@@ -215,7 +215,7 @@ end
 function convert(::Type{SparseMatrix{T}}, lapl::Laplacian{T}) where T
     adjmat = adjacency(lapl)
     A = convert(SparseMatrix{T}, adjmat)
-    L = sparse(Diagonal(diag(lapl))) - A
+    L = sparse(LinearAlgebra.Diagonal(diag(lapl))) - A
     return L
 end
 
@@ -236,7 +236,7 @@ diag(lapl::Laplacian) = ones(size(lapl)[2])
 
 function *(adjmat::PunchedAdjacency{T}, x::AbstractVector{T}) where T<:Number
     y = adjmat.A * x
-    return y - dot(adjmat.perron, y) * adjmat.perron
+    return y - LinearAlgebra.dot(adjmat.perron, y) * adjmat.perron
 end
 
 function mul!(Y, A::Adjacency, B)
@@ -245,10 +245,10 @@
     # The last call to mul! must be (Y, postscalefactor, tmp)
     # so we need to write to tmp in the second step must be (tmp, A.A, Y)
     # and the first step (Y, prescalefactor, B)
-    tmp1 = Diagonal(prescalefactor(A)) * B
+    tmp1 = LinearAlgebra.Diagonal(prescalefactor(A)) * B
     tmp = similar(Y)
     mul!(tmp, A.A, tmp1)
-    return mul!(Y, Diagonal(postscalefactor(A)), tmp)
+    return mul!(Y, LinearAlgebra.Diagonal(postscalefactor(A)), tmp)
 end
 
 mul!(Y, A::CombinatorialAdjacency, B) = mul!(Y, A.A, B)
@@ -257,14 +257,14 @@ mul!(Y, A::CombinatorialAdjacency, B) = mul!(Y, A.A, B)
 
 # You can compute the StochasticAdjacency product without allocating a similar of Y.
 # This is true for all Adjacency where the postscalefactor is a Noop
 # at time of writing this is just StochasticAdjacency and CombinatorialAdjacency
 function mul!(Y, A::StochasticAdjacency, B)
-    tmp = Diagonal(prescalefactor(A)) * B
+    tmp = LinearAlgebra.Diagonal(prescalefactor(A)) * B
     mul!(Y, A.A, tmp)
     return Y
 end
 
 function mul!(Y, adjmat::PunchedAdjacency, x)
     y = adjmat.A * x
-    Y[:] = y - dot(adjmat.perron, y) * adjmat.perron
+    Y[:] = y - LinearAlgebra.dot(adjmat.perron, y) * adjmat.perron
     return Y
 end
 
@@ -290,9 +290,9 @@ function symmetrize(A::SparseMatrix, which=:or)
     end
     T = A
     if which == :triu
-        T = triu(A)
+        T = LinearAlgebra.triu(A)
     elseif which == :tril
-        T = tril(A)
+        T = LinearAlgebra.tril(A)
     elseif which == :sum
         T = A
     else
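The `mul!` methods above share one shape: prescale, multiply by the underlying adjacency, postscale, using a single temporary. The same shape with plain arrays and hypothetical scale factors (a sketch, not the package's operator types):

```julia
import LinearAlgebra

pre  = LinearAlgebra.Diagonal([1.0, 2.0])   # hypothetical prescalefactor
post = LinearAlgebra.Diagonal([0.5, 0.5])   # hypothetical postscalefactor
A    = [0.0 1.0; 1.0 0.0]
B    = ones(2)

tmp1 = pre * B                              # first step: prescale
tmp  = similar(B)
LinearAlgebra.mul!(tmp, A, tmp1)            # second step: the operator product
Y    = similar(B)
LinearAlgebra.mul!(Y, post, tmp)            # last step: postscale, written into Y
```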
diff --git a/src/linalg/spectral.jl b/src/linalg/spectral.jl
index 6b8c62b21..fef0713ae 100644
--- a/src/linalg/spectral.jl
+++ b/src/linalg/spectral.jl
@@ -49,7 +49,7 @@ function _adjacency_matrix(g::AbstractGraph{U}, T::DataType, neighborfn::Functio
         colpt[j + 1] = colpt[j] + length(dsts)
         append!(rowval, sort!(dsts))
     end
-    spmx = SparseMatrixCSC(n_v, n_v, colpt, rowval, ones(T, nz))
+    spmx = SparseArrays.SparseMatrixCSC(n_v, n_v, colpt, rowval, ones(T, nz))
 
     # this is inefficient. There should be a better way of doing this.
     # the issue is that adjacency matrix entries for self-loops are 2,
@@ -80,7 +80,7 @@ function laplacian_matrix(g::AbstractGraph{U}, T::DataType=Int; dir::Symbol=:uns
         dir = is_directed(g) ? :both : :out
     end
     A = adjacency_matrix(g, T; dir=dir)
-    D = convert(SparseMatrixCSC{T, U}, Diagonal(sparse(sum(A, dims=2)[:])))
+    D = convert(SparseArrays.SparseMatrixCSC{T, U}, LinearAlgebra.Diagonal(sparse(sum(A, dims=2)[:])))
     return D - A
 end
 
@@ -98,10 +98,10 @@ by vertex. Default values for `T` are the same as those in
 Converts the matrix to dense with ``nv^2`` memory usage.
 
 ### Implementation Notes
-Use `eigs(laplacian_matrix(g); kwargs...)` to compute some of the
+Use `IterativeEigensolvers.eigs(laplacian_matrix(g); kwargs...)` to compute some of the
 eigenvalues/eigenvectors.
 """
-laplacian_spectrum(g::AbstractGraph, T::DataType=Int; dir::Symbol=:unspec) = eigvals(Matrix(laplacian_matrix(g, T; dir=dir)))
+laplacian_spectrum(g::AbstractGraph, T::DataType=Int; dir::Symbol=:unspec) = LinearAlgebra.eigvals(Matrix(laplacian_matrix(g, T; dir=dir)))
 
 """
 Return the eigenvalues of the adjacency matrix for a graph `g`, indexed
@@ -115,14 +115,14 @@ by vertex. Default values for `T` are the same as those in
 Converts the matrix to dense with ``nv^2`` memory usage.
 
 ### Implementation Notes
-Use `eigs(adjacency_matrix(g); kwargs...)` to compute some of the
+Use `IterativeEigensolvers.eigs(adjacency_matrix(g); kwargs...)` to compute some of the
 eigenvalues/eigenvectors.
 """
 function adjacency_spectrum(g::AbstractGraph, T::DataType=Int; dir::Symbol=:unspec)
     if dir == :unspec
         dir = is_directed(g) ? :both : :out
     end
-    return eigvals(Matrix(adjacency_matrix(g, T; dir=dir)))
+    return LinearAlgebra.eigvals(Matrix(adjacency_matrix(g, T; dir=dir)))
 end
 
 """
@@ -162,7 +162,7 @@ function incidence_matrix(g::AbstractGraph, T::DataType=Int; oriented=false)
         end
     end
 
-    spmx = SparseMatrixCSC(n_v, n_e, colpt, rowval, nzval)
+    spmx = SparseArrays.SparseMatrixCSC(n_v, n_e, colpt, rowval, nzval)
     return spmx
 end
 
@@ -183,8 +183,8 @@ function spectral_distance end
     A₁ = adjacency_matrix(G₁)
     A₂ = adjacency_matrix(G₂)
 
-    λ₁ = k < nv(G₁) - 1 ? eigs(A₁, nev=k, which=:LR)[1] : eigvals(Matrix(A₁))[end:-1:(end - (k - 1))]
-    λ₂ = k < nv(G₂) - 1 ? eigs(A₂, nev=k, which=:LR)[1] : eigvals(Matrix(A₂))[end:-1:(end - (k - 1))]
+    λ₁ = k < nv(G₁) - 1 ? IterativeEigensolvers.eigs(A₁, nev=k, which=:LR)[1] : LinearAlgebra.eigvals(Matrix(A₁))[end:-1:(end - (k - 1))]
+    λ₂ = k < nv(G₂) - 1 ? IterativeEigensolvers.eigs(A₂, nev=k, which=:LR)[1] : LinearAlgebra.eigvals(Matrix(A₂))[end:-1:(end - (k - 1))]
 
     return sum(abs, (λ₁ - λ₂))
 end
diff --git a/src/shortestpaths/dijkstra.jl b/src/shortestpaths/dijkstra.jl
index cb09b3ef0..5273f3224 100644
--- a/src/shortestpaths/dijkstra.jl
+++ b/src/shortestpaths/dijkstra.jl
@@ -138,8 +138,8 @@ function parallel_multisource_dijkstra_shortest_paths(
     r_v = length(sources)
 
     # TODO: remove `Int` once julialang/#23029 / #23032 are resolved
-    dists = SharedMatrix{T}(Int(r_v), Int(n_v))
-    parents = SharedMatrix{U}(Int(r_v), Int(n_v))
+    dists = SharedArrays.SharedMatrix{T}(Int(r_v), Int(n_v))
+    parents = SharedArrays.SharedMatrix{U}(Int(r_v), Int(n_v))
 
     @sync @distributed for i in 1:r_v
         state = dijkstra_shortest_paths(g, sources[i], distmx)
@@ -147,6 +147,6 @@ function parallel_multisource_dijkstra_shortest_paths(
         parents[i, :] = state.parents
     end
 
-    result = MultipleDijkstraState(sdata(dists), sdata(parents))
+    result = MultipleDijkstraState(SharedArrays.sdata(dists), SharedArrays.sdata(parents))
     return result
 end
diff --git a/src/traversals/diffusion.jl b/src/traversals/diffusion.jl
index 5dd631d65..de7684daf 100644
--- a/src/traversals/diffusion.jl
+++ b/src/traversals/diffusion.jl
@@ -54,7 +54,7 @@ function diffusion(g::AbstractGraph{T},
                 local_p = p
             end
 
-            randsubseq!(randsubseq_buf, outn, local_p)
+            Random.randsubseq!(randsubseq_buf, outn, local_p)
             union!(new_infections, randsubseq_buf)
         end
     end
diff --git a/src/traversals/greedy_color.jl b/src/traversals/greedy_color.jl
index b5cdabf1d..2bff13dc3 100644
--- a/src/traversals/greedy_color.jl
+++ b/src/traversals/greedy_color.jl
@@ -69,7 +69,7 @@ function parallel_random_greedy_color(
 ) where T<:Integer
 
     best = @distributed (best_color) for i in 1:reps
-        seq = shuffle(vertices(g))
+        seq = Random.shuffle(vertices(g))
         perm_greedy_color(g, seq)
     end
 
@@ -87,11 +87,11 @@ function seq_random_greedy_color(
     reps::Integer
 ) where T <: Integer
 
-    seq = shuffle(vertices(g))
+    seq = Random.shuffle(vertices(g))
     best = perm_greedy_color(g, seq)
 
     for i in 2:reps
-        shuffle!(seq)
+        Random.shuffle!(seq)
         best = best_color(best, perm_greedy_color(g, seq))
     end
     return best
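All of the parallel routines in this patch follow the pattern visible in the Dijkstra hunk: fill a shared array under `@sync @distributed`, then hand back a plain `Array` via `sdata`. A runnable sketch with placeholder per-element work:

```julia
import Distributed
import SharedArrays

n = 100
out = SharedArrays.SharedVector{Float64}(n)
@sync Distributed.@distributed for i in 1:n
    out[i] = i / n        # placeholder for per-vertex work (e.g. one Dijkstra run)
end
result = SharedArrays.sdata(out)   # back to an ordinary Vector{Float64}
```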
""" -function sample!(rng::AbstractRNG, a::AbstractVector, k::Integer; exclude = ()) +function sample!(rng::Random.AbstractRNG, a::AbstractVector, k::Integer; exclude = ()) minsize = k + length(exclude) length(a) < minsize && throw(ArgumentError("vector must be at least size $minsize")) res = Vector{eltype(a)}() @@ -42,7 +42,7 @@ Unlike [`sample!`](@ref), does not produce side effects. """ sample(a::UnitRange, k::Integer; exclude = ()) = sample!(getRNG(), collect(a), k; exclude = exclude) -getRNG(seed::Integer = -1) = seed >= 0 ? MersenneTwister(seed) : GLOBAL_RNG +getRNG(seed::Integer = -1) = seed >= 0 ? Random.MersenneTwister(seed) : Random.GLOBAL_RNG """ insorted(item, collection) @@ -53,10 +53,3 @@ Return true if `item` is in sorted collection `collection`. Does not verify that `collection` is sorted. """ insorted(item, collection) = !isempty(searchsorted(collection, item)) - -""" - eye(m) - -Return the `m` x `m` identity matrix with type specified by the type of `m`. -""" -eye(m::T) where T <: Number = Matrix{T}(I, m , m) diff --git a/test/centrality/betweenness.jl b/test/centrality/betweenness.jl index 1cef66074..6a414f426 100644 --- a/test/centrality/betweenness.jl +++ b/test/centrality/betweenness.jl @@ -17,8 +17,8 @@ y = @inferred(betweenness_centrality(g, endpoints=true, normalize=false)) yp = parallel_betweenness_centrality(g, endpoints=true, normalize=false) @test all(isapprox(y, yp)) - @test round.(y[1:3], 4) == - round.([122.10760591498584, 159.0072453120582, 176.39547945994505], 4) + @test round.(y[1:3], digits=4) == + round.([122.10760591498584, 159.0072453120582, 176.39547945994505], digits=4) diff --git a/test/centrality/eigenvector.jl b/test/centrality/eigenvector.jl index 86e396c34..764527b69 100644 --- a/test/centrality/eigenvector.jl +++ b/test/centrality/eigenvector.jl @@ -4,13 +4,13 @@ for g in testgraphs(g1) y = @inferred(eigenvector_centrality(g)) - @test round.(y, 3) == round.([ + @test round.(y, digits=3) == round.([ 0.3577513877490464, 0.3577513877490464, 0.5298987782873977, 0.5298987782873977, 0.4271328349194304 - ], 3) + ], digits=3) end for g in testdigraphs(g2) y = @inferred(eigenvector_centrality(g)) - @test round.(y, 3) == round.([0.5, 0.5, 0.5, 0.5], 3) + @test round.(y, digits=3) == round.([0.5, 0.5, 0.5, 0.5], digits=3) end end diff --git a/test/centrality/katz.jl b/test/centrality/katz.jl index 05eead525..99b38d249 100644 --- a/test/centrality/katz.jl +++ b/test/centrality/katz.jl @@ -3,6 +3,6 @@ add_edge!(g5, 1, 2); add_edge!(g5, 2, 3); add_edge!(g5, 1, 3); add_edge!(g5, 3, 4) for g in testdigraphs(g5) z = @inferred(katz_centrality(g, 0.4)) - @test round.(z, 2) == [0.32, 0.44, 0.62, 0.56] + @test round.(z, digits=2) == [0.32, 0.44, 0.62, 0.56] end end diff --git a/test/centrality/pagerank.jl b/test/centrality/pagerank.jl index 45af94c44..9e4490994 100644 --- a/test/centrality/pagerank.jl +++ b/test/centrality/pagerank.jl @@ -6,7 +6,7 @@ M = Matrix{Float64}(adjacency_matrix(g)) M = M' M[:, danglingnodes] .= sum(danglingnodes) ./ nv(g) - M = M * Diagonal(1 ./ sum(M, dims=1)[:]) + M = M * LinearAlgebra.Diagonal(1 ./ sum(M, dims=1)[:]) @assert all(1.01 .>= sum(M, dims=1).>=0.999) # v = inv(I-β*M) * ((1-β)/nv(g) * ones(nv(g), 1)) v = inv(I-α*M) * ((1-α)/nv(g) * ones(nv(g), 1)) @@ -19,7 +19,7 @@ M = Matrix{Float64}(adjacency_matrix(g)) @show M = M' M[:, danglingnodes] = sum(danglingnodes) ./ nv(g) - @show M = M * Diagonal(1 ./ sum(M, dims=1)[:]) + @show M = M * LinearAlgebra.Diagonal(1 ./ sum(M, dims=1)[:]) @show sum(M,1) @assert all(1.01 .>= sum(M, 
diff --git a/test/centrality/pagerank.jl b/test/centrality/pagerank.jl
index 45af94c44..9e4490994 100644
--- a/test/centrality/pagerank.jl
+++ b/test/centrality/pagerank.jl
@@ -6,7 +6,7 @@
         M = Matrix{Float64}(adjacency_matrix(g))
         M = M'
         M[:, danglingnodes] .= sum(danglingnodes) ./ nv(g)
-        M = M * Diagonal(1 ./ sum(M, dims=1)[:])
+        M = M * LinearAlgebra.Diagonal(1 ./ sum(M, dims=1)[:])
         @assert all(1.01 .>= sum(M, dims=1).>=0.999)
         # v = inv(I-β*M) * ((1-β)/nv(g) * ones(nv(g), 1))
         v = inv(I-α*M) * ((1-α)/nv(g) * ones(nv(g), 1))
@@ -19,7 +19,7 @@
         M = Matrix{Float64}(adjacency_matrix(g))
         @show M = M'
         M[:, danglingnodes] = sum(danglingnodes) ./ nv(g)
-        @show M = M * Diagonal(1 ./ sum(M, dims=1)[:])
+        @show M = M * LinearAlgebra.Diagonal(1 ./ sum(M, dims=1)[:])
         @show sum(M,1)
         @assert all(1.01 .>= sum(M, 1).>=0.999)
         return α*M .+ (1-α)*p
diff --git a/test/connectivity.jl b/test/connectivity.jl
index c0df23856..1471f8ea1 100644
--- a/test/connectivity.jl
+++ b/test/connectivity.jl
@@ -128,12 +128,12 @@
 
     # figure 2 example
     fig2 = spzeros(5, 5)
-    fig2[[3, 10, 11, 13, 14, 17, 18, 19, 22]] = 1
+    fig2[[3, 10, 11, 13, 14, 17, 18, 19, 22]] .= 1
     fig2 = SimpleDiGraph(fig2)
 
     # figure 3 example
     fig3 = spzeros(8, 8)
-    fig3[[1, 7, 9, 13, 14, 15, 18, 20, 23, 27, 28, 31, 33, 34, 37, 45, 46, 49, 57, 63, 64]] = 1
+    fig3[[1, 7, 9, 13, 14, 15, 18, 20, 23, 27, 28, 31, 33, 34, 37, 45, 46, 49, 57, 63, 64]] .= 1
     fig3 = SimpleDiGraph(fig3)
     scc_fig3 = Vector[[3, 4], [2, 5, 6], [8], [1, 7]]
     fig3_cond = SimpleDiGraph(4);
@@ -149,7 +149,7 @@
 
     # figure 8 example
     fig8 = spzeros(6, 6)
-    fig8[[2, 10, 13, 21, 24, 27, 35]] = 1
+    fig8[[2, 10, 13, 21, 24, 27, 35]] .= 1
     fig8 = SimpleDiGraph(fig8)
 
     @test Set(@inferred(strongly_connected_components(fig1))) == Set(scc_fig1)
diff --git a/test/generators/binomial.jl b/test/generators/binomial.jl
index 37a741157..f7cf13a24 100644
--- a/test/generators/binomial.jl
+++ b/test/generators/binomial.jl
@@ -5,6 +5,7 @@ using Distributions
 using LightGraphs
 using StatsBase
 using Base.Test
+import Random
 import Base: -
 import LightGraphs: randbn
 
@@ -37,7 +38,7 @@ function binomial_test(n, p, s)
     @show dσ - lσ
     @test abs(dσ - lσ) / dσ < .10
 end
-srand(1234)
+Random.srand(1234)
 n = 10000
 p = 0.3
 s = 100000
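The connectivity fixtures above show the other mechanical 0.7 change: assigning one scalar across an indexed selection now requires broadcast assignment. Sketch:

```julia
import SparseArrays

fig2 = SparseArrays.spzeros(5, 5)
fig2[[3, 10, 11, 13, 14, 17, 18, 19, 22]] .= 1   # was `= 1` on Julia 0.6
```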
- @test all(sum(bc-diagm(0=>diag(bc)), dims=1) .<= 5//4 .* diag(bc)) + @test all(sum(bc-LinearAlgebra.diagm(0=>diag(bc)), dims=1) .<= 5//4 .* diag(bc)) sbm2 = StochasticBlockModel(0.5*ones(4), 0.3, 10*ones(Int,4)) diff --git a/test/graphcut/normalized_cut.jl b/test/graphcut/normalized_cut.jl index 25701effb..6eaeedd82 100644 --- a/test/graphcut/normalized_cut.jl +++ b/test/graphcut/normalized_cut.jl @@ -33,7 +33,7 @@ @test labels == [1, 1, 1, 2, 2, 2] || labels == [2, 2, 2, 1, 1, 1] end - w = SparseMatrixCSC(w) + w = SparseArrays.SparseMatrixCSC(w) for g in testgraphs(gx) labels = @inferred(normalized_cut(g, 1, w)) @test labels == [1, 1, 1, 2, 2, 2] || labels == [2, 2, 2, 1, 1, 1] @@ -53,7 +53,7 @@ @test labels == [1, 1, 2, 2] || labels == [2, 2, 1, 1] end - w = SparseMatrixCSC(w) + w = SparseArrays.SparseMatrixCSC(w) for g in testgraphs(gx) labels = @inferred(normalized_cut(g, 0.1, w)) @test labels == [1, 1, 2, 2] || labels == [2, 2, 1, 1] diff --git a/test/linalg/graphmatrices.jl b/test/linalg/graphmatrices.jl index 00ab3fd83..47ca0cbca 100644 --- a/test/linalg/graphmatrices.jl +++ b/test/linalg/graphmatrices.jl @@ -17,10 +17,10 @@ adjmat, stochmat, adjhat, avgmat = constructors(mat) @test adjmat.D == vec(sum(mat, dims=1)) @test adjmat.A == mat - @test isa(sparse(mat), SparseMatrixCSC) - @test isa(sparse(stochmat), SparseMatrixCSC) - @test isa(sparse(adjhat), SparseMatrixCSC) - @test isa(sparse(avgmat), SparseMatrixCSC) + @test isa(sparse(mat), SparseArrays.SparseMatrixCSC) + @test isa(sparse(stochmat), SparseArrays.SparseMatrixCSC) + @test isa(sparse(adjhat), SparseArrays.SparseMatrixCSC) + @test isa(sparse(avgmat), SparseArrays.SparseMatrixCSC) @test isa(convert(CombinatorialAdjacency, adjmat), CombinatorialAdjacency) @test isa(convert(CombinatorialAdjacency, avgmat), CombinatorialAdjacency) @test prescalefactor(adjhat) == postscalefactor(adjhat) @@ -94,13 +94,13 @@ @test g(NormalizedLaplacian(adjhat)) > 1e-13 @test g(StochasticLaplacian(stochmat)) > 1e-13 - @test eigs(adjmat, which=:LR)[1][1] > 1.0 - @test eigs(stochmat, which=:LR)[1][1] ≈ 1.0 - @test eigs(avgmat, which=:LR)[1][1] ≈ 1.0 - @test eigs(lapl, which=:LR)[1][1] > 2.0 - @test_throws MethodError eigs(lapl, which=:SM)[1][1] # --> greater_than(-0.0) + @test IterativeEigensolvers.eigs(adjmat, which=:LR)[1][1] > 1.0 + @test IterativeEigensolvers.eigs(stochmat, which=:LR)[1][1] ≈ 1.0 + @test IterativeEigensolvers.eigs(avgmat, which=:LR)[1][1] ≈ 1.0 + @test IterativeEigensolvers.eigs(lapl, which=:LR)[1][1] > 2.0 + @test_throws MethodError IterativeEigensolvers.eigs(lapl, which=:SM)[1][1] # --> greater_than(-0.0) lhat = NormalizedLaplacian(adjhat) - @test eigs(lhat, which=:LR)[1][1] < 2.0 + 1e-9 + @test IterativeEigensolvers.eigs(lhat, which=:LR)[1][1] < 2.0 + 1e-9 end function test_other(mat, n) @@ -141,8 +141,8 @@ @test_throws MethodError symmetrize(NormalizedAdjacency(adjmat)).A # --> adjmat.A @test symmetrize(adjmat).A == adjmat.A # these tests are basically the code - @test symmetrize(adjmat, :triu).A == triu(adjmat.A) + triu(adjmat.A)' - @test symmetrize(adjmat, :tril).A == tril(adjmat.A) + tril(adjmat.A)' + @test symmetrize(adjmat, :triu).A == LinearAlgebra.triu(adjmat.A) + LinearAlgebra.triu(adjmat.A)' + @test symmetrize(adjmat, :tril).A == LinearAlgebra.tril(adjmat.A) + LinearAlgebra.tril(adjmat.A)' @test symmetrize(adjmat, :sum).A == adjmat.A + adjmat.A @test_throws ArgumentError symmetrize(adjmat, :fake) @@ -152,16 +152,16 @@ adjmat = CombinatorialAdjacency(mat) ahatp = PunchedAdjacency(adjmat) y = ahatp * perron(ahatp) - 
diff --git a/test/linalg/graphmatrices.jl b/test/linalg/graphmatrices.jl
index 00ab3fd83..47ca0cbca 100644
--- a/test/linalg/graphmatrices.jl
+++ b/test/linalg/graphmatrices.jl
@@ -17,10 +17,10 @@
         adjmat, stochmat, adjhat, avgmat = constructors(mat)
         @test adjmat.D == vec(sum(mat, dims=1))
         @test adjmat.A == mat
-        @test isa(sparse(mat), SparseMatrixCSC)
-        @test isa(sparse(stochmat), SparseMatrixCSC)
-        @test isa(sparse(adjhat), SparseMatrixCSC)
-        @test isa(sparse(avgmat), SparseMatrixCSC)
+        @test isa(sparse(mat), SparseArrays.SparseMatrixCSC)
+        @test isa(sparse(stochmat), SparseArrays.SparseMatrixCSC)
+        @test isa(sparse(adjhat), SparseArrays.SparseMatrixCSC)
+        @test isa(sparse(avgmat), SparseArrays.SparseMatrixCSC)
         @test isa(convert(CombinatorialAdjacency, adjmat), CombinatorialAdjacency)
         @test isa(convert(CombinatorialAdjacency, avgmat), CombinatorialAdjacency)
         @test prescalefactor(adjhat) == postscalefactor(adjhat)
@@ -94,13 +94,13 @@
         @test g(NormalizedLaplacian(adjhat)) > 1e-13
         @test g(StochasticLaplacian(stochmat)) > 1e-13
 
-        @test eigs(adjmat, which=:LR)[1][1] > 1.0
-        @test eigs(stochmat, which=:LR)[1][1] ≈ 1.0
-        @test eigs(avgmat, which=:LR)[1][1] ≈ 1.0
-        @test eigs(lapl, which=:LR)[1][1] > 2.0
-        @test_throws MethodError eigs(lapl, which=:SM)[1][1] # --> greater_than(-0.0)
+        @test IterativeEigensolvers.eigs(adjmat, which=:LR)[1][1] > 1.0
+        @test IterativeEigensolvers.eigs(stochmat, which=:LR)[1][1] ≈ 1.0
+        @test IterativeEigensolvers.eigs(avgmat, which=:LR)[1][1] ≈ 1.0
+        @test IterativeEigensolvers.eigs(lapl, which=:LR)[1][1] > 2.0
+        @test_throws MethodError IterativeEigensolvers.eigs(lapl, which=:SM)[1][1] # --> greater_than(-0.0)
         lhat = NormalizedLaplacian(adjhat)
-        @test eigs(lhat, which=:LR)[1][1] < 2.0 + 1e-9
+        @test IterativeEigensolvers.eigs(lhat, which=:LR)[1][1] < 2.0 + 1e-9
     end
 
     function test_other(mat, n)
@@ -141,8 +141,8 @@
         @test_throws MethodError symmetrize(NormalizedAdjacency(adjmat)).A # --> adjmat.A
         @test symmetrize(adjmat).A == adjmat.A
         # these tests are basically the code
-        @test symmetrize(adjmat, :triu).A == triu(adjmat.A) + triu(adjmat.A)'
-        @test symmetrize(adjmat, :tril).A == tril(adjmat.A) + tril(adjmat.A)'
+        @test symmetrize(adjmat, :triu).A == LinearAlgebra.triu(adjmat.A) + LinearAlgebra.triu(adjmat.A)'
+        @test symmetrize(adjmat, :tril).A == LinearAlgebra.tril(adjmat.A) + LinearAlgebra.tril(adjmat.A)'
         @test symmetrize(adjmat, :sum).A == adjmat.A + adjmat.A
 
         @test_throws ArgumentError symmetrize(adjmat, :fake)
@@ -152,16 +152,16 @@
         adjmat = CombinatorialAdjacency(mat)
         ahatp = PunchedAdjacency(adjmat)
         y = ahatp * perron(ahatp)
-        @test dot(y, ahatp.perron) ≈ 0.0 atol = 1.0e-8
+        @test LinearAlgebra.dot(y, ahatp.perron) ≈ 0.0 atol = 1.0e-8
         @test sum(abs, y) ≈ 0.0 atol = 1.0e-8
-        eval, evecs = eigs(ahatp, which=:LM)
+        eval, evecs = IterativeEigensolvers.eigs(ahatp, which=:LM)
         @test eval[1] - (1 + 1.0e-8) <= 0
-        @test dot(perron(ahatp), evecs[:, 1]) ≈ 0.0 atol = 1e-8
+        @test LinearAlgebra.dot(perron(ahatp), evecs[:, 1]) ≈ 0.0 atol = 1e-8
         ahat = ahatp.A
         @test isa(ahat, NormalizedAdjacency)
 
         z = ahatp * perron(ahat)
-        @test norm(z) ≈ 0.0 atol = 1e-8
+        @test LinearAlgebra.norm(z) ≈ 0.0 atol = 1e-8
     end
 
 
@@ -183,7 +183,7 @@
     """Computes the stationary distribution of a random walk"""
     function stationarydistribution(R::StochasticAdjacency; kwargs...)
-        er = eigs(R, nev=1, which=:LR; kwargs...)
+        er = IterativeEigensolvers.eigs(R, nev=1, which=:LR; kwargs...)
         l1 = er[1][1]
         abs(l1 - 1) < 1e-8 || error("failed to compute stationary distribution") # TODO 0.7: should we change the error type to InexactError?
         p = real(er[2][:, 1])
@@ -204,7 +204,7 @@
     n = 100
     p = 16 / n
     M = sprand(n, n, p)
-    M.nzval[:] = 1.0
+    M.nzval[:] .= 1.0
     A = CombinatorialAdjacency(M)
     sd = stationarydistribution(A; ncv=10)
     @test all(sd .>= 0)
diff --git a/test/linalg/spectral.jl b/test/linalg/spectral.jl
index 2147c3464..ae33b258a 100644
--- a/test/linalg/spectral.jl
+++ b/test/linalg/spectral.jl
@@ -93,9 +93,9 @@ Matrix(nbt::Nonbacktracking) = Matrix(sparse(nbt))
             T = eltype(g)
             amat = adjacency_matrix(g, Float64; dir=dir)
             lmat = laplacian_matrix(g, Float64; dir=dir)
-            @test isa(amat, SparseMatrixCSC{Float64,T})
-            @test isa(lmat, SparseMatrixCSC{Float64,T})
-            evals = eigvals(Matrix(lmat))
+            @test isa(amat, SparseArrays.SparseMatrixCSC{Float64,T})
+            @test isa(lmat, SparseArrays.SparseMatrixCSC{Float64,T})
+            evals = LinearAlgebra.eigvals(Matrix(lmat))
             @test all(evals .>= -1e-15) # positive semidefinite
             @test (minimum(evals)) ≈ 0 atol = 1e-13
         end
@@ -133,20 +133,20 @@ Matrix(nbt::Nonbacktracking) = Matrix(sparse(nbt))
         B, emap = non_backtracking_matrix(g)
         Bs = sparse(nbt)
         @test sparse(B) == Bs
-        @test eigs(nbt, nev=1)[1] ≈ eigs(B, nev=1)[1] atol = 1e-5
+        @test IterativeEigensolvers.eigs(nbt, nev=1)[1] ≈ IterativeEigensolvers.eigs(B, nev=1)[1] atol = 1e-5
 
         # check that matvec works
         x = ones(Float64, nbt.m)
         y = nbt * x
         z = B * x
-        @test norm(y - z) < 1e-8
+        @test LinearAlgebra.norm(y - z) < 1e-8
 
         #check that matmat works and Matrix(nbt) == B
-        @test norm(nbt * LightGraphs.eye(nbt.m) - B) < 1e-8
+        @test LinearAlgebra.norm(nbt * Matrix{Float64}(LinearAlgebra.I, nbt.m, nbt.m) - B) < 1e-8
 
         #check that matmat works and Matrix(nbt) == B
-        @test norm(nbt * LightGraphs.eye(nbt.m) - B) < 1e-8
+        @test LinearAlgebra.norm(nbt * Matrix{Float64}(LinearAlgebra.I, nbt.m, nbt.m) - B) < 1e-8
 
         #check that we can use the implicit matvec in nonbacktrack_embedding
         @test size(y) == size(x)
@@ -156,7 +156,6 @@ Matrix(nbt::Nonbacktracking) = Matrix(sparse(nbt))
         @test Matrix(B₁) == Matrix(B)
         @test B₁ * ones(size(B₁)[2]) == B * ones(size(B)[2])
         @test size(B₁) == size(B)
-        # @test norm(eigs(B₁)[1] - eigs(B)[1]) ≈ 0.0 atol=1e-8
         @test !issymmetric(B₁)
         @test eltype(B₁) == Float64
     end
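The non-backtracking tests above spell out the dense identity that `LightGraphs.eye` (deleted from src/utils.jl earlier in this patch) used to provide. One subtlety worth noting, sketched below: the old `eye(m::T)` returned `Matrix{T}`, so the new code pins `Float64` explicitly:

```julia
import LinearAlgebra

m = 4
Matrix{Float64}(LinearAlgebra.I, m, m)   # replaces LightGraphs.eye(m) in these tests
# the old eye(4) would have been Matrix{Int}(I, 4, 4) — an integer identity
```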
diff --git a/test/simplegraphs/simplegraphs.jl b/test/simplegraphs/simplegraphs.jl
index 9a56381e9..25f0bf084 100644
--- a/test/simplegraphs/simplegraphs.jl
+++ b/test/simplegraphs/simplegraphs.jl
@@ -194,8 +194,8 @@ using Random
         # We create an edge list, shuffle it and reverse half of its edges
         # using this edge list should result in the same graph
         edge_list = [e for e in edges(g)]
-        shuffle!(MersenneTwister(0), edge_list)
-        for i in rand(MersenneTwister(0), 1:length(edge_list), length(edge_list) ÷ 2)
+        Random.shuffle!(Random.MersenneTwister(0), edge_list)
+        for i in rand(Random.MersenneTwister(0), 1:length(edge_list), length(edge_list) ÷ 2)
             e = edge_list[i]
             Te = typeof(e)
             edge_list[i] = Te(dst(e), src(e))
@@ -229,7 +229,7 @@ using Random
     for g in testdigraphs(g_dir)
         # We create an edge list and shuffle it
         edge_list = [e for e in edges(g)]
-        shuffle!(MersenneTwister(0), edge_list)
+        Random.shuffle!(Random.MersenneTwister(0), edge_list)
         edge_iter = (e for e in edge_list)
         edge_set = Set(edge_list)
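Last, the fixture changes above pass explicit `MersenneTwister` instances so the shuffle is reproducible from run to run. Reproducibility sketch:

```julia
import Random

v1 = Random.shuffle!(Random.MersenneTwister(0), collect(1:10))
v2 = Random.shuffle!(Random.MersenneTwister(0), collect(1:10))
v1 == v2   # true: same seed, same permutation
```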