Skip to content

Commit

Permalink
Merge branch 'master' into manybody
Browse files Browse the repository at this point in the history
  • Loading branch information
aryavorskiy committed Mar 6, 2024
2 parents 252b912 + bb71f52 commit 15a3b53
Show file tree
Hide file tree
Showing 15 changed files with 255 additions and 35 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/breakage.yml
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ jobs:
with:
version: 1
arch: x64
- uses: actions/cache@v3
- uses: actions/cache@v4
env:
cache-name: cache-artifacts
with:
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/ci-jet.yml
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ jobs:
with:
version: ${{ matrix.version }}
arch: ${{ matrix.arch }}
- uses: actions/cache@v3
- uses: actions/cache@v4
env:
cache-name: cache-artifacts
with:
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ jobs:
with:
version: ${{ matrix.version }}
arch: ${{ matrix.arch }}
- uses: actions/cache@v3
- uses: actions/cache@v4
env:
cache-name: cache-artifacts
with:
Expand Down
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
/docs/build/
Manifest.toml
.vscode
27 changes: 5 additions & 22 deletions Project.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
name = "QuantumOpticsBase"
uuid = "4f57444f-1401-5e15-980d-4471b28d5678"
version = "0.4.18"
version = "0.4.21"

[deps]
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
Expand All @@ -21,31 +21,14 @@ UnsafeArrays = "c4a57d5a-5b31-53a6-b365-19f8c011fbd6"
Adapt = "1, 2, 3.3"
FFTW = "1.2"
FastExpm = "1.1.0"
FastGaussQuadrature = "0.5"
FastGaussQuadrature = "0.5, 1"
FillArrays = "0.13, 1"
LRUCache = "1"
LinearAlgebra = "1"
QuantumInterface = "0.3.3"
Random = "1"
RandomMatrices = "0.5"
SparseArrays = "1"
Strided = "1, 2"
UnsafeArrays = "1"
julia = "1.6"

[extras]
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341"
FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b"
JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b"
LRUCache = "8ac3fa9e-de4c-5943-b1dc-09c6b5f20637"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
Strided = "5e0ebb24-38b0-5f93-81fe-25c709ecae67"
StridedViews = "4db3bf67-4bd7-4b4e-b153-31dc3fb37143"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
UnsafeArrays = "c4a57d5a-5b31-53a6-b365-19f8c011fbd6"

[targets]
test = ["LinearAlgebra", "SparseArrays", "Random", "Test", "Aqua", "JET", "Adapt", "Dates", "FFTW", "LRUCache", "Strided", "StridedViews", "UnsafeArrays", "FillArrays", "StatsBase"]
3 changes: 3 additions & 0 deletions src/QuantumOpticsBase.jl
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,8 @@ export Basis, GenericBasis, CompositeBasis, basis,
#operators_lazytensor
LazyTensor, lazytensor_use_cache, lazytensor_clear_cache,
lazytensor_cachesize, lazytensor_disable_cache, lazytensor_enable_cache,
#states_lazyket
LazyKet,
#time_dependent_operators
AbstractTimeDependentOperator, TimeDependentSum, set_time!,
current_time, time_shift, time_stretch, time_restrict,
Expand Down Expand Up @@ -76,6 +78,7 @@ include("operators_lazysum.jl")
include("operators_lazyproduct.jl")
include("operators_lazytensor.jl")
include("time_dependent_operator.jl")
include("states_lazyket.jl")
include("superoperators.jl")
include("spin.jl")
include("fock.jl")
Expand Down
5 changes: 4 additions & 1 deletion src/operators.jl
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import Base: ==, +, -, *, /, ^, length, one, exp, conj, conj!, transpose
import LinearAlgebra: tr, ishermitian
import SparseArrays: sparse
import QuantumInterface: AbstractOperator
import QuantumInterface: AbstractOperator, AbstractKet

"""
Abstract type for operators with a data field.
Expand Down Expand Up @@ -111,6 +111,9 @@ Expectation value of the given operator `op` for the specified `state`.
"""
expect(op::AbstractOperator{B,B}, state::Ket{B}) where B = dot(state.data, (op * state).data)

# TODO upstream this one
# expect(op::AbstractOperator{B,B}, state::AbstractKet{B}) where B = norm(op * state) ^ 2

function expect(indices, op::AbstractOperator{B,B}, state::Ket{B2}) where {B,B2<:CompositeBasis}
N = length(state.basis.shape)
indices_ = complement(N, indices)
Expand Down
2 changes: 1 addition & 1 deletion src/operators_lazytensor.jl
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ if there is no corresponding operator (i.e. it would be an identity operator).
suboperator(op::LazyTensor, index::Integer) = op.operators[findfirst(isequal(index), op.indices)]

"""
suboperators(op::LazyTensor, index)
suboperators(op::LazyTensor, indices)
Return the suboperators corresponding to the subsystems specified by `indices`. Fails
if there is no corresponding operator (i.e. it would be an identity operator).
Expand Down
3 changes: 2 additions & 1 deletion src/operators_sparse.jl
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ mul!(result::Bra{B2},b::Bra{B1},M::SparseOpPureType{B1,B2},alpha,beta) where {B1
-(op1::EyeOpType{BL,BR},op2::SparseOpType{BL,BR}) where {BL,BR} = sparse(op1) - op2
+(op1::SparseOpType{BL,BR},op2::EyeOpType{BL,BR}) where {BL,BR} = sparse(op2) + op1
-(op2::SparseOpType{BL,BR},op1::EyeOpType{BL,BR}) where {BL,BR} = op2 - sparse(op1)
+(op1::EyeOpType{BL,BR},op2::EyeOpType{BL,BR}) where {BL,BR} = sparse(op1) + sparse(op1)
+(op1::EyeOpType{BL,BR},op2::EyeOpType{BL,BR}) where {BL,BR} = sparse(op1) + sparse(op2)
-(op2::EyeOpType{BL,BR},op1::EyeOpType{BL,BR}) where {BL,BR} = sparse(op2) - sparse(op1)
-(op::EyeOpType) = -sparse(op)
*(op::EyeOpType,a::T) where {T<:Number} = a*sparse(op)
Expand All @@ -118,3 +118,4 @@ mul!(result::Bra{B2},b::Bra{B1},M::SparseOpPureType{B1,B2},alpha,beta) where {B1
tensor(a::EyeOpType, b::SparseOpType) = tensor(sparse(a),b)
tensor(a::SparseOpType, b::EyeOpType) = tensor(a,sparse(b))
tensor(a::EyeOpType, b::EyeOpType) = tensor(sparse(a),sparse(b))
dagger(a::EyeOpType) = dagger(sparse(a))
148 changes: 148 additions & 0 deletions src/states_lazyket.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,148 @@
"""
LazyKet(b, kets)
Lazy implementation of a tensor product of kets.
The subkets are stored in the `kets` field.
The main purpose of such a ket are simple computations for large product states, such as expectation values.
It's used to compute numeric initial states in QuantumCumulants.jl (see QuantumCumulants.initial_values).
"""
mutable struct LazyKet{B,T} <: AbstractKet{B,T}
basis::B
kets::T
function LazyKet(b::B, kets::T) where {B<:CompositeBasis,T<:Tuple}
N = length(b.bases)
for n=1:N
@assert isa(kets[n], Ket)
@assert kets[n].basis == b.bases[n]
end
new{B,T}(b, kets)
end
end
function LazyKet(b::CompositeBasis, kets::Vector)
return LazyKet(b,Tuple(kets))
end

# Element type of a LazyKet: the promotion of all subkets' element types.
Base.eltype(ket::LazyKet) = promote_type(map(eltype, ket.kets)...)

# Two LazyKets compare equal iff both the basis and every subket agree.
function Base.isequal(x::LazyKet, y::LazyKet)
    return isequal(x.basis, y.basis) && isequal(x.kets, y.kets)
end
function Base.:(==)(x::LazyKet, y::LazyKet)
    return (x.basis == y.basis) && (x.kets == y.kets)
end

# Conversion to a dense Ket: take the full tensor product of all subkets.
# (The tensor-product operator was lost in transcription here; `(ket.kets...)`
# alone would just rebuild the tuple instead of producing a Ket.)
Ket(ket::LazyKet) = reduce(tensor, ket.kets)

# No lazy bras for now: dagger would need a LazyBra type, which does not exist.
# NOTE: the original threw `MethodError("…")`, but MethodError's constructor
# expects `(f, args)`, not a message string — use ArgumentError instead.
dagger(x::LazyKet) = throw(ArgumentError("dagger not implemented for LazyKet: LazyBra is currently not implemented at all!"))

# Tensor products with other kets extend the lazy factorization.
# (Restores the basis tensor product that was dropped in transcription;
# written as explicit `tensor(...)` calls, equivalent to `⊗`.)
function tensor(x::LazyKet, y::Ket)
    return LazyKet(tensor(x.basis, y.basis), (x.kets..., y))
end
function tensor(x::Ket, y::LazyKet)
    return LazyKet(tensor(x.basis, y.basis), (x, y.kets...))
end
function tensor(x::LazyKet, y::LazyKet)
    return LazyKet(tensor(x.basis, y.basis), (x.kets..., y.kets...))
end

# The norm of a tensor-product state factorizes over the subkets.
norm(state::LazyKet) = prod(norm, state.kets)

# In-place normalization: normalizing every factor normalizes the product.
function normalize!(state::LazyKet)
    foreach(normalize!, state.kets)
    return state
end

# Out-of-place variant: work on a deep copy so the input stays untouched.
function normalize(state::LazyKet)
    normalized = deepcopy(state)
    normalize!(normalized)
    return normalized
end

# expect

"""
    expect(op::LazyTensor, state::LazyKet)

Expectation value of a `LazyTensor` in a lazy product state. Because both the
operator and the state factorize over the subsystems, the expectation value is
the product of per-subsystem expectation values (times the operator's scalar
factor) — no dense tensor product is ever formed.
"""
function expect(op::LazyTensor{B, B}, state::LazyKet{B}) where B <: Basis
    check_samebases(op); check_samebases(op.basis_l, state.basis)
    inds = op.indices
    kets = state.kets

    T = promote_type(eltype(op), eltype(state))
    exp_val = convert(T, op.factor)

    # Each suboperator acts on exactly one factor of the product state.
    for (i, op_) in enumerate(op.operators)
        exp_val *= expect(op_, kets[inds[i]])
    end

    # Factors without an operator contribute their squared norm
    # (one for normalized subkets, but don't assume normalization).
    # Restores the set-membership test garbled in transcription (`i ∉ inds`).
    for i in 1:length(kets)
        if !(i in inds)
            exp_val *= dot(kets[i].data, kets[i].data)
        end
    end

    return exp_val
end

# Expectation value of a LazyProduct in a lazy product state: since
# ⟨op1*op2⟩ does not factorize, the factors are applied to the state one by
# one (right to left), then the overlap with the original state is taken.
function expect(op::LazyProduct{B,B}, state::LazyKet{B}) where B <: Basis
    check_samebases(op); check_samebases(op.basis_l, state.basis)

    # Two working copies: `src` holds the state before each factor is
    # applied, `dst` receives the result of the application.
    src = deepcopy(state)
    dst = deepcopy(state)
    for k = length(op.operators):-1:1
        mul!(dst, op.operators[k], src)
        for j = 1:length(state.kets)
            copyto!(src.kets[j].data, dst.kets[j].data)
        end
    end

    # The overlap ⟨state|op|state⟩ factorizes over the subsystems.
    T = promote_type(eltype(op), eltype(state))
    result = convert(T, op.factor)
    for j = 1:length(state.kets)
        result *= dot(state.kets[j].data, dst.kets[j].data)
    end

    return result
end

# Expectation value of a LazySum in a lazy product state: expectation values
# are linear, so sum the weighted expectation values of the subterms.
function expect(op::LazySum{B,B}, state::LazyKet{B}) where B <: Basis
    check_samebases(op); check_samebases(op.basis_l, state.basis)

    T = promote_type(eltype(op), eltype(state))
    total = zero(T)
    for (coeff, term) in zip(op.factors, op.operators)
        total += coeff * expect(term, state)
    end
    return total
end


# mul! with lazytensor -- needed to compute lazyproduct averages (since
# ⟨op1 * op2⟩ doesn't factorize). This 3-arg form forwards to the 5-arg
# method with alpha = 1, beta = 0 in the promoted element type.
function mul!(y::LazyKet{BL}, op::LazyOperator{BL,BR}, x::LazyKet{BR}) where {BL,BR}
    T = promote_type(eltype(y), eltype(op), eltype(x))
    return mul!(y, op, x, one(T), zero(T))
end
# In-place application of a LazyTensor to a LazyKet. Only beta == 0 is
# supported, since adding LazyKets is not implemented.
function mul!(y::LazyKet{BL}, op::LazyTensor{BL, BR}, x::LazyKet{BR}, alpha, beta) where {BL, BR}
    iszero(beta) || throw("Error: cannot perform muladd operation on LazyKets since addition is not implemented. Convert them to dense kets using Ket(x) in order to perform muladd operations.")

    # alpha == 0: the result is the zero state; no operator application needed.
    # BUG FIX: the original returned the undefined name `result`; return `y`.
    iszero(alpha) && (_zero_op_mul!(y.kets[1].data, beta); return y)

    # A subsystem without an operator is only allowed when left and right
    # bases coincide (the implicit identity maps a basis onto itself).
    missing_index_allowed = samebases(op)
    (length(y.basis.bases) == length(x.basis.bases)) || throw(IncompatibleBases())

    for i in 1:length(y.kets)
        # op.operators is aligned with op.indices, not with subsystem
        # position (cf. `suboperator`), so map the subsystem index first.
        # Also restores the membership test garbled in transcription.
        k = findfirst(isequal(i), op.indices)
        if k !== nothing
            mul!(y.kets[i], op.operators[k], x.kets[i])
        else
            missing_index_allowed || throw("Can't multiply a LazyOperator with a Ket when there's missing indices and the bases are different.
        A missing index is equivalent to applying an identity operator, but that's not possible when mapping from one basis to another!")

            copyto!(y.kets[i].data, x.kets[i].data)
        end
    end

    # Fold the operator's scalar factor and alpha into the first subket.
    rmul!(y.kets[1].data, op.factor * alpha)
    return y
end
18 changes: 18 additions & 0 deletions test/Project.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
[deps]
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341"
FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b"
JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b"
LRUCache = "8ac3fa9e-de4c-5943-b1dc-09c6b5f20637"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
QuantumInterface = "5717a53b-5d69-4fa3-b976-0bf2f97ca1e5"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
RandomMatrices = "2576dda1-a324-5b11-aa66-c48ed7e3c618"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
Strided = "5e0ebb24-38b0-5f93-81fe-25c709ecae67"
StridedViews = "4db3bf67-4bd7-4b4e-b153-31dc3fb37143"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
UnsafeArrays = "c4a57d5a-5b31-53a6-b365-19f8c011fbd6"
2 changes: 1 addition & 1 deletion test/test_aqua.jl
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ using StatsBase
Aqua.test_all(QuantumOpticsBase;
ambiguities=(exclude=[FillArrays.reshape, # Due to https://github.com/JuliaArrays/FillArrays.jl/issues/105#issuecomment-1518406018
StatsBase.TestStat, StatsBase.:(==) , StatsBase.sort!],), # Due to https://github.com/JuliaStats/StatsBase.jl/issues/861
piracy=(broken=true,)
piracies=(broken=true,)
)
# manual piracy check to exclude identityoperator
pirates = [pirate for pirate in Aqua.Piracy.hunt(QuantumOpticsBase) if pirate.name [:identityoperator,:identitysuperoperator]]
Expand Down
7 changes: 4 additions & 3 deletions test/test_jet.jl
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ function (::MayThrowIsOk)(report_type::Type{<:InferenceErrorReport}, @nospeciali
end

# imported to be declared as modules filtered out from analysis result
using LinearAlgebra, LRUCache, Strided, StridedViews, Dates, SparseArrays
using LinearAlgebra, LRUCache, Strided, StridedViews, Dates, SparseArrays, RandomMatrices

@testset "jet" begin
if get(ENV,"JET_TEST","")=="true"
Expand All @@ -31,10 +31,11 @@ using LinearAlgebra, LRUCache, Strided, StridedViews, Dates, SparseArrays
AnyFrameModule(Strided),
AnyFrameModule(StridedViews),
AnyFrameModule(Dates),
AnyFrameModule(SparseArrays))
AnyFrameModule(SparseArrays),
AnyFrameModule(RandomMatrices))
)
@show rep
@test length(JET.get_reports(rep)) <= 6
@test length(JET.get_reports(rep)) <= 24
@test_broken length(JET.get_reports(rep)) == 0
end
end # testset
13 changes: 10 additions & 3 deletions test/test_operators_sparse.jl
Original file line number Diff line number Diff line change
Expand Up @@ -132,9 +132,16 @@ for _IEye in (identityoperator(b_l), identityoperator(b1a, b1b))
@test isa(IEye+IEye, SparseOpType)
@test isa(IEye-IEye, SparseOpType)
@test isa(-IEye, SparseOpType)
@test isa(tensor(IEye, sparse(IEye)), SparseOpType)
@test isa(tensor(sparse(IEye), IEye), SparseOpType)
@test isa(tensor(IEye, IEye), SparseOpType)
if VERSION.major == 1 && VERSION.minor == 6
# julia 1.6 LTS, something's broken here
@test_skip isa(tensor(IEye, sparse(IEye)), SparseOpType)
@test_skip isa(tensor(sparse(IEye), IEye), SparseOpType)
@test_skip isa(tensor(IEye, IEye), SparseOpType)
else
@test isa(tensor(IEye, sparse(IEye)), SparseOpType)
@test isa(tensor(sparse(IEye), IEye), SparseOpType)
@test isa(tensor(IEye, IEye), SparseOpType)
end
end
end

Expand Down
Loading

0 comments on commit 15a3b53

Please sign in to comment.