diff --git a/Project.toml b/Project.toml
index 08838afd..e463ad0b 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "PastaQ"
 uuid = "30b07047-aa8b-4c78-a4e8-24d720215c19"
 authors = ["Giacomo Torlai ", "Matthew Fishman "]
-version = "0.0.19"
+version = "0.0.20"
 
 [deps]
 ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
diff --git a/src/exports.jl b/src/exports.jl
index d586fa45..e33fd449 100644
--- a/src/exports.jl
+++ b/src/exports.jl
@@ -60,7 +60,6 @@ export
 
   # lpdo.jl
   LPDO,
-  normalize!,
   logtr,
   tr,
diff --git a/src/imports.jl b/src/imports.jl
index 4b7e1c1a..79561845 100644
--- a/src/imports.jl
+++ b/src/imports.jl
@@ -18,7 +18,6 @@ import ITensors:
   expect
 
 import LinearAlgebra:
-  normalize!,
   tr,
   norm
diff --git a/test/array.jl b/test/array.jl
index 75953bef..1fa18b00 100644
--- a/test/array.jl
+++ b/test/array.jl
@@ -76,7 +76,7 @@ end
   end
 
   # LPDO density matrix
-  ρ = normalize!(randomstate(qubits; χ = 4, ξ = 2))
+  ρ = PastaQ.normalize!(randomstate(qubits; χ = 4, ξ = 2))
   ρmat = PastaQ.array(ρ)
   ρmpo = MPO(ρ)
@@ -125,7 +125,7 @@ end
 
   # LPDO Choi matrix
-  Λ = normalize!(randomprocess(qubits; χ = 4, ξ = 2); localnorm = 2)
+  Λ = PastaQ.normalize!(randomprocess(qubits; χ = 4, ξ = 2); localnorm = 2)
   Λmat = PastaQ.array(Λ)
   Λmpo = MPO(Λ)
@@ -181,7 +181,7 @@ end
   @test ρtest ≈ ρmat
 
   # LPDO density matrix
-  ρ = normalize!(randomstate(qubits; χ = 4, ξ = 2))
+  ρ = PastaQ.normalize!(randomstate(qubits; χ = 4, ξ = 2))
   ρmat = PastaQ.array(ρ)
   ρprod = prod(ρ)
   ρtest = PastaQ.array(ρprod)
@@ -200,7 +200,7 @@ end
   @test Λmat ≈ Λtest
 
   # LPDO Choi matrix
-  Λ = normalize!(randomprocess(qubits; χ = 4, ξ = 2); localnorm = 2)
+  Λ = PastaQ.normalize!(randomprocess(qubits; χ = 4, ξ = 2); localnorm = 2)
   Λmat = PastaQ.array(Λ)
   Λprod = prod(Λ)
   Λtest = PastaQ.array(Λprod)
diff --git a/test/distances.jl b/test/distances.jl
index 80beca2f..b7967bbe 100644
--- a/test/distances.jl
+++ b/test/distances.jl
@@ -17,8 +17,8 @@ using Random
   ρ1 = runcircuit(sites, circuit1; noise=("DEP", (p=0.01,)))
   ρ2 = runcircuit(sites, circuit2; noise=("DEP", (p=0.01,)))
   # LPDO density matrix
-  ϱ1 = normalize!(randomstate(sites; χ = 2, ξ = 3))
-  ϱ2 = normalize!(randomstate(sites; χ = 2, ξ = 3))
+  ϱ1 = PastaQ.normalize!(randomstate(sites; χ = 2, ξ = 3))
+  ϱ2 = PastaQ.normalize!(randomstate(sites; χ = 2, ξ = 3))
 
   ψ1vec = PastaQ.array(ψ1)
   ρ1mat = PastaQ.array(ρ1)
@@ -86,13 +86,13 @@ end
   ρ1 = MPO(ϱ1)
   ρ2 = MPO(ϱ2)
 
-  ψ1vec = PastaQ.array(normalize!(copy(ψ1)))
-  ρ1mat = PastaQ.array(normalize!(copy(ρ1)))
-  ϱ1mat = PastaQ.array(normalize!(copy(ϱ1)))
+  ψ1vec = PastaQ.array(PastaQ.normalize!(copy(ψ1)))
+  ρ1mat = PastaQ.array(PastaQ.normalize!(copy(ρ1)))
+  ϱ1mat = PastaQ.array(PastaQ.normalize!(copy(ϱ1)))
 
-  ψ2vec = PastaQ.array(normalize!(copy(ψ2)))
-  ρ2mat = PastaQ.array(normalize!(copy(ρ2)))
-  ϱ2mat = PastaQ.array(normalize!(copy(ϱ2)))
+  ψ2vec = PastaQ.array(PastaQ.normalize!(copy(ψ2)))
+  ρ2mat = PastaQ.array(PastaQ.normalize!(copy(ρ2)))
+  ϱ2mat = PastaQ.array(PastaQ.normalize!(copy(ϱ2)))
 
   @test fidelity(ψ1, ψ2) ≈ abs2(ψ1vec' * ψ2vec)
   @test fidelity(ψ1, ρ2) ≈ ψ1vec' * ρ2mat * ψ1vec
@@ -151,12 +151,12 @@ end
   ρ1 = PastaQ.choimatrix(sites, circuit1; noise=("DEP", (p=0.01,)))
   ρ2 = PastaQ.choimatrix(sites, circuit2; noise=("DEP", (p=0.01,)))
   # LPDO Choi matrix
-  ϱ1 = normalize!(randomprocess(sites; mixed=true))
-  ϱ2 = normalize!(randomprocess(sites; mixed=true))
+  ϱ1 = PastaQ.normalize!(randomprocess(sites; mixed=true))
+  ϱ2 = PastaQ.normalize!(randomprocess(sites; mixed=true))
 
   @disable_warn_order begin
     ϕ1 = PastaQ.unitary_mpo_to_choi_mps(U1)
-    normalize!(ϕ1)
+    PastaQ.normalize!(ϕ1)
     ϕ1vec = PastaQ.array(ϕ1)
     ρ1mat = PastaQ.array(ρ1)
     ρ1mat = ρ1mat / tr(ρ1mat)
@@ -164,7 +164,7 @@ end
     ϱ1mat = ϱ1mat / tr(ϱ1mat)
 
     ϕ2 = PastaQ.unitary_mpo_to_choi_mps(U2)
-    normalize!(ϕ2)
+    PastaQ.normalize!(ϕ2)
     ϕ2vec = PastaQ.array(ϕ2)
     ρ2mat = PastaQ.array(ρ2)
     ρ2mat = ρ2mat / tr(ρ2mat)
@@ -221,13 +221,13 @@ end
 
   @disable_warn_order begin
     ϕ1 = PastaQ.unitary_mpo_to_choi_mps(U1)
-    normalize!(ϕ1)
+    PastaQ.normalize!(ϕ1)
     ϕ1vec = PastaQ.array(ϕ1)
     ϱ1mat = PastaQ.array(ϱ1)
     ϱ1mat = ϱ1mat / tr(ϱ1mat)
 
     ϕ2 = PastaQ.unitary_mpo_to_choi_mps(U2)
-    normalize!(ϕ2)
+    PastaQ.normalize!(ϕ2)
     ϕ2vec = PastaQ.array(ϕ2)
     ϱ2mat = PastaQ.array(ϱ2)
     ϱ2mat = ϱ2mat / tr(ϱ2mat)
diff --git a/test/optimizers.jl b/test/optimizers.jl
index 7ad28d45..e7460a44 100644
--- a/test/optimizers.jl
+++ b/test/optimizers.jl
@@ -102,7 +102,7 @@ end
   U = randomprocess(N; χ=χ)
 
   Φ = LPDO(PastaQ.unitary_mpo_to_choi_mps(U))
-  normalize!(Φ; localnorm=2)
+  PastaQ.normalize!(Φ; localnorm=2)
 
   opt = Optimisers.Descent(0.1)
   st = PastaQ.state(opt, Φ)
@@ -128,7 +128,7 @@ end
   data = data_in .=> data_out
 
   Λ = randomprocess(N; χ=χ, ξ = 3)
-  normalize!(Λ; localnorm=2)
+  PastaQ.normalize!(Λ; localnorm=2)
 
   opt = Optimisers.Descent(0.1)
   st = PastaQ.state(opt, Λ)
diff --git a/test/processtomography.jl b/test/processtomography.jl
index 1ea86a3c..efbd5539 100644
--- a/test/processtomography.jl
+++ b/test/processtomography.jl
@@ -254,7 +254,7 @@ end
     @test length(Λ) == N
     logZ = 2 * lognorm(Λ.X)
     sqrt_localZ = []
-    normalize!(Λ; (sqrt_localnorms!)=sqrt_localZ, localnorm=2)
+    PastaQ.normalize!(Λ; (sqrt_localnorms!)=sqrt_localZ, localnorm=2)
     @test logZ ≈ N * log(2) + 2.0 * sum(log.(sqrt_localZ))
     @test abs2(norm(Λ.X)) ≈ 2^N
   end
@@ -269,7 +269,7 @@ end
   num_grad = numgradslogZ(Λ)
 
   sqrt_localnorms = []
-  normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
+  PastaQ.normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
   @test norm(Λ.X)^2 ≈ 2^N
 
   alg_grad, _ = PastaQ.gradlogZ(Λ; sqrt_localnorms=sqrt_localnorms)
@@ -296,7 +296,7 @@ end
   Λ = LPDO(PastaQ.unitary_mpo_to_choi_mps(U))
   num_grad = numgradsnll(Λ, data)
   sqrt_localnorms = []
-  normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
+  PastaQ.normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
   alg_grad, _ = PastaQ.gradnll(Λ, data; sqrt_localnorms=sqrt_localnorms)
 
   for j in 1:N
@@ -316,7 +316,7 @@ end
   num_grad = numgradsTP(Λ; accuracy=1e-8)
   Γ_test = PastaQ.TP(Λ)
   sqrt_localnorms = []
-  normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
+  PastaQ.normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
 
   alg_grad_logZ, logZ = PastaQ.gradlogZ(Λ; sqrt_localnorms=sqrt_localnorms)
@@ -355,7 +355,7 @@ end
   num_grads = num_gradZ + num_gradNLL + trace_preserving_regularizer * num_gradTP
 
   sqrt_localnorms = []
-  normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
+  PastaQ.normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
 
   ex_loss = PastaQ.nll(Λ, data) + 2 * lognorm(Λ.X)
   alg_grads, loss = PastaQ.gradients(
@@ -381,7 +381,7 @@ end
     @test length(Λ) == N
     logZ = logtr(Λ)
     localZ = []
-    normalize!(Λ; (sqrt_localnorms!)=localZ, localnorm=2)
+    PastaQ.normalize!(Λ; (sqrt_localnorms!)=localZ, localnorm=2)
     @test logZ ≈ N * log(2) + 2.0 * sum(log.(localZ))
     @test tr(Λ) ≈ 2^N
   end
@@ -394,7 +394,7 @@ end
   Λ = randomprocess(N; mixed=true, χ=χ, ξ=ξ)
   num_grad = numgradslogZ(Λ)
   sqrt_localnorms = []
-  normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
+  PastaQ.normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
   @test tr(Λ) ≈ 2^N
 
   alg_grad, _ = PastaQ.gradlogZ(Λ; sqrt_localnorms=sqrt_localnorms)
@@ -421,7 +421,7 @@ end
   Λ = randomprocess(N; mixed=true, χ=χ, ξ=ξ)
   num_grad = numgradsnll(Λ, data)
   sqrt_localnorms = []
-  normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
+  PastaQ.normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
   alg_grad, loss = PastaQ.gradnll(Λ, data; sqrt_localnorms=sqrt_localnorms)
 
   @test loss ≈ PastaQ.nll(Λ, data)
@@ -444,7 +444,7 @@ end
   num_grad = numgradsTP(Λ; accuracy=1e-8)
   Γ_test = PastaQ.TP(Λ)
   sqrt_localnorms = []
-  normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
+  PastaQ.normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
   Γ_test = PastaQ.TP(Λ)
 
   alg_grad_logZ, logZ = PastaQ.gradlogZ(Λ; sqrt_localnorms=sqrt_localnorms)
@@ -477,7 +477,7 @@ end
   Λ = randomprocess(N; mixed=true, χ=χ, ξ=ξ)
   num_grad = numgradsnll(Λ, data)
   sqrt_localnorms = []
-  normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
+  PastaQ.normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
   alg_grad, loss = PastaQ.gradnll(Λ, data; sqrt_localnorms=sqrt_localnorms)
 
   TP_distance = PastaQ.TP(Λ)
@@ -490,7 +490,7 @@ end
   num_grads = num_gradZ + num_gradNLL + trace_preserving_regularizer * num_gradTP
 
   sqrt_localnorms = []
-  normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
+  PastaQ.normalize!(Λ; (sqrt_localnorms!)=sqrt_localnorms, localnorm=2)
 
   ex_loss = PastaQ.nll(Λ, data) + 2 * lognorm(Λ.X)
   alg_grads, loss = PastaQ.gradients(
diff --git a/test/statetomography.jl b/test/statetomography.jl
index 7919a21e..32f73a24 100644
--- a/test/statetomography.jl
+++ b/test/statetomography.jl
@@ -177,7 +177,7 @@ numgradsnll(M::MPS, args...; kwargs...) = numgradsnll(LPDO(M), args...; kwargs..
     @test length(ψ) == N
     logZ = lognorm(ψ)
     localZ = []
-    normalize!(ψ; (localnorms!)=localZ)
+    PastaQ.normalize!(ψ; (localnorms!)=localZ)
     @test logZ ≈ sum(log.(localZ))
     @test norm(ψ) ≈ 1
   end
@@ -196,7 +196,7 @@ end
 
   # 2. Globally normalized
   ψ = randomstate(N; χ=χ)
-  normalize!(ψ)
+  PastaQ.normalize!(ψ)
   @test norm(ψ)^2 ≈ 1
   alg_grad, _ = PastaQ.gradlogZ(ψ)
   num_grad = numgradslogZ(ψ)
@@ -209,7 +209,7 @@ end
   num_grad = numgradslogZ(ψ)
 
   localnorms = []
-  normalize!(ψ; (localnorms!)=localnorms)
+  PastaQ.normalize!(ψ; (localnorms!)=localnorms)
   @test norm(ψ) ≈ 1
   alg_grad, _ = PastaQ.gradlogZ(ψ; localnorms=localnorms)
   for j in 1:N
@@ -234,7 +234,7 @@ end
 
   # 2. Globally normalized
   ψ = randomstate(N; χ=χ)
-  normalize!(ψ)
+  PastaQ.normalize!(ψ)
   num_grad = numgradsnll(ψ, data)
   alg_grad, loss = PastaQ.gradnll(ψ, data)
   for j in 1:N
@@ -245,7 +245,7 @@ end
   ψ = randomstate(N; χ=χ)
   num_grad = numgradsnll(ψ, data)
   localnorms = []
-  normalize!(ψ; (localnorms!)=localnorms)
+  PastaQ.normalize!(ψ; (localnorms!)=localnorms)
   @test norm(ψ) ≈ 1
   alg_grad_localnorm, loss = PastaQ.gradnll(ψ, data; localnorms=localnorms)
   for j in 1:N
@@ -276,7 +276,7 @@ end
 
   # 2. Globally normalized
   ψ = randomstate(N; χ=χ)
-  normalize!(ψ)
+  PastaQ.normalize!(ψ)
   num_gradZ = numgradslogZ(ψ)
   num_gradNLL = numgradsnll(ψ, data)
   num_grads = num_gradZ + num_gradNLL
@@ -297,7 +297,7 @@ end
   num_grads = num_gradZ + num_gradNLL
 
   localnorms = []
-  normalize!(ψ; (localnorms!)=localnorms)
+  PastaQ.normalize!(ψ; (localnorms!)=localnorms)
   NLL = PastaQ.nll(ψ, data)
   ex_loss = NLL
   @test norm(ψ)^2 ≈ 1
@@ -319,7 +319,7 @@ end
     @test length(ρ) == N
     logZ = logtr(ρ)
     localZ = []
-    normalize!(ρ; (sqrt_localnorms!)=localZ)
+    PastaQ.normalize!(ρ; (sqrt_localnorms!)=localZ)
     @test logZ ≈ 2.0 * sum(log.(localZ))
     @test tr(ρ) ≈ 1
   end
@@ -345,7 +345,7 @@ end
 
   # 2. Globally normalized
   ρ = randomstate(N; mixed=true, χ=χ, ξ=ξ)
-  normalize!(ρ)
+  PastaQ.normalize!(ρ)
   @test tr(ρ) ≈ 1
   alg_grad, _ = PastaQ.gradlogZ(ρ)
   num_grad = numgradslogZ(ρ)
@@ -364,7 +364,7 @@ end
   num_grad = numgradslogZ(ρ)
 
   sqrt_localnorms = []
-  normalize!(ρ; (sqrt_localnorms!)=sqrt_localnorms)
+  PastaQ.normalize!(ρ; (sqrt_localnorms!)=sqrt_localnorms)
   @test tr(ρ) ≈ 1
 
   alg_grad, _ = PastaQ.gradlogZ(ρ; sqrt_localnorms=sqrt_localnorms)
@@ -404,7 +404,7 @@ end
 
   ## 2. Globally normalized
   ρ = randomstate(N; mixed=true, χ=χ, ξ=ξ)
-  normalize!(ρ)
+  PastaQ.normalize!(ρ)
   num_grad = numgradsnll(ρ, data)
   alg_grad, loss = PastaQ.gradnll(ρ, data)
 
@@ -421,7 +421,7 @@ end
   ρ = randomstate(N; mixed=true, χ=χ, ξ=ξ)
   num_grad = numgradsnll(ρ, data)
   sqrt_localnorms = []
-  normalize!(ρ; (sqrt_localnorms!)=sqrt_localnorms)
+  PastaQ.normalize!(ρ; (sqrt_localnorms!)=sqrt_localnorms)
   @test tr(ρ) ≈ 1
   alg_grad, loss = PastaQ.gradnll(ρ, data; sqrt_localnorms=sqrt_localnorms)
   alg_gradient = permutedims(ITensors.array(alg_grad[1]), [3, 1, 2])
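
Note on the API change: this patch removes `normalize!` from PastaQ's export list and stops importing `LinearAlgebra.normalize!`, so PastaQ's normalization routine for MPS/MPO/LPDO objects is now a separate, unexported function that must be called as `PastaQ.normalize!`, as every updated test call site above does. A minimal sketch of the calling pattern after this change (the site count and keyword values below are illustrative, copied from the tests; this is not new API):

using PastaQ
using LinearAlgebra  # the stdlib normalize! no longer clashes with PastaQ's

# PastaQ's normalize! must now be module-qualified:
ρ = PastaQ.normalize!(randomstate(4; χ = 4, ξ = 2))                   # LPDO density matrix
Λ = PastaQ.normalize!(randomprocess(4; χ = 4, ξ = 2); localnorm = 2)  # LPDO Choi matrix

# The unqualified name now refers only to LinearAlgebra's method:
v = normalize!(randn(4))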