licenses (sequence, lengths 1-3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, lengths 2-8) | text (string, lengths 25-67.1M) | package_name (string, lengths 2-41) | repo (string, lengths 33-86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 299 | n = 20
m = 400
# Define an input of 20 photons among 400 modes
i = Input{Bosonic}(first_modes(n,m))
# Define the interferometer
interf = RandHaar(m)
# Set the output measurement
o = FockSample()
# Create the event
ev = Event(i, o, interf)
# Simulate
sample!(ev)
# output:
# state = [0,1,0,...]
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 35 | using Pkg
Pkg.add("BosonSampling")
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 316 | using BosonSampling
using Plots; pyplot()
n = 20
trunc = 10
T = OneParameterInterpolation
for x in 0:0.01:1
    i = Input{T}(first_modes(n,n), x)
    set1 = [0 for i in 1:n]
    set1[1] = 1
    part = Partition([Subset(set1)])
    F = Fourier(n)
    (idx, pdf) = compute_probabilities_partition(F, part, i)
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 4550 | using Revise
using BosonSampling
using Plots
using ProgressMeter
using Distributions
using Random
using Test
using ArgCheck
using StatsBase
using ColorSchemes
using Interpolations
using Dierckx
using LinearAlgebra
using PrettyTables
using LaTeXStrings
using JLD
using AutoHashEquals
using LinearRegression
using DataStructures
color_map = ColorSchemes.rainbow
max_density = 1
min_density = 0.03
steps = 30
n_iter = 100
invert_densities = [max_density * (max_density/min_density)^((i-1)/(steps-1)) for i in 1:steps]
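# Geometric (log-spaced) ramp of 1/ρ, where ρ = n/m is the photon density: `steps` values
# from 1 up to max_density/min_density ≈ 33, so that the densities span roughly 0.03 to 1.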
function power_law_with_n(n,k)
partition_sizes = k:k
m_array = Int.(floor.(n * invert_densities))
tvd_array = zeros((length(partition_sizes), length(m_array)))
var_array = copy(tvd_array)
for (k,n_subsets) in enumerate(partition_sizes)
#@show n_subsets
for i in 1:length(m_array)
this_tvd = tvd_equilibrated_partition_real_average(m_array[i], n_subsets, n, niter = n_iter)
tvd_array[k,i] = (n_subsets <= m_array[i] ? this_tvd[1] : missing)
var_array[k,i] = (n_subsets <= m_array[i] ? this_tvd[2] : missing)
end
end
x_data = reverse(1 ./ invert_densities)
y_data = reverse(tvd_array[1,:])
get_power_law_log_log(x_data,y_data)
end
for n in 5:2:13
@show n
power_law_with_n(n,2)
end
# n = 7
# power law: y = 0.44038585499823646 * x^0.982801094275387
# n = 9
# power law: y = 0.4232947463279576 * x^0.9788828718055166
# n = 11
# power law: y = 0.4123148441313412 * x^0.9564544056604489
# n = 13
# power law: y = 0.4052999461220922 * x^0.9403720479501786
# getting the constants
for k in 2:3
println("c($k) = $(power_law_with_n(5,k)[3])")
end
########### now with various x values
max_density = 1
min_density = 0.03
steps = 30
n_iter = 100
invert_densities = [max_density * (max_density/min_density)^((i-1)/(steps-1)) for i in 1:steps]
function tvd_equilibrated_partition_real_average(m, n_subsets, n, x1,x2; niter = 100)
tvd_array = zeros(niter)
for i in 1:niter
ib = Input{OneParameterInterpolation}(first_modes(n,m), x1)
id = Input{OneParameterInterpolation}(first_modes(n,m), x2)
interf = RandHaar(m)
part = equilibrated_partition(m,n_subsets)
o = PartitionCountsAll(part)
evb = Event(ib,o,interf)
evd = Event(id,o,interf)
pb = compute_probability!(evb)
pd = compute_probability!(evd)
pdf_dist = pd.proba
pdf_bos = pb.proba
tvd_array[i] = tvd(pdf_bos,pdf_dist)
end
mean(tvd_array), var(tvd_array)
end
function power_law_with_n(n,k,x1,x2)
partition_sizes = k:k
m_array = Int.(floor.(n * invert_densities))
tvd_array = zeros((length(partition_sizes), length(m_array)))
var_array = copy(tvd_array)
for (k,n_subsets) in enumerate(partition_sizes)
#@show n_subsets
for i in 1:length(m_array)
this_tvd = tvd_equilibrated_partition_real_average(m_array[i], n_subsets, n, x1,x2, niter = n_iter)
tvd_array[k,i] = (n_subsets <= m_array[i] ? this_tvd[1] : missing)
var_array[k,i] = (n_subsets <= m_array[i] ? this_tvd[2] : missing)
end
end
x_data = reverse(1 ./ invert_densities)
y_data = reverse(tvd_array[1,:])
pw = get_power_law_log_log(x_data,y_data)
#println("power law: y = $(exp((pw[3]))) * x^$(pw[2])")
(pw[3],pw[2])
end
x1 = 0.9
x2 = 1
n = 8
n_subsets = 2
x_array = collect(range(0,0.99, length = 5))
coeff_array = zeros(size(x_array))
pow_array = similar(coeff_array)
for (i,x1) in enumerate(x_array)
@show x1
coeff_array[i], pow_array[i] = power_law_with_n(n,n_subsets, x1,x2)
end
plt = plot()
scatter!(x_array, coeff_array, label = L"c(2,x)")
scatter!(x_array, pow_array, label = L"r")
xlabel!(L"x")
plot!(legend=:bottomleft)
ylims!((0,1))
savefig("docs/publication/partitions/images/publication/coefficient_power_law_x.png")
# power law: y = 0.4255501939319418 * x^0.9544422903731435
# x1 = 0.2
# power law: y = 0.412435170994033 * x^0.9586765280506229
# x1 = 0.3
# power law: y = 0.3847821931515173 * x^0.95136263045717
# x1 = 0.4
# power law: y = 0.3480553311932919 * x^0.9448014560980827
# x1 = 0.5
# power law: y = 0.30545075876388483 * x^0.9369041670040945
# x1 = 0.6
# power law: y = 0.25594645413311284 * x^0.93214011275294
# x1 = 0.7
# power law: y = 0.19842509628821695 * x^0.9228609093664767
# x1 = 0.8
# power law: y = 0.13609099850609438 * x^0.9142275360704729
# x1 = 0.9
# power law: y = 0.06974770770257517 * x^0.9052904889824115
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 3664 | using Revise
using BosonSampling
using Plots
using ProgressMeter
using Distributions
using Random
using Test
using ArgCheck
using StatsBase
using ColorSchemes
using Interpolations
using Dierckx
using LinearAlgebra
using PrettyTables
using LaTeXStrings
using JLD
using AutoHashEquals
using LinearRegression
using DataStructures
cd("docs/publication/partitions/images/efficiency/")
color_map = ColorSchemes.rainbow
function tvd_equilibrated_partition_real_average_x(x, m, n_subsets, n; niter = 100)
tvd_array = zeros(niter)
for i in 1:niter
ib = Input{Bosonic}(first_modes(n,m))
id = Input{OneParameterInterpolation}(first_modes(n,m), x)
interf = RandHaar(m)
part = equilibrated_partition(m,n_subsets)
o = PartitionCountsAll(part)
evb = Event(ib,o,interf)
evd = Event(id,o,interf)
pb = compute_probability!(evb)
pd = compute_probability!(evd)
pdf_dist = pd.proba
pdf_bos = pb.proba
tvd_array[i] = tvd(pdf_bos,pdf_dist)
end
mean(tvd_array), var(tvd_array)
end
n_subsets = 2
x_array = [0.9,0.95,0.99]
n_max = 16
########### const density #############
for x in x_array
@show x
n_array = collect(5:n_max)
m_array = n_array
tvd_array = []
tvd_var_array = []
for (n,m) in zip(n_array, m_array)
t, v = tvd_equilibrated_partition_real_average_x(x, m, n_subsets, n)
push!(tvd_array, t)
push!(tvd_var_array, v)
end
scatter(n_array, tvd_array, yerr = sqrt.(tvd_var_array), yaxis = :log10)
xlabel!("n")
ylabel!("tvd B, x = $x")
title!("n = m")
savefig("tvd_equilibrated_partition_real_average_x(x, m, n_subsets, n) const density const x = $x.png")
end
########### no collision density #############
for x in x_array
@show x
n_array = collect(5:n_max)
m_array = n_array .^2
tvd_array = []
tvd_var_array = []
for (n,m) in zip(n_array, m_array)
t, v = tvd_equilibrated_partition_real_average_x(x, m, n_subsets, n)
push!(tvd_array, t)
push!(tvd_var_array, v)
end
scatter(n_array, tvd_array, yerr = sqrt.(tvd_var_array), yaxis =:log10)
xlabel!("n")
ylabel!("tvd B, x = $x")
title!("n = m^2")
savefig("tvd_equilibrated_partition_real_average_x(x, m, n_subsets, n) no collision const x = $x.png")
end
########### const density log log #############
for x in x_array
@show x
n_array = collect(5:n_max)
m_array = n_array
tvd_array = []
tvd_var_array = []
for (n,m) in zip(n_array, m_array)
t, v = tvd_equilibrated_partition_real_average_x(x, m, n_subsets, n)
push!(tvd_array, t)
push!(tvd_var_array, v)
end
scatter(n_array, tvd_array, yerr = sqrt.(tvd_var_array), yaxis = :log10, xaxis =:log10)
xlabel!("n")
ylabel!("tvd B, x = $x")
title!("n = m")
savefig("tvd_equilibrated_partition_real_average_x(x, m, n_subsets, n) const density log log const x = $x.png")
end
########### no collision density #############
for x in x_array
@show x
n_array = collect(5:n_max)
m_array = n_array .^2
tvd_array = []
tvd_var_array = []
for (n,m) in zip(n_array, m_array)
t, v = tvd_equilibrated_partition_real_average_x(x, m, n_subsets, n)
push!(tvd_array, t)
push!(tvd_var_array, v)
end
scatter(n_array, tvd_array, yerr = sqrt.(tvd_var_array), xaxis =:log10, yaxis =:log10)
xlabel!("n")
ylabel!("tvd B, x = $x")
title!("n = m^2")
savefig("tvd_equilibrated_partition_real_average_x(x, m, n_subsets, n) no collision log log const x = $x.png")
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 3415 | using Revise
using BosonSampling
using Plots
using ProgressMeter
using Distributions
using Random
using Test
using ArgCheck
using StatsBase
using ColorSchemes
using Interpolations
using Dierckx
using LinearAlgebra
using PrettyTables
using LaTeXStrings
using JLD
using AutoHashEquals
using LinearRegression
n = 10
m = 20
###### checking if the one-distinguishable-photon formula holds ######
# set the interf before the rest
interf = RandHaar(m)
# S matrix with one dist photon
"""
gram_one_dist_photon(dist_photon, n)
Gram matrix with all indistinguishable photons except the one at position `dist_photon`.
"""
function gram_one_dist_photon(dist_photon, n)
mat = ones(ComplexF64,(n,n))
mat[dist_photon, :] .= 0
mat[:, dist_photon] .= 0
mat[dist_photon, dist_photon] = 1
mat
end
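# Sanity check (illustrative, not part of the original script): with n = 3 photons and the
# second one distinguishable, the Gram matrix is all ones except on row/column 2:
# gram_one_dist_photon(2, 3) == ComplexF64[1 0 1; 0 1 0; 1 0 1]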
ib = Input{Bosonic}(first_modes(n,m))
i(dist_photon) = Input{UserDefinedGramMatrix}(first_modes(n,m), gram_one_dist_photon(dist_photon, n))
s1 = Subset(first_modes(Int(m/2), m))
part = Partition(s1)
o = PartitionCount(PartitionOccupancy(ModeOccupation([Int(n/2)]), n, part))
"""
    actual_probability(x)

Exact partition-count probability at partial distinguishability `x`.
"""
function actual_probability(x)
ix = Input{OneParameterInterpolation}(first_modes(n,m),x)
ev = Event(ix,o,interf)
compute_probability!(ev)
ev.proba_params.probability
end
"""
    approximate_probability(x)

Proposed approximation of the partition probability, to first order in ϵ = 1 - x.
"""
function approximate_probability(x)
ϵ = 1 - x
ib = Input{Bosonic}(first_modes(n,m))
i(dist_photon) = Input{UserDefinedGramMatrix}(first_modes(n,m), gram_one_dist_photon(dist_photon, n))
evb = Event(ib,o,interf)
compute_probability!(evb)
pb = evb.proba_params.probability
p_dist_array = zeros(n)
for dist_photon in 1:n
evd = Event(i(dist_photon),o,interf)
compute_probability!(evd)
p_dist_array[dist_photon] = evd.proba_params.probability
end
p_dist = mean(p_dist_array)
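# First-order expansion in ϵ = 1 - x: with weight (1 - nϵ) all photons interfere
# bosonically, and with weight nϵ one photon (averaged over which one) is distinguishable.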
pb * (1 - n*ϵ) + n*ϵ * p_dist
end
x = 0.9
actual_probability(x)
approximate_probability(x)
x_array = collect(range(0.5, 1, length = 30))
plot(x_array, actual_probability.(x_array), label = "p(x)")
plot!(x_array, approximate_probability.(x_array), label = "papprox(x)")
title!("validity of Eq. 157, n = $n, m = $m")
xlabel!("x")
savefig("src/partitions/images/validity_approximate_partition_formula_n = $n, m = $m.png")
###### lower bound ######
function get_avg_proba(n,m,x, niter = 1000)
results = zeros(niter)
for j in 1:niter
i = Input{OneParameterInterpolation}(first_modes(n,m),x)
s1 = Subset(first_modes(Int(m/2), m))
interf = RandHaar(m)
part = Partition(s1)
o = PartitionCount(PartitionOccupancy(ModeOccupation([Int(n/2)]), n, part))
ev = Event(i,o,interf)
compute_probability!(ev)
results[j] = ev.proba_params.probability
end
mean(results)
end
x_array = collect(range(0.9,1, length = 100))
proba_array = get_avg_proba.(n,m,x_array)
plot(x_array, proba_array)
n = 10
m_array = collect(n:4:10n)
# x = 0.8
# proba_array = get_avg_proba.(n,m_array,x)
# plot(m_array, proba_array, label = "x = $x")
# x = 1
# proba_array = get_avg_proba.(n,m_array,x)
# plot!(m_array, proba_array, label = "x = $x")
x = 0.9
tvd_lower_bound_array = abs.(get_avg_proba.(n,m_array,x) - get_avg_proba.(n,m_array,1))
plot(m_array, tvd_lower_bound_array)
n_array = collect(2:2:16)
plot(n_array, get_avg_proba.(n_array,n_array,1))
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 671 | using Revise
using BosonSampling
using Plots
using ProgressMeter
using Distributions
using Random
using Test
using ArgCheck
using StatsBase
using ColorSchemes
using Interpolations
using Dierckx
using LinearAlgebra
using PrettyTables
using LaTeXStrings
using JLD
using AutoHashEquals
using LinearRegression
function foo(n, r_s, r_z)
U = copy(rand_haar(n))
S = rand_gram_matrix_rank(n,r_s)
@argcheck rank(S) == r_s
phases = zeros(ComplexF64, n)
phases[1:r_z] = exp.(1im * rand(r_z)) .- 1
pr = U' * diagm(phases) * U
pr = convert(Matrix{ComplexF64}, pr)
@show rank(pr .* S)
end
for i in 1:1
foo(10,2,2)
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 2050 | using BosonSampling
using Plots
###### checking if the behaviour is linear around the bosonic point ######
# In "Totally Destructive Many-Particle Interference", the authors claim that around the
# bosonic case, when p_bos = 0, the perturbation is linear and proportional to p_d.
# We believe this claim is false.
n = 8
m = n
interf = Fourier(m)
ib = Input{OneParameterInterpolation}(first_modes(n,m),1)
id = Input{OneParameterInterpolation}(first_modes(n,m),0)
# write a suppressed event
state = zeros(Int, n)
state[1] = n - 1
state[2] = 1
state = ModeOccupation(state)
o = FockDetection(state)
evb = Event(ib, o, interf)
evd = Event(id, o, interf)
events = [evb, evd]
for event in events
compute_probability!(event)
end
p_b = evb.proba_params.probability
p_d = evd.proba_params.probability
function p(x)
i = Input{OneParameterInterpolation}(first_modes(n,m),x)
ev = Event(i, o, interf)
compute_probability!(ev)
ev.proba_params.probability
end
p_claim(x) = n*(1-x)* p_d
x_array = [x for x in range(0.9,1,length = 100)]
p_x_array = p.(x_array)
p_claim_array = p_claim.(x_array)
plt = plot(x_array, p_x_array, label = "p_x")
plot!(x_array, p_claim_array, label = "p_claim")
title!("Fourier, n = $(n)")
savefig(plt, "src/certification/images/check_dittel_approximation_fourier_n=$(n).png")
###### checking if the prediction about partition expansion is right ######
n = 8
m = n
n_subsets = 2
count_index = Int(n/2) + 1 ### event we look at
interf = RandHaar(m)
ib = Input{OneParameterInterpolation}(first_modes(n,m),1)
i(x) = Input{OneParameterInterpolation}(first_modes(n,m),x)
part = equilibrated_partition(m, n_subsets)
o = PartitionCountsAll(part)
function proba_partition(x, count_index)
this_ev = Event(i(x), o, interf)
compute_probability!(this_ev)
this_ev.proba_params.probability.proba[count_index]
end
this_ev = Event(i(x), o, interf)
compute_probability!(this_ev)
this_ev.proba_params.probability.proba[count_index]
x_array = collect(range(0.8, 1, length = 100))
plot(x_array, proba_partition.(x_array, count_index))
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 363 | using Revise
using BosonSampling
using Plots
using ProgressMeter
using Distributions
using Random
using Test
using ArgCheck
using StatsBase
using ColorSchemes
using Interpolations
using Dierckx
using LinearAlgebra
using PrettyTables
using LaTeXStrings
using JLD
using AutoHashEquals
using LinearRegression
using DataStructures
color_map = ColorSchemes.rainbow
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 41269 | using Revise
using BosonSampling
using Plots
using ProgressMeter
using Distributions
using Random
using Test
using ArgCheck
using StatsBase
using ColorSchemes
using Interpolations
using Dierckx
using LinearAlgebra
using PrettyTables
using LaTeXStrings
using JLD
using AutoHashEquals
using LinearRegression
using DataStructures
color_map = ColorSchemes.rainbow
cd("docs/publication/partitions/")
### preludes ###
function tvd_equilibrated_partition_real_average(m, n_subsets, n; niter = 100)
tvd_array = zeros(niter)
for i in 1:niter
ib = Input{Bosonic}(first_modes(n,m))
id = Input{Distinguishable}(first_modes(n,m))
interf = RandHaar(m)
part = equilibrated_partition(m,n_subsets)
o = PartitionCountsAll(part)
evb = Event(ib,o,interf)
evd = Event(id,o,interf)
pb = compute_probability!(evb)
pd = compute_probability!(evd)
pdf_dist = pd.proba
pdf_bos = pb.proba
tvd_array[i] = tvd(pdf_bos,pdf_dist)
end
mean(tvd_array), var(tvd_array)
end
function tvd_equilibrated_partition_real_average_x(x, m, n_subsets, n; niter = 100)
tvd_array = zeros(niter)
for i in 1:niter
ib = Input{Bosonic}(first_modes(n,m))
id = Input{OneParameterInterpolation}(first_modes(n,m), x)
interf = RandHaar(m)
part = equilibrated_partition(m,n_subsets)
o = PartitionCountsAll(part)
evb = Event(ib,o,interf)
evd = Event(id,o,interf)
pb = compute_probability!(evb)
pd = compute_probability!(evd)
pdf_dist = pd.proba
pdf_bos = pb.proba
tvd_array[i] = tvd(pdf_bos,pdf_dist)
end
mean(tvd_array), var(tvd_array)
end
### bosonic to distinguishable single subset ###
n = 14
m = n
n_iter = 1000
function labels(x)
if x == 1
"Bosonic"
elseif x == 0
"Distinguishable"
else
"x = $x"
end
end
function add_this_x!(x)
results = []
i = Input{OneParameterInterpolation}(first_modes(n,m),x)
subset = Subset(first_modes(Int(n/2), m))
part = Partition(subset)
o = PartitionCountsAll(part)
for iter in 1:n_iter
interf = RandHaar(m)
ev = Event(i,o,interf)
compute_probability!(ev)
push!(results, ev.proba_params.probability.proba)
end
x_data = collect(0:n)
y_data = [mean([results[iter][i] for iter in 1:n_iter]) for i in 1:n+1]
y_err_data = [sqrt(var([results[iter][i] for iter in 1:n_iter])) for i in 1:n+1]
x_spl = range(0,n, length = 1000)
spl = Spline1D(x_data,y_data)
y_spl = spl(x_spl)
# plot the error only on the extreme cases
y_err(x) = ((x == 0 || x == 1) ? y_err_data : nothing)
if x == 0 || x == 1
scatter!(x_data, y_data, yerr = y_err(x), c = get(color_map, x), label = "", m = :cross)
end
plot!(x_spl , y_spl, c = get(color_map, x), label = labels(x), xticks = 0:n, grid = true)
end
plt = plot()
for x in [0, 0.6, 0.8, 1]#range(0,1, length = 3)
add_this_x!(x)
end
display(plt)
xlabel!(L"k")
ylabel!(L"p(k)")
savefig(plt,"./images/publication/bosonic_to_distinguishable.png")
### evolution of the TVD numbers of subsets ###
begin
#
# partition_sizes = 2:3
#
# n_array = collect(2:14)
# m_array_const_density = 1 .* n_array
# m_array_no_coll = n_array .^2
#
# plt = plot()
#
#
# @showprogress for n_subsets in partition_sizes
#
# m_array = m_array_const_density
#
# const_density = [n_subsets <= m_array[i] ? tvd_equilibrated_partition_real_average(m_array[i], n_subsets, n_array[i]) : missing for i in 1:length(n_array)]
# scatter!(n_array, const_density, label = "m = n, K = $n_subsets")
#
# m_array = m_array_no_coll
# no_collision = [n_subsets <= m_array[i] ? tvd_equilibrated_partition_real_average(m_array[i],n_subsets, n_array[i]) : missing for i in 1:length(n_array)]
# scatter!(n_array, no_collision, label = "m = n^2, K = $n_subsets", markershape=:+)
#
# end
#
# ylabel!("TVD bos-dist")
# xlabel!("n")
# ylims!(-0.05,0.8)
#
# title!("equilibrated partition TVD")
#
# savefig(plt, "images/publication/equilibrated partition TVD_legend")
#
# plot!(legend = false)
#
# savefig(plt, "images/publication/equilibrated partition TVD")
#
# plt
end
###### TVD with boson density ######
max_density = 1
min_density = 0.03
steps = 10
n_iter = 100
invert_densities = [max_density * (max_density/min_density)^((i-1)/(steps-1)) for i in 1:steps]
n = 10
m_array = Int.(floor.(n * invert_densities))
partition_sizes = 2:4
tvd_array = zeros((length(partition_sizes), length(m_array)))
var_array = copy(tvd_array)
for (k,n_subsets) in enumerate(partition_sizes)
@show n_subsets
@showprogress for i in 1:length(m_array)
this_tvd = tvd_equilibrated_partition_real_average(m_array[i], n_subsets, n, niter = n_iter)
tvd_array[k,i] = (n_subsets <= m_array[i] ? this_tvd[1] : missing)
var_array[k,i] = (n_subsets <= m_array[i] ? this_tvd[2] : missing)
end
end
save("data/boson_density.jld", "tvd_array", tvd_array, "var_array" ,var_array)
pwd()
a = load("data/save/boson_density.jld")
tvd_array = a["tvd_array"]
partition_color(k, partition_sizes) = get(color_map, k / length(partition_sizes))
plt = plot()
for (k,K) in enumerate(partition_sizes)
x_data = reverse(1 ./ invert_densities)
y_data = reverse(tvd_array[k,:])
x_spl = range(minimum(x_data),maximum(x_data), length = 1000)
spl = Spline1D(x_data,y_data)
y_spl = spl(x_spl)
#
# scatter!(x_data , y_data, yerr = sqrt.(var_array[k,:]), c = lost_photon_color(k,lost_photons), label = "", m = :cross, xaxis=:log10)
#
# scatter!(x_data , y_data, yerr = sqrt.(var_array[k,:]), c = partition_color(k,partition_sizes), label = "", m = :cross, xaxis=:log10, yaxis = :log10)
scatter!(x_data , y_data, c = partition_color(k,partition_sizes), label = "", m = :cross, xaxis=:log10, yaxis=:log10)
# scatter!(x_data , y_data, yerr = sqrt.(var_array[k,:]), c = partition_color(k,partition_sizes), label = "", m = :cross, xaxis=:log10)
ylims!((0.01,1))
legend_string = "K = $K"
plot!(x_spl, y_spl, c = partition_color(k,partition_sizes), label = LaTeXString(legend_string), xaxis=:log10, xminorticks = 10, xminorgrid = true, yminorticks = 10, yminorgrid = true)
#
# plot!(x_spl, y_spl, c = partition_color(k,partition_sizes), label = "K = $K", xaxis=:log10, yaxis =:log10)
# plot!(η_array,tvd_η_array[k,:], label = "up to $lost lost", c = lost_photon_color(k,lost_photons))
end
plt = plot!(legend=:bottomright)
xlabel!(L"ρ")
ylabel!(L"TVD(B,D)")
display(plt)
savefig(plt, "images/publication/density.png")
x_data = reverse(1 ./ invert_densities)
y_data = reverse(tvd_array[1,:])
get_power_law_log_log(x_data,y_data)
###### TVD with x-model ######
n = 10
m = 10
partition_sizes = 2:4
x_array = collect(range(0,1,length = 10))
tvd_x_array = zeros((length(partition_sizes), length(x_array)))
niter = 10
for (k,n_subsets) in enumerate(partition_sizes)
for (j,x) in enumerate(x_array)
tvd_array = zeros(niter)
for i in 1:niter
ib = Input{Bosonic}(first_modes(n,m))
ix = Input{OneParameterInterpolation}(first_modes(n,m),x)
interf = RandHaar(m)
part = equilibrated_partition(m,n_subsets)
o = PartitionCountsAll(part)
evb = Event(ib,o,interf)
evx = Event(ix,o,interf)
pb = compute_probability!(evb)
px = compute_probability!(evx)
pdf_x = px.proba
pdf_bos = pb.proba
tvd_array[i] = tvd(pdf_bos,pdf_x)
end
tvd_x_array[k,j] = mean(tvd_array)
end
end
save("data/tvd_with_x.jld", "tvd_x_array", tvd_x_array, "x_array" ,x_array)
partition_color(k, partition_sizes) = get(color_map, (k-1) / (length(partition_sizes)-1))
plt = plot()
for (k,n_subsets) in enumerate(partition_sizes)
x_data = x_array
y_data = tvd_x_array[k,:]
x_spl = range(minimum(x_data),maximum(x_data), length = 1000)
spl = Spline1D(x_data,y_data)
y_spl = spl(x_spl)
plot!(x_spl,y_spl, label = "K = $n_subsets", c = partition_color(k, partition_sizes))
end
xlabel!("x")
ylabel!("TVD(x,B)")
plt
savefig(plt,"./images/publication/x_model.png")
###### TVD with loss ######
n = 10
m = n
partition_sizes = 2:4
η_array = collect(range(0.8,1,length = 10))
tvd_η_array = zeros((length(partition_sizes), length(η_array)))
var_η_array = copy(tvd_η_array)
niter = 10
for (k,n_subsets) in enumerate(partition_sizes)
@show n_subsets
@showprogress for (j,η) in enumerate(η_array)
tvd_array = zeros(niter)
ib = Input{Bosonic}(first_modes(n,2m))
id = Input{Distinguishable}(first_modes(n,2m))
part = to_lossy(equilibrated_partition(m,n_subsets))
o = PartitionCountsAll(part)
for i in 1:niter
interf = UniformLossInterferometer(η,m)
evb = Event(ib,o,interf)
evd = Event(id,o,interf)
pb = compute_probability!(evb)
pd = compute_probability!(evd)
pdf_dist = pd.proba
pdf_bos = pb.proba
tvd_array[i] = tvd(pdf_bos,pdf_dist)
end
tvd_η_array[k,j] = mean(tvd_array)
var_η_array[k,j] = var(tvd_array)
end
end
# plt = plot()
# for (k,n_subsets) in enumerate(partition_sizes)
#
# plot!(η_array,tvd_η_array[k,:], label = "K = $k")
#
# end
#
# xlabel!("η")
# ylabel!("TVD(B,D)")
#
# plt
save("data/tvd_with_loss.jld", "η_array", η_array, "tvd_η_array" ,tvd_η_array, "var_η_array", var_η_array)
partition_color(k, partition_sizes) = get(color_map, k / length(partition_sizes))
plt = plot()
for (k,lost) in enumerate(partition_sizes)
x_data = η_array
y_data = tvd_η_array[k,:]
x_spl = range(minimum(x_data),maximum(x_data), length = 1000)
spl = Spline1D(x_data,y_data)
y_spl = spl(x_spl)
scatter!(x_data , y_data, yerr = sqrt.(var_η_array[k,:]), c = lost_photon_color(k,lost_photons), label = "", m = :cross)
scatter!(x_data , y_data, c = lost_photon_color(k,lost_photons), label = "", m = :cross)
plot!(x_spl, y_spl, c = lost_photon_color(k,lost_photons), label = "K = $k")
# plot!(η_array,tvd_η_array[k,:], label = "up to $lost lost", c = lost_photon_color(k,lost_photons))
end
plt = plot!(legend=:bottomright)
plot!(legend = false)
xlabel!("η")
ylabel!("TVD(B,D)")
display(plt)
savefig(plt, "images/publication/partition_with_loss.png")
###### TVD with how many photons were lost ######
n = 16
m = n
lost_photons = collect(0:n)
n_subsets = 2
η_array = collect(range(0.8,1,length = 10))
tvd_η_array = zeros((length(lost_photons), length(η_array)))
var_η_array = copy(tvd_η_array)
niter = 10
@showprogress for (j,η) in enumerate(η_array)
tvd_array = zeros((length(lost_photons),niter))
ib = Input{Bosonic}(first_modes(n,2m))
id = Input{Distinguishable}(first_modes(n,2m))
part = to_lossy(equilibrated_partition(m,n_subsets))
o = PartitionCountsAll(part)
@showprogress for i in 1:niter
interf = UniformLossInterferometer(η,m)
evb = Event(ib,o,interf)
evd = Event(id,o,interf)
pb = compute_probability!(evb)
pd = compute_probability!(evd)
pb_sorted = sort_by_lost_photons(pb)
pd_sorted = sort_by_lost_photons(pd)
for (k,lost) in enumerate(lost_photons)
tvd_array[k,i] = tvd_less_than_k_lost_photons(k, pb_sorted, pd_sorted)
end
end
for (k,lost) in enumerate(lost_photons)
tvd_η_array[k,j] = mean(tvd_array[k,:])
var_η_array[k,j] = var(tvd_array[k,:])
end
end
save("data/tvd_with_lost_photons.jld", "η_array", η_array, "tvd_η_array" ,tvd_η_array, "var_η_array", var_η_array)
# setting the number of lost photons to plot
lost_photons = collect(0:10)
begin
function lost_photon_color(k, lost_photons)
lost = k-1
x = lost / length(lost_photons)
get(color_map, x)
end
minimum(η_array)
plt = plot()
for (k,lost) in Iterators.reverse(enumerate(lost_photons))
x_data = η_array
y_data = tvd_η_array[k,:]
x_spl = range(minimum(x_data),maximum(x_data), length = 1000)
spl = Spline1D(x_data,y_data)
y_spl = spl(x_spl)
#scatter!(x_data , y_data, yerr = sqrt.(var_η_array[k,:]), c = lost_photon_color(k,lost_photons), label = "", m = :cross)
scatter!(x_data , y_data, c = lost_photon_color(k,lost_photons), label = "", m = :cross)
plot!(x_spl, y_spl, c = lost_photon_color(k,lost_photons), label = "l <= $lost")
# plot!(η_array,tvd_η_array[k,:], label = "up to $lost lost", c = lost_photon_color(k,lost_photons))
end
plt = plot!(legend=:bottomright)
plot!(legend = false)
xlabel!("η")
ylabel!("TVD(B,D)")
display(plt)
savefig(plt, "images/publication/lost_photons.png")
end
plt
###### relative independance of the choice of partition size ######
###### bayesian certification examples ######
n = 10
m = 30
n_trials = 500
n_samples = 500
n_subsets = 2
sample_array = zeros((n_trials, n_samples+1))
@showprogress for i in 1:n_trials
interf = RandHaar(m)
ib = Input{Bosonic}(first_modes(n,m))
id = Input{Distinguishable}(first_modes(n,m))
part = equilibrated_partition(m,n_subsets)
o = PartitionCountsAll(part)
evb = Event(ib,o,interf)
evd = Event(id,o,interf)
for ev_theory in [evb,evd]
ev_theory.proba_params.probability == nothing ? compute_probability!(ev_theory) : nothing
end
pb = evb.proba_params.probability
ib = evb.input_state
interf = evb.interferometer
p_partition_B(ev) = p_partition(ev, evb)
p_partition_D(ev) = p_partition(ev, evd)
p_q = HypothesisFunction(p_partition_B)
p_a = HypothesisFunction(p_partition_D)
events = []
for j in 1:n_samples
ev = Event(ib,PartitionCount(wsample(pb.counts, pb.proba)), interf)
push!(events, ev)
end
certif = Bayesian(events, p_q, p_a)
compute_probability!(certif)
sample_array[i, :] = certif.probabilities
end
sample_array = sample_array[:,1:end]
plt = plot()
for i in 1:size(sample_array,1)
scatter!(sample_array[i,:], c = :black, marker=1)
end
plot!(legend = false)
xlabel!("n_samples")
ylabel!("confidence")
x_ = collect(range(0,n_samples+1, length=div(n_samples,5)))
y_ = collect(range(0,1.1, length=1000))
D = Dict()
for i in 1:length(x_)-1
for j in 1:length(y_)-1
D["[$(x_[i]),$(x_[i+1]),$(y_[j]),$(y_[j+1])]"] = 0
end
end
D = sort(D)
@showprogress for x in 1:n_trials
for y in 1:n_samples
val = sample_array[x,y]
for x1 in 1:length(x_)-1
for y1 in 1:length(y_)-1
if val >= y_[y1] && val <= y_[y1+1]
if y >= x_[x1] && y <= x_[x1+1]
D["[$(x_[x1]),$(x_[x1+1]),$(y_[y1]),$(y_[y1+1])]"] += 1
end
end
end
end
end
end
D = sort(D)
M = zeros(Float32, length(x_)-1, length(y_)-1)
for x1 in 1:length(x_)-1
for y1 in 1:length(y_)-1
M[x1,y1] = log(D["[$(x_[x1]),$(x_[x1+1]),$(y_[y1]),$(y_[y1+1])]"]/sum(sample_array[:]))
end
end
using PlotThemes
theme(:dracula)
x_ = x_[1:end-1]
y_ = y_[1:end-1]
heatmap(x_,y_,M',dpi=800)
savefig("/Users/antoinerestivo/BosonSampling.jl/docs/publication/partitions/density_3_normalized.png")
x_pixels = Int(n_samples/10) + 1
x_grid = collect(range(0,n_samples, length = x_pixels))
y_pixels = 50 + 1
y_grid = collect(range(0,1, length = y_pixels))
pixels = zeros(Int, (x_pixels, y_pixels))
for xi in 1:(length(x_grid)-1)
for yi in 1:(length(y_grid)-1)
for x in 1:size(sample_array, 1)
for y in sample_array[x,:]
if x >= x_grid[xi] && x < x_grid[xi+1]
if y >= y_grid[yi] && y < y_grid[yi+1]
pixels[xi,yi] += 1
end
end
end
end
end
end
heatmap(1:size(pixels,1), 1:size(pixels,2), pixels, c=cgrad([:blue, :white,:red, :yellow]), xlabel="x values", ylabel="y values",title="My title")
pixels
sample_array[:,10]
#scatter(sample_array[1,:])
###### number of samples needed from bayesian ######
n = 10
partition_sizes = 2:3
max_density = 1
min_density = 0.07
steps = 20
n_trials = 1000
maxiter = 100000
invert_densities = [max_density * (max_density/min_density)^((i-1)/(steps-1)) for i in 1:steps]
m_array = Int.(floor.(n * invert_densities))
n_samples_array = zeros((length(partition_sizes), length(m_array)))
n_samples_array_var_array = copy(n_samples_array)
for (k,n_subsets) in enumerate(partition_sizes)
@show n_subsets
@showprogress for (i,m) in enumerate(m_array)
trials = []
for i in 1:n_trials
interf = RandHaar(m)
ib = Input{Bosonic}(first_modes(n,m))
id = Input{Distinguishable}(first_modes(n,m))
part = equilibrated_partition(m,n_subsets)
o = PartitionCountsAll(part)
evb = Event(ib,o,interf)
evd = Event(id,o,interf)
push!(trials , number_of_samples(evb,evd, maxiter = maxiter))
end
trials = remove_nothing(trials)
n_samples_array[k,i] = (n_subsets <= m_array[i] ? mean(trials) : missing)
n_samples_array_var_array[k,i] = (n_subsets <= m_array[i] ? var(trials) : missing)
end
end
save("data/number_samples.jld", "n_samples_array", n_samples_array, "n_samples_array_var_array" , n_samples_array_var_array)
l = load("data/save/number_samples.jld")
n_samples_array = l["n_samples_array"]
partition_color(k, partition_sizes) = get(color_map, k / length(partition_sizes))
plt = plot()
for (k,K) in enumerate(partition_sizes)
x_data = reverse(1 ./ invert_densities)
y_data = reverse(n_samples_array[k,:])
x_spl = range(minimum(x_data),maximum(x_data), length = 1000)
spl = Spline1D(x_data,y_data)
y_spl = spl(x_spl)
#
# scatter!(x_data , y_data, yerr = sqrt.(var_array[k,:]), c = lost_photon_color(k,lost_photons), label = "", m = :cross, xaxis=:log10)
#
# scatter!(x_data , y_data, yerr = sqrt.(var_array[k,:]), c = partition_color(k,partition_sizes), label = "", m = :cross, xaxis=:log10, yaxis = :log10)
scatter!(x_data , y_data, c = partition_color(k,partition_sizes), label = "", m = :cross, xaxis=:log10,yaxis =:log10)
# scatter!(x_data , y_data, yerr = sqrt.(var_array[k,:]), c = partition_color(k,partition_sizes), label = "", m = :cross, xaxis=:log10)
plot!(x_data, y_data, c = partition_color(k,partition_sizes), label = "K = $K", xaxis=:log10, yaxis =:log10, xminorticks = 10, xminorgrid = true, yminorticks = 10; yminorgrid = true)
#
# plot!(x_spl, y_spl, c = partition_color(k,partition_sizes), label = "K = $K", xaxis=:log10, yaxis =:log10)
# plot!(η_array,tvd_η_array[k,:], label = "up to $lost lost", c = lost_photon_color(k,lost_photons))
end
plt = plot!(legend=:topright)
#ylims!(0, maxiter)
xlabel!("ρ")
ylabel!("samples")
display(plt)
savefig(plt, "images/publication/number_samples.png")
k = 1
x_data = reverse(1 ./ invert_densities)
y_data = reverse(n_samples_array[k,:])
get_power_law_log_log(x_data, y_data)
###### number of samples needed x-model ######
# how many trials to reject the hypothesis that the input is
# Bosonic while it is actually the x-model
n = 10
m = n
partition_sizes = 2:3
steps = 10
n_trials = 1000
maxiter = 100000
p_null = 0.05
x_array = collect(range(0.8,0.99,length = steps))
n_samples_array = zeros((length(partition_sizes), length(x_array)))
n_samples_array_var_array = copy(n_samples_array)
tr = []
for (k,n_subsets) in enumerate(partition_sizes)
@show n_subsets
@showprogress for (j,x) in enumerate(x_array)
trials = Vector{Float64}()
for i in 1:n_trials
interf = RandHaar(m)
ib = Input{OneParameterInterpolation}(first_modes(n,m),x)
id = Input{Bosonic}(first_modes(n,m))
part = equilibrated_partition(m,n_subsets)
o = PartitionCountsAll(part)
evb = Event(ib,o,interf)
evd = Event(id,o,interf)
#@show number_of_samples(evb,evd, maxiter = maxiter)
for ev_theory in [evb,evd]
ev_theory.proba_params.probability == nothing ? compute_probability!(ev_theory) : nothing
end
pb = evb.proba_params.probability
ib = evb.input_state
pd = evd.proba_params.probability
id = evd.input_state
interf = evb.interferometer
p_partition_B(ev) = p_partition(ev, evb)
p_partition_D(ev) = p_partition(ev, evd)
p_a = HypothesisFunction(p_partition_B)
p_q = HypothesisFunction(p_partition_D)
χ = 1
for n_samples in 1:maxiter
ev = Event(ib,PartitionCount(wsample(pb.counts, pb.proba)), interf)
χ = update_confidence(ev, p_q.f, p_a.f, χ)
if confidence(χ) <= p_null
#@show n_samples
push!(trials , n_samples)
break
end
end
end
#@show trials
clean_trials = Vector{Float64}()
for trial in trials
if trial != Inf && trial > 0
push!(clean_trials,trial)
end
end
if !isempty(clean_trials)
n_samples_array[k,j] = mean(clean_trials)
else
@warn "trials empty"
end
#(n_subsets <= m_array[i] ? mean(trials) : missing)
#n_samples_array_var_array[k,i] = (n_subsets <= m_array[i] ? var(trials) : missing)
end
end
save("data/number_samples_x.jld", "n_samples_array", n_samples_array, "n_samples_array_var_array" , n_samples_array_var_array)
partition_color(k, partition_sizes) = get(color_map, k / length(partition_sizes))
plt = plot()
for (k,K) in enumerate(partition_sizes)
x_data = log10.(x_array)
y_data = log10.(n_samples_array[k,:])
x_spl = range(minimum(x_data),maximum(x_data), length = 1000)
spl = Spline1D(x_data,y_data)
y_spl = spl(x_spl)
#
# scatter!(x_data , y_data, yerr = sqrt.(var_array[k,:]), c = lost_photon_color(k,lost_photons), label = "", m = :cross, xaxis=:log10)
#
# scatter!(x_data , y_data, yerr = sqrt.(var_array[k,:]), c = partition_color(k,partition_sizes), label = "", m = :cross, xaxis=:log10, yaxis = :log10)
scatter!(10 .^ x_data , 10 .^ y_data, c = partition_color(k,partition_sizes), label = "", m = :cross)
# scatter!(x_data , y_data, yerr = sqrt.(var_array[k,:]), c = partition_color(k,partition_sizes), label = "", m = :cross, xaxis=:log10)
plot!(10 .^ x_spl, 10 .^ y_spl, c = partition_color(k,partition_sizes), label = "K = $K", yaxis = :log10, yminorticks = 10, yminorgrid = true)
#
# plot!(x_spl, y_spl, c = partition_color(k,partition_sizes), label = "K = $K", xaxis=:log10, yaxis =:log10)
# plot!(η_array,tvd_η_array[k,:], label = "up to $lost lost", c = lost_photon_color(k,lost_photons))
end
xlims!(0.79,1)
ylims!(100,10^5)
plt = plot!(legend=:topleft)
#ylims!(0, maxiter)
xlabel!(L"x")
ylabel!(L"n_s")
display(plt)
savefig(plt, "images/publication/number_samples_x.png")
####### Fourier partition ######
n = 2*10
m = n
labels(x) = x == 1 ? "Bosonic" : "Distinguishable"
alp(x) = x == 1 ? 1 : 1
function add_this_x(x)
i = Input{OneParameterInterpolation}(first_modes(n,m),x)
subset = Subset(Int.([isodd(i) for i in 1:m]))
interf = Fourier(m)
part = Partition(subset)
o = PartitionCountsAll(part)
ev = Event(i,o,interf)
compute_probability!(ev)
p = bar([0:n] , ev.proba_params.probability.proba, c = get(color_map, x/2), label = labels(x), alpha=alp(x))
# scatter!([0:n] , ev.proba_params.probability.proba, c = get(color_map, x/2), label = "", m = :xcross)
#plot!(x_spl , y_spl, c = get(color_map, x), label = labels(x))
xlabel!(L"n")
ylabel!(L"p")
ylims!((0,0.25))
p
end
p1 = add_this_x(1)
p2 = add_this_x(0)
plt = plot(p1,p2, layout = (2,1))
display(plt)
savefig(plt,"./images/publication/fourier.png")
###### plot of evolution with n,m ######
x_array = [0,0.5,0.8,0.9,0.95,0.99] # we compare x = 1 to this x
function plot_evolution_n_m_x(x)
n_max = 16
n_array = collect(6:n_max)
m_no_coll(n) = n^2
m_dense(n) = n
m_sparse(n) = 5n
n_iter = 100
partition_sizes = 2:3
laws = [m_sparse, m_no_coll]#[m_dense, m_sparse, m_no_coll]
y_max = zeros(length(laws))
plots = []
for (pl, m_law) in enumerate(laws)
plt = plot()
m_array = m_law.(n_array)
tvd_array = zeros((length(partition_sizes), length(m_array)))
var_array = copy(tvd_array)
for (k,n_subsets) in enumerate(partition_sizes)
@showprogress for (i,(n,m)) in enumerate(zip(n_array, m_array))
this_tvd = tvd_equilibrated_partition_real_average_x(x, m, n_subsets, n, niter = n_iter)
tvd_array[k,i] = (n_subsets <= m ? this_tvd[1] : missing)
var_array[k,i] = (n_subsets <= m ? this_tvd[2] : missing)
end
end
save("data/evolution_n_m_$(String(Symbol(m_law)))_x=$x.jld", "tvd_array", tvd_array, "var_array" ,var_array, "partition_sizes", partition_sizes, "n_array", n_array, "x",x)
tvd_array
function partition_color(k, partition_sizes)
if length(partition_sizes) > 1
return get(color_map, (k-1) / (length(partition_sizes)-1))
else
return get(color_map, 0.5)
end
end
for (k,K) in enumerate(partition_sizes)
x_data = n_array
y_data = tvd_array[k,:]
x_spl = collect(range(minimum(x_data),maximum(x_data), length = 1000))
spl = Spline1D(x_data,y_data)
y_spl = spl(x_spl)
legend_string = "K = $K"
if m_law == m_sparse
lr_func(x) = mean(y_data) # removed the slope
plot!(x_spl, lr_func.(x_spl), c = partition_color(k,partition_sizes), label = LaTeXString(legend_string))
else
lr_func = get_power_law_log_log(x_data,y_data)[1]
plot!(x_spl, lr_func.(x_spl), c = partition_color(k,partition_sizes), label = LaTeXString(legend_string))
end
#
# scatter!(x_data , y_data, yerr = sqrt.(var_array[k,:]), c = lost_photon_color(k,lost_photons), label = "", m = :cross, xaxis=:log10)
#
# scatter!(x_data , y_data, yerr = sqrt.(var_array[k,:]), c = partition_color(k,partition_sizes), label = "", m = :cross, xaxis=:log10, yaxis = :log10)
scatter!(x_data , y_data, yerr = sqrt.(var_array[k,:]), c = partition_color(k,partition_sizes), label = "", m = :cross, xticks = x_data)
# scatter!(x_data , y_data, yerr = sqrt.(var_array[k,:]), c = partition_color(k,partition_sizes), label = "", m = :cross, xaxis=:log10)
#
# plot!(x_spl, y_spl, c = partition_color(k,partition_sizes), label = "K = $K", xaxis=:log10, yaxis =:log10)
# plot!(η_array,tvd_η_array[k,:], label = "up to $lost lost", c = lost_photon_color(k,lost_photons))
end
plt = plot!(legend=:topright)
y_max[pl] = 1.3*(maximum(tvd_array + sqrt.(var_array)))
xlabel!(L"n")
ylabel!(L"TVD(B,x=%$x)")
display(plt)
push!(plots, plt)
end
plt = plot(plots[1],plots[2], layout = (2,1))
# ylims!((0, 0.18))
ylims!((0, maximum(y_max)))
display(plt)
savefig(plt, "images/publication/size_x=$x.png")
end
for x in x_array
@show x
plot_evolution_n_m_x(x)
end
###### loss and its influence on validation time ######
#
# We want to plot the amount of time necessary for validation.
# Should we plot it for different numbers of lost photons considered and different loss regimes?
# For this, we simply plot the inverse of the number of samples needed,
# compared to a reference, to obtain a speed-up factor.
# We sample bosonic events and compare them to distinguishable events,
# looking at the relative time taken to reach a threshold of certainty
# when using up to lost_up_to lost photons.
begin
n = 10
m = n
n_subsets = 2
lost_photons = collect(0:n)
n_unitaries = 100 # number of unitaries on which averaged
n_trials_each_unitary = 1000
max_iter = 100000 # max number of samples taken for bayesian estimation
threshold = 0.95 # confidence to attain
η_array = collect(range(0.8,1,length = 5))
speed_up_array = zeros((length(η_array), length(lost_photons)))
speed_up_var_array = copy(speed_up_array)
alternative_hypothesis_x = 0.9
@showprogress for (η_index ,η) in enumerate(η_array)
n_sample_this_run = zeros((n_unitaries * n_trials_each_unitary, length(lost_photons)))
p_lost_this_run = copy(n_sample_this_run)
# to store data before averaging
for unitary in 1:n_unitaries
ib = Input{Bosonic}(first_modes(n,2m))
id = Input{Distinguishable}(first_modes(n,2m))
part = to_lossy(equilibrated_partition(m,n_subsets))
o = PartitionCountsAll(part)
interf = UniformLossInterferometer(η,m)
evb = Event(ib,o,interf)
evd = Event(id,o,interf)
pb = compute_probability!(evb)
pd = compute_probability!(evd)
pb_sorted = sort_by_lost_photons(pb)
pd_sorted = sort_by_lost_photons(pd)
p_lost = [sum(pb_sorted[i].proba) for i in 1:length(lost_photons)]
# in this simple model, this is the same for both kinds of particles
#
# if η == 1
# @show p_lost
# end
function n_samples_loss(lost_up_to)
χ = 1
n_sampled = 0
for iter in 1:max_iter
n_sampled += 1
lost = wsample(p_lost[1:lost_up_to+1]) - 1
# @show lost
event_sampled = wsample(pb_sorted[lost+1].proba)
χ *= (pb_sorted[lost+1].proba)[event_sampled] / (pd_sorted[lost+1].proba)[event_sampled]
if confidence(χ) >= threshold
return n_sampled
end
end
@warn "max_iter raised, need to raise it"
return nothing
end
#
# n_sample_this_run_array = zeros((n_trials_each_unitary, length(lost_photons)))
# time_factor_this_run_array = zeros((n_trials_each_unitary, length(lost_photons)))
# speed_up_this_run_array = zeros((n_trials_each_unitary, length(lost_photons)))
for trial_this_unitary in 1:n_trials_each_unitary
# n_sample_this_run = [n_samples_loss(i) for i in lost_photons]
# n_sample_this_run_array[trial_this_unitary, :] = n_sample_this_run
#
#
# time_factor(lost_up_to) = n_sample_this_run[lost_up_to + 1] / sum(p_lost[1:lost_up_to+1])
# speed_up(lost_up_to) = (time_factor(lost_up_to) / time_factor(0))^(-1)
#
# time_factor_this_run_array[trial_this_unitary, :] = [time_factor(lost_up_to) for lost_up_to in lost_photons]
# speed_up_this_run_array[trial_this_unitary, :] = [speed_up(lost_up_to) for lost_up_to in lost_photons]
#
# @show [time_factor(lost_up_to) for lost_up_to in lost_photons]
# @show [speed_up(lost_up_to) for lost_up_to in lost_photons]
#
n_sample_this_run[(unitary-1)*n_trials_each_unitary + trial_this_unitary, :] = [n_samples_loss(i) for i in lost_photons]
p_lost_this_run[(unitary-1)*n_trials_each_unitary + trial_this_unitary, :] = p_lost
end
# @show [mean(time_factor_this_run_array[:, lost_up_to + 1]) for lost_up_to in lost_photons]
# @show [mean(speed_up_this_run_array[:, lost_up_to + 1]) for lost_up_to in lost_photons]
end
# @show "----"
# @show [mean(n_sample_this_run[:, lost_up_to + 1]) for lost_up_to in lost_photons]
#
# @show [mean(sum(p_lost_this_run[:, 1: lost_up_to + 1])) for lost_up_to in lost_photons]
time_factor_average = [mean(n_sample_this_run[:, lost_up_to + 1]) / mean(sum(p_lost_this_run[:, 1: lost_up_to + 1])) for lost_up_to in lost_photons]
speed_up_array[η_index,:] = [time_factor_average[1] / time_factor_average[lost_up_to + 1] for lost_up_to in lost_photons]
# @show speed_up_array[η_index,:] = speed_up_average
# speed_up_var_array[η_index,:] = [var(this_run_speed_up[:, lost+1]) for lost in lost_photons]
end
save("data/speed_up_loss_(n=$n m=$m n_subsets = $n_subsets alternative_x = $alternative_hypothesis_x).jld", "speed_up_array", speed_up_array, "η_array", η_array)
end
begin
# setting the number of lost photons to plot
lost_photons = collect(0:5)
begin
function lost_photon_color(k, lost_photons)
lost = k-1
x = lost / (length(lost_photons))
get(color_map, x)
end
plt = plot()
function add_line(k, lost, c, label)
x_data = η_array
y_data = speed_up_array[:, k]
x_spl = range(minimum(x_data),maximum(x_data), length = 1000)
spl = Spline1D(x_data,y_data)
y_spl = spl(x_spl)
#scatter!(x_data , y_data, yerr = sqrt.(var_η_array[k,:]), c = lost_photon_color(k,lost_photons), label = "", m = :cross)
# scatter!(x_data , y_data , yerr = sqrt.(speed_up_var_array[:, k]), c = lost_photon_color(k,lost_photons), label = "", m = :cross)
# scatter!(x_data , y_data, yaxis = :log10, yminorticks = 10, c = lost_photon_color(k,lost_photons), label = "", m = :cross)
# plot!(x_spl, y_spl, yaxis = :log10, yminorticks = 10, c = lost_photon_color(k,lost_photons), label = "l <= $lost")
plot!(x_spl, y_spl, c = c, label = label)
# plot!(η_array,tvd_η_array[k,:], label = "up to $lost lost", c = lost_photon_color(k,lost_photons))
end
add_line(1,0,lost_photon_color(0,1), L"reference")
for (k,lost) in enumerate(lost_photons) #Iterators.reverse(enumerate(lost_photons))
# if k>1
add_line(k,lost,lost_photon_color(k,lost_photons), (k == 1 ? "" : L"l \leq %$lost"))
# end
end
add_line(n+1,n, :red, L"all")
###
plt = plot!(legend=:topright)
# plot!(legend = false)
ylims!((0, 1.2 * maximum(speed_up_array)))
xlabel!(L"η")
ylabel!(L"speed up")
display(plt)
savefig(plt, "images/publication/speed_up_(n=$n m=$m n_subsets = $n_subsets alternative_x = $alternative_hypothesis_x).png")
end
end
###### density exponents ######
n_array = collect(4:1:18)
results = zeros((2, length(n_array)))
function exponent_fit(n)
max_density = 1
min_density = 0.03
steps = 10
n_iter = 100
invert_densities = [max_density * (max_density/min_density)^((i-1)/(steps-1)) for i in 1:steps]
m_array = Int.(ceil.(n * invert_densities))
partition_sizes = 2
tvd_array = zeros((length(partition_sizes), length(m_array)))
var_array = copy(tvd_array)
for (k,n_subsets) in enumerate(partition_sizes)
@show n_subsets
@showprogress for i in 1:length(m_array)
this_tvd = tvd_equilibrated_partition_real_average(m_array[i], n_subsets, n, niter = n_iter)
tvd_array[k,i] = (n_subsets <= m_array[i] ? this_tvd[1] : missing)
var_array[k,i] = (n_subsets <= m_array[i] ? this_tvd[2] : missing)
end
end
x_data = reverse(1 ./ invert_densities)
y_data = reverse(tvd_array[1,:])
get_power_law_log_log(x_data,y_data)
end
for (k,n) in enumerate(n_array)
@show n
this_run = exponent_fit(n)
results[1, k] = this_run[3]
results[2, k] = this_run[2]
end
plt = plot()
begin
col = [get(color_map, 0), get(color_map, 1)]
scatter!(n_array, results[2,:], label = L"r", c = col[1])
scatter!(n_array, results[1,:], label = L"c(2)", c = col[2])
ylims!((0,1.2))
hline!([mean(results[2,:])], c = col[1], linestyle = :dash, label = L"\langle r \rangle = %$(round(mean(results[2,:]), digits = 3))")
hline!([mean(results[1,:])], c = col[2], linestyle = :dash, label = L"\langle c(2) \rangle = %$(round(mean(results[1,:]), digits = 3))")
plot!(legend=:bottomright)
xlabel!(L"n")
xlims!((n_array[1]-1, n_array[end]+2))
display(plt)
end
savefig(plt, "images/publication/power_law_validity.png")
###### plot of decay of error bars with size ######
# we want to plot the decay of the sqrt(var)/mean of the TVD
n_max = 16
n_array = collect(8:n_max)
m_no_coll(n) = n^2
m_sparse(n) = 5n
n_iter = 100
partition_sizes = 2:3
laws = [m_sparse, m_no_coll]
# laws = [m_sparse]
plots = []
for m_law in laws
m_array = m_law.(n_array)
tvd_array = zeros((length(partition_sizes), length(m_array)))
var_array = copy(tvd_array)
for (k,n_subsets) in enumerate(partition_sizes)
@showprogress for (i,(n,m)) in enumerate(zip(n_array, m_array))
this_tvd = tvd_equilibrated_partition_real_average(m, n_subsets, n, niter = n_iter)
tvd_array[k,i] = (n_subsets <= m ? this_tvd[1] : missing)
var_array[k,i] = (n_subsets <= m ? this_tvd[2] : missing)
end
end
save("data/coefficient_variation$(String(Symbol(m_law))).jld", "tvd_array", tvd_array, "var_array" ,var_array)
end
begin
println("********")
for name in saved_names
plt = plot()
a = load(name)
partition_sizes = a["partition_sizes"]
n_array = a["n_array"]
tvd_array = a["tvd_array"]
var_array = a["var_array"]
# tvd_array
partition_color(k, partition_sizes) = get(color_map, (k-1) / (length(partition_sizes) -1))
for (k,K) in enumerate(partition_sizes)
x_data = n_array
y_data = sqrt.(var_array[k,:]) ./ tvd_array[k,:]
x_spl = collect(range(minimum(x_data),maximum(x_data), length = 1000))
spl = Spline1D(x_data,y_data)
y_spl = spl(x_spl)
legend_string = "K = $K"
# if m_law == m_sparse
# lr_func(x) = mean(y_data) # removed the slope
#
# plot!(x_spl, lr_func.(x_spl), c = partition_color(k,partition_sizes), label = LaTeXString(legend_string))
#
# else
lr_func = get_power_law_log_log(x_data,y_data)[1]
plot!(x_spl, lr_func.(x_spl), c = partition_color(k,partition_sizes), label = LaTeXString(legend_string))
# end
#
# scatter!(x_data , y_data, yerr = sqrt.(var_array[k,:]), c = lost_photon_color(k,lost_photons), label = "", m = :cross, xaxis=:log10)
#
# scatter!(x_data , y_data, yerr = sqrt.(var_array[k,:]), c = partition_color(k,partition_sizes), label = "", m = :cross, xaxis=:log10, yaxis = :log10)
scatter!(x_data , y_data, c = partition_color(k,partition_sizes), label = "", m = :cross)
ylims!((0, 1.2*maximum(y_data)), xticks = x_data)
# scatter!(x_data , y_data, yerr = sqrt.(var_array[k,:]), c = partition_color(k,partition_sizes), label = "", m = :cross, xaxis=:log10)
#ylims!((0.01,1))
#
# plot!(x_spl, y_spl, c = partition_color(k,partition_sizes), label = "K = $K", xaxis=:log10, yaxis =:log10)
# plot!(η_array,tvd_η_array[k,:], label = "up to $lost lost", c = lost_photon_color(k,lost_photons))
end
plt = plot!(legend=:topright)
xlabel!(L"n")
ylabel!(L"\sqrt{\sigma}/TVD(B,D)")
display(plt)
push!(plots, plt)
end
plt = plot(plots[1],plots[2], layout = (2,1))
name = saved_names[1]
a = load(name)
partition_sizes = a["partition_sizes"]
n_array = a["n_array"]
tvd_array = a["tvd_array"]
var_array = a["var_array"]
plot!(xticks = n_array)
ylims!((0,0.3))
display(plt)
savefig(plt, "images/publication/coefficient_variation_size.png")
end
############## end ###############
# cd("..")
# cd("..")
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 123 | using JLD
cd("docs/publication/partitions/data/save")
data = load("number_samples_x.jld")
print(data["n_samples_array"])
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 2475 | module BosonSampling
using Permanents
using Plots
using Test
using Combinatorics
using Random
using IterTools
using Statistics
using LinearAlgebra #removed so as to be able to use generic types such as BigFloats, can be put back if needed
using PolynomialRoots
using StatsBase
#using JLD
using CSV
using DataFrames
using Tables
using Plots#; plotly() #plotly allows to make "dynamic" plots where you can point the mouse and see the values, they can also be saved like that but note that this makes them heavy (and html instead of png)
using PrettyTables #to display large matrices
using Roots
using BenchmarkTools
using Optim
using ProgressMeter
using ProgressBars
using Parameters
using ArgCheck
using Distributions
using Luxor
using AutoHashEquals
using LinearRegression
using HypothesisTests
using SimpleTraits
using Parameters
using UnPack
using Dates
using JLD
using DelimitedFiles
using ColorSchemes
# using Distributed
# using SharedArrays
@consts begin
ATOL = 1e-10
SAFETY_FACTOR_FULL_BUNCHING = 10
end
include("special_matrices.jl")
include("matrix_tests.jl")
include("proba_tools.jl")
include("circuits/circuit_elements.jl")
include("types/type_functions.jl")
include("types/types.jl")
include("scattering.jl")
include("bunching/bunching.jl")
include("partitions/legacy.jl")
include("partitions/partition_expectation_values.jl")
include("partitions/partitions.jl")
include("boson_samplers/tools.jl")
include("boson_samplers/classical_sampler.jl")
include("boson_samplers/cliffords_sampler.jl")
include("boson_samplers/methods.jl")
include("boson_samplers/metropolis_sampler.jl")
include("boson_samplers/noisy_sampler.jl")
include("boson_samplers/sample.jl")
include("boson_samplers/gaussian_sampler.jl")
include("distributions/noisy_distribution.jl")
include("distributions/theoretical_distribution.jl")
include("permanent_conjectures/bapat_sunder.jl")
include("permanent_conjectures/counter_example_functions.jl")
include("permanent_conjectures/counter_example_numerical_search.jl")
include("permanent_conjectures/permanent_on_top.jl")
include("certification/experimental_data_generation.jl")
include("certification/bayesian.jl")
include("certification/correlators.jl")
include("visual.jl")
include("loop/loop_functions.jl")
include("experiments/data_conversion.jl")
permanent = ryser
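# Export every identifier defined in this module (except the module name itself, `eval` and `include`).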
for n in names(@__MODULE__; all=true)
if Base.isidentifier(n) && n ∉ (Symbol(@__MODULE__), :eval, :include)
@eval export $n
end
end
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 694 |
"""
    make_directory(path; delete_contents_if_existing = true)

Makes a directory and moves into it; if `delete_contents_if_existing` is set, any previous
contents of a similarly named directory are removed first.
"""
function make_directory(path; delete_contents_if_existing = true)
    try
        if delete_contents_if_existing
            rm(path, recursive = true)
        end
    catch err
    finally
        mkpath(path)
        cd(path)
    end
end
"""
    print_error(err)

Returns the message of the error `err` caught in a try/catch as a `String`.
"""
function print_error(err)
    buf = IOBuffer()
    showerror(buf, err)
    message = String(take!(buf))
end
# to save julia variables : using JLD2
function save_matrix(mat, filename = "matrix.csv")
CSV.write(filename, DataFrame(mat), header=false)
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 121 | using Revise
using BosonSampling
using Permanents
using PrettyTables
using ArgCheck
using Test
#
# using DocumenterTools
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 849 | function is_hermitian(M; atol = ATOL)
isapprox(M, M', atol = atol)
end
function is_positive_semidefinite(M; atol = ATOL)
!any(eigvals(M) .< - atol)
end
function is_a_gram_matrix(M; atol = ATOL)
is_hermitian(M, atol = atol) && is_positive_semidefinite(M, atol = atol) && all([isapprox(M[i,i], one(eltype(M)), atol = atol) for i in 1:size(M)[1]])
end
function is_unitary(U; atol = ATOL)
isapprox(U' * U, Matrix(I, size(U)), atol = atol)
end
function is_orthonormal(U; atol = ATOL)
for i = 1:size(U)[2]
isapprox(abs(norm(U[:, i])), 1., atol = atol) ? nothing : return false
for j = 1:size(U)[2]
if j < i
isapprox(abs(dot(U[:, i], U[:, j])), 0., atol = atol) ? nothing : return false
end
end
end
true # all columns are normalized and mutually orthogonal
end
function is_column_normalized(M; atol = ATOL)
for i = 1:size(M)[2]
@test abs(norm(M[:, i])) ≈ 1. atol = atol
end
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 3122 | """
    clean_proba(probability::Number, atol=ATOL)

Checks whether a (complex) number is close enough to a valid probability, within tolerance
`atol`. If so, converts it to a positive real number.
"""
function clean_proba(probability::Number, atol=ATOL)
"""checks if a (complex) number is a close enough probability
converts it to a positive real number if complex or just positive if real"""
not_a_proba() = error(probability , " is not a probability")
if real(probability) >= -ATOL && real(probability) <= 1 + ATOL
if isa(probability,Complex)
if abs(imag(probability)) <= ATOL
return abs(probability)
else
not_a_proba()
end
elseif isa(probability,Real)
return abs(probability)
else
error(typeof(probability), " probability type undefined")
end
else
not_a_proba()
end
end
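# Illustrative usage (with the module's ATOL = 1e-10):
# clean_proba(0.3 + 1e-12im)   # returns ≈ 0.3, the negligible imaginary part is dropped
# clean_proba(1.5)             # throws an error: not a probability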
"""
clean_pdf(A::Array, atol=ATOL)
Checks whether an array is an acceptable discrete probability distribution, within
tolerance `ATOL`. If so, converts its elements to normalized positive real numbers.
"""
function clean_pdf(A::Array, atol = ATOL)
"""checks if an array has all elements as acceptable probabilities within atol
and converts them to that and summing to one within length(A) * atol
and renormalizes"""
A .= clean_proba.(A)
normalization = sum(A)
if isapprox(normalization, 1, atol = length(A) * atol)
A .= 1/normalization * A
return convert(Vector{Real}, A)
else
error("A not normalized")
end
end
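
# Usage sketch (illustrative only): `clean_proba` strips a negligible imaginary
# part, `clean_pdf` additionally renormalizes a near-normalized distribution.
function example_clean_pdf()
    p = clean_proba(0.3 + 1e-12im)          # ≈ 0.3
    pdf = clean_pdf([0.5 + 1e-12im, 0.5])   # ≈ [0.5, 0.5], real and normalized
    (p, pdf)
end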
"""
isa_pdf(pdf)
Asserts that `pdf` is a valid probability distribution.
"""
function isa_pdf(pdf)
"""asserts if pdf is a probability distribution"""
clean_pdf(pdf)
end
"""
isa_probability(p)
Asserts that `p` is a valid probability.
"""
function isa_probability(probability::Number, atol=ATOL)
try
clean_proba(probability, atol)
return true
catch
return false
end
end
"""
tvd(a,b)
Computes the total variation distance between two probability distributions.
"""
function tvd(a,b)
"""total variation distance"""
sum(abs.(a-b))
end
"""
sqr(a,b)
Computes the Euclidean distance between two probability distributions.
"""
function sqr(a,b)
"""euclidian distance"""
sqrt(sum((a-b).^2))
end
function remove_nothing(trials)
new_trials = []
for trial in trials
if isa(trial, Number)
push!(new_trials, trial)
end
end
trials = new_trials
end
"""
get_power_law_log_log(x_data,y_data)
Gets a power law of type y = exp(c) * x^m from data that looks like a line in a loglog plot.
"""
function get_power_law_log_log(x_data,y_data)
@argcheck all(x_data .> 0)
@argcheck all(y_data .> 0)
x_data = log.(x_data)
y_data = log.(y_data)
lr = linregress(x_data,y_data)
m,c = lr.coeffs
println("power law: y = $(exp(c)) * x^$m")
power_law(x) = exp(c) * x^m
(power_law, m, exp(c))
end
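
# Usage sketch (illustrative only): recover the exponent and prefactor from
# synthetic data following y = 2 * x^1.5.
function example_power_law_fit()
    x_data = collect(1.0:10.0)
    y_data = 2 .* x_data .^ 1.5
    _, exponent, prefactor = get_power_law_log_log(x_data, y_data)
    (exponent, prefactor)   # ≈ (1.5, 2.0)
end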
function do_with_probability(p)
"""returns true with probability p, false with (1-p)"""
rand() < p ? true : false
end
function between_one_and_zero(a)
a >= 0 && a <=1
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 12871 | """
iterate_until_collisionless(f)
Sample `f` until the result is collisionless.
"""
function iterate_until_collisionless(f)
"""samples f until the result is collisionless
as it takes a function, example usage :
random_mode_occupation_collisionless(n::Int,m::Int) = iterate_until_collisionless(() -> random_mode_occupation(n,m))
"""
while true
state = f()
if is_collisionless(state)
return state
end
end
end
"""
fill_arrangement(occupation_vector)
fill_arrangement(r::ModeOccupation)
fill_arrangement(input::Input)
Convert a mode occupation list to a mode assignment list.
"""
function fill_arrangement(occupation_vector)
"""from a mode occupation list to a mode assignment list (Tichy : s to d(s))
For instance if vect{s} = (2,0,1) then vect{d} = (1,1,3) """
arrangement = zeros(eltype(occupation_vector), sum(occupation_vector))
position = 1
for i in 1:length(occupation_vector)
for j in 1:occupation_vector[i]
arrangement[position] = i
position += 1
end
end
arrangement
end
fill_arrangement(r::ModeOccupation) = fill_arrangement(r.state)
fill_arrangement(inp::Input) = fill_arrangement(inp.r)
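
# Usage sketch (illustrative only): the occupation [2,0,1] (two photons in mode 1,
# one in mode 3) corresponds to the mode assignment list [1,1,3].
function example_fill_arrangement()
    fill_arrangement([2,0,1]) == [1,1,3]
end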
"""
random_occupancy(n::Int, m::Int)
Return a vector of size `m` with `n` randomly placed ones.
"""
function random_occupancy(n::Int, m::Int)
""" returns a vector of size m with n randomly placed ones """
# if n > m
# throw(ArgumentError("Implemented at most one photon per mode"))
# else
# occupancy_vector = shuffle(append!(ones(n), zeros(m-n)))
# occupancy_vector = Int.(occupancy_vector)
# end
#
# return occupancy_vector
occupancy_vector = zeros(Int, m)
for i in 1:n
occupancy_vector[rand(1:m)] += 1
end
occupancy_vector
end
"""
random_mode_occupation(n::Int, m::Int)
Create a [`ModeOccupation`](@ref) from a mode occupation list of `n` randomly placed ones
among `m` sites.
"""
random_mode_occupation(n::Int, m::Int) = ModeOccupation(random_occupancy(n,m))
"""
random_mode_occupation_collisionless(n::Int, m::Int)
Create a [`ModeOccupation`](@ref) from a random mode occupation, resampling until it is collisionless.
"""
function random_mode_occupation_collisionless(n::Int, m::Int)
n<=m ? iterate_until_collisionless(() -> random_mode_occupation(n,m)) : error("n>m cannot make for collisionless mode occupations")
end
function random_mode_list_collisionless(n::Int, m::Int)
mo = random_mode_occupation_collisionless(n,m)
convert(ModeList, mo)
end
"""
at_most_one_photon_per_bin(occupancy_vector::Vector{Int})
check_at_most_one_particle_per_mode(occ)
Check whether `occupancy_vector` contains at most one photon per mode.
"""
function at_most_one_photon_per_bin(occupancy_vector::Vector{Int})
all(in([0,1]).(occupancy_vector))
end
check_at_most_one_particle_per_mode(occ) = at_most_one_photon_per_bin(occ) ? nothing : error("more than one input per mode")
"""
occupancy_vector_to_partition(occupancy_vector)
occupancy_vector_to_mode_occupancy(occupancy_vector)
Return a partition of occupied modes from an `occupancy_vector`.
"""
function occupancy_vector_to_partition(occupancy_vector)
partition = []
check_at_most_one_particle_per_mode(occupancy_vector)
# a partition cannot be defined by taking twice the same mode, it would lead to
# hidden errors in the results (double counting)
for (mode, selected) in enumerate(occupancy_vector)
if selected == 1
append!(partition, mode)
end
end
partition
end
"""
occupancy_vector_to_mode_occupancy(occupancy_vector)
Return a partition of occupied modes from an `occupancy_vector`.
"""
function occupancy_vector_to_mode_occupancy(occupancy_vector)
occupancy_vector_to_partition(occupancy_vector)
end
"""
mode_occupancy_to_occupancy_vector(mo::Vector{Int}, m::Int)
Goes from [2,2,5] to [0,2,0,0,1,0] (if m=6).
"""
function mode_occupancy_to_occupancy_vector(mo::Vector{Int}, m::Int)
ov = zeros(Int,m)
for mode in mo
ov[mode] += 1
end
ov
end
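
# Usage sketch (illustrative only): this is the inverse conversion of
# `fill_arrangement`, going from a mode assignment list back to an occupancy vector.
function example_mode_occupancy_to_occupancy_vector()
    mode_occupancy_to_occupancy_vector([2,2,5], 6) == [0,2,0,0,1,0]
end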
"""
scattering_matrix(U::Matrix, input_state::Vector{Int}, output_state::Vector{Int})
scattering_matrix(U::Interferometer, r::ModeOccupation, s::ModeOccupation)
scattering_matrix(U::Interferometer, i::Input, o::FockDetection)
Return the submatrix of `U` whose rows and columns are respectively defined by
`input_state` and `output_state`.
!!! note "Reference"
[http://arxiv.org/abs/quant-ph/0406127v1](http://arxiv.org/abs/quant-ph/0406127v1)
"""
function scattering_matrix(U::Matrix, input_state::Vector{Int}, output_state::Vector{Int})
"""
U = interferometer matrix, size m*m
input_state = input occupation number vector (s[i] is the number of photons in the ith input)
output_state = output occupation number vector (r[i] is the number of photons in the ith output)
n photons
follows http://arxiv.org/abs/quant-ph/0406127v1
"""
m = size(U,1)
n = sum(input_state)
if length(input_state) != m || length(output_state) != m
@show m
@show length(input_state)
@show length(output_state)
throw(DimensionMismatch())
elseif sum(input_state) != sum(output_state)
error("photon number not the same at input and output")
end
index_input = fill_arrangement(input_state)
index_output = fill_arrangement(output_state)
try
U[index_input,index_output]
catch err
if isa(err, MethodError)
copy(U)[index_input,index_output]
else
println(err)
end
end
end
scattering_matrix(interf::Interferometer, r::ModeOccupation, s::ModeOccupation) = scattering_matrix(interf.U,r.state,s.state)
scattering_matrix(interf::Interferometer, i::Input, o::FockDetection) = scattering_matrix(interf.U,i.r,o.s)
function vector_factorial(occupancy_vector)
"""returns the function mu at the denominator of event probabilities"""
prod(factorial.(occupancy_vector))
end
vector_factorial(r::ModeOccupation) = vector_factorial(r.state)
vector_factorial(i::Input) = vector_factorial(i.r)
vector_factorial(o::FockDetection) = vector_factorial(o.s)
"""
bosonic_amplitude(U, input_state, output_state, permanent=ryser)
process_amplitude(U, input_state, output_state, permanent=ryser)
Compute the probability amplitude to go from `input_state` to `output_state`
through the interferometer `U` in the [`Bosonic`](@ref) case.
"""
function bosonic_amplitude(U, input_state, output_state, permanent = ryser)
"""event amplitude"""
permanent(scattering_matrix(U, input_state, output_state))/sqrt(vector_factorial(input_state) * vector_factorial(output_state))
end
function process_amplitude(U, input_state, output_state, permanent = ryser)
bosonic_amplitude(U, input_state, output_state, permanent)
end
"""
bosonic_probability(U, input_state, output_state)
process_probability(U, input_state, output_state)
Compute the probability to go from `input_state` to `output_state`
through the interferometer `U` in the [`Bosonic`](@ref) case.
"""
function bosonic_probability(U, input_state, output_state)
"""bosonic process_probability"""
abs(process_amplitude(U, input_state, output_state))^2
end
function process_probability(U, input_state, output_state)
#@warn "obsolete function, use bosonic_probability or probability"
bosonic_probability(U, input_state, output_state)
end
"""
distinguishable_probability(U, input_state, output_state, permanent=ryser)
process_probability_distinguishable(U, input_state, output_state, permanent=ryser)
Compute the probability to go from `input_state` to `output_state` through
the interferometer `U` in the [`Distinguishable`](@ref) case.
"""
function distinguishable_probability(U, input_state, output_state, permanent = ryser)
"""distinguishable (or classical) process_probability"""
permanent(abs.(scattering_matrix(U, input_state, output_state)).^2)/(vector_factorial(input_state) * vector_factorial(output_state))
end
function process_probability_distinguishable(U, input_state, output_state, permanent = ryser)
#@warn "obsolete function, use distinguishable_probability or probability"
distinguishable_probability(U, input_state, output_state, permanent)
end
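
# Usage sketch (illustrative only): Hong-Ou-Mandel effect on a balanced beam
# splitter (the normalized 2x2 Fourier matrix, built with `fourier_matrix` from the
# matrices file of this package). The coincidence output [1,1] is suppressed for
# indistinguishable photons and has probability 1/2 for distinguishable ones.
function example_hong_ou_mandel()
    U = fourier_matrix(2)
    p_bos = bosonic_probability(U, [1,1], [1,1])            # ≈ 0
    p_dist = distinguishable_probability(U, [1,1], [1,1])   # ≈ 0.5
    (p_bos, p_dist)
end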
### need to implement partial distinguishability process probabilities ###
"""
process_probability_partial(U, S, input_state, output_state)
process_probability_partial(interf::Interferometer, input_state::Input{TIn} where {TIn<:PartDist},output_state::FockDetection)
Compute the probability to go from `input_state` to `output_state` through the
interferometer `U` in the [`PartDist`](@ref) case, where partial distinguishability is described
by the [`GramMatrix`](@ref) `S`.
!!! note "Reference"
[https://arxiv.org/abs/1410.7687](https://arxiv.org/abs/1410.7687)
"""
function process_probability_partial(U, S, input_state,output_state)
"""computes the partially distinguishable process probability according to Tichy's tensor permanent https://arxiv.org/abs/1410.7687"""
n = size(S,1)
@argcheck sum(input_state) == sum(output_state) "particles not conserved"
@argcheck sum(input_state) == n "S matrix doesn't have the same number of photons as the input"
M = scattering_matrix(U, input_state, output_state)
W = Array{eltype(U)}(undef, (n,n,n))
for ss in 1:n
for rr in 1:n
for j in 1:n
W[ss,rr,j] = M[ss, j] * conj(M[rr, j]) * S[rr, ss]
end
end
end
1/(vector_factorial(input_state) * vector_factorial(output_state)) * ryser_tensor(W)
end
process_probability_partial(interf::Interferometer, input_state::Input{TIn} where {TIn<:PartDist},output_state::FockDetection) = process_probability_partial(interf.U, input_state.G.S, input_state.r.state,output_state.s.state)
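
# Usage sketch (illustrative only): with the one-parameter Gram matrix
# `gram_matrix_one_param` (defined in the matrices file of this package), the
# coincidence probability of the Hong-Ou-Mandel example interpolates between the
# bosonic and distinguishable values; for a real overlap x one expects ≈ (1 - x^2)/2.
function example_partial_distinguishability_hom(x = 0.5)
    U = fourier_matrix(2)
    S = gram_matrix_one_param(2, x)
    process_probability_partial(U, S, [1,1], [1,1])
end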
"""
compute_probability!(ev::Event{TIn, TOut}) where {TIn<:InputType, TOut<:FockDetection}
Given an [`Event`](@ref), compute and store the probability to observe the outcome
`ev.output_measurement` when `ev.input_state` passes through the interferometer `ev.interferometer`.
"""
function compute_probability!(ev::Event{TIn,TOut}) where {TIn<:InputType, TOut<:FockDetection}
check_probability_empty(ev)
ev.proba_params.precision = eps()
ev.proba_params.failure_probability = 0
if TIn == Bosonic
ev.proba_params.probability = bosonic_probability(ev.interferometer.U, ev.input_state.r.state, ev.output_measurement.s.state)
elseif TIn == Distinguishable
ev.proba_params.probability = distinguishable_probability(ev.interferometer.U, ev.input_state.r.state, ev.output_measurement.s.state)
else
ev.proba_params.probability = process_probability_partial(ev.interferometer, ev.input_state, ev.output_measurement)
end
ev.proba_params.probability = clean_proba(ev.proba_params.probability)
end
"""
output_mode_occupation(number_photons, number_modes)
Return all possible mode assignment lists of `number_photons` photons among
`number_modes` modes, i.e. all vectors of length `number_photons` with entries in `1:number_modes`.
"""
function output_mode_occupation(number_photons, number_modes)
nlist = collect(1:number_modes)
for i = 1:number_photons-1
sublist = []
for j = 1:length(nlist)
for k = 1:number_modes
push!(sublist, vcat(nlist[j], k))
end
end
nlist = sublist
end
nlist
end
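
# Usage sketch (illustrative only): for 2 photons in 2 modes this enumerates the
# four mode assignment lists.
function example_output_mode_occupation()
    output_mode_occupation(2, 2) == [[1,1], [1,2], [2,1], [2,2]]
end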
"""
check_suppression_law(event)
Check if the event is suppressed according to the [rule](https://arxiv.org/pdf/1002.5038.pdf).
"""
function check_suppression_law(event)
if mod(sum(event), length(event)) != 0
return true
else
return false
end
end
### this is an old function that needs to be cleaned
# function partition_probability_distribution_distinguishable(part, U)
# """generates a vector giving the probability to have k photons in
# the partition part for the interferometer U by the random walk method
# discussed in a 22/02/21 email
# part is written like (1,2,4) if it is the output modes 1, 2 and 4"""
#
# function proba_photon_in_partition(i, part, U)
# """returns the probability that the photon i ends up in the set of outputs part, for distinguishable particles only"""
#
# sum([abs(U[i, j])^2 for j in part]) # note the inversion line column
# end
#
#
# function walk(probability_vector, walk_number, part, U)
#
# new_probability_vector = similar(probability_vector)
# n = size(U)[1]
# proba_this_walk = proba_photon_in_partition(walk_number, part, U)
#
# for i in 1 : n+1
# new_probability_vector[i] = (1-proba_this_walk) * probability_vector[i] + proba_this_walk * probability_vector[i != 1 ? i-1 : n+1]
# end
#
# new_probability_vector
# end
#
# n = size(U)[1]
# probability_vector = zeros(n+1)
# probability_vector[1] = 1
#
# for walk_number in 1 : n
# probability_vector = walk(probability_vector, walk_number, part, U)
# end
#
# if !(isapprox(sum(probability_vector), 1., rtol = 1e-8))
# println("WARNING : probabilites do not sum to one ($(sum(probability_vector)))")
# end
#
# probability_vector
# end
function is_collisionless(r)
all(r .<= 1)
end
is_collisionless(r::ModeOccupation) = is_collisionless(r.state)
is_collisionless(i::Input) = is_collisionless(i.r.state)
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 11682 | using LinearAlgebra
"""
fourier_matrix(n::Int; normalized=true)
Returns a ``n``-by-``n`` Fourier matrix with optional normalization (`true` by default).
"""
function fourier_matrix(n::Int; normalized = true)
U = Array{ComplexF64}(undef, n, n)
for i in 1:n
for j in 1:n
U[i,j] = exp((2im * pi / n)* (i-1) * (j-1))
end
end
if normalized
return 1/sqrt(n) * U
else
return U
end
end
"""
hadamard_matrix(n::Int, normalized=true)
Returns a ``n``-by-``n`` Hadamard matrix with optional normalization (`true` by default).
"""
function hadamard_matrix(n::Int, normalized = true )
H = Array{ComplexF64}(undef, n, n)
for i in 1:n
for j in 1:n
bi = map(x->parse(Int,x), split(bitstring(Int8(i-1)),""))
bj = map(x->parse(Int,x), split(bitstring(UInt8(j-1)),""))
H[i,j] = (-1)^(sum(bi.&bj))
end
end
if normalized
return 1/sqrt(n) * H
else
return H
end
end
"""
sylvester_matrix(p::Int; normalized=true)
Returns a ``2^p``-by-``2^p`` Sylvester matrix with optional normalization
(`true` by default) following [https://arxiv.org/abs/1502.06372](https://arxiv.org/abs/1502.06372).
"""
function sylvester_matrix(p; normalized = true)
"""returns the sylvester matrix of size 2^p"""
function sylvester_element(i,j,p)
"""returns the element A(i,j) for the sylvester matrix of size 2^p"""
# following https://arxiv.org/abs/1502.06372
if p == 0
return 1
else
i_b = digits(i-1, base = 2, pad = p+1)
j_b = digits(j-1, base = 2, pad = p+1)
return (-1)^(sum(i_b .* j_b))
end
end
A = Array{ComplexF64}(undef, 2^p, 2^p)
for i in 1:2^p
for j in 1:2^p
A[i,j] = sylvester_element(i,j,p)
end
end
normalized == true ? 1/sqrt(2^p) * A : A
end
"""
rand_haar(n::Int)
Returns a ``n``-by-``n`` Haar distributed unitary matrix following [https://case.edu/artsci/math/esmeckes/Meckes_SAMSI_Lecture2.pdf](https://case.edu/artsci/math/esmeckes/Meckes_SAMSI_Lecture2.pdf).
"""
function rand_haar(n::Int)
"""generates a Haar distributed unitary matrix of size n*n"""
# follows https://case.edu/artsci/math/esmeckes/Meckes_SAMSI_Lecture2.pdf
qr(randn(ComplexF64, n,n)).Q
end
function matrix_test(n::Int)
"""matrix of 1,2,3... for testing your code"""
U = Array{Int64}(undef, n, n)
for i in 1:n
for j in 1:n
U[i,j] = (i-1) * n + j
end
end
U
end
function antihermitian_test_matrix(n::Int)
h = randn(ComplexF64, n, n)
for i = 1:n
h[i,i] = 1im*imag(h[i,i])
end
for i = 1:n
for j = i+1:n
h[j,i] = -conj(h[i,j])
end
end
h
end
function hermitian_test_matrix(n::Int)
h = randn(ComplexF64, n,n)
for i = 1:n
h[i,i] = real(h[i,i])
end
for i = 1:n
for j = i+1:n
h[j,i] = conj(h[i,j])
end
end
h
end
function permutation_matrix_special_partition_fourier(n::Int)
"""P * [1,1,1,1,1,0,0,0,0,0] = [1, 0, 1, 0, 1, 0, 1, 0, 1, 0]"""
P = zeros(Int, n,n)
for i in 1:2:n
P[i, Int((i+1)/2)] = 1
end
for i in 2:2:n
P[i, Int(n/2 + i/2)] = 1
end
P
end
"""
rand_gram_matrix_from_orthonormal_basis(n::Int, r::Int)
Returns a ``n``-by-``n`` random Gram matrix built from an orthonormal set of `r` generating vectors.
"""
function rand_gram_matrix_from_orthonormal_basis(n,r)
"""generates a random gram matrix with the property that the
generating vector basis of r vectors is orthonormal (as is
strangely the case in Drury's work)"""
function normalized_random_vector(n)
v = rand(ComplexF64, n)
1/norm(v) .* v
end
if r >= n
throw(ArgumentError("need rank < dim to have a non trivial result"))
end
generating_vectors = 0.
#while det(generating_vectors) ≈ 0. #check it's a basis
generating_vectors = hcat([normalized_random_vector(n) for i = 1:r]...)
#### TODO caveat : we don't check for linear independence so there is a chance it doesn't work though highly unlikely
#end
generating_vectors = modified_gram_schmidt(generating_vectors)
is_orthonormal(generating_vectors)
generating_vectors * generating_vectors'
end
function modified_gram_schmidt(input_vectors)
"""does the modified gram schmidt (numerically stable) on the columns of the matrix input_vectors"""
function projector(a, u)
"""projector of a on u"""
dot(u,a)/dot(u,u) .* u
end
final_vectors = copy(input_vectors)
for i in 1:size(input_vectors)[2]
if i == 1
#normalize first vector
final_vectors[:,1] /= norm(final_vectors[:, 1])
else
for j in 1:i-1
final_vectors[:, i] -= projector(final_vectors[:, i], final_vectors[:, j])
end
final_vectors[:, i] /= norm(final_vectors[:, i])
end
end
is_orthonormal(final_vectors)
final_vectors
end
"""
rand_gram_matrix(n::Int)
Returns a full rank ``n``-by-``n`` random Gram matrix.
"""
function rand_gram_matrix(n::Int)
"""random gram matrix with full rank """
function normalized_random_vector(n)
v = rand(ComplexF64, n)
1/norm(v) .* v
end
generating_vectors = 0.
while det(generating_vectors) ≈ 0. #check it's a basis
generating_vectors = hcat([normalized_random_vector(n) for i = 1:n]...)
end
generating_vectors' * generating_vectors
end
"""
rand_gram_matrix_rank(n::Int, r::Int)
Returns a ``n``-by-``n`` random Gram matrix of rank at most `r` (and, with high probability, exactly `r`).
"""
function rand_gram_matrix_rank(n, r)
"""random gram matrix with rank at most r, and with great likelihood r
"""
function normalized_random_vector(r)
v = rand(ComplexF64, r)
1/norm(v) .* v
end
generating_vectors = 0.
generating_vectors = hcat([normalized_random_vector(r) for i = 1:n]...)
generating_vectors' * generating_vectors
end
"""
rand_gram_matrix_real(n::Int)
Returns a real ``n``-by-``n`` random Gram matrix of full rank.
"""
function rand_gram_matrix_real(n)
"""random gram matrix with full rank and real el"""
function normalized_random_vector(n)
v = rand(Float64, n)
1/norm(v) .* v
end
generating_vectors = 0.
while det(generating_vectors) ≈ 0. #check it's a basis
generating_vectors = hcat([normalized_random_vector(n) for i = 1:n]...)
end
generating_vectors' * generating_vectors
end
"""
rand_gram_matrix_positive(n::Int)
Returns a ``n``-by-``n`` random Gram matrix with positive elements and full rank.
"""
function rand_gram_matrix_positive(n::Int)
"""random gram matrix with full rank and positive el"""
function normalized_random_vector(n)
v = abs.(rand(Float64, n))
1/norm(v) .* v
end
generating_vectors = 0.
while det(generating_vectors) ≈ 0. #check it's a basis
generating_vectors = hcat([normalized_random_vector(n) for i = 1:n]...)
end
generating_vectors' * generating_vectors
end
# functions to clear matrices for better readability of a matrix by a human
function set_small_values_to_zero(x, eps)
abs(x) < eps ? 0.0 : x
end
function set_small_imag_real_to_zero(x, eps = 1e-7)
if abs(real(x)) < eps
x = 0.0 + 1im * imag(x)
end
if abs(imag(x)) < eps
x = real(x) + 0.0im
end
x
end
function make_matrix_more_readable(M)
set_small_imag_real_to_zero.(M)
end
function display_matrix_more_readable(M)
pretty_table(make_matrix_more_readable(M))
end
####### sub matrices ########
function remove_row_col(A, rows_to_remove, cols_to_remove)
"""hands back the matrix A without the rows labelled in rows_to_remove, columns in cols_to_remove
ex :
rows_to_remove = [1]
cols_to_remove = [2]
to have A(1,2) in the notations of Minc"""
n = size(A)[1]
m = size(A)[2]
range_rows = collect(1:n)
range_col = collect(1:m)
setdiff!(range_rows, rows_to_remove)
setdiff!(range_col, cols_to_remove)
A[range_rows, range_col]
end
### permanent related quantities ###
function sub_permanent_matrix(A)
n = size(A)[1]
sub_permanents = similar(A)
for i in 1 : n
for j in 1 : n
sub_permanents[i,j] = ryser(remove_row_col(A, [i], [j]))
end
end
sub_permanents
end
function gram_from_n_r_vectors(M)
# generate a random matrix of n r-vectors M
# normalize it to have a gram matrix
n = size(M)[1]
for i in 1 : n
M[:,i] ./= norm(M[:,i])
end
M' * M # our rank r n*n gram matrix
end
"""
gram_matrix_one_param(n::Int, x::Real)
Returns a ``n``-by-``n`` Gram matrix parametrized by the real ``0 ≤ x ≤ 1``.
"""
function gram_matrix_one_param(n::Int, x::Real)
S = 1.0 * Matrix(I, n, n)
for i in 1:n
for j in 1:n
i!=j ? S[i,j] = x : continue
end
end
S
end
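
# Usage sketch (illustrative only): x = 0 gives the identity (fully distinguishable
# photons), x = 1 gives the all-ones matrix (fully indistinguishable photons).
function example_gram_matrix_one_param()
    gram_matrix_one_param(3, 0.0) == Matrix(I, 3, 3) &&
        gram_matrix_one_param(3, 1.0) == ones(3, 3)
end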
function column_normalize(M)
for col in 1:size(M,2) #column normalize
M[:, col] = M[:, col]./norm(M[:, col])
end
M
end
"""
perturbed_gram_matrix(M, epsilon)
Returns a Gram matrix generated by the columns of `M` which are perturbed by
a Gaussian quantity of variance epsilon once normalized.
"""
function perturbed_gram_matrix(M, epsilon)
"""M defines the set of vectors generating the gram matrix
each column is a generating vector for the gram matrix
we perturb them by some random gaussian amount with set variance epsilon once normalized """
M = column_normalize(M)
d = Normal(0.0, epsilon)
perturbation_vector = rand(d,size(M))
M += perturbation_vector
M = column_normalize(M)
M' * M
end
"""
perturbed_unitary(U, epsilon)
Returns a unitary matrix whose columns are generating vectors perturbed by a
random Gaussian quantity with variance epsilon once normalized.
"""
function perturbed_unitary(U, epsilon)
"""U a unitary matrix each column is a generating vector
we perturb them by some random gaussian amount with set variance epsilon once normalized """
d = Normal(0.0, epsilon)
perturbation_vector = rand(d,size(U))
U += perturbation_vector
U = modified_gram_schmidt(U)
U
end
"""
direct_sum(A::Matrix, B::Matrix)
Performs the matrix direct sum between `A` and `B`.
"""
direct_sum(A::Matrix, B::Matrix) = [A zeros(size(A)[1], size(B)[2]);
zeros(size(B)[1], size(A)[1]) B]
"""
symplectic_mat(n::Int)
Returns the symplectic matrix ``J`` of dimension `2n`:
```math
J =
\begin{pmatrix}
0_n & I_n \\
-I_n & 0_n
\end{pmatrix}
```
"""
function symplectic_mat(n::Int)
id = Matrix{Float64}(I,n,n)
Z = zeros(Float64,n,n)
return [Z id; -id Z]
end
"""
X_mat(n::Int)
Returns the ``X`` matrix of dimension `2n` defined as
```math
X =
\begin{pmatrix}
0_n & I_n \\
I_n & 0_n
\end{pmatrix}
```
!!! note "Reference"
[The Boundary for Quantum Advantage in Gaussian Boson Sampling](https://arxiv.org/pdf/2108.01622.pdf)
"""
function X_mat(n::Int)
id = Matrix{Float64}(I,n,n)
Z = zeros(Float64,n,n)
return [Z id; id Z]
end
"""
husimiQ_matrix(V::Matrix)
Returns the complex-valued covariance of the state's Husimi Q-function.
!!! note "Reference"
[The Walrus documentation](https://the-walrus.readthedocs.io/en/latest/_modules/thewalrus/quantum/conversions.html#Amat)
"""
function husimiQ_matrix(V::Matrix)
n = div(LinearAlgebra.checksquare(V), 2)
x = V[1:n, 1:n]
xp = V[1:n, n+1:2n]
p = V[n+1:2n, n+1:2n]
a_dag_a = (x+p+(xp-transpose(xp))im - 2*Matrix{ComplexF64}(I,n,n)) / 4
aa = (x-p+(xp+transpose(xp))im) / 4
return [a_dag_a conj(aa); aa conj(a_dag_a)] + Matrix{ComplexF64}(I, 2n, 2n)
end
"""
A_mat(V::Matrix)
Return the matrix ``A`` defined from the Wigner covariance matrix ``V``.
!!! note "Reference"
[The Boundary for Quantum Advantage in Gaussian Boson Sampling](https://arxiv.org/pdf/2108.01622.pdf)
"""
function A_mat(V::Matrix)
id = Matrix{eltype(V)}(I,size(V))
X = X_mat(div(size(V)[1],2))
O = id - inv(husimiQ_matrix(V))
return X * conj(O)
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 1738 | """
visualize_sampling(input::Input, output)
Draw a schematic representation of the setup.
"""
function visualize_sampling(input::Input, output)
Drawing(1500, 1500, "basic_test.png")
origin()
background("white")
setopacity(1)
setline(8)
sethue("black")
polysmooth(box(O, 1000, 720, vertices=true), 10, :stroke)
fontsize(120)
fontface("italic")
textcentred("U")
setline(5)
inter = 700/(input.m-1)
counter = 1
for i in 10:inter:710
line(Point(-550,i-360), Point(-500,i-360), :stroke)
line(Point(500,i-360), Point(550,i-360), :stroke)
if input.r.state[counter] == 1
p = Point(-550,i-360)
sethue("blue")
circle(p,15,:fill)
sethue("black")
end
counter += 1
end
sethue("red")
fontsize(25)
fontface("italic")
counter = countmap(output)
for i = 1:length(output)
p = Point(550, output[i]*inter-350-inter)
circle(p,15,:fill)
if counter[output[i]] > 1
sethue("black")
Luxor.text(string(counter[output[i]]), p, valign=:center, halign=:center)
sethue("red")
end
end
clipreset()
finish()
preview()
end
"""
visualize_proba(input::Input, output, data)
Draw a schematic representation associated to the event with probability `data`.
"""
function visualize_proba(input::Input, output, data)
nlist = output_mode_occupation(input.n, input.m)
if output in nlist
idx = findfirst(x -> x==output, nlist)
print("event probability: ", data[idx])
visualize_sampling(input, output)
else
throw(ArgumentError("invalid argument(s)"))
end
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 969 | """
classical_sampler(U, n, m)
classical_sampler(;input::Input, interf::Interferometer)
Sample photons according to the [`Distinguishable`](@ref) case.
"""
function classical_sampler(U, n, m)
output_state = zeros(Int,m)
output_modes = collect(1:m)
#@warn "check U or U'"
for j in 1:n
this_output_mode = wsample(output_modes, abs.(U[j,:]) .^2)
output_state[this_output_mode] += 1
end
output_state
end
classical_sampler(;input::Input, interf::Interferometer) = classical_sampler(interf.U, input.n, input.m)
"""
classical_sampler(ev::Event{TIn, TOut}; occupancy_vector = true) where {TIn<:InputType, TOut <: FockSample}
Sampler for an `Event`. Note the difference of behaviour if `occupancy_vector = true`.
"""
function classical_sampler(ev::Event{TIn, TOut}; occupancy_vector = true) where {TIn<:InputType, TOut <: FockSample}
s = classical_sampler(input = ev.input_state, interf = ev.interferometer)
# classical_sampler already returns an occupancy vector; convert to a mode
# assignment list when occupancy_vector = false
occupancy_vector ? s : fill_arrangement(s)
end
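
# Usage sketch (illustrative only): sample 3 distinguishable photons through a
# Haar-random 6-mode interferometer; the result is an occupancy vector summing to 3.
function example_classical_sampler()
    U = rand_haar(6)
    s = classical_sampler(U, 3, 6)
    sum(s) == 3
end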
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 1488 | """
cliffords_sampler(;input::Input, interf::Interferometer)
Sample photons according to the [`Bosonic`](@ref) case following
[Clifford & Clifford](https://arxiv.org/pdf/2005.04214.pdf) algorithm performed
(at most) in ``O(n2^m + Poly(n,m))`` time and ``O(m)`` space.
The permanent evaluations in [`LaplaceExpansion`](@ref) are spread over the available Julia threads.
"""
function cliffords_sampler(;input::Input, interf::Interferometer)
m = input.m
n = input.n
A = interf.U[:,1:n]
A = A[:, shuffle(1:end)]
weight_array = map(a -> abs(a).^2, A[:,1])
sample_array = [wsample(1:m, Weights(weight_array))]
for k in 2:n
subA = A[:,1:k]
@inbounds permanent_matrix = reshape(subA[sample_array, :], k-1, k)
unormalized_pmf = LaplaceExpansion(permanent_matrix, subA)
weight_array = unormalized_pmf/sum(unormalized_pmf)
push!(sample_array, wsample(1:m, Weights(weight_array)))
end
return sort(sample_array)
end
"""
cliffords_sampler(ev::Event{TIn, TOut}; occupancy_vector = true) where {TIn<:InputType, TOut <: FockSample}
Sampler for an [`Event`](@ref). Note the difference of behaviour if `occupancy_vector = true`.
"""
function cliffords_sampler(ev::Event{TIn, TOut}; occupancy_vector = true) where {TIn<:InputType, TOut <: FockSample}
s = cliffords_sampler(input = ev.input_state, interf = ev.interferometer)
occupancy_vector ? mode_occupancy_to_occupancy_vector(s, ev.input_state.m) : s
end
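
# Usage sketch (illustrative only): draw one bosonic sample for 3 photons injected
# in the first modes of a 6-mode Haar-random interferometer. The direct call
# returns a sorted mode assignment list, while the `Event`-based method above
# returns an occupancy vector by default.
function example_cliffords_sampler()
    input = Input{Bosonic}(ModeOccupation([1, 1, 1, 0, 0, 0]))
    interf = RandHaar(6)
    cliffords_sampler(input = input, interf = interf)
end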
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 11072 | """
gaussian_sampler(;input::GaussianInput{T}, nsamples=Int(1e3), burn_in=200, thinning_rate=100, mean_n=nothing) where {T<:Gaussian}
Simulate noiseless Gaussian Boson Sampling with photon number resolving detectors via a MIS sampler.
Post-select the proposed states by fixing the mean photon number `mean_n`.
!!! note "Reference"
[The Boundary for Quantum Advantage in Gaussian Boson Sampling](https://arxiv.org/pdf/2108.01622.pdf)
"""
function gaussian_sampler_PNRD(;input::GaussianInput{T}, nsamples=Int(1e3), burn_in=200, thinning_rate=100, mean_n=nothing) where {T<:Gaussian}
Ri = input.displacement
V = input.covariance_matrix
n = div(LinearAlgebra.checksquare(V), 2)
mean_n == nothing ? mean_n = input.n : nothing
xx_pp_ordering = vcat([i for i in 1:2:2n-1], [i for i in 2:2:2n])
Ri = Ri[xx_pp_ordering]
V = V[xx_pp_ordering, xx_pp_ordering]
D, S = williamson_decomp(V)
id = Matrix{eltype(V)}(I, size(V))
Did = D - id
for i in 1:size(Did)[1]
for j in 1:size(Did)[2]
Did[i,j] < 0 ? Did[i,j] = 0 : nothing
end
end
Tv = S * transpose(S)
W = S * Did * transpose(S)
sqrtW = real.(S * sqrt.(Did))
A = A_mat(Tv)
Q = husimiQ_matrix(Tv)
detQ = det(Q)
B = A[1:n, 1:n]
B2 = abs.(B).^2
# sample_R() = sqrtW * rand(MvNormal(Ri, id))
function sample_R()
R = sqrtW * rand(MvNormal(Ri, id))
return R[xx_pp_ordering]
end
compute_displacement(R::AbstractVector) = [(R[i]+im*R[i+n])/sqrt(2) for i in 1:n]
sample_displacement() = compute_displacement(sample_R())
function sample_pattern()
displacement = sample_displacement()
G = abs.(displacement).^2
pattern = [rand(Poisson(g)) for g in G]
for j in 1:length(displacement)
pattern[j] += 2 * rand(Poisson(0.5 * B2[j,j]))
for k in j+1:length(displacement)
m = rand(Poisson(B2[j,k]))
pattern[j] += m
pattern[k] += m
end
end
return pattern, displacement
end
function valid_sampling()
pattern = []
displacement = []
count = nothing
while count != mean_n
pattern, displacement = sample_pattern()
count = sum(pattern)
end
return pattern, displacement
end
function sampled_probability(pattern, displacement)
G = abs.(displacement).^2
Gn = duplicate_row_col(G, pattern)
Bn = duplicate_row_col(B2, pattern)
for i in 1:length(Gn)
Bn[i,i] = Gn[i]
end
lhaf = hafnian(Bn, loop=true)
factor = exp(-sum(G))
rescale = prod(factorial.(pattern))
return real((factor*lhaf)/rescale)
end
function target_probability(pattern, displacement)
gamma = displacement .- B*conj(displacement)
gamma_n = duplicate_row_col(gamma, pattern)
Bn = duplicate_row_col(B, pattern)
for i in 1:length(gamma_n)
Bn[i,i] = gamma_n[i]
end
lhaf = hafnian(Bn, loop=true)
arg_exp = -1/2 * (norm(displacement)^2 - dot(conj(displacement), B*conj(displacement)))
factor = abs(exp(arg_exp))^2
rescale = prod(factorial.(pattern)) * sqrt(detQ)
return real(factor * lhaf * conj(lhaf) / rescale)
end
pattern, displacement = valid_sampling()
chain_pattern_ = [pattern]
pattern_chain = pattern
p_proposal_chain = sampled_probability(pattern, displacement)
proposal_ = [p_proposal_chain]
p_target_chain = target_probability(pattern, displacement)
target_ = [p_target_chain]
outcomes = Vector{Int8}(undef, nsamples)
for i in ProgressBar(1:nsamples)
pattern, displacement = valid_sampling()
p_proposal = sampled_probability(pattern, displacement)
p_target = target_probability(pattern, displacement)
p_accept = min(1, (p_proposal_chain*p_target)/(p_target_chain*p_proposal))
if rand() <= p_accept
push!(chain_pattern_, pattern)
p_proposal_chain = p_proposal
p_target_chain = p_target
pattern_chain = pattern
outcomes[i] = 1
else
push!(chain_pattern_, pattern_chain)
outcomes[i] = 0
end
push!(proposal_, p_proposal)
push!(target_, p_target)
end
res = chain_pattern_[burn_in:burn_in+thinning_rate]
outcomes = outcomes[burn_in:burn_in+thinning_rate]
idx = findall(el -> el!=0, outcomes)
return res
end
function gaussian_sampler_treshold(;input::GaussianInput{T}, nsamples=Int(1e3), burn_in=200, thinning_rate=100, mean_click=nothing) where {T<:Gaussian}
Ri = input.displacement
V = input.covariance_matrix
n = div(LinearAlgebra.checksquare(V), 2)
xx_pp_ordering = vcat([i for i in 1:2:2n-1], [i for i in 2:2:2n])
V = V[xx_pp_ordering, xx_pp_ordering]
Ri = Ri[xx_pp_ordering]
D, S = williamson_decomp(V)
id = Matrix{eltype(V)}(I, size(V))
Did = D - id
for i in 1:size(Did)[1]
for j in 1:size(Did)[2]
Did[i,j] < 0 ? Did[i,j] = 0 : nothing
end
end
Tv = S * transpose(S)
W = S * Did * transpose(S)
sqrtW = real.(S * sqrt.(Did))
A = A_mat(Tv)
Q = husimiQ_matrix(Tv)
detQ = det(Q)
B = A[1:n, 1:n]
B2 = abs.(B).^2
function sample_R(cov::AbstractMatrix)
R = cov * rand(MvNormal(Ri, id))
return R[xx_pp_ordering]
end
compute_displacement(R::AbstractVector) = [(R[i]+im*R[i+n])/sqrt(2) for i in 1:n]
sample_displacement() = compute_displacement(sample_R(sqrtW))
function sample_pattern()
R = sample_R(sqrtW)
displacement = compute_displacement(R)
G = abs.(displacement).^2
pattern = [rand(Poisson(g)) for g in G]
for j in 1:length(displacement)
pattern[j] += 2 * rand(Poisson(0.5 * B2[j,j]))
for k in j+1:length(displacement)
m = rand(Poisson(B2[j,k]))
pattern[j] += m
pattern[k] += m
end
end
return pattern, R
end
function valid_sampling()
count = nothing
photon_pattern = []
click_modes = []
R = []
while count != mean_click
photon_pattern, R = sample_pattern()
click_modes = findall(el -> el!=0, photon_pattern)
count = length(click_modes)
end
click_pattern = zeros(Int64, n)
click_pattern[click_modes] .= 1
x = zeros(Float64, n)
for i in click_modes
d = Pareto(photon_pattern[i], 1)
x[i] = 1/rand(Pareto(photon_pattern[i],1))
end
η = 1 .- x
Rx = copy(R)
Vx = copy(V)
for i in 1:n
Vx[i,:] *= sqrt(η[i])
Vx[i+n,:] *= sqrt(η[i])
Vx[:,i] *= sqrt(η[i])
Vx[:,i+n] *= sqrt(η[i])
Vx[i,i] += (1-η[i])
Vx[i+n,i+n] += (1-η[i])
Rx[i] *= sqrt(η[i])
Rx[i+n] *= sqrt(η[i])
end
Dx, Sx = williamson_decomp(Vx)
Tx = Sx * transpose(Sx)
Didx = Dx - id
for i in 1:size(Didx)[1]
for j in 1:size(Didx)[2]
Didx[i,j] < 0 ? Didx[i,j] = 0 : nothing
end
end
sqrtWx = real.(Sx * sqrt.(Didx))
Rx .+= sample_R(sqrtWx)
return click_pattern, R, x, Rx, Tx
end
function sampled_probability(pattern, dx, Cx)
dxn = duplicate_row_col(dx, pattern)
Cxn = duplicate_row_col(Cx, pattern)
for i in 1:length(dxn)
Cxn[i,i] = dxn[i]
end
lhaf = hafnian(Cxn, loop=true)
factor = real.(exp(-0.5 * sum(Cx[:]) - sum(dx)))
return lhaf * factor
end
function target_probability(pattern, displacement, Tx)
Qx = husimiQ_matrix(Tx)
Ax = A_mat(Tx)
Bx = conj(Ax[1:n, n+1:2n])
γ = displacement - Bx * conj(displacement)
γn = duplicate_row_col(γ, pattern)
Bn = duplicate_row_col(Bx, pattern)
for i in 1:length(γn)
Bn[i,i] = γn[i]
end
lhaf = hafnian(Bn, loop=true)
arg_exp = -1/2 * (norm(displacement)^2 - dot(conj(displacement), Bx*conj(displacement)))
factor = abs(exp(arg_exp))^2
rescale = real(sqrt(det(Qx)))
return real(factor * lhaf * conj(lhaf) / rescale)
end
function apply_loss(x, displacement)
Cx = copy(B2)
Gxi = abs.(displacement).^2
Gx = copy(Gxi)
η = 1 .- x
for i in 1:n
for j in 1:n
Cx[i,j] *= η[i] * η[j]
Gx[i] += x[j] * B2[i,j]
end
Gx[i] *= η[i]
end
return Cx, Gx
end
click_pattern, R, x, Rx, Tx = valid_sampling()
displacement = compute_displacement(R)
displacement_x = compute_displacement(Rx)
Cx, dx = apply_loss(x, displacement)
p_proposal_chain = sampled_probability(click_pattern, dx, Cx)
p_target_chain = target_probability(click_pattern, displacement_x, Tx)
chain_pattern_ = [click_pattern]
pattern_chain = click_pattern
proposal_ = [p_proposal_chain]
target_ = [p_target_chain]
outcomes = Vector{Int8}(undef, nsamples)
for i in ProgressBar(1:nsamples)
click_pattern, R, x, Rx, Tx = valid_sampling()
displacement = compute_displacement(R)
displacement_x = compute_displacement(Rx)
Cx, dx = apply_loss(x, displacement)
p_proposal = sampled_probability(click_pattern, dx, Cx)
p_target = target_probability(click_pattern, displacement_x, Tx)
p_accept = min(1, (p_proposal_chain*p_target)/(p_target_chain*p_proposal))
if rand() <= p_accept
push!(chain_pattern_, click_pattern)
p_proposal_chain = p_proposal
p_target_chain = p_target
pattern_chain = click_pattern
outcomes[i] = 1
else
push!(chain_pattern_, pattern_chain)
outcomes[i] = 0
end
push!(proposal_, p_proposal)
push!(target_, p_target)
end
res = chain_pattern_[burn_in:burn_in+thinning_rate]
outcomes = outcomes[burn_in:burn_in+thinning_rate]
idx = findall(el -> el!=0, outcomes)
return res[idx]
end
function gaussian_sampler(ev::GaussianEvent{TIn,TOut}; nsamples=Int(1e3), burn_in=200, thinning_rate=100, mean_val=nothing) where {TIn<:Gaussian, TOut<:Union{FockSample,TresholdDetection}}
if TOut == FockSample
gaussian_sampler_PNRD(input=ev.input_state, nsamples=nsamples, burn_in=burn_in, thinning_rate=thinning_rate, mean_n=mean_val)
elseif TOut == TresholdDetection
mean_val == nothing ? mean_val = 1 : nothing
return gaussian_sampler_treshold(input=ev.input_state, nsamples=nsamples, burn_in=burn_in, thinning_rate=thinning_rate, mean_click=mean_val)
else
error("Measurement ", TOut, " not implemented")
end
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 1480 | function sampling(;input::Input, interf::Interferometer, distinguishability::Real, reflectivity::Real, exact=nothing)
if exact == nothing || exact == true
if distinguishability == 1 && reflectivity == 1
return cliffords_sampler(input=input, interf=interf)
elseif distinguishability == 0 && reflectivity == 1
return classical_sampler(input=input, interf=interf)
else
throw(ArgumentError("need approximation"))
end
else
if distinguishability == 1 && reflectivity == 1
n = input.r.n
m = input.r.m
U = copy(interf.U)
# generate a collisionless state as a starting point
starting_state = iterate_until_collisionless(() -> random_occupancy(n,m))
known_pdf(state) = process_probability_distinguishable(U, input.r.state, state)
target_pdf(state) = process_probability(U, input.r.state, state)
known_sampler = () -> iterate_until_collisionless(() -> classical_sampler(U, n, m)) # gives a classical sampler
return metropolis_sampler(;target_pdf = target_pdf, known_pdf = known_pdf , known_sampler = known_sampler , starting_state = starting_state, n_iter = 100)
else
return noisy_sampling(input=input, distinguishability=distinguishability, reflectivity=reflectivity, interf=interf)
end
end
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 1624 | function metropolis_sampler(;target_pdf, known_pdf, known_sampler, starting_state, n_iter, n_burn = 100, n_thinning = 100)
function transition_probability(target_pdf, known_pdf, new_state, previous_state)
"""metropolis_independant_sampler transition probability, see eq 1 in https://arxiv.org/abs/1705.00686"""
min(1, target_pdf(new_state) * known_pdf(previous_state)/(target_pdf(previous_state) * known_pdf(new_state)))
end
function update_state_parameters(target_pdf, known_pdf, new_state, previous_state)
if do_with_probability(transition_probability(target_pdf, known_pdf, new_state, previous_state))
return new_state
else
return previous_state
end
end
function metropolis_iteration!(target_pdf, known_pdf, known_sampler, new_state, previous_state)
new_state[:] = known_sampler()[:]
new_state[:] = update_state_parameters(target_pdf, known_pdf, new_state, previous_state)[:]
previous_state[:] = new_state[:]
end
samples = Array{eltype(starting_state)}(undef, n_iter, size(starting_state,1))
previous_state = similar(starting_state) #x
new_state = similar(previous_state)
previous_state = starting_state
if n_burn == n_thinning
@showprogress for iter in 1:n_iter
for i in 1:n_thinning
metropolis_iteration!(target_pdf, known_pdf, known_sampler, new_state, previous_state)
end
samples[iter,:] = new_state[:]
end
else
error("n_burn != n_thinning not implemented")
end
samples
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 2378 | """
metropolis_sampler(;target_pdf, known_pdf, known_sampler, starting_state, n_iter, n_burn = 100, n_thinning = 100)
Implement a Metropolis independence sampler for standard boson sampling, following the reference below. The burn-in
period `n_burn` and the thinning interval `n_thinning` both have a default value of 100.
!!! note "Reference"
[https://arxiv.org/abs/1705.00686](https://arxiv.org/abs/1705.00686): as the paper is limited to collisionless events, we keep track of this thanks
to [`iterate_until_collisionless`](@ref).
!!! warning
Burn-in period and thinning interval must have the same value.
"""
function metropolis_sampler(;target_pdf, known_pdf, known_sampler, starting_state, n_iter, n_burn = 100, n_thinning = 100)
function transition_probability(target_pdf, known_pdf, new_state, previous_state)
"""metropolis_independant_sampler transition probability, see eq 1 in https://arxiv.org/abs/1705.00686"""
min(1, target_pdf(new_state) * known_pdf(previous_state)/(target_pdf(previous_state) * known_pdf(new_state)))
end
function do_with_probability(p)
"""returns true with probability p, false with (1-p)"""
rand() < p ? true : false
end
function update_state_parameters(target_pdf, known_pdf, new_state, previous_state)
if do_with_probability(transition_probability(target_pdf, known_pdf, new_state, previous_state))
return new_state
else
return previous_state
end
end
function metropolis_iteration!(target_pdf, known_pdf, known_sampler, new_state, previous_state)
new_state[:] = known_sampler()[:]
new_state[:] = update_state_parameters(target_pdf, known_pdf, new_state, previous_state)[:]
previous_state[:] = new_state[:]
end
samples = Array{eltype(starting_state)}(undef, n_iter, size(starting_state,1))
previous_state = similar(starting_state) #x
new_state = similar(previous_state)
previous_state = starting_state
if n_burn == n_thinning
@showprogress for iter in 1:n_iter
for i in 1:n_thinning
metropolis_iteration!(target_pdf, known_pdf, known_sampler, new_state, previous_state)
end
samples[iter,:] = new_state[:]
end
else
error("n_burn != n_thinning not implemented")
end
samples
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 2027 | """
noisy_sampler(;input::Input, loss::Real, interf::Interferometer)
Sample partially-distinguishable photons through a lossy interferometer, which
runs (at most) in ``O(n2^m + Poly(n,m))`` time. In the implementation below, `loss`
is the probability for each photon to be transmitted (`loss = 1` means a lossless interferometer).
!!! note "Reference"
[https://arxiv.org/pdf/1907.00022.pdf](https://arxiv.org/pdf/1907.00022.pdf)
"""
function noisy_sampler(;input::Input, loss::Real, interf::Interferometer)
list_assignement = fill_arrangement(input.r.state)
l = rand(Binomial(input.n, loss))
remaining_photons = Int.(zeros(input.m))
remaining_subset = rand(collect(multiset_combinations(list_assignement, l)))
remaining_photons[remaining_subset] .= 1
list_assignement = fill_arrangement(remaining_photons)
i = rand(Binomial(l, input.distinguishability_param))
bosonic_input = Int.(zeros(input.m))
bosonic_subset = rand(collect(multiset_combinations(list_assignement, i)))
bosonic_input[bosonic_subset] .= 1
classical_input = remaining_photons .- bosonic_input
classical_input = Input{Distinguishable}(ModeOccupation(classical_input))
bosonic_input = Input{Bosonic}(ModeOccupation(bosonic_input))
if classical_input.r.state != zeros(input.m)
classical_output = fill_arrangement(classical_sampler(input=classical_input, interf=interf))
else
classical_output = []
end
if bosonic_input.r.state != zeros(input.m)
bosonic_output = cliffords_sampler(input=bosonic_input, interf=interf)
else
bosonic_output = []
end
return sort(append!(classical_output, bosonic_output))
end
"""
noisy_sampler(ev::Event{TIn,TOut}, loss::Real; occupancy_vector=true) where {TIn<:InputType, TOut<:FockSample}
Noisy sampler for an [`Event`](@ref).
"""
function noisy_sampler(ev::Event{TIn,TOut}, loss::Real; occupancy_vector=true) where {TIn<:InputType, TOut<:FockSample}
s = noisy_sampler(input=ev.input_state, loss=loss, interf=ev.interferometer)
occupancy_vector ? mode_occupancy_to_occupancy_vector(Vector{Int64}(s), ev.input_state.m) : s
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 637 | # function sample!(ev::Event{TIn, TOut}) where {TIn<:InputType, TOut <: PartitionSample}
#
# check_probability_empty(ev)
#
# if TIn == Distinguishable
# ev.output_measurement.s = ModeOccupation(classical_sampler(ev))
# elseif TIn == Bosonic
# ev.output_measurement.s = ModeOccupation(cliffords_sampler(ev))
# else
# error("not implemented")
# end
#
# end
#
#
# ev = Event(ib,PartitionCount(wsample(pb.counts, pb.proba)), interf)
########this would not be efficient because recomputing compute_probability!(ev_theory) each time...
####### although we could store it somewhere and pass it out
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 3734 | """
sample!(ev::Event{TIn, TOut}) where {TIn<:InputType, TOut <: FockSample}
sample!(ev::Event{TIn,TOut}, loss::Real) where {TIn<:InputType, TOut<:FockSample}
Simulate a boson sampling experiment from a given [`Event`](@ref).
"""
function sample!(ev::Event{TIn, TOut}) where {TIn<:InputType, TOut <: FockSample}
check_probability_empty(ev)
if TIn == Distinguishable
ev.output_measurement.s = ModeOccupation(classical_sampler(ev))
elseif TIn == Bosonic
ev.output_measurement.s = ModeOccupation(cliffords_sampler(ev))
elseif TIn == OneParameterInterpolation
ev.output_measurement.s = ModeOccupation(noisy_sampler(ev,1))
else
error("not implemented")
end
end
function sample!(ev::Event{TIn,TOut}, loss::Real) where {TIn<:InputType, TOut<:FockSample}
check_probability_empty(ev)
if TIn <: PartDist
if TIn == OneParameterInterpolation
ev.output_measurement.s = ModeOccupation(noisy_sampler(ev,loss))
else
error("model is not valid")
end
else
error("not implemented")
end
end
# define the sampling algorithm:
# the function sample! is modified to take into account
# the new measurement type
# this allows to keep the same syntax and in fact reuse
# any function that would have previously used sample!
# at no cost
function sample!(ev::Event{TIn, TOut}) where {TIn<:InputType, TOut <: DarkCountFockSample}
# sample without dark counts
ev_no_dark = Event(ev.input_state, FockSample(), ev.interferometer)
sample!(ev_no_dark)
sample_no_dark = ev_no_dark.output_measurement.s
# now, apply the dark counts to "perfect" samples
observe_dark_count(p) = Int(do_with_probability(p)) # 1 with probability p, 0 with probability 1-p
dark_counts = [observe_dark_count(ev.output_measurement.p) for i in 1: ev.input_state.m]
ev.output_measurement.s = sample_no_dark + dark_counts
end
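
# Usage sketch (illustrative only). The constructor `DarkCountFockSample(p)` is
# assumed here from the field `output_measurement.p` accessed above, with `p` the
# dark-count probability per mode.
function example_dark_count_sampling()
    input = Input{Bosonic}(ModeOccupation([1, 1, 0, 0]))
    ev = Event(input, DarkCountFockSample(0.05), RandHaar(4))
    sample!(ev)
    ev.output_measurement.s
end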
function sample!(ev::Event{TIn, TOut}) where {TIn<:InputType, TOut <: RealisticDetectorsFockSample}
# sample with dark counts but seeing all photons
ev_dark = Event(ev.input_state, DarkCountFockSample(ev.output_measurement.p_dark), ev.interferometer)
sample!(ev_dark)
sample_dark = ev_dark.output_measurement.s
# remove each of the readings with p_no_count
for mode in 1:sample_dark.m
if do_with_probability(ev.output_measurement.p_no_count)
sample_dark.s[mode] = 0
end
end
ev.output_measurement.s = sample_dark
end
function sample!(params::SamplingParameters)
sample!(params.ev)
end
"""
scattershot_sampling(n::Int, m::Int; N=1000, interf=nothing)
Simulate `N` times a scattershot boson sampling experiment with `n` photons among `m` modes.
The interferometer is set to [`RandHaar`](@ref) by default.
"""
function scattershot_sampling(n::Int, m::Int; N=1000, interf=nothing)
out_ = Vector{Vector{Int64}}(undef, N)
in_ = Vector{Vector{Int64}}(undef, N)
for i in 1:N
input = Input{Bosonic}(ModeOccupation(random_occupancy(n,m)))
interf == nothing ? F = RandHaar(m) : F = interf
ev = Event(input, FockSample(), F)
sample!(ev)
in_[i] = input.r.state
out_[i] = ev.output_measurement.s.state
end
joint = sort([[in_[i],out_[i]] for i in 1:N])
res = counter(joint)
k = collect(keys(res))
k = reduce(hcat,k)'
vals = collect(values(res))
k1 = unique(k[:,1])
k2 = unique(k[:,2])
M = zeros(length(k1),length(k2))
for i in 1:length(k1)
for j in 1:length(k2)
M[i,j] = res[[k1[i]',k2[j]']]
end
end
k1 = [string(i) for i in k1]
k2 = [string(i) for i in k2]
heatmap(k1,k2,M')
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 2546 | """
duplicate_row_col(A::Array, lengths::Vector{Int})
Returns a matrix where the column and row ``i`` of ``A`` are repeated ``lengths_i`` times.
"""
function duplicate_row_col(A::Array, lengths::Vector{Int})
x = axes(A,1)
@argcheck length(x) == length(lengths)
res = similar(x, sum(lengths))
i = 1
for idx in 1:length(x)
tmp = x[idx]
for kdx in 1:lengths[idx]
res[i] = tmp
i += 1
end
end
if length(size(A)) == 2
return A[res,res]
else
return A[res]
end
end
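
# Usage sketch (illustrative only): entry i is repeated lengths[i] times.
function example_duplicate_row_col()
    duplicate_row_col([10, 20, 30], [2, 0, 1]) == [10, 10, 30]
end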
"""
williamson_decomp(V::AbstractMatrix)
Find the Williamson decomposition of the positive semidefinite matrix ``V``.
Returns ``D`` and ``S`` such that ``V = S D S^T``.
!!! note "Reference"
[The Walrus documentation](https://the-walrus.readthedocs.io/en/latest/code/decompositions.html?highlight=williamson#thewalrus.decompositions.williamson)
"""
function williamson_decomp(V::AbstractMatrix)
@argcheck isposdef(V) && size(V)[1] == size(V)[2] && size(V)[1] % 2 == 0
n = div(LinearAlgebra.checksquare(V), 2)
J = symplectic_mat(n)
v = real.(sqrt(inv(V)))
inter = v * J * v
F = schur(inter)
T = F.T
Z = F.Z
perm = vcat([i for i in 1:2:2n-1], [i+1 for i in 1:2:2n-1])
x = [0 1; 1 0]
id = Matrix{eltype(V)}(I, 2, 2)
seq = []
for i in 1:2:2n-1
T[i,i+1] > 0 ? push!(seq, id) : push!(seq, x)
end
p = seq[1]
for i in 2:length(seq)
p = direct_sum(p, seq[i])
end
Zp = Z * p
Zp = Zp[:, perm]
Tp = p * T * p
d = [1/Tp[i,i+1] for i in 1:2:2n-1]
D = diagm(vcat(d,d))
S = transpose(inv(v * Zp * sqrt(D)))
return D, S
end
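
# Usage sketch (illustrative only): check the defining property V = S * D * Sᵀ on a
# random positive-definite matrix of even dimension.
function example_williamson_decomp()
    M = randn(4, 4)
    V = Matrix(Symmetric(M * M' + I))
    D, S = williamson_decomp(V)
    isapprox(V, S * D * transpose(S), rtol = 1e-6)
end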
function LaplaceExpansion(perm_mat, full_mat)
s_full = size(full_mat)[1]
s_perm = size(perm_mat)[2]
res = Vector{Float64}(undef, s_full)
global v_perms = Vector{ComplexF64}(undef, s_perm)
Threads.@threads for i in 1:s_perm
v_perms[i] = ryser(perm_mat[:,1:end .!= i])
end
@simd for i in 1:s_full
rowA = full_mat[i,:]
res[i] = abs.(dot(rowA, v_perms')).^2
end
return res
end
function total_variation_distance(p, q)
# pad the shorter distribution with zeros so that both have the same length
if length(p) > length(q)
q = vcat(q, zeros(length(p)-length(q)))
elseif length(q) > length(p)
p = vcat(p, zeros(length(q)-length(p)))
end
return sum(abs(p[i]-q[i]) for i = 1:length(p))
end
function collect_sub_mat_perm(U)
sub_mat = [remove_row_col(U, [], [i]) for i in 1:size(U)[2]]
return [fast_glynn_perm(m) for m in sub_mat]
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 705 | include("packages_loop.jl")
n_samples = 1000
begin
n = 5
m = n
interf = RandHaar(m)
T = OneParameterInterpolation
x = 1.
o = FockSample()
end
sample_number_modes_occupied(params::SamplingParameters) = number_modes_occupied(BosonSampling.sample!(params))
mean_number_modes_occupied(params::SamplingParameters, n_samples::Int) = mean([sample_number_modes_occupied(params) for i in 1:n_samples])
mean_number_modes_occupied(params, n_samples)
x_array = 0:0.1:1
n_occ = []
for x in x_array
params = SamplingParameters(n=n,m=m,interf = interf, T=T,x=x, o=o)
set_parameters!(params)
push!(n_occ,mean_number_modes_occupied(params, n_samples))
end
plot(x_array, n_occ)
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 4975 | """
H_matrix(U, input_state, partition_occupancy_vector)
H_matrix(interf::Interferometer, i::Input, o::FockDetection)
H_matrix(interf::Interferometer, i::Input, subset_modes::ModeOccupation)
H matrix for a partition defined by `partition_occupancy_vector`, see definition
in the article below.
!!! note
Conventions follow the author's [Boson bunching is not
maximized by indistinguishable particles](https://arxiv.org/abs/2203.01306)
which are the ones compatible with Tichy's conventions (Shshnovitch has a
different one for the evolution of the creation operators).
"""
function H_matrix(U, input_state::Vector, partition_occupancy_vector::Vector)
part = occupancy_vector_to_partition(partition_occupancy_vector)
input_modes = occupancy_vector_to_mode_occupancy(input_state)
number_photons = sum(input_state)
if number_photons == 0
error("no input photons")
end
if size(U,1) < number_photons
@warn "more photons than modes (trivially gives the identity as photons are conserved)"
end
H = Matrix{ComplexF64}(undef,number_photons,number_photons)
for i in 1: number_photons
for j in 1:number_photons
H[i,j] = sum([conj(U[l, input_modes[i]]) * U[l,input_modes[j]] for l in part])
end
end
H
end
H_matrix(interf::Interferometer, i::Input, o::FockDetection) = H_matrix(interf.U, i.r.state, o.s)
H_matrix(interf::Interferometer, i::Input, subset_modes::ModeOccupation) = isa_subset(subset_modes) ? H_matrix(interf.U, i.r.state, subset_modes.state) : error("invalid subset")
H_matrix(interf::Interferometer, i::Input, subset::Subset) = H_matrix(interf::Interferometer, i::Input, ModeOccupation(subset.subset))
"""
full_bunching_probability(interf::Interferometer, i::Input, subset_modes::Subset)
full_bunching_probability(interf::Interferometer, i::Input, mo::ModeOccupation)
Computes the probability that all n photons end up in the subset of chosen
output modes, following the reference below.
!!! note "Reference"
[Universality of Generalized Bunching and Efficient Assessment of Boson Sampling](https://arxiv.org/abs/1509.01561)
"""
function full_bunching_probability(interf::Interferometer, i::Input, subset_modes::Subset)
return clean_proba(permanent(H_matrix(interf,i,subset_modes) .* transpose(i.G.S)))
end
function full_bunching_probability(interf::Interferometer, i::Input, mo::ModeOccupation)
return clean_proba(permanent(H_matrix(interf,i,mo) .* transpose(i.G.S)))
end
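
# Usage sketch (illustrative only): probability that both photons of a two-photon
# input bunch into the first two output modes of a Haar-random 4-mode
# interferometer. The `Subset([...])` constructor is assumed here to take the
# occupancy vector of the subset modes.
function example_full_bunching_probability()
    interf = RandHaar(4)
    i = Input{Bosonic}(ModeOccupation([1, 1, 0, 0]))
    subset = Subset([1, 1, 0, 0])
    full_bunching_probability(interf, i, subset)
end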
#
# """
#
# bunching_events(input_state::Input, sub::Subset)
#
# generates the output configurations corresponding to a full
# bunching in the subset_modes
# """
# function bunching_events(input_state::Input, sub::Subset)
#
# #photon_distribution_in_subset_modes =
# all_mode_configurations(input_state, sub, only_photon_number_conserving = false)
#
# ######### convert to output ModeOccupations
#
# end
"""
bunching_probability_brute_force_bosonic(U, input_state, output_state; print_output = false)
bunching_probability_brute_force_bosonic(interf::Interferometer, i::Input, subset_modes::ModeOccupation)
Bosonic bunching probability by direct summation of all possible cases
`bunching_event_proba` gives the probability to get the event of ``[1^n 0^(m-n)]``.
"""
function bunching_probability_brute_force_bosonic(U, input_state, output_state; print_output = false)
n = sum(input_state)
m = size(U,1)
bunching_proba = 0
bunching_proba_array = []
bunching_event_proba = nothing
print_output ? println("bunching probabilities : ") : nothing
for t in reverse.(Iterators.product(fill(0:n,n)...))[:]
if sum(t) == n # cases where t is physical
output_state = zeros(Int,m)
output_state[1:n] .= t[1:n]
this_proba = process_probability(U, input_state, output_state)
bunching_proba += this_proba
push!(bunching_proba_array,[t, this_proba])
print_output ? println("output = ", t, " p = ", this_proba) : nothing
if output_state[1:n] == ones(Int, n)
bunching_event_proba = this_proba
end
end
end
# consistency check: the subset considered above is the set of the first n output modes
partition = vcat(ones(Int, n), zeros(Int, m-n))
H = H_matrix(U, input_state, partition)
@test bunching_proba ≈ real(permanent(H))
return bunching_proba, bunching_proba_array, bunching_event_proba
end
bunching_probability_brute_force_bosonic(interf::Interferometer, i::Input, subset_modes::ModeOccupation) = bunching_probability_brute_force_bosonic(interf.U, i.r.state, subset_modes.state; print_output = false)
"""
is_fully_bunched(ev::Event, subset::Subset)
Tells if all photons end up in the subset.
"""
is_fully_bunched(ev::Event, subset::Subset) = sum(ev.output_measurement.s.state .* subset.subset) == ev.input_state.n
"""
n_bunched_events(events::Vector{Event}, subset::Subset)
Gives the number of fully bunched events in subset.
"""
function n_bunched_events(events::Vector{Event}, subset::Subset)
n_bunched = 0
for ev in events
is_fully_bunched(ev, subset) ? n_bunched+= 1 : nothing
end
n_bunched
end
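# Usage sketch (assumes `events` is a Vector{Event} with `FockDetection` outputs, as
# produced by the certification scripts, and `m` is the number of modes):
#
# subset = Subset(first_modes(2, m))
# n_bunched_events(events, subset) / length(events) # observed full-bunching frequency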
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 469 | begin
using Revise
using BosonSampling
using Plots
using ProgressMeter
using Distributions
using Random
using Test
using ArgCheck
using StatsBase
using ColorSchemes
using Interpolations
using Dierckx
using LinearAlgebra
using PrettyTables
using LaTeXStrings
using JLD
using AutoHashEquals
using LinearRegression
using DataStructures
using Parameters
using UnPack
using Optim
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 7021 | ### first, a simple bayesian estimator ###
# for the theory, see 1904.12318 page 3
confidence(χ) = χ == Inf ? 1. : χ/(1+χ)
function update_confidence(event, p_q, p_a, χ)
χ *= p_q(event)/p_a(event)
χ
end
"""
compute_confidence(events,p_q, p_a)
A Bayesian confidence estimator: returns the probability that the null hypothesis
Q is correct rather than the alternative hypothesis A.
"""
function compute_confidence(events,p_q, p_a)
confidence(compute_χ(events,p_q, p_a))
end
"""
compute_confidence_array(events, p_q, p_a)
Returns an array of the probabilities of the null hypothesis being true as more and
more events are processed.
"""
function compute_confidence_array(events, p_q, p_a)
χ_array = [1.]
for event in events
push!(χ_array, update_confidence(event, p_q, p_a, χ_array[end]))
end
confidence.(χ_array)
end
function compute_χ(events, p_q, p_a)
χ = 1.
for event in events
χ = update_confidence(event, p_q, p_a, χ)
end
χ
end
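# Toy illustration of the update rule (the hypothesis functions below are placeholders
# that ignore the event, not package API): if the null hypothesis assigns probability
# 0.6 to an observed event and the alternative assigns 0.3, the likelihood ratio doubles.
#
# χ = update_confidence(nothing, _ -> 0.6, _ -> 0.3, 1.) # χ = 2.0
# confidence(χ) # ≈ 0.667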
"""
    certify!(b::Union{Bayesian, BayesianPartition})
Updates all probabilities associated with a `Bayesian` or `BayesianPartition` `Certifier`.
"""
function certify!(b::Union{Bayesian, BayesianPartition})
b.probabilities = compute_confidence_array(b.events, b.null_hypothesis.f, b.alternative_hypothesis.f)
b.confidence = b.probabilities[end]
end
"""
    certify!(fb::FullBunching)
Returns the p-values of the null and the alternative hypothesis.
"""
function certify!(fb::FullBunching)
events = fb.events
ev = fb.events[1]
input_modes = ev.input_state.r
interf = ev.interferometer
ib = Input{Bosonic}(input_modes)
id = Input{Distinguishable}(input_modes)
p_full_bos = full_bunching_probability(interf, ib, fb.subset)
p_full_dist = full_bunching_probability(interf, id, fb.subset)
p_full_observed = n_bunched_events(events, fb.subset)/length(events)
p_value_bosonic = pvalue(OneSampleTTest([Int(is_fully_bunched(ev, fb.subset)) for ev in events], p_full_bos))
p_value_dist = pvalue(OneSampleTTest([Int(is_fully_bunched(ev, fb.subset)) for ev in events], p_full_dist))
TNull = typeof(fb.null_hypothesis)
TAlternative = typeof(fb.alternative_hypothesis)
if TNull == Bosonic && TAlternative == Distinguishable
fb.p_value_null = p_value_bosonic
fb.p_value_alternative = p_value_dist
elseif TNull == Distinguishable && TAlternative == Bosonic
fb.p_value_null = p_value_dist
fb.p_value_alternative = p_value_bosonic
else
error("not implemented")
end
(fb.p_value_null, fb.p_value_alternative)
end
"""
    certify!(corr::Correlators)
Certifies from correlator statistics. Currently only computes the theoretical
correlators for the `Bosonic` and `Distinguishable` hypotheses; the comparison
with the observed events is not implemented yet.
"""
function certify!(corr::Correlators)
events = corr.events
ev = corr.events[1]
input_modes = ev.input_state.r
interf = ev.interferometer
ib = Input{Bosonic}(input_modes)
id = Input{Distinguishable}(input_modes)
corr_b = correlators_nm_cv_s(interf, ib)
corr_d = correlators_nm_cv_s(interf, id)
@warn "to be implemented"
# corr_observed = n_bunched_events(events, fb.subset)/length(events)
#
# p_value_bosonic = pvalue(OneSampleTTest([Int(is_fully_bunched(ev, fb.subset)) for ev in events], p_full_bos))
# p_value_dist = pvalue(OneSampleTTest([Int(is_fully_bunched(ev, fb.subset)) for ev in events], p_full_dist))
#
# TNull = typeof(fb.null_hypothesis)
# TAlternative = typeof(fb.alternative_hypothesis)
#
# if TNull == Bosonic && TAlternative == Distinguishable
# fb.p_value_null = p_value_bosonic
# fb.p_value_alternative = p_value_dist
#
# elseif TNull == Distinguishable && TAlternative == Bosonic
# fb.p_value_null = p_value_dist
# fb.p_value_alternative = p_value_bosonic
# else
# error("not implemented")
# end
#
# (fb.p_value_null, fb.p_value_alternative)
end
"""
p_B(event::Event{TIn, TOut}) where {TIn<:InputType, TOut <: FockDetection}
Outputs the probability that a given `FockDetection` would have if the `InputType` was `Bosonic` for this event.
"""
function p_B(event::Event{TIn, TOut}) where {TIn<:InputType, TOut <: FockDetection}
interf = event.interferometer
r = event.input_state.r
input_state = Input{Bosonic}(r)
output_state = event.output_measurement
event_H = Event(input_state, output_state, interf)
compute_probability!(event_H)
event_H.proba_params.probability
end
"""
p_D(event::Event{TIn, TOut}) where {TIn<:InputType, TOut <: FockDetection}
Outputs the probability that a given `FockDetection` would have if the `InputType` was `Distinguishable` for this event.
"""
function p_D(event::Event{TIn, TOut}) where {TIn<:InputType, TOut <: FockDetection}
interf = event.interferometer
r = event.input_state.r
input_state = Input{Distinguishable}(r)
output_state = event.output_measurement
event_A = Event(input_state, output_state, interf)
compute_probability!(event_A)
event_A.proba_params.probability
end
# """
# compute_probability!(b::BayesianPartition)
#
# Updates all probabilities associated with a `BayesianPartition` `Certifier`.
# """
# function compute_probability!(b::BayesianPartition)
#
# b.probabilities = compute_confidence_array(b.events, b.null_hypothesis.f, b.alternative_hypothesis.f)
# b.confidence = b.probabilities[end]
#
# end
"""
    number_of_samples(evb::Event{TIn1, TOut}, evd::Event{TIn2, TOut}; p_null = 0.95, maxiter = 10000) where {TIn1 <:InputType, TIn2 <:InputType, TOut <:PartitionCountsAll}
Outputs the number of samples required to reach confidence `p_null` that the null hypothesis (defined by the parameters of `evb`) is true compared to the alternative (defined by `evd`), through a Bayesian partition sample.
Note that this gives a specific sample - this function should be averaged over many trials to obtain a reliable estimate.
"""
function number_of_samples(evb::Event{TIn1, TOut}, evd::Event{TIn2, TOut}; p_null = 0.95, maxiter = 10000) where {TIn1 <:InputType,TIn2 <:InputType, TOut <:PartitionCountsAll}
# need to sample from one and then do the treatment as usual
# here we sample from pb
# compute the probabilities if they are not already known
for ev_theory in [evb,evd]
ev_theory.proba_params.probability == nothing ? compute_probability!(ev_theory) : nothing
end
pb = evb.proba_params.probability
ib = evb.input_state
interf = evb.interferometer
p_partition_B(ev) = p_partition(ev, evb)
p_partition_D(ev) = p_partition(ev, evd)
p_q = HypothesisFunction(p_partition_B)
p_a = HypothesisFunction(p_partition_D)
χ = 1
for n_samples in 1:maxiter
ev = Event(ib,PartitionCount(wsample(pb.counts, pb.proba)), interf)
χ = update_confidence(ev, p_q.f, p_a.f, χ)
        if confidence(χ) >= p_null
            return n_samples
        end
end
@warn "number of iterations reached, confidence(χ) = $(confidence(χ))"
return nothing
end
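# Usage sketch (illustrative parameters, constructors as used in the partition scripts):
#
# n = 5; m = 14
# interf = RandHaar(m)
# o = PartitionCountsAll(equilibrated_partition(m, 2))
# evb = Event(Input{Bosonic}(first_modes(n,m)), o, interf)
# evd = Event(Input{Distinguishable}(first_modes(n,m)), o, interf)
# number_of_samples(evb, evd, p_null = 0.95)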
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 2129 | using Revise
using BosonSampling:sample!
using BosonSampling
using Plots
using ProgressMeter
using Distributions
using Random
using Test
using ArgCheck
using StatsBase
using ColorSchemes
using Interpolations
using Dierckx
using LinearAlgebra
using PrettyTables
using LaTeXStrings
using JLD
using AutoHashEquals
using HypothesisTests
### tests with standard boson sampling ###
# we will test that we are indeed in the bosonic case
# compared to the distinguishable one
# first we generate a series of bosonic events
n_events = 200
n = 3
m = 8
interf = RandHaar(m)
TIn = Bosonic
input_state = Input{TIn}(first_modes(n,m))
events = []
for i in 1:n_events
# note that we don't compute the event probability
# as we would just have experimental observations
# of counts
ev = Event(input_state, FockSample(), interf)
sample!(ev)
ev = convert(Event{TIn, FockDetection}, ev)
push!(events, ev)
end
# now we have the vector of observed events with probabilities
events
events[1]
# next, from events, recover the probabilities under both
# hypothesis for instance
p_B(events[1])
p_D(events[1])
# hypothesis : the events were from a bosonic distribution
# for that we use the Bayesian type
p_q = HypothesisFunction(p_B)
p_a = HypothesisFunction(p_D)
certif = Bayesian(events, p_q, p_a)
BosonSampling.certify!(certif)
certif.confidence
scatter(certif.probabilities)
###### Bayesian tests for partitions ######
# need to provide the interferometer as well as some data to compute the probabilities
# this can be extracted from experimental if given it but here we have to generate it
# generate what would be experimental data
m = 14
n = 5
events = generate_experimental_data(n_events = 1000, n = n,m = m, interf = RandHaar(m), TIn = Bosonic)
n_subsets = 3
part = equilibrated_partition(m,n_subsets)
certif = BayesianPartition(events, Bosonic(), Distinguishable(), part)
certify!(certif)
plot(certif.probabilities)
part
# full bunching
subset_size = m-n
subset = Subset(first_modes(subset_size, m))
fb = FullBunching(events, Bosonic(), Distinguishable(), subset_size)
certify!(fb)
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 2738 | # n = 8
# m = 8
# interf = RandHaar(m)
# TIn = Bosonic
# input_state = Input{TIn}(first_modes(n,m))
function C(i,j, interf::Interferometer, input_state::Input{T}) where {T <: Union{Bosonic, Distinguishable}}
U = interf.U
    q = input_state.r.state
    n = input_state.n
    coeff_d(i,j) = - sum([U[q[k], i] * U[q[k], j] * conj(U[q[k], i]) * conj(U[q[k], j]) for k in 1:n])
    correction_coeff_b(i,j) = sum([k != l ? (U[q[k], i] * U[q[l], j] * conj(U[q[l], i]) * conj(U[q[k], j])) : 0 for k in 1:n for l in 1:n])
TIn = get_parametric_type(input_state)[1]
if TIn == Bosonic
return coeff_d(i,j) + correction_coeff_b(i,j)
elseif TIn == Distinguishable
return coeff_d(i,j)
else
error("not implemented")
end
end
function C_dataset(interf::Interferometer, input_state::Input{T}) where {T <: Union{Bosonic, Distinguishable}}
U = interf.U
m = interf.m
[C(i,j, interf, input_state) for i in 1:m for j in 1:m]
end
function correlators_nm_cv_s(interf::Interferometer, input_state::Input{T}) where {T <: Union{Bosonic, Distinguishable}}
C_data = C_dataset(interf, input_state)
moments = [mean(C_data.^k) for k in 1:3]
m = interf.m
n = input_state.n
nm = moments[1] * m^2 /n
cv = sqrt( moments[2] - moments[1]^2 ) / moments[1]
    s = ( moments[3] - 3 * moments[1] * moments[2] + 2 * moments[1]^3 )/(moments[2] - moments[1]^2 )^(3/2)
@warn "there seems to be a sign mistake!"
(nm,cv,s)
end
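# Usage sketch for the theoretical correlator statistics (illustrative parameters,
# mirroring the commented test parameters above):
#
# interf = RandHaar(8)
# input_state = Input{Bosonic}(first_modes(3, 8))
# correlators_nm_cv_s(interf, input_state) # returns (nm, cv, s)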
# n = 3
# m = 8
# interf = RandHaar(m)
# TIn = Bosonic
#
# events = generate_experimental_data(n_events = 100, n = n,m = m, interf = RandHaar(m), TIn = Bosonic)
function C(i,j, events::Vector{Event})
count_in(i, ev::Event) = ev.output_measurement.s.state[i]
counts_i = [count_in(i, ev) for ev in events]
counts_j = [count_in(j, ev) for ev in events]
mean(counts_i .* counts_j) - mean(counts_i) * mean(counts_j)
end
function C_dataset(events::Vector{Event})
ev = events[1]
interf = ev.interferometer
m = interf.m
[C(i,j, events) for i in 1:m for j in 1:m]
end
function correlators_nm_cv_s(events::Vector{Event})
C_data = C_dataset(events)
moments = [mean(C_data.^k) for k in 1:3]
ev = events[1]
interf = ev.interferometer
m = interf.m
n = ev.input_state.n
nm = moments[1] * m^2 /n
cv = sqrt( moments[2] - moments[1]^2 ) / moments[1]
    s = ( moments[3] - 3 * moments[1] * moments[2] + 2 * moments[1]^3 )/(moments[2] - moments[1]^2 )^(3/2)
@warn "there seems to be a sign mistake!"
@warn "there seems to be a mistake as nm is zero!"
(nm,cv,s)
end
# correlators_nm_cv_s(events)
#
# C_data = C_dataset(events)
#
# mean(C_data)
#
# moments = [mean(C_data.^k) for k in 1:3]
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 2103 | using Revise
using BosonSampling:sample!
using BosonSampling
using Plots
using ProgressMeter
using Distributions
using Random
using Test
using ArgCheck
using StatsBase
using ColorSchemes
using Interpolations
using Dierckx
using LinearAlgebra
using PrettyTables
using LaTeXStrings
using JLD
using AutoHashEquals
using HypothesisTests
### tests with standard boson sampling ###
# we will test that we are indeed in the bosonic case
# compared to the distinguishable one
# first we generate a series of bosonic events
n = 8
m = 8
interf = RandHaar(m)
TIn = Bosonic
events = generate_experimental_data(n_events = 100, n = n,m = m, interf = RandHaar(m), TIn = Bosonic)
# next, from events, recover the probabilities under both
# hypothesis for instance
p_B(events[1])
p_D(events[1])
# hypothesis : the events were from a bosonic distribution
# for that we use the Bayesian type
p_q = HypothesisFunction(p_B)
p_a = HypothesisFunction(p_D)
certif = Bayesian(events, p_q, p_a)
BosonSampling.certify!(certif)
certif.confidence
scatter(certif.probabilities)
###### Bayesian tests for partitions ######
# need to provide the interferometer as well as some data to compute the probabilities
# this can be extracted from experimental if given it but here we have to generate it
# generate what would be experimental data
m = 14
n = 5
events = generate_experimental_data(n_events = 10, n = n,m = m, interf = RandHaar(m), TIn = Bosonic)
n_subsets = 3
part = equilibrated_partition(m,n_subsets)
certif = BayesianPartition(events, Bosonic(), Distinguishable(), part)
certify!(certif)
plot(certif.probabilities)
part
### full bunching###
m = 20
n = 5
events = generate_experimental_data(n_events = 1000, n = n,m = m, interf = RandHaar(m), TIn = Bosonic)
subset_size = m-n
subset = Subset(first_modes(subset_size, m))
fb = FullBunching(events, Bosonic(), Distinguishable(), subset_size)
certify!(fb)
### correlators ###
m = 20
n = 5
interf = RandHaar(m)
events = generate_experimental_data(n_events = 1000, n = n,m = m, interf = interf, TIn = Bosonic)
input_state = Input{Bosonic}(first_modes(n,m))
correlators_nm_cv_s(interf, input_state)
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 968 | function generate_experimental_data(;n_events, n, m, interf, TIn, x = nothing, input_mode_occupation = nothing)
if input_mode_occupation == nothing
input_mode_occupation = first_modes(n,m)
end
if TIn == OneParameterInterpolation
@argcheck x != nothing "need to set partial distinguishability"
input_state = Input{TIn}(input_mode_occupation, x)
elseif TIn == Bosonic || TIn == Distinguishable
input_state = Input{TIn}(input_mode_occupation)
end
events = []
for i in 1:n_events
# note that we don't compute the event probability
# as we would just have experimental observations
# of counts
ev = Event(input_state, FockSample(), interf)
BosonSampling.sample!(ev)
ev = convert(Event{TIn, FockDetection}, ev)
push!(events, ev)
end
if !isa(events, Vector{Event})
events = convert(Vector{Event}, events)
end
events
end
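# Usage sketch (same call pattern as in the certification scripts):
#
# events = generate_experimental_data(n_events = 100, n = 3, m = 8, interf = RandHaar(8), TIn = Bosonic)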
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 1854 | ###### beam splitter and special counter example matrices ######
function beam_splitter(transmission_amplitude = sqrt(0.5))
"""2d beam_splitter matrix, follows the conventions of Leonardo"""
# |t|^2 is the "transmission probability"
t = transmission_amplitude
r = sqrt(1-t^2)
bs = [[t -r]; [r t]]#[[t r]; [-r t]]
end
function beam_splitter_modes(;in_up, in_down, out_up, out_down, transmission_amplitude, n)
"""beam splitter incorrporated for connecting specific input modes, output modes in a dimension n interferometer"""
if in_up == in_down || out_up == out_down
throw(ArgumentError())
end
bs = Matrix{ComplexF64}(I, n, n)
sub_bs = beam_splitter(transmission_amplitude)
bs[in_up, out_up] = sub_bs[1,1]
bs[in_down, out_down] = sub_bs[2,2]
bs[in_down, out_up] = sub_bs[2,1]
bs[in_up, out_down] = sub_bs[1,2]
#
# # before convention checks:
# bs[in_down, out_up] = sub_bs[1,2] ####### this change is a bit ad hoc and needs to be checked carefully with the conventions
# bs[in_up, out_down] = sub_bs[2,1]
#
bs
end
function rotation_matrix(angle::Float64)
[cos(angle) -sin(angle);
sin(angle) cos(angle)]
end
function rotation_matrix_modes(;in_up, in_down, out_up, out_down, angle, n)
    """rotation matrix incorporated for connecting specific input modes, output modes in a dimension n interferometer"""
    if in_up == in_down || out_up == out_down
        throw(ArgumentError("input modes (and output modes) must be distinct"))
    end
r = Matrix{ComplexF64}(I, n, n)
sub_r = rotation_matrix(angle)
r[in_up, out_up] = sub_r[1,1]
r[in_down, out_down] = sub_r[2,2]
# bs[in_down, out_up] = sub_bs[2,1]
# bs[in_up, out_down] = sub_bs[1,2]
r[in_down, out_up] = sub_r[1,2] ####### this change is a bit ad hoc and needs to be checked carefully with the conventions
r[in_up, out_down] = sub_r[2,1]
r
end
phase_shift(phase) = [[1 0]; [0 exp(phase*im)]]
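# Usage sketch (kept commented since this is library code): embed two balanced beam
# splitters in a 3-mode circuit and check that the composition is unitary.
#
# bs12 = beam_splitter_modes(in_up = 1, in_down = 2, out_up = 1, out_down = 2, transmission_amplitude = sqrt(0.5), n = 3)
# bs23 = beam_splitter_modes(in_up = 2, in_down = 3, out_up = 2, out_down = 3, transmission_amplitude = sqrt(0.5), n = 3)
# U = bs23 * bs12
# U * U' ≈ Matrix(I, 3, 3) # true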
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 4423 | """
noisy_distribution(;input::Input, loss::Real, interf::Interferometer, exact=true, approx=true, samp=true, error=1e-4, failure_probability=1e-4)
Compute the exact and/or approximated and/or sampled probability distribution of
all possible output configurations of partially-distinguishable photons through a
lossy interferometer.
By default, `exact`, `approx` and `samp` are set to `true` meaning that `noisy_distribution`
returns an array containing the three distributions.
!!! note
- The probabilities within a distribution are indexed following the same order as [`output_mode_occupation(n,m)`](@ref)
    - By default, the approximated distribution has an error and a failure probability of ``10^{-4}``.
!!! note "Reference"
[https://arxiv.org/pdf/1809.01953.pdf](https://arxiv.org/pdf/1809.01953.pdf)
"""
function noisy_distribution(;input::Input, loss::Real, interf::Interferometer, exact=true, approx=true, samp=true, error=1e-4, failure_probability=1e-4)
output = []
ϵ = error
δ = failure_probability
input_modes = input.r.state
number_photons = input.n
number_modes = input.m
U = interf.U
if get_parametric_type(input)[1] == OneParameterInterpolation
distinguishability = input.distinguishability_param
else
get_parametric_type(input)[1] == Bosonic ? distinguishability = 1.0 : distinguishability = 0.0
end
number_output_photons = trunc(Int, number_photons*loss)
input_occupancy_modes = fill_arrangement(input_modes)
if loss == 1 ||distinguishability == 0
throw(ArgumentError("invalid input parameters"))
end
ki = (log(ϵ*δ*(1-loss*distinguishability^2)/2))/log(loss*distinguishability^2)
ki = 10^(trunc(Int, log(10, ki)))
ki < 10 ? k = number_photons : k = min(ki, number_photons)
kmax = k-1
combs = collect(combinations(input_occupancy_modes, k))
nlist = output_mode_occupation(k, number_modes)
function compute_pi(i::Integer, ans)
res = 0
for j = 1:length(combs)
perm = collect(permutations(combs[j]))
for l = 1:length(perm)
count = sum(collect(Int(perm[l][ll] == combs[j][ll]) for ll = 1:length(perm[l])))
korder = k - count
if ans == true
pterm = distinguishability^korder * ryser(U[combs[j], nlist[i]] .* conj(U[perm[l], nlist[i]]))
pterm = real(pterm / (binomial(number_photons, k) * factorial(k)))
res += pterm
elseif ans == false && korder <= kmax
pterm = distinguishability^korder * ryser(U[combs[j], nlist[i]] .* conj(U[perm[l], nlist[i]]))
pterm = real(pterm / (binomial(number_photons, k) * factorial(k)))
res += pterm
end
end
end
return res
end
compute_full_distribution(ans) = map(i->compute_pi(i,ans), 1:length(nlist))
function sampling(n_samples = 1e5)
b = nlist[1]
comb = input_occupancy_modes[1:k]
proba = compute_pi(1, true)
current_pos = 1
lsample = []
bprob = zeros(length(nlist))
for i = 1:n_samples
pos_test = rand(1:length(nlist))
b_test = nlist[pos_test]
comb_test = combs[rand(1:length(combs))]
perm = collect(permutations(comb_test))
prob_test = 0
for j = 1:length(perm)
                # order of the permutation term, defined as in compute_pi
                count = k - sum(collect(Int(perm[j][ll] == comb_test[ll]) for ll = 1:length(perm[j])))
                if count <= kmax
                    prob_test += distinguishability^count * ryser(U[comb_test, b_test] .* conj(U[perm[j], b_test]))
                end
end
prob_test = real(prob_test)
if prob_test > proba
b = b_test
proba = prob_test
comb = comb_test
current_pos = pos_test
elseif prob_test / proba > rand()
b = b_test
proba = prob_test
comb = comb_test
current_pos = pos_test
end
bprob[current_pos] += 1 / n_samples
            push!(lsample, current_pos)
end
return bprob
end
exact ? push!(output, compute_full_distribution(true)) : nothing
approx ? push!(output, compute_full_distribution(false)) : nothing
samp ? push!(output, sampling()) : nothing
return output
end
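# Usage sketch (illustrative parameters; set `samp = false` to skip the sampled distribution):
#
# i = Input{OneParameterInterpolation}(first_modes(2,3), 0.9)
# p_exact, p_approx = noisy_distribution(input = i, loss = 0.5, interf = RandHaar(3), samp = false)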
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 2030 | """
theoretical_distribution(;input::Input, interf::Interferometer, i=nothing)
Compute the probability distribution of all possible output configurations of
fully/partially-indistinguishable photons through a lossless interferometer.
!!! note
- The probabilities within the distribution are indexed following the same order as `output_mode_occupation(n,m)`
- If `i` (with default value `nothing`) is set to an integer
`theoretical_distribution` returns the probability to find the photons in
the i'th configuration given by `output_mode_occupation`
"""
function theoretical_distribution(;input::Input, interf::Interferometer, i=nothing)
input_modes = input.r.state
number_photons = input.n
number_modes = input.m
if get_parametric_type(input)[1] == OneParameterInterpolation
distinguishability = input.distinguishability_param
else
get_parametric_type(input)[1] == Bosonic ? distinguishability = 1.0 : distinguishability = 0.0
end
U = interf.U
S = input.G.S
input_event = fill_arrangement(input_modes)
output_events = output_mode_occupation(number_photons, number_modes)
function compute_pi(event)
M = U[input_event, event]
output_modes = fill_arrangement(event)
if distinguishability == 1
return abs(ryser(M)).^2
else
W = Array{eltype(S)}(undef, (number_photons, number_photons, number_photons))
for rr in 1:number_photons
for ss in 1:number_photons
for j in 1:number_photons
W[rr,ss,j] = real(M[ss,j] * conj(M[rr,j]) * S[rr,ss])
end
end
end
return ryser_tensor(W)
end
end
complete_distribution() = map(e->compute_pi(e)/factorial(number_photons), output_events)
if i == nothing
return complete_distribution()
else
return compute_pi(output_events[i])
end
end
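# Usage sketch (illustrative parameters; probabilities are ordered as in output_mode_occupation(2, 3)):
#
# i = Input{Bosonic}(first_modes(2,3))
# p = theoretical_distribution(input = i, interf = RandHaar(3))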
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 2492 | """
    convert_csv_to_samples(path_to_file::String, m::Int, input_type = ThresholdModeOccupation, input_format = (input_data) -> ModeOccupation(input_data), samples_type = MultipleCounts)
Reads a CSV file containing experimental data. Converts it into a collection of samples of type `input_type` following the type of detectors available. `input_format` holds how the data is encoded, for instance as a `ModeList` or `ModeOccupation`.
You can choose the encoding of the samples to be either a Vector of observed events (like detector readings) or a `MultipleCounts` if there are many events.
If using `ModeOccupation`, defaults work for files written as follow: each line is of form
# 45, 1,1,0
# for finding 45 occurrences of the mode occupation 1,1,0
and you should use
input_format = (input_data) -> ModeOccupation(input_data)
If using `ModeList`, defaults work for files written as follow: each line is of form
# 49, 1, 4, 5, 6, 7, 8, 10
# for finding 49 occurrences of the mode list (1, 4, 5, 6, 7, 8, 10)
and you should use
input_format = (input_data) -> ModeList(input_data, m)
For this line, the function will output 49 identical samples, or a `MultipleCounts`. This is not the most efficient use of memory but allows for an easier conversion with existing functions.
Example usage:
convert_csv_to_samples("data/loop_examples/test.csv", 10)
"""
function convert_csv_to_samples(path_to_file::String, m::Int, input_type = ThresholdModeOccupation, input_format = (input_data) -> ModeOccupation(input_data), samples_type = MultipleCounts)
data = readdlm(path_to_file, ',', Int)
samples = Vector{input_type}()
if samples_type == Vector
for (output_number, output_pattern) in enumerate(eachrow(data[:,2:end]))
for count in 1:data[output_number,1]
push!(samples, input_type(input_format(convert(Array{Int},output_pattern))))
end
end
return samples
elseif samples_type == MultipleCounts
counts = Vector{Real}()
for (output_number, output_pattern) in enumerate(eachrow(data[:,2:end]))
push!(samples, input_type(input_format(convert(Array{Int},output_pattern))))
push!(counts, data[output_number,1])
end
return MultipleCounts(samples, counts ./ sum(counts)) # normalize to make it a probability as is eaten by MultipleCounts
else
error("not implemented")
end
return nothing
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 1569 | # convert CSV data into a vector of events
# conventions:
# the CSV file needs to have the same number of columns - make different files if using a different m
using DelimitedFiles
using Dates
### working with csv ###
# mode occupation
samples = convert_csv_to_samples("data/loop_examples/test_mode_occupation.csv", 4)
# mode list:
samples = convert_csv_to_samples("data/loop_examples/test_mode_list.csv", 10, ThresholdModeOccupation, (input_data) -> ModeList(input_data, 10))
### run info ###
n = 4
sparsity = 2
m = sparsity * n
# x = 0.9
d = Uniform(0,2pi)
ϕ = nothing # rand(d,m)
η_loss_lines = nothing # 0.9 * ones(m)
η_loss_bs = nothing # 1. * ones(m-1)
η = 0.5 * ones(m-1)
params = LoopSamplingParameters(n=n, m=m, η = η, η_loss_bs = η_loss_bs, η_loss_lines = η_loss_lines, ϕ = ϕ)
### generating fake samples ###
# this is an example with fake samples, you will need to convert yours in the right format using a ModeList
# samples = Vector{ThresholdModeOccupation}()
# n_samples = 10
#
# for i in 1:n_samples
#
# push!(samples, ThresholdModeOccupation(random_mode_list_collisionless(n,m)))
#
# end
#
# samples
### extra info on the run ###
extra_info = "this experiment was realised on... we faced various problems..."
### compiling everything in a single type structure ###
this_experiment = OneLoopData(params = params, samples = samples, extra_info = extra_info, name = "example")
### saving as a Julia format ###
save(this_experiment)
d = load("data/one_loop/example.jld")
loaded_experiment = d["data"]
loaded_experiment.samples
loaded_experiment.extra_info
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 3531 | include("packages_loop.jl")
color_map = ColorSchemes.rainbow1
partition_correlator(i,j,m) = begin
subsets = [Subset(ModeList(i,m)), Subset(ModeList(j,m))]
#push!(subsets, Subset(last_modes(m,2m))) # loss subset
Partition(subsets)
end
partition_mean(i,m) = begin
subsets = [Subset(ModeList(i,m))]
#push!(subsets, Subset(last_modes(m,2m))) # loss subset
Partition(subsets)
end
n = 6
sparsity = 3
m = sparsity * n
# x = 0.9
d = Uniform(0,2pi)
ϕ = nothing # rand(d,m)
η_loss_lines = nothing # 0.9 * ones(m)
η_loss_bs = nothing # 1. * ones(m-1)
η = 0.5 * ones(m-1)
params = LoopSamplingParameters(n=n, m=m, η = η, η_loss_bs = η_loss_bs, η_loss_lines = η_loss_lines, ϕ = ϕ)
psp = convert(PartitionSamplingParameters, params)
psp.mode_occ = equilibrated_input(sparsity, m)
"""
correlator(i,j, psp::PartitionSamplingParameters)
Computes the correlation <n_i n_j> - <n_i><n_j> for a given `PartitionSamplingParameters`. Info about distinguishability etc is contained in these parameters.
"""
function correlator(i,j, psp::PartitionSamplingParameters)
# @argcheck length(mc.counts[1].counts.state) == 2 "only implemented two correlators"
# psp = convert(PartitionSamplingParameters, params)
### compute cross term ###
psp.part = partition_correlator(i,j,m)
set_parameters!(psp)
compute_probability!(psp)
mc = psp.ev.proba_params.probability
cross_term = sum([mc.proba[k] * prod(mc.counts[k].counts.state) for k in 1:length(mc.counts)])
### compute average i ###
psp.part = partition_mean(i,m)
set_parameters!(psp)
compute_probability!(psp)
mc = psp.ev.proba_params.probability
avg_i = sum([mc.proba[k] * mc.counts[k].counts.state[1] for k in 1:length(mc.counts)])
    ### compute average j ###
psp.part = partition_mean(j,m)
set_parameters!(psp)
compute_probability!(psp)
mc = psp.ev.proba_params.probability
avg_j = sum([mc.proba[k] * mc.counts[k].counts.state[1] for k in 1:length(mc.counts)])
cross_term - avg_i * avg_j
end
min_i = 1
max_i = 8
begin
plt = plot()
for i in min_i:max_i
@show i
        col_frac = (i-min_i) / (max_i - min_i)
plot!([abs(correlator(i,j,psp)) for j in i+1:m-1], label = "i = $i", c = get(color_map, col_frac), yaxis = :log10)
end
xlabel!("offset r")
ylabel!("c(i,i+r)")
plt
end
"""
    correlation_length(psp::PartitionSamplingParameters)
Gives the correlation length as described in IV.D. in https://arxiv.org/abs/1712.09869.
"""
function correlation_length(psp::PartitionSamplingParameters)
i_1 = 1 # use the first input for the correlator first index, then look at the slope given by varying the index j
x_data = [j for j in i_1+1:m-1]
y_data = log.([abs(correlator(i_1,j,psp)) for j in i_1+1:m-1])
lr = linregress(x_data,y_data)
m_slope,c = lr.coeffs
-1/m_slope
end
"""
get_psp(θ)
With the global parameters currently in use (n,m,...) defines a `PartitionSamplingParameters` with all beam splitters of transmissivity θ in order to compute the `correlation_length`.
"""
function get_psp(θ)
η = θ * ones(m-1)
params = LoopSamplingParameters(n=n, m=m, η = η, η_loss_bs = η_loss_bs, η_loss_lines = η_loss_lines, ϕ = ϕ)
psp = convert(PartitionSamplingParameters, params)
end
function correlation_length(θ)
correlation_length(get_psp(θ))
end
θ = 0.01:0.1:0.99
plot(θ, correlation_length.(θ), yaxis = :log10)
ylabel!("corr length")
xlabel!("transmissivity")
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 1523 | include("packages_loop.jl")
###### what do we want to do ######
# find the parameters of reflectivities that allow for the best TVD, or an interesting behaviour
# fix the thermalization example to have this interesting case
# be able to optimize on reflectivity parameters
# for this I would like to be able to encapsulate everything in PartitionSamplingParameters etc
###### development ######
params = LoopSamplingParameters(m=10)
"""
    get_partition_sampling_parameters(params::LoopSamplingParameters)
Unpacks `params` to obtain a `PartitionSamplingParameters` whose interferometer is the circuit induced by `params`.
"""
function get_partition_sampling_parameters(params::LoopSamplingParameters)
@unpack n, m, input_type, i, η, η_loss_bs, η_loss_lines, d, ϕ, p_dark, p_no_count = params
PartitionSamplingParameters(n=n, m=m, T= get_parametric_type(params.i)[1], )
end
psp = convert(PartitionSamplingParameters, params)
compute_probability!(psp)
###### to threshold ######
mo = ModeOccupation([2,1,0])
ModeOccupation([(mode >= 1 ? 1 : 0) for mode in mo.state])
part = partition_thermalization_pnr(m)
[(length(subset)) for subset in part.subsets] == ones(length(part.subsets))
to_threshold(psp_b.ev.proba_params.probability.counts[10])
length(part.subsets[1])
# change a MultipleCounts to threshold
mc = psp_b.ev.proba_params.probability
typeof(mc)
mc.counts[1]
BosonSampling.to_threshold(mc::MultipleCounts)
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 6353 | include("packages_loop.jl")
# note: just go to the bottom for a minimum usage, the first two blocks are kept as intermediary building blocks to this abstract usage for debugging purposes
begin
### 2d HOM without loss but with ModeList example ###
n = 2
m = 2
i = Input{Bosonic}(first_modes(n,m))
o = FockDetection(ModeOccupation([1,1])) # detecting bunching, should be 0.5 in probability if there was no loss
transmission_amplitude_loss_array = 0:0.1:1
output_proba = []
circuit = LosslessCircuit(2)
interf = BeamSplitter(1/sqrt(2))
target_modes = ModeList([1,2], m)
add_element!(circuit, interf, target_modes)
ev = Event(i,o, circuit)
compute_probability!(ev)
### one d ex ##
n = 1
m = 1
function lossy_line_example(η_loss)
circuit = LossyCircuit(1)
interf = LossyLine(η_loss)
target_modes = ModeList([1],m)
add_element_lossy!(circuit, interf, target_modes)
circuit
end
lossy_line_example(0.9)
transmission_amplitude_loss_array = 0:0.1:1
output_proba = []
i = Input{Bosonic}(to_lossy(first_modes(n,m)))
o = FockDetection(to_lossy(first_modes(n,m)))
for transmission in transmission_amplitude_loss_array
ev = Event(i,o, lossy_line_example(transmission))
@show compute_probability!(ev)
push!(output_proba, ev.proba_params.probability)
end
print(output_proba)
plot(transmission_amplitude_loss_array, output_proba)
ylabel!("p no lost")
xlabel!("transmission amplitude")
### the same with autoconversion of the input and output dimensions ###
i = Input{Bosonic}(first_modes(n,m))
o = FockDetection(first_modes(n,m))
for transmission in transmission_amplitude_loss_array
ev = Event(i,o, lossy_line_example(transmission))
@show compute_probability!(ev)
push!(output_proba, ev.proba_params.probability)
end
### 2d HOM with loss example ###
n = 2
m = 2
i = Input{Bosonic}(first_modes(n,m))
o = FockDetection(ModeOccupation([2,0])) # detecting bunching, should be 0.5 in probability if there was no loss
transmission_amplitude_loss_array = 0:0.1:1
output_proba = []
function lossy_bs_example(η_loss)
circuit = LossyCircuit(2)
interf = LossyBeamSplitter(1/sqrt(2), η_loss)
target_modes = ModeList([1,2],m)
add_element_lossy!(circuit, interf, target_modes)
circuit
end
for transmission in transmission_amplitude_loss_array
ev = Event(i,o, lossy_bs_example(transmission))
compute_probability!(ev)
push!(output_proba, ev.proba_params.probability)
end
@test output_proba ≈ [0.0, 5.0000000000000016e-5, 0.0008000000000000003, 0.004049999999999998, 0.012800000000000004, 0.031249999999999993, 0.06479999999999997, 0.12004999999999996, 0.20480000000000007, 0.32805, 0.4999999999999999]
### building the loop ###
n = 3
m = n
i = Input{Bosonic}(first_modes(n,m))
η = 1/sqrt(2) .* ones(m-1)
# 1/sqrt(2) .* [1,0] #ones(m-1) # see selection of target_modes = [i, i+1] for m-1
# [1/sqrt(2), 1] #1/sqrt(2) .* ones(m-1) # see selection of target_modes = [i, i+1] for m-1
η_loss = 1. .* ones(m-1)
circuit = LosslessCircuit(m)
for mode in 1:m-1
interf = BeamSplitter(η[mode])#LossyBeamSplitter(η[mode], η_loss[mode])
#target_modes_in = ModeList([mode, mode+1], circuit.m_real)
#target_modes_out = ModeList([mode, mode+1], circuit.m_real)
target_modes_in = ModeList([mode, mode+1], m)
target_modes_out = target_modes_in
add_element!(circuit, interf, target_modes_in, target_modes_out)
end
############## lossy_target_modes needs to be changed, it need to take into account the size of the circuit rather than that of the target modes
#outputs compatible with two photons top mode
o1 = FockDetection(ModeOccupation([2,1,0]))
o2 = FockDetection(ModeOccupation([2,0,1]))
o_array = [o1,o2]
p_two_photon_first_mode = 0
for o in o_array
ev = Event(i,o, circuit)
@show compute_probability!(ev)
p_two_photon_first_mode += ev.proba_params.probability
end
p_two_photon_first_mode
o3 = FockDetection(ModeOccupation([3,0,0]))
ev = Event(i,o3, circuit)
@show compute_probability!(ev)
### loop with loss and types ###
begin
n = 3
m = n
i = Input{Bosonic}(first_modes(n,m))
η = 1/sqrt(2) .* ones(m-1)
η_loss_bs = 0.9 .* ones(m-1)
η_loss_lines = 0.9 .* ones(m)
d = Uniform(0, 2pi)
ϕ = rand(d, m)
end
circuit = LossyLoop(m, η, η_loss_bs, η_loss_lines, ϕ).circuit
o1 = FockDetection(ModeOccupation([2,1,0]))
o2 = FockDetection(ModeOccupation([2,0,1]))
o_array = [o1,o2]
p_two_photon_first_mode = 0
for o in o_array
ev = Event(i,o, circuit)
@show compute_probability!(ev)
p_two_photon_first_mode += ev.proba_params.probability
end
p_two_photon_first_mode
o3 = FockDetection(ModeOccupation([3,0,0]))
ev = Event(i,o3, circuit)
@show compute_probability!(ev)
Event(i,o3, circuit)
compute_probability!(ev)
end
begin
### sampling ###
begin
n = 3
m = n
i = Input{Bosonic}(first_modes(n,m))
η = 1/sqrt(2) .* ones(m-1)
η_loss_bs = 0.9 .* ones(m-1)
η_loss_lines = 0.9 .* ones(m)
d = Uniform(0, 2pi)
ϕ = rand(d, m)
end
circuit = LossyLoop(m, η, η_loss_bs, η_loss_lines, ϕ).circuit
p_dark = 0.01
p_no_count = 0.1
o = FockSample()
ev = Event(i,o, circuit)
BosonSampling.sample!(ev)
o = DarkCountFockSample(p_dark)
ev = Event(i,o, circuit)
BosonSampling.sample!(ev)
o = RealisticDetectorsFockSample(p_dark, p_no_count)
ev = Event(i,o, circuit)
BosonSampling.sample!(ev)
end
###### sample with a new circuit each time ######
# have a look at the documentation for the parameters and functions
get_sample_loop(LoopSamplingParameters(n = 10, input_type = Distinguishable))
build_loop(LoopSamplingParameters(n = 10, input_type = Distinguishable))
### partitions ###
params = PartitionSamplingParameters(n = 10, m = 10)
compute_probability!(params)
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 3085 | # an interesting experiment to run
# use η_thermalization for the reflectivities
# we look at the number of photons in the last bin
# this would require pseudo-photon number resolution, but as you see, not up to
# many photons
# if we can recover the plot, it is one of the ways to show a difference between
# bosonic (giving a thermal distribution) versus distinguishable
"""
η_thermalization(n)
Defines the transmissivities required for the thermalization scheme.
"""
η_thermalization(n) = [1-((i-1)/i)^2 for i in 2:n]
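# For instance, η_thermalization(4) gives [1-(1/2)^2, 1-(2/3)^2, 1-(3/4)^2] ≈ [0.75, 0.556, 0.4375].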
"""
partition_thermalization(m)
Defines the last mode, single mode subset for thermalization. This corresponds to the first mode of the interferometer with spatial bins (to be checked).
"""
partition_thermalization(m) = begin
s1 = Subset(ModeList(m,m))
s2 = Subset(first_modes(m-1,m))
Partition([s1,s2])
end
"""
partition_thermalization_loss(m)
Defines the last mode, single mode subset for thermalization. This corresponds to the first mode of the interferometer with spatial bins (to be checked). Loss modes included in the second subset
"""
partition_thermalization_loss(m) = begin
s1 = Subset(ModeList(m,2m))
s2 = Subset(first_modes(m-1,2m)+last_modes(m,2m))
Partition([s1,s2])
end
"""
partition_thermalization_pnr(m)
Defines the modes corresponding to the pseudo number resolution as subsets for thermalization. This corresponds to the first mode of the interferometer with spatial bins. Loss modes included in the last subset.
"""
partition_thermalization_pnr(m) = begin
subsets = [Subset(ModeList(i,2m)) for i in n:m]
#push!(subsets, Subset(last_modes(m,2m))) # loss subset
Partition(subsets)
end
"""
η_pnr(steps)
Gives chosen refectivities for pseudo photon number resolution at the end of the loop with `steps`.
"""
η_pnr(steps) = [i/(steps + 1) for i in 1:steps]
"""
tvd_reflectivities(η)
Computes the total variation distance for B,D inputs for a one loop setup with reflectivities η.
"""
function tvd_reflectivities(η)
if all(between_one_and_zero.(η))
params = LoopSamplingParameters(n=n, m=m, η = η, η_loss_bs = η_loss_bs, η_loss_lines = η_loss_lines, ϕ = ϕ)
psp_b = convert(PartitionSamplingParameters, params)
psp_d = convert(PartitionSamplingParameters, params) # a way to act as copy
psp_b.mode_occ = equilibrated_input(sparsity, m)
psp_d.mode_occ = equilibrated_input(sparsity, m)
part = equilibrated_partition(m, n_subsets)
psp_b.part = part
psp_d.part = part
psp_b.T = OneParameterInterpolation
psp_b.x = x
set_parameters!(psp_b)
psp_d.T = Distinguishable
set_parameters!(psp_d)
compute_probability!(psp_b)
compute_probability!(psp_d)
pdf_bos = psp_b.ev.proba_params.probability.proba
pdf_dist = psp_d.ev.proba_params.probability.proba
@show tvd(pdf_bos,pdf_dist)
#display(plot(η))
return -tvd(pdf_bos,pdf_dist)
else
println("invalid reflectivity")
return 100
end
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 2943 | include("packages_loop.jl")
###### fixed random unitary ######
# fix a random unitary with potential noise (as of now just rand phases)
# find the partition data and tvd
# repeat over niter
# reflectivities chosen at random
"""
loop_partition_tvd(params::LoopSamplingParameters, n_subsets::Int = 2)
Outputs the TVD between the partition probabilities for `Bosonic`, `Distinguishable` over the Interferometer defined by `params` (builds a new loop at each call so if they contain randomness this is taken into account however if the randomness is called at the definition of params you need to rerun it before feeding it into the function).
"""
function loop_partition_tvd(params::LoopSamplingParameters, n_subsets::Int = 2)
ib = Input{Bosonic}(first_modes(n,m))
id = Input{Distinguishable}(first_modes(n,m))
interf = build_loop(params)
part = equilibrated_partition(m,n_subsets)
o = PartitionCountsAll(part)
evb = Event(ib,o,interf)
evd = Event(id,o,interf)
pb = compute_probability!(evb)
pd = compute_probability!(evd)
pdf_dist = pd.proba
pdf_bos = pb.proba
tvd(pdf_bos,pdf_dist)
end
begin
n = 10
m = n
niter = 1000
n_subsets = 2
tvd_array = zeros(niter)
for i in 1:niter
params = LoopSamplingParameters(n=n, η_loss_bs = nothing, η_loss_lines = nothing)
tvd_array[i] = loop_partition_tvd(params, n_subsets)
end
mean(tvd_array), sqrt.(var(tvd_array))
end
# end
params = LoopSamplingParameters(n = n, input_type = Distinguishable, η_loss_lines = 0.3 * ones(m))
params.η_loss_lines
build_loop(params).U
pretty_table(build_loop(params).U)
############### need to make both compatible
PartitionCountsAll<:OutputMeasurementType
@with_kw mutable struct PartitionLoopSamplingParameters
params::LoopSamplingParameters
@unpack n, m, input_type, i, η, η_loss_bs, η_loss_lines, d, ϕ, p_dark, p_no_count = params
interf::Interferometer = build_loop(params)
@warn "need to update this in lossy case"
part::Partition = equilibrated_partition(m, n_subsets)
end
### no phase ###
begin
n = 10
m = n
params = LoopSamplingParameters(n=n, η = η_thermalization(n), η_loss_bs = nothing, η_loss_lines = nothing) #, ϕ = nothing
@show params
@show η_thermalization(n)
@show partition_thermalization(m)
ib = Input{Bosonic}(first_modes(n,m))
id = Input{Distinguishable}(first_modes(n,m))
interf = build_loop(params)
pretty_table(interf.U)
part = partition_thermalization(m)
o = PartitionCountsAll(part)
evb = Event(ib,o,interf)
evd = Event(id,o,interf)
pb = compute_probability!(evb)
pd = compute_probability!(evd)
    pdf_dist = pd.proba
    pdf_bos = pb.proba
    @show pdf_dist
n_in = [i for i in 0:n]
plot(n_in, pdf_bos, label = "B", xticks = n_in)
plot!(n_in, pdf_dist, label = "D")
ylims!((0,1))
end
typeof(pb)
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 1261 | include("packages_loop.jl")
n = 2
sparsity = 3
m = sparsity * n
n_subsets = 3
n_subsets > 3 && m > 12 ? (@warn "may be slow") : nothing
x = 0.9
equilibrated_input(sparsity, m)
# photons are homogeneously distributed
d = Uniform(0,2pi)
ϕ = nothing # rand(d,m)
η_loss_lines = 1. * ones(m)
η_loss_bs = 1. * ones(m-1)
# η_0 = rand(m-1)
η_0 = 1/sqrt(2) * ones(m-1)
#η_0 = η_thermalization(m)
lower = zeros(length(η_0))
upper = ones(length(η_0))
#optimize(tvd_reflectivities, lower, upper, η_0)
sol = optimize(tvd_reflectivities, η_0, Optim.Options(time_limit = 60.0))
@show sol.minimizer
begin
plot(sol.minimizer, label = "optim : $(-tvd_reflectivities(sol.minimizer))")
plot!(η_thermalization(m), label = "thermalization : $(-tvd_reflectivities(η_thermalization(m)))")
plot!(η_0, label = "initial : $(-tvd_reflectivities(η_0))")
ylims!((0,1.3))
ylabel!("η_i")
xlabel!("loop pass")
end
### identical ###
tvd_one_reflectivity(η) = tvd_reflectivities(η[1] * ones(m-1))
η_0 = [0.5]
sol = optimize(tvd_one_reflectivity, η_0, Optim.Options(time_limit = 15.0))
@show sol.minimizer
### what happens with the thermalization pattern ###
(-tvd_reflectivities(η_thermalization(m)))
part = equilibrated_partition(m, n_subsets)
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 470 | begin
using Revise
using BosonSampling
using Plots
using ProgressMeter
using Distributions
using Random
using Test
using ArgCheck
using StatsBase
using ColorSchemes
using Interpolations
using Dierckx
using LinearAlgebra
using PrettyTables
using LaTeXStrings
using JLD
using AutoHashEquals
using LinearRegression
using DataStructures
using Parameters
using UnPack
using Optim
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 4495 | include("packages_loop.jl")
# an interesting experiment to run
# use η_thermalization for the reflectivities
# we look at the number of photons in the last bin
# this would require pseudo-photon number resolution, but as you see, not up to
# many photons
# if we can recover the plot, it is one of the ways to show a difference between
# bosonic (giving a thermal distribution) versus distinguishable
### no phase ###
n = 10
m = n
d = Uniform(0,2pi)
ϕ = nothing # rand(d,m)
params = LoopSamplingParameters(n=n, η = η_thermalization(n), η_loss_bs = nothing, η_loss_lines = nothing, ϕ = ϕ)
psp_b = convert(PartitionSamplingParameters, params)
psp_d = convert(PartitionSamplingParameters, params) # a way to act as copy
psp_b.part = partition_thermalization(m)
psp_d.part = partition_thermalization(m)
set_parameters!(psp_b)
psp_d.T = Distinguishable
set_parameters!(psp_d)
compute_probability!(psp_b)
compute_probability!(psp_d)
########## need to copy psp, it doesn't make the difference
pdf_bos = psp_b.ev.proba_params.probability.proba
pdf_dist = psp_d.ev.proba_params.probability.proba
n_in = [i for i in 0:n]
plot(n_in, pdf_bos, label = "B", xticks = n_in)
plot!(n_in, pdf_dist, label = "D")
ylims!((0,1))
###### theory probabilities ######
pdf_dist_theory(k) = binomial(n,k)/n^k * (1-1/m)^(n-k)
pdf_bos_theory(k) = sum((-1)^(k+a) * binomial(a,k) * binomial(n,a) * factorial(a) / m^(a) for a in k:n)
plot!(n_in, pdf_dist_theory.(n_in), label = "D theory")
plot!(n_in, pdf_bos_theory.(n_in), label = "B theory")
xlabel!("number of photons in last bin")
ylabel!("probability")
### with loss and partial distinguishability ###
n = 10
m = n
d = Uniform(0,2pi)
ϕ = nothing # rand(d,m)
η_loss_lines = 0.9 * ones(m)
η_loss_bs = 1. * ones(m-1)
params = LoopSamplingParameters(n=n, η = η_thermalization(n), η_loss_bs = η_loss_bs, η_loss_lines = η_loss_lines, ϕ = ϕ)
psp_b = convert(PartitionSamplingParameters, params)
psp_d = convert(PartitionSamplingParameters, params) # a way to act as copy
psp_b.part = partition_thermalization_loss(m)
psp_d.part = partition_thermalization_loss(m)
# psp_b.part = partition_thermalization(m)
# psp_d.part = partition_thermalization(m)
#
psp_b.T = OneParameterInterpolation
psp_b.x = 0.8
set_parameters!(psp_b)
psp_d.T = Distinguishable
set_parameters!(psp_d)
compute_probability!(psp_b)
compute_probability!(psp_d)
pdf_bos = psp_b.ev.proba_params.probability.proba
pdf_dist = psp_d.ev.proba_params.probability.proba
n_in = [i for i in 0:n]
plot(n_in, pdf_bos, label = "partial dist", xticks = n_in)
plot!(n_in, pdf_dist, label = "D")
ylims!((0,1))
###### theory probabilities ######
pdf_dist_theory(k) = binomial(n,k)/n^k * (1-1/m)^(n-k)
pdf_bos_theory(k) = sum((-1)^(k+a) * binomial(a,k) * binomial(n,a) * factorial(a) / m^(a) for a in k:n)
plot!(n_in, pdf_dist_theory.(n_in), label = "D theory")
plot!(n_in, pdf_bos_theory.(n_in), label = "B theory")
xlabel!("number of photons in last bin")
ylabel!("probability")
###### with pseudo photon number resolution ######
n = 10
steps_pnr = 2 # number of bins for pseudo pnr
steps_pnr > 2 && n > 6 ? (@warn "may be slow") : nothing
m = n + steps_pnr
x = 0.8
d = Uniform(0,2pi)
ϕ = nothing # rand(d,m)
η_loss_lines = 0.9 * ones(m)
η_loss_bs = 1. * ones(m-1)
η = vcat(η_thermalization(n), η_pnr(steps_pnr))
### LORENZO these are the transmissivities
@show η
params = LoopSamplingParameters(n=n, m=m,η = η, η_loss_bs = η_loss_bs, η_loss_lines = η_loss_lines, ϕ = ϕ)
psp_b = convert(PartitionSamplingParameters, params)
psp_d = convert(PartitionSamplingParameters, params) # a way to act as copy
psp_b.part = partition_thermalization_pnr(m)
psp_d.part = partition_thermalization_pnr(m)
psp_b.T = OneParameterInterpolation
psp_b.x = x
set_parameters!(psp_b)
psp_d.T = Distinguishable
set_parameters!(psp_d)
compute_probability!(psp_b)
compute_probability!(psp_d)
################################ it looks like loss is not taken into account !
################################ need to convert to threshold detection
mc_b = psp_b.ev.proba_params.probability
mc_d = psp_d.ev.proba_params.probability
mc_b = to_threshold(mc_b)
mc_d = to_threshold(mc_d)
pdf_bos = mc_b.proba
pdf_dist = mc_d.proba
n_config = length(pdf_bos)
config = 1:n_config
bar(config,pdf_bos, label = "x=$x", alpha = 0.5)
bar!(config, pdf_dist, label = "D", alpha = 0.5)
xlabel!("output configuration")
ylabel!("p")
params.i
sum(pdf_dist) ############ this shouldn't be the case !
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 4306 | # brute force tests to see which matrices U maximize tvd(P^D - P^B)
function brute_force_maximize_over_haar_partition_pdf(; n_modes = 4, n_trials = 10,n_photons = 2, partition_size = 2, dist = tvd, saveimg = true, start_with_fourier_special_partition = false)
if start_with_fourier_special_partition
# apply the inverse transformation to the fourier matrix so that if the output for U = fourier_matrix
# would be (1 0 1 0 1 0) it is now (111000)
        # note that this special partition only really makes sense for partition size = n_modes / 2
# thus we will skip all other partition sizes
if 2*partition_size != n_modes
throw(ArgumentError("special partition only make sense when the partition is half the modes"))
elseif n_modes%2 != 0
throw(ArgumentError("n_modes must be even to make sense"))
end
U_best = inv(permutation_matrix_special_partition_fourier(n_modes)) * fourier_matrix(n_modes)
else
U_best = fourier_matrix(n_modes)
end
is_the_fourier_matrix = true
dist_best = distance_partition_pdfs(U = U_best, n_photons = n_photons, partition_size = partition_size, dist = dist)
for trial in 1:n_trials
U_this_iter = rand_haar(n_modes)
dist_this_iter = distance_partition_pdfs(U = U_this_iter, n_photons = n_photons, partition_size = partition_size, dist = dist)
if dist_this_iter > dist_best
U_best = U_this_iter
dist_best = dist_this_iter
is_the_fourier_matrix = false
end
end
if saveimg
# plotting the best result
cd(starting_directory)
make_directory("images", delete_contents_if_existing = false)
make_directory("most sensitive interferometer", delete_contents_if_existing = false)
make_directory("$dist", delete_contents_if_existing = false)
make_directory("fourier_special_partition : $start_with_fourier_special_partition", delete_contents_if_existing = false)
input_state = zeros(Int, n_modes)
input_state[1:n_photons] = ones(Int,n_photons)
partition_occupancy_vector = zeros(Int, n_modes)
partition_occupancy_vector[1:partition_size] = ones(Int, partition_size)
part = occupancy_vector_to_partition(partition_occupancy_vector)
pdf_bosonic = proba_partition(U_best, partition_occupancy_vector, input_state = input_state)
pdf_dist = partition_probability_distribution_distinguishable(part, U_best, number_photons = n_photons)
x = 0:n_photons
y = [pdf_bosonic, pdf_dist]
plt = Plots.scatter(x,y, label = ["bosonic" "dist"], ylims = (0,1.05))
matrix_plot_real = heatmap(real.(U_best))
matrix_plot_img = heatmap(imag.(U_best))
#display(plt)
savefig(plt, "most sensitive matrix to dist (distributions), n_modes = $n_modes, n_photons = $n_photons, partition_size = $partition_size, n_trials = $n_trials, is_the_fourier_matrix = $is_the_fourier_matrix")
savefig(matrix_plot_real, "most sensitive matrix to dist (real), n_modes = $n_modes, n_photons = $n_photons, partition_size = $partition_size, n_trials = $n_trials, is_the_fourier_matrix = $is_the_fourier_matrix")
savefig(matrix_plot_img, "most sensitive matrix to dist (img), n_modes = $n_modes, n_photons = $n_photons, partition_size = $partition_size, n_trials = $n_trials, is_the_fourier_matrix = $is_the_fourier_matrix")
save_matrix(U_best, "most sensitive matrix to dist (matrix), n_modes = $n_modes, n_photons = $n_photons, partition_size = $partition_size, n_trials = $n_trials, is_the_fourier_matrix = $is_the_fourier_matrix")
cd(starting_directory)
end
(dist_best, U_best)
end
for n_modes = 2:16
for n_photons = 1:n_modes
for partition_size = 1:n_modes
try
brute_force_maximize_over_haar_partition_pdf(;n_modes = n_modes, n_trials = 10000, n_photons = n_photons, partition_size = partition_size, dist = sqr, start_with_fourier_special_partition = true)
catch
end
end
end
end
brute_force_maximize_over_haar_partition_pdf(n_modes = 100, n_trials = 10000, n_photons = 10, partition_size = 50, dist = sqr, start_with_fourier_special_partition = true)
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 1794 | n = 10
A = Diagonal(range(1, stop=2, length=n))
f(x) = dot(x,A*x)/2
g(x) = A*x
g!(stor,x) = copyto!(stor,g(x))
x0 = randn(n)
manif = Optim.Sphere()
Optim.optimize(f, g!, x0, Optim.ConjugateGradient(manifold=manif))
n = 10
A = Diagonal(range(1, stop=2, length=n))
f(x) = dot(x[1:n],A*x[1:n])/2 + dot(x[n+1:2n],A*x[n+1:2n])/2
x0 = randn(2n)
manif = Optim.ProductManifold(Optim.Sphere(), Optim.Sphere(), (n,), (n,))
res = Optim.optimize(f, x0, Optim.ConjugateGradient(manifold=manif))
v =Optim.minimizer(res)
norm(v[1:n])
norm(v[n+1:end])
norm(v)
#### iterative manifold ####
# r column vectors, normalized, of dimension n
n = 10
r = n
manif = Optim.Sphere()
for layer = 1:r-1
manif = Optim.ProductManifold(Optim.Sphere(), manif, (n,), (layer * n,))
end
function array_to_matrix(array, r, n)
"""gives a (r,n) matrix from the r, n-sized vectors concatenated in array"""
mat = Matrix{eltype(array)}(undef, r, n)
for line = 1 : r
for col = 1 : n
mat[line, col] = array[(line-1)*n+col]
end
end
mat
end
function objective(array, r, n)
M = array_to_matrix(array, r, n)
gram_matrix = M'*M
-abs(permanent_ryser(gram_matrix))
end
function gradient_permanent!(G, array, n, r)
A = array_to_matrix(array, r, n)
for i = 1:n
for j = 1:n
            G[i,j] = permanent_ryser(remove_row_col(A, i, j))
end
end
end
function foo(array)
objective(array,r,n)
end
function g!(G, array)
gradient_permanent!(G, array, n, r)
end
res = Optim.optimize(foo,rand_gram_matrix(n), Optim.GradientDescent(manifold=manif), Optim.Options(x_tol = 1e-16, iterations = 100000))
Float64(factorial(n))
v = Optim.minimizer(res)
println("###############")
for i = 1 : n
println(real(norm(v[i,:])))
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 4581 | # follows Conjugate gradient algorithm for optimization under unitary Traian Abrudan ,1,2 , Jan Eriksson 2 , Visa Koivunen
# matrix constraint Signal Processing 89 (2009) 1704–1714
function step_size(w_k, h_k; q, euclidian_gradient, p = 5)
"""
step_size(w_k, h_k; q, euclidian_gradient, p = 5)
    implements the geodesic search algorithm of Table 1 in T. Abrudan, J. Eriksson, V. Koivunen, "Conjugate gradient algorithm for optimization under unitary matrix constraint", Signal Processing 89 (2009) 1704–1714"""
n = size(w_k)[1]
omega_max = maximum(abs.(eigvals(h_k)))
t_mu = 2pi/(q*omega_max)
mu = [i * t_mu/p for i in 0:p]
r = Matrix{ComplexF64}[]
push!(r, Matrix{ComplexF64}(I,n,n))
push!(r, exp(-t_mu/p * h_k))
for i in 2:p
push!(r, r[2]*r[end])
end
derivatives = zeros(ComplexF64, p+1)
for i = 0:p
derivatives[i+1] = -2 * real(tr(euclidian_gradient(r[i+1]*w_k) * w_k'*r[i+1]'*h_k'))
end
a = zeros(ComplexF64, p+1)
a[1] = derivatives[1]
mat_mu = zeros(ComplexF64, p,p)
for i = 1:p
mat_mu[:, i] = mu[2:end].^i
end
a[2:end] = inv(mat_mu) * (derivatives[2:end].-a[1])
positive_roots = []
for root in roots(reverse(a))
if isapprox(imag(root), 0., atol = 1e-7)
if real(root) > 0.
push!(positive_roots, real(root))
end
end
end
#step_size
if positive_roots == []
return 0.
else
return minimum(positive_roots)
end
end
"""
minimize_over_unitary_matrices(;euclidian_gradient, q, n, p=5, tol = 1e-6, max_iter = 100)
returns the (optimized matrix, optimization_success)
implements the minimization algorithm of Table 3 in "Conjugate gradient algorithm for optimization under unitary matrix constraint", Traian Abrudan, Jan Eriksson, Visa Koivunen, Signal Processing 89 (2009) 1704–1714."""
function minimize_over_unitary_matrices(;euclidian_gradient, q, n, p=5, tol = 1e-6, max_iter = 100)
optimization_success = false
function inner_product(a,b)
0.5 * real(tr(a'*b))
end
k = 0
w_k = Matrix{ComplexF64}(I,n,n)
g_k = similar(w_k)
h_k = similar(w_k)
while k < max_iter
#println("iter $(k+1)/$max_iter")
if k % n^2 == 0
gamma_k = euclidian_gradient(w_k)
g_k = gamma_k * w_k' - w_k * gamma_k'
h_k = g_k
end
if inner_product(g_k,g_k) < tol
#println("optimisation achieved")
optimization_success = true
break
end
w_k_plus_1 = exp(-step_size(w_k, h_k; q = q, euclidian_gradient = euclidian_gradient, p = p) * h_k)*w_k
gamma_k_plus_1 = euclidian_gradient(w_k_plus_1)
g_k_plus_1 = gamma_k_plus_1 * w_k_plus_1' - w_k_plus_1*gamma_k_plus_1'
gamma_small_k = inner_product((g_k_plus_1 - g_k) , g_k_plus_1) / inner_product(g_k , g_k)
h_k_plus_1 = g_k_plus_1 + gamma_small_k * h_k
if inner_product(h_k_plus_1, g_k_plus_1) < 0.
h_k_plus_1 = g_k_plus_1
end
k+=1
gamma_k = gamma_k_plus_1
w_k = w_k_plus_1
h_k = h_k_plus_1
end
if optimization_success == false
#println("optimisation failed")
end
return (w_k, optimization_success)
end
function run_brockett_optimization_test(;maxiter = 100)
achieved_an_optimization = false
iter = 0
this_test = nothing
while achieved_an_optimization == false && iter < maxiter
iter += 1
n = 3
N = diagm(1:n)
sigma = hermitian_test_matrix(n)
q_brockett = 2
function objective_function(w, sigma, N)
tr(w' * sigma * w * N)
end
function euclidian_gradient_brockett(w,sigma,N)
sigma*w*N
end
function euclidian_gradient(w)
euclidian_gradient_brockett(w,sigma,N)
end
result= minimize_over_unitary_matrices(;euclidian_gradient = euclidian_gradient, q = q_brockett,n=n, tol = 1e-7, max_iter = 100000)
achieved_an_optimization = result[2]
if achieved_an_optimization
optimized_matrix = result[1]
this_test = @test real.(optimized_matrix' * sigma * optimized_matrix) ≈ diagm(sort(eigvals(sigma), rev = true)) atol = 1e-2
break
end
end
if achieved_an_optimization == false
println("no optimization succeeded during the test, maybe be statistical but looks like an anomaly")
end
this_test
end
### contains functions to compute probabilities in subsets
### which we used to call partitions
### the functions are in the process of being rewritten for
### multisets
"""
matrix_phi(k, U, occupancy_vector, n)
"""
function matrix_phi(k, U, occupancy_vector, n)
"""matrix P(phi_k) in the notes
n = number of photons
occupancy_vector is a Int64 vector of {0,1}^m with one if the mode belongs to the partition, zero otherwise
remark : opposite sign convention from the notes called notes_singlemode_output
by default for the r = (1,1, ..., 1) input """
if k > n
throw(ArgumentError("k > n"))
end
check_at_most_one_particle_per_mode(occupancy_vector)
m = size(U,1)
mat = Matrix{eltype(U)}(I, size(U))
for i in 1:m
if occupancy_vector[i] == 1
mat[i,i] = exp(-2im * pi * k / (n+1))
end
end
mat
end
"""
proba_partition_partial(;U, S, occupancy_vector, input_state, checks=true)
Return an ``n+1`` sized array giving the probability to find ``0, 1, ..., n`` photons
inside the bins given by `occupancy_vector` at the output of `U`.
!!! note
- We take `U` of dimension ``m`` while `M` is the scattering matrix, as in
Tichy, ``M_ij = U_{d_i}, _{d_j}``.
- Given ``n`` photons, generally in the first modes, the distinguishability
matrix is defined as in Tichy, ``S_{ij} = <phi_{d_i}|phi_{d_j}>``.
This is not a problem as it does not depend on the output partition but be aware of it.
"""
function proba_partition_partial(; U, S, occupancy_vector, input_state, checks = true)
"""returns a n+1 sized array giving the probability of have [zero, one, ...]
photons inside the bins given by occupancy_vector for the interferometer
given by the matrix U (perfectly indistinguishable photons)
like proba_partition but with partial distinguishability defined through the S
matrix, with conventions as in Tichy (note that we take the S matrix to be n*n
while the interferometer has m modes)
NOTE :
in the following, we take U to be m*m
while M is the scattering matrix, as in Tichy, M_ij = U_{d_i}, _{d_j}
and have n photons, generally in the first n modes
the distinguishability matrix is defined as in tichy, to be n*n,
S_{ij} = <phi_{d_i}|phi_{d_j}>
this is not a problem as it does not depend on the output partition but be aware of it
"""
m = size(U,1)
n = sum(input_state)
if n == 0
throw(ArgumentError("number of photons ill defined, zero cannot be computed but trivial"))
end
if size(S,1) != n
throw(ArgumentError("S matrix does not have the size required for this number of photons"))
end
if checks
if !is_unitary(U)
throw(ArgumentError("U not unitary"))
elseif !is_a_gram_matrix(S)
throw(ArgumentError("S not gram"))
else
println("Note : checking unitarity of U, that S is gram, slows down significantly probabilities computations")
end
end
function scattering_matrix_amplitudes(U, input_state, occupancy_vector, k)
"""U tilde in the handwritten notes, corresponds to U^dagger Lambda(eta) U"""
scattering_matrix(U' * matrix_phi(k, U, occupancy_vector, sum(input_state)) * U, input_state, input_state)
end
proba_fourier = 1/vector_factorial(input_state) .* [ryser(copy(transpose(S)) .* scattering_matrix_amplitudes(U, input_state, occupancy_vector, k)) for k in 0:n]
if length(proba_fourier) == 0
throw(ArgumentError("cannot compute the idft of an empty array"))
end
p_recovered = [1/(n+1) * sum([exp(2im * pi * j * k / (n+1)) * proba_fourier[k+1] for k in 0:n]) for j in 0:n] # direct inverse fourier transform
if any([!isapprox(imag(p_recovered[i]), 0., atol = 1e-7, rtol = 1e-5) for i in 1:length(p_recovered)])
println("WARNING : proba[$j] has a significant imaginary part of $(imag(tmp))")
elseif !(isapprox(sum(real.(p_recovered)), 1., rtol = 1e-5))
println("WARNING : probabilites do not sum to one ($(sum(real.(tmp)))), maybe be erreneous")
end
real.(p_recovered)
end
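# Example (illustrative sketch, not part of the original file): probability of finding
# 0, 1 or 2 photons in the first two output modes of a Haar-random 4-mode interferometer,
# for two partially distinguishable photons; `rand_haar` is the package helper used elsewhere in this repository.
# U = rand_haar(4)
# S = [1.0 0.5; 0.5 1.0]   # 2x2 Gram matrix of the two photons
# proba_partition_partial(U = U, S = S, occupancy_vector = [1,1,0,0], input_state = [1,1,0,0])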
"""
proba_partition_bosonic(;U, occupancy_vector, input_state=ones(Int, size(U,1)), checks=true)
Indistinguishable version of [`proba_partition_partial`](@ref).
"""
function proba_partition_bosonic(;U, occupancy_vector, input_state = ones(Int, size(U,1)), checks = true)
"""indistinguishable version of proba_partition_partial"""
n = sum(input_state)
proba_partition_partial(U = U , S = ones(n,n), occupancy_vector = occupancy_vector, input_state = input_state, checks = checks)
end
function proba_partition_distinguishable(;U, occupancy_vector, input_state = ones(Int, size(U,1)), checks = true)
"""indistinguishable version of proba_partition_partial"""
n = sum(input_state)
proba_partition_partial(U = U , S = Matrix{eltype(U)}(I,n,n), occupancy_vector = occupancy_vector, input_state = input_state, checks = checks)
end
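# Example (sketch): comparing the bosonic and distinguishable photon-count distributions
# in the first output mode of a Haar-random 3-mode interferometer (default input (1,1,1)).
# U = rand_haar(3)
# proba_partition_bosonic(U = U, occupancy_vector = [1,0,0])
# proba_partition_distinguishable(U = U, occupancy_vector = [1,0,0])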
"""
partition_probability_distribution_distinguishable_rand_walk(part, U)
Generate a vector giving the probability to have ``k`` photons in the partition
`part` at the output of the interferomter `U`.
"""
function partition_probability_distribution_distinguishable_rand_walk(part, U)
"""generates a vector giving the probability to have k photons in
the partition part for the interferometer U by the random walk method
discussed in a 22/02/21 email
part is written like (1,2,4) if it is the output modes 1, 2 and 4"""
function proba_photon_in_partition(i, part, U)
"""returns the probability that the photon i ends up in the set of outputs part, for distinguishable particles only"""
sum([abs(U[i, j])^2 for j in part]) # note the inversion line column
end
function walk(probability_vector, walk_number, part, U)
new_probability_vector = similar(probability_vector)
n = size(U)[1]
proba_this_walk = proba_photon_in_partition(walk_number, part, U)
for i in 1 : n+1
new_probability_vector[i] = (1-proba_this_walk) * probability_vector[i] + proba_this_walk * probability_vector[i != 1 ? i-1 : n+1]
end
new_probability_vector
end
n = size(U)[1]
probability_vector = zeros(n+1)
probability_vector[1] = 1
for walk_number in 1 : n
probability_vector = walk(probability_vector, walk_number, part, U)
end
if !(isapprox(sum(probability_vector), 1., rtol = 1e-8))
println("WARNING : probabilites do not sum to one ($(sum(probability_vector)))")
end
probability_vector
end
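# Example (sketch): distribution of the number of distinguishable photons ending up in
# output modes 1 and 2 of a Haar-random 3-mode interferometer, via the random walk method.
# U = rand_haar(3)
# partition_probability_distribution_distinguishable_rand_walk((1,2), U)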
# following Asymptotic Gaussian law for
# noninteracting indistinguishable
# particles in random networks
# Valery S. Shchesnovich
"""
partition_expectation_values(partition_size_vector, partition_counts)
partition_expectation_values(part_occ::PartitionOccupancy)
Return the Haar averaged probability of photon number count in binned outputs
for [`Distinguishable`](@ref) and [`Bosonic`](@ref) particles.
!!! note "Reference"
[https://www.nature.com/articles/s41598-017-00044-8.pdf](https://www.nature.com/articles/s41598-017-00044-8.pdf)
"""
function partition_expectation_values(partition_size_vector, partition_counts)
m = sum(partition_size_vector)
number_partitions = length(partition_size_vector)
partition_size_ratio = partition_size_vector ./ m # q
n = sum(partition_counts)
@test all(partition_counts .>= 0)
# factorials can quickly get pretty big so this is required
if any(partition_counts .> 20) || n > 20
n = big(n)
partition_counts = big.(partition_counts)
end
proba_dist = factorial(n) / vector_factorial(partition_counts) * prod(partition_size_ratio .^ partition_counts)
proba_bosonic = proba_dist * prod([partition_counts[i] > 1 ? prod([1+l/partition_size_vector[i] for l in 1:partition_counts[i]-1]) : 1 for i in 1:length(partition_size_vector)]) /prod([1+l/m for l in 1:n-1])
proba_dist, proba_bosonic
end
partition_expectation_values(part_occ::PartitionOccupancy) = partition_expectation_values(partition_occupancy_to_partition_size_vector_and_counts(part_occ)...)
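# Example (sketch): Haar-averaged probability of finding one photon in each of two equal
# bins of m = 4 modes, for distinguishable and bosonic inputs respectively (0.5 and 0.4).
# partition_expectation_values([2,2], [1,1])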
"""
subset_expectation_value(subset_size, k, n, m)
Return the Haar averaged probability to find `k` from `n` photons inside a subset of
binned output modes of length `size_subset` among `m` modes for [`Distinguishable`](@ref)
and [`Bosonic`](@ref) cases.
"""
function subset_expectation_value(subset_size, k,n,m)
partition_size_vector = [subset_size, m-subset_size]
partition_counts = [k,n-k]
partition_expectation_values(partition_size_vector, partition_counts)
end
"""
subset_relative_distance_of_averages(subset_size, n, m)
"""
function subset_relative_distance_of_averages(subset_size,n,m)
@warn "check tvd conventions"
0.5*abs(sum([abs(subset_expectation_value(subset_size, k,n,m)[1] - subset_expectation_value(subset_size, k,n,m)[2]) for k in 0:n]))
end
"""
choose_best_average_subset(;m, n, distance=tvd)
Return the ideal subset size on average and its total variance distance.
"""
function choose_best_average_subset(;m,n, distance = tvd)
"""returns the ideal subset size on average and its TVD,
to compare with the full bunching test of Shchesnovich"""
function distance_this_subset_size(subset_size)
proba_dist = [subset_expectation_value(subset_size,k,n,m)[1] for k in 0:n]
proba_bos = [subset_expectation_value(subset_size,k,n,m)[2] for k in 0:n]
distance(proba_dist, proba_bos)
end
max_distance = 0
subset_size_max_ratio = nothing
for subset_size in 1:m-1
if distance_this_subset_size(subset_size) > max_distance
max_distance = distance_this_subset_size(subset_size)
subset_size_max_ratio = subset_size
end
end
subset_size_max_ratio, distance_this_subset_size(subset_size_max_ratio)
end
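# Example (sketch): best single-subset size (on Haar average) and its TVD for n = 4 photons
# in m = 16 modes, assuming the package's `tvd` distance.
# choose_best_average_subset(m = 16, n = 4)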
"""
best_partition_size(;m, n, n_subsets, distance=tvd)
Return the ideal `partition_size_vector` for a given number of subsets `n_subsets`
and the Haar averaged TVD in second parameter.
!!! note
For a single subset, `n_subsets`=2 as we need a complete partition, occupying all modes.
"""
function best_partition_size(;m,n, n_subsets, distance = tvd)
"""return the ideal partition_size_vector for a given number
of subsets n_subsets as well as the haar averaged tvd in second parameter
for a single subset, n_subsets = 2 as we need a complete partition, occupying all modes"""
@argcheck n_subsets >= 2 "we need a complete partition, occupying all modes"
@argcheck n_subsets <= m "more partition bins than output modes"
part_list = all_mode_configurations(m,n_subsets, only_photon_number_conserving = true)
remove_trivial_partitions!(part_list)
max_distance = 0
part_max_ratio = nothing
for part in ranked_partition_list(part_list)
events = all_mode_configurations(n,n_subsets, only_photon_number_conserving = true)
pdf = [partition_expectation_values(part, event) for event in events]
pdf_dist = hcat(collect.(pdf)...)[1,:]
pdf_bos = hcat(collect.(pdf)...)[2,:]
this_distance = distance(pdf_bos,pdf_dist)
println(part, " : ", this_distance)
if this_distance > max_distance
max_distance = this_distance
part_max_ratio = part
end
end
part_max_ratio, max_distance
end
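# Example (sketch): best two-bin partition sizes and the associated Haar-averaged TVD
# for n = 3 photons in m = 6 modes.
# best_partition_size(m = 6, n = 3, n_subsets = 2)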
"""
all_mode_configurations(n, n_subset; only_photon_number_conserving=false)
all_mode_configurations(input_state::Input, part::Partition; only_photon_number_conserving=false)
all_mode_configurations(input_state::Input, sub::Subset; only_photon_number_conserving=false)
Generate all possible photon counts of `n` photons in a partition/subset
of `n_subset` subsets.
!!! note
- Does not take into account photon number conservation by default
- This is the photon counting in partitions and not events outputs but it
can be used likewise
"""
function all_mode_configurations(n,n_subset; only_photon_number_conserving = false)
array = []
for i in 1:(n+1)^(n_subset)
this_vector = digits(i-1, base = n+1, pad = n_subset)
if only_photon_number_conserving
if sum(this_vector) == n
push!(array,this_vector)
end
else
push!(array,this_vector)
end
end
array
end
all_mode_configurations(input_state::Input,part::Partition; only_photon_number_conserving = false) = all_mode_configurations(input_state.n,part.n_subset; only_photon_number_conserving = only_photon_number_conserving)
all_mode_configurations(input_state::Input,sub::Subset; only_photon_number_conserving = false) = all_mode_configurations(input_state.n,1; only_photon_number_conserving = only_photon_number_conserving)
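# Example (sketch): all photon-number conserving counts of n = 2 photons over 2 subsets,
# i.e. [2,0], [1,1] and [0,2].
# all_mode_configurations(2, 2, only_photon_number_conserving = true)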
"""
remove_trivial_partitions!(part_list)
In a list of partitions sizes, ex. `[[2,0],[1,1],[0,2]]`, keeps only
the elements with non trivial subset size, in this ex. only `[1,1]`.
"""
function remove_trivial_partitions!(part_list)
filter!(x -> !any(x .== 0), part_list)
end
"""
ranked_partition_list(part_list)
Remove partitions such as `[1,2]` when `[2,1]` is already counted, as only the
sizes of the subsets matter; only keeps vectors with non-increasing counts.
"""
function ranked_partition_list(part_list)
output_partitions = []
for part in part_list
keep_this_part = true
for i in 1:length(part)-1
if part[i] < part[i+1]
keep_this_part = false
break
end
end
if keep_this_part
push!(output_partitions, part)
end
end
output_partitions
end
"""
photon_number_conserving_events(physical_indexes, n; partition_spans_all_modes=false)
Return only the events conserving photon number `n`.
!!! note
- If `partition_spans_all_modes`=`false`, gives all events with less than `n` or `n` photons
- If `partition_spans_all_modes` = `true` only exact photon number conserving physical_indexes
"""
function photon_number_conserving_events(physical_indexes, n; partition_spans_all_modes = false)
results = []
for index in physical_indexes
if partition_spans_all_modes == false
if sum(index) <= n
push!(results, index)
end
else
if sum(index) == n
push!(results, index)
end
end
end
results
end
"""
photon_number_non_conserving_events(physical_indexes, n; partition_spans_all_modes=false)
Return the elements not conserving the number of photons.
"""
function photon_number_non_conserving_events(physical_indexes,n ; partition_spans_all_modes = false)
setdiff(physical_indexes, photon_number_conserving_events(physical_indexes, n, ; partition_spans_all_modes = partition_spans_all_modes))
end
"""
check_photon_conservation(physical_indexes, pdf, n; atol=ATOL, partition_spans_all_modes=false)
Check if probabilities corresponding to non photon number conserving events are zero.
"""
function check_photon_conservation(physical_indexes, pdf, n; atol = ATOL, partition_spans_all_modes = false)
events_to_check = photon_number_non_conserving_events(physical_indexes,n; partition_spans_all_modes = partition_spans_all_modes)
for (i, index) in enumerate(physical_indexes)
if index in events_to_check
@argcheck isapprox(clean_proba(pdf[i]),0, atol=atol)# "forbidden event has non zero probability"
end
end
end
"""
compute_probabilities_partition(physical_interferometer::Interferometer, part::Partition, input_state::Input)
Compute the probability to find a certain photon count in a partition `part` of
the output modes for the given interferometer.
Return `(counts = physical_indexes, probabilities = pdf)` corresponding to the
occupation numbers in the partition and the associated probability.
"""
function compute_probabilities_partition(physical_interferometer::Interferometer, part::Partition, input_state::Input)
@argcheck at_most_one_photon_per_bin(input_state.r) "more than one input per mode is not implemented"
n = input_state.n
# if LossParameters(typeof(physical_interferometer)) == IsLossy()
# m = physical_interferometer.m_real
# else
m = input_state.m
# end
mode_occupation_list = fill_arrangement(input_state)
S = input_state.G.S
physical_indexes = []
pdf = []
if occupies_all_modes(part)
# in this case we use the trick of removing the last subset
# and computing everything as if in the partition without the last
# subset, then inferring the photon number in the last one
# from photon conservation
(small_indexes, small_pdf) = compute_probabilities_partition(physical_interferometer, remove_last_subset(part), input_state)
for (this_count, p) in zip(small_indexes, small_pdf)
new_count = copy(this_count)
if n-sum(this_count) >=0 # photon number conserving case
append!(new_count, n-sum(this_count))
push!(physical_indexes, new_count)
push!(pdf, p)
end
end
else
fourier_indexes = all_mode_configurations(n,part.n_subset, only_photon_number_conserving = false)
probas_fourier = Array{ComplexF64}(undef, length(fourier_indexes))
virtual_interferometer_matrix = similar(physical_interferometer.U)
for (index_fourier_array, fourier_index) in enumerate(fourier_indexes)
# for each fourier index, we recompute the virtual interferometer
virtual_interferometer_matrix = physical_interferometer.U
diag = [1.0 + 0im for i in 1:m]
# this is not type stable
# but need it to be a complex float at least
for (i,fourier_element) in enumerate(fourier_index)
this_phase = exp(2*pi*1im/(n+1) * fourier_element)
for j in 1:length(diag)
if part.subsets[i].subset[j] == 1
diag[j] *= this_phase
end
end
end
virtual_interferometer_matrix *= Diagonal(diag)
virtual_interferometer_matrix *= physical_interferometer.U'
# beware, only the modes corresponding to the
# virtual_interferometer_matrix[input_config,input_config]
# must be taken into account !
probas_fourier[index_fourier_array] = permanent(virtual_interferometer_matrix[mode_occupation_list,mode_occupation_list] .* S)
end
physical_indexes = copy(fourier_indexes)
probas_physical(physical_index) = 1/(n+1)^(part.n_subset) * sum(probas_fourier[i] * exp(-2pi*1im/(n+1) * dot(physical_index, fourier_index)) for (i,fourier_index) in enumerate(fourier_indexes))
pdf = [probas_physical(physical_index) for physical_index in physical_indexes]
end
pdf = clean_pdf(pdf)
check_photon_conservation(physical_indexes, pdf, n; partition_spans_all_modes = occupies_all_modes(part))
(physical_indexes, pdf)
end
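# Example (sketch, kept as a comment): photon counts in an equilibrated 2-subset partition
# of a Haar-random interferometer for a 3-photon Bosonic input; `RandHaar`, `first_modes`
# and `equilibrated_partition` are assumed from the package API used elsewhere in this file.
# n = 3; m = 6
# ib = Input{Bosonic}(first_modes(n,m))
# part = equilibrated_partition(m, 2)
# counts, probas = compute_probabilities_partition(RandHaar(m), part, ib)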
"""
compute_probability_partition_occupancy(physical_interferometer::Interferometer, part_occupancy::PartitionOccupancy, input_state::Input)
Compute the probability to find a partition occupancy.
!!! note
Inefficient to use multiple times for the same physical setting, rather use
compute_probabilities_partition.
"""
function compute_probability_partition_occupancy(physical_interferometer::Interferometer, part_occupancy::PartitionOccupancy, input_state::Input)
(physical_indexes, pdf) = compute_probabilities_partition(physical_interferometer, part_occupancy.partition, input_state::Input)
for (i,counts) in enumerate(physical_indexes)
if counts == part_occupancy.counts.state
return pdf[i]
end
end
nothing
end
function print_pdfs(physical_indexes, pdf, n; physical_events_only = false, partition_spans_all_modes = false)
indexes_to_print = physical_events_only ? photon_number_conserving_events(physical_indexes, n; partition_spans_all_modes = partition_spans_all_modes) : physical_indexes
println("---------------")
println("Partition results : ")
for (i, index) in enumerate(physical_indexes)
if index in indexes_to_print
println("index = $index, p = $(pdf[i])")
end
end
println("---------------")
end
"""
compute_probability!(ev::Event{TIn,TOut}) where {TIn<:InputType, TOut<:PartitionCount}
compute_probability!(ev::Event{TIn,TOut}) where {TIn<:InputType, TOut<:PartitionCountsAll}
Given a defined [`Event`](@ref), computes/updates its probability or set of probabilities
(for instance if looking at partition outputs, with `MultipleCounts` being filled).
This function is defined separately as it is most often the most time consuming
step of calculations, and one may wish to separate the evaluation of probabilities
from preliminary definitions.
"""
function compute_probability!(ev::Event{TIn,TOut}) where {TIn<:InputType, TOut<:PartitionCount}
check_probability_empty(ev)
ev.proba_params.precision = eps()
ev.proba_params.failure_probability = 0
ev.proba_params.probability = compute_probability_partition_occupancy(ev.interferometer, ev.output_measurement.part_occupancy, ev.input_state)
end
function compute_probability!(ev::Event{TIn,TOut}) where {TIn<:InputType, TOut<:PartitionCountsAll}
check_probability_empty(ev)
ev.proba_params.precision = eps()
ev.proba_params.failure_probability = 0
i = ev.input_state
part = ev.output_measurement.part
if i.m != part.m
if i.m == 2*part.m
# @warn "converting the partition to a lossy one"
part = to_lossy(part)
ev.output_measurement = PartitionCountsAll(part)
else
error("incompatible i, part")
end
end
(part_occ, pdf) = compute_probabilities_partition(ev.interferometer, ev.output_measurement.part, i)
# clean up to keep photon number conserving events (possibly lossy events in the partition occupies all modes)
part_occ_physical = []
pdf_physical = []
n = i.n
for (i,occ) in enumerate(part_occ)
if sum(occ) <= n
if occupies_all_modes(ev.output_measurement.part)
if sum(occ) == n
push!(part_occ_physical, occ)
push!(pdf_physical, pdf[i])
end
else
push!(part_occ_physical, occ)
push!(pdf_physical, pdf[i])
end
end
end
mc = MultipleCounts([PartitionOccupancy(ModeOccupation(occ),n,part) for occ in part_occ_physical], pdf_physical)
ev.proba_params.probability = EventProbability(mc).probability
end
"""
to_partition_count(event::Event{TIn, TOut}, part::Partition) where {TIn<:InputType, TOut <: FockDetection}
Converts an `Event` with `FockDetection` to a `PartitionCount` one.
"""
function to_partition_count(ev::Event{TIn, TOut}, part::Partition) where {TIn<:InputType, TOut <: Union{FockDetection, FockSample}}
n_subsets = part.n_subset
counts_array = zeros(Int,n_subsets)
for i in 1:n_subsets
counts_array[i] = sum(ev.output_measurement.s.state .* part.subsets[i].subset)
end
@argcheck sum(counts_array) == ev.input_state.n
o = PartitionCount(PartitionOccupancy(ModeOccupation(counts_array), ev.input_state.n, part))
new_ev = Event(ev.input_state, o, ev.interferometer)
new_ev
end
"""
p_partition(ev::Event{TIn1, TOut1}, ev_theory::Event{TIn2, TOut2}) where {TIn1<:InputType, TOut1 <: PartitionCount, TIn2 <:InputType, TOut2 <:PartitionCountsAll}
Outputs the probability that an observed count in `ev` happens under the conditions set by `ev_theory`.
For instance, if we take the conditions
ib = Input{Bosonic}(first_modes(n,m))
part = equilibrated_partition(m,n_subsets)
o = PartitionCountsAll(part)
evb = Event(ib,o,interf)
then
p_partition(ev, evb)
gives the probability that this `ev` is observed under the hypotheses of `ev_theory`.
"""
function p_partition(ev::Event{TIn1, TOut1}, ev_theory::Event{TIn2, TOut2}) where {TIn1<:InputType, TOut1 <: PartitionCount, TIn2 <:InputType, TOut2 <:PartitionCountsAll}
#check interferometer, input configuration
@argcheck ev.output_measurement.part_occupancy.partition == ev_theory.output_measurement.part
@argcheck ev.interferometer == ev_theory.interferometer
@argcheck ev.input_state.r == ev_theory.input_state.r
# compute the probabilities if they are not already known
ev_theory.proba_params.probability == nothing ? compute_probability!(ev_theory) : nothing
p = ev_theory.proba_params.probability
observed_count = ev.output_measurement.part_occupancy.counts.state
# look up the probability of this count
proba_this_count = nothing
for (proba, theoretical_count) in zip(p.proba, p.counts)
if observed_count == theoretical_count.counts.state
proba_this_count = proba
break
end
end
proba_this_count
end
function compute_probability!(params::PartitionSamplingParameters)
@unpack n, m, interf, T, mode_occ, i, n_subsets, part, o, ev = params
compute_probability!(ev)
end
"""
violates_bapat_sunder(A,B, tol = ATOL)
Checks if matrices `A` and `B` violate the Bapat-Sunder conjecture, see
[Boson bunching is not maximized by indistinguishable particles](https://arxiv.org/abs/2203.01306)
"""
function violates_bapat_sunder(A,B, tol = ATOL)
diagonal_el_product = prod([B[i,i] for i in 1:size(B)[1]])
# @test (imag(diagonal_el_product) ≈ 0.)
if !(imag(diagonal_el_product) ≈ 0.)
throw(Exception("product of diagonal elements of B is not real"))
elseif !(is_positive_semidefinite(A) && is_positive_semidefinite(B))
throw(Exception("A or B not semidefinitepositive"))
else
return abs(ryser(A .* B)) / (abs(ryser(A)) * real(prod([B[i,i] for i in 1:size(B)[1]]))) > 1+tol
end
return nothing
end
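# Example (sketch): testing the conjecture on random positive semidefinite matrices;
# `rand_gram_matrix` is the package helper used elsewhere in this repository.
# A = rand_gram_matrix(4)
# B = rand_gram_matrix(4)
# violates_bapat_sunder(A, B)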
# functions relating to counter examples of the bapat sunder conjecture
# and its physical realization
"""
cholesky_semi_definite_positive(A)
cholesky decomposition (`A` = R' * R) for a sdp but not strictly positive
definite matrix
"""
function cholesky_semi_definite_positive(A)
# method found in some forum
X = sqrt(A)
QR_dec = qr(X)
R = QR_dec.R
try
@test R' * R ≈ A
return R
catch err
rethrow(err)
# println(err)
# println(R)
# pretty_table(R)
end
end
"""
incorporate_in_a_unitary(X)
incorporates the renormalized matrix X in a double sized unitary through the
proof of Lemma 29 of Aaronson Arkipov seminal [The Computational Complexity of Linear Optics](https://arxiv.org/abs/1011.3245)
"""
function incorporate_in_a_unitary(X)
Y = X / norm(X)
I_yy = Matrix(I, size(Y)) - Y'*Y
@test is_hermitian(I_yy)
@test is_positive_semidefinite(I_yy)
Z = cholesky_semi_definite_positive(I_yy)
@test Y'*Y + Z'*Z ≈ Matrix(I, size(Y))
W = rand(ComplexF64, 2 .* size(Y))
n = size(Y)[1]
W[1:n, 1:n] = Y
W[n+1:2n, 1:n] = Z
W = modified_gram_schmidt(W)
@test is_unitary(W)
W
end
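# Example (sketch): embedding a random 3x3 matrix into a 6x6 unitary; the upper-left block
# of the result is the renormalized input matrix.
# X = rand(ComplexF64, 3, 3)
# W = incorporate_in_a_unitary(X)
# W[1:3, 1:3] ≈ X / norm(X)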
"""
incorporate_in_a_unitary_non_square(X)
same as `incorporate_in_a_unitary` but for a renormalized matrix `X` of size (m,n) with m >= n
generates a minimally sized unitary (ex 9*9 interferometer for the 7*2 M' of the first counter example of drury)
"""
function incorporate_in_a_unitary_non_square(X)
# pad X with zeros
(m,n) = size(X)
if m<n
throw(ArgumentError("m<n not implemented"))
end
#padding with zeros
X_extended = zeros(eltype(X), (m,m))
X_extended[1:m, 1:n] = X
#now the same as incorporate_in_a_unitary but adapted to take only the minimally necessary elements
Y = X_extended / norm(X_extended)
I_yy = Matrix(I, size(Y)) - Y'*Y
@test is_hermitian(I_yy)
@test is_positive_semidefinite(I_yy)
Z = cholesky_semi_definite_positive(I_yy)
@test Y'*Y + Z'*Z ≈ Matrix(I, size(Y))
W = rand(ComplexF64, (m+n, m+n))
W[1:m, 1:n] = Y[1:m, 1:n] #not taking the padding
W[m+1:m+n, 1:n] = Z[1:n, 1:n] #taking only the minimal elements necessary to make the first n columns an orthonormal basis
W = modified_gram_schmidt(W)
@test is_unitary(W)
W
end
"""
add_columns_to_make_square_unitary(M_dagger)
Makes a square unitary matrix `U` whose first columns are given by `M_dagger` (which must itself
have orthonormal columns), by appending random column vectors that are then orthonormalized.
"""
function add_columns_to_make_square_unitary(M_dagger)
n,r = size(M_dagger)
U = rand(ComplexF64, (n,n))
U[1:n,1:r] = M_dagger
U = modified_gram_schmidt(U)
@test is_unitary(U)
U
end
# search for counter examples to bapat sunder to show that it is hard
"""
search_until_user_stop(search_function)
Runs `search_function` until user-stop (Ctrl+C).
"""
function search_until_user_stop(search_function)
n_trials = 1
try
while true
search_function()
n_trials += 1
end
catch err
if isa(err, InterruptException)
print("no counter example found after ")
end
finally
println("$n_trials trials")
end
end
"""
random_search_counter_example_bapat_sunder(;m,n,r, physical_H = true)
Brute-force search of counter-examples of rank `r`.
"""
function random_search_counter_example_bapat_sunder(;m,n,r, physical_H = true)
S = rand_gram_matrix_rank(n,r)
if physical_H
U = rand_haar(m)
input_state = [i <= n ? 1 : 0 for i in 1:m]
partition = [i <= r ? 1 : 0 for i in 1:m]
H = H_matrix(U, input_state, partition)
else
H = rand_gram_matrix_rank(n)
end
if violates_bapat_sunder(H,S)
println("counter example found : ")
println(H)
println(S)
end
end
# functions relating to more general input to the generalized bunching conjecture
# instead of pure state inputs
"""
schur_matrix(H)
computes the Schur matrix as defined in Eq. 1 of Linear Algebra and its Applications 490 (2016) 196–201
"""
function schur_matrix(H)
@test is_positive_semidefinite(H)
n = size(H,1)
schur_mat = Matrix{eltype(H)}(undef,(factorial(n), factorial(n)))
@showprogress for (i,sigma) in enumerate(permutations(collect(1:n)))
for (j,rho) in enumerate(permutations(collect(1:n)))
schur_mat[i,j] = prod([H[sigma[k], rho[k]] for k in 1:n])
end
end
schur_mat
end
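# Example (sketch): for n = 2 the Schur matrix is 2x2, indexed by the permutations [1,2] and [2,1]:
# schur_matrix([1.0 0.3; 0.3 1.0]) ≈ [1.0*1.0 0.3*0.3; 0.3*0.3 1.0*1.0]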
function find_permutation_index(this_perm, permutation_array)
findfirst(x->x == this_perm, permutation_array)
end
function multiply_permutations(a,b)
a[b[:]]
end
"""
J_array(theta, n)
returns the `J` as defined in in Eq.10 of [Universality of Generalized Bunching and Efficient Assessment of Boson Sampling](https://arxiv.org/abs/1509.01561), with the permutations coming in the order given by `permutations(collect(1:n))`
"""
function J_array(theta, n)
J = zeros(eltype(theta), factorial(n))
permutation_array = collect(permutations(collect(1:n)))
@showprogress for (i,sigma) in enumerate(permutations(collect(1:n)))
for (j,tau) in enumerate(permutations(collect(1:n)))
J[i] += conj(theta[j]) * theta[find_permutation_index(multiply_permutations(tau, sigma), permutation_array)]
end
end
J
end
"""
density_matrix_from_J(J,n)
density matrix associated to a J function as defined in [Universality of Generalized Bunching and Efficient Assessment of Boson Sampling](https://arxiv.org/abs/1509.01561), computed through Eq. 46.
"""
function density_matrix_from_J(J,n)
density_matrix = zeros(eltype(J), factorial(n), factorial(n))
permutation_array = collect(permutations(collect(1:n)))
@showprogress for (i,sigma) in enumerate(permutations(collect(1:n)))
for (j,tau) in enumerate(permutations(collect(1:n)))
density_matrix[j,find_permutation_index(multiply_permutations(sigma, tau), permutation_array)] += J[i]
end
end
density_matrix/factorial(n)
end
abstract type Certifier end
"""
HypothesisFunction
Stores a function acting as a hypothesis. The function `f` needs to output the probability associated with an `Event` under that hypothesis. It could be any type of `Event`, such as `FockDetection` or `PartitionOccupancy`.
"""
struct HypothesisFunction
f::Function
end
"""
Bayesian(events::Vector{Event}, null_hypothesis::HypothesisFunction, alternative_hypothesis::HypothesisFunction)
Certifies using bayesian testing.
"""
mutable struct Bayesian <: Certifier
events::Vector{Event} # input data as events - note that they shouldn't have probabilities associated, just observations
probabilities::Vector{Real} # array containing the confidence updated at each run for plotting purposes
confidence::Union{Real, Nothing} # gives the confidence that the null_hypothesis is true
n_events::Int
null_hypothesis::HypothesisFunction
alternative_hypothesis::HypothesisFunction
function Bayesian(events, null_hypothesis::HypothesisFunction, alternative_hypothesis::HypothesisFunction)
if !isa(events, Vector{Event})
events = convert(Vector{Event}, events)
end
for event in events
check_probability_empty(event, resetting_message = false)
end
new(events, Vector{Real}(), nothing, length(events), null_hypothesis, alternative_hypothesis)
end
Bayesian(events, null_hypothesis::Function, alternative_hypothesis::Function) = Bayesian(events, HypothesisFunction(null_hypothesis), HypothesisFunction(alternative_hypothesis))
end
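# Example (sketch, assuming `events` holds observed events without probabilities and that
# `p_B`, `p_D` return the probability of an event under the Bosonic / Distinguishable hypothesis):
# certifier = Bayesian(events, p_B, p_D)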
mutable struct BayesianPartition <: Certifier
events::Vector{Event} # input data as events - note that they shouldn't have probabilities associated, just observations
probabilities::Vector{Real} # array containing the confidence updated at each run for plotting purposes
confidence::Union{Real, Nothing} # gives the confidence that the null_hypothesis is true
n_events::Int
null_hypothesis::Union{HypothesisFunction, InputType}
alternative_hypothesis::Union{HypothesisFunction, InputType}
# an input type such as Bosonic can be specified and the correct HypothesisFunction will be created
part::Union{Partition, Nothing}
n_subsets::Int
# function BayesianPartition(events, null_hypothesis::HypothesisFunction, alternative_hypothesis::HypothesisFunction; n_subsets = 2)
#
# if !isa(events, Vector{Event})
# events = convert(Vector{Event}, events)
# end
#
# for event in events
# check_probability_empty(event, resetting_message = false)
# end
#
# ev = events[1]
# part = equilibrated_partition(ev.input_state.m, n_subsets)
# new(events, Vector{Real}(), nothing, length(events), null_hypothesis, alternative_hypothesis, part)
# end
#
# BayesianPartition(events, null_hypothesis::Function, alternative_hypothesis::Function; n_subsets = 2) = Bayesian(events, HypothesisFunction(null_hypothesis), HypothesisFunction(alternative_hypothesis), n_subsets = n_subsets)
function BayesianPartition(events, null_hypothesis::TIn1, alternative_hypothesis::TIn2, part::Partition) where {TIn1 <: Union{Bosonic, Distinguishable}} where {TIn2 <: Union{Bosonic, Distinguishable}}
if !isa(events, Vector{Event})
events = convert(Vector{Event}, events)
end
for event in events
check_probability_empty(event, resetting_message = false)
########### need to add a check that always same interferometer, input
end
@argcheck TIn1 != TIn2 "no alternative_hypothesis"
ev = events[1]
input_modes = ev.input_state.r
interf = ev.interferometer
ib = Input{Bosonic}(input_modes)
id = Input{Distinguishable}(input_modes)
o = PartitionCountsAll(part)
evb = Event(ib,o,interf)
evd = Event(id,o,interf)
pb = compute_probability!(evb)
pd = compute_probability!(evd)
p_partition_B(ev) = p_partition(to_partition_count(ev, part), evb)
p_partition_D(ev) = p_partition(to_partition_count(ev, part), evd)
if TIn1 == Bosonic
p_q = HypothesisFunction(p_partition_B)
p_a = HypothesisFunction(p_partition_D)
elseif TIn1 == Distinguishable
p_a = HypothesisFunction(p_partition_B)
p_q = HypothesisFunction(p_partition_D)
end
new(events, Vector{Real}(), nothing, length(events), p_q, p_a, part, part.n_subset)
end
end
mutable struct FullBunching <: Certifier
events::Vector{Event} # input data as events - note that they shouldn't have probabilities associated, just observations
confidence::Union{Real, Nothing} # gives the confidence that the null_hypothesis is true
null_hypothesis::Union{HypothesisFunction, InputType}
alternative_hypothesis::Union{HypothesisFunction, InputType}
# an input type such as Bosonic can be specified and the correct HypothesisFunction will be created
subset::Subset
subset_size::Int
p_value_null::Union{Real, Nothing}
p_value_alternative::Union{Real, Nothing}
function FullBunching(events, null_hypothesis::TIn1, alternative_hypothesis::TIn2, subset_size::Int) where {TIn1 <: Union{Bosonic, Distinguishable}} where {TIn2 <: Union{Bosonic, Distinguishable}}
if !isa(events, Vector{Event})
events = convert(Vector{Event}, events)
end
for event in events
check_probability_empty(event, resetting_message = false)
########### need to add a check that always same interferometer, input
end
@argcheck TIn1 != TIn2 "no alternative_hypothesis"
ev = events[1]
input_modes = ev.input_state.r
m = ev.input_state.m
n = ev.input_state.n
@argcheck (subset_size > 0 && subset_size < m) "invalid subset"
n*(m-subset_size) < SAFETY_FACTOR_FULL_BUNCHING * m ? (@warn "invalid subset size for high bunching probability") : nothing
subset = Subset(first_modes(subset_size, input_modes.m))
new(events, nothing, null_hypothesis, alternative_hypothesis,subset, subset_size, nothing, nothing)
end
end
mutable struct Correlators <: Certifier
events::Vector{Event} # input data as events - note that they shouldn't have probabilities associated, just observations
confidence::Union{Real, Nothing} # gives the confidence that the null_hypothesis is true
null_hypothesis::Union{HypothesisFunction, InputType}
alternative_hypothesis::Union{HypothesisFunction, InputType}
p_value_null::Union{Real, Nothing}
p_value_alternative::Union{Real, Nothing}
function Correlators(events, null_hypothesis::TIn1, alternative_hypothesis::TIn2) where {TIn1 <: Union{Bosonic, Distinguishable}} where {TIn2 <: Union{Bosonic, Distinguishable}}
if !isa(events, Vector{Event})
events = convert(Vector{Event}, events)
end
for event in events
check_probability_empty(event, resetting_message = false)
########### need to add a check that always same interferometer, input
end
@argcheck TIn1 != TIn2 "no alternative_hypothesis"
ev = events[1]
input_modes = ev.input_state.r
m = ev.input_state.m
n = ev.input_state.n
new(events, nothing, null_hypothesis, alternative_hypothesis, nothing, nothing)
end
end
abstract type Circuit <: Interferometer end
abstract type CircuitElement <: Interferometer end
abstract type LossyCircuitElement <: Interferometer end
"""
LosslessCircuit(m::Int)
Creates an empty circuit with `m` input modes. The unitary representing the circuit
is accessed via the field `.U`.
Fields:
- m::Int
- circuit_elements::Vector{Interferometer}
- U::Union{Matrix{ComplexF64}, Nothing}
"""
mutable struct LosslessCircuit <: Circuit
m::Int
m_real::Int
circuit_elements::Vector{Interferometer}
U::Union{Matrix, Nothing}
function LosslessCircuit(m::Int)
new(m, m, [], nothing)
end
end
LossParameters(::Type{LosslessCircuit}) = IsLossless()
"""
LossyCircuit(m_real::Int)
Lossy `Interferometer` constructed from `circuit_elements`.
"""
mutable struct LossyCircuit <: Circuit
m_real::Int
m::Int
circuit_elements::Vector{Interferometer}
U_physical::Union{Matrix{Complex}, Nothing} # physical matrix
U::Union{Matrix{Complex}, Nothing} # virtual 2m*2m interferometer
function LossyCircuit(m_real::Int)
new(m_real, 2*m_real, [], nothing, nothing)
end
end
LossParameters(::Type{LossyCircuit}) = IsLossy()
"""
BeamSplitter(transmission_amplitude::Float64)
Creates a beam-splitter with tunable transmissivity.
Fields:
- transmission_amplitude::Float
- U::Matrix{Complex}
- m::Int
"""
struct BeamSplitter <: CircuitElement
transmission_amplitude::Real
U::Matrix
m::Int
BeamSplitter(transmission_amplitude::Real) = new(transmission_amplitude, beam_splitter(transmission_amplitude), 2)
end
LossParameters(::Type{BeamSplitter}) = IsLossless()
"""
PhaseShift(phase)
Creates a phase-shifter with parameter `phase`.
Fields:
- phase::Float64
- m::Int
- U::Matrix{ComplexF64}
"""
struct PhaseShift <: CircuitElement
phase::Real
U::Matrix
m::Int
PhaseShift(phase) = new(phase, exp(1im * phase) * ones((1,1)), 1)
end
LossParameters(::Type{PhaseShift}) = IsLossless()
#
# """
# Rotation(angle::Float64)
#
# Creates a Rotation matrix with tunable angle.
#
# Fields:
# - angle::Float64
# - U::Matrix{ComplexF64}
# - m::Int
# """
# struct Rotation <: Interferometer
# angle::Real
# U::Matrix
# m::Int
# Rotation(angle::Real) = new(angle, rotation_matrix(angle),2)
# end
#
# """
# PhaseShift(phase::Float64)
#
# Creates a phase-shifter with parameter `phase`.
#
# Fields:
# - phase::Float64
# - m::Int
# - U::Matrix{ComplexF64}
# """
# struct PhaseShift <: Interferometer
# phase::Real
# U::Matrix
# m::Int
# PhaseShift(phase::Real) = new(phase, phase_shift(phase), 2)
# end
"""
LossyBeamSplitter(transmission_amplitude, η_loss)
Creates a beam-splitter with tunable transmissivity and loss. Uniform loss model: each input line i1, i2 is preceded by a virtual beam-splitter coupling it into an environment mode with loss parameter `η_loss`.
"""
struct LossyBeamSplitter <: LossyCircuitElement
transmission_amplitude::Real
η_loss::Real
U::Matrix
m::Int
m_real::Int
function LossyBeamSplitter(transmission_amplitude::Real, η_loss::Real)
@argcheck between_one_and_zero(transmission_amplitude)
@argcheck between_one_and_zero(η_loss)
new(transmission_amplitude, η_loss, virtual_interferometer_uniform_loss(beam_splitter(transmission_amplitude),η_loss), 4,2)
end
end
LossParameters(::Type{LossyBeamSplitter}) = IsLossy()
"""
LossyLine(η_loss)
Optical line with some loss: represented by a `BeamSplitter` coupling the line into an environment mode with loss parameter `η_loss`.
"""
struct LossyLine <: LossyCircuitElement
η_loss::Real
U::Matrix
m::Int
m_real::Int
function LossyLine(η_loss::Real)
@argcheck between_one_and_zero(η_loss)
new(η_loss, virtual_interferometer_uniform_loss(ones((1,1)), η_loss), 2,1)
end
end
LossParameters(::Type{LossyLine}) = IsLossy()
"""
RandomPhaseShifter <: CircuitElement
Applies a random phase shift to a single optical line, drawn from a distribution `d`. For a uniform phase shift, for instance
d = Uniform(0, 2pi)
"""
struct RandomPhaseShifter <: CircuitElement
U::Matrix
m::Int
d::Union{Distribution, Nothing}
function RandomPhaseShifter(d::Distribution)
new(exp(1im * rand(d)) * ones((1,1)), 1, d)
end
function RandomPhaseShifter(ϕ::Real)
new(exp(1im * ϕ) * ones((1,1)), 1, nothing)
end
end
LossParameters(::Type{RandomPhaseShifter}) = IsLossless()
struct LossyLineWithRandomPhase <: LossyCircuitElement
η_loss::Real
U::Matrix
m::Int
m_real::Int
d::Union{Distribution, Real, Nothing}
function LossyLineWithRandomPhase(η_loss::Real, ϕ::Union{Real, Distribution})
m = 1
circuit = LossyCircuit(m)
target_modes_in = ModeList([1], circuit.m_real)
target_modes_out = target_modes_in
interf = LossyLine(η_loss)
add_element_lossy!(circuit, interf, target_modes_in, target_modes_out)
interf = RandomPhaseShifter(ϕ)
add_element_lossy!(circuit, interf, target_modes_in, target_modes_out)
new(η_loss, circuit.U, circuit.m, circuit.m_real, ϕ)
end
end
LossParameters(::Type{LossyLineWithRandomPhase}) = IsLossy()
"""
is_compatible(target_modes_in::ModeList, circuit::Circuit)
Checks compatibility of `target_modes_in` and `circuit`.
"""
function is_compatible(target_modes_in::ModeList, circuit::Circuit)
if circuit.m != target_modes_in.m
if circuit.m == 2*target_modes_in.m
error("use add_element_lossy! instead")
else
@show circuit.m
@show target_modes_in.m
error("target_modes_in.m")
end
end
true
end
"""
add_element!(circuit::Circuit, interf::Interferometer; target_modes::Vector{Int})
add_element!(circuit::Circuit, interf::Interferometer; target_modes_in::Vector{Int}, target_modes_out::Vector{Int})
Adds the circuit element `interf` that will be applied on `target_modes` to the `circuit`.
Will automatically update the unitary representing the circuit.
If giving a single target modes, assumes that they are the same for out and in
"""
function add_element!(circuit::Circuit, interf::Interferometer, target_modes_in::ModeList, target_modes_out::ModeList = target_modes_in)
@argcheck is_compatible(target_modes_in, target_modes_out)
@argcheck is_compatible(target_modes_in, circuit)
push!(circuit.circuit_elements, interf)
circuit.U == nothing ? circuit.U = Matrix{ComplexF64}(I, circuit.m, circuit.m) : nothing
if interf.m == circuit.m
circuit.U = interf.U * circuit.U
else
u = Matrix{ComplexF64}(I, circuit.m, circuit.m)
for i in 1:size(interf.U)[1]
for j in 1:size(interf.U)[2]
u[target_modes_in.modes[i], target_modes_out.modes[j]] = interf.U[i,j]
end
end
# @show pretty_table(circuit.U)
# @show pretty_table(u)
circuit.U *= u
# @show pretty_table(circuit.U)
end
end
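# Example (sketch): building a 3-mode lossless circuit element by element.
# circuit = LosslessCircuit(3)
# add_element!(circuit, BeamSplitter(1/sqrt(2)), ModeList([1, 2], 3))
# add_element!(circuit, PhaseShift(pi/4), ModeList([3], 3))
# circuit.U   # unitary of the assembled circuit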
# # if giving a single target modes, assumes that they are the same for out and in
# function add_element!(circuit::Circuit, interf::Interferometer; target_modes::Vector{Int})
# println("temporarily disabled")
# # add_element!(circuit, interf; target_modes_in = target_modes, target_modes_out = target_modes)
#
# end
#
# add_element!(circuit::Circuit, interf::Interferometer; target_modes::ModeOccupation) = add_element!(circuit=circuit, interf=interf, target_modes=target_modes.state)
#
# add_element!(circuit::Circuit, interf::Interferometer; target_modes::Vector{Int}) = add_element!(circuit=circuit, interf=interf, target_modes=target_modes)
#
# bug:
# WARNING: Method definition add_element!(BosonSampling.Circuit, BosonSampling.Interferometer) in module BosonSampling at /home/benoitseron/.julia/dev/BosonSampling/src/types/circuits.jl:73 overwritten at /home/benoitseron/.julia/dev/BosonSampling/src/types/circuits.jl:75.
# ** incremental compilation may be fatally broken for this module **
#
# WARNING: Method definition add_element!##kw(Any, typeof(BosonSampling.add_element!), BosonSampling.Circuit, BosonSampling.Interferometer) in module BosonSampling at /home/benoitseron/.julia/dev/BosonSampling/src/types/circuits.jl:73 overwritten at /home/benoitseron/.julia/dev/BosonSampling/src/types/circuits.jl:75.
# ** incremental compilation may be fatally broken for this module **
# add_element!(circuit::Circuit, interf::Interferometer; target_modes::ModeList) = begin
#
# mo = convert(ModeOccupation, target_modes)
# add_element!(circuit=circuit, interf=interf, target_modes=mo)
#
# end
function add_element_lossy!(circuit::LossyCircuit, interf::Interferometer, target_modes_in::ModeList, target_modes_out::ModeList = target_modes_in)
# @warn "health checks commented"
if !(LossParameters(typeof(interf)) == IsLossy())
# convert to a LossyInterferometer any lossless element just for size requirements and consistency
#println("converting to lossy")
interf = to_lossy(interf)
end
if target_modes_in.m != circuit.m_real
println("unexpected length")
if target_modes_in.m == 2*circuit.m_real
@warn "target_modes given with size 2*interf.m_real, discarding last m_real mode info and using the convention that mode i is lost into mode i+m_real"
else
@show circuit.m
@show target_modes_in.m
error("invalid size of target_modes_in.m")
end
end
# @show target_modes_in
# @show lossy_target_modes(target_modes_in)
add_element!(circuit, interf, lossy_target_modes(target_modes_in), lossy_target_modes(target_modes_out))
end
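# Example (sketch): a 2-mode lossy circuit with a lossy beam-splitter on modes 1 and 2;
# the resulting circuit.U is the 4x4 virtual interferometer including the loss modes.
# circuit = LossyCircuit(2)
# add_element_lossy!(circuit, LossyBeamSplitter(1/sqrt(2), 0.9), ModeList([1, 2], 2))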
#
# function add_element_lossy!(circuit::LossyCircuit, interf::Interferometer, target_modes_in::ModeOccupation, target_modes_out::ModeOccupation = target_modes_in)
#
# target_modes_in = target_modes_in.state
# target_modes_out = target_modes_out.state
#
# @show target_modes_in
#
# add_element_lossy!(circuit, interf, target_modes_in, target_modes_out)
#
# end
#
#
# function add_element_lossy!(circuit::LossyCircuit, interf::Interferometer, target_modes_in::ModeList, target_modes_out::ModeList = target_modes_in)
#
# target_modes_in = convert(ModeOccupation, target_modes_in)
# target_modes_out = convert(ModeOccupation, target_modes_out)
#
# @show target_modes_in
#
# add_element_lossy!(circuit, interf, target_modes_in, target_modes_out)
#
# end
Base.show(io::IO, interf::Interferometer) = begin
print(io, "Interferometer :\n\n", "Type : ", typeof(interf), "\n", "m : ", interf.m, "\n", "U : ", "\n", interf.U)
end
Base.show(io::IO, interf::UserDefinedInterferometer) = begin
print(io, "Interferometer :\n\n", "Type : ", typeof(interf), "\n", "m : ", interf.m, "\nUnitary : \n")
pretty_table(io, interf.U)
end
"""
MultipleCounts()
MultipleCounts(counts, proba)
Holds a set of photon counting outcomes together with their respective
probabilities (so they can be used as a single observation). Can be declared
empty as a placeholder.
Fields:
- counts::Union{Nothing, Vector{ModeOccupation}, Vector{PartitionOccupancy}},
- proba::Union{Nothing,Vector{Real}}
"""
mutable struct MultipleCounts
counts::Union{Nothing, Vector{ModeOccupation}, Vector{PartitionOccupancy}, Vector{ThresholdModeOccupation}}
proba::Union{Nothing,Vector{Real}}
MultipleCounts() = new(nothing,nothing)
MultipleCounts(counts, proba) = new(counts,proba)
end
function initialise_to_empty_vectors!(mc::MultipleCounts, type_proba, type_counts)
mc.proba = Vector{type_proba}()
mc.counts = Vector{type_counts}()
end
Base.show(io::IO, pb::MultipleCounts) = begin
if pb.proba == nothing
println(io, "Empty MultipleCounts")
else
for i in 1:length(pb.proba)
println(io, "output: \n")
println(io, pb.counts[i])
println(io, "p = $(pb.proba[i])")
println(io, "--------------------------------------")
end
end
end
"""
to_threshold(mc::MultipleCounts)
Transforms a `MultipleCounts` into the equivalent for threshold detectors.
"""
function to_threshold(mc::MultipleCounts)
count_proba = Dict()
for (count, proba) in zip(mc.counts, mc.proba)
new_count = to_threshold(count)
if new_count in keys(count_proba)
count_proba[new_count] += proba
else
count_proba[new_count] = proba
end
end
# println("######")
# @show count_proba
counts = Vector{typeof(mc.counts[1])}()
probas = Vector{typeof(mc.proba[1])}()
for key in keys(count_proba)
push!(counts, key)
push!(probas, count_proba[key])
end
# @show counts
# @show probas
MultipleCounts(counts, probas)
end
"""
EventProbability(probability::Union{Nothing, Number})
EventProbability(mc::MultipleCounts)
Holds the probability or probabilities of an [`Event`](@ref).
Fields:
- probability::Union{Number,Nothing, MultipleCounts}
- precision::Union{Number,Nothing}
- failure_probability::Union{Number,Nothing}
"""
mutable struct EventProbability
probability::Union{Number,Nothing, MultipleCounts}
precision::Union{Number,Nothing} # see remarks in conventions
failure_probability::Union{Number,Nothing}
function EventProbability(probability::Union{Nothing, Number})
if probability == nothing
new(nothing, nothing, nothing)
else
try
probability = clean_proba(probability)
new(probability,nothing,nothing)
catch
error("invalid probability")
end
end
end
function EventProbability(mc::MultipleCounts)
try
mc.proba = clean_pdf(mc.proba)
new(mc,nothing,nothing)
catch
error("invalid probability")
end
end
end
"""
Event{TIn<:InputType, TOut<:OutputMeasurementType}
Event linking an input to an output.
Fields:
- input_state::Input{TIn}
- output_measurement::TOut
- proba_params::EventProbability
- interferometer::Interferometer
"""
mutable struct Event{TIn<:InputType, TOut<:OutputMeasurementType}
input_state::Input{TIn}
output_measurement::TOut
proba_params::EventProbability
interferometer::Interferometer
function Event{TIn,TOut}(input_state, output_measurement, interferometer::Interferometer, proba_params = nothing) where {TIn<:InputType, TOut<:OutputMeasurementType}
if proba_params == nothing
proba_params = EventProbability(nothing)
end
# in case input or outputs are given with m instead of 2m
# for lossy cases, convert them to have the proper dimension for
# computations
if (LossParameters(typeof(interferometer)) == IsLossy())
if input_state.m != 2*interferometer.m_real
#println("converting Input to lossy")
input_state = to_lossy(input_state)
end
if StateMeasurement(typeof(output_measurement)) == FockStateMeasurement()
if output_measurement.s == nothing
output_measurement.s = ModeOccupation(zeros(Int, 2*interferometer.m_real))
elseif output_measurement.s.m != 2*interferometer.m_real
#println("converting Output to lossy")
to_lossy!(output_measurement)
end
end
end
new{TIn,TOut}(input_state, output_measurement, proba_params, interferometer)
end
Event(i,o,interf,p = nothing) = Event{get_parametric_type(i)[1], get_parametric_type(o)[1]}(i,o,interf,p)
end
# Base.show(io::IO, ev::Event) = begin
# println("Event:\n")
# println("input state: ", ev.input_state.r, " (",get_parametric_type(ev.input_state)[1],")", "\n")
# println("output measurement: ", ev.output_measurement, "\n")
# println(ev.interferometer, "\n")
# println("proba_params: ", ev.proba_params)
# end
struct GaussianEvent{TIn<:Gaussian, TOut<:OutputMeasurementType}
input_state::GaussianInput{TIn}
output_measurement::TOut
interferometer::Union{Interferometer, Nothing}
function GaussianEvent{TIn,TOut}(input_state, output_measurement, interferometer=nothing) where {TIn<:Gaussian, TOut<:OutputMeasurementType}
new{TIn,TOut}(input_state, output_measurement, interferometer)
end
GaussianEvent(i,o,interf=nothing) = GaussianEvent{get_parametric_type(i)[1], get_parametric_type(o)[1]}(i,o,interf)
end
# Base.show(io::IO, ev::GaussianEvent) = begin
# println("Event:\n")
# println("input state: ", ev.input_state.r, " (",get_parametric_type(ev.input_state)[1],")", "\n")
# println("output measurement: ", ev.output_measurement, "\n")
# println(ev.interferometer, "\n")
# end
function check_probability_empty(ev::Event; resetting_message = true)
if ev.proba_params.probability != nothing
if resetting_message
@warn "probability was already set in, rewriting"
else
@warn "unexpected probabilities found in Event"
end
end
end
Base.convert(::Type{Event{TIn, FockDetection}}, ev::Event{TIn, FockSample}) where {TIn <: InputType} = Event(ev.input_state, convert(FockDetection, ev.output_measurement), ev.interferometer, ev.proba_params)
# fs = FockSample([1,2,3])
# convert(FockDetection, fs)
"""
@with_kw mutable struct OneLoopData
Contains experimental data telling everything necessary about an experiment running a one loop boson sampler.
For high number of detections, using a `Vector{ThresholdModeOccupation}` is inefficient, so `samples` can also be a `MultipleCounts`.
"""
@with_kw mutable struct OneLoopData
params::LoopSamplingParameters
samples::Union{Vector{ThresholdModeOccupation}, MultipleCounts}
date::DateTime = now()
name::String = string(date)
extra_info
end
"""
save(data::OneLoopData; path_to_file::String = "data/one_loop/")
Saves a OneLoopData.
"""
function JLD.save(data::OneLoopData; path_to_file::String = "data/one_loop/")
save(path_to_file * "$(data.name).jld", "data", data)
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 13044 | """
Supertype to any concrete input type such as [`Bosonic`](@ref), [`PartDist`](@ref), [`Distinguishable`](@ref)
and [`Undef`](@ref).
"""
abstract type InputType end
"""
Type used to express that we don't know what partial distinguishability experimental samples have.
"""
struct UnknownInput <: InputType
end
"""
Type used to notify that the input is made of indistinguishable Fock state photons.
"""
struct Bosonic <: InputType
end
"""
Type used to notify that the input is made of partially distinguishable Fock state
photons.
"""
abstract type PartDist <: InputType
end
"""
One-parameter model of partial distinguishability interpolating between indistinguishable
and fully distinguishable Fock state photons.
!!! note "Reference"
[Sampling of partially distinguishable bosons and the relation to the
multidimensional permanent](https://arxiv.org/pdf/1410.7687.pdf)
"""
struct OneParameterInterpolation <: PartDist
end
"""
Model of partially distinguishable Fock state photons described by a randomly generated [`GramMatrix`](@ref).
"""
struct RandomGramMatrix <: PartDist
end
"""
Model of partially distinguishable Fock state photons described by a provided [`GramMatrix`](@ref).
"""
struct UserDefinedGramMatrix <: PartDist
end
"""
Model of distinguishable Fock state photons.
"""
struct Distinguishable <: InputType
end
"""
Model of Fock state photons with an undefined [`GramMatrix`](@ref).
"""
struct Undef <: InputType
end
"""
Supertype to any concrete input type of Gaussian state.
"""
abstract type Gaussian end
"""
VacuumState()
Type used to notify that the input is made of the vacuum state.
Fields:
displacement::Vector{Float64}
covariance_matrix::Matrix{Float64}
"""
struct VacuumState <: Gaussian
displacement::Vector{Float64}
covariance_matrix::Matrix{Float64}
function VacuumState()
new(zeros(2), [1.0 0.0; 0.0 1.0])
end
end
"""
CoherentState(displacement_parameter::Complex)
Type used to notify that the input is made of a coherent state.
Fields:
- displacement_parameter::Complex
- displacement::Vector{Float64}
- covariance_matrix::Matrix{Float64}
"""
struct CoherentState <: Gaussian
displacement_parameter::Complex
displacement::Vector{Float64}
covariance_matrix::Matrix{Float64}
function CoherentState(displacement_parameter::Complex)
new(displacement_parameter,
sqrt(2) * [real(displacement_parameter); imag(displacement_parameter)],
[1/2 0; 0 1/2])
end
end
"""
ThermalState(mean_photon_number::Real)
Type used to notify that the input is made of a thermal state.
Fields:
mean_photon_number::Real
displacement::Vector{Float64}
covariance_matrix::Matrix{Float64}
"""
struct ThermalState <: Gaussian
mean_photon_number::Real
displacement::Vector{Float64}
covariance_matrix::Matrix{Float64}
function ThermalState(mean_photon_number::Real)
new(mean_photon_number,
zeros(2),
[mean_photon_number+1/2 0; 0 mean_photon_number+1/2])
end
end
"""
SingleModeSqueezedVacuum(squeezing_parameter::Real)
Type used to notify that the input is made of single mode squeezed state.
Fields:
- squeezing_parameter::Real
- displacement::Vector{Float64}
- covariance_matrix::Matrix{Float64}
"""
struct SingleModeSqueezedVacuum <: Gaussian
squeezing_parameter::Real
displacement::Vector{Float64}
covariance_matrix::Matrix{Float64}
function SingleModeSqueezedVacuum(squeezing_parameter::Real)
new(squeezing_parameter,
zeros(2),
[exp(-2*squeezing_parameter) 0;
0 exp(2*squeezing_parameter)])
end
end
"""
OrthonormalBasis(vector_matrix::Union{Matrix, Nothing})
Basis of vectors ``v_1,...,v_r`` stored as columns of an ``n``-by-``r`` matrix,
possibly empty.
Fields:
vectors_matrix::Union{Matrix,Nothing}
"""
mutable struct OrthonormalBasis
vectors_matrix::Union{Matrix,Nothing}
function OrthonormalBasis(vectors_matrix = nothing)
if vectors_matrix == nothing
new(nothing)
else
is_orthonormal(vectors_matrix, atol = ATOL) ? new(vectors_matrix) : error("invalid orthonormal basis")
end
end
end
"""
GramMatrix{T}(n::Int) where {T<:InputType}
GramMatrix{T}(n::Int, distinguishability_param::Real) where {T<:InputType}
GramMatrix{T}(n::Int, S::Matrix) where {T<:InputType}
Matrix of partial distinguishability. Will automatically generate the proper
matrix related to the provided [`InputType`](@ref).
Fields:
- n::Int: photon number
- S::Matrix: Gram matrix
- rank::Union{Int, Nothing}
- distinguishability_param::Union{Real, Nothing}
- generating_vectors::OrthonormalBasis
"""
struct GramMatrix{T<:InputType}
n::Int
S::Matrix
rank::Union{Int,Nothing}
distinguishability_param::Union{Real,Nothing}
generating_vectors::OrthonormalBasis
function GramMatrix{T}(n::Int) where {T<:InputType}
if T == Bosonic
return new{T}(n, ones(ComplexF64,n,n), nothing, nothing, OrthonormalBasis())
elseif T == Distinguishable
return new{T}(n, Matrix{ComplexF64}(I,n,n), nothing, nothing, OrthonormalBasis())
elseif T == RandomGramMatrix
return new{T}(n, rand_gram_matrix(n), nothing, nothing, OrthonormalBasis())
elseif T == Undef
return new{T}(n, Matrix{ComplexF64}(undef,n,n), nothing, nothing, OrthonormalBasis())
else
error("type ", T, " not implemented")
end
end
function GramMatrix{T}(n::Int, distinguishability_param::Real) where {T<:InputType}
if T == OneParameterInterpolation
return new{T}(n, gram_matrix_one_param(n,distinguishability_param), nothing, distinguishability_param, OrthonormalBasis())
else
T in [Bosonic,Distinguishable,RandomGramMatrix,Undef] ? error("S matrix should not be specified for type ", T) : error("type ", T, " not implemented")
end
end
function GramMatrix{T}(n::Int, S::Matrix) where {T<:InputType}
if T == UserDefinedGramMatrix
return new{T}(n, S, nothing, nothing, OrthonormalBasis())
else
T in [Bosonic,Distinguishable,RandomGramMatrix,Undef] ? error("S matrix should not be specified for type ", T) : error("type ", T, " not implemented")
end
end
end
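# Illustrative usage sketch (added for clarity, not part of the original source):
#
# GramMatrix{Bosonic}(3)                          # all-ones Gram matrix
# GramMatrix{OneParameterInterpolation}(3, 0.5)   # one-parameter interpolation with x = 0.5
# GramMatrix{RandomGramMatrix}(3)                 # randomly generated Gram matrix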
"""
Input{T<:InputType}
Input{T}(r::ModeOccupation) where {T<:InputType}
Input{T}(r::ModeOccupation, G::GramMatrix) where {T<:InputType}
Input state at the entrance of the interferometer.
Fields:
- r::ModeOccupation
- n::Int
- m::Int: number of modes
- G::GramMatrix
- distinguishability_param::Union{Real, Nothing}
"""
struct Input{T<:InputType}
r::ModeOccupation
n::Int
m::Int
G::GramMatrix
distinguishability_param::Union{Real,Nothing}
function Input{T}(r::ModeOccupation, n::Int, m::Int, G::GramMatrix, distinguishability_param::Union{Real,Nothing}) where {T<:InputType}
new{T}(r,n,m,G, distinguishability_param)
end
function Input{T}(r::ModeOccupation) where {T<:InputType}
if T in [Bosonic, Distinguishable, Undef, RandomGramMatrix]
return new{T}(r, r.n, r.m, GramMatrix{T}(r.n), nothing)
else
error("type ", T, " not implemented")
end
end
function Input{T}(r::ModeOccupation, distinguishability_param::Real) where {T<:InputType}
if T == OneParameterInterpolation
return new{T}(r, r.n, r.m, GramMatrix{T}(r.n,distinguishability_param), distinguishability_param)
else
error("type ", T, " not implemented")
end
end
function Input{T}(r::ModeOccupation, S::Matrix) where {T<:InputType}
if T == UserDefinedGramMatrix
return new{T}(r, r.n, r.m, GramMatrix{T}(r.n,S), nothing)
else
error("type ", T, " not implemented")
end
end
end
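# Illustrative usage sketch (added for clarity, not part of the original source):
#
# Input{Bosonic}(first_modes(3, 6))                         # 3 indistinguishable photons in 6 modes
# Input{OneParameterInterpolation}(first_modes(3, 6), 0.5)  # partial distinguishability x = 0.5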
at_most_one_photon_per_bin(input::Input) = at_most_one_photon_per_bin(input.r)
"""
GaussianInput{T<:Gaussian}
GaussianInput{CoherentState}(r::ModeOccupation, displacement_parameters::Vector)
GaussianInput{SingleModeSqueezedVacuum}(r::ModeOccupation, squeezing_parameters::Vector)
Input state made of Gaussian states at the entrance of the interferometer.
Since models of partial distinguishability for Gaussian states differ in nature from those for Fock states, we distinguish inputs of [`Bosonic`](@ref) Fock states from inputs of indistinguishable Gaussian states.
!!! note
The notion of [`ModeOccupation`](@ref) here is also different. Even though
the object is the same, the mode occupation here has to be understood as a "boolean"
where the value `0` tells us that the mode is fed with the vacuum while `1`
states that the mode contains a Gaussian state of type `T`.
"""
struct GaussianInput{T<:Gaussian}
r::ModeOccupation
n::Int
m::Int
displacement::Vector{Float64}
covariance_matrix::Matrix{Float64}
displacement_parameters::Union{Vector, Nothing}
mean_photon_numbers::Union{Vector, Nothing}
squeezing_parameters::Union{Vector, Nothing}
function GaussianInput{T}(r::ModeOccupation) where {T<:Gaussian}
if T == VacuumState
return Input{Bosonic}(first_modes(0,r.m))
else
error("type ", T, " not implemented")
end
end
function GaussianInput{CoherentState}(r::ModeOccupation, displacement_parameters::Vector)
r.state[1] == 1 ? sq = CoherentState(displacement_parameters[1]) : sq = VacuumState()
cov = sq.covariance_matrix
R = sq.displacement
for i in 2:r.m
if r.state[i] == 1
sq = CoherentState(displacement_parameters[i])
R = [R;sq.displacement]
sigma = sq.covariance_matrix
else
vacc = VacuumState()
R = [R;vacc.displacement]
sigma = vacc.covariance_matrix
end
cov = direct_sum(cov, sigma)
end
return new{CoherentState}(r, r.n, r.m, R, cov, displacement_parameters, nothing, nothing, nothing)
end
# function GaussianInput{T}(r::ModeOccupation, mean_photon_numbers::Vector) where {T<:Gaussian}
#
# if T == ThermalState
# r.state[1] == 1 ? sq = ThermalState(mean_photon_numbers[1]) : sq = VacuumState()
# cov = sq.covariance_matrix
# R = sq.displacement
#
# for i in 2:r.m
# if r.state[i] == 1
# sq = ThermalState[mean_photon_numbers[i]]
# R = [R;sq.displacement]
# sigma = sq.covariance_matrix
# else
# vacc = VacuumState()
# R = [R;vacc.displacement]
# sigma = vacc.covariance_matrix
# end
# cov = direct_sum(cov, sigma)
# end
# return new{T}(r, r.n, r.m, R, cov, nothing, displacement_parameters, nothing, nothing)
# else
# error("type ", T, " not implemented")
# end
#
# end
function GaussianInput{SingleModeSqueezedVacuum}(r::ModeOccupation, squeezing_parameters::Vector)
r.state[1] == 1 ? sq = SingleModeSqueezedVacuum(squeezing_parameters[1]) : sq = VacuumState()
cov = sq.covariance_matrix
R = sq.displacement
for i in 2:r.m
if r.state[i] == 1
sq = SingleModeSqueezedVacuum(squeezing_parameters[i])
R = [R;sq.displacement]
sigma = sq.covariance_matrix
else
vacc = VacuumState()
R = [R;vacc.displacement]
sigma = vacc.covariance_matrix
end
cov = direct_sum(cov, sigma)
end
return new{SingleModeSqueezedVacuum}(r, r.n, r.m, R, cov, nothing, nothing, squeezing_parameters)
end
end
"""
get_spectrum(state::Gaussian, k::Int)
Get the spectrum in the Fock basis of a `state` up to `k` photons.
"""
function get_spectrum(state::Gaussian, k::Int)
if typeof(state) == VacuumState
return 1
elseif typeof(state) == CoherentState
α = state.displacement_parameter
return [exp(-0.5*abs(α)^2) * (α^n)/sqrt(factorial(n)) for n in 0:k]
elseif typeof(state) == ThermalState
μ = state.mean_photon_number
return [μ^j / (1+μ)^(j+1) for j in 0:k]
elseif typeof(state) == SingleModeSqueezedVacuum
r = state.squeezing_parameter
res = Vector{Float64}(undef, k+1)
for i in 0:k
if iseven(i)
# amplitude of the i-photon component of a squeezed vacuum, with j = i ÷ 2 photon pairs
j = div(i, 2)
res[i+1] = sqrt(sech(r)) * sqrt(factorial(i))/factorial(j) * (-0.5*tanh(r))^j
else
res[i+1] = 0
end
end
return res
end
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 2089 | ### Interferometers ###
"""
Supertype to any concrete interferometer type such as [`UserDefinedInterferometer`](@ref),
[`RandHaar`](@ref), [`Fourier`](@ref),...
"""
abstract type Interferometer end
"""
IsLossy{T}
IsLossless{T}
Trait to refer to quantities with inclusion of loss: the real `Interferometer` has dimension `m_real * m_real` while we model it by a `2m_real * 2m_real` one where the last `m_real` modes are environment modes containing the lost photons.
"""
abstract type LossParameters end
struct IsLossy <: LossParameters end
struct IsLossless <: LossParameters end
"""
UserDefinedInterferometer(U::Matrix)
Creates an instance of [`Interferometer`](@ref) from a given unitary matrix `U`.
Fields:
- m::Int
- U::Matrix
"""
struct UserDefinedInterferometer <: Interferometer
#actively checks unitarity, inefficient if outputting many matrices that are known to be unitary
m::Int
U::Matrix
UserDefinedInterferometer(U) = is_unitary(U) ? new(size(U,1), U) : error("input matrix is non-unitary")
end
LossParameters(::Type{UserDefinedInterferometer}) = IsLossless()
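# Illustrative usage sketch (added for clarity, not part of the original source):
#
# U = 1/sqrt(2) * [1 1; 1 -1]            # a balanced beam splitter, unitary
# interf = UserDefinedInterferometer(U)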
"""
RandHaar(m::Int)
Creates an instance of [`Interferometer`](@ref) from a Haar distributed unitary matrix of dimension `m`.
Fields:
- m::Int
- U::Matrix
"""
struct RandHaar <: Interferometer
m::Int
U::Matrix{ComplexF64}
RandHaar(m) = new(m,rand_haar(m))
end
LossParameters(::Type{RandHaar}) = IsLossless()
"""
Fourier(m::Int)
Creates a Fourier [`Interferometer`](@ref) of dimension `m`.
Fields:
- m::Int
- U::Matrix
"""
struct Fourier <: Interferometer
m::Int
U::Matrix{ComplexF64}
Fourier(m::Int) = new(m,fourier_matrix(m))
end
LossParameters(::Type{Fourier}) = IsLossless()
"""
Hadamard(m::Int)
Creates a Hadamard [`Interferometer`](@ref) of dimension `m`.
Fields:
- m::Int
- U::Matrix
"""
struct Hadamard <: Interferometer
m::Int
U::Matrix
Hadamard(m::Int) = new(m,hadamard_matrix(m))
end
LossParameters(::Type{Hadamard}) = IsLossless()
abstract type Loop <: Circuit end
"""
LosslessLoop(m, η, ϕ)
Creates a LosslessLoop, see [`build_loop`](@ref) for the fields.
"""
mutable struct LosslessLoop<: Loop
m::Int
η::Union{T, Vector{T}} where {T<:Real}
ϕ::Union{Nothing, T, Vector{T}} where {T<:Real}
circuit::Circuit
U::Union{Nothing, Matrix}
function LosslessLoop(m, η, ϕ)
circuit = build_loop(m, η, nothing, nothing, ϕ)
new(m, η, ϕ, circuit, circuit.U)
end
end
LossParameters(::Type{LosslessLoop}) = IsLossless()
"""
LossyLoop(m, η, η_loss_bs, η_loss_lines, ϕ)
Creates a LossyLoop, see [`build_loop`](@ref) for the fields.
"""
mutable struct LossyLoop<: Loop
m::Int
η::Union{T, Vector{T}} where {T<:Real}
η_loss_bs::Union{Nothing, T, Vector{T}} where {T<:Real}
η_loss_lines::Union{Nothing, T, Vector{T}} where {T<:Real}
ϕ::Union{Nothing, T, Vector{T}} where {T<:Real}
circuit::Circuit
U::Union{Nothing, Matrix}
function LossyLoop(m, η, η_loss_bs, η_loss_lines, ϕ)
circuit = build_loop(m, η, η_loss_bs, η_loss_lines, ϕ)
new(m, η, η_loss_bs, η_loss_lines, ϕ, circuit, circuit.U)
end
end
LossParameters(::Type{LossyLoop}) = IsLossy()
"""
build_loop(m::Int, η::Union{T, Vector{T}}, η_loss_bs::Union{Nothing, T, Vector{T}} = nothing, η_loss_lines::Union{Nothing, T, Vector{T}} = nothing, ϕ::Union{Nothing, T, Vector{T}} = nothing) where {T<:Real}
build_loop(params::LoopSamplingParameters)
Outputs a `Circuit` corresponding to the experiment discussed with the Walther group.
A pulse of `n` photons in `m` temporal modes is sent through a variable `BeamSplitter` and a `LossyLineWithRandomPhase` as described in "Scalable Boson Sampling with Time-Bin Encoding Using a Loop-Based Architecture".
Fields:
- m::Int dimension
- η::Union{T, Vector{T}} array of beam splitter transmissivities
- η_loss_bs::Union{Nothing, T, Vector{T}} array of beam splitter transmissivities for accounting loss
- η_loss_lines::Union{Nothing, T, Vector{T}} array of beam delay lines transmissivities for accounting loss
- ϕ::Union{Nothing, T, Vector{T}} array of phases applied by the delay lines
"""
function build_loop(m::Int, η::Union{T, Vector{T}}, η_loss_bs::Union{Nothing, T, Vector{T}} = nothing, η_loss_lines::Union{Nothing, T, Vector{T}} = nothing, ϕ::Union{Nothing, T, Vector{T}} = nothing) where {T<:Real}
for (name, param) in zip(("η", "η_loss_bs", "η_loss_lines", "ϕ"), (η, η_loss_bs, η_loss_lines, ϕ))
if param != nothing
if length(param) != m-1
if length(param) == 1
@warn "only a single $name given - acts as a single beam splitter and not an array of beam splitters with a similar transmissivity"
else
if name in ("ϕ", "η_loss_lines")
if length(param) != m
error("invalid $name, needs to be of size $(m)")
end
else
error("invalid $name, needs to be of size $(m-1)")
end
end
end
end
end
function add_line!(mode, lossy)
if ϕ == nothing
if lossy && η_loss_lines != nothing
interf = LossyLine(η_loss_lines[mode])
else
interf = RandomPhaseShifter(0.)
end
else
if η_loss_lines == nothing
interf = RandomPhaseShifter(ϕ[mode])
else
interf = LossyLineWithRandomPhase(η_loss_lines[mode], ϕ[mode])
end
target_modes_in = ModeList([mode], circuit.m_real)
target_modes_out = target_modes_in
if lossy
add_element_lossy!(circuit, interf, target_modes_in, target_modes_out)
else
add_element!(circuit, interf, target_modes_in, target_modes_out)
end
end
end
if η_loss_bs != nothing || η_loss_lines != nothing
lossy = true
circuit = LossyCircuit(m)
for mode in 1:m-1
add_line!(mode, lossy)
if η_loss_bs != nothing
interf = LossyBeamSplitter(η[mode], η_loss_bs[mode])
else
interf = LossyBeamSplitter(η[mode], 1.)
end
target_modes_in = ModeList([mode, mode+1], circuit.m_real)
target_modes_out = target_modes_in
add_element_lossy!(circuit, interf, target_modes_in, target_modes_out)
end
add_line!(m, lossy) # last pass has no interaction with a BS
return circuit
else
lossy = false
circuit = LosslessCircuit(m)
for mode in 1:m-1
add_line!(mode, lossy)
interf = BeamSplitter(η[mode])#LossyBeamSplitter(η[mode], η_loss[mode])
#target_modes_in = ModeList([mode, mode+1], circuit.m_real)
#target_modes_out = ModeList([mode, mode+1], circuit.m_real)
target_modes_in = ModeList([mode, mode+1], m)
target_modes_out = target_modes_in
add_element!(circuit, interf, target_modes_in, target_modes_out)
end
add_line!(m, lossy)
return circuit
end
end
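# Illustrative usage sketch (added for clarity, not part of the original source):
#
# a 4-mode lossless loop with balanced couplers and no explicit loss or phases
# loop_circuit = build_loop(4, 1/sqrt(2) .* ones(3))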
function build_loop(params::LoopSamplingParameters)
@unpack n, m, input_type, i, η, η_loss_bs, η_loss_lines, d, ϕ, p_dark, p_no_count = params
build_loop(m, η, η_loss_bs, η_loss_lines, ϕ)
end
"""
get_sample_loop(params::LoopSamplingParameters)
Obtains a sample for `LoopSamplingParameters` by reconstructing the circuit each time (as is needed for adding a random phase).
"""
function get_sample_loop(params::LoopSamplingParameters)
@unpack n, m, input_type, i, η, η_loss_bs, η_loss_lines, d, ϕ, p_dark, p_no_count = params
circuit = LossyLoop(m, η, η_loss_bs, η_loss_lines, ϕ).circuit
o = RealisticDetectorsFockSample(p_dark, p_no_count)
ev = Event(i,o, circuit)
BosonSampling.sample!(ev)
ev.output_measurement.s
end
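# Illustrative usage sketch (added for clarity, not part of the original source):
#
# params = LoopSamplingParameters(n = 4, input_type = Distinguishable)
# sample = get_sample_loop(params)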
loss_amplitude_to_transmission_amplitude(loss::Real) = sqrt(1-loss^2)
"""
virtual_interferometer_uniform_loss(real_interf::Matrix, η)
virtual_interferometer_uniform_loss(real_interf::Interferometer, η)
Simulates a simple, uniformly lossy interferometer: take a 2m*2m interferometer and introduce beam splitters in front with transmittance `η`. Returns the corresponding virtual interferometer.
"""
function virtual_interferometer_uniform_loss(U::Matrix, η)
# define a (2m) * (2m) virtual interferometer V
# connect the remaining branches of the input beam splitters
# (of course this could be made different but this is the simplest,
# no thought case)
m = size(U,1)
V = Matrix{eltype(U)}(I, (2m, 2m))
# loss beam splitters
for i in 1:m
V *= beam_splitter_modes(in_up = i, in_down = i+m, out_up = i, out_down = m+i, transmission_amplitude = η, n = 2m)
end
U_large = Matrix{eltype(U)}(I, (2m, 2m))
U_large[1:m,1:m] = U
V *= U_large
#V = V[1:2m, 1:2m] # disregard virtual mode to connect second input branch of beam splitters
############ this must clearly not be good for unitarity
#V = copy(transpose(V))
V
end
function virtual_interferometer_uniform_loss(real_interf::Interferometer, η)
virtual_interferometer_uniform_loss(real_interf.U, η)
end
"""
virtual_interferometer_general_loss(V::Matrix, W::Matrix, η::Vector{Real})
Generic lossy interferometer composed of two unitary matrices `V`, `W` forming the
physical matrix `U` = `V*W`. In between `V` and `W` are sandwiched a diagonal array
of beam splitters, with transmissivity `η` (`m`-dimensional vector corresponding
to the transmissivity of each layer) : `U_total` = `V*Diag(η)*W`. This generates `U_total`.
"""
function virtual_interferometer_general_loss(V::Matrix, W::Matrix, η::Vector{Real})
# see GeneralLossInterferometer for info
m = size(V,1)
U_virtual = Matrix{eltype(V)}(I, (2m, 2m))
U_virtual[1:m, 1:m] = W
# loss beam splitters
for i in 1:m
U_virtual *= beam_splitter_modes(in_up = i, in_down = i+m, out_up = i, out_down = m+i, transmission_amplitude = η[i], n = 2m)
end
U_virtual *= V
U_virtual = copy(transpose(U_virtual))
U_virtual
end
"""
to_lossy(s::Subset)
Transforms a subset of size `m` into a `2m` one with the last `m` modes being empty (the environment modes).
"""
function to_lossy(s::Subset)
subset = s.subset
new_subset = zeros(Int, 2*length(subset))
new_subset[1:length(subset)] .= subset
Subset(new_subset)
end
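# e.g. (illustrative, not part of the original source):
# to_lossy(Subset([1,0,1])) == Subset([1,0,1,0,0,0])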
"""
to_lossy(part::Partition)
Transforms a partition of size `m` into a `2m` one with the last `m` modes being a subset (the environment modes).
"""
function to_lossy(part::Partition)
n = part.subsets[1].n
m = part.subsets[1].m
environment = Subset(last_modes(m,2m))
new_subsets = Vector{Subset}()
for subset in part.subsets
push!(new_subsets, to_lossy(subset))
end
push!(new_subsets, environment)
Partition(new_subsets)
end
"""
to_lossy(mo::ModeOccupation)
Transforms a `ModeOccupation` into the same with extra padding to account for the environment modes.
"""
function to_lossy(mo::ModeOccupation)
cat(mo,zeros(mo))
end
function to_lossy(state::Vector{Int})
vcat(state,zeros(eltype(state), length(state)))
end
function to_lossy(interf::Interferometer)
if (LossParameters(typeof(interf)) == IsLossy())
error("$interf is already a LossyInterferometer")
else
U = interf.U
m = interf.m
U_lossy = Matrix{eltype(U)}(I, 2 .* size(U))
U_lossy[1:m,1:m] = U
return UserDefinedLossyInterferometer(U_lossy)
end
end
function to_lossy(i::Input{T}) where {T<:InputType}
Input{T}(to_lossy(i.r), i.n, 2*i.m, i.G, i.distinguishability_param)
end
function to_lossy!(o::OutputMeasurementType)
# if StateMeasurement(typeof(output_measurement)) == FockStateMeasurement
if StateMeasurement(typeof(o)) == FockStateMeasurement()
o.s = to_lossy(o.s)
else
error("not implemented")
end
end
"""
lossy_target_modes(target_modes::Vector{Int})
Converts a vector of mode occupation into the same concatenated twice. This allows for modes occupied by circuit elements to have their loss mode attributed. For instance, a LossyLine targeting mode 1 with m=2 has
target_modes = [1,0]
lossy_target_modes(target_modes) = [1,0,1,0]
"""
function lossy_target_modes(target_modes::Vector{Int})::Vector{Int}
vcat(target_modes, target_modes)
end
function lossy_target_modes(target_modes::ModeList)
new_modes = vcat(target_modes.modes, target_modes.modes .+ target_modes.m)
ModeList(new_modes, 2*target_modes.m)
end
"""
isa_transmissitivity(η::Real)
isa_transmissitivity(η::Vector{Real})
Asserts that η is a valid transmissivity.
"""
function isa_transmissitivity(η::Real)
(0<= η && η <= 1)
end
isa_transmissitivity(η::Vector{Real}) = all(isa_transmissitivity.(η))
"""
UniformLossInterferometer <: LossyInterferometer
UniformLossInterferometer(η::Real, U_physical::Matrix)
UniformLossInterferometer(η::Real, U_physical::Interferometer)
UniformLossInterferometer(η::Real, m::Int)
Simulates a simple, uniformly lossy interferometer: take a 2m*2m interferometer and introduce beam splitters in front with transmittance `η`. Returns the corresponding interferometer as a separate type.
The last form, `UniformLossInterferometer(η::Real, m::Int)`, samples from a Haar random unitary.
"""
struct UniformLossInterferometer <: Interferometer
m_real::Int
m::Int
η::Real #transmissivity of the upfront beamsplitters
U_physical::Union{Matrix{Complex},Nothing} # physical matrix
U::Union{Matrix{Complex},Nothing} # virtual 2m*2m interferometer
function UniformLossInterferometer(η::Real, U_physical::Matrix)
if isa_transmissitivity(η)
U_virtual = virtual_interferometer_uniform_loss(U_physical, η)
new(size(U_physical,1), size(U_virtual,1), η, U_physical, U_virtual)
else
error("incorrect η")
end
end
UniformLossInterferometer(η::Real, U_physical::Interferometer) = UniformLossInterferometer(η, U_physical.U)
UniformLossInterferometer(η::Real, m::Int) = UniformLossInterferometer(η, RandHaar(m))
end
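# Illustrative usage sketch (added for clarity, not part of the original source):
#
# lossy_interf = UniformLossInterferometer(0.9, RandHaar(4))  # 90% uniform transmission
# size(lossy_interf.U) == (8, 8)                              # virtual 2m*2m unitary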
"""
GeneralLossInterferometer <: LossyInterferometer
Generic lossy interferometer composed of two unitary matrices `V`, `W` forming the
physical matrix `U` = `V*W`. In between `V` and `W` are sandwiched a diagonal array
of beam splitters, with transmissivity `η` (`m`-dimensional vector corresponding
to the transmissivity of each layer) : `U_total` = `V*Diag(η)*W`
"""
struct GeneralLossInterferometer <: Interferometer
m_real::Int
m::Int
η::Vector{Real} #transmissivity of the upfront beamsplitters
U_physical::Union{Matrix{Complex},Nothing} # physical matrix
V::Union{Matrix{Complex},Nothing}
W::Union{Matrix{Complex},Nothing}
U::Union{Matrix{Complex},Nothing} # virtual 2m*2m interferometer
function GeneralLossInterferometer(η::Vector{Real}, V::Matrix, W::Matrix)
if isa_transmissitivity(η)
U_virtual = virtual_interferometer_general_loss(V,W, η)
U_physical = V*W
new(size(U_physical,1), size(U_virtual,1), η, U_physical,V,W, U_virtual)
else
error("invalid η")
end
end
end
struct UserDefinedLossyInterferometer <: Interferometer
m_real::Int
m::Int
η::Union{Real, Vector{Real}, Nothing} #transmissivity of the upfront beamsplitters
U_physical::Union{Matrix{Complex},Nothing} # physical matrix
U::Union{Matrix{Complex},Nothing} # virtual 2m*2m interferometer
function UserDefinedLossyInterferometer(U::Matrix)
m_real = Int(size(U,1)/2)
new(m_real, 2*m_real, nothing, U[1:m_real,1:m_real], U)
end
end
"""
sort_by_lost_photons(pb::MultipleCounts)
Outputs a (n+1) sized array of MultipleCounts containing 0,...,n lost photons.
"""
function sort_by_lost_photons(pb::MultipleCounts)
# number of lost photons is the number of photons in the last subset
n = pb.counts[1].n
sorted_array = [MultipleCounts() for i in 0:n]
initialise_to_empty_vectors!.(sorted_array, Real, PartitionOccupancy)
for (p, count) in zip(pb.proba, pb.counts)
n_lost = count.counts.state[end]
push!(sorted_array[n_lost+1].proba, p)
push!(sorted_array[n_lost+1].counts, count)
end
sorted_array
end
"""
tvd_k_lost_photons(k, pb_sorted, pd_sorted)
Gives the tvd between `pb_sorted` and `pd_sorted`, which should be an output of `sort_by_lost_photons(pb::MultipleCounts)`, for exactly `k` lost photons. Note that if you want the info regarding data considering up to `k` lost photons, you need to use [`tvd_less_than_k_lost_photons(k, pb_sorted, pd_sorted)`](@ref).
"""
function tvd_k_lost_photons(k, pb_sorted, pd_sorted)
tvd(pb_sorted[k].proba,pd_sorted[k].proba)
end
"""
tvd_less_than_k_lost_photons(k, pb_sorted, pd_sorted)
Gives the TVD obtained by considering 0,...,k photons lost. The tvd for each number of photons lost is summed using [`tvd_k_lost_photons(k, pb_sorted, pd_sorted)`](@ref).
"""
function tvd_less_than_k_lost_photons(k, pb_sorted, pd_sorted)
sum(tvd(pb_sorted[j].proba,pd_sorted[j].proba) for j in 1:k)
end
### measurements ###
abstract type OutputMeasurementType end
"""
StateMeasurement
Type trait to know which kind of state the detectors will measure, such as Fock or Gaussian.
"""
abstract type StateMeasurement end
struct FockStateMeasurement <: StateMeasurement end
struct PartitionMeasurement <: StateMeasurement end
struct GaussianStateMeasurement <: StateMeasurement end
"""
FockDetection(s::ModeOccupation)
Measuring the probability of getting the [`ModeOccupation`](@ref) `s` at the output.
Fields:
- s::ModeOccupation
"""
mutable struct FockDetection <: OutputMeasurementType
s::ModeOccupation
FockDetection(s::ModeOccupation) = new(s) #at_most_one_photon_per_bin(s) ? new(s) : error("more than one detector per more")
end
StateMeasurement(::Type{FockDetection}) = FockStateMeasurement()
"""
PartitionCount(part_occupancy::PartitionOccupancy)
Measuring the probability of getting a specific count for a given partition `part_occupancy`.
Fields:
- part_occupancy::PartitionOccupancy
"""
struct PartitionCount <: OutputMeasurementType
part_occupancy::PartitionOccupancy
PartitionCount(part_occupancy::PartitionOccupancy) = new(part_occupancy)
end
StateMeasurement(::Type{PartitionCount}) = PartitionMeasurement()
"""
PartitionCountsAll(part::Partition)
Measuring all possible counts probabilities in the partition `part`.
Fields:
- part::Partition
"""
struct PartitionCountsAll <: OutputMeasurementType
part::Partition
PartitionCountsAll(part::Partition) = new(part)
end
StateMeasurement(::Type{PartitionCountsAll}) = PartitionMeasurement()
struct OutputMeasurement{T<:OutputMeasurementType}
# as in most of boson sampling literature, this is for detectors blind to
# internal degrees of freedom
s::Union{ModeOccupation,Nothing} # position of the detectors for Fock measurement
# function OutputMeasurement{T}(s::ModeOccupation) where {T<:OutputMeasurementType}
# if T == FockDetection
# return at_most_one_photon_per_bin(s) ? new(s, nothing) : error("more than one detector per more")
# else
# return error(T, " not implemented")
# end
# end
function OutputMeasurement{FockDetection}(s::ModeOccupation)
@warn "OutputMeasurement{FockDetection} obsolete, replace with FockDetection"
at_most_one_photon_per_bin(s) ? new(s) : error("more than one detector per mode")
end
OutputMeasurement(s::ModeOccupation) = OutputMeasurement{FockDetection}(s::ModeOccupation)
end
"""
FockSample <: OutputMeasurementType
Container holding a sample from typical boson sampler.
"""
mutable struct FockSample <: OutputMeasurementType
s::Union{ModeOccupation, Nothing}
FockSample() = new(nothing)
FockSample(s::Vector) = FockSample(ModeOccupation(s))
FockSample(s::ModeOccupation) = new(s)
end
StateMeasurement(::Type{FockSample}) = FockStateMeasurement()
Base.convert(::Type{FockDetection}, fs::FockSample) = FockDetection(fs.s)
"""
DarkCountFockSample(p)
Same as [`FockSample`](@ref) but each output mode has an extra probability `p` of giving a positive reading no matter if there is genuinely a photon.
"""
mutable struct DarkCountFockSample <: OutputMeasurementType
s::Union{ModeOccupation, Nothing} # observed output, possibly undefined
p::Real # probability of a dark count in each mode
DarkCountFockSample(p::Real) = isa_probability(p) ? new(nothing, p) : error("invalid probability")
# instantiate if no known output
end
StateMeasurement(::Type{DarkCountFockSample}) = FockStateMeasurement()
"""
RealisticDetectorsFockSample(p_dark::Real, p_no_count::Real)
Same as [`DarkCountFockSample`](@ref) with the added possibility that no reading is observed although there is a photon. This same probability also removes dark counts (first a dark count sample is generated then readings are discarded with probability `p_no_count`).
"""
mutable struct RealisticDetectorsFockSample <: OutputMeasurementType
s::Union{ModeOccupation, Nothing} # observed output, possibly undefined
p_dark::Real # probability of a dark count in each mode
p_no_count::Real # probability that there is a photon but it is not seen
# instantiate if no known output
RealisticDetectorsFockSample(p_dark::Real, p_no_count::Real) = begin
if isa_probability(p_dark) && isa_probability(p_no_count)
new(nothing, p_dark, p_no_count)
else
error("invalid probability")
end
end
end
StateMeasurement(::Type{RealisticDetectorsFockSample}) = FockStateMeasurement()
"""
PartitionSample <: OutputMeasurementType
Container holding a sample from `Partition` photon count.
"""
mutable struct PartitionSample <: OutputMeasurementType
part_occ::Union{PartitionOccupancy, Nothing}
PartitionSample() = new(nothing)
PartitionSample(p::PartitionOccupancy) = new(p)
end
StateMeasurement(::Type{PartitionSample}) = PartitionMeasurement()
mutable struct TresholdDetection <: OutputMeasurementType
s::Union{Vector{Int64}, Nothing}
TresholdDetection() = new(nothing)
TresholdDetection(s::Vector{Int64}) = new(s)
end
"""
ModeOccupation(state)
A list of length equal to the number of modes `m`, with entry `j` of `state` being the number of photons in mode `j`. See also [`ModeList`](@ref).
fields:
- n::Int
- m::Int
- state::Vector{Int}
"""
@auto_hash_equals struct ModeOccupation
n::Int
m::Int
state::Vector{Int}
ModeOccupation(state) = all(state[:] .>= 0) ? new(sum(state), length(state), state) : error("negative photon counts")
end
Base.show(io::IO, i::ModeOccupation) = print(io, "state = ", i.state)
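# e.g. (illustrative, not part of the original source):
# ModeOccupation([1,0,2])  # n = 3 photons over m = 3 modes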
"""
number_modes_occupied(mo::ModeOccupation)
Number of modes having at least a photon.
"""
number_modes_occupied(mo::ModeOccupation) = sum(to_threshold(mo).state)
"""
:+(s1::ModeOccupation, s2::ModeOccupation)
:+(s1::ModeOccupation, s2::Vector{Int})
:+(s2::Vector{Int}, s1::ModeOccupation)
Adds two mode occupations, for instance
s1 = ModeOccupation([0,1])
s2 = ModeOccupation([1,0])
(s1+s2).state == [1,1]
Also works with just a vector and a mode occupation.
"""
Base.:+(s1::ModeOccupation, s2::ModeOccupation) = begin
return ModeOccupation(s1.state + s2.state)
end
Base.:+(s1::ModeOccupation, s2::Vector{Int}) = begin
@argcheck size(s1.state) == size(s2) "incompatible sizes"
return ModeOccupation(s1.state + s2)
end
Base.:+(s2::Vector{Int}, s1::ModeOccupation) = begin
return s1 + s2
end
"""
Base.zeros(mo::ModeOccupation)
Returns a `ModeOccupation` similar to the input but with a state made of zeros.
"""
function Base.zeros(mo::ModeOccupation)
physical_state = mo.state
state = zeros(eltype(physical_state), size(physical_state))
ModeOccupation(state)
end
"""
to_threshold(mo::ModeOccupation)
Converts a `ModeOccupation` into threshold detection.
"""
function to_threshold(mo::ModeOccupation)
ModeOccupation([(mode >= 1 ? 1 : 0) for mode in mo.state])
end
"""
Base.cat(s1::ModeOccupation, s2::ModeOccupation)
Concatenates two `ModeOccupation`.
"""
function Base.cat(s1::ModeOccupation, s2::ModeOccupation)
ModeOccupation(vcat(s1.state, s2.state))
end
"""
ModeList(state)
ModeList(state,m)
Contrasting to [`ModeOccupation`](@ref) this list is of size `n`, the number of photons. Entry `j` is the index of the mode occupied by photon `j`.
This can also be used just to select modes for instance.
See also [`ModeOccupation`](@ref).
fields:
- n::Int
- m::Union{Int, Nothing}
- modes::Vector{Int}
"""
@auto_hash_equals struct ModeList
n::Int
m::Union{Int, Nothing}
modes::Vector{Int}
ModeList(modes::Vector{Int}) = ModeList(modes, nothing)
# all(modes[:] .>= 1) ? new(length(modes), nothing, modes) : error("modes start at one")
function ModeList(modes::Vector{Int}, m)
if all(modes[:] .>= 1) && (m == nothing ? true : all(modes[:] .<= m))
new(length(modes), m, modes)
else
error("incoherent or invalid mode inputs")
end
end
ModeList(mode::Int, m = nothing) = ModeList([mode],m)
end
"""
is_compatible(target_modes_in::ModeList, target_modes_out::ModeList)
Checks compatibility of `ModeList`s.
"""
function is_compatible(target_modes_in::ModeList, target_modes_out::ModeList)
if target_modes_in == target_modes_out
return true
else
@argcheck target_modes_in.n == target_modes_out.n
@argcheck target_modes_in.m == target_modes_out.m
true
end
end
function Base.convert(::Type{ModeOccupation}, ml::ModeList)
if ml.m == nothing
error("need to give m")
else
state = zeros(Int, ml.m)
for mode in ml.modes
state[mode] += 1
end
return ModeOccupation(state)
end
end
function Base.convert(::Type{ModeList}, mo::ModeOccupation)
mode_list = Vector{Int}()
for (mode, n_in) in enumerate(mo.state)
if n_in > 0
for photon in 1:n_in
push!(mode_list, mode)
end
end
end
ModeList(mode_list, mo.m)
end
at_most_one_photon_per_bin(state) = all(state[:] .<= 1)
at_most_one_photon_per_bin(r::ModeOccupation) = at_most_one_photon_per_bin(r.state)
isa_subset(subset_modes::Vector{Int}) = (at_most_one_photon_per_bin(subset_modes) && sum(subset_modes) != 0)
isa_subset(subset_modes::ModeOccupation) = isa_subset(subset_modes.state)
"""
first_modes(n::Int, m::Int)
Create a [`ModeOccupation`](@ref) with `n` photons in the first sites of `m` modes.
"""
first_modes(n::Int,m::Int) = n<=m ? ModeOccupation([i <= n ? 1 : 0 for i in 1:m]) : error("n>m")
first_modes_array(n::Int,m::Int) = first_modes(n,m).state
"""
last_modes(n::Int, m::Int)
Create a [`ModeOccupation`](@ref) with `n` photons in the last sites of `m` modes.
"""
last_modes(n::Int,m::Int) = n<=m ? ModeOccupation([i > m-n ? 1 : 0 for i in 1:m]) : error("n>m")
last_modes_array(n::Int,m::Int) = last_modes(n,m).state
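# e.g. (illustrative, not part of the original source):
# first_modes(2,4).state == [1,1,0,0]
# last_modes(2,4).state == [0,0,1,1]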
equilibrated_input(sparsity, m) = ModeOccupation([((i-1) % sparsity) == 0 ? 1 : 0 for i in 1:m])
"""
mutable struct ThresholdModeOccupation
Holds threshold detector clicks. Example
ThresholdModeOccupation(ModeList([1,2,4], 4))
"""
@auto_hash_equals mutable struct ThresholdModeOccupation
m::Int
clicks::Vector{Int}
function ThresholdModeOccupation(ml::ModeList)
clicks = convert(ModeOccupation, ml).state
if !all(clicks[:] .>= 0)
error("negative mode clicks")
elseif !all(clicks[:] .<= 1)
error("clicks can be at most one")
else
new(ml.m, clicks)
end
end
function ThresholdModeOccupation(mo::ModeOccupation)
clicks = mo.state
if !all(clicks[:] .>= 0)
error("negative mode clicks")
elseif !all(clicks[:] .<= 1)
error("clicks can be at most one")
else
new(mo.m, clicks)
end
end
end
# example ThresholdModeOccupation(ModeList([1,2,4], 4))
"""
Subset(state::Vector{Int})
Create a mode occupation list with at most one count per mode.
Fields:
- n::Int
- m::Int
- subset::Vector{Int}
"""
@auto_hash_equals struct Subset
# basically a mode occupation list with at most one count per mode
n::Int
m::Int
subset::Vector{Int}
function Subset(state)
isa_subset(state) ? new(sum(state), length(state), state) : error("invalid subset")
end
function Subset(modeocc::ModeOccupation)
state = modeocc.state
isa_subset(state) ? new(sum(state), length(state), state) : error("invalid subset")
end
function Subset(ml::ModeList)
Subset(convert(ModeOccupation, ml))
end
end
Base.show(io::IO, s::Subset) = print(io, "subset = ", convert(Vector{Int},occupancy_vector_to_partition(s.subset)))
Base.length(subset::Subset) = sum(subset.subset)
function check_disjoint_subsets(s1::Subset, s2::Subset)
@argcheck s1.m == s2.m "subsets do not have the same dimension"
@argcheck all(s1.subset .* s2.subset .== 0) "subsets overlap"
end
function check_subset_overlap(subsets::Vector{Subset})
if length(subsets) == 1
return false
end
for (i,subset_1) in enumerate(subsets)
for (j,subset_2) in enumerate(subsets)
if i>j
check_disjoint_subsets(subset_1, subset_2)
end
end
end
end
function check_subset_overlap(subset::Subset)
nothing
end
"""
Partition(subsets::Vector{Subset})
Create a partition from multiple [`Subset`](@ref).
"""
@auto_hash_equals struct Partition
subsets::Vector{Subset}
n_subset::Int
m::Int
function Partition(subsets)
check_subset_overlap(subsets)
new(subsets, length(subsets), subsets[1].m)
end
function Partition(subset::Subset)
Partition([subset])
end
end
Base.show(io::IO, part::Partition) = begin
println(io, "partition =")
for s in part.subsets
println(io, s)
end
end
"""
partition_from_subset_lengths(subset_lengths)
Return a partition from a vector of subset lengths.
"""
function partition_from_subset_lengths(subset_lengths)
"""returns a partition from a vector of subset lengths, such as [2,1] gives Partition([[1,1],[1]])"""
m = sum(subset_lengths)
subsets = []
mode = 1
for subset_length in subset_lengths
subset_vector = zeros(Int, m)
for j in 0:subset_length-1
if mode+j <= m
subset_vector[mode+j ] = 1
end
end
mode += subset_length
push!(subsets, Subset(subset_vector))
end
Partition(convert(Vector{Subset}, subsets))
end
"""
equilibrated_partition_vector(m,n_subsets)
Returns the most equilibrated partition size vector possible by Euclidean division.
(Euclidean division alone may give n_subsets or n_subsets+1 parts; here the remainder is folded into the first subset so that exactly n_subsets sizes are returned.)
"""
function equilibrated_partition_vector(m,n_subsets)
q = div(m,n_subsets)
y = n_subsets
r = rem(m,n_subsets)
first_part = [q for i in 1:y]
first_part[1] += r
first_part
end
equilibrated_mode_occupation(m,n_subsets) = ModeOccupation(equilibrated_partition_vector(m,n_subsets))
"""
equilibrated_partition(m,n_subsets)
Returns the most equilibrated partition possible, built from [`equilibrated_partition_vector`](@ref).
"""
function equilibrated_partition(m,n_subsets)
partition_from_subset_lengths(equilibrated_partition_vector(m,n_subsets))
end
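# e.g. (illustrative, not part of the original source):
# equilibrated_partition(5,2) groups the 5 modes into two subsets of sizes 3 and 2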
"""
occupies_all_modes(part::Partition)
Check whether a partition occupies all modes or not.
"""
function occupies_all_modes(part::Partition)
"""checks if a partition occupies all m modes"""
occupied_modes = zeros(Int, part.m)
for s in part.subsets
occupied_modes .+= s.subset
end
all(occupied_modes .== 1)
end
"""
PartitionOccupancy(counts::ModeOccupation, n::Int, partition::Partition)
Fields:
- counts::ModeOccupation
- partition::Partition
- n::Int
- m::Int
"""
@auto_hash_equals struct PartitionOccupancy
counts::ModeOccupation
partition::Partition
n::Int
m::Int
function PartitionOccupancy(counts::ModeOccupation, n::Int, partition::Partition)
@argcheck counts.m == partition.n_subset "counts do not have as many modes as partition has subsets"
@argcheck sum(counts.state) <= n "more photons at the output than input"
if occupies_all_modes(partition)
@argcheck sum(counts.state) == n "photons lost in a partition that occupies all modes"
end
new(counts, partition, n, partition.subsets[1].m)
end
end
Base.show(io::IO, part_occ::PartitionOccupancy) = begin
for (i, count) in enumerate(part_occ.counts.state)
println(io, count," in ", part_occ.partition.subsets[i])
end
end
function to_threshold(part_occ::PartitionOccupancy)
if [(length(subset)) for subset in part_occ.partition.subsets] == ones(length(part_occ.partition.subsets))
return PartitionOccupancy(to_threshold(part_occ.counts),part_occ.n, part_occ.partition)
else
error("subsets span multiple mode and threshold action is not clear")
end
end
function partition_occupancy_to_partition_size_vector_and_counts(part_occ::PartitionOccupancy)
part = part_occ.partition
@argcheck occupies_all_modes(part) "need to have a partition that occupies all modes if using Shchesnovich's formulas in partition_expectation_values.jl"
partition_size_vector = [part.subsets[i].n for i in 1:length(part.subsets)]
partition_counts = part_occ.counts.state
partition_size_vector, partition_counts
end
remove_last_subset(part::Partition) = Partition(part.subsets[1:end-1])
# """
# PartitionSamplingParameters
#
# Type holding info on a numerical experiment simulating the probability distributions of photon counting in partitions, used for comparing two input types `T1`,`T2`.
#
# By default the interferometer is set at nothing - you need to use `set_interferometer!`.
#
# Fields:
# n::Int
# m::Int
# interf::Interferometer = RandHaar(m)
#
# T1::Type{T} where {T<:InputType} = Bosonic
# T2::Type{T} where {T<:InputType} = Distinguishable
# mode_occ_1::ModeOccupation = first_modes(n,m)
# mode_occ_2::ModeOccupation = first_modes(n,m)
#
# i1::Input = Input{T1}(mode_occ_1)
# i2::Input = Input{T1}(mode_occ_2)
#
# n_subsets::Int = 2
# part::Partition = equilibrated_partition(m, n_subsets)
#
# o::OutputMeasurementType = PartitionCountsAll(part)
# ev1::Event = Event(i1,o,interf)
# ev2::Event = Event(i2,o,interf)
# """
# @with_kw mutable struct PartitionSamplingParameters
#
# n::Int
# m::Int = n
# m_real::Int = m
# interf::Union{Interferometer, Nothing} = nothing
#
# # begin
# # if interf != nothing
# # @warn "use set_interferometer! to update the events as well as everything in a lossy form (if it is a lossy interferometer)"
# # end
# # end
#
# T1::Type{T} where {T<:InputType} = Bosonic
# T2::Type{T} where {T<:InputType} = Distinguishable
# mode_occ_1::ModeOccupation = first_modes(n,m)
# mode_occ_2::ModeOccupation = first_modes(n,m)
#
# i1::Input = Input{T1}(mode_occ_1)
# i2::Input = Input{T2}(mode_occ_2)
#
# n_subsets::Int = 2
#
# part::Partition = equilibrated_partition(m, n_subsets)
#
# o::OutputMeasurementType = PartitionCountsAll(part)
# ev1::Union{Event, Nothing} = nothing
# ev2::Union{Event, Nothing} = nothing
#
#
#
# end
@with_kw mutable struct SamplingParameters
n::Int
m::Int = n
m_real::Int = m
interf::Union{Interferometer, Nothing} = nothing
T::Type{T} where {T<:InputType} = Bosonic
mode_occ::ModeOccupation = first_modes(n,m)
x::Union{Nothing, Real} = nothing
i::Union{Input, Nothing} = nothing
o::Union{OutputMeasurementType, Nothing} = nothing
ev::Union{Event, Nothing} = nothing
end
@with_kw mutable struct PartitionSamplingParameters
n::Int
m::Int = n
m_real::Int = m
interf::Union{Interferometer, Nothing} = nothing
# begin
# if interf != nothing
# @warn "use set_interferometer! to update the events as well as everything in a lossy form (if it is a lossy interferometer)"
# end
# end
T::Type{T} where {T<:InputType} = Bosonic
mode_occ::ModeOccupation = first_modes(n,m)
x::Union{Nothing, Real} = nothing
i::Union{Input, Nothing} = nothing
# if T == OneParameterInterpolation
# i = Input{T1}(mode_occ,x)
# elseif T in [Bosonic, Distinguishable]
# i = Input{T1}(mode_occ)
# else
# error("type not implemented")
# end
n_subsets::Int = 2
part::Partition = equilibrated_partition(m, n_subsets)
o::OutputMeasurementType = PartitionCountsAll(part)
ev::Union{Event, Nothing} = nothing
end
"""
set_input!(params::PartitionSamplingParameters)
set_input!(params::SamplingParameters)
`PartitionSamplingParameters` is initially defined with a nothing input, this function acts as an outer constructor to make it compatible with peculiarities of the `@with_kw` used in the type definition.
"""
function set_input!(params::Union{PartitionSamplingParameters, SamplingParameters})
if params.T in [Bosonic, Distinguishable]
params.i = Input{params.T}(params.mode_occ)
elseif params.T == OneParameterInterpolation
params.i = Input{params.T}(params.mode_occ, params.x)
else
error("type not implemented")
end
end
"""
set_interferometer!(interf::Interferometer, params::PartitionSamplingParameters)
function set_interferometer!(params::PartitionSamplingParameters)
Updates the interferometer in a PartitionSamplingParameters, including the definition of the events and upgrade to lossy if needed of other quantities.
This function acts as an outer constructor to make it compatible with peculiarities of the `@with_kw` used in the type definition.
"""
function set_interferometer!(interf::Interferometer, params::Union{PartitionSamplingParameters, SamplingParameters})
if params.i == nothing
set_input!(params)
end
params.interf = interf
if LossParameters(typeof(interf)) == IsLossy()
@argcheck interf.m == 2*params.m
for field in [:mode_occ, :i,:part, :o]
setfield!(params, field, to_lossy(getfield(params, field)))
end
params.n_subsets += 1
else
@argcheck interf.m == params.m
end
params.ev = Event(params.i,params.o,params.interf)
end
set_interferometer!(params::Union{PartitionSamplingParameters, SamplingParameters}) = set_interferometer!(params.interf, params)
function set_partition!(params::PartitionSamplingParameters)
params.o = PartitionCountsAll(params.part)
params.ev = Event(params.i,params.o,params.interf)
end
function set_measurement!(o::OutputMeasurementType, params::SamplingParameters)
if StateMeasurement(typeof(o)) == FockStateMeasurement()
params.o = o
params.ev = Event(params.i,params.o,params.interf)
else
error("invalid measurement")
end
end
set_measurement!(params::SamplingParameters) = set_measurement!(params.o, params)
function set_parameters!(params::Union{PartitionSamplingParameters, SamplingParameters})
set_input!(params)
if typeof(params) == PartitionSamplingParameters
set_partition!(params)
elseif typeof(params) == SamplingParameters
set_measurement!(params)
end
set_interferometer!(params)
end
"""
LoopSamplingParameters(...)
Container for sampling parameters with a LoopSampler. Parameters are set by default as defined, and you can change only the ones needed, for instance to sample `Distinguishable` particles instead, just do
LoopSamplingParameters(input_type = Distinguishable)
and to change the number of photons with it
LoopSamplingParameters(n = 10 ,input_type = Distinguishable)
To be used with [`get_sample_loop`](@ref).
By default it applies a random phase at each optical line.
"""
@with_kw mutable struct LoopSamplingParameters
n::Int = 4
m::Int = n
x::Union{Real, Nothing} = nothing
input_type::Type{T} where {T<:InputType} = Bosonic
i::Input = begin
if input_type in [Bosonic, Distinguishable]
i = Input{input_type}(first_modes(n,m))
elseif input_type == OneParameterInterpolation
if x == nothing
error("x not given")
else
i = Input{input_type}(first_modes(n,m), x)
end
end
end
η::Union{T, Vector{T}} where {T<:Real} = 1/sqrt(2) .* ones(m-1)
η_loss_bs::Union{Nothing, T, Vector{T}} where {T<:Real} = 1 .* ones(m-1)
η_loss_lines::Union{Nothing, T, Vector{T}} where {T<:Real} = 1 .* ones(m)
d::Union{Nothing, Real, Distribution} = Uniform(0, 2pi)
ϕ::Union{Nothing, T, Vector{T}} where {T<:Real} = rand(d, m)
p_dark::Real = 0.0
p_no_count::Real = 0.0
end
function Base.convert(::Type{PartitionSamplingParameters}, params::LoopSamplingParameters)
@unpack n, m, input_type, i, η, η_loss_bs, η_loss_lines, d, ϕ, p_dark, p_no_count = params
interf = build_loop(params)
ps = PartitionSamplingParameters(n=n, m=m, T= get_parametric_type(i)[1], interf = interf, mode_occ = i.r, x = i.distinguishability_param,i=i)
set_interferometer!(ps)
ps
end
"""
get_parametric_type(i)
Return the types `T1`,..,`Tn` of a parametric type `i::T{T1,..,Tn}`.
!!! note
- If the parametric type has only one parameter, use `get_parametric_type(i)[1]`.
- If no parametric type, returns an array containing the type itself.
"""
function get_parametric_type(i)
length(collect(typeof(i).parameters)) > 0 ? collect(typeof(i).parameters) : [typeof(i)]
end
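# e.g. (illustrative, not part of the original source):
# get_parametric_type(Input{Bosonic}(first_modes(2,2)))[1] == Bosonic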
### merging all types ###
include("interferometers.jl")
include("partitions.jl")
include("input.jl")
include("measurements.jl")
include("events.jl")
include("loss.jl")
include("certification.jl")
include("circuits.jl")
include("sampling_structures.jl")
include("loop.jl")
include("experiments_structures.jl")
@testset "circuits and loss" begin
### 2d HOM without loss but with ModeList example ###
n = 2
m = 2
i = Input{Bosonic}(first_modes(n,m))
o = FockDetection(ModeOccupation([1,1])) # detecting bunching, should be 0.5 in probability if there was no loss
transmission_amplitude_loss_array = 0:0.1:1
output_proba = []
circuit = LosslessCircuit(2)
interf = BeamSplitter(1/sqrt(2))
target_modes = ModeList([1,2], m)
add_element!(circuit, interf, target_modes)
ev = Event(i,o, circuit)
compute_probability!(ev)
@test isapprox(ev.proba_params.probability, 0., atol = eps())
### 1d with loss example ###
n = 1
m = 1
function lossy_line_example(η_loss)
circuit = LossyCircuit(1)
interf = LossyLine(η_loss)
target_modes = ModeList([1], m)
add_element_lossy!(circuit, interf, target_modes)
circuit
end
lossy_line_example(0.9)
transmission_amplitude_loss_array = 0:0.1:1
output_proba = []
i = Input{Bosonic}(to_lossy(first_modes(n,m)))
o = FockDetection(to_lossy(first_modes(n,m)))
for transmission in transmission_amplitude_loss_array
ev = Event(i,o, lossy_line_example(transmission))
compute_probability!(ev)
push!(output_proba, ev.proba_params.probability)
end
@test output_proba ≈ [0.0, 0.010000000000000002, 0.04000000000000001, 0.09, 0.16000000000000003, 0.25, 0.36, 0.48999999999999994, 0.6400000000000001, 0.81, 1.0]
### 2d HOM with loss example ###
n = 2
m = 2
i = Input{Bosonic}(first_modes(n,m))
o = FockDetection(ModeOccupation([2,0])) # detecting bunching, should be 0.5 in probability if there was no loss
transmission_amplitude_loss_array = 0:0.1:1
output_proba = []
function lossy_bs_example(η_loss)
circuit = LossyCircuit(2)
interf = LossyBeamSplitter(1/sqrt(2), η_loss)
target_modes = ModeList([1,2],m)
add_element_lossy!(circuit, interf, target_modes)
circuit
end
for transmission in transmission_amplitude_loss_array
ev = Event(i,o, lossy_bs_example(transmission))
compute_probability!(ev)
push!(output_proba, ev.proba_params.probability)
end
@test output_proba ≈ [0.0, 5.0000000000000016e-5, 0.0008000000000000003, 0.004049999999999998, 0.012800000000000004, 0.031249999999999993, 0.06479999999999997, 0.12004999999999996, 0.20480000000000007, 0.32805, 0.4999999999999999]
### building the loop ###
n = 3
m = n
η = 1/sqrt(2) .* ones(m-1)
# 1/sqrt(2) .* [1,0] #ones(m-1) # see selection of target_modes = [i, i+1] for m-1
# [1/sqrt(2), 1] #1/sqrt(2) .* ones(m-1) # see selection of target_modes = [i, i+1] for m-1
# η_loss = 1. .* ones(m-1)
circuit = LosslessCircuit(m) #LossyCircuit(m)
for mode in 1:m-1
interf = BeamSplitter(η[mode]) #LossyBeamSplitter(reflectivities[mode], η_loss[mode])
target_modes_in = ModeList([mode, mode+1], m)
target_modes_out = target_modes_in
add_element!(circuit, interf, target_modes_in, target_modes_out)
end
i = Input{Bosonic}(first_modes(n,m))
# outputs compatible with two photons in the top mode
o1 = FockDetection(ModeOccupation([2,1,0]))
o2 = FockDetection(ModeOccupation([2,0,1]))
o_array = [o1,o2]
p_two_photon_first_mode = 0
for o in o_array
ev = Event(i,o, circuit)
compute_probability!(ev)
p_two_photon_first_mode += ev.proba_params.probability
end
@test p_two_photon_first_mode ≈ 0.5
o3 = FockDetection(ModeOccupation([3,0,0]))
ev = Event(i,o3, circuit)
compute_probability!(ev)
@test ev.proba_params.probability ≈ 0.
### equivalence of lossy constructed circuit and nonlossy ###
begin
n = 3
m = n
i = Input{Bosonic}(first_modes(n,m))
η = 1/sqrt(2) .* ones(m-1)
# 1/sqrt(2) .* [1,0] #ones(m-1) # see selection of target_modes = [i, i+1] for m-1
# [1/sqrt(2), 1] #1/sqrt(2) .* ones(m-1) # see selection of target_modes = [i, i+1] for m-1
η_loss = 1 .* ones(m-1)
circuit = LossyCircuit(m)
for mode in 1:m-1
interf = LossyBeamSplitter(η[mode], η_loss[mode])
target_modes_in = ModeList([mode, mode+1], circuit.m_real)
target_modes_out = target_modes_in
add_element_lossy!(circuit, interf, target_modes_in, target_modes_out)
end
sub_circuit_lossy = circuit.U[1:3, 1:3]
circuit = LosslessCircuit(m)
for mode in 1:m-1
interf = BeamSplitter(η[mode])#LossyBeamSplitter(η[mode], η_loss[mode])
#target_modes_in = ModeList([mode, mode+1], circuit.m_real)
#target_modes_out = ModeList([mode, mode+1], circuit.m_real)
target_modes_in = ModeList([mode, mode+1], m)
target_modes_out = target_modes_in
add_element!(circuit, interf, target_modes_in, target_modes_out)
end
pretty_table(sub_circuit_lossy)
pretty_table(circuit.U)
@test sub_circuit_lossy ≈ circuit.U
end
### loop with loss and types ###
begin
n = 3
m = n
i = Input{Bosonic}(first_modes(n,m))
η = 1/sqrt(2) .* ones(m-1)
η_loss_bs = 0.9 .* ones(m-1)
η_loss_lines = 0.9 .* ones(m)
d = Uniform(0, 2pi)
ϕ = rand(d, m)
end
circuit = LossyLoop(m, η, η_loss_bs, η_loss_lines, ϕ).circuit
o1 = FockDetection(ModeOccupation([2,1,0]))
o2 = FockDetection(ModeOccupation([2,0,1]))
o_array = [o1,o2]
p_two_photon_first_mode = 0
for o in o_array
ev = Event(i,o, circuit)
@show compute_probability!(ev)
p_two_photon_first_mode += ev.proba_params.probability
end
@test p_two_photon_first_mode ≈ 0.09324549025354557
o3 = FockDetection(ModeOccupation([3,0,0]))
ev = Event(i,o3, circuit)
compute_probability!(ev)
@test ev.proba_params.probability ≈ 0.
end
using BosonSampling
using Test
using JLD
# using Plots
### scattering ###
m = 5
n = 3
interf = RandHaar(m)
i = Input{RandomGramMatrix}(first_modes(n,m))
o = FockDetection(first_modes(n,m))
ev = Event(i,o,interf)
compute_probability!(ev)
ev.proba_params
### bunching ###
m = 5
n = 3
interf = RandHaar(m)
ib = Input{Bosonic}(first_modes(n,m))
ipd = Input{RandomGramMatrix}(first_modes(n,m))
subset_modes = first_modes(n,m)
typeof(subset_modes)
pb = full_bunching_probability(interf, ib, subset_modes)
ppd = full_bunching_probability(interf, ipd, subset_modes)
@test pb/ppd > 1. # this doesn't HAVE TO pass but will pass in nearly all
# cases
### Classical sampling ###
m = 16
n = 3
input = Input{Distinguishable}(first_modes(n,m))
interf = RandHaar(m)
out = classical_sampler(input=input, interf=interf)
### Cliffords sampler ###
m = 16
n = 3
input = Input{Bosonic}(first_modes(n,m))
interf = RandHaar(m)
out = cliffords_sampler(input=input, interf=interf)
### Noisy sampling ###
m = 16
n = 3
x = 0.8 # distinguishability
η = 0.8 # reflectivity
input = Input{OneParameterInterpolation}(first_modes(n,m), x)
interf = RandHaar(m)
out = noisy_sampler(input=input, loss=η, interf=interf)
### MIS sampling ###
n = 8
m = n^2
starting_state = zeros(Int, m)
input_state = first_modes_array(n,m)
U = copy(rand_haar(m))
# generate a collisionless state as a starting point
starting_state = iterate_until_collisionless(() -> random_occupancy(n,m))
known_pdf(state) = process_probability_distinguishable(U, input_state, state)
target_pdf(state) = process_probability(U, input_state, state)
known_sampler = () -> iterate_until_collisionless(() -> classical_sampler(U, m, n)) # gives a classical sampler
# samples = metropolis_sampler(;target_pdf = target_pdf, known_pdf = known_pdf, known_sampler = known_sampler, starting_state = starting_state, n_iter = 100)
### Noisy distribution ###
n = 3
m = 6
x = 0.8
η = 0.8
input = Input{OneParameterInterpolation}(first_modes(n,m), x)
interf = RandHaar(m)
output_statistics = noisy_distribution(input=input, loss=η, interf=interf)
p_exact = output_statistics[1]
p_approx = output_statistics[2]
p_sampled = output_statistics[3]
### Theoretical distribution ###
n = 3
m = 6
input = Input{Bosonic}(first_modes(n,m))
interf = RandHaar(m)
output_distribution = theoretical_distribution(input=input, interf=interf)
### Usage Interferometer ###
B = BeamSplitter(1/sqrt(2))
n = 2 # photon number
m = 2 # mode number
proba_bunching = Vector{Float64}(undef, 0)
x_ = Vector{Float64}(undef, 0)
# Compute the probability of coincidence measurement
for x = -1:0.01:1
local input = Input{OneParameterInterpolation}(first_modes(n,m), 1-x^2)
p_theo = theoretical_distribution(input=input, interf=B)
push!(x_, x)
push!(proba_bunching, p_theo[2] + p_theo[3]) # store the probability to observe one photon in each mode
end
# plot(x_, proba_bunching, xlabel="distinguishability parameter", ylabel="coincidence probability", label=nothing, dpi=300)
# savefig("docs/src/tutorial/proba_bunching.png")
### subsets ###
s1 = Subset([1,1,0,0,0])
s2 = Subset([0,0,1,1,0])
s3 = Subset([1,0,1,0,0])
"subsets are not allowed to overlap"
# check_subset_overlap([s1,s2,s3]) will fail
### HOM tests: one mode ###
input_state = Input{Bosonic}(first_modes(n,m))
set1 = [1,0]
physical_interferometer = Fourier(m)
part = Partition([Subset(set1)])
(physical_indexes, pdf) = compute_probabilities_partition(physical_interferometer, part, input_state)
### HOM tests: mode1, mode2 ###
n = 2
m = 2
input_state = Input{Bosonic}(first_modes(n,m))
set1 = [1,0]
set2 = [0,1]
physical_interferometer = Fourier(m)
part = Partition([Subset(set1), Subset(set2)])
(physical_indexes, pdf) = compute_probabilities_partition(physical_interferometer, part, input_state)
print_pdfs(physical_indexes, pdf,n; partition_spans_all_modes = true, physical_events_only = true)
# for a single count
part_occ = PartitionOccupancy(ModeOccupation([1,1]),2,part)
compute_probability_partition_occupancy(physical_interferometer, part_occ, input_state)
# the same using an event
PartitionCount(part_occ)
o = PartitionCount(part_occ)
ev = Event(input_state, o, physical_interferometer)
############ need to change the constructor of Event
get_parametric_type(input_state)
get_parametric_type(o)
length(collect(typeof(o).parameters))
typeof(o)
collect(typeof(input_state).parameters)
### multiset for a random interferometer ###
m = 4
n = 3
inp = Input{Bosonic}(first_modes(n,m))
set1 = zeros(Int,m)
set2 = zeros(Int,m)
set1[1:2] .= 1
set2[3:4] .= 1
physical_interferometer = RandHaar(m)
part = Partition([Subset(set1), Subset(set2)])
(physical_indexes, pdf) = compute_probabilities_partition(physical_interferometer, part, inp)
fourier_indexes = copy(physical_indexes)
print_pdfs(physical_indexes, pdf, n; physical_events_only = true, partition_spans_all_modes = true)
#print_pdfs(physical_indexes, probas_fourier, n)
### partitions, subsets ###
n = 2
m = 5
s1 = Subset([1,1,0,0,0])
s2 = Subset([0,0,1,1,0])
part = Partition([s1,s2])
part_occ = PartitionOccupancy(ModeOccupation([2,0]),n,part)
i = Input{Bosonic}(first_modes(n,m))
o = PartitionCount(part_occ)
interf = RandHaar(m)
ev = Event(i,o,interf)
compute_probability!(ev)
### multiple counts probabilities ###
m = 10
n = 3
set1 = zeros(Int,m)
set2 = zeros(Int,m)
set1[1:2] .= 1
set2[3:4] .= 1
interf = RandHaar(m)
part = Partition([Subset(set1), Subset(set2)])
i = Input{Bosonic}(first_modes(n,m))
o = PartitionCountsAll(part)
ev = Event(i,o,interf)
compute_probability!(ev)
### Circuit ###
n = 6
input = Input{Bosonic}(first_modes(n,n))
my_circuit = Circuit(input.m)
add_element!(circuit=my_circuit, interf=RandHaar(input.m), target_modes=input.r.state)
add_element!(circuit=my_circuit, interf=BeamSplitter(0.2), target_modes=[1,3])
add_element!(circuit=my_circuit, interf=Fourier(3), target_modes=[2,4,5])
is_unitary(my_circuit.U)
### partition tutorial ###
s1 = Subset([1,1,0,0,0])
s2 = Subset([0,0,1,1,0])
s3 = Subset([1,0,1,0,0])
#check_subset_overlap([s1,s2,s3]) # will fail
n = 2
m = 2
input_state = Input{Bosonic}(first_modes(n,m))
set1 = [1,0]
set2 = [0,1]
physical_interferometer = Fourier(m)
part = Partition([Subset(set1), Subset(set2)])
occupies_all_modes(part)
(physical_indexes, pdf) = compute_probabilities_partition(physical_interferometer, part, input_state)
print_pdfs(physical_indexes, pdf,n; partition_spans_all_modes = true, physical_events_only = true)
n = 2
m = 5
s1 = Subset([1,1,0,0,0])
s2 = Subset([0,0,1,1,0])
part = Partition([s1,s2])
part_occ = PartitionOccupancy(ModeOccupation([2,0]),n,part)
i = Input{Bosonic}(first_modes(n,m))
o = PartitionCount(part_occ)
interf = RandHaar(m)
ev = Event(i,o,interf)
compute_probability!(ev)
o = PartitionCountsAll(part)
ev = Event(i,o,interf)
compute_probability!(ev)
### Check Gaussian sampler ###
r = first_modes(4,4)
s = ones(r.m)
i = GaussianInput{SingleModeSqueezedVacuum}(r,s)
o = FockSample()
ev = GaussianEvent(i,o)
gaussian_sampler(ev,nsamples=120,burn_in=20,thinning_rate=10)
typeof(o)
ans = true
### dark counts ###
n = 10
m = 10
p_dark = 0.1
input_state = first_modes(n,m)
interf = RandHaar(m)
i = Input{Bosonic}(input_state)
o = DarkCountFockSample(p_dark)
ev = Event(i,o,interf)
sample!(ev)
### RealisticDetectorsFockSample ###
p_no_count = 0.1
o = RealisticDetectorsFockSample(p_dark, p_no_count)
ev = Event(i,o,interf)
sample!(ev)
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 282 | @testset "partition types" begin
n = 2
m = 2
@test is_compatible(ModeList([1,2], m),ModeList([1,2],m))
@test is_compatible(ModeList([1,2], m),ModeList([2,1],m))
@test_throws ArgumentError is_compatible(ModeList([1,2], m),ModeList([2,1])) == ArgumentError
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 5936 | using BosonSampling
using Plots
using ProgressMeter
using Distributions
using Random
using Test
using ArgCheck
using StatsBase
using ColorSchemes
using Interpolations
using Dierckx
using LinearAlgebra
using PrettyTables
using LaTeXStrings
using JLD
using AutoHashEquals
using LinearRegression
using DataStructures
include("tools.jl")
@testset "BosonSampling.jl" begin
@testset "scattering matrix" begin
input_state = [2,0]
output_state = [1,1]
U = matrix_test(2)
@test scattering_matrix(U, input_state, output_state) == [1 2; 1 2]
input_state = [1,1]
output_state = [0,2]
U = matrix_test(2)
@test scattering_matrix(U, input_state, output_state) == [2 2; 4 4]
end
@testset "process_probability: HOM" begin
@test process_probability(fourier_matrix(2), [1,1], [0,2]) ≈ 0.5 atol = 1e-8
@test process_probability(fourier_matrix(2), [1,1], [2,0]) ≈ 0.5 atol = 1e-8
@test process_probability(fourier_matrix(2), [1,1], [1,1]) ≈ 0. atol = 1e-8
S_bosonic = ones(2,2)
S_dist = [1 0; 0 1]
@test process_probability_partial(fourier_matrix(2), S_bosonic, [1,1], [0,2]) ≈ 0.5 atol = 1e-8
@test process_probability_partial(fourier_matrix(2), S_bosonic, [1,1], [2,0]) ≈ 0.5 atol = 1e-8
@test process_probability_partial(fourier_matrix(2), S_bosonic, [1,1], [1,1]) ≈ 0. atol = 1e-8
@test process_probability_partial(fourier_matrix(2), S_dist, [1,1], [0,2]) ≈ 0.25 atol = 1e-8
@test process_probability_partial(fourier_matrix(2), S_dist, [1,1], [2,0]) ≈ 0.25 atol = 1e-8
@test process_probability_partial(fourier_matrix(2), S_dist, [1,1], [1,1]) ≈ 0.5 atol = 1e-8
end
@testset "probability distribution of photons in modes : HOM" begin
U = fourier_matrix(2)
occupancy_vector = [1, 0]
@test proba_partition_bosonic(U = U, occupancy_vector = occupancy_vector) ≈ [0.5,0,0.5] atol = 1e-5
occupancy_vector = [0, 1]
@test proba_partition_bosonic(U = U, occupancy_vector = occupancy_vector) ≈ [0.5,0,0.5] atol = 1e-5
occupancy_vector = [1, 1]
@test proba_partition_bosonic(U = U, occupancy_vector = occupancy_vector) ≈ [0,0,1.] atol = 1e-5
end
@testset "probability distribution of distinguishable photons in modes : HOM" begin
U = fourier_matrix(2)
part = [1,0]
@test proba_partition_distinguishable(occupancy_vector = part, U = U) ≈ [0.25,0.5,0.25] atol = 1e-8
end
@testset "partial distinguishability partitions" begin
m = 10
n = 4
input_state = zeros(Int, m)
occupancy_vector = zeros(Int, m)
for i=1:n
input_state[i] = 1
end
occupancy_vector[1] = 1
U = fourier_matrix(10)
@test proba_partition_partial(U = U, S = ones(n, n), occupancy_vector = occupancy_vector, input_state = input_state) == proba_partition_bosonic(U = U, occupancy_vector = occupancy_vector, input_state = input_state)
@test proba_partition_partial(U = U, S = Matrix{Float64}(I, n, n), occupancy_vector = occupancy_vector, input_state = input_state) ≈ proba_partition_distinguishable(occupancy_vector = occupancy_vector, U = U, input_state = input_state)
end
@testset "theoretical_distribution" begin
n = 3
occupation = random_mode_occupation_collisionless(n,n)
i = Input{Bosonic}(occupation)
interf = Fourier(i.r.m)
p_theo = theoretical_distribution(input=i, interf=interf)
@test sum(p_theo) ≈ 1 atol=1e-9
@test all(p -> p>=0, p_theo)
output_events = output_mode_occupation(n,n)
for i = 1:length(output_events)
if check_suppression_law(output_events[i])
@test p_theo[i] ≈ 0 atol=1e-6
end
end
end
@testset "noisy statistics" begin
n = 3
occupation = random_mode_occupation_collisionless(n,n)
i = Input{OneParameterInterpolation}(occupation, 0.5)
interf = Fourier(i.r.m)
res = noisy_distribution(input=i, loss=0.5, interf=interf)
p_exact = res[1]
p_approx = res[2]
p_samp = res[3]
@test sum(p_exact) ≈ 1 atol=1e-9
@test sum(p_approx) ≈ 1 atol=1e-9
@test sum(p_samp) ≈ 1 atol=1e-9
@test all(p->p>=0, p_exact)
@test all(p->p>=0, p_approx)
@test all(p->p>=0, p_samp)
i = Input{Bosonic}(occupation)
res = noisy_distribution(input=i, loss=0.999, interf=interf, approx=false, samp=false)
p_exact = res[1]
output_events = output_mode_occupation(n,n)
for i = 1:length(output_events)
if check_suppression_law(output_events[i])
@test p_exact[i] ≈ 0 atol=1e-6
end
end
end
@testset "suppression law boson samplers" begin
n = 3
interf = Fourier(n)
input_clifford_sampler = Input{Bosonic}(first_modes(n,n))
input_noisy_sampler = Input{OneParameterInterpolation}(first_modes(n,n), 1.0)
out_clifford_sampler = cliffords_sampler(input=input_clifford_sampler, interf=interf)
out_noisy_sampler = noisy_sampler(input=input_noisy_sampler, loss=1.0, interf=interf)
@test !check_suppression_law(out_clifford_sampler)
@test !check_suppression_law(out_noisy_sampler)
end
@testset "check analytical counter example interferometer" begin
n = 7
r = 2
complete_interferometer = Matrix{ComplexF16}(I,n,n)
bottom_dft = Matrix{ComplexF64}(I,n,n)
bottom_dft[r+1:n, r+1:n] = fourier_matrix(n-r)
complete_interferometer *= bottom_dft
for top_mode in 1:r
complete_interferometer *= beam_splitter_modes(in_up=top_mode, in_down=top_mode+r, out_up=top_mode, out_down=top_mode+r,transmission_amplitude=sqrt(r/n), n=n)
end
complete_interferometer = transpose(complete_interferometer)
circuit = Circuit(n)
add_element!(circuit=circuit, interf=Fourier(n-r), target_modes=[i for i in r+1:n])
add_element!(circuit=circuit, interf=BeamSplitter(sqrt(r/n)), target_modes=[3,1])
add_element!(circuit=circuit, interf=BeamSplitter(sqrt(r/n)), target_modes=[4,2])
@test complete_interferometer == circuit.U
end
@testset "examples usage" begin
@test include("example_usage.jl")
end
include("circuits_and_loss.jl")
include("partitions.jl")
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 1754 | @testset "sampling" begin
@testset "loop" begin
### sampling ###
function loop_tests()
begin
n = 3
m = n
i = Input{Bosonic}(first_modes(n,m))
η = 1/sqrt(2) .* ones(m-1)
η_loss_bs = 0.9 .* ones(m-1)
η_loss_lines = 0.9 .* ones(m)
d = Uniform(0, 2pi)
ϕ = rand(d, m)
end
circuit = LossyLoop(m, η, η_loss_bs, η_loss_lines, ϕ).circuit
p_dark = 0.01
p_no_count = 0.1
o = FockSample()
ev = Event(i,o, circuit)
BosonSampling.sample!(ev)
o = DarkCountFockSample(p_dark)
ev = Event(i,o, circuit)
BosonSampling.sample!(ev)
o = RealisticDetectorsFockSample(p_dark, p_no_count)
ev = Event(i,o, circuit)
BosonSampling.sample!(ev)
###### sample with a new circuit each time ######
get_sample_loop(LoopSamplingParameters(n = 10 ,input_type = Distinguishable))
### method specialisation according to the type of lossy input ###
n = 6
m = n
get_sample_loop(LoopSamplingParameters(n=n, input_type = Distinguishable, η_loss_bs = nothing, η_loss_lines = 0.9 .* ones(m)))
get_sample_loop(LoopSamplingParameters(n=n, input_type = Distinguishable, η_loss_bs = 0.9 .* ones(m-1), η_loss_lines = nothing))
smpl = get_sample_loop(LoopSamplingParameters(n=n, input_type = Distinguishable, η_loss_bs = nothing, η_loss_lines = nothing))
@test length(smpl.state) == n
end
runs_without_errors(loop_tests)
end
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 236 | @testset "sampling structures" begin
params = PartitionSamplingParameters(n = 10, m = 10)
# set_input!(params)
set_interferometer!(build_loop(LoopSamplingParameters(m=10)), params)
compute_probability!(params)
end | BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | code | 161 | function runs_without_errors(f::Function)
@test begin
try
f()
true
catch
false
end
end
end
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 1433 | <img src="https://github.com/benoitseron/BosonSampling.jl/blob/main/docs/src/assets/logo-dark.png" alt="BosonSampling.jl" width="400">
[](https://benoitseron.github.io/BosonSampling.jl/stable)
[](https://benoitseron.github.io/BosonSampling.jl/dev)
#
This project implements standard and scattershot BosonSampling in Julia, including boson samplers and certification and optimization tools.
## Functionalities
A wide variety of tools are available:
* Boson-samplers, including partial distinguishability and loss
* Bunching tools and functions
* Various tools to validate experimental boson-samplers
* User-defined optical circuits built from optical elements
* Optimization functions over unitary matrices
* Photon counting tools for subsets and partitions of the output modes
* Tools to study permanent and generalized matrix function conjectures and counter-examples
## Installation
To install the package, launch a Julia REPL session and type
julia> using Pkg; Pkg.add("BosonSampling")
Alternatively, press the `]` key to enter the package manager mode, then enter
add BosonSampling
To use the package, write
using BosonSampling
in your file.
## Authors
This package is written by Benoit Seron and Antoine Restivo. The original research presented in the package is done in collaboration with Dr. Leonardo Novo, Prof. Nicolas Cerf.
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 741 | ```julia
using BosonSampling
using Plots
# Set experimental parameters
Δω = 1
# Set the model of partial distinguishability
T = OneParameterInterpolation
# Define the balanced beam splitter
B = BeamSplitter(1/sqrt(2))
# Set each particle in a different mode
r_i = ModeOccupation([1,1])
# Define the output as detecting a coincidence
r_f = ModeOccupation([1,1])
o = FockDetection(r_f)
# Will store the events probability
events = []
for Δt in -4:0.01:4
# distinguishability
dist = exp(-(Δω * Δt)^2)
i = Input{T}(r_i,dist)
# Create the event
ev = Event(i,o,B)
# Compute its probability to occur
compute_probability!(ev)
# Store the event and its probability
push!(events, ev)
end
# Extract the coincidence probabilities and plot the HOM dip
P_coinc = [ev.proba_params.probability for ev in events]
plot(-4:0.01:4, P_coinc, xlabel="Δt", ylabel="coincidence probability", label=nothing)
``` | BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 1600 | ```julia
# define a new measurement type of a simple dark counting detector
mutable struct DarkCountFockSample <: OutputMeasurementType
s::Union{ModeOccupation, Nothing} # observed output, possibly undefined
p::Real # probability of a dark count in each mode
DarkCountFockSample(p::Real) = isa_probability(p) ? new(nothing, p) : error("invalid probability")
# instantiate if no known output
end
# works DarkCountFockSample(0.01)
# fails DarkCountFockSample(-1)
# define the sampling algorithm:
# the function sample! is modified to take into account
# the new measurement type
# this allows to keep the same syntax and in fact reuse
# any function that would have previously used sample!
# at no cost
function BosonSampling.sample!(ev::Event{TIn, TOut}) where {TIn<:InputType, TOut <: DarkCountFockSample}
# sample without dark counts
ev_no_dark = Event(ev.input_state, FockSample(), ev.interferometer)
sample!(ev_no_dark)
sample_no_dark = ev_no_dark.output_measurement.s
# now, apply the dark counts to "perfect" samples
observe_dark_count(p) = Int(do_with_probability(p)) # 1 with probability p, 0 with probability 1-p
dark_counts = [observe_dark_count(ev.output_measurement.p) for i in 1: ev.input_state.m]
ev.output_measurement.s = sample_no_dark + dark_counts
end
### example ###
# experiment parameters
n = 10
m = 10
p_dark = 0.1
input_state = first_modes(n,m)
interf = RandHaar(m)
i = Input{Bosonic}(input_state)
o = DarkCountFockSample(p_dark)
ev = Event(i,o,interf)
sample!(ev)
# output:
# state = [3, 1, 0, 3, 0, 1, 2, 0, 0, 0]
``` | BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 360 | ```julia
# Define the input with one particle in
# each input mode
r_i = ModeOccupation([1,1])
i = Input{Bosonic}(r_i)
# Define the balanced beam splitter
B = BeamSplitter(1/sqrt(2))
# Define the output
r_f = ModeOccupation([1,1])
o = FockDetection(r_f)
# Create the event
ev = Event(i,o,B)
# Compute its probability to occur
compute_probability!(ev)
``` | BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 347 | ```julia
# Set the model of partial distinguishability
T = OneParameterInterpolation
# Define an input of 3 photons placed
# among 5 modes
x = 0.74
i = Input{T}(first_modes(3,5), x)
# Loss
l = 0.63
# Interferometer
U = RandHaar(i.m)
# Compute the full output statistics
p_exact, p_truncated, p_sampled =
noisy_distribution(input=i,loss=l,interf=U)
``` | BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 202 | ```julia
i = Input{Bosonic}(first_modes(2,2))
set1 = Subset([1,0])
set2 = Subset([0,1])
interf = Fourier(2)
part = Partition([set1,set2])
(idx,pdf) =
compute_probabilities_partition(interf,part,i)
``` | BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 508 | ```julia
n = 25
m = 400
# Experiment parameters
input_state = first_modes(n,m)
interf = RandHaar(m)
i = Input{Bosonic}(input_state)
# Subset selection
s = Subset(first_modes(Int(m/2),m))
part = Partition(s)
# Want to find all photon counting probabilities
o = PartitionCountsAll(part)
# Define the event and compute probabilities
ev = Event(i,o,interf)
compute_probability!(ev)
# About 30s execution time on a single core
#
# output:
#
# 0 in subset = [1, 2,..., 200]
# p = 4.650035467008141e-8
# ...
``` | BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 311 | ```julia
n = 20
m = 400
# Define an input of 20 photons among 400 modes
i = Input{Bosonic}(first_modes(n,m))
# Define the interferometer
interf = RandHaar(m)
# Set the output measurement
o = FockSample()
# Create the event
ev = Event(i, o, interf)
# Simulate
sample!(ev)
# output:
# state = [0,1,0,...]
``` | BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 47 | ```julia
using Pkg
Pkg.add("BosonSampling")
``` | BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 328 | ```julia
using BosonSampling
using Plots; pyplot()
n = 20
trunc = 10
T = OneParameterInterpolation
for x in 0:0.01:1
i =
Input{T}(first_modes(n,n),x)
set1 = [0 for i in 1:n]
set1[1] = 1
part = Partition([Subset(set1)])
F = Fourier(n)
(idx,pdf) =
compute_probabilities_partition(F,part,i)
end
``` | BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 1720 | # BosonSampling
[](https://AntoineRestivo.github.io/BosonSampling.jl/stable)
[](https://AntoineRestivo.github.io/BosonSampling.jl/dev)
This project implements standard and scattershot BosonSampling in Julia, including boson samplers and certification and optimization tools.
## Functionalities
A wide variety of tools are available:
* Boson-samplers, including partial distinguishability and loss
* Bunching tools and functions
* Various tools to validate experimental boson-samplers
* User-defined optical circuits built from optical elements
* Optimization functions over unitary matrices
* Photon counting tools for subsets and partitions of the output modes
* Tools to study permanent and generalized matrix function conjectures and counter-examples
## Installation
To install the package, launch a Julia REPL session and type
julia> using Pkg; Pkg.add("BosonSampling")
Alternatively, press the `]` key to enter the package manager mode, then enter
add BosonSampling
To use the package, write
using BosonSampling
in your file.
## Related package
The present package takes advantage of the efficient computation of matrix permanents provided by [Permanents.jl](https://github.com/benoitseron/Permanents.jl.git).
## Authors & License
- [Benoît Seron](mailto:[email protected])
- [Antoine Restivo](mailto:[email protected])
Contact can be made by clicking on our names.
The original research presented in the package is done in collaboration with Dr. Leonardo Novo, Prof. Nicolas Cerf.
BosonSampling.jl is licensed under the [MIT license](https://github.com/benoitseron/BosonSampling.jl/blob/main/LICENSE).
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 580 | # BosonSampling.jl Documentation
## About
```@contents
Pages = ["about.md"]
```
## Tutorial
```@contents
Pages = ["tutorial/installation.md",
"tutorial/basic_usage.md",
"tutorial/loss.md",
"tutorial/user_defined_models.md",
"tutorial/boson_samplers.md",
"tutorial/partitions.md",
"tutorial/bunching.md",
"tutorial/certification.md",
"tutorial/optimization.md",
"tutorial/compute_distr.md",
"tutorial/circuits.md",
"tutorial/permanent_conjectures.md"]
```
## API
### Types
### Functions
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 3233 | # Conventions
Photon creation operators are transformed as
``a_j^\dagger \to \sum_k U_{jk} b_k^\dagger``
when going through the interferometer ``\hat{U}``.
Thus, rows correspond to input modes and columns to output modes; that is, the probability that a single photon goes from mode `j` to mode `k` is ``|U_{jk}|^2``.
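As a quick numerical check (a minimal sketch added here for illustration; `rand_haar` is the package's Haar-random matrix generator):

```julia
# Minimal sketch: rows are inputs, columns are outputs,
# so |U[j, k]|^2 is the probability that a single photon
# entering mode j leaves in mode k.
using BosonSampling

m = 4
U = rand_haar(m)            # Haar-random unitary matrix
j, k = 2, 3
p_single = abs2(U[j, k])    # single-photon transfer probability from j to k
sum(abs2, U[j, :]) ≈ 1      # probabilities over all output modes sum to one
```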
These are the conventions used by most people. Let us warn that Valery Shchesnovich uses a convention that is incompatible: ``\hat{U}`` needs to be changed to ``\hat{U}^\dagger``. (He likewise defines the Gram matrix as the transpose of ours, see below.)
By default, we will use [Tichy's conventions](https://arxiv.org/abs/1312.4266)
* Input vector = `r` or `input_state`
* Output vector = `s` or `output_state`
* For a detection that is not just an event: `output_measurement`
* interferometer matrix = `U`
* interferometer matrix `M` of Tichy (with rows corresponding to the input,...) = `scattering_matrix`
* dimension of the interferometer = `m` (size of the interferometer); in previous code, or where the number of photons is irrelevant, it may appear as `n` or `number_photons`
* number of modes occupied = `n`, `number_photons`
See for Tichy: https://arxiv.org/abs/1410.7687, and for Shchesnovich: https://arxiv.org/abs/1410.1506
## Operators change
* Tichy: ``\hat{A}_{j,|\Phi\rangle}^{\dagger} \rightarrow \hat{U} \hat{A}_{j,|\Phi\rangle}^{\dagger} \hat{U}^{-1}=\sum_{k=1}^{m} U_{j, k} \hat{B}_{k,|\Phi\rangle}^{\dagger}``
* Shchesnovich: "A spatial unitary network can be defined by an unitary transformation between input ``a_{k, s}^{\dagger}(\omega)`` and output ``b_{k, s}^{\dagger}(\omega)`` photon creation operators, we set ``a_{k, s}^{\dagger}(\omega)=\sum_{l=1}^{M} U_{k l} b_{l, s}^{\dagger}(\omega)``, where ``U_{k l}`` is the unitary matrix describing such an optical network."
This means that where Tichy uses ``U``, Shchesnovich has ``U^\dagger`` in its place.
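In code, converting a matrix between the two conventions is just a matter of taking the adjoint (a small illustrative sketch):

```julia
# Sketch: the same physical network written in both conventions.
using BosonSampling

U_tichy = rand_haar(4)              # convention used throughout this package
U_shchesnovich = adjoint(U_tichy)   # Shchesnovich's convention uses the adjoint
```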
## Scattering matrix
* Tichy: "Using the mode assignment list ``\vec{d}(\vec{s})=\left(d_{1}, \ldots, d_{n}\right)`` [49], which indicates the mode in which the ``j``-th particle resides, the effective scattering matrix becomes ``M=U_{\vec{d}(\vec{r}), \vec{d}(\vec{s})}``, where our convention identifies the ``j``-th row (column) with the ``j``-th input (output) mode, as illustrated in Fig. 1(a)."
* Shchesnovich: identical (a short sketch of how this submatrix is built in practice follows below)
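In the package this effective matrix is provided by [`scattering_matrix`](@ref); a minimal sketch (the occupations below are only illustrative):

```julia
# Sketch: building the effective scattering matrix M = U_{d(r), d(s)}
# from the input and output mode occupations (rows = input, columns = output).
using BosonSampling

m = 4
U = rand_haar(m)
r = [1, 1, 0, 0]                  # input mode occupation
s = [0, 1, 0, 1]                  # output mode occupation
M = scattering_matrix(U, r, s)    # 2x2 submatrix U[d(r), d(s)], with d the mode assignment list
```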
## Bunching
The H-matrix follows a convention different from that of Valery Shchesnovich: ``H_{a,b} = \sum_{l \in \mathcal{K}} U_{l,a} U_{l,b}^{*}``, see [`H_matrix`](@ref).
## Conventions regarding Julia:
Unlike most languages, Julia indexing starts at 1, so counting goes 1, 2, 3, ... instead of starting at zero as 0, 1, 2, ...
## Gram matrices :
Gram matrices are defined as ``S_{ij} = \langle\phi_i|\phi_j\rangle`` for ``i, j = 1, \ldots, n``. This means that if the labels of the photons are swapped, you need to enter another distinguishability matrix with swapped labels accordingly. See [`GramMatrix`](@ref).
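For instance, relabelling the photons permutes the rows and columns of the Gram matrix accordingly; a plain linear-algebra sketch (toy states, independent of the package types):

```julia
# Sketch: Gram matrix of toy internal states and its behaviour under relabelling.
using LinearAlgebra

ϕ = [normalize(rand(ComplexF64, 3)) for _ in 1:3]    # toy internal states |ϕ_i⟩
S = [dot(ϕ[i], ϕ[j]) for i in 1:3, j in 1:3]          # S[i,j] = ⟨ϕ_i|ϕ_j⟩ (dot conjugates the first argument)
perm = [2, 1, 3]                                      # swap the labels of photons 1 and 2
S_relabelled = S[perm, perm]                          # the distinguishability matrix must be permuted the same way
```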
## Warning about precision :
In [`EventProbability`](@ref): the precision is set to machine precision `eps()` for non-randomised methods, although the actual numerical error is of course larger; this should eventually be handled with permanent approximations, see for instance
https://arxiv.org/abs/1904.06229
## Distances :
Beware of the different TVD conventions (1/2 in front or not). See [`tvd`](@ref) for instance.
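For concreteness, the two conventions differ only by the factor of one half; a sketch with assumed helper names (not the package's definitions):

```julia
# Sketch: the two common total variation distance conventions.
tvd_half(p, q) = 0.5 * sum(abs.(p .- q))   # convention with the 1/2 prefactor (bounded by 1)
tvd_l1(p, q)   = sum(abs.(p .- q))         # plain L1 convention (bounded by 2)
```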
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 861 | # Benchmarks
## Why Julia?
When simulating a boson sampling experiment, via for instance [`cliffords_sampler`](@ref) or
[`noisy_sampler`](@ref), the most time consuming part is the computation of the
probabilities. Indeed, the probability to detect the state ``|l_1, \ldots, l_m\rangle`` at
the output of an interferometer ``\hat{U}``, given the input state ``|\psi_{in}\rangle = |k_1, \ldots, k_m\rangle``,
is related to the permanent of the effective scattering matrix ``M`` (built from ``U`` and the input and output mode occupations) through
```math
|\langle l_1, \ldots, l_m|\hat{U}|\psi_{in}\rangle|^2 = \frac{|\mathrm{Perm}(M)|^2}{k_1! \cdots k_m! \, l_1! \cdots l_m!}.
```
Since computing these probabilities makes intensive use of [Ryser's algorithm](https://en.wikipedia.org/wiki/Computing_the_permanent#Ryser_formula), we compare here the running time of different implementations of this algorithm when computing the permanent of Haar-distributed random matrices of dimension `n`:

| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 83 | ```@autodocs
Modules = [BosonSampling]
Pages = ["bayesian.jl"]
Private = false
```
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 83 | ```@autodocs
Modules = [BosonSampling]
Pages = ["bunching.jl"]
Private = false
```
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 87 | ```@autodocs
Modules = [BosonSampling]
Pages = ["distribution.jl"]
Private = false
```
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 144 | ```@autodocs
Modules = [BosonSampling]
Pages = ["legacy.jl", "partition_expectation_values.jl", "partitions/partitions.jl"]
Private = false
```
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 181 | ```@autodocs
Modules = [BosonSampling]
Pages = ["bapat_sunder.jl", "counter_example_functions.jl", "counter_example_numerical_search.jl", "permanent_on_top.jl"]
Private = false
```
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |
|
[
"MIT"
] | 1.0.2 | 993320357ce108b09d7707bb172ad04a27713bbf | docs | 86 | ```@autodocs
Modules = [BosonSampling]
Pages = ["proba_tools.jl"]
Private = false
```
| BosonSampling | https://github.com/benoitseron/BosonSampling.jl.git |