licenses (sequence, lengths 1-3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, lengths 2-8) | text (string, lengths 25-67.1M) | package_name (string, lengths 2-41) | repo (string, lengths 33-86)
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 2296 | """
$(TYPEDEF)
A hybrid noise model with both low- and high-frequency noise. The high-frequency noise is characterized by an Ohmic bath, and the low-frequency noise is characterized by the MRT width `W`.
$(FIELDS)
"""
struct HybridOhmicBath <: AbstractBath
"MRT width (2π GHz)"
W::Float64
"low spectrum reorganization energy (2π GHz)"
ϵl::Float64
"strength of high frequency Ohmic bath"
η::Float64
"cutoff frequency"
ωc::Float64
"inverse temperature"
β::Float64
end
function Base.show(io::IO, ::MIME"text/plain", m::HybridOhmicBath)
print(
io,
"Hybrid Ohmic bath instance:\n",
"W (mK): ",
freq_2_temperature(m.W / 2 / pi),
"\n",
"ϵl (GHz): ",
m.ϵl / 2 / pi,
"\n",
"η (unitless): ",
m.η / 2 / π,
"\n",
"ωc (GHz): ",
m.ωc / pi / 2,
"\n",
"T (mK): ",
β_2_temperature(m.β)
)
end
"""
HybridOhmic(W, η, fc, T)
Construct a `HybridOhmicBath` object with parameters in physical units. `W`: MRT width (mK); `η`: interaction strength (unitless); `fc`: Ohmic cutoff frequency (GHz); `T`: temperature (mK).
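# Examples
An illustrative sketch (the parameter values below are arbitrary):
```julia-repl
julia> bath = HybridOhmic(6, 0.01, 4, 12.5)
```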
"""
function HybridOhmic(W, η, fc, T)
# scale W from temperature units (mK) to angular frequency (2π GHz)
W = 2 * pi * temperature_2_freq(W)
# rescale η to account for the different definition of the Ohmic spectrum used here
η = 2 * π * η
β = temperature_2_β(T)
ωc = 2 * π * fc
ϵl = W^2 * β / 2
HybridOhmicBath(W, ϵl, η, ωc, β)
end
"""
Gₕ(ω, bath::HybridOhmicBath)
High frequency noise spectrum of the HybridOhmicBath `bath`.
"""
function Gₕ(ω, bath::HybridOhmicBath)
η = bath.η
S0 = η / bath.β
if isapprox(ω, 0, atol=1e-8)
return 1 / S0
else
return 4 * η * ω * exp(-abs(ω) / bath.ωc) / (1 - exp(-bath.β * ω)) / (ω^2 + 4*S0^2)
end
end
"""
Gₗ(ω, bath::HybridOhmicBath)
Low frequency noise spectrum of the HybridOhmicBath `bath`.
"""
function Gₗ(ω, bath::HybridOhmicBath)
W² = bath.W^2
ϵ = bath.ϵl
sqrt(π / 2 / W²) * exp(-(ω - 4ϵ)^2 / 8 / W²)
end
function spectrum(ω, bath::HybridOhmicBath)
Gl(x) = Gₗ(x, bath)
Gh(x) = Gₕ(x, bath)
integrand(x) = Gl(ω - x) * Gh(x)
a, b = sort([0.0, bath.ϵl])
res, err = quadgk(integrand, -Inf, a, b, Inf)
res / 2 / π
end | OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 1955 | import SpecialFunctions: trigamma
"""
OhmicBath
Ohmic bath object to hold a particular parameter set.
**Fields**
- `η` -- strength.
- `ωc` -- cutoff frequency.
- `β` -- inverse temperature.
"""
struct OhmicBath <: AbstractBath
η::Float64
ωc::Float64
β::Float64
end
"""
$(SIGNATURES)
Construct an OhmicBath object from parameters with physical units: `η`--unitless interaction strength; `fc`--cutoff frequency in GHz; `T`--temperature in mK.
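# Examples
An illustrative sketch (the parameter values below are arbitrary):
```julia-repl
julia> bath = Ohmic(1e-4, 4, 16)
julia> spectrum(2π * 0.1, bath)
julia> correlation(0.2, bath)
```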
"""
function Ohmic(η, fc, T)
ωc = 2 * π * fc
β = temperature_2_β(T)
OhmicBath(η, ωc, β)
end
"""
$(SIGNATURES)
Calculate spectral density ``γ(ω)`` of `bath`.
"""
function spectrum(ω, bath::OhmicBath)
if isapprox(ω, 0.0, atol=1e-9)
return 2 * pi * bath.η / bath.β
else
return 2 * pi * bath.η * ω * exp(-abs(ω) / bath.ωc) / (1 - exp(-bath.β * ω))
end
end
"""
$(SIGNATURES)
Calculate the two point correlation function ``C(τ)`` of `bath`.
"""
function correlation(τ, bath::OhmicBath)
x2 = 1 / bath.β / bath.ωc
x1 = 1.0im * τ / bath.β
bath.η * (trigamma(-x1 + 1 + x2) + trigamma(x1 + x2)) / bath.β^2
end
"""
$(SIGNATURES)
Calculate the polaron transformed correlation function of `bath`.
"""
function polaron_correlation(τ, bath::OhmicBath)
res = (1 + 1.0im * bath.ωc * τ)^(-4 * bath.η)
if !isapprox(τ, 0, atol=1e-9)
x = π * τ / bath.β
res *= (x / sinh(x))^(4 * bath.η)
end
res
end
@inline polaron_correlation(t1, t2, bath::OhmicBath) = polaron_correlation(t1 - t2, bath)
function info_freq(bath::OhmicBath)
println("ωc (GHz): ", bath.ωc / pi / 2)
println("T (GHz): ", temperature_2_freq(β_2_temperature(bath.β)))
end
function Base.show(io::IO, ::MIME"text/plain", m::OhmicBath)
print(
io,
"Ohmic bath instance:\n",
"η (unitless): ",
m.η,
"\n",
"ωc (GHz): ",
m.ωc / pi / 2,
"\n",
"T (mK): ",
β_2_temperature(m.β),
)
end
| OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 1541 | import Distributions: Exponential, product_distribution
"""
$(TYPEDEF)
A symmetric random telegraph noise (RTN) with switching rate `γ/2` and magnitude `b`.
$(FIELDS)
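# Examples
A minimal sketch with arbitrary parameters:
```julia-repl
julia> rtn = SymetricRTN(0.1, 1.0)
julia> correlation(2.0, rtn)
julia> spectrum(0.5, rtn)
```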
"""
struct SymetricRTN
"Magnitude"
b::Any
"Two times the switching probability"
γ::Any
end
correlation(τ, R::SymetricRTN) = R.b^2 * exp(-R.γ * τ)
spectrum(ω, R::SymetricRTN) = 2 * R.b^2 * R.γ / (ω^2 + R.γ^2)
construct_distribution(R::SymetricRTN) = Exponential(1 / R.γ)
"""
$(TYPEDEF)
An ensemble of random telegraph noise.
$(FIELDS)
"""
struct EnsembleFluctuator{T} <: StochasticBath
"A list of RTNs"
f::Vector{T}
end
"""
$(SIGNATURES)
Build the `EnsembleFluctuator` object from a list of amplitudes `b` and a list of switch rates `ω`.
# Examples
```julia-repl
julia> EnsembleFluctuator([1, 1], [1, 2])
Fluctuator ensemble with 2 fluctuators
```
"""
EnsembleFluctuator(b::AbstractArray{T}, ω::AbstractArray{T}) where {T<:Number} =
EnsembleFluctuator([SymetricRTN(x, y) for (x, y) in zip(b, ω)])
correlation(τ, E::EnsembleFluctuator) = sum((x) -> correlation(τ, x), E.f)
spectrum(ω, E::EnsembleFluctuator) = sum((x) -> spectrum(ω, x), E.f)
construct_distribution(E::EnsembleFluctuator) =
product_distribution([construct_distribution(x) for x in E.f])
Base.length(E::EnsembleFluctuator) = length(E.f)
Base.show(io::IO, ::MIME"text/plain", E::EnsembleFluctuator) =
print(io, "Fluctuator ensemble with ", length(E), " fluctuators")
Base.show(io::IO, E::EnsembleFluctuator) =
print(io, "Fluctuator ensemble with ", length(E), " fluctuators")
| OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 6018 | """
$(TYPEDEF)
Defines constant system bath coupling operators.
# Fields
$(FIELDS)
"""
struct ConstantCouplings <: AbstractCouplings
"1-D array for independent coupling operators"
mats::Vector{AbstractMatrix}
"String representation for the coupling (for display purpose)"
str_rep::Union{Vector{String},Nothing}
end
(c::ConstantCouplings)(t) = c.mats
Base.iterate(c::ConstantCouplings, state = 1) =
state > length(c.mats) ? nothing : ((x) -> c.mats[state], state + 1)
Base.getindex(c::ConstantCouplings, inds...) = (x) -> getindex(c.mats, inds...)
Base.length(c::ConstantCouplings) = length(c.mats)
Base.eltype(c::ConstantCouplings) = typeof(c.mats[1])
Base.size(c::ConstantCouplings) = size(c.mats[1])
Base.size(c::ConstantCouplings, d) = size(c.mats[1], d)
isconstant(::ConstantCouplings) = true
"""
function ConstantCouplings(mats::Union{Vector{Matrix{T}},Vector{SparseMatrixCSC{T,Int}}}; unit=:h) where {T<:Number}
Constructor of the `ConstantCouplings` object. `mats` is a 1-D array of matrices. The string representation field `str_rep` is set to `nothing` by this constructor. `unit` specifies which unit is set to one when defining `mats` -- `:h` or `:ħ`. The `mats` will be scaled by ``2π`` if `unit` is `:h`.
"""
function ConstantCouplings(
mats::Union{Vector{Matrix{T}},Vector{SparseMatrixCSC{T,Int}}};
unit = :h,
) where {T<:Number}
msize = size(mats[1])
if msize[1] <= 10
if issparse(mats[1])
@warn "For matrices smaller than 10×10, use StaticArrays by default."
mats = Array.(mats)
end
mats = [SMatrix{msize[1],msize[2]}(unit_scale(unit) * m) for m in mats]
else
mats = unit_scale(unit) .* mats
end
ConstantCouplings(mats, nothing)
end
"""
function ConstantCouplings(c::Vector{T}; sp = false, unit=:h) where T <: AbstractString
If the first argument is a 1-D array of strings, the constructor will automatically construct the matrices represented by the strings.
"""
function ConstantCouplings(
c::Vector{T};
sp = false,
unit = :h,
) where {T<:AbstractString}
mats = q_translate.(c, sp = sp)
msize = size(mats[1])
if msize[1] <= 10
if sp
@warn "For matrices smaller than 10×10, use StaticArrays by default."
mats = Array.(mats)
end
mats = [SMatrix{msize[1],msize[2]}(unit_scale(unit) * m) for m in mats]
else
mats = unit_scale(unit) .* mats
end
ConstantCouplings(mats, c)
end
function rotate(C::ConstantCouplings, v)
mats = [v' * m * v for m in C.mats]
ConstantCouplings(mats, unit=:ħ)
end
"""
function collective_coupling(op, num_qubit; sp = false, unit = :h)
Create a `ConstantCouplings` object with the operator `op` acting on each qubit. `op` is the string representation of one of the Pauli matrices. `num_qubit` is the total number of qubits. `sp` sets whether to use sparse matrices. `unit` sets which unit is one -- `:h` or `:ħ`.
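# Examples
For example, `collective_coupling("Z", 2)` builds the coupling operators corresponding to the strings `["ZI", "IZ"]`:
```julia-repl
julia> collective_coupling("Z", 2)
```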
"""
function collective_coupling(op, num_qubit; sp = false, unit = :h)
res = Vector{String}()
for i = 1:num_qubit
temp = "I"^(i - 1) * uppercase(op) * "I"^(num_qubit - i)
push!(res, temp)
end
ConstantCouplings(res; sp = sp, unit = unit)
end
Base.summary(C::ConstantCouplings) = string(
TYPE_COLOR,
nameof(typeof(C)),
NO_COLOR,
" with ",
TYPE_COLOR,
eltype(C.mats[1]),
NO_COLOR,
)
function Base.show(io::IO, C::ConstantCouplings)
println(io, summary(C))
print(io, "and string representation: ")
show(io, C.str_rep)
end
"""
$(TYPEDEF)
Defines a single time dependent system bath coupling operator. It is defined as ``S(s)=∑ᵢfᵢ(s)×Mᵢ``. The keyword argument `unit` sets which unit is one -- `:h` or `:ħ`.
# Fields
$(FIELDS)
# Examples
```julia-repl
julia> TimeDependentCoupling([(s)->s], [σz], unit=:ħ)
```
"""
struct TimeDependentCoupling
"1-D array of time dependent functions"
funcs::Any
"1-D array of constant matrics"
mats::Any
function TimeDependentCoupling(funcs, mats; unit = :h)
new(funcs, unit_scale(unit) * mats)
end
end
(c::TimeDependentCoupling)(t) = sum((x) -> x[1](t) * x[2], zip(c.funcs, c.mats))
Base.size(c::TimeDependentCoupling) = size(c.mats[1])
Base.size(c::TimeDependentCoupling, d) = size(c.mats[1], d)
abstract type AbstractTimeDependentCouplings <: AbstractCouplings end
isconstant(::AbstractTimeDependentCouplings) = false
"""
$(TYPEDEF)
Defines a 1-D array of time dependent system bath coupling operators.
# Fields
$(FIELDS)
"""
struct TimeDependentCouplings <: AbstractTimeDependentCouplings
"A tuple of single `TimeDependentCoupling` operators"
coupling::Tuple
function TimeDependentCouplings(args...)
new(args)
end
end
(c::TimeDependentCouplings)(t) = [x(t) for x in c.coupling]
Base.size(c::TimeDependentCouplings) = size(c.coupling[1])
Base.size(c::TimeDependentCouplings, d) = size(c.coupling[1], d)
"""
$(TYPEDEF)
`CustomCouplings` is a container for any user defined coupling operators.
# Fields
$(FIELDS)
"""
struct CustomCouplings <: AbstractTimeDependentCouplings
"A 1-D array of callable objects that returns coupling matrices"
coupling::Any
"Size of the coupling operator"
size::Any
end
"""
$(SIGNATURES)
Create a `CustomCouplings` object from a list of functions `funcs`.
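# Examples
A minimal sketch; `σz` is assumed to be the Pauli-z matrix defined elsewhere in this package:
```julia-repl
julia> c = CustomCouplings([(s) -> s * σz])
julia> c(0.5)
```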
"""
function CustomCouplings(funcs; unit = :h)
mat = funcs[1](0.0)
if unit == :h
funcs = [(s) -> 2π * f(s) for f in funcs]
elseif unit != :ħ
throw(ArgumentError("The unit can only be :h or :ħ."))
end
CustomCouplings(funcs, size(mat))
end
(c::CustomCouplings)(s) = [x(s) for x in c.coupling]
Base.size(c::CustomCouplings) = c.size
Base.size(c::CustomCouplings, d) = c.size[d]
Base.getindex(c::AbstractTimeDependentCouplings, inds...) = getindex(c.coupling, inds...)
Base.iterate(c::AbstractTimeDependentCouplings, state = 1) =
Base.iterate(c.coupling, state)
Base.length(c::AbstractTimeDependentCouplings) = length(c.coupling)
Base.eltype(c::AbstractTimeDependentCouplings) = typeof(c.coupling[1])
| OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 6878 | """
$(TYPEDEF)
Base for types defining system bath interactions in open quantum system models.
"""
abstract type AbstractInteraction end
"""
$(TYPEDEF)
An object to hold coupling operator and the corresponding bath object.
$(FIELDS)
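# Examples
An illustrative sketch combining constructors defined elsewhere in this package (parameter values are arbitrary):
```julia-repl
julia> coupling = ConstantCouplings(["ZI", "IZ"])
julia> bath = Ohmic(1e-4, 4, 16)
julia> interaction = Interaction(coupling, bath)
julia> interaction_set = InteractionSet(interaction)
```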
"""
struct Interaction <: AbstractInteraction
"system operator"
coupling::AbstractCouplings
"bath coupling to the system operator"
bath::AbstractBath
end
isconstant(x::Interaction) = isconstant(x.coupling)
rotate(i::Interaction, v) = Interaction(rotate(i.coupling, v), i.bath)
"""
$(TYPEDEF)
A Lindblad operator, defined by a rate ``γ`` and the corresponding operator ``L``.
$(FIELDS)
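# Examples
Both constant and time-dependent rates and operators are supported. In the sketch below `σz` is assumed to be the Pauli-z matrix defined elsewhere in this package:
```julia-repl
julia> Lindblad(0.1, σz)
julia> Lindblad((s) -> 0.1 * s, (s) -> σz)
```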
"""
struct Lindblad <: AbstractInteraction
"Lindblad rate"
γ::Any
"Lindblad operator"
L::Any
"size"
size::Tuple
end
Lindblad(γ::Number, L::AbstractMatrix) = Lindblad((s) -> γ, (s) -> L, size(L))
Lindblad(γ::Number, L) = Lindblad((s) -> γ, L, size(L(0)))
Lindblad(γ, L::AbstractMatrix) = Lindblad(γ, (s) -> L, size(L))
function Lindblad(γ, L)
if !(typeof(γ(0)) <: Number)
throw(ArgumentError("γ should return a number."))
end
if !(typeof(L(0)) <: Matrix)
throw(ArgumentError("L should return a matrix."))
end
Lindblad(γ, L, size(L(0)))
end
Base.size(lind::Lindblad) = lind.size
"""
$(TYPEDEF)
A container for different system-bath interactions.
$(FIELDS)
"""
struct InteractionSet{T<:Tuple}
"A tuple of Interaction"
interactions::T
end
InteractionSet(inters::AbstractInteraction...) = InteractionSet(inters)
rotate(inters::InteractionSet, v) = InteractionSet([rotate(i, v) for i in inters]...)
Base.length(inters::InteractionSet) = Base.length(inters.interactions)
Base.getindex(inters::InteractionSet, key...) =
Base.getindex(inters.interactions, key...)
Base.iterate(iters::InteractionSet, state=1) =
Base.iterate(iters.interactions, state)
# The following functions are used to build different Liouvillians from
# the `InteractionSet`. They are not publicly available in the current
# release.
function redfield_from_interactions(iset::InteractionSet, U, Ta, atol, rtol)
kernels = [build_redfield_kernel(i) for i in iset if !(typeof(i.bath) <: StochasticBath)]
[RedfieldLiouvillian(kernels, U, Ta, atol, rtol)]
end
function cg_from_interactions(iset::InteractionSet, U, tf, Ta, atol, rtol)
if Ta === nothing || ndims(Ta) == 0
kernels = [build_cg_kernel(i, tf, Ta) for i in iset if !(typeof(i.bath) <: StochasticBath)]
else
if length(Ta) != length(iset)
throw(ArgumentError("Ta should have the same length as the interaction sets."))
end
kernels = [build_cg_kernel(i, tf, t) for (i, t) in zip(iset, Ta) if !(typeof(i.bath) <: StochasticBath)]
end
[CGLiouvillian(kernels, U, atol, rtol)]
end
function ule_from_interactions(iset::InteractionSet, U, Ta, atol, rtol)
kernels = [build_ule_kernel(i) for i in iset if !(typeof(i.bath) <: StochasticBath)]
[ULELiouvillian(kernels, U, Ta, atol, rtol)]
end
function davies_from_interactions(iset::InteractionSet, ω_range, lambshift::Bool, lambshift_kwargs)
davies_list = []
for i in iset
coupling = i.coupling
bath = i.bath
if !(typeof(bath) <: StochasticBath)
γfun = build_spectrum(bath)
Sfun = build_lambshift(ω_range, lambshift, bath, lambshift_kwargs)
if typeof(bath) <: CorrelatedBath
push!(davies_list, CorrelatedDaviesGenerator(coupling, γfun, Sfun, build_inds(bath)))
else
push!(davies_list, DaviesGenerator(coupling, γfun, Sfun))
end
end
end
davies_list
end
function davies_from_interactions(gap_idx, iset::InteractionSet, ω_range, lambshift::Bool, lambshift_kwargs::Dict)
davies_list = []
for i in iset
coupling = i.coupling
bath = i.bath
if !(typeof(bath) <: StochasticBath)
γfun = build_spectrum(bath)
Sfun = build_lambshift(ω_range, lambshift, bath, lambshift_kwargs)
if typeof(bath) <: CorrelatedBath
# TODO: optimize the performance of `CorrelatedDaviesGenerator`` if `coupling` is constant
push!(davies_list, build_const_correlated_davies(coupling, gap_idx, γfun, Sfun, build_inds(bath)))
else
push!(davies_list, build_const_davies(coupling, gap_idx, γfun, Sfun))
end
end
end
davies_list
end
function onesided_ame_from_interactions(iset::InteractionSet, ω_range, lambshift::Bool, lambshift_kwargs)
l_list = []
for i in iset
coupling = i.coupling
bath = i.bath
if !(typeof(bath) <: StochasticBath)
γfun = build_spectrum(bath)
Sfun = build_lambshift(ω_range, lambshift, bath, lambshift_kwargs)
if typeof(i.bath) <: CorrelatedBath
inds = build_inds(bath)
else
inds = ((i, i) for i in 1:length(coupling))
γfun = SingleFunctionMatrix(γfun)
Sfun = SingleFunctionMatrix(Sfun)
end
push!(l_list, OneSidedAMELiouvillian(coupling, γfun, Sfun, inds))
end
end
l_list
end
function fluctuator_from_interactions(iset::InteractionSet)
f_list = []
for i in iset
coupling = i.coupling
bath = i.bath
if typeof(bath) <: EnsembleFluctuator
num = length(coupling)
dist = construct_distribution(bath)
b0 = [x.b for x in bath.f] .* rand([-1, 1], length(dist), num)
next_τ, next_idx = findmin(rand(dist, num))
push!(f_list, FluctuatorLiouvillian(coupling, dist, b0, next_idx, next_τ, sum(b0, dims=1)[:]))
end
end
f_list
end
lindblad_from_interactions(iset::InteractionSet) =
[LindbladLiouvillian([i for i in iset if typeof(i) <: Lindblad])]
function build_redfield_kernel(i::Interaction)
coupling = i.coupling
bath = i.bath
cfun = build_correlation(bath)
rinds = typeof(cfun) == SingleFunctionMatrix ?
((i, i) for i = 1:length(coupling)) : build_inds(bath)
# the kernel is currently set as a tuple
(rinds, coupling, cfun)
end
function build_cg_kernel(i::Interaction, tf, Ta)
coupling = i.coupling
cfun = build_correlation(i.bath)
Ta = Ta === nothing ? coarse_grain_timescale(i.bath, tf)[1] : Ta
rinds = typeof(cfun) == SingleFunctionMatrix ?
((i, i) for i = 1:length(coupling)) : build_inds(i.bath)
# the kernel is currently set as a tuple
(rinds, coupling, cfun, Ta)
end
function build_ule_kernel(i::Interaction)
coupling = i.coupling
cfun = build_jump_correlation(i.bath)
rinds = typeof(cfun) == SingleFunctionMatrix ?
((i, i) for i = 1:length(coupling)) : build_inds(i.bath)
# the kernel is currently set as a tuple
(rinds, coupling, cfun)
end | OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 4286 | abstract type AbstractDiagonalOperator{T <: Number} end
abstract type AbstractGeometricOperator{T <: Number} end
Base.length(D::AbstractDiagonalOperator) = length(D.u_cache)
Base.eltype(::AbstractDiagonalOperator{T}) where {T} = T
Base.size(G::AbstractGeometricOperator, inds...) = size(G.u_cache, inds...)
struct DiagonalOperator{T} <: AbstractDiagonalOperator{T}
ω_vec::Tuple
u_cache::Vector{T}
end
function DiagonalOperator(funcs...)
num_type = typeof(funcs[1](0.0))
DiagonalOperator{num_type}(funcs, zeros(num_type, length(funcs)))
end
DiagonalOperator(funcs::Vector{T}) where {T} = DiagonalOperator(funcs...)
function (D::DiagonalOperator)(t)
for i in eachindex(D.ω_vec)
D.u_cache[i] = D.ω_vec[i](t)
end
Diagonal(D.u_cache)
end
struct DiagonalFunction{T} <: AbstractDiagonalOperator{T}
func
u_cache
end
function DiagonalFunction(func)
tmp = func(0.0)
DiagonalFunction{eltype(tmp)}(func, zero(tmp))
end
function (D::DiagonalFunction)(t)
D.u_cache .= D.func(t)
Diagonal(D.u_cache)
end
struct GeometricOperator{T} <: AbstractGeometricOperator{T}
funcs::Tuple
u_cache::Matrix{T}
end
function GeometricOperator(funcs...)
dim = (sqrt(1 + 8 * length(funcs)) - 1) / 2
if !isinteger(dim)
throw(ArgumentError("Invalid input length."))
else
dim = Int(dim)
end
num_type = typeof(funcs[1](0.0))
GeometricOperator{num_type}(funcs, zeros(num_type, dim + 1, dim + 1))
end
GeometricOperator(funcs::Vector{T}) where {T} = GeometricOperator(funcs...)
function (G::GeometricOperator)(t)
len = size(G.u_cache, 1)
for j = 1:len
for i = (j + 1):len
G.u_cache[i, j] = G.funcs[i - 1 + (j - 1) * len](t)
end
end
Hermitian(G.u_cache, :L)
end
struct ZeroGeometricOperator{T} <: AbstractGeometricOperator{T}
size::Int
end
(G::ZeroGeometricOperator{T})(s) where {T} = Diagonal(zeros(T, G.size))
"""
$(TYPEDEF)
Defines a time dependent Hamiltonian in the adiabatic frame.
# Fields
$(FIELDS)
"""
struct AdiabaticFrameHamiltonian{T} <: AbstractDenseHamiltonian{T}
"Geometric part"
geometric::AbstractGeometricOperator
"Adiabatic part"
diagonal::AbstractDiagonalOperator
"Size of the Hamiltonian"
size::Any
end
function AdiabaticFrameHamiltonian(D::AbstractDiagonalOperator{T}, G::AbstractGeometricOperator) where {T}
l = length(D)
AdiabaticFrameHamiltonian{complex(T)}(G, D, (l, l))
end
issparse(::AdiabaticFrameHamiltonian) = false
"""
function AdiabaticFrameHamiltonian(ωfuns, geofuns)
Constructor of the adiabatic frame Hamiltonian. `ωfuns` is a 1-D array of functions which specify the eigenenergies (in `GHz`) of the Hamiltonian. `geofuns` is a 1-D array of functions which specify the geometric phases of the Hamiltonian. `geofuns` can be thought of as a flattened lower triangular matrix (without diagonal elements) in column-major order.
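# Examples
An illustrative two-level sketch (the functions below are arbitrary):
```julia-repl
julia> H = AdiabaticFrameHamiltonian([(s) -> -s, (s) -> s], [(s) -> 0.1])
```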
"""
function AdiabaticFrameHamiltonian(ωfuns, geofuns)
if isa(ωfuns, AbstractVector)
D = DiagonalOperator(ωfuns)
elseif isa(ωfuns, Function)
D = DiagonalFunction(ωfuns)
end
l = length(D)
T = eltype(D)
if isnothing(geofuns) || isempty(geofuns)
G = ZeroGeometricOperator{T}(l)
else
G = GeometricOperator(geofuns)
if size(G, 1) != l
error("Diagonal and geometric operators do not match in size.")
end
end
AdiabaticFrameHamiltonian(D, G)
end
function (H::AdiabaticFrameHamiltonian)(tf::Real, s::Real)
ω = 2π * H.diagonal(s)
off = H.geometric(s) / tf
ω + off
end
get_cache(H::AdiabaticFrameHamiltonian{T}) where {T} = zeros(T, size(H))
"""
function evaluate(H::AdiabaticFrameHamiltonian, s, tf)
Evaluate the adiabatic frame Hamiltonian at (unitless) time `s`, with total annealing time `tf` (in units of ``ns``). The final result is given in units of ``GHz``.
"""
function evaluate(H::AdiabaticFrameHamiltonian, s, tf)
ω = H.diagonal(s)
off = H.geometric(s) / tf
ω + off
end
function (h::AdiabaticFrameHamiltonian)(
du::Matrix{T},
u::Matrix{T},
tf::Real,
s::Real,
) where {T <: Number}
ω = h.diagonal(s)
du .= -2.0im * π * (ω * u - u * ω)
G = h.geometric(s)
du .+= -1.0im * (G * u - u * G) / tf
end
| OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 1455 | """
$(TYPEDEF)
Defines a time dependent dense Hamiltonian object with custom function.
# Fields
$(FIELDS)
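# Examples
Objects of this type are created via `hamiltonian_from_function` defined below. A minimal sketch, assuming `σx` and `σz` are the Pauli matrices defined elsewhere in this package:
```julia-repl
julia> H = hamiltonian_from_function((s) -> (1 - s) * σx + s * σz)
```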
"""
struct CustomDenseHamiltonian{T<:Number,dimensionless_time,in_place} <: AbstractHamiltonian{T}
"""Function for the Hamiltonian `H(s)`"""
f::Any
"""Size"""
size::Tuple
end
issparse(::CustomDenseHamiltonian) = false
function hamiltonian_from_function(func; in_place=false, dimensionless_time=true)
hmat = func(0.0)
CustomDenseHamiltonian{eltype(hmat),dimensionless_time,in_place}(func, size(hmat))
end
get_cache(H::CustomDenseHamiltonian) = zeros(eltype(H), size(H))
(H::CustomDenseHamiltonian)(s) = H.f(s)
function update_cache!(cache, H::CustomDenseHamiltonian{T,dt,false}, ::Any, s::Real) where {T,dt}
cache .= -1.0im * H(s)
end
function update_cache!(cache, H::CustomDenseHamiltonian{T,dt,true}, ::Any, s::Real) where {T,dt}
H.f(cache, s)
end
function update_vectorized_cache!(cache, H::CustomDenseHamiltonian, ::Any, s::Real)
hmat = H(s)
iden = one(hmat)
cache .= 1.0im * (transpose(hmat) ⊗ iden - iden ⊗ hmat)
end
function (h::CustomDenseHamiltonian{T,dt,false})(du, u::AbstractMatrix, ::Any, s::Real) where {T,dt}
fill!(du, 0.0 + 0.0im)
H = h(s)
gemm!('N', 'N', -1.0im, H, u, 1.0 + 0.0im, du)
gemm!('N', 'N', 1.0im, u, H, 1.0 + 0.0im, du)
end
function (h::CustomDenseHamiltonian{T,dt,true})(du, u::AbstractMatrix, p::Any, s::Real) where {T,dt}
h.f(du, u, p, s)
end | OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 5580 | """
$(TYPEDEF)
Defines a time dependent Hamiltonian object using Julia arrays.
# Fields
$(FIELDS)
"""
struct DenseHamiltonian{T<:Number,dimensionless_time} <: AbstractDenseHamiltonian{T}
"List of time dependent functions"
f::Vector
"List of constant matrices"
m::Vector
"Internal cache"
u_cache::Matrix{T}
"Size"
size::Tuple
end
"""
$(SIGNATURES)
Constructor of the `DenseHamiltonian` type. `funcs` and `mats` are lists of time-dependent functions and the corresponding matrices. The Hamiltonian can be represented as ``∑ᵢfuncs[i](s)×mats[i]``.
`unit` specifies whether `:h` or `:ħ` is set to one when defining `funcs` and `mats`. The `mats` will be scaled by ``2π`` if `unit` is `:h`.
`dimensionless_time` specifies whether the arguments of the functions are dimensionless (normalized to the total evolution time).
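# Examples
A minimal sketch of a single-qubit annealing Hamiltonian ``H(s) = (1-s)σx + sσz``, assuming `σx` and `σz` are the Pauli matrices defined elsewhere in this package:
```julia-repl
julia> H = DenseHamiltonian([(s) -> 1 - s, (s) -> s], [σx, σz])
```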
"""
function DenseHamiltonian(funcs, mats; unit=:h, dimensionless_time=true)
if any((x) -> size(x) != size(mats[1]), mats)
throw(ArgumentError("Matrices in the list do not have the same size."))
end
if is_complex(funcs, mats)
mats = complex.(mats)
end
hsize = size(mats[1])
mats = unit_scale(unit) * mats
cache = similar(mats[1])
DenseHamiltonian{eltype(mats[1]),dimensionless_time}(funcs, mats, cache, hsize)
end
function Base.:+(h1::DenseHamiltonian, h2::DenseHamiltonian)
@assert size(h1) == size(h2) "The two Hamiltonians need to have the same size."
@assert isdimensionlesstime(h1) == isdimensionlesstime(h2) "The two Hamiltonians need to have the same type of time arguments."
(m1, m2) = promote(h1.m, h2.m)
cache = similar(m1[1])
mats = [m1; m2]
funcs = [h1.f; h2.f]
hsize = size(h1)
DenseHamiltonian{eltype(m1[1]),isdimensionlesstime(h1)}(funcs, mats, cache, hsize)
end
"""
function (h::DenseHamiltonian)(s::Real)
Calling the Hamiltonian returns the value ``2πH(s)``. The argument `s` is the (dimensionless) time. The returned matrix is in the unit of angular frequency.
"""
function (h::DenseHamiltonian)(s::Real)
fill!(h.u_cache, 0.0)
for i = 1:length(h.f)
@inbounds axpy!(h.f[i](s), h.m[i], h.u_cache)
end
h.u_cache
end
# The third argument is not essential for `DenseHamiltonian`
# It exists to keep the `update_cache!` interface consistent across
# all `AbstractHamiltonian` types
function update_cache!(cache, H::DenseHamiltonian, ::Any, s::Real)
fill!(cache, 0.0)
for i = 1:length(H.m)
@inbounds axpy!(-1.0im * H.f[i](s), H.m[i], cache)
end
end
function update_vectorized_cache!(cache, H::DenseHamiltonian, ::Any, s::Real)
hmat = H(s)
iden = one(hmat)
cache .= 1.0im * (transpose(hmat) ⊗ iden - iden ⊗ hmat)
end
function (h::DenseHamiltonian)(du, u::AbstractMatrix, ::Any, s::Real)
fill!(du, 0.0 + 0.0im)
H = h(s)
gemm!('N', 'N', -1.0im, H, u, 1.0 + 0.0im, du)
gemm!('N', 'N', 1.0im, u, H, 1.0 + 0.0im, du)
end
function Base.convert(S::Type{T}, H::DenseHamiltonian{M}) where {T<:Complex,M}
mats = [convert.(S, x) for x in H.m]
cache = similar(H.u_cache, complex(M))
DenseHamiltonian{eltype(mats[1]),isdimensionlesstime(H)}(H.f, mats, cache, size(H))
end
function Base.convert(S::Type{T}, H::DenseHamiltonian{M}) where {T<:Real,M}
f_val = sum((x) -> x(0.0), H.f)
if !(typeof(f_val) <: Real)
throw(TypeError(:convert, "H.f", Real, typeof(f_val)))
end
mats = [convert.(S, x) for x in H.m]
cache = similar(H.u_cache, real(M))
DenseHamiltonian{eltype(mats[1]),isdimensionlesstime(H)}(H.f, mats, cache, size(H))
end
function Base.copy(H::DenseHamiltonian)
mats = copy(H.m)
DenseHamiltonian{eltype(mats[1]),isdimensionlesstime(H)}(H.f, mats, copy(H.u_cache), size(H))
end
function rotate(H::DenseHamiltonian, v)
mats = [v' * m * v for m in H.m]
DenseHamiltonian(H.f, mats, unit=:ħ)
end
"""
isdimensionlesstime(H)
Check whether the argument of a time dependent Hamiltonian is the dimensionless time.
"""
isdimensionlesstime(::DenseHamiltonian{T,B}) where {T,B} = B
issparse(::DenseHamiltonian) = false
"""
$(TYPEDEF)
Defines a time independent Hamiltonian object with a Julia array.
# Fields
$(FIELDS)
"""
struct ConstantDenseHamiltonian{T<:Number} <: AbstractDenseHamiltonian{T}
"Internal cache"
u_cache::Matrix{T}
"Size"
size::Tuple
end
function ConstantDenseHamiltonian(mat; unit=:h)
mat = unit_scale(unit) * mat
ConstantDenseHamiltonian{eltype(mat)}(mat, size(mat))
end
isconstant(::ConstantDenseHamiltonian) = true
issparse(::ConstantDenseHamiltonian) = false
function (h::ConstantDenseHamiltonian)(::Real)
h.u_cache
end
function update_cache!(cache, H::ConstantDenseHamiltonian, ::Any, ::Real)
cache .= -1.0im * H.u_cache
end
function update_vectorized_cache!(cache, H::ConstantDenseHamiltonian, p, ::Real)
hmat = H.u_cache
iden = one(hmat)
cache .= 1.0im * (transpose(hmat) ⊗ iden - iden ⊗ hmat)
end
function (h::ConstantDenseHamiltonian)(du, u::AbstractMatrix, ::Any, ::Real)
fill!(du, 0.0 + 0.0im)
H = h.u_cache
gemm!('N', 'N', -1.0im, H, u, 1.0 + 0.0im, du)
gemm!('N', 'N', 1.0im, u, H, 1.0 + 0.0im, du)
end
function Base.convert(S::Type{T}, H::ConstantDenseHamiltonian{M}) where {T<:Number,M}
mat = convert.(S, H.u_cache)
ConstantDenseHamiltonian{eltype(mat)}(mat, size(H))
end
function Base.copy(H::ConstantDenseHamiltonian)
mat = copy(H.u_cache)
ConstantDenseHamiltonian{eltype(mat[1])}(mat, size(H))
end
function rotate(H::ConstantDenseHamiltonian, v)
mat = v' * H.u_cache * v
ConstantDenseHamiltonian(mat, size(mat))
end | OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 8341 | """
$(SIGNATURES)
Evaluates a time-dependent Hamiltonian at time `s`, expressed in units of `GHz`.
For generic `AbstractHamiltonian` types, it defaults to `H.(s)/2/π`.
"""
evaluate(H::AbstractHamiltonian, s::Real) = H.(s) / 2 / π
"""
$(SIGNATURES)
This function provides a generic `evaluate` interface for `AbstractHamiltonian`
types that accepts two arguments. It ensures that other concrete Hamiltonian
types behave consistently with methods designed for `AdiabaticFrameHamiltonian`.
"""
evaluate(H::AbstractHamiltonian, ::Any, s::Real) = H.(s) / 2 / π
"""
isconstant(H)
Verifies if a Hamiltonian is constant. By default, it returns false for a generic
Hamiltonian.
"""
isconstant(::AbstractHamiltonian) = false
"""
issparse(H)
Verifies if a Hamiltonian is sparse. By default, it returns false for a generic
Hamiltonian.
"""
issparse(::AbstractHamiltonian) = false
Base.eltype(::AbstractHamiltonian{T}) where {T} = T
Base.size(H::AbstractHamiltonian) = H.size
Base.size(H::AbstractHamiltonian, dim::T) where {T<:Integer} = H.size[dim]
get_cache(H::AbstractHamiltonian) = H.u_cache
"""
$(SIGNATURES)
Update the internal cache `cache` according to the value of the Hamiltonian `H`
at given time `t`: ``cache = -iH(p, t)``. The third argument, `p`
is reserved for passing additional info to the `AbstractHamiltonian` object.
Currently, it is only used by `AdiabaticFrameHamiltonian` to pass the total
evolution time `tf`. To keep the interface consistent across all
`AbstractHamiltonian` types, the `update_cache!` method for all subtypes of
`AbstractHamiltonian` should keep the argument `p`.
Fallback to `cache .= -1.0im * H(p, t)` for generic `AbstractHamiltonian` type.
"""
update_cache!(cache, H::AbstractHamiltonian, p, t::Real) = cache .= -1.0im * H(p, t)
"""
$(SIGNATURES)
This function calculates the vectorized version of the commutation relation
between the Hamiltonian `H` at time `t` and the density matrix ``ρ``, and then
updates the cache in-place.
The commutation relation is given by ``[H, ρ] = Hρ - ρH``. The vectorized
commutator is given by ``I⊗H-H^T⊗I``.
...
# Arguments
- `cache`: the variable to be updated in-place, storing the vectorized commutator.
- `H::AbstractHamiltonian`: an instance of AbstractHamiltonian, representing the Hamiltonian of the system.
- `p`: unused parameter, kept for function signature consistency with other methods.
- `t`: a real number representing the time at which the Hamiltonian is evaluated.
# Returns
The function does not return anything as the update is performed in-place on cache.
...
"""
function update_vectorized_cache!(cache, H::AbstractHamiltonian, p, t::Real)
hmat = H(t)
iden = one(hmat)
cache .= 1.0im * (transpose(hmat) ⊗ iden - iden ⊗ hmat)
end
"""
$(SIGNATURES)
This function implements the Liouville-von Neumann equation, describing the time
evolution of a quantum system governed by the Hamiltonian `h`.
The Liouville-von Neumann equation is given by ``du/dt = -i[H, ρ]``, where ``H``
is the Hamiltonian of the system, ``ρ`` is the density matrix (`u` in this
context), and ``[H, ρ]`` is the commutation relation between ``H`` and ``ρ``,
defined as ``Hρ - ρH``.
The function is written in such a way that it can be directly passed to
differential equation solvers in Julia, such as those in
`DifferentialEquations.jl`, as the system function representing the ODE to be solved.
...
# Arguments
- `du`: the derivative of the density matrix with respect to time. The result
of the calculation will be stored here.
- `u`: an instance of `AbstractMatrix`, representing the density matrix of the system.
- `p`: unused parameter, kept for function signature consistency with other methods.
- `t`: a real number representing the time at which the Hamiltonian is evaluated.
# Returns
The function does not return anything as the update is performed in-place on `du`.
...
"""
function (h::AbstractHamiltonian)(du, u::AbstractMatrix, p, t::Real)
H = h(t)
Hρ = -1.0im * H * u
du .= Hρ + Hρ'
end
"""
$(SIGNATURES)
The `AbstractHamiltonian` type can be called with two arguments. The first
argument is reserved to pass additional info to the `AbstractHamiltonian` object.
Currently, it is only used by `AdiabaticFrameHamiltonian` to pass the total
evolution time `tf`.
Fallback to `H(t)` for generic `AbstractHamiltonian` type.
"""
(H::AbstractHamiltonian)(::Any, t::Real) = H(t)
Base.summary(H::AbstractHamiltonian) = string(
TYPE_COLOR,
nameof(typeof(H)),
NO_COLOR,
" with ",
TYPE_COLOR,
typeof(H).parameters[1],
NO_COLOR,
)
function Base.show(io::IO, A::AbstractHamiltonian)
println(io, summary(A))
print(io, "with size: ")
show(io, size(A))
end
"""
$(SIGNATURES)
Default eigenvalue decomposition method for an abstract Hamiltonian `H` at
time `t`. Keyword argument `lvl` specifies the number of levels to keep in
the output.
The function returns a tuple (w, v), where `w` is a vector of eigenvalues,
and `v` is a matrix where each column represents an eigenvector. (The `k`th
eigenvector can be extracted using the slice `v[:, k]`.)
"""
# If H(t) returns an array, the `Array` function will not allocate a new
# variable
haml_eigs_default(H::AbstractHamiltonian, t, lvl::Integer) = eigen!(
H(t) |> Array |> Hermitian, 1:lvl)
haml_eigs_default(H::AbstractHamiltonian, t, ::Nothing) = eigen(
H(t) |> Array |> Hermitian)
haml_eigs(H::AbstractHamiltonian, t, lvl; kwargs...) = haml_eigs_default(H, t,
lvl; kwargs...)
#function eigen!(M::Hermitian{T, S}, lvl::UnitRange) where T<:Number where S<:Union{SMatrix, MMatrix}
# w, v = eigen(Hermitian(M))
# w[lvl], v[:, lvl]
#end
"""
$(SIGNATURES)
Calculate the eigen value decomposition of the Hamiltonian `H` at time `t`.
Keyword argument `lvl` specifies the number of levels to keep in the output.
`w` is a vector of eigenvalues and `v` is a matrix of the eigenvectors in the
columns. (The `k`th eigenvector can be obtained from the slice `v[:, k]`.) `w`
will be in unit of `GHz`.
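# Examples
An illustrative call, assuming `σx` and `σz` are the Pauli matrices defined elsewhere in this package:
```julia-repl
julia> H = DenseHamiltonian([(s) -> 1 - s, (s) -> s], [σx, σz])
julia> w, v = eigen_decomp(H, 0.5)
```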
"""
function eigen_decomp(H::AbstractHamiltonian, t::Real; lvl::Int=2, kwargs...)
w, v = haml_eigs(H, t, lvl; kwargs...)
real(w)[1:lvl] / 2 / π, v[:, 1:lvl]
end
eigen_decomp(H::AbstractHamiltonian; lvl::Int=2, kwargs...) = isconstant(H) ?
eigen_decomp(H, 0; lvl=lvl, kwargs...) : throw(ArgumentError("H must be a constant Hamiltonian"))
"""
$(SIGNATURES)
Calculate the eigen value decomposition of the Hamiltonian `H` at an array of
time points `s`. The output keeps the lowest `lvl` eigenstates and their
corresponding eigenvalues. Output `(vals, vecs)` have the dimensions of
`(lvl, length(s))` and `(size(H, 1), lvl, length(s))` respectively.
"""
function eigen_decomp(
H::AbstractHamiltonian,
s::AbstractArray{Float64,1};
lvl::Int=2,
kwargs...
)
s_dim = length(s)
res_val = Array{eltype(H),2}(undef, (lvl, s_dim))
res_vec = Array{eltype(H),3}(undef, (size(H, 1), lvl, s_dim))
for (i, s_val) in enumerate(s)
val, vec = haml_eigs(H, s_val, lvl; kwargs...)
res_val[:, i] = val[1:lvl]
res_vec[:, :, i] = vec[:, 1:lvl]
end
res_val, res_vec
end
"""
$(SIGNATURES)
For a time series quantum states given by `states`, whose time points are given
by `s`, calculate the population of instantaneous eigenstates of `H`. The levels
of the instantaneous eigenstates are specified by `lvl`, which can be any slice index.
"""
function inst_population(s, states, H::AbstractHamiltonian; lvl=1:1)
if typeof(lvl) <: Int
lvl = lvl:lvl
end
pop = Array{Array{Float64,1},1}(undef, length(s))
for (i, v) in enumerate(s)
w, v = eigen_decomp(H, v, lvl=maximum(lvl))
if ndims(states[i]) == 1
inst_state = view(v, :, lvl)'
pop[i] = abs2.(inst_state * states[i])
elseif ndims(states[i]) == 2
l = length(lvl)
temp = Array{Float64,1}(undef, l)
for j in range(1, length=l)
inst_state = view(v, :, j)
temp[j] = real(inst_state' * states[i] * inst_state)
end
pop[i] = temp
end
end
pop
end
function is_complex(f_list, m_list)
any(m_list) do m
eltype(m) <: Complex
end || any(f_list) do f
typeof(f(0)) <: Complex
end
end
| OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 1707 | function ConstantHamiltonian(mat::Matrix; unit=:h, static=true)
# use static arrays for sizes up to 10×10
# can be turned off by setting `static` to false
if static && size(mat, 1) <= 10
mat = SMatrix{size(mat, 1),size(mat, 2)}(mat)
return ConstantStaticDenseHamiltonian(mat, unit=unit)
end
ConstantDenseHamiltonian(mat, unit=unit)
end
ConstantHamiltonian(mat::Union{SMatrix,MMatrix}; unit=:h, static=true) = ConstantStaticDenseHamiltonian(mat, unit=unit)
function ConstantHamiltonian(mat::SparseMatrixCSC; unit=:h, static=true)
mat = unit_scale(unit) * mat
ConstantSparseHamiltonian(mat, size(mat))
end
function Hamiltonian(f, mats::AbstractVector{T}; unit=:h, dimensionless_time=true, static=true) where {T<:Matrix}
hsize = size(mats[1])
# use static arrays for sizes up to 10×10
# can be turned off by setting `static` to false
if static && hsize[1] <= 10
mats = [SMatrix{hsize[1],hsize[2]}(unit_scale(unit) * m) for m in mats]
return StaticDenseHamiltonian(f, mats, unit=unit, dimensionless_time=dimensionless_time)
end
DenseHamiltonian(f, mats, unit=unit, dimensionless_time=dimensionless_time)
end
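# Illustrative dispatch examples (not part of the original source; `σx` and `σz` are
# assumed to be the Pauli matrices defined elsewhere in this package):
#   Hamiltonian([(s) -> 1 - s, (s) -> s], [σx, σz])                # 2×2 ≤ 10: StaticDenseHamiltonian
#   Hamiltonian([(s) -> 1 - s, (s) -> s], [σx, σz], static=false)  # DenseHamiltonian
#   Hamiltonian(σz)                                                # ConstantStaticDenseHamiltonian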
Hamiltonian(f, mats::AbstractVector{T}; unit=:h, dimensionless_time=true, static=true) where {T<:Union{SMatrix,MMatrix}} = StaticDenseHamiltonian(f, mats, unit=unit, dimensionless_time=dimensionless_time)
Hamiltonian(f, mats::AbstractVector{T}; unit=:h, dimensionless_time=true, static=true) where {T<:SparseMatrixCSC} = SparseHamiltonian(f, mats, unit=unit, dimensionless_time=dimensionless_time)
Hamiltonian(mats::AbstractMatrix; unit=:h, static=true) = ConstantHamiltonian(mats, unit=unit, static=static) | OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 3946 | """
$(TYPEDEF)
Defines an interpolating DenseHamiltonian object.
# Fields
$(FIELDS)
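# Examples
A minimal sketch interpolating a Hamiltonian sampled on a grid, assuming `σx` and `σz` are the Pauli matrices defined elsewhere in this package:
```julia-repl
julia> s_axis = range(0, 1, length=11)
julia> H_list = [(1 - s) * σx + s * σz for s in s_axis]
julia> H = InterpDenseHamiltonian(s_axis, H_list)
```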
"""
struct InterpDenseHamiltonian{T,isdimensionlesstime} <: AbstractDenseHamiltonian{T}
"Interpolating object"
interp_obj::Any
"Size"
size::Any
end
"""
$(TYPEDEF)
Defines an interpolating SparseHamiltonian object.
# Fields
$(FIELDS)
"""
struct InterpSparseHamiltonian{T,dimensionless_time} <: AbstractHamiltonian{T}
"Interpolating object"
interp_obj::Any
"Size"
size::Any
end
isdimensionlesstime(::InterpDenseHamiltonian{T,B}) where {T,B} = B
isdimensionlesstime(::InterpSparseHamiltonian{T,B}) where {T,B} = B
issparse(::InterpDenseHamiltonian) = false
issparse(::InterpSparseHamiltonian) = true
function InterpDenseHamiltonian(
s,
hmat;
method="bspline",
order=1,
unit=:h,
dimensionless_time=true
)
if ndims(hmat) == 3
hsize = size(hmat)[1:2]
htype = eltype(hmat)
elseif ndims(hmat) == 1
hsize = size(hmat[1])
htype = eltype(sum(hmat))
else
throw(ArgumentError("Invalid input data dimension."))
end
interp_obj = construct_interpolations(
s,
unit_scale(unit) * hmat,
method=method,
order=order,
)
InterpDenseHamiltonian{htype,dimensionless_time}(interp_obj, hsize)
end
function (H::InterpDenseHamiltonian)(s)
H.interp_obj(1:size(H, 1), 1:size(H, 1), s)
end
# The argument `p` is not essential for `InterpDenseHamiltonian`
# It exists to keep the `update_cache!` interface consistent across
# all `AbstractHamiltonian` types
function update_cache!(cache, H::InterpDenseHamiltonian, tf, s::Real)
for i = 1:size(H, 1)
for j = 1:size(H, 1)
@inbounds cache[i, j] = -1.0im * H.interp_obj(i, j, s)
end
end
end
function update_vectorized_cache!(cache, H::InterpDenseHamiltonian, tf, s)
hmat = H(s)
iden = Matrix{eltype(H)}(I, size(H))
cache .= 1.0im * (transpose(hmat) ⊗ iden - iden ⊗ hmat)
end
function get_cache(H::InterpDenseHamiltonian{T}, vectorize) where {T}
if vectorize == true
hsize = size(H, 1) * size(H, 1)
Matrix{T}(undef, hsize, hsize)
else
Matrix{T}(undef, size(H))
end
end
get_cache(H::InterpDenseHamiltonian{T}) where {T} = Matrix{T}(undef, size(H))
function (h::InterpDenseHamiltonian)(
du,
u::Matrix{T},
::Any,
t::Real,
) where {T<:Complex}
fill!(du, 0.0 + 0.0im)
H = h(t)
gemm!('N', 'N', -1.0im, H, u, 1.0 + 0.0im, du)
gemm!('N', 'N', 1.0im, u, H, 1.0 + 0.0im, du)
end
function InterpSparseHamiltonian(
s_axis,
H_list::AbstractArray{SparseMatrixCSC{T,Int},1};
unit=:h,
dimensionless_time=true
) where {T<:Number}
interp_obj = construct_interpolations(
collect(s_axis),
unit_scale(unit) * H_list,
method="gridded",
order=1,
)
InterpSparseHamiltonian{T,dimensionless_time}(interp_obj, size(H_list[1]))
end
function (H::InterpSparseHamiltonian)(s)
H.interp_obj(s)
end
function get_cache(H::InterpSparseHamiltonian{T}, vectorize) where {T}
if vectorize == true
hsize = size(H, 1) * size(H, 1)
spzeros(T, hsize, hsize)
else
spzeros(T, size(H)...)
end
end
get_cache(H::InterpSparseHamiltonian{T}) where {T} = spzeros(T, size(H)...)
# The argument `p` is not essential for `InterpSparseHamiltonian`
# It exists to keep the `update_cache!` interface consistent across
# all `AbstractHamiltonian` types
update_cache!(cache, H::InterpSparseHamiltonian, p, s::Real) =
cache .= -1.0im * H(s)
function update_vectorized_cache!(
cache,
H::InterpSparseHamiltonian,
tf,
s::Real,
)
hmat = H(s)
iden = sparse(I, size(H))
cache .= 1.0im * (transpose(hmat) ⊗ iden - iden ⊗ hmat)
end
function (h::InterpSparseHamiltonian)(
du,
u::Matrix{T},
tf,
s::Real,
) where {T<:Number}
H = h(s)
du .= -1.0im * (H * u - u * H)
end
| OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 6610 | """
$(TYPEDEF)
Defines a time dependent Hamiltonian object with sparse matrices.
# Fields
$(FIELDS)
"""
struct SparseHamiltonian{T<:Number,dimensionless_time} <: AbstractHamiltonian{T}
"List of time dependent functions"
f::Any
"List of constant matrices"
m::Vector{SparseMatrixCSC{T,Int}}
"Internal cache"
u_cache::SparseMatrixCSC{T,Int}
"Size"
size::Tuple
end
"""
$(SIGNATURES)
Constructor of the `SparseHamiltonian` type. `funcs` and `mats` are lists of time-dependent functions and the corresponding matrices. The Hamiltonian can be represented as ``∑ᵢfuncs[i](s)×mats[i]``. `unit` specifies whether `:h` or `:ħ` is set to one when defining `funcs` and `mats`. The `mats` will be scaled by ``2π`` if `unit` is `:h`.
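# Examples
A minimal sketch using sparse matrices built with `q_translate` (defined elsewhere in this package):
```julia-repl
julia> Hd = q_translate("XI", sp=true) + q_translate("IX", sp=true)
julia> Hp = q_translate("ZZ", sp=true)
julia> H = SparseHamiltonian([(s) -> 1 - s, (s) -> s], [Hd, Hp])
```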
"""
function SparseHamiltonian(funcs, mats; unit=:h, dimensionless_time=true)
if any((x) -> size(x) != size(mats[1]), mats)
throw(ArgumentError("Matrices in the list do not have the same size."))
end
if is_complex(funcs, mats)
mats = complex.(mats)
end
cache = similar(sum(mats))
fill!(cache, 0.0)
mats = unit_scale(unit) * mats
SparseHamiltonian{eltype(mats[1]),dimensionless_time}(funcs, mats, cache, size(mats[1]))
end
function Base.:+(h1::SparseHamiltonian, h2::SparseHamiltonian)
@assert size(h1) == size(h2) "The two Hamiltonians need to have the same size."
@assert isdimensionlesstime(h1) == isdimensionlesstime(h2) "The two Hamiltonians need to have the same type of time arguments."
(m1, m2) = promote(h1.m, h2.m)
mats = [m1; m2]
cache = similar(sum(mats))
funcs = [h1.f; h2.f]
hsize = size(h1)
SparseHamiltonian{eltype(m1[1]),isdimensionlesstime(h1)}(funcs, mats, cache, hsize)
end
isdimensionlesstime(::SparseHamiltonian{T,B}) where {T,B} = B
issparse(::SparseHamiltonian) = true
"""
function (h::SparseHamiltonian)(t::Real)
Calling the Hamiltonian returns the value ``2πH(t)``.
"""
function (h::SparseHamiltonian)(s::Real)
fill!(h.u_cache, 0.0)
for (f, m) in zip(h.f, h.m)
h.u_cache .+= f(s) * m
end
h.u_cache
end
# The third argument is not essential for `SparseHamiltonian`
# It exists to keep the `update_cache!` interface consistent across
# all `AbstractHamiltonian` types
function update_cache!(cache, H::SparseHamiltonian, ::Any, s::Real)
fill!(cache, 0.0)
for (f, m) in zip(H.f, H.m)
cache .+= -1.0im * f(s) * m
end
end
function update_vectorized_cache!(cache, H::SparseHamiltonian, ::Any, s::Real)
hmat = H(s)
iden = sparse(I, size(H))
cache .= 1.0im * (transpose(hmat) ⊗ iden - iden ⊗ hmat)
end
function (h::SparseHamiltonian)(
du,
u::AbstractMatrix,
::Any,
s::Real,
)
H = h(s)
du .= -1.0im * (H * u - u * H)
end
function Base.convert(S::Type{T}, H::SparseHamiltonian{M}) where {T<:Complex,M}
mats = [convert.(S, x) for x in H.m]
cache = similar(H.u_cache, complex(M))
SparseHamiltonian{eltype(mats[1]),isdimensionlesstime(H)}(H.f, mats, cache, size(H))
end
function Base.convert(S::Type{T}, H::SparseHamiltonian{M}) where {T<:Real,M}
f_val = sum((x) -> x(0.0), H.f)
if !(typeof(f_val) <: Real)
throw(TypeError(:convert, "H.f", Real, typeof(f_val)))
end
mats = [convert.(S, x) for x in H.m]
cache = similar(H.u_cache, real(M))
SparseHamiltonian{eltype(mats[1]),isdimensionlesstime(H)}(H.f, mats, cache, size(H))
end
"""
haml_eigs_default(H::SparseHamiltonian, t, lvl::Integer; kwargs...)
Perform the eigendecomposition of a sparse Hamiltonian `H` at a given time `t`.
The argument `lvl` specifies the number of levels to retain in the output. This
method utilizes the LOBPCG algorithm when the size `d` of the Hamiltonian
satisfies `d >= 3 * lvl`, and Julia's built-in `eigen` function otherwise.
The function returns a tuple `(λ, X)`, where `λ` is a vector of eigenvalues and
`X` is a matrix where each column represents an eigenvector.
Keyword arguments `kwargs...` can be used to pass additional parameters to the
LOBPCG algorithm.
"""
function haml_eigs_default(H::SparseHamiltonian{T,B}, t, lvl::Integer;
lobpcg=true, kwargs...) where {T<:Number,B}
d = size(H, 1)
# the LOBPCG algorithm requires d >= 3 * lvl
# TODO: filter the keyword arguments
if d >= 3 * lvl && lobpcg
X0 = randn(T, (d, lvl))
res = lobpcg_hyper(H(t), X0; kwargs...)
return (res.λ, res.X)
else
return eigen(H(t) |> Array |> Hermitian)
end
end
"""
haml_eigs_default(H::SparseHamiltonian, t, X0::Matrix; kwargs...)
Perform the eigendecomposition of a sparse Hamiltonian `H` at a given time `t`
using an initial guess `X0` for the eigenvectors. This method uses the LOBPCG
algorithm.
The function returns a tuple `(λ, X)`, where `λ` is a vector of eigenvalues and
`X` is a matrix where each column represents an eigenvector.
Keyword arguments `kwargs...` can be used to pass additional parameters to the LOBPCG algorithm.
"""
function haml_eigs_default(H::SparseHamiltonian, t, X0::Matrix; kwargs...)
res = lobpcg_hyper(H(t), X0; kwargs...)
return (res.λ, res.X)
end
haml_eigs_default(H::SparseHamiltonian, t, ::Nothing) =
eigen(H(t) |> Array |> Hermitian)
"""
$(TYPEDEF)
Defines a time independent Hamiltonian object with sparse matrices.
# Fields
$(FIELDS)
"""
struct ConstantSparseHamiltonian{T<:Number} <: AbstractHamiltonian{T}
"Internal cache"
u_cache::SparseMatrixCSC{T,Int}
"Size"
size::Tuple
end
function SparseHamiltonian(mat; unit=:h)
mat = unit_scale(unit) * mat
ConstantSparseHamiltonian(mat, size(mat))
end
isconstant(::ConstantSparseHamiltonian) = true
issparse(::ConstantSparseHamiltonian) = true
function (h::ConstantSparseHamiltonian)(::Real)
h.u_cache
end
function update_cache!(cache, H::ConstantSparseHamiltonian, ::Any, ::Real)
cache .= -1.0im * H.u_cache
end
function update_vectorized_cache!(cache, H::ConstantSparseHamiltonian, ::Any, ::Real)
hmat = H.u_cache
iden = sparse(I, size(H))
cache .= 1.0im * (transpose(hmat) ⊗ iden - iden ⊗ hmat)
end
function (h::ConstantSparseHamiltonian)(
du,
u::AbstractMatrix,
::Any,
::Real,
)
H = h.u_cache
du .= -1.0im * (H * u - u * H)
end
function Base.convert(S::Type{T}, H::ConstantSparseHamiltonian{M}) where {T<:Number,M}
mat = convert.(S, H.u_cache)
ConstantSparseHamiltonian{eltype(mat)}(mat, size(H))
end
function rotate(H::ConstantSparseHamiltonian, v)
hsize = size(H)
mat = v' * H.u_cache * v
issparse(mat) ? ConstantSparseHamiltonian(mat, hsize) : ConstantDenseHamiltonian(mat, hsize)
end | OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 5001 | """
$(TYPEDEF)
Defines a time dependent Hamiltonian object using static arrays.
# Fields
$(FIELDS)
"""
struct StaticDenseHamiltonian{T<:Number,dimensionless_time} <: AbstractDenseHamiltonian{T}
"List of time dependent functions"
f::Vector
"List of constant matrices"
m::Vector
"Internal cache"
u_cache::MMatrix
"Size"
size::Tuple
end
function StaticDenseHamiltonian(funcs, mats; unit=:h, dimensionless_time=true)
if any((x) -> size(x) != size(mats[1]), mats)
throw(ArgumentError("Matrices in the list do not have the same size."))
end
if is_complex(funcs, mats)
mats = complex.(mats)
end
hsize = size(mats[1])
mats = unit_scale(unit) * mats
cache = similar(mats[1])
StaticDenseHamiltonian{eltype(mats[1]),dimensionless_time}(funcs, mats, cache, hsize)
end
isdimensionlesstime(::StaticDenseHamiltonian{T,B}) where {T,B} = B
issparse(::StaticDenseHamiltonian) = false
"""
function (h::StaticDenseHamiltonian)(s::Real)
Calling the Hamiltonian returns the value ``2πH(s)``. The argument `s` is (dimensionless) time. The returned matrix is in the unit of angular frequency.
"""
function (h::StaticDenseHamiltonian)(s::Real)
fill!(h.u_cache, 0.0)
for i = 1:length(h.f)
@inbounds axpy!(h.f[i](s), h.m[i], h.u_cache)
end
h.u_cache
end
# The third argument is not essential for `StaticDenseHamiltonian`
# It exists to keep the `update_cache!` interface consistent across
# all `AbstractHamiltonian` types
function update_cache!(cache, H::StaticDenseHamiltonian, ::Any, s::Real)
fill!(cache, 0.0)
for i = 1:length(H.m)
@inbounds axpy!(-1.0im * H.f[i](s), H.m[i], cache)
end
end
function update_vectorized_cache!(cache, H::StaticDenseHamiltonian, ::Any, s::Real)
hmat = H(s)
iden = one(hmat)
cache .= 1.0im * (transpose(hmat) ⊗ iden - iden ⊗ hmat)
end
function (h::StaticDenseHamiltonian)(du, u::AbstractMatrix, ::Any, s::Real)
fill!(du, 0.0 + 0.0im)
H = h(s)
gemm!('N', 'N', -1.0im, H, u, 1.0 + 0.0im, du)
gemm!('N', 'N', 1.0im, u, H, 1.0 + 0.0im, du)
end
function Base.convert(S::Type{T}, H::StaticDenseHamiltonian{M}) where {T<:Complex,M}
mats = [convert.(S, x) for x in H.m]
cache = similar(H.u_cache, complex(M))
StaticDenseHamiltonian{eltype(mats[1]),isdimensionlesstime(H)}(H.f, mats, cache, size(H))
end
function Base.convert(S::Type{T}, H::StaticDenseHamiltonian{M}) where {T<:Real,M}
f_val = sum((x) -> x(0.0), H.f)
if !(typeof(f_val) <: Real)
throw(TypeError(:convert, "H.f", Real, typeof(f_val)))
end
mats = [convert.(S, x) for x in H.m]
cache = similar(H.u_cache, real(M))
StaticDenseHamiltonian{eltype(mats[1]),isdimensionlesstime(H)}(H.f, mats, cache, size(H))
end
function Base.copy(H::StaticDenseHamiltonian)
mats = copy(H.m)
StaticDenseHamiltonian{eltype(mats[1]),isdimensionlesstime(H)}(H.f, mats, copy(H.u_cache), size(H))
end
function rotate(H::StaticDenseHamiltonian, v)
hsize = size(H)
mats = [SMatrix{hsize[1],hsize[2]}(v' * m * v) for m in H.m]
StaticDenseHamiltonian(H.f, mats, unit=:ħ, dimensionless_time=isdimensionlesstime(H))
end
function haml_eigs_default(H::StaticDenseHamiltonian, t, lvl::Integer)
w, v = eigen(Hermitian(H(t)))
w[1:lvl], v[:, 1:lvl]
end
"""
$(TYPEDEF)
Defines a time independent Hamiltonian object using static arrays.
# Fields
$(FIELDS)
"""
struct ConstantStaticDenseHamiltonian{T<:Number} <: AbstractDenseHamiltonian{T}
"Internal cache"
u_cache::AbstractMatrix{T}
"Size"
size::Tuple
end
function ConstantStaticDenseHamiltonian(mat; unit=:h)
mat = unit_scale(unit) * mat
ConstantStaticDenseHamiltonian{eltype(mat)}(mat, size(mat))
end
isconstant(::ConstantStaticDenseHamiltonian) = true
issparse(::ConstantStaticDenseHamiltonian) = false
function (h::ConstantStaticDenseHamiltonian)(::Real)
h.u_cache
end
function update_cache!(cache, H::ConstantStaticDenseHamiltonian, ::Any, ::Real)
cache .= -1.0im * H.u_cache
end
function update_vectorized_cache!(cache, H::ConstantStaticDenseHamiltonian, p, ::Real)
hmat = H.u_cache
iden = one(hmat)
cache .= 1.0im * (transpose(hmat) ⊗ iden - iden ⊗ hmat)
end
function (h::ConstantStaticDenseHamiltonian)(du, u::AbstractMatrix, ::Any, ::Real)
fill!(du, 0.0 + 0.0im)
H = h.u_cache
du .= -1.0im * (H * u - u * H)
end
function Base.convert(S::Type{T}, H::ConstantStaticDenseHamiltonian{M}) where {T<:Number,M}
mat = convert.(S, H.u_cache)
ConstantStaticDenseHamiltonian{eltype(mat)}(mat, size(H))
end
function Base.copy(H::ConstantStaticDenseHamiltonian)
mat = copy(H.u_cache)
ConstantStaticDenseHamiltonian{eltype(mat[1])}(mat, size(H))
end
function rotate(H::ConstantStaticDenseHamiltonian, v)
hsize = size(H)
mat = SMatrix{hsize[1],hsize[2]}(v' * H.u_cache * v)
ConstantStaticDenseHamiltonian(mat, hsize)
end
function haml_eigs_default(H::ConstantStaticDenseHamiltonian, t, lvl::Integer)
w, v = eigen(Hermitian(H(t)))
w[1:lvl], v[:, 1:lvl]
end | OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 3737 | # The content in this file is for test purpose only
"""
cpvagk(f, t, a, b, tol=256*eps())
Calculate the Cauchy principal value integral of the form ``𝒫∫_a^b f(x)/(x-t) dx``. The algorithm is adapted from [P. Keller, 02.01.2015](https://www.sciencedirect.com/science/article/pii/S0377042715004422).
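# Examples
For constant `f(x) = 1` the exact value is ``𝒫∫_a^b dx/(x-t) = log((b-t)/(t-a))``, which gives a quick consistency check:
```julia-repl
julia> q, err = cpvagk((x) -> 1.0, 0.5, 0, 2)  # exact value is log(3) ≈ 1.0986
```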
"""
function cpvagk(f, t, a, b, tol=256*eps())
# Adapted from P. Keller, 02.01.2015
# CPVAGK(F,T,A,B,TOL) = CPVInt[a,b]F(x)/(x-T)dx.
#
# Based on QUADGK - the built-in Matlab adaptive integrator.
#
# Q = CPVAGK(F,T,A,B,TOL) approximates the above integral
# and tries to make the absolute error less than TOL.
# The default value of TOL is 256*eps.
#
# [Q,ERR] = CPVAGK(F,T,A,B,TOL) also provides the estimate of
# the absolute error. We suggest to always check the value of ERR.
# In case a QUADGK warning message appear, the approximate error bound
# provided inside the warning message should be ignored!
# Consider only the value of ERR as error estimate.
#
# Test (research) version.
# P. Keller, 02.01.2015
if b<=t || t <= a
error("CPVAGK: Invalid arguments")
end
s = min(1.0, t-a, b-t); #computing the split point(s)
a = float(a)
b = float(b)
s = float(s)
t = float(t)
epsab = epsf = eps()
ft = f(t)
abt = 0.25*(abs(a) + abs(b) + 2*abs(t))
tcond = 1 + 0.5*(max(1,abt)-1)^2/max(1,abt);
#Estimating possible accuracy loss...
# a) related to rounding t and interval endpoints:
v = [a+2*epsf*abs(a), b-2*epsf*abs(b)];
fd = max(max(1.0, abs(t)), abs.(v)...).*abs.(f.(v))./[t-a,b-t];
erra = 0.75*epsab * sum(fd);
# b) related to the magnitude of the 1st and 2nd derivative:
sts = 255*s/256 # divided differences five steps...
sth = min(sts, min(2,(b-a))./[82,70,32,22]...)
ste = min(sts, exp(3/8*log(max(1,abs(t))*epsf)))
dfs = max(abs(f(t+sts)-ft), abs(f(t-sts)-ft)) / sts # <|f'[t-s,t+s]|
dfh = max(abs(f(t+sth)-ft), abs(f(t-sth)-ft))./ sth./ [3/2, 7/4, 2, 3]
df1 = abs(f(t+ste^2)-f(t-ste^2))/ste^2; # ~~ 2*|f'(t)|
df2 = sqrt(abs(f(t+ste)-2*ft+f(t-ste)))/ste; # ~~ |f"(t)|^(1/2)
errb1 = 17*tcond*epsf*(max(df1,dfh...) + abs(ft));
errb2 = 10*tcond*epsf*df2;
# c) related to the distance from interval endpoints:
errc = 0.75*epsf*abt*abs(ft)/min(b-t,t-a)
# Setting (safe) error tolerances, absolute and relative...
atol = max(tol/2, 8*dfs*epsf, erra, errb1, errb2, errc);
rtol = max(100*epsf, errb2, errc/max(sqrt(epsf), abs(ft)));
# Setting parameters for quadgk()
params = (atol=atol, rtol=rtol, maxevals=163840)
# Computing the integrals...
q1 = 0
e1 = 0
q2 = 0
e2 = 0
if s < t-a
q1, e1 = quadgk((x)->(f(x)-ft)./(x-t), a, t-s; params...)
end
if s < b-t
q2, e2 = quadgk((x)->(f(x)-ft)./(x-t), t+s, b; params...)
end
q3, e3 = quadgk((x)->(f(t+x)-f(t-x))./x, 0, s; params...)
# Computing the final result and error estimation...
q = q1 + q2 + q3 + ft*log((b-t)/(t-a))
err = e1 + e2 + e3 + max(erra,errc) + errb1 + errb2 + 10*tcond*epsf*max(abs.([q1,q2,q3,q])...)
q, err
end
"""
$(SIGNATURES)
Calculate the Lamb shift of spectrum `γ`. `atol` is the absolute tolerance for Cauchy principal value integral.
"""
function lambshift_cpvagk(w, γ; atol = 1e-7)
g(x) = γ(x) / (x - w)
cpv, cperr = cpvagk(γ, w, w - 1.0, w + 1.0)
negv, negerr = quadgk(g, -Inf, w - 1.0)
posv, poserr = quadgk(g, w + 1.0, Inf)
v = cpv + negv + posv
err = cperr + negerr + poserr
if (err > atol) || (isnan(err))
@warn "Absolute error of integration is larger than the tolerance."
end
-v / 2 / pi
end
"""
$(TYPEDEF)
A tag type for an in-place unitary function. `func(cache, t)` is the actual in-place update function.
# Fields
$(FIELDS)
"""
struct InplaceUnitary
"inplace update function"
func::Any
end
isinplace(::Any) = false
isinplace(::InplaceUnitary) = true
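# Usage sketch (hypothetical helper, not part of the package API): wrap an
# in-place unitary so that downstream routines write U(t) directly into `cache`.
function _inplace_unitary_demo()
    U = InplaceUnitary((cache, t) -> cache .= exp(-1.0im * σz * t))
    isinplace(U)  # returns true
end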
"""
$(SIGNATURES)
Calculate the Lamb shift of spectrum `γ` at angular frequency `ω`. All keyword arguments of the `quadgk` function are supported.
"""
function lambshift(ω, γ; kwargs...)
integrand = (x)->(γ(ω+x) - γ(ω-x))/x
integral, = quadgk(integrand, 0, Inf; kwargs...)
# TODO: do something with the error information
-integral / 2 / π
end
| OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 2020 | import HCubature: hcubature
"""
$(TYPEDEF)
`CGLiouvillian` defines the Liouvillian operator corresponding to the CGME.
# Fields
$(FIELDS)
"""
struct CGLiouvillian <: AbstractLiouvillian
"CGME kernels"
kernels::Any
"close system unitary"
unitary::Any
"absolute error tolerance for integration"
atol::Float64
"relative error tolerance for integration"
rtol::Float64
"cache matrix for inplace unitary"
Ut1::Union{Matrix,MMatrix}
"cache matrix for inplace unitary"
Ut2::Union{Matrix,MMatrix}
"cache matrix for integration"
Ut::Union{Matrix,MMatrix}
end
function CGLiouvillian(kernels, U, atol, rtol)
m_size = size(kernels[1][2])
Λ = m_size[1] <= 10 ? zeros(MMatrix{m_size[1],m_size[2],ComplexF64}) :
zeros(ComplexF64, m_size[1], m_size[2])
unitary = isinplace(U) ? U.func : (cache, t) -> cache .= U(t)
CGLiouvillian(kernels, unitary, atol, rtol, similar(Λ), similar(Λ), Λ)
end
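# A rough construction sketch (illustrative only; assumes an `InteractionSet`
# named `inter_set` is built elsewhere). It mirrors the call pattern used in the
# package tests, where the CG kernels are assembled by `cg_from_interactions`
# from the interactions and a closed-system unitary.
function _cg_liouvillian_demo(inter_set)
    U = (t) -> exp(-1.0im * σz * t)
    cg_from_interactions(inter_set, U, 10, 10, 1e-4, 1e-4)
end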
function (CG::CGLiouvillian)(du, u, p, t::Real)
tf = p.tf
for (inds, coupling, cfun, Ta) in CG.kernels
lower_bound = t < Ta / 2 ? [-t, -t] : [-Ta, -Ta] / 2
upper_bound = t + Ta / 2 > tf ? [tf, tf] .- t : [Ta, Ta] / 2
for (i, j) in inds
function integrand(x)
Ut = CG.Ut
Ut1 = CG.Ut1
Ut2 = CG.Ut2
CG.unitary(Ut, t)
Ut2 .= CG.unitary(Ut2, t + x[2]) * Ut'
Ut1 .= CG.unitary(Ut1, t + x[1]) * Ut'
At1 = Ut .= Ut1' * coupling[i](p(t + x[1])) * Ut1
At2 = Ut1 .= Ut2' * coupling[j](p(t + x[2])) * Ut2
cfun[i, j](t + x[2], t + x[1]) *
(At1 * u * At2 - 0.5 * (At2 * At1 * u + u * At2 * At1)) / Ta
end
cg_res, err = hcubature(
integrand,
lower_bound,
upper_bound,
rtol=CG.rtol,
atol=CG.atol,
)
axpy!(1.0, cg_res, du)
end
end
end
import StatsBase: sample, Weights
"""
$(TYPEDEF)
`DaviesGenerator` defines a Davies generator.
# Fields
$(FIELDS)
"""
struct DaviesGenerator <: AbstractLiouvillian
"System bath coupling operators"
coupling::AbstractCouplings
"Spectrum density"
γ::Any
"Lambshift spectral density"
S::Any
end
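# Construction sketch (illustrative only, not part of the package API): a single
# σz coupling with an assumed flat bath spectrum γ(ω) = 1 and a vanishing Lamb
# shift S(ω) = 0.
function _davies_generator_demo()
    coupling = ConstantCouplings(["Z"], unit=:ħ)
    DaviesGenerator(coupling, (ω) -> 1.0, (ω) -> 0.0)
end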
function (D::DaviesGenerator)(du, ρ, gap_idx::GapIndices, v, s::Real)
l = size(du, 1)
# pre-rotate all the system bath coupling operators into the energy eigenbasis
cs = [v' * c * v for c in D.coupling(s)]
Hₗₛ = spzeros(ComplexF64, l, l)
for (w, a, b) in positive_gap_indices(gap_idx)
g₊ = D.γ(w)
g₋ = D.γ(-w)
for c in cs
L₊ = sparse(a, b, c[a+(b.-1)*l], l, l)
L₋ = sparse(b, a, c[b+(a.-1)*l], l, l)
LL₊ = L₊' * L₊
LL₋ = L₋' * L₋
du .+= g₊ * (L₊ * ρ * L₊' - 0.5 * (LL₊ * ρ + ρ * LL₊)) + g₋ * (L₋ * ρ * L₋' - 0.5 * (LL₋ * ρ + ρ * LL₋))
Hₗₛ += D.S(w) * LL₊ + D.S(-w) * LL₋
end
end
g0 = D.γ(0)
a, b = zero_gap_indices(gap_idx)
for c in cs
L = sparse(a, b, c[a+(b.-1)*l], l, l)
LL = L' * L
du .+= g0 * (L * ρ * L' - 0.5 * (LL * ρ + ρ * LL))
Hₗₛ += D.S(0) * LL
end
du .-= 1.0im * (Hₗₛ * ρ - ρ * Hₗₛ)
end
function (D::DaviesGenerator)(du, ρ, gap_idx::GapIndices, s::Real)
l = size(du, 1)
cs = [c for c in D.coupling(s)]
Hₗₛ = spzeros(ComplexF64, l, l)
for (w, a, b) in positive_gap_indices(gap_idx)
g₊ = D.γ(w)
g₋ = D.γ(-w)
for c in cs
L₊ = sparse(a, b, c[a+(b.-1)*l], l, l)
L₋ = sparse(b, a, c[b+(a.-1)*l], l, l)
LL₊ = L₊' * L₊
LL₋ = L₋' * L₋
du .+= g₊ * (L₊ * ρ * L₊' - 0.5 * (LL₊ * ρ + ρ * LL₊)) + g₋ * (L₋ * ρ * L₋' - 0.5 * (LL₋ * ρ + ρ * LL₋))
Hₗₛ += D.S(w) * LL₊ + D.S(-w) * LL₋
end
end
g0 = D.γ(0)
a, b = zero_gap_indices(gap_idx)
for c in cs
L = sparse(a, b, c[a+(b.-1)*l], l, l)
LL = L' * L
du .+= g0 * (L * ρ * L' - 0.5 * (LL * ρ + ρ * LL))
Hₗₛ += D.S(0) * LL
end
du .-= 1.0im * (Hₗₛ * ρ - ρ * Hₗₛ)
end
function update_cache!(cache, D::DaviesGenerator, gap_idx::GapIndices, v, s::Real)
l = size(cache, 1)
cs = [v' * c * v for c in D.coupling(s)]
H_eff = spzeros(ComplexF64, l, l)
for (w, a, b) in positive_gap_indices(gap_idx)
g₊ = D.γ(w)
g₋ = D.γ(-w)
for c in cs
L₊ = sparse(a, b, c[a+(b.-1)*l], l, l)
L₋ = sparse(b, a, c[b+(a.-1)*l], l, l)
LL₊ = L₊' * L₊
LL₋ = L₋' * L₋
H_eff -= (1.0im * D.S(w) + 0.5 * g₊) * LL₊ + (1.0im * D.S(-w) + 0.5 * g₋) * LL₋
end
end
g0 = D.γ(0)
a, b = zero_gap_indices(gap_idx)
for c in cs
L = sparse(a, b, c[a+(b.-1)*l], l, l)
LL = L' * L
H_eff -= (1.0im * D.S(0) + 0.5 * g0) * LL
end
cache .+= H_eff
end
# Constant Davies Generator types
struct ConstDaviesGenerator <: AbstractLiouvillian
"Precomputed Lindblad operators"
Linds::Vector
"Precomputed Lambshift Hamiltonian"
Hₗₛ::AbstractMatrix
"Precomputed DiffEq operator"
A::AbstractMatrix
end
function (D::ConstDaviesGenerator)(du, ρ, ::Any, ::Any)
for L in D.Linds
LL = L' * L
du .+= L * ρ * L' - 0.5 * (LL * ρ + ρ * LL)
end
du .-= 1.0im * (D.Hₗₛ * ρ - ρ * D.Hₗₛ)
end
update_cache!(cache, D::ConstDaviesGenerator, ::Any, ::Real) = cache .+= D.A
struct ConstHDaviesGenerator <: AbstractLiouvillian
"GapIndices for AME"
gap_idx
"System bath coupling operators"
coupling::AbstractCouplings
"Spectrum density"
γ::Any
"Lambshift spectral density"
S::Any
end
function (D::ConstHDaviesGenerator)(du, ρ, ::Any, s::Real)
l = size(du, 1)
cs = [c for c in D.coupling(s)]
Hₗₛ = spzeros(ComplexF64, l, l)
for (w, a, b) in positive_gap_indices(D.gap_idx)
g₊ = D.γ(w)
g₋ = D.γ(-w)
for c in cs
L₊ = sparse(a, b, c[a+(b.-1)*l], l, l)
L₋ = sparse(b, a, c[b+(a.-1)*l], l, l)
LL₊ = L₊' * L₊
LL₋ = L₋' * L₋
du .+= g₊ * (L₊ * ρ * L₊' - 0.5 * (LL₊ * ρ + ρ * LL₊)) + g₋ * (L₋ * ρ * L₋' - 0.5 * (LL₋ * ρ + ρ * LL₋))
Hₗₛ += D.S(w) * LL₊ + D.S(-w) * LL₋
end
end
g0 = D.γ(0)
a, b = zero_gap_indices(D.gap_idx)
for c in cs
L = sparse(a, b, c[a+(b.-1)*l], l, l)
LL = L' * L
du .+= g0 * (L * ρ * L' - 0.5 * (LL * ρ + ρ * LL))
Hₗₛ += D.S(0) * LL
end
du .-= 1.0im * (Hₗₛ * ρ - ρ * Hₗₛ)
end
"""
$(SIGNATURES)
Build the constant Davies generator types if the corresponding Hamiltonian is constant.
...
# Arguments
- `coupling::AbstractCouplings`: system bath coupling operators.
- `gap_idx::GapIndices`: `GapIndices` object generated from the constant Hamiltonian.
- `γfun`: bath spectral function.
- `Sfun`: function for the lambshift.
...
"""
function build_const_davies(couplings::AbstractCouplings, gap_idx::GapIndices, γfun, Sfun)
if isconstant(couplings)
l = get_lvl(gap_idx)
res = []
Hₗₛ = spzeros(ComplexF64, l, l)
A = spzeros(ComplexF64, l, l)
for (w, a, b) in OpenQuantumBase.positive_gap_indices(gap_idx)
g₊ = γfun(w) |> sqrt
g₋ = γfun(-w) |> sqrt
S₊ = Sfun(w)
S₋ = Sfun(-w)
for c in couplings(0)
L₊ = sparse(a, b, c[a + (b .- 1)*l], l, l)
L₋ = sparse(b, a, c[b + (a .- 1)*l], l, l)
LL₊ = L₊'*L₊
LL₋ = L₋'*L₋
push!(res, g₊*L₊)
push!(res, g₋*L₋)
Hₗₛ += S₊*LL₊ + S₋*LL₋
A -= 0.5 * g₊^2 * LL₊ + 0.5 * g₋^2 * LL₋
end
end
g0 = γfun(0) |> sqrt
S0 = Sfun(0)
a, b = OpenQuantumBase.zero_gap_indices(gap_idx)
for c in couplings(0)
L = sparse(a, b, c[a + (b .- 1)*l], l, l)
LL = L'*L
push!(res, g0*L)
Hₗₛ += S0*LL
A -= 0.5 * g0^2 * LL
end
A -= 1.0im * Hₗₛ
return ConstDaviesGenerator(res, Hₗₛ, A)
else
return ConstHDaviesGenerator(gap_idx, couplings, γfun, Sfun)
end
end
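# Usage sketch for `build_const_davies` (illustrative only): a single qubit with
# energies 0 and 1, an X coupling, an assumed Ohmic-like spectrum and no Lamb shift.
function _const_davies_demo()
    gap_idx = GapIndices([0.0, 1.0], 8, 8)
    coupling = ConstantCouplings(["X"], unit=:ħ)
    γfun = (ω) -> ω >= 0 ? ω + 1.0 : (1 - ω) * exp(ω)
    Sfun = (ω) -> 0.0
    build_const_davies(coupling, gap_idx, γfun, Sfun)
end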
"""
$(TYPEDEF)
Defines correlated Davies generator
# Fields
$(FIELDS)
"""
struct CorrelatedDaviesGenerator <: AbstractLiouvillian
"System bath coupling operators"
coupling::AbstractCouplings
"Spectrum density"
γ::Any
"Lambshift spectral density"
S::Any
"Indices to iterate"
inds::Any
end
function (D::CorrelatedDaviesGenerator)(du, ρ, gap_idx::GapIndices, s::Real)
l = size(du, 1)
Hₗₛ = spzeros(ComplexF64, l, l)
for (w, a, b) in positive_gap_indices(gap_idx)
for (α, β) in D.inds
g₊ = D.γ[α, β](w)
g₋ = D.γ[α, β](-w)
Aα = D.coupling[α](s)
Aβ = D.coupling[β](s)
L₊ = sparse(a, b, Aβ[a+(b.-1)*l], l, l)
L₊d = sparse(a, b, Aα[a+(b.-1)*l], l, l)'
L₋ = sparse(b, a, Aβ[b+(a.-1)*l], l, l)
L₋d = sparse(b, a, Aα[b+(a.-1)*l], l, l)'
LL₊ = L₊d * L₊
LL₋ = L₋d * L₋
du .+= g₊ * (L₊ * ρ * L₊d - 0.5 * (LL₊ * ρ + ρ * LL₊)) + g₋ * (L₋ * ρ * L₋d - 0.5 * (LL₋ * ρ + ρ * LL₋))
Hₗₛ += D.S[α, β](w) * LL₊ + D.S[α, β](-w) * LL₋
end
end
a, b = zero_gap_indices(gap_idx)
for (α, β) in D.inds
g0 = D.γ[α, β](0)
Aα = D.coupling[α](s)
Aβ = D.coupling[β](s)
L = sparse(a, b, Aβ[a+(b.-1)*l], l, l)
Ld = sparse(a, b, Aα[a+(b.-1)*l], l, l)'
LL = Ld * L
du .+= g0 * (L * ρ * Ld - 0.5 * (LL * ρ + ρ * LL))
Hₗₛ += D.S[α, β](0) * LL
end
du .-= 1.0im * (Hₗₛ * ρ - ρ * Hₗₛ)
end
function (D::CorrelatedDaviesGenerator)(du, ρ, gap_idx::GapIndices, v, s::Real)
l = size(du, 1)
Hₗₛ = spzeros(ComplexF64, l, l)
for (w, a, b) in positive_gap_indices(gap_idx)
for (α, β) in D.inds
g₊ = D.γ[α, β](w)
g₋ = D.γ[α, β](-w)
Aα = v' * D.coupling[α](s) * v
            Aβ = v' * D.coupling[β](s) * v
L₊ = sparse(a, b, Aβ[a+(b.-1)*l], l, l)
L₊d = sparse(a, b, Aα[a+(b.-1)*l], l, l)'
L₋ = sparse(b, a, Aβ[b+(a.-1)*l], l, l)
L₋d = sparse(b, a, Aα[b+(a.-1)*l], l, l)'
LL₊ = L₊d * L₊
LL₋ = L₋d * L₋
du .+= g₊ * (L₊ * ρ * L₊d - 0.5 * (LL₊ * ρ + ρ * LL₊)) + g₋ * (L₋ * ρ * L₋d - 0.5 * (LL₋ * ρ + ρ * LL₋))
Hₗₛ += D.S[α, β](w) * LL₊ + D.S[α, β](-w) * LL₋
end
end
a, b = zero_gap_indices(gap_idx)
for (α, β) in D.inds
g0 = D.γ[α, β](0)
Aα = v' * D.coupling[α](s) * v
Aβ = v' * D.coupling[β](s) * v
L = sparse(a, b, Aβ[a+(b.-1)*l], l, l)
Ld = sparse(a, b, Aα[a+(b.-1)*l], l, l)'
LL = Ld * L
du .+= g0 * (L * ρ * Ld - 0.5 * (LL * ρ + ρ * LL))
Hₗₛ += D.S[α, β](0) * LL
end
du .-= 1.0im * (Hₗₛ * ρ - ρ * Hₗₛ)
end
struct ConstHCorrelatedDaviesGenerator <:AbstractLiouvillian
"GapIndices for AME"
gap_idx
"System bath coupling operators"
coupling::AbstractCouplings
"Spectrum density"
γ::Any
"Lambshift spectral density"
S::Any
"Indices to iterate"
inds::Any
end
function (D::ConstHCorrelatedDaviesGenerator)(du, ρ, ::Any, s::Real)
l = size(du, 1)
Hₗₛ = spzeros(ComplexF64, l, l)
for (w, a, b) in positive_gap_indices(D.gap_idx)
for (α, β) in D.inds
g₊ = D.γ[α, β](w)
g₋ = D.γ[α, β](-w)
Aα = D.coupling[α](s)
Aβ = D.coupling[β](s)
L₊ = sparse(a, b, Aβ[a+(b.-1)*l], l, l)
L₊d = sparse(a, b, Aα[a+(b.-1)*l], l, l)'
L₋ = sparse(b, a, Aβ[b+(a.-1)*l], l, l)
L₋d = sparse(b, a, Aα[b+(a.-1)*l], l, l)'
LL₊ = L₊d * L₊
LL₋ = L₋d * L₋
du .+= g₊ * (L₊ * ρ * L₊d - 0.5 * (LL₊ * ρ + ρ * LL₊)) + g₋ * (L₋ * ρ * L₋d - 0.5 * (LL₋ * ρ + ρ * LL₋))
Hₗₛ += D.S[α, β](w) * LL₊ + D.S[α, β](-w) * LL₋
end
end
a, b = zero_gap_indices(D.gap_idx)
for (α, β) in D.inds
g0 = D.γ[α, β](0)
Aα = D.coupling[α](s)
Aβ = D.coupling[β](s)
L = sparse(a, b, Aβ[a+(b.-1)*l], l, l)
Ld = sparse(a, b, Aα[a+(b.-1)*l], l, l)'
LL = Ld * L
du .+= g0 * (L * ρ * Ld - 0.5 * (LL * ρ + ρ * LL))
Hₗₛ += D.S[α, β](0) * LL
end
du .-= 1.0im * (Hₗₛ * ρ - ρ * Hₗₛ)
end
function build_const_correlated_davies(couplings, gap_idx, γfun, Sfun, inds)
ConstHCorrelatedDaviesGenerator(gap_idx, couplings, γfun, Sfun, inds)
end
"""
$(TYPEDEF)
Defines the one-sided AME Liouvillian operator.
# Fields
$(FIELDS)
"""
struct OneSidedAMELiouvillian <: AbstractLiouvillian
"System bath coupling operators"
coupling::AbstractCouplings
"Spectrum density"
γ::Any
"Lambshift spectral density"
S::Any
"Indices to iterate"
inds::Any
end
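# Construction sketch mirroring the package tests (the mock functions `gamma`
# and `lamb` are assumed to be supplied by the caller; `_onesided_ame_demo` is
# illustrative only): a single σz coupling iterated over the (1, 1) index pair.
function _onesided_ame_demo(gamma, lamb)
    coupling = ConstantCouplings(["Z"], unit=:ħ)
    OneSidedAMELiouvillian(coupling, SingleFunctionMatrix(gamma),
        SingleFunctionMatrix(lamb), [(1, 1)])
end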
function (A::OneSidedAMELiouvillian)(dρ, ρ, g_idx::GapIndices, v, s::Real)
ω_ba = gap_matrix(g_idx)
for (α, β) in A.inds
γm = A.γ[α, β].(ω_ba)
sm = A.S[α, β].(ω_ba)
Aα = v' * A.coupling[α](s) * v
Λ = (0.5 * γm + 1.0im * sm) .* Aα
Aβ = v' * A.coupling[β](s) * v
𝐊₂ = Aβ * Λ * ρ - Λ * ρ * Aβ
𝐊₂ = 𝐊₂ + 𝐊₂'
axpy!(-1.0, 𝐊₂, dρ)
end
end
function (A::OneSidedAMELiouvillian)(du, u, g_idx::GapIndices, s::Real)
ω_ba = gap_matrix(g_idx)
for (α, β) in A.inds
γm = A.γ[α, β].(ω_ba)
sm = A.S[α, β].(ω_ba)
Aα = A.coupling[α](s)
Aβ = A.coupling[β](s)
Λ = (0.5 * γm + 1.0im * sm) .* Aα
𝐊₂ = Aβ * Λ * u - Λ * u * Aβ
𝐊₂ = 𝐊₂ + 𝐊₂'
axpy!(-1.0, 𝐊₂, du)
end
end
"""
$(TYPEDEF)
Defines a total Liouvillian to feed to the solver using the `DiffEqOperator`
interface. It contains both closed-system and open-system Liouvillians.
# Fields
$(FIELDS)
"""
struct DiffEqLiouvillian{diagonalization,adiabatic_frame}
"Hamiltonian"
H::AbstractHamiltonian
"Open system in eigenbasis"
opensys_eig::Vector{AbstractLiouvillian}
"Open system in normal basis"
opensys::Vector{AbstractLiouvillian}
"Levels to truncate"
lvl::Integer
"Number of digits to round for zero gap value"
digits::Integer
"Number of significant digits to round for gaps"
sigdigits::Integer
"Internal cache"
u_cache::AbstractMatrix
end
"""
$(SIGNATURES)
The constructor of the `DiffEqLiouvillian` type. `opensys_eig` is a list of
open-system Liouvillians that require diagonalization of the Hamiltonian.
`opensys` is a list of open-system Liouvillians that do not require
diagonalization of the Hamiltonian. `lvl` is the number of levels kept when
truncating to the energy eigenbasis, for methods that support truncation.
"""
function DiffEqLiouvillian(
H::AbstractHamiltonian,
opensys_eig,
opensys,
lvl;
digits::Integer=8,
sigdigits::Integer=8
)
# for DenseHamiltonian smaller than 10×10, do not truncate
if !(typeof(H) <: AbstractSparseHamiltonian) && (size(H, 1) <= 10)
lvl = size(H, 1)
u_cache = similar(get_cache(H))
else
lvl = size(H, 1) < lvl ? size(H, 1) : lvl
# for SparseHamiltonian, we will create dense matrix cache
# for the truncated subspace
u_cache = Matrix{eltype(H)}(undef, lvl, lvl)
end
diagonalization = isempty(opensys_eig) ? false : true
adiabatic_frame = typeof(H) <: AdiabaticFrameHamiltonian
DiffEqLiouvillian{diagonalization,adiabatic_frame}(H, opensys_eig, opensys, lvl, digits, sigdigits, u_cache)
end
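# Usage sketch (illustrative only): combine a single-qubit annealing Hamiltonian
# with a Davies generator `davies` built elsewhere into one DiffEq operator.
function _diffeq_liouvillian_demo(davies)
    H = DenseHamiltonian([(s) -> 1 - s, (s) -> s], [σx, σz])
    DiffEqLiouvillian(H, [davies], [], 2)
end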
# TODO: merge `build_diffeq_liouvillian` with `DiffEqLiouvillian`
function build_diffeq_liouvillian(H, opensys_eig, opensys, lvl; digits::Integer=8, sigdigits::Integer=8)
if isconstant(H)
# spzeros((a, b)) is not supported in Julia 1.6
DiffEqLiouvillian{false,true}(H, opensys_eig, opensys, lvl, digits, sigdigits, spzeros(size(H, 1), size(H, 2)))
else
DiffEqLiouvillian(H, opensys_eig, opensys, lvl, digits=digits, sigdigits=sigdigits)
end
end
function (Op::DiffEqLiouvillian{false,false})(du, u, p, t)
s = p(t)
Op.H(du, u, p, s)
for lv in Op.opensys
lv(du, u, p, t)
end
end
function update_cache!(cache, Op::DiffEqLiouvillian{false,false}, p, t)
update_cache!(cache, Op.H, p, p(t))
for lv in Op.opensys
update_cache!(cache, lv, p, t)
end
end
function update_vectorized_cache!(cache, Op::DiffEqLiouvillian{false,false}, p, t)
update_vectorized_cache!(cache, Op.H, p, p(t))
for lv in Op.opensys
update_vectorized_cache!(cache, lv, p, t)
end
end
function (Op::DiffEqLiouvillian{true,false})(du, u, p, t)
s = p(t)
w, v = haml_eigs(Op.H, s, Op.lvl)
# preprocessing the gaps and their indices
gap_ind = GapIndices(w, Op.digits, Op.sigdigits)
# rotate the density matrix into eigen basis
ρ = v' * u * v
H = Diagonal(w)
Op.u_cache .= -1.0im * (H * ρ - ρ * H)
for lv in Op.opensys_eig
lv(Op.u_cache, ρ, gap_ind, v, s)
end
# rotate the density matrix back into computational basis
mul!(du, v, Op.u_cache * v')
for lv in Op.opensys
lv(du, u, p, t)
end
end
function update_cache!(cache, Op::DiffEqLiouvillian{true,false}, p, t::Real)
s = p(t)
w, v = haml_eigs(Op.H, s, Op.lvl)
# preprocessing the gaps and their indices
gap_ind = GapIndices(w, Op.digits, Op.sigdigits)
    # initialize the cache as the Hamiltonian in the eigenbasis
fill!(Op.u_cache, 0.0)
for i = 1:length(w)
@inbounds Op.u_cache[i, i] = -1.0im * w[i]
end
for lv in Op.opensys_eig
update_cache!(Op.u_cache, lv, gap_ind, v, s)
end
mul!(cache, v, Op.u_cache * v')
for lv in Op.opensys
update_cache!(cache, lv, p, t)
end
end
function (Op::DiffEqLiouvillian{true,true})(du, u, p, t)
# This function is for the Liouville operators in adiabatic frame
s = p(t)
H = Op.H(p.tf, s)
w = diag(H)
gap_ind = GapIndices(w, Op.digits, Op.sigdigits)
du .= -1.0im * (H * u - u * H)
for lv in Op.opensys_eig
lv(du, u, gap_ind, s)
end
end
function (Op::DiffEqLiouvillian{false,true})(du, u, p, t)
s = p(t)
H = Op.H(p.tf, s)
du .= -1.0im * (H * u - u * H)
for lv in Op.opensys
lv(du, u, nothing, s)
end
end
function update_cache!(cache, Op::DiffEqLiouvillian{false,true}, p, t::Real)
s = p(t)
H = Op.H(p.tf, s)
cache .= -1.0im * H
for lv in Op.opensys
update_cache!(cache, lv, p, t)
end
end
"""
$(TYPEDEF)
`ULELiouvillian` defines the Liouvillian operator corresponding to the universal Lindblad equation.
# Fields
$(FIELDS)
"""
struct ULELiouvillian <: AbstractLiouvillian
"""Lindblad kernels"""
kernels::Any
"""close system unitary"""
unitary::Any
"""absolute error tolerance for integration"""
atol::Float64
"""relative error tolerance for integration"""
rtol::Float64
"""cache matrix for inplace unitary"""
Ut::Union{Matrix,MMatrix}
"""cache matrix for inplace unitary"""
Uτ::Union{Matrix,MMatrix}
"""cache matrix for integration"""
Λ::Union{Matrix,MMatrix}
"""cache matrix for Lindblad operator"""
LO::Union{Matrix,MMatrix}
"""tf minus coarse grain time scale"""
Ta::Real
end
function ULELiouvillian(kernels, U, Ta, atol, rtol)
m_size = size(kernels[1][2])
Λ = m_size[1] <= 10 ? zeros(MMatrix{m_size[1],m_size[2],ComplexF64}) :
zeros(ComplexF64, m_size[1], m_size[2])
unitary = isinplace(U) ? U.func : (cache, t) -> cache .= U(t)
ULELiouvillian(kernels, unitary, atol, rtol, similar(Λ),
similar(Λ), similar(Λ), Λ, Ta)
end
function (L::ULELiouvillian)(du, u, p, t)
s = p(t)
LO = fill!(L.LO, 0.0)
for (inds, coupling, cfun) in L.kernels
for (i, j) in inds
function integrand(cache, x)
L.unitary(L.Ut, t)
L.unitary(L.Uτ, x)
L.Ut .= L.Ut * L.Uτ'
mul!(L.Uτ, coupling[j](s), L.Ut')
                # The 5-argument mul! can produce NaN when it
# should not. May switch back to it when this is fixed.
# mul!(cache, L.Ut, L.Uτ, cfun[i, j](t, x), 0)
mul!(cache, L.Ut, L.Uτ)
lmul!(cfun[i, j](t, x), cache)
end
quadgk!(
integrand,
L.Λ,
max(0.0, t - L.Ta),
min(t + L.Ta, p.tf),
rtol=L.rtol,
atol=L.atol,
)
axpy!(1.0, L.Λ, LO)
end
end
du .+= LO * u * LO' - 0.5 * (LO' * LO * u + u * LO' * LO)
end
"""
$(TYPEDEF)
The Liouvillian operator in Lindblad form.
"""
struct LindbladLiouvillian <: AbstractLiouvillian
"1-d array of Lindblad rates"
γ::Vector
"1-d array of Lindblad operataors"
L::Vector
"size"
size::Tuple
end
Base.length(lind::LindbladLiouvillian) = length(lind.γ)
function LindbladLiouvillian(L::Vector{Lindblad})
if any((x) -> size(x) != size(L[1]), L)
throw(ArgumentError("All Lindblad operators should have the same size."))
end
LindbladLiouvillian([lind.γ for lind in L], [lind.L for lind in L], size(L[1]))
end
function (Lind::LindbladLiouvillian)(du, u, p, t)
s = p(t)
for (γfun, Lfun) in zip(Lind.γ, Lind.L)
L = Lfun(s)
γ = γfun(s)
du .+= γ * (L * u * L' - 0.5 * ( L' * L * u + u * L' * L))
end
end
function update_cache!(cache, lind::LindbladLiouvillian, p, t::Real)
s = p(t)
for (γfun, Lfun) in zip(lind.γ, lind.L)
L = Lfun(s)
γ = γfun(s)
cache .-= 0.5 * γ * L' * L
end
end
"""
$(TYPEDEF)
`GapIndices` contains unique gaps and the corresponding indices. The information is used to calculate the Davies generator.
# Fields
$(FIELDS)
"""
struct GapIndices
"Energies"
w::AbstractVector{Real}
"Unique positive gaps"
uniq_w::Vector{Real}
"a indices for the corresponding gaps in uniq_w"
uniq_a
"b indices for the corresponding gaps in uniq_w"
uniq_b
"a indices for the 0 gap"
a0::Vector{Int}
"b indices for the 0 gap"
b0::Vector{Int}
end
function GapIndices(w::AbstractVector{T}, digits::Integer, sigdigits::Integer) where T<:Real
l = length(w)
gaps = Float64[]
a_idx = Vector{Int}[]
b_idx = Vector{Int}[]
a0_idx = Int[]
b0_idx = Int[]
for i in 1:l-1
for j in i+1:l
gap = w[j] - w[i]
if abs(gap) ≤ 10.0^(-digits)
push!(a0_idx, i)
push!(b0_idx, j)
push!(a0_idx, j)
push!(b0_idx, i)
else
gap = round(gap, sigdigits=sigdigits)
idx = searchsortedfirst(gaps, gap)
if idx == length(gaps) + 1
push!(gaps, gap)
push!(a_idx, [i])
push!(b_idx, [j])
elseif gaps[idx] == gap
push!(a_idx[idx], i)
push!(b_idx[idx], j)
else
insert!(gaps, idx, gap)
insert!(a_idx, idx, [i])
insert!(b_idx, idx, [j])
end
end
end
end
append!(a0_idx, 1:l)
append!(b0_idx, 1:l)
GapIndices(w, gaps, a_idx, b_idx, a0_idx, b0_idx)
end
positive_gap_indices(G::GapIndices) = zip(G.uniq_w, G.uniq_a, G.uniq_b)
zero_gap_indices(G::GapIndices) = G.a0, G.b0
gap_matrix(G::GapIndices) = G.w' .- G.w
get_lvl(G::GapIndices) = length(G.w)
get_gaps_num(G::GapIndices) = 2*length(G.uniq_w)+1
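# A small worked example (illustrative only; `_gap_indices_demo` is not part of
# the package API): for energies [0.0, 1.0] the only positive gap is 1.0 with
# index pair (1, 2), and the zero-gap indices are the diagonal pairs (1, 1), (2, 2).
function _gap_indices_demo()
    g = GapIndices([0.0, 1.0], 8, 8)
    collect(positive_gap_indices(g)), zero_gap_indices(g)
end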
# TODO: merge `build_correlation` function with `GapIndices`
"""
$(SIGNATURES)
Build `GapIndices` type from a list of energies.
...
# Arguments
- `w::AbstractVector`: energies of the Hamiltonian.
- `digits::Integer`: the number of digits to keep when checking if a gap is zero.
- `sigdigits::Integer`: the number of significant digits used when rounding non-zero gaps for comparison.
- `cutoff_freq::Real`: gaps that are larger than the cutoff frequency (in the 2π frequency unit) are neglected.
- `truncate_lvl::Integer`: energy levels that are higher than the `truncate_lvl` are neglected.
...
"""
function build_gap_indices(w::AbstractVector, digits::Integer, sigdigits::Integer, cutoff_freq::Real, truncate_lvl::Integer)
l = truncate_lvl
gaps = Float64[]
a_idx = Vector{Int}[]
b_idx = Vector{Int}[]
a0_idx = Int[]
b0_idx = Int[]
for i in 1:l-1
for j in i+1:l
gap = w[j] - w[i]
if abs(gap) ≤ 10.0^(-digits)
push!(a0_idx, i)
push!(b0_idx, j)
push!(a0_idx, j)
push!(b0_idx, i)
elseif abs(gap) ≤ cutoff_freq
gap = round(gap, sigdigits=sigdigits)
idx = searchsortedfirst(gaps, gap)
if idx == length(gaps) + 1
push!(gaps, gap)
push!(a_idx, [i])
push!(b_idx, [j])
elseif gaps[idx] == gap
push!(a_idx[idx], i)
push!(b_idx[idx], j)
else
insert!(gaps, idx, gap)
insert!(a_idx, idx, [i])
insert!(b_idx, idx, [j])
end
end
end
end
append!(a0_idx, 1:l)
append!(b0_idx, 1:l)
GapIndices(w, gaps, a_idx, b_idx, a0_idx, b0_idx)
end
RedfieldOperator(H, R) = OpenSysOp(H, R, size(H, 1))
"""
$(TYPEDEF)
Defines RedfieldLiouvillian.
# Fields
$(FIELDS)
"""
struct RedfieldLiouvillian <: AbstractLiouvillian
"Redfield kernels"
kernels::Any
"close system unitary"
unitary::Any
"absolute error tolerance for integration"
atol::Float64
"relative error tolerance for integration"
rtol::Float64
"cache matrix for inplace unitary"
Ut::Union{Matrix,MMatrix}
"cache matrix for inplace unitary"
Uτ::Union{Matrix,MMatrix}
"cache matrix for integration"
Λ::Union{Matrix,MMatrix}
"tf minus coarse grain time scale"
Ta::Real
end
function RedfieldLiouvillian(kernels, U, Ta, atol, rtol)
m_size = size(kernels[1][2])
Λ = m_size[1] <= 10 ? zeros(MMatrix{m_size[1],m_size[2],ComplexF64}) :
zeros(ComplexF64, m_size[1], m_size[2])
    # if the unitary does not support in-place operation, assign a pseudo in-place
# function
unitary = isinplace(U) ? U.func : (cache, t) -> cache .= U(t)
RedfieldLiouvillian(kernels, unitary, atol, rtol, similar(Λ),
similar(Λ), Λ, Ta)
end
function (R::RedfieldLiouvillian)(du, u, p, t::Real)
s = p(t)
for (inds, coupling, cfun) in R.kernels
for (i, j) in inds
function integrand(cache, x)
R.unitary(R.Ut, t)
R.unitary(R.Uτ, x)
R.Ut .= R.Ut * R.Uτ'
mul!(R.Uτ, coupling[j](p(x)), R.Ut')
                # The 5-argument mul! can produce NaN when it
# should not. May switch back to it when this is fixed.
# mul!(cache, R.Ut, R.Uτ, cfun[i, j](t, x), 0)
mul!(cache, R.Ut, R.Uτ)
lmul!(cfun[i, j](t, x), cache)
end
quadgk!(
integrand,
R.Λ,
max(0.0, t - R.Ta),
t,
rtol=R.rtol,
atol=R.atol,
)
SS = coupling[i](s)
𝐊₂ = SS * R.Λ * u - R.Λ * u * SS
𝐊₂ = 𝐊₂ + 𝐊₂'
axpy!(-1.0, 𝐊₂, du)
end
end
end
function update_vectorized_cache!(cache, R::RedfieldLiouvillian, p, t::Real)
iden = one(R.Λ)
s = p(t)
for (inds, coupling, cfun) in R.kernels
for (i, j) in inds
function integrand(cache, x)
R.unitary(R.Ut, t)
R.unitary(R.Uτ, x)
R.Ut .= R.Ut * R.Uτ'
mul!(R.Uτ, coupling[j](p(x)), R.Ut')
                # The 5-argument mul! can produce NaN when it
# should not. May switch back to it when this is fixed.
# mul!(cache, R.Ut, R.Uτ, cfun[i, j](t, x), 0)
mul!(cache, R.Ut, R.Uτ)
lmul!(cfun[i, j](t, x), cache)
end
quadgk!(
integrand,
R.Λ,
max(0.0, t - R.Ta),
t,
rtol=R.rtol,
atol=R.atol,
)
SS = coupling[i](s)
SΛ = SS * R.Λ
cache .-= (
iden ⊗ SΛ + conj(SΛ) ⊗ iden - transpose(SS) ⊗ R.Λ -
conj(R.Λ) ⊗ SS
)
end
end
end
"""
$(TYPEDEF)
Defines a fluctuator ensemble controller
# Fields
$(FIELDS)
"""
mutable struct FluctuatorLiouvillian <: AbstractLiouvillian
"system-bath coupling operator"
coupling::Any
"waitting time distribution for every fluctuators"
dist::Any
"cache for each fluctuator value"
b0::Any
"index of the fluctuator to be flipped next"
next_idx::Any
"time interval for next flip event"
next_τ::Any
"noise value"
n::Any
end
function (F::FluctuatorLiouvillian)(du, u, p, t)
s = p(t)
H = sum(F.n .* F.coupling(s))
du .= -1.0im * (H*u - u*H)
# gemm! does not work for static matrix
#gemm!('N', 'N', -1.0im, H, u, 1.0 + 0.0im, du)
#gemm!('N', 'N', 1.0im, u, H, 1.0 + 0.0im, du)
end
function update_cache!(cache, F::FluctuatorLiouvillian, p, t)
s = p(t)
cache .+= -1.0im * sum(F.n .* F.coupling(s))
end
function update_vectorized_cache!(cache, F::FluctuatorLiouvillian, p, t)
s = p(t)
hmat = sum(F.n .* F.coupling(s))
iden = one(hmat)
cache .+= 1.0im * (transpose(hmat) ⊗ iden - iden ⊗ hmat)
end
function next_state!(F::FluctuatorLiouvillian)
next_τ, next_idx = findmin(rand(F.dist, size(F.b0, 2)))
F.next_τ = next_τ
F.next_idx = next_idx
F.b0[next_idx] *= -1
nothing
end
function reset!(F::FluctuatorLiouvillian, initializer)
F.b0 = abs.(F.b0) .* initializer(length(F.dist), size(F.b0, 2))
F.n = sum(F.b0, dims = 1)[:]
next_state!(F)
end
function lind_jump(lind::LindbladLiouvillian, u, p, s::Real)
prob = Float64[]
ops = Vector{Matrix{ComplexF64}}()
for (γfun, Lfun) in zip(lind.γ, lind.L)
L = Lfun(s)
γ = γfun(s)
push!(prob, γ * norm(L * u)^2)
push!(ops, L)
end
sample(ops, Weights(prob))
end
function ame_jump(D::DaviesGenerator, u, gap_idx::GapIndices, v, s)
l = get_lvl(gap_idx)
prob_dim = get_gaps_num(gap_idx) * length(D.coupling)
prob = Array{Float64,1}(undef, prob_dim)
tag = Array{Tuple{Int,Vector{Int},Vector{Int},Float64},1}(undef, prob_dim)
idx = 1
ϕb = v' * u
σab = [v' * op * v for op in D.coupling(s)]
for (w, a, b) in positive_gap_indices(gap_idx)
g₊ = D.γ(w)
g₋ = D.γ(-w)
for i in eachindex(σab)
L₊ = sparse(a, b, σab[i][a+(b.-1)*l], l, l)
L₋ = sparse(b, a, σab[i][b+(a.-1)*l], l, l)
ϕ₊ = L₊ * ϕb
prob[idx] = g₊ * real(ϕ₊' * ϕ₊)
tag[idx] = (i, a, b, sqrt(g₊))
idx += 1
ϕ₋ = L₋ * ϕb
prob[idx] = g₋ * real(ϕ₋' * ϕ₋)
tag[idx] = (i, b, a, sqrt(g₋))
idx += 1
end
end
g0 = D.γ(0)
a, b = zero_gap_indices(gap_idx)
for i in eachindex(σab)
L = sparse(a, b, σab[i][a+(b.-1)*l], l, l)
ϕ = L * ϕb
prob[idx] = real(g0 * (ϕ' * ϕ))
tag[idx] = (i, a, b, sqrt(g0))
idx += 1
end
choice = sample(tag, Weights(prob))
L = choice[4] * sparse(choice[2], choice[3], σab[choice[1]][choice[2]+(choice[3].-1)*l], l, l)
v * L * v'
end
function ame_jump(D::ConstDaviesGenerator, u, ::Any, ::Any)
prob = [real(u' * L' * L * u) for L in D.Linds]
sample(D.Linds, Weights(prob))
end
# TODO: Better implementation of the ame_jump function
"""
$(SIGNATURES)
Calculate the jump operator for the `DiffEqLiouvillian` at time `t`.
"""
function lindblad_jump(Op::DiffEqLiouvillian{true,false}, u, p, t::Real)
s = p(t)
w, v = haml_eigs(Op.H, s, Op.lvl)
gap_idx = GapIndices(w, Op.digits, Op.sigdigits)
resample([ame_jump(x, u, gap_idx, v, s) for x in Op.opensys_eig], u)
end
function lindblad_jump(Op::DiffEqLiouvillian{false,false}, u, p, t::Real)
s = p(t)
resample([lind_jump(x, u, p, s) for x in Op.opensys], u)
end
function lindblad_jump(Op::DiffEqLiouvillian{false,true}, u, p, t)
s = p(t)
resample([ame_jump(x, u, p, s) for x in Op.opensys], u)
end
function resample(Ls, u)
if length(Ls) == 1
Ls[1]
else
prob = [norm(L * u) for L in Ls]
sample(Ls, Weights(prob))
end
end
"""
$(TYPEDEF)
Object for a projected low level system. The projection is only valid for real Hamiltonians.
# Fields
$(FIELDS)
"""
struct ProjectedSystem
"Time grid (unitless) for projection"
s::AbstractArray{Float64,1}
"Energy values for different levels"
ev::Array{Vector{Float64},1}
"Geometric terms"
dθ::Array{Vector{Float64},1}
"Projected system bath interaction operators"
op::Array{Array{Matrix{Float64},1},1}
"Direction for the calculation"
direct::Symbol
"Number of leves to keep"
lvl::Int
"Energy eigenstates at the final time"
ref::Array{Float64,2}
end
function ProjectedSystem(s, lvl, direction, ref)
len = length(s)
ev = Vector{Vector{Float64}}()
dθ = Vector{Vector{Float64}}()
op = Vector{Vector{Matrix{Float64}}}()
ProjectedSystem(s, ev, dθ, op, direction, lvl, ref)
end
"""
$(SIGNATURES)
Project a Hamiltonian `H` to the lowest `lvl` level subspace. `s_axis` is the grid of (unitless) times on which the projection is calculated. `dH` is the derivative of the Hamiltonian. `coupling` is the system-bath interaction operator. Both `coupling` and `dH` should be callable with the annealing parameter `s`. `digits` sets the number of digits used to decide whether two energy levels are degenerate. `direction`, which can be either `:forward` or `:backward`, controls whether to start the calculation from the starting point or the end point. Currently this function only supports real Hamiltonians with non-degenerate energies.
"""
function project_to_lowlevel(
H::AbstractHamiltonian{T},
s_axis::AbstractArray{S,1},
coupling,
dH;
lvl=2,
digits::Integer=6,
direction=:forward,
refs=zeros(0, 0),
) where {T <: Real,S <: Real}
if direction == :forward
_s_axis = s_axis
update_rule = push!
elseif direction == :backward
_s_axis = reverse(s_axis)
update_rule = pushfirst!
else
throw(ArgumentError("direction $direction is not supported."))
end
if isempty(refs)
w, v = haml_eigs(H, _s_axis[1], lvl)
# this is needed for StaticArrays
w = w[1:lvl]
v = v[:, 1:lvl]
d_inds = find_degenerate(w, digits=digits)
if !isempty(d_inds)
@warn "Degenerate energy levels detected at" _s_axis[1]
@warn "With" d_inds
end
refs = Array(v)
projected_system = ProjectedSystem(s_axis, lvl, direction, refs)
update_params!(projected_system, w, dH(_s_axis[1]), coupling(_s_axis[1]), update_rule, d_inds)
_s_axis = _s_axis[2:end]
else
projected_system = ProjectedSystem(s_axis, lvl, direction, refs)
end
for s in _s_axis
w, v = haml_eigs(H, s, lvl)
# this is needed for StaticArrays
w = w[1:lvl]
v = v[:, 1:lvl]
d_inds = find_degenerate(w, digits=digits)
if !isempty(d_inds)
@warn "Possible degenerate detected at" s
@warn "With levels" d_inds
end
update_refs!(projected_system, v, lvl, d_inds)
update_params!(projected_system, w, dH(s), coupling(s), update_rule, d_inds)
end
projected_system
end
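# Usage sketch (mirrors the pattern in the package tests; `_project_demo` is
# illustrative only): project a single-qubit annealing Hamiltonian onto its two
# lowest levels with a Z coupling.
function _project_demo()
    H = DenseHamiltonian([(s) -> 1 - s, (s) -> s], [σx, σz])
    dH(s) = -real(σx) + real(σz)
    coupling = ConstantCouplings(["Z"])
    project_to_lowlevel(H, [0.0, 0.5, 1.0], coupling, dH)
end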
function project_to_lowlevel(
H::AbstractHamiltonian{T},
s_axis::AbstractArray{S,1},
coupling,
dH;
lvl=2,
digits::Integer=6,
direction=:forward,
refs=zeros(0, 0),
) where {T <: Complex,S <: Real}
@warn "The projection method only works with real Hamitonians. Convert the complex Hamiltonian to real one."
H_real = convert(Real, H)
project_to_lowlevel(H_real, s_axis, coupling, dH, lvl=lvl, digits=digits, direction=direction, refs=refs)
end
function update_refs!(refs, v, lvl, d_inds)
# update reference vectors for degenerate subspace
if !isempty(d_inds)
for inds in d_inds
v[:, inds] = (v[:, inds] / refs[:, inds]) * v[:, inds]
end
flat_d_inds = reduce(vcat, d_inds)
else
flat_d_inds = []
end
# update reference vectors for non-degenerate states
for i in (k for k in 1:lvl if !(k in flat_d_inds))
#for i in 1:lvl
if v[:, i]' * refs[:, i] < 0
refs[:, i] = -v[:, i]
else
refs[:, i] = v[:, i]
end
end
end
update_refs!(sys::ProjectedSystem, v, lvl, d_inds) = update_refs!(sys.ref, v, lvl, d_inds)
function update_params!(sys::ProjectedSystem, w, dH, interaction, update_rule, d_inds)
# update energies
E = w / 2 / π
update_rule(sys.ev, E)
# update dθ
dθ = Vector{Float64}()
for j = 1:sys.lvl
for i = (j + 1):sys.lvl
if any((x) -> issubset([i,j], x), d_inds)
# for degenerate levels, push in 0 for now
push!(dθ, 0.0)
else
vi = @view sys.ref[:, i]
vj = @view sys.ref[:, j]
t = vi' * dH * vj / (E[j] - E[i])
push!(dθ, t)
end
end
end
update_rule(sys.dθ, dθ)
# update projected interaction operators
op = [sys.ref' * x * sys.ref for x in interaction]
update_rule(sys.op, op)
end
"""
get_dθ(sys::ProjectedSystem, i=1, j=2)
Get the geometric terms between i, j energy levels from `ProjectedSystem`.
"""
function get_dθ(sys::ProjectedSystem, i=1, j=2)
if j > i
idx = (2 * sys.lvl - i) * (i - 1) ÷ 2 + (j - i)
return [-x[idx] for x in sys.dθ]
elseif j < i
idx = (2 * sys.lvl - j) * (j - 1) ÷ 2 + (i - j)
return [x[idx] for x in sys.dθ]
else
error("No diagonal element for dθ.")
end
end
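# Index-mapping sketch (illustrative only, assuming a two-level projection):
# the (1, 2) geometric term is stored at index 1, so `get_dθ(sys, 1, 2)` returns
# the negated stored values.
function _get_dθ_demo(sys)
    get_dθ(sys, 1, 2) == [-x[1] for x in sys.dθ]
end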
"""
    concatenate(args...)
Concatenate multiple `ProjectedSystem` objects into a single one. The arguments need to be in time order. The `ref` field of the new object will have the same value as that of the last input argument.
"""
function concatenate(args...)
s = vcat([sys.s for sys in args]...)
ev = vcat([sys.ev for sys in args]...)
dθ = vcat([sys.dθ for sys in args]...)
op = vcat([sys.op for sys in args]...)
ref = args[end].ref
lvl = args[end].lvl
    ProjectedSystem(s, ev, dθ, op, args[end].direct, lvl, ref)
end
using OpenQuantumBase, Test
struct T_OPENSYS <: OpenQuantumBase.AbstractLiouvillian end
struct T_COUPLINGS <: OpenQuantumBase.AbstractCouplings end
struct T_BATH <: OpenQuantumBase.AbstractBath end
H = DenseHamiltonian([(x) -> x], [σz])
u0 = PauliVec[1][1]
annealing = Annealing(H, u0)
@test annealing.H == H
@test annealing.annealing_parameter(10, 5) == 0.5
evo = Evolution(H, u0)
@test evo.H == H
@test evo.annealing_parameter(10, 5) == 0.5
ode_params = ODEParams(T_OPENSYS(), 10, (tf, t)->t / tf)
@test typeof(ode_params.L) == T_OPENSYS
@test ode_params.tf == 10
@test ode_params(5) == 0.5
coupling = ConstantCouplings(["Z"])
inter = Interaction(coupling, T_BATH())
inter_set = InteractionSet(inter, inter)
@test inter_set[1] == inter
annealing = Annealing(H, u0, coupling = coupling, bath = T_BATH())
@test annealing.interactions[1].coupling == coupling
@test typeof(annealing.interactions[1].bath) <: T_BATH
@test_throws ArgumentError Annealing(H, u0, coupling = coupling, bath = T_BATH(), interactions = inter_set)
using OpenQuantumBase, LinearAlgebra, Test
num_qubits = 3
H₃ = random_ising(num_qubits)
@test size(H₃) == (2^num_qubits, 2^num_qubits)
@test isdiag(H₃)
@test !issparse(H₃)
@test issparse(random_ising(num_qubits, sp=true))
@test alt_sec_chain(1,0.5,1,3) == σz⊗σz⊗σi + 0.5*σi⊗σz⊗σz
using OpenQuantumBase, Test
x = range(0,stop=10,length=100)
y1 = Array(x) + 1.0im*Array(x)
y2 = (10.0+10.0im) .- Array(x)
inter_complex = construct_interpolations(x, y1)
inter_real = construct_interpolations(x, 10 .- collect(x))
gridded_2_range_inter = construct_interpolations(collect(x), y1)
test_x = range(0,stop=10,length=50)
exp_complex = collect(test_x) + 1.0im*collect(test_x)
res_complex = inter_complex.(test_x)
exp_real = (10.0) .- collect(test_x)
res_real = inter_real.(test_x)
@test res_complex ≈ exp_complex
@test res_real ≈ exp_real
@test isa(inter_complex(-1), Complex)
@test inter_complex(-1) ≈ 0
@test inter_real(11) ≈ 0
@test gridded_2_range_inter.(test_x) ≈ exp_complex
@test gradient(inter_real, 2.3) ≈ -1
@test gradient(inter_real, [0.13, 0.21]) ≈ [-1, -1]
# Test for multi-dimensional arrays
y_array = transpose(hcat(y1, y2))
y_array_itp = construct_interpolations(x, y_array, order=1)
@test exp_complex ≈ y_array_itp(1, test_x)
@test y_array_itp([1, 2], -1) ≈ [0, 0]
# Test for extrapolation
x = range(1.0,stop=10.0)
y = 10 .- collect(x)
y_c = x + 1.0im*y
eitp = construct_interpolations(x, Array(x); extrapolation="line")
@test isapprox(eitp(0.0),0.0, atol=1e-8)
eitp = construct_interpolations(x, y_c; extrapolation="line")
@test imag(eitp(0)) ≈ 10
eitp = construct_interpolations(x, y_c; extrapolation="flat")
@test eitp(0) == 1.0 + 9.0im
x = [1.0, 3, 4, 5, 6, 7, 8, 9, 10]
@test_logs (:warn,"The grid is not uniform. Using grided linear interpolation.") construct_interpolations(x, 10 .- x, method="bspline")
using OpenQuantumBase, Test
H = DenseHamiltonian([(s) -> 1 - s, (s) -> s], [σx, σz])
dH(s) = (-real(σx) + real(σz))
coupling = ConstantCouplings(["Z"])
@test_logs (:warn, "The projection method only works with real Hamitonians. Convert the complex Hamiltonian to real one.") project_to_lowlevel(H, [0.0, 0.5, 1.0], coupling, dH)
t_obj = project_to_lowlevel(H, [0.0, 0.5, 1.0], coupling, dH)
@test t_obj.ev ≈ [[-1.0, 1.0], [-0.707107, 0.707107], [-1.0, 1.0]] atol = 1e-6
@test t_obj.dθ ≈ [[0.5], [1.0], [0.5]]
@test get_dθ(t_obj) ≈ -[0.5, 1.0, 0.5]
@test t_obj.op ./ 2 ./ π ≈ [
[[0 -1.0; -1.0 0]],
[[-0.707107 -0.707107; -0.707107 0.707107]],
[[-1.0 0.0; 0.0 1.0]],
] atol = 1e-6
# An exactly solvable example
H = DenseHamiltonian([(s)->cos(π*s/2), (s)->sin(π*s/2)], [σx, σz])
dH(s) = π*(-sin(π*s/2)*σx+cos(π*s/2)*σz)/2
coupling = ConstantCouplings(["Z"])
t_obj = project_to_lowlevel(H, 0:0.01:1, coupling, dH)
@test all((x)->isapprox(x, [-1, 1]), t_obj.ev)
@test all((x)->isapprox(x[1], π/4), t_obj.dθ)
t_obj = project_to_lowlevel(H, 0:0.01:1, coupling, dH, direction=:backward)
@test all((x)->isapprox(x, [-1, 1]), t_obj.ev)
@test all((x)->isapprox(x[1], π/4), t_obj.dθ)
#TODO: the following tests randomly fail on Julia 1.7, need to find a permanent fix
#= H = SparseHamiltonian(
[(s) -> 1 - s, (s) -> s],
[-standard_driver(2, sp=true) / 2, (spσz ⊗ spσi - 0.1spσz ⊗ spσz) / 2],
)
dH(s) = standard_driver(2, sp=true) / 2 + (spσz ⊗ spσi - 0.1spσz ⊗ spσz) / 2
coupling = ConstantCouplings(["ZI", "IZ"])
t_obj = project_to_lowlevel(H, [0.0, 0.5, 1.0], coupling, dH)
@test t_obj.ev ≈ [
[-1.0, 0.0],
[-0.6044361719689455, -0.10443617196894575],
[-0.55, -0.45],
] atol = 1e-6 =#
using SafeTestsets
@time begin
@time @safetestset "Base Utilities" begin
include("utilities/base_util.jl")
end
@time @safetestset "Math Utilities" begin
include("utilities/math_util.jl")
end
@time @safetestset "Multi-qubits Hamiltonian Construction" begin
include("utilities/multi_qubits_construction.jl")
end
@time @safetestset "Development tools" begin
include("dev_tools.jl")
end
@time @safetestset "Displays" begin
include("utilities/display.jl")
end
@time @safetestset "Interpolations" begin
include("interpolations.jl")
end
@time @safetestset "Dense Hamiltonian" begin
include("hamiltonian/dense_hamiltonian.jl")
end
@time @safetestset "Sparse Hamiltonian" begin
include("hamiltonian/sparse_hamiltonian.jl")
end
@time @safetestset "Constant Hamiltonian" begin
include("hamiltonian/constant_hamiltonian.jl")
end
@time @safetestset "Adiabatic Frame Hamiltonian" begin
include("hamiltonian/adiabatic_frame_hamiltonian.jl")
end
@time @safetestset "Interpolation Hamiltonian" begin
include("hamiltonian/interp_hamiltonian.jl")
end
@time @safetestset "Custom Hamiltonian" begin
include("hamiltonian/custom_hamiltonian.jl")
end
@time @safetestset "Coupling" begin
include("coupling_bath_interaction/coupling.jl")
end
@time @safetestset "Bath" begin
include("coupling_bath_interaction/bath.jl")
end
@time @safetestset "Interactions" begin
include("coupling_bath_interaction/interaction.jl")
end
@time @safetestset "Open System utilities" begin
include("opensys/util.jl")
end
@time @safetestset "Davies and AME" begin
include("opensys/davies.jl")
end
@time @safetestset "Redfield" begin
include("opensys/redfield.jl")
end
@time @safetestset "Lindblad" begin
include("opensys/lindblad.jl")
end
@time @safetestset "Stochastic" begin
include("opensys/stochastic.jl")
end
@time @safetestset "Annealing/Evolution" begin
include("annealing.jl")
end
@time @safetestset "Projections" begin
include("projection.jl")
end
end
using OpenQuantumBase, Test
# test suite for Ohmic bath
η = 1e-4
ωc = 8 * pi
β = 1 / 2.23
bath = OpenQuantumBase.OhmicBath(η, ωc, β)
cfun_test = OpenQuantumBase.build_correlation(bath)
γfun_test = OpenQuantumBase.build_spectrum(bath)
@test correlation(0.02, 0.01, bath) == correlation(0.01, bath)
@test cfun_test[1, 1](0.02, 0.01) == correlation(0.01, bath)
@test γ(0.0, bath) == 2 * pi * η / β
@test γfun_test(0.0) == 2 * pi * η / β
@test spectrum(0.0, bath) == 2 * pi * η / β
@test S(0.0, bath) ≈ OpenQuantumBase.lambshift_cpvagk(0.0, (x)->γ(x, bath)) atol = 1e-4 rtol = 1e-4
# the following test is kept as a consistency check
@test OpenQuantumBase.lambshift_cpvagk(0.0, (x)->γ(x, bath)) ≈ -0.0025132734115775254 rtol = 1e-4
η = 1e-4;fc = 4;T = 16
bath = Ohmic(η, fc, T)
τsb, err_τsb = τ_SB(bath)
τb, err_τb = τ_B(bath, 100, τsb)
@test τsb ≈ 284.61181493 atol = 1e-6 rtol = 1e-6
@test τb ≈ 0.07638653 atol = 1e-6 rtol = 1e-6
τc, err_τc = coarse_grain_timescale(bath, 100)
@test τc ≈ sqrt(τsb * τb / 5) atol = 1e-6 rtol = 1e-6
# test suite for CustomBath
cfun = (t) -> exp(-abs(t))
sfun = (ω) -> 2 / (1 + ω^2)
bath = CustomBath(correlation=cfun, spectrum=sfun)
@test correlation(1, bath) ≈ exp(-1)
@test correlation(2, 1, bath) == correlation(1, bath)
@test spectrum(0, bath) ≈ 2
@test γ(0, bath) ≈ 2
@test S(0, bath) == 0
# test suite for ensemble fluctuators
rtn = OpenQuantumBase.SymetricRTN(2.0, 2.0)
@test 4 * exp(-2 * 3) == correlation(3, rtn)
@test 2 * 4 * 2 / (4 + 4) == spectrum(2, rtn)
ensemble_rtn = EnsembleFluctuator([1.0, 2.0], [2.0, 1.0])
@test exp(-2 * 3) + 4 * exp(-3) == correlation(3, ensemble_rtn)
@test 2 * 2 / (9 + 4) + 2 * 4 / (9 + 1) == spectrum(3, ensemble_rtn)
# test suite for HybridOhmic bath
η = 0.01; W = 5; fc = 4; T = 12.5
bath = HybridOhmic(W, η, fc, T)
@test_broken spectrum(0.0, bath) ≈ 1.7045312175373621
@test S(0.0, bath) ≈ OpenQuantumBase.lambshift_cpvagk(0.0, (x)->γ(x, bath)) atol = 1e-4 rtol = 1e-4
# the following test is kept as a consistency check
@test_broken OpenQuantumBase.lambshift_cpvagk(0.0, (x)->γ(x, bath)) ≈ -0.2872777516270734
# test suite for correlated bath
coupling = ConstantCouplings([σ₊, σ₋], unit=:ħ)
γfun= (w) -> w >= 0 ? exp(-w) : exp(0.8w)
cbath = CorrelatedBath(((1, 2), (2, 1)), spectrum=[(w) -> 0 γfun; γfun (w) -> 0])
γm = OpenQuantumBase.build_spectrum(cbath)
@test γm[1, 1](0.0) == 0
@test γm[2, 2](0.0) == 0
@test γm[1, 2](0.5) == exp(-0.5)
@test γm[2, 1](-0.5) == exp(-0.5*0.8)
@test_throws ArgumentError OpenQuantumBase.build_correlation(cbath)
lambfun_1 = OpenQuantumBase.build_lambshift([0.0], false, cbath, Dict())
@test lambfun_1[1,1](0.0) == 0
@test lambfun_1[2,2](0.1) == 0
@test lambfun_1[1,2](0.5) == 0
@test lambfun_1[2,2](1.0) == 0
lambfun_2 = OpenQuantumBase.build_lambshift([], true, cbath, Dict())
lambfun_3 = OpenQuantumBase.build_lambshift(range(-5,5,length=1000), true, cbath, Dict())
lambfun_4 = OpenQuantumBase.build_lambshift([], true, cbath, Dict(:order=>1))
lambfun_5 = OpenQuantumBase.build_lambshift([0.0, 0.01], true, cbath, Dict(:order=>1))
@test isapprox(lambfun_2[1,2](0.0), 0.03551, atol=1e-4)
@test isapprox(lambfun_3[1,2](0.0), 0.03551, atol=1e-4)
@test lambfun_2[1,1](0) == 0
@test lambfun_3[2,2](0) == 0
@test lambfun_4[1,2](0) ≈ 0.03551 atol=1e-4
@test lambfun_5[2,1](0.01) ≈ 0.05019 atol=1e-4
using OpenQuantumBase, Test
c = ConstantCouplings(["ZI", "IZ"])
@test isequal(c.mats[1], 2π*σz⊗σi)
@test c.mats[2] == 2π*σi⊗σz
@test c[2](2.0) == 2π*σi⊗σz
res = c(0.2)
@test isequal(res[1], 2π*σz⊗σi)
@test res[2] == 2π*σi⊗σz
crot = rotate(c, σx⊗σi)
@test crot[1](0) ≈ -2π*σz⊗σi
@test crot[2](0.5) ≈ 2π*σi⊗σz
c = ConstantCouplings(["ZI", "IZ"], unit=:ħ)
@test isequal(c.mats[1], σz⊗σi)
@test isequal(c.mats[2], σi⊗σz)
res = c(0.2)
@test res[1] == σz⊗σi
@test res[2] == σi⊗σz
@test [op(0) for op in c] == [σz⊗σi, σi⊗σz]
c = ConstantCouplings([σz⊗σi, σi⊗σz], unit=:ħ)
@test isequal(c.mats[1], σz⊗σi)
@test isequal(c.mats[2], σi⊗σz)
res = c(0.2)
@test res[1] == σz⊗σi
@test res[2] == σi⊗σz
@test [op(0) for op in c] == [σz⊗σi, σi⊗σz]
c = ConstantCouplings(["ZI", "IZ"], sp=true)
@test isequal(c.mats[1], 2π*spσz⊗spσi)
@test isequal(c.mats[2], 2π*spσi⊗spσz)
c1 = TimeDependentCoupling([(s)->s], [σz], unit=:ħ)
@test c1(0.5) == 0.5σz
c2 = TimeDependentCoupling([(s)->s], [σx], unit=:ħ)
c = TimeDependentCouplings(c1, c2)
@test size(c) == (2, 2)
@test [op for op in c(0.5)] == [c1(0.5), c2(0.5)]
@test [op(0.5) for op in c] == [c1(0.5), c2(0.5)]
c = collective_coupling("Z", 2, unit=:ħ)
@test isequal(c(0.1), [σz⊗σi, σi⊗σz])
test_coupling = [(s)->s*σx, (s)->(1-s)*σz]
coupling = CustomCouplings(test_coupling, unit=:ħ)
@test size(coupling) == (2, 2)
@test coupling(0.5) == 0.5 * [σx, σz]
@test [c(0.2) for c in coupling] == [0.2*σx, 0.8*σz]
using OpenQuantumBase, Test
η = 1e-4;T = 16
X = ConstantCouplings(["Z"])
bath_x = Ohmic(η, 1, T)
Z = ConstantCouplings(["X"])
bath_z = Ohmic(η, 4, T)
inter_x = Interaction(X, bath_x)
inter_z = Interaction(Z, bath_z)
inter_set = InteractionSet(inter_x, inter_z)
U(t) = exp(-1.0im * σz * t)
CGL = OpenQuantumBase.cg_from_interactions(inter_set, U, 10, 10, 1e-4, 1e-4)
@test length(CGL) == 1
@test CGL[1].kernels[1][2] == X
@test CGL[1].kernels[2][2] == Z
@test CGL[1].kernels[1][3][1,1](0.2, 0.1) ≈ correlation(0.1, bath_x)
using OpenQuantumBase, Test
import LinearAlgebra: Diagonal
funcs = [(x) -> x, (x) -> 1 - x]
test_diag_operator = OpenQuantumBase.DiagonalOperator(funcs)
@test test_diag_operator(0.5) == OpenQuantumBase.Diagonal([0.5, 0.5])
test_geometric_operator = OpenQuantumBase.GeometricOperator(((x) -> -1.0im * x))
@test test_geometric_operator(0.5) == [0 0.5im; -0.5im 0]
@test_throws ArgumentError OpenQuantumBase.GeometricOperator((x) -> x, (x) -> 1 - x)
H = AdiabaticFrameHamiltonian(funcs, [])
@test !isconstant(H)
@test H(2, 0.5) ≈ Diagonal([π, π])
H = AdiabaticFrameHamiltonian(funcs, nothing)
@test H(2, 0.0) ≈ Diagonal([0, 2π])
H = AdiabaticFrameHamiltonian((s)->[s, 1 - s], nothing)
@test H(2, 0.5) ≈ Diagonal([π, π])
# in_place update for matrices
du = [1.0 + 0.0im 0; 0 0]
u = PauliVec[2][1]
ρ = u * u'
hres = Diagonal([0, 2π])
H(du, ρ, 10, 0.0)
@test du ≈ -1.0im * (hres * ρ - ρ * hres)
dθ = (s) -> π / 2
gap = (s) -> (cos(2 * π * s) + 1) / 2
H = AdiabaticFrameHamiltonian([(x) -> -gap(x), (x) -> gap(x)], [dθ])
u = PauliVec[2][1]
ρ = u * u'
@test get_cache(H) == zeros(eltype(H), 2, 2)
@test size(H) == (2, 2)
@test H(10, 0.5) ≈ π * σx / 20
@test H(5, 0.0) ≈ π * σx / 10 - 2π * σz
# in_place update for vector
cache = get_cache(H)
update_cache!(cache, H, 10, 0.0)
@test cache ≈ -1.0im * (π * σx / 20 - 2π * σz)
update_cache!(cache, H, 10, 0.5)
@test cache ≈ -1.0im * (π * σx / 20)
# in_place update for matrices
du = [1.0 + 0.0im 0; 0 0]
hres = π * σx / 20 - 2π * σz
H(du, ρ, 10, 0.0)
@test du ≈ -1.0im * (hres * ρ - ρ * hres)
# # Constant Hamiltonian Interface
using OpenQuantumBase, Test, LinearAlgebra
# In HOQST a constant Hamiltonian can be constructed using the `ConstantHamiltonian` interface:
H₁ = ConstantHamiltonian(σx, unit=:ħ)
H₂ = ConstantHamiltonian(σx, static=false)
H₃ = ConstantHamiltonian(spσx⊗spσx)
# or `Hamiltonian` interface
Hₛ₁ = Hamiltonian(σx, unit=:ħ)
Hₛ₃ = Hamiltonian(spσx⊗spσx)
cache₁ = H₁ |> get_cache |> similar
cache₂ = H₂ |> get_cache |> similar
cache₃ = H₃ |> get_cache |> similar
@test H₁(2)==Hₛ₁(2)==σx
@test H₂(1)==2π*σx
@test H₃(0.2)==Hₛ₃(0.2)==2π*(σx⊗σx)
update_cache!(cache₁, H₁, nothing, 0.2)
update_cache!(cache₂, H₂, nothing, 0.1)
update_cache!(cache₃, H₃, nothing, 0.5)
@test cache₁ == -1.0im*σx
@test cache₂ == -2π*1im*σx
@test cache₃ == -2π*1im*(σx⊗σx)
vcache₁ = cache₁⊗cache₁ |> similar
vcache₂ = cache₂⊗cache₂ |> similar
vcache₃ = cache₃⊗spσi⊗spσi + spσi⊗spσi⊗cache₃ |> similar
update_vectorized_cache!(vcache₁, H₁, nothing, 0.2)
update_vectorized_cache!(vcache₂, H₂, nothing, 0.1)
update_vectorized_cache!(vcache₃, H₃, nothing, 0.5)
@test vcache₁ == -1.0im * OpenQuantumBase.vectorized_commutator(H₁(0.1))
@test vcache₂ == -1.0im * OpenQuantumBase.vectorized_commutator(H₂(1))
@test vcache₃ == -1.0im * OpenQuantumBase.vectorized_commutator(H₃(2))
ρ₁ = ones(ComplexF64, 2, 2)/2
ρ₃ = ones(ComplexF64, 4, 4)/4
H₁(cache₁, ρ₁, nothing, 0)
H₂(cache₂, ρ₁, nothing, 0.5)
H₃(cache₃, ρ₃, nothing, 1)
@test cache₁ == -1.0im*(H₁(0)*ρ₁-ρ₁*H₁(0))
@test cache₂ == -1.0im*(H₂(0.5)*ρ₁-ρ₁*H₂(0.5))
@test cache₃ == -1.0im*(H₃(1)*ρ₃-ρ₃*H₃(1))
we₁ = [-1, 1]/2/π
we₂ = [-1, 1]
we₃ = [-1, -1, 1, 1]
w₁, v₁ = eigen_decomp(H₁, 0)
w₂, v₂ = eigen_decomp(H₂, 0.5)
w₃, v₃= eigen_decomp(H₃, 1, lvl=4)
@test we₁ ≈ w₁
@test we₂ ≈ w₂
@test we₃ ≈ w₃
@test H₁(0) ≈ 2π * v₁'*Diagonal(w₁)*v₁
@test H₂(0.5) ≈ 2π * v₂'*Diagonal(w₂)*v₂
Hr₁ = rotate(H₁, v₁)
Hr₂ = rotate(H₂, v₂)
Hr₃ = rotate(H₃, v₃)
@test Hr₁(0) ≈2π*Diagonal(w₁)
@test Hr₂(0.5) ≈ 2π*Diagonal(w₂)
@test Hr₃(1) ≈ 2π*Diagonal(w₃)
using OpenQuantumBase, Test
import LinearAlgebra: Diagonal
f(s) = (1 - s) * σx + s * σz
H = hamiltonian_from_function(f)
@test H(0.5) == 0.5 * (σx + σz)
@test !isconstant(H)
cache = get_cache(H)
update_cache!(cache, H, 2.0, 0.5)
@test cache == -0.5im * (σx + σz)
du = zeros(ComplexF64, 2, 2)
ρ = PauliVec[1][1] * PauliVec[1][1]'
H(du, ρ, nothing ,0.5)
@test du ≈ -1.0im * (f(0.5) * ρ - ρ * f(0.5))
w, v = haml_eigs(H, 0.5, 2)
@test v' * Diagonal(w) * v ≈ H(0.5)
using OpenQuantumBase, Test
A = (s) -> (1 - s)
B = (s) -> s
H = DenseHamiltonian([A, B], [σx, σz])
Hc = H |> get_cache
@test size(H) == (2, 2)
@test size(Hc) == size(H)
@test eltype(Hc) == eltype(H)
@test H(0.5) == π * (σx + σz)
@test evaluate(H, 0.5) == (σx + σz) / 2
@test !isconstant(H)
@test isdimensionlesstime(H)
H1 = DenseHamiltonian([A], [σx])
H2 = DenseHamiltonian([B], [σz])
H3 = H1 + H2
@test H3.m == H.m
@test H3.f == H.f
# update_cache method
C = similar(σz)
update_cache!(C, H, 10, 0.5)
@test C == -1im * π * (σx + σz)
# update_vectorized_cache method
C = get_cache(H)
C = C ⊗ C
update_vectorized_cache!(C, H, 10, 0.5)
temp = -1im * π * (σx + σz)
@test C == σi ⊗ temp - transpose(temp) ⊗ σi
# in-place update for matrices
du = [1.0+0.0im 0; 0 0]
ρ = PauliVec[1][1] * PauliVec[1][1]'
H(du, ρ, 2, 0.5)
@test du ≈ -1.0im * π * ((σx + σz) * ρ - ρ * (σx + σz))
# eigen-decomposition
w, v = eigen_decomp(H, 0.5)
@test w ≈ [-1, 1] / √2
@test abs(v[:, 1]' * [1 - sqrt(2), 1] / sqrt(4 - 2 * sqrt(2))) ≈ 1
@test abs(v[:, 2]' * [1 + sqrt(2), 1] / sqrt(4 + 2 * sqrt(2))) ≈ 1
Hrot = rotate(H, v)
@test evaluate(Hrot, 0.5) ≈ [-1 0; 0 1] / sqrt(2)
# error message test
@test_throws ArgumentError DenseHamiltonian([(s) -> 1 - s, (s) -> s], [σx, σz], unit=:hh)
Hnd = DenseHamiltonian([A, B], [σx, σz], dimensionless_time=false)
@test !isdimensionlesstime(Hnd)
# test for Hamiltonian interface
@test Hamiltonian([A, B], [σx, σz], static=false) |> typeof <: DenseHamiltonian
# test for Static Hamiltonian type
Hst = Hamiltonian([A, B], [σx, σz], unit=:ħ)
Hstc = Hst |> get_cache
@test size(Hst) == (2, 2)
@test size(Hstc) == size(Hst)
@test eltype(Hstc) == eltype(Hst)
@test Hst(0.5) == (σx + σz) / 2
@test evaluate(Hst, 0.5) == (σx + σz) / 4 / π
@test !isconstant(Hst)
@test isdimensionlesstime(Hst)
# update_cache method
update_cache!(Hstc, Hst, 10, 0.5)
@test Hstc == -0.5im * (σx + σz)
# update_vectorized_cache method
C = Hstc ⊗ Hstc
update_vectorized_cache!(C, Hst, 10, 0.5)
temp = -0.5im * (σx + σz)
@test C == σi ⊗ temp - transpose(temp) ⊗ σi
# in-place update for matrices
du = [1.0+0.0im 0; 0 0]
ρ = PauliVec[1][1] * PauliVec[1][1]'
Hst(du, ρ, 2, 0.5)
@test du ≈ -0.5im * ((σx + σz) * ρ - ρ * (σx + σz))
# eigen-decomposition
w, v = eigen_decomp(Hst, 0.5)
@test 2π * w ≈ [-1, 1] / √2
@test abs(v[:, 1]' * [1 - sqrt(2), 1] / sqrt(4 - 2 * sqrt(2))) ≈ 1
@test abs(v[:, 2]' * [1 + sqrt(2), 1] / sqrt(4 + 2 * sqrt(2))) ≈ 1
Hrot = rotate(Hst, v)
@test 2π * evaluate(Hrot, 0.5) ≈ [-1 0; 0 1] / sqrt(2)
using OpenQuantumBase, Test
A = (s)->(1 - s)
B = (s)->s
u = [1.0 + 0.0im, 1] / sqrt(2)
ρ = PauliVec[1][1] * PauliVec[1][1]'
H = build_example_hamiltonian(1)
t_axis = range(0, 1, length=10)
H_list = [evaluate(H, t) for t in t_axis]
H_interp = InterpDenseHamiltonian(t_axis, H_list)
@test H_interp(0.5) == H(0.5)
@test !isconstant(H_interp)
# update_cache method
C = get_cache(H_interp, false)
update_cache!(C, H_interp, 10, 0.5)
@test C ≈ -1im * π * (σx + σz)
# update_vectorized_cache method
C = get_cache(H_interp, true)
update_vectorized_cache!(C, H, 10, 0.5)
temp = -1im * π * (σx + σz)
@test C == σi⊗temp - transpose(temp)⊗σi
# in-place update for matrices
du = [1.0 + 0.0im 0; 0 0]
H_interp(du, ρ, 2.0, 0.5)
@test du ≈ -1.0im * π * ((σx + σz) * ρ - ρ * (σx + σz))
H = build_example_hamiltonian(1, sp=true)
t_axis = range(0, 1, length=10)
H_list = [evaluate(H, t) for t in t_axis]
H_interp = InterpSparseHamiltonian(t_axis, H_list)
@test H_interp(0.5) == H(0.5)
# update_cache method
C = get_cache(H_interp, false)
update_cache!(C, H_interp, 10, 0.5)
@test C ≈ -1im * π * (spσx + spσz)
# update_vectorized_cache method
C = get_cache(H_interp, true)
update_vectorized_cache!(C, H, 10, 0.5)
temp = -1im * π * (spσx + spσz)
@test C == spσi⊗temp - transpose(temp)⊗spσi
du = [1.0 + 0.0im 0; 0 0]
H_interp(du, ρ, 1.0, 0.5)
@test du ≈ -1.0im * π * ((σx + σz) * ρ - ρ * (σx + σz))
using OpenQuantumBase, Test
import SparseArrays: spzeros
import LinearAlgebra: Diagonal, I
A = (s) -> (1 - s)
B = (s) -> s
u = [1.0 + 0.0im, 1] / sqrt(2)
ρ = u * u'
H_sparse = SparseHamiltonian([A, B], [spσx, spσz])
@test isdimensionlesstime(H_sparse)
Hs1 = SparseHamiltonian([A], [spσx])
Hs2 = SparseHamiltonian([B], [spσz])
Hs3 = Hs1 + Hs2
@test Hs3.m == H_sparse.m
@test Hs3.f == H_sparse.f
H_real = convert(Real, H_sparse)
@test eltype(H_real) <: Real
@test H_sparse(0.0) ≈ H_real(0.0)
@test !isconstant(H_sparse)
@test size(H_sparse) == (2, 2)
@test issparse(H_sparse)
@test H_sparse(0) ≈ 2π * spσx
@test evaluate(H_sparse, 0) == spσx
@test H_sparse(0.5) ≈ π * (spσx + spσz)
@test evaluate(H_sparse, 0.5) == (spσx + spσz) / 2
@test get_cache(H_sparse) ≈ π * (spσx + spσz)
du = [1.0+0.0im 0; 0 0]
H_sparse(du, ρ, 1.0, 0.5)
@test du ≈ -1.0im * π * ((σx + σz) * ρ - ρ * (σx + σz))
# update_cache method
C = similar(spσz)
update_cache!(C, H_sparse, 10, 0.5)
@test C == -1im * π * (spσx + spσz)
# update_vectorized_cache method
C = C ⊗ C
update_vectorized_cache!(C, H_sparse, 10, 0.5)
temp = -1im * π * (spσx + spσz)
@test C == spσi ⊗ temp - transpose(temp) ⊗ spσi
H_sparse = SparseHamiltonian([A, B], real.([spσx ⊗ spσi + spσi ⊗ spσx, 0.1spσz ⊗ spσi - spσz ⊗ spσz]))
w, v = eigen_decomp(H_sparse, 1.0)
vf = [0, 0, 0, 1.0]
@test w ≈ [-1.1, -0.9]
@test abs(v[end, 1]) ≈ 1
@test abs(v[1, 2]) ≈ 1
# ## Test suite for eigen decomposition of `SparseHamiltonian`
np = 5
Hd = standard_driver(np, sp=true)
Hp = alt_sec_chain(1, 0.5, 1, np, sp=true)
H_sparse = SparseHamiltonian([A, B], [Hd, Hp], unit=:ħ)
w1, v1 = haml_eigs(H_sparse, 0.5, nothing)
wl, vl = haml_eigs(H_sparse, 0.5, 3)
@test w1[1:3] ≈ wl
@test abs.(v1[:, 1:3]' * vl) |> Diagonal ≈ I
w2, v2 = haml_eigs(H_sparse, 0.5, 3, lobpcg=false)
@test w1 ≈ w2
@test v1 ≈ v2 | OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 7079 | using OpenQuantumBase, Test, Random
import LinearAlgebra: Diagonal, diag
# # Dense Hamiltonian AME tests
# ## Set up problems
# Set up mock functions for the bath spectrum and corresponding lambshift
gamma(x) = x >= 0 ? x + 1 : (1 - x) * exp(x)
lamb(x) = x + 0.1
# ## Two-qubit dense Hamiltonian
H = DenseHamiltonian(
[(s) -> 1 - s, (s) -> s],
[-standard_driver(2), (0.1 * σz ⊗ σi + 0.5 * σz ⊗ σz)]
)
coupling = ConstantCouplings(["ZI+IZ"])
davies = OpenQuantumBase.DaviesGenerator(coupling, gamma, lamb)
onesided = OpenQuantumBase.OneSidedAMELiouvillian(coupling, OpenQuantumBase.SingleFunctionMatrix(gamma), OpenQuantumBase.SingleFunctionMatrix(lamb), [(1, 1)])
ops = [2π * (σz ⊗ σi + σi ⊗ σz)]
w, v = eigen_decomp(H, 0.5, lvl=4)
w = 2π * w
ψ = (v[:, 1] + v[:, 2] + v[:, 3]) / sqrt(3)
ρ = ψ * ψ'
u = v' * ρ * v
g_idx = OpenQuantumBase.GapIndices(w, 8, 8)
# ## Tests
# Test density matrix update function for `DaviesGenerator` type
dρ = OpenQuantumBase.ame_update_test(ops, ρ, w, v, gamma, lamb)
du = zeros(ComplexF64, (4, 4))
davies(du, u, g_idx, v, 0.5)
@test v * du * v' ≈ dρ atol = 1e-6 rtol = 1e-6
onesided_dρ = OpenQuantumBase.onesided_ame_update_test(ops, ρ, w, v, gamma, lamb)
du = zeros(ComplexF64, (4, 4))
onesided(du, u, g_idx, v, 0.5)
@test v * du * v' ≈ onesided_dρ atol = 1e-6 rtol = 1e-6
onesided_dρ = OpenQuantumBase.onesided_ame_update_test([v * op * v' for op in ops], ρ, w, v, gamma, lamb)
du = zeros(ComplexF64, (4, 4))
onesided(du, u, g_idx, 0.5)
@test du ≈ v' * onesided_dρ * v atol = 1e-6 rtol = 1e-6
cache = zeros(ComplexF64, (4, 4))
exp_effective_H = OpenQuantumBase.ame_trajectory_Heff_test(ops, w, v, gamma, lamb)
update_cache!(cache, davies, g_idx, v, 0.5)
@test v * cache * v' ≈ -1.0im * exp_effective_H atol = 1e-6 rtol = 1e-6
# Test for dense Hamiltonian DiffEqLiouvillian
ame_op = DiffEqLiouvillian(H, [davies], [], 4)
p = ODEParams(H, 2.0, (tf, t) -> t / tf)
exp_effective_H = OpenQuantumBase.ame_trajectory_Heff_test(ops, w, v, gamma, lamb) + H(0.5)
cache = zeros(ComplexF64, 4, 4)
update_cache!(cache, ame_op, p, 1)
@test cache ≈ -1.0im * exp_effective_H atol = 1e-6 rtol = 1e-6
hmat = H(0.5)
expected_drho = dρ - 1.0im * (hmat * ρ - ρ * hmat)
du = zeros(ComplexF64, (4, 4))
ame_op(du, ρ, p, 1)
@test du ≈ expected_drho atol = 1e-6 rtol = 1e-6
#= ================ Test for lindblad_jump ================= =#
#TODO: better test routine for `lindblad_jump`
jump_op = OpenQuantumBase.lindblad_jump(ame_op, ψ, p, 1)
@test size(jump_op) == (4, 4)
# ## Two-qubit constant Hamiltonian
Hc = Hamiltonian(standard_driver(2) + alt_sec_chain(1, 0.5, 1, 2))
wc, vc = eigen_decomp(Hc, lvl=2^2)
wc = 2π*wc
ψc = (vc[:, 1] + vc[:, 2] + vc[:, 3]) / sqrt(3)
ρc = ψc * ψc'
uc = vc' * ρc * vc
couplings_const = collective_coupling("Z", 2)
gapidx_const = OpenQuantumBase.build_gap_indices(wc, 8, 8, Inf, 4)
davies_const = OpenQuantumBase.build_const_davies(rotate(couplings_const, vc), gapidx_const, gamma, lamb)
# Test density matrix update function for `ConstDaviesGenerator` type
dρ = OpenQuantumBase.ame_update_test(couplings_const(0), ρc, wc, vc, gamma, lamb)
du = zeros(ComplexF64, (4, 4))
davies_const(du, uc, nothing, 0)
@test dρ ≈ vc * du * vc'
effective_H = OpenQuantumBase.ame_trajectory_Heff_test(couplings_const(0), wc, vc, gamma, lamb)
cache = zeros(ComplexF64, (4, 4))
update_cache!(cache, davies_const, nothing, 0)
@test -1.0im * effective_H ≈ vc * cache * vc'
# Test for lindblad_jump
Lc = OpenQuantumBase.build_diffeq_liouvillian(Hamiltonian(OpenQuantumBase.sparse(OpenQuantumBase.Diagonal(wc)), unit=:ħ), [], [davies_const] , 4)
jump_op = OpenQuantumBase.lindblad_jump(Lc, vc'*ψc, p, 1)
@test size(jump_op) == (4, 4)
#= === Test for dense Hamiltonian with size smaller than truncation levels === =#
H = DenseHamiltonian(
[(s) -> 1 - s, (s) -> s],
[-standard_driver(4), random_ising(4)]
)
coupling = collective_coupling("Z", 4)
davies = OpenQuantumBase.DaviesGenerator(coupling, gamma, lamb)
ame_op = DiffEqLiouvillian(H, [davies], [], 20)
p = ODEParams(H, 2.0, (tf, t) -> t / tf)
cache = zeros(ComplexF64, 16, 16)
update_cache!(cache, ame_op, p, 1)
@test cache != zeros(ComplexF64, 16, 16)
#= ===== Tests for sparse Hamiltonians ===== =#
#= ===== Problem set up ===== =#
Hd = standard_driver(4; sp=true)
Hp = q_translate("-0.9ZZII+IZZI-0.9IIZZ"; sp=true)
H = SparseHamiltonian([(s) -> 1 - s, (s) -> s], [Hd, Hp])
ops = [2π * q_translate("ZIII+IZII+IIZI+IIIZ")]
coupling = ConstantCouplings(["ZIII+IZII+IIZI+IIIZ"])
davies = OpenQuantumBase.DaviesGenerator(coupling, gamma, lamb)
w, v = eigen_decomp(H, 0.5, lvl=4, lobpcg=false)
w = 2π * real(w)
g_idx = OpenQuantumBase.GapIndices(w, 8, 8)
ψ = (v[:, 1] + v[:, 2] + v[:, 3]) / sqrt(3)
ρ = ψ * ψ'
u = v' * ρ * v
dρ = OpenQuantumBase.ame_update_test(ops, ρ, w, v, gamma, lamb)
exp_effective_H =
OpenQuantumBase.ame_trajectory_Heff_test(ops, w, v, gamma, lamb) + v * Diagonal(w) * v'
#= ============ Tests ============ =#
ame_op = DiffEqLiouvillian(H, [davies], [], 4)
du = zeros(ComplexF64, (16, 16))
ame_op(du, ρ, p, 1)
hmat = H(0.5)
@test isapprox(
du,
dρ - 1.0im * (hmat * ρ - ρ * hmat),
atol=1e-6,
rtol=1e-6,
)
cache = zeros(ComplexF64, 16, 16)
update_cache!(cache, ame_op, p, 1)
@test cache ≈ -1im * exp_effective_H atol = 1e-6 rtol = 1e-6
#= ===== Tests for adiabatc frame Hamiltonians ===== =#
#= ===== Problem set up ===== =#
H = AdiabaticFrameHamiltonian((s) -> [0, s, 1 - s, 1], nothing)
hmat = H(2.0, 0.4)
w = diag(hmat)
v = collect(Diagonal(ones(4)))
coupling = CustomCouplings([(s) -> s * (σx ⊗ σi + σi ⊗ σx) + (1 - s) * (σz ⊗ σi + σi ⊗ σz)])
davies = OpenQuantumBase.DaviesGenerator(coupling, gamma, lamb)
ψ = (v[:, 1] + v[:, 2] + v[:, 3]) / sqrt(3)
ρ = ψ * ψ'
#= ============ Tests ============ =#
dρ = OpenQuantumBase.ame_update_test(coupling(0.4), ρ, w, v, gamma, lamb)
p = ODEParams(H, 2.0, (tf, t) -> t / tf)
ame_op = DiffEqLiouvillian(H, [davies], [], 4)
du = zeros(ComplexF64, (4, 4))
ame_op(du, ρ, p, 0.4 * 2)
@test isapprox(
du,
dρ - 1.0im * (hmat * ρ - ρ * hmat),
atol=1e-6,
rtol=1e-6,
)
# test suite for CorrelatedDaviesGenerator
coupling = ConstantCouplings([σ₊, σ₋], unit=:ħ)
gfun = (w) -> w >= 0 ? 1.0 : exp(-0.5)
cbath = CorrelatedBath(((1, 2), (2, 1)), spectrum=[(w)->0 gfun; gfun (w)->0])
D = OpenQuantumBase.davies_from_interactions(InteractionSet(Interaction(coupling, cbath)), 1:10, false, Dict())[1]
@test typeof(D) <: OpenQuantumBase.CorrelatedDaviesGenerator
du = zeros(ComplexF64, 2, 2)
ρ = [0.5 0; 0 0.5]
ω = [1, 2]
g_idx = OpenQuantumBase.GapIndices(ω, 8, 8)
D(du, ρ, g_idx, 0.5)
@test du ≈ zeros(2, 2)
coupling = ConstantCouplings([σ₋, σ₋], unit=:ħ)
D = OpenQuantumBase.davies_from_interactions(InteractionSet(Interaction(coupling, cbath)), 1:10, false, Dict())[1]
@test typeof(D) <: OpenQuantumBase.CorrelatedDaviesGenerator
du = zeros(ComplexF64, 2, 2)
ρ = [0.5 0.5; 0.5 0.5]
ω = [1, 2]
g_idx = OpenQuantumBase.GapIndices(ω, 8, 8)
D(du, ρ, g_idx, 0.5)
@test du ≈ [-exp(-0.5) -0.5*exp(-0.5); -0.5*exp(-0.5) exp(-0.5)]
| OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 1623 | using OpenQuantumBase, Test, Random
coupling = ConstantCouplings(["Z"], unit=:ħ)
jfun(t₁, t₂) = 1.0
jfun(τ) = 1.0
# TODO: add test for unitary using StaticArrays
# const Sx = SMatrix{2,2}(σx)
unitary(t) = exp(-1.0im * t * σx)
tf = 5.0
u0 = PauliVec[1][1]
ρ = u0 * u0'
kernels = [(((1, 1),), coupling, OpenQuantumBase.SingleFunctionMatrix(jfun))]
L = OpenQuantumBase.quadgk((x) -> unitary(x)' * σz * unitary(x), 0, 5)[1]
ulind = OpenQuantumBase.ULELiouvillian(kernels, unitary, tf, 1e-8, 1e-6)
p = ODEParams(nothing, 5.0, (tf, t) -> t / tf)
dρ = zero(ρ)
ulind(dρ, ρ, p, 5.0)
@test dρ ≈ L * ρ * L' - 0.5 * (L' * L * ρ + ρ * L' * L) atol = 1e-6 rtol = 1e-6
# test for EᵨEnsemble
u0 = EᵨEnsemble([0.5, 0.5], [PauliVec[3][1], PauliVec[3][2]])
@test all((x)->x∈[PauliVec[3][1], PauliVec[3][2]], [sample_state_vector(u0) for i in 1:4])
Lz = Lindblad((s) -> 0.5, (s) -> σz)
Lx = Lindblad((s) -> 0.2, (s) -> σx)
Ł = OpenQuantumBase.lindblad_from_interactions(InteractionSet(Lz))[1]
p = ODEParams(nothing, 5.0, (tf, t) -> t / tf)
dρ = zero(ρ)
Ł(dρ, ρ, p, 5.0)
@test dρ ≈ [0 -0.5; -0.5 0]
cache = zeros(2, 2)
update_cache!(cache, Ł, p, 5.0)
@test cache == -0.25 * σz' * σz
Ł = OpenQuantumBase.lindblad_from_interactions(InteractionSet(Lz, Lx))[1]
p = ODEParams(nothing, 5.0, (tf, t) -> t / tf)
dρ = zero(ρ)
Ł(dρ, PauliVec[2][1] * PauliVec[2][1]', p, 5.0)
@test dρ ≈ [0 0.7im; -0.7im 0]
cache = zeros(2, 2)
update_cache!(cache, Ł, p, 5.0)
@test cache == -0.25 * σz' * σz - 0.1 * σx' * σx
Random.seed!(1234)
sample_res = [OpenQuantumBase.lind_jump(Ł, PauliVec[1][1], p, 0.5) for i in 1:4]
@test all((x)->x∈[σz, σx], sample_res) | OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 2127 | using OpenQuantumBase, Test
coupling = ConstantCouplings(["Z"], unit=:ħ)
cfun(t₁, t₂) = 1.0
cfun(τ) = 1.0
# TODO: add test for unitary using StaticArrays
# const Sx = SMatrix{2,2}(σx)
unitary(t) = exp(-1.0im * t * σx)
tf = 5.0
u0 = PauliVec[1][1]
ρ = u0 * u0'
kernels = [(((1, 1),), coupling, OpenQuantumBase.SingleFunctionMatrix(cfun))]
redfield = OpenQuantumBase.RedfieldLiouvillian(kernels, unitary, tf, 1e-8, 1e-6)
p = ODEParams(nothing, 5.0, (tf, t) -> t / tf)
Λ = OpenQuantumBase.quadgk((x) -> unitary(x)' * σz * unitary(x), 0, 5)[1]
dρ = zero(ρ)
redfield(dρ, ρ, p, 5.0)
@test dρ ≈ -(σz * (Λ * ρ - ρ * Λ') - (Λ * ρ - ρ * Λ') * σz) atol = 1e-6 rtol =
1e-6
A = zero(ρ ⊗ σi)
update_vectorized_cache!(A, redfield, p, 5.0)
@test A * ρ[:] ≈ -(σz * (Λ * ρ - ρ * Λ') - (Λ * ρ - ρ * Λ') * σz)[:]
# Λ = OpenQuantumBase.quadgk((x) -> unitary(x)' * σz * unitary(x), 0, 2.5)[1]
# dρ = zero(ρ)
# redfield(dρ, ρ, p, 2.5)
# @test dρ ≈ -(σz * (Λ * ρ - ρ * Λ') - (Λ * ρ - ρ * Λ') * σz) atol = 1e-6 rtol =
# 1e-6
#
# A = zero(ρ ⊗ σi)
# update_vectorized_cache!(A, redfield, UnitTime(5.0), 2.5)
# @test A * ρ[:] ≈ -(σz*(Λ*ρ-ρ*Λ')-(Λ*ρ-ρ*Λ')*σz)[:]
# test for CustomCouplings
coupling = CustomCouplings([(s) -> σz], unit=:ħ)
bath = CustomBath(correlation=(τ) -> 1.0)
interactions = InteractionSet(Interaction(coupling, bath))
redfield = OpenQuantumBase.redfield_from_interactions(interactions, unitary, tf, 1e-8, 1e-6)[1]
A = zero(ρ ⊗ σi)
Λ = OpenQuantumBase.quadgk((x) -> unitary(x)' * σz * unitary(x), 0, 2.5)[1]
update_vectorized_cache!(A, redfield, p, 2.5)
@test A * ρ[:] ≈ -(σz * (Λ * ρ - ρ * Λ') - (Λ * ρ - ρ * Λ') * σz)[:]
# =============== CGME Test ===================
kernels = [(((1, 1),), coupling, OpenQuantumBase.SingleFunctionMatrix(cfun), 1)]
cgop = OpenQuantumBase.CGLiouvillian(kernels, unitary, 1e-8, 1e-6)
dρ = zero(ρ)
function integrand(x)
a1 = unitary(x[1])' * σz * unitary(x[1])
a2 = unitary(x[2])' * σz * unitary(x[2])
a1 * ρ * a2 - 0.5 * (a2 * a1 * ρ + ρ * a2 * a1)
end
exp_res, err = OpenQuantumBase.hcubature(integrand, [-0.5, -0.5], [0.5, 0.5])
cgop(dρ, ρ, p, 2.5)
@test dρ ≈ exp_res
| OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 1141 | using OpenQuantumBase, Random
bath = EnsembleFluctuator([1, 1], [1, 2])
interaction = InteractionSet(Interaction(ConstantCouplings(["Z"], unit=:ħ), bath))
fluct = OpenQuantumBase.fluctuator_from_interactions(interaction)[1]
cache_exp = -1.0im * sum(fluct.b0 .* [σz, σz])
cache = zeros(ComplexF64, 2, 2)
p = ODEParams(nothing, 5.0, (tf, t) -> t / tf)
update_cache!(cache, fluct, p, 2.5)
@test cache ≈ cache_exp
cache = zeros(ComplexF64, 4, 4)
update_vectorized_cache!(cache, fluct, p, 2.5)
@test cache ≈ one(cache_exp)⊗cache_exp - transpose(cache_exp)⊗one(cache_exp)
Random.seed!(1234)
random_1 = rand([-1, 1], 2, 1)
τ, idx = findmin(rand(fluct.dist, 1))
Random.seed!(1234)
OpenQuantumBase.reset!(fluct, (x, y) -> rand([-1, 1], x, y))
random_1[idx] *= -1
@test fluct.b0[1] == random_1[1] && fluct.b0[2] == random_1[2]
@test fluct.next_τ ≈ τ
@test fluct.next_idx == idx
b0 = copy(fluct.b0)
Random.seed!(1234)
random_1 = rand(fluct.dist, 1)
τ, idx = findmin(random_1)
b0[idx] *= -1
Random.seed!(1234)
OpenQuantumBase.next_state!(fluct)
@test fluct.b0[1] == b0[1] && fluct.b0[2] == b0[2]
@test fluct.next_τ ≈ τ
@test fluct.next_idx == idx | OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 408 | using OpenQuantumBase
w = [-3, 1, 3, 3, 4.5, 5.5, 8]
gidx = OpenQuantumBase.GapIndices(w, 8, 8)
uniq_w, indices, indices0 = OpenQuantumBase.find_unique_gap(w)
@test gidx.uniq_w == uniq_w
@test gidx.uniq_a == [[x.I[1] for x in i] for i in indices]
@test gidx.uniq_b == [[x.I[2] for x in i] for i in indices]
@test Set([(i,j) for (i,j) in zip(gidx.a0, gidx.b0)]) == Set([(x.I[1], x.I[2]) for x in indices0])
| OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 1113 | using OpenQuantumBase, Test
@test σx*PauliVec[1][1] == PauliVec[1][1]
@test σx*PauliVec[1][2] == -PauliVec[1][2]
@test σy*PauliVec[2][1] == PauliVec[2][1]
@test σy*PauliVec[2][2] == -PauliVec[2][2]
@test σz*PauliVec[3][1] == PauliVec[3][1]
@test σz*PauliVec[3][2] == -PauliVec[3][2]
@test bloch_to_state(π/2, 0.0) ≈ PauliVec[1][1]
@test bloch_to_state(0, 0) ≈ PauliVec[3][1]
@test bloch_to_state(π/2, π/2) ≈ PauliVec[2][1]
@test_throws ArgumentError bloch_to_state(2π, 0)
@test_throws ArgumentError bloch_to_state(π, 3π)
@test creation_operator(3) ≈ [0 0 0; 1 0 0; 0 sqrt(2) 0]
@test annihilation_operator(3) ≈ [0 1 0; 0 0 sqrt(2); 0 0 0]
@test number_operator(3) ≈ [0 0 0; 0 1 0; 0 0 2]
pauli_exp = "-0.1X1X2 + Z2"
res = OpenQuantumBase.split_pauli_expression(pauli_exp)
@test res[1] == ["-", "0.1", "X1X2"]
@test res[2] == ["+", "", "Z2"]
pauli_exp = "Y2X1-2Z1"
res = OpenQuantumBase.split_pauli_expression(pauli_exp)
@test res[1] == ["", "", "Y2X1"]
@test res[2] == ["-", "2", "Z1"]
pauli_exp = "X1X2 + 1.0imZ1"
res = OpenQuantumBase.split_pauli_expression(pauli_exp)
@test res[2] == ["+", "1.0im", "Z1"]
| OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 1896 | using OpenQuantumBase, Test
replstr(x, kv::Pair...) = sprint((io,x) -> show(IOContext(io, :limit => true, :displaysize => (24, 80), kv...), MIME("text/plain"), x), x)
# only test the repl string
#showstr(x, kv::Pair...) = sprint((io,x) -> show(IOContext(io, :limit => true, :displaysize => (24, 80), kv...), x), x)
A = (s) -> (1 - s)
B = (s) -> s
u = [1.0 + 0.0im, 1] / sqrt(2)
ρ = u * u'
H = DenseHamiltonian([A, B], [σx, σz])
@test replstr(H) == "\e[36mDenseHamiltonian\e[0m with \e[36mComplexF64\e[0m\nwith size: (2, 2)"
annealing = Annealing(H, u)
@test_broken replstr(annealing) == "\e[36mAnnealing\e[0m with \e[36mOpenQuantumBase.DenseHamiltonian{ComplexF64}\e[0m and u0 \e[36mVector{ComplexF64}\e[0m\nu0 size: (2,)"
coupling = ConstantCouplings(["Z"])
@test replstr(coupling) == "\e[36mConstantCouplings\e[0m with \e[36mComplexF64\e[0m\nand string representation: [\"Z\"]"
@test replstr(ConstantCouplings([σz])) == "\e[36mConstantCouplings\e[0m with \e[36mComplexF64\e[0m\nand string representation: nothing"
η = 0.01; W = 5; fc = 4; T = 12.5
@test replstr(HybridOhmic(W, η, fc, T)) == "Hybrid Ohmic bath instance:\nW (mK): 5.0\nϵl (GHz): 0.02083661222512523\nη (unitless): 0.01\nωc (GHz): 4.0\nT (mK): 12.5"
η = 1e-4; ωc = 4; T = 12
bath = Ohmic(η, ωc, T)
@test replstr(bath) == "Ohmic bath instance:\nη (unitless): 0.0001\nωc (GHz): 4.0\nT (mK): 12.0"
interaction = Interaction(coupling, bath)
@test replstr(interaction) == "\e[36mInteraction\e[0m with \e[36mConstantCouplings\e[0m with \e[36mComplexF64\e[0m\nand string representation: [\"Z\"]\nand bath: OpenQuantumBase.OhmicBath(0.0001, 25.132741228718345, 0.6365195925819416)"
iset = InteractionSet(interaction)
@test replstr(iset) == "\e[36mInteractionSet\e[0m with 1 interactions"
H = SparseHamiltonian([A, B], [spσx, spσz])
@test replstr(H) == "\e[36mSparseHamiltonian\e[0m with \e[36mComplexF64\e[0m\nwith size: (2, 2)" | OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 3554 | using OpenQuantumBase, LinearAlgebra, Test
# === matrix decomposition
v = 1.0 * σx + 2.0 * σy + 3.0 * σz
res = matrix_decompose(v, [σx, σy, σz])
@test isapprox(res, [1.0, 2.0, 3.0])
# === positivity test ===
r = rand(2)
m =
r[1] * PauliVec[1][2] * PauliVec[1][2]' +
r[2] * PauliVec[1][1] * PauliVec[1][1]'
@test check_positivity(m)
@test !check_positivity(σx)
@test_logs (:warn, "Input fails the numerical test for Hermitian matrix. Use the upper triangle to construct a new Hermitian matrix.") check_positivity(σ₊)
# == units conversion test ===
@test temperature_2_freq(1e3) ≈ 20.8366176361328 atol = 1e-4 rtol = 1e-4
@test freq_2_temperature(20) ≈ 959.8489324422699 atol = 1e-4 rtol = 1e-4
@test temperature_2_freq(1e3) ≈ 1 / temperature_2_β(1e3) / 2 / π
@test β_2_temperature(0.47) ≈ 16.251564065921915
# === unitary test ===
u_res = exp(-1.0im * 5 * 0.5 * σx)
@test check_unitary(u_res)
@test !check_unitary([0 1; 0 0])
# === integration test ===
@test OpenQuantumBase.cpvagk((x) -> 1.0, 0, -1, 1)[1] == 0
# == Hamiltonian analysis ===
DH = DenseHamiltonian([(s) -> 1 - s, (s) -> s], [σx, σz], unit=:ħ)
# hfun(s) = (1-s)*real(σx)+ s*real(σz)
# dhfun(s) = -real(σx) + real(σz)
t = [0.0, 1.0]
states = [PauliVec[1][2], PauliVec[1][1]]
res = inst_population(t, states, DH, lvl=1:2)
@test isapprox(res, [[1.0, 0], [0.5, 0.5]])
SH = SparseHamiltonian(
[(s) -> -(1 - s), (s) -> s],
[standard_driver(2, sp=true), (0.1 * spσz ⊗ spσi + spσz ⊗ spσz)],
unit=:ħ,
)
H_check = DenseHamiltonian(
[(s) -> -(1 - s), (s) -> s],
[standard_driver(2), (0.1 * σz ⊗ σi + σz ⊗ σz)],
unit=:ħ,
)
# spdhfun(s) =
# real(standard_driver(2, sp = true) + (0.1 * spσz ⊗ spσi + spσz ⊗ spσz))
interaction = [spσz ⊗ spσi, spσi ⊗ spσz]
spw, spv = eigen_decomp(SH, [0.5])
w, v = eigen_decomp(H_check, [0.5])
@test w ≈ spw atol = 1e-4
@test isapprox(spv[:, 1, 1], v[:, 1, 1], atol=1e-4) ||
isapprox(spv[:, 1, 1], -v[:, 1, 1], atol=1e-4)
@test isapprox(spv[:, 2, 1], v[:, 2, 1], atol=1e-4) ||
isapprox(spv[:, 2, 1], -v[:, 2, 1], atol=1e-4)
# == utility math functions ==
@test log_uniform(1, 10, 3) == [1, 10^0.5, 10]
v = sqrt.([0.4, 0.6])
ρ1 = v * v'
ρ2 = [0.5 0; 0 0.5]
ρ3 = ones(3, 3) / 3
@test ρ1 == partial_trace(ρ1 ⊗ ρ2 ⊗ ρ2, [1])
@test ρ2 == partial_trace(ρ1 ⊗ ρ2 ⊗ ρ2, [2])
@test ρ3 ≈ partial_trace(ρ1 ⊗ ρ2 ⊗ ρ3, [2, 2, 3], [3])
@test_throws ArgumentError partial_trace(ρ1 ⊗ ρ2 ⊗ ρ3, [3, 2, 3], [3])
@test_throws ArgumentError partial_trace(rand(4, 5), [2, 2], [1])
@test purity(ρ1) ≈ 1
@test purity(ρ2) == 0.5
@test check_pure_state(ρ1)
@test !check_pure_state(ρ2)
@test !check_pure_state([0.4 0.5; 0.5 0.6])
ρ = PauliVec[1][1] * PauliVec[1][1]'
σ = PauliVec[3][1] * PauliVec[3][1]'
@test fidelity(ρ, σ) ≈ 0.5
@test fidelity(ρ, ρ) ≈ 1
@test check_density_matrix(ρ)
@test !check_density_matrix(σx)
w = [-0.000000003, 0, 1, 1.000000001, 1.000000004, 1.000000006, 2, 3, 4, 4, 5, 5]
@test find_degenerate(w, digits=8) == [[1, 2], [3, 4, 5], [9, 10], [11, 12]]
@test isempty(find_degenerate([1, 2, 3]))
gibbs = gibbs_state(σz, 12)
@test gibbs[2, 2] ≈ 1 / (1 + exp(-temperature_2_β(12) * 2))
@test gibbs[1, 1] ≈ 1 - 1 / (1 + exp(-temperature_2_β(12) * 2))
@test low_level_matrix(σz ⊗ σz + 0.1σz ⊗ σi, 2) == [0.0+0.0im 0.0+0.0im 0.0+0.0im 0.0+0.0im
0.0+0.0im -0.9+0.0im 0.0+0.0im 0.0+0.0im
0.0+0.0im 0.0+0.0im -1.1+0.0im 0.0+0.0im
0.0+0.0im 0.0+0.0im 0.0+0.0im 0.0+0.0im]
@test_logs (:warn, "Subspace dimension bigger than total dimension.") low_level_matrix(σz ⊗ σz + 0.1σz ⊗ σi, 5)
u = haar_unitary(2)
@test u'*u ≈ I | OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | code | 1154 | using OpenQuantumBase, Test
@test q_translate("ZZ+0.5ZI-XZ") == σz⊗σz + 0.5σz⊗σi - σx⊗σz
@test single_clause(["x"], [2], 0.5, 2) == 0.5σi⊗σx
@test single_clause(["x"], [2], 0.5, 2, sp = true) == 0.5spσi⊗spσx
@test single_clause(["z","z"], [2,3], -2, 4) == -2σi⊗σz⊗σz⊗σi
@test single_clause([σ₊], [3], 0.1, 3) == 0.1σi⊗σi⊗σ₊
@test single_clause([σ₊], [3], 0.1, 3, sp=true) == 0.1spσi⊗spσi⊗σ₊
@test standard_driver(2) == σx⊗σi + σi⊗σx
@test standard_driver(2, sp = true) == spσx⊗spσi + spσi⊗spσx
@test collective_operator("z", 3) ≈ σz⊗σi⊗σi + σi⊗σz⊗σi + σi⊗σi⊗σz
@test collective_operator("z", 3, sp = true) ≈ spσz⊗spσi⊗spσi + spσi⊗spσz⊗spσi + spσi⊗spσi⊗spσz
@test local_field_term([-1.0, 0.5], [1,3], 3) ≈ -1.0σz⊗σi⊗σi + 0.5σi⊗σi⊗σz
@test local_field_term([-1.0, 0.5], [1,3], 3, sp = true) ≈ -1.0spσz⊗spσi⊗spσi + 0.5spσi⊗spσi⊗spσz
@test two_local_term([-1.0, 0.5], [[1,3],[1,2]], 3) ≈ -1.0σz⊗σi⊗σz + 0.5σz⊗σz⊗σi
v0 = [1.0+0.0im, 0]
v1 = [0, 1.0+0.0im]
@test q_translate_state("0011") == v0⊗v0⊗v1⊗v1
@test q_translate_state("0.1(111)+(011)") == 0.1*v1⊗v1⊗v1 + v0⊗v1⊗v1
@test q_translate_state("(111)+(011)"; normal=true) == (v1⊗v1⊗v1 + v0⊗v1⊗v1)/sqrt(2)
| OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.7.9 | fb083fe83f74926e63328d412d6d37838142c524 | docs | 549 | <img src="assets/logo.jpg" width="256"/>
# OpenQuantumBase.jl

[](https://codecov.io/gh/USCqserver/OpenQuantumBase.jl)
OpenQuantumBase.jl is a component package for [OpenQuantumTools.jl](https://github.com/USCqserver/OpenQuantumTools.jl). It holds the
common types and utility functions which are shared by other component packages
in order to reduce the size of dependencies. | OpenQuantumBase | https://github.com/USCqserver/OpenQuantumBase.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 724 | using QuDiffEq
using OrdinaryDiffEq, Test
"""
Linearizing a nonlinear ODE and solving it using QuLDE
du/dt = f
u0 -> initial condition for the ODE
The Jacobian J and the vector b are calculated at every time step h.
Δu -> difference from the fixed point
Equation input to the QuLDE circuit : d(Δu)/dt = J * Δu + b
k -> order of the Taylor expansion in the QuLDE circuit
Δu is added to the previous value of u.
"""
function f(du,u,p,t)
du[1] = -2*(u[2]^2)*u[1]
du[2] = 3*(u[1]^(1.5)) - 0.1*u[2]
end
u0 = [0.2,0.1]
h = 0.1
k = 2
tspan = (0.0,0.8)
prob = ODEProblem(f,u0,tspan)
qsol = solve(prob,QuLDE(k),dt = h)
sol = solve(prob,Tsit5(),dt = h,adaptive = false)
r_out = transpose(hcat(sol.u...))
@test isapprox.(r_out,qsol, atol = 1e-3) |> all
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 1684 | using Yao
using QuDiffEq
using LinearAlgebra
using BitBasis
using OrdinaryDiffEq
using Plots
using Test
# number of vertices and edges
vertx = 7
ege = 8
#Constructing incidence matrix B
B = zeros(vertx,ege)
@inbounds for i in 1:vertx
B[i,i] = -1
B[i,i+1] = 1
end
B[1,1] = 1
de = 0.5
sn = sin.((0.0:de:(vertx-1)*de)*2*pi/((vertx-1)*de))
u0 = ComplexF64.(sn)
# Initial conditions (stationary to begin with)
u1 = [u0; zero(u0); 0.0; 0.0]
u_ = Float64[u0 zero(u0)]
k = 2 # order in Taylor expansion
t = 1e-2 # time step
B_t = transpose(B)
n = 11 # number of steps
a = 1e-1 # spatial discretization
function make_hamiltonian(B,B_t,a)
vertx,ege = size(B)
n = nextpow(2,vertx+ege)
H = zeros(n,n)
@inbounds H[1:vertx,vertx+1:vertx+ege] = B
@inbounds H[vertx+1:vertx+ege,1:vertx] = B_t
H = -im/a*H
return H
end
function do_pde(ϕ,B,B_t,k,t,a,n)
vertx, = size(B)
H = make_hamiltonian(B,B_t,a)
res = Array{Array{ComplexF64,1},1}(undef,n)
res[1] = @view ϕ[1:vertx]
for i in 2:n
r, N = taylorsolve(H,ϕ,k,t)
ϕ = N*vec(state(r))
res[i] = @view ϕ[1:vertx]
end
res_real = real(res)
return res_real
end
#Dirichlet
res1 = do_pde(u1,B,B_t,k,t,a,n)
const D1 = -1/(a*a)*B*B_t # Laplacian Operator
function f(du,u,p,t)
buffer, D = p
u1 = @view(u[:,1])
u2 = @view(u[:,2])
mul!(buffer, D, u2)
Du = buffer
du[:,1] = Du
du[:,2] = u1
end
tspan = (0.0,0.1)
prob1 = ODEProblem(f,u_,tspan,(zero(u_[:,1]), D1))
sol1 = solve(prob1,Tsit5(),dt=0.01,adaptive = false)
s1 = Array{Array{Float64,1},1}(undef,n)
for i in 1:n
s1[i] = @view (sol1.u[i][:,1])
end
@test isapprox.(res1,s1, atol = 1e-2) |> all
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 726 | using Documenter, QuDiffEq
const PAGES = [
"Home" => "index.md",
"Tutorial" => ["tutorial/lin.md","tutorial/nlin.md",] ,
"Manual" => ["man/algs.md", "man/taylor.md", ]
]
makedocs(sitename="QuDiffEq.jl",
modules = [QuDiffEq],
format = Documenter.HTML(
prettyurls = ("deploy" in ARGS),
canonical = ("deploy" in ARGS) ? "https://quantumbfs.github.io/QuDiffEq.jl/latest/" : nothing,
assets = ["assets/favicon.ico"],
),
doctest = ("doctest=true" in ARGS),
clean = false,
linkcheck = !("skiplinks" in ARGS),
pages = PAGES)
deploydocs(
repo = "github.com/QuantumBFS/QuDiffEq.jl.git",
target = "build",
)
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 2451 | export hhlcircuit, hhlproject!, hhlsolve, HHLCRot
import YaoArrayRegister.u1rows!
"""
HHLCRot{N, NC} <: PrimitiveBlock{N}
Controlled rotation gate used in HHL algorithm, applied on N qubits.
* cbits: control bits.
* ibit: the ancilla bit.
* C_value: the value of the constant "C"; it should be smaller than the spectral "gap".
"""
struct HHLCRot{N, NC, T} <: PrimitiveBlock{N}
cbits::Vector{Int}
ibit::Int
C_value::T
HHLCRot{N}(cbits::Vector{Int}, ibit::Int, C_value::T) where {N, T} = new{N, length(cbits), T}(cbits, ibit, C_value)
end
@inline function hhlrotmat(λ::Real, C_value::Real)
b = C_value/λ
a = sqrt(1-b^2)
a, -b, b, a
end
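# The tuple (a, -b, b, a) is a 2×2 rotation with sinθ = C_value/λ, so the ancilla
# branch |1⟩ acquires an amplitude proportional to C_value/λ. Post-selecting the
# ancilla on |1⟩ therefore weights each eigencomponent by 1/λ, the scaling HHL
# needs; the constant C_value is divided out at the end of `hhlsolve`.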
function YaoBlocks._apply!(reg::ArrayReg, hr::HHLCRot{N, NC, T}) where {N, NC, T}
mask = bmask(hr.ibit)
step = 1<<(hr.ibit-1)
step_2 = step*2
nbit = nqubits(reg)
for j = 0:step_2:size(reg.state, 1)-step
for i = j+1:j+step
λ = bfloat(readbit(i-1, hr.cbits...), nbits=nbit-1)
if λ >= hr.C_value
u = hhlrotmat(λ, hr.C_value)
YaoArrayRegister.u1rows!(state(reg), i, i+step, u...)
end
end
end
reg
end
"""
hhlproject!(all_bit::ArrayReg, n_reg::Int) -> Vector
Project onto the target state |1>|00>|u> and return the |u> vector.
"""
function hhlproject!(all_bit::ArrayReg, n_reg::Int)
all_bit |> focus!(1:(n_reg+1)...) |> select!(1) |> state |> vec
end
"""
    hhlcircuit(UG, n_reg::Int, C_value::Real) -> ChainBlock
Builds up an HHL circuit.
"""
function hhlcircuit(UG, n_reg::Int, C_value::Real)
n_b = nqubits(UG)
n_all = 1 + n_reg + n_b
pe = PEBlock(UG, n_reg, n_b)
cr = HHLCRot{n_reg+1}([2:n_reg+1...], 1, C_value)
chain(n_all, subroutine(n_all, pe, [2:n_all...,]), subroutine(n_all, cr, [1:(n_reg+1)...,]), subroutine(n_all, pe', [2:n_all...,]))
end
"""
    hhlsolve(A::Matrix, b::Vector, n_reg::Int, C_value::Real) -> Vector
Solves the linear system A|x> = |b> using the HHL algorithm. `A` must be Hermitian.
"""
function hhlsolve(A::Matrix, b::Vector, n_reg::Int, C_value::Real)
if !ishermitian(A)
throw(ArgumentError("Input matrix not hermitian!"))
end
UG = matblock(exp(2π*im.*A))
# Generating input bits
all_bit = join(ArrayReg(b), zero_state(n_reg), zero_state(1))
# Construct HHL circuit.
circuit = hhlcircuit(UG, n_reg, C_value)
# Apply bits to the circuit.
apply!(all_bit, circuit)
# Get state of aiming state |1>|00>|u>.
hhlproject!(all_bit, n_reg) ./ C_value
end
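# Minimal usage sketch (illustrative only; the helper name and all values below
# are made up and not part of the original API). It is wrapped in a function so
# nothing executes when the module is loaded.
function _hhlsolve_example()
    A = [0.6 0.2+0.0im; 0.2 0.6]                 # small Hermitian matrix, eigenvalues in (0, 1)
    b = normalize!(rand(ComplexF64, 2))          # normalized right-hand side
    C_value = 0.25 * minimum(abs.(eigvals(A)))   # must stay below the smallest eigenvalue
    hhlsolve(A, b, 8, C_value)                   # should approximate A \ b
end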
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 1365 | export PEBlock, projection_analysis
"""
PEBlock(UG, n_reg, n_b) -> ChainBlock
Phase estimation circuit.
* `UG`: the input unitary matrix.
* `n_reg`: the number of bits used to store the phases.
* `n_b`: the number of bits used to store the vector.
"""
function PEBlock(UG::GeneralMatrixBlock, n_reg::Int, n_b::Int)
nbit = n_b + n_reg
# Apply Hadamard Gate.
hs = repeat(nbit, H, 1:n_reg)
# Construct a control circuit.
control_circuit = chain(nbit)
for i = 1:n_reg
push!(control_circuit, control(nbit, (i,), (n_reg+1:nbit...,)=>UG))
if i != n_reg
UG = matblock(mat(UG) * mat(UG))
end
end
# Inverse QFT Block.
iqft = concentrate(nbit, QFT{n_reg}()',[1:n_reg...,])
chain(hs, control_circuit, iqft)
end
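# Illustrative sketch (the helper name and the numbers are made up): a
# single-qubit unitary with a known phase ϕ = 3/16 is fed to `PEBlock` with a
# 4-bit phase register, mirroring how the block is used in the test suite.
function _peblock_example()
    ϕ = 3 / 16                                             # exactly representable on 4 bits
    UG = matblock(ComplexF64[1 0; 0 exp(2π * im * ϕ)])     # diagonal unitary
    circ = PEBlock(UG, 4, 1)                               # 4 register bits, 1 state bit
    reg = join(ArrayReg(ComplexF64[0, 1]), zero_state(4))  # eigenstate |1⟩ of UG
    apply!(reg, circ)                                      # register bits now encode ϕ (up to bit order)
end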
"""
projection_analysis(evec::Matrix, reg::ArrayReg) -> Tuple
Analyse using state projection.
It returns a tuple of (most probable configuration, the overlap matrix, the relative probability for this configuration)
"""
function projection_analysis(evec::Matrix, reg::ArrayReg)
overlap = evec'*state(reg)
amp_relative = Float64[]
bs = Int[]
for b in basis(overlap)
mc = argmax(view(overlap, b+1, :) .|> abs)-1
push!(amp_relative, abs2(overlap[b+1, mc+1])/sum(overlap[b+1, :] .|> abs2))
push!(bs, mc)
end
bs, overlap, amp_relative
end
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 2092 | export QuLDE, LDEMSAlgHHL, QuNLDE
export QuEuler, QuLeapfrog, QuAB2, QuAB3, QuAB4
abstract type QuODEAlgorithm <: DiffEqBase.AbstractODEAlgorithm end
"""
LDEMSAlgHHL <: QuODEAlgorithm
Multi-step methods based on HHL
"""
abstract type LDEMSAlgHHL <: QuODEAlgorithm end
"""
QuLDE <: QuODEAlgorithm
Linear differential equation solvers (non-HHL)
* k : order of Taylor series expansion
"""
struct QuLDE <: QuODEAlgorithm
k::Int
QuLDE(k = 3) = new(k)
end
"""
QuNLDE <: QuODEAlgorithm
Nonlinear differential equation solver (non-HHL, based on the forward Euler function transform)
* k : order of Taylor series expansion
* ϵ : precision
"""
struct QuNLDE <: QuODEAlgorithm
k::Int
ϵ::Real
QuNLDE(k = 3, ϵ = 1e-3) = new(k,ϵ)
end
"""
QuEuler{T} <: LDEMSAlgHHL
Euler Method using HHL (1-step method)
"""
struct QuEuler{T} <: LDEMSAlgHHL
step::Int
α::Vector{T}
β::Vector{T}
nreg::Int
QuEuler(nreg = 12,::Type{T} = Float64) where {T} = new{T}(1,[1.0,],[1.0,],nreg)
end
"""
QuLeapfrog{T} <: LDEMSAlgHHL
Leapfrog Method using HHL (2-step method)
"""
struct QuLeapfrog{T} <: LDEMSAlgHHL
step::Int
α::Vector{T}
β::Vector{T}
nreg::Int
QuLeapfrog(nreg = 12,::Type{T} = Float64) where {T} = new{T}(2,[0, 1.0],[2.0, 0],nreg)
end
"""
QuAB2{T} <: LDEMSAlgHHL
AB2 Method using HHL (2-step method)
"""
struct QuAB2{T} <: LDEMSAlgHHL
step::Int
α::Vector{T}
β::Vector{T}
nreg::Int
QuAB2(nreg = 12,::Type{T} = Float64) where {T} = new{T}(2,[1.0, 0], [1.5, -0.5],nreg)
end
"""
QuAB3{T} <: LDEMSAlgHHL
AB3 Method using HHL (3-step method)
"""
struct QuAB3{T} <: LDEMSAlgHHL
step::Int
α::Vector{T}
β::Vector{T}
nreg::Int
QuAB3(nreg = 12,::Type{T} = Float64) where {T} = new{T}(3,[1.0, 0, 0], [23/12, -16/12, 5/12],nreg)
end
"""
QuAB4{T} <: LDEMSAlgHHL
AB4 Method using HHL (4-step method)
"""
struct QuAB4{T} <: LDEMSAlgHHL
step::Int
α::Vector{T}
β::Vector{T}
nreg::Int
QuAB4(nreg = 12,::Type{T} = Float64) where {T} = new{T}(4,[1.0, 0, 0, 0], [55/24, -59/24, 37/24, -9/24],nreg)
end
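# Illustrative sketch (the helper name and values are made up): the types above
# are plain parameter holders that are passed to `DiffEqBase.solve` together
# with a `QuLDEProblem` (defined in QuDiffProblem.jl). Wrapped in a function so
# it only runs on demand.
function _qualg_example()
    A = [0.0 1.0; -1.0 0.0]             # linear part of du/dt = A*u + b
    b = ComplexF64[0.1, 0.2]            # inhomogeneous term
    u0 = ComplexF64[1.0, 0.0]           # initial condition
    prob = QuLDEProblem(A, b, u0, (0.0, 0.4))
    # Taylor-truncation solver; the HHL-based multistep solvers (QuEuler, QuAB2, ...)
    # are called the same way with an additional `dt` keyword.
    solve(prob, QuLDE(3))
end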
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 339 | module QuDiffEq
using Yao
using YaoBlocks
using DiffEqBase
using BitBasis
using LinearAlgebra
using ForwardDiff
using YaoExtensions
include("QuDiffProblem.jl")
include("QuDiffAlgs.jl")
include("TaylorTrunc.jl")
include("QuDiffHHL.jl")
include("HHL.jl")
include("PhaseEstimation.jl")
include("QuLDE.jl")
include("QuNLDE.jl")
end # module
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 4023 | export array_qudiff, prepare_init_state, bval, aval
"""
Based on : arxiv.org/abs/1010.2745v2
x' = Ax + b
* A - input matrix.
* b - input vector.
* x - initial vector.
* N - dimension of b (as a power of 2).
* h - step size.
* tspan - time span.
* array_qudiff(N_t,N,h,A) - generates the matrix for the k-step solver.
* prepare_init_state(b,x,h,N_t) - generates the initial state.
LDEMSAlgHHL
* step - number of steps of the multistep method
* α - coefficients for xₙ
* β - coefficients for xₙ'
"""
function bval(g::Function,alg::LDEMSAlgHHL,t,h)
b = zero(g(1))
for i in 1:(alg.step)
b += alg.β[i]*g(t-(i-1)*h)
end
return b
end
function aval(g::Function,alg::LDEMSAlgHHL,t,h)
sz, = size(g(1))
A = Array{ComplexF64}(undef,sz,(alg.step + 1)*sz)
i_mat = Matrix{Float64}(I, size(g(1)))
A[1:sz,sz*(alg.step) + 1:sz*(alg.step + 1)] = i_mat
for i in 1:alg.step
A[1:sz,sz*(i - 1) + 1: sz*i] = -1*(alg.α[alg.step - i + 1]*i_mat + h*alg.β[alg.step - i + 1]*g(t - (alg.step - i)*h))
end
return A
end
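# For time-independent A and b, the rows assembled from `aval`/`bval` encode the
# k-step recurrence
#     x_{n+1} = Σ_{j=1}^{k} α_j x_{n+1-j} + h Σ_{j=1}^{k} β_j (A x_{n+1-j} + b)
# e.g. QuAB2 (α = [1, 0], β = [3/2, -1/2]) reduces to the familiar
#     x_{n+1} = x_n + h*(1.5*f_n - 0.5*f_{n-1}),   f_n = A*x_n + b.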
function prepare_init_state(g::Function,alg::LDEMSAlgHHL,tspan::NTuple{2, Float64},x::Vector,h::Float64)
N_t = round(Int, (tspan[2] - tspan[1])/h + 1) #number of time steps
N = nextpow(2,2*N_t + 1) # To ensure we have a power of 2 dimension for matrix
sz, = size(g(1))
init_state = zeros(ComplexF64,2*(N)*sz)
# initial value
init_state[1:sz] = x
for i in 2:N_t
b = bval(alg,h*(i - 1) + tspan[1],h) do t g(t) end
init_state[Int(sz*(i - 1) + 1):Int(sz*(i))] = h*b
end
return init_state, N-1, N_t, sz
end
function array_qudiff(g::Function,alg::LDEMSAlgHHL,tspan::NTuple{2, Float64},h::Float64)
sz, = size(g(1))
i_mat = Matrix{Float64}(I, size(g(1)))
N_t = round(Int, (tspan[2] - tspan[1])/h + 1) #number of time steps
N = nextpow(2,2*N_t + 1) # To ensure we have a power of 2 dimension for matrix
A_ = zeros(ComplexF64, N*sz, N*sz)
# Generates First two rows
@inbounds A_[1:sz, 1:sz] = i_mat
@inbounds A_[sz + 1:2*sz, 1:sz] = -1*(i_mat + h*g(tspan[1]))
@inbounds A_[sz + 1:2*sz,sz+1:sz*2] = i_mat
#Generates additional rows based on k - step
for i in 3:alg.step
@inbounds A_[sz*(i - 1) + 1:sz*i, sz*(i - 3) + 1:sz*i] = aval(QuAB2(),(i-2)*h + tspan[1],h) do t g(t) end
end
for i in alg.step + 1:N_t
@inbounds A_[sz*(i - 1) + 1:sz*(i), sz*(i - alg.step - 1) + 1:sz*i] = aval(alg,(i - 2)*h + tspan[1],h) do t g(t) end
end
#Generates half mirroring matrix
for i in N_t + 1:N
@inbounds A_[sz*(i - 1) + 1:sz*(i), sz*(i - 2) + 1:sz*(i - 1)] = -1*i_mat
@inbounds A_[sz*(i - 1) + 1:sz*(i), sz*(i - 1) + 1:sz*i] = i_mat
end
A_ = [zero(A_) A_;A_' zero(A_)]
return A_
end
function _array_qudiff(A::AbstractMatrix{T}, alg, tspan, dt) where {T}
At(t) = A
array_qudiff(alg, tspan, dt) do t At(t) end
end
_array_qudiff(A::Function, alg, tspan, dt) = array_qudiff(A, alg, tspan, dt)
function _prepare_init_state(b::Vector{T}, alg, tspan, x, dt) where {T}
bt(t) = b
prepare_init_state(alg, tspan, x, dt) do t bt(t) end
end
_prepare_init_state(b::Function, alg, tspan, x, dt) = prepare_init_state(b, alg, tspan, x, dt)
function DiffEqBase.solve(prob::QuLDEProblem{uType,tType,isinplace, F, P}, alg::LDEMSAlgHHL; dt = (prob.tspan[2]-prob.tspan[1])/10, kwargs...) where {uType,tType,isinplace, F, P}
A = prob.A
b = prob.b
tspan = prob.tspan
x = prob.u0
nreg = alg.nreg
matx = _array_qudiff(A, alg, tspan, dt)
initstate, N_p, N_t, sz = _prepare_init_state(b, alg, tspan, x, dt)
λ = maximum(eigvals(matx))
C_value = minimum(eigvals(matx) .|> abs)*0.01;
matx = 1/(λ*2)*matx
initstate = initstate*1/(2*λ) |> normalize!
res = hhlsolve(matx,initstate, nreg, C_value)
res = res/λ
N = Int(log2(sz))
r = res[(N_p + 1)*2 + 2^N - 1: (N_p + 1)*2 + 2^N + 2*N_t - 2] # extract the solution entries from the HHL output vector
return r
end;
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 1646 | export QuLDEProblem
"""
Linear ODE Problem definition
"""
struct QuODEFunction{iip,PType}<: DiffEqBase.AbstractODEFunction{iip}
linmatrix::Array{PType,2}
QuODEFunction(linmatrix::Array{Complex{PType},2}) where PType = new{true,Complex{PType}}(linmatrix)
QuODEFunction(linmatrix::Array{PType,2}) where PType = new{true,Complex{PType}}(linmatrix)
end
abstract type QuODEProblem{uType,tType,isinplace} <: DiffEqBase.AbstractODEProblem{uType,tType,isinplace} end
struct QuLDEProblem{uType,tType,isinplace, F, P, bType} <: QuODEProblem{uType,tType,isinplace}
A::F
b::P
u0::uType
tspan::Tuple{tType,tType}
function QuLDEProblem(A::QuODEFunction{iip,CPType},b::Array{T,1},u0::Array{G,1},tspan,::Type{bType} = false;kwargs...) where {iip,CPType,T,G,bType}
new{Array{CPType,1},typeof(tspan),iip,typeof(A.linmatrix),Array{CPType,1},bType}(A.linmatrix,b,u0,tspan)
end
function QuLDEProblem(A,b::Array{T,1},u0::Array{G,1},tspan;kwargs...,) where {T,G}
f = QuODEFunction(A)
CPType = eltype(f.linmatrix)
new{Array{CPType,1},typeof(tspan[1]),isinplace(f),typeof(f.linmatrix),Array{CPType,1}, false}(f.linmatrix,b,u0,tspan)
end
function QuLDEProblem(A,b::Array{G,1},tspan;kwargs...,) where {G}
f = QuODEFunction(A)
CPType = eltype(f.linmatrix)
u0 = nothing
new{Nothing,typeof(tspan[1]),isinplace(f),typeof(f.linmatrix), Array{CPType,1}, true}(f.linmatrix,b,u0,tspan)
end
function QuLDEProblem(A,b,u0::Array{G,1},tspan;kwargs...,) where {G}
new{Array{G,1},typeof(tspan[1]),true,typeof(A),typeof(b), false}(A,b,u0,tspan)
end
end
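# Constructor sketch (the helper name and values are illustrative): two common
# ways of posing du/dt = A*u + b, with and without an initial vector.
function _quldeproblem_example()
    A = ComplexF64[0.0 1.0; -1.0 0.0]
    b = ComplexF64[0.1, 0.2]
    u0 = ComplexF64[1.0, 0.0]
    p1 = QuLDEProblem(A, b, u0, (0.0, 0.4))   # full problem with an initial vector
    p2 = QuLDEProblem(A, b, (0.0, 0.4))       # u0 = 0 variant (only the b-driven part)
    p1, p2
end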
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 4281 | export quldecircuit
"""
quldecircuit(n::Int,blk::TaylorParam,VS1::AbstractMatrix,VS2::AbstractMatrix) -> ChainBlock{n}
Generates the circuit for solving linear differential equations with a unitary H input.
"""
function quldecircuit(n::Int,blk::TaylorParam,VS1::AbstractMatrix,VS2::AbstractMatrix)
C_tilda = blk.C_tilda
D_tilda = blk.D_tilda
N = blk.N
circinit = circuit_ends(n,blk,VS1,VS2)
circmid = circuit_intermediate(n,1,blk)
circfin =circuit_ends(n,blk,VS1',VS2')
CPType = eltype(blk.H)
V = CPType[C_tilda/N D_tilda/N; D_tilda/N -1*C_tilda/N]
V = matblock(V)
push!(circfin,put(1=>V))
return chain(circinit, circmid,circfin)
end
"""
quldecircuit(n::Int,blk::TaylorParam,VS1::AbstractMatrix,VS2::AbstractMatrix,VT::AbstractMatrix) -> ChainBlock{n}
Generates the circuit for solving linear differential equations with a non-unitary H input.
"""
function quldecircuit(n::Int,blk::TaylorParam,VS1::AbstractMatrix,VS2::AbstractMatrix,VT::AbstractMatrix)
C_tilda = blk.C_tilda
D_tilda = blk.D_tilda
N = blk.N
circinit = circuit_ends(n,blk,VS1,VS2,VT)
circmid = circuit_intermediate(n,1,blk)
circfin =circuit_ends(n,blk,VS1',VS2',VT')
CPType = eltype(blk.H)
V = CPType[C_tilda/N D_tilda/N; D_tilda/N -1*C_tilda/N]
V = matblock(V)
push!(circfin,put(1=>V))
return chain(circinit, circmid,circfin)
end
function DiffEqBase.solve(prob::QuLDEProblem{uType,tType,isinplace, F, P, false}, alg::QuLDE; kwargs...) where {uType,tType,isinplace, F, P}
opn = opnorm(prob.A)
b = prob.b
t = prob.tspan[2] - prob.tspan[1]
x = prob.u0
nbit = log2i(length(b))
k = alg.k
CPType = eltype(x)
blk = TaylorParam(k,t,prob)
VS1 = calc_vs1(blk,x,opn)
VS2 = calc_vs2(blk,b,opn)
rs = blk.rs
l = blk.l
if rs !=k
n = 1 + rs + nbit
inreg = join(ArrayReg(x/norm(x)), zero_state(CPType,rs), ( (blk.C_tilda/blk.N) * zero_state(CPType, 1) )) + join( ArrayReg(b/norm(b)), zero_state(CPType, rs), ((blk.D_tilda/blk.N) * ArrayReg(CPType, bit"1") ))
cir = quldecircuit(n,blk,VS1,VS2)
else
n = 1 + k*(1 + l) + nbit
VT = calc_vt(CPType)
inreg = join(ArrayReg(x/norm(x)), zero_state(CPType, k*l), zero_state(CPType, k), ( (blk.C_tilda/blk.N) * zero_state(CPType, 1) ) )+ join(ArrayReg(b/norm(b)), zero_state(CPType, k*l), zero_state(CPType, k), ((blk.D_tilda/blk.N) * ArrayReg(CPType, bit"1") ))
cir = quldecircuit(n,blk,VS1,VS2,VT)
end
res = apply!(inreg,cir) |> focus!(1:n - nbit...,) |> select!(0) |> state
out = (blk.N^2)*(vec(res))
return out
end
function DiffEqBase.solve(prob::QuLDEProblem{uType, tType, isinplace, F, P, true}, alg::QuLDE; kwargs...) where {uType, tType, isinplace, F, P}
opn = opnorm(prob.A)
b = prob.b
t = prob.tspan[2] - prob.tspan[1]
k = alg.k
nbit = log2i(length((b)))
blk = TaylorParam(k,t,prob)
CPType = eltype(b)
VS2 = calc_vs2(blk,b,opn)
l = blk.l
rs = blk.rs
if rs !=k
n = rs + nbit
inreg = join(ArrayReg(b/norm(b)), zero_state(CPType, rs))
cir = taylorcircuit(n,blk,VS2)
else
n = k*(1 + l) + nbit
inreg = join(ArrayReg(b/norm(b)), zero_state(CPType, k*l), zero_state(CPType, k))
VT = calc_vt(CPType)
cir = taylorcircuit(n,blk,VS2,VT)
end
r = apply!(inreg,cir) |> focus!(1:n - nbit...,) |> select!(0) |> state
out = blk.N * vec(r)
return out
end;
function DiffEqBase.solve(prob::ODEProblem, alg::QuLDE; dt = (prob.tspan[2]-prob.tspan[1])/10 ,kwargs...)
u0 = prob.u0
siz, = size(u0)
if !ispow2(siz) || siz == 1
throw("Enter arrays of length that are powers of 2 greater than 1.")
end
nbit = log2i(siz)
f = prob.f
p = prob.p
tspan = prob.tspan
k = alg.k
len = round(Int,(tspan[2] - tspan[1])/dt) + 1
res = Array{eltype(u0),2}(undef,len,siz)
utemp = u0
b = zero(u0)
for i in 0:len - 2
res[i+1,:] = utemp
f(b,utemp,p,i*dt + tspan[1])
J = ForwardDiff.jacobian((du,u) -> f(du,u,p, i*dt+tspan[1]),b,utemp)
qprob = QuLDEProblem(J,b,(0.0,dt))
out = solve(qprob,QuLDE(k))
utemp = real(out + utemp)
end
res[end,:] = utemp
return res
end
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 3127 | export func_transform, euler_matrix,euler_matrix_update, nonlinear_transform, make_input_vector, make_hermitian
"""
make_input_vector(x::Vector{T}) -> Vector{T}
Generates input vector for `QuNLDE` iterations.
"""
function make_input_vector(x::Vector{T}) where T
if !(norm(x) ≈ 1)
throw(ArgumentError("Input vector is not normalized"))
end
len = length(x)
siz = nextpow(2,len + 1)
z = zeros(T, siz)
z[1] = one(T)
z[2:len+1] = x
normalize!(z)
reg = kron([1,0], z, z)
return reg
end
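# For a normalized input x = [x₁, x₂] the register built above is
#     |0⟩ ⊗ z ⊗ z   with   z = [1, x₁, x₂, 0] / √2,
# i.e. one ancilla qubit together with two copies of the zero-padded "affine"
# state z; this is the layout consumed by the forward-Euler function transform below.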
"""
make_hermitian(A::Matrix) -> Matrix
Returns a Hermitian matrix embedding A (A itself if it is already Hermitian).
"""
function make_hermitian(A::Matrix)
if ishermitian(A)
return A
end
return [zero(A) im*A'; -im*A zero(A)]
end
function nonlinear_transform(H::Matrix, x::Vector, k::Int, ϵ::Real = 1e-4)
r, N = taylorsolve(im*H,x,k,ϵ)
n = log2i(length(x))
nb = Int((n-1)/2)
r = relax!(r) |> focus!(n)|> select!(1) |> focus!(1:nb...,) |> select!(0)
return r,sqrt(2)*N/ϵ
end
"""
func_transform(A::Matrix, x::Vector, k::Int) -> ArrayReg, <: Complex
Function transform sub-routine. Returns state register and inverse probability of finding it.
"""
function func_transform(A::Matrix, x::Vector, k::Int,ϵ::Real = 1e-4)
reg = make_input_vector(x)
H = make_hermitian(A)
r, N = nonlinear_transform(H,reg,k,ϵ)
return r, N
end
"""
euler_matrix(A::Matrix{CPType},b::Vector{CPType},h::Real) -> Matrix
Generates matrix for forward Euler iteration.
"""
function euler_matrix(A::Matrix{CPType},b::Vector{CPType},h::Real) where CPType
n = length(b)
A = CPType(h)*A
A[1,1] = 1
@inbounds for i in 1:n
A[4*i+ 1, i+1] += 1
end
return A
end
"""
    euler_matrix_update(A::Matrix{CPType}, b::Vector{CPType}, nrm::Real) -> Matrix
Updates the Euler matrix for the forward Euler iteration after rescaling by the current norm `nrm`.
"""
function euler_matrix_update(A::Matrix{CPType}, b::Vector{CPType}, nrm::Real) where CPType
n = length(b)
A = CPType(nrm^2)*A
A[1,1] = 1
@inbounds for i in 1:n
A[4*i+ 1, 1] = A[4*i+ 1, 1]/(nrm^2)
@. A[4*i+ 1, 2:n+1] = A[4*i+ 1, 2:n+1]/nrm
for j in 1:n
A[4*i + 1, 4*j + 1] = A[4*i + 1, 4*j + 1]/nrm
end
end
return A
end
function DiffEqBase.solve(prob::QuODEProblem,alg::QuNLDE; dt = (prob.tspan[2]-prob.tspan[1])/10, kwargs...)
A = prob.A
b = prob.b
k = alg.k
ϵ = alg.ϵ
tspan = prob.tspan
len = round(Int,(tspan[2] - tspan[1])/dt) + 1
siz = length(b)
res = Array{eltype(b),2}(undef,len,siz)
res[1,:] = b
A = euler_matrix(A,b,dt)
H = make_hermitian(A)
reg = make_input_vector(b)
r, N = nonlinear_transform(H,reg,k,ϵ)
ntem = 1
@views for step in 2:len - 1
tem = vec(state(r))*N*sqrt(2)
res[step,:] = tem[2:siz+1]
ntem = norm(res[step,:])
C = euler_matrix_update(A,b,ntem)
H = make_hermitian(C)
reg = res[step,:]/ntem
reg = make_input_vector(reg)
r, N = nonlinear_transform(H,reg,k,ϵ)
end
tem = vec(state(r))*N*sqrt(2)
@views res[len,:] = tem[2:siz+1]
return res
end
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 10024 | export TaylorParam
export taylorcircuit, taylorsolve, circuit_ends, circuit_intermediate
export v,lc
#export get_param_type, circuit_final
const C(m, x, opn, t, c) = norm(x)*(opn*t*c)^(m)/factorial(m)
const D(m, x, opn, t, c) = norm(x)*(opn*t*c)^(m-1)*t/factorial(m)
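# C(m, …) and D(m, …) are the (unnormalised) weights of the m-th terms of the
# truncated Taylor series, ‖x‖(‖H‖tc)^m / m! and ‖x‖(‖H‖tc)^(m-1) t / m!; they
# are used below to load the ancilla superposition that selects each power of H.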
"""
TaylorParam(k::Int, t::L, H::Matrix, x::Array{CPType,1})
TaylorParam(k::Int,t::L,prob::QuLDEProblem{uType, tType, isinplace, F, P, T})
Assigns values to parameters required for Taylor series based Hamiltonian simulation.
* k : sets order of Taylor series expansion
* t : time of evolution
* H : Hamiltonian
* x : initial vector
* prob : wrapper for linear differential equation problems (contains H, x, b)
"""
struct TaylorParam{CPType, UType, L, HM}
k::Int # Taylor expansion upto k
t::L
H::HM
l::Int
rs::Int
C_tilda::CPType
D_tilda::CPType
N::CPType
function TaylorParam(k::Int, t::L, H::Matrix, x::Array{CPType,1}) where {L,CPType}
opn = opnorm(H)
u = isunitary(H/opn)
C_tilda = 0
if u
c = 1
rs = log2i(k+1)
l = 0
else
c = 2
rs = k
l = 2
end
for i in 0:k
C_tilda = C_tilda + C(i, x, opn,t,c)
end
N = C_tilda
C_tilda = sqrt(C_tilda)
new{CPType, u, L, Array{CPType,2}}(k, t, H, l, rs, C_tilda, zero(C_tilda), N)
end
function TaylorParam(k::Int,t::L,prob::QuLDEProblem{uType, tType, isinplace, F, P, T}) where {L,uType, tType, isinplace, F, P, T}
CPType = eltype(prob.A)
opn = opnorm(prob.A)
u = isunitary(prob.A/opn)
D_tilda = 0
C_tilda = 0
if u
c = 1
rs = log2i(k+1)
l = 0
else
c = 2
rs = k
l = 2
end
for i in 1:k
D_tilda = D_tilda + D(i, prob.b, opn,t,c)
end
N = D_tilda
D_tilda = sqrt(D_tilda)
if !(T)
for i in 0:k
C_tilda = C_tilda + C(i, prob.u0, opn, t,c)
end
C_tilda = sqrt(C_tilda)
N = sqrt(C_tilda^2 + D_tilda^2)
end
new{CPType, u, L, Array{CPType,2}}(k, t, prob.A, l, rs, C_tilda, D_tilda, N)
end
end
"""
calc_vs1(blk::TaylorParam, x::Vector{CPType}, opn::Real) -> Matrix{CPType}
Calculates VS1 block for Taylor circuit.
"""
function calc_vs1(blk::TaylorParam, x::Vector{CPType}, opn::Real) where CPType
k = blk.k
rs = blk.rs
t = blk.t
C_tilda = blk.C_tilda
VS1 = rand(CPType,2^rs,2^rs)
@inbounds VS1[:,1] = zero(VS1[:,1])
if rs == k
@inbounds for j in 0:k
VS1[(2^k - 2^(k-j) + 1),1] = sqrt(C(j, x, opn, t,2))/C_tilda
end
else
@inbounds for j in 0:k
VS1[j+1,1] = sqrt(C(j, x, opn, t,1))/C_tilda
end
end
VS1 = -1*qr(VS1).Q
return VS1
end
"""
calc_vs2(blk::TaylorParam, x::Vector{CPType}, opn::Real) -> Matrix{CPType}
Calculates VS2 block for Taylor circuit.
* opn : operator norm of the input matrix.
"""
function calc_vs2(blk::TaylorParam, x::Vector{CPType}, opn::Real) where CPType
k = blk.k
rs = blk.rs
t = blk.t
D_tilda = blk.D_tilda
VS2 = rand(CPType,2^rs,2^rs)
@inbounds VS2[:,1] = zero(VS2[:,1])
if rs == k
@inbounds for j in 0:k-1
VS2[(2^k - 2^(k-j) + 1),1] = sqrt(D(j+1, x, opn, t, 2))/D_tilda
end
else
@inbounds for j in 0:k - 1
VS2[j+1,1] = sqrt(D(j+1, x, opn, t, 1))/D_tilda
end
end
VS2 = -1*qr(VS2).Q
return VS2
end
"""
unitary_decompose(H::Array{T,2}) -> Array{Array{T,2},1}
Decomposes the argument H into a linear combination of unitary matrices.
"""
function unitary_decompose(H::Array{T,2}) where T
if isunitary(H)
return H
end
Mu = H/opnorm(H)
nbit, = size(H)
nbit = log2i(nbit)
B1 = convert(Array{T,2},1/2*(Mu + Mu'))
B2 = convert(Array{T,2},-im/2*(Mu - Mu'))
iden = Matrix{T}(I,size(B1))
F = Array{Array{T,2},1}(undef,4)
F[1] = B1 + im*sqrt(iden - B1*B1)
F[2] = B1 - im*sqrt(iden - B1*B1)
F[3] = im*B2 - sqrt(iden - B2*B2)
F[4] = im*B2 + sqrt(iden - B2*B2)
return F
end
"""
calc_vt(::Type{CPType}) -> Matrix{CPType}
Generates VT block for non-unitary Taylor circuit.
"""
function calc_vt(::Type{CPType} = ComplexF32) where CPType
VT = rand(CPType,4,4)
VT[:,1] = 0.5*ones(4)
VT = -1*qr(VT).Q
return VT
end
"""
v(n::Int,c::Int, T::Int, V::AbstractMatrix) -> ChainBlock{n}
Builds T input block.
n : total number of qubits
V : T input matrix
c : starting qubit
"""
v(n::Int,c::Int, T::Int, V::AbstractMatrix) = concentrate(n, matblock(V), (c + 1:T + c...,))
"""
v(n::Int,c::Int, j::Tuple, T::Int, V::AbstractMatrix) -> ControlBlock{n}
Builds a controlled block that applies the `T`-qubit matrix `V` on qubits `c+1 … c+T`, controlled on the qubits in `j`.
n : total number of qubits
V : matrix acting on T qubits
c : starting-qubit offset
j : tuple of control qubits
"""
v(n::Int,c::Int, j::Tuple, T::Int, V::AbstractMatrix) = control(n, j,(1 + c:T + c...,)=>matblock(V))
lc(n::Int,c::Int, i::Int, k::Int,l::Int, V::AbstractMatrix) = concentrate(n, matblock(V), (k+c+1+(i-1)*l:k+1+c+i*l-1...,))
"""
circuit_ends(n::Int, blk::TaylorParam{CPType, true}, VS1::AbstractMatrix, VS2::AbstractMatrix)
Generates the part of the circuit that computes and uncomputes the superposition of the ancilla bits in the unitary-H `quldecircuit`.
"""
circuit_ends(n::Int, blk::TaylorParam{CPType, true}, VS1::AbstractMatrix, VS2::AbstractMatrix) where CPType = chain(n, v(n, 1,(-1,), blk.rs, VS1),v(n, 1, (1,), blk.rs, VS2))
"""
circuit_ends(n::Int, blk::TaylorParam{CPType, true}, VS1::AbstractMatrix)
Generates the part of the circuit that computes and uncomputes the superposition of the ancilla bits in the unitary-H `taylorcircuit`.
"""
circuit_ends(n::Int, blk::TaylorParam{CPType, true}, VS1::AbstractMatrix) where CPType = chain(n, v(n, 0, blk.rs, VS1))
"""
circuit_ends(n::Int, blk::TaylorParam{CPType, false}, VS1::AbstractMatrix, VS2::AbstractMatrix, VT::AbstractMatrix)
Generates the part of the circuit that computes and uncomputes the superposition of the ancilla bits in the non-unitary-H `quldecircuit`.
"""
function circuit_ends(n::Int, blk::TaylorParam{CPType, false}, VS1::AbstractMatrix, VS2::AbstractMatrix, VT::AbstractMatrix) where CPType
cir = chain(n, v(n, 1, (-1,), blk.rs, VS1),v(n, 1, (1,),blk.rs, VS2))
for i in 1:blk.k
push!(cir, lc(n,1,i,blk.k,blk.l,VT))
end
return cir
end
"""
circuit_ends(n::Int, blk::TaylorParam{CPType, false}, VS1::AbstractMatrix, VT::AbstractMatrix)
Generates the part of the circuit that computes and uncomputes the superposition of the ancilla bits in the non-unitary-H `taylorcircuit`.
"""
function circuit_ends(n::Int, blk::TaylorParam{CPType, false}, VS1::AbstractMatrix, VT::AbstractMatrix) where CPType
cir = chain(n, v(n, 0, blk.rs,VS1))
for i in 1:blk.k
push!(cir, lc(n,0,i,blk.k,blk.l,VT))
end
return cir
end
"""
circuit_intermediate(n::Int, c::Int, blk::TaylorParam{CPType, true})
Generates the intermediate part of the circuit for unitary H.
"""
function circuit_intermediate(n::Int, c::Int, blk::TaylorParam{CPType, true}) where CPType
H = blk.H/opnorm(blk.H)
k = blk.k
l = blk.l
rs = blk.rs
cir = chain(n)
nbit = n - rs - c
a = Array{Int32,1}(undef, rs)
U = Matrix{CPType}(I, 1<<nbit,1<<nbit)
for i in 0:k
digits!(a,i,base = 2)
G = matblock(U)
push!(cir,control(n, (-1*collect(1+c:rs+c).*((-1*ones(Int, rs)).^a)...,), (rs+1+c:n...,)=>G))
U = H*U
end
return cir
end
"""
circuit_intermediate(n::Int, c::Int, blk::TaylorParam{CPType, false})
Generates the intermediate part of the circuit for non-unitary H.
"""
function circuit_intermediate(n::Int, c::Int, blk::TaylorParam{CPType, false}) where CPType
H = blk.H/opnorm(blk.H)
k = blk.k
l = blk.l
rs = blk.rs
cir = chain(n)
nbit = n - k*(l+1) - c
F = unitary_decompose(H)
a = Array{Int64,1}(undef, l)
for i in 1:k
for j in 0:2^l-1
digits!(a,j,base = 2)
push!(cir, control(n, (i + c, -1*collect(k+1+c+(i-1)*l:k+1+c+i*l-1).*((-1*ones(Int, l)).^a)...,), (n - nbit + 1 : n...,)=>matblock(F[j+1])))
end
end
return cir
end
"""
taylorcircuit(n::Int, blk::TaylorParam, VS1::Matrix) -> ChainBlock{n}
Generates circuit for a unitary H input.
"""
function taylorcircuit(n::Int, blk::TaylorParam, VS1::Matrix)
circinit = circuit_ends(n,blk,VS1)
circmid = circuit_intermediate(n,0,blk)
circfin =circuit_ends(n,blk,VS1')
return chain(circinit, circmid, circfin)
end
"""
    taylorcircuit(n::Int, blk::TaylorParam, VS1::Matrix, VT::Matrix) -> ChainBlock{n}
Generates circuit for a non-unitary H input.
"""
function taylorcircuit(n::Int, blk::TaylorParam, VS1::Matrix, VT::Matrix)
circinit = circuit_ends(n,blk,VS1,VT)
circmid = circuit_intermediate(n,0,blk)
circfin =circuit_ends(n,blk,VS1',VT')
return chain(circinit, circmid, circfin)
end
"""
taylorsolve(H::Array{CPType,2}, x::Vector{CPType}, k::Int, t::Real) -> ArrayReg, CPType
Simulates a Hamiltonian using the Taylor truncation method. Returns the state register and inverse probability of finding it.
"""
function taylorsolve(H::Array{CPType,2}, x::Vector{CPType}, k::Int, t::Real) where CPType
opn = opnorm(H)
nbit = log2i(length(x))
blk = TaylorParam(k,t,H,x)
VS1 = calc_vs1(blk,x,opn)
rs = blk.rs
k = blk.k
l = blk.l
if rs != k
n = rs + nbit
inreg = join(ArrayReg(x/norm(x)), zero_state(CPType,rs))
cir = taylorcircuit(n, blk, VS1)
else
n = k*(1 + l) + nbit
VT = calc_vt(CPType)
inreg = join(ArrayReg(x/norm(x)), zero_state(CPType, k*l), zero_state(CPType, k))
cir = taylorcircuit(n, blk, VS1, VT)
end
r = apply!(inreg,cir) |> focus!(1:n - nbit...,) |> select!(0)
return r, blk.N
end
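# Minimal usage sketch (the helper name and values are made up): the truncated
# series approximates exp(H*t)*x, so passing -im*σz evolves |+⟩ under σz.
function _taylorsolve_example()
    H = -1.0im * ComplexF64[1 0; 0 -1]   # -i*σz, so the series approximates exp(-im*σz*t)
    x = ComplexF64[1, 1] / sqrt(2)       # |+⟩
    r, N = taylorsolve(H, x, 3, 0.5)     # 3rd-order truncation, t = 0.5
    N * vec(state(r))                    # ≈ [exp(-0.5im), exp(0.5im)] / √2
end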
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 2403 | using Yao
using BitBasis
using QuDiffEq
using Test, LinearAlgebra
function crot(n_reg::Int, C_value::Real)
n_rot = n_reg + 1
rot = chain(n_rot)
θ = zeros(1<<n_reg - 1)
for i = 1:(1<<n_reg - 1)
c_bit = Vector(2:n_rot)
λ = 0.0
for j = 1:n_reg
if (readbit(i,j) == 0)
c_bit[j] = -c_bit[j]
end
end
λ = i/(1<<n_reg)
# println("\nλ($i) = $λ")
# println("c_bit($i) = $c_bit\n")
sin_value = C_value / λ
if (sin_value) > 1
return println("C_value = $C_value, λ = $λ, sinθ = $sin_value > 1, please lower C_value.\n")
end
θ[(i)] = 2.0*asin(C_value / λ)
push!(rot, control(c_bit, 1=>Ry(θ[(i)])))
end
rot
end
@testset "HHLCRot" begin
hr = HHLCRot{4}([4,3,2], 1, 0.01)
reg = rand_state(4)
@test reg |> copy |> hr |> isnormalized
hr2 = crot(3, 0.01)
reg1 = reg |> copy |> hr
reg2 = reg |> copy |> hr2
@test fidelity(reg1, reg2)[] ≈ 1
end
"""
hhl_problem(nbit::Int) -> Tuple
Returns (A, b), where
* `A` is positive definite, hermitian, with its maximum eigenvalue λ_max < 1.
* `b` is normalized.
"""
function hhl_problem(nbit::Int)
siz = 1<<nbit
base_space = qr(randn(ComplexF64, siz, siz)).Q
phases = rand(siz)
signs = Diagonal(phases)
A = base_space*signs*base_space'
# reinforce hermitian, see issue: https://github.com/JuliaLang/julia/issues/28885
A = (A+A')/2
b = normalize(rand(ComplexF64, siz))
A, b
end
@testset "HHLtest" begin
# Set up initial conditions.
## A: Matrix in linear equation A|x> = |b>.
## signs: Diagonal Matrix of eigen values of A.
## base_space: the eigen space of A.
## x: |x>.
using Random
Random.seed!(2)
N = 3
A, b = hhl_problem(N)
x = A^(-1)*b # base_i = base_space[:,i] ϕ1 = (A*base_i./base_i)[1]
## n_b : number of bits for |b>.
## n_reg: number of PE register.
## n_all: number of all bits.
n_reg = 12
## C_value: value of constant C in control rotation.
## It should be samller than the minimum eigen value of A.
C_value = minimum(eigvals(A) .|> abs)*0.25
#C_value = 1.0/(1<<n_reg) * 0.9
res = hhlsolve(A, b, n_reg, C_value)
# Test whether HHL circuit returns correct coefficient of |1>|00>|u>.
@test isapprox.(x, res, atol=0.5) |> all
end
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 1475 | using Yao
using BitBasis
using Test, LinearAlgebra
using QuDiffEq
"""
random phase estimation problem setup.
"""
function rand_phaseest_setup(N::Int)
U = rand_unitary(1<<N)
b = randn(ComplexF64, 1<<N); b=b/norm(b)
phases, evec = eigen(U)
ϕs = @. mod(angle(phases)/2/π, 1)
return U, b, ϕs, evec
end
@testset "phaseest" begin
# Generate a random matrix.
N = 3
V = rand_unitary(1<<N)
# Initial Set-up.
phases = rand(1<<N)
ϕ = Int(0b111101)/(1<<6)
phases[3] = ϕ
signs = exp.(2π*im.*phases)
U = V*Diagonal(signs)*V'
b = V[:,3]
# Define ArrayReg and U operator.
M = 6
reg1 = zero_state(M)
reg2 = ArrayReg(b)
UG = matblock(U)
# circuit
circuit = PEBlock(UG, M, N)
# run
reg = apply!(join(reg2, reg1), circuit)
# measure
res = breflect(measure(focus!(copy(reg), 1:M); nshots=10)[1]; nbits=M) / (1<<M)
@test res ≈ ϕ
@test apply!(reg, circuit |> adjoint) ≈ join(reg2, reg1)
end
@testset "phaseest, non-eigen" begin
# Generate a random matrix.
N = 3
U, b, ϕs, evec = rand_phaseest_setup(N)
# Define ArrayReg and U operator.
M = 6
reg1 = zero_state(M)
reg2 = ArrayReg(b)
UG = matblock(U);
# run circuit
reg= join(reg2, reg1)
pe = PEBlock(UG, M, N)
apply!(reg, pe)
# measure
bs, proj, amp_relative = projection_analysis(evec, focus!(reg, M+1:M+N))
@test isapprox(ϕs, bfloat.(bs, nbits=M), atol=0.05)
end
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 1200 | using Yao
using BitBasis
using Random
using Test, LinearAlgebra
using OrdinaryDiffEq
using QuDiffEq
function diffeq_problem(nbit::Int)
siz = 1<<nbit
A = (rand(ComplexF64, siz,siz))
A = (A + A')/2
b = normalize!(rand(ComplexF64, siz))
x = normalize!(rand(ComplexF64, siz))
A, b, x
end
@testset "Linear_differential_equations_HHL" begin
Random.seed!(2)
N = 1
h = 0.1
tspan = (0.0,0.6)
N_t = round(Int, 2*(tspan[2] - tspan[1])/h + 3)
M, v, x = diffeq_problem(N)
A(t) = M
b(t) = v
nreg = 12
f(u,p,t) = M*u + v;
prob = ODEProblem(f, x, tspan)
qprob = QuLDEProblem(A, b, x, tspan)
sol = solve(prob, Tsit5(), dt = h, adaptive = false)
s = vcat(sol.u...)
res = solve(qprob, QuEuler(nreg), dt = h)
@test isapprox.(s, res, atol = 0.5) |> all
res = solve(qprob, QuLeapfrog(nreg), dt = h)
@test isapprox.(s, res, atol = 0.3) |> all
res = solve(qprob, QuAB2(nreg), dt = h)
@test isapprox.(s, res, atol = 0.3) |> all
res = solve(qprob, QuAB3(nreg), dt = h)
@test isapprox.(s, res, atol = 0.3) |> all
res = solve(qprob, QuAB4(nreg),dt = h)
@test isapprox.(s, res, atol = 0.3) |> all
end;
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 1496 | using Yao
using QuDiffEq
using LinearAlgebra
using OrdinaryDiffEq
using BitBasis
using Random
using Test
using YaoBlocks
#Linear Diff Equation Unitary M
function diffeqProblem(nbit::Int)
siz = 1<<nbit
Au = rand_unitary(siz)
An = rand(ComplexF64,siz,siz)
b = normalize!(rand(ComplexF64, siz))
x = normalize!(rand(ComplexF64, siz))
Au,An,b, x
end
@testset "QuLDE_Test" begin
Random.seed!(2)
N = 1
k = 3
tspan = (0.0,0.4)
Au,An,b,x = diffeqProblem(N)
qprob = QuLDEProblem(Au, b, x, tspan)
f(u,p,t) = Au*u + b;
prob = ODEProblem(f, x, tspan)
sol = solve(prob, Tsit5(), dt = 0.1, adaptive = false)
s = sol.u[end]
out = solve(qprob, QuLDE(k))
@test isapprox.(s, out, atol = 0.01) |> all
qprob = QuLDEProblem(An, b, x, tspan)
f(u,p,t) = An*u + b;
prob = ODEProblem(f, x, tspan)
sol = solve(prob, Tsit5(), dt = 0.1, adaptive = false)
s = sol.u[end]
out = solve(qprob, QuLDE(k))
@test isapprox.(s, out, atol = 0.02) |> all
# u0 equal to zero
tspan = (0.0,0.1)
qprob = QuLDEProblem(Au,b,tspan)
out = solve(qprob,QuLDE(k))
t = tspan[2] - tspan[1]
r_out = (exp(Au*t) - Diagonal(ones(length(b))))*Au^(-1)*b
@test isapprox.(r_out, out, atol = 1e-3) |> all
qprob = QuLDEProblem(An,b,tspan)
out = solve(qprob,QuLDE(k))
t = tspan[2] - tspan[1]
r_out = (exp(An*t) - Diagonal(ones(length(b))))*An^(-1)*b
@test isapprox.(r_out, out, atol = 1e-3) |> all
end
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 1023 | using Yao
using QuDiffEq
using LinearAlgebra
using BitBasis
using Random
using Test
using YaoBlocks
using OrdinaryDiffEq
#Linear Diff Equation Unitary M
function f(du,u,p,t)
du[1] = -3*u[1]^2 + u[2]
du[2] = -u[2]^2 - u[1]*u[2]
end
@testset "QuNLDE_Test" begin
Random.seed!(4)
N = 2
k = 3
siz = nextpow(2, N + 1)
x = normalize!(rand(N))
A = zeros(ComplexF32,2^(siz),2^(siz))
A[1,1] = ComplexF32(1)
A[5,3] = ComplexF32(1)
A[5,6] = ComplexF32(-3)
A[9,11] = ComplexF32(-1)
A[9,7] = ComplexF32(-1)
tspan = (0.0,0.4)
qprob = QuLDEProblem(A, x, tspan)
r, N = func_transform(qprob.A, qprob.b, k)
out = N*vec(state(r))
r_out = zero(x)
f(r_out, x,1,1)
@test isapprox.(r_out, out[2:3]*sqrt(2), atol = 1e-3) |> all
prob = ODEProblem(f, x, tspan)
sol = solve(prob, Euler(), dt = 0.1, adaptive = false)
r_out = transpose(hcat(sol.u...))
out = solve(qprob, QuNLDE(3), dt = 0.1)
@test isapprox.(r_out,real(out), atol = 1e-3) |> all
end
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 913 | using Yao
using QuDiffEq
using LinearAlgebra
using BitBasis
using Random
using Test
using YaoBlocks
#Linear Diff Equation Unitary M
function diffeqProblem(nbit::Int)
siz = 1<<nbit
Au = rand_unitary(siz)
An = rand(ComplexF64,siz,siz)
b = normalize!(rand(ComplexF64, siz))
x = normalize!(rand(ComplexF64, siz))
Au,An,b, x
end
@testset "TaylorTrunc_Test" begin
Random.seed!(2)
N = 1
k = 3
tspan = (0.0,0.1)
Au,An,b,x = diffeqProblem(N)
qprob = QuLDEProblem(Au, b, x, tspan)
r,N = taylorsolve(qprob.A,qprob.u0,k,tspan[2])
out = N*vec(state(r))
r_out = exp(qprob.A*tspan[2])*qprob.u0
@test isapprox.(r_out, out, atol = 1e-3) |> all
qprob = QuLDEProblem(An, b, x, tspan)
r,N = taylorsolve(qprob.A,qprob.u0,k,tspan[2])
out = N*vec(state(r))
r_out = exp(qprob.A*tspan[2])*qprob.u0
@test isapprox.(r_out, out, atol = 1e-3) |> all
end
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | code | 473 | using QuDiffEq, Yao
using Test, Random, LinearAlgebra
@testset "HHL_tests" begin
include("HHL_tests.jl")
end
@testset "PhaseEstimation_tests" begin
include("PhaseEstimation_tests.jl")
end
@testset "QuLDE_test" begin
include("QuLDE_tests.jl")
end
@testset "QuNLDE_test" begin
include("QuNLDE_tests.jl")
end
@testset "QuDiffHHL_tests" begin
include("QuDiffHHL_tests.jl")
end
@testset "TaylorTrunc_tests" begin
include("TaylorTrunc_tests.jl")
end
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | docs | 1013 | # v0.1.0 release note
[QuDiffEq.jl](https://github.com/QuantumBFS/QuDiffEq.jl) was developed as part of JSoC 2019. It allows one to solve differential equations using quantum algorithms.
We have the following algorithms and example in place:
* `QuLDE` for linear differential equations. There is also a routine for using `QuLDE` to solve non-linear differential equations by making linear approximations of the functions at hand. [Read more.](https://nextjournal.com/dgan181/julia-soc-19-quantum-algorithms-for-differential-equations/edit)
* Several HHL-based multistep methods for linear differential equations.
* `QuNLDE` for solving non-linear quadratic differential equations. [Read more.](https://nextjournal.com/dgan181/jsoc-19-non-linear-differential-equation-solver-and-simulating-of-the-wave-equation/edit)
* Simulation of the wave equation using quantum algorithms. [Read more.](https://nextjournal.com/dgan181/jsoc-19-non-linear-differential-equation-solver-and-simulating-of-the-wave-equation/edit)
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | docs | 2526 | # QuDiffEq
[](https://github.com/QuantumBFS/QuDiffEq.jl/actions/workflows/CI.yml)
[](https://codecov.io/gh/dgan181/QuDiffEq.jl)
Quantum algorithms for solving differential equations.
This project is part of Julia's Season of Contribution 2019.
For an introduction to the algorithms and an overview of the features, you can take a look at the blog posts: [#1](https://nextjournal.com/dgan181/julia-soc-19-quantum-algorithms-for-differential-equations/edit), [#2](https://nextjournal.com/dgan181/jsoc-19-non-linear-differential-equation-solver-and-simulating-of-the-wave-equation/edit).
## Installation
<p>
QuDiffEq is a
<a href="https://julialang.org">
<img src="https://raw.githubusercontent.com/JuliaLang/julia-logo-graphics/master/images/julia.ico" width="16em">
Julia Language
</a>
package. To install QuDiffEq,
please <a href="https://docs.julialang.org/en/v1/manual/getting-started/">open
Julia's interactive session (known as REPL)</a> and press <kbd>]</kbd> key in the REPL to use the package mode, then type the following command
</p>
```julia
pkg> add QuDiffEq
```
## Algorithms
- Quantum Algorithms for Linear Differential Equations,
- Based on truncated Taylor series
- Based on HHL.
- Quantum Algorithms for Non Linear Differential Equations.
## Built With
* [Yao](https://github.com/QuantumBFS/Yao.jl) - A framework for Quantum Algorithm Design
* [QuAlgorithmZoo](https://github.com/QuantumBFS/QuAlgorithmZoo.jl) - A repository for Quantum Algorithms
## Authors
See the list of [contributors](https://github.com/QuantumBFS/QuDiffEq.jl/graphs/contributors) who participated in this project.
## License
This project is licensed under the MIT License - see the [LICENSE.md](https://github.com/QuantumBFS/QuDiffEq.jl/blob/master/LICENSE) file for details
## References
- D. W. Berry. *High-order quantum algorithm for solving linear differential equations* (https://arxiv.org/abs/1807.04553)
- Tao Xin et al. *A Quantum Algorithm for Solving Linear Differential Equations: Theory and Experiment* (https://arxiv.org/abs/1807.04553)
- Sarah K. Leyton, Tobias J. Osborne. *A quantum algorithm to solve nonlinear differential equations*(https://arxiv.org/abs/0812.4423)
- P. C.S. Costa et al. *Quantum Algorithm for Simulating the Wave Equation* (https://arxiv.org/abs/1711.05394)
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | docs | 571 | # QuDiffEq
[QuDiffEq](https://github.com/QuantumBFS/QuDiffEq.jl) is a package for solving differential equations using quantum algorithms. It makes use of Yao.jl, a quantum simulator in Julia.
## Features
- Quantum algorithms for linear differential equation
- Based on HHL.
- Based on Truncated Taylor series
- Quantum algorithm for non-linear differential equation
## Tutorials
```@contents
Pages = [
"tutorial/lin.md",
"tutorial/nlin.md"
]
Depth = 2
```
## Manual
```@contents
Pages = [
"man/algs.md"
"man/taylor.md"
]
Depth = 2
```
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | docs | 352 | # Quantum Algorithms for Differential Equations
## Taylor truncation based algorithms
```@docs
QuDiffEq.QuLDE
QuDiffEq.QuNLDE
```
## HHL based algorithms
The following algorithms are found in the article: arxiv.org/abs/1010.2745v2 .
```@docs
QuDiffEq.LDEMSAlgHHL
QuDiffEq.QuEuler
QuDiffEq.QuLeapfrog
QuDiffEq.QuAB2
QuDiffEq.QuAB3
QuDiffEq.QuAB4
```
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | docs | 3508 | # Taylor Truncation
Taylor truncation based Hamiltonian simulation (https://arxiv.org/abs/1412.4687) has many clear advantages. It has better complexity dependence on the precision and allows a greater range of Hamiltonians to be simulated.
The package provides circuits for five kinds of problems:
- Unitary Taylor simulation
- Non-unitary Taylor simulation
- Unitary QuLDE Problem
- Non-unitary QuLDEProblem
- Solution by linearising a non-linear differential equation
To simulate the evolution below, the algorithm uses the Taylor expansion of ``e^{iHt}`` truncated at order ``k``.
```math
|x(t)⟩ = e^{iHt}|x(0)⟩
```
We have,
```math
|x(t)\rangle \approx \sum^{k}_{m=0}\frac{||x(0)||(||M||t)^{m}}{m!}\mathcal{M}^{m}|x(0)\rangle
```
where ``M = ||M||\mathcal{M} = iH ``
There are two cases to consider:
1. ``\mathcal{M}`` is unitary. In addition to the vector state register, we have ``T = log_{2}(k+1)`` ancillary bits. The ancilla is in ``|0⟩`` to begin with. The `VS1` block acts on the ancilla register to generate an appropriate superposition
```math
\sum^{k}_{m=0}\frac{||x(0)||(||M||t)^{m}}{m!} |m\rangle
```
Multiplication by the ``\mathcal{M}^{j}`` block is controlled by ``|j⟩`` in the ancilla register. `VS1'`, the adjoint of `VS1`, un-computes the ancilla register. The desired result is obtained when the resulting state is projected onto the ``|0\rangle`` ancilla state (a small classical sketch of these amplitudes is given after this list).
2. ``\mathcal{M}`` is non-unitary. ``\mathcal{M}`` is expressed as a linear combination of (at most) four unitaries, i.e. ``\mathcal{M} =\sum_{i} \frac{1}{2} F_{i}``. We have two registers of sizes ``k`` and ``2k``. These registers participate in control-multiplications as control bits. `VS1` behaves differently to that in the unitary case. In the first register, states with ``j`` ``1``'s (where ``j \in \{0,1,...,k\}``) are raised to probability amplitudes equal to the term with the ``j^{th}`` power in the summation above, while the rest of the states are given zero probability. The mapping used is ``m = 2^{k} - 2^{j}``, where ``m`` corresponds to the basis state in the first register. This register governs the power the ``F_i``'s need to be raised to. The second register is superposed by `VT`, where each new state corresponds to an ``F_i``. When un-computed and measured in the zero ancilla state, we obtain the desired result.
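
The numbers involved in this construction can be sketched classically. The snippet below is **not** the quantum circuit: it only reproduces, for a small placeholder generator, the truncated series and the (unnormalised) amplitudes that `VS1` is meant to prepare. `M`, `x0`, `t` and `k` are illustrative choices.

```julia
# Classical illustration of the order-k Taylor truncation (not the circuit itself).
using LinearAlgebra

M  = [0.0 1.0; -1.0 0.0]      # placeholder generator, M = iH
x0 = normalize([1.0, 1.0])    # |x(0)⟩
t  = 0.3
k  = 4                        # truncation order

normM = opnorm(M)
Mhat  = M / normM             # 𝓜 in the notation above

# (unnormalised) amplitudes VS1 is meant to prepare on the ancilla register
amps = [norm(x0) * (normM * t)^m / factorial(m) for m in 0:k]

# truncated series versus the exact propagator
x_trunc = sum(amps[m + 1] * (Mhat^m * x0) for m in 0:k)
x_exact = exp(M * t) * x0
norm(x_trunc - x_exact)       # small for modest ||M||t and k
```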
```@autodocs
Modules = [QuDiffEq]
Pages = ["TaylorTrunc.jl",]
```
## Quantum Linear Differential Equation
A linear differential equation is written as
```math
\frac{dx}{dt} = Mx + b
```
``M`` is an arbitrary N × N matrix, ``x`` and ``b`` are N dimensional vectors.
The modified version of the `taylorcircuit` is employed here. The transformation over ``x`` is the same as in the `taylorcircuit`, but there is a simultaneous transformation over `b` as well. There is an additional ancillary bit that allows the distinction between ``x`` and ``b``. This superposition is brought about by the `V` matrix. Like `VS1` in the `taylorcircuit`, `VS2` facilitates the transformation on ``b`` in the simulation.
```@autodocs
Modules = [QuDiffEq]
Pages = ["QuLDE.jl"]
```
## Quantum Non-linear Differential Equation
The non-linear solver constitutes two sub-routines.
Firstly, the function transform sub-routine, which employs the `taylorcircuit`. The function transform lets us map ``z`` to ``P(z)``, where ``P`` is a quadratic polynomial.
Secondly, the forward Euler method. The polynomial here is ``z + hf(z)``, where ``f`` is the derivative.
```@autodocs
Modules = [QuDiffEq]
Pages = ["QuNLDE.jl"]
```
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | docs | 2496 | # Linear Differential Equations
A linear differential equation is written as
```math
\frac{dx}{dt} = Mx + b
```
`M` is an arbitrary N × N matrix, `x` and `b` are N dimensional vectors.
`QuDiffEq` allows for the following methods of solving a linear differential equation.
## QuLDE
LDE algorithm based on Taylor Truncation. This method evaluates the vector at the last time step, without going through the intermediate steps, unlike other solvers.
The exact solution for ``x(t)`` is given by -
```math
x(t) = e^{Mt}x(0) + (e^{Mt} - I)M^{-1}b
```
This can be Taylor expanded up to the ``k^{th}`` order as -
```math
x(t) \approx \sum^{k}_{m=0}\frac{(Mt)^{m}}{m!}x(0) + \sum^{k-1}_{n=1}\frac{(Mt)^{n-1}t}{n!}b
```
The vectors ``x(0)`` and ``b`` are encoded as states ``|x(0)\rangle = \sum_{i} \frac{x_{i}}{||x||} |i\rangle`` and ``|b\rangle = \sum_{i} \frac{b_{i}}{||b||} |i\rangle``, with ``\{|i \rangle\}`` as the computational basis states. We can also write ``M`` as ``M = ||M||\mathcal{M}``. We then get:
```math
|x(t)\rangle \approx \sum^{k}_{m=0}\frac{||x(0)||(||M||t)^{m}}{m!}\mathcal{M}^{m}|x(0)\rangle + \sum^{k-1}_{n=1}\frac{||b||(||M||t)^{n-1}t}{n!}\mathcal{M}^{n-1}|b\rangle
```
To bring about the above transformation, we use the `quldecircuit`.
Note: `QuLDE` works only with constant `M` and `b`. There is no such restriction on the other algorithms.
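
Before running the quantum solver, it can be helpful to check the truncated formula above purely classically. The sketch below is not part of the package API; `M`, `b`, `x0`, `t` and `k` are arbitrary placeholder values.

```julia
# Classical sanity check of the truncated Taylor formula (not the quantum algorithm).
using LinearAlgebra

M  = [-0.5 1.0; -1.0 -0.5]     # placeholder generator
b  = [0.3, -0.2]
x0 = normalize([1.0, 2.0])
t  = 0.4
k  = 5

x_exact = exp(M * t) * x0 + (exp(M * t) - I) * inv(M) * b
x_trunc = sum((M * t)^m / factorial(m) * x0 for m in 0:k) +
          sum((M * t)^(n - 1) * t / factorial(n) * b for n in 1:k-1)
norm(x_exact - x_trunc)        # decreases rapidly with k
```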
## LDEMSAlgHHL
- `QuEuler`
- `QuLeapfrog`
- `QuAB2`
- `QuAB3`
- `QuAB4`
The HHL algorithm is used for solving a system of linear equations. One can model multistep methods as linear equations, which then can be simulated through HHL.
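
The idea, sketched classically below, is that a multistep recurrence (forward Euler here, for simplicity) can be stacked into a single block lower-triangular linear system, which is exactly the kind of system HHL addresses. The package's internal encoding may differ; this only illustrates the principle and all names are placeholders.

```julia
# Forward-Euler steps x_{j+1} = (I + h*M) x_j + h*b written as one linear system A*X = rhs.
using LinearAlgebra

M = [-0.5 1.0; -1.0 -0.5]        # placeholder generator
b = [0.3, -0.2]
x0 = [1.0, 0.0]
h, nsteps = 0.1, 4
n = length(x0)

A   = Matrix{Float64}(I, n * (nsteps + 1), n * (nsteps + 1))
rhs = zeros(n * (nsteps + 1))
rhs[1:n] = x0
for j in 1:nsteps
    rows = j * n + 1 : (j + 1) * n
    cols = (j - 1) * n + 1 : j * n
    A[rows, cols] .= -(I + h * M)   # encodes x_{j+1} - (I + h*M) x_j = h*b
    rhs[rows] .= h * b
end
X = A \ rhs                          # all time steps at once
x_end = X[end - n + 1 : end]         # ≈ forward-Euler solution at t = nsteps*h
```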
## Usage
Firstly, we need to define a `QuLDEProblem` for matrix `M` (may be time dependent), initial vector `x` and vector `b` (may be time dependent). `tspan` is the time interval.
```@example lin
using QuDiffEq
using OrdinaryDiffEq, Test
using Random
using LinearAlgebra
siz = 2
M = rand(ComplexF64,siz,siz)
b = normalize!(rand(ComplexF64, siz))
x = normalize!(rand(ComplexF64, siz))
tspan = (0.0,0.4)
qprob = QuLDEProblem(M,b,x,tspan);
```
To solve the problem we use `solve()` after deciding on an algorithm, e.g. `alg = QuAB3()`. Here is an example for `QuLDE`.
```@example lin
alg = QuLDE()
res = solve(qprob,alg)
```
Let's compare the result with a `Tsit5()` from `OrdinaryDiffEq`
```@example lin
f(u,p,t) = M*u + b;
prob = ODEProblem(f, x, tspan)
sol = solve(prob, Tsit5(), dt = 0.1, adaptive = false)
s = sol.u[end]
@test isapprox.(s, res, atol = 0.02) |> all
```
| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.1.1 | 96a6c2a1069d6258d9d8ca982c070621619660f9 | docs | 5085 | # Non-linear Differential Equations
The problem at hand is a set of differential equations. For simplicity, we consider a two variable set.
```math
\begin{array}{rcl} \frac{dx}{dt} & = & f_1(x,y) \\ \frac{dy}{dt} &=& f_2(x,y)\end{array}
```
`QuDiffEq` has two algorithms for solving non-linear differential equations.
## QuNLDE
Uses function transformation and the forward Euler method. (quadratic differential equations only)
The algorithm makes use of two sub-routines :
1. A function transformation routine - this is a mapping from ``z = (x,y)`` to a polynomial, ``P(z)``. The function transformation is done using a Hamiltonian simulation. The *Taylor Truncation method* is used here.
2. A differential equations solver - Forward Euler is used for this purpose. We make use of the mapping, ``z \rightarrow z + hf(z)``
The functions ``f_i``, being quadratic, can be expressed as a sum of monomials with coefficients ``\alpha_i^{kl}``.
```math
f_i = \sum_{k,l = 0 \rightarrow n} \alpha_i^{kl} z_k z_l
```
``z_0`` is equal to 1.
To begin, we encode the vector z, after normalising it, in a state
```math
|\phi\rangle = \frac{1}{\sqrt{2}} |0\rangle + \frac{1}{\sqrt{2}} |z\rangle
```
with ``|z\rangle = \sum z_k |k\rangle``
The tensor product ``|\phi\rangle|\phi\rangle`` gives us the set of all possible monomials in a quadratic equation.
```math
|\phi\rangle|\phi\rangle = \frac{1}{2} |0\rangle + \frac{1}{2} \sum_{k,l = 0}^n z_k z_l|k\rangle |l\rangle
```
What's required now is an operator that assigns corresponding coefficients to each monomial. We define an operator ``A``,
```math
A = \sum_{i,k,l = 0}^{n}a_i^{kl}|i0⟩⟨kl|
```
``a_0^{kl} = 1`` for ``k=l=0``, and is zero otherwise.
``A`` acting on ``|\phi\rangle|\phi\rangle`` gives us the desired result
```math
A |\phi\rangle|\phi\rangle = \sum_{i,k,l = 0}^{n}a_i^{kl}z_k z_l|i⟩|0⟩
```
For efficient simulation, the mapping has to be *sparse* in nature. In general, the functions ``f_i`` will not be measure preserving, i.e. they do not preserve the norm of their arguments. In that case, the operator needs to be adjusted by appropriately multiplying its elements by ||z|| or ||z||^2.
To actually carry out the simulation, we need to build a Hermitian operator containing ``A``. A well-known trick is to write the Hamiltonian ``H =−iA\otimes|1⟩⟨0|+iA^† ⊗|0⟩⟨1|`` (this is the von Neumann measurement prescription). This Hamiltonian is then simulated using the Taylor truncation method, and the resulting state is post-selected on ``|1\rangle`` to get precisely what we are looking for.
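
The embedding itself is easy to reproduce classically with `kron`; the sketch below only checks that the operator built this way is Hermitian. The 4×4 `A` here is a random placeholder, not the coefficient matrix of a particular system.

```julia
# Classical sketch of H = -i A ⊗ |1⟩⟨0| + i A† ⊗ |0⟩⟨1| (not the quantum simulation itself).
using LinearAlgebra

A = rand(ComplexF64, 4, 4)      # placeholder for the monomial operator
ket0, ket1 = [1.0, 0.0], [0.0, 1.0]
P10 = ket1 * ket0'              # |1⟩⟨0|
P01 = ket0 * ket1'              # |0⟩⟨1|

H = -im * kron(A, P10) + im * kron(A', P01)
ishermitian(H)                  # true
```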
## QuLDE
Linearises the differential equation at every iteration.
The system is linearised about the point ``(x^{*},y^{*})``. We obtain the equation,
```math
\frac{d\Delta u}{dt} = J * \Delta u + b
```
with
```math
J = \begin{pmatrix}
\frac{\partial f_1}{\partial x} & \frac{\partial f_1}{\partial y} \\
\frac{\partial f_2}{\partial x} &\frac{\partial f_2}{\partial y}
\end{pmatrix}
```
```math
b = \begin{pmatrix}
f_1(x^{*},y^{*})\\
f_2(x^{*},y^{*})
\end{pmatrix}
```
where ``\Delta u`` is initially zero, since we linearise about the current point. We then have ``\Delta u_{new} = (e^{Jt} - I)J^{-1}b``. This equation is simulated with the `quldecircuit`.
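
As a concrete illustration, here is one linearisation step written out by hand for the system solved later in this tutorial (``\frac{dz_1}{dt} = z_2 - 3z_1^{2}``, ``\frac{dz_2}{dt} = -z_2^{2} - z_1 z_2``). `QuLDE` performs the equivalent computation internally; the helper `linearise` below is only for illustration and is not part of the package.

```julia
# One linearisation step, written out by hand (QuLDE does the equivalent internally).
using LinearAlgebra

f1(x, y) = y - 3x^2
f2(x, y) = -y^2 - x * y

# Jacobian J and constant term b at the current point (x*, y*)
function linearise(x, y)
    J = [-6x 1.0; -y (-2y - x)]
    b = [f1(x, y), f2(x, y)]
    return J, b
end

J, b = linearise(0.6, 0.8)
h  = 0.1
Δu = (exp(J * h) - I) * inv(J) * b   # the update computed for one step
```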
## Usage
Let's say we want to solve the following set of differential equations.
```math
\begin{array}{rcl} \frac{dz_1}{dt} & = & z_2 - 3 z_{1}^{2} \\ \frac{dz_2}{dt} &=& -z_{2}^{2} - z_1 z_{2} \end{array}
```
Let's take the time interval to be from 0.0 to 0.4 and fix the initial vector.
```@example nlin
using QuDiffEq
using OrdinaryDiffEq
using LinearAlgebra
tspan = (0.0,0.4)
x = [0.6, 0.8];
```
- For `QuNLDE`, we need to define a `<: QuODEProblem`. At present, we use only `QuLDEProblem` as a Qu problem wrapper.
`QuNLDE` can solve only quadratic differential equations. `A` is the coefficient matrix for the quadratic differential equation.
```@example nlin
N = 2 # size of the input vector
siz = nextpow(2, N + 1)
A = zeros(ComplexF32,2^(siz),2^(siz));
A[1,1] = ComplexF32(1);
A[5,3] = ComplexF32(1);
A[5,6] = ComplexF32(-3);
A[9,11] = ComplexF32(-1);
A[9,7] = ComplexF32(-1);
nothing # hide
```
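
The indices above can be read off from the operator definition ``A = \sum_{i,k,l} a_i^{kl}|i0⟩⟨kl|`` with two 2-qubit sub-registers: the column ``4k + l + 1`` encodes the monomial ``z_k z_l`` (with ``z_0 \equiv 1``) and the row ``4i + 1`` encodes the output component ``i``. The helper below is **not** part of the package; it is only a sketch that documents this convention and rebuilds the same matrix.

```julia
# Hypothetical helper (not exported by QuDiffEq): documents how the entries of A
# encode the coefficients a_i^{kl}, assuming row = 4i + 1 and column = 4k + l + 1.
function fill_coefficient!(A, i, k, l, value)
    A[4i + 1, 4k + l + 1] = value
    return A
end

B = zeros(ComplexF32, 16, 16)
fill_coefficient!(B, 0, 0, 0,  1)   # a_0^{00} = 1 keeps the constant z_0
fill_coefficient!(B, 1, 0, 2,  1)   # dz1/dt : + z_2
fill_coefficient!(B, 1, 1, 1, -3)   # dz1/dt : - 3 z_1^2
fill_coefficient!(B, 2, 2, 2, -1)   # dz2/dt : - z_2^2
fill_coefficient!(B, 2, 1, 2, -1)   # dz2/dt : - z_1 z_2
B == A                              # same matrix as the one defined above
```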
```@example nlin
qprob = QuLDEProblem(A,x,tspan);
```
To solve the problem we use `solve()`
```@example nlin
res = solve(qprob,QuNLDE(), dt = 0.1)
```
Comparing the result with `Euler()`
```@example nlin
function f(du,u,p,t)
du[1] = -3*u[1]^2 + u[2]
du[2] = -u[2]^2 - u[1]*u[2]
end
prob = ODEProblem(f, x, tspan)
sol = solve(prob, Euler(), dt = 0.1, adaptive = false)
using Plots;
plot(sol.t,real.(res),lw = 1,label="QuNLDE()")
plot!(sol,lw = 3, ls=:dash,label="Euler()")
```

- For `QuLDE`, the problem is defined as an `ODEProblem`, similar to that in OrdinaryDiffEq.jl. `f` is the differential equation written symbolically. We can use `prob` from the previous case itself.
```@example nlin
res = solve(prob,QuLDE(),dt = 0.1)
```
```@example nlin
sol = solve(prob, Tsit5(), dt = 0.1, adaptive = false)
using Plots
plot(sol.t,real.(res),lw = 1,label="QuLDE()")
plot!(sol,lw = 3, ls=:dash,label="Tsit5()")
```

| QuDiffEq | https://github.com/QuantumBFS/QuDiffEq.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | code | 874 | using DirectConvolution
using Documenter
DocMeta.setdocmeta!(DirectConvolution, :DocTestSetup, :(using DirectConvolution); recursive=true)
makedocs(modules=[DirectConvolution],
# doctest = false,
authors="vincent-picaud <[email protected]> and contributors",
repo="https://github.com/vincent-picaud/DirectConvolution.jl/blob/{commit}{path}#{line}",
sitename="DirectConvolution.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://vincent-picaud.github.io/DirectConvolution.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(
repo="github.com/vincent-picaud/DirectConvolution.jl.git"
)
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | code | 177 | using DirectConvolution
using Documenter
DocMeta.setdocmeta!(DirectConvolution, :DocTestSetup, :(using DirectConvolution); recursive=true)
doctest(DirectConvolution, fix=true)
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | code | 1572 | t#
# Benchmark example
#
using DirectConvolution
using DSP, BenchmarkTools,LinearAlgebra
# function bench_directconv(filter,signal)
# wrapped_filter = LinearFilter(filter,0)
# convolved = directConv(wrapped_filter,signal)
# convolved
# end
function bench_directconv(filter,signal)
convolved = similar(signal)
n=length(convolved)
directConv!(filter,0,-1,signal,convolved,1:n)
convolved
end
function bench_dsp(filter,signal)
convolved = conv(filter,signal)
n=length(signal)
convolved[1:n]
end
# function bench_check(;filter_length,signal_length)
# r1 =
# end
function bench(;filter_length,signal_length)
@assert filter_length>0 && signal_length>0
filter = rand(filter_length)
signal = rand(signal_length)
t1 = @benchmark bench_directconv($filter,$signal)
t2 = @benchmark bench_dsp($filter,$signal)
t1_min=minimum(t1.times)
t2_min=minimum(t2.times)
println()
println("filter = $filter_length, signal = $signal_length")
println("DirectConvolution: $(t1_min)μs, $(t1.allocs) allocs")
println("DSP : $(t2_min)μs, $(t2.allocs) allocs")
println("Ratio ",t2_min/t1_min)
end
t = bench(filter_length=5,signal_length=10)
t = bench(filter_length=5,signal_length=1000)
t = bench(filter_length=5,signal_length=10000)
t = bench(filter_length=15,signal_length=10)
t = bench(filter_length=15,signal_length=1000)
t = bench(filter_length=15,signal_length=10000)
t = bench(filter_length=50,signal_length=1000)
t = bench(filter_length=500,signal_length=1000)
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | code | 232 | module DirectConvolution
const RootDir = pathof(DirectConvolution)
using StaticArrays
using LinearAlgebra
include("utils.jl")
include("linearFilter.jl")
include("core.jl")
include("SG_Filter.jl")
include("udwt.jl")
end # module
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | code | 5947 | export SG_Filter, maxDerivativeOrder, polynomialOrder, apply_SG_filter, apply_SG_filter2D
import Base: filter, length
function _Vandermonde(T::DataType=Float64;halfWidth::Int=5,degree::Int=2)::Array{T,2}
@assert halfWidth>=0
@assert degree>=0
x=T[i for i in -halfWidth:halfWidth]
n = degree+1
m = length(x)
V = Array{T}(undef,m, n)
for i = 1:m
V[i,1] = T(1)
end
for j = 2:n
for i = 1:m
V[i,j] = x[i] * V[i,j-1]
end
end
return V
end
# ================================================================
struct SG_Filter{T<:AbstractFloat,N}
_filter_set::Array{LinearFilter_DefaultCentered{T,N},1}
end
"""
function SG_Filter(T::DataType=Float64;halfWidth::Int=5,degree::Int=2)
Creates a `SG_Filter` structure used to store Savitzky-Golay filters.
* filter length is 2*`halfWidth`+1
* polynomial degree is `degree`, which defines `maxDerivativeOrder`
You can apply these filters using the
* `apply_SG_filter`
* `apply_SG_filter2D`
functions.
Example:
```jldoctest
julia> sg = SG_Filter(halfWidth=5,degree=3);
julia> maxDerivativeOrder(sg)
3
julia> length(sg)
11
julia> filter(sg,derivativeOrder=2)
Filter(r=-5:5,c=[0.03497, 0.01399, -0.002331, -0.01399, -0.02098, -0.02331, -0.02098, -0.01399, -0.002331, 0.01399, 0.03497])
```
"""
function SG_Filter(T::DataType=Float64;halfWidth::Int=5,degree::Int=2)::SG_Filter
@assert degree>=0
@assert halfWidth>=1
@assert 2*halfWidth>degree
V=_Vandermonde(T,halfWidth=halfWidth,degree=degree)
Q,R=qr(V)
# breaking change in Julia V1.0,
# see https://github.com/JuliaLang/julia/issues/27397
#
# before Q was a "plain" matrix, now stored in compact form
#
# SG=R\Q'
#
# must be replaced by
#
# Q=Q*Matrix(I, size(V))
# SG=R\Q'
#
Q=Q*Matrix(I, size(V))
SG=R\Q'
n_filter,n_coef = size(SG)
buffer=Array{LinearFilter_DefaultCentered{T,n_coef},1}()
for i in 1:n_filter
SG[i,:]*=factorial(i-1)
push!(buffer,LinearFilter(SG[i,:]))
end
# Returns filters set
return SG_Filter{T,n_coef}(buffer)
end
# ================================================================
"""
function filter(sg::SG_Filter{T,N};derivativeOrder::Int=0)
Returns the filter to be used to compute the smoothed derivatives of order `derivativeOrder`.
See: `SG_Filter`
"""
function filter(sg::SG_Filter{T,N};derivativeOrder::Int=0) where {T<:AbstractFloat,N}
@assert 0<= derivativeOrder <= maxDerivativeOrder(sg)
return sg._filter_set[derivativeOrder+1]
end
"""
function length(sg::SG_Filter{T,N})
Returns filter length, this is an odd number.
See: `SG_Filter`
"""
Base.length(sg::SG_Filter{T,N}) where {T<:AbstractFloat,N} = length(filter(sg))
"""
function maxDerivativeOrder(sg::SG_Filter{T,N})
Maximum order of the smoothed derivatives we can compute using `sg` filters.
See: `SG_Filter`
"""
maxDerivativeOrder(sg::SG_Filter{T,N}) where {T<:AbstractFloat,N} = size(sg._filter_set,1)-1
"""
function polynomialOrder(sg::SG_Filter{T,N})
Returns the degree of the polynomial used to construct the
Savitzky-Golay filters. This is mainly a 'convenience' function, as it
is equivalent to `maxDerivativeOrder`
See: `SG_Filter`
"""
polynomialOrder(sg::SG_Filter{T,N}) where {T<:AbstractFloat,N} = maxDerivativeOrder(sg)
"""
function apply_SG_filter(signal::Array{T,1},
sg::SG_Filter{T};
derivativeOrder::Int=0,
left_BE::Type{<:BoundaryExtension}=ConstantBE,
right_BE::Type{<:BoundaryExtension}=ConstantBE)
Applies an 1D Savitzky-Golay and returns the smoothed signal.
"""
function apply_SG_filter(signal::AbstractArray{T,1},
sg::SG_Filter{T};
derivativeOrder::Int=0,
left_BE::Type{<:BoundaryExtension}=ConstantBE,
right_BE::Type{<:BoundaryExtension}=ConstantBE) where {T<:AbstractFloat}
return directCrossCorrelation(filter(sg,derivativeOrder=derivativeOrder),
signal,
left_BE,
right_BE)
end
"""
function apply_SG_filter2D(signal::Array{T,2},
sg_I::SG_Filter{T},
sg_J::SG_Filter{T};
derivativeOrder_I::Int=0,
derivativeOrder_J::Int=0,
min_I_BE::Type{<:BoundaryExtension}=ConstantBE,
max_I_BE::Type{<:BoundaryExtension}=ConstantBE,
min_J_BE::Type{<:BoundaryExtension}=ConstantBE,
max_J_BE::Type{<:BoundaryExtension}=ConstantBE)
Applies an 2D Savitzky-Golay and returns the smoothed signal.
"""
function apply_SG_filter2D(signal::AbstractArray{T,2},
sg_I::SG_Filter{T},
sg_J::SG_Filter{T};
derivativeOrder_I::Int=0,
derivativeOrder_J::Int=0,
min_I_BE::Type{<:BoundaryExtension}=ConstantBE,
max_I_BE::Type{<:BoundaryExtension}=ConstantBE,
min_J_BE::Type{<:BoundaryExtension}=ConstantBE,
max_J_BE::Type{<:BoundaryExtension}=ConstantBE) where {T<:AbstractFloat}
return directCrossCorrelation2D(filter(sg_I,derivativeOrder=derivativeOrder_I),
filter(sg_J,derivativeOrder=derivativeOrder_J),
signal,
min_I_BE,
max_I_BE,
min_J_BE,
max_J_BE)
end
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | code | 15043 | export directConv, directConv!, directConv2D!, directCrossCorrelation,directCrossCorrelation2D
export BoundaryExtension, ZeroPaddingBE, ConstantBE, PeriodicBE, MirrorBE
export boundaryExtension
# first index
const tilde_i0 = Int(1)
# ================================================================
# Used for tag dispatching, parent of available boundary extensions
# ================================================================
"""
abstract type BoundaryExtension end
Abstract type associated to boundary extension.
"""
abstract type BoundaryExtension end
"""
struct ZeroPaddingBE <: BoundaryExtension end
A type used to tag zero padding extension
"""
struct ZeroPaddingBE <: BoundaryExtension end
"""
struct ConstantBE <: BoundaryExtension end
A type used to tag constant constant extension
"""
struct ConstantBE <: BoundaryExtension end
"""
struct PeriodicBE <: BoundaryExtension end
A type used to tag periodic extension
"""
struct PeriodicBE <: BoundaryExtension end
"""
struct MirrorBE <: BoundaryExtension end
A type used to tag mirror extension
"""
struct MirrorBE <: BoundaryExtension end
"""
scale(λ::Int,Ω::UnitRange{Int})
Range scaling.
**Caveat:**
We do not use Julia `*` operator as it returns a step range:
```jldoctest
julia> r=6:8
6:8
julia> -2*r
-12:-2:-16
```
What we need is:
```jldoctest
julia> r=6:8
6:8
julia> scale(-2,r)
-16:-12
```
"""
function scale(λ::Int,Ω::UnitRange{Int})
ifelse(λ>0,
UnitRange(λ*first(Ω),λ*last(Ω)),
UnitRange(λ*last(Ω),λ*first(Ω)))
end
#+BoundaryExtension,Internal
#
# In
# $$
# \gamma[k]=\sum\limits_{i\in\Omega^\alpha}\alpha[i]\beta[k+\lambda i],\text{ with }\lambda\in\mathbb{Z}^*
# $$
# the computation of $\gamma[k],\ k\in\Omega^\gamma$ is splitted into two parts:
# - one part $\Omega^\gamma \cap \Omega^\gamma_1$ *free of boundary effect*,
# - one part $\Omega^\gamma \setminus \Omega^\gamma_1$ *that requires boundary extension* $\tilde{\beta}=\Phi(\beta,k)$
#
# *Example:*
#!DirectConvolution.compute_Ωγ1(-1:2,-2,1:20)
function compute_Ωγ1(Ωα::UnitRange{Int},
λ::Int,
Ωβ::UnitRange{Int})
λΩα = scale(λ,Ωα)
UnitRange(first(Ωβ)-first(λΩα),
last(Ωβ)-last(λΩα))
end
#+BoundaryExtension,Internal
#
# Left relative complement
#
# $$
# (A\setminus B)_{\text{Left}}=[ l(A), \min{(u(A),l(B)-1)} ]
# $$
#
# *Example:*
#!DirectConvolution.relativeComplement_left(1:10,-5:5)
#
# $(A\setminus B)=\{6,7,8,9,10\}$ and the left part (elements that are
# $\in A$ but on the left side of $B$) is *empty*.
function relativeComplement_left(A::UnitRange{Int},
B::UnitRange{Int})
UnitRange(first(A),
min(last(A),first(B)-1))
end
#+BoundaryExtension,Internal
#
# Right relative complement
#
# $$
# (A\setminus B)_{\text{Right}}=[ \max{(l(A),u(B)+1)}, u(A) ]
# $$
#
# *Example:*
#!DirectConvolution.relativeComplement_right(1:10,-5:5)
#
# $(A\setminus B)=\{6,7,8,9,10\}$ and the right part (elements that are
# $\in A$ but on the right side of $B$) is $\{6,7,8,9,10\}$
function relativeComplement_right(A::UnitRange{Int},
B::UnitRange{Int})
UnitRange(max(first(A),last(B)+1),
last(A))
end
# ================================================================
"""
boundaryExtension(β::AbstractArray{T,1},
k::Int,
::Type{BOUNDARY_EXT_TYPE})
Computes extended boundary value `β[k]` given boundary extension type
`BOUNDARY_EXT_TYPE`
Available `BOUNDARY_EXT_TYPE` are:
* `ZeroPaddingBE`: zero padding
* `ConstantBE`: constant boundary extension padding
* `PeriodicBE`: periodic boundary extension padding
* `MirrorBE`: mirror symmetry boundary extension padding
The routine is robust in the sense that there is no restriction on `k`
value.
```jldoctest
julia> dom = [-5:10;];
julia> hcat(dom,map(x->boundaryExtension([1; 2; 3],x,ZeroPaddingBE),dom))'
2×16 adjoint(::Matrix{Int64}) with eltype Int64:
-5 -4 -3 -2 -1 0 1 2 3 4 5 6 7 8 9 10
0 0 0 0 0 0 1 2 3 0 0 0 0 0 0 0
julia> hcat(dom,map(x->boundaryExtension([1; 2; 3],x,ConstantBE),dom))'
2×16 adjoint(::Matrix{Int64}) with eltype Int64:
-5 -4 -3 -2 -1 0 1 2 3 4 5 6 7 8 9 10
1 1 1 1 1 1 1 2 3 3 3 3 3 3 3 3
julia> hcat(dom,map(x->boundaryExtension([1; 2; 3],x,PeriodicBE),dom))'
2×16 adjoint(::Matrix{Int64}) with eltype Int64:
-5 -4 -3 -2 -1 0 1 2 3 4 5 6 7 8 9 10
1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 1
julia> hcat(dom,map(x->boundaryExtension([1; 2; 3],x,MirrorBE),dom))'
2×16 adjoint(::Matrix{Int64}) with eltype Int64:
-5 -4 -3 -2 -1 0 1 2 3 4 5 6 7 8 9 10
3 2 1 2 3 2 1 2 3 2 1 2 3 2 1 2
```
"""
function boundaryExtension end
function boundaryExtension(β::AbstractArray{T,1},
k::Int,
::Type{ZeroPaddingBE}) where {T <: Number}
kmin = tilde_i0
kmax = length(β) + kmin - 1
if (k>=kmin)&&(k<=kmax)
β[k]
else
T(0)
end
end
function boundaryExtension(β::AbstractArray{T,1},
k::Int,
::Type{ConstantBE}) where {T <: Number}
kmin = tilde_i0
kmax = length(β) + kmin - 1
if k<kmin
β[kmin]
elseif k<=kmax
β[k]
else
β[kmax]
end
end
function boundaryExtension(β::AbstractArray{T,1},
k::Int,
::Type{PeriodicBE}) where {T <: Number}
kmin = tilde_i0
kmax = length(β) + kmin - 1
β[kmin+mod(k-kmin,1+kmax-kmin)]
end
function boundaryExtension(β::AbstractArray{T,1},
k::Int,
::Type{MirrorBE}) where {T <: Number}
kmin = tilde_i0
kmax = length(β) + kmin - 1
β[kmax-abs(kmax-kmin-mod(k-kmin,2*(kmax-kmin)))]
end
# ================================================================
#+Convolution,Internal
function directConv!(tilde_α::AbstractArray{T,1},
α_offset::Int,
λ::Int,
β::AbstractArray{T,1},
γ::AbstractArray{T,1},
Ωγ::UnitRange{Int},
::Type{LeftBE}=ZeroPaddingBE,
::Type{RightBE}=ZeroPaddingBE;
accumulate::Bool=false)::Nothing where {T <: Number,
LeftBE <: BoundaryExtension,
RightBE <: BoundaryExtension}
# Sanity check
@assert λ!=0
@assert (first(Ωγ)>=1)&&(last(Ωγ)<=length(γ))
# Initialization
Ωα = filter_range(length(tilde_α),α_offset)
Ωβ = UnitRange(1,length(β))
tilde_Ωα = 1:length(Ωα)
if !accumulate
for k in Ωγ
γ[k]=0
end
end
rΩγ1=intersect(Ωγ,compute_Ωγ1(Ωα,λ,Ωβ))
# rΩγ1 part: no boundary effect
#
β_offset = λ*(first(Ωα)-tilde_i0)
for k in rΩγ1
@simd for i in tilde_Ωα
@inbounds γ[k]+=tilde_α[i]*β[k+λ*i+β_offset]
end
end
# Left part
#
rΩγ1_left = relativeComplement_left(Ωγ,rΩγ1)
for k in rΩγ1_left
for i in tilde_Ωα
γ[k]+=tilde_α[i]*boundaryExtension(β,k+λ*i+β_offset,LeftBE)
end
end
# Right part
#
rΩγ1_right = relativeComplement_right(Ωγ,rΩγ1)
for k in rΩγ1_right
for i in tilde_Ωα
γ[k]+=tilde_α[i]*boundaryExtension(β,k+λ*i+β_offset,RightBE)
end
end
nothing
end
# """
# directConv!
# Computes a convolution.
# Inplace modification of ``\\gamma[k], k\\in\\Omega_\\gamma``
# ```math
# \gamma[k]=\sum\limits_{i\in\Omega^\alpha}\alpha[i]\beta[k+\lambda i],\text{ with }\lambda\in\mathbb{Z}^*
# ```
# If ``k\\notin \\Omega_\\gamma``, ``\\gamma[k]`` is unmodified.
# If *accumulate=false* then an erasing step ``\\gamma[k]=0, k\\in\\Omega_\\gamma`` is performed before computation.
# If ``\\lambda=-1`` you compute a convolution, if ``\\lambda=+1`` you
# compute a cross-correlation.
# **Example:**
# ```@jldoctest
# julia> β=[1:15;]
# julia> γ=ones(Int,15)
# julia> α=LinearFilter([0,0,1],0)
# julia> directConv!(α,1,β,γ,5:10)
# julia> hcat([1:length(γ);],γ)
# ```
# """
"""
directConv!
Computes a convolution.
```math
\\gamma[k]=\\sum\\limits_{i\\in\\Omega^\\alpha}\\alpha[i]\\beta[k+\\lambda i]
```
The following example shows how to apply the `[0 0 1]` filter in place on `γ[5:10]`
```jldoctest
julia> β=[1:15;];
julia> γ=ones(Int,15);
julia> α=LinearFilter([0,0,1],0);
julia> directConv!(α,1,β,γ,5:10)
julia> hcat([1:length(γ);],γ)
15×2 Matrix{Int64}:
1 1
2 1
3 1
4 1
5 7
6 8
7 9
8 10
9 11
10 12
11 1
12 1
13 1
14 1
15 1
```
"""
function directConv!(α::LinearFilter{T},
λ::Int,
β::AbstractArray{T,1},
γ::AbstractArray{T,1},
Ωγ::UnitRange{Int},
::Type{LeftBE}=ZeroPaddingBE,
::Type{RightBE}=ZeroPaddingBE;
accumulate::Bool=false)::Nothing where {T <: Number,
LeftBE <: BoundaryExtension,
RightBE <: BoundaryExtension}
directConv!(fcoef(α),
offset(α),
λ,
β,
γ,
Ωγ,
LeftBE,
RightBE,
accumulate=accumulate)
nothing
end
#+Convolution
#
# Computes a convolution.
#
# Convenience function that allocate $\gamma$ and compute all its
# component using [[directConv_details][]]
#
# *Returns:* $\gamma$ a created vector of length identical to the $\beta$ one.
#
# *Example:*
#!β=[1:15;];
#!γ=ones(Int,15);
#!α=LinearFilter([0,0,1],0);
#!γ=directConv(α,1,β);
#!hcat([1:length(γ);],γ)'
#
function directConv(α::LinearFilter{T},
λ::Int64,
β::AbstractArray{T,1},
::Type{LeftBE}=ZeroPaddingBE,
::Type{RightBE}=ZeroPaddingBE) where {T <: Number,
LeftBE <: BoundaryExtension,
RightBE <: BoundaryExtension}
γ = Array{T,1}(undef,length(β))
directConv!(α,
λ,
β,
γ,
UnitRange(1,length(γ)),
LeftBE,
RightBE,
accumulate=false)
return γ
end
#+Convolution
#
# Computes a convolution.
#
# This is a convenience function where $\lambda=-1$
#
# *Returns:* $\gamma$ a created vector of length identical to the $\beta$ one.
#
function directConv(α::LinearFilter{T},
β::AbstractArray{T,1},
::Type{LeftBE}=ZeroPaddingBE,
::Type{RightBE}=ZeroPaddingBE) where {T <: Number,
LeftBE <: BoundaryExtension,
RightBE <: BoundaryExtension}
return directConv(α,-1,β,LeftBE,RightBE)
end
#+Convolution
#
# Computes a cross-correlation
#
# This is a convenience function where $\lambda=+1$
#
# *Returns:* $\gamma$ a created vector of length identical to the $\beta$ one.
#
function directCrossCorrelation(α::LinearFilter{T},
β::AbstractArray{T,1},
::Type{LeftBE}=ZeroPaddingBE,
::Type{RightBE}=ZeroPaddingBE) where {T <: Number,
LeftBE <: BoundaryExtension,
RightBE <: BoundaryExtension}
return directConv(α,+1,β,LeftBE,RightBE)
end
# 2D
# +Convolution L:directConv2D_inplace
# Computes a 2D (separable) convolution.
#
# For general information about parameters, see [[directConv_details][]]
#
# α_I must be interpreted as filter for *running index I*
#
# CAVEAT: the result overwrites β
#
# TODO: @parallel
function directConv2D!(α_I::LinearFilter{T},
λ_I::Int,
α_J::LinearFilter{T},
λ_J::Int,
β::AbstractArray{T,2},
min_I_BE::Type{<:BoundaryExtension}=ZeroPaddingBE,
max_I_BE::Type{<:BoundaryExtension}=ZeroPaddingBE,
min_J_BE::Type{<:BoundaryExtension}=ZeroPaddingBE,
max_J_BE::Type{<:BoundaryExtension}=ZeroPaddingBE)::Nothing where {T<:Number}
γ=similar(β)
(n,m)=size(β)
α_I_coef=fcoef(α_I)
α_I_offset=offset(α_I)
α_J_coef=fcoef(α_J)
α_J_offset=offset(α_J)
Ωγ_I = 1:n
Ωγ_J = 1:m
# i running (for filter)
for j in 1:m
directConv!(α_I_coef,
α_I_offset,
λ_I,
view(β,:,j),
view(γ,:,j),
Ωγ_I,
min_I_BE,
max_I_BE,
accumulate=false)
end
# j running (for filter)
for i in 1:n
directConv!(α_J_coef,
α_J_offset,
λ_J,
view(γ,i,:),
view(β,i,:),
Ωγ_J,
min_J_BE,
max_J_BE,
accumulate=false)
end
nothing
end
# +Convolution
# Computes a 2D cross-correlation
#
# This is a wrapper that calls [[directConv2D_inplace][]]
#
# *Note:* β is not modified, instead the function returns the result.
#
function directCrossCorrelation2D(α_I::LinearFilter{T},
α_J::LinearFilter{T},
β::AbstractArray{T,2},
min_I_BE::Type{<:BoundaryExtension}=ZeroPaddingBE,
max_I_BE::Type{<:BoundaryExtension}=ZeroPaddingBE,
min_J_BE::Type{<:BoundaryExtension}=ZeroPaddingBE,
max_J_BE::Type{<:BoundaryExtension}=ZeroPaddingBE)::Array{T,2} where {T<:Number}
γ=similar(β)
γ.=β
directConv2D!(α_I,+1,α_J,+1,γ,
min_I_BE,
max_I_BE,
min_J_BE,
max_J_BE)
return γ
end
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | code | 4144 | # Some TODO:
# Rename:
# LinearFilter -> AbstractLinearFilter
# LinearFilter_Default -> LinearFilter
# LinearFilter_DefaultCentered -> LinearFilter_Centered (or only compute the right offset)
#
export LinearFilter
export fcoef, length, offset, range
import Base: length, range, isapprox, show
"""
abstract type LinearFilter{T<:Number} end
Abstract type defining a linear filter.
"""
abstract type LinearFilter{T<:Number} end
"""
fcoef(c::LinearFilter)
Returns filter coefficients
"""
fcoef(c::LinearFilter) = c._fcoef
"""
length(c::LinearFilter)::Int
Returns filter length
"""
length(c::LinearFilter)::Int = length(fcoef(c))
"""
offset(c::LinearFilter)::Int
Returns filter offset
**Caveat:** the first position is **0** (and not **1**)
"""
offset(c::LinearFilter)::Int = c._offset
# Internal
#
# Computes [[range_filter][]] using primitive types.
# This allows reuse by =directConv!= for instance.
#
# *Caveat:* do not overload Base.range !!!
filter_range(size::Int,offset::Int)::UnitRange = UnitRange(-offset,size-offset-1)
"""
range(c::LinearFilter)::UnitRange
Returns filter range Ω
Filter support of a filter α is defined by Ω = [ - offset(α), length(α) - offset(α) - 1 ]
"""
range(c::LinearFilter)::UnitRange = filter_range(length(c),offset(c))
# For convenience only, used in utests
#
function isapprox(f::LinearFilter{T},v::AbstractArray{T,1}) where {T<:Number}
return isapprox(fcoef(f),v)
end
#
# Pretty print
#
# I defined this afterward to avoid jldoctest to fail because of
# different rounding errors occuring for different architectures.
#
# Now filters coefficient are rounded before printing.
#
function Base.show(io::IO, f::LinearFilter)
r = range(f)
coef = fcoef(f)
print(io,"Filter(r=$r,c=")
print(io, round.(coef; sigdigits=4))
println(io,")")
end
"""
struct LinearFilter_Default{T<:Number,N}
Default linear filter.
You can create a filter as follows
```jldoctest
julia> linear_filter=LinearFilter([1,-2,1],1)
Filter(r=-1:1,c=[1.0, -2.0, 1.0])
julia> offset(linear_filter)
1
julia> range(linear_filter)
-1:1
```
"""
struct LinearFilter_Default{T<:Number,N} <: LinearFilter{T}
_fcoef::SVector{N,T}
_offset::Int
end
#+LinearFilter,Internal
# Creates a linear filter from a coefficient vector and its associated offset
#
# *Example:*
#!linear_filter=LinearFilter(rand(3),5)
#!offset(linear_filter)
#!range(linear_filter)
#
function LinearFilter_Default(c::AbstractArray{T,1},offset::Int) where {T<:Number}
v=SVector{length(c),T}(c)
return LinearFilter_Default{T,length(c)}(v,offset)
end
#+LinearFilter,Internal
#
# Default *centered* linear filter
#
# Array length has to be odd, 2n+1. Filter offset is n by construction.
#
struct LinearFilter_DefaultCentered{T<:Number,N} <: LinearFilter{T}
_fcoef::SVector{N,T}
end
#+LinearFilter,Internal
function LinearFilter_DefaultCentered(c::AbstractArray{T,1}) where {T<:Number}
N = length(c)
@assert isodd(length(c)) "Centered filters must have an odd number of coefficients, $N is even"
return LinearFilter_DefaultCentered{T,N}(SVector{N,T}(c))
end
#+LinearFilter,Internal
offset(f::LinearFilter_DefaultCentered{T,N}) where {T<:Number,N} = (N-1)>>1
# Once that we have defined LinearFilter_Default and
# LinearFilter_DefaultCentered we can unify construction. Switch to
# the right type is decided according to arguments
#+LinearFilter
#
# Creates a linear filter from its coefficients and an offset
#
# The *offset* is the position of the filter coefficient to be aligned with zero, see [[range_filter][]].
#
# *Example:*
#!f=LinearFilter([0:5;],4);
#!hcat([range(f);],fcoef(f))
#
function LinearFilter(c::AbstractArray{T,1},offset::Int)::LinearFilter where {T}
return LinearFilter_Default(c,offset)
end
#+LinearFilter
#
# Creates a centered linear filter, it must have an odd number of
# coefficients, $2n+1$ and is centered by construction (offset=n)
#
# *Example:*
#!f=LinearFilter([0:4;]);
#!hcat([range(f);],fcoef(f))
#
function LinearFilter(c::AbstractArray{T,1})::LinearFilter where {T}
return LinearFilter_DefaultCentered(c)
end
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | code | 6653 | export UDWT_Filter_Haar, UDWT_Filter_Starck2
export ϕ_filter,ψ_filter,tildeϕ_filter,tildeψ_filter
export udwt, scale, inverse_udwt!, inverse_udwt
import Base: length
"""
abstract type UDWT_Filter_Biorthogonal{T<:Number} end
Abstract type defining the ϕ, ψ, tildeϕ and tildeψ filters associated
to an undecimated biorthogonal wavelet transform
Subtypes of this structure are:
* [`UDWT_Filter`](@ref UDWT_Filter)
Associated methods are:
* [`ϕ_filter`](@ref ϕ_filter)
* `ψ_filter`
* `tildeϕ_filter`
* `tildeψ_filter`
"""
abstract type UDWT_Filter_Biorthogonal{T<:Number} end
"""
ϕ_filter(c::UDWT_Filter_Biorthogonal)
Returns the ϕ filter
"""
ϕ_filter(c::UDWT_Filter_Biorthogonal)::LinearFilter = c._ϕ_filter
ψ_filter(c::UDWT_Filter_Biorthogonal)::LinearFilter = c._ψ_filter
tildeϕ_filter(c::UDWT_Filter_Biorthogonal)::LinearFilter = c._tildeϕ_filter
tildeψ_filter(c::UDWT_Filter_Biorthogonal)::LinearFilter = c._tildeψ_filter
"""
abstract type UDWT_Filter{T<:Number} <: UDWT_Filter_Biorthogonal{T} end
A specialization of `UDWT_Filter_Biorthogonal` for **orthogonal** filters.
For orthogonal filters we have: ϕ=tildeϕ, ψ=tildeψ
"""
abstract type UDWT_Filter{T<:Number} <: UDWT_Filter_Biorthogonal{T}
end
#+UDWT_Filter
tildeϕ_filter(c::UDWT_Filter)::LinearFilter = ϕ_filter(c)
#+UDWT_Filter
tildeψ_filter(c::UDWT_Filter)::LinearFilter = ψ_filter(c)
# Filter examples
#+UDWT_Filter_Available
# Haar filter
struct UDWT_Filter_Haar{T<:AbstractFloat} <: UDWT_Filter{T}
_ϕ_filter::LinearFilter_Default{T,2}
_ψ_filter::LinearFilter_Default{T,2}
#+UDWT_Filter_Available
# Creates an instance
UDWT_Filter_Haar{T}() where {T<:Real} = new(LinearFilter_Default{T,2}(SVector{2,T}([+1/2 +1/2]),0),
LinearFilter_Default{T,2}(SVector{2,T}([-1/2 +1/2]),0))
end
#+UDWT_Filter_Available
# Starck2 filter
#
# Defined by Eq. 6 from http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4060954
struct UDWT_Filter_Starck2{T<:AbstractFloat} <: UDWT_Filter_Biorthogonal{T}
_ϕ_filter::LinearFilter_DefaultCentered{T,5}
_ψ_filter::LinearFilter_DefaultCentered{T,5}
_tildeϕ_filter::LinearFilter_DefaultCentered{T,1}
_tildeψ_filter::LinearFilter_DefaultCentered{T,1}
#+UDWT_Filter_Available
# Creates an instance
UDWT_Filter_Starck2{T}() where {T<:Real} = new(LinearFilter_DefaultCentered{T,5}(SVector{5,T}([+1/16 +4/16 +6/16 +4/16 +1/16])),
LinearFilter_DefaultCentered{T,5}(SVector{5,T}([-1/16 -4/16 +10/16 -4/16 -1/16])),
LinearFilter_DefaultCentered{T,1}(SVector{1,T}([+1])),
LinearFilter_DefaultCentered{T,1}(SVector{1,T}([+1])))
end
#+UDWT
# A structure to store 1D UDWT
#
struct UDWT{T<:Number}
filter::UDWT_Filter_Biorthogonal{T}
# TODO also store boundary condition
W::Array{T,2}
V::Array{T,1}
#+UDWT
# Creates an instance
#
# *Parameters:*
# - *filter*: used filter
# - *scale* : max scale
# - *n*: signal length
#
UDWT{T}(filter::UDWT_Filter_Biorthogonal{T};
n::Int=0,
scale::Int=0) where {T<:Number} =
new(filter,
Array{T,2}(undef,n,scale),
Array{T,1}(undef,n))
end
#+UDWT
# Returns max scale
scale(udwt::UDWT)::Int = size(udwt.W,2)
#+UDWT
# Returns expected signal length
length(udwt::UDWT)::Int = size(udwt.W,1)
#+UDWT
#
# Performs a 1D undecimated wavelet transform
#
# $$(\mathcal{W}_{j+1}f)[u]=(\bar{g}_j*\mathcal{V}_{j}f)[u]$$
# $$(\mathcal{V}_{j+1}f)[u]=(\bar{h}_j*\mathcal{V}_{j}f)[u]$$
#
function udwt(signal::AbstractArray{T,1},
filter::UDWT_Filter_Biorthogonal{T};
scale::Int=3) where {T<:Number}
@assert scale>=0
boundary = PeriodicBE
n = length(signal)
udwt_domain = UDWT{T}(filter,n=n,scale=scale)
Ωγ = 1:n
Vs = Array{T,1}(undef,n)
Vsp1 = Array{T,1}(undef,n)
Vs .= signal
for s in 1:scale
twoPowScale = 2^(s-1)
Wsp1 = @view udwt_domain.W[:,s]
# Computes Vs+1 from Vs
#
directConv!(ϕ_filter(filter),
twoPowScale,
Vs,
Vsp1,
Ωγ,
boundary,
boundary)
# Computes Ws+1 from Ws
#
directConv!(ψ_filter(filter),
twoPowScale,
Vs,
Wsp1,
Ωγ,
boundary,
boundary)
@swap(Vs,Vsp1)
end
udwt_domain.V .= Vs
return udwt_domain
end
# +UDWT
#
# Performs a 1D *inverse* undecimated wavelet transform
#
# *Caveat:* uses a pre-allocated vector =reconstructed_signal=
#
function inverse_udwt!(udwt_domain::UDWT{T},
reconstructed_signal::AbstractArray{T,1}) where {T<:Number}
@assert length(udwt_domain) == length(reconstructed_signal)
boundary = PeriodicBE
maxScale = scale(udwt_domain)
n = length(reconstructed_signal)
Ωγ = 1:n
buffer = Array{T,1}(undef,n)
reconstructed_signal .= udwt_domain.V
for s in maxScale:-1:1
twoPowScale = 2^(s-1)
# Computes Vs from Vs+1
#
directConv!(tildeϕ_filter(udwt_domain.filter),
-twoPowScale,
reconstructed_signal,
buffer,
Ωγ,
boundary,
boundary)
# Computes Ws from Ws+1
#
Ws = @view udwt_domain.W[:,s]
directConv!(tildeψ_filter(udwt_domain.filter),
-twoPowScale,
Ws,
buffer,
Ωγ,
boundary,
boundary,
accumulate=true)
for i in Ωγ
@inbounds reconstructed_signal[i]=buffer[i]
end
end
end
#+UDWT
#
# Performs a 1D *inverse* undecimated wavelet transform
#
# *Returns:* a vector containing the reconstructed signal.
#
function inverse_udwt(udwt_domain::UDWT{T})::Array{T,1} where {T<:Number}
reconstructed_signal=Array{T,1}(undef,length(udwt_domain))
inverse_udwt!(udwt_domain,reconstructed_signal)
return reconstructed_signal
end
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | code | 129 | export @swap
macro swap(x,y)
quote
local tmp = $(esc(x))
$(esc(x)) = $(esc(y))
$(esc(y)) = tmp
end
end
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | code | 660 | # @testset "Savitzky-Golay definition" begin
# sg=SavitzkyGolay_Filter(rand(11))
# @test offset(sg) == 5
# @test range(sg) == -5:5
# @test length(sg) == 11
# end
@testset "Savitzky-Golay" begin
m=SG_Filter(Float64,halfWidth=2,degree=0)
@test filter(m,derivativeOrder=0) ≈ [1/5; 1/5; 1/5; 1/5; 1/5]
m=SG_Filter(Float64,halfWidth=3,degree=2)
@test filter(m,derivativeOrder=1) ≈ [-(3/28); -(1/14); -(1/28); 0; (1/28); (1/14); (3/28)]
@test filter(m,derivativeOrder=2) ≈ [5/42; 0; -(1/14); -(2/21); -(1/14); 0; 5/42]
@test maxDerivativeOrder(m) == 2
@test length(m) == 2*3+1
@test polynomialOrder(m) == 2
end
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | code | 1947 | @testset "Example α_offset" begin
α0=LinearFilter(Float64[0,1,0],0)
α1=LinearFilter(Float64[0,1,0],1)
β=collect(Float64,1:6)
γ1=directConv(α0,1,β,ZeroPaddingBE,ZeroPaddingBE)
γ2=directConv(α1,1,β,ZeroPaddingBE,ZeroPaddingBE)
@test γ1 ≈ [2.0, 3.0, 4.0, 5.0, 6.0, 0.0]
@test γ2 ≈ [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
end;
@testset "Adjoint operator" begin
α=LinearFilter(rand(4),2);
β=rand(10);
vβ=rand(length(β))
d1=dot(directConv(α,-3,vβ,ZeroPaddingBE,ZeroPaddingBE),β)
d2=dot(directConv(α,+3,β,ZeroPaddingBE,ZeroPaddingBE),vβ)
@test isapprox(d1,d2)
d1=dot(directConv(α,-3,vβ,PeriodicBE,PeriodicBE),β)
d2=dot(directConv(α,+3,β,PeriodicBE,PeriodicBE),vβ)
@test isapprox(d1,d2)
end;
@testset "Convolution commutativity" begin
α=rand(4);
αf=LinearFilter(α,0)
β=rand(10);
βf=LinearFilter(β,0)
v1=zeros(20)
v2=zeros(20)
directConv!(αf,-1,
β,v1,UnitRange(1,20),ZeroPaddingBE,ZeroPaddingBE)
directConv!(βf,-1,
α,v2,UnitRange(1,20),ZeroPaddingBE,ZeroPaddingBE)
@test isapprox(v1,v2)
end;
@testset "Interval split" begin
α=LinearFilter(rand(4),3)
β=rand(10);
γ=directConv(α,2,β,MirrorBE,PeriodicBE) # global computation
Γ=zeros(length(γ))
Ω1=UnitRange(1:3)
Ω2=UnitRange(4:length(γ))
directConv!(α,2,β,Γ,Ω1,MirrorBE,PeriodicBE) # compute on Ω1
directConv!(α,2,β,Γ,Ω2,MirrorBE,PeriodicBE) # compute on Ω2
@test isapprox(γ,Γ)
end;
@testset "2D convolution" begin
β=rand(5,8)
β_save=deepcopy(β)
α_I=LinearFilter(Float64[0,2,0])
α_J=LinearFilter(Float64[0,0,2,0,0])
directConv2D!(α_I,1,α_J,-1,β)
@test isapprox(β,4*β_save)
end;
@testset "2D crossCorrelation" begin
β=rand(5,8)
α_I=LinearFilter(Float64[0,2,0])
α_J=LinearFilter(Float64[0,0,2,0,0])
γ=directCrossCorrelation2D(α_I,α_J,β)
@test isapprox(γ,4*β)
end;
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | code | 460 | @testset "LinearFilter_DefaultCentered" begin
filter=DirectConvolution.LinearFilter_DefaultCentered(rand(11))
@test offset(filter) == 5
@test range(filter) == -5:5
@test length(filter) == 11
end
@testset "LinearFilter_Default" begin
v=rand(11)
filter=DirectConvolution.LinearFilter_Default(v,4)
@test offset(filter) == 4
@test range(filter) == -4:6
@test length(filter) == length(v)
@test fcoef(filter) ≈ v
end
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | code | 256 | using DirectConvolution
using Test
using LinearAlgebra
@testset "DirectConvolution" begin
include("utils.jl")
include("linearFilter.jl")
include("directConvolution.jl")
include("SG_Filter.jl")
include("udwt.jl")
end;
nothing
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | code | 841 | @testset "Haar" begin
haar_udwt = UDWT_Filter_Haar{Float64}()
@test ϕ_filter(haar_udwt) ≈ [1/2, 1/2]
@test tildeϕ_filter(haar_udwt) ≈ [1/2, 1/2]
@test ψ_filter(haar_udwt) ≈ [-1/2, 1/2]
@test tildeψ_filter(haar_udwt) ≈ [-1/2, 1/2]
end
@testset "Starck2" begin
starck2_udwt = UDWT_Filter_Starck2{Float64}()
@test fcoef(tildeϕ_filter(starck2_udwt)) ≈ [1]
end
@testset "UDWT Transform" begin
signal = rand(20)
for filter in [UDWT_Filter_Haar{Float64}()
UDWT_Filter_Starck2{Float64}()]
s = 4
m = udwt(signal,filter,scale=s)
@test size(m.W) == (length(signal),s)
@test size(m.V) == (length(signal),)
@test scale(m) == s
signal_from_inv = inverse_udwt(m)
@test signal ≈ signal_from_inv
end
end
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | code | 202 | @testset "swap" begin
a=Int(1)
b=Int(2)
@swap(a,b)
@test a == 2
@test b == 1
a=rand(5)
b=rand(8)
@swap(a,b)
@test length(a) == 8
@test length(b) == 5
end;
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | docs | 757 | # DirectConvolution [](https://vincent-picaud.github.io/DirectConvolution.jl/stable) [](https://vincent-picaud.github.io/DirectConvolution.jl/dev) [](https://github.com/vincent-picaud/DirectConvolution.jl/actions) [](https://codecov.io/gh/vincent-picaud/DirectConvolution.jl)
A Julia package to compute: γ[k] = Σ_i α[i] β[k+λi]
Has applications to filtering, wavelet transform...
Work in progress: documentation rewriting (using Documenter.jl)
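
A minimal usage sketch (see the documentation for the full API):

```julia
using DirectConvolution

signal = rand(100)

# Savitzky-Golay smoothing: window of length 2*5+1, cubic polynomial
sg = SG_Filter(halfWidth=5, degree=3)
smoothed = apply_SG_filter(signal, sg, derivativeOrder=0)

# direct convolution with a small centered filter
α = LinearFilter([0.25, 0.5, 0.25])
γ = directConv(α, signal)
```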
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | docs | 1050 | # DirectConvolution
[](https://travis-ci.org/vincent-picaud/DirectConvolution.jl)
[](http://codecov.io/github/vincent-picaud/DirectConvolution.jl?branch=master)
This package provides functions related to convolution products using
direct (no FFT) methods. For short filters this approach is faster and
more versatile than the Julia native conv(...) function.
Currently supported features:
- 1D convolution, cross-correlation, boundary extensions...
- Savitzky-Golay filters
- Undecimated Wavelet Transform

You can read documentation directly
[here](https://vincent-picaud.github.io/DirectConvolution.jl/index.html),
however if you want to use the css theme you must clone this repo and browse it locally:
```
firefox docs/index.html
```
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.2.1 | 243a1cd3ce74cda7768c7b5df320a58577fe03de | docs | 3913 | ```@meta
CurrentModule = DirectConvolution
```
# DirectConvolution
Documentation for [DirectConvolution](https://github.com/vincent-picaud/DirectConvolution.jl).
This package allows efficient computation of
```math
\gamma[k] = \sum\limits_{i\in\Omega_\alpha}\alpha[i]\beta[k+\lambda i]
```
where ``\alpha`` is a filter of support ``\Omega_\alpha`` defined as follows (see [`filter of support`](@ref range(c::LinearFilter))):
```math
\Omega_\alpha = \llbracket -\text{offset}(\alpha), -\text{offset}(\alpha) +\text{length}(\alpha)-1 \rrbracket
```
For ``\lambda=-1`` you get a convolution, for ``\lambda=+1`` a
[wiki:cross-correlation](https://en.wikipedia.org/wiki/Cross-correlation)
whereas using ``\lambda=\pm 2^n`` is useful to implement the
undecimated wavelet transform (the so-called [wiki:algorithme à
trous](https://en.wikipedia.org/wiki/Stationary_wavelet_transform)).
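As a plain-Julia reference, here is a naive sketch of this computation for a filter stored as a vector together with its offset. This only illustrates the formula above (with zero-padding at the boundaries); it is not the package's implementation, and the name `direct_conv_naive` is made up for this sketch.

```julia
# Naive reference implementation of γ[k] = Σ_i α[i] β[k + λ i],
# with α indexed on Ω_α = [-offset, -offset + length(α) - 1].
function direct_conv_naive(α::AbstractVector, offset::Int, λ::Int, β::AbstractVector)
    T = promote_type(eltype(α), eltype(β))
    γ = zeros(T, length(β))
    for k in eachindex(β)
        acc = zero(T)
        for (j, a) in pairs(α)
            i = j - 1 - offset           # filter index i ∈ Ω_α
            idx = k + λ * i
            if 1 <= idx <= length(β)     # zero-padding boundary extension
                acc += a * β[idx]
            end
        end
        γ[k] = acc
    end
    return γ
end
```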
```@setup session_1
using DirectConvolution
using DelimitedFiles
using LinearAlgebra
using Plots
ENV["GKSwstype"]=100
gr()
rootDir = joinpath(dirname(pathof(DirectConvolution)), "..")
dataDir = joinpath(rootDir,"data")
```
# Use cases
These demos use data stored in the `data/` folder.
```@repl session_1
dataDir
```
There is one 1D signal and one 2D signal:
```@example session_1
data_1D = readdlm(joinpath(dataDir,"signal_1.csv"),',');
data_1D_x = @view data_1D[:,1]
data_1D_y = @view data_1D[:,2]
plot(data_1D_x,data_1D_y,label="signal")
```
```@example session_1
data_2D=readdlm(joinpath(dataDir,"surface.data"));
surface(data_2D,label="2D signal")
```
## Savitzky-Golay filters
This example shows how to compute and use [wiki: Savitzky-Golay
filters](https://en.wikipedia.org/wiki/Savitzky%E2%80%93Golay_filter).
### Filter creation
Create a set of Savitzky-Golay filters using polynomials of degree $3$
with a window width of $11=2\times 5+1$.
```@example session_1
sg = SG_Filter(Float64,halfWidth=5,degree=3);
```
This can be checked with
```@repl session_1
length(sg)
polynomialOrder(sg)
```
### 1D signal smoothing
Apply this filter on an unidimensional signal:
```@example session_1
data_1D_y_smoothed = apply_SG_filter(data_1D_y,sg,derivativeOrder=0)
plot(data_1D_x,data_1D_y_smoothed,linewidth=3,label="smoothed signal")
plot!(data_1D_x,data_1D_y,label="signal")
```
### 2D signal smoothing
Create two filters, one for the `I` direction, the other for the `J`
direction. Then, apply these filters on a two dimensional signal.
```@example session_1
sg_I = SG_Filter(Float64,halfWidth=5,degree=3);
sg_J = SG_Filter(Float64,halfWidth=3,degree=3);
data_2D_smoothed = apply_SG_filter2D(data_2D,
sg_I,
sg_J,
derivativeOrder_I=0,
derivativeOrder_J=0)
surface(data_2D_smoothed,label="Smoothed 2D signal");
```
## Wavelet transform
Choose a wavelet filter:
```@example session_1
filter = UDWT_Filter_Starck2{Float64}()
```
Perform a UDWT transform:
```@example session_1
data_1D_udwt = udwt(data_1D_y,filter,scale=8)
```
Plot Results:
```@example session_1
label=["W$i" for i in 1:scale(data_1D_udwt)];
plot(data_1D_udwt.W,label=reshape(label,1,scale(data_1D_udwt)))
plot!(data_1D_udwt.V,label="V$(scale(data_1D_udwt))");
plot!(data_1D_y,label="signal")
```
Invert the transform (more precisely, because of the coefficient
redundancy, a pseudo-inverse is used):
```@example session_1
data_1D_y_reconstructed = inverse_udwt(data_1D_udwt);
norm(data_1D_y-data_1D_y_reconstructed)
```
To smooth the signal, a (very) rough solution is to cancel the two finest scales:
```@example session_1
data_1D_udwt.W[:,1] .= 0
data_1D_udwt.W[:,2] .= 0
data_1D_y_reconstructed = inverse_udwt(data_1D_udwt)
plot(data_1D_y_reconstructed,linewidth=3, label="smoothed");
plot!(data_1D_y,label="signal")
```
# API
```@index
```
```@autodocs
Modules = [DirectConvolution]
```
| DirectConvolution | https://github.com/vincent-picaud/DirectConvolution.jl.git |
|
[
"MIT"
] | 0.0.2 | f348346ad524aa2c100ecc4678128c100d446440 | code | 223 | module FluxKAN
using Flux
using LinearAlgebra
using ChainRulesCore
# Write your package code here.
include("./KALnet.jl")
include("./KACnet.jl")
include("./KAGnet.jl")
include("./KAGLnet.jl")
include("./examples.jl")
end
| FluxKAN | https://github.com/cometscome/FluxKAN.jl.git |
|
[
"MIT"
] | 0.0.2 | f348346ad524aa2c100ecc4678128c100d446440 | code | 4806 |
"""
    KACnet(in_dim, out_dim; polynomial_order=3, base_activation=SiLU)

Kolmogorov-Arnold layer using Chebyshev polynomials as the basis functions.
"""
mutable struct KACnet{in_dim,out_dim,polynomial_order}
base_weight
poly_weight
layer_norm
base_activation
in_dim
out_dim
polynomial_order
end
function KACnet(in_dim, out_dim; polynomial_order=3, base_activation=SiLU)
base_weight = Dense(in_dim, out_dim; bias=false)
poly_weight = Dense(in_dim * (polynomial_order + 1), out_dim; bias=false)
if out_dim == 1
layer_norm = Dense(out_dim, out_dim; bias=false)
else
layer_norm = LayerNorm(out_dim)
end
return KACnet{in_dim,out_dim,polynomial_order}(base_weight,
poly_weight, layer_norm, base_activation, in_dim, out_dim, polynomial_order)
end
function KACnet(base_weight, poly_weight, layer_norm, base_activation, in_dim, out_dim, polynomial_order)
return KACnet{in_dim,out_dim,polynomial_order}(base_weight, poly_weight,
layer_norm, base_activation,
in_dim, out_dim, polynomial_order
)
end
export KACnet
Flux.@layer KACnet
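# Evaluate Chebyshev polynomials T_0, ..., T_order elementwise via the recurrence
# T_n(x) = 2x T_{n-1}(x) - T_{n-2}(x); returns a vector of arrays, one per order.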
function compute_chebyshev_polynomials(x, order)
# Base case polynomials P0 and P1
P0 = zero(x)
fill!(P0,1)
#P0 = ones(eltype(x), size(x)...)#x.new_ones(x.shape) # P0 = 1 for all x
if order == 0
#return P0
return [P0]
end
P1 = deepcopy(x)
chebyshev_polys = [P0, P1]
# Compute higher order polynomials using recurrence
for n = 1:order-1
Cp1 = chebyshev_polys[end]
Cp0 = chebyshev_polys[end-1]
Pn = map((x,cp1,cp0) -> 2*x*cp1 - cp0,x,Cp1,Cp0)
#Pn = 2 .* x .* chebyshev_polys[end] .- chebyshev_polys[end-1] #2x Tn - T_{n-1}
#Pn = ((2.0 * n + 1.0) .* x .* chebyshev_polys[end] - n .* chebyshev_polys[end-1]) ./ (n + 1.0)
push!(chebyshev_polys, Pn)
end
return chebyshev_polys
end
export compute_chebyshev_polynomials
function ChainRulesCore.rrule(::typeof(compute_chebyshev_polynomials), x, order)
# Base case polynomials P0 and P1
P0 = zero(x)
fill!(P0,1)
#P0 = ones(eltype(x), size(x)...)#x.new_ones(x.shape) # P0 = 1 for all x
if order == 0
y = [P0]
else
P1 = deepcopy(x)
chebyshev_polys = [P0, P1]
# Compute higher order polynomials using recurrence
for n = 1:order-1
Cp1 = chebyshev_polys[end]
Cp0 = chebyshev_polys[end-1]
Pn = map((x,cp1,cp0) -> 2*x*cp1 - cp0,x,Cp1,Cp0)
#Pn = 2 .* x .* chebyshev_polys[end] .- chebyshev_polys[end-1] #2x Tn - T_{n-1}
#Pn = ((2.0 * n + 1.0) .* x .* chebyshev_polys[end] - n .* chebyshev_polys[end-1]) ./ (n + 1.0)
push!(chebyshev_polys, Pn)
end
y = chebyshev_polys
end
function pullback(ybar)
sbar = NoTangent()
dP0 = zero(x)
if order == 0
#dchebyshev_polys = [dP0]
dchebyshev_polys = dP0
else
dP1 = zero(x)
fill!(dP1,1)
#ones(eltype(x), size(x)...)
dchebyshev_polys = [dP0, dP1]
for n = 1:order-1
dCp1 = dchebyshev_polys[end]
dCp0 = dchebyshev_polys[end-1]
dPn = map((x,cp1,cp0) -> 2*x*cp1 - cp0,x,dCp1,dCp0)
#dPn = 2 .* x .* dchebyshev_polys[end] .- dchebyshev_polys[end-1] #2x Tn - T_{n-1}
push!(dchebyshev_polys, dPn)
end
end
dLdPdPdx = zero(x)
for n = 1:length(ybar)
dLdPdPdx .+= dchebyshev_polys[n] .* ybar[n] * (n - 1)
end
return sbar, dLdPdPdx, sbar
end
return y, pullback
end
function (m::KACnet{in_dim,out_dim,polynomial_order})(x) where {in_dim,out_dim,polynomial_order}
y = KACnet_forward(x, m.base_weight, m.poly_weight, m.layer_norm, m.base_activation, polynomial_order)
end
function KACnet_forward(x, base_weight, poly_weight, layer_norm, base_activation, polynomial_order)
# Apply base activation to input and then linear transform with base weights
xt = base_activation.(x)
base_output = base_weight(xt)
#base_output = base_weight(map(x -> base_activation(x),x))#base_weight(base_activation.(x))
# Normalize x to the range [-1, 1] for stable Chebyshev polynomial computation
xmin = minimum(x)
xmax = maximum(x)
dx = xmax - xmin
x_normalized = normalize_x(x, xmin, dx)
# Compute Chebyshev polynomials for the normalized x
chebyshev_polys = compute_chebyshev_polynomials(x_normalized, polynomial_order)
chebyshev_basis = cat(chebyshev_polys..., dims=1)
# Compute polynomial output using polynomial weights
poly_output = poly_weight(chebyshev_basis)
# Combine base and polynomial outputs, normalize, and activate
y = base_activation.(layer_norm(base_output .+ poly_output))
return y
end
| FluxKAN | https://github.com/cometscome/FluxKAN.jl.git |
|
[
"MIT"
] | 0.0.2 | f348346ad524aa2c100ecc4678128c100d446440 | code | 4792 |
mutable struct Radial_distribution_function_L
grids#::Vector{Float64}
denominator#::Float64
num_grids#::Int64
grid_max#::Float64
grid_min#::Float64
end
"""
    KAGLnet(in_dim, out_dim; num_grids=8, base_activation=SiLU, grid_max=1, grid_min=-1, hasbase=true)

Kolmogorov-Arnold layer using Gaussian radial basis functions with learnable (trainable) grid points.
"""
mutable struct KAGLnet{in_dim,out_dim,num_grids}
base_weight
poly_weight
layer_norm
base_activation
in_dim#::Int64
out_dim#::Int64
num_grids#::Int64
rdf::Radial_distribution_function_L
hasbase::Bool
end
function Radial_distribution_function_L(num_grids, grid_min, grid_max)
grids = range(grid_min, grid_max, length=num_grids)
grids_W = Dense(1, num_grids; bias=false)
grids_W.weight .= reshape(collect(grids), :, 1)
denominator = (grid_max - grid_min) / (num_grids - 1) |> f32
#println(typeof(denominator))
return Radial_distribution_function_L(grids_W.weight, denominator, num_grids, grid_max, grid_min)
end
export Radial_distribution_function_L
Flux.@layer Radial_distribution_function_L trainable = (grids,)
function (m::Radial_distribution_function_L)(x)
y = rdf_foward_L(x, m.num_grids, m.grids, m.denominator)
end
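# Evaluate one Gaussian basis function per grid point:
# y[n] = exp(-((x - grids[n]) / denominator)^2); returns a vector of arrays.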
function rdf_foward_L(x, num_grids, grids, denominator)
y = []
for n = 1:num_grids
yn = zero(x)
yn .= exp.(-((x .- grids[n]) ./ denominator) .^ 2)
push!(y, yn)
end
return y
end
function ChainRulesCore.rrule(::typeof(rdf_foward_L), x, num_grids, grids, denominator)
y = []
for n = 1:num_grids
yn = zero(x)
yn .= exp.(-((x .- grids[n]) ./ denominator) .^ 2)
push!(y, yn)
end
function pullback(ybar)
sbar = NoTangent()
dLdGdx = @thunk(begin
dy = []
for n = 1:num_grids
dyn = (-2 .* (x .- grids[n]) ./ denominator^2) .* y[n]
#dyn = 2 * (-((x .- grids[n]) ./ denominator)) * exp.(-((x .- grids[n]) ./ denominator) .^ 2)
push!(dy, dyn)
end
dLdGdx = zero(x)
for n = 1:length(ybar)
dLdGdx .+= dy[n] .* ybar[n]
end
dLdGdx
end)
dLdGdg = @thunk(begin
dy = []
for n = 1:num_grids
dyn = (2 .* (x .- grids[n]) ./ denominator^2) .* y[n]
push!(dy, dyn)
end
dLdGdg = zero(grids)
for n = 1:length(ybar)
dLdGdg[n] = sum(dy[n] .* ybar[n])
end
dLdGdg
end)
return sbar, dLdGdx, sbar, dLdGdg, sbar
end
return y, pullback
end
function KAGLnet(in_dim, out_dim; num_grids=8, base_activation=SiLU, grid_max=1, grid_min=-1, hasbase=true)
base_weight = Dense(in_dim, out_dim; bias=false)
poly_weight = Dense(in_dim * num_grids, out_dim; bias=false)
if out_dim == 1
layer_norm = Dense(out_dim, out_dim; bias=false)
else
layer_norm = LayerNorm(out_dim)
end
rdf = Radial_distribution_function_L(num_grids, grid_min, grid_max)
return KAGLnet{in_dim,out_dim,num_grids}(base_weight,
poly_weight, layer_norm, base_activation, in_dim, out_dim, num_grids, rdf, hasbase)
end
function KAGLnet(base_weight, poly_weight, layer_norm, base_activation, in_dim, out_dim, num_grids, rdf, hasbase)
return KAGLnet{in_dim,out_dim,num_grids}(base_weight, poly_weight,
layer_norm, base_activation,
in_dim, out_dim, num_grids, rdf, hasbase
)
end
export KAGLnet
Flux.@layer KAGLnet
function (m::KAGLnet{in_dim,out_dim,num_grids})(x) where {in_dim,out_dim,num_grids}
y = KAGLnet_forward(x, m.base_weight, m.poly_weight, m.layer_norm, m.base_activation, m.rdf, m.hasbase)
end
function KAGLnet_forward(x, base_weight, poly_weight, layer_norm, base_activation, rdf, hasbase)
# Apply base activation to input and then linear transform with base weights
if hasbase
base_output = base_weight(base_activation.(x))
end
# Normalize x to the range [-1, 1] before evaluating the Gaussian basis functions
xmin = minimum(x)
xmax = maximum(x)
dx = xmax - xmin
x_normalized = normalize_x(x, xmin, dx)
# Evaluate the Gaussian radial basis functions for the normalized x (variable names kept from the Chebyshev version)
chebyshev_polys = rdf(x_normalized)
#compute_chebyshev_polynomials(x_normalized, polynomial_order)
#chebyshev_polys = compute_chebyshev_polynomials(x_normalized, polynomial_order)
chebyshev_basis = cat(chebyshev_polys..., dims=1)
# Compute polynomial output using polynomial weights
poly_output = poly_weight(chebyshev_basis)
# Combine base and polynomial outputs, normalize, and activate
if hasbase
y = base_output .+ poly_output
else
y = poly_output
end
y = base_activation.(layer_norm(y))
#y = base_activation.(layer_norm(base_output .+ poly_output))
return y
end
| FluxKAN | https://github.com/cometscome/FluxKAN.jl.git |
|
[
"MIT"
] | 0.0.2 | f348346ad524aa2c100ecc4678128c100d446440 | code | 4774 |
mutable struct Radial_distribution_function
grids#::Vector{Float64}
denominator
num_grids
grid_max
grid_min
end
"""
    KAGnet(in_dim, out_dim; num_grids=8, base_activation=SiLU, grid_max=1, grid_min=-1)

Kolmogorov-Arnold layer using Gaussian radial basis functions on fixed (non-trainable) grid points.
"""
mutable struct KAGnet{in_dim,out_dim,num_grids}
base_weight
poly_weight
layer_norm
base_activation
in_dim
out_dim
num_grids
rdf::Radial_distribution_function
end
function Radial_distribution_function(num_grids, grid_min, grid_max)
grids = range(grid_min, grid_max, length=num_grids)
grids_W = Dense(1, num_grids; bias=false)
#display(reshape(collect(grids), :, 1))
#display(grids_W.weight)
grids_W.weight .= reshape(collect(grids), :, 1)
denominator = (grid_max - grid_min) / (num_grids - 1) |> f32
return Radial_distribution_function(grids_W.weight, denominator, num_grids, grid_max, grid_min)
end
export Radial_distribution_function
Flux.@layer Radial_distribution_function trainable = ()
function (m::Radial_distribution_function)(x)
y = rdf_foward(x, m.num_grids, m.grids, m.denominator)
end
function gauss_f(x,g,denominator)
y = zero(x)
@. y = exp(-((x - g) / denominator) ^ 2)
return y
end
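# Evaluate the Gaussian basis at every grid point; returns a vector of arrays, one entry per grid point.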
function rdf_foward(x, num_grids, grids, denominator)
y = []
#y = map(g -> gauss_f(x,g,denominator),grids)
#return y
for n = 1:num_grids
yn = zero(x)
yn .= exp.(-((x .- grids[n]) ./ denominator) .^ 2)
push!(y, yn)
end
return y
end
function ChainRulesCore.rrule(::typeof(rdf_foward), x, num_grids, grids, denominator)
y = []
for n = 1:num_grids
yn = zero(x)
yn .= exp.(-((x .- grids[n]) ./ denominator) .^ 2)
push!(y, yn)
end
function pullback(ybar)
sbar = NoTangent()
dLdGdx = @thunk(begin
dy = []
for n = 1:num_grids
dyn = (-2 .* (x .- grids[n]) ./ denominator^2) .* y[n]
#dyn = 2 * (-((x .- grids[n]) ./ denominator)) * exp.(-((x .- grids[n]) ./ denominator) .^ 2)
push!(dy, dyn)
end
dLdGdx = zero(x)
for n = 1:length(ybar)
dLdGdx .+= dy[n] .* ybar[n]
end
dLdGdx
end)
dLdGdg = sbar
#= note: not implemented now.
@thunk(begin
dy = []
for n = 1:num_grids
dyn = (2 .* (x .- grids[n]) ./ denominator^2) .* y[n]
push!(dy, dyn)
end
dLdGdg = zero(grids)
for n = 1:length(ybar)
dLdGdg[n] = sum(dy[n] .* ybar, dims=1)
end
dLdGdg
end)
=#
return sbar, dLdGdx, sbar, dLdGdg, sbar
end
return y, pullback
end
function KAGnet(in_dim, out_dim; num_grids=8, base_activation=SiLU, grid_max=1, grid_min=-1)
base_weight = Dense(in_dim, out_dim; bias=false)
poly_weight = Dense(in_dim * num_grids, out_dim; bias=false)
if out_dim == 1
layer_norm = Dense(out_dim, out_dim; bias=false)
else
layer_norm = LayerNorm(out_dim)
end
rdf = Radial_distribution_function(num_grids, grid_min, grid_max)
return KAGnet{in_dim,out_dim,num_grids}(base_weight,
poly_weight, layer_norm, base_activation, in_dim, out_dim, num_grids, rdf)
end
function KAGnet(base_weight, poly_weight, layer_norm, base_activation, in_dim, out_dim, num_grids, rdf)
return KAGnet{in_dim,out_dim,num_grids}(base_weight, poly_weight,
layer_norm, base_activation,
in_dim, out_dim, num_grids, rdf
)
end
export KAGnet
Flux.@layer KAGnet
function (m::KAGnet{in_dim,out_dim,num_grids})(x) where {in_dim,out_dim,num_grids}
y = KAGnet_forward(x, m.base_weight, m.poly_weight, m.layer_norm, m.base_activation, m.rdf)
end
function KAGnet_forward(x, base_weight, poly_weight, layer_norm, base_activation, rdf)
# Apply base activation to input and then linear transform with base weights
xt = base_activation.(x)
base_output = base_weight(xt)
#base_output = base_weight(base_activation.(x))
# Normalize x to the range [-1, 1] before evaluating the Gaussian basis functions
xmin = minimum(x)
xmax = maximum(x)
dx = xmax - xmin
x_normalized = normalize_x(x, xmin, dx)
# Evaluate the Gaussian radial basis functions for the normalized x (variable names kept from the Chebyshev version)
chebyshev_polys = rdf(x_normalized)
#compute_chebyshev_polynomials(x_normalized, polynomial_order)
#chebyshev_polys = compute_chebyshev_polynomials(x_normalized, polynomial_order)
chebyshev_basis = cat(chebyshev_polys..., dims=1)
# Compute polynomial output using polynomial weights
poly_output = poly_weight(chebyshev_basis)
# Combine base and polynomial outputs, normalize, and activate
y = base_activation.(layer_norm(base_output .+ poly_output))
return y
end
| FluxKAN | https://github.com/cometscome/FluxKAN.jl.git |
|
[
"MIT"
] | 0.0.2 | f348346ad524aa2c100ecc4678128c100d446440 | code | 4268 |
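"""
    KALnet(in_dim, out_dim; polynomial_order=3, base_activation=SiLU)

Kolmogorov-Arnold layer using Legendre polynomials as the basis functions.
"""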
mutable struct KALnet{in_dim,out_dim,polynomial_order}
base_weight
poly_weight
layer_norm
base_activation
in_dim::Int64
out_dim::Int64
polynomial_order::Int64
end
function KALnet(in_dim, out_dim; polynomial_order=3, base_activation=SiLU)
base_weight = Dense(in_dim, out_dim; bias=false)
poly_weight = Dense(in_dim * (polynomial_order + 1), out_dim; bias=false)
if out_dim == 1
layer_norm = Dense(out_dim, out_dim; bias=false)
else
layer_norm = LayerNorm(out_dim)
end
return KALnet{in_dim,out_dim,polynomial_order}(base_weight,
poly_weight, layer_norm, base_activation, in_dim, out_dim, polynomial_order)
end
function KALnet(base_weight, poly_weight, layer_norm, base_activation, in_dim, out_dim, polynomial_order)
return KALnet{in_dim,out_dim,polynomial_order}(base_weight, poly_weight,
layer_norm, base_activation,
in_dim, out_dim, polynomial_order
)
end
export KALnet
Flux.@layer KALnet
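# SiLU (swish) activation: x * sigmoid(x), used as the default base activation.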
SiLU(x) = x / (1 + exp(-x))
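# Evaluate Legendre polynomials P_0, ..., P_order elementwise via Bonnet's recurrence
# (n+1) P_{n+1}(x) = (2n+1) x P_n(x) - n P_{n-1}(x); returns a vector of arrays.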
function compute_legendre_polynomials(x, order)
# Base case polynomials P0 and P1
P0 = zero(x)
fill!(P0,1)
#P0 = ones(eltype(x), size(x)...)#x.new_ones(x.shape) # P0 = 1 for all x
if order == 0
#return P0
return [P0]
end
P1 = deepcopy(x)
legendre_polys = [P0, P1]
# Compute higher order polynomials using recurrence
for n = 1:order-1
Pn = ((2.0 * n + 1.0) .* x .* legendre_polys[end] - n .* legendre_polys[end-1]) ./ (n + 1.0)
push!(legendre_polys, Pn)
end
return legendre_polys
end
export compute_legendre_polynomials
function ChainRulesCore.rrule(::typeof(compute_legendre_polynomials), x, order)
# Base case polynomials P0 and P1
P0 = zero(x)
fill!(P0,1)
#P0 = ones(eltype(x), size(x)...)#x.new_ones(x.shape) # P0 = 1 for all x
if order == 0
y = [P0]
else
P1 = deepcopy(x)
legendre_polys = [P0, P1]
# Compute higher order polynomials using recurrence
for n = 1:order-1
Pn = ((2.0 * n + 1.0) .* x .* legendre_polys[end] - n .* legendre_polys[end-1]) ./ (n + 1.0)
push!(legendre_polys, Pn)
end
y = legendre_polys
end
function pullback(ybar)
sbar = NoTangent()
dP0 = zero(x)
if order == 0
#dlegendre_polys = [dP0]
dlegendre_polys = dP0
else
dP1 = zero(x)
fill!(dP1,1)
#dP1 = ones(eltype(x), size(x)...)
dlegendre_polys = [dP0, dP1]
for n = 1:order-1
# Derivative recurrence: P'_{n+1}(x) = (n+1) P_n(x) + x P'_n(x)
dPn = (n + 1) * legendre_polys[n+1] + x .* dlegendre_polys[end]
push!(dlegendre_polys, dPn)
end
end
dLdPdPdx = zero(x)
for n = 1:length(ybar)
dLdPdPdx .+= dlegendre_polys[n] .* ybar[n]
end
return sbar, dLdPdPdx, sbar
end
return y, pullback
end
function (m::KALnet{in_dim,out_dim,polynomial_order})(x) where {in_dim,out_dim,polynomial_order}
y = KALnet_forward(x, m.base_weight, m.poly_weight, m.layer_norm, m.base_activation, polynomial_order)
end
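# Linearly rescale x from [xmin, xmin + dx] to [-1, 1].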
function normalize_x(x, xmin, dx)
return 2 * (x .- xmin) / dx .- 1
end
function KALnet_forward(x, base_weight, poly_weight, layer_norm, base_activation, polynomial_order)
# Apply base activation to input and then linear transform with base weights
#base_output = base_weight(base_activation.(x))
xt = base_activation.(x)
base_output = base_weight(xt)
# Normalize x to the range [-1, 1] for stable Legendre polynomial computation
xmin = minimum(x)
xmax = maximum(x)
dx = xmax - xmin
x_normalized = normalize_x(x, xmin, dx)
# Compute Legendre polynomials for the normalized x
legendre_polys = compute_legendre_polynomials(x_normalized, polynomial_order)
legendre_basis = cat(legendre_polys..., dims=1)
# Compute polynomial output using polynomial weights
poly_output = poly_weight(legendre_basis)
# Combine base and polynomial outputs, normalize, and activate
y = base_activation.(layer_norm(base_output .+ poly_output))
return y
end
| FluxKAN | https://github.com/cometscome/FluxKAN.jl.git |
|
[
"MIT"
] | 0.0.2 | f348346ad524aa2c100ecc4678128c100d446440 | code | 3196 | using Flux
using Flux.Data: DataLoader
using Flux: onehotbatch, onecold
using Flux.Losses: logitcrossentropy
using MLDatasets
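"""
    MNIST_KAN(; batch_size=256, epochs=20, nhidden=64, polynomial_order=3, method="Legendre")

Train a two-layer KAN classifier on MNIST with Flux. `method` selects the basis:
"Legendre", "Chebyshev", "Gaussian", or "GaussianLearnable".
"""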
function MNIST_KAN(; batch_size=256, epochs=20, nhidden=64, polynomial_order=3, method="Legendre")
# Loading Dataset
x_train, y_train = MLDatasets.MNIST.traindata(Float32)
x_test, y_test = MLDatasets.MNIST.testdata(Float32)
# Reshape Data in order to flatten each image into a linear array
x_train = Flux.flatten(x_train) # 784×60000
x_test = Flux.flatten(x_test) # 784×10000
# One-hot-encode the labels
y_train = onehotbatch(y_train, 0:9) # 10×60000
y_test = onehotbatch(y_test, 0:9) # 10×10000
img_size = (28, 28, 1)
input_size = prod(img_size) # 784
nclasses = 10 # 0~9
# Define model
#model = Chain(
# Dense(input_size, 32, relu),
# Dense(32, nclasses)
#)
nn = nhidden
if method == "Legendre"
model = Chain(
KALnet(input_size, nn; polynomial_order),
KALnet(nn, nclasses; polynomial_order)
)
elseif method == "Chebyshev"
model = Chain(
KACnet(input_size, nn; polynomial_order),
KACnet(nn, nclasses; polynomial_order)
)
elseif method == "Gaussian"
model = Chain(
KAGnet(input_size, nn; num_grids=polynomial_order + 1),
KAGnet(nn, nclasses; num_grids=polynomial_order + 1)
)
elseif method == "GaussianLearnable"
model = Chain(
KAGLnet(input_size, nn; num_grids=polynomial_order + 1),
KAGLnet(nn, nclasses; num_grids=polynomial_order + 1)
)
else
error("method = $method is not supported")
end
display(model)
# parameter to be learned in the model
parameters = Flux.params(model)
# batch size and number of epochs
#batch_size = 256
#epochs = 30
# Create minibatch loader for training and testing
train_loader = DataLoader((x_train, y_train), batchsize=batch_size, shuffle=true)
test_loader = DataLoader((x_test, y_test), batchsize=batch_size, shuffle=true)
# Define optimizer
opt = ADAM()
# calculate loss for given data or collection of data
function loss(x, y)
ŷ = model(x)
return logitcrossentropy(ŷ, y, agg=sum)
end
# calculate loss and accuracy of given collection of data
function loss_accuracy(loader)
acc = 0.0
ls = 0.0
num = 0
for (x, y) in loader
ŷ = model(x)
ls += logitcrossentropy(ŷ, y, agg=sum)
acc += sum(onecold(ŷ) .== onecold(y))
num += size(x, 2)
end
return ls / num, acc / num
end
function callback(epoch)
#display(model[2])
println("Epoch=$epoch")
train_loss, train_accuracy = loss_accuracy(train_loader)
test_loss, test_accuracy = loss_accuracy(test_loader)
println(" train_loss = $train_loss, train_accuracy = $train_accuracy")
println(" test_loss = $test_loss, test_accuracy = $test_accuracy")
end
for epoch in 1:epochs
Flux.train!(loss, parameters, train_loader, opt)
callback(epoch)
end
end
| FluxKAN | https://github.com/cometscome/FluxKAN.jl.git |
|
[
"MIT"
] | 0.0.2 | f348346ad524aa2c100ecc4678128c100d446440 | code | 6891 | using FluxKAN
using Test
using LegendrePolynomials
using Flux
function test()
x = rand(3, 4)
order = 4
display(x)
y = compute_legendre_polynomials(x, order)
for n = 0:4
yi = Pl.(x, n)
@test yi == y[n+1]
display(yi)
end
end
function test2()
x = rand(Float32, 3, 4)
kan = KALnet(3, 2)
#println(Flux.params(kan))
display(kan)
y = kan(x)
kan = KACnet(3, 2)
#println(Flux.params(kan))
display(kan)
y = kan(x)
kan = KAGnet(3, 2)
#println(Flux.params(kan))
display(kan)
y = kan(x)
kan = KAGLnet(3, 2)
#println(Flux.params(kan))
display(kan)
y = kan(x)
x = rand(Float64, 3, 4)
kan = KAGLnet(3, 2) |> f64
#println(Flux.params(kan))
display(kan)
y = kan(x)
println("test2 end")
#display(y)
end
function test3(method="L")
n = 100
x0 = range(-2, length=n, stop=2) # Since Julia 1.0.0, use range instead of linspace.
a0 = 3.0
a1 = 2.0
b0 = 1.0
y0 = zeros(Float32, n)
f(x0) = a0 .* x0 .+ a1 .* x0 .^ 2 .+ b0 .+ 3 * cos.(20 * x0)
y0[:] = f.(x0)
function make_φ(x0, n, k)
φ = zeros(Float32, k, n)
for i in 1:k
φ[i, :] = x0 .^ (i - 1)
end
return φ
end
k = 4
φ = make_φ(x0, n, k)
#model = Dense(k, 1) # Build the model: W*x + b, with W[1,k], b[1]
#model = Chain(Dense(k, 10, relu), Dense(10, 1))
if method == "L"
model = Chain(KALnet(k, 10), KALnet(10, 1))
elseif method == "C"
model = Chain(KACnet(k, 10), KACnet(10, 1))
elseif method == "G"
model = Chain(KAGnet(k, 10), KAGnet(10, 1))
elseif method == "GL"
model = Chain(KAGLnet(k, 10), KAGLnet(10, 1))
end
display(model)
#println("W = ", model[1].weight)
#println("b = ", model[1].bias)
loss(x, y) = Flux.mse(model(x), y) # Loss function; mse is the mean squared error
opt = ADAM() # Optimizer; ADAM is used here
function make_random_batch(x, y, batchsize)
numofdata = length(y)
A = rand(1:numofdata, batchsize)
data = []
for i = 1:batchsize
push!(data, (x[:, A[i]], y[A[i]])) # Build a random batch in the form [(x1,y1),(x2,y2),...]
end
return data
end
function train_batch!(xtest, ytest, model, loss, opt, nt)
for it = 1:nt
data = make_random_batch(xtest, ytest, batchsize)
Flux.train!(loss, Flux.params(model), data, opt)
if it % 100 == 0
lossvalue = 0.0
for i = 1:length(ytest)
lossvalue += loss(xtest[:, i], ytest[i])
end
println("$(it)-th loss = ", lossvalue / length(y0))
end
end
end
batchsize = 20
nt = 2000
train_batch!(φ, y0, model, loss, opt, nt) # training
display(model)
#println(model[1].weight) #W
#println(model[1].bias) #b
end
function test4(method="L")
function make_data(f)
num = 47
numt = 19
numtrain = num * num
numtest = numt * numt
xtrain = range(-2, 2, length=num)
ytrain = range(-2, 2, length=num)
xtest = range(-2, 2, length=numt)
ytest = range(-2, 2, length=numt)
count = 0
ztrain = Float32[]
for i = 1:num
for j = 1:num
count += 1
push!(ztrain, f(xtrain[i], ytrain[j]))
end
end
count = 0
ztest = Float32[]
for i = 1:numt
for j = 1:numt
count += 1
push!(ztest, f(xtest[i], ytest[j]))
end
end
return xtrain, ytrain, ztrain, xtest, ytest, ztest
end
function make_inputoutput(x, y, z)
count = 0
numx = length(x)
numy = length(y)
input = zeros(Float64, 2, numx * numy)
output = zeros(Float64, 2, numx * numy)
count = 0
for i = 1:numx
for j = 1:numy
count += 1
input[1, count] = x[i]
input[2, count] = y[j]
output[1, count] = z[count]
end
end
return input, output
end
function train_batch!(x_train, y_train, model, loss, opt_state, x_test, y_test, nepoch, batchsize)
numtestdata = size(y_test)[2]
train_loader = Flux.DataLoader((x_train, y_train), batchsize=batchsize, shuffle=true)
for it = 1:nepoch
for (x, y) in train_loader
grads = Flux.gradient(m -> loss(m(x), y), model)[1]
Flux.update!(opt_state, model, grads)
end
if it % 10 == 0
lossvalue = loss(model(x_test), y_test) / numtestdata
println("$it-th testloss: $lossvalue")
end
end
end
function main(method="L")
num = 30
x = range(-2, 2, length=num)
y = range(-2, 2, length=num)
f(x, y) = x * y + cos(3 * x) + exp(y / 5) * x + tanh(y) * cos(3 * y)
z = [f(i, j) for i in x, j in y]'
#p = plot(x, y, z, st=:wireframe)
#savefig("original.png")
xtrain, ytrain, ztrain, xtest, ytest, ztest = make_data(f)
input_train, output_train = make_inputoutput(xtrain, ytrain, ztrain)
input_test, output_test = make_inputoutput(xtest, ytest, ztest)
#train_loader = Flux.DataLoader((input_train,output_train), batchsize=5, shuffle=true);
#model = Chain(Dense(2, 10, relu), Dense(10, 10, relu), Dense(10, 10, relu), Dense(10, 1))
hasbase = true
if method == "L"
model = Chain(KALnet(2, 10), KALnet(10, 1))
elseif method == "C"
model = Chain(KACnet(2, 10), KACnet(10, 1))
elseif method == "G"
model = Chain(KAGnet(2, 10), KAGnet(10, 1))
elseif method == "GL"
model = Chain(KAGLnet(2, 10; hasbase), KAGLnet(10, 1; hasbase))
end
display(model)
rule = Adam()
opt_state = Flux.setup(rule, model)
loss(y_hat, y) = sum((y_hat .- y) .^ 2)
nepoch = 1000
batchsize = 128
train_batch!(input_train, output_train, model, loss, opt_state, input_test, output_test, nepoch, batchsize)
znn = [model([i
j])[1] for i in x, j in y]'
#p = plot(x, y, [znn], st=:wireframe)
#savefig("dense.png")
display(model)
end
main(method)
end
@testset "FluxKAN.jl" begin
@testset "legendre_polynomials" begin
# Write your tests here.
test()
end
@testset "KAN" begin
# Write your tests here.
test2()
test3("L")
test3("C")
test3("G")
test3("GL")
println("test 4")
println("KAL")
test4("L")
println("KAC")
test4("C")
println("KAG")
test4("G")
println("KAGL")
test4("GL")
end
end
| FluxKAN | https://github.com/cometscome/FluxKAN.jl.git |
|
[
"MIT"
] | 0.0.2 | f348346ad524aa2c100ecc4678128c100d446440 | docs | 3792 | # FluxKAN: Julia version of the TorchKAN
[](https://github.com/cometscome/FluxKAN.jl/actions/workflows/CI.yml?query=branch%3Amain)
This is a Julia version of the [TorchKAN](https://github.com/1ssb/torchkan).
In the TorchKAN,
> TorchKAN introduces a simplified KAN model and its variations, including KANvolver and KAL-Net, designed for high-performance image classification and leveraging polynomial transformations for enhanced feature detection.
In the original TorchKAN, the package uses the PyTorch.
In the FluxKAN, this package uses the Flux.jl.
I rewrote the TorchKAN with the Julia language. Now this package has
- KAL-Net: Utilizing Legendre Polynomials in Kolmogorov Arnold Legendre Networks
In addition, I implemented Chebyshev polynomials in KAN.
- KAC-Net: Utilizing Chebyshev Polynomials in Kolmogorov Arnold Chebyshev Networks
I implemented the Gaussian Radial Basis Functions introduced in [fastkan](https://github.com/ZiyaoLi/fast-kan):
- KAG-Net: Utilizing Gaussian radial basis functions in Kolmogorov Arnold Gaussian Networks (non-trainable grids)
- KAGL-Net: (Experimental) Utilizing Gaussian radial basis functions in Kolmogorov Arnold Gaussian Networks (trainable grids)
# install
```
add https://github.com/cometscome/FluxKAN.jl
```
# How to use
You can use the ```KALnet``` layer like the ```Dense``` layer in Flux.jl.
For example, a model can be defined as
```julia
using FluxKAN
model = Chain(KALnet(2, 10), KALnet(10, 1))
```
or
```julia
using FluxKAN
model = Chain(KALnet(2, 10, polynomial_order=3), KALnet(10, 1, polynomial_order=3))
```
If you want to use the Chebyshev polynomials, you can use ```KACnet```.
```julia
using FluxKAN
model = Chain(KACnet(2, 10, polynomial_order=3), KACnet(10, 1, polynomial_order=3))
```
If you want to use the Gaussian radial basis functions, you can use ```KAGnet```.
```julia
using FluxKAN
model = Chain(KAGnet(2, 10, num_grids=4), KAGnet(10, 1, num_grids=4))
```
In ```KAGnet```, the grid points are fixed.
I also implemented a Gaussian version with learnable grid points, ```KAGLnet```, but this is experimental.
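A minimal sketch mirroring the examples above (the layer sizes and ```num_grids``` value are arbitrary):

```julia
using Flux, FluxKAN
model = Chain(KAGLnet(2, 10, num_grids=4), KAGLnet(10, 1, num_grids=4))
```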
# MNIST
```julia
using FluxKAN
FluxKAN.MNIST_KAN()
```
or
```julia
using FluxKAN
FluxKAN.MNIST_KAN(; batch_size=256, epochs=20, nhidden=64, polynomial_order=3,method= "Legendre")
```
We can choose ```Legendre```, ```Chebyshev```, ```Gaussian```, or ```GaussianLearnable```.
# GPU support
With CUDA.jl, we can use the GPU. For now, only ```KALnet``` and ```KACnet``` support GPU calculations.
Please see [the manual of Flux.jl](https://fluxml.ai/Flux.jl/stable/gpu/).
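A minimal sketch (assuming CUDA.jl is installed and a CUDA-capable GPU is available; the layer sizes here are arbitrary):

```julia
using Flux, CUDA
using FluxKAN

model = Chain(KALnet(784, 64), KALnet(64, 10)) |> gpu
x = rand(Float32, 784, 32) |> gpu  # a dummy batch of 32 flattened 28x28 images
y = model(x)                       # forward pass runs on the GPU
```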
## Author
Yuki Nagai, Ph. D.
Associate Professor in the Information Technology Center, The University of Tokyo.
## Contact
For support, please contact: [email protected]
## Cite this Project
If this project is used in your research or referenced for baseline results, please use the following BibTeX entries.
```bibtex
@misc{torchkan,
author = {Subhransu S. Bhattacharjee},
title = {TorchKAN: Simplified KAN Model with Variations},
year = {2024},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/1ssb/torchkan/}}
}
@misc{fluxkan,
author = {Yuki Nagai},
title = {FluxKAN: Julia version of the TorchKAN},
year = {2024},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/cometscome/FluxKAN.jl}}
}
```
## References
- [0] Ziming Liu et al., "KAN: Kolmogorov-Arnold Networks", 2024, arXiv. https://arxiv.org/abs/2404.19756
- [1] https://github.com/KindXiaoming/pykan
- [2] https://github.com/Blealtan/efficient-kan
- [3] https://github.com/1ssb/torchkan
- [4] https://github.com/ZiyaoLi/fast-kan | FluxKAN | https://github.com/cometscome/FluxKAN.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.