licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.1.0 | 2a928ffe1d85382b22a2d232fb7ebf07c5fa5210 | code | 676 | using Test: @testset, @test, @test_throws, @test_broken
using AlignedArrays
@testset "AlignedArrays" begin
    @testset "AlignedArrays" begin
        v = AlignedVector{Int, 256}(undef, 3)
        @test eltype(v) === Int
        @test length(v) === 3
        # The data pointer must honor the requested 256-byte alignment.
        @test reinterpret(Int, pointer(v)) % 256 == 0
        # Basic element access and broadcast assignment round-trip.
        v[1] = 1234
        @test v[1] == 1234
        v .= zeros(Int, 3)
        @test v[1] == v[2] == v[3] == 0
    end
    @testset "PageAlignedArrays" begin
        v = PageAlignedVector{Int}(undef, 3)
        @test eltype(v) === Int
        @test length(v) === 3
        # The data pointer must honor the system page alignment.
        @test reinterpret(Int, pointer(v)) % AlignedArrays.PAGESIZE == 0
        v[1] = 1234
        @test v[1] == 1234
        v .= zeros(Int, 3)
        @test v[1] == v[2] == v[3] == 0
    end
end
| AlignedArrays | https://github.com/analytech-solutions/AlignedArrays.jl.git |
|
[
"MIT"
] | 0.1.0 | 2a928ffe1d85382b22a2d232fb7ebf07c5fa5210 | docs | 1577 | # AlignedArrays.jl
[](https://github.com/analytech-solutions/AlignedArrays.jl/actions)
Array wrappers for working with aligned memory allocations suitable for efficient GPU and RDMA transfers.
# Usage
AlignedArrays.jl is still in early development, and currently only works for Linux systems.
Basic usage follows that of standard Array, Vector, Matrix types, but with the added parameter depicting the alignment of the array's memory.
Use `AlignedArray`, `AlignedVector`, or `AlignedMatrix` to specify memory alignment as a type parameter.
We provide `PageAlignedArray`, `PageAlignedVector`, and `PageAlignedMatrix` for convenience when allocation using the system's page alignment is desired.
```jl
julia> using AlignedArrays
julia> x = Vector{Int32}(undef, 5)
5-element Array{Int32,1}:
1897413280
32662
1826880912
32662
1730212208
julia> y = PageAlignedVector{Int32}(undef, 5)
5-element Array{Int32,1}:
0
0
0
0
0
julia> z = AlignedVector{Int32, 1024}(undef, 5)
5-element Array{Int32,1}:
-1
-1
-1
-1
-1
julia> typeof(y)
AlignedArray{Int32,1,4096}
julia> typeof(z)
AlignedArray{Int32,1,1024}
julia> pointer(x)
Ptr{Int32} @0x00007f966a213850
julia> pointer(y)
Ptr{Int32} @0x00000000029cf000
julia> pointer(z)
Ptr{Int32} @0x00000000029fd800
julia> y .= x
5-element Array{Int32,1}:
1897413280
32662
1826880912
32662
1730212208
julia> for i in y
println(i)
end
1897413280
32662
1826880912
32662
1730212208
```
| AlignedArrays | https://github.com/analytech-solutions/AlignedArrays.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 7689 | module Objects
export Parameter
export Particle
export Coupling
export Lorentz
export Vertex
export CouplingOrder
export Decay
export FormFactor
export anti
export is_goldstone_boson
export is_self_conjugate
# A UFO model parameter, parametrized by its numeric kind `T` (`Real` or
# `Complex`), selected from the UFO `type` keyword.
struct Parameter{T<:Number}
    name::String
    nature::String                  # "external" (from an LHA block) or "internal"
    value::Union{T, Expr, Symbol}   # numeric value or parsed expression
    tex_name::String
    lhablock::Union{String, Missing}
    lhacode::Union{Integer, Missing}

    # Keyword constructor mirroring the UFO `Parameter(...)` call.
    function Parameter(; kwargs...)
        # External parameters must carry their LHA block/code location.
        if kwargs[:nature] == "external" && (
            !haskey(kwargs, :lhablock) || !haskey(kwargs, :lhacode)
        )
            # `kwargs` is a pairs collection: index with a Symbol. The previous
            # `kwargs.name` field access threw instead of reporting the error.
            error("Need LHA information for external parameter $(kwargs[:name]).")
        end
        lhablock = haskey(kwargs, :lhablock) ? kwargs[:lhablock] : missing
        lhacode = if haskey(kwargs, :lhacode)
            @assert length(kwargs[:lhacode]) == 1
            first(kwargs[:lhacode])
        else
            missing
        end
        # String values are parsed into numbers/expressions; real literals are
        # promoted to complex when the declared UFO type is "complex".
        value = if isa(kwargs[:value], String)
            tmp = Meta.parse(kwargs[:value])
            if isa(tmp, Real) && kwargs[:type] == "complex"
                complex(tmp)
            else
                tmp
            end
        else
            @assert isa(kwargs[:value], Number)
            if isa(kwargs[:value], Real) && kwargs[:type] == "complex"
                complex(kwargs[:value])
            else
                kwargs[:value]
            end
        end
        if kwargs[:type] == "real"
            return new{Real}(
                kwargs[:name], kwargs[:nature], value,
                kwargs[:texname], lhablock, lhacode
            )
        elseif kwargs[:type] == "complex"
            return new{Complex}(
                kwargs[:name], kwargs[:nature], value,
                kwargs[:texname], lhablock, lhacode
            )
        else
            # Same Symbol-indexing fix as above (`kwargs.type` threw).
            error("Type $(kwargs[:type]) is not supported.")
        end
    end
end
# A UFO particle definition with its quantum numbers, TeX labels, and a bag
# of optional drawing/propagator properties.
struct Particle
    pdg_code::Int
    name::String
    anti_name::String
    spin::Int
    color::Int
    mass::Union{Real, Parameter{Real}, Symbol, Expr}
    width::Union{Real, Parameter{Real}, Symbol, Expr}
    tex_name::String
    anti_tex_name::String
    charge::Union{Integer, Rational}
    optional_properties::Dict{Symbol, Any}

    # Positional constructor: normalizes a floating-point charge to Rational.
    function Particle(
        pdg_code::Int,
        name::String,
        anti_name::String,
        spin::Int,
        color::Int,
        mass::Union{Real, Parameter{Real}, Symbol},
        width::Union{Real, Parameter{Real}, Symbol},
        tex_name::String,
        anti_tex_name::String,
        charge::Real,
        optional_properties::Dict{Symbol, Any}
    )
        normalized_charge = isa(charge, AbstractFloat) ? rationalize(charge) : charge
        return new(
            pdg_code, name, anti_name,
            spin, color, mass, width,
            tex_name, anti_tex_name,
            normalized_charge,
            optional_properties
        )
    end

    # Keyword constructor mirroring the UFO `Particle(...)` call.
    function Particle(; kwargs...)
        required = [
            :pdg_code, :name, :antiname,
            :spin, :color, :mass, :width,
            :texname, :antitexname, :charge
        ]
        # Defaults for the optional UFO properties.
        properties = Dict{Symbol, Any}(
            :propagating => true,
            :GoldstoneBoson => false,
            :propagator => nothing
        )
        # Every keyword beyond the required set is kept as an optional property.
        for extra ∈ setdiff(keys(kwargs), required)
            properties[extra] = kwargs[extra]
        end
        raw_charge = kwargs[:charge]
        rational_charge = isa(raw_charge, Integer) ? raw_charge : rationalize(raw_charge)
        # The drawing style always reflects spin/color (this overwrites any
        # user-supplied :line property, matching previous behavior).
        properties[:line] = find_line_type(
            kwargs[:spin], kwargs[:color];
            self_conjugate_flag=(kwargs[:name] == kwargs[:antiname])
        )
        return new(
            kwargs[:pdg_code],
            kwargs[:name],
            kwargs[:antiname],
            kwargs[:spin],
            kwargs[:color],
            kwargs[:mass],
            kwargs[:width],
            kwargs[:texname],
            kwargs[:antitexname],
            rational_charge,
            properties
        )
    end
end
# A UFO coupling constant: a named expression with its perturbative orders.
struct Coupling
    name::String
    value::Union{Expr, Symbol}   # parsed Julia expression for the value
    order::Dict{String, Int}     # e.g. "QED" => 1

    # Keyword constructor mirroring the UFO `Coupling(...)` call; string
    # values are translated from Python/cmath syntax and parsed.
    function Coupling(; kwargs...)
        raw = kwargs[:value]
        value = if raw isa String
            julia_src = replace(
                raw,
                "**" => "^",
                "cmath." => "",
                "complexconjugate" => "conj",
                ".*" => ". *"
            )
            Meta.parse(julia_src)
        else
            @assert raw isa Number
            raw
        end
        return new(kwargs[:name], value, kwargs[:order])
    end
end
# A UFO Lorentz structure: spins of the legs plus an algebraic structure
# string.
struct Lorentz
    name::String
    spins::Vector{Integer}
    structure::String

    # `structure` defaults to "external" (fixes the previous misspelling
    # "exteranl"), the UFO convention for externally-defined structures.
    Lorentz(; structure="external", kwargs...) = new(kwargs[:name], kwargs[:spins], structure)
end
# A UFO interaction vertex: participating particles, color and Lorentz
# structures, and the couplings keyed by (color, lorentz) index pairs.
struct Vertex
    name::String
    particles::Vector{Particle}
    color::Vector{String}
    lorentz::Vector{Lorentz}
    couplings::Dict{Tuple, Coupling}

    # Keyword constructor mirroring the UFO `Vertex(...)` call.
    function Vertex(; kwargs...)
        return new(
            kwargs[:name],
            kwargs[:particles],
            kwargs[:color],
            kwargs[:lorentz],
            kwargs[:couplings]
        )
    end
end
# A UFO coupling order (e.g. QCD, QED) with its expansion/hierarchy data.
struct CouplingOrder
    name::String
    expansion_order::Integer
    hierarchy::Integer
    perturbative_expansion::Integer

    # UFO model files spell this keyword `perturbative_expansion`; the old
    # `perturbation_expansion` spelling is still accepted for backward
    # compatibility (previously the UFO spelling was silently ignored and
    # the default 0 was stored).
    CouplingOrder(;
        perturbation_expansion::Integer=0,
        perturbative_expansion::Integer=perturbation_expansion,
        kwargs...
    ) = new(
        kwargs[:name],
        kwargs[:expansion_order],
        kwargs[:hierarchy],
        perturbative_expansion
    )
end
# A UFO decay table for one particle.
struct Decay
    name::String
    particle::Particle
    # NOTE(review): the field is named `particle_widths` but is filled from
    # the UFO keyword `partial_widths` — confirm the field name is intended.
    particle_widths::Dict{Tuple, String}

    # Keyword constructor mirroring the UFO `Decay(...)` call.
    function Decay(; kwargs...)
        return new(kwargs[:name], kwargs[:particle], kwargs[:partial_widths])
    end
end
# A UFO form factor definition.
struct FormFactor
    name::String
    # NOTE(review): `type` and `value` are deliberately untyped upstream;
    # presumably a type tag and an expression string — confirm against the
    # UFO models actually consumed.
    type
    value
end
# Build the antiparticle of `p`: PDG code, charge, and additive optional
# properties flip sign; names and TeX labels swap with their anti-versions.
function anti(p::Particle)::Particle
    # A self-conjugate particle is its own antiparticle.
    is_self_conjugate(p) && return p

    # Properties that carry over unchanged to the antiparticle.
    kept = [:line, :propagating, :GoldstoneBoson, :propagator]
    flipped = Dict{Symbol, Any}()
    for key ∈ kept
        flipped[key] = p.optional_properties[key]
    end
    # Every remaining optional property is negated.
    for key ∈ setdiff(keys(p.optional_properties), kept)
        flipped[key] = -p.optional_properties[key]
    end

    # Color singlets (1) and octets (8) are self-conjugate representations;
    # other representations map to their conjugate (negated) label.
    conj_color = p.color ∈ (1, 8) ? p.color : -p.color

    return Particle(
        -p.pdg_code,
        p.anti_name,
        p.name,
        p.spin,
        conj_color,
        p.mass,
        p.width,
        p.anti_tex_name,
        p.tex_name,
        -p.charge,
        flipped
    )
end
# Map a particle's spin (2s+1 convention, -1 for ghosts) and color to the
# Feynman-diagram line style used when drawing it.
function find_line_type(spin::Integer, color::Integer; self_conjugate_flag::Bool=false)::String
    if spin == -1
        return "dotted"                                  # ghosts
    elseif spin == 2
        # Fermions: Dirac fermions are straight; Majorana fermions get a
        # color-dependent "self-conjugate" decoration.
        self_conjugate_flag || return "straight"
        return color == 1 ? "swavy" : "scurly"
    elseif spin == 3
        return color == 1 ? "wavy" : "curly"             # vector bosons
    elseif spin == 5
        return "double"                                  # spin-2
    else
        # spin == 1 (scalars) and any unsupported spin fall back to dashed.
        return "dashed"
    end
end
# Whether `p` is flagged as a Goldstone boson in its optional properties.
# `optional_properties` is a Dict, so it must be indexed with a Symbol; the
# previous `.GoldstoneBoson` property access threw on every call.
is_goldstone_boson(p::Particle) = p.optional_properties[:GoldstoneBoson]

# A particle is self-conjugate when it is its own antiparticle.
is_self_conjugate(p::Particle) = p.name == p.anti_name
# Additive identity for parameter tables: a real internal parameter named
# "ZERO" whose value parses to 0.0.
Base.zero(::Type{Parameter}) = Parameter(
    name = "ZERO",
    nature = "internal",
    type = "real",
    value = "0.0",
    texname = "0"
)
end # module Objects
| UniversalFeynRulesOutput | https://github.com/Fenyutanchan/UniversalFeynRulesOutput.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 297 | module UniversalFeynRulesOutput
import Pkg
export convert_model
include("read.jl")
include("write.jl")
# Convert the UFO model directory at `model_path` into a generated Julia
# package; returns the path of the generated package.
convert_model(model_path::String)::String = write_model(model_path, read_model(model_path))
end # module UniversalFeynRulesOutput
| UniversalFeynRulesOutput | https://github.com/Fenyutanchan/UniversalFeynRulesOutput.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 10566 |
# Files every UFO model directory must provide (validated by `check_model`).
# Marked `const`: non-const module-level globals are untyped and slow.
const basic_model_files = [
    "particles.py",
    "couplings.py",
    "lorentz.py",
    "parameters.py",
    "vertices.py",
    "coupling_orders.py",
]

# Optional UFO files; the corresponding readers return an empty list when
# a file is absent.
const extra_model_files = [
    "decays.py",
    "form_factors.py",
    "propagators.py",
    "CT_vertices.py"
]
# Validate that `model_path` is a directory containing every basic UFO file.
#
# Throws `ArgumentError` with a descriptive message on failure. The previous
# `@assert`-based checks were input validation, which `@assert` is not meant
# for (asserts may be disabled and carry no useful message).
function check_model(model_path::String)
    isdir(model_path) || throw(ArgumentError("model path $model_path is not a directory"))
    missing_files = filter(
        file_name -> !isfile(joinpath(model_path, file_name)),
        basic_model_files
    )
    isempty(missing_files) || throw(ArgumentError(
        "model at $model_path is missing required files: $(join(missing_files, ", "))"
    ))
    return nothing
end
# Read counterterm vertices from `CT_vertices.py`, if present.
#
# Parsing of CT vertices is not implemented yet, so an empty list is always
# returned. Previously the function fell through and returned `nothing`
# whenever the file existed, which failed conversion to the declared
# `Vector{String}` return type.
function read_CT_vertices(model_path::String)::Vector{String}
    file_path = joinpath(model_path, "CT_vertices.py")
    if !isfile(file_path)
        return String[]
    end
    # TODO: actually parse the file once CT-vertex support is implemented.
    return String[]
end
# Parse `couplings.py` and return one Julia-source string per `Coupling(...)`
# entry, with Python syntax translated to Julia.
function read_couplings(model_path::String)::Vector{String}
    file_path = joinpath(model_path, "couplings.py")
    @assert isfile(file_path)
    file_contents = readlines(file_path)
    # Each entry starts on a line containing "Coupling(" ...
    begin_line_indices = findall(
        contains("Coupling("),
        file_contents
    )
    # ... and runs to the next line that ends with ')'.
    end_line_indices = map(
        begin_line_index -> findnext(endswith(')'), file_contents, begin_line_index),
        begin_line_indices
    )
    coupling_str_list = String[]
    for (begin_line_index, end_line_index) ∈ zip(begin_line_indices, end_line_indices)
        text = join(file_contents[begin_line_index:end_line_index], "")
        # Python single-quoted strings -> Julia double-quoted strings.
        text = replace(text, ''' => '"')
        # Python/cmath operators and calls -> Julia equivalents.
        text = replace(text,
            "**" => "^",
            "cmath." => "",
            "complexconjugate" => "conj",
            ".*" => ". *"
        )
        # Rewrite the Python dict literal `{'ORDER':n, ...}` holding the
        # coupling orders into a `Dict{String, Int}(...)` constructor call.
        ori_str_range = findfirst(r"\{.+\}", text)
        @assert !isnothing(ori_str_range)
        ori_str = text[ori_str_range]
        order_name_range_list = findall(r"\"\w+\"", ori_str)
        order_order_range_list = findall(r":\d+", ori_str)
        fin_str = "Dict{String, Int}(" * join(
            [
                ori_str[order_name_range] * " => " * ori_str[order_order_range][2:end]
                for (order_name_range, order_order_range) ∈ zip(order_name_range_list, order_order_range_list)
            ], ", "
        ) * ")"
        text = replace(text, ori_str => fin_str)
        # Round-trip through the parser to normalize formatting.
        push!(coupling_str_list, (string ∘ Meta.parse)(text))
    end
    return coupling_str_list
end
# Parse `coupling_orders.py` and return one Julia-source string per
# `CouplingOrder(...)` entry.
function read_coupling_orders(model_path::String)::Vector{String}
    file_path = joinpath(model_path, "coupling_orders.py")
    @assert isfile(file_path)
    lines = readlines(file_path)

    # Each entry starts on a line containing "CouplingOrder(" and runs to
    # the next line that ends with ')'.
    starts = findall(contains("CouplingOrder("), lines)
    stops = [findnext(endswith(')'), lines, start) for start ∈ starts]

    result = String[]
    for (start, stop) ∈ zip(starts, stops)
        entry = join(lines[start:stop], "")
        # Python single-quoted strings -> Julia double-quoted strings.
        entry = replace(entry, ''' => '"')
        # Round-trip through the parser to normalize formatting.
        push!(result, string(Meta.parse(entry)))
    end
    return result
end
# Parse `decays.py` (optional) and return Julia-source strings, prefixed with
# the `Particles` import the generated module needs. Returns an empty list
# when the file is absent.
function read_decays(model_path::String)::Vector{String}
    file_path = joinpath(model_path, "decays.py")
    if !isfile(file_path)
        return String[]
    end
    file_contents = readlines(file_path)
    # Each entry starts on a line containing "Decay(" and runs to the next
    # line that ends with ')'.
    begin_line_indices = findall(
        contains("Decay("),
        file_contents
    )
    end_line_indices = map(
        begin_line_index -> findnext(endswith(')'), file_contents, begin_line_index),
        begin_line_indices
    )
    decay_str_list = String[]
    for (begin_line_index, end_line_index) ∈ zip(begin_line_indices, end_line_indices)
        text = join(file_contents[begin_line_index:end_line_index], "")
        # Python single-quoted strings -> Julia double-quoted strings.
        text = replace(text, ''' => '"')
        # Expand the UFO particles-module abbreviation.
        text = replace(text,
            "P." => "Particles.",
        )
        # Python dict literal -> Dict constructor.
        # NOTE(review): this replaces EVERY ':' in the entry, not only dict
        # separators — presumably safe for UFO decay tables; confirm.
        text = replace(text,
            "{" => "Dict{Tuple, String}(",
            ":" => "=>",
            "}" => ")"
        )
        # Python/cmath operators and calls -> Julia equivalents.
        text = replace(text,
            "**" => "^",
            "cmath." => "",
            "complexconjugate" => "conj",
            ".*" => ". *"
        )
        push!(decay_str_list, (string ∘ Meta.parse)(text))
    end
    pushfirst!(decay_str_list, "import ..Particles\n\n")
    return decay_str_list
end
# Read form factors from `form_factors.py`, if present.
#
# Parsing of form factors is not implemented yet, so an empty list is always
# returned. Previously the function fell through and returned `nothing`
# whenever the file existed, which failed conversion to the declared
# `Vector{String}` return type.
function read_form_factors(model_path::String)::Vector{String}
    file_path = joinpath(model_path, "form_factors.py")
    if !isfile(file_path)
        return String[]
    end
    # TODO: actually parse the file once form-factor support is implemented.
    return String[]
end
# Parse `lorentz.py` and return Julia-source strings, prefixed with the
# `FormFactors` import the generated module needs.
function read_lorentz(model_path::String)::Vector{String}
    file_path = joinpath(model_path, "lorentz.py")
    @assert isfile(file_path)
    lines = readlines(file_path)

    # Each entry starts on a line containing "Lorentz(" and runs to the
    # next line that ends with ')'.
    starts = findall(contains("Lorentz("), lines)
    stops = [findnext(endswith(')'), lines, start) for start ∈ starts]

    result = String[]
    for (start, stop) ∈ zip(starts, stops)
        entry = join(lines[start:stop], "")
        # Python single-quoted strings -> Julia double-quoted strings.
        entry = replace(entry, ''' => '"')
        # UFO abbreviates the form-factor module as `ForFac`.
        entry = replace(entry, "ForFac" => "FormFactors")
        # Python/cmath operators and calls -> Julia equivalents.
        entry = replace(entry,
            "**" => "^",
            "cmath." => "",
            "complexconjugate" => "conj",
            ".*" => ". *"
        )
        # Round-trip through the parser to normalize formatting.
        push!(result, string(Meta.parse(entry)))
    end
    pushfirst!(result, "import ..FormFactors\n\n")
    return result
end
# Read every section of the UFO model at `model_path` into vectors of
# Julia-source strings, keyed by section name.
#
# `check_model` validates the directory first; readers for optional sections
# yield empty vectors when their files are absent.
function read_model(model_path::String)::Dict{String, Vector{String}}
    check_model(model_path)
    # Build the dictionary with the declared value type directly; the
    # previous `Union{String, Vector{String}}` value type only forced an
    # extra conversion at return (every reader yields a `Vector{String}`).
    return Dict{String, Vector{String}}(
        "particles" => read_particles(model_path),
        "couplings" => read_couplings(model_path),
        "lorentz" => read_lorentz(model_path),
        "parameters" => read_parameters(model_path),
        "vertices" => read_vertices(model_path),
        "coupling_orders" => read_coupling_orders(model_path),
        "decays" => read_decays(model_path),
        "form_factors" => read_form_factors(model_path),
        "propagators" => read_propagators(model_path),
        "CT_vertices" => read_CT_vertices(model_path)
    )
end
# Parse `parameters.py` and return one Julia-source string per
# `Parameter(...)` entry.
function read_parameters(model_path::String)::Vector{String}
    file_path = joinpath(model_path, "parameters.py")
    @assert isfile(file_path)
    lines = readlines(file_path)

    # Each entry starts on a line containing "Parameter(" and runs to the
    # next line that ends with ')'.
    starts = findall(contains("Parameter("), lines)
    stops = [findnext(endswith(')'), lines, start) for start ∈ starts]

    result = String[]
    for (start, stop) ∈ zip(starts, stops)
        entry = join(lines[start:stop], "")
        # Python single-quoted strings -> Julia double-quoted strings.
        entry = replace(entry, ''' => '"')
        # Python/cmath operators and calls -> Julia equivalents.
        entry = replace(entry,
            "**" => "^",
            "cmath." => "",
            "complexconjugate" => "conj",
            ".*" => ". *"
        )
        # Round-trip through the parser to normalize formatting.
        push!(result, string(Meta.parse(entry)))
    end
    return result
end
# Parse `particles.py` and return Julia-source strings, prefixed with the
# `Parameters` import; Python `X.anti()` calls become Julia `anti(X)`.
function read_particles(model_path::String)::Vector{String}
    file_path = joinpath(model_path, "particles.py")
    @assert isfile(file_path)
    file_contents = readlines(file_path)
    # Entries are either `Particle(...)` constructions or `X.anti()` calls;
    # each runs to the next line that ends with ')'.
    begin_line_indices = findall(
        line -> contains(line, "Particle(") || contains(line, ".anti()"),
        file_contents
    )
    end_line_indices = map(
        begin_line_index -> findnext(endswith(')'), file_contents, begin_line_index),
        begin_line_indices
    )
    particle_str_list = String[]
    for (begin_line_index, end_line_index) ∈ zip(begin_line_indices, end_line_indices)
        text = join(file_contents[begin_line_index:end_line_index], "")
        # Python single-quoted strings -> Julia double-quoted strings.
        text = replace(text, ''' => '"')
        # Python booleans -> Julia booleans.
        text = replace(text, "True" => "true", "False" => "false")
        # Expand the UFO parameters-module abbreviation.
        text = replace(text, "Param." => "Parameters.")
        # Rewrite `X.anti()` as `anti(X)`. `splitext` splits at the last dot,
        # so its first component of "X.anti()" is the particle name "X".
        anti_range = findfirst(r"\w+.anti\(\)", text)
        if !isnothing(anti_range)
            anti_text = text[anti_range]
            text = replace(text,
                anti_text => "anti(" * (first ∘ splitext)(anti_text) * ")"
            )
        end
        push!(particle_str_list, (string ∘ Meta.parse)(text))
    end
    pushfirst!(particle_str_list, "import ..Parameters\n\n")
    return particle_str_list
end
# Read propagators from `propagators.py`, if present.
#
# Parsing of propagators is not implemented yet, so an empty list is always
# returned. Previously the function fell through and returned `nothing`
# whenever the file existed, which failed conversion to the declared
# `Vector{String}` return type.
function read_propagators(model_path::String)::Vector{String}
    file_path = joinpath(model_path, "propagators.py")
    if !isfile(file_path)
        return String[]
    end
    # TODO: actually parse the file once propagator support is implemented.
    return String[]
end
# Parse `vertices.py` and return Julia-source strings, prefixed with the
# imports the generated module needs; the Python couplings dict becomes a
# `Dict{Tuple{Int, Int}, Coupling}` constructor call.
function read_vertices(model_path::String)::Vector{String}
    file_path = joinpath(model_path, "vertices.py")
    @assert isfile(file_path)
    file_contents = readlines(file_path)
    # Each entry starts on a line containing "Vertex(" and runs to the next
    # line that ends with ')'.
    begin_line_indices = findall(
        contains("Vertex("),
        file_contents
    )
    end_line_indices = map(
        begin_line_index -> findnext(endswith(')'), file_contents, begin_line_index),
        begin_line_indices
    )
    vertex_str_list = String[]
    for (begin_line_index, end_line_index) ∈ zip(begin_line_indices, end_line_indices)
        text = join(file_contents[begin_line_index:end_line_index], "")
        # Python single-quoted strings -> Julia double-quoted strings.
        text = replace(text, ''' => '"')
        # Expand the UFO module abbreviations.
        text = replace(text,
            "P." => "Particles.",
            "L." => "LorentzIndices.",
            "C." => "Couplings."
        )
        # Rewrite the Python dict literal `{(color,lorentz):coupling, ...}`
        # into a Dict constructor, pairing index tuples with couplings.
        ori_str_range = findfirst(r"\{.+\}", text)
        @assert !isnothing(ori_str_range)
        ori_str = text[ori_str_range]
        spin_color_pair_range_list = findall(r"\(\d+,\d+\)", ori_str)
        coupling_range_list = findall(r"Couplings.\w+", ori_str)
        fin_str = "Dict{Tuple{Int, Int}, Coupling}(" * join(
            [
                ori_str[spin_color_pair_range] * " => " * ori_str[coupling_range]
                for (spin_color_pair_range, coupling_range) ∈ zip(spin_color_pair_range_list, coupling_range_list)
            ], ", "
        ) * ")"
        text = replace(text, ori_str => fin_str)
        # Round-trip through the parser to normalize formatting.
        push!(vertex_str_list, (string ∘ Meta.parse)(text))
    end
    pushfirst!(vertex_str_list, "import ..Particles\nimport ..Couplings\nimport ..LorentzIndices\n\n")
    return vertex_str_list
end
| UniversalFeynRulesOutput | https://github.com/Fenyutanchan/UniversalFeynRulesOutput.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 2884 | function write_model(model_path::String, contents::Dict{String, Vector{String}})::String
jl_model_path = model_path * ".jl"
    model_name = (last ∘ splitdir)(model_path)
    # Start from a clean slate: discard any previous generation.
    if ispath(jl_model_path)
        rm(jl_model_path; force=true, recursive=true)
    end
    Pkg.generate(jl_model_path)
    # Shared object definitions shipped with this package under ext/.
    ext_path = joinpath((dirname ∘ dirname ∘ pathof)(@__MODULE__), "ext")
    ext_files = ["objects"]
    model_src_path = joinpath(jl_model_path, "src")
    main_model_jl = joinpath(model_src_path, (last ∘ splitdir)(jl_model_path))
    for file ∈ ext_files
        cp(
            joinpath(ext_path, "$file.jl"),
            joinpath(model_src_path, "$file.jl");
            force=true
        )
    end
    # Emit one submodule file per UFO section, each exporting `all_<section>`.
    for key ∈ keys(contents)
        module_name = make_module_name(key)
        file_head = """
        module $module_name
        using ..Objects
        export all_$key
        """
        file_end = "\n\nend # $module_name"
        file_path = joinpath(model_src_path, "$key.jl")
        open(file_path, "w") do io
            # Entry names are the left-hand sides of the generated
            # assignments; import lines are skipped.
            entries = [(first ∘ split)(line, " = ") for line ∈ filter(!contains("import"), contents[key])]
            write(io,
                file_head * join(
                    contents[key], "\n"
                ) * "\n\n" * "all_$key = (\n " * join(
                    ["$entry = $entry" for entry ∈ entries], ",\n "
                ) * "\n)" * file_end
            )
        end
    end
    # Emit the top-level module: exports, ext includes, then submodules.
    open(main_model_jl, "w") do io
        file_contents = "module $model_name\n\n"
        for key ∈ keys(contents)
            file_contents *= "export all_$key\n"
        end
        file_contents *= "\n"
        file_contents *= join(["include(\"$file.jl\")\nusing .$(make_module_name(file))" for file ∈ ext_files], "\n") * "\n\n"
        # Include order matters: later sections reference earlier ones.
        ordered_including = [
            "parameters",
            "particles",
            "form_factors",
            "lorentz",
            "couplings",
        ]
        # Remaining sections are appended after the ordered ones.
        all_keys = push!(
            ordered_including,
            setdiff(keys(contents), ordered_including)...
        )
        for key ∈ all_keys
            file_contents *= "include(\"$key.jl\")\nusing .$(make_module_name(key))\n"
        end
        file_contents *= "\nend # $model_name"
        write(io, file_contents)
    end
    println("The Julia model is generated at $jl_model_path.")
    return jl_model_path
end
# Convert a snake_case UFO section name into an UpperCamelCase module name,
# e.g. "coupling_orders" -> "CouplingOrders". "lorentz" maps to
# "LorentzIndices" to avoid clashing with the `Lorentz` object type.
#
# The previous implementation replaced "_x" patterns using indices computed
# on the original string; each replacement shortens the string, so the stale
# indices corrupted names with more than one underscore, and a trailing
# underscore threw a BoundsError.
function make_module_name(input::String)::String
    module_name = join(uppercasefirst.(split(input, '_')))
    if module_name == "Lorentz"
        module_name *= "Indices"
    end
    return module_name
end
| UniversalFeynRulesOutput | https://github.com/Fenyutanchan/UniversalFeynRulesOutput.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 56 | using UniversalFeynRulesOutput
# Convert the Standard Model UFO directory in the working directory.
convert_model("./sm")
| UniversalFeynRulesOutput | https://github.com/Fenyutanchan/UniversalFeynRulesOutput.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 110 | module CTVertices
using ..Objects
export all_CT_vertices
# Empty named tuple: this generated model defines no counterterm vertices.
all_CT_vertices = (
)
end # CTVertices | UniversalFeynRulesOutput | https://github.com/Fenyutanchan/UniversalFeynRulesOutput.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 291 | module CouplingOrders
using ..Objects
export all_coupling_orders
# Generated coupling-order definitions for this model.
QCD = CouplingOrder(name = "QCD", expansion_order = 99, hierarchy = 1)
QED = CouplingOrder(name = "QED", expansion_order = 99, hierarchy = 2)
# Named tuple collecting every coupling order defined in this module.
all_coupling_orders = (
    QCD = QCD,
    QED = QED
)
end # CouplingOrders | UniversalFeynRulesOutput | https://github.com/Fenyutanchan/UniversalFeynRulesOutput.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 14030 | module Couplings
# Coupling constants of the model (auto-generated from the UFO files).
# Each `Coupling` stores its analytic value as an expression string together
# with its perturbative order in QED and/or QCD.
using ..Objects
export all_couplings
GC_1 = Coupling(name = "GC_1", value = "-(ee*complex(0,1))/3.", order = Dict{String, Int}("QED" => 1))
GC_2 = Coupling(name = "GC_2", value = "(2*ee*complex(0,1))/3.", order = Dict{String, Int}("QED" => 1))
GC_3 = Coupling(name = "GC_3", value = "-(ee*complex(0,1))", order = Dict{String, Int}("QED" => 1))
GC_4 = Coupling(name = "GC_4", value = "ee*complex(0,1)", order = Dict{String, Int}("QED" => 1))
GC_5 = Coupling(name = "GC_5", value = "ee^2*complex(0,1)", order = Dict{String, Int}("QED" => 2))
GC_6 = Coupling(name = "GC_6", value = "2*ee^2*complex(0,1)", order = Dict{String, Int}("QED" => 2))
GC_7 = Coupling(name = "GC_7", value = "-ee^2/(2. *cw)", order = Dict{String, Int}("QED" => 2))
GC_8 = Coupling(name = "GC_8", value = "(ee^2*complex(0,1))/(2. *cw)", order = Dict{String, Int}("QED" => 2))
GC_9 = Coupling(name = "GC_9", value = "ee^2/(2. *cw)", order = Dict{String, Int}("QED" => 2))
GC_10 = Coupling(name = "GC_10", value = "-G", order = Dict{String, Int}("QCD" => 1))
GC_11 = Coupling(name = "GC_11", value = "complex(0,1)*G", order = Dict{String, Int}("QCD" => 1))
GC_12 = Coupling(name = "GC_12", value = "complex(0,1)*G^2", order = Dict{String, Int}("QCD" => 2))
GC_13 = Coupling(name = "GC_13", value = "I1x31", order = Dict{String, Int}("QED" => 1))
GC_14 = Coupling(name = "GC_14", value = "I1x32", order = Dict{String, Int}("QED" => 1))
GC_15 = Coupling(name = "GC_15", value = "I1x33", order = Dict{String, Int}("QED" => 1))
GC_16 = Coupling(name = "GC_16", value = "-I2x12", order = Dict{String, Int}("QED" => 1))
GC_17 = Coupling(name = "GC_17", value = "-I2x13", order = Dict{String, Int}("QED" => 1))
GC_18 = Coupling(name = "GC_18", value = "-I2x22", order = Dict{String, Int}("QED" => 1))
GC_19 = Coupling(name = "GC_19", value = "-I2x23", order = Dict{String, Int}("QED" => 1))
GC_20 = Coupling(name = "GC_20", value = "-I2x32", order = Dict{String, Int}("QED" => 1))
GC_21 = Coupling(name = "GC_21", value = "-I2x33", order = Dict{String, Int}("QED" => 1))
GC_22 = Coupling(name = "GC_22", value = "I3x21", order = Dict{String, Int}("QED" => 1))
GC_23 = Coupling(name = "GC_23", value = "I3x22", order = Dict{String, Int}("QED" => 1))
GC_24 = Coupling(name = "GC_24", value = "I3x23", order = Dict{String, Int}("QED" => 1))
GC_25 = Coupling(name = "GC_25", value = "I3x31", order = Dict{String, Int}("QED" => 1))
GC_26 = Coupling(name = "GC_26", value = "I3x32", order = Dict{String, Int}("QED" => 1))
GC_27 = Coupling(name = "GC_27", value = "I3x33", order = Dict{String, Int}("QED" => 1))
GC_28 = Coupling(name = "GC_28", value = "-I4x13", order = Dict{String, Int}("QED" => 1))
GC_29 = Coupling(name = "GC_29", value = "-I4x23", order = Dict{String, Int}("QED" => 1))
GC_30 = Coupling(name = "GC_30", value = "-I4x33", order = Dict{String, Int}("QED" => 1))
GC_31 = Coupling(name = "GC_31", value = "-2*complex(0,1)*lam", order = Dict{String, Int}("QED" => 2))
GC_32 = Coupling(name = "GC_32", value = "-4*complex(0,1)*lam", order = Dict{String, Int}("QED" => 2))
GC_33 = Coupling(name = "GC_33", value = "-6*complex(0,1)*lam", order = Dict{String, Int}("QED" => 2))
GC_34 = Coupling(name = "GC_34", value = "(ee^2*complex(0,1))/(2. *sw^2)", order = Dict{String, Int}("QED" => 2))
GC_35 = Coupling(name = "GC_35", value = "-((ee^2*complex(0,1))/sw^2)", order = Dict{String, Int}("QED" => 2))
GC_36 = Coupling(name = "GC_36", value = "(cw^2*ee^2*complex(0,1))/sw^2", order = Dict{String, Int}("QED" => 2))
GC_37 = Coupling(name = "GC_37", value = "-ee/(2. *sw)", order = Dict{String, Int}("QED" => 1))
GC_38 = Coupling(name = "GC_38", value = "-(ee*complex(0,1))/(2. *sw)", order = Dict{String, Int}("QED" => 1))
GC_39 = Coupling(name = "GC_39", value = "(ee*complex(0,1))/(2. *sw)", order = Dict{String, Int}("QED" => 1))
GC_40 = Coupling(name = "GC_40", value = "(ee*complex(0,1))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_41 = Coupling(name = "GC_41", value = "(CKM1x1*ee*complex(0,1))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_42 = Coupling(name = "GC_42", value = "(CKM1x2*ee*complex(0,1))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_43 = Coupling(name = "GC_43", value = "(CKM1x3*ee*complex(0,1))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_44 = Coupling(name = "GC_44", value = "(CKM2x1*ee*complex(0,1))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_45 = Coupling(name = "GC_45", value = "(CKM2x2*ee*complex(0,1))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_46 = Coupling(name = "GC_46", value = "(CKM2x3*ee*complex(0,1))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_47 = Coupling(name = "GC_47", value = "(CKM3x1*ee*complex(0,1))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_48 = Coupling(name = "GC_48", value = "(CKM3x2*ee*complex(0,1))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_49 = Coupling(name = "GC_49", value = "(CKM3x3*ee*complex(0,1))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_50 = Coupling(name = "GC_50", value = "-(cw*ee*complex(0,1))/(2. *sw)", order = Dict{String, Int}("QED" => 1))
GC_51 = Coupling(name = "GC_51", value = "(cw*ee*complex(0,1))/(2. *sw)", order = Dict{String, Int}("QED" => 1))
GC_52 = Coupling(name = "GC_52", value = "-((cw*ee*complex(0,1))/sw)", order = Dict{String, Int}("QED" => 1))
GC_53 = Coupling(name = "GC_53", value = "(cw*ee*complex(0,1))/sw", order = Dict{String, Int}("QED" => 1))
GC_54 = Coupling(name = "GC_54", value = "-ee^2/(2. *sw)", order = Dict{String, Int}("QED" => 2))
GC_55 = Coupling(name = "GC_55", value = "-(ee^2*complex(0,1))/(2. *sw)", order = Dict{String, Int}("QED" => 2))
GC_56 = Coupling(name = "GC_56", value = "ee^2/(2. *sw)", order = Dict{String, Int}("QED" => 2))
GC_57 = Coupling(name = "GC_57", value = "(-2*cw*ee^2*complex(0,1))/sw", order = Dict{String, Int}("QED" => 2))
GC_58 = Coupling(name = "GC_58", value = "-(ee*complex(0,1)*sw)/(6. *cw)", order = Dict{String, Int}("QED" => 1))
GC_59 = Coupling(name = "GC_59", value = "(ee*complex(0,1)*sw)/(2. *cw)", order = Dict{String, Int}("QED" => 1))
GC_60 = Coupling(name = "GC_60", value = "-(cw*ee)/(2. *sw) - (ee*sw)/(2. *cw)", order = Dict{String, Int}("QED" => 1))
GC_61 = Coupling(name = "GC_61", value = "-(cw*ee*complex(0,1))/(2. *sw) + (ee*complex(0,1)*sw)/(2. *cw)", order = Dict{String, Int}("QED" => 1))
GC_62 = Coupling(name = "GC_62", value = "(cw*ee*complex(0,1))/(2. *sw) + (ee*complex(0,1)*sw)/(2. *cw)", order = Dict{String, Int}("QED" => 1))
GC_63 = Coupling(name = "GC_63", value = "(cw*ee^2*complex(0,1))/sw - (ee^2*complex(0,1)*sw)/cw", order = Dict{String, Int}("QED" => 2))
GC_64 = Coupling(name = "GC_64", value = "-(ee^2*complex(0,1)) + (cw^2*ee^2*complex(0,1))/(2. *sw^2) + (ee^2*complex(0,1)*sw^2)/(2. *cw^2)", order = Dict{String, Int}("QED" => 2))
GC_65 = Coupling(name = "GC_65", value = "ee^2*complex(0,1) + (cw^2*ee^2*complex(0,1))/(2. *sw^2) + (ee^2*complex(0,1)*sw^2)/(2. *cw^2)", order = Dict{String, Int}("QED" => 2))
GC_66 = Coupling(name = "GC_66", value = "-(ee^2*vev)/(2. *cw)", order = Dict{String, Int}("QED" => 1))
GC_67 = Coupling(name = "GC_67", value = "(ee^2*vev)/(2. *cw)", order = Dict{String, Int}("QED" => 1))
GC_68 = Coupling(name = "GC_68", value = "-2*complex(0,1)*lam*vev", order = Dict{String, Int}("QED" => 1))
GC_69 = Coupling(name = "GC_69", value = "-6*complex(0,1)*lam*vev", order = Dict{String, Int}("QED" => 1))
GC_70 = Coupling(name = "GC_70", value = "-(ee^2*vev)/(4. *sw^2)", order = Dict{String, Int}("QED" => 1))
GC_71 = Coupling(name = "GC_71", value = "-(ee^2*complex(0,1)*vev)/(4. *sw^2)", order = Dict{String, Int}("QED" => 1))
GC_72 = Coupling(name = "GC_72", value = "(ee^2*complex(0,1)*vev)/(2. *sw^2)", order = Dict{String, Int}("QED" => 1))
GC_73 = Coupling(name = "GC_73", value = "(ee^2*vev)/(4. *sw^2)", order = Dict{String, Int}("QED" => 1))
GC_74 = Coupling(name = "GC_74", value = "-(ee^2*vev)/(2. *sw)", order = Dict{String, Int}("QED" => 1))
GC_75 = Coupling(name = "GC_75", value = "(ee^2*vev)/(2. *sw)", order = Dict{String, Int}("QED" => 1))
GC_76 = Coupling(name = "GC_76", value = "-(ee^2*vev)/(4. *cw) - (cw*ee^2*vev)/(4. *sw^2)", order = Dict{String, Int}("QED" => 1))
GC_77 = Coupling(name = "GC_77", value = "(ee^2*vev)/(4. *cw) - (cw*ee^2*vev)/(4. *sw^2)", order = Dict{String, Int}("QED" => 1))
GC_78 = Coupling(name = "GC_78", value = "-(ee^2*vev)/(4. *cw) + (cw*ee^2*vev)/(4. *sw^2)", order = Dict{String, Int}("QED" => 1))
GC_79 = Coupling(name = "GC_79", value = "(ee^2*vev)/(4. *cw) + (cw*ee^2*vev)/(4. *sw^2)", order = Dict{String, Int}("QED" => 1))
GC_80 = Coupling(name = "GC_80", value = "-(ee^2*complex(0,1)*vev)/2. - (cw^2*ee^2*complex(0,1)*vev)/(4. *sw^2) - (ee^2*complex(0,1)*sw^2*vev)/(4. *cw^2)", order = Dict{String, Int}("QED" => 1))
GC_81 = Coupling(name = "GC_81", value = "ee^2*complex(0,1)*vev + (cw^2*ee^2*complex(0,1)*vev)/(2. *sw^2) + (ee^2*complex(0,1)*sw^2*vev)/(2. *cw^2)", order = Dict{String, Int}("QED" => 1))
GC_82 = Coupling(name = "GC_82", value = "-(yb/sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_83 = Coupling(name = "GC_83", value = "-((complex(0,1)*yb)/sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_84 = Coupling(name = "GC_84", value = "-((complex(0,1)*yc)/sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_85 = Coupling(name = "GC_85", value = "yc/sqrt(2)", order = Dict{String, Int}("QED" => 1))
GC_86 = Coupling(name = "GC_86", value = "-ye", order = Dict{String, Int}("QED" => 1))
GC_87 = Coupling(name = "GC_87", value = "ye", order = Dict{String, Int}("QED" => 1))
GC_88 = Coupling(name = "GC_88", value = "-(ye/sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_89 = Coupling(name = "GC_89", value = "-((complex(0,1)*ye)/sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_90 = Coupling(name = "GC_90", value = "-ym", order = Dict{String, Int}("QED" => 1))
GC_91 = Coupling(name = "GC_91", value = "ym", order = Dict{String, Int}("QED" => 1))
GC_92 = Coupling(name = "GC_92", value = "-(ym/sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_93 = Coupling(name = "GC_93", value = "-((complex(0,1)*ym)/sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_94 = Coupling(name = "GC_94", value = "-((complex(0,1)*yt)/sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_95 = Coupling(name = "GC_95", value = "yt/sqrt(2)", order = Dict{String, Int}("QED" => 1))
GC_96 = Coupling(name = "GC_96", value = "-ytau", order = Dict{String, Int}("QED" => 1))
GC_97 = Coupling(name = "GC_97", value = "ytau", order = Dict{String, Int}("QED" => 1))
GC_98 = Coupling(name = "GC_98", value = "-(ytau/sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_99 = Coupling(name = "GC_99", value = "-((complex(0,1)*ytau)/sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_100 = Coupling(name = "GC_100", value = "(ee*complex(0,1)*conj(CKM1x1))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_101 = Coupling(name = "GC_101", value = "(ee*complex(0,1)*conj(CKM1x2))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_102 = Coupling(name = "GC_102", value = "(ee*complex(0,1)*conj(CKM1x3))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_103 = Coupling(name = "GC_103", value = "(ee*complex(0,1)*conj(CKM2x1))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_104 = Coupling(name = "GC_104", value = "(ee*complex(0,1)*conj(CKM2x2))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_105 = Coupling(name = "GC_105", value = "(ee*complex(0,1)*conj(CKM2x3))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_106 = Coupling(name = "GC_106", value = "(ee*complex(0,1)*conj(CKM3x1))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_107 = Coupling(name = "GC_107", value = "(ee*complex(0,1)*conj(CKM3x2))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
GC_108 = Coupling(name = "GC_108", value = "(ee*complex(0,1)*conj(CKM3x3))/(sw*sqrt(2))", order = Dict{String, Int}("QED" => 1))
# Collect `GC_1` … `GC_108` into a named tuple without hand-writing the 108
# repetitive `GC_n = GC_n` entries.  The result is identical (same names,
# order, and values) to the previous literal `(GC_1 = GC_1, …, GC_108 = GC_108)`.
const _coupling_names = Tuple(Symbol(:GC_, i) for i ∈ 1:108)
all_couplings = NamedTuple{_coupling_names}(
    Tuple(getfield(@__MODULE__, s) for s ∈ _coupling_names)
)
end # Couplings
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 11344 | module Decays
# Tree-level two-body partial widths (auto-generated from the UFO model).
# Each `Decay` maps a decaying particle to a Dict from the tuple of decay
# products to the analytic partial-width expression, stored as a string.
using ..Objects
export all_decays
import ..Particles
# Higgs boson partial widths (WW, ZZ and fermion pairs).
Decay_H = Decay(name = "Decay_H", particle = Particles.H, partial_widths = Dict{Tuple, String}((Particles.W__minus__, Particles.W__plus__) => "(((3*ee^4*vev^2)/(4. *sw^4) + (ee^4*MH^4*vev^2)/(16. *MW^4*sw^4) - (ee^4*MH^2*vev^2)/(4. *MW^2*sw^4))*sqrt(MH^4 - 4*MH^2*MW^2))/(16. *pi*abs(MH)^3)", (Particles.Z, Particles.Z) => "(((9*ee^4*vev^2)/2. + (3*ee^4*MH^4*vev^2)/(8. *MZ^4) - (3*ee^4*MH^2*vev^2)/(2. *MZ^2) + (3*cw^4*ee^4*vev^2)/(4. *sw^4) + (cw^4*ee^4*MH^4*vev^2)/(16. *MZ^4*sw^4) - (cw^4*ee^4*MH^2*vev^2)/(4. *MZ^2*sw^4) + (3*cw^2*ee^4*vev^2)/sw^2 + (cw^2*ee^4*MH^4*vev^2)/(4. *MZ^4*sw^2) - (cw^2*ee^4*MH^2*vev^2)/(MZ^2*sw^2) + (3*ee^4*sw^2*vev^2)/cw^2 + (ee^4*MH^4*sw^2*vev^2)/(4. *cw^2*MZ^4) - (ee^4*MH^2*sw^2*vev^2)/(cw^2*MZ^2) + (3*ee^4*sw^4*vev^2)/(4. *cw^4) + (ee^4*MH^4*sw^4*vev^2)/(16. *cw^4*MZ^4) - (ee^4*MH^2*sw^4*vev^2)/(4. *cw^4*MZ^2))*sqrt(MH^4 - 4*MH^2*MZ^2))/(32. *pi*abs(MH)^3)", (Particles.b, Particles.b__tilde__) => "((-12*MB^2*yb^2 + 3*MH^2*yb^2)*sqrt(-4*MB^2*MH^2 + MH^4))/(16. *pi*abs(MH)^3)", (Particles.e__minus__, Particles.e__plus__) => "((-4*Me^2*ye^2 + MH^2*ye^2)*sqrt(-4*Me^2*MH^2 + MH^4))/(16. *pi*abs(MH)^3)", (Particles.mu__minus__, Particles.mu__plus__) => "((MH^2*ym^2 - 4*MM^2*ym^2)*sqrt(MH^4 - 4*MH^2*MM^2))/(16. *pi*abs(MH)^3)", (Particles.ta__minus__, Particles.ta__plus__) => "((MH^2*ytau^2 - 4*MTA^2*ytau^2)*sqrt(MH^4 - 4*MH^2*MTA^2))/(16. *pi*abs(MH)^3)", (Particles.c, Particles.c__tilde__) => "((-12*MC^2*yc^2 + 3*MH^2*yc^2)*sqrt(-4*MC^2*MH^2 + MH^4))/(16. *pi*abs(MH)^3)", (Particles.t, Particles.t__tilde__) => "((3*MH^2*yt^2 - 12*MT^2*yt^2)*sqrt(MH^4 - 4*MH^2*MT^2))/(16. *pi*abs(MH)^3)"))
# Z boson partial widths (WW and all fermion pairs, including neutrinos).
Decay_Z = Decay(name = "Decay_Z", particle = Particles.Z, partial_widths = Dict{Tuple, String}((Particles.W__minus__, Particles.W__plus__) => "(((-12*cw^2*ee^2*MW^2)/sw^2 - (17*cw^2*ee^2*MZ^2)/sw^2 + (4*cw^2*ee^2*MZ^4)/(MW^2*sw^2) + (cw^2*ee^2*MZ^6)/(4. *MW^4*sw^2))*sqrt(-4*MW^2*MZ^2 + MZ^4))/(48. *pi*abs(MZ)^3)", (Particles.d, Particles.d__tilde__) => "(MZ^2*(ee^2*MZ^2 + (3*cw^2*ee^2*MZ^2)/(2. *sw^2) + (5*ee^2*MZ^2*sw^2)/(6. *cw^2)))/(48. *pi*abs(MZ)^3)", (Particles.s, Particles.s__tilde__) => "(MZ^2*(ee^2*MZ^2 + (3*cw^2*ee^2*MZ^2)/(2. *sw^2) + (5*ee^2*MZ^2*sw^2)/(6. *cw^2)))/(48. *pi*abs(MZ)^3)", (Particles.b, Particles.b__tilde__) => "((-7*ee^2*MB^2 + ee^2*MZ^2 - (3*cw^2*ee^2*MB^2)/(2. *sw^2) + (3*cw^2*ee^2*MZ^2)/(2. *sw^2) - (17*ee^2*MB^2*sw^2)/(6. *cw^2) + (5*ee^2*MZ^2*sw^2)/(6. *cw^2))*sqrt(-4*MB^2*MZ^2 + MZ^4))/(48. *pi*abs(MZ)^3)", (Particles.e__minus__, Particles.e__plus__) => "((-5*ee^2*Me^2 - ee^2*MZ^2 - (cw^2*ee^2*Me^2)/(2. *sw^2) + (cw^2*ee^2*MZ^2)/(2. *sw^2) + (7*ee^2*Me^2*sw^2)/(2. *cw^2) + (5*ee^2*MZ^2*sw^2)/(2. *cw^2))*sqrt(-4*Me^2*MZ^2 + MZ^4))/(48. *pi*abs(MZ)^3)", (Particles.mu__minus__, Particles.mu__plus__) => "((-5*ee^2*MM^2 - ee^2*MZ^2 - (cw^2*ee^2*MM^2)/(2. *sw^2) + (cw^2*ee^2*MZ^2)/(2. *sw^2) + (7*ee^2*MM^2*sw^2)/(2. *cw^2) + (5*ee^2*MZ^2*sw^2)/(2. *cw^2))*sqrt(-4*MM^2*MZ^2 + MZ^4))/(48. *pi*abs(MZ)^3)", (Particles.ta__minus__, Particles.ta__plus__) => "((-5*ee^2*MTA^2 - ee^2*MZ^2 - (cw^2*ee^2*MTA^2)/(2. *sw^2) + (cw^2*ee^2*MZ^2)/(2. *sw^2) + (7*ee^2*MTA^2*sw^2)/(2. *cw^2) + (5*ee^2*MZ^2*sw^2)/(2. *cw^2))*sqrt(-4*MTA^2*MZ^2 + MZ^4))/(48. *pi*abs(MZ)^3)", (Particles.u, Particles.u__tilde__) => "(MZ^2*(-(ee^2*MZ^2) + (3*cw^2*ee^2*MZ^2)/(2. *sw^2) + (17*ee^2*MZ^2*sw^2)/(6. *cw^2)))/(48. *pi*abs(MZ)^3)", (Particles.c, Particles.c__tilde__) => "((-11*ee^2*MC^2 - ee^2*MZ^2 - (3*cw^2*ee^2*MC^2)/(2. *sw^2) + (3*cw^2*ee^2*MZ^2)/(2. *sw^2) + (7*ee^2*MC^2*sw^2)/(6. *cw^2) + (17*ee^2*MZ^2*sw^2)/(6. *cw^2))*sqrt(-4*MC^2*MZ^2 + MZ^4))/(48. 
*pi*abs(MZ)^3)", (Particles.t, Particles.t__tilde__) => "((-11*ee^2*MT^2 - ee^2*MZ^2 - (3*cw^2*ee^2*MT^2)/(2. *sw^2) + (3*cw^2*ee^2*MZ^2)/(2. *sw^2) + (7*ee^2*MT^2*sw^2)/(6. *cw^2) + (17*ee^2*MZ^2*sw^2)/(6. *cw^2))*sqrt(-4*MT^2*MZ^2 + MZ^4))/(48. *pi*abs(MZ)^3)", (Particles.ve, Particles.ve__tilde__) => "(MZ^2*(ee^2*MZ^2 + (cw^2*ee^2*MZ^2)/(2. *sw^2) + (ee^2*MZ^2*sw^2)/(2. *cw^2)))/(48. *pi*abs(MZ)^3)", (Particles.vm, Particles.vm__tilde__) => "(MZ^2*(ee^2*MZ^2 + (cw^2*ee^2*MZ^2)/(2. *sw^2) + (ee^2*MZ^2*sw^2)/(2. *cw^2)))/(48. *pi*abs(MZ)^3)", (Particles.vt, Particles.vt__tilde__) => "(MZ^2*(ee^2*MZ^2 + (cw^2*ee^2*MZ^2)/(2. *sw^2) + (ee^2*MZ^2*sw^2)/(2. *cw^2)))/(48. *pi*abs(MZ)^3)"))
# Charm quark: CKM-weighted W + down-type quark channels.
Decay_c = Decay(name = "Decay_c", particle = Particles.c, partial_widths = Dict{Tuple, String}((Particles.W__plus__, Particles.d) => "((MC^2 - MW^2)*((3*CKM2x1*ee^2*MC^2*conj(CKM2x1))/(2. *sw^2) + (3*CKM2x1*ee^2*MC^4*conj(CKM2x1))/(2. *MW^2*sw^2) - (3*CKM2x1*ee^2*MW^2*conj(CKM2x1))/sw^2))/(96. *pi*abs(MC)^3)", (Particles.W__plus__, Particles.s) => "((MC^2 - MW^2)*((3*CKM2x2*ee^2*MC^2*conj(CKM2x2))/(2. *sw^2) + (3*CKM2x2*ee^2*MC^4*conj(CKM2x2))/(2. *MW^2*sw^2) - (3*CKM2x2*ee^2*MW^2*conj(CKM2x2))/sw^2))/(96. *pi*abs(MC)^3)", (Particles.W__plus__, Particles.b) => "(((3*CKM2x3*ee^2*MB^2*conj(CKM2x3))/(2. *sw^2) + (3*CKM2x3*ee^2*MC^2*conj(CKM2x3))/(2. *sw^2) + (3*CKM2x3*ee^2*MB^4*conj(CKM2x3))/(2. *MW^2*sw^2) - (3*CKM2x3*ee^2*MB^2*MC^2*conj(CKM2x3))/(MW^2*sw^2) + (3*CKM2x3*ee^2*MC^4*conj(CKM2x3))/(2. *MW^2*sw^2) - (3*CKM2x3*ee^2*MW^2*conj(CKM2x3))/sw^2)*sqrt(MB^4 - 2*MB^2*MC^2 + MC^4 - 2*MB^2*MW^2 - 2*MC^2*MW^2 + MW^4))/(96. *pi*abs(MC)^3)"))
# Top quark: CKM-weighted W + down-type quark channels.
Decay_t = Decay(name = "Decay_t", particle = Particles.t, partial_widths = Dict{Tuple, String}((Particles.W__plus__, Particles.d) => "((MT^2 - MW^2)*((3*CKM3x1*ee^2*MT^2*conj(CKM3x1))/(2. *sw^2) + (3*CKM3x1*ee^2*MT^4*conj(CKM3x1))/(2. *MW^2*sw^2) - (3*CKM3x1*ee^2*MW^2*conj(CKM3x1))/sw^2))/(96. *pi*abs(MT)^3)", (Particles.W__plus__, Particles.s) => "((MT^2 - MW^2)*((3*CKM3x2*ee^2*MT^2*conj(CKM3x2))/(2. *sw^2) + (3*CKM3x2*ee^2*MT^4*conj(CKM3x2))/(2. *MW^2*sw^2) - (3*CKM3x2*ee^2*MW^2*conj(CKM3x2))/sw^2))/(96. *pi*abs(MT)^3)", (Particles.W__plus__, Particles.b) => "(((3*CKM3x3*ee^2*MB^2*conj(CKM3x3))/(2. *sw^2) + (3*CKM3x3*ee^2*MT^2*conj(CKM3x3))/(2. *sw^2) + (3*CKM3x3*ee^2*MB^4*conj(CKM3x3))/(2. *MW^2*sw^2) - (3*CKM3x3*ee^2*MB^2*MT^2*conj(CKM3x3))/(MW^2*sw^2) + (3*CKM3x3*ee^2*MT^4*conj(CKM3x3))/(2. *MW^2*sw^2) - (3*CKM3x3*ee^2*MW^2*conj(CKM3x3))/sw^2)*sqrt(MB^4 - 2*MB^2*MT^2 + MT^4 - 2*MB^2*MW^2 - 2*MT^2*MW^2 + MW^4))/(96. *pi*abs(MT)^3)"))
# W+ boson: CKM-weighted quark channels plus the three leptonic channels.
Decay_W__plus__ = Decay(name = "Decay_W__plus__", particle = Particles.W__plus__, partial_widths = Dict{Tuple, String}((Particles.u, Particles.d__tilde__) => "(CKM1x1*ee^2*MW^4*conj(CKM1x1))/(16. *pi*sw^2*abs(MW)^3)", (Particles.u, Particles.s__tilde__) => "(CKM1x2*ee^2*MW^4*conj(CKM1x2))/(16. *pi*sw^2*abs(MW)^3)", (Particles.u, Particles.b__tilde__) => "((-MB^2 + MW^2)*((-3*CKM1x3*ee^2*MB^2*conj(CKM1x3))/(2. *sw^2) - (3*CKM1x3*ee^2*MB^4*conj(CKM1x3))/(2. *MW^2*sw^2) + (3*CKM1x3*ee^2*MW^2*conj(CKM1x3))/sw^2))/(48. *pi*abs(MW)^3)", (Particles.c, Particles.d__tilde__) => "((-MC^2 + MW^2)*((-3*CKM2x1*ee^2*MC^2*conj(CKM2x1))/(2. *sw^2) - (3*CKM2x1*ee^2*MC^4*conj(CKM2x1))/(2. *MW^2*sw^2) + (3*CKM2x1*ee^2*MW^2*conj(CKM2x1))/sw^2))/(48. *pi*abs(MW)^3)", (Particles.c, Particles.s__tilde__) => "((-MC^2 + MW^2)*((-3*CKM2x2*ee^2*MC^2*conj(CKM2x2))/(2. *sw^2) - (3*CKM2x2*ee^2*MC^4*conj(CKM2x2))/(2. *MW^2*sw^2) + (3*CKM2x2*ee^2*MW^2*conj(CKM2x2))/sw^2))/(48. *pi*abs(MW)^3)", (Particles.c, Particles.b__tilde__) => "(((-3*CKM2x3*ee^2*MB^2*conj(CKM2x3))/(2. *sw^2) - (3*CKM2x3*ee^2*MC^2*conj(CKM2x3))/(2. *sw^2) - (3*CKM2x3*ee^2*MB^4*conj(CKM2x3))/(2. *MW^2*sw^2) + (3*CKM2x3*ee^2*MB^2*MC^2*conj(CKM2x3))/(MW^2*sw^2) - (3*CKM2x3*ee^2*MC^4*conj(CKM2x3))/(2. *MW^2*sw^2) + (3*CKM2x3*ee^2*MW^2*conj(CKM2x3))/sw^2)*sqrt(MB^4 - 2*MB^2*MC^2 + MC^4 - 2*MB^2*MW^2 - 2*MC^2*MW^2 + MW^4))/(48. *pi*abs(MW)^3)", (Particles.t, Particles.d__tilde__) => "((-MT^2 + MW^2)*((-3*CKM3x1*ee^2*MT^2*conj(CKM3x1))/(2. *sw^2) - (3*CKM3x1*ee^2*MT^4*conj(CKM3x1))/(2. *MW^2*sw^2) + (3*CKM3x1*ee^2*MW^2*conj(CKM3x1))/sw^2))/(48. *pi*abs(MW)^3)", (Particles.t, Particles.s__tilde__) => "((-MT^2 + MW^2)*((-3*CKM3x2*ee^2*MT^2*conj(CKM3x2))/(2. *sw^2) - (3*CKM3x2*ee^2*MT^4*conj(CKM3x2))/(2. *MW^2*sw^2) + (3*CKM3x2*ee^2*MW^2*conj(CKM3x2))/sw^2))/(48. *pi*abs(MW)^3)", (Particles.t, Particles.b__tilde__) => "(((-3*CKM3x3*ee^2*MB^2*conj(CKM3x3))/(2. *sw^2) - (3*CKM3x3*ee^2*MT^2*conj(CKM3x3))/(2. 
*sw^2) - (3*CKM3x3*ee^2*MB^4*conj(CKM3x3))/(2. *MW^2*sw^2) + (3*CKM3x3*ee^2*MB^2*MT^2*conj(CKM3x3))/(MW^2*sw^2) - (3*CKM3x3*ee^2*MT^4*conj(CKM3x3))/(2. *MW^2*sw^2) + (3*CKM3x3*ee^2*MW^2*conj(CKM3x3))/sw^2)*sqrt(MB^4 - 2*MB^2*MT^2 + MT^4 - 2*MB^2*MW^2 - 2*MT^2*MW^2 + MW^4))/(48. *pi*abs(MW)^3)", (Particles.ve, Particles.e__plus__) => "((-Me^2 + MW^2)*(-(ee^2*Me^2)/(2. *sw^2) - (ee^2*Me^4)/(2. *MW^2*sw^2) + (ee^2*MW^2)/sw^2))/(48. *pi*abs(MW)^3)", (Particles.vm, Particles.mu__plus__) => "((-MM^2 + MW^2)*(-(ee^2*MM^2)/(2. *sw^2) - (ee^2*MM^4)/(2. *MW^2*sw^2) + (ee^2*MW^2)/sw^2))/(48. *pi*abs(MW)^3)", (Particles.vt, Particles.ta__plus__) => "((-MTA^2 + MW^2)*(-(ee^2*MTA^2)/(2. *sw^2) - (ee^2*MTA^4)/(2. *MW^2*sw^2) + (ee^2*MW^2)/sw^2))/(48. *pi*abs(MW)^3)"))
# Bottom quark: CKM-weighted W + up-type quark channels.
Decay_b = Decay(name = "Decay_b", particle = Particles.b, partial_widths = Dict{Tuple, String}((Particles.W__minus__, Particles.u) => "((MB^2 - MW^2)*((3*CKM1x3*ee^2*MB^2*conj(CKM1x3))/(2. *sw^2) + (3*CKM1x3*ee^2*MB^4*conj(CKM1x3))/(2. *MW^2*sw^2) - (3*CKM1x3*ee^2*MW^2*conj(CKM1x3))/sw^2))/(96. *pi*abs(MB)^3)", (Particles.W__minus__, Particles.c) => "(((3*CKM2x3*ee^2*MB^2*conj(CKM2x3))/(2. *sw^2) + (3*CKM2x3*ee^2*MC^2*conj(CKM2x3))/(2. *sw^2) + (3*CKM2x3*ee^2*MB^4*conj(CKM2x3))/(2. *MW^2*sw^2) - (3*CKM2x3*ee^2*MB^2*MC^2*conj(CKM2x3))/(MW^2*sw^2) + (3*CKM2x3*ee^2*MC^4*conj(CKM2x3))/(2. *MW^2*sw^2) - (3*CKM2x3*ee^2*MW^2*conj(CKM2x3))/sw^2)*sqrt(MB^4 - 2*MB^2*MC^2 + MC^4 - 2*MB^2*MW^2 - 2*MC^2*MW^2 + MW^4))/(96. *pi*abs(MB)^3)", (Particles.W__minus__, Particles.t) => "(((3*CKM3x3*ee^2*MB^2*conj(CKM3x3))/(2. *sw^2) + (3*CKM3x3*ee^2*MT^2*conj(CKM3x3))/(2. *sw^2) + (3*CKM3x3*ee^2*MB^4*conj(CKM3x3))/(2. *MW^2*sw^2) - (3*CKM3x3*ee^2*MB^2*MT^2*conj(CKM3x3))/(MW^2*sw^2) + (3*CKM3x3*ee^2*MT^4*conj(CKM3x3))/(2. *MW^2*sw^2) - (3*CKM3x3*ee^2*MW^2*conj(CKM3x3))/sw^2)*sqrt(MB^4 - 2*MB^2*MT^2 + MT^4 - 2*MB^2*MW^2 - 2*MT^2*MW^2 + MW^4))/(96. *pi*abs(MB)^3)"))
# Charged leptons: single W + neutrino channel each.
Decay_e__minus__ = Decay(name = "Decay_e__minus__", particle = Particles.e__minus__, partial_widths = Dict{Tuple, String}((Particles.W__minus__, Particles.ve) => "((Me^2 - MW^2)*((ee^2*Me^2)/(2. *sw^2) + (ee^2*Me^4)/(2. *MW^2*sw^2) - (ee^2*MW^2)/sw^2))/(32. *pi*abs(Me)^3)"))
Decay_mu__minus__ = Decay(name = "Decay_mu__minus__", particle = Particles.mu__minus__, partial_widths = Dict{Tuple, String}((Particles.W__minus__, Particles.vm) => "((MM^2 - MW^2)*((ee^2*MM^2)/(2. *sw^2) + (ee^2*MM^4)/(2. *MW^2*sw^2) - (ee^2*MW^2)/sw^2))/(32. *pi*abs(MM)^3)"))
Decay_ta__minus__ = Decay(name = "Decay_ta__minus__", particle = Particles.ta__minus__, partial_widths = Dict{Tuple, String}((Particles.W__minus__, Particles.vt) => "((MTA^2 - MW^2)*((ee^2*MTA^2)/(2. *sw^2) + (ee^2*MTA^4)/(2. *MW^2*sw^2) - (ee^2*MW^2)/sw^2))/(32. *pi*abs(MTA)^3)"))
# All decay tables of the model, addressable by name.
all_decays = (
    Decay_H = Decay_H,
    Decay_Z = Decay_Z,
    Decay_c = Decay_c,
    Decay_t = Decay_t,
    Decay_W__plus__ = Decay_W__plus__,
    Decay_b = Decay_b,
    Decay_e__minus__ = Decay_e__minus__,
    Decay_mu__minus__ = Decay_mu__minus__,
    Decay_ta__minus__ = Decay_ta__minus__
)
end # Decays
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 114 | module FormFactors
# Form factors of the model (auto-generated from the UFO files).
# The Standard Model export defines none, so the collection is empty.
using ..Objects
export all_form_factors
all_form_factors = (
)
end # FormFactors
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 2739 | module LorentzIndices
# Lorentz structures of the model (auto-generated from the UFO files).
# Names encode the legs: U = ghost, S = scalar, F = fermion, V = vector;
# `spins` stores the UFO spin codes (2s+1, with -1 for ghosts) and
# `structure` the analytic expression in UFO notation.
using ..Objects
export all_lorentz
import ..FormFactors
# Ghost-ghost interactions.
UUS1 = Lorentz(name = "UUS1", spins = [-1, -1, 1], structure = "1")
UUV1 = Lorentz(name = "UUV1", spins = [-1, -1, 3], structure = "P(3,2) + P(3,3)")
# Scalar self-interaction.
SSS1 = Lorentz(name = "SSS1", spins = [1, 1, 1], structure = "1")
# Fermion-fermion-scalar structures (chiral projectors ProjM/ProjP).
FFS1 = Lorentz(name = "FFS1", spins = [2, 2, 1], structure = "ProjM(2,1)")
FFS2 = Lorentz(name = "FFS2", spins = [2, 2, 1], structure = "ProjM(2,1) - ProjP(2,1)")
FFS3 = Lorentz(name = "FFS3", spins = [2, 2, 1], structure = "ProjP(2,1)")
FFS4 = Lorentz(name = "FFS4", spins = [2, 2, 1], structure = "ProjM(2,1) + ProjP(2,1)")
# Fermion-fermion-vector structures.
FFV1 = Lorentz(name = "FFV1", spins = [2, 2, 3], structure = "Gamma(3,2,1)")
FFV2 = Lorentz(name = "FFV2", spins = [2, 2, 3], structure = "Gamma(3,2,-1)*ProjM(-1,1)")
FFV3 = Lorentz(name = "FFV3", spins = [2, 2, 3], structure = "Gamma(3,2,-1)*ProjM(-1,1) - 2*Gamma(3,2,-1)*ProjP(-1,1)")
FFV4 = Lorentz(name = "FFV4", spins = [2, 2, 3], structure = "Gamma(3,2,-1)*ProjM(-1,1) + 2*Gamma(3,2,-1)*ProjP(-1,1)")
FFV5 = Lorentz(name = "FFV5", spins = [2, 2, 3], structure = "Gamma(3,2,-1)*ProjM(-1,1) + 4*Gamma(3,2,-1)*ProjP(-1,1)")
# Vector/scalar mixed and vector self-interactions.
VSS1 = Lorentz(name = "VSS1", spins = [3, 1, 1], structure = "P(1,2) - P(1,3)")
VVS1 = Lorentz(name = "VVS1", spins = [3, 3, 1], structure = "Metric(1,2)")
VVV1 = Lorentz(name = "VVV1", spins = [3, 3, 3], structure = "P(3,1)*Metric(1,2) - P(3,2)*Metric(1,2) - P(2,1)*Metric(1,3) + P(2,3)*Metric(1,3) + P(1,2)*Metric(2,3) - P(1,3)*Metric(2,3)")
# Four-point structures.
SSSS1 = Lorentz(name = "SSSS1", spins = [1, 1, 1, 1], structure = "1")
VVSS1 = Lorentz(name = "VVSS1", spins = [3, 3, 1, 1], structure = "Metric(1,2)")
VVVV1 = Lorentz(name = "VVVV1", spins = [3, 3, 3, 3], structure = "Metric(1,4)*Metric(2,3) - Metric(1,3)*Metric(2,4)")
VVVV2 = Lorentz(name = "VVVV2", spins = [3, 3, 3, 3], structure = "Metric(1,4)*Metric(2,3) + Metric(1,3)*Metric(2,4) - 2*Metric(1,2)*Metric(3,4)")
VVVV3 = Lorentz(name = "VVVV3", spins = [3, 3, 3, 3], structure = "Metric(1,4)*Metric(2,3) - Metric(1,2)*Metric(3,4)")
VVVV4 = Lorentz(name = "VVVV4", spins = [3, 3, 3, 3], structure = "Metric(1,3)*Metric(2,4) - Metric(1,2)*Metric(3,4)")
VVVV5 = Lorentz(name = "VVVV5", spins = [3, 3, 3, 3], structure = "Metric(1,4)*Metric(2,3) - (Metric(1,3)*Metric(2,4))/2. - (Metric(1,2)*Metric(3,4))/2.")
# All Lorentz structures of the model, addressable by name.
all_lorentz = (
    UUS1 = UUS1,
    UUV1 = UUV1,
    SSS1 = SSS1,
    FFS1 = FFS1,
    FFS2 = FFS2,
    FFS3 = FFS3,
    FFS4 = FFS4,
    FFV1 = FFV1,
    FFV2 = FFV2,
    FFV3 = FFV3,
    FFV4 = FFV4,
    FFV5 = FFV5,
    VSS1 = VSS1,
    VVS1 = VVS1,
    VVV1 = VVV1,
    SSSS1 = SSSS1,
    VVSS1 = VVSS1,
    VVVV1 = VVVV1,
    VVVV2 = VVVV2,
    VVVV3 = VVVV3,
    VVVV4 = VVVV4,
    VVVV5 = VVVV5
)
end # LorentzIndices
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 7689 | module Objects
export Parameter
export Particle
export Coupling
export Lorentz
export Vertex
export CouplingOrder
export Decay
export FormFactor
export anti
export is_goldstone_boson
export is_self_conjugate
struct Parameter{T<:Number}
name::String
nature::String
value::Union{T, Expr, Symbol}
tex_name::String
lhablock::Union{String, Missing}
lhacode::Union{Integer, Missing}
function Parameter(; kwargs...)
if kwargs[:nature] == "external" && (
!haskey(kwargs, :lhablock) || !haskey(kwargs, :lhacode)
)
error("Need LHA information for external parameter $(kwargs.name).")
end
lhablock = haskey(kwargs, :lhablock) ? kwargs[:lhablock] : missing
lhacode = if haskey(kwargs, :lhacode)
@assert length(kwargs[:lhacode]) == 1
first(kwargs[:lhacode])
else
missing
end
value = if isa(kwargs[:value], String)
tmp = Meta.parse(kwargs[:value])
if isa(tmp, Real) && kwargs[:type] == "complex"
complex(tmp)
else
tmp
end
else
@assert isa(kwargs[:value], Number)
if isa(kwargs[:value], Real) && kwargs[:type] == "complex"
complex(kwargs[:value])
else
kwargs[:value]
end
end
if kwargs[:type] == "real"
return new{Real}(
kwargs[:name], kwargs[:nature], value,
kwargs[:texname], lhablock, lhacode
)
elseif kwargs[:type] == "complex"
# if isa(value, Real)
# return new{Complex}(
# kwargs[:name], kwargs[:nature], complex(value),
# kwargs[:texname], lhablock, lhacode
# )
# end
return new{Complex}(
kwargs[:name], kwargs[:nature], value,
kwargs[:texname], lhablock, lhacode
)
else
error("Type $(kwargs.type) is not supported.")
end
end
end
struct Particle
pdg_code::Int
name::String
anti_name::String
spin::Int
color::Int
mass::Union{Real, Parameter{Real}, Symbol, Expr}
width::Union{Real, Parameter{Real}, Symbol, Expr}
tex_name::String
anti_tex_name::String
charge::Union{Integer, Rational}
optional_properties::Dict{Symbol, Any}
Particle(
pdg_code::Int,
name::String,
anti_name::String,
spin::Int,
color::Int,
mass::Union{Real, Parameter{Real}, Symbol},
width::Union{Real, Parameter{Real}, Symbol},
tex_name::String,
anti_tex_name::String,
charge::Real,
optional_properties::Dict{Symbol, Any}
) = new(
pdg_code, name, anti_name,
spin, color, mass, width,
tex_name, anti_tex_name,
isa(charge, AbstractFloat) ? rationalize(charge) : charge,
optional_properties
)
function Particle(; kwargs...)
required_args = [
:pdg_code, :name, :antiname,
:spin, :color, :mass, :width,
:texname, :antitexname, :charge
]
optional_properties = Dict{Symbol, Any}(
:propagating => true,
:GoldstoneBoson => false,
:propagator => nothing
)
for key ∈ setdiff(keys(kwargs), required_args)
optional_properties[key] = kwargs[key]
end
charge = isa(kwargs[:charge], Integer) ? kwargs[:charge] : rationalize(kwargs[:charge])
optional_properties[:line] = find_line_type(
kwargs[:spin], kwargs[:color];
self_conjugate_flag=(kwargs[:name]==kwargs[:antiname])
)
new(
kwargs[:pdg_code],
kwargs[:name],
kwargs[:antiname],
kwargs[:spin],
kwargs[:color],
kwargs[:mass],
kwargs[:width],
kwargs[:texname],
kwargs[:antitexname],
charge,
optional_properties
)
end
end
struct Coupling
name::String
value::Union{Expr, Symbol}
order::Dict{String, Int}
function Coupling(; kwargs...)
value = if isa(kwargs[:value], String)
value_str = replace(
kwargs[:value],
"**" => "^",
"cmath." => "",
"complexconjugate" => "conj",
".*" => ". *"
)
Meta.parse(value_str)
else
@assert isa(kwargs[:value], Number)
kwargs[:value]
end
return new(kwargs[:name], value, kwargs[:order])
end
end
struct Lorentz
name::String
spins::Vector{Integer}
structure::String
Lorentz(; structure="exteranl", kwargs...) = new(kwargs[:name], kwargs[:spins], structure)
end
struct Vertex
name::String
particles::Vector{Particle}
color::Vector{String}
lorentz::Vector{Lorentz}
couplings::Dict{Tuple, Coupling}
Vertex(; kwargs...) = new(kwargs[:name], kwargs[:particles], kwargs[:color], kwargs[:lorentz], kwargs[:couplings])
end
# A UFO coupling order (e.g. QCD, QED) with its expansion bookkeeping.
struct CouplingOrder
	name::String
	expansion_order::Integer
	hierarchy::Integer
	perturbative_expansion::Integer
	function CouplingOrder(;perturbation_expansion::Integer=0, kwargs...)
		# UFO model files spell this keyword `perturbative_expansion`; that
		# spelling was previously swallowed by `kwargs...` and silently replaced
		# by the default 0.  Accept both, the UFO spelling taking precedence,
		# while keeping the old `perturbation_expansion` keyword working.
		pert = get(kwargs, :perturbative_expansion, perturbation_expansion)
		return new(
			kwargs[:name],
			kwargs[:expansion_order],
			kwargs[:hierarchy],
			pert
		)
	end
end
# A particle's decay table: partial widths keyed by `Tuple`s (presumably of
# final-state particles -- confirm against the generated `decays.jl`).
struct Decay
	name::String
	particle::Particle
	# NOTE(review): the field is named `particle_widths` but the keyword read
	# below is `:partial_widths` (the UFO spelling).  Looks like a naming slip;
	# confirm with callers before renaming either side.
	particle_widths::Dict{Tuple, String}
	Decay(; kwargs...) = new(kwargs[:name], kwargs[:particle], kwargs[:partial_widths])
end
# A UFO form factor attached to a vertex.  `type` and `value` are left
# untyped (`Any`); in UFO output `type` is typically a type string and
# `value` a python-style expression string -- TODO confirm against models
# that actually populate form factors.
struct FormFactor
	name::String
	type
	value
end
"""
    anti(p::Particle) -> Particle

Return the antiparticle of `p`.  A self-conjugate particle is returned
unchanged; otherwise the PDG code, the electric charge and every flippable
entry of `optional_properties` are negated, the name/TeX-name pairs are
swapped, and the color representation is conjugated (the singlet 1 and the
adjoint 8 are kept as-is).
"""
function anti(p::Particle)::Particle
	is_self_conjugate(p) && return p
	# These entries describe the particle rather than a charge-like quantum
	# number: copy them over verbatim.
	preserved_keys = [:line, :propagating, :GoldstoneBoson, :propagator]
	flipped_properties = Dict{Symbol, Any}(
		key => p.optional_properties[key] for key ∈ preserved_keys
	)
	# Every remaining optional property is treated as additive and negated.
	for key ∈ setdiff(keys(p.optional_properties), preserved_keys)
		flipped_properties[key] = -p.optional_properties[key]
	end
	conjugate_color = p.color ∈ (1, 8) ? p.color : -p.color
	return Particle(
		-p.pdg_code,
		p.anti_name,
		p.name,
		p.spin,
		conjugate_color,
		p.mass,
		p.width,
		p.anti_tex_name,
		p.tex_name,
		-p.charge,
		flipped_properties
	)
end
"""
    find_line_type(spin::Integer, color::Integer; self_conjugate_flag::Bool=false) -> String

Map a particle's spin and color codes to the drawing style of its propagator
line ("dashed", "straight", "swavy", "scurly", "wavy", "curly", "double",
"dotted").  Spins without a dedicated style fall back to "dashed".
"""
function find_line_type(spin::Integer, color::Integer; self_conjugate_flag::Bool=false)::String
	spin == 1 && return "dashed"                         # scalar
	if spin == 2                                         # fermion
		self_conjugate_flag || return "straight"
		return color == 1 ? "swavy" : "scurly"           # self-conjugate fermion
	end
	spin == 3 && return (color == 1 ? "wavy" : "curly")  # vector
	spin == 5 && return "double"
	spin == -1 && return "dotted"                        # ghost
	return "dashed"                                      # not supported
end
# Whether `p` is a Goldstone boson, as recorded under the `:GoldstoneBoson`
# key of its `optional_properties` dictionary (default `false`, see the
# keyword constructor).
# Fix: `optional_properties` is a `Dict{Symbol, Any}`, so the entry must be
# looked up with `[:GoldstoneBoson]`; the previous dot access read
# (non-existent) struct fields of `Dict` and always threw.
is_goldstone_boson(p::Particle) = p.optional_properties[:GoldstoneBoson]
# A particle is its own antiparticle exactly when its name and antiparticle
# name coincide.
function is_self_conjugate(p::Particle)
	return p.name == p.anti_name
end
# The canonical vanishing parameter ("ZERO" in UFO models): an internal real
# parameter whose value string is "0.0".  Defining `Base.zero` lets generic
# code request it as `zero(Parameter)`.
function Base.zero(::Type{Parameter})
	return Parameter(
		name = "ZERO",
		nature = "internal",
		type = "real",
		value = "0.0",
		texname = "0"
	)
end
end # module Objects
| UniversalFeynRulesOutput | https://github.com/Fenyutanchan/UniversalFeynRulesOutput.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 10600 | module Parameters
using ..Objects
export all_parameters
ZERO = Parameter(name = "ZERO", nature = "internal", type = "real", value = "0.0", texname = "0")
aEWM1 = Parameter(name = "aEWM1", nature = "external", type = "real", value = 132.50698, texname = "\\text{aEWM1}", lhablock = "SMINPUTS", lhacode = [1])
Gf = Parameter(name = "Gf", nature = "external", type = "real", value = 1.16639e-5, texname = "G_f", lhablock = "SMINPUTS", lhacode = [2])
aS = Parameter(name = "aS", nature = "external", type = "real", value = 0.118, texname = "\\alpha _s", lhablock = "SMINPUTS", lhacode = [3])
lamWS = Parameter(name = "lamWS", nature = "external", type = "real", value = 0.2253, texname = "\\text{lamWS}", lhablock = "Wolfenstein", lhacode = [1])
AWS = Parameter(name = "AWS", nature = "external", type = "real", value = 0.808, texname = "\\text{AWS}", lhablock = "Wolfenstein", lhacode = [2])
rhoWS = Parameter(name = "rhoWS", nature = "external", type = "real", value = 0.132, texname = "\\text{rhoWS}", lhablock = "Wolfenstein", lhacode = [3])
etaWS = Parameter(name = "etaWS", nature = "external", type = "real", value = 0.341, texname = "\\text{etaWS}", lhablock = "Wolfenstein", lhacode = [4])
ymc = Parameter(name = "ymc", nature = "external", type = "real", value = 1.27, texname = "\\text{ymc}", lhablock = "YUKAWA", lhacode = [4])
ymb = Parameter(name = "ymb", nature = "external", type = "real", value = 4.2, texname = "\\text{ymb}", lhablock = "YUKAWA", lhacode = [5])
ymt = Parameter(name = "ymt", nature = "external", type = "real", value = 164.5, texname = "\\text{ymt}", lhablock = "YUKAWA", lhacode = [6])
yme = Parameter(name = "yme", nature = "external", type = "real", value = 0.000511, texname = "\\text{yme}", lhablock = "YUKAWA", lhacode = [11])
ymm = Parameter(name = "ymm", nature = "external", type = "real", value = 0.10566, texname = "\\text{ymm}", lhablock = "YUKAWA", lhacode = [13])
ymtau = Parameter(name = "ymtau", nature = "external", type = "real", value = 1.777, texname = "\\text{ymtau}", lhablock = "YUKAWA", lhacode = [15])
MZ = Parameter(name = "MZ", nature = "external", type = "real", value = 91.188, texname = "\\text{MZ}", lhablock = "MASS", lhacode = [23])
MC = Parameter(name = "MC", nature = "external", type = "real", value = 1.27, texname = "\\text{MC}", lhablock = "MASS", lhacode = [4])
MT = Parameter(name = "MT", nature = "external", type = "real", value = 172.0, texname = "\\text{MT}", lhablock = "MASS", lhacode = [6])
MB = Parameter(name = "MB", nature = "external", type = "real", value = 4.7, texname = "\\text{MB}", lhablock = "MASS", lhacode = [5])
MH = Parameter(name = "MH", nature = "external", type = "real", value = 125.0, texname = "\\text{MH}", lhablock = "MASS", lhacode = [25])
Me = Parameter(name = "Me", nature = "external", type = "real", value = 0.000511, texname = "\\text{Me}", lhablock = "MASS", lhacode = [11])
MM = Parameter(name = "MM", nature = "external", type = "real", value = 0.10566, texname = "\\text{MM}", lhablock = "MASS", lhacode = [13])
MTA = Parameter(name = "MTA", nature = "external", type = "real", value = 1.777, texname = "\\text{MTA}", lhablock = "MASS", lhacode = [15])
WZ = Parameter(name = "WZ", nature = "external", type = "real", value = 2.44140351, texname = "\\text{WZ}", lhablock = "DECAY", lhacode = [23])
WW = Parameter(name = "WW", nature = "external", type = "real", value = 2.04759951, texname = "\\text{WW}", lhablock = "DECAY", lhacode = [24])
WT = Parameter(name = "WT", nature = "external", type = "real", value = 1.50833649, texname = "\\text{WT}", lhablock = "DECAY", lhacode = [6])
WH = Parameter(name = "WH", nature = "external", type = "real", value = 0.00638233934, texname = "\\text{WH}", lhablock = "DECAY", lhacode = [25])
WTau = Parameter(name = "WTau", nature = "external", type = "real", value = 2.27e-12, texname = "\\text{WTau}", lhablock = "DECAY", lhacode = [15])
CKM1x1 = Parameter(name = "CKM1x1", nature = "internal", type = "complex", value = "1 - lamWS^2/2.", texname = "\\text{CKM1x1}")
CKM1x2 = Parameter(name = "CKM1x2", nature = "internal", type = "complex", value = "lamWS", texname = "\\text{CKM1x2}")
CKM1x3 = Parameter(name = "CKM1x3", nature = "internal", type = "complex", value = "AWS*lamWS^3*(-(etaWS*complex(0,1)) + rhoWS)", texname = "\\text{CKM1x3}")
CKM2x1 = Parameter(name = "CKM2x1", nature = "internal", type = "complex", value = "-lamWS", texname = "\\text{CKM2x1}")
CKM2x2 = Parameter(name = "CKM2x2", nature = "internal", type = "complex", value = "1 - lamWS^2/2.", texname = "\\text{CKM2x2}")
CKM2x3 = Parameter(name = "CKM2x3", nature = "internal", type = "complex", value = "AWS*lamWS^2", texname = "\\text{CKM2x3}")
CKM3x1 = Parameter(name = "CKM3x1", nature = "internal", type = "complex", value = "AWS*lamWS^3*(1 - etaWS*complex(0,1) - rhoWS)", texname = "\\text{CKM3x1}")
CKM3x2 = Parameter(name = "CKM3x2", nature = "internal", type = "complex", value = "-(AWS*lamWS^2)", texname = "\\text{CKM3x2}")
CKM3x3 = Parameter(name = "CKM3x3", nature = "internal", type = "complex", value = "1", texname = "\\text{CKM3x3}")
aEW = Parameter(name = "aEW", nature = "internal", type = "real", value = "1/aEWM1", texname = "\\alpha _{\\text{EW}}")
G = Parameter(name = "G", nature = "internal", type = "real", value = "2*sqrt(aS)*sqrt(pi)", texname = "G")
MW = Parameter(name = "MW", nature = "internal", type = "real", value = "sqrt(MZ^2/2. + sqrt(MZ^4/4. - (aEW*pi*MZ^2)/(Gf*sqrt(2))))", texname = "M_W")
ee = Parameter(name = "ee", nature = "internal", type = "real", value = "2*sqrt(aEW)*sqrt(pi)", texname = "e")
sw2 = Parameter(name = "sw2", nature = "internal", type = "real", value = "1 - MW^2/MZ^2", texname = "\\text{sw2}")
cw = Parameter(name = "cw", nature = "internal", type = "real", value = "sqrt(1 - sw2)", texname = "c_w")
sw = Parameter(name = "sw", nature = "internal", type = "real", value = "sqrt(sw2)", texname = "s_w")
g1 = Parameter(name = "g1", nature = "internal", type = "real", value = "ee/cw", texname = "g_1")
gw = Parameter(name = "gw", nature = "internal", type = "real", value = "ee/sw", texname = "g_w")
vev = Parameter(name = "vev", nature = "internal", type = "real", value = "(2*MW*sw)/ee", texname = "\\text{vev}")
lam = Parameter(name = "lam", nature = "internal", type = "real", value = "MH^2/(2. *vev^2)", texname = "\\text{lam}")
yb = Parameter(name = "yb", nature = "internal", type = "real", value = "(ymb*sqrt(2))/vev", texname = "\\text{yb}")
yc = Parameter(name = "yc", nature = "internal", type = "real", value = "(ymc*sqrt(2))/vev", texname = "\\text{yc}")
ye = Parameter(name = "ye", nature = "internal", type = "real", value = "(yme*sqrt(2))/vev", texname = "\\text{ye}")
ym = Parameter(name = "ym", nature = "internal", type = "real", value = "(ymm*sqrt(2))/vev", texname = "\\text{ym}")
yt = Parameter(name = "yt", nature = "internal", type = "real", value = "(ymt*sqrt(2))/vev", texname = "\\text{yt}")
ytau = Parameter(name = "ytau", nature = "internal", type = "real", value = "(ymtau*sqrt(2))/vev", texname = "\\text{ytau}")
muH = Parameter(name = "muH", nature = "internal", type = "real", value = "sqrt(lam*vev^2)", texname = "\\mu")
I1x31 = Parameter(name = "I1x31", nature = "internal", type = "complex", value = "yb*conj(CKM1x3)", texname = "\\text{I1x31}")
I1x32 = Parameter(name = "I1x32", nature = "internal", type = "complex", value = "yb*conj(CKM2x3)", texname = "\\text{I1x32}")
I1x33 = Parameter(name = "I1x33", nature = "internal", type = "complex", value = "yb*conj(CKM3x3)", texname = "\\text{I1x33}")
I2x12 = Parameter(name = "I2x12", nature = "internal", type = "complex", value = "yc*conj(CKM2x1)", texname = "\\text{I2x12}")
I2x13 = Parameter(name = "I2x13", nature = "internal", type = "complex", value = "yt*conj(CKM3x1)", texname = "\\text{I2x13}")
I2x22 = Parameter(name = "I2x22", nature = "internal", type = "complex", value = "yc*conj(CKM2x2)", texname = "\\text{I2x22}")
I2x23 = Parameter(name = "I2x23", nature = "internal", type = "complex", value = "yt*conj(CKM3x2)", texname = "\\text{I2x23}")
I2x32 = Parameter(name = "I2x32", nature = "internal", type = "complex", value = "yc*conj(CKM2x3)", texname = "\\text{I2x32}")
I2x33 = Parameter(name = "I2x33", nature = "internal", type = "complex", value = "yt*conj(CKM3x3)", texname = "\\text{I2x33}")
I3x21 = Parameter(name = "I3x21", nature = "internal", type = "complex", value = "CKM2x1*yc", texname = "\\text{I3x21}")
I3x22 = Parameter(name = "I3x22", nature = "internal", type = "complex", value = "CKM2x2*yc", texname = "\\text{I3x22}")
I3x23 = Parameter(name = "I3x23", nature = "internal", type = "complex", value = "CKM2x3*yc", texname = "\\text{I3x23}")
I3x31 = Parameter(name = "I3x31", nature = "internal", type = "complex", value = "CKM3x1*yt", texname = "\\text{I3x31}")
I3x32 = Parameter(name = "I3x32", nature = "internal", type = "complex", value = "CKM3x2*yt", texname = "\\text{I3x32}")
I3x33 = Parameter(name = "I3x33", nature = "internal", type = "complex", value = "CKM3x3*yt", texname = "\\text{I3x33}")
I4x13 = Parameter(name = "I4x13", nature = "internal", type = "complex", value = "CKM1x3*yb", texname = "\\text{I4x13}")
I4x23 = Parameter(name = "I4x23", nature = "internal", type = "complex", value = "CKM2x3*yb", texname = "\\text{I4x23}")
I4x33 = Parameter(name = "I4x33", nature = "internal", type = "complex", value = "CKM3x3*yb", texname = "\\text{I4x33}")
all_parameters = (
ZERO = ZERO,
aEWM1 = aEWM1,
Gf = Gf,
aS = aS,
lamWS = lamWS,
AWS = AWS,
rhoWS = rhoWS,
etaWS = etaWS,
ymc = ymc,
ymb = ymb,
ymt = ymt,
yme = yme,
ymm = ymm,
ymtau = ymtau,
MZ = MZ,
MC = MC,
MT = MT,
MB = MB,
MH = MH,
Me = Me,
MM = MM,
MTA = MTA,
WZ = WZ,
WW = WW,
WT = WT,
WH = WH,
WTau = WTau,
CKM1x1 = CKM1x1,
CKM1x2 = CKM1x2,
CKM1x3 = CKM1x3,
CKM2x1 = CKM2x1,
CKM2x2 = CKM2x2,
CKM2x3 = CKM2x3,
CKM3x1 = CKM3x1,
CKM3x2 = CKM3x2,
CKM3x3 = CKM3x3,
aEW = aEW,
G = G,
MW = MW,
ee = ee,
sw2 = sw2,
cw = cw,
sw = sw,
g1 = g1,
gw = gw,
vev = vev,
lam = lam,
yb = yb,
yc = yc,
ye = ye,
ym = ym,
yt = yt,
ytau = ytau,
muH = muH,
I1x31 = I1x31,
I1x32 = I1x32,
I1x33 = I1x33,
I2x12 = I2x12,
I2x13 = I2x13,
I2x22 = I2x22,
I2x23 = I2x23,
I2x32 = I2x32,
I2x33 = I2x33,
I3x21 = I3x21,
I3x22 = I3x22,
I3x23 = I3x23,
I3x31 = I3x31,
I3x32 = I3x32,
I3x33 = I3x33,
I4x13 = I4x13,
I4x23 = I4x23,
I4x33 = I4x33
)
end # Parameters | UniversalFeynRulesOutput | https://github.com/Fenyutanchan/UniversalFeynRulesOutput.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 6933 | module Particles
using ..Objects
export all_particles
import ..Parameters
a = Particle(pdg_code = 22, name = "a", antiname = "a", spin = 3, color = 1, mass = Parameters.ZERO, width = Parameters.ZERO, texname = "a", antitexname = "a", charge = 0, GhostNumber = 0, LeptonNumber = 0, Y = 0)
Z = Particle(pdg_code = 23, name = "Z", antiname = "Z", spin = 3, color = 1, mass = Parameters.MZ, width = Parameters.WZ, texname = "Z", antitexname = "Z", charge = 0, GhostNumber = 0, LeptonNumber = 0, Y = 0)
W__plus__ = Particle(pdg_code = 24, name = "W+", antiname = "W-", spin = 3, color = 1, mass = Parameters.MW, width = Parameters.WW, texname = "W+", antitexname = "W-", charge = 1, GhostNumber = 0, LeptonNumber = 0, Y = 0)
W__minus__ = anti(W__plus__)
g = Particle(pdg_code = 21, name = "g", antiname = "g", spin = 3, color = 8, mass = Parameters.ZERO, width = Parameters.ZERO, texname = "g", antitexname = "g", charge = 0, GhostNumber = 0, LeptonNumber = 0, Y = 0)
ghA = Particle(pdg_code = 9000001, name = "ghA", antiname = "ghA~", spin = -1, color = 1, mass = Parameters.ZERO, width = Parameters.ZERO, texname = "ghA", antitexname = "ghA~", charge = 0, GhostNumber = 1, LeptonNumber = 0, Y = 0)
ghA__tilde__ = anti(ghA)
ghZ = Particle(pdg_code = 9000002, name = "ghZ", antiname = "ghZ~", spin = -1, color = 1, mass = Parameters.MZ, width = Parameters.WZ, texname = "ghZ", antitexname = "ghZ~", charge = 0, GhostNumber = 1, LeptonNumber = 0, Y = 0)
ghZ__tilde__ = anti(ghZ)
ghWp = Particle(pdg_code = 9000003, name = "ghWp", antiname = "ghWp~", spin = -1, color = 1, mass = Parameters.MW, width = Parameters.WW, texname = "ghWp", antitexname = "ghWp~", charge = 1, GhostNumber = 1, LeptonNumber = 0, Y = 0)
ghWp__tilde__ = anti(ghWp)
ghWm = Particle(pdg_code = 9000004, name = "ghWm", antiname = "ghWm~", spin = -1, color = 1, mass = Parameters.MW, width = Parameters.WW, texname = "ghWm", antitexname = "ghWm~", charge = -1, GhostNumber = 1, LeptonNumber = 0, Y = 0)
ghWm__tilde__ = anti(ghWm)
ghG = Particle(pdg_code = 9000005, name = "ghG", antiname = "ghG~", spin = -1, color = 8, mass = Parameters.ZERO, width = Parameters.ZERO, texname = "ghG", antitexname = "ghG~", charge = 0, GhostNumber = 1, LeptonNumber = 0, Y = 0)
ghG__tilde__ = anti(ghG)
ve = Particle(pdg_code = 12, name = "ve", antiname = "ve~", spin = 2, color = 1, mass = Parameters.ZERO, width = Parameters.ZERO, texname = "ve", antitexname = "ve~", charge = 0, GhostNumber = 0, LeptonNumber = 1, Y = 0)
ve__tilde__ = anti(ve)
vm = Particle(pdg_code = 14, name = "vm", antiname = "vm~", spin = 2, color = 1, mass = Parameters.ZERO, width = Parameters.ZERO, texname = "vm", antitexname = "vm~", charge = 0, GhostNumber = 0, LeptonNumber = 1, Y = 0)
vm__tilde__ = anti(vm)
vt = Particle(pdg_code = 16, name = "vt", antiname = "vt~", spin = 2, color = 1, mass = Parameters.ZERO, width = Parameters.ZERO, texname = "vt", antitexname = "vt~", charge = 0, GhostNumber = 0, LeptonNumber = 1, Y = 0)
vt__tilde__ = anti(vt)
u = Particle(pdg_code = 2, name = "u", antiname = "u~", spin = 2, color = 3, mass = Parameters.ZERO, width = Parameters.ZERO, texname = "u", antitexname = "u~", charge = 2 / 3, GhostNumber = 0, LeptonNumber = 0, Y = 0)
u__tilde__ = anti(u)
c = Particle(pdg_code = 4, name = "c", antiname = "c~", spin = 2, color = 3, mass = Parameters.MC, width = Parameters.ZERO, texname = "c", antitexname = "c~", charge = 2 / 3, GhostNumber = 0, LeptonNumber = 0, Y = 0)
c__tilde__ = anti(c)
t = Particle(pdg_code = 6, name = "t", antiname = "t~", spin = 2, color = 3, mass = Parameters.MT, width = Parameters.WT, texname = "t", antitexname = "t~", charge = 2 / 3, GhostNumber = 0, LeptonNumber = 0, Y = 0)
t__tilde__ = anti(t)
d = Particle(pdg_code = 1, name = "d", antiname = "d~", spin = 2, color = 3, mass = Parameters.ZERO, width = Parameters.ZERO, texname = "d", antitexname = "d~", charge = -1 / 3, GhostNumber = 0, LeptonNumber = 0, Y = 0)
d__tilde__ = anti(d)
s = Particle(pdg_code = 3, name = "s", antiname = "s~", spin = 2, color = 3, mass = Parameters.ZERO, width = Parameters.ZERO, texname = "s", antitexname = "s~", charge = -1 / 3, GhostNumber = 0, LeptonNumber = 0, Y = 0)
s__tilde__ = anti(s)
b = Particle(pdg_code = 5, name = "b", antiname = "b~", spin = 2, color = 3, mass = Parameters.MB, width = Parameters.ZERO, texname = "b", antitexname = "b~", charge = -1 / 3, GhostNumber = 0, LeptonNumber = 0, Y = 0)
b__tilde__ = anti(b)
H = Particle(pdg_code = 25, name = "H", antiname = "H", spin = 1, color = 1, mass = Parameters.MH, width = Parameters.WH, texname = "H", antitexname = "H", charge = 0, GhostNumber = 0, LeptonNumber = 0, Y = 0)
G0 = Particle(pdg_code = 250, name = "G0", antiname = "G0", spin = 1, color = 1, mass = Parameters.MZ, width = Parameters.WZ, texname = "G0", antitexname = "G0", GoldstoneBoson = true, charge = 0, GhostNumber = 0, LeptonNumber = 0, Y = 0)
G__plus__ = Particle(pdg_code = 251, name = "G+", antiname = "G-", spin = 1, color = 1, mass = Parameters.MW, width = Parameters.WW, texname = "G+", antitexname = "G-", GoldstoneBoson = true, charge = 1, GhostNumber = 0, LeptonNumber = 0, Y = 0)
G__minus__ = anti(G__plus__)
e__minus__ = Particle(pdg_code = 11, name = "e-", antiname = "e+", spin = 2, color = 1, mass = Parameters.Me, width = Parameters.ZERO, texname = "e-", antitexname = "e+", charge = -1, GhostNumber = 0, LeptonNumber = 1, Y = 0)
e__plus__ = anti(e__minus__)
mu__minus__ = Particle(pdg_code = 13, name = "mu-", antiname = "mu+", spin = 2, color = 1, mass = Parameters.MM, width = Parameters.ZERO, texname = "mu-", antitexname = "mu+", charge = -1, GhostNumber = 0, LeptonNumber = 1, Y = 0)
mu__plus__ = anti(mu__minus__)
ta__minus__ = Particle(pdg_code = 15, name = "ta-", antiname = "ta+", spin = 2, color = 1, mass = Parameters.MTA, width = Parameters.WTau, texname = "ta-", antitexname = "ta+", charge = -1, GhostNumber = 0, LeptonNumber = 1, Y = 0)
ta__plus__ = anti(ta__minus__)
all_particles = (
a = a,
Z = Z,
W__plus__ = W__plus__,
W__minus__ = W__minus__,
g = g,
ghA = ghA,
ghA__tilde__ = ghA__tilde__,
ghZ = ghZ,
ghZ__tilde__ = ghZ__tilde__,
ghWp = ghWp,
ghWp__tilde__ = ghWp__tilde__,
ghWm = ghWm,
ghWm__tilde__ = ghWm__tilde__,
ghG = ghG,
ghG__tilde__ = ghG__tilde__,
ve = ve,
ve__tilde__ = ve__tilde__,
vm = vm,
vm__tilde__ = vm__tilde__,
vt = vt,
vt__tilde__ = vt__tilde__,
u = u,
u__tilde__ = u__tilde__,
c = c,
c__tilde__ = c__tilde__,
t = t,
t__tilde__ = t__tilde__,
d = d,
d__tilde__ = d__tilde__,
s = s,
s__tilde__ = s__tilde__,
b = b,
b__tilde__ = b__tilde__,
H = H,
G0 = G0,
G__plus__ = G__plus__,
G__minus__ = G__minus__,
e__minus__ = e__minus__,
e__plus__ = e__plus__,
mu__minus__ = mu__minus__,
mu__plus__ = mu__plus__,
ta__minus__ = ta__minus__,
ta__plus__ = ta__plus__
)
end # Particles | UniversalFeynRulesOutput | https://github.com/Fenyutanchan/UniversalFeynRulesOutput.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 112 | module Propagators
using ..Objects
export all_propagators
# This model defines no custom propagators, so the collection is empty; it is
# kept (and exported above) so downstream code can treat every model uniformly.
all_propagators = (
)
end # Propagators | UniversalFeynRulesOutput | https://github.com/Fenyutanchan/UniversalFeynRulesOutput.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 718 | module sm
export all_decays
export all_particles
export all_parameters
export all_lorentz
export all_form_factors
export all_vertices
export all_propagators
export all_coupling_orders
export all_couplings
export all_CT_vertices
include("objects.jl")
using .Objects
include("parameters.jl")
using .Parameters
include("particles.jl")
using .Particles
include("form_factors.jl")
using .FormFactors
include("lorentz.jl")
using .LorentzIndices
include("couplings.jl")
using .Couplings
include("decays.jl")
using .Decays
include("vertices.jl")
using .Vertices
include("propagators.jl")
using .Propagators
include("coupling_orders.jl")
using .CouplingOrders
include("CT_vertices.jl")
using .CTVertices
end # sm | UniversalFeynRulesOutput | https://github.com/Fenyutanchan/UniversalFeynRulesOutput.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | code | 38096 | module Vertices
using ..Objects
export all_vertices
import ..Particles
import ..Couplings
import ..LorentzIndices
V_1 = Vertex(name = "V_1", particles = [Particles.G0, Particles.G0, Particles.G0, Particles.G0], color = ["1"], lorentz = [LorentzIndices.SSSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_33))
V_2 = Vertex(name = "V_2", particles = [Particles.G0, Particles.G0, Particles.G__minus__, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.SSSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_31))
V_3 = Vertex(name = "V_3", particles = [Particles.G__minus__, Particles.G__minus__, Particles.G__plus__, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.SSSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_32))
V_4 = Vertex(name = "V_4", particles = [Particles.G0, Particles.G0, Particles.H, Particles.H], color = ["1"], lorentz = [LorentzIndices.SSSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_31))
V_5 = Vertex(name = "V_5", particles = [Particles.G__minus__, Particles.G__plus__, Particles.H, Particles.H], color = ["1"], lorentz = [LorentzIndices.SSSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_31))
V_6 = Vertex(name = "V_6", particles = [Particles.H, Particles.H, Particles.H, Particles.H], color = ["1"], lorentz = [LorentzIndices.SSSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_33))
V_7 = Vertex(name = "V_7", particles = [Particles.G0, Particles.G0, Particles.H], color = ["1"], lorentz = [LorentzIndices.SSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_68))
V_8 = Vertex(name = "V_8", particles = [Particles.G__minus__, Particles.G__plus__, Particles.H], color = ["1"], lorentz = [LorentzIndices.SSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_68))
V_9 = Vertex(name = "V_9", particles = [Particles.H, Particles.H, Particles.H], color = ["1"], lorentz = [LorentzIndices.SSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_69))
V_10 = Vertex(name = "V_10", particles = [Particles.a, Particles.a, Particles.G__minus__, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.VVSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_6))
V_11 = Vertex(name = "V_11", particles = [Particles.a, Particles.G__minus__, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.VSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_3))
V_12 = Vertex(name = "V_12", particles = [Particles.ghA, Particles.ghWm__tilde__, Particles.W__minus__], color = ["1"], lorentz = [LorentzIndices.UUV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_3))
V_13 = Vertex(name = "V_13", particles = [Particles.ghA, Particles.ghWp__tilde__, Particles.W__plus__], color = ["1"], lorentz = [LorentzIndices.UUV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_4))
V_14 = Vertex(name = "V_14", particles = [Particles.ghWm, Particles.ghA__tilde__, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.UUS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_75))
V_15 = Vertex(name = "V_15", particles = [Particles.ghWm, Particles.ghA__tilde__, Particles.W__plus__], color = ["1"], lorentz = [LorentzIndices.UUV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_3))
V_16 = Vertex(name = "V_16", particles = [Particles.ghWm, Particles.ghWm__tilde__, Particles.G0], color = ["1"], lorentz = [LorentzIndices.UUS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_70))
V_17 = Vertex(name = "V_17", particles = [Particles.ghWm, Particles.ghWm__tilde__, Particles.H], color = ["1"], lorentz = [LorentzIndices.UUS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_71))
V_18 = Vertex(name = "V_18", particles = [Particles.ghWm, Particles.ghWm__tilde__, Particles.a], color = ["1"], lorentz = [LorentzIndices.UUV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_4))
V_19 = Vertex(name = "V_19", particles = [Particles.ghWm, Particles.ghWm__tilde__, Particles.Z], color = ["1"], lorentz = [LorentzIndices.UUV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_53))
V_20 = Vertex(name = "V_20", particles = [Particles.ghWm, Particles.ghZ__tilde__, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.UUS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_78))
V_21 = Vertex(name = "V_21", particles = [Particles.ghWm, Particles.ghZ__tilde__, Particles.W__plus__], color = ["1"], lorentz = [LorentzIndices.UUV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_52))
V_22 = Vertex(name = "V_22", particles = [Particles.ghWp, Particles.ghA__tilde__, Particles.G__minus__], color = ["1"], lorentz = [LorentzIndices.UUS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_74))
V_23 = Vertex(name = "V_23", particles = [Particles.ghWp, Particles.ghA__tilde__, Particles.W__minus__], color = ["1"], lorentz = [LorentzIndices.UUV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_4))
V_24 = Vertex(name = "V_24", particles = [Particles.ghWp, Particles.ghWp__tilde__, Particles.G0], color = ["1"], lorentz = [LorentzIndices.UUS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_73))
V_25 = Vertex(name = "V_25", particles = [Particles.ghWp, Particles.ghWp__tilde__, Particles.H], color = ["1"], lorentz = [LorentzIndices.UUS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_71))
V_26 = Vertex(name = "V_26", particles = [Particles.ghWp, Particles.ghWp__tilde__, Particles.a], color = ["1"], lorentz = [LorentzIndices.UUV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_3))
V_27 = Vertex(name = "V_27", particles = [Particles.ghWp, Particles.ghWp__tilde__, Particles.Z], color = ["1"], lorentz = [LorentzIndices.UUV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_52))
V_28 = Vertex(name = "V_28", particles = [Particles.ghWp, Particles.ghZ__tilde__, Particles.G__minus__], color = ["1"], lorentz = [LorentzIndices.UUS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_77))
V_29 = Vertex(name = "V_29", particles = [Particles.ghWp, Particles.ghZ__tilde__, Particles.W__minus__], color = ["1"], lorentz = [LorentzIndices.UUV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_53))
V_30 = Vertex(name = "V_30", particles = [Particles.ghZ, Particles.ghWm__tilde__, Particles.G__minus__], color = ["1"], lorentz = [LorentzIndices.UUS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_79))
V_31 = Vertex(name = "V_31", particles = [Particles.ghZ, Particles.ghWm__tilde__, Particles.W__minus__], color = ["1"], lorentz = [LorentzIndices.UUV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_52))
V_32 = Vertex(name = "V_32", particles = [Particles.ghZ, Particles.ghWp__tilde__, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.UUS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_76))
V_33 = Vertex(name = "V_33", particles = [Particles.ghZ, Particles.ghWp__tilde__, Particles.W__plus__], color = ["1"], lorentz = [LorentzIndices.UUV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_53))
V_34 = Vertex(name = "V_34", particles = [Particles.ghZ, Particles.ghZ__tilde__, Particles.H], color = ["1"], lorentz = [LorentzIndices.UUS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_80))
V_35 = Vertex(name = "V_35", particles = [Particles.ghG, Particles.ghG__tilde__, Particles.g], color = ["f(1,2,3)"], lorentz = [LorentzIndices.UUV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_10))
V_36 = Vertex(name = "V_36", particles = [Particles.g, Particles.g, Particles.g], color = ["f(1,2,3)"], lorentz = [LorentzIndices.VVV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_10))
V_37 = Vertex(name = "V_37", particles = [Particles.g, Particles.g, Particles.g, Particles.g], color = ["f(-1,1,2)*f(3,4,-1)", "f(-1,1,3)*f(2,4,-1)", "f(-1,1,4)*f(2,3,-1)"], lorentz = [LorentzIndices.VVVV1, LorentzIndices.VVVV3, LorentzIndices.VVVV4], couplings = Dict{Tuple{Int, Int}, Coupling}((1, 1) => Couplings.GC_12, (0, 0) => Couplings.GC_12, (2, 2) => Couplings.GC_12))
V_38 = Vertex(name = "V_38", particles = [Particles.a, Particles.W__minus__, Particles.G0, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.VVSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_55))
V_39 = Vertex(name = "V_39", particles = [Particles.a, Particles.W__minus__, Particles.G__plus__, Particles.H], color = ["1"], lorentz = [LorentzIndices.VVSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_54))
V_40 = Vertex(name = "V_40", particles = [Particles.a, Particles.W__minus__, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.VVS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_74))
V_41 = Vertex(name = "V_41", particles = [Particles.W__minus__, Particles.G0, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.VSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_39))
V_42 = Vertex(name = "V_42", particles = [Particles.W__minus__, Particles.G__plus__, Particles.H], color = ["1"], lorentz = [LorentzIndices.VSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_37))
V_43 = Vertex(name = "V_43", particles = [Particles.a, Particles.W__minus__, Particles.W__plus__], color = ["1"], lorentz = [LorentzIndices.VVV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_4))
V_44 = Vertex(name = "V_44", particles = [Particles.a, Particles.W__plus__, Particles.G0, Particles.G__minus__], color = ["1"], lorentz = [LorentzIndices.VVSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_55))
V_45 = Vertex(name = "V_45", particles = [Particles.a, Particles.W__plus__, Particles.G__minus__, Particles.H], color = ["1"], lorentz = [LorentzIndices.VVSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_56))
V_46 = Vertex(name = "V_46", particles = [Particles.a, Particles.W__plus__, Particles.G__minus__], color = ["1"], lorentz = [LorentzIndices.VVS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_75))
V_47 = Vertex(name = "V_47", particles = [Particles.W__plus__, Particles.G0, Particles.G__minus__], color = ["1"], lorentz = [LorentzIndices.VSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_38))
V_48 = Vertex(name = "V_48", particles = [Particles.W__plus__, Particles.G__minus__, Particles.H], color = ["1"], lorentz = [LorentzIndices.VSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_37))
V_49 = Vertex(name = "V_49", particles = [Particles.W__minus__, Particles.W__plus__, Particles.G0, Particles.G0], color = ["1"], lorentz = [LorentzIndices.VVSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_34))
V_50 = Vertex(name = "V_50", particles = [Particles.W__minus__, Particles.W__plus__, Particles.G__minus__, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.VVSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_34))
V_51 = Vertex(name = "V_51", particles = [Particles.W__minus__, Particles.W__plus__, Particles.H, Particles.H], color = ["1"], lorentz = [LorentzIndices.VVSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_34))
V_52 = Vertex(name = "V_52", particles = [Particles.W__minus__, Particles.W__plus__, Particles.H], color = ["1"], lorentz = [LorentzIndices.VVS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_72))
V_53 = Vertex(name = "V_53", particles = [Particles.a, Particles.a, Particles.W__minus__, Particles.W__plus__], color = ["1"], lorentz = [LorentzIndices.VVVV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_5))
V_54 = Vertex(name = "V_54", particles = [Particles.W__minus__, Particles.W__plus__, Particles.Z], color = ["1"], lorentz = [LorentzIndices.VVV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_53))
V_55 = Vertex(name = "V_55", particles = [Particles.W__minus__, Particles.W__minus__, Particles.W__plus__, Particles.W__plus__], color = ["1"], lorentz = [LorentzIndices.VVVV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_35))
V_56 = Vertex(name = "V_56", particles = [Particles.a, Particles.Z, Particles.G__minus__, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.VVSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_63))
V_57 = Vertex(name = "V_57", particles = [Particles.Z, Particles.G0, Particles.H], color = ["1"], lorentz = [LorentzIndices.VSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_60))
V_58 = Vertex(name = "V_58", particles = [Particles.Z, Particles.G__minus__, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.VSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_61))
V_59 = Vertex(name = "V_59", particles = [Particles.W__minus__, Particles.Z, Particles.G0, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.VVSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_8))
V_60 = Vertex(name = "V_60", particles = [Particles.W__minus__, Particles.Z, Particles.G__plus__, Particles.H], color = ["1"], lorentz = [LorentzIndices.VVSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_9))
V_61 = Vertex(name = "V_61", particles = [Particles.W__minus__, Particles.Z, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.VVS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_67))
V_62 = Vertex(name = "V_62", particles = [Particles.W__plus__, Particles.Z, Particles.G0, Particles.G__minus__], color = ["1"], lorentz = [LorentzIndices.VVSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_8))
V_63 = Vertex(name = "V_63", particles = [Particles.W__plus__, Particles.Z, Particles.G__minus__, Particles.H], color = ["1"], lorentz = [LorentzIndices.VVSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_7))
V_64 = Vertex(name = "V_64", particles = [Particles.W__plus__, Particles.Z, Particles.G__minus__], color = ["1"], lorentz = [LorentzIndices.VVS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_66))
V_65 = Vertex(name = "V_65", particles = [Particles.a, Particles.W__minus__, Particles.W__plus__, Particles.Z], color = ["1"], lorentz = [LorentzIndices.VVVV5], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_57))
V_66 = Vertex(name = "V_66", particles = [Particles.Z, Particles.Z, Particles.G0, Particles.G0], color = ["1"], lorentz = [LorentzIndices.VVSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_65))
V_67 = Vertex(name = "V_67", particles = [Particles.Z, Particles.Z, Particles.G__minus__, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.VVSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_64))
V_68 = Vertex(name = "V_68", particles = [Particles.Z, Particles.Z, Particles.H, Particles.H], color = ["1"], lorentz = [LorentzIndices.VVSS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_65))
V_69 = Vertex(name = "V_69", particles = [Particles.Z, Particles.Z, Particles.H], color = ["1"], lorentz = [LorentzIndices.VVS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_81))
V_70 = Vertex(name = "V_70", particles = [Particles.W__minus__, Particles.W__plus__, Particles.Z, Particles.Z], color = ["1"], lorentz = [LorentzIndices.VVVV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_36))
V_71 = Vertex(name = "V_71", particles = [Particles.d__tilde__, Particles.d, Particles.a], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_1))
V_72 = Vertex(name = "V_72", particles = [Particles.s__tilde__, Particles.s, Particles.a], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_1))
V_73 = Vertex(name = "V_73", particles = [Particles.b__tilde__, Particles.b, Particles.a], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_1))
V_74 = Vertex(name = "V_74", particles = [Particles.d__tilde__, Particles.d, Particles.g], color = ["T(3,2,1)"], lorentz = [LorentzIndices.FFV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_11))
V_75 = Vertex(name = "V_75", particles = [Particles.s__tilde__, Particles.s, Particles.g], color = ["T(3,2,1)"], lorentz = [LorentzIndices.FFV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_11))
V_76 = Vertex(name = "V_76", particles = [Particles.b__tilde__, Particles.b, Particles.g], color = ["T(3,2,1)"], lorentz = [LorentzIndices.FFV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_11))
V_77 = Vertex(name = "V_77", particles = [Particles.b__tilde__, Particles.b, Particles.G0], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_82))
V_78 = Vertex(name = "V_78", particles = [Particles.b__tilde__, Particles.b, Particles.H], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS4], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_83))
V_79 = Vertex(name = "V_79", particles = [Particles.d__tilde__, Particles.d, Particles.Z], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2, LorentzIndices.FFV3], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_50, (0, 1) => Couplings.GC_58))
V_80 = Vertex(name = "V_80", particles = [Particles.s__tilde__, Particles.s, Particles.Z], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2, LorentzIndices.FFV3], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_50, (0, 1) => Couplings.GC_58))
V_81 = Vertex(name = "V_81", particles = [Particles.b__tilde__, Particles.b, Particles.Z], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2, LorentzIndices.FFV3], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 1) => Couplings.GC_58, (0, 0) => Couplings.GC_50))
V_82 = Vertex(name = "V_82", particles = [Particles.c__tilde__, Particles.d, Particles.G__plus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS3], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_16))
V_83 = Vertex(name = "V_83", particles = [Particles.t__tilde__, Particles.d, Particles.G__plus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS3], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_17))
V_84 = Vertex(name = "V_84", particles = [Particles.c__tilde__, Particles.s, Particles.G__plus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS3], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_18))
V_85 = Vertex(name = "V_85", particles = [Particles.t__tilde__, Particles.s, Particles.G__plus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS3], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_19))
V_86 = Vertex(name = "V_86", particles = [Particles.u__tilde__, Particles.b, Particles.G__plus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_13))
V_87 = Vertex(name = "V_87", particles = [Particles.c__tilde__, Particles.b, Particles.G__plus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS1, LorentzIndices.FFS3], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_14, (0, 1) => Couplings.GC_20))
V_88 = Vertex(name = "V_88", particles = [Particles.t__tilde__, Particles.b, Particles.G__plus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS1, LorentzIndices.FFS3], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_15, (0, 1) => Couplings.GC_21))
V_89 = Vertex(name = "V_89", particles = [Particles.u__tilde__, Particles.d, Particles.W__plus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_100))
V_90 = Vertex(name = "V_90", particles = [Particles.c__tilde__, Particles.d, Particles.W__plus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_103))
V_91 = Vertex(name = "V_91", particles = [Particles.t__tilde__, Particles.d, Particles.W__plus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_106))
V_92 = Vertex(name = "V_92", particles = [Particles.u__tilde__, Particles.s, Particles.W__plus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_101))
V_93 = Vertex(name = "V_93", particles = [Particles.c__tilde__, Particles.s, Particles.W__plus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_104))
V_94 = Vertex(name = "V_94", particles = [Particles.t__tilde__, Particles.s, Particles.W__plus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_107))
V_95 = Vertex(name = "V_95", particles = [Particles.u__tilde__, Particles.b, Particles.W__plus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_102))
V_96 = Vertex(name = "V_96", particles = [Particles.c__tilde__, Particles.b, Particles.W__plus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_105))
V_97 = Vertex(name = "V_97", particles = [Particles.t__tilde__, Particles.b, Particles.W__plus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_108))
V_98 = Vertex(name = "V_98", particles = [Particles.e__plus__, Particles.e__minus__, Particles.a], color = ["1"], lorentz = [LorentzIndices.FFV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_3))
V_99 = Vertex(name = "V_99", particles = [Particles.mu__plus__, Particles.mu__minus__, Particles.a], color = ["1"], lorentz = [LorentzIndices.FFV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_3))
V_100 = Vertex(name = "V_100", particles = [Particles.ta__plus__, Particles.ta__minus__, Particles.a], color = ["1"], lorentz = [LorentzIndices.FFV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_3))
V_101 = Vertex(name = "V_101", particles = [Particles.e__plus__, Particles.e__minus__, Particles.G0], color = ["1"], lorentz = [LorentzIndices.FFS2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_88))
V_102 = Vertex(name = "V_102", particles = [Particles.mu__plus__, Particles.mu__minus__, Particles.G0], color = ["1"], lorentz = [LorentzIndices.FFS2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_92))
V_103 = Vertex(name = "V_103", particles = [Particles.ta__plus__, Particles.ta__minus__, Particles.G0], color = ["1"], lorentz = [LorentzIndices.FFS2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_98))
V_104 = Vertex(name = "V_104", particles = [Particles.e__plus__, Particles.e__minus__, Particles.H], color = ["1"], lorentz = [LorentzIndices.FFS4], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_89))
V_105 = Vertex(name = "V_105", particles = [Particles.mu__plus__, Particles.mu__minus__, Particles.H], color = ["1"], lorentz = [LorentzIndices.FFS4], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_93))
V_106 = Vertex(name = "V_106", particles = [Particles.ta__plus__, Particles.ta__minus__, Particles.H], color = ["1"], lorentz = [LorentzIndices.FFS4], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_99))
V_107 = Vertex(name = "V_107", particles = [Particles.e__plus__, Particles.e__minus__, Particles.Z], color = ["1"], lorentz = [LorentzIndices.FFV2, LorentzIndices.FFV4], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_50, (0, 1) => Couplings.GC_59))
V_108 = Vertex(name = "V_108", particles = [Particles.mu__plus__, Particles.mu__minus__, Particles.Z], color = ["1"], lorentz = [LorentzIndices.FFV2, LorentzIndices.FFV4], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_50, (0, 1) => Couplings.GC_59))
V_109 = Vertex(name = "V_109", particles = [Particles.ta__plus__, Particles.ta__minus__, Particles.Z], color = ["1"], lorentz = [LorentzIndices.FFV2, LorentzIndices.FFV4], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_50, (0, 1) => Couplings.GC_59))
V_110 = Vertex(name = "V_110", particles = [Particles.ve__tilde__, Particles.e__minus__, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.FFS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_87))
V_111 = Vertex(name = "V_111", particles = [Particles.vm__tilde__, Particles.mu__minus__, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.FFS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_91))
V_112 = Vertex(name = "V_112", particles = [Particles.vt__tilde__, Particles.ta__minus__, Particles.G__plus__], color = ["1"], lorentz = [LorentzIndices.FFS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_97))
V_113 = Vertex(name = "V_113", particles = [Particles.ve__tilde__, Particles.e__minus__, Particles.W__plus__], color = ["1"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_40))
V_114 = Vertex(name = "V_114", particles = [Particles.vm__tilde__, Particles.mu__minus__, Particles.W__plus__], color = ["1"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_40))
V_115 = Vertex(name = "V_115", particles = [Particles.vt__tilde__, Particles.ta__minus__, Particles.W__plus__], color = ["1"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_40))
V_116 = Vertex(name = "V_116", particles = [Particles.b__tilde__, Particles.u, Particles.G__minus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS3], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_28))
V_117 = Vertex(name = "V_117", particles = [Particles.d__tilde__, Particles.c, Particles.G__minus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_22))
V_118 = Vertex(name = "V_118", particles = [Particles.s__tilde__, Particles.c, Particles.G__minus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_23))
V_119 = Vertex(name = "V_119", particles = [Particles.b__tilde__, Particles.c, Particles.G__minus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS1, LorentzIndices.FFS3], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_24, (0, 1) => Couplings.GC_29))
V_120 = Vertex(name = "V_120", particles = [Particles.d__tilde__, Particles.t, Particles.G__minus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_25))
V_121 = Vertex(name = "V_121", particles = [Particles.s__tilde__, Particles.t, Particles.G__minus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_26))
V_122 = Vertex(name = "V_122", particles = [Particles.b__tilde__, Particles.t, Particles.G__minus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS1, LorentzIndices.FFS3], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_27, (0, 1) => Couplings.GC_30))
V_123 = Vertex(name = "V_123", particles = [Particles.d__tilde__, Particles.u, Particles.W__minus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_41))
V_124 = Vertex(name = "V_124", particles = [Particles.s__tilde__, Particles.u, Particles.W__minus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_42))
V_125 = Vertex(name = "V_125", particles = [Particles.b__tilde__, Particles.u, Particles.W__minus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_43))
V_126 = Vertex(name = "V_126", particles = [Particles.d__tilde__, Particles.c, Particles.W__minus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_44))
V_127 = Vertex(name = "V_127", particles = [Particles.s__tilde__, Particles.c, Particles.W__minus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_45))
V_128 = Vertex(name = "V_128", particles = [Particles.b__tilde__, Particles.c, Particles.W__minus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_46))
V_129 = Vertex(name = "V_129", particles = [Particles.d__tilde__, Particles.t, Particles.W__minus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_47))
V_130 = Vertex(name = "V_130", particles = [Particles.s__tilde__, Particles.t, Particles.W__minus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_48))
# Auto-generated interaction-vertex table (UFO model export) — continued.
# Each Vertex records: the participating particles, color-structure strings
# (presumably SU(3) color factors such as "Identity(1,2)"/"T(3,2,1)" — cf. the
# UFO format; TODO confirm), the Lorentz structure(s) used, and a map from
# (color index, lorentz index) pairs to coupling constants.
# NOTE(review): generated data mirroring the upstream UFO model — do not hand-edit.
V_131 = Vertex(name = "V_131", particles = [Particles.b__tilde__, Particles.t, Particles.W__minus__], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_49))
V_132 = Vertex(name = "V_132", particles = [Particles.u__tilde__, Particles.u, Particles.a], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_2))
V_133 = Vertex(name = "V_133", particles = [Particles.c__tilde__, Particles.c, Particles.a], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_2))
V_134 = Vertex(name = "V_134", particles = [Particles.t__tilde__, Particles.t, Particles.a], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_2))
V_135 = Vertex(name = "V_135", particles = [Particles.u__tilde__, Particles.u, Particles.g], color = ["T(3,2,1)"], lorentz = [LorentzIndices.FFV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_11))
V_136 = Vertex(name = "V_136", particles = [Particles.c__tilde__, Particles.c, Particles.g], color = ["T(3,2,1)"], lorentz = [LorentzIndices.FFV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_11))
V_137 = Vertex(name = "V_137", particles = [Particles.t__tilde__, Particles.t, Particles.g], color = ["T(3,2,1)"], lorentz = [LorentzIndices.FFV1], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_11))
V_138 = Vertex(name = "V_138", particles = [Particles.c__tilde__, Particles.c, Particles.G0], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_85))
V_139 = Vertex(name = "V_139", particles = [Particles.t__tilde__, Particles.t, Particles.G0], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_95))
V_140 = Vertex(name = "V_140", particles = [Particles.c__tilde__, Particles.c, Particles.H], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS4], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_84))
V_141 = Vertex(name = "V_141", particles = [Particles.t__tilde__, Particles.t, Particles.H], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFS4], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_94))
V_142 = Vertex(name = "V_142", particles = [Particles.u__tilde__, Particles.u, Particles.Z], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2, LorentzIndices.FFV5], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_51, (0, 1) => Couplings.GC_58))
V_143 = Vertex(name = "V_143", particles = [Particles.c__tilde__, Particles.c, Particles.Z], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2, LorentzIndices.FFV5], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_51, (0, 1) => Couplings.GC_58))
V_144 = Vertex(name = "V_144", particles = [Particles.t__tilde__, Particles.t, Particles.Z], color = ["Identity(1,2)"], lorentz = [LorentzIndices.FFV2, LorentzIndices.FFV5], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_51, (0, 1) => Couplings.GC_58))
V_145 = Vertex(name = "V_145", particles = [Particles.e__plus__, Particles.ve, Particles.G__minus__], color = ["1"], lorentz = [LorentzIndices.FFS3], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_86))
V_146 = Vertex(name = "V_146", particles = [Particles.mu__plus__, Particles.vm, Particles.G__minus__], color = ["1"], lorentz = [LorentzIndices.FFS3], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_90))
V_147 = Vertex(name = "V_147", particles = [Particles.ta__plus__, Particles.vt, Particles.G__minus__], color = ["1"], lorentz = [LorentzIndices.FFS3], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_96))
V_148 = Vertex(name = "V_148", particles = [Particles.e__plus__, Particles.ve, Particles.W__minus__], color = ["1"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_40))
V_149 = Vertex(name = "V_149", particles = [Particles.mu__plus__, Particles.vm, Particles.W__minus__], color = ["1"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_40))
V_150 = Vertex(name = "V_150", particles = [Particles.ta__plus__, Particles.vt, Particles.W__minus__], color = ["1"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_40))
V_151 = Vertex(name = "V_151", particles = [Particles.ve__tilde__, Particles.ve, Particles.Z], color = ["1"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_62))
V_152 = Vertex(name = "V_152", particles = [Particles.vm__tilde__, Particles.vm, Particles.Z], color = ["1"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_62))
V_153 = Vertex(name = "V_153", particles = [Particles.vt__tilde__, Particles.vt, Particles.Z], color = ["1"], lorentz = [LorentzIndices.FFV2], couplings = Dict{Tuple{Int, Int}, Coupling}((0, 0) => Couplings.GC_62))
# NamedTuple collecting every vertex defined above under its own name, so a
# vertex can be looked up by property access (e.g. `all_vertices.V_42`) or
# iterated as a collection. Uses the `(; x, y, ...)` shorthand, which binds
# each field to the variable of the same name — identical to writing
# `V_1 = V_1, V_2 = V_2, ...` explicitly.
all_vertices = (;
    V_1, V_2, V_3, V_4, V_5, V_6, V_7, V_8, V_9, V_10,
    V_11, V_12, V_13, V_14, V_15, V_16, V_17, V_18, V_19, V_20,
    V_21, V_22, V_23, V_24, V_25, V_26, V_27, V_28, V_29, V_30,
    V_31, V_32, V_33, V_34, V_35, V_36, V_37, V_38, V_39, V_40,
    V_41, V_42, V_43, V_44, V_45, V_46, V_47, V_48, V_49, V_50,
    V_51, V_52, V_53, V_54, V_55, V_56, V_57, V_58, V_59, V_60,
    V_61, V_62, V_63, V_64, V_65, V_66, V_67, V_68, V_69, V_70,
    V_71, V_72, V_73, V_74, V_75, V_76, V_77, V_78, V_79, V_80,
    V_81, V_82, V_83, V_84, V_85, V_86, V_87, V_88, V_89, V_90,
    V_91, V_92, V_93, V_94, V_95, V_96, V_97, V_98, V_99, V_100,
    V_101, V_102, V_103, V_104, V_105, V_106, V_107, V_108, V_109, V_110,
    V_111, V_112, V_113, V_114, V_115, V_116, V_117, V_118, V_119, V_120,
    V_121, V_122, V_123, V_124, V_125, V_126, V_127, V_128, V_129, V_130,
    V_131, V_132, V_133, V_134, V_135, V_136, V_137, V_138, V_139, V_140,
    V_141, V_142, V_143, V_144, V_145, V_146, V_147, V_148, V_149, V_150,
    V_151, V_152, V_153,
)
end # Vertices | UniversalFeynRulesOutput | https://github.com/Fenyutanchan/UniversalFeynRulesOutput.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e314a58dbc369aba96b4ea0991145ee0d3b671e | docs | 637 | # UniversalFeynRulesOutput.jl: A Julia Package for Parsing Universal Feynrules Output (UFO) Format without Python Calls.
## Usage
There is only one API function `convert_model` for converting the UFO models.
```julia
using UniversalFeynRulesOutput
convert_model( "/path/to/model/" )
```
The directory `/path/to/model.jl` will then be created automatically; it contains the Julia module for the UFO model.
## Python Object to Julia Struct
`ext/objects.jl` contains the definitions of the Julia structs such as `Parameter`, `Particle`, etc.
This file will be automatically copied to the Julia UFO model folder when the converting begins.
| UniversalFeynRulesOutput | https://github.com/Fenyutanchan/UniversalFeynRulesOutput.jl.git |
|
[
"MIT"
] | 0.2.5 | 5f7fc2ce60d4540ffc1de3f102d9b5e00d3ad654 | code | 851 | using AztecDiamonds
using Documenter
DocMeta.setdocmeta!(AztecDiamonds, :DocTestSetup, :(using AztecDiamonds); recursive = true)
makedocs(;
modules = [AztecDiamonds],
authors = "Simeon David Schaub <[email protected]> and contributors",
repo = Remotes.GitHub("JuliaLabs", "AztecDiamonds.jl"),
sitename = "AztecDiamonds.jl",
format = Documenter.HTML(;
prettyurls = get(ENV, "CI", "false") == "true",
canonical = "https://julia.mit.edu/AztecDiamonds.jl",
edit_link = "main",
assets = String[],
),
pages = [
"Home" => "index.md",
#"Examples" => [
# "Basics" => "https://julia.mit.edu/AztecDiamonds.jl/examples/dev/notebook.html",
#],
],
)
deploydocs(;
repo = "github.com/JuliaLabs/AztecDiamonds.jl",
devbranch = "main",
push_preview = true,
)
| AztecDiamonds | https://github.com/JuliaLabs/AztecDiamonds.jl.git |
|
[
"MIT"
] | 0.2.5 | 5f7fc2ce60d4540ffc1de3f102d9b5e00d3ad654 | code | 60905 | ### A Pluto.jl notebook ###
# v0.19.42
using Markdown
using InteractiveUtils
# ╔═╡ a609b8a8-04ac-4533-9a33-61ea33805846
begin
    using AztecDiamonds, CairoMakie
    # Switch CairoMakie to SVG (vector) output so figures render crisply in the notebook
    CairoMakie.activate!(type = "svg")
end
# ╔═╡ 84f88e89-c55e-41ba-97ad-fd561458c7e9
N = 200
# ╔═╡ ecde5a72-691b-4a9a-b0a8-2b740e42a710
D = diamond(N)
# ╔═╡ 1cf94d6d-a0bc-474b-b479-5b4f4c916ea5
# Plot the tiling D with its DR-path overlaid as a labelled orange line.
let
    f = Figure()
    ax = Axis(f[1, 1]; aspect = 1) # square axis so the diamond is not distorted
    plot!(ax, D; domino_padding = 0) # draw the dominoes with padding disabled
    # dr_path(D) returns an offset array over -N:N; parent(...) strips the offset
    # so it lines up with the explicit x-coordinates passed as -N:N
    lines!(ax, -N:N, parent(dr_path(D)); linewidth = 3, label = "DR-path", color = :orange)
    axislegend(ax)
    f # return the figure so Pluto displays it as the cell output
end
# ╔═╡ ab0968e2-43c7-4610-87ba-47433c003081
using CUDA
# ╔═╡ 8bb0983b-103e-4cf8-9a9f-95feb90df054
ka_diamond(2000, CuArray)
# ╔═╡ 00000000-0000-0000-0000-000000000001
# Auto-generated by Pluto: the notebook's embedded package environment
# (Project.toml). Managed by Pluto — do not edit by hand.
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
AztecDiamonds = "8762d9c5-fcab-4007-8fd1-c6de73397726"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0"

[compat]
AztecDiamonds = "~0.2.0"
CUDA = "~5.4.3"
CairoMakie = "~0.12.11"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
julia_version = "1.10.5"
manifest_format = "2.0"
project_hash = "a132932df0b10634a98f998db04e15bd0c26ad9e"
[[deps.AbstractFFTs]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "d92ad398961a3ed262d8bf04a1a2b8340f915fef"
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
version = "1.5.0"
weakdeps = ["ChainRulesCore", "Test"]
[deps.AbstractFFTs.extensions]
AbstractFFTsChainRulesCoreExt = "ChainRulesCore"
AbstractFFTsTestExt = "Test"
[[deps.AbstractTrees]]
git-tree-sha1 = "2d9c9a55f9c93e8887ad391fbae72f8ef55e1177"
uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
version = "0.4.5"
[[deps.Accessors]]
deps = ["CompositionsBase", "ConstructionBase", "InverseFunctions", "LinearAlgebra", "MacroTools", "Markdown"]
git-tree-sha1 = "b392ede862e506d451fc1616e79aa6f4c673dab8"
uuid = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
version = "0.1.38"
[deps.Accessors.extensions]
AccessorsAxisKeysExt = "AxisKeys"
AccessorsDatesExt = "Dates"
AccessorsIntervalSetsExt = "IntervalSets"
AccessorsStaticArraysExt = "StaticArrays"
AccessorsStructArraysExt = "StructArrays"
AccessorsTestExt = "Test"
AccessorsUnitfulExt = "Unitful"
[deps.Accessors.weakdeps]
AxisKeys = "94b1ba4f-4ee9-5380-92f1-94cde586c3c5"
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
IntervalSets = "8197267c-284f-5f27-9208-e0e47529a953"
Requires = "ae029012-a4dd-5104-9daa-d747884805df"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d"
[[deps.Adapt]]
deps = ["LinearAlgebra", "Requires"]
git-tree-sha1 = "6a55b747d1812e699320963ffde36f1ebdda4099"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "4.0.4"
weakdeps = ["StaticArrays"]
[deps.Adapt.extensions]
AdaptStaticArraysExt = "StaticArrays"
[[deps.AdaptivePredicates]]
git-tree-sha1 = "7e651ea8d262d2d74ce75fdf47c4d63c07dba7a6"
uuid = "35492f91-a3bd-45ad-95db-fcad7dcfedb7"
version = "1.2.0"
[[deps.AliasTables]]
deps = ["PtrArrays", "Random"]
git-tree-sha1 = "9876e1e164b144ca45e9e3198d0b689cadfed9ff"
uuid = "66dad0bd-aa9a-41b7-9441-69ab47430ed8"
version = "1.1.3"
[[deps.Animations]]
deps = ["Colors"]
git-tree-sha1 = "e81c509d2c8e49592413bfb0bb3b08150056c79d"
uuid = "27a7e980-b3e6-11e9-2bcd-0b925532e340"
version = "0.4.1"
[[deps.ArgCheck]]
git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4"
uuid = "dce04be8-c92d-5529-be00-80e4d2c0e197"
version = "2.3.0"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
version = "1.1.1"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[deps.Atomix]]
deps = ["UnsafeAtomics"]
git-tree-sha1 = "c06a868224ecba914baa6942988e2f2aade419be"
uuid = "a9b6321e-bd34-4604-b9c9-b65b8de01458"
version = "0.1.0"
[[deps.Automa]]
deps = ["PrecompileTools", "TranscodingStreams"]
git-tree-sha1 = "014bc22d6c400a7703c0f5dc1fdc302440cf88be"
uuid = "67c07d97-cdcb-5c2c-af73-a7f9c32a568b"
version = "1.0.4"
[[deps.AxisAlgorithms]]
deps = ["LinearAlgebra", "Random", "SparseArrays", "WoodburyMatrices"]
git-tree-sha1 = "01b8ccb13d68535d73d2b0c23e39bd23155fb712"
uuid = "13072b0f-2c55-5437-9ae7-d433b7a33950"
version = "1.1.0"
[[deps.AxisArrays]]
deps = ["Dates", "IntervalSets", "IterTools", "RangeArrays"]
git-tree-sha1 = "16351be62963a67ac4083f748fdb3cca58bfd52f"
uuid = "39de3d68-74b9-583c-8d2d-e117c070f3a9"
version = "0.4.7"
[[deps.AztecDiamonds]]
deps = ["Adapt", "Colors", "GeometryBasics", "ImageIO", "ImageShow", "KernelAbstractions", "MakieCore", "OffsetArrays", "Transducers"]
path = "../../../home/simeon/Nextcloud/Documents/Research/AztecDiamonds"
uuid = "8762d9c5-fcab-4007-8fd1-c6de73397726"
version = "0.2.0"
[[deps.BFloat16s]]
deps = ["LinearAlgebra", "Printf", "Random", "Test"]
git-tree-sha1 = "2c7cc21e8678eff479978a0a2ef5ce2f51b63dff"
uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b"
version = "0.5.0"
[[deps.BangBang]]
deps = ["Accessors", "ConstructionBase", "InitialValues", "LinearAlgebra", "Requires"]
git-tree-sha1 = "e2144b631226d9eeab2d746ca8880b7ccff504ae"
uuid = "198e06fe-97b7-11e9-32a5-e1d131e6ad66"
version = "0.4.3"
[deps.BangBang.extensions]
BangBangChainRulesCoreExt = "ChainRulesCore"
BangBangDataFramesExt = "DataFrames"
BangBangStaticArraysExt = "StaticArrays"
BangBangStructArraysExt = "StructArrays"
BangBangTablesExt = "Tables"
BangBangTypedTablesExt = "TypedTables"
[deps.BangBang.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
Tables = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
TypedTables = "9d95f2ec-7b3d-5a63-8d20-e2491e220bb9"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.Baselet]]
git-tree-sha1 = "aebf55e6d7795e02ca500a689d326ac979aaf89e"
uuid = "9718e550-a3fa-408a-8086-8db961cd8217"
version = "0.1.1"
[[deps.Bzip2_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "9e2a6b69137e6969bab0152632dcb3bc108c8bdd"
uuid = "6e34b625-4abd-537c-b88f-471c36dfa7a0"
version = "1.0.8+1"
[[deps.CEnum]]
git-tree-sha1 = "389ad5c84de1ae7cf0e28e381131c98ea87d54fc"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.5.0"
[[deps.CRC32c]]
uuid = "8bf52ea8-c179-5cab-976a-9e18b702a9bc"
[[deps.CRlibm_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "e329286945d0cfc04456972ea732551869af1cfc"
uuid = "4e9b3aee-d8a1-5a3d-ad8b-7d824db253f0"
version = "1.0.1+0"
[[deps.CUDA]]
deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CUDA_Driver_jll", "CUDA_Runtime_Discovery", "CUDA_Runtime_jll", "Crayons", "DataFrames", "ExprTools", "GPUArrays", "GPUCompiler", "KernelAbstractions", "LLVM", "LLVMLoopInfo", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "NVTX", "Preferences", "PrettyTables", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "StaticArrays", "Statistics"]
git-tree-sha1 = "fdd9dfb67dfefd548f51000cc400bb51003de247"
uuid = "052768ef-5323-5732-b1bb-66c8b64840ba"
version = "5.4.3"
[deps.CUDA.extensions]
ChainRulesCoreExt = "ChainRulesCore"
EnzymeCoreExt = "EnzymeCore"
SpecialFunctionsExt = "SpecialFunctions"
[deps.CUDA.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
EnzymeCore = "f151be2c-9106-41f4-ab19-57ee4f262869"
SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b"
[[deps.CUDA_Driver_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "325058b426c2b421e3d2df3d5fa646d72d2e3e7e"
uuid = "4ee394cb-3365-5eb0-8335-949819d2adfc"
version = "0.9.2+0"
[[deps.CUDA_Runtime_Discovery]]
deps = ["Libdl"]
git-tree-sha1 = "33576c7c1b2500f8e7e6baa082e04563203b3a45"
uuid = "1af6417a-86b4-443c-805f-a4643ffb695f"
version = "0.3.5"
[[deps.CUDA_Runtime_jll]]
deps = ["Artifacts", "CUDA_Driver_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
git-tree-sha1 = "afea94249b821dc754a8ca6695d3daed851e1f5a"
uuid = "76a88914-d11a-5bdc-97e0-2f5a05c973a2"
version = "0.14.1+0"
[[deps.Cairo]]
deps = ["Cairo_jll", "Colors", "Glib_jll", "Graphics", "Libdl", "Pango_jll"]
git-tree-sha1 = "7b6ad8c35f4bc3bca8eb78127c8b99719506a5fb"
uuid = "159f3aea-2a34-519c-b102-8c37f9878175"
version = "1.1.0"
[[deps.CairoMakie]]
deps = ["CRC32c", "Cairo", "Cairo_jll", "Colors", "FileIO", "FreeType", "GeometryBasics", "LinearAlgebra", "Makie", "PrecompileTools"]
git-tree-sha1 = "4f827b38d3d9ffe6e3b01fbcf866c625fa259ca5"
uuid = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0"
version = "0.12.11"
[[deps.Cairo_jll]]
deps = ["Artifacts", "Bzip2_jll", "CompilerSupportLibraries_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"]
git-tree-sha1 = "a2f1c8c668c8e3cb4cca4e57a8efdb09067bb3fd"
uuid = "83423d85-b0ee-5818-9007-b63ccbeb887a"
version = "1.18.0+2"
[[deps.ChainRulesCore]]
deps = ["Compat", "LinearAlgebra"]
git-tree-sha1 = "71acdbf594aab5bbb2cec89b208c41b4c411e49f"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.24.0"
weakdeps = ["SparseArrays"]
[deps.ChainRulesCore.extensions]
ChainRulesCoreSparseArraysExt = "SparseArrays"
[[deps.ColorBrewer]]
deps = ["Colors", "JSON", "Test"]
git-tree-sha1 = "61c5334f33d91e570e1d0c3eb5465835242582c4"
uuid = "a2cac450-b92f-5266-8821-25eda20663c8"
version = "0.4.0"
[[deps.ColorSchemes]]
deps = ["ColorTypes", "ColorVectorSpace", "Colors", "FixedPointNumbers", "PrecompileTools", "Random"]
git-tree-sha1 = "b5278586822443594ff615963b0c09755771b3e0"
uuid = "35d6a980-a343-548e-a6ea-1d62b119f2f4"
version = "3.26.0"
[[deps.ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "b10d0b65641d57b8b4d5e234446582de5047050d"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.11.5"
[[deps.ColorVectorSpace]]
deps = ["ColorTypes", "FixedPointNumbers", "LinearAlgebra", "Requires", "Statistics", "TensorCore"]
git-tree-sha1 = "a1f44953f2382ebb937d60dafbe2deea4bd23249"
uuid = "c3611d14-8923-5661-9e6a-0046d554d3a4"
version = "0.10.0"
weakdeps = ["SpecialFunctions"]
[deps.ColorVectorSpace.extensions]
SpecialFunctionsExt = "SpecialFunctions"
[[deps.Colors]]
deps = ["ColorTypes", "FixedPointNumbers", "Reexport"]
git-tree-sha1 = "362a287c3aa50601b0bc359053d5c2468f0e7ce0"
uuid = "5ae59095-9a9b-59fe-a467-6f913c188581"
version = "0.12.11"
[[deps.Compat]]
deps = ["TOML", "UUIDs"]
git-tree-sha1 = "8ae8d32e09f0dcf42a36b90d4e17f5dd2e4c4215"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "4.16.0"
weakdeps = ["Dates", "LinearAlgebra"]
[deps.Compat.extensions]
CompatLinearAlgebraExt = "LinearAlgebra"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
version = "1.1.1+0"
[[deps.CompositionsBase]]
git-tree-sha1 = "802bb88cd69dfd1509f6670416bd4434015693ad"
uuid = "a33af91c-f02d-484b-be07-31d278c5ca2b"
version = "0.1.2"
weakdeps = ["InverseFunctions"]
[deps.CompositionsBase.extensions]
CompositionsBaseInverseFunctionsExt = "InverseFunctions"
[[deps.ConstructionBase]]
git-tree-sha1 = "76219f1ed5771adbb096743bff43fb5fdd4c1157"
uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9"
version = "1.5.8"
weakdeps = ["IntervalSets", "LinearAlgebra", "StaticArrays"]
[deps.ConstructionBase.extensions]
ConstructionBaseIntervalSetsExt = "IntervalSets"
ConstructionBaseLinearAlgebraExt = "LinearAlgebra"
ConstructionBaseStaticArraysExt = "StaticArrays"
[[deps.Contour]]
git-tree-sha1 = "439e35b0b36e2e5881738abc8857bd92ad6ff9a8"
uuid = "d38c429a-6771-53c6-b99e-75d170b6e991"
version = "0.6.3"
[[deps.Crayons]]
git-tree-sha1 = "249fe38abf76d48563e2f4556bebd215aa317e15"
uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f"
version = "4.1.1"
[[deps.DataAPI]]
git-tree-sha1 = "abe83f3a2f1b857aac70ef8b269080af17764bbe"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.16.0"
[[deps.DataFrames]]
deps = ["Compat", "DataAPI", "DataStructures", "Future", "InlineStrings", "InvertedIndices", "IteratorInterfaceExtensions", "LinearAlgebra", "Markdown", "Missings", "PooledArrays", "PrecompileTools", "PrettyTables", "Printf", "REPL", "Random", "Reexport", "SentinelArrays", "SortingAlgorithms", "Statistics", "TableTraits", "Tables", "Unicode"]
git-tree-sha1 = "04c738083f29f86e62c8afc341f0967d8717bdb8"
uuid = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
version = "1.6.1"
[[deps.DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "1d0a14036acb104d9e89698bd408f63ab58cdc82"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.20"
[[deps.DataValueInterfaces]]
git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6"
uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464"
version = "1.0.0"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[deps.DefineSingletons]]
git-tree-sha1 = "0fba8b706d0178b4dc7fd44a96a92382c9065c2c"
uuid = "244e2a9f-e319-4986-a169-4d1fe445cd52"
version = "0.1.2"
[[deps.DelaunayTriangulation]]
deps = ["AdaptivePredicates", "EnumX", "ExactPredicates", "Random"]
git-tree-sha1 = "94eb20e6621600f4315813b1d1fc9b8a5a6a34db"
uuid = "927a84f5-c5f4-47a5-9785-b46e178433df"
version = "1.4.0"
[[deps.Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[deps.Distributions]]
deps = ["AliasTables", "FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns"]
git-tree-sha1 = "e6c693a0e4394f8fda0e51a5bdf5aef26f8235e9"
uuid = "31c24e10-a181-5473-b8eb-7969acd0382f"
version = "0.25.111"
[deps.Distributions.extensions]
DistributionsChainRulesCoreExt = "ChainRulesCore"
DistributionsDensityInterfaceExt = "DensityInterface"
DistributionsTestExt = "Test"
[deps.Distributions.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
DensityInterface = "b429d917-457f-4dbc-8f4c-0cc954292b1d"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[deps.DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "2fb1e02f2b635d0845df5d7c167fec4dd739b00d"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.9.3"
[[deps.Downloads]]
deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
version = "1.6.0"
[[deps.EarCut_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "e3290f2d49e661fbd94046d7e3726ffcb2d41053"
uuid = "5ae413db-bbd1-5e63-b57d-d24a61df00f5"
version = "2.2.4+0"
[[deps.EnumX]]
git-tree-sha1 = "bdb1942cd4c45e3c678fd11569d5cccd80976237"
uuid = "4e289a0a-7415-4d19-859d-a7e5c4648b56"
version = "1.0.4"
[[deps.ExactPredicates]]
deps = ["IntervalArithmetic", "Random", "StaticArrays"]
git-tree-sha1 = "b3f2ff58735b5f024c392fde763f29b057e4b025"
uuid = "429591f6-91af-11e9-00e2-59fbe8cec110"
version = "2.2.8"
[[deps.Expat_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "1c6317308b9dc757616f0b5cb379db10494443a7"
uuid = "2e619515-83b5-522b-bb60-26c02a35a201"
version = "2.6.2+0"
[[deps.ExprTools]]
git-tree-sha1 = "27415f162e6028e81c72b82ef756bf321213b6ec"
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
version = "0.1.10"
[[deps.Extents]]
git-tree-sha1 = "81023caa0021a41712685887db1fc03db26f41f5"
uuid = "411431e0-e8b7-467b-b5e0-f676ba4f2910"
version = "0.1.4"
[[deps.FFMPEG_jll]]
deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "LAME_jll", "Libdl", "Ogg_jll", "OpenSSL_jll", "Opus_jll", "PCRE2_jll", "Zlib_jll", "libaom_jll", "libass_jll", "libfdk_aac_jll", "libvorbis_jll", "x264_jll", "x265_jll"]
git-tree-sha1 = "8cc47f299902e13f90405ddb5bf87e5d474c0d38"
uuid = "b22a6f82-2f65-5046-a5b2-351ab43fb4e5"
version = "6.1.2+0"
[[deps.FFTW]]
deps = ["AbstractFFTs", "FFTW_jll", "LinearAlgebra", "MKL_jll", "Preferences", "Reexport"]
git-tree-sha1 = "4820348781ae578893311153d69049a93d05f39d"
uuid = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341"
version = "1.8.0"
[[deps.FFTW_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "c6033cc3892d0ef5bb9cd29b7f2f0331ea5184ea"
uuid = "f5851436-0d7a-5f13-b9de-f02708fd171a"
version = "3.3.10+0"
[[deps.FileIO]]
deps = ["Pkg", "Requires", "UUIDs"]
git-tree-sha1 = "82d8afa92ecf4b52d78d869f038ebfb881267322"
uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549"
version = "1.16.3"
[[deps.FilePaths]]
deps = ["FilePathsBase", "MacroTools", "Reexport", "Requires"]
git-tree-sha1 = "919d9412dbf53a2e6fe74af62a73ceed0bce0629"
uuid = "8fc22ac5-c921-52a6-82fd-178b2807b824"
version = "0.8.3"
[[deps.FilePathsBase]]
deps = ["Compat", "Dates"]
git-tree-sha1 = "7878ff7172a8e6beedd1dea14bd27c3c6340d361"
uuid = "48062228-2e41-5def-b9a4-89aafe57970f"
version = "0.9.22"
weakdeps = ["Mmap", "Test"]
[deps.FilePathsBase.extensions]
FilePathsBaseMmapExt = "Mmap"
FilePathsBaseTestExt = "Test"
[[deps.FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
[[deps.FillArrays]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "6a70198746448456524cb442b8af316927ff3e1a"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "1.13.0"
weakdeps = ["PDMats", "SparseArrays", "Statistics"]
[deps.FillArrays.extensions]
FillArraysPDMatsExt = "PDMats"
FillArraysSparseArraysExt = "SparseArrays"
FillArraysStatisticsExt = "Statistics"
[[deps.FixedPointNumbers]]
deps = ["Statistics"]
git-tree-sha1 = "05882d6995ae5c12bb5f36dd2ed3f61c98cbb172"
uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
version = "0.8.5"
[[deps.Fontconfig_jll]]
deps = ["Artifacts", "Bzip2_jll", "Expat_jll", "FreeType2_jll", "JLLWrappers", "Libdl", "Libuuid_jll", "Zlib_jll"]
git-tree-sha1 = "db16beca600632c95fc8aca29890d83788dd8b23"
uuid = "a3f928ae-7b40-5064-980b-68af3947d34b"
version = "2.13.96+0"
[[deps.Format]]
git-tree-sha1 = "9c68794ef81b08086aeb32eeaf33531668d5f5fc"
uuid = "1fa38f19-a742-5d3f-a2b9-30dd87b9d5f8"
version = "1.3.7"
[[deps.FreeType]]
deps = ["CEnum", "FreeType2_jll"]
git-tree-sha1 = "907369da0f8e80728ab49c1c7e09327bf0d6d999"
uuid = "b38be410-82b0-50bf-ab77-7b57e271db43"
version = "4.1.1"
[[deps.FreeType2_jll]]
deps = ["Artifacts", "Bzip2_jll", "JLLWrappers", "Libdl", "Zlib_jll"]
git-tree-sha1 = "5c1d8ae0efc6c2e7b1fc502cbe25def8f661b7bc"
uuid = "d7e528f0-a631-5988-bf34-fe36492bcfd7"
version = "2.13.2+0"
[[deps.FreeTypeAbstraction]]
deps = ["ColorVectorSpace", "Colors", "FreeType", "GeometryBasics"]
git-tree-sha1 = "2493cdfd0740015955a8e46de4ef28f49460d8bc"
uuid = "663a7486-cb36-511b-a19d-713bb74d65c9"
version = "0.10.3"
[[deps.FriBidi_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "1ed150b39aebcc805c26b93a8d0122c940f64ce2"
uuid = "559328eb-81f9-559d-9380-de523a88c83c"
version = "1.0.14+0"
[[deps.Future]]
deps = ["Random"]
uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820"
[[deps.GPUArrays]]
deps = ["Adapt", "GPUArraysCore", "LLVM", "LinearAlgebra", "Printf", "Random", "Reexport", "Serialization", "Statistics"]
git-tree-sha1 = "62ee71528cca49be797076a76bdc654a170a523e"
uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
version = "10.3.1"
[[deps.GPUArraysCore]]
deps = ["Adapt"]
git-tree-sha1 = "ec632f177c0d990e64d955ccc1b8c04c485a0950"
uuid = "46192b85-c4d5-4398-a991-12ede77f4527"
version = "0.1.6"
[[deps.GPUCompiler]]
deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "Preferences", "Scratch", "Serialization", "TOML", "TimerOutputs", "UUIDs"]
git-tree-sha1 = "ab29216184312f99ff957b32cd63c2fe9c928b91"
uuid = "61eb1bfa-7361-4325-ad38-22787b887f55"
version = "0.26.7"
[[deps.GeoFormatTypes]]
git-tree-sha1 = "59107c179a586f0fe667024c5eb7033e81333271"
uuid = "68eda718-8dee-11e9-39e7-89f7f65f511f"
version = "0.4.2"
[[deps.GeoInterface]]
deps = ["Extents", "GeoFormatTypes"]
git-tree-sha1 = "5921fc0704e40c024571eca551800c699f86ceb4"
uuid = "cf35fbd7-0cd7-5166-be24-54bfbe79505f"
version = "1.3.6"
[[deps.GeometryBasics]]
deps = ["EarCut_jll", "Extents", "GeoInterface", "IterTools", "LinearAlgebra", "StaticArrays", "StructArrays", "Tables"]
git-tree-sha1 = "b62f2b2d76cee0d61a2ef2b3118cd2a3215d3134"
uuid = "5c1252a2-5f33-56bf-86c9-59e7332b4326"
version = "0.4.11"
[[deps.Gettext_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "XML2_jll"]
git-tree-sha1 = "9b02998aba7bf074d14de89f9d37ca24a1a0b046"
uuid = "78b55507-aeef-58d4-861c-77aaff3498b1"
version = "0.21.0+0"
[[deps.Glib_jll]]
deps = ["Artifacts", "Gettext_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Libiconv_jll", "Libmount_jll", "PCRE2_jll", "Zlib_jll"]
git-tree-sha1 = "7c82e6a6cd34e9d935e9aa4051b66c6ff3af59ba"
uuid = "7746bdde-850d-59dc-9ae8-88ece973131d"
version = "2.80.2+0"
[[deps.Graphics]]
deps = ["Colors", "LinearAlgebra", "NaNMath"]
git-tree-sha1 = "d61890399bc535850c4bf08e4e0d3a7ad0f21cbd"
uuid = "a2bd30eb-e257-5431-a919-1863eab51364"
version = "1.1.2"
[[deps.Graphite2_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "344bf40dcab1073aca04aa0df4fb092f920e4011"
uuid = "3b182d85-2403-5c21-9c21-1e1f0cc25472"
version = "1.3.14+0"
[[deps.GridLayoutBase]]
deps = ["GeometryBasics", "InteractiveUtils", "Observables"]
git-tree-sha1 = "fc713f007cff99ff9e50accba6373624ddd33588"
uuid = "3955a311-db13-416c-9275-1d80ed98e5e9"
version = "0.11.0"
[[deps.Grisu]]
git-tree-sha1 = "53bb909d1151e57e2484c3d1b53e19552b887fb2"
uuid = "42e2da0e-8278-4e71-bc24-59509adca0fe"
version = "1.0.2"
[[deps.HarfBuzz_jll]]
deps = ["Artifacts", "Cairo_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "Graphite2_jll", "JLLWrappers", "Libdl", "Libffi_jll"]
git-tree-sha1 = "401e4f3f30f43af2c8478fc008da50096ea5240f"
uuid = "2e76f6c2-a576-52d4-95c1-20adfe4de566"
version = "8.3.1+0"
[[deps.HypergeometricFunctions]]
deps = ["LinearAlgebra", "OpenLibm_jll", "SpecialFunctions"]
git-tree-sha1 = "7c4195be1649ae622304031ed46a2f4df989f1eb"
uuid = "34004b35-14d8-5ef3-9330-4cdb6864b03a"
version = "0.3.24"
[[deps.ImageAxes]]
deps = ["AxisArrays", "ImageBase", "ImageCore", "Reexport", "SimpleTraits"]
git-tree-sha1 = "2e4520d67b0cef90865b3ef727594d2a58e0e1f8"
uuid = "2803e5a7-5153-5ecf-9a86-9b4c37f5f5ac"
version = "0.6.11"
[[deps.ImageBase]]
deps = ["ImageCore", "Reexport"]
git-tree-sha1 = "eb49b82c172811fd2c86759fa0553a2221feb909"
uuid = "c817782e-172a-44cc-b673-b171935fbb9e"
version = "0.1.7"
[[deps.ImageCore]]
deps = ["ColorVectorSpace", "Colors", "FixedPointNumbers", "MappedArrays", "MosaicViews", "OffsetArrays", "PaddedViews", "PrecompileTools", "Reexport"]
git-tree-sha1 = "b2a7eaa169c13f5bcae8131a83bc30eff8f71be0"
uuid = "a09fc81d-aa75-5fe9-8630-4744c3626534"
version = "0.10.2"
[[deps.ImageIO]]
deps = ["FileIO", "IndirectArrays", "JpegTurbo", "LazyModules", "Netpbm", "OpenEXR", "PNGFiles", "QOI", "Sixel", "TiffImages", "UUIDs"]
git-tree-sha1 = "437abb322a41d527c197fa800455f79d414f0a3c"
uuid = "82e4d734-157c-48bb-816b-45c225c6df19"
version = "0.6.8"
[[deps.ImageMetadata]]
deps = ["AxisArrays", "ImageAxes", "ImageBase", "ImageCore"]
git-tree-sha1 = "355e2b974f2e3212a75dfb60519de21361ad3cb7"
uuid = "bc367c6b-8a6b-528e-b4bd-a4b897500b49"
version = "0.9.9"
[[deps.ImageShow]]
deps = ["Base64", "ColorSchemes", "FileIO", "ImageBase", "ImageCore", "OffsetArrays", "StackViews"]
git-tree-sha1 = "3b5344bcdbdc11ad58f3b1956709b5b9345355de"
uuid = "4e3cecfd-b093-5904-9786-8bbb286a6a31"
version = "0.3.8"
[[deps.Imath_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "0936ba688c6d201805a83da835b55c61a180db52"
uuid = "905a6f67-0a94-5f89-b386-d35d92009cd1"
version = "3.1.11+0"
[[deps.IndirectArrays]]
git-tree-sha1 = "012e604e1c7458645cb8b436f8fba789a51b257f"
uuid = "9b13fd28-a010-5f03-acff-a1bbcff69959"
version = "1.0.0"
[[deps.Inflate]]
git-tree-sha1 = "d1b1b796e47d94588b3757fe84fbf65a5ec4a80d"
uuid = "d25df0c9-e2be-5dd7-82c8-3ad0b3e990b9"
version = "0.1.5"
[[deps.InitialValues]]
git-tree-sha1 = "4da0f88e9a39111c2fa3add390ab15f3a44f3ca3"
uuid = "22cec73e-a1b8-11e9-2c92-598750a2cf9c"
version = "0.3.1"
[[deps.InlineStrings]]
git-tree-sha1 = "45521d31238e87ee9f9732561bfee12d4eebd52d"
uuid = "842dd82b-1e85-43dc-bf29-5d0ee9dffc48"
version = "1.4.2"
[deps.InlineStrings.extensions]
ArrowTypesExt = "ArrowTypes"
ParsersExt = "Parsers"
[deps.InlineStrings.weakdeps]
ArrowTypes = "31f734f8-188a-4ce0-8406-c8a06bd891cd"
Parsers = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
[[deps.IntelOpenMP_jll]]
deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl"]
git-tree-sha1 = "10bd689145d2c3b2a9844005d01087cc1194e79e"
uuid = "1d5cc7b8-4909-519e-a0f8-d0f5ad9712d0"
version = "2024.2.1+0"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[deps.Interpolations]]
deps = ["Adapt", "AxisAlgorithms", "ChainRulesCore", "LinearAlgebra", "OffsetArrays", "Random", "Ratios", "Requires", "SharedArrays", "SparseArrays", "StaticArrays", "WoodburyMatrices"]
git-tree-sha1 = "88a101217d7cb38a7b481ccd50d21876e1d1b0e0"
uuid = "a98d9a8b-a2ab-59e6-89dd-64a1c18fca59"
version = "0.15.1"
weakdeps = ["Unitful"]
[deps.Interpolations.extensions]
InterpolationsUnitfulExt = "Unitful"
[[deps.IntervalArithmetic]]
deps = ["CRlibm_jll", "MacroTools", "RoundingEmulator"]
git-tree-sha1 = "fe30dec78e68f27fc416901629c6e24e9d5f057b"
uuid = "d1acc4aa-44c8-5952-acd4-ba5d80a2a253"
version = "0.22.16"
[deps.IntervalArithmetic.extensions]
IntervalArithmeticDiffRulesExt = "DiffRules"
IntervalArithmeticForwardDiffExt = "ForwardDiff"
IntervalArithmeticIntervalSetsExt = "IntervalSets"
IntervalArithmeticLinearAlgebraExt = "LinearAlgebra"
IntervalArithmeticRecipesBaseExt = "RecipesBase"
[deps.IntervalArithmetic.weakdeps]
DiffRules = "b552c78f-8df3-52c6-915a-8e097449b14b"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
IntervalSets = "8197267c-284f-5f27-9208-e0e47529a953"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
[[deps.IntervalSets]]
git-tree-sha1 = "dba9ddf07f77f60450fe5d2e2beb9854d9a49bd0"
uuid = "8197267c-284f-5f27-9208-e0e47529a953"
version = "0.7.10"
[deps.IntervalSets.extensions]
IntervalSetsRandomExt = "Random"
IntervalSetsRecipesBaseExt = "RecipesBase"
IntervalSetsStatisticsExt = "Statistics"
[deps.IntervalSets.weakdeps]
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[deps.InverseFunctions]]
git-tree-sha1 = "2787db24f4e03daf859c6509ff87764e4182f7d1"
uuid = "3587e190-3f89-42d0-90ee-14403ec27112"
version = "0.1.16"
weakdeps = ["Dates", "Test"]
[deps.InverseFunctions.extensions]
InverseFunctionsDatesExt = "Dates"
InverseFunctionsTestExt = "Test"
[[deps.InvertedIndices]]
git-tree-sha1 = "0dc7b50b8d436461be01300fd8cd45aa0274b038"
uuid = "41ab1584-1d38-5bbf-9106-f11c6c58b48f"
version = "1.3.0"
[[deps.IrrationalConstants]]
git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.2.2"
[[deps.Isoband]]
deps = ["isoband_jll"]
git-tree-sha1 = "f9b6d97355599074dc867318950adaa6f9946137"
uuid = "f1662d9f-8043-43de-a69a-05efc1cc6ff4"
version = "0.1.1"
[[deps.IterTools]]
git-tree-sha1 = "42d5f897009e7ff2cf88db414a389e5ed1bdd023"
uuid = "c8e1da08-722c-5040-9ed9-7db0dc04731e"
version = "1.10.0"
[[deps.IteratorInterfaceExtensions]]
git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856"
uuid = "82899510-4779-5014-852e-03e436cf321d"
version = "1.0.0"
[[deps.JLLWrappers]]
deps = ["Artifacts", "Preferences"]
git-tree-sha1 = "f389674c99bfcde17dc57454011aa44d5a260a40"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.6.0"
[[deps.JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "31e996f0a15c7b280ba9f76636b3ff9e2ae58c9a"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.4"
[[deps.JpegTurbo]]
deps = ["CEnum", "FileIO", "ImageCore", "JpegTurbo_jll", "TOML"]
git-tree-sha1 = "fa6d0bcff8583bac20f1ffa708c3913ca605c611"
uuid = "b835a17e-a41a-41e7-81f0-2f016b05efe0"
version = "0.1.5"
[[deps.JpegTurbo_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "c84a835e1a09b289ffcd2271bf2a337bbdda6637"
uuid = "aacddb02-875f-59d6-b918-886e6ef4fbf8"
version = "3.0.3+0"
[[deps.JuliaNVTXCallbacks_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "af433a10f3942e882d3c671aacb203e006a5808f"
uuid = "9c1d0b0a-7046-5b2e-a33f-ea22f176ac7e"
version = "0.2.1+0"
[[deps.KernelAbstractions]]
deps = ["Adapt", "Atomix", "InteractiveUtils", "MacroTools", "PrecompileTools", "Requires", "StaticArrays", "UUIDs", "UnsafeAtomics", "UnsafeAtomicsLLVM"]
git-tree-sha1 = "cb1cff88ef2f3a157cbad75bbe6b229e1975e498"
uuid = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
version = "0.9.25"
[deps.KernelAbstractions.extensions]
EnzymeExt = "EnzymeCore"
LinearAlgebraExt = "LinearAlgebra"
SparseArraysExt = "SparseArrays"
[deps.KernelAbstractions.weakdeps]
EnzymeCore = "f151be2c-9106-41f4-ab19-57ee4f262869"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[deps.KernelDensity]]
deps = ["Distributions", "DocStringExtensions", "FFTW", "Interpolations", "StatsBase"]
git-tree-sha1 = "7d703202e65efa1369de1279c162b915e245eed1"
uuid = "5ab0869b-81aa-558d-bb23-cbf5423bbe9b"
version = "0.6.9"
[[deps.LAME_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "170b660facf5df5de098d866564877e119141cbd"
uuid = "c1c5ebd0-6772-5130-a774-d5fcae4a789d"
version = "3.100.2+0"
[[deps.LLVM]]
deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Preferences", "Printf", "Requires", "Unicode"]
git-tree-sha1 = "2470e69781ddd70b8878491233cd09bc1bd7fc96"
uuid = "929cbde3-209d-540e-8aea-75f648917ca0"
version = "8.1.0"
weakdeps = ["BFloat16s"]
[deps.LLVM.extensions]
BFloat16sExt = "BFloat16s"
[[deps.LLVMExtra_jll]]
deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
git-tree-sha1 = "597d1c758c9ae5d985ba4202386a607c675ee700"
uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab"
version = "0.0.31+0"
[[deps.LLVMLoopInfo]]
git-tree-sha1 = "2e5c102cfc41f48ae4740c7eca7743cc7e7b75ea"
uuid = "8b046642-f1f6-4319-8d3c-209ddc03c586"
version = "1.0.0"
[[deps.LLVMOpenMP_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "78211fb6cbc872f77cad3fc0b6cf647d923f4929"
uuid = "1d63c593-3942-5779-bab2-d838dc0a180e"
version = "18.1.7+0"
[[deps.LZO_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "70c5da094887fd2cae843b8db33920bac4b6f07d"
uuid = "dd4b983a-f0e5-5f8d-a1b7-129d4a5fb1ac"
version = "2.10.2+0"
[[deps.LaTeXStrings]]
git-tree-sha1 = "50901ebc375ed41dbf8058da26f9de442febbbec"
uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f"
version = "1.3.1"
[[deps.LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
[[deps.LazyModules]]
git-tree-sha1 = "a560dd966b386ac9ae60bdd3a3d3a326062d3c3e"
uuid = "8cdb02fc-e678-4876-92c5-9defec4f444e"
version = "0.3.1"
[[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
version = "0.6.4"
[[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
version = "8.4.0+0"
[[deps.LibGit2]]
deps = ["Base64", "LibGit2_jll", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[deps.LibGit2_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll"]
uuid = "e37daf67-58a4-590a-8e99-b0245dd2ffc5"
version = "1.6.4+0"
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
version = "1.11.0+1"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[deps.Libffi_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "0b4a5d71f3e5200a7dff793393e09dfc2d874290"
uuid = "e9f186c6-92d2-5b65-8a66-fee21dc1b490"
version = "3.2.2+1"
[[deps.Libgcrypt_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgpg_error_jll"]
git-tree-sha1 = "9fd170c4bbfd8b935fdc5f8b7aa33532c991a673"
uuid = "d4300ac3-e22c-5743-9152-c294e39db1e4"
version = "1.8.11+0"
[[deps.Libgpg_error_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "fbb1f2bef882392312feb1ede3615ddc1e9b99ed"
uuid = "7add5ba3-2f88-524e-9cd5-f83b8a55f7b8"
version = "1.49.0+0"
[[deps.Libiconv_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "f9557a255370125b405568f9767d6d195822a175"
uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531"
version = "1.17.0+0"
[[deps.Libmount_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "0c4f9c4f1a50d8f35048fa0532dabbadf702f81e"
uuid = "4b2f31a3-9ecc-558c-b454-b3730dcb73e9"
version = "2.40.1+0"
[[deps.Libuuid_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "5ee6203157c120d79034c748a2acba45b82b8807"
uuid = "38a345b3-de98-5d2b-a5d3-14cd9215e700"
version = "2.40.1+0"
[[deps.LinearAlgebra]]
deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[deps.LogExpFunctions]]
deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "a2d09619db4e765091ee5c6ffe8872849de0feea"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.28"
[deps.LogExpFunctions.extensions]
LogExpFunctionsChainRulesCoreExt = "ChainRulesCore"
LogExpFunctionsChangesOfVariablesExt = "ChangesOfVariables"
LogExpFunctionsInverseFunctionsExt = "InverseFunctions"
[deps.LogExpFunctions.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
ChangesOfVariables = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[deps.MKL_jll]]
deps = ["Artifacts", "IntelOpenMP_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "oneTBB_jll"]
git-tree-sha1 = "f046ccd0c6db2832a9f639e2c669c6fe867e5f4f"
uuid = "856f044c-d86e-5d09-b602-aeab76dc8ba7"
version = "2024.2.0+0"
[[deps.MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "2fa9ee3e63fd3a4f7a9a4f4744a52f4856de82df"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.13"
[[deps.Makie]]
deps = ["Animations", "Base64", "CRC32c", "ColorBrewer", "ColorSchemes", "ColorTypes", "Colors", "Contour", "Dates", "DelaunayTriangulation", "Distributions", "DocStringExtensions", "Downloads", "FFMPEG_jll", "FileIO", "FilePaths", "FixedPointNumbers", "Format", "FreeType", "FreeTypeAbstraction", "GeometryBasics", "GridLayoutBase", "ImageBase", "ImageIO", "InteractiveUtils", "Interpolations", "IntervalSets", "Isoband", "KernelDensity", "LaTeXStrings", "LinearAlgebra", "MacroTools", "MakieCore", "Markdown", "MathTeXEngine", "Observables", "OffsetArrays", "Packing", "PlotUtils", "PolygonOps", "PrecompileTools", "Printf", "REPL", "Random", "RelocatableFolders", "Scratch", "ShaderAbstractions", "Showoff", "SignedDistanceFields", "SparseArrays", "Statistics", "StatsBase", "StatsFuns", "StructArrays", "TriplotBase", "UnicodeFun", "Unitful"]
git-tree-sha1 = "2281aaf0685e5e8a559982d32f17d617a949b9cd"
uuid = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a"
version = "0.21.11"
[[deps.MakieCore]]
deps = ["ColorTypes", "GeometryBasics", "IntervalSets", "Observables"]
git-tree-sha1 = "22fed09860ca73537a36d4e5a9bce0d9e80ee8a8"
uuid = "20f20a25-4f0e-4fdf-b5d1-57303727442b"
version = "0.8.8"
[[deps.MappedArrays]]
git-tree-sha1 = "2dab0221fe2b0f2cb6754eaa743cc266339f527e"
uuid = "dbb5928d-eab1-5f90-85c2-b9b0edb7c900"
version = "0.4.2"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[deps.MathTeXEngine]]
deps = ["AbstractTrees", "Automa", "DataStructures", "FreeTypeAbstraction", "GeometryBasics", "LaTeXStrings", "REPL", "RelocatableFolders", "UnicodeFun"]
git-tree-sha1 = "e1641f32ae592e415e3dbae7f4a188b5316d4b62"
uuid = "0a4f8689-d25c-4efe-a92b-7142dfc1aa53"
version = "0.6.1"
[[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
version = "2.28.2+1"
[[deps.MicroCollections]]
deps = ["Accessors", "BangBang", "InitialValues"]
git-tree-sha1 = "44d32db644e84c75dab479f1bc15ee76a1a3618f"
uuid = "128add7d-3638-4c79-886c-908ea0c25c34"
version = "0.2.0"
[[deps.Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "ec4f7fbeab05d7747bdf98eb74d130a2a2ed298d"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.2.0"
[[deps.Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[deps.MosaicViews]]
deps = ["MappedArrays", "OffsetArrays", "PaddedViews", "StackViews"]
git-tree-sha1 = "7b86a5d4d70a9f5cdf2dacb3cbe6d251d1a61dbe"
uuid = "e94cdb99-869f-56ef-bcf0-1ae2bcbe0389"
version = "0.3.4"
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
version = "2023.1.10"
[[deps.NVTX]]
deps = ["Colors", "JuliaNVTXCallbacks_jll", "Libdl", "NVTX_jll"]
git-tree-sha1 = "53046f0483375e3ed78e49190f1154fa0a4083a1"
uuid = "5da4648a-3479-48b8-97b9-01cb529c0a1f"
version = "0.3.4"
[[deps.NVTX_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "ce3269ed42816bf18d500c9f63418d4b0d9f5a3b"
uuid = "e98f9f5b-d649-5603-91fd-7774390e6439"
version = "3.1.0+2"
[[deps.NaNMath]]
deps = ["OpenLibm_jll"]
git-tree-sha1 = "0877504529a3e5c3343c6f8b4c0381e57e4387e4"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "1.0.2"
[[deps.Netpbm]]
deps = ["FileIO", "ImageCore", "ImageMetadata"]
git-tree-sha1 = "d92b107dbb887293622df7697a2223f9f8176fcd"
uuid = "f09324ee-3d7c-5217-9330-fc30815ba969"
version = "1.1.1"
[[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
version = "1.2.0"
[[deps.Observables]]
git-tree-sha1 = "7438a59546cf62428fc9d1bc94729146d37a7225"
uuid = "510215fc-4207-5dde-b226-833fc4488ee2"
version = "0.5.5"
[[deps.OffsetArrays]]
git-tree-sha1 = "1a27764e945a152f7ca7efa04de513d473e9542e"
uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881"
version = "1.14.1"
weakdeps = ["Adapt"]
[deps.OffsetArrays.extensions]
OffsetArraysAdaptExt = "Adapt"
[[deps.Ogg_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "887579a3eb005446d514ab7aeac5d1d027658b8f"
uuid = "e7412a2a-1a6e-54c0-be00-318e2571c051"
version = "1.3.5+1"
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
version = "0.3.23+4"
[[deps.OpenEXR]]
deps = ["Colors", "FileIO", "OpenEXR_jll"]
git-tree-sha1 = "327f53360fdb54df7ecd01e96ef1983536d1e633"
uuid = "52e1d378-f018-4a11-a4be-720524705ac7"
version = "0.3.2"
[[deps.OpenEXR_jll]]
deps = ["Artifacts", "Imath_jll", "JLLWrappers", "Libdl", "Zlib_jll"]
git-tree-sha1 = "8292dd5c8a38257111ada2174000a33745b06d4e"
uuid = "18a262bb-aa17-5467-a713-aee519bc75cb"
version = "3.2.4+0"
[[deps.OpenLibm_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
version = "0.8.1+2"
[[deps.OpenSSL_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "1b35263570443fdd9e76c76b7062116e2f374ab8"
uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95"
version = "3.0.15+0"
[[deps.OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
[[deps.Opus_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "6703a85cb3781bd5909d48730a67205f3f31a575"
uuid = "91d4177d-7536-5919-b921-800302f37372"
version = "1.3.3+0"
[[deps.OrderedCollections]]
git-tree-sha1 = "dfdf5519f235516220579f949664f1bf44e741c5"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.6.3"
[[deps.PCRE2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "efcefdf7-47ab-520b-bdef-62a2eaa19f15"
version = "10.42.0+1"
[[deps.PDMats]]
deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"]
git-tree-sha1 = "949347156c25054de2db3b166c52ac4728cbad65"
uuid = "90014a1f-27ba-587c-ab20-58faa44d9150"
version = "0.11.31"
[[deps.PNGFiles]]
deps = ["Base64", "CEnum", "ImageCore", "IndirectArrays", "OffsetArrays", "libpng_jll"]
git-tree-sha1 = "67186a2bc9a90f9f85ff3cc8277868961fb57cbd"
uuid = "f57f5aa1-a3ce-4bc8-8ab9-96f992907883"
version = "0.4.3"
[[deps.Packing]]
deps = ["GeometryBasics"]
git-tree-sha1 = "ec3edfe723df33528e085e632414499f26650501"
uuid = "19eb6ba3-879d-56ad-ad62-d5c202156566"
version = "0.5.0"
[[deps.PaddedViews]]
deps = ["OffsetArrays"]
git-tree-sha1 = "0fac6313486baae819364c52b4f483450a9d793f"
uuid = "5432bcbf-9aad-5242-b902-cca2824c8663"
version = "0.5.12"
[[deps.Pango_jll]]
deps = ["Artifacts", "Cairo_jll", "Fontconfig_jll", "FreeType2_jll", "FriBidi_jll", "Glib_jll", "HarfBuzz_jll", "JLLWrappers", "Libdl"]
git-tree-sha1 = "e127b609fb9ecba6f201ba7ab753d5a605d53801"
uuid = "36c8627f-9965-5494-a995-c6b170f724f3"
version = "1.54.1+0"
[[deps.Parsers]]
deps = ["Dates", "PrecompileTools", "UUIDs"]
git-tree-sha1 = "8489905bcdbcfac64d1daa51ca07c0d8f0283821"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.8.1"
[[deps.Pixman_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LLVMOpenMP_jll", "Libdl"]
git-tree-sha1 = "35621f10a7531bc8fa58f74610b1bfb70a3cfc6b"
uuid = "30392449-352a-5448-841d-b1acce4e97dc"
version = "0.43.4+0"
[[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
version = "1.10.0"
[[deps.PkgVersion]]
deps = ["Pkg"]
git-tree-sha1 = "f9501cc0430a26bc3d156ae1b5b0c1b47af4d6da"
uuid = "eebad327-c553-4316-9ea0-9fa01ccd7688"
version = "0.3.3"
[[deps.PlotUtils]]
deps = ["ColorSchemes", "Colors", "Dates", "PrecompileTools", "Printf", "Random", "Reexport", "Statistics"]
git-tree-sha1 = "7b1a9df27f072ac4c9c7cbe5efb198489258d1f5"
uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043"
version = "1.4.1"
[[deps.PolygonOps]]
git-tree-sha1 = "77b3d3605fc1cd0b42d95eba87dfcd2bf67d5ff6"
uuid = "647866c9-e3ac-4575-94e7-e3d426903924"
version = "0.1.2"
[[deps.PooledArrays]]
deps = ["DataAPI", "Future"]
git-tree-sha1 = "36d8b4b899628fb92c2749eb488d884a926614d3"
uuid = "2dfb63ee-cc39-5dd5-95bd-886bf059d720"
version = "1.4.3"
[[deps.PrecompileTools]]
deps = ["Preferences"]
git-tree-sha1 = "5aa36f7049a63a1528fe8f7c3f2113413ffd4e1f"
uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a"
version = "1.2.1"
[[deps.Preferences]]
deps = ["TOML"]
git-tree-sha1 = "9306f6085165d270f7e3db02af26a400d580f5c6"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.4.3"
[[deps.PrettyTables]]
deps = ["Crayons", "LaTeXStrings", "Markdown", "PrecompileTools", "Printf", "Reexport", "StringManipulation", "Tables"]
git-tree-sha1 = "66b20dd35966a748321d3b2537c4584cf40387c7"
uuid = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d"
version = "2.3.2"
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[deps.ProgressMeter]]
deps = ["Distributed", "Printf"]
git-tree-sha1 = "8f6bc219586aef8baf0ff9a5fe16ee9c70cb65e4"
uuid = "92933f4c-e287-5a05-a399-4b506db050ca"
version = "1.10.2"
[[deps.PtrArrays]]
git-tree-sha1 = "77a42d78b6a92df47ab37e177b2deac405e1c88f"
uuid = "43287f4e-b6f4-7ad1-bb20-aadabca52c3d"
version = "1.2.1"
[[deps.QOI]]
deps = ["ColorTypes", "FileIO", "FixedPointNumbers"]
git-tree-sha1 = "18e8f4d1426e965c7b532ddd260599e1510d26ce"
uuid = "4b34888f-f399-49d4-9bb3-47ed5cae4e65"
version = "1.0.0"
[[deps.QuadGK]]
deps = ["DataStructures", "LinearAlgebra"]
git-tree-sha1 = "1d587203cf851a51bf1ea31ad7ff89eff8d625ea"
uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc"
version = "2.11.0"
[deps.QuadGK.extensions]
QuadGKEnzymeExt = "Enzyme"
[deps.QuadGK.weakdeps]
Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9"
[[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[deps.Random]]
deps = ["SHA"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[deps.Random123]]
deps = ["Random", "RandomNumbers"]
git-tree-sha1 = "4743b43e5a9c4a2ede372de7061eed81795b12e7"
uuid = "74087812-796a-5b5d-8853-05524746bad3"
version = "1.7.0"
[[deps.RandomNumbers]]
deps = ["Random"]
git-tree-sha1 = "c6ec94d2aaba1ab2ff983052cf6a606ca5985902"
uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143"
version = "1.6.0"
[[deps.RangeArrays]]
git-tree-sha1 = "b9039e93773ddcfc828f12aadf7115b4b4d225f5"
uuid = "b3c3ace0-ae52-54e7-9d0b-2c1406fd6b9d"
version = "0.3.2"
[[deps.Ratios]]
deps = ["Requires"]
git-tree-sha1 = "1342a47bf3260ee108163042310d26f2be5ec90b"
uuid = "c84ed2f1-dad5-54f0-aa8e-dbefe2724439"
version = "0.4.5"
weakdeps = ["FixedPointNumbers"]
[deps.Ratios.extensions]
RatiosFixedPointNumbersExt = "FixedPointNumbers"
[[deps.Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[deps.RelocatableFolders]]
deps = ["SHA", "Scratch"]
git-tree-sha1 = "ffdaf70d81cf6ff22c2b6e733c900c3321cab864"
uuid = "05181044-ff0b-4ac5-8273-598c1e38db00"
version = "1.0.1"
[[deps.Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.3.0"
[[deps.Rmath]]
deps = ["Random", "Rmath_jll"]
git-tree-sha1 = "852bd0f55565a9e973fcfee83a84413270224dc4"
uuid = "79098fc4-a85e-5d69-aa6a-4863f24498fa"
version = "0.8.0"
[[deps.Rmath_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "58cdd8fb2201a6267e1db87ff148dd6c1dbd8ad8"
uuid = "f50d1b31-88e8-58de-be2c-1cc44531875f"
version = "0.5.1+0"
[[deps.RoundingEmulator]]
git-tree-sha1 = "40b9edad2e5287e05bd413a38f61a8ff55b9557b"
uuid = "5eaf0fd0-dfba-4ccb-bf02-d820a40db705"
version = "0.2.1"
[[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
version = "0.7.0"
[[deps.SIMD]]
deps = ["PrecompileTools"]
git-tree-sha1 = "2803cab51702db743f3fda07dd1745aadfbf43bd"
uuid = "fdea26ae-647d-5447-a871-4b548cad5224"
version = "3.5.0"
[[deps.Scratch]]
deps = ["Dates"]
git-tree-sha1 = "3bac05bc7e74a75fd9cba4295cde4045d9fe2386"
uuid = "6c6a2e73-6563-6170-7368-637461726353"
version = "1.2.1"
[[deps.SentinelArrays]]
deps = ["Dates", "Random"]
git-tree-sha1 = "ff11acffdb082493657550959d4feb4b6149e73a"
uuid = "91c51154-3ec4-41a3-a24f-3f23e20d615c"
version = "1.4.5"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.Setfield]]
deps = ["ConstructionBase", "Future", "MacroTools", "StaticArraysCore"]
git-tree-sha1 = "e2cc6d8c88613c05e1defb55170bf5ff211fbeac"
uuid = "efcf1570-3423-57d1-acb7-fd33fddbac46"
version = "1.1.1"
[[deps.ShaderAbstractions]]
deps = ["ColorTypes", "FixedPointNumbers", "GeometryBasics", "LinearAlgebra", "Observables", "StaticArrays", "StructArrays", "Tables"]
git-tree-sha1 = "79123bc60c5507f035e6d1d9e563bb2971954ec8"
uuid = "65257c39-d410-5151-9873-9b3e5be5013e"
version = "0.4.1"
[[deps.SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[deps.Showoff]]
deps = ["Dates", "Grisu"]
git-tree-sha1 = "91eddf657aca81df9ae6ceb20b959ae5653ad1de"
uuid = "992d4aef-0814-514b-bc4d-f2e9a6c4116f"
version = "1.0.3"
[[deps.SignedDistanceFields]]
deps = ["Random", "Statistics", "Test"]
git-tree-sha1 = "d263a08ec505853a5ff1c1ebde2070419e3f28e9"
uuid = "73760f76-fbc4-59ce-8f25-708e95d2df96"
version = "0.4.0"
[[deps.SimpleTraits]]
deps = ["InteractiveUtils", "MacroTools"]
git-tree-sha1 = "5d7e3f4e11935503d3ecaf7186eac40602e7d231"
uuid = "699a6c99-e7fa-54fc-8d76-47d257e15c1d"
version = "0.9.4"
[[deps.Sixel]]
deps = ["Dates", "FileIO", "ImageCore", "IndirectArrays", "OffsetArrays", "REPL", "libsixel_jll"]
git-tree-sha1 = "2da10356e31327c7096832eb9cd86307a50b1eb6"
uuid = "45858cf5-a6b0-47a3-bbea-62219f50df47"
version = "0.1.3"
[[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[deps.SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "66e0a8e672a0bdfca2c3f5937efb8538b9ddc085"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.2.1"
[[deps.SparseArrays]]
deps = ["Libdl", "LinearAlgebra", "Random", "Serialization", "SuiteSparse_jll"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
version = "1.10.0"
[[deps.SpecialFunctions]]
deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "2f5d4697f21388cbe1ff299430dd169ef97d7e14"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "2.4.0"
weakdeps = ["ChainRulesCore"]
[deps.SpecialFunctions.extensions]
SpecialFunctionsChainRulesCoreExt = "ChainRulesCore"
[[deps.SplittablesBase]]
deps = ["Setfield", "Test"]
git-tree-sha1 = "e08a62abc517eb79667d0a29dc08a3b589516bb5"
uuid = "171d559e-b47b-412a-8079-5efa626c420e"
version = "0.1.15"
[[deps.StackViews]]
deps = ["OffsetArrays"]
git-tree-sha1 = "46e589465204cd0c08b4bd97385e4fa79a0c770c"
uuid = "cae243ae-269e-4f55-b966-ac2d0dc13c15"
version = "0.1.1"
[[deps.StaticArrays]]
deps = ["LinearAlgebra", "PrecompileTools", "Random", "StaticArraysCore"]
git-tree-sha1 = "eeafab08ae20c62c44c8399ccb9354a04b80db50"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.9.7"
weakdeps = ["ChainRulesCore", "Statistics"]
[deps.StaticArrays.extensions]
StaticArraysChainRulesCoreExt = "ChainRulesCore"
StaticArraysStatisticsExt = "Statistics"
[[deps.StaticArraysCore]]
git-tree-sha1 = "192954ef1208c7019899fbf8049e717f92959682"
uuid = "1e83bf80-4336-4d27-bf5d-d5a4f845583c"
version = "1.4.3"
[[deps.Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
version = "1.10.0"
[[deps.StatsAPI]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "1ff449ad350c9c4cbc756624d6f8a8c3ef56d3ed"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.7.0"
[[deps.StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "5cf7606d6cef84b543b483848d4ae08ad9832b21"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.34.3"
[[deps.StatsFuns]]
deps = ["HypergeometricFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"]
git-tree-sha1 = "b423576adc27097764a90e163157bcfc9acf0f46"
uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c"
version = "1.3.2"
weakdeps = ["ChainRulesCore", "InverseFunctions"]
[deps.StatsFuns.extensions]
StatsFunsChainRulesCoreExt = "ChainRulesCore"
StatsFunsInverseFunctionsExt = "InverseFunctions"
[[deps.StringManipulation]]
deps = ["PrecompileTools"]
git-tree-sha1 = "a04cabe79c5f01f4d723cc6704070ada0b9d46d5"
uuid = "892a3eda-7b42-436c-8928-eab12a02cf0e"
version = "0.3.4"
[[deps.StructArrays]]
deps = ["ConstructionBase", "DataAPI", "Tables"]
git-tree-sha1 = "f4dc295e983502292c4c3f951dbb4e985e35b3be"
uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
version = "0.6.18"
weakdeps = ["Adapt", "GPUArraysCore", "SparseArrays", "StaticArrays"]
[deps.StructArrays.extensions]
StructArraysAdaptExt = "Adapt"
StructArraysGPUArraysCoreExt = "GPUArraysCore"
StructArraysSparseArraysExt = "SparseArrays"
StructArraysStaticArraysExt = "StaticArrays"
[[deps.SuiteSparse]]
deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"]
uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9"
[[deps.SuiteSparse_jll]]
deps = ["Artifacts", "Libdl", "libblastrampoline_jll"]
uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c"
version = "7.2.1+1"
[[deps.TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
version = "1.0.3"
[[deps.TableTraits]]
deps = ["IteratorInterfaceExtensions"]
git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39"
uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c"
version = "1.0.1"
[[deps.Tables]]
deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "OrderedCollections", "TableTraits"]
git-tree-sha1 = "598cd7c1f68d1e205689b1c2fe65a9f85846f297"
uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
version = "1.12.0"
[[deps.Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
version = "1.10.0"
[[deps.TensorCore]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "1feb45f88d133a655e001435632f019a9a1bcdb6"
uuid = "62fd8b95-f654-4bbd-a8a5-9c27f68ccd50"
version = "0.1.1"
[[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[deps.TiffImages]]
deps = ["ColorTypes", "DataStructures", "DocStringExtensions", "FileIO", "FixedPointNumbers", "IndirectArrays", "Inflate", "Mmap", "OffsetArrays", "PkgVersion", "ProgressMeter", "SIMD", "UUIDs"]
git-tree-sha1 = "bc7fd5c91041f44636b2c134041f7e5263ce58ae"
uuid = "731e570b-9d59-4bfa-96dc-6df516fadf69"
version = "0.10.0"
[[deps.TimerOutputs]]
deps = ["ExprTools", "Printf"]
git-tree-sha1 = "5a13ae8a41237cff5ecf34f73eb1b8f42fff6531"
uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
version = "0.5.24"
[[deps.TranscodingStreams]]
git-tree-sha1 = "e84b3a11b9bece70d14cce63406bbc79ed3464d2"
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
version = "0.11.2"
[[deps.Transducers]]
deps = ["Accessors", "Adapt", "ArgCheck", "BangBang", "Baselet", "CompositionsBase", "ConstructionBase", "DefineSingletons", "Distributed", "InitialValues", "Logging", "Markdown", "MicroCollections", "Requires", "SplittablesBase", "Tables"]
git-tree-sha1 = "5215a069867476fc8e3469602006b9670e68da23"
uuid = "28d57a85-8fef-5791-bfe6-a80928e7c999"
version = "0.4.82"
[deps.Transducers.extensions]
TransducersBlockArraysExt = "BlockArrays"
TransducersDataFramesExt = "DataFrames"
TransducersLazyArraysExt = "LazyArrays"
TransducersOnlineStatsBaseExt = "OnlineStatsBase"
TransducersReferenceablesExt = "Referenceables"
[deps.Transducers.weakdeps]
BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
LazyArrays = "5078a376-72f3-5289-bfd5-ec5146d43c02"
OnlineStatsBase = "925886fa-5bf2-5e8e-b522-a9147a512338"
Referenceables = "42d2dcc6-99eb-4e98-b66c-637b7d73030e"
[[deps.TriplotBase]]
git-tree-sha1 = "4d4ed7f294cda19382ff7de4c137d24d16adc89b"
uuid = "981d1d27-644d-49a2-9326-4793e63143c3"
version = "0.1.0"
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[deps.UnicodeFun]]
deps = ["REPL"]
git-tree-sha1 = "53915e50200959667e78a92a418594b428dffddf"
uuid = "1cfade01-22cf-5700-b092-accc4b62d6e1"
version = "0.4.1"
[[deps.Unitful]]
deps = ["Dates", "LinearAlgebra", "Random"]
git-tree-sha1 = "d95fe458f26209c66a187b1114df96fd70839efd"
uuid = "1986cc42-f94f-5a68-af5c-568840ba703d"
version = "1.21.0"
weakdeps = ["ConstructionBase", "InverseFunctions"]
[deps.Unitful.extensions]
ConstructionBaseUnitfulExt = "ConstructionBase"
InverseFunctionsUnitfulExt = "InverseFunctions"
[[deps.UnsafeAtomics]]
git-tree-sha1 = "6331ac3440856ea1988316b46045303bef658278"
uuid = "013be700-e6cd-48c3-b4a1-df204f14c38f"
version = "0.2.1"
[[deps.UnsafeAtomicsLLVM]]
deps = ["LLVM", "UnsafeAtomics"]
git-tree-sha1 = "2d17fabcd17e67d7625ce9c531fb9f40b7c42ce4"
uuid = "d80eeb9a-aca5-4d75-85e5-170c8b632249"
version = "0.2.1"
[[deps.WoodburyMatrices]]
deps = ["LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "c1a7aa6219628fcd757dede0ca95e245c5cd9511"
uuid = "efce3f68-66dc-5838-9240-27a6d6f5f9b6"
version = "1.0.0"
[[deps.XML2_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Zlib_jll"]
git-tree-sha1 = "1165b0443d0eca63ac1e32b8c0eb69ed2f4f8127"
uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a"
version = "2.13.3+0"
[[deps.XSLT_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgcrypt_jll", "Libgpg_error_jll", "Libiconv_jll", "XML2_jll", "Zlib_jll"]
git-tree-sha1 = "a54ee957f4c86b526460a720dbc882fa5edcbefc"
uuid = "aed1982a-8fda-507f-9586-7b0439959a61"
version = "1.1.41+0"
[[deps.Xorg_libX11_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Xorg_libxcb_jll", "Xorg_xtrans_jll"]
git-tree-sha1 = "afead5aba5aa507ad5a3bf01f58f82c8d1403495"
uuid = "4f6342f7-b3d2-589e-9d20-edeb45f2b2bc"
version = "1.8.6+0"
[[deps.Xorg_libXau_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "6035850dcc70518ca32f012e46015b9beeda49d8"
uuid = "0c0b7dd1-d40b-584c-a123-a41640f87eec"
version = "1.0.11+0"
[[deps.Xorg_libXdmcp_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "34d526d318358a859d7de23da945578e8e8727b7"
uuid = "a3789734-cfe1-5b06-b2d0-1dd0d9d62d05"
version = "1.1.4+0"
[[deps.Xorg_libXext_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Xorg_libX11_jll"]
git-tree-sha1 = "d2d1a5c49fae4ba39983f63de6afcbea47194e85"
uuid = "1082639a-0dae-5f34-9b06-72781eeb8cb3"
version = "1.3.6+0"
[[deps.Xorg_libXrender_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Xorg_libX11_jll"]
git-tree-sha1 = "47e45cd78224c53109495b3e324df0c37bb61fbe"
uuid = "ea2f1a96-1ddc-540d-b46f-429655e07cfa"
version = "0.9.11+0"
[[deps.Xorg_libpthread_stubs_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "8fdda4c692503d44d04a0603d9ac0982054635f9"
uuid = "14d82f49-176c-5ed1-bb49-ad3f5cbd8c74"
version = "0.1.1+0"
[[deps.Xorg_libxcb_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "XSLT_jll", "Xorg_libXau_jll", "Xorg_libXdmcp_jll", "Xorg_libpthread_stubs_jll"]
git-tree-sha1 = "bcd466676fef0878338c61e655629fa7bbc69d8e"
uuid = "c7cfdc94-dc32-55de-ac96-5a1b8d977c5b"
version = "1.17.0+0"
[[deps.Xorg_xtrans_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "e92a1a012a10506618f10b7047e478403a046c77"
uuid = "c5fb5394-a638-5e4d-96e5-b29de1b5cf10"
version = "1.5.0+0"
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
version = "1.2.13+1"
[[deps.isoband_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "51b5eeb3f98367157a7a12a1fb0aa5328946c03c"
uuid = "9a68df92-36a6-505f-a73e-abb412b6bfb4"
version = "0.2.3+0"
[[deps.libaom_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "1827acba325fdcdf1d2647fc8d5301dd9ba43a9d"
uuid = "a4ae2306-e953-59d6-aa16-d00cac43593b"
version = "3.9.0+0"
[[deps.libass_jll]]
deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "HarfBuzz_jll", "JLLWrappers", "Libdl", "Zlib_jll"]
git-tree-sha1 = "e17c115d55c5fbb7e52ebedb427a0dca79d4484e"
uuid = "0ac62f75-1d6f-5e53-bd7c-93b484bb37c0"
version = "0.15.2+0"
[[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
version = "5.11.0+0"
[[deps.libfdk_aac_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "8a22cf860a7d27e4f3498a0fe0811a7957badb38"
uuid = "f638f0a6-7fb0-5443-88ba-1cc74229b280"
version = "2.0.3+0"
[[deps.libpng_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Zlib_jll"]
git-tree-sha1 = "d7015d2e18a5fd9a4f47de711837e980519781a4"
uuid = "b53b4c65-9356-5827-b1ea-8c7a1a84506f"
version = "1.6.43+1"
[[deps.libsixel_jll]]
deps = ["Artifacts", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Pkg", "libpng_jll"]
git-tree-sha1 = "d4f63314c8aa1e48cd22aa0c17ed76cd1ae48c3c"
uuid = "075b6546-f08a-558a-be8f-8157d0f608a5"
version = "1.10.3+0"
[[deps.libvorbis_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Ogg_jll", "Pkg"]
git-tree-sha1 = "490376214c4721cdaca654041f635213c6165cb3"
uuid = "f27f6e37-5d2b-51aa-960f-b287f2bc3b7a"
version = "1.3.7+2"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
version = "1.52.0+1"
[[deps.oneTBB_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "7d0ea0f4895ef2f5cb83645fa689e52cb55cf493"
uuid = "1317d2d5-d96f-522e-a858-c73665f53c3e"
version = "2021.12.0+0"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
version = "17.4.0+2"
[[deps.x264_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "35976a1216d6c066ea32cba2150c4fa682b276fc"
uuid = "1270edf5-f2f9-52d2-97e9-ab00b5d0237a"
version = "10164.0.0+0"
[[deps.x265_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "dcc541bb19ed5b0ede95581fb2e41ecf179527d2"
uuid = "dfaa095f-4041-5dcd-9319-2fabd8486b76"
version = "3.6.0+0"
"""
# ╔═╡ Cell order:
# ╠═a609b8a8-04ac-4533-9a33-61ea33805846
# ╠═84f88e89-c55e-41ba-97ad-fd561458c7e9
# ╠═ecde5a72-691b-4a9a-b0a8-2b740e42a710
# ╠═1cf94d6d-a0bc-474b-b479-5b4f4c916ea5
# ╠═ab0968e2-43c7-4610-87ba-47433c003081
# ╠═8bb0983b-103e-4cf8-9a9f-95feb90df054
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002
| AztecDiamonds | https://github.com/JuliaLabs/AztecDiamonds.jl.git |
|
[
"MIT"
] | 0.2.5 | 5f7fc2ce60d4540ffc1de3f102d9b5e00d3ad654 | code | 1722 | module MakieExtension
using Makie
using GeometryBasics: Vec2f, Point2f, Rect2f
using Colors
using Adapt: adapt
using AztecDiamonds: Tiling, faces, UP, RIGHT
import AztecDiamonds: tilingplot, tilingplot!
# Convert a tiling into plottable primitives: one rectangle and fill color per
# domino, plus an arrow base point and direction vector showing the domino's
# orientation. `pad` shrinks each rectangle by that margin on every side.
function prepare_plot(t::Tiling; pad = 0.1f0)
    rects = Rect2f[]
    fills = RGB{Colors.N0f8}[]
    arrow_bases, arrow_dirs = Point2f[], Vec2f[]
    foreach(faces(t)) do (i, j, isdotted)
        tile = t[i, j]
        off = isdotted ? -0.3f0 : 0.3f0   # arrow base offset along the domino
        len = isdotted ? -0.5f0 : 0.5f0   # arrow length (dotted dominoes point backwards)
        if tile == UP
            push!(rects, Rect2f(j - 1 + pad, i - 1 + pad, 1 - 2pad, 2 - 2pad))
            push!(fills, isdotted ? colorant"red" : colorant"green")
            push!(arrow_bases, Point2f(j - 0.5f0 - off, i))
            push!(arrow_dirs, Point2f(len, 0))
        elseif tile == RIGHT
            push!(rects, Rect2f(j - 1 + pad, i - 1 + pad, 2 - 2pad, 1 - 2pad))
            push!(fills, isdotted ? colorant"yellow" : colorant"blue")
            push!(arrow_bases, Point2f(j, i - 0.5f0 - off))
            push!(arrow_dirs, Point2f(0, len))
        end
    end
    return rects, fills, arrow_bases, arrow_dirs
end
# Makie recipe declaring the `tilingplot`/`tilingplot!` functions and attributes.
@recipe(TilingPlot, t) do scene
    Attributes(
        show_arrows = false,       # draw orientation arrows on each domino
        domino_padding = 0.1f0,    # margin shaved off every domino rectangle
        domino_stroke = 0,         # stroke width of the rectangle outlines
    )
end
# Make a bare `plot(::Tiling)` dispatch to the TilingPlot recipe.
Makie.plottype(::Tiling) = TilingPlot
# Render a Tiling: one colored polygon per domino, plus optional orientation arrows.
function Makie.plot!(x::TilingPlot{<:Tuple{Tiling}})
    t = adapt(Array, x[:t][])  # copy GPU-backed tilings to host memory first
    tiles, colors, arrow_pts, arrows = prepare_plot(t; pad = x.domino_padding[])
    poly!(x, tiles; color = colors, strokewidth = x.domino_stroke)
    x.show_arrows[] && arrows!(x, arrow_pts, arrows)
    return x
end
end
| AztecDiamonds | https://github.com/JuliaLabs/AztecDiamonds.jl.git |
|
[
"MIT"
] | 0.2.5 | 5f7fc2ce60d4540ffc1de3f102d9b5e00d3ad654 | code | 5979 | module AztecDiamonds
using OffsetArrays, Transducers
using Transducers: @next, complete
export Tiling, diamond, ka_diamond, dr_path
@enum Edge::UInt8 NONE UP RIGHT SHOULD_FILL
# Index ranges of the square matrix backing an order-N tiling: (1-N):N per axis.
function inds(N)
    r = (1 - N):N
    return (r, r)
end
"""
Tiling(N::Int[, x::OffsetMatrix{AztecDiamonds.Edge}]; sizehint::Int = N)
Represents an order N diamond-shaped tiling. If `x` is not provided, it is initialized with `NONE`
representing an empty tiling. The `sizehint` keyword argument may be used to preallocate a larger
matrix for `x` fitting a tiling of order `sizehint` to avoid reallocations when the tiling grows.
The indices of `x` represent the coordinates of the diamond-shaped tiling and run from 1-N to N
(though `x` is allowed to be larger as long as it contains these indices).
The edges it contains can either be `UP`, `RIGHT`, or `NONE`, where `UP` represents a vertical tile
covering one more tile to the top, `RIGHT` represents a horizontal tile covering one more tile to
the right. `NONE` means the edge is either already covered by another tile to the bottom or left or
the tiling is not fully filled yet.
```jldoctest
julia> t = Tiling(1)
1-order Tiling{Matrix{AztecDiamonds.Edge}}
julia> t[0, 0] = t[1, 0] = AztecDiamonds.RIGHT;
julia> t
1-order Tiling{Matrix{AztecDiamonds.Edge}}
🬇🬋🬋🬃
🬇🬋🬋🬃
```
See [`diamond`](@ref) and [`ka_diamond`](@ref) for constructing a filled tiling.
"""
struct Tiling{M <: AbstractMatrix{Edge}}
    N::Int                    # order of the diamond
    x::OffsetMatrix{Edge, M}  # edge matrix; indices must cover at least (1-N):N on both axes
end
# Construct an empty order-N tiling; `sizehint` preallocates for a larger order.
function Tiling(N::Int; sizehint::Int = N)
    return Tiling(N, fill(NONE, inds(sizehint)))
end
# Is face (i, j) inside the order-N diamond? (L1-distance test on doubled coordinates)
function in_diamond(N, i, j)
    return abs(2i - 1) + abs(2j - 1) <= 2N
end
# Bounds checking is against the diamond-shaped region, not the square backing matrix.
Base.checkbounds(::Type{Bool}, t::Tiling, i, j) = in_diamond(t.N, i, j)
# Throwing variant: raise a BoundsError for indices outside the order-N diamond.
function Base.checkbounds(t::Tiling, i, j)
    if !checkbounds(Bool, t, i, j)
        throw(BoundsError(t, (i, j)))
    end
    return nothing
end
# Indexing a Tiling reads/writes the backing matrix after a diamond-shape bounds check.
Base.@propagate_inbounds function Base.getindex(t::Tiling, i, j)
    @boundscheck checkbounds(t, i, j)
    return t.x[i, j]
end
Base.@propagate_inbounds function Base.setindex!(t::Tiling, x, i, j)
    @boundscheck checkbounds(t, i, j)
    return setindex!(t.x, x, i, j)
end
# Out-of-diamond reads return `def` instead of throwing.
Base.@propagate_inbounds function Base.get(t::Tiling, (i, j)::NTuple{2, Integer}, def)
    return checkbounds(Bool, t, i, j) ? t[i, j] : def
end
# Equality, hashing and copying are structural on (N, x).
Base.:(==)(t1::Tiling, t2::Tiling) = t1.N == t2.N && t1.x == t2.x
const TILING_SEED = 0x493d55c7378becd5 % UInt  # salt so Tiling hashes differ from raw (N, x)
function Base.hash((; N, x)::Tiling, h::UInt)
    return hash(x, hash(N, hash(TILING_SEED, h)))
end
Base.copy((; N, x)::Tiling) = Tiling(N, copy(x))
# Lazy Transducers-foldable over all faces (unit squares) of an order-N diamond.
struct DiamondFaces <: Transducers.Foldable
    N::Int
end
faces((; N)::Tiling) = DiamondFaces(N)
# Each element is (i, j, isdotted): face coordinates plus checkerboard parity.
Base.eltype(::DiamondFaces) = Tuple{Int, Int, Bool}
Base.length((; N)::DiamondFaces) = N * (N + 1) * 2  # number of unit squares in the diamond
# Custom fold: visit every face column by column, staying inside the diamond.
function Transducers.__foldl__(rf::R, val::V, (; N)::DiamondFaces) where {R, V}
    for j in (1 - N):N
        j′ = max(j, 1 - j)  # distance of column j from the vertical center line
        for i in (j′ - N):(N - j′ + 1)  # row extent of the diamond in this column
            isdotted = isodd(i + j - N)  # checkerboard parity of the face
            val = @next(rf, val, (i, j, isdotted))
        end
    end
    return complete(rf, val)
end
# Foldable over faces anchoring 2x2 blocks whose parity matches the `good` parameter.
struct BlockIterator{good, T <: Tiling} <: Transducers.Foldable
    t::T
    BlockIterator{good}(t::T) where {good, T <: Tiling} = new{good, T}(t)
end
# Face (i, j) anchors a block if it and its neighbor hold a pair of parallel
# dominoes; `good` selects which checkerboard parity counts as a match.
Base.@propagate_inbounds function isblock(t::Tiling, i, j, ::Val{good}) where {good}
    (; N) = t
    isdotted = isodd(i + j - N)
    tile = t[i, j]
    if tile == UP && j < N && get(t, (i, j + 1), NONE) == UP
        return good == isdotted
    elseif tile == RIGHT && i < N && get(t, (i + 1, j), NONE) == RIGHT
        return good == isdotted
    end
    return false
end
function Transducers.asfoldable((; t)::BlockIterator{good}) where {good}
    return faces(t) |> Filter() do (i, j, isdotted)
        return @inbounds isblock(t, i, j, Val(good))
    end
end
# destruction
# Remove every "bad" block (pair of parallel dominoes with the wrong parity),
# leaving both of its faces empty.
function remove_bad_blocks!(t::Tiling)
    foreach(BlockIterator{false}(t)) do (i, j)
        @inbounds if t[i, j] == UP
            t[i, j + 1] = NONE  # clear the partner domino of the vertical pair
        else
            t[i + 1, j] = NONE  # clear the partner domino of the horizontal pair
        end
        @inbounds t[i, j] = NONE
    end
    return t
end
# sliding
# Slide every surviving domino one step (dotted dominoes move in the negative
# direction), writing the result into the one-order-larger tiling `t′`.
function slide_tiles!(t′::Tiling, t::Tiling)
    foreach(faces(t)) do (i, j, isdotted)
        tile = @inbounds t[i, j]
        inc = isdotted ? -1 : 1
        @inbounds if tile == UP
            t′[i, j + inc] = UP
        elseif tile == RIGHT
            t′[i + inc, j] = RIGHT
        end
    end
    return t′
end
# A face is empty iff it holds no edge and is covered neither from below (UP)
# nor from the left (RIGHT).
Base.@propagate_inbounds function is_empty_tile(t′::Tiling, i, j)
    return t′[i, j] == NONE && get(t′, (i - 1, j), NONE) != UP && get(t′, (i, j - 1), NONE) != RIGHT
end
# filling
# Fill the empty regions with 2x2 blocks of uniformly random orientation.
# NOTE(review): relies on the column-major face visit order of `faces` so that
# an empty (i, j) is the corner of a fillable 2x2 block — confirm invariant.
function fill_empty_blocks!(t′::Tiling)
    foreach(faces(t′)) do (i, j)  # third tuple element (parity) is ignored
        @inbounds if is_empty_tile(t′, i, j)
            if rand(Bool)
                t′[i, j] = t′[i, j + 1] = UP     # two vertical dominoes side by side
            else
                t′[i, j] = t′[i + 1, j] = RIGHT  # two horizontal dominoes stacked
            end
        end
    end
    return t′
end
# Advance the shuffling algorithm by one order: destroy bad blocks in `t`, slide
# the survivors into the one-order-larger `t′`, then fill the empty regions.
function step!(t′::Tiling, t::Tiling)
    if t′.N != t.N + 1
        throw(ArgumentError("t′.N ≠ t.N + 1"))
    end
    remove_bad_blocks!(t)
    slide_tiles!(t′, t)
    return fill_empty_blocks!(t′)
end
# Grow a tiling from order 0 up to order N, ping-ponging between the two
# preallocated buffers `t` and `t′`.
function diamond!(t, t′, N)
    for N in 1:N  # loop variable deliberately shadows the target order
        (; x) = t′
        view(x, inds(N - 1)...) .= NONE  # clear the scratch buffer's active region
        t′ = Tiling(N, x)
        t, t′ = step!(t′, t), t  # the freshly stepped tiling becomes the current one
    end
    return t
end
"""
diamond(N::Int) -> Tiling{Matrix{AztecDiamonds.Edge}}
Generates a uniformally random order N diamond tiling.
```jldoctest
julia> using Random; Random.seed!(1);
julia> diamond(4)
4-order Tiling{Matrix{AztecDiamonds.Edge}}
🬇🬋🬋🬃
🬇🬋🬋🬃🬇🬋🬋🬃
🬦🬓🬦🬓🬦🬓🬦🬓🬇🬋🬋🬃
🬦🬓🬉🬄🬉🬄🬉🬄🬉🬄🬇🬋🬋🬃🬦🬓
🬉🬄🬦🬓🬦🬓🬇🬋🬋🬃🬦🬓🬦🬓🬉🬄
🬉🬄🬉🬄🬇🬋🬋🬃🬉🬄🬉🬄
🬇🬋🬋🬃🬇🬋🬋🬃
🬇🬋🬋🬃
```
See [`ka_diamond`](@ref) for a version that can take advantage of GPU acceleration.
`ka_diamond(N, Array)` may also be faster for large N.
Ref [`Tiling`](@ref)
"""
function diamond(N::Int)
t, t′ = Tiling(0; sizehint = N), Tiling(0; sizehint = N)
return diamond!(t, t′, N)
end
include("ka.jl")
include("show.jl")
include("dr_path.jl")
# stubs for plotting functions
function tilingplot end
function tilingplot! end
end
| AztecDiamonds | https://github.com/JuliaLabs/AztecDiamonds.jl.git |
|
[
"MIT"
] | 0.2.5 | 5f7fc2ce60d4540ffc1de3f102d9b5e00d3ad654 | code | 644 | function dr_path(t::Tiling)
(; x, N) = t
y = OffsetVector{Float64}(undef, -N:N)
y[-N] = -0.5
prev = UP
i = -1
for j in (1 - N):N
@assert checkbounds(Bool, t, i + 1, j)
tile = x[i + 1, j]
if prev == RIGHT
y[j] = i + 0.5
elseif tile == UP
i += 1
y[j] = i + 0.5
elseif tile == RIGHT
y[j] = i + 0.5
else
if prev == UP
y[j] = i - 0.5
i -= 1
else
i -= 1
y[j] = i + 0.5
end
end
prev = tile
end
return y
end
| AztecDiamonds | https://github.com/JuliaLabs/AztecDiamonds.jl.git |
|
[
"MIT"
] | 0.2.5 | 5f7fc2ce60d4540ffc1de3f102d9b5e00d3ad654 | code | 5098 | using KernelAbstractions, Adapt
# Allow `adapt` to move a Tiling's backing storage between host and device arrays.
Adapt.adapt_structure(to, (; N, x)::Tiling) = Tiling(N, adapt(to, x))
# The KernelAbstractions backend of a Tiling is that of its backing matrix.
KernelAbstractions.get_backend((; x)::Tiling) = KernelAbstractions.get_backend(x)
# destruction
# GPU counterpart of `remove_bad_blocks!`: one work-item per matrix cell.
@kernel function remove_bad_blocks_kernel!(t::Tiling) # COV_EXCL_LINE
    (; N) = t
    I = @index(Global, NTuple) # COV_EXCL_LINE
    i, j = I .- N  # shift the 1-based work-item index into diamond coordinates
    @inbounds if in_diamond(N, i, j) && isblock(t, i, j, Val(false))
        if t[i, j] == UP
            t[i, j + 1] = NONE
        else
            t[i + 1, j] = NONE
        end
        t[i, j] = NONE
    end
end
# sliding
# GPU counterpart of `slide_tiles!`: each work-item slides one face's domino.
@kernel function slide_tiles_kernel!(t′::Tiling, @Const(t::Tiling)) # COV_EXCL_LINE
    (; N) = t
    I = @index(Global, NTuple) # COV_EXCL_LINE
    i, j = I .- N
    @inbounds if in_diamond(N, i, j)
        tile = @inbounds t[i, j]
        isdotted = isodd(i + j - N)
        inc = ifelse(isdotted, -1, 1)  # dotted dominoes slide backwards
        @inbounds if tile == UP
            t′[i, j + inc] = UP
        elseif tile == RIGHT
            t′[i + inc, j] = RIGHT
        end
    end
end
# filling
# GPU filling, pass 1: mark anchor faces of empty 2x2 blocks in `scratch`.
# The backward parity scans decide whether (i, j) is an anchor: only every other
# empty face in a contiguous empty run can start a block.
@kernel function fill_empty_blocks_kernel1!(t′::Tiling, scratch::OffsetMatrix) # COV_EXCL_LINE
    (; N) = t′
    I = @index(Global, NTuple) # COV_EXCL_LINE
    i, j = I .- N
    @inbounds if in_diamond(N, i, j) && is_empty_tile(t′, i, j)
        should_fill = true
        i′ = i - 1
        while in_diamond(N, i′, j) && is_empty_tile(t′, i′, j)
            should_fill ⊻= true  # toggle parity for each empty face below
            i′ -= 1
        end
        if should_fill
            j′ = j - 1
            while in_diamond(N, i, j′) && is_empty_tile(t′, i, j′)
                should_fill ⊻= true  # toggle parity for each empty face to the left
                j′ -= 1
            end
            if should_fill
                scratch[i, j] = SHOULD_FILL
            end
        end
    end
end
# GPU filling, pass 2: fill each marked anchor with a random block orientation.
@kernel function fill_empty_blocks_kernel2!(t′::Tiling, scratch::OffsetMatrix) # COV_EXCL_LINE
    (; N) = t′
    I = @index(Global, NTuple) # COV_EXCL_LINE
    i, j = I .- N
    @inbounds if in_diamond(N, i, j)
        if scratch[i, j] == SHOULD_FILL
            if rand(Bool)
                t′[i, j] = t′[i, j + 1] = UP
            else
                t′[i, j] = t′[i + 1, j] = RIGHT
            end
        end
    end
end
# Reset the (shifted) active region of `t`'s backing matrix to NONE.
@kernel function zero_kernel!(t::Tiling, N) # COV_EXCL_LINE
    I = @index(Global, NTuple) # COV_EXCL_LINE
    i, j = I .- N
    @inbounds t.x[i, j] = NONE
end
# Kernel-based counterpart of `diamond!`. The two tilings ping-pong roles each
# iteration; the inactive tiling's matrix doubles as the SHOULD_FILL scratch
# buffer for the two-pass fill (stale edge values never equal SHOULD_FILL).
function ka_diamond!(t, t′, N; backend)
    # Instantiate one kernel per stage on the requested backend.
    zero! = zero_kernel!(backend)
    remove_bad_blocks! = remove_bad_blocks_kernel!(backend)
    slide_tiles! = slide_tiles_kernel!(backend)
    fill_empty_blocks1! = fill_empty_blocks_kernel1!(backend)
    fill_empty_blocks2! = fill_empty_blocks_kernel2!(backend)
    # Order 1 only needs the fill stage (nothing to destroy or slide yet).
    t′ = Tiling(1, t′.x)
    ndrange = (2, 2)
    fill_empty_blocks1!(t′, t.x; ndrange)
    fill_empty_blocks2!(t′, t.x; ndrange)
    t, t′ = t′, t
    for N in 2:N
        zero!(t′, N - 1; ndrange)  # clear last iteration's region of the scratch tiling
        t′ = Tiling(N, t′.x)
        remove_bad_blocks!(t; ndrange)
        slide_tiles!(t′, t; ndrange)
        ndrange = (2N, 2N)  # the fill stage must cover the grown diamond
        fill_empty_blocks1!(t′, t.x; ndrange)
        fill_empty_blocks2!(t′, t.x; ndrange)
        t, t′ = t′, t
    end
    return t
end
"""
ka_diamond(N::Int, ArrayT::Type{<:AbstractArray}) -> Tiling{ArrayT{Edge}}
Generate a uniformly random diamond tiling just like [`diamond`](@ref), but using `KernelAbstractions.jl`
to be able to take advantage of (GPU) parallelism. `ArrayT` can either be `Array` or any GPU array type.
Ref [`Tiling`](@ref)
"""
function ka_diamond(N::Int, ArrayT::Type{<:AbstractArray})
    # Allocate two full-size 2N×2N buffers up front (ping-pong + scratch).
    mem = ntuple(_ -> fill!(ArrayT{Edge}(undef, 2N, 2N), NONE), 2)
    t, t′ = map(x -> Tiling(0, OffsetMatrix(x, inds(N))), mem)
    return ka_diamond!(t, t′, N; backend = KernelAbstractions.get_backend(mem[1]))
end
# rotation of tilings
# Rotate 90° clockwise: a RIGHT domino becomes UP; a face covered from below becomes RIGHT.
@kernel function rotr90_kernel!(t′::Tiling, @Const(t::Tiling)) # COV_EXCL_LINE
    (; N) = t
    I = @index(Global, NTuple) # COV_EXCL_LINE
    i, j = I .- N
    edge = NONE
    if @inbounds t.x[i, j] == RIGHT
        edge = UP
    elseif get(t, (i - 1, j), NONE) == UP
        edge = RIGHT
    end
    @inbounds t′.x[j, 1 - i] = edge
end
# Rotate 90° counterclockwise: an UP domino becomes RIGHT; a face covered from the left becomes UP.
@kernel function rotl90_kernel!(t′::Tiling, @Const(t::Tiling)) # COV_EXCL_LINE
    (; N) = t
    I = @index(Global, NTuple) # COV_EXCL_LINE
    i, j = I .- N
    edge = NONE
    if @inbounds t.x[i, j] == UP
        edge = RIGHT
    elseif get(t, (i, j - 1), NONE) == RIGHT
        edge = UP
    end
    @inbounds t′.x[1 - j, i] = edge
end
# Rotate 180°: each domino's anchor moves to its covered partner's mirrored position.
@kernel function rot180_kernel!(t′::Tiling, @Const(t::Tiling)) # COV_EXCL_LINE
    (; N) = t
    I = @index(Global, NTuple) # COV_EXCL_LINE
    i, j = I .- N
    edge = NONE
    if get(t, (i - 1, j), NONE) == UP
        edge = UP
    elseif get(t, (i, j - 1), NONE) == RIGHT
        edge = RIGHT
    end
    @inbounds t′.x[1 - i, 1 - j] = edge
end
# Define Base.rotr90/rotl90/rot180 for Tiling by launching the matching kernel.
for rot in Symbol.(:rot, ["r90", "l90", "180"])
    @eval function Base.$rot(t::Tiling)
        (; N, x) = t
        t′ = Tiling(N, similar(x))
        backend = KernelAbstractions.get_backend(t)
        $(Symbol(rot, :_kernel!))(backend)(t′, t; ndrange = (2N, 2N))
        return t′
    end
end
| AztecDiamonds | https://github.com/JuliaLabs/AztecDiamonds.jl.git |
|
[
"MIT"
] | 0.2.5 | 5f7fc2ce60d4540ffc1de3f102d9b5e00d3ad654 | code | 3182 | using Colors
import ImageShow
using Base64: Base64EncodePipe
# e.g. "10-order Tiling{Matrix{AztecDiamonds.Edge}}"
Base.summary(io::IO, t::Tiling) = print(io, t.N, "-order ", typeof(t))
# Rasterize a tiling: each domino paints its two faces with a color encoding
# orientation and parity (UP: red/green, RIGHT: yellow/blue); uncovered faces
# stay transparent.
function to_img(t::Tiling)
    img = fill(colorant"transparent", inds(t.N))
    foreach(faces(t)) do (i, j, isdotted)
        tile = t[i, j]
        if tile == UP
            img[i, j] = img[i + 1, j] = isdotted ? colorant"red" : colorant"green"
        elseif tile == RIGHT
            img[i, j] = img[i, j + 1] = isdotted ? colorant"yellow" : colorant"blue"
        end
    end
    return img
end
# Compact parseable representation, e.g. `Tiling(1, [NONE NONE; NONE NONE])`.
function Base.show(io::IO, (; N, x)::Tiling)
    print(io, "Tiling(", N)
    if N > 0
        print(io, ", ")
        # NOTE(review): `Base._show_nonempty` is an internal API and may break
        # on future Julia versions — consider a public alternative.
        Base._show_nonempty(IOContext(io, :compact => true), parent(x), "")
    end
    print(io, ")")
end
function Base.show(io::IO, ::MIME"text/plain", t::Tiling)
summary(io, t)
(; N) = t
if displaysize(io)[2] < 4N
printstyled(
io, "\n Output too large to fit terminal. \
Use `using ImageView; imshow(AztecDiamonds.to_img(D))` to display as an image instead.";
color = :black,
)
return nothing
end
t = adapt(Array, t)
foreach(Iterators.product(inds(N)...)) do (j, i)
j == 1 - N && println(io)
isdotted = isodd(i + j - N)
if get(t, (i, j), NONE) == UP
color = isdotted ? :red : :green
if get(t, (i - 1, j), NONE) == UP
print(io, "UU")
elseif get(t, (i, j - 1), NONE) == RIGHT
print(io, "UR")
else
printstyled(io, "🬦🬓"; color)
end
elseif get(t, (i - 1, j), NONE) == UP
color = !isdotted ? :red : :green
if get(t, (i, j - 1), NONE) == RIGHT
print(io, "NR")
elseif get(t, (i, j), NONE) == RIGHT
print(io, "RU")
else
printstyled(io, "🬉🬄"; color)
end
elseif get(t, (i, j), NONE) == RIGHT
color = isdotted ? :yellow : :blue
if get(t, (i, j - 1), NONE) == RIGHT
print(io, "RR")
else
printstyled(io, "🬇🬋"; color)
end
elseif get(t, (i, j - 1), NONE) == RIGHT
color = !isdotted ? :yellow : :blue
printstyled(io, "🬋🬃"; color)
elseif j < 0 || in_diamond(N, i, j) # don't produce trailing spaces
print(io, " ")
end
end
end
Base.showable(::MIME"image/png", (; N)::Tiling) = N > 0
function Base.show(io::IO, ::MIME"image/png", t::Tiling; kw...)
io = IOContext(io, :full_fidelity => true)
img = to_img(adapt(Array, t))
show(io, MIME("image/png"), img; kw...)
end
Base.showable(::MIME"juliavscode/html", (; N)::Tiling) = N > 0
function Base.show(io::IO, ::MIME"juliavscode/html", t::Tiling; kw...)
img = to_img(adapt(Array, t))
print(io, "<img src='data:image/gif;base64,")
b64_io = IOContext(Base64EncodePipe(io), :full_fidelity => true)
show(b64_io, MIME("image/png"), img; kw...)
close(b64_io)
print(io, "' style='width: 100%; max-height: 500px; object-fit: contain; image-rendering: pixelated' />")
end
| AztecDiamonds | https://github.com/JuliaLabs/AztecDiamonds.jl.git |
|
[
"MIT"
] | 0.2.5 | 5f7fc2ce60d4540ffc1de3f102d9b5e00d3ad654 | code | 2298 | @testitem "core" begin
include("verify_tiling.jl")
D = diamond(100)
@test verify_tiling(D)
dr = dr_path(D)
@test dr[end] == -0.5
end
@testitem "Tiling" begin
using AztecDiamonds: NONE
D = diamond(100)
D′ = copy(D)
@test D′ == D
@test isequal(D′, D)
@test hash(D′) == hash(D)
D[0, 0] = NONE
@test D[0, 0] == NONE
@test_throws BoundsError D[51, 51]
@test_throws BoundsError D[-51, -51]
@test_throws BoundsError D[51, 51] = NONE
end
@testitem "DiamondFaces" begin
using AztecDiamonds: DiamondFaces
df = DiamondFaces(10)
df′ = foldl(vcat, df; init = Union{}[])
@test length(df) == length(df′)
@test eltype(df) == eltype(df′)
@test length(df′[1]) == 3
end
@testitem "KernelAbstractions CPU" begin
include("verify_tiling.jl")
D = ka_diamond(100, Array)
@test verify_tiling(D)
end
@testitem "rotation of tilings" begin
using AztecDiamonds.Colors: @colorant_str, RGBA, N0f8 # somehow using Colors: ... doesn't work in VSCode
include("verify_tiling.jl")
_to_img(D) = parent(AztecDiamonds.to_img(D))
D = diamond(100)
@testset "$rot" for (rot, replacements) in (
(
rotr90, Pair{RGBA{N0f8}, RGBA{N0f8}}[
colorant"red" => colorant"yellow",
colorant"yellow" => colorant"green",
colorant"green" => colorant"blue",
colorant"blue" => colorant"red",
],
),
(
rotl90, Pair{RGBA{N0f8}, RGBA{N0f8}}[
colorant"red" => colorant"blue",
colorant"blue" => colorant"green",
colorant"green" => colorant"yellow",
colorant"yellow" => colorant"red",
],
),
(
rot180, Pair{RGBA{N0f8}, RGBA{N0f8}}[
colorant"red" => colorant"green",
colorant"green" => colorant"red",
colorant"blue" => colorant"yellow",
colorant"yellow" => colorant"blue",
],
),
)
D′ = rot(D)
@test verify_tiling(D′)
@test _to_img(D′) == replace(rot(_to_img(D)), replacements...)
end
end
| AztecDiamonds | https://github.com/JuliaLabs/AztecDiamonds.jl.git |
|
[
"MIT"
] | 0.2.5 | 5f7fc2ce60d4540ffc1de3f102d9b5e00d3ad654 | code | 379 | @testitem "CUDA" tags = [:cuda] begin
include("verify_tiling.jl")
using CUDA, Adapt
D = ka_diamond(200, CuArray)
D_cpu = adapt(Array, D)
@test verify_tiling(D_cpu)
@testset "$rot" for rot in (rotr90, rotl90, rot180)
D′ = rot(D)
D_cpu′ = adapt(Array, D′)
@test verify_tiling(D_cpu′)
@test D_cpu′ == rot(D_cpu)
end
end
| AztecDiamonds | https://github.com/JuliaLabs/AztecDiamonds.jl.git |
|
[
"MIT"
] | 0.2.5 | 5f7fc2ce60d4540ffc1de3f102d9b5e00d3ad654 | code | 345 | @testitem "Makie" begin
using CairoMakie
using CairoMakie: Axis
D = diamond(100)
f = Figure()
ax = Axis(f[1, 1]; aspect = 1)
plot!(ax, D; domino_padding = 0.05f0, domino_stroke = 1, show_arrows = true)
path = tempname() * ".png"
save(path, f)
@test isfile(path)
@test filesize(path) > 1024 # 1 kiB
end
| AztecDiamonds | https://github.com/JuliaLabs/AztecDiamonds.jl.git |
|
[
"MIT"
] | 0.2.5 | 5f7fc2ce60d4540ffc1de3f102d9b5e00d3ad654 | code | 290 | using TestItemRunner, CUDA
# Predicate: does a test item carry the :cuda tag?
function iscuda(item)
    return :cuda in item.tags
end
if !(haskey(ENV, "BUILDKITE") && CUDA.functional()) # skip non-gpu tests on Buildkite CI
@run_package_tests filter = !iscuda verbose = true
end
if CUDA.functional()
@run_package_tests filter = iscuda verbose = true
end
| AztecDiamonds | https://github.com/JuliaLabs/AztecDiamonds.jl.git |
|
[
"MIT"
] | 0.2.5 | 5f7fc2ce60d4540ffc1de3f102d9b5e00d3ad654 | code | 1812 | @testitem "image show" begin
using Images
D = diamond(100)
@test Base.showable("image/png", D)
@test repr("image/png", D) isa Vector{UInt8}
img = AztecDiamonds.to_img(D)
@test img isa AbstractMatrix{<:Colorant}
@test axes(img) == (-99:100, -99:100)
@test !Base.showable("image/png", Tiling(0))
end
@testitem "pretty printing" begin
@test summary(Tiling(2)) == "2-order $Tiling{Matrix{AztecDiamonds.Edge}}"
@test repr(Tiling(1)) == "Tiling(1, [NONE NONE; NONE NONE])"
N = 20
D = diamond(N)
r = repr(MIME("text/plain"), D)
@test length(r) == 2537
r_color = repr(MIME("text/plain"), D; context = :color => true)
@test length(r_color) == length(r) + 10length(AztecDiamonds.faces(D))
r = repr(MIME("text/plain"), D; context = :displaysize => (10, 10))
@test contains(r, "Output too large to fit terminal")
end
@testitem "printing of malformed tilings" begin
using AztecDiamonds: Tiling, UP, RIGHT
t = Tiling(4)
t[-3, 0] = UP
t[-2, 0] = UP
t[0, -3] = RIGHT
t[0, -2] = UP
t[0, 0] = UP
t[1, -1] = RIGHT
t[0, 1] = UP
t[1, 1] = RIGHT
t[2, -1] = RIGHT
t[2, 0] = RIGHT
# TODO: should
expected = replace(
"""
4-order $Tiling{Matrix{AztecDiamonds.Edge}}
🬦🬓 \\
UU \\
🬉🬄 \\
🬇🬋UR 🬦🬓🬦🬓 \\
🬉🬄🬇🬋NRRU🬋🬃 \\
🬇🬋RR🬋🬃 \\
\\
""",
"\\" => ""
)
@test repr(MIME("text/plain"), t) == expected
end
@testitem "VSCode show" begin
using Base64
D = diamond(20)
@test Base.showable("juliavscode/html", D)
html = String(repr("juliavscode/html", D))
b64_png = stringmime("image/png", D)
@test contains(html, b64_png)
end
| AztecDiamonds | https://github.com/JuliaLabs/AztecDiamonds.jl.git |
|
[
"MIT"
] | 0.2.5 | 5f7fc2ce60d4540ffc1de3f102d9b5e00d3ad654 | code | 894 | using AztecDiamonds: inds, NONE, UP, RIGHT
# Validate a tiling: every face inside the diamond is covered exactly once and
# nothing spills outside the diamond. Throws with a descriptive message on the
# first violation; returns `true` otherwise.
function verify_tiling(t::Tiling)
    (; N, x) = t
    for (i, j) in Iterators.product(inds(N)...)
        if checkbounds(Bool, t, i, j)
            # Inside the diamond: the face must hold an edge or be covered by a neighbor.
            if t[i, j] == NONE && get(t, (i - 1, j), NONE) != UP && get(t, (i, j - 1), NONE) != RIGHT
                error("Square ($i, $j) is not covered by any tile!")
            end
        else
            # Outside the diamond: the backing matrix must be empty and uncovered.
            if x[i, j] != NONE
                error("Square ($i, $j) should be empty, is $(x[i, j])")
            end
            if get(x, CartesianIndex(i - 1, j), NONE) == UP
                error("Square ($i, $j) should be empty, is covered from below by ($(i - 1), $j)")
            end
            if get(x, CartesianIndex(i, j - 1), NONE) == RIGHT
                error("Square ($i, $j) should be empty, is covered from the left by ($i, $(j - 1))")
            end
        end
    end
    return true
end
| AztecDiamonds | https://github.com/JuliaLabs/AztecDiamonds.jl.git |
|
[
"MIT"
] | 0.2.5 | 5f7fc2ce60d4540ffc1de3f102d9b5e00d3ad654 | docs | 2778 | # AztecDiamonds
[](https://julia.mit.edu/AztecDiamonds.jl/stable/)
[](https://julia.mit.edu/AztecDiamonds.jl/dev/)
[](https://github.com/JuliaLabs/AztecDiamonds.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://buildkite.com/julialang/aztecdiamonds-dot-jl)
[](https://codecov.io/gh/JuliaLabs/AztecDiamonds.jl)
A package for generating and analyzing [Aztec diamonds](https://en.wikipedia.org/wiki/Aztec_diamond)
## Getting Started
To generate an order-n Aztec diamond, simply call `diamond(n)`
```julia-repl
julia> D = diamond(10)
10-order Tiling{Matrix{AztecDiamonds.Edge}}
🬇🬋🬋🬃
🬇🬋🬋🬃🬇🬋🬋🬃
🬇🬋🬋🬃🬇🬋🬋🬃🬇🬋🬋🬃
🬇🬋🬋🬃🬇🬋🬋🬃🬦🬓🬦🬓🬇🬋🬋🬃
🬇🬋🬋🬃🬇🬋🬋🬃🬦🬓🬉🬄🬉🬄🬦🬓🬇🬋🬋🬃
🬇🬋🬋🬃🬦🬓🬇🬋🬋🬃🬉🬄🬦🬓🬦🬓🬉🬄🬦🬓🬇🬋🬋🬃
🬦🬓🬇🬋🬋🬃🬉🬄🬦🬓🬇🬋🬋🬃🬉🬄🬉🬄🬦🬓🬉🬄🬦🬓🬇🬋🬋🬃
🬦🬓🬉🬄🬦🬓🬇🬋🬋🬃🬉🬄🬦🬓🬦🬓🬇🬋🬋🬃🬉🬄🬦🬓🬉🬄🬦🬓🬦🬓🬦🬓
🬦🬓🬉🬄🬦🬓🬉🬄🬦🬓🬦🬓🬦🬓🬉🬄🬉🬄🬦🬓🬦🬓🬦🬓🬉🬄🬦🬓🬉🬄🬉🬄🬉🬄🬦🬓
🬦🬓🬉🬄🬦🬓🬉🬄🬦🬓🬉🬄🬉🬄🬉🬄🬇🬋🬋🬃🬉🬄🬉🬄🬉🬄🬦🬓🬉🬄🬇🬋🬋🬃🬦🬓🬉🬄🬦🬓
🬉🬄🬦🬓🬉🬄🬦🬓🬉🬄🬦🬓🬇🬋🬋🬃🬦🬓🬇🬋🬋🬃🬇🬋🬋🬃🬉🬄🬇🬋🬋🬃🬦🬓🬉🬄🬦🬓🬉🬄
🬉🬄🬦🬓🬉🬄🬦🬓🬉🬄🬦🬓🬦🬓🬉🬄🬦🬓🬦🬓🬦🬓🬦🬓🬇🬋🬋🬃🬦🬓🬉🬄🬦🬓🬉🬄
🬉🬄🬦🬓🬉🬄🬦🬓🬉🬄🬉🬄🬦🬓🬉🬄🬉🬄🬉🬄🬉🬄🬇🬋🬋🬃🬉🬄🬦🬓🬉🬄
🬉🬄🬦🬓🬉🬄🬇🬋🬋🬃🬉🬄🬇🬋🬋🬃🬇🬋🬋🬃🬦🬓🬇🬋🬋🬃🬉🬄
🬉🬄🬦🬓🬦🬓🬦🬓🬇🬋🬋🬃🬇🬋🬋🬃🬦🬓🬉🬄🬇🬋🬋🬃
🬉🬄🬉🬄🬉🬄🬦🬓🬦🬓🬇🬋🬋🬃🬉🬄🬇🬋🬋🬃
🬇🬋🬋🬃🬉🬄🬉🬄🬇🬋🬋🬃🬇🬋🬋🬃
🬇🬋🬋🬃🬇🬋🬋🬃🬇🬋🬋🬃
🬇🬋🬋🬃🬇🬋🬋🬃
🬇🬋🬋🬃
```
It is recommended that you use an interactive environment like Pluto, VS Code or IJulia to be able to view larger diamond tilings in all their glory. Alternatively, you can also view them in a separate window using the [ImageView](https://github.com/JuliaImages/ImageView.jl) package as follows:
```julia-repl
julia> using ImageView
julia> imshow(AztecDiamonds.to_img(D))
[...]
```
It is possible to take advantage of GPU acceleration via [KernelAbstractions.jl](https://github.com/JuliaGPU/KernelAbstractions.jl) on supported backends, e.g. CUDA:
```julia-repl
julia> using CUDA
julia> ka_diamond(200, CuArray)
[...]
```
You can extract the DR-path separating the northern arctic region from the rest of the diamond using the `dr_path` function.
```julia-repl
julia> dr_path(D)
21-element OffsetArray(::Vector{Float64}, -10:10) with eltype Float64 with indices -10:10:
-0.5
0.5
1.5
2.5
3.5
4.5
5.5
4.5
5.5
6.5
5.5
5.5
5.5
4.5
3.5
3.5
3.5
2.5
1.5
0.5
-0.5
```
To get the other DR-paths the tiling can be rotated first using the functions `rotr90`, `rotl90` or `rot180`.
| AztecDiamonds | https://github.com/JuliaLabs/AztecDiamonds.jl.git |
|
[
"MIT"
] | 0.2.5 | 5f7fc2ce60d4540ffc1de3f102d9b5e00d3ad654 | docs | 438 | ```@meta
CurrentModule = AztecDiamonds
```
# AztecDiamonds
Documentation for [AztecDiamonds](https://github.com/JuliaLabs/AztecDiamonds.jl).
For an example notebook using this package, see [here](https://julia.mit.edu/AztecDiamonds.jl/examples/stable/notebook.html).
Here's a random diamond:
```@example
using AztecDiamonds
show(stdout, MIME("text/plain"), diamond(10))
```
```@index
```
```@autodocs
Modules = [AztecDiamonds]
```
| AztecDiamonds | https://github.com/JuliaLabs/AztecDiamonds.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 860 | using SqpSolver, Ipopt
using JuMP
# Inner NLP solver used by the SQP method for its subproblems.
ipopt_solver = optimizer_with_attributes(
    Ipopt.Optimizer,
    "print_level" => 0,
    "warm_start_init_point" => "yes",
)
# Outer trust-region SQP solver wrapping the inner optimizer.
optimizer = optimizer_with_attributes(
    SqpSolver.Optimizer,
    "external_optimizer" => ipopt_solver,
    "max_iter" => 100,
    "algorithm" => "SQP-TR",
)
# Small nonconvex example: quadratic objective with nonlinear equality and
# inequality constraints.
model = Model(optimizer)
@variable(model, X);
@variable(model, Y);
@objective(model, Min, X^2 + X);
@NLconstraint(model, X^2 - X == 2);
@NLconstraint(model, X*Y == 1);
@NLconstraint(model, X*Y >= 0);
@constraint(model, X >= -2);
println("________________________________________");
print(model);
println("________________________________________");
JuMP.optimize!(model);
xsol = JuMP.value.(X)
ysol = JuMP.value.(Y)
status = termination_status(model)
println("Xsol = ", xsol);
println("Ysol = ", ysol);
println("Status: ", status);
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 1411 | mutable struct ACWRPowerModel <: PowerModels.AbstractWRModel
PowerModels.@pm_fields
end
# ACWR voltage variables: the usual W-space variables (w, wr, wi) plus
# rectangular voltage components (vr, vi) used to make the relaxation exact.
function PowerModels.variable_bus_voltage(pm::ACWRPowerModel; kwargs...)
    variable_bus_voltage_magnitude_sqr(pm; kwargs...)
    variable_buspair_voltage_product(pm; kwargs...)
    nw = pm.cnw
    PowerModels.var(pm, nw)[:vr] = JuMP.@variable(
        pm.model,
        [i in PowerModels.ids(pm, nw, :bus)],
        base_name="$(nw)_vr",
        start = PowerModels.comp_start_value(PowerModels.ref(pm, nw, :bus, i), "vr_start", 1.0))
    PowerModels.var(pm, nw)[:vi] = JuMP.@variable(
        pm.model,
        [i in PowerModels.ids(pm, nw, :bus)],
        base_name="$(nw)_vi",
        start = PowerModels.comp_start_value(PowerModels.ref(pm, nw, :bus, i), "vi_start"))
    end
# Link the W-space variables to the rectangular voltages with nonconvex
# equalities, making the ACWR formulation equivalent to full AC.
function PowerModels.constraint_model_voltage(pm::ACWRPowerModel, n::Int)
    w  = var(pm, n,  :w)
    wr = var(pm, n, :wr)
    wi = var(pm, n, :wi)
    vr = var(pm, n, :vr)
    vi = var(pm, n, :vi)
    for i in ids(pm, n, :bus)
        JuMP.@constraint(pm.model, w[i] == vr[i]^2 + vi[i]^2)
    end
    for (i,j) in ids(pm, n, :buspairs)
        JuMP.@constraint(pm.model, wr[(i,j)] == vr[i] * vr[j] + vi[i] * vi[j])
        JuMP.@constraint(pm.model, wi[(i,j)] == vi[i] * vr[j] - vr[i] * vi[j])
    end
end
# Build an ACWR OPF model from a MATPOWER/PTI case file.
build_acwr(data_file::String) = instantiate_model(PowerModels.parse_file(data_file), ACWRPowerModel, PowerModels.build_opf)
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 5659 | """
Initialize variable values by taking the mean of lower and upper bounds.
"""
function init_vars(pm::AbstractPowerModel)
    # Initialize each variable family from its bounds (see `set_start_value`).
    init_branch_vars(pm)
    init_dc_vars(pm)
    init_gen_vars(pm)
    init_voltage_vars(pm)
end
"""
Initialize variable values for ACPPowerModel from Ipopt solution.
"""
function init_vars_from_ipopt(pm::T, pm2::T) where T<:AbstractPowerModel
optimize_model!(pm2, optimizer = Ipopt.Optimizer)
init_branch_vars(pm, pm2)
init_dc_vars(pm, pm2)
init_gen_vars(pm, pm2)
init_voltage_vars(pm, pm2)
end
"""
Set initial variable value to JuMP, if the variable has both lower and upper bounds.
"""
function set_start_value(v::JuMP.VariableRef)
if has_lower_bound(v) && has_upper_bound(v)
if upper_bound(v) < Inf && lower_bound(v) > -Inf
JuMP.set_start_value(v, (upper_bound(v)+lower_bound(v))/2)
elseif upper_bound(v) < Inf
JuMP.set_start_value(v, upper_bound(v))
elseif lower_bound(v) > -Inf
JuMP.set_start_value(v, lower_bound(v))
end
elseif has_lower_bound(v)
if lower_bound(v) > -Inf
JuMP.set_start_value(v, lower_bound(v))
else
JuMP.set_start_value(v, 0.0)
end
elseif has_upper_bound(v)
if upper_bound(v) < Inf
JuMP.set_start_value(v, upper_bound(v))
else
JuMP.set_start_value(v, 0.0)
end
end
end
"""
Initilize branch variable values
"""
# Bound-based initialization of branch power-flow variables (p, q).
function init_branch_vars(pm::AbstractPowerModel)
    for (l,i,j) in ref(pm,:arcs)
        set_start_value(var(pm,:p)[(l,i,j)])
        set_start_value(var(pm,:q)[(l,i,j)])
    end
end
# Warm-start branch power-flow variables from a solved model.
function init_branch_vars(pm::AbstractPowerModel, pm_solved::AbstractPowerModel)
    for (l,i,j) in ref(pm,:arcs)
        JuMP.set_start_value(var(pm,:p)[(l,i,j)], JuMP.value(var(pm_solved,:p)[(l,i,j)]))
        JuMP.set_start_value(var(pm,:q)[(l,i,j)], JuMP.value(var(pm_solved,:q)[(l,i,j)]))
    end
end
# IVR formulation uses branch currents (cr, ci) and series currents (csr, csi).
function init_branch_vars(pm::IVRPowerModel)
    for (l,i,j) in ref(pm,:arcs)
        set_start_value(var(pm,:cr)[(l,i,j)])
        set_start_value(var(pm,:ci)[(l,i,j)])
    end
    for l in ids(pm,:branch)
        set_start_value(var(pm,:csr)[l])
        set_start_value(var(pm,:csi)[l])
    end
end
# Warm-start IVR branch and series currents from a solved model.
function init_branch_vars(pm::IVRPowerModel, pm_solved::IVRPowerModel)
    for (l,i,j) in ref(pm,:arcs)
        JuMP.set_start_value(var(pm,:cr)[(l,i,j)], JuMP.value(var(pm_solved,:cr)[(l,i,j)]))
        JuMP.set_start_value(var(pm,:ci)[(l,i,j)], JuMP.value(var(pm_solved,:ci)[(l,i,j)]))
    end
    for l in ids(pm,:branch)
        JuMP.set_start_value(var(pm,:csr)[l], JuMP.value(var(pm_solved,:csr)[l]))
        JuMP.set_start_value(var(pm,:csi)[l], JuMP.value(var(pm_solved,:csi)[l]))
    end
end
"""
Initilize direct current branch variable values
"""
# Bound-based initialization of DC-line power variables (p_dc, q_dc).
function init_dc_vars(pm::AbstractPowerModel)
    for arc in ref(pm,:arcs_dc)
        set_start_value(var(pm,:p_dc)[arc])
        set_start_value(var(pm,:q_dc)[arc])
    end
end
# Warm-start DC-line power variables from a solved model.
function init_dc_vars(pm::AbstractPowerModel, pm_solved::AbstractPowerModel)
    for arc in ref(pm,:arcs_dc)
        JuMP.set_start_value(var(pm,:p_dc)[arc], JuMP.value(var(pm_solved,:p_dc)[arc]))
        JuMP.set_start_value(var(pm,:q_dc)[arc], JuMP.value(var(pm_solved,:q_dc)[arc]))
    end
end
# IVR formulation uses DC-line currents (crdc, cidc).
function init_dc_vars(pm::IVRPowerModel)
    for arc in ref(pm,:arcs_dc)
        set_start_value(var(pm,:crdc)[arc])
        set_start_value(var(pm,:cidc)[arc])
    end
end
# Warm-start IVR DC-line current variables from a solved model.
# Fix: previously the real current `:crdc` was set twice and the imaginary
# current `:cidc` was never initialized (copy-paste bug).
function init_dc_vars(pm::IVRPowerModel, pm_solved::IVRPowerModel)
    for arc in ref(pm,:arcs_dc)
        JuMP.set_start_value(var(pm,:crdc)[arc], JuMP.value(var(pm_solved,:crdc)[arc]))
        JuMP.set_start_value(var(pm,:cidc)[arc], JuMP.value(var(pm_solved,:cidc)[arc]))
    end
end
"""
Initilize generation variable values
"""
# Bound-based initialization of generator power variables (pg, qg).
function init_gen_vars(pm::AbstractPowerModel)
    for (i,gen) in ref(pm,:gen)
        set_start_value(var(pm,:pg)[i])
        set_start_value(var(pm,:qg)[i])
    end
end
# Warm-start generator power variables from a solved model.
function init_gen_vars(pm::AbstractPowerModel, pm_solved::AbstractPowerModel)
    for (i,gen) in ref(pm,:gen)
        JuMP.set_start_value(var(pm,:pg)[i], JuMP.value(var(pm_solved,:pg)[i]))
        JuMP.set_start_value(var(pm,:qg)[i], JuMP.value(var(pm_solved,:qg)[i]))
    end
end
# IVR formulation uses generator currents (crg, cig).
function init_gen_vars(pm::IVRPowerModel)
    for (i,gen) in ref(pm,:gen)
        set_start_value(var(pm,:crg)[i])
        set_start_value(var(pm,:cig)[i])
    end
end
# Warm-start IVR generator current variables from a solved model.
# Fix: previously the real current `:crg` was set twice and the imaginary
# current `:cig` was never initialized (copy-paste bug).
function init_gen_vars(pm::IVRPowerModel, pm_solved::IVRPowerModel)
    for (i,gen) in ref(pm,:gen)
        JuMP.set_start_value(var(pm,:crg)[i], JuMP.value(var(pm_solved,:crg)[i]))
        JuMP.set_start_value(var(pm,:cig)[i], JuMP.value(var(pm_solved,:cig)[i]))
    end
end
"""
Initilize voltage variable values
"""
# Polar (ACP) formulation: voltage angle (va) and magnitude (vm).
function init_voltage_vars(pm::AbstractACPModel)
    for (i,bus) in ref(pm,:bus)
        set_start_value(var(pm,:va)[i])
        set_start_value(var(pm,:vm)[i])
    end
end
# Warm-start polar voltage variables from a solved model.
function init_voltage_vars(pm::AbstractACPModel, pm_solved::AbstractACPModel)
    for (i,bus) in ref(pm,:bus)
        JuMP.set_start_value(var(pm,:va)[i], JuMP.value(var(pm_solved,:va)[i]))
        JuMP.set_start_value(var(pm,:vm)[i], JuMP.value(var(pm_solved,:vm)[i]))
    end
end
# Rectangular (ACR) formulation: real (vr) and imaginary (vi) voltage components.
function init_voltage_vars(pm::AbstractACRModel)
    for (i,bus) in ref(pm,:bus)
        set_start_value(var(pm,:vr)[i])
        set_start_value(var(pm,:vi)[i])
    end
end
# Warm-start rectangular voltage variables from a solved model.
function init_voltage_vars(pm::AbstractACRModel, pm_solved::AbstractACRModel)
    for (i,bus) in ref(pm,:bus)
        JuMP.set_start_value(var(pm,:vr)[i], JuMP.value(var(pm_solved,:vr)[i]))
        JuMP.set_start_value(var(pm,:vi)[i], JuMP.value(var(pm_solved,:vi)[i]))
    end
end
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 3538 | using Revise
using SqpSolver
using PowerModels, JuMP, Ipopt
using filterSQP
using CPLEX
PowerModels.silence()
include("acwr.jl")
include("init_opf.jl")
# Standard AC-OPF model: voltage/generation/flow variables, fuel-cost objective,
# power balance, Ohm's law and thermal limits. Voltage-angle-difference
# constraints are intentionally left commented out.
function build_opf(pm::PowerModels.AbstractPowerModel)
    PowerModels.variable_bus_voltage(pm)
    PowerModels.variable_gen_power(pm)
    PowerModels.variable_branch_power(pm)
    PowerModels.variable_dcline_power(pm)
    PowerModels.objective_min_fuel_and_flow_cost(pm)
    PowerModels.constraint_model_voltage(pm)
    for i in PowerModels.ids(pm, :ref_buses)
        PowerModels.constraint_theta_ref(pm, i)
    end
    for i in PowerModels.ids(pm, :bus)
        PowerModels.constraint_power_balance(pm, i)
    end
    for i in PowerModels.ids(pm, :branch)
        PowerModels.constraint_ohms_yt_from(pm, i)
        PowerModels.constraint_ohms_yt_to(pm, i)
        # constraint_voltage_angle_difference(pm, i)
        PowerModels.constraint_thermal_limit_from(pm, i)
        PowerModels.constraint_thermal_limit_to(pm, i)
    end
    for i in PowerModels.ids(pm, :dcline)
        PowerModels.constraint_dcline_power_losses(pm, i)
    end
end
# Convenience constructors pairing each network formulation with its builder.
build_acp(data_file::String) = instantiate_model(PowerModels.parse_file(data_file), ACPPowerModel, build_opf)
build_acr(data_file::String) = instantiate_model(PowerModels.parse_file(data_file), ACRPowerModel, build_opf)
build_iv(data_file::String) = instantiate_model(PowerModels.parse_file(data_file), IVRPowerModel, PowerModels.build_opf_iv)
# Fix: DCPPowerModel was paired with `build_opf_iv`, which is the IVR-only
# current-voltage formulation; DCP uses the standard `build_opf` builder.
build_dcp(data_file::String) = instantiate_model(PowerModels.parse_file(data_file), DCPPowerModel, PowerModels.build_opf)
##
# Solve an ACR-OPF case with the SQP trust-region algorithm, using Ipopt as
# the internal QP subproblem solver.  Returns the PowerModels model and the
# solver result dictionary.
function run_sqp_opf(data_file::String, max_iter::Int = 100)
    pm = build_acr(data_file)
    # init_vars(pm)
    # pm2 = build_acp(data_file)
    # JuMP.@objective(pm2.model, Min, 0)
    # init_vars_from_ipopt(pm, pm2)
    # choose an internal QP solver
    qp_solver = optimizer_with_attributes(
        Ipopt.Optimizer,
        "print_level" => 0,
        "warm_start_init_point" => "yes",
        "linear_solver" => "ma57",
        # "ma57_pre_alloc" => 5.0,
        # CPLEX.Optimizer,
        # "CPX_PARAM_SCRIND" => 1,
        # "CPX_PARAM_THREADS" => 1,
        # "CPXPARAM_OptimalityTarget" => 2, # 1: convex, 2: local, 3: global
        # "CPXPARAM_Barrier_ConvergeTol" => 1.0e-4,
    )
    result = optimize_model!(pm, optimizer = optimizer_with_attributes(
        SqpSolver.Optimizer,
        "algorithm" => "SQP-TR",
        "external_optimizer" => qp_solver,
        "tol_infeas" => 1.e-6,
        "tol_residual" => 1.e-4,
        "max_iter" => max_iter,
        "use_soc" => true,
    ))
    return pm, result
end
run_sqp_opf("../data/case9.m", 50);
##
# Baseline run: solve the ACP-OPF case directly with Ipopt (warm-started from
# `init_vars`), for comparison against the SQP solver.
function run_ipopt!(data_file::String)
    pm = build_acp(data_file)
    init_vars(pm)
    # pm2 = build_acp(data_file)
    # JuMP.@objective(pm2.model, Min, 0)
    # init_vars_from_ipopt(pm, pm2)
    solver = optimizer_with_attributes(
        Ipopt.Optimizer,
        "warm_start_init_point" => "yes",
        "linear_solver" => "ma57",
    )
    optimize_model!(pm, optimizer = solver)
    return
end
run_ipopt!("../data/case2869pegase.m");
# ##
# function run_filter_sqp!(data_file::String)
# pm = build_acp(data_file)
# init_vars(pm)
# # pm2 = build_acp(data_file)
# # JuMP.@objective(pm2.model, Min, 0)
# # init_vars_from_ipopt(pm, pm2)
# solver = optimizer_with_attributes(
# filterSQP.Optimizer,
# "iprint" => 1,
# )
# optimize_model!(pm, optimizer = solver)
# return
# end
# run_filter_sqp!("../data/case1354pegase.m"); | SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 39450 | """
"""
# Container pairing a constraint function with its set, plus an optional
# user-supplied dual warm start (populated via `MOI.ConstraintDualStart`).
mutable struct _ConstraintInfo{F,S}
    func::F
    set::S
    dual_start::Union{Nothing,Float64}
end
# By default a constraint carries no dual start.
_ConstraintInfo(func, set) = _ConstraintInfo(func, set, nothing)
"""
Optimizer()
Create a new SqpSolver optimizer.
"""
mutable struct Optimizer <: MOI.AbstractOptimizer
inner::Union{Model,Nothing}
name::String
invalid_model::Bool
variables::MOI.Utilities.VariablesContainer{Float64}
variable_primal_start::Vector{Union{Nothing,Float64}}
variable_lower_start::Vector{Union{Nothing,Float64}}
variable_upper_start::Vector{Union{Nothing,Float64}}
nlp_data::MOI.NLPBlockData
sense::MOI.OptimizationSense
objective::Union{
Nothing,
MOI.VariableIndex,
MOI.ScalarAffineFunction{Float64},
MOI.ScalarQuadraticFunction{Float64},
}
linear_le_constraints::Vector{
_ConstraintInfo{
MOI.ScalarAffineFunction{Float64},
MOI.LessThan{Float64},
},
}
linear_ge_constraints::Vector{
_ConstraintInfo{
MOI.ScalarAffineFunction{Float64},
MOI.GreaterThan{Float64},
},
}
linear_eq_constraints::Vector{
_ConstraintInfo{MOI.ScalarAffineFunction{Float64},MOI.EqualTo{Float64}},
}
quadratic_le_constraints::Vector{
_ConstraintInfo{
MOI.ScalarQuadraticFunction{Float64},
MOI.LessThan{Float64},
},
}
quadratic_ge_constraints::Vector{
_ConstraintInfo{
MOI.ScalarQuadraticFunction{Float64},
MOI.GreaterThan{Float64},
},
}
quadratic_eq_constraints::Vector{
_ConstraintInfo{
MOI.ScalarQuadraticFunction{Float64},
MOI.EqualTo{Float64},
},
}
nlp_dual_start::Union{Nothing,Vector{Float64}}
silent::Bool
options::Parameters
solve_time::Float64
callback::Union{Nothing,Function}
function Optimizer(; kwargs...)
prob = new(
nothing,
"",
false,
MOI.Utilities.VariablesContainer{Float64}(),
Union{Nothing,Float64}[],
Union{Nothing,Float64}[],
Union{Nothing,Float64}[],
MOI.NLPBlockData([], _EmptyNLPEvaluator(), false),
MOI.FEASIBILITY_SENSE,
nothing,
_ConstraintInfo{
MOI.ScalarAffineFunction{Float64},
MOI.LessThan{Float64},
}[],
_ConstraintInfo{
MOI.ScalarAffineFunction{Float64},
MOI.GreaterThan{Float64},
}[],
_ConstraintInfo{
MOI.ScalarAffineFunction{Float64},
MOI.EqualTo{Float64},
}[],
_ConstraintInfo{
MOI.ScalarQuadraticFunction{Float64},
MOI.LessThan{Float64},
}[],
_ConstraintInfo{
MOI.ScalarQuadraticFunction{Float64},
MOI.GreaterThan{Float64},
}[],
_ConstraintInfo{
MOI.ScalarQuadraticFunction{Float64},
MOI.EqualTo{Float64},
}[],
nothing,
false,
Parameters(),
NaN,
nothing,
)
for (k, v) in kwargs
set_parameter(prob.options, string(k), v)
end
return prob
end
end
MOI.get(::Optimizer, ::MOI.SolverVersion) = "0.1.0"
### _EmptyNLPEvaluator
struct _EmptyNLPEvaluator <: MOI.AbstractNLPEvaluator end
MOI.features_available(::_EmptyNLPEvaluator) = [:Grad, :Jac, :Hess]
MOI.initialize(::_EmptyNLPEvaluator, ::Any) = nothing
MOI.eval_constraint(::_EmptyNLPEvaluator, g, x) = nothing
MOI.jacobian_structure(::_EmptyNLPEvaluator) = Tuple{Int64,Int64}[]
MOI.hessian_lagrangian_structure(::_EmptyNLPEvaluator) = Tuple{Int64,Int64}[]
MOI.eval_constraint_jacobian(::_EmptyNLPEvaluator, J, x) = nothing
MOI.eval_hessian_lagrangian(::_EmptyNLPEvaluator, H, x, σ, μ) = nothing
# Reset the optimizer to its freshly-constructed state.  Solver options
# (`model.options`) are intentionally preserved.
function MOI.empty!(model::Optimizer)
    model.inner = nothing
    model.invalid_model = false
    MOI.empty!(model.variables)
    empty!(model.variable_primal_start)
    empty!(model.variable_lower_start)
    empty!(model.variable_upper_start)
    model.nlp_data = MOI.NLPBlockData([], _EmptyNLPEvaluator(), false)
    model.sense = MOI.FEASIBILITY_SENSE
    model.objective = nothing
    empty!(model.linear_le_constraints)
    empty!(model.linear_ge_constraints)
    empty!(model.linear_eq_constraints)
    empty!(model.quadratic_le_constraints)
    empty!(model.quadratic_ge_constraints)
    empty!(model.quadratic_eq_constraints)
    model.nlp_dual_start = nothing
    return
end
# Return `true` when the model contains no variables, no constraints, no NLP
# block, and a feasibility objective sense — i.e. the state after `empty!`.
function MOI.is_empty(model::Optimizer)
    no_variables =
        MOI.is_empty(model.variables) &&
        isempty(model.variable_primal_start) &&
        isempty(model.variable_lower_start) &&
        isempty(model.variable_upper_start)
    no_constraints =
        isempty(model.linear_le_constraints) &&
        isempty(model.linear_ge_constraints) &&
        isempty(model.linear_eq_constraints) &&
        isempty(model.quadratic_le_constraints) &&
        isempty(model.quadratic_ge_constraints) &&
        isempty(model.quadratic_eq_constraints)
    return no_variables &&
           model.nlp_data.evaluator isa _EmptyNLPEvaluator &&
           model.sense == MOI.FEASIBILITY_SENSE &&
           no_constraints
end
# This optimizer supports incremental model construction, so the generic
# MOI.Utilities copy routine can be used directly.
MOI.supports_incremental_interface(::Optimizer) = true
function MOI.copy_to(model::Optimizer, src::MOI.ModelLike)
    return MOI.Utilities.default_copy_to(model, src)
end
MOI.get(::Optimizer, ::MOI.SolverName) = "SqpSolver"
# Scalar variable, affine, and quadratic functions are supported in
# less-than, greater-than, and equal-to sets.
function MOI.supports_constraint(
    ::Optimizer,
    ::Type{
        <:Union{
            MOI.VariableIndex,
            MOI.ScalarAffineFunction{Float64},
            MOI.ScalarQuadraticFunction{Float64},
        },
    },
    ::Type{
        <:Union{
            MOI.LessThan{Float64},
            MOI.GreaterThan{Float64},
            MOI.EqualTo{Float64},
        },
    },
)
    return true
end
# List the (function, set) constraint types with at least one constraint:
# variable-bound types come from the variables container, followed by any
# non-empty affine/quadratic constraint stores.
function MOI.get(model::Optimizer, ::MOI.ListOfConstraintTypesPresent)
    present = MOI.get(model.variables, MOI.ListOfConstraintTypesPresent())
    found = Set{Tuple{Type,Type}}()
    function_types = (
        MOI.ScalarAffineFunction{Float64},
        MOI.ScalarQuadraticFunction{Float64},
    )
    set_types = (
        MOI.LessThan{Float64},
        MOI.GreaterThan{Float64},
        MOI.EqualTo{Float64},
    )
    for F in function_types, S in set_types
        isempty(_constraints(model, F, S)) || push!(found, (F, S))
    end
    return append!(present, collect(found))
end
### MOI.Name
MOI.supports(::Optimizer, ::MOI.Name) = true
function MOI.set(model::Optimizer, ::MOI.Name, value::String)
    model.name = value
    return
end
MOI.get(model::Optimizer, ::MOI.Name) = model.name
### MOI.Silent
MOI.supports(::Optimizer, ::MOI.Silent) = true
function MOI.set(model::Optimizer, ::MOI.Silent, value)
    model.silent = value
    return
end
MOI.get(model::Optimizer, ::MOI.Silent) = model.silent
### MOI.TimeLimitSec
# Backed by the solver's "time_limit" raw option; clearing the limit sets a
# very large sentinel value rather than removing the option.
MOI.supports(::Optimizer, ::MOI.TimeLimitSec) = true
function MOI.set(model::Optimizer, ::MOI.TimeLimitSec, value::Real)
    MOI.set(model, MOI.RawOptimizerAttribute("time_limit"), Float64(value))
    return
end
function MOI.set(model::Optimizer, ::MOI.TimeLimitSec, ::Nothing)
    MOI.set(model, MOI.RawOptimizerAttribute("time_limit"), 1.0e+10)
    return
end
function MOI.get(model::Optimizer, ::MOI.TimeLimitSec)
    return get_parameter(model.options, "time_limit")
end
### MOI.RawOptimizerAttribute
MOI.supports(::Optimizer, ::MOI.RawOptimizerAttribute) = true
function MOI.set(model::Optimizer, p::MOI.RawOptimizerAttribute, value)
    set_parameter(model.options, p.name, value)
    return
end
function MOI.get(model::Optimizer, p::MOI.RawOptimizerAttribute)
    return get_parameter(model.options, p.name)
end
### Variables
"""
    column(x::MOI.VariableIndex)

Return the column associated with a variable.
"""
column(x::MOI.VariableIndex) = x.value
# Adding a variable also grows the parallel start-value vectors so that they
# stay aligned with the variable indices.
function MOI.add_variable(model::Optimizer)
    push!(model.variable_primal_start, nothing)
    push!(model.variable_lower_start, nothing)
    push!(model.variable_upper_start, nothing)
    return MOI.add_variable(model.variables)
end
function MOI.is_valid(model::Optimizer, x::MOI.VariableIndex)
    return MOI.is_valid(model.variables, x)
end
function MOI.get(
    model::Optimizer,
    attr::Union{MOI.NumberOfVariables,MOI.ListOfVariableIndices},
)
    return MOI.get(model.variables, attr)
end
# Variable-bound constraints (VariableIndex-in-Set) are delegated entirely to
# the variables container.
function MOI.is_valid(
    model::Optimizer,
    ci::MOI.ConstraintIndex{MOI.VariableIndex,S},
) where {S<:Union{MOI.LessThan,MOI.GreaterThan,MOI.EqualTo}}
    return MOI.is_valid(model.variables, ci)
end
function MOI.get(
    model::Optimizer,
    attr::Union{
        MOI.NumberOfConstraints{MOI.VariableIndex,S},
        MOI.ListOfConstraintIndices{MOI.VariableIndex,S},
    },
) where {S<:Union{MOI.LessThan,MOI.GreaterThan,MOI.EqualTo}}
    return MOI.get(model.variables, attr)
end
function MOI.get(
    model::Optimizer,
    attr::Union{MOI.ConstraintFunction,MOI.ConstraintSet},
    c::MOI.ConstraintIndex{MOI.VariableIndex,S},
) where {S<:Union{MOI.LessThan,MOI.GreaterThan,MOI.EqualTo}}
    return MOI.get(model.variables, attr, c)
end
function MOI.add_constraint(
    model::Optimizer,
    x::MOI.VariableIndex,
    set::Union{
        MOI.LessThan{Float64},
        MOI.GreaterThan{Float64},
        MOI.EqualTo{Float64},
    },
)
    return MOI.add_constraint(model.variables, x, set)
end
function MOI.set(
    model::Optimizer,
    ::MOI.ConstraintSet,
    ci::MOI.ConstraintIndex{MOI.VariableIndex,S},
    set::S,
) where {S<:Union{MOI.LessThan,MOI.GreaterThan,MOI.EqualTo}}
    MOI.set(model.variables, MOI.ConstraintSet(), ci, set)
    return
end
function MOI.delete(
    model::Optimizer,
    ci::MOI.ConstraintIndex{MOI.VariableIndex,S},
) where {S<:Union{MOI.LessThan,MOI.GreaterThan,MOI.EqualTo}}
    MOI.delete(model.variables, ci)
    return
end
### ScalarAffineFunction and ScalarQuadraticFunction constraints
# A structured constraint index is valid when it points inside the matching
# constraint store.
function MOI.is_valid(
    model::Optimizer,
    ci::MOI.ConstraintIndex{F,S},
) where {
    F<:Union{
        MOI.ScalarAffineFunction{Float64},
        MOI.ScalarQuadraticFunction{Float64},
    },
    S<:Union{MOI.LessThan,MOI.GreaterThan,MOI.EqualTo},
}
    return 1 <= ci.value <= length(_constraints(model, F, S))
end
# `_constraints(model, F, S)` maps a (function, set) type pair to the vector
# of `_ConstraintInfo` that stores those constraints.
function _constraints(
    model::Optimizer,
    ::Type{MOI.ScalarAffineFunction{Float64}},
    ::Type{MOI.LessThan{Float64}},
)
    return model.linear_le_constraints
end
function _constraints(
    model::Optimizer,
    ::Type{MOI.ScalarAffineFunction{Float64}},
    ::Type{MOI.GreaterThan{Float64}},
)
    return model.linear_ge_constraints
end
function _constraints(
    model::Optimizer,
    ::Type{MOI.ScalarAffineFunction{Float64}},
    ::Type{MOI.EqualTo{Float64}},
)
    return model.linear_eq_constraints
end
function _constraints(
    model::Optimizer,
    ::Type{MOI.ScalarQuadraticFunction{Float64}},
    ::Type{MOI.LessThan{Float64}},
)
    return model.quadratic_le_constraints
end
function _constraints(
    model::Optimizer,
    ::Type{MOI.ScalarQuadraticFunction{Float64}},
    ::Type{MOI.GreaterThan{Float64}},
)
    return model.quadratic_ge_constraints
end
function _constraints(
    model::Optimizer,
    ::Type{MOI.ScalarQuadraticFunction{Float64}},
    ::Type{MOI.EqualTo{Float64}},
)
    return model.quadratic_eq_constraints
end
# Validate that every variable referenced by a function exists in `model`;
# throws via `MOI.throw_if_not_valid` otherwise.
function _check_inbounds(model::Optimizer, v::MOI.VariableIndex)
    MOI.throw_if_not_valid(model, v)
    return
end
function _check_inbounds(model::Optimizer, f::MOI.ScalarAffineFunction)
    for t in f.terms
        MOI.throw_if_not_valid(model, t.variable)
    end
    return
end
function _check_inbounds(model::Optimizer, f::MOI.ScalarQuadraticFunction)
    for t in f.affine_terms
        MOI.throw_if_not_valid(model, t.variable)
    end
    for t in f.quadratic_terms
        # Both variables of each bilinear/square term must be valid.
        MOI.throw_if_not_valid(model, t.variable_1)
        MOI.throw_if_not_valid(model, t.variable_2)
    end
    return
end
# Append a structured (affine/quadratic) constraint to the store for its
# (function, set) pair; the index value is its 1-based position there.
function MOI.add_constraint(
    model::Optimizer,
    func::F,
    set::S,
) where {
    F<:Union{
        MOI.ScalarAffineFunction{Float64},
        MOI.ScalarQuadraticFunction{Float64},
    },
    S<:MOI.AbstractScalarSet,
}
    _check_inbounds(model, func)
    constraints = _constraints(model, F, S)
    push!(constraints, _ConstraintInfo(func, set))
    return MOI.ConstraintIndex{F,S}(length(constraints))
end
# Counting, listing, and attribute access for structured constraints, all
# backed by the `_constraints` stores.
function MOI.get(
    model::Optimizer,
    ::MOI.NumberOfConstraints{F,S},
) where {
    F<:Union{
        MOI.ScalarAffineFunction{Float64},
        MOI.ScalarQuadraticFunction{Float64},
    },
    S,
}
    return length(_constraints(model, F, S))
end
function MOI.get(
    model::Optimizer,
    ::MOI.ListOfConstraintIndices{F,S},
) where {
    F<:Union{
        MOI.ScalarAffineFunction{Float64},
        MOI.ScalarQuadraticFunction{Float64},
    },
    S,
}
    return MOI.ConstraintIndex{F,S}[
        MOI.ConstraintIndex{F,S}(i) for
        i in eachindex(_constraints(model, F, S))
    ]
end
function MOI.get(
    model::Optimizer,
    ::MOI.ConstraintFunction,
    c::MOI.ConstraintIndex{F,S},
) where {
    F<:Union{
        MOI.ScalarAffineFunction{Float64},
        MOI.ScalarQuadraticFunction{Float64},
    },
    S,
}
    return _constraints(model, F, S)[c.value].func
end
function MOI.get(
    model::Optimizer,
    ::MOI.ConstraintSet,
    c::MOI.ConstraintIndex{F,S},
) where {
    F<:Union{
        MOI.ScalarAffineFunction{Float64},
        MOI.ScalarQuadraticFunction{Float64},
    },
    S,
}
    return _constraints(model, F, S)[c.value].set
end
# Dual warm starts for structured constraints are stored on the
# `_ConstraintInfo` itself.
function MOI.supports(
    ::Optimizer,
    ::MOI.ConstraintDualStart,
    ::Type{MOI.ConstraintIndex{F,S}},
) where {
    F<:Union{
        MOI.ScalarAffineFunction{Float64},
        MOI.ScalarQuadraticFunction{Float64},
    },
    S,
}
    return true
end
function MOI.set(
    model::Optimizer,
    ::MOI.ConstraintDualStart,
    ci::MOI.ConstraintIndex{F,S},
    value::Union{Real,Nothing},
) where {
    F<:Union{
        MOI.ScalarAffineFunction{Float64},
        MOI.ScalarQuadraticFunction{Float64},
    },
    S,
}
    MOI.throw_if_not_valid(model, ci)
    constraints = _constraints(model, F, S)
    constraints[ci.value].dual_start = value
    return
end
function MOI.get(
    model::Optimizer,
    ::MOI.ConstraintDualStart,
    ci::MOI.ConstraintIndex{F,S},
) where {
    F<:Union{
        MOI.ScalarAffineFunction{Float64},
        MOI.ScalarQuadraticFunction{Float64},
    },
    S,
}
    MOI.throw_if_not_valid(model, ci)
    constraints = _constraints(model, F, S)
    return constraints[ci.value].dual_start
end
### MOI.VariablePrimalStart
function MOI.supports(
    ::Optimizer,
    ::MOI.VariablePrimalStart,
    ::Type{MOI.VariableIndex},
)
    return true
end
# `nothing` clears the start; the actual projection onto bounds happens in
# `MOI.optimize!`.
function MOI.set(
    model::Optimizer,
    ::MOI.VariablePrimalStart,
    vi::MOI.VariableIndex,
    value::Union{Real,Nothing},
)
    MOI.throw_if_not_valid(model, vi)
    model.variable_primal_start[column(vi)] = value
    return
end
### MOI.ConstraintDualStart
# Scale a user dual start by the solver's dual sign convention; unset starts
# map to 0.0.
_dual_start(::Optimizer, ::Nothing, ::Int = 1) = 0.0
function _dual_start(model::Optimizer, value::Real, scale::Int = 1)
    return _dual_multiplier(model) * value * scale
end
function MOI.supports(
    ::Optimizer,
    ::MOI.ConstraintDualStart,
    ::Type{
        MOI.ConstraintIndex{
            MOI.VariableIndex,
            <:Union{MOI.GreaterThan,MOI.LessThan,MOI.EqualTo},
        },
    },
)
    return true
end
# Lower-bound (GreaterThan) duals live in `variable_lower_start`.
function MOI.set(
    model::Optimizer,
    ::MOI.ConstraintDualStart,
    ci::MOI.ConstraintIndex{MOI.VariableIndex,MOI.GreaterThan{Float64}},
    value::Union{Real,Nothing},
)
    MOI.throw_if_not_valid(model, ci)
    model.variable_lower_start[ci.value] = value
    return
end
function MOI.get(
    model::Optimizer,
    ::MOI.ConstraintDualStart,
    ci::MOI.ConstraintIndex{MOI.VariableIndex,MOI.GreaterThan{Float64}},
)
    MOI.throw_if_not_valid(model, ci)
    return model.variable_lower_start[ci.value]
end
# Upper-bound (LessThan) duals live in `variable_upper_start`.
function MOI.set(
    model::Optimizer,
    ::MOI.ConstraintDualStart,
    ci::MOI.ConstraintIndex{MOI.VariableIndex,MOI.LessThan{Float64}},
    value::Union{Real,Nothing},
)
    MOI.throw_if_not_valid(model, ci)
    model.variable_upper_start[ci.value] = value
    return
end
function MOI.get(
    model::Optimizer,
    ::MOI.ConstraintDualStart,
    ci::MOI.ConstraintIndex{MOI.VariableIndex,MOI.LessThan{Float64}},
)
    MOI.throw_if_not_valid(model, ci)
    return model.variable_upper_start[ci.value]
end
# An EqualTo dual start is split by sign across the lower/upper slots so the
# getter can reconstruct it as their sum.
function MOI.set(
    model::Optimizer,
    ::MOI.ConstraintDualStart,
    ci::MOI.ConstraintIndex{MOI.VariableIndex,MOI.EqualTo{Float64}},
    value::Union{Real,Nothing},
)
    MOI.throw_if_not_valid(model, ci)
    if value === nothing
        model.variable_lower_start[ci.value] = nothing
        model.variable_upper_start[ci.value] = nothing
    elseif value >= 0.0
        model.variable_lower_start[ci.value] = value
        model.variable_upper_start[ci.value] = 0.0
    else
        model.variable_lower_start[ci.value] = 0.0
        model.variable_upper_start[ci.value] = value
    end
    return
end
function MOI.get(
    model::Optimizer,
    ::MOI.ConstraintDualStart,
    ci::MOI.ConstraintIndex{MOI.VariableIndex,MOI.EqualTo{Float64}},
)
    MOI.throw_if_not_valid(model, ci)
    l = model.variable_lower_start[ci.value]
    u = model.variable_upper_start[ci.value]
    # NOTE(review): `l + u` errors if exactly one of the two is `nothing`;
    # the setter above always writes both, so this assumes no other code path
    # writes only one slot for an EqualTo-constrained variable.
    return (l === u === nothing) ? nothing : (l + u)
end
### MOI.NLPBlockDualStart
MOI.supports(::Optimizer, ::MOI.NLPBlockDualStart) = true
function MOI.set(
    model::Optimizer,
    ::MOI.NLPBlockDualStart,
    values::Union{Nothing,Vector},
)
    model.nlp_dual_start = values
    return
end
MOI.get(model::Optimizer, ::MOI.NLPBlockDualStart) = model.nlp_dual_start
### MOI.NLPBlock
MOI.supports(::Optimizer, ::MOI.NLPBlock) = true
function MOI.set(model::Optimizer, ::MOI.NLPBlock, nlp_data::MOI.NLPBlockData)
    model.nlp_data = nlp_data
    return
end
### ObjectiveSense
MOI.supports(::Optimizer, ::MOI.ObjectiveSense) = true
function MOI.set(
    model::Optimizer,
    ::MOI.ObjectiveSense,
    sense::MOI.OptimizationSense,
)
    model.sense = sense
    return
end
MOI.get(model::Optimizer, ::MOI.ObjectiveSense) = model.sense
### ObjectiveFunction
MOI.get(model::Optimizer, ::MOI.ObjectiveFunctionType) = typeof(model.objective)
function MOI.get(model::Optimizer, ::MOI.ObjectiveFunction{F}) where {F}
    return convert(F, model.objective)::F
end
function MOI.supports(
    ::Optimizer,
    ::MOI.ObjectiveFunction{
        <:Union{
            MOI.VariableIndex,
            MOI.ScalarAffineFunction{Float64},
            MOI.ScalarQuadraticFunction{Float64},
        },
    },
)
    return true
end
# Setting a scalar objective overwrites any previous one; an NLP objective
# (if present) takes precedence at evaluation time.
function MOI.set(
    model::Optimizer,
    ::MOI.ObjectiveFunction{F},
    func::F,
) where {
    F<:Union{
        MOI.VariableIndex,
        MOI.ScalarAffineFunction{Float64},
        MOI.ScalarQuadraticFunction{Float64},
    },
}
    _check_inbounds(model, func)
    model.objective = func
    return
end
### SqpSolver callback functions
### In setting up the data for SqpSolver, we order the constraints as follows:
### - linear_le_constraints
### - linear_ge_constraints
### - linear_eq_constraints
### - quadratic_le_constraints
### - quadratic_ge_constraints
### - quadratic_eq_constraints
### - nonlinear constraints from nlp_data
# Field names of `Optimizer`, in canonical row order for the callbacks.
const _CONSTRAINT_ORDERING = (
    :linear_le_constraints,
    :linear_ge_constraints,
    :linear_eq_constraints,
    :quadratic_le_constraints,
    :quadratic_ge_constraints,
    :quadratic_eq_constraints,
)
# `_offset(model, F, S)` is the number of constraint rows that precede the
# (F, S) group in the canonical ordering above.
function _offset(
    ::Optimizer,
    ::Type{<:MOI.ScalarAffineFunction},
    ::Type{<:MOI.LessThan},
)
    return 0
end
function _offset(
    model::Optimizer,
    ::Type{<:MOI.ScalarAffineFunction},
    ::Type{<:MOI.GreaterThan},
)
    return length(model.linear_le_constraints)
end
function _offset(
    model::Optimizer,
    F::Type{<:MOI.ScalarAffineFunction},
    ::Type{<:MOI.EqualTo},
)
    return _offset(model, F, MOI.GreaterThan{Float64}) +
           length(model.linear_ge_constraints)
end
function _offset(
    model::Optimizer,
    ::Type{<:MOI.ScalarQuadraticFunction},
    ::Type{<:MOI.LessThan},
)
    x = _offset(model, MOI.ScalarAffineFunction{Float64}, MOI.EqualTo{Float64})
    return x + length(model.linear_eq_constraints)
end
function _offset(
    model::Optimizer,
    F::Type{<:MOI.ScalarQuadraticFunction},
    ::Type{<:MOI.GreaterThan},
)
    return _offset(model, F, MOI.LessThan{Float64}) +
           length(model.quadratic_le_constraints)
end
function _offset(
    model::Optimizer,
    F::Type{<:MOI.ScalarQuadraticFunction},
    ::Type{<:MOI.EqualTo},
)
    return _offset(model, F, MOI.GreaterThan{Float64}) +
           length(model.quadratic_ge_constraints)
end
# Row offset of the first NLP-block constraint.
function _nlp_constraint_offset(model::Optimizer)
    x = _offset(
        model,
        MOI.ScalarQuadraticFunction{Float64},
        MOI.EqualTo{Float64},
    )
    return x + length(model.quadratic_eq_constraints)
end
# Evaluate a scalar MOI function at point `x`; a `nothing` objective counts
# as zero.
_eval_function(::Nothing, ::Any) = 0.0
_eval_function(f, x) = MOI.Utilities.eval_variables(xi -> x[xi.value], f)
### Eval_F_CB
# Objective value at `x`: the NLP objective takes precedence over any
# structured scalar objective.
function _eval_objective(model::Optimizer, x)
    if model.nlp_data.has_objective
        return MOI.eval_objective(model.nlp_data.evaluator, x)
    end
    return _eval_function(model.objective, x)
end
### Eval_Grad_F_CB
# Accumulate the gradient of a structured objective into `grad` (which must
# already be zeroed); a `nothing` objective contributes nothing.
_fill_gradient(::Any, ::Any, ::Nothing) = nothing
function _fill_gradient(grad, ::Vector, f::MOI.VariableIndex)
    grad[f.value] = 1.0
    return
end
function _fill_gradient(grad, ::Vector, f::MOI.ScalarAffineFunction{Float64})
    for term in f.terms
        grad[term.variable.value] += term.coefficient
    end
    return
end
function _fill_gradient(
    grad,
    x::Vector,
    quad::MOI.ScalarQuadraticFunction{Float64},
)
    for term in quad.affine_terms
        grad[term.variable.value] += term.coefficient
    end
    for term in quad.quadratic_terms
        row_idx = term.variable_1
        col_idx = term.variable_2
        # MOI stores quadratic terms with the 1/2 factor folded into the
        # coefficient; off-diagonal terms contribute to both variables.
        if row_idx == col_idx
            grad[row_idx.value] += term.coefficient * x[row_idx.value]
        else
            grad[row_idx.value] += term.coefficient * x[col_idx.value]
            grad[col_idx.value] += term.coefficient * x[row_idx.value]
        end
    end
    return
end
# Gradient of the objective at `x`; the NLP objective takes precedence.
function _eval_objective_gradient(model::Optimizer, grad, x)
    if model.nlp_data.has_objective
        MOI.eval_objective_gradient(model.nlp_data.evaluator, grad, x)
    else
        fill!(grad, 0.0)
        _fill_gradient(grad, x, model.objective)
    end
    return
end
### Eval_G_CB
# Evaluate every constraint body into `g` at point `x`: structured
# constraints in canonical order first, then the NLP block into the tail.
function _eval_constraint(model::Optimizer, g, x)
    k = 0
    for field in _CONSTRAINT_ORDERING
        for info in getfield(model, field)
            k += 1
            g[k] = _eval_function(info.func, x)
        end
    end
    MOI.eval_constraint(model.nlp_data.evaluator, view(g, (k+1):length(g)), x)
    return
end
### Eval_Jac_G_CB
# Push the (row, column) entries contributed by one structured constraint
# onto the Jacobian sparsity list `J`.
function _append_to_jacobian_sparsity(J, f::MOI.ScalarAffineFunction, row)
    for term in f.terms
        push!(J, (row, term.variable.value))
    end
    return
end
function _append_to_jacobian_sparsity(J, f::MOI.ScalarQuadraticFunction, row)
    for term in f.affine_terms
        push!(J, (row, term.variable.value))
    end
    for term in f.quadratic_terms
        row_idx = term.variable_1
        col_idx = term.variable_2
        # Off-diagonal terms produce two entries; this ordering must match
        # `_fill_constraint_jacobian` below.
        if row_idx == col_idx
            push!(J, (row, row_idx.value))
        else
            push!(J, (row, row_idx.value))
            push!(J, (row, col_idx.value))
        end
    end
    return
end
# Build the (row, column) sparsity pattern of the full constraint Jacobian:
# structured constraints first (one row each, in canonical order), followed
# by the NLP-block rows shifted past them.
function _jacobian_structure(model::Optimizer)
    pattern = Tuple{Int64,Int64}[]
    next_row = 1
    for field in _CONSTRAINT_ORDERING
        for info in getfield(model, field)
            _append_to_jacobian_sparsity(pattern, info.func, next_row)
            next_row += 1
        end
    end
    if !isempty(model.nlp_data.constraint_bounds)
        shift = next_row - 1
        for (r, c) in MOI.jacobian_structure(model.nlp_data.evaluator)
            push!(pattern, (r + shift, c))
        end
    end
    return pattern
end
# Write the Jacobian coefficients of an affine constraint into `values`
# starting after `offset`; returns the number of entries written.
function _fill_constraint_jacobian(
    values,
    offset,
    ::Vector,
    f::MOI.ScalarAffineFunction,
)
    for (k, term) in enumerate(f.terms)
        values[offset+k] = term.coefficient
    end
    return length(f.terms)
end
# Write the Jacobian coefficients of a quadratic constraint into `values`
# starting after `offset`; returns the number of entries written.  The entry
# order must match `_append_to_jacobian_sparsity`.
function _fill_constraint_jacobian(
    values,
    offset,
    x,
    f::MOI.ScalarQuadraticFunction,
)
    nterms = 0
    for term in f.affine_terms
        nterms += 1
        values[offset+nterms] = term.coefficient
    end
    for term in f.quadratic_terms
        row_idx = term.variable_1
        col_idx = term.variable_2
        if row_idx == col_idx
            nterms += 1
            values[offset+nterms] = term.coefficient * x[col_idx.value]
        else
            # Note that the order matches the Jacobian sparsity pattern.
            nterms += 2
            values[offset+nterms-1] = term.coefficient * x[col_idx.value]
            values[offset+nterms] = term.coefficient * x[row_idx.value]
        end
    end
    return nterms
end
# Fill the full constraint Jacobian values at `x`: structured constraints in
# canonical order, then the NLP block into the remaining tail of `values`.
function _eval_constraint_jacobian(model::Optimizer, values, x)
    offset = 0
    for key in _CONSTRAINT_ORDERING
        for info in getfield(model, key)
            offset += _fill_constraint_jacobian(values, offset, x, info.func)
        end
    end
    nlp_values = view(values, (1+offset):length(values))
    MOI.eval_constraint_jacobian(model.nlp_data.evaluator, nlp_values, x)
    return
end
### Eval_H_CB
# Push the Hessian sparsity entries of a quadratic function onto `H`;
# non-quadratic objectives contribute nothing.
_append_to_hessian_sparsity(::Any, ::Any) = nothing
function _append_to_hessian_sparsity(H, f::MOI.ScalarQuadraticFunction)
    for term in f.quadratic_terms
        push!(H, (term.variable_1.value, term.variable_2.value))
    end
    return
end
# Assemble the Hessian-of-the-Lagrangian sparsity: quadratic objective (if no
# NLP objective), then quadratic constraints in le/ge/eq order, then the NLP
# block.  This ordering must match `_eval_hessian_lagrangian`.
function _append_hessian_lagrangian_structure(H, model::Optimizer)
    if !model.nlp_data.has_objective
        _append_to_hessian_sparsity(H, model.objective)
    end
    for info in model.quadratic_le_constraints
        _append_to_hessian_sparsity(H, info.func)
    end
    for info in model.quadratic_ge_constraints
        _append_to_hessian_sparsity(H, info.func)
    end
    for info in model.quadratic_eq_constraints
        _append_to_hessian_sparsity(H, info.func)
    end
    append!(H, MOI.hessian_lagrangian_structure(model.nlp_data.evaluator))
    return
end
# Write λ-scaled Hessian entries of one quadratic function into `H` after
# `offset`; returns the number of entries written (0 for non-quadratics).
_fill_hessian_lagrangian(::Any, ::Any, ::Any, ::Any) = 0
function _fill_hessian_lagrangian(H, offset, λ, f::MOI.ScalarQuadraticFunction)
    for term in f.quadratic_terms
        H[offset+1] = λ * term.coefficient
        offset += 1
    end
    return length(f.quadratic_terms)
end
# Fill the Hessian contribution of all quadratic constraints with set type
# `S`, using the multipliers `μ` indexed by their canonical row offsets.
function _eval_hessian_lagrangian(
    ::Type{S},
    model::Optimizer,
    H,
    μ,
    offset,
) where {S}
    F = MOI.ScalarQuadraticFunction{Float64}
    offset_start = _offset(model, F, S)
    for (i, info) in enumerate(_constraints(model, F, S))
        offset +=
            _fill_hessian_lagrangian(H, offset, μ[offset_start+i], info.func)
    end
    return offset
end
# Fill the full Hessian of the Lagrangian at (x, σ, μ).  Entry order matches
# `_append_hessian_lagrangian_structure`.
function _eval_hessian_lagrangian(model::Optimizer, H, x, σ, μ)
    offset = 0
    if !model.nlp_data.has_objective
        offset += _fill_hessian_lagrangian(H, 0, σ, model.objective)
    end
    # Handles any quadratic constraints that are present. The order matters.
    offset =
        _eval_hessian_lagrangian(MOI.LessThan{Float64}, model, H, μ, offset)
    offset =
        _eval_hessian_lagrangian(MOI.GreaterThan{Float64}, model, H, μ, offset)
    offset = _eval_hessian_lagrangian(MOI.EqualTo{Float64}, model, H, μ, offset)
    # Handles the Hessian in the nonlinear block
    MOI.eval_hessian_lagrangian(
        model.nlp_data.evaluator,
        view(H, 1+offset:length(H)),
        x,
        σ,
        view(μ, 1+_nlp_constraint_offset(model):length(μ)),
    )
    return
end
### MOI.optimize!
# Convert a scalar MOI set to a (lower, upper) bound pair.
_bounds(s::MOI.LessThan) = (-Inf, s.upper)
_bounds(s::MOI.GreaterThan) = (s.lower, Inf)
_bounds(s::MOI.EqualTo) = (s.value, s.value)
# Build the low-level SqpSolver `Model` from the MOI data, install the
# evaluation callbacks, seed primal/dual warm starts, and run the solver.
function MOI.optimize!(model::Optimizer)
    # TODO: Reuse model.inner for incremental solves if possible.
    num_linear_constraints =
        length(model.linear_le_constraints) +
        length(model.linear_ge_constraints) +
        length(model.linear_eq_constraints)
    num_quadratic_constraints =
        length(model.quadratic_le_constraints) +
        length(model.quadratic_ge_constraints) +
        length(model.quadratic_eq_constraints)
    num_nlp_constraints = length(model.nlp_data.constraint_bounds)
    has_hessian = :Hess in MOI.features_available(model.nlp_data.evaluator)
    # Request only the evaluator features we actually need.
    init_feat = [:Grad]
    if has_hessian
        push!(init_feat, :Hess)
    end
    if num_nlp_constraints > 0
        push!(init_feat, :Jac)
    end
    MOI.initialize(model.nlp_data.evaluator, init_feat)
    jacobian_sparsity = _jacobian_structure(model)
    hessian_sparsity = Tuple{Int,Int}[]
    if has_hessian
        _append_hessian_lagrangian_structure(hessian_sparsity, model)
    end
    # Internally the solver minimizes; maximization is handled by negating
    # the objective and its derivatives.
    if model.sense == MOI.MIN_SENSE
        objective_scale = 1.0
    elseif model.sense == MOI.MAX_SENSE
        objective_scale = -1.0
    else # FEASIBILITY_SENSE
        # TODO: This could produce confusing solver output if a nonzero
        # objective is set.
        objective_scale = 0.0
    end
    eval_f_cb(x) = objective_scale * _eval_objective(model, x)
    function eval_grad_f_cb(x, grad_f)
        if model.sense == MOI.FEASIBILITY_SENSE
            grad_f .= zero(eltype(grad_f))
        else
            _eval_objective_gradient(model, grad_f, x)
            rmul!(grad_f,objective_scale)
        end
        return
    end
    eval_g_cb(x, g) = _eval_constraint(model, g, x)
    # Jacobian/Hessian callbacks: when `values` is `nothing` the solver is
    # querying the sparsity pattern, otherwise the numerical entries.
    function eval_jac_g_cb(x, rows, cols, values)
        if values === nothing
            for i in eachindex(jacobian_sparsity)
                rows[i], cols[i] = jacobian_sparsity[i]
            end
        else
            _eval_constraint_jacobian(model, values, x)
        end
        return
    end
    function eval_h_cb(x, rows, cols, obj_factor, lambda, values)
        if values === nothing
            for i in eachindex(hessian_sparsity)
                rows[i], cols[i] = hessian_sparsity[i]
            end
        else
            obj_factor *= objective_scale
            _eval_hessian_lagrangian(model, values, x, obj_factor, lambda)
        end
        return
    end
    # Constraint bounds: structured constraints (canonical order), then NLP.
    g_L, g_U = Float64[], Float64[]
    for key in _CONSTRAINT_ORDERING
        for info in getfield(model, key)
            l, u = _bounds(info.set)
            push!(g_L, l)
            push!(g_U, u)
        end
    end
    for bound in model.nlp_data.constraint_bounds
        push!(g_L, bound.lower)
        push!(g_U, bound.upper)
    end
    start_time = time()
    # A model with no variables cannot be passed to the solver.
    if length(model.variables.lower) == 0
        model.invalid_model = true
        return
    end
    model.inner = Model(
        length(model.variables.lower),
        length(g_L),
        model.variables.lower,
        model.variables.upper,
        g_L,
        g_U,
        jacobian_sparsity,
        hessian_sparsity,
        eval_f_cb,
        eval_g_cb,
        eval_grad_f_cb,
        eval_jac_g_cb,
        has_hessian ? eval_h_cb : nothing,
        num_linear_constraints,
        objective_scale == -1 ? :Max : :Min,
        model.options
    )
    options = model.inner.parameters
    if !has_hessian
        set_parameter(options, "hessian_type", "none")
    end
    if model.silent
        set_parameter(options, "OutputFlag", 0)
    end
    # Initialize the starting point, projecting variables from 0 onto their
    # bounds if VariablePrimalStart is not provided.
    for (i, v) in enumerate(model.variable_primal_start)
        if v !== nothing
            model.inner.x[i] = v
        else
            model.inner.x[i] = max(0.0, model.variables.lower[i])
            model.inner.x[i] = min(model.inner.x[i], model.variables.upper[i])
        end
    end
    # Initialize the dual start to 0.0 if NLPBlockDualStart is not provided.
    if model.nlp_dual_start === nothing
        model.nlp_dual_start = zeros(Float64, num_nlp_constraints)
    end
    # ConstraintDualStart
    row = 1
    for key in _CONSTRAINT_ORDERING
        for info in getfield(model, key)
            model.inner.mult_g[row] = _dual_start(model, info.dual_start, -1)
            row += 1
        end
    end
    for dual_start in model.nlp_dual_start
        model.inner.mult_g[row] = _dual_start(model, dual_start, -1)
        row += 1
    end
    # ConstraintDualStart for variable bounds.
    # Fixed: this previously iterated `1:length(model.inner.n)`, but `n` is a
    # scalar count and `length(::Number) == 1` in Julia, so only the first
    # variable's bound multipliers were ever initialized.
    for i in eachindex(model.inner.mult_x_L)
        model.inner.mult_x_L[i] =
            _dual_start(model, model.variable_lower_start[i])
        model.inner.mult_x_U[i] =
            _dual_start(model, model.variable_upper_start[i], -1)
    end
    optimize!(model.inner)
    # Store SolveTimeSec.
    model.solve_time = time() - start_time
    return
end
### MOI.ResultCount
# SQP always has an iterate available once a solve has been attempted.
function MOI.get(model::Optimizer, ::MOI.ResultCount)
    return model.inner === nothing ? 0 : 1
end
### MOI.TerminationStatus
# Map the solver's symbolic return status to the MOI termination codes.
function MOI.get(model::Optimizer, ::MOI.TerminationStatus)
    if model.invalid_model
        return MOI.INVALID_MODEL
    elseif model.inner === nothing
        return MOI.OPTIMIZE_NOT_CALLED
    end
    status = ApplicationReturnStatus[model.inner.status]
    if status == :Solve_Succeeded || status == :Feasible_Point_Found
        return MOI.LOCALLY_SOLVED
    elseif status == :Infeasible_Problem_Detected
        return MOI.LOCALLY_INFEASIBLE
    elseif status == :Solved_To_Acceptable_Level
        return MOI.ALMOST_LOCALLY_SOLVED
    elseif status == :Search_Direction_Becomes_Too_Small
        return MOI.NUMERICAL_ERROR
    elseif status == :Diverging_Iterates
        return MOI.NORM_LIMIT
    elseif status == :User_Requested_Stop
        return MOI.INTERRUPTED
    elseif status == :Maximum_Iterations_Exceeded
        return MOI.ITERATION_LIMIT
    elseif status == :Maximum_CpuTime_Exceeded
        return MOI.TIME_LIMIT
    elseif status == :Restoration_Failed
        return MOI.NUMERICAL_ERROR
    elseif status == :Error_In_Step_Computation
        return MOI.NUMERICAL_ERROR
    elseif status == :Invalid_Option
        return MOI.INVALID_OPTION
    elseif status == :Not_Enough_Degrees_Of_Freedom
        return MOI.INVALID_MODEL
    elseif status == :Invalid_Problem_Definition
        return MOI.INVALID_MODEL
    elseif status == :Invalid_Number_Detected
        return MOI.INVALID_MODEL
    elseif status == :Unrecoverable_Exception
        return MOI.OTHER_ERROR
    elseif status == :Insufficient_Memory
        return MOI.MEMORY_LIMIT
    else
        # Fixed: the previous catch-all reported every unrecognized status as
        # MOI.MEMORY_LIMIT, mislabeling genuinely unknown outcomes.
        return MOI.OTHER_ERROR
    end
end
### MOI.RawStatusString
# Human-readable version of the solver's raw status symbol.
function MOI.get(model::Optimizer, ::MOI.RawStatusString)
    if model.invalid_model
        return "The model has no variable"
    elseif model.inner === nothing
        return "Optimize not called"
    else
        return string(ApplicationReturnStatus[model.inner.status])
    end
end
### MOI.PrimalStatus
function MOI.get(model::Optimizer, attr::MOI.PrimalStatus)
    if !(1 <= attr.result_index <= MOI.get(model, MOI.ResultCount()))
        return MOI.NO_SOLUTION
    end
    status = ApplicationReturnStatus[model.inner.status]
    if status == :Solve_Succeeded
        return MOI.FEASIBLE_POINT
    elseif status == :Feasible_Point_Found
        return MOI.FEASIBLE_POINT
    elseif status == :Solved_To_Acceptable_Level
        # Solutions are only guaranteed to satisfy the "acceptable" convergence
        # tolerances.
        return MOI.NEARLY_FEASIBLE_POINT
    elseif status == :Infeasible_Problem_Detected
        return MOI.INFEASIBLE_POINT
    else
        return MOI.UNKNOWN_RESULT_STATUS
    end
end
### MOI.DualStatus
function MOI.get(model::Optimizer, attr::MOI.DualStatus)
    if !(1 <= attr.result_index <= MOI.get(model, MOI.ResultCount()))
        return MOI.NO_SOLUTION
    end
    status = ApplicationReturnStatus[model.inner.status]
    if status == :Solve_Succeeded
        return MOI.FEASIBLE_POINT
    elseif status == :Feasible_Point_Found
        return MOI.FEASIBLE_POINT
    elseif status == :Solved_To_Acceptable_Level
        # Solutions are only guaranteed to satisfy the "acceptable" convergence
        # tolerances.
        return MOI.NEARLY_FEASIBLE_POINT
    else
        return MOI.UNKNOWN_RESULT_STATUS
    end
end
### MOI.SolveTimeSec

# Wall-clock solve time recorded on the model.
function MOI.get(model::Optimizer, ::MOI.SolveTimeSec)
    return model.solve_time
end
### MOI.ObjectiveValue
function MOI.get(model::Optimizer, attr::MOI.ObjectiveValue)
    MOI.check_result_index_bounds(model, attr)
    # Undo the sign flip used for maximization models (the objective is
    # presumably negated before solving — see the objective setter).
    if model.sense == MOI.MAX_SENSE
        return -model.inner.obj_val
    end
    return model.inner.obj_val
end
### MOI.VariablePrimal
function MOI.get(
    model::Optimizer,
    attr::MOI.VariablePrimal,
    vi::MOI.VariableIndex,
)
    MOI.check_result_index_bounds(model, attr)
    MOI.throw_if_not_valid(model, vi)
    col = column(vi)
    return model.inner.x[col]
end
### MOI.ConstraintPrimal
function MOI.get(
    model::Optimizer,
    attr::MOI.ConstraintPrimal,
    ci::MOI.ConstraintIndex{F,S},
) where {
    F<:Union{
        MOI.ScalarAffineFunction{Float64},
        MOI.ScalarQuadraticFunction{Float64},
    },
    S,
}
    MOI.check_result_index_bounds(model, attr)
    MOI.throw_if_not_valid(model, ci)
    # Constraints of each (F, S) combination are stored as a contiguous block;
    # `_offset` locates the first row of this block.
    row = _offset(model, F, S) + ci.value
    return model.inner.g[row]
end
function MOI.get(
    model::Optimizer,
    attr::MOI.ConstraintPrimal,
    ci::MOI.ConstraintIndex{
        MOI.VariableIndex,
        <:Union{
            MOI.LessThan{Float64},
            MOI.GreaterThan{Float64},
            MOI.EqualTo{Float64},
        },
    },
)
    MOI.check_result_index_bounds(model, attr)
    MOI.throw_if_not_valid(model, ci)
    # The primal value of a variable-bound constraint is the variable itself.
    x = model.inner.x
    return x[ci.value]
end
### MOI.ConstraintDual

# Sign-convention factor applied to the solver's multipliers when reporting
# MOI duals.
# NOTE(review): this is a constant 1.0 and does not depend on `model.sense`;
# confirm that maximization problems do not require a sign flip here.
_dual_multiplier(model::Optimizer) = 1.0
function MOI.get(
    model::Optimizer,
    attr::MOI.ConstraintDual,
    ci::MOI.ConstraintIndex{F,S},
) where {
    F<:Union{
        MOI.ScalarAffineFunction{Float64},
        MOI.ScalarQuadraticFunction{Float64},
    },
    S,
}
    MOI.check_result_index_bounds(model, attr)
    MOI.throw_if_not_valid(model, ci)
    # Row duals are negated to match the MOI sign convention.
    row = _offset(model, F, S) + ci.value
    return -_dual_multiplier(model) * model.inner.mult_g[row]
end
function MOI.get(
    model::Optimizer,
    attr::MOI.ConstraintDual,
    ci::MOI.ConstraintIndex{MOI.VariableIndex,MOI.LessThan{Float64}},
)
    MOI.check_result_index_bounds(model, attr)
    MOI.throw_if_not_valid(model, ci)
    # Net reduced cost; an upper bound contributes a nonpositive dual.
    lower = model.inner.mult_x_L[ci.value]
    upper = model.inner.mult_x_U[ci.value]
    return min(0.0, _dual_multiplier(model) * (lower - upper))
end
function MOI.get(
    model::Optimizer,
    attr::MOI.ConstraintDual,
    ci::MOI.ConstraintIndex{MOI.VariableIndex,MOI.GreaterThan{Float64}},
)
    MOI.check_result_index_bounds(model, attr)
    MOI.throw_if_not_valid(model, ci)
    # Net reduced cost; a lower bound contributes a nonnegative dual.
    lower = model.inner.mult_x_L[ci.value]
    upper = model.inner.mult_x_U[ci.value]
    return max(0.0, _dual_multiplier(model) * (lower - upper))
end
function MOI.get(
    model::Optimizer,
    attr::MOI.ConstraintDual,
    ci::MOI.ConstraintIndex{MOI.VariableIndex,MOI.EqualTo{Float64}},
)
    MOI.check_result_index_bounds(model, attr)
    MOI.throw_if_not_valid(model, ci)
    # Net reduced cost; an equality bound dual may take either sign.
    lower = model.inner.mult_x_L[ci.value]
    upper = model.inner.mult_x_U[ci.value]
    return _dual_multiplier(model) * (lower - upper)
end
### MOI.NLPBlockDual
function MOI.get(model::Optimizer, attr::MOI.NLPBlockDual)
    MOI.check_result_index_bounds(model, attr)
    # NLP-block rows are stored after the structured constraint rows.
    first_row = 1 + _nlp_constraint_offset(model)
    return -_dual_multiplier(model) .* model.inner.mult_g[first_row:end]
end
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 344 | module SqpSolver
using LinearAlgebra
using SparseArrays
using Printf
using Logging
using JuMP
import MathOptInterface
const MOI = MathOptInterface
const MOIU = MathOptInterface.Utilities
include("status.jl")
include("parameters.jl")
include("model.jl")
include("algorithms.jl")
include("utils.jl")
include("MOI_wrapper.jl")
end # module
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 302 | """
    AbstractOptimizer

Abstract supertype of all solver implementations in this package; a concrete
solver subtypes it and implements `run!`.
"""
abstract type AbstractOptimizer end
"""
    run!(optimizer)

Run the solver's main algorithm. Each concrete subtype of `AbstractOptimizer`
provides its own method.
"""
function run! end
include("algorithms/common.jl")
include("algorithms/merit.jl")
include("algorithms/subproblem.jl")
include("algorithms/sqp.jl")
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 3109 | abstract type AbstractSqpModel end
"""
    Model{T,TD}

Internal NLP description: problem dimensions, variable/constraint bounds,
Jacobian/Hessian sparsity patterns, evaluation callbacks, and solution fields
that the solver fills in after `optimize!`.
"""
mutable struct Model{T,TD} <: AbstractSqpModel
    n::Int # Num vars
    m::Int # Num cons
    x::TD # Starting and final solution
    x_L::TD # Variables Lower Bound
    x_U::TD # Variables Upper Bound
    g::TD # Final constraint values
    g_L::TD # Constraints Lower Bound
    g_U::TD # Constraints Upper Bound
    j_str::Array{Tuple{Int,Int}} # Jacobian sparsity pattern (row, col)
    h_str::Array{Tuple{Int,Int}} # Hessian sparsity pattern (row, col)
    mult_g::TD # lagrange multipliers on constraints
    mult_x_L::TD # lagrange multipliers on lower bounds
    mult_x_U::TD # lagrange multipliers on upper bounds
    obj_val::T # Final objective
    status::Int # Final status (codes defined in `ApplicationReturnStatus`)
    # Callbacks
    eval_f::Function
    eval_g::Function
    eval_grad_f::Function
    eval_jac_g::Function
    eval_h::Union{Function,Nothing}
    num_linear_constraints::Int # number of linear constraints
    intermediate # Can be nothing
    # For MathProgBase
    sense::Symbol
    parameters::Parameters
    statistics::Dict{String,Any} # collects parameters of all iterations inside the algorithm if StatisticsFlag > 0
    # Inner constructor: solution fields are zero-initialized and the status is
    # set to -5 (:Optimize_not_called).
    Model(
        n::Int,
        m::Int,
        x_L::TD,
        x_U::TD,
        g_L::TD,
        g_U::TD,
        j_str::Array{Tuple{Int,Int}},
        h_str::Array{Tuple{Int,Int}},
        eval_f::Function,
        eval_g::Function,
        eval_grad_f::Function,
        eval_jac_g::Function,
        eval_h::Union{Function,Nothing},
        num_linear_constraints::Int,
        sense::Symbol, # {:Min, :Max}
        parameters::Parameters
    ) where {T, TD<:AbstractArray{T}} = new{T,TD}(
        n, m,
        zeros(n), x_L, x_U,
        zeros(m), g_L, g_U,
        j_str, h_str,
        zeros(m), zeros(n), zeros(n),
        0.0,
        -5, # :Optimize_not_called
        eval_f, eval_g, eval_grad_f, eval_jac_g, eval_h,
        num_linear_constraints,
        nothing, sense,
        parameters,
        Dict{String,Any}()
    )
end
"""
    optimize!(model::Model)

Dispatch to the algorithm selected by `model.parameters.algorithm` and run it.
Requires `external_optimizer` to be configured for the QP subproblem solves.
"""
function optimize!(model::Model)
    if isnothing(model.parameters.external_optimizer)
        # -12 = :Invalid_Option
        model.status = -12
        @error "`external_optimizer` parameter must be set for subproblem solutions."
        return nothing
    end
    if model.parameters.algorithm == "SQP-TR"
        run!(SqpTR(model))
    else
        @warn "$(model.parameters.algorithm) is not defined"
    end
    return nothing
end
"""
    add_statistic(model, name, value)

Store a single named statistic; no-op when statistics collection is disabled.
"""
function add_statistic(model::AbstractSqpModel, name::String, value)
    model.parameters.StatisticsFlag == 0 && return
    model.statistics[name] = value
end
"""
    add_statistics(model, name, value::T)

Append `value` to the per-iteration statistic vector `name`, creating the
vector on first use; no-op when statistics collection is disabled.
"""
function add_statistics(model::AbstractSqpModel, name::String, value::T) where T
    model.parameters.StatisticsFlag == 0 && return
    history = get!(() -> Array{T,1}(), model.statistics, name)
    push!(history, value)
end
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 1962 | Base.@kwdef mutable struct Parameters
    mode::String = "Normal" # "Debug" enables extra printing and collection of analysis parameters.
    algorithm::String = "SQP-TR" # SQP-TR: sequential quadratic programming with trust region
    # Defines the external solver for subproblems
    external_optimizer::Union{Nothing,DataType,MOI.OptimizerWithAttributes,Function} =
        nothing
    # Whether to use approximation hessian (limited-memory), exact, or none
    hessian_type::String = "none"
    # flags
    OutputFlag::Int = 1 # 0 suppresses all outputs except warnings and errors
    StatisticsFlag::Int = 0 # 0 suppresses collection of statistics parameters
    # Algorithmic parameters
    tol_direction::Float64 = 1.e-8 # tolerance for the norm of direction
    tol_residual::Float64 = 1.e-8 # tolerance for Kuhn-Tucker residual
    tol_infeas::Float64 = 1.e-8 # tolerance for constraint violation
    max_iter::Int = 3000 # Defines the maximum number of iterations
    time_limit::Float64 = Inf # Defines the time limit for the solver. (This hasn't been implemented yet)
    init_mu::Float64 = 1.e+0 # initial mu value
    max_mu::Float64 = 1.e+10 # maximum mu value allowed
    rho::Float64 = 0.8 # parameter in (0,1) used for updating merit function penalty
    eta::Float64 = 0.4 # descent step test parameter defined in (0,0.5)
    tau::Float64 = 0.9 # line search step decrease parameter defined in (0,1)
    min_alpha::Float64 = 1.e-6 # minimum step size
    tr_size::Float64 = 10. # initial trust region size
    use_soc::Bool = false # use second-order correction
end
"""
    get_parameter(params, pname)

Return the value of the `Parameters` field named `pname`.
"""
function get_parameter(params::Parameters, pname::String)
    field = Symbol(pname)
    return getfield(params, field)
end
"""
    set_parameter(params, pname, val)

Assign `val` to the `Parameters` field named `pname`.
"""
function set_parameter(params::Parameters, pname::String, val)
    field = Symbol(pname)
    setfield!(params, field, val)
    return nothing
end
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 768 | " solution status (from ipopt) "
# Solver return codes, following Ipopt's ApplicationReturnStatus convention:
# nonnegative codes are (near-)successful outcomes, negative codes are failures.
# Declared `const`: a non-const global is type-unstable and mutable by rebinding.
const ApplicationReturnStatus = Dict(
    0 => :Solve_Succeeded,
    1 => :Solved_To_Acceptable_Level,
    2 => :Infeasible_Problem_Detected,
    3 => :Search_Direction_Becomes_Too_Small,
    4 => :Diverging_Iterates,
    5 => :User_Requested_Stop,
    6 => :Feasible_Point_Found,
    -1 => :Maximum_Iterations_Exceeded,
    -2 => :Restoration_Failed,
    -3 => :Error_In_Step_Computation,
    -4 => :Maximum_CpuTime_Exceeded,
    -5 => :Optimize_not_called,
    -6 => :Method_not_defined,
    -10 => :Not_Enough_Degrees_Of_Freedom,
    -11 => :Invalid_Problem_Definition,
    -12 => :Invalid_Option,
    -13 => :Invalid_Number_Detected,
    -100 => :Unrecoverable_Exception,
    -102 => :Insufficient_Memory,
    -199 => :Internal_Error,
)
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 789 | """
"""
# Print `msg` on its own line, then the entries of `x` five per row in
# "%+.6f" format, ending with a newline.
function print_vector(x::Vector{Float64}, msg::String = "")
    @printf("%s\n", msg)
    for i in eachindex(x)
        @printf(" %+.6f", x[i])
        i % 5 == 0 && @printf("\n")
    end
    @printf("\n")
end
"""
"""
function dropzeros!(x::Vector{Float64}, eps::Float64 = 1.0e-10)
for (i,v) in enumerate(x)
if abs(v) < eps
x[i] = 0.0
end
end
end
# Print the nonzero entries of `A` row by row, dropping entries with
# magnitude below 1e-10; empty rows are skipped.
function print_matrix(A::SparseMatrixCSC{Float64, Int64})
    for i = 1:A.m
        row = A[i, :]
        SparseArrays.droptol!(row, 1.0e-10)
        isempty(row.nzind) && continue
        @printf("row%6d", i)
        for (k, j) in enumerate(row.nzind)
            @printf("\tcol%6d\t%+.6e", j, row.nzval[k])
        end
        @printf("\n")
    end
end
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 1964 | """
    KT_residuals(df, lambda, mult_x_U, mult_x_L, Jac)

Compute the normalized Kuhn-Tucker (KKT) residual.

# Arguments
- `df`: objective gradient
- `lambda`: Lagrange multipliers of the constraints
- `mult_x_U`: reduced costs with respect to the upper bounds
- `mult_x_L`: reduced costs with respect to the lower bounds
- `Jac`: constraint Jacobian matrix
"""
function KT_residuals(
    df::Tv, lambda::Tv, mult_x_U::Tv, mult_x_L::Tv, Jac::Tm
) where {T, Tv<:AbstractArray{T}, Tm<:AbstractMatrix{T}}
    # Stationarity residual of the Lagrangian gradient, in the infinity norm.
    residual = norm(df + Jac' * lambda + mult_x_U - mult_x_L, Inf)
    # Normalization guards against inflated residuals on badly scaled problems.
    denom = max(1.0, norm(df, Inf), norm(mult_x_U, Inf), norm(mult_x_L, Inf))
    for i in axes(Jac, 1)
        denom = max(denom, abs(lambda[i]) * norm(Jac[i, :]))
    end
    return residual / denom
end
"""
norm_complementarity
Compute the normalized complementarity
"""
function norm_complementarity(
    E::Tv, g_L::Tv, g_U::Tv, x::Tv, x_L::Tv, x_U::Tv,
    lambda::Tv, mult_x_U::Tv, mult_x_L::Tv,
    p = Inf
) where {T, Tv <: AbstractArray{T}}
    m = length(E)
    compl = Tv(undef, m)
    denom = 0.0
    for i = 1:m
        if g_L[i] == g_U[i]
            # Equality rows carry no complementarity term.
            compl[i] = 0.0
        else
            slack = min(E[i] - g_L[i], g_U[i] - E[i])
            compl[i] = slack * lambda[i]
            denom += lambda[i]^2
        end
    end
    return norm(compl, p) / (1 + sqrt(denom))
end
"""
norm_violations
Compute the normalized constraint violation
"""
function norm_violations(
    E::Tv, g_L::Tv, g_U::Tv, x::Tv, x_L::Tv, x_U::Tv, p = Inf
) where {T, Tv <: AbstractArray{T}}
    # Distance of `v` outside the interval [lo, hi] (0.0 when inside).
    excess(v, lo, hi) = v > hi ? v - hi : (v < lo ? lo - v : 0.0)
    m = length(E)
    n = length(x)
    viol = Tv(undef, m + n)
    fill!(viol, 0.0)
    for i = 1:m
        viol[i] = excess(E[i], g_L[i], g_U[i])
    end
    for j = 1:n
        viol[m+j] = excess(x[j], x_L[j], x_U[j])
    end
    return norm(viol, p)
end
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 815 | """
compute_derivative
Compute and return directional derivative
# Arguments
- `∇f`: evaluation of the objective gradient
- `p`: search direction
- `∇fp`: objective gradient times times search direction, i.e., `∇f' * p`
- `μ`: penalty parameter
- `cons_viol`: constraint violations
"""
# Scalar `μ` applies one penalty to the total violation; vector `μ` applies
# per-constraint penalties. The `(∇f, p, …)` forms first reduce ∇fᵀp and then
# delegate to the `(∇fp, …)` forms.
compute_derivative(∇fp::T, μ::T, cons_viol::T) where {T} = ∇fp - μ * cons_viol
compute_derivative(∇fp::T, μ::Tv, cons_viol::Tv) where {T, Tv<:AbstractArray{T}} = ∇fp - μ' * cons_viol
compute_derivative(∇fp::T, μ::T, cons_viol::Tv) where {T, Tv<:AbstractArray{T}} = ∇fp - μ * sum(cons_viol)
compute_derivative(∇f::Tv, p::Tv, μ::T, cons_viol::Tv) where {T, Tv<:AbstractArray{T}} = compute_derivative(∇f' * p, μ, cons_viol)
compute_derivative(∇f::Tv, p::Tv, μ::Tv, cons_viol::Tv) where {T, Tv<:AbstractArray{T}} = compute_derivative(∇f' * p, μ, cons_viol)
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 6206 | """
    AbstractSqpOptimizer

Abstract supertype of SQP solver implementations; they share the common
fields defined by `@sqp_fields` below.
"""
abstract type AbstractSqpOptimizer <: AbstractOptimizer end
# `@def name body` defines a macro `@name` that pastes `body` verbatim at the
# call site. It is used to share a common field list between the concrete
# SQP struct definitions.
macro def(name, definition)
    return quote
        macro $(esc(name))()
            esc($(Expr(:quote, definition)))
        end
    end
end
# Common fields shared by all SQP optimizers. Pasted into each concrete struct
# via `@sqp_fields`; the enclosing struct must have type parameters named
# `T` (scalar), `TD` (dense array), and `TI` (integer index array).
@def sqp_fields begin
    problem::AbstractSqpModel # problem data
    x::TD # primal solution
    p::TD # search direction
    p_soc::TD # direction after SOC
    p_slack::Dict{Int,TD} # search direction at feasibility restoration phase
    lambda::TD # Lagrangian dual multiplier
    mult_x_L::TD # reduced cost for lower bound
    mult_x_U::TD # reduced cost for upper bound
    # Evaluations at `x`
    f::T # objective function
    df::TD # gradient
    E::TD # constraint evaluation
    dE::TD # Jacobian
    j_row::TI # Jacobian matrix row index
    j_col::TI # Jacobian matrix column index
    Jacobian::AbstractMatrix{T} # Jacobian matrix
    h_row::TI # Hessian matrix row index
    h_col::TI # Hessian matrix column index
    h_val::TD # Hessian matrix values
    Hessian::Union{Nothing,AbstractMatrix{T}} # Hessian matrix
    prim_infeas::T # primal infeasibility at `x`
    dual_infeas::T # dual (approximate?) infeasibility
    compl::T # complementary slackness
    optimizer::Union{Nothing,AbstractSubOptimizer} # Subproblem optimizer
    sub_status # subproblem status
    options::Parameters
    feasibility_restoration::Bool # indicator for feasibility restoration
    iter::Int # iteration counter
    ret::Int # solution status
    start_time::Float64 # solution start time
    start_iter_time::Float64 # iteration start time
    tmpx::TD # temporary solution x
    tmpE::TD # temporary constraint evaluation
end
"""
QpData
Create QP subproblem data
"""
function QpData(sqp::AbstractSqpOptimizer)
return QpData(
MOI.MIN_SENSE,
sqp.Hessian,
sqp.df,
sqp.Jacobian,
sqp.E,
sqp.problem.g_L,
sqp.problem.g_U,
sqp.problem.x_L,
sqp.problem.x_U,
sqp.problem.num_linear_constraints
)
end
"""
eval_functions!
Evalute the objective, gradient, constraints, and Jacobian.
"""
function eval_functions!(sqp::AbstractSqpOptimizer)
sqp.f = sqp.problem.eval_f(sqp.x)
sqp.problem.eval_grad_f(sqp.x, sqp.df)
sqp.problem.eval_g(sqp.x, sqp.E)
eval_Jacobian!(sqp)
# print_matrix(sqp.Jacobian)
if !isnothing(sqp.problem.eval_h)
sqp.problem.eval_h(sqp.x, sqp.h_row, sqp.h_col, 1.0, sqp.lambda, sqp.h_val)
fill!(sqp.Hessian.nzval, 0.0)
for (i, v) in enumerate(sqp.h_val)
if sqp.h_col[i] == sqp.h_row[i]
sqp.Hessian[sqp.h_row[i],sqp.h_col[i]] += v
else
sqp.Hessian[sqp.h_row[i],sqp.h_col[i]] += v
sqp.Hessian[sqp.h_col[i],sqp.h_row[i]] += v
end
end
end
end
"""
eval_Jacobian!
Evaluate Jacobian matrix.
"""
function eval_Jacobian!(sqp::AbstractSqpOptimizer)
sqp.problem.eval_jac_g(sqp.x, sqp.j_row, sqp.j_col, sqp.dE)
fill!(sqp.Jacobian.nzval, 0.0)
for (i, v) in enumerate(sqp.dE)
sqp.Jacobian[sqp.j_row[i],sqp.j_col[i]] += v
end
end
"""
norm_violations
Compute the normalized constraint violation
"""
norm_violations(sqp::AbstractSqpOptimizer, p = 1) = norm_violations(
sqp.E, sqp.problem.g_L, sqp.problem.g_U,
sqp.x, sqp.problem.x_L, sqp.problem.x_U,
p
)
function norm_violations(sqp::AbstractSqpOptimizer, x::TD, p = 1) where {T, TD<:AbstractArray{T}}
fill!(sqp.tmpE, 0.0)
sqp.problem.eval_g(x, sqp.tmpE)
return norm_violations(
sqp.tmpE, sqp.problem.g_L, sqp.problem.g_U,
x, sqp.problem.x_L, sqp.problem.x_U,
p
)
end
"""
KT_residuals
Compute Kuhn-Turck residuals
"""
KT_residuals(sqp::AbstractSqpOptimizer) = KT_residuals(sqp.df, sqp.lambda, sqp.mult_x_U, sqp.mult_x_L, sqp.Jacobian)
"""
norm_complementarity
Compute the normalized complementeraity
"""
norm_complementarity(sqp::AbstractSqpOptimizer, p = Inf) = norm_complementarity(
sqp.E, sqp.problem.g_L, sqp.problem.g_U,
sqp.x, sqp.problem.x_L, sqp.problem.x_U,
sqp.lambda, sqp.mult_x_U, sqp.mult_x_L,
p
)
"""
compute_phi
Evaluate and return the merit function value for a given point x + α * p.
# Arguments
- `sqp`: SQP structure
- `x`: the current solution point
- `α`: step size taken from `x`
- `p`: direction taken from `x`
"""
function compute_phi(sqp::AbstractSqpOptimizer, x::TD, α::T, p::TD) where {T,TD<:AbstractArray{T}}
sqp.tmpx .= x .+ α * p
f = sqp.f
sqp.tmpE .= sqp.E
if α > 0.0
f = sqp.problem.eval_f(sqp.tmpx)
sqp.problem.eval_g(sqp.tmpx, sqp.tmpE)
end
if sqp.feasibility_restoration
return norm_violations(sqp.tmpE, sqp.problem.g_L, sqp.problem.g_U, sqp.tmpx, sqp.problem.x_L, sqp.problem.x_U, 1)
else
return f + sqp.μ * norm_violations(sqp.tmpE, sqp.problem.g_L, sqp.problem.g_U, sqp.tmpx, sqp.problem.x_L, sqp.problem.x_U, 1)
end
end
"""
compute_derivative
Compute the directional derivative at current solution for a given direction.
"""
function compute_derivative(sqp::AbstractSqpOptimizer)
dfp = 0.0
cons_viol = zeros(sqp.problem.m)
if sqp.feasibility_restoration
for (_, v) in sqp.p_slack
dfp += sum(v)
end
for i = 1:sqp.problem.m
viol = maximum([0.0, sqp.E[i] - sqp.problem.g_U[i], sqp.problem.g_L[i] - sqp.E[i]])
lhs = sqp.E[i] - viol
cons_viol[i] += maximum([0.0, lhs - sqp.problem.g_U[i], sqp.problem.g_L[i] - lhs])
end
else
dfp += sqp.df' * sqp.p
for i = 1:sqp.problem.m
cons_viol[i] += maximum([
0.0,
sqp.E[i] - sqp.problem.g_U[i],
sqp.problem.g_L[i] - sqp.E[i]
])
end
end
return compute_derivative(dfp, sqp.μ, cons_viol)
end
# Return true (after recording `sqp.ret`) when the iteration limit is reached.
function terminate_by_iterlimit(sqp::AbstractSqpOptimizer)
    sqp.iter <= sqp.options.max_iter && return false
    # 6 = feasible point found; -1 = iteration limit exceeded
    sqp.ret = sqp.prim_infeas <= sqp.options.tol_infeas ? 6 : -1
    return true
end
# include("sqp_line_search.jl")
include("sqp_trust_region.jl") | SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 12687 | """
Sequential quadratic programming with line search
"""
mutable struct SqpLS{T,TD,TI} <: AbstractSqpOptimizer
    @sqp_fields

    soc::TD                   # second-order correction direction
    phi::T                    # merit function value
    μ::TD                     # penalty parameters for the merit function
    directional_derivative::T # directional derivative
    alpha::T                  # step size

    # NOTE: the constructor previously took `Model{T,Tv,Tt}`, but `Model` has
    # only two type parameters (`Model{T,TD}`), and `@sqp_fields` requires the
    # struct parameters to be named `TD`/`TI`. The signature is aligned with
    # `SqpTR` for consistency.
    function SqpLS(problem::Model{T,TD}, TI = Vector{Int}) where {T,TD<:AbstractArray{T}}
        sqp = new{T,TD,TI}()
        sqp.problem = problem
        sqp.x = TD(undef, problem.n)
        sqp.p = zeros(problem.n)
        sqp.p_slack = Dict()
        sqp.lambda = zeros(problem.m)
        sqp.mult_x_L = zeros(problem.n)
        sqp.mult_x_U = zeros(problem.n)
        sqp.df = TD(undef, problem.n)
        sqp.E = TD(undef, problem.m)
        sqp.dE = TD(undef, length(problem.j_str))
        sqp.j_row = TI(undef, length(problem.j_str))
        sqp.j_col = TI(undef, length(problem.j_str))
        for i in eachindex(problem.j_str)
            sqp.j_row[i] = Int(problem.j_str[i][1])
            sqp.j_col[i] = Int(problem.j_str[i][2])
        end
        sqp.Jacobian = sparse(sqp.j_row, sqp.j_col, ones(length(sqp.j_row)), problem.m, problem.n)
        sqp.h_row = TI(undef, length(problem.h_str))
        sqp.h_col = TI(undef, length(problem.h_str))
        for i in eachindex(problem.h_str)
            sqp.h_row[i] = Int(problem.h_str[i][1])
            sqp.h_col[i] = Int(problem.h_str[i][2])
        end
        sqp.h_val = TD(undef, length(problem.h_str))
        sqp.Hessian = sparse(sqp.h_row, sqp.h_col, ones(length(sqp.h_row)), problem.n, problem.n)
        sqp.soc = zeros(problem.n)
        sqp.phi = Inf
        sqp.μ = TD(undef, problem.m)
        fill!(sqp.μ, 10.0)
        sqp.alpha = 1.0
        sqp.prim_infeas = Inf
        sqp.dual_infeas = Inf
        sqp.compl = Inf
        sqp.options = problem.parameters
        sqp.optimizer = nothing
        sqp.sub_status = nothing
        sqp.feasibility_restoration = false
        sqp.iter = 1
        sqp.ret = -5
        sqp.start_time = 0.0
        sqp.start_iter_time = 0.0
        # `tmpx`/`tmpE` are used by `compute_phi`; previously left unassigned,
        # which raised `UndefRefError` during the line search.
        sqp.tmpx = TD(undef, problem.n)
        sqp.tmpE = TD(undef, problem.m)
        return sqp
    end
end
"""
run!
Run the line-search SQP algorithm
"""
function run!(sqp::SqpLS)
sqp.start_time = time()
if sqp.options.OutputFlag == 1
sparsity_val = ifelse(
sqp.problem.m > 0,
length(sqp.problem.j_str) / (sqp.problem.m * sqp.problem.n),
0.0,
)
@printf("Constraint sparsity: %e\n", sparsity_val)
add_statistics(sqp.problem, "sparsity", sparsity_val)
else
Logging.disable_logging(Logging.Info)
end
# Set initial point from MOI
@assert length(sqp.x) == length(sqp.problem.x)
sqp.x .= sqp.problem.x
# Adjust the initial point to satisfy the column bounds
for i = 1:sqp.problem.n
if sqp.problem.x_L[i] > -Inf
sqp.x[i] = max(sqp.x[i], sqp.problem.x_L[i])
end
if sqp.problem.x_U[i] > -Inf
sqp.x[i] = min(sqp.x[i], sqp.problem.x_U[i])
end
end
sqp.iter = 1
is_valid_step = true
while true
# Iteration counter limit
if sqp.iter > sqp.options.max_iter
sqp.ret = -1
if sqp.prim_infeas <= sqp.options.tol_infeas
sqp.ret = 6
end
break
end
sqp.start_iter_time = time()
# evaluate function, constraints, gradient, Jacobian
eval_functions!(sqp)
sqp.alpha = 0.0
sqp.prim_infeas = norm_violations(sqp, Inf)
sqp.dual_infeas = KT_residuals(sqp)
sqp.compl = norm_complementarity(sqp)
LP_time_start = time()
# solve QP subproblem (to initialize dual multipliers)
# sqp.p, lambda, mult_x_U, mult_x_L, sqp.p_slack, status =
sqp.p, sqp.lambda, sqp.mult_x_U, sqp.mult_x_L, sqp.p_slack, status =
sub_optimize!(sqp)
# directions for dual multipliers
# p_lambda = lambda - sqp.lambda
# p_x_U = mult_x_U - sqp.mult_x_U
# p_x_L = mult_x_L - sqp.mult_x_L
add_statistics(sqp.problem, "QP_time", time() - LP_time_start)
if status ∈ [MOI.OPTIMAL, MOI.ALMOST_LOCALLY_SOLVED, MOI.LOCALLY_SOLVED]
# do nothing
elseif status ∈ [MOI.INFEASIBLE, MOI.DUAL_INFEASIBLE, MOI.NORM_LIMIT]
if sqp.feasibility_restoration == true
@info "Failed to find a feasible direction"
if sqp.prim_infeas <= sqp.options.tol_infeas
sqp.ret = 6
else
sqp.ret = 2
end
break
else
# println("Feasibility restoration ($(status), |p| = $(norm(sqp.p, Inf))) begins.")
sqp.feasibility_restoration = true
continue
end
else
@warn("Unexpected QP subproblem solution status ($status)")
sqp.ret == -3
if sqp.prim_infeas <= sqp.options.tol_infeas
sqp.ret = 6
end
break
end
compute_mu!(sqp)
sqp.phi = compute_phi(sqp, sqp.x, 0.0, sqp.p)
sqp.directional_derivative = compute_derivative(sqp)
# step size computation
is_valid_step = compute_alpha(sqp)
print(sqp)
collect_statistics(sqp)
if norm(sqp.p, Inf) <= sqp.options.tol_direction
if sqp.feasibility_restoration
sqp.feasibility_restoration = false
sqp.iter += 1
continue
else
sqp.ret = 0
break
end
end
if sqp.prim_infeas <= sqp.options.tol_infeas && sqp.compl <= sqp.options.tol_residual
if sqp.feasibility_restoration
sqp.feasibility_restoration = false
sqp.iter += 1
continue
elseif sqp.dual_infeas <= sqp.options.tol_residual
sqp.ret = 0
break
end
end
# Failed to find a step size
if !is_valid_step
@info "Failed to find a step size"
# if sqp.feasibility_restoration
# if sqp.prim_infeas <= sqp.options.tol_infeas
# sqp.ret = 6
# else
# sqp.ret = 2
# end
# break
# else
# sqp.feasibility_restoration = true
# end
# sqp.iter += 1
# continue
## Second-order correction step
sqp.alpha = 1.0
sqp.soc, _, _, _, _, status = sub_optimize_soc!(sqp)
if status ∈ [MOI.OPTIMAL, MOI.ALMOST_LOCALLY_SOLVED, MOI.LOCALLY_SOLVED]
# TODO: Do we need a line search on this correction
f_k = sqp.problem.eval_f(sqp.x)
f_kk = sqp.problem.eval_f(sqp.x + sqp.p)
f_soc = sqp.problem.eval_f(sqp.x + sqp.p + sqp.soc)
@info "Second-order correction" f_k f_kk f_soc
else
@warn "Unexpected status ($status) from second-order correction subproblem"
end
end
# @info "solution at k " sqp.x sqp.problem.eval_f(sqp.x)
# update primal points
sqp.x += sqp.alpha .* sqp.p + sqp.soc
fill!(sqp.soc, 0.0)
# sqp.lambda += sqp.alpha .* p_lambda
# sqp.mult_x_U += sqp.alpha .* p_x_U
# sqp.mult_x_L += sqp.alpha .* p_x_L
# sqp.lambda += p_lambda
# sqp.mult_x_U += p_x_U
# sqp.mult_x_L += p_x_L
# @info "solution at k+1" sqp.x sqp.problem.eval_f(sqp.x)
sqp.iter += 1
end
sqp.problem.obj_val = sqp.problem.eval_f(sqp.x)
sqp.problem.status = Int(sqp.ret)
sqp.problem.x .= sqp.x
sqp.problem.g .= sqp.E
sqp.problem.mult_g .= sqp.lambda
sqp.problem.mult_x_U .= sqp.mult_x_U
sqp.problem.mult_x_L .= sqp.mult_x_L
add_statistic(sqp.problem, "iter", sqp.iter)
end
"""
sub_optimize!
Solve QP subproblems by using JuMP
"""
sub_optimize!(sqp::SqpLS) = sub_optimize!(sqp, JuMP.Model(sqp.options.external_optimizer), 1000.0)
"""
sub_optimize_soc!
Solve second-order correction subproblem
"""
sub_optimize_soc!(sqp::SqpLS) = sub_optimize_soc!(sqp, JuMP.Model(sqp.options.external_optimizer), 1000.0)
"""
compute_mu!
Compute the penalty parameter for the merit fucntion
"""
compute_mu!(sqp::AbstractSqpOptimizer) = compute_mu_rule2!(sqp)
# Rule 1: raise every μ[i] to at least the model-decrease ratio and |λ[i]|.
function compute_mu_rule1!(sqp::AbstractSqpOptimizer)
    denom = max((1 - sqp.options.rho) * norm_violations(sqp, 1), 1.0e-8)
    curvature = max(0.5 * sqp.p' * sqp.Hessian * sqp.p, 0.0)
    candidate = (sqp.df' * sqp.p + curvature) / denom
    for i = 1:sqp.problem.m
        sqp.μ[i] = max(sqp.μ[i], candidate, abs(sqp.lambda[i]))
    end
end
# Rule 2: on the first iteration initialize μ from the model decrease;
# afterwards only raise μ[i] to match the multiplier magnitudes.
function compute_mu_rule2!(sqp::AbstractSqpOptimizer)
    if sqp.iter == 1
        denom = max((1 - sqp.options.rho) * norm_violations(sqp, 1), 1.0e-8)
        curvature = max(0.5 * sqp.p' * sqp.Hessian * sqp.p, 0.0)
        fill!(sqp.μ, (sqp.df' * sqp.p + curvature) / denom)
    else
        for i = 1:sqp.problem.m
            sqp.μ[i] = max(sqp.μ[i], abs(sqp.lambda[i]))
        end
    end
end
# Rule 3: only raise μ[i] to match the current multiplier magnitudes.
function compute_mu_rule3!(sqp::AbstractSqpOptimizer)
    @. sqp.μ = max(sqp.μ, abs(sqp.lambda))
end
"""
compute_alpha
Compute step size for line search
"""
function compute_alpha(sqp::AbstractSqpOptimizer)::Bool
is_valid = true
sqp.alpha = 1.0
if norm(sqp.p, Inf) <= sqp.options.tol_direction
return true
end
phi_x_p = compute_phi(sqp, sqp.x, sqp.alpha, sqp.p)
eta = sqp.options.eta
# if phi_x_p > sqp.phi
# @info "Increasing ϕ" phi_x_p sqp.phi sqp.f sqp.problem.eval_f(sqp.x + sqp.alpha * sqp.p)
# end
# E_k = norm_violations(sqp, sqp.x)
# f_k = sqp.problem.eval_f(sqp.x)
while phi_x_p > sqp.phi + eta * sqp.alpha * sqp.directional_derivative
# The step size can become too small.
if sqp.alpha < sqp.options.min_alpha
is_valid = false
break
end
sqp.alpha *= sqp.options.tau
phi_x_p = compute_phi(sqp, sqp.x, sqp.alpha, sqp.p)
# E_k_p = norm_violations(sqp, sqp.x + sqp.alpha * sqp.p, 1)
# f_k_p = sqp.problem.eval_f(sqp.x + sqp.alpha * sqp.p)
# @info "step" sqp.alpha sqp.phi phi_x_p f_k norm(E_k, 1) f_k_p norm(E_k_p, 1)
end
# @show phi_x_p, sqp.phi, sqp.alpha, sqp.directional_derivative, is_valid
return is_valid
end
"""
print
Print iteration information.
"""
function print(sqp::SqpLS)
if sqp.options.OutputFlag == 0
return
end
if (sqp.iter - 1) % 25 == 0
@printf(" %6s", "iter")
@printf(" %15s", "f(x_k)")
@printf(" %15s", "ϕ(x_k)")
@printf(" %15s", "|μ|")
# @printf(" %15s", "D(ϕ,p)")
@printf(" %14s", "α")
@printf(" %14s", "|p|")
# @printf(" %14s", "α|p|")
@printf(" %14s", "inf_pr")
@printf(" %14s", "inf_du")
@printf(" %14s", "compl")
@printf(" %10s", "time")
@printf("\n")
end
st = ifelse(sqp.feasibility_restoration, "FR", " ")
@printf("%2s%6d", st, sqp.iter)
@printf(" %+6.8e", sqp.f)
@printf(" %+6.8e", sqp.phi)
@printf(" %+6.8e", norm(sqp.μ,Inf))
# @printf(" %+.8e", sqp.directional_derivative)
@printf(" %6.8e", sqp.alpha)
@printf(" %6.8e", norm(sqp.p, Inf))
# @printf(" %6.8e", sqp.alpha * norm(sqp.p, Inf))
@printf(" %6.8e", sqp.prim_infeas)
@printf(" %.8e", sqp.dual_infeas)
@printf(" %6.8e", sqp.compl)
@printf(" %10.2f", time() - sqp.start_time)
@printf("\n")
end
"""
collect_statistics
Collect iteration information.
"""
function collect_statistics(sqp::SqpLS)
if sqp.options.StatisticsFlag == 0
return
end
add_statistics(sqp.problem, "f(x)", sqp.f)
add_statistics(sqp.problem, "ϕ(x_k))", sqp.phi)
add_statistics(sqp.problem, "D(ϕ,p)", sqp.directional_derivative)
add_statistics(sqp.problem, "|p|", norm(sqp.p, Inf))
add_statistics(sqp.problem, "|J|2", norm(sqp.dE, 2))
add_statistics(sqp.problem, "|J|inf", norm(sqp.dE, Inf))
add_statistics(sqp.problem, "inf_pr", sqp.prim_infeas)
# add_statistics(sqp.problem, "inf_du", dual_infeas)
add_statistics(sqp.problem, "compl", sqp.compl)
add_statistics(sqp.problem, "alpha", sqp.alpha)
add_statistics(sqp.problem, "iter_time", time() - sqp.start_iter_time)
add_statistics(sqp.problem, "time_elapsed", time() - sqp.start_time)
end | SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 20367 | """
AbstractSqpTrOptimizer

Abstract supertype for trust-region sequential quadratic programming
optimizers; see `SqpTR`.
"""
abstract type AbstractSqpTrOptimizer <: AbstractSqpOptimizer end
"""
    SqpTR{T,TD,TI}

Trust-region SQP optimizer state: the common `@sqp_fields` plus multiplier
directions, second-order-correction buffers, the merit value, the penalty
parameter, and the trust-region radius bounds.
"""
mutable struct SqpTR{T,TD,TI} <: AbstractSqpOptimizer
    @sqp_fields
    # directions for multipliers
    p_lambda::TD
    p_mult_x_L::TD
    p_mult_x_U::TD
    E_soc::TD # constraint evaluation for SOC
    soc::TD # second-order correction direction
    phi::T # merit function value
    μ::T # penalty parameter
    Δ::T # current trust region size
    Δ_min::T # minimum trust region size allowed
    Δ_max::T # maximum trust region size allowed
    step_acceptance::Bool # whether the last trial step was accepted
    function SqpTR(problem::Model{T,TD}, TI = Vector{Int}) where {T,TD<:AbstractArray{T}}
        sqp = new{T,TD,TI}()
        sqp.problem = problem
        sqp.x = deepcopy(problem.x)
        sqp.p = zeros(T, problem.n)
        sqp.p_soc = zeros(T, problem.n)
        sqp.p_slack = Dict()
        sqp.lambda = zeros(T, problem.m)
        sqp.mult_x_L = zeros(T, problem.n)
        sqp.mult_x_U = zeros(T, problem.n)
        sqp.df = TD(undef, problem.n)
        sqp.E = TD(undef, problem.m)
        sqp.dE = TD(undef, length(problem.j_str))
        # Sparsity indices are copied from the (row, col) tuples in `j_str`.
        sqp.j_row = TI(undef, length(problem.j_str))
        sqp.j_col = TI(undef, length(problem.j_str))
        for i = 1:length(problem.j_str)
            sqp.j_row[i] = Int(problem.j_str[i][1])
            sqp.j_col[i] = Int(problem.j_str[i][2])
        end
        sqp.Jacobian =
            sparse(sqp.j_row, sqp.j_col, ones(length(sqp.j_row)), problem.m, problem.n)
        sqp.h_row = TI(undef, length(problem.h_str))
        sqp.h_col = TI(undef, length(problem.h_str))
        for i = 1:length(problem.h_str)
            sqp.h_row[i] = Int(problem.h_str[i][1])
            sqp.h_col[i] = Int(problem.h_str[i][2])
        end
        sqp.h_val = TD(undef, length(problem.h_str))
        sqp.Hessian =
            sparse(sqp.h_row, sqp.h_col, ones(length(sqp.h_row)), problem.n, problem.n)
        sqp.p_lambda = zeros(T, problem.m)
        sqp.p_mult_x_L = zeros(T, problem.n)
        sqp.p_mult_x_U = zeros(T, problem.n)
        sqp.E_soc = TD(undef, problem.m)
        sqp.soc = zeros(T, problem.n)
        sqp.phi = 1.0e+20
        sqp.μ = 1.0e+4
        sqp.Δ = 10.0
        sqp.Δ_min = 1.0e-4
        sqp.Δ_max = 1.0e+8
        sqp.step_acceptance = true
        sqp.prim_infeas = Inf
        sqp.dual_infeas = Inf
        # NOTE(review): `compl` (from `@sqp_fields`) is not initialized here —
        # confirm it is assigned before first use.
        sqp.options = problem.parameters
        sqp.optimizer = nothing
        sqp.sub_status = nothing
        sqp.feasibility_restoration = false
        sqp.iter = 1
        sqp.ret = -5 # :Optimize_not_called
        sqp.start_time = 0.0
        sqp.start_iter_time = 0.0
        sqp.tmpx = TD(undef, problem.n)
        sqp.tmpE = TD(undef, problem.m)
        return sqp
    end
end
"""
run!
Run the trust-region SQP algorithm
"""
function run!(sqp::AbstractSqpTrOptimizer)
sqp.μ = sqp.options.init_mu
sqp.Δ = sqp.options.tr_size
sqp.start_time = time()
if sqp.options.OutputFlag == 0
Logging.disable_logging(Logging.Info)
end
print_header(sqp)
# Find the initial point feasible to linear and bound constraints
lpviol = violation_of_linear_constraints(sqp, sqp.x)
if isnan(sqp.f)
sqp.problem.status = -13
return
elseif lpviol > sqp.options.tol_infeas
@info "Initial point not feasible to linear constraints..."
sub_optimize_lp!(sqp)
print(sqp, "LP")
else
@info "Initial point feasible to linear constraints..." lpviol
end
while true
# Iteration counter limit
if terminate_by_iterlimit(sqp)
break
end
sqp.start_iter_time = time()
# evaluate function, constraints, gradient, Jacobian
if sqp.step_acceptance
eval_functions!(sqp)
sqp.prim_infeas = norm_violations(sqp)
sqp.dual_infeas = KT_residuals(sqp)
end
# solve QP subproblem
QP_time = @elapsed compute_step!(sqp)
add_statistics(sqp.problem, "QP_time", QP_time)
if sqp.sub_status ∈ [MOI.OPTIMAL, MOI.ALMOST_OPTIMAL, MOI.ALMOST_LOCALLY_SOLVED, MOI.LOCALLY_SOLVED]
# do nothing
if sqp.Δ == sqp.Δ_max && isapprox(norm(sqp.p, Inf), sqp.Δ)
@info "Problem is possibly unbounded."
sqp.ret = 4
break
end
elseif sqp.sub_status ∈ [MOI.INFEASIBLE, MOI.LOCALLY_INFEASIBLE]
if sqp.feasibility_restoration == true
@info "Failed to find a feasible direction"
if sqp.prim_infeas <= sqp.options.tol_infeas
sqp.ret = 6
else
sqp.ret = 2
end
break
else
@info "Feasibility restoration starts... (status: $(sqp.sub_status))"
# println("Feasibility restoration ($(sqp.sub_status), |p| = $(norm(sqp.p, Inf))) begins.")
sqp.feasibility_restoration = true
print(sqp)
collect_statistics(sqp)
sqp.iter += 1
continue
end
else
sqp.ret == -3
if sqp.prim_infeas <= sqp.options.tol_infeas * 10.0
@info "Found a feasible solution... (status: $(sqp.sub_status))"
sqp.ret = 6
else
@info "Unexpected status from subproblem... (status: $(sqp.sub_status))"
end
break
end
if sqp.step_acceptance
sqp.phi = compute_phi(sqp, sqp.x, 0.0, sqp.p)
end
print(sqp)
collect_statistics(sqp)
if norm(sqp.p, Inf) <= sqp.options.tol_direction
if sqp.feasibility_restoration
sqp.feasibility_restoration = false
sqp.iter += 1
continue
else
sqp.ret = 0
break
end
end
if sqp.prim_infeas <= sqp.options.tol_infeas &&
sqp.dual_infeas <= sqp.options.tol_residual &&
!isapprox(sqp.Δ, norm(sqp.p, Inf)) &&
!sqp.feasibility_restoration
sqp.ret = 0
break
end
do_step!(sqp)
# NOTE: This is based on the algorithm of filterSQP.
if sqp.feasibility_restoration && sqp.step_acceptance
sqp.feasibility_restoration = false
end
sqp.iter += 1
end
sqp.problem.obj_val = sqp.problem.eval_f(sqp.x)
sqp.problem.status = Int(sqp.ret)
sqp.problem.x .= sqp.x
sqp.problem.g .= sqp.E
sqp.problem.mult_g .= -sqp.lambda
sqp.problem.mult_x_U .= -sqp.mult_x_U
sqp.problem.mult_x_L .= sqp.mult_x_L
add_statistic(sqp.problem, "iter", sqp.iter)
end
"""
    violation_of_linear_constraints(sqp, x)

Return the total (ℓ₁-style) violation of the linear constraints and variable
bounds at point `x`. As a side effect, evaluates the objective at `sqp.x`
(stored in `sqp.f`) and, when it is finite, the constraints at `x` (into
`sqp.E`).

# Note
Assumes the first `sqp.problem.num_linear_constraints` constraints are linear.
"""
function violation_of_linear_constraints(sqp::AbstractSqpTrOptimizer, x::TD)::T where {T, TD <: AbstractVector{T}}
    # Evaluate the objective first; NaN signals an evaluation failure, in which
    # case the constraints are not evaluated.
    sqp.f = sqp.problem.eval_f(sqp.x)
    isnan(sqp.f) || sqp.problem.eval_g(x, sqp.E)

    viol = 0.0
    # Violations of the linear constraints (assumed to come first).
    for i = 1:sqp.problem.num_linear_constraints
        viol += max(0.0, sqp.problem.g_L[i] - sqp.E[i])
        viol += max(0.0, sqp.E[i] - sqp.problem.g_U[i])
    end
    # Violations of the variable bounds.
    for i = 1:sqp.problem.n
        viol += max(0.0, sqp.problem.x_L[i] - x[i])
        viol += max(0.0, x[i] - sqp.problem.x_U[i])
    end
    return viol
end
"""
    sub_optimize_lp!(sqp)

Compute an initial point that is feasible to the linear constraints and
variable bounds by projecting the current iterate onto them (the subproblem
minimizes the squared distance to `sqp.x`). Updates `sqp.x`, multipliers, and
`sqp.sub_status` in place.

# Arguments
- `sqp`: SQP model struct
"""
function sub_optimize_lp!(sqp::AbstractSqpTrOptimizer)
    sqp.f = sqp.problem.eval_f(sqp.x)
    sqp.problem.eval_grad_f(sqp.x, sqp.df)
    eval_Jacobian!(sqp)

    # Solve the projection subproblem with a fresh model on the external
    # optimizer. (A dead `if 1 == 1 ... else ... end` wrapper that carried an
    # unreachable SubOptimizer-based alternative has been removed.)
    sqp.x, sqp.lambda, sqp.mult_x_U, sqp.mult_x_L, sqp.sub_status = sub_optimize_lp(
        sqp.options.external_optimizer,
        sqp.Jacobian, sqp.problem.g_L, sqp.problem.g_U,
        sqp.problem.x_L, sqp.problem.x_U, sqp.x,
        sqp.problem.num_linear_constraints, sqp.problem.m
    )

    # TODO: Do we need to discard small numbers?
    # NOTE(review): dropzeros! is a SparseArrays function — this assumes the
    # iterate/multiplier vectors are sparse (or a custom dropzeros! exists);
    # verify against the vector types used by the problem struct.
    dropzeros!(sqp.x)
    dropzeros!(sqp.lambda)
    dropzeros!(sqp.mult_x_U)
    dropzeros!(sqp.mult_x_L)
    return
end
"""
    sub_optimize!(sqp::AbstractSqpTrOptimizer)

Solve the trust-region QP subproblem. When the algorithm is in the feasibility
restoration phase, the restoration subproblem is solved instead.

# Arguments
- `sqp`: SQP model struct
"""
function sub_optimize!(sqp::AbstractSqpTrOptimizer)
    # Build the subproblem optimizer lazily on first use; afterwards only the
    # QP data needs refreshing.
    if isnothing(sqp.optimizer)
        sqp.optimizer = SubOptimizer(
            JuMP.Model(sqp.options.external_optimizer),
            QpData(sqp),
        )
        create_model!(sqp.optimizer, sqp.Δ)
    else
        sqp.optimizer.data = QpData(sqp)
    end

    # TODO: This can be modified to Sl1QP.
    return sqp.feasibility_restoration ?
        sub_optimize_FR!(sqp.optimizer, sqp.x, sqp.Δ) :
        sub_optimize!(sqp.optimizer, sqp.x, sqp.Δ)
end
"""
    sub_optimize_soc!(sqp::AbstractSqpTrOptimizer)

Solve the second-order correction QP subproblem and store the corrected step
in `sqp.p_soc`.

# Arguments
- `sqp`: SQP model struct
"""
function sub_optimize_soc!(sqp::AbstractSqpTrOptimizer)
    # Constraint values at the trial point, minus the linearized part:
    # E_soc = c(x + p) - J p.
    sqp.problem.eval_g(sqp.x + sqp.p, sqp.E_soc)
    sqp.E_soc -= sqp.Jacobian * sqp.p

    # Re-use the subproblem optimizer with the corrected constraint values.
    sqp.optimizer.data = QpData(
        MOI.MIN_SENSE,
        sqp.Hessian,
        sqp.df,
        sqp.Jacobian,
        sqp.E_soc,
        sqp.problem.g_L,
        sqp.problem.g_U,
        sqp.problem.x_L,
        sqp.problem.x_U,
        sqp.problem.num_linear_constraints
    )
    correction, _, _, _, _, _ = sub_optimize!(sqp.optimizer, sqp.x, sqp.Δ)

    # The corrected step is the original step plus the correction.
    sqp.p_soc .= sqp.p .+ correction
    return nothing
end
"""
    compute_step!(sqp::AbstractSqpTrOptimizer)

Compute the primal and dual step directions by solving the QP subproblem, and
update the penalty parameter μ so it dominates the current multipliers.

# Arguments
- `sqp`: SQP model struct
"""
function compute_step!(sqp::AbstractSqpTrOptimizer)
    @info "solve QP subproblem..."
    sqp.p, λ, νU, νL, sqp.p_slack, sqp.sub_status = sub_optimize!(sqp)

    # Dual step directions, relative to the current multipliers.
    sqp.p_lambda = λ - sqp.lambda
    sqp.p_mult_x_L = νL - sqp.mult_x_L
    sqp.p_mult_x_U = νU - sqp.mult_x_U

    # μ never decreases and must dominate all current multipliers.
    sqp.μ = max(sqp.μ, norm(sqp.lambda, Inf), norm(sqp.mult_x_L, Inf), norm(sqp.mult_x_U, Inf))
    @info "...found a direction"
end
"""
    compute_step_Sl1QP!

Compute the step direction with respect to primal and dual variables by solving
an elastic-mode (Sℓ₁QP) subproblem, increasing the penalty parameter μ until
the model constraint violation is driven down.

# Arguments
- `sqp`: SQP model struct

# Note
This is not currently used.
"""
function compute_step_Sl1QP!(sqp::AbstractSqpTrOptimizer)
    # Sufficient-reduction parameters; ϵ_2 is reserved for the (disabled)
    # q-model test and is currently unused.
    ϵ_1 = 0.9
    ϵ_2 = 0.1

    # Initial elastic QP solve.
    sqp.p, lambda, mult_x_U, mult_x_L, sqp.p_slack, sqp.sub_status = sub_optimize!(sqp)

    if sqp.sub_status ∈ [MOI.OPTIMAL, MOI.ALMOST_LOCALLY_SOLVED, MOI.LOCALLY_SOLVED]
        # m_0: current constraint violation; m_μ: violation of the QP model
        # (total slack used by the elastic subproblem).
        m_0 = norm_violations(sqp, 1)
        m_μ = 0.0
        for (_, slacks) in sqp.p_slack
            m_μ += sum(slacks)
        end

        if m_μ > 1.0e-8
            # Check whether the linearized constraints admit a feasible point.
            p, infeasibility = sub_optimize_infeas(sqp.optimizer, sqp.x, sqp.Δ)
            if infeasibility < 1.0e-8
                # Feasible linearization: raise μ until the elastic solution
                # uses (almost) no slack.
                while m_μ > 1.0e-8 && sqp.μ < sqp.options.max_mu
                    sqp.μ = min(10.0 * sqp.μ, sqp.options.max_mu)
                    sqp.p, lambda, mult_x_U, mult_x_L, sqp.p_slack, sqp.sub_status = sub_optimize_L1QP!(sqp.optimizer, sqp.x, sqp.Δ, sqp.μ)
                    m_μ = 0.0
                    for (_, slacks) in sqp.p_slack, s in slacks
                        m_μ += s
                    end
                    @info "L1QP solve for feasible QP" infeasibility sqp.μ sqp.sub_status m_μ
                end
            else
                # Infeasible linearization: raise μ until the reduction in
                # model violation is a fraction ϵ_1 of the best achievable.
                m_inf = norm_violations(
                    sqp.E + sqp.Jacobian * p,
                    sqp.problem.g_L,
                    sqp.problem.g_U,
                    sqp.x + p,
                    sqp.problem.x_L,
                    sqp.problem.x_U,
                    1,
                )
                while m_0 - m_μ < ϵ_1 * (m_0 - m_inf) && sqp.μ < sqp.options.max_mu
                    sqp.μ = min(10.0 * sqp.μ, sqp.options.max_mu)
                    sqp.p, lambda, mult_x_U, mult_x_L, sqp.p_slack, sqp.sub_status = sub_optimize_L1QP!(sqp.optimizer, sqp.x, sqp.Δ, sqp.μ)
                    m_μ = 0.0
                    for (_, slacks) in sqp.p_slack, s in slacks
                        m_μ += s
                    end
                    @info "L1QP solve for infeasible QP" infeasibility sqp.μ m_0 m_μ
                end
            end
        end
    else
        @error "Unexpected QP subproblem status $(sqp.sub_status)"
    end
    @info "...solved QP subproblem"

    # Dual step directions and final penalty update.
    sqp.p_lambda = lambda - sqp.lambda
    sqp.p_mult_x_L = mult_x_L - sqp.mult_x_L
    sqp.p_mult_x_U = mult_x_U - sqp.mult_x_U
    sqp.μ = max(sqp.μ, norm(sqp.lambda, Inf))
end
"""
    compute_qmodel(sqp, p, with_step=false)

Evaluate the quadratic model q(p) with ℓ₁ penalty term:

    q(p) = ∇fₖᵀp + 0.5 pᵀ ∇ₓₓ²Lₖ p + μ ∑ᵢ|cᵢ(xₖ) + ∇cᵢ(xₖ)ᵀp| + μ ∑ᵢ[cᵢ(xₖ) + ∇cᵢ(xₖ)ᵀp]⁻

# Arguments
- `sqp`: SQP model struct
- `p::TD`: direction vector
- `with_step::Bool`: `true` for q(p); `false` for q(0)

# Note
For p = 0 the model reduces to the penalized violation μ·‖viol(xₖ)‖.
Uses `sqp.tmpx`/`sqp.tmpE` as scratch buffers (overwritten on every call).
"""
function compute_qmodel(sqp::AbstractSqpTrOptimizer, p::TD, with_step::Bool = false) where {T, TD<:AbstractArray{T}}
    # Quadratic part of the model (zero when evaluating at p = 0).
    quad = with_step ? sqp.df' * p + 0.5 * p' * sqp.Hessian * p : 0.0

    # Trial point and linearized constraint values in the scratch buffers.
    if with_step
        sqp.tmpx .= sqp.x .+ p
        sqp.tmpE .= sqp.E .+ sqp.Jacobian * p
    else
        sqp.tmpx .= sqp.x
        sqp.tmpE .= sqp.E
    end

    penalty = sqp.μ * norm_violations(
        sqp.tmpE,
        sqp.problem.g_L,
        sqp.problem.g_U,
        sqp.tmpx,
        sqp.problem.x_L,
        sqp.problem.x_U,
        1,
    )
    return quad + penalty
end

# Convenience method: evaluate the model along the current step sqp.p.
compute_qmodel(sqp::AbstractSqpTrOptimizer, with_step::Bool = false) = compute_qmodel(sqp, sqp.p, with_step)
"""
    do_step!

Test the step `p`: accept it (updating iterate, multipliers, and possibly
enlarging the trust region) or reject it (optionally trying a second-order
correction, then shrinking the trust region).
"""
function do_step!(sqp::AbstractSqpTrOptimizer)
    # Actual reduction of the merit function at the trial point.
    ϕ_k = compute_phi(sqp, sqp.x, 1.0, sqp.p)
    ared = sqp.phi - ϕ_k

    # Predicted reduction from the quadratic model; kept at 1.0 in the
    # restoration phase so the ratio test degenerates to ared > 0.
    pred = 1.0
    if !sqp.feasibility_restoration
        q_0 = compute_qmodel(sqp, false)
        q_k = compute_qmodel(sqp, true)
        pred = q_0 - q_k
    end
    ρ = ared / pred
    if ared > 0 && ρ > 0
        # Accept: move primal and dual iterates along their step directions.
        sqp.x .+= sqp.p
        sqp.lambda .+= sqp.p_lambda
        sqp.mult_x_L .+= sqp.p_mult_x_L
        sqp.mult_x_U .+= sqp.p_mult_x_U
        # Step hit the trust-region boundary: enlarge the region.
        if isapprox(sqp.Δ, norm(sqp.p, Inf))
            sqp.Δ = min(2 * sqp.Δ, sqp.Δ_max)
        end
        sqp.step_acceptance = true
    else
        # Rejected: measure violation at the trial point to decide on a
        # second-order correction.
        sqp.tmpx .= sqp.x .+ sqp.p
        c_k = norm_violations(sqp, sqp.tmpx)

        perform_soc = false
        if sqp.options.use_soc
            if c_k > 0 && sqp.feasibility_restoration == false
                @info "Try second-order correction..."

                # sqp.p should be adjusted inside sub_optimize_soc!
                sub_optimize_soc!(sqp)

                ϕ_soc = compute_phi(sqp, sqp.x, 1.0, sqp.p_soc)
                ared = sqp.phi - ϕ_soc
                pred = 1.0
                if !sqp.feasibility_restoration
                    # q_0 was computed above: this branch is reached only when
                    # feasibility_restoration == false, so q_0 is defined.
                    q_soc = compute_qmodel(sqp, sqp.p_soc, true)
                    pred = q_0 - q_soc
                end
                ρ_soc = ared / pred
                if ared > 0 && ρ_soc > 0
                    @info "SOC" ϕ_k ϕ_soc ared pred ρ_soc
                    @info "...second-order correction added"
                    sqp.x .+= sqp.p_soc
                    sqp.lambda .+= sqp.p_lambda
                    sqp.mult_x_L .+= sqp.p_mult_x_L
                    sqp.mult_x_U .+= sqp.p_mult_x_U
                    sqp.step_acceptance = true
                    perform_soc = true
                else
                    @info "...second-order correction discarded"
                end
            end
        end

        if !perform_soc
            # Shrink the trust region, but never below a fraction of the
            # direction tolerance.
            sqp.Δ = max(0.5 * min(sqp.Δ, norm(sqp.p, Inf)), 0.1 * sqp.options.tol_direction)
            sqp.step_acceptance = false
        end
    end
end
# Print the column headers for the iteration log. No-op when output is
# suppressed (OutputFlag == 0). Column widths must match `print` below.
function print_header(sqp::AbstractSqpTrOptimizer)
    if sqp.options.OutputFlag == 0
        return
    end
    @printf(" %6s", "iter")
    @printf(" ")
    @printf(" %15s", "f(x_k)")
    @printf(" %15s", "ϕ(x_k)")
    @printf(" %15s", "μ")
    @printf(" %15s", "|λ|∞")
    @printf(" %14s", "Δ")
    @printf(" %14s", "|p|")
    @printf(" %14s", "inf_pr")
    @printf(" %14s", "inf_du")
    # @printf(" %14s", "compl")
    @printf(" %10s", "time")
    @printf("\n")
end
"""
    print

Print one line of iteration information. `status_mark` is a short tag shown in
the leftmost column (e.g. "LP"); it is overridden by "FR" while in feasibility
restoration. No-op when OutputFlag == 0.
"""
function print(sqp::AbstractSqpTrOptimizer, status_mark = "  ")
    if sqp.options.OutputFlag == 0
        return
    end
    # Re-print the header every 25 iterations.
    if sqp.iter > 1 && (sqp.iter - 1) % 25 == 0
        print_header(sqp)
    end
    st = ifelse(sqp.feasibility_restoration, "FR", status_mark)
    @printf("%2s%6d", st, sqp.iter)
    # "a" = previous step accepted, "r" = rejected.
    @printf("%1s", ifelse(sqp.step_acceptance, "a", "r"))
    # Internally the problem is minimized; flip the sign for max problems.
    objective_scale = sqp.problem.sense == :Min ? 1 : -1
    @printf(" %+6.8e", sqp.f * objective_scale)
    @printf(" %+6.8e", sqp.phi)
    @printf(" %+6.8e", sqp.μ)
    @printf(" %+6.8e", max(norm(sqp.lambda,Inf),norm(sqp.mult_x_L,Inf),norm(sqp.mult_x_U,Inf)))
    @printf(" %6.8e", sqp.Δ)
    @printf(" %6.8e", norm(sqp.p, Inf))
    if isinf(sqp.prim_infeas)
        @printf(" %14s", "Inf")
    else
        @printf(" %6.8e", sqp.prim_infeas)
    end
    if isinf(sqp.dual_infeas)
        @printf(" %14s", "Inf")
    else
        @printf(" %6.8e", sqp.dual_infeas)
    end
    @printf(" %10.2f", time() - sqp.start_time)
    @printf("\n")
end
"""
    collect_statistics

Record per-iteration statistics into the problem's statistics store. No-op
when StatisticsFlag == 0.
"""
function collect_statistics(sqp::AbstractSqpTrOptimizer)
    if sqp.options.StatisticsFlag == 0
        return
    end
    add_statistics(sqp.problem, "f(x)", sqp.f)
    add_statistics(sqp.problem, "ϕ(x_k))", sqp.phi)
    # NOTE(review): `directional_derivative` and `alpha` fields are not set
    # anywhere in this file — presumably maintained by a line-search variant;
    # confirm they are initialized before this is called.
    add_statistics(sqp.problem, "D(ϕ,p)", sqp.directional_derivative)
    add_statistics(sqp.problem, "|p|", norm(sqp.p, Inf))
    add_statistics(sqp.problem, "|J|2", norm(sqp.dE, 2))
    add_statistics(sqp.problem, "|J|inf", norm(sqp.dE, Inf))
    add_statistics(sqp.problem, "inf_pr", sqp.prim_infeas)
    add_statistics(sqp.problem, "inf_du", sqp.dual_infeas)
    add_statistics(sqp.problem, "alpha", sqp.alpha)
    add_statistics(sqp.problem, "iter_time", time() - sqp.start_iter_time)
    add_statistics(sqp.problem, "time_elapsed", time() - sqp.start_time)
end
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 574 | abstract type AbstractSubOptimizer end
"""
Data container for the (elastic) QP subproblem:

    sense 0.5 x'Qx + c'x + μ (s1 + s2)
    subject to
    c_lb <= Ax + b + s1 - s2 + s <= c_ub
    v_lb <= x + x_k <= v_ub
    -Δ <= x <= Δ
    s1 + max(0,s) >= 0
    s2 - min(0,s) >= 0
"""
struct QpData{T,Tv<:AbstractArray{T},Tm<:AbstractMatrix{T}}
    sense::MOI.OptimizationSense      # MIN_SENSE or MAX_SENSE
    Q::Union{Nothing,Tm}              # Hessian; `nothing` means a pure LP
    c::Tv                             # objective gradient
    A::Tm                             # constraint Jacobian
    b::Tv                             # constraint values at the current iterate
    c_lb::Tv                          # constraint lower bounds
    c_ub::Tv                          # constraint upper bounds
    v_lb::Tv                          # variable lower bounds
    v_ub::Tv                          # variable upper bounds
    num_linear_constraints::Int       # first rows of A are assumed linear
end
# Union of model back-ends a subproblem optimizer can be built on: a raw MOI
# optimizer (subproblem_MOI.jl) or a JuMP model (subproblem_JuMP.jl).
# NOTE(review): this is a non-const global binding; consider
# `const SubModel = Union{...}` for type stability.
SubModel = Union{
    MOI.AbstractOptimizer,
    JuMP.AbstractModel,
}
# Back-end specific implementations of the AbstractSubOptimizer interface.
include("subproblem_MOI.jl")
include("subproblem_JuMP.jl")
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 17060 | mutable struct QpJuMP{T,Tv<:AbstractArray{T},Tm<:AbstractMatrix{T}} <: AbstractSubOptimizer
model::JuMP.Model
data::QpData{T,Tv,Tm}
x::Vector{JuMP.VariableRef}
constr::Vector{JuMP.ConstraintRef}
rngbdcons::Vector{Int}
rngcons::Vector{Int}
slack_vars::Dict{Int,Vector{JuMP.VariableRef}}
function QpJuMP(model::JuMP.AbstractModel, data::QpData{T,Tv,Tm}) where {T,Tv,Tm}
qp = new{T,Tv,Tm}()
qp.model = model
qp.data = data
qp.x = []
qp.constr = []
qp.rngbdcons = []
qp.rngcons = []
qp.slack_vars = Dict()
return qp
end
end
# Dispatch helper: pairing a JuMP model with QP data yields a JuMP-backed
# subproblem optimizer.
function SubOptimizer(model::JuMP.AbstractModel, data::QpData{T,Tv,Tm}) where {T,Tv,Tm}
    return QpJuMP(model, data)
end
"""
    create_model!

Initialize the QP subproblem in a JuMP.Model. The model assumes that the first
`qp.data.num_linear_constraints` constraints are linear; slack variables are
introduced only for the remaining (nonlinear) constraints. Linear constraint
coefficients are written once here; nonlinear rows are refreshed each solve by
`modify_constraints!`.

# Arguments
- `qp`
- `Δ`: trust-region size
"""
function create_model!(
    qp::QpJuMP{T,Tv,Tm},
    Δ::T,
) where {T,Tv,Tm}
    qp.constr = []
    qp.rngbdcons = []
    qp.rngcons = []
    empty!(qp.slack_vars)
    n = length(qp.data.c)
    m = length(qp.data.c_lb)

    # create nominal variables
    qp.x = @variable(
        qp.model,
        [i = 1:n],
        base_name = "x",
    )
    set_trust_region!(qp, Δ)

    # add slack variables only for nonlinear constraints: u[i] always, v[i]
    # additionally when the constraint is two-sided
    for i = (qp.data.num_linear_constraints+1):m
        qp.slack_vars[i] = []
        push!(qp.slack_vars[i], @variable(qp.model, base_name = "u[$i]", lower_bound = 0.0))
        if qp.data.c_lb[i] > -Inf && qp.data.c_ub[i] < Inf
            push!(qp.slack_vars[i], @variable(qp.model, base_name = "v[$i]", lower_bound = 0.0))
        end
    end

    # dummy objective function (replaced by each solve routine)
    @objective(qp.model, Min, 0.0)

    # create affine constraints; bounds are shifted by the constraint value b
    for i = 1:m
        c_ub = qp.data.c_ub[i]
        c_lb = qp.data.c_lb[i]
        if abs(qp.data.b[i]) < Inf
            c_ub -= qp.data.b[i]
            c_lb -= qp.data.b[i]
        end
        if qp.data.c_lb[i] == qp.data.c_ub[i] #This means the constraint is equality
            if i <= qp.data.num_linear_constraints
                Arow = qp.data.A[i,:]
                push!(
                    qp.constr,
                    @constraint(qp.model, sum(v * qp.x[Arow.nzind[j]] for (j,v) in enumerate(Arow.nzval)) == c_lb)
                )
            else
                # Nonlinear rows start with slack terms only; the x-coefficients
                # are filled in by modify_constraints!.
                push!(qp.constr, @constraint(qp.model, qp.slack_vars[i][1] - qp.slack_vars[i][2] == c_lb))
            end
        elseif qp.data.c_lb[i] > -Inf && qp.data.c_ub[i] < Inf
            # Ranged constraint: add the >= side here, remember the index, and
            # add the <= side in the second loop below.
            if i <= qp.data.num_linear_constraints
                Arow = qp.data.A[i,:]
                push!(qp.constr, @constraint(qp.model, sum(v * qp.x[Arow.nzind[j]] for (j,v) in enumerate(Arow.nzval)) >= c_lb))
            else
                push!(qp.constr, @constraint(qp.model, qp.slack_vars[i][1] >= c_lb))
            end
            push!(qp.rngcons, i)
        elseif qp.data.c_lb[i] > -Inf
            if i <= qp.data.num_linear_constraints
                Arow = qp.data.A[i,:]
                push!(qp.constr, @constraint(qp.model, sum(v * qp.x[Arow.nzind[j]] for (j,v) in enumerate(Arow.nzval)) >= c_lb))
            else
                push!(qp.constr, @constraint(qp.model, qp.slack_vars[i][1] >= c_lb))
            end
        elseif qp.data.c_ub[i] < Inf
            if i <= qp.data.num_linear_constraints
                Arow = qp.data.A[i,:]
                push!(qp.constr, @constraint(qp.model, sum(v * qp.x[Arow.nzind[j]] for (j,v) in enumerate(Arow.nzval)) <= c_ub))
            else
                push!(qp.constr, @constraint(qp.model, -qp.slack_vars[i][1] <= c_ub))
            end
        end
    end

    # create the <= half of ranged affine constraints (appended after the
    # first m entries of qp.constr)
    for i in qp.rngcons
        c_ub = qp.data.c_ub[i] - qp.data.b[i]
        if i <= qp.data.num_linear_constraints
            Arow = qp.data.A[i,:]
            push!(qp.constr, @constraint(qp.model, sum(v * qp.x[Arow.nzind[j]] for (j,v) in enumerate(Arow.nzval)) <= c_ub))
        else
            push!(qp.constr, @constraint(qp.model, -qp.slack_vars[i][2] <= c_ub))
        end
    end
end
"""
    sub_optimize!(qp::QpJuMP, x_k, Δ)

Solve the trust-region QP subproblem centered at `x_k` with trust-region size
`Δ`. All elastic slacks are fixed to zero, so this is the plain (non-elastic)
subproblem. Returns `(Xsol, lambda, mult_x_U, mult_x_L, p_slack, status)`.
"""
function sub_optimize!(
    qp::QpJuMP{T,Tv,Tm},
    x_k::Tv,
    Δ::T,
) where {T,Tv,Tm}

    # dimension of LP
    m, n = size(qp.data.A)

    # modify objective function: c'x, plus 0.5 x'Qx when a Hessian is present
    if isnothing(qp.data.Q)
        @objective(qp.model, qp.data.sense,
            sum(qp.data.c[i] * qp.x[i] for i = 1:n)
        )
    else
        # The quadratic term is expanded over the nonzeros of Q (CSC storage).
        @objective(
            qp.model,
            qp.data.sense,
            sum(qp.data.c[i] * qp.x[i] for i = 1:n)
            + 0.5 * sum(
                qp.data.Q.nzval[i] * qp.x[qp.data.Q.rowval[i]] * qp.x[j]
                for j = 1:qp.data.Q.n for i in nzrange(qp.data.Q, j)
            )
        )
    end

    # fix slack variables to zeros (lower bounds must be removed first,
    # since JuMP.fix without force errors on bounded variables)
    for (_, slacks) in qp.slack_vars, s in slacks
        if JuMP.has_lower_bound(s)
            JuMP.delete_lower_bound(s)
        end
        JuMP.fix(s, 0.0)
    end

    set_trust_region!(qp, x_k, Δ)
    modify_constraints!(qp)

    JuMP.optimize!(qp.model)
    status = termination_status(qp.model)

    Xsol, lambda, mult_x_U, mult_x_L, p_slack = collect_solution!(qp, status)

    return Xsol, lambda, mult_x_U, mult_x_L, p_slack, status
end
"""
    sub_optimize_lp(optimizer, A, cl, cu, xl, xu, x_k, m, num_constraints)

Project `x_k` onto the feasible set of the first `m` (linear) constraints and
the variable bounds by minimizing the squared distance to `x_k` in a fresh
model. `num_constraints` is the total constraint count of the full problem and
sizes the returned multiplier vector (entries beyond `m` stay zero).
Returns `(Xsol, lambda, mult_x_U, mult_x_L, status)`.
"""
function sub_optimize_lp(
    optimizer,
    A::Tm, cl::Tv, cu::Tv,
    xl::Tv, xu::Tv, x_k::Tv,
    m::Int, num_constraints::Int
) where {T, Tv<:AbstractArray{T}, Tm<:AbstractMatrix{T}}
    n = length(x_k)

    model = JuMP.Model(optimizer)
    @variable(model, xl[i] <= x[i=1:n] <= xu[i])
    @objective(model, Min, sum((x[i] - x_k[i])^2 for i=1:n))
    constr = Vector{JuMP.ConstraintRef}(undef, m)
    # Rows of A are assumed sparse (nzind/nzval access).
    for i = 1:m
        arow = A[i,:]
        if cl[i] == cu[i]
            constr[i] = @constraint(model, sum(a * x[arow.nzind[j]] for (j, a) in enumerate(arow.nzval)) == cl[i])
        elseif cl[i] > -Inf && cu[i] < Inf
            constr[i] = @constraint(model, cl[i] <= sum(a * x[arow.nzind[j]] for (j, a) in enumerate(arow.nzval)) <= cu[i])
        elseif cl[i] > -Inf
            constr[i] = @constraint(model, sum(a * x[arow.nzind[j]] for (j, a) in enumerate(arow.nzval)) >= cl[i])
        elseif cu[i] < Inf
            constr[i] = @constraint(model, sum(a * x[arow.nzind[j]] for (j, a) in enumerate(arow.nzval)) <= cu[i])
        end
    end

    JuMP.optimize!(model)
    status = termination_status(model)

    Xsol = Tv(undef, n)
    lambda = zeros(T, num_constraints)
    mult_x_U = zeros(T, n)
    mult_x_L = zeros(T, n)

    if status ∈ [MOI.OPTIMAL, MOI.ALMOST_OPTIMAL, MOI.ALMOST_LOCALLY_SOLVED, MOI.LOCALLY_SOLVED]
        Xsol .= JuMP.value.(x)

        # extract the multipliers to constraints
        for i = 1:m
            lambda[i] = JuMP.dual(constr[i])
        end

        # extract the multipliers to column bounds; the sign of the reduced
        # cost decides which bound is active
        for i = 1:n
            redcost = JuMP.reduced_cost(x[i])
            if redcost > 0
                mult_x_L[i] = redcost
            elseif redcost < 0
                mult_x_U[i] = redcost
            end
        end
    elseif status ∈ [MOI.LOCALLY_INFEASIBLE, MOI.INFEASIBLE, MOI.DUAL_INFEASIBLE, MOI.NORM_LIMIT, MOI.OBJECTIVE_LIMIT]
        fill!(Xsol, 0.0)
        fill!(lambda, 0.0)
        fill!(mult_x_U, 0.0)
        fill!(mult_x_L, 0.0)
    else
        @error "Unexpected status: $(status)"
    end

    return Xsol, lambda, mult_x_U, mult_x_L, status
end
"""
    sub_optimize_lp(qp::QpJuMP, x_k)

Project `x_k` onto the constraints already present in the subproblem model by
minimizing the squared distance to `x_k` (with an unbounded trust region).
Returns `(Xsol, lambda, mult_x_U, mult_x_L, status)`.
"""
function sub_optimize_lp(
    qp::QpJuMP{T,Tv,Tm},
    x_k::Tv,
) where {T,Tv,Tm}

    # problem dimension
    m, n = size(qp.data.A)

    # modify objective function: squared distance to x_k
    @objective(qp.model, Min, sum((qp.x[i] - x_k[i])^2 for i=1:n))

    # modify slack variable bounds
    # NOTE(review): slack_vars is only populated for indices above
    # num_linear_constraints (see create_model!), so the fix-to-zero branch
    # below appears unreachable — confirm intent.
    for (i, slacks) in qp.slack_vars, s in slacks
        if JuMP.has_lower_bound(s)
            JuMP.delete_lower_bound(s)
        end
        if i <= qp.data.num_linear_constraints
            JuMP.fix(s, 0.0)
        end
    end

    # set initial variable values
    for i = 1:n
        JuMP.set_start_value(qp.x[i], x_k[i])
    end

    # Δ = Inf: trust region effectively disabled
    set_trust_region!(qp, Inf)
    modify_constraints!(qp)

    JuMP.optimize!(qp.model)
    status = termination_status(qp.model)

    Xsol, lambda, mult_x_U, mult_x_L, p_slack = collect_solution!(qp, status)

    return Xsol, lambda, mult_x_U, mult_x_L, status
end
"""
    sub_optimize_L1QP!(qp::QpJuMP, x_k, Δ, μ)

Solve the elastic-mode (ℓ₁-penalized) QP subproblem: the usual QP objective
plus μ times the total slack, with all slacks free (lower-bounded by zero).
Returns `(Xsol, lambda, mult_x_U, mult_x_L, p_slack, status)`.
"""
function sub_optimize_L1QP!(
    qp::QpJuMP{T,Tv,Tm},
    x_k::Tv,
    Δ::T,
    μ::T,
) where {T,Tv,Tm}

    # problem dimension
    m, n = size(qp.data.A)

    # modify objective function; the slack penalty is flipped for MAX problems
    # so it always penalizes slack usage
    obj_direction = ifelse(qp.data.sense == MOI.MIN_SENSE, 1.0, -1.0)
    if isnothing(qp.data.Q)
        @objective(qp.model, qp.data.sense,
            sum(qp.data.c[i] * qp.x[i] for i = 1:n)
            + obj_direction * μ * sum(s for (_, slacks) in qp.slack_vars, s in slacks)
        )
    else
        @objective(
            qp.model,
            qp.data.sense,
            sum(qp.data.c[i] * qp.x[i] for i = 1:n)
            + 0.5 * sum(
                qp.data.Q.nzval[i] * qp.x[qp.data.Q.rowval[i]] * qp.x[j]
                for j = 1:qp.data.Q.n for i in nzrange(qp.data.Q, j)
            )
            + obj_direction * μ * sum(s for (_, slacks) in qp.slack_vars for s in slacks)
        )
    end

    # modify slack variable bounds: unfix (they may be fixed from a previous
    # plain-QP solve) and restore the nonnegativity bound
    for (_, slacks) in qp.slack_vars, s in slacks
        if JuMP.is_fixed(s)
            JuMP.unfix(s)
        end
        set_lower_bound(s, 0.0)
    end

    set_trust_region!(qp, x_k, Δ)
    modify_constraints!(qp)

    JuMP.optimize!(qp.model)
    status = termination_status(qp.model)

    Xsol, lambda, mult_x_U, mult_x_L, p_slack = collect_solution!(qp, status)

    return Xsol, lambda, mult_x_U, mult_x_L, p_slack, status
end
"""
    sub_optimize_FR!(qp::QpJuMP, x_k, Δ)

Solve the QP subproblem for feasibility restoration: minimize the total slack.
Slacks of constraints already satisfied at the current point (b within
[c_lb, c_ub]) are fixed to zero; the others are freed with a nonnegativity
bound. Returns `(Xsol, lambda, mult_x_U, mult_x_L, p_slack, status)`.
"""
function sub_optimize_FR!(
    qp::QpJuMP{T,Tv,Tm},
    x_k::Tv,
    Δ::T,
) where {T,Tv,Tm}

    # dimension of LP
    m, n = size(qp.data.A)

    # modify objective function: total slack usage
    @objective(qp.model, Min, sum(s for (_, slacks) in qp.slack_vars, s in slacks))

    # modify slack variable bounds
    for (i, slacks) in qp.slack_vars
        if qp.data.b[i] >= qp.data.c_lb[i] && qp.data.b[i] <= qp.data.c_ub[i]
            # constraint satisfied: no slack allowed
            for s in slacks
                if JuMP.is_fixed(s) == false
                    JuMP.fix(s, 0.0, force = true)
                end
            end
        else
            # violated constraint: allow nonnegative slack
            for s in slacks
                if JuMP.is_fixed(s)
                    JuMP.unfix(s)
                end
                set_lower_bound(s, 0.0)
            end
        end
    end

    set_trust_region!(qp, x_k, Δ)
    modify_constraints!(qp)

    JuMP.optimize!(qp.model)
    status = termination_status(qp.model)

    Xsol, lambda, mult_x_U, mult_x_L, p_slack = collect_solution!(qp, status)

    return Xsol, lambda, mult_x_U, mult_x_L, p_slack, status
end
"""
    sub_optimize_infeas(qp::QpJuMP, x_k, Δ)

Compute the minimum infeasibility of the linearized model by minimizing the
total slack with all slacks freed. Returns `(Xsol, infeasibility)`, where
`infeasibility` is `Inf` when the solve is unsuccessful.
"""
function sub_optimize_infeas(
    qp::QpJuMP{T,Tv,Tm},
    x_k::Tv,
    Δ::T,
) where {T,Tv,Tm}
    # modify objective function: total slack usage
    @objective(qp.model, Min, sum(s for (_, slacks) in qp.slack_vars, s in slacks))

    # modify slack variable bounds: all slacks free and nonnegative
    for (_, slacks) in qp.slack_vars, s in slacks
        if JuMP.is_fixed(s)
            JuMP.unfix(s)
        end
        set_lower_bound(s, 0.0)
    end

    set_trust_region!(qp, x_k, Δ)
    modify_constraints!(qp)

    JuMP.optimize!(qp.model)
    status = termination_status(qp.model)

    Xsol = Tv(undef, length(qp.x))
    infeasibility = Inf
    if status ∈ [MOI.OPTIMAL, MOI.ALMOST_LOCALLY_SOLVED, MOI.LOCALLY_SOLVED]
        Xsol .= JuMP.value.(qp.x)
        infeasibility = JuMP.objective_value(qp.model)
    end

    return Xsol, infeasibility
end
"""
    set_trust_region!(x, v_lb, v_ub, Δ)

Set bounds on the step variables `x` to the intersection of `[-Δ, Δ]` with
`[v_lb, v_ub]`. If that intersection is empty for some variable, relax the
offending bound toward zero (a zero step is always admissible).
"""
function set_trust_region!(
    x::Vector{JuMP.VariableRef},
    v_lb::Tv,
    v_ub::Tv,
    Δ::T
) where {T,Tv}
    for i in eachindex(x)
        lo = max(-Δ, v_lb[i])
        hi = min(+Δ, v_ub[i])
        # Empty box: widen each side toward zero so lo <= hi holds.
        if lo > hi
            lo = max(-Δ, min(0.0, v_lb[i]))
            hi = min(+Δ, max(0.0, v_ub[i]))
        end
        set_lower_bound(x[i], lo)
        set_upper_bound(x[i], hi)
    end
end
# Trust region centered at x_k: bounds are shifted into step space.
set_trust_region!(qp::QpJuMP{T,Tv,Tm}, x_k::Tv, Δ::T) where {T,Tv,Tm} =
    set_trust_region!(qp.x, qp.data.v_lb - x_k, qp.data.v_ub - x_k, Δ)
# Trust region in absolute variable space (no recentering).
set_trust_region!(qp::QpJuMP{T,Tv,Tm}, Δ::T) where {T,Tv,Tm} =
    set_trust_region!(qp.x, qp.data.v_lb, qp.data.v_ub, Δ)
"""
    modify_constraints!(qp::QpJuMP)

Refresh the subproblem's affine constraints in place from `qp.data`: update
the coefficients of nonlinear rows (linear rows never change) and the RHS of
every constraint, including the appended `<=` halves of ranged constraints.
"""
function modify_constraints!(qp::QpJuMP{T,Tv,Tm}) where {T,Tv,Tm}

    # problem dimension
    m, n = size(qp.data.A)

    # modify the nonlinear constraint coefficients (iterate A's CSC nonzeros)
    for j = 1:qp.data.A.n, i in nzrange(qp.data.A, j)
        if qp.data.A.rowval[i] > qp.data.num_linear_constraints
            set_normalized_coefficient(
                qp.constr[qp.data.A.rowval[i]],
                qp.x[j],
                qp.data.A.nzval[i],
            )
        end
    end

    # modify the coefficients for the other (<=) part of ranged constraints,
    # which live at positions m+1, m+2, ... of qp.constr
    for (ind, val) in enumerate(qp.rngcons)
        if val > qp.data.num_linear_constraints
            row_of_A = qp.data.A[val, :]
            for (i,j) = enumerate(row_of_A.nzind)
                set_normalized_coefficient(qp.constr[m+ind], qp.x[j], row_of_A.nzval[i])
            end
        end
    end

    # modify RHS; bounds are shifted by the constraint value b
    for i in 1:m
        c_ub = qp.data.c_ub[i] - qp.data.b[i]
        c_lb = qp.data.c_lb[i] - qp.data.b[i]
        if qp.data.c_lb[i] == qp.data.c_ub[i]
            set_normalized_rhs(qp.constr[i], c_lb)
        elseif qp.data.c_lb[i] > -Inf && qp.data.c_ub[i] < Inf
            # ranged: the first m entries hold the >= side
            set_normalized_rhs(qp.constr[i], c_lb)
        elseif qp.data.c_lb[i] > -Inf
            set_normalized_rhs(qp.constr[i], c_lb)
        elseif qp.data.c_ub[i] < Inf
            set_normalized_rhs(qp.constr[i], c_ub)
        end
    end

    # modify the RHS for the other (<=) part of ranged constraints
    for (i, val) in enumerate(qp.rngcons)
        c_ub = qp.data.c_ub[val] - qp.data.b[val]
        set_normalized_rhs(qp.constr[i+m], c_ub)
    end
end
"""
    collect_solution!(qp::QpJuMP, status)

Extract the subproblem solution for the given termination `status`: primal
values, slack values, constraint duals (ranged halves summed into one
multiplier), and bound multipliers split by the sign of the reduced cost.
On infeasible/unbounded statuses everything is zeroed; on iteration limit or
unexpected statuses a warning is logged and the (partially uninitialized)
vectors are returned as-is.
"""
function collect_solution!(qp::QpJuMP{T,Tv,Tm}, status) where {T,Tv,Tm}

    # problem dimension
    m, n = size(qp.data.A)

    Xsol = Tv(undef, n)
    p_slack = Dict{Int,Vector{Float64}}()
    lambda = Tv(undef, m)
    mult_x_U = zeros(T, n)
    mult_x_L = zeros(T, n)

    if status ∈ [MOI.OPTIMAL, MOI.ALMOST_OPTIMAL, MOI.ALMOST_LOCALLY_SOLVED, MOI.LOCALLY_SOLVED]
        Xsol .= JuMP.value.(qp.x)
        for (i, slacks) in qp.slack_vars
            p_slack[i] = JuMP.value.(slacks)
        end

        # extract the multipliers to constraints
        for i = 1:m
            lambda[i] = JuMP.dual(qp.constr[i])
        end
        # ranged constraints: add the dual of the appended <= half
        for (i, val) in enumerate(qp.rngcons)
            lambda[val] += JuMP.dual(qp.constr[i+m])
        end

        # extract the multipliers to column bounds; sign of the reduced cost
        # determines which bound is active
        for i = 1:n
            redcost = JuMP.reduced_cost(qp.x[i])
            if redcost > 0
                mult_x_L[i] = redcost
            elseif redcost < 0
                mult_x_U[i] = redcost
            end
        end
    elseif status ∈ [MOI.LOCALLY_INFEASIBLE, MOI.INFEASIBLE, MOI.DUAL_INFEASIBLE, MOI.NORM_LIMIT, MOI.OBJECTIVE_LIMIT]
        fill!(Xsol, 0.0)
        fill!(lambda, 0.0)
        fill!(mult_x_U, 0.0)
        fill!(mult_x_L, 0.0)
    elseif status == MOI.ITERATION_LIMIT
        @warn "Solution status: $(status)"
    else
        @warn "Unexpected status: $(status)"
    end

    return Xsol, lambda, mult_x_U, mult_x_L, p_slack
end
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 19658 | mutable struct QpModel{T,Tv<:AbstractArray{T},Tm<:AbstractMatrix{T}} <: AbstractSubOptimizer
model::MOI.AbstractOptimizer
data::QpData{T,Tv,Tm}
adj::Vector{Int}
x::Vector{MOI.VariableIndex}
constr_v_ub::Vector{MOI.ConstraintIndex}
constr_v_lb::Vector{MOI.ConstraintIndex}
constr::Vector{MOI.ConstraintIndex}
slack_vars::Dict{Int,Vector{MOI.VariableIndex}}
constr_slack::Vector{MOI.ConstraintIndex}
function QpModel(
model::MOI.AbstractOptimizer,
data::QpData{T,Tv,Tm},
) where {T,Tv,Tm}
qp = new{T,Tv,Tm}()
qp.model = model
qp.data = data
qp.adj = []
qp.x = []
qp.constr_v_ub = []
qp.constr_v_lb = []
qp.constr = []
qp.constr_slack = []
qp.slack_vars = Dict()
return qp
end
end
# Dispatch helper: pairing a raw MOI optimizer with QP data yields an
# MOI-backed subproblem optimizer.
function SubOptimizer(model::MOI.AbstractOptimizer, data::QpData{T,Tv,Tm}) where {T,Tv,Tm}
    return QpModel(model, data)
end
"""
    create_model!(qp::QpModel, x_k, Δ, tol_error=0.0)

Build the QP subproblem from scratch on the MOI optimizer: variables, slacks
with nonnegativity constraints, the (linear or quadratic) objective, variable
bound constraints intersected with the trust region `[-Δ, Δ]` around `x_k`,
and the affine constraints (ranged ones split into a `>=` and a `<=` half,
with the `<=` halves appended last). Bounds with magnitude at most `tol_error`
are snapped to zero.
"""
function create_model!(qp::QpModel{T,Tv,Tm}, x_k::Tv, Δ::T, tol_error = 0.0) where {T,Tv,Tm}

    # empty optimizer just in case
    MOI.empty!(qp.model)
    qp.adj = []
    qp.constr_v_ub = []
    qp.constr_v_lb = []
    qp.constr = []
    qp.constr_slack = []
    empty!(qp.slack_vars)

    n = length(qp.data.c)
    m = length(qp.data.c_lb)
    @assert n > 0
    @assert m >= 0
    @assert length(qp.data.c) == n
    @assert length(qp.data.c_lb) == m
    @assert length(qp.data.c_ub) == m
    @assert length(qp.data.v_lb) == n
    @assert length(qp.data.v_ub) == n
    @assert length(x_k) == n

    # variables
    qp.x = MOI.add_variables(qp.model, n)

    # objective function: start with the linear terms c'x
    obj_terms = Array{MOI.ScalarAffineTerm{T},1}()
    for i = 1:n
        push!(obj_terms, MOI.ScalarAffineTerm{T}(qp.data.c[i], MOI.VariableIndex(i)))
    end

    for i = 1:m
        # add slack variables: one always, a second for two-sided constraints
        qp.slack_vars[i] = []
        push!(qp.slack_vars[i], MOI.add_variable(qp.model))
        if qp.data.c_lb[i] > -Inf && qp.data.c_ub[i] < Inf
            push!(qp.slack_vars[i], MOI.add_variable(qp.model))
        end

        # Set slack bounds and objective coefficient (unit penalty per slack).
        # NOTE(review): `MOI.VariableIndex(qp.slack_vars[i][1])` wraps an
        # existing VariableIndex — confirm this matches the MOI version in use.
        push!(
            qp.constr_slack,
            MOI.add_constraint(
                qp.model,
                MOI.VariableIndex(qp.slack_vars[i][1]),
                MOI.GreaterThan(0.0),
            ),
        )
        push!(obj_terms, MOI.ScalarAffineTerm{T}(1.0, qp.slack_vars[i][1]))
        if length(qp.slack_vars[i]) == 2
            push!(
                qp.constr_slack,
                MOI.add_constraint(
                    qp.model,
                    MOI.VariableIndex(qp.slack_vars[i][2]),
                    MOI.GreaterThan(0.0),
                ),
            )
            push!(obj_terms, MOI.ScalarAffineTerm{T}(1.0, qp.slack_vars[i][2]))
        end
    end

    # set objective function (affine for LP, quadratic when Q is provided;
    # only the lower triangle of Q is passed, per MOI convention)
    if isnothing(qp.data.Q)
        MOI.set(
            qp.model,
            MOI.ObjectiveFunction{MOI.ScalarAffineFunction{T}}(),
            MOI.ScalarAffineFunction(obj_terms, 0.0),
        )
    else
        Q_terms = Array{MOI.ScalarQuadraticTerm{T},1}()
        for j = 1:qp.data.Q.n, i in nzrange(qp.data.Q, j)
            if i >= j
                push!(
                    Q_terms,
                    MOI.ScalarQuadraticTerm{T}(
                        qp.data.Q.nzval[i],
                        MOI.VariableIndex(qp.data.Q.rowval[i]),
                        MOI.VariableIndex(j)
                    )
                )
            end
        end
        MOI.set(qp.model,
            MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{T}}(),
            MOI.ScalarQuadraticFunction(obj_terms, Q_terms, 0.0))
    end
    MOI.set(qp.model, MOI.ObjectiveSense(), qp.data.sense)

    # variable bounds intersected with the trust region, in step space
    for i = 1:n
        ub = min(Δ, qp.data.v_ub[i] - x_k[i])
        lb = max(-Δ, qp.data.v_lb[i] - x_k[i])
        ub = (abs(ub) <= tol_error) ? 0.0 : ub
        lb = (abs(lb) <= tol_error) ? 0.0 : lb
        push!(
            qp.constr_v_ub,
            MOI.add_constraint(qp.model, MOI.VariableIndex(qp.x[i]), MOI.LessThan(ub)),
        )
        push!(
            qp.constr_v_lb,
            MOI.add_constraint(qp.model, MOI.VariableIndex(qp.x[i]), MOI.GreaterThan(lb)),
        )
    end

    # affine constraints; each starts with slack terms only (the x-part is
    # expected to be filled in elsewhere)
    for i = 1:m
        c_ub = qp.data.c_ub[i] - qp.data.b[i]
        c_lb = qp.data.c_lb[i] - qp.data.b[i]
        c_ub = (abs(c_ub) <= tol_error) ? 0.0 : c_ub
        c_lb = (abs(c_lb) <= tol_error) ? 0.0 : c_lb
        if qp.data.c_lb[i] == qp.data.c_ub[i] #This means the constraint is equality
            push!(
                qp.constr,
                MOI.add_constraint(
                    qp.model,
                    MOI.ScalarAffineFunction(
                        MOI.ScalarAffineTerm.(
                            [1.0; -1.0],
                            [qp.slack_vars[i][1]; qp.slack_vars[i][2]],
                        ),
                        0.0,
                    ),
                    MOI.EqualTo(c_lb),
                ),
            )
        elseif qp.data.c_lb[i] != -Inf &&
               qp.data.c_ub[i] != Inf &&
               qp.data.c_lb[i] < qp.data.c_ub[i]
            # ranged: >= side here, <= side added in the loop over qp.adj below
            push!(
                qp.constr,
                MOI.add_constraint(
                    qp.model,
                    MOI.ScalarAffineFunction(
                        MOI.ScalarAffineTerm.([1.0], [qp.slack_vars[i][1]]),
                        0.0,
                    ),
                    MOI.GreaterThan(c_lb),
                ),
            )
            push!(qp.adj, i)
        elseif qp.data.c_lb[i] != -Inf
            push!(
                qp.constr,
                MOI.add_constraint(
                    qp.model,
                    MOI.ScalarAffineFunction(
                        MOI.ScalarAffineTerm.([1.0], [qp.slack_vars[i][1]]),
                        0.0,
                    ),
                    MOI.GreaterThan(c_lb),
                ),
            )
        elseif qp.data.c_ub[i] != Inf
            push!(
                qp.constr,
                MOI.add_constraint(
                    qp.model,
                    MOI.ScalarAffineFunction(
                        MOI.ScalarAffineTerm.([-1.0], [qp.slack_vars[i][1]]),
                        0.0,
                    ),
                    MOI.LessThan(c_ub),
                ),
            )
        end
    end

    # the <= halves of ranged constraints, appended after the first m
    for i in qp.adj
        c_ub = qp.data.c_ub[i] - qp.data.b[i]
        c_ub = (abs(c_ub) <= tol_error) ? 0.0 : c_ub
        push!(
            qp.constr,
            MOI.add_constraint(
                qp.model,
                MOI.ScalarAffineFunction(
                    MOI.ScalarAffineTerm.([-1.0], [qp.slack_vars[i][2]]),
                    0.0,
                ),
                MOI.LessThan(c_ub),
            ),
        )
    end
end
"""
    sub_optimize!
Solve subproblem
# Arguments
- `qp`: QP model
- `x_k`: trust region center
- `Δ`: trust region size
- `feasibility`: indicator for feasibility restoration phase
- `tol_error`: threshold to drop small numbers to zeros
# Returns
Tuple `(Xsol, lambda, mult_x_U, mult_x_L, p_slack, status)`: primal solution,
constraint multipliers, multipliers for variable upper and lower bounds, slack
variable values keyed by constraint index, and the MOI termination status.
"""
function sub_optimize!(
    qp::QpModel{T,Tv,Tm},
    x_k::Tv,
    Δ::T,
    feasibility = false,
    tol_error = 0.0,
) where {T,Tv,Tm}
    # dimension of LP
    m, n = size(qp.data.A)
    @assert n > 0
    @assert m >= 0
    @assert length(qp.data.c) == n
    @assert length(qp.data.c_lb) == m
    @assert length(qp.data.c_ub) == m
    @assert length(qp.data.v_lb) == n
    @assert length(qp.data.v_ub) == n
    @assert length(x_k) == n
    # Local copy of the RHS; shifted by constraint violations in the feasibility phase below
    b = deepcopy(qp.data.b)
    if feasibility
        # Feasibility restoration phase: replace the true objective by the sum of slacks
        if isnothing(qp.data.Q)
            # modify objective coefficient
            for i = 1:n
                MOI.modify(
                    qp.model,
                    MOI.ObjectiveFunction{MOI.ScalarAffineFunction{T}}(),
                    MOI.ScalarCoefficientChange(MOI.VariableIndex(i), 0.0),
                )
            end
            # modify slack objective coefficient
            for (_, slacks) in qp.slack_vars, s in slacks
                MOI.modify(
                    qp.model,
                    MOI.ObjectiveFunction{MOI.ScalarAffineFunction{T}}(),
                    MOI.ScalarCoefficientChange(s, 1.0),
                )
            end
        else
            # Set new QP objective function again
            obj_terms = Array{MOI.ScalarAffineTerm{T},1}()
            for (_, slacks) in qp.slack_vars, s in slacks
                push!(obj_terms, MOI.ScalarAffineTerm{T}(1.0, s))
            end
            MOI.set(
                qp.model,
                MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{T}}(),
                MOI.ScalarQuadraticFunction(
                    obj_terms,
                    Array{MOI.ScalarQuadraticTerm{T},1}(),
                    0.0
                )
            )
        end
        # set optimization sense
        MOI.set(qp.model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
        # If any slack constraint is currently an EqualTo set, the set *type* must change,
        # which requires MOI.transform rather than MOI.set
        do_transform = false
        for cons in qp.constr_slack
            if typeof(cons) == MOI.ConstraintIndex{MOI.VariableIndex,MOI.EqualTo{T}}
                do_transform = true
                break
            end
        end
        # set slack variable bounds
        constr_index = 1
        for i = 1:m
            # Adjust parameters for feasibility problem
            viol = 0.0
            if qp.data.b[i] > qp.data.c_ub[i]
                viol = qp.data.c_ub[i] - qp.data.b[i]
            elseif qp.data.b[i] < qp.data.c_lb[i]
                viol = qp.data.c_lb[i] - qp.data.b[i]
            end
            b[i] -= abs(viol)
            # Add bound constraints
            if length(qp.slack_vars[i]) == 2
                # Two slacks (ranged constraint): the violated side receives the
                # violation as its lower bound, the other side stays at zero
                if viol < 0
                    if do_transform
                        qp.constr_slack[constr_index] = MOI.transform(
                            qp.model,
                            qp.constr_slack[constr_index],
                            MOI.GreaterThan(0.0),
                        )
                    else
                        MOI.set(
                            qp.model,
                            MOI.ConstraintSet(),
                            qp.constr_slack[constr_index],
                            MOI.GreaterThan(0.0),
                        )
                    end
                    constr_index += 1
                    if do_transform
                        qp.constr_slack[constr_index] = MOI.transform(
                            qp.model,
                            qp.constr_slack[constr_index],
                            MOI.GreaterThan(viol),
                        )
                    else
                        MOI.set(
                            qp.model,
                            MOI.ConstraintSet(),
                            qp.constr_slack[constr_index],
                            MOI.GreaterThan(viol),
                        )
                    end
                    constr_index += 1
                else
                    if do_transform
                        qp.constr_slack[constr_index] = MOI.transform(
                            qp.model,
                            qp.constr_slack[constr_index],
                            MOI.GreaterThan(-viol),
                        )
                    else
                        MOI.set(
                            qp.model,
                            MOI.ConstraintSet(),
                            qp.constr_slack[constr_index],
                            MOI.GreaterThan(-viol),
                        )
                    end
                    constr_index += 1
                    if do_transform
                        qp.constr_slack[constr_index] = MOI.transform(
                            qp.model,
                            qp.constr_slack[constr_index],
                            MOI.GreaterThan(0.0),
                        )
                    else
                        MOI.set(
                            qp.model,
                            MOI.ConstraintSet(),
                            qp.constr_slack[constr_index],
                            MOI.GreaterThan(0.0),
                        )
                    end
                    constr_index += 1
                end
            elseif length(qp.slack_vars[i]) == 1
                if do_transform
                    qp.constr_slack[constr_index] = MOI.transform(
                        qp.model,
                        qp.constr_slack[constr_index],
                        MOI.GreaterThan(-abs(viol)),
                    )
                else
                    MOI.set(
                        qp.model,
                        MOI.ConstraintSet(),
                        qp.constr_slack[constr_index],
                        MOI.GreaterThan(-abs(viol)),
                    )
                end
                # @show i, viol, length(qp.slack_vars[i]), qp.constr_slack[constr_index]
                constr_index += 1
            else
                @error "unexpected slack_vars"
            end
        end
    else
        # Normal phase: restore the original (linear or quadratic) objective
        if isnothing(qp.data.Q)
            # modify objective coefficient
            for i = 1:n
                MOI.modify(
                    qp.model,
                    MOI.ObjectiveFunction{MOI.ScalarAffineFunction{T}}(),
                    MOI.ScalarCoefficientChange(MOI.VariableIndex(i), qp.data.c[i]),
                )
            end
            # set slack objective coefficient
            for (_, slacks) in qp.slack_vars, s in slacks
                MOI.modify(
                    qp.model,
                    MOI.ObjectiveFunction{MOI.ScalarAffineFunction{T}}(),
                    MOI.ScalarCoefficientChange(s, 0.0),
                )
            end
        else
            # Set new QP objective function again
            obj_terms = Array{MOI.ScalarAffineTerm{T},1}()
            for i = 1:n
                push!(obj_terms, MOI.ScalarAffineTerm{T}(qp.data.c[i], MOI.VariableIndex(i)))
            end
            for (_, slacks) in qp.slack_vars, s in slacks
                push!(obj_terms, MOI.ScalarAffineTerm{T}(0.0, s))
            end
            Q_terms = Array{MOI.ScalarQuadraticTerm{T},1}()
            # Only one triangle of Q is passed on (entries with i >= j)
            for j = 1:qp.data.Q.n, i in nzrange(qp.data.Q, j)
                if i >= j
                    push!(
                        Q_terms,
                        MOI.ScalarQuadraticTerm{T}(
                            qp.data.Q.nzval[i],
                            MOI.VariableIndex(qp.data.Q.rowval[i]),
                            MOI.VariableIndex(j)
                        )
                    )
                end
            end
            MOI.set(
                qp.model,
                MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{T}}(),
                MOI.ScalarQuadraticFunction(obj_terms, Q_terms, 0.0)
            )
        end
        # set optimization sense
        MOI.set(qp.model, MOI.ObjectiveSense(), qp.data.sense)
        # set slack variable bounds
        do_transform = false
        for cons in qp.constr_slack
            if typeof(cons) != MOI.ConstraintIndex{MOI.VariableIndex,MOI.EqualTo{T}}
                do_transform = true
                break
            end
        end
        if do_transform
            # Pin all slacks back to zero for the regular (non-restoration) subproblem
            for i in eachindex(qp.constr_slack)
                qp.constr_slack[i] =
                    MOI.transform(qp.model, qp.constr_slack[i], MOI.EqualTo(0.0))
            end
        end
    end
    # set variable bounds
    for i = 1:n
        ub = min(Δ, qp.data.v_ub[i] - x_k[i])
        lb = max(-Δ, qp.data.v_lb[i] - x_k[i])
        ub = (abs(ub) <= tol_error) ? 0.0 : ub
        lb = (abs(lb) <= tol_error) ? 0.0 : lb
        MOI.set(qp.model, MOI.ConstraintSet(), qp.constr_v_ub[i], MOI.LessThan(ub))
        MOI.set(qp.model, MOI.ConstraintSet(), qp.constr_v_lb[i], MOI.GreaterThan(lb))
    end
    # @show Δ, qp.data.v_lb, qp.data.v_ub, x_k
    # modify the constraint coefficients
    for j = 1:qp.data.A.n, i in nzrange(qp.data.A, j)
        coeff = abs(qp.data.A.nzval[i]) <= tol_error ? 0.0 : qp.data.A.nzval[i]
        MOI.modify(
            qp.model,
            qp.constr[qp.data.A.rowval[i]],
            MOI.ScalarCoefficientChange(MOI.VariableIndex(j), coeff),
        )
    end
    # Extra rows for two-sided constraints (indices in qp.adj) live at m+1:m+length(qp.adj)
    for (ind, val) in enumerate(qp.adj)
        row_of_A = qp.data.A[val, :]
        for i = 1:row_of_A.n
            j = row_of_A.nzind[i]
            coeff = abs(row_of_A.nzval[i]) <= tol_error ? 0.0 : row_of_A.nzval[i]
            MOI.modify(
                qp.model,
                qp.constr[m+ind],
                MOI.ScalarCoefficientChange(MOI.VariableIndex(j), coeff),
            )
        end
    end
    # modify RHS
    for i = 1:m
        c_ub = qp.data.c_ub[i] - b[i]
        c_lb = qp.data.c_lb[i] - b[i]
        c_ub = (abs(c_ub) <= tol_error) ? 0.0 : c_ub
        c_lb = (abs(c_lb) <= tol_error) ? 0.0 : c_lb
        if qp.data.c_lb[i] == qp.data.c_ub[i]
            MOI.set(qp.model, MOI.ConstraintSet(), qp.constr[i], MOI.EqualTo(c_lb))
        elseif qp.data.c_lb[i] != -Inf &&
               qp.data.c_ub[i] != Inf &&
               qp.data.c_lb[i] < qp.data.c_ub[i]
            MOI.set(qp.model, MOI.ConstraintSet(), qp.constr[i], MOI.GreaterThan(c_lb))
        elseif qp.data.c_lb[i] != -Inf
            MOI.set(qp.model, MOI.ConstraintSet(), qp.constr[i], MOI.GreaterThan(c_lb))
        elseif qp.data.c_ub[i] != Inf
            MOI.set(qp.model, MOI.ConstraintSet(), qp.constr[i], MOI.LessThan(c_ub))
        end
    end
    # NOTE(review): leftover debug output — prints on every call; remove or demote to @debug
    @show qp.data.c_lb-b, qp.data.c_ub-b, b
    for (i, val) in enumerate(qp.adj)
        c_ub = qp.data.c_ub[val] - b[val]
        c_ub = (abs(c_ub) <= tol_error) ? 0.0 : c_ub
        MOI.set(qp.model, MOI.ConstraintSet(), qp.constr[i+m], MOI.LessThan(c_ub))
    end
    # dest = MOI.FileFormats.Model(format = MOI.FileFormats.FORMAT_LP)
    # MOI.copy_to(dest, qp.model)
    # MOI.write_to_file(dest, "debug_moi.lp")
    MOI.optimize!(qp.model)
    status = MOI.get(qp.model, MOI.TerminationStatus())
    # TODO: These can be part of data.
    Xsol = Tv(undef, n)
    p_slack = Dict{Int,Vector{Float64}}()
    lambda = Tv(undef, m)
    mult_x_U = Tv(undef, n)
    mult_x_L = Tv(undef, n)
    if status == MOI.OPTIMAL
        # @show MOI.get(qp.model, MOI.ObjectiveValue())
        Xsol .= MOI.get(qp.model, MOI.VariablePrimal(), qp.x)
        for (i, slacks) in qp.slack_vars
            p_slack[i] = MOI.get(qp.model, MOI.VariablePrimal(), slacks)
        end
        # NOTE(review): leftover debug output — prints on every call; remove or demote to @debug
        @show MOI.get(qp.model, MOI.ObjectiveValue()), Xsol
        # @show p_slack
        # extract the multipliers to constraints
        for i = 1:m
            lambda[i] = MOI.get(qp.model, MOI.ConstraintDual(1), qp.constr[i])
        end
        # Fold duals of the duplicated (two-sided) rows back onto their original rows
        for (i, val) in enumerate(qp.adj)
            lambda[val] += MOI.get(qp.model, MOI.ConstraintDual(1), qp.constr[i+m])
        end
        # @show MOI.get(qp.model, MOI.ConstraintDual(1), qp.constr)
        # extract the multipliers to column bounds
        mult_x_U .= MOI.get(qp.model, MOI.ConstraintDual(1), qp.constr_v_ub)
        mult_x_L .= MOI.get(qp.model, MOI.ConstraintDual(1), qp.constr_v_lb)
        # careful because of the trust region
        for j = 1:n
            if Xsol[j] < qp.data.v_ub[j] - x_k[j]
                mult_x_U[j] = 0.0
            end
            if Xsol[j] > qp.data.v_lb[j] - x_k[j]
                mult_x_L[j] = 0.0
            end
        end
    elseif status == MOI.DUAL_INFEASIBLE
        @error "Trust region must be employed."
    elseif status == MOI.INFEASIBLE
        fill!(Xsol, 0.0)
        fill!(lambda, 0.0)
        fill!(mult_x_U, 0.0)
        fill!(mult_x_L, 0.0)
    else
        @error "Unexpected status: $(status)"
    end
    return Xsol, lambda, mult_x_U, mult_x_L, p_slack, status
end
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 1797 | module TestMOIWrapper
using SqpSolver
using Ipopt
using JuMP
using Test
const MOI = SqpSolver.MathOptInterface
const MOIT = MOI.Test
const MOIU = MOI.Utilities
const MOIB = MOI.Bridges
# Shared optimizer instance under test, using Ipopt as the inner (QP) solver
const optimizer = SqpSolver.Optimizer()
const ipopt_optimizer = optimizer_with_attributes(
    Ipopt.Optimizer,
    "print_level" => 0,
    "mu_strategy" => "adaptive",
    "warm_start_init_point" => "yes",
)
MOI.set(optimizer, MOI.RawOptimizerAttribute("external_optimizer"), ipopt_optimizer)
MOI.set(optimizer, MOI.RawOptimizerAttribute("max_iter"), 1000)
MOI.set(optimizer, MOI.RawOptimizerAttribute("OutputFlag"), 1)
# Run every function in this module whose name starts with "test_", each in its own testset
function runtests()
    for name in names(@__MODULE__; all = true)
        if startswith("$(name)", "test_")
            @testset "$(name)" begin
                getfield(@__MODULE__, name)()
            end
        end
    end
    return
end
# Drive the generic MathOptInterface test suite against the bridged, cached optimizer
function test_MOI_Test()
    model = MOI.Utilities.CachingOptimizer(
        MOI.Utilities.UniversalFallback(MOI.Utilities.Model{Float64}()),
        MOI.Bridges.full_bridge_optimizer(optimizer, Float64),
    )
    MOI.set(model, MOI.Silent(), true)
    MOI.Test.runtests(
        model,
        MOI.Test.Config(
            atol = 1e-4,
            rtol = 1e-4,
            infeasible_status = MOI.LOCALLY_INFEASIBLE,
            optimal_status = MOI.LOCALLY_SOLVED,
            # Attributes this (local NLP) solver does not reliably provide
            exclude = Any[
                MOI.ConstraintDual,
                MOI.ConstraintBasisStatus,
                MOI.DualObjectiveValue,
                MOI.ObjectiveBound,
            ],
        );
        exclude = String[
            # Tests purposefully excluded:
            #  - Convex after reformulation; but we cannot find a global optimum.
            "test_quadratic_SecondOrderCone_basic",
        ],
    )
    return
end
end
TestMOIWrapper.runtests()
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 635 |
qp_solver = optimizer_with_attributes(
Ipopt.Optimizer,
"print_level" => 0,
"warm_start_init_point" => "yes",
)
optimizer_solver = optimizer_with_attributes(
SqpSolver.Optimizer,
"external_optimizer" => qp_solver,
"algorithm" => "SQP-TR",
"OutputFlag" => 0,
)
model = Model(optimizer_solver)
@variable(model, X);
@variable(model, Y);
@objective(model, Min, X^2 + X);
@NLconstraint(model, X^2 - X == 2);
@NLconstraint(model, X * Y == 1);
@NLconstraint(model, X * Y >= 0);
@constraint(model, X >= -2);
JuMP.optimize!(model);
xsol = JuMP.value.(X)
ysol = JuMP.value.(Y)
status = termination_status(model)
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 654 | using PowerModels
PowerModels.silence()
build_acp(data_file::String) = instantiate_model(
PowerModels.parse_file(data_file),
ACPPowerModel,
PowerModels.build_opf
)
function run_sqp_opf(data_file::String, max_iter::Int = 100)
pm = build_acp(data_file)
qp_solver = optimizer_with_attributes(
Ipopt.Optimizer,
"print_level" => 0,
"warm_start_init_point" => "yes",
)
result = optimize_model!(pm, optimizer = optimizer_with_attributes(
SqpSolver.Optimizer,
"algorithm" => "SQP-TR",
"external_optimizer" => qp_solver,
"max_iter" => max_iter,
))
return result
end | SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | code | 370 | using SqpSolver
using JuMP, MathOptInterface
using Ipopt
using Test
@testset "MathOptInterface" begin
include("MOI_wrapper.jl")
end
@testset "External Solver Attributes Implementation with Toy Example" begin
include("ext_solver.jl")
@test isapprox(xsol, -1.0, rtol=1e-4)
@test isapprox(ysol, -1.0, rtol=1e-4)
@test status == MOI.LOCALLY_SOLVED
end
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.0 | 7f1b99029b30c0498fd715a8bde8defd2f4e1893 | docs | 1201 | # SqpSolver.jl

[](https://codecov.io/gh/exanauts/SqpSolver.jl)
This is a Julia package that implements sequantial quadratic programming algorithms for continuous nonlinear optimization.
## Installation
```julia
]add SqpSolver
```
## Example
Consider the following quadratic optimization problem
```
min x^2 + x
s.t. x^2 - x = 2
```
This problem can be solved by the following code snippet:
```julia
# Load packages
using SqpSolver, JuMP
using Ipopt # can be any QP solver
# Number of variables
n = 1
# Build nonlinear problem model via JuMP
model = Model(optimizer_with_attributes(
SqpSolver.Optimizer,
"external_optimizer" => Ipopt.Optimizer,
))
@variable(model, x)
@objective(model, Min, x^2 + x)
@NLconstraint(model, x^2 - x == 2)
# Solve optimization problem
JuMP.optimize!(model)
# Retrieve solution
Xsol = JuMP.value.(X)
```
## Acknowledgements
This material is based upon work supported by the U.S. Department of Energy, Office of Science, under contract number DE-AC02-06CH11357.
| SqpSolver | https://github.com/exanauts/SqpSolver.jl.git |
|
[
"MIT"
] | 0.1.1 | 5f508af97ecf39645febed8ba2fabf5cfdc682e0 | code | 358 | using Documenter
using MixedModelsDatasets
makedocs(; root=joinpath(dirname(pathof(MixedModelsDatasets)), "..", "docs"),
sitename="MixedModelsDatasets",
doctest=true,
strict=true,
pages=["index.md"])
deploydocs(; repo="github.com/JuliaMixedModels/MixedModelsDatasets.jl", push_preview=true,
devbranch="main")
| MixedModelsDatasets | https://github.com/JuliaMixedModels/MixedModelsDatasets.jl.git |
|
[
"MIT"
] | 0.1.1 | 5f508af97ecf39645febed8ba2fabf5cfdc682e0 | code | 951 | module MixedModelsDatasets
using Arrow
using Artifacts
using LazyArtifacts
export dataset, datasets
# Lazily-resolved artifact directory containing the Arrow test data files
_testdata() = artifact"TestData"
# Cache of already-loaded tables, keyed by dataset name; `const` keeps the
# global binding type-stable (the original non-const global is `Any`-typed)
const cacheddatasets = Dict{String,Arrow.Table}()
"""
    dataset(nm)
Return, as an `Arrow.Table`, the test data set named `nm`, which can be a `String` or `Symbol`.
Tables are read from the test-data artifact on first access and memoized in
`cacheddatasets`, so repeated calls with the same name do not touch the filesystem.
Throws an `ArgumentError` if no dataset of that name exists.
"""
function dataset(nm::AbstractString)
    get!(cacheddatasets, nm) do
        path = joinpath(_testdata(), nm * ".arrow")
        if !isfile(path)
            # Point users at this package's own `datasets()`; the previous message
            # referred to `MixedModels.datasets()`, which is a different package.
            throw(ArgumentError("Dataset \"$nm\" is not available.\nUse MixedModelsDatasets.datasets() for available names."))
        end
        return Arrow.Table(path)
    end
end
dataset(nm::Symbol) = dataset(string(nm))
"""
    datasets()
Return a vector of names of the available test data sets
"""
function datasets()
    arrowfiles = filter(endswith(".arrow"), readdir(_testdata()))
    return [first(splitext(f)) for f in arrowfiles]
end
end # module MixedModelsDatasets
| MixedModelsDatasets | https://github.com/JuliaMixedModels/MixedModelsDatasets.jl.git |
|
[
"MIT"
] | 0.1.1 | 5f508af97ecf39645febed8ba2fabf5cfdc682e0 | code | 371 | using Arrow
using Aqua
using MixedModelsDatasets
using Test
# Static package-quality checks (method ambiguities excluded; type piracy checked)
@testset "Aqua" begin
    @static if VERSION >= v"1.9"
        Aqua.test_all(MixedModelsDatasets; ambiguities=false, piracy=true)
    end
end
# Smoke test: every advertised dataset loads as an Arrow.Table
@testset "datasets" begin
    # NB: update this count whenever datasets are added or removed
    @test length(datasets()) == 17
    @testset "$(ds) loadable" for ds in datasets()
        @test dataset(ds) isa Arrow.Table
    end
end
| MixedModelsDatasets | https://github.com/JuliaMixedModels/MixedModelsDatasets.jl.git |
|
[
"MIT"
] | 0.1.1 | 5f508af97ecf39645febed8ba2fabf5cfdc682e0 | docs | 300 | # MixedModelsDatasets.jl Documentation
```@meta
CurrentModule = MixedModelsDatasets
DocTestSetup = quote
using MixedModelsDatasets
end
DocTestFilters = [r"([a-z]*) => \1", r"getfield\(.*##[0-9]+#[0-9]+"]
```
# API
```@index
```
```@autodocs
Modules = [MixedModelsDatasets]
Private = true
```
| MixedModelsDatasets | https://github.com/JuliaMixedModels/MixedModelsDatasets.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | code | 670 | using StatGeochemBase
using Documenter
# Make `using StatGeochemBase` implicit in all doctest blocks
DocMeta.setdocmeta!(StatGeochemBase, :DocTestSetup, :(using StatGeochemBase); recursive=true)
makedocs(;
    modules=[StatGeochemBase],
    authors="C. Brenhin Keller",
    repo="https://github.com/brenhinkeller/StatGeochemBase.jl/blob/{commit}{path}#{line}",
    sitename="StatGeochemBase.jl",
    format=Documenter.HTML(;
        # Pretty URLs only on CI, so local builds stay browsable from the filesystem
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://brenhinkeller.github.io/StatGeochemBase.jl",
        assets=String[],
    ),
    pages=[
        "Home" => "index.md",
    ],
)
deploydocs(;
    repo="github.com/brenhinkeller/StatGeochemBase.jl",
    devbranch = "main",
)
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | code | 18736 | ## --- To make arrays with messy types better behaved
"""
```julia
unionize(x::AbstractVector)
```
Turn an array with possibly abstract element type into one with
`eltype` equal to a Union of all types of elements in the array.
Always returns a copy, even if `x` is already unionized.
An empty input is returned as an (empty) copy of itself.
### Examples
```julia
julia> a = Any[false, 0, 1.0]
3-element Vector{Any}:
false
0
1.0
julia> unionize(a)
3-element Vector{Union{Bool, Float64, Int64}}:
false
0
1.0
```
"""
function unionize(x::AbstractVector)
    # An empty vector has no element types to unionize over
    # (and `only` on an empty type list would throw)
    isempty(x) && return copy(x)
    types = unique(typeof.(x))
    # A single observed type becomes the concrete eltype; otherwise a Union of all types
    unionized = similar(x, length(types) > 1 ? Union{types...} : only(types))
    unionized .= x
end
unionize(x::AbstractRange) = copy(x) # Exemption for ranges, which should probably always have concrete eltype already
export unionize
## --- To avoid allocations when indexing by a vector of Booleans
"""
```julia
copyat!(dest, src, tₛ::AbstractVector{Bool})
```
Copy the elements of `src` selected by the mask `tₛ` into `dest`, in order.
Equivalent to `dest .= src[tₛ]`, but without inducing allocations.
See also `reversecopyat!`
"""
function copyat!(dest::DenseArray, src, tₛ::AbstractVector{Bool})
    @assert eachindex(src) == eachindex(tₛ)
    # Next write position in dest, and the last position we may write to
    k = firstindex(dest)
    kmax = lastindex(dest)
    @inbounds for i in eachindex(src)
        tₛ[i] || continue
        dest[k] = src[i]
        k += 1
        k > kmax && break
    end
    return dest
end
export copyat!
"""
```julia
reversecopyat!(dest, src, tₛ::AbstractVector{Bool})
```
As `copyat!`, but store the selected elements in reverse order.
Equivalent to `dest .= reverse(src[tₛ])`, but without inducing allocations.
"""
function reversecopyat!(dest::DenseArray, src, tₛ::AbstractVector{Bool})
    @assert eachindex(src) == eachindex(tₛ)
    # Write positions run backwards from the end of dest
    stop = firstindex(dest)
    k = lastindex(dest)
    @inbounds for i in eachindex(src)
        if tₛ[i]
            dest[k] = src[i]
            k -= 1
            k < stop && break
        end
    end
    return dest
end
export reversecopyat!
## --- Sorting and counting array elements
"""
```julia
n = count_unique!(A)
```
Sort the array `A` in-place (if not already sorted), move unique elements to
the front, and return the number of unique elements found.
`A[1:count_unique!(A)]` should return an array equivalent to `unique(A)`.
Returns `0` for an empty input.
### Examples
```julia
julia> A = [1, 1, 4, 3, 1, 1, 4];
julia> n = count_unique!(A)
3
julia> A[1:n]
3-element Vector{Int64}:
1
3
4
```
"""
function count_unique!(A)
    # Nothing to count (and no first element to read) in an empty array
    isempty(A) && return 0
    issorted(A) || sort!(A)
    i₀ = firstindex(A)
    n = 1
    last = A[i₀]
    # Walk the sorted array, compacting the first occurrence of each value to the front.
    # Generic indices are used so offset arrays are handled correctly.
    @inbounds for i in (i₀+1):lastindex(A)
        if A[i] != last
            n += 1
            last = A[i₀+n-1] = A[i]
        end
    end
    return n
end
export count_unique!
## --- Convert between bin centers and bin edges
"""
```julia
cntr(edges::Collection)
```
Given an array of bin edges, return a corresponding vector of bin centers
### Examples
```julia
julia> cntr(1:10)
1.5:1.0:9.5
```
"""
cntr(edges::Collection) = (edges[1:end-1] + edges[2:end]) ./ 2
export cntr
## --- Searching arrays
"""
```julia
findmatches(source, target)
```
Return the linear index of the first value in `target` (if any) that is equal
to a given value in `source` for each value in `source`; else 0.
### Examples
```julia
julia> findmatches([3,5],1:10)
2-element Vector{Int64}:
3
5
```
"""
function findmatches(source, target)
    @inbounds for j in eachindex(target)
        isequal(source, target[j]) && return j
    end
    return 0
end
findmatches(source::Collection, target) = findmatches!(similar(source, Int), source, target)
function findmatches!(index::DenseArray, source::Collection, target)
    # For each source element, record the first matching index in target (0 if none)
    @inbounds for i in eachindex(index)
        found = 0
        for j in eachindex(target)
            if isequal(source[i], target[j])
                found = j
                break
            end
        end
        index[i] = found
    end
    return index
end
export findmatches, findmatches!
"""
```julia
findclosest(source, target)
```
Return the index of the numerically closest value in the indexable collection
`target` for each value in `source`.
If multiple values are equally close, the first one is used
### Examples
```julia
julia> findclosest(3.4, 1:10)
3
julia> findclosest(3:4, 1:10)
2-element Vector{Int64}:
3
4
```
"""
function findclosest(source, target)
    if issorted(target)
        # Sorted ascending: only the insertion point and its predecessor can be closest
        𝔦ₛ = searchsortedfirst(target, source)
        𝔦₊ = min(𝔦ₛ, lastindex(target))
        𝔦₋ = max(𝔦ₛ-1, firstindex(target))
        index = if 𝔦₊ != 𝔦₋ && abs(target[𝔦₊]-source) > abs(target[𝔦₋]-source)
            𝔦₋
        else
            𝔦₊
        end
    elseif issorted(target, rev=true)
        # Sorted descending: same idea, using reverse-order search
        𝔦ₛ = searchsortedfirst(target, source, rev=true)
        𝔦₊ = min(𝔦ₛ, lastindex(target))
        𝔦₋ = max(𝔦ₛ-1, firstindex(target))
        index = if 𝔦₊ != 𝔦₋ && abs(target[𝔦₊]-source) > abs(target[𝔦₋]-source)
            𝔦₋
        else
            𝔦₊
        end
    else
        # Unsorted: linear scan, keeping the first index of minimal distance
        δ = abs(first(target) - source)
        index = firstindex(target)
        @inbounds for j ∈ Iterators.drop(eachindex(target),1)
            δₚ = abs(target[j] - source)
            if δₚ < δ
                δ = δₚ
                index = j
            end
        end
    end
    return index
end
function findclosest(source::Collection, target)
    index = similar(source, Int)
    return findclosest!(index, source, target)
end
function findclosest!(index::DenseArray, source::Collection, target)
    @assert eachindex(index) == eachindex(source)
    # Find closest (numerical) match in target for each value in source
    if issorted(target)
        @inbounds for i ∈ eachindex(source)
            𝔦ₛ = searchsortedfirst(target, source[i])
            𝔦₊ = min(𝔦ₛ, lastindex(target))
            𝔦₋ = max(𝔦ₛ-1, firstindex(target))
            if 𝔦₊ != 𝔦₋ && abs(target[𝔦₊]-source[i]) > abs(target[𝔦₋]-source[i])
                index[i] = 𝔦₋
            else
                index[i] = 𝔦₊
            end
        end
    elseif issorted(target, rev=true)
        @inbounds for i ∈ eachindex(source)
            𝔦ₛ = searchsortedfirst(target, source[i], rev=true)
            𝔦₊ = min(𝔦ₛ, lastindex(target))
            𝔦₋ = max(𝔦ₛ-1, firstindex(target))
            if 𝔦₊ != 𝔦₋ && abs(target[𝔦₊]-source[i]) > abs(target[𝔦₋]-source[i])
                index[i] = 𝔦₋
            else
                index[i] = 𝔦₊
            end
        end
    else
        # Unsorted fallback: O(length(source) * length(target)) linear scan
        @inbounds for i ∈ eachindex(source)
            δ = abs(first(target) - source[i])
            index[i] = firstindex(target)
            for j ∈ Iterators.drop(eachindex(target),1)
                δₚ = abs(target[j] - source[i])
                if δₚ < δ
                    δ = δₚ
                    index[i] = j
                end
            end
        end
    end
    return index
end
export findclosest, findclosest!
"""
```julia
findclosestbelow(source, target)
```
Return the index of the nearest value of the indexable collection `target`
that is less than (i.e., "below") each value in `source`.
If no such target values exist, returns `firstindex(target)-1`.
### Examples
```julia
julia> findclosestbelow(3.5, 1:10)
3
julia> findclosestbelow(3:4, 1:10)
2-element Vector{Int64}:
2
3
```
"""
findclosestbelow(source, target) = findclosestbelow!(fill(0, length(source)), source, target)
findclosestbelow(source::Number, target) = only(findclosestbelow!(fill(0), source, target))
findclosestbelow(source::AbstractArray, target) = findclosestbelow!(similar(source, Int), source, target)
function findclosestbelow!(index::DenseArray, source, target)
    if issorted(target)
        # Sorted ascending: the last element < source[i] sits just before the insertion point
        @inbounds for i ∈ eachindex(source)
            index[i] = searchsortedfirst(target, source[i]) - 1
        end
    elseif issorted(target, rev=true)
        # Sorted descending: the first element < source[i] follows the last element >= it
        @inbounds for i ∈ eachindex(source)
            index[i] = searchsortedlast(target, source[i], rev=true) + 1
            index[i] > lastindex(target) && (index[i] = firstindex(target)-1)
        end
    else
        # Unsorted: linear scan for the largest value strictly below source[i]
        ∅ = firstindex(target) - 1
        δ = first(source) - first(target)
        @inbounds for i ∈ eachindex(source)
            index[i] = j = ∅
            # First find any element below source[i]...
            while j < lastindex(target)
                j += 1
                if target[j] < source[i]
                    δ = source[i] - target[j]
                    index[i] = j
                    break
                end
            end
            # ...then scan the remainder for a closer one
            while j < lastindex(target)
                j += 1
                if target[j] < source[i]
                    δₚ = source[i] - target[j]
                    if δₚ < δ
                        δ = δₚ
                        index[i] = j
                    end
                end
            end
        end
    end
    return index
end
export findclosestbelow, findclosestbelow!
"""
```julia
findclosestabove(source, target)
```
Return the index of the nearest value of the indexable collection `target`
that is greater than (i.e., "above") each value in `source`.
If no such values exist, returns `lastindex(target)+1`.
### Examples
```julia
julia> findclosestabove(3.5, 1:10)
4
julia> findclosestabove(3:4, 1:10)
2-element Vector{Int64}:
4
5
```
"""
findclosestabove(source, target) = findclosestabove!(fill(0, length(source)), source, target)
findclosestabove(source::Number, target) = only(findclosestabove!(fill(0), source, target))
findclosestabove(source::AbstractArray, target) = findclosestabove!(similar(source, Int), source, target)
function findclosestabove!(index::DenseArray, source, target)
    if issorted(target)
        # Sorted ascending: the first element > source[i] follows the last element <= it
        @inbounds for i ∈ eachindex(source)
            index[i] = searchsortedlast(target, source[i]) + 1
        end
    elseif issorted(target, rev=true)
        # Sorted descending: the last element > source[i] sits just before the insertion point
        @inbounds for i ∈ eachindex(source)
            index[i] = searchsortedfirst(target, source[i], rev=true) - 1
            index[i] < firstindex(target) && (index[i] = lastindex(target)+1)
        end
    else
        # Unsorted: linear scan (from the end) for the smallest value strictly above source[i]
        ∅ = lastindex(target) + 1
        δ = first(source) - first(target)
        @inbounds for i ∈ eachindex(source)
            index[i] = j = ∅
            # First find any element above source[i]...
            while j > firstindex(target)
                j -= 1
                if target[j] > source[i]
                    δ = target[j] - source[i]
                    index[i] = j
                    break
                end
            end
            # ...then scan the remainder for a closer one
            while j > firstindex(target)
                j -= 1
                if target[j] > source[i]
                    δₚ = target[j] - source[i]
                    if δₚ < δ
                        δ = δₚ
                        index[i] = j
                    end
                end
            end
        end
    end
    return index
end
export findclosestabove, findclosestabove!
"""
```julia
findnth(t::Collection{Bool}, n::Integer)
```
Return the index of the `n`th true value of `t`, else length(`t`)
### Examples
```julia
julia> t = [true, true, false, true, true];
julia> findnth(t, 3)
4
```
"""
function findnth(t::Collection{Bool}, n::Integer)
    seen = 0
    @inbounds for i in eachindex(t)
        t[i] && (seen += 1)
        seen == n && return i
    end
    return length(t)
end
export findnth
"""
```julia
findclosestunequal(x::Collection, i::Integer)
```
Return the index of the closest index `n` to `i` for which `x[n] != x[i]`,
or `i` if no unequal values of `x` are found.
### Examples
```julia
julia> x = [1, 2, 2, 3, 4];
julia> findclosestunequal(x, 2)
1
julia> findclosestunequal(x, 3)
4
```
"""
function findclosestunequal(x::Collection, i::Int)
    xᵢ = x[i]
    # Expand outward from i, checking the lower neighbor before the upper at each distance
    for d = 1:(length(x)-1)
        below = i - d
        if below >= firstindex(x) && x[below] != xᵢ
            return below
        end
        above = i + d
        if above <= lastindex(x) && x[above] != xᵢ
            return above
        end
    end
    return i
end
export findclosestunequal
## --- String matching
"""
```julia
containsi(haystack, needle)
```
Converts both `haystack` and `needle` to strings and checks whether
`string(haystack)` contains `string(needle)`, ignoring case.
### Examples
```julia
julia> containsi("QuickBrownFox", "brown")
true
```
"""
function containsi(haystack::AbstractString, needle::Union{AbstractString,AbstractChar})
    return occursin(lowercase(needle), lowercase(haystack))
end
function containsi(haystack, needle)
    return occursin(lowercase(string(needle)), lowercase(string(haystack)))
end
export containsi
## --- Drawing a pseudorandom array from a numerically specified distribution
"""
```julia
draw_from_distribution(dist::Collection{AbstractFloat}, n::Integer)
```
Draw `n` random floating point numbers from a continuous probability distribution
specified by a collection `dist` defining the PDF curve thereof.
"""
function draw_from_distribution(dist::Collection{AbstractFloat}, n::Integer)
    drawn = Array{eltype(dist)}(undef, n)
    draw_from_distribution!(drawn, dist)
    return drawn
end
export draw_from_distribution
"""
```julia
draw_from_distribution!(x::DenseArray{<:AbstractFloat}, dist::Collection{AbstractFloat})
```
Fill an existing variable `x` with random floating point numbers drawn from
a continuous probability distribution specified by a vector `dist`
defining the PDF curve thereof. Draws are rescaled to the unit interval [0, 1).
"""
function draw_from_distribution!(x::DenseArray{<:AbstractFloat}, dist::Collection{AbstractFloat})
    # Fill the array x with random numbers from the distribution 'dist'
    # (rejection sampling against the box [0, dist_xmax] × [0, dist_ymax])
    dist_ymax = maximum(dist)
    dist_xmax = prevfloat(length(dist) - 1.0)
    @inbounds for i ∈ eachindex(x)
        while true
            # Pick random x value
            rx = rand(eltype(x)) * dist_xmax
            # Interpolate corresponding distribution value
            # NOTE(review): `dist[f+1]`/`dist[f+2]` assumes `dist` is 1-based — confirm for offset arrays
            f = floor(Int,rx)
            y = dist[f+2]*(rx-f) + dist[f+1]*(1-(rx-f))
            # See if x value is accepted
            ry = rand(Float64) * dist_ymax
            if (y > ry)
                # Accepted: store the draw, rescaled to [0, 1)
                x[i] = rx / dist_xmax
                break
            end
        end
    end
end
export draw_from_distribution!
## --- Numerically integrate a 1-d distribution
"""
```julia
trapezoidalquadrature(edges, values)
```
Add up the area under a curve with y positions specified by a vector of `values`
and x positions specified by a vector of `edges` using trapezoidal integration.
Bins need not be evenly spaced, though it helps (integration will be faster
if `edges` are specified as an AbstractRange).
### Examples
```julia
julia> trapezoidalquadrature(0:0.1:10, 0:0.1:10)
50.0
```
"""
function trapezoidalquadrature(edges::AbstractRange, values::Collection)
    @assert eachindex(edges)==eachindex(values)
    # Evenly spaced case: sum the pairwise (yᵢ₋₁ + yᵢ), then scale by dx/2 once at the end
    result = zero(eltype(values))
    @inbounds @fastmath for i ∈ (firstindex(edges)+1):lastindex(edges)
        result += values[i-1]+values[i]
    end
    dx = (edges[end]-edges[1])/(length(edges) - 1)
    return result * dx / 2
end
function trapezoidalquadrature(edges::Collection, values::Collection)
    @assert eachindex(edges)==eachindex(values)
    # General case: weight each pairwise sum by its own bin width
    result = zero(promote_type(eltype(edges), eltype(values)))
    @inbounds @fastmath for i ∈ (firstindex(edges)+1):lastindex(edges)
        result += (values[i-1] + values[i]) * (edges[i] - edges[i-1])
    end
    return result / 2
end
export trapezoidalquadrature
# `const` makes the alias binding type-stable when referenced from other functions
# (a non-const global alias is `Any`-typed)
const trapz = trapezoidalquadrature
export trapz
"""
```julia
midpointquadrature(bincenters, values)
```
Add up the area under a curve with y positions specified by a vector of `values`
and x positions specified by a vector of `bincenters` using midpoint integration.
### Examples
```julia
julia> midpointquadrature(0:0.1:10, 0:0.1:10)
50.5
```
"""
function midpointquadrature(bincenters::AbstractRange, values::Collection)
    @assert eachindex(bincenters)==eachindex(values)
    # Every bin has the same width, so the sum can be scaled once at the end
    total = sum(values)
    span = last(bincenters) - first(bincenters)
    return total * span / (length(bincenters) - 1)
end
export midpointquadrature
## --- End of File
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | code | 48207 | ## --- Matplotlib colormaps
# Viridis (b-g-yl)
# Viridis colormap (dark blue – green – yellow), stored as parsed `Color`s.
# `const` keeps this module-level binding type-stable: a non-const global is
# `Any`-typed and defeats inference wherever the colormap is used.
const viridis = parse.(Color, ["#440154","#440256","#450457","#450559","#46075A","#46085C","#460A5D","#460B5E","#470D60","#470E61","#471063","#471164","#471365","#481467","#481668","#481769","#48186A","#481A6C","#481B6D","#481C6E","#481D6F","#481F70","#482071","#482173","#482374","#482475","#482576","#482677","#482878","#482979","#472A7A","#472C7A","#472D7B","#472E7C","#472F7D","#46307E","#46327E","#46337F","#463480","#453581","#453781","#453882","#443983","#443A83","#443B84","#433D84","#433E85","#423F85","#424086","#424186","#414287","#414487","#404588","#404688","#3F4788","#3F4889","#3E4989","#3E4A89","#3E4C8A","#3D4D8A","#3D4E8A","#3C4F8A","#3C508B","#3B518B","#3B528B","#3A538B","#3A548C","#39558C","#39568C","#38588C","#38598C","#375A8C","#375B8D","#365C8D","#365D8D","#355E8D","#355F8D","#34608D","#34618D","#33628D","#33638D","#32648E","#32658E","#31668E","#31678E","#31688E","#30698E","#306A8E","#2F6B8E","#2F6C8E","#2E6D8E","#2E6E8E","#2E6F8E","#2D708E","#2D718E","#2C718E","#2C728E","#2C738E","#2B748E","#2B758E","#2A768E","#2A778E","#2A788E","#29798E","#297A8E","#297B8E","#287C8E","#287D8E","#277E8E","#277F8E","#27808E","#26818E","#26828E","#26828E","#25838E","#25848E","#25858E","#24868E","#24878E","#23888E","#23898E","#238A8D","#228B8D","#228C8D","#228D8D","#218E8D","#218F8D","#21908D","#21918C","#20928C","#20928C","#20938C","#1F948C","#1F958B","#1F968B","#1F978B","#1F988B","#1F998A","#1F9A8A","#1E9B8A","#1E9C89","#1E9D89","#1F9E89","#1F9F88","#1FA088","#1FA188","#1FA187","#1FA287","#20A386","#20A486","#21A585","#21A685","#22A785","#22A884","#23A983","#24AA83","#25AB82","#25AC82","#26AD81","#27AD81","#28AE80","#29AF7F","#2AB07F","#2CB17E","#2DB27D","#2EB37C","#2FB47C","#31B57B","#32B67A","#34B679","#35B779","#37B878","#38B977","#3ABA76","#3BBB75","#3DBC74","#3FBC73","#40BD72","#42BE71","#44BF70","#46C06F","#48C16E","#4AC16D","#4CC26C","#4EC36B","#50C46A","#52C569","#54C568","#56C667","#58C765","#5AC864","#5CC863","#5EC962","#60CA60","#63CB5F","#65CB5E","#67CC5C","#69CD5B","#6CCD5A","#6ECE58","#70CF57","#73D056","#75D054","#77D153","#7AD151","#7CD250","#7FD34E","#81D34D","#84D44B","#86D549","#89D548","#8BD646","#8ED645","#90D743","#93D741","#95D840","#98D83E","#9BD93C","#9DD93B","#A0DA39","#A2DA37","#A5DB36","#A8DB34","#AADC32","#ADDC30","#B0DD2F","#B2DD2D","#B5DE2B","#B8DE29","#BADE28","#BDDF26","#C0DF25","#C2DF23","#C5E021","#C8E020","#CAE11F","#CDE11D","#D0E11C","#D2E21B","#D5E21A","#D8E219","#DAE319","#DDE318","#DFE318","#E2E418","#E5E419","#E7E419","#EAE51A","#ECE51B","#EFE51C","#F1E51D","#F4E61E","#F6E620","#F8E621","#FBE723","#FDE725"])
export viridis
# Plasma colormap (blue – magenta – yellow).
# `const`: type-stable module-level binding (non-const globals are `Any`-typed).
const plasma = parse.(Color, ["#0D0887","#100788","#130789","#16078A","#19068C","#1B068D","#1D068E","#20068F","#220690","#240691","#260591","#280592","#2A0593","#2C0594","#2E0595","#2F0596","#310597","#330597","#350498","#370499","#38049A","#3A049A","#3C049B","#3E049C","#3F049C","#41049D","#43039E","#44039E","#46039F","#48039F","#4903A0","#4B03A1","#4C02A1","#4E02A2","#5002A2","#5102A3","#5302A3","#5502A4","#5601A4","#5801A4","#5901A5","#5B01A5","#5C01A6","#5E01A6","#6001A6","#6100A7","#6300A7","#6400A7","#6600A7","#6700A8","#6900A8","#6A00A8","#6C00A8","#6E00A8","#6F00A8","#7100A8","#7201A8","#7401A8","#7501A8","#7701A8","#7801A8","#7A02A8","#7B02A8","#7D03A8","#7E03A8","#8004A8","#8104A7","#8305A7","#8405A7","#8606A6","#8707A6","#8808A6","#8A09A5","#8B0AA5","#8D0BA5","#8E0CA4","#8F0DA4","#910EA3","#920FA3","#9410A2","#9511A1","#9613A1","#9814A0","#99159F","#9A169F","#9C179E","#9D189D","#9E199D","#A01A9C","#A11B9B","#A21D9A","#A31E9A","#A51F99","#A62098","#A72197","#A82296","#AA2395","#AB2494","#AC2694","#AD2793","#AE2892","#B02991","#B12A90","#B22B8F","#B32C8E","#B42E8D","#B52F8C","#B6308B","#B7318A","#B83289","#BA3388","#BB3488","#BC3587","#BD3786","#BE3885","#BF3984","#C03A83","#C13B82","#C23C81","#C33D80","#C43E7F","#C5407E","#C6417D","#C7427C","#C8437B","#C9447A","#CA457A","#CB4679","#CC4778","#CC4977","#CD4A76","#CE4B75","#CF4C74","#D04D73","#D14E72","#D24F71","#D35171","#D45270","#D5536F","#D5546E","#D6556D","#D7566C","#D8576B","#D9586A","#DA5A6A","#DA5B69","#DB5C68","#DC5D67","#DD5E66","#DE5F65","#DE6164","#DF6263","#E06363","#E16462","#E26561","#E26660","#E3685F","#E4695E","#E56A5D","#E56B5D","#E66C5C","#E76E5B","#E76F5A","#E87059","#E97158","#E97257","#EA7457","#EB7556","#EB7655","#EC7754","#ED7953","#ED7A52","#EE7B51","#EF7C51","#EF7E50","#F07F4F","#F0804E","#F1814D","#F1834C","#F2844B","#F3854B","#F3874A","#F48849","#F48948","#F58B47","#F58C46","#F68D45","#F68F44","#F79044","#F79143","#F79342","#F89441","#F89540","#F9973F","#F9983E","#F99A3E","#FA9B3D","#FA9C3C","#FA9E3B","#FB9F3A","#FBA139","#FBA238","#FCA338","#FCA537","#FCA636","#FCA835","#FCA934","#FDAB33","#FDAC33","#FDAE32","#FDAF31","#FDB130","#FDB22F","#FDB42F","#FDB52E","#FEB72D","#FEB82C","#FEBA2C","#FEBB2B","#FEBD2A","#FEBE2A","#FEC029","#FDC229","#FDC328","#FDC527","#FDC627","#FDC827","#FDCA26","#FDCB26","#FCCD25","#FCCE25","#FCD025","#FCD225","#FBD324","#FBD524","#FBD724","#FAD824","#FADA24","#F9DC24","#F9DD25","#F8DF25","#F8E125","#F7E225","#F7E425","#F6E626","#F6E826","#F5E926","#F5EB27","#F4ED27","#F3EE27","#F3F027","#F2F227","#F1F426","#F1F525","#F0F724","#F0F921"])
export plasma
# Magma colormap (black – magenta – white/tan).
# `const`: type-stable module-level binding (non-const globals are `Any`-typed).
const magma = parse.(Color, ["#000004","#010005","#010106","#010108","#020109","#02020B","#02020D","#03030F","#030312","#040414","#050416","#060518","#06051A","#07061C","#08071E","#090720","#0A0822","#0B0924","#0C0926","#0D0A29","#0E0B2B","#100B2D","#110C2F","#120D31","#130D34","#140E36","#150E38","#160F3B","#180F3D","#19103F","#1A1042","#1C1044","#1D1147","#1E1149","#20114B","#21114E","#221150","#241253","#251255","#271258","#29115A","#2A115C","#2C115F","#2D1161","#2F1163","#311165","#331067","#341069","#36106B","#38106C","#390F6E","#3B0F70","#3D0F71","#3F0F72","#400F74","#420F75","#440F76","#451077","#471078","#491078","#4A1079","#4C117A","#4E117B","#4F127B","#51127C","#52137C","#54137D","#56147D","#57157E","#59157E","#5A167E","#5C167F","#5D177F","#5F187F","#601880","#621980","#641A80","#651A80","#671B80","#681C81","#6A1C81","#6B1D81","#6D1D81","#6E1E81","#701F81","#721F81","#732081","#752181","#762181","#782281","#792282","#7B2382","#7C2382","#7E2482","#802582","#812581","#832681","#842681","#862781","#882781","#892881","#8B2981","#8C2981","#8E2A81","#902A81","#912B81","#932B80","#942C80","#962C80","#982D80","#992D80","#9B2E7F","#9C2E7F","#9E2F7F","#A02F7F","#A1307E","#A3307E","#A5317E","#A6317D","#A8327D","#AA337D","#AB337C","#AD347C","#AE347B","#B0357B","#B2357B","#B3367A","#B5367A","#B73779","#B83779","#BA3878","#BC3978","#BD3977","#BF3A77","#C03A76","#C23B75","#C43C75","#C53C74","#C73D73","#C83E73","#CA3E72","#CC3F71","#CD4071","#CF4070","#D0416F","#D2426F","#D3436E","#D5446D","#D6456C","#D8456C","#D9466B","#DB476A","#DC4869","#DE4968","#DF4A68","#E04C67","#E24D66","#E34E65","#E44F64","#E55064","#E75263","#E85362","#E95462","#EA5661","#EB5760","#EC5860","#ED5A5F","#EE5B5E","#EF5D5E","#F05F5E","#F1605D","#F2625D","#F2645C","#F3655C","#F4675C","#F4695C","#F56B5C","#F66C5C","#F66E5C","#F7705C","#F7725C","#F8745C","#F8765C","#F9785D","#F9795D","#F97B5D","#FA7D5E","#FA7F5E","#FA815F","#FB835F","#FB8560","#FB8761","#FC8961","#FC8A62","#FC8C63","#FC8E64","#FC9065","#FD9266","#FD9467","#FD9668","#FD9869","#FD9A6A","#FD9B6B","#FE9D6C","#FE9F6D","#FEA16E","#FEA36F","#FEA571","#FEA772","#FEA973","#FEAA74","#FEAC76","#FEAE77","#FEB078","#FEB27A","#FEB47B","#FEB67C","#FEB77E","#FEB97F","#FEBB81","#FEBD82","#FEBF84","#FEC185","#FEC287","#FEC488","#FEC68A","#FEC88C","#FECA8D","#FECC8F","#FECD90","#FECF92","#FED194","#FED395","#FED597","#FED799","#FED89A","#FDDA9C","#FDDC9E","#FDDEA0","#FDE0A1","#FDE2A3","#FDE3A5","#FDE5A7","#FDE7A9","#FDE9AA","#FDEBAC","#FCECAE","#FCEEB0","#FCF0B2","#FCF2B4","#FCF4B6","#FCF6B8","#FCF7B9","#FCF9BB","#FCFBBD","#FCFDBF"])
export magma
# Inferno colormap (black – magenta – yellow).
# `const`: type-stable module-level binding (non-const globals are `Any`-typed).
const inferno = parse.(Color, ["#000004","#010005","#010106","#010108","#02010A","#02020C","#02020E","#030210","#040312","#040314","#050417","#060419","#07051B","#08051D","#09061F","#0A0722","#0B0724","#0C0826","#0D0829","#0E092B","#10092D","#110A30","#120A32","#140B34","#150B37","#160B39","#180C3C","#190C3E","#1B0C41","#1C0C43","#1E0C45","#1F0C48","#210C4A","#230C4C","#240C4F","#260C51","#280B53","#290B55","#2B0B57","#2D0B59","#2F0A5B","#310A5C","#320A5E","#340A5F","#360961","#380962","#390963","#3B0964","#3D0965","#3E0966","#400A67","#420A68","#440A68","#450A69","#470B6A","#490B6A","#4A0C6B","#4C0C6B","#4D0D6C","#4F0D6C","#510E6C","#520E6D","#540F6D","#550F6D","#57106E","#59106E","#5A116E","#5C126E","#5D126E","#5F136E","#61136E","#62146E","#64156E","#65156E","#67166E","#69166E","#6A176E","#6C186E","#6D186E","#6F196E","#71196E","#721A6E","#741A6E","#751B6E","#771C6D","#781C6D","#7A1D6D","#7C1D6D","#7D1E6D","#7F1E6C","#801F6C","#82206C","#84206B","#85216B","#87216B","#88226A","#8A226A","#8C2369","#8D2369","#8F2469","#902568","#922568","#932667","#952667","#972766","#982766","#9A2865","#9B2964","#9D2964","#9F2A63","#A02A63","#A22B62","#A32C61","#A52C60","#A62D60","#A82E5F","#A92E5E","#AB2F5E","#AD305D","#AE305C","#B0315B","#B1325A","#B3325A","#B43359","#B63458","#B73557","#B93556","#BA3655","#BC3754","#BD3853","#BF3952","#C03A51","#C13A50","#C33B4F","#C43C4E","#C63D4D","#C73E4C","#C83F4B","#CA404A","#CB4149","#CC4248","#CE4347","#CF4446","#D04545","#D24644","#D34743","#D44842","#D54A41","#D74B3F","#D84C3E","#D94D3D","#DA4E3C","#DB503B","#DD513A","#DE5238","#DF5337","#E05536","#E15635","#E25734","#E35933","#E45A31","#E55C30","#E65D2F","#E75E2E","#E8602D","#E9612B","#EA632A","#EB6429","#EB6628","#EC6726","#ED6925","#EE6A24","#EF6C23","#EF6E21","#F06F20","#F1711F","#F1731D","#F2741C","#F3761B","#F37819","#F47918","#F57B17","#F57D15","#F67E14","#F68013","#F78212","#F78410","#F8850F","#F8870E","#F8890C","#F98B0B","#F98C0A","#F98E09","#FA9008","#FA9207","#FA9407","#FB9606","#FB9706","#FB9906","#FB9B06","#FB9D07","#FC9F07","#FCA108","#FCA309","#FCA50A","#FCA60C","#FCA80D","#FCAA0F","#FCAC11","#FCAE12","#FCB014","#FCB216","#FCB418","#FBB61A","#FBB81D","#FBBA1F","#FBBC21","#FBBE23","#FAC026","#FAC228","#FAC42A","#FAC62D","#F9C72F","#F9C932","#F9CB35","#F8CD37","#F8CF3A","#F7D13D","#F7D340","#F6D543","#F6D746","#F5D949","#F5DB4C","#F4DD4F","#F4DF53","#F4E156","#F3E35A","#F3E55D","#F2E661","#F2E865","#F2EA69","#F1EC6D","#F1ED71","#F1EF75","#F1F179","#F2F27D","#F2F482","#F3F586","#F3F68A","#F4F88E","#F5F992","#F6FA96","#F8FB9A","#F9FC9D","#FAFDA1","#FCFFA4"])
export inferno
# Cividis colormap (blue – gray – yellow), designed for color-vision deficiency.
# `const`: type-stable module-level binding (non-const globals are `Any`-typed).
const cividis = parse.(Color, ["#00224E","#00234F","#002451","#002553","#002554","#002656","#002758","#002859","#00285B","#00295D","#002A5F","#002A61","#002B62","#002C64","#002C66","#002D68","#002E6A","#002E6C","#002F6D","#00306F","#003070","#003170","#003171","#013271","#053371","#083370","#0C3470","#0F3570","#123570","#143670","#163770","#18376F","#1A386F","#1C396F","#1E3A6F","#203A6F","#213B6E","#233C6E","#243C6E","#263D6E","#273E6E","#293F6E","#2A3F6D","#2B406D","#2D416D","#2E416D","#2F426D","#31436D","#32436D","#33446D","#34456C","#35456C","#36466C","#38476C","#39486C","#3A486C","#3B496C","#3C4A6C","#3D4A6C","#3E4B6C","#3F4C6C","#404C6C","#414D6C","#424E6C","#434E6C","#444F6C","#45506C","#46516C","#47516C","#48526C","#49536C","#4A536C","#4B546C","#4C556C","#4D556C","#4E566C","#4F576C","#50576C","#51586D","#52596D","#535A6D","#545A6D","#555B6D","#555C6D","#565C6D","#575D6D","#585E6D","#595E6E","#5A5F6E","#5B606E","#5C616E","#5D616E","#5E626E","#5E636F","#5F636F","#60646F","#61656F","#62656F","#636670","#646770","#656870","#656870","#666970","#676A71","#686A71","#696B71","#6A6C71","#6B6D72","#6C6D72","#6C6E72","#6D6F72","#6E6F73","#6F7073","#707173","#717274","#727274","#727374","#737475","#747475","#757575","#767676","#777776","#777777","#787877","#797977","#7A7A78","#7B7A78","#7C7B78","#7D7C78","#7E7C78","#7E7D78","#7F7E78","#807F78","#817F78","#828079","#838179","#848279","#858279","#868379","#878478","#888578","#898578","#8A8678","#8B8778","#8C8878","#8D8878","#8E8978","#8F8A78","#908B78","#918B78","#928C78","#928D78","#938E78","#948E77","#958F77","#969077","#979177","#989277","#999277","#9A9376","#9B9476","#9C9576","#9D9576","#9E9676","#9F9775","#A09875","#A19975","#A29975","#A39A74","#A49B74","#A59C74","#A69C74","#A79D73","#A89E73","#A99F73","#AAA073","#ABA072","#ACA172","#ADA272","#AEA371","#AFA471","#B0A571","#B1A570","#B3A670","#B4A76F","#B5A86F","#B6A96F","#B7A96E","#B8AA6E","#B9AB6D","#BAAC6D","#BBAD6D","#BCAE6C","#BDAE6C","#BEAF6B","#BFB06B","#C0B16A","#C1B26A","#C2B369","#C3B369","#C4B468","#C5B568","#C6B667","#C7B767","#C8B866","#C9B965","#CBB965","#CCBA64","#CDBB63","#CEBC63","#CFBD62","#D0BE62","#D1BF61","#D2C060","#D3C05F","#D4C15F","#D5C25E","#D6C35D","#D7C45C","#D9C55C","#DAC65B","#DBC75A","#DCC859","#DDC858","#DEC958","#DFCA57","#E0CB56","#E1CC55","#E2CD54","#E4CE53","#E5CF52","#E6D051","#E7D150","#E8D24F","#E9D34E","#EAD34C","#EBD44B","#EDD54A","#EED649","#EFD748","#F0D846","#F1D945","#F2DA44","#F3DB42","#F5DC41","#F6DD3F","#F7DE3E","#F8DF3C","#F9E03A","#FBE138","#FCE236","#FDE334","#FEE434","#FEE535","#FEE636","#FEE838"])
export cividis
# Laguna colormap by Peter Karpov (black – blue – white).
# `const`: type-stable module-level binding (non-const globals are `Any`-typed).
const laguna = parse.(Color,
    ["#000000","#030103","#060206","#090209","#0C030D","#0F040F","#110412","#130515","#150617","#17061A","#19071C","#1B071E","#1C0820","#1E0822","#1F0924","#200926","#220A28","#230A2A","#240A2C","#250B2D","#260B2F","#270C31","#280C33","#290D34","#2A0D36","#2B0E38","#2C0E3A","#2D0F3C","#2E0F3D","#2E103F","#2F1041","#301143","#311145","#321247","#331249","#33134B","#34144D","#35144E","#361550","#361652","#371654","#381756","#381858","#39185A","#3A195C","#3A1A5E","#3B1A60","#3B1B62","#3C1C64","#3C1D66","#3D1E68","#3D1E6A","#3E1F6C","#3E206E","#3F2170","#3F2272","#3F2374","#402476","#402478","#40257A","#41267B","#41277D","#41287F","#412981","#422A83","#422B85","#422C86","#422D88","#422E8A","#422F8C","#43308D","#43318F","#433391","#433492","#433594","#433696","#433797","#433899","#43399A","#423A9C","#423C9D","#423D9F","#423EA0","#423FA2","#4240A3","#4242A4","#4143A5","#4144A7","#4145A8","#4147A9","#4148AA","#4049AB","#404AAD","#404CAE","#404DAF","#3F4EB0","#3F50B1","#3F51B2","#3E52B3","#3E53B3","#3E55B4","#3D56B5","#3D57B6","#3D59B7","#3C5AB7","#3C5BB8","#3C5DB9","#3B5EB9","#3B5FBA","#3A61BB","#3A62BB","#3A63BC","#3965BC","#3966BD","#3968BD","#3869BD","#386ABE","#376CBE","#376DBE","#376EBF","#3670BF","#3671BF","#3572BF","#3574C0","#3575C0","#3476C0","#3478C0","#3479C0","#337BC0","#337CC0","#337DC0","#327FC0","#3280C0","#3281C0","#3183C0","#3184C0","#3185C0","#3087C0","#3088C0","#308AC0","#308BBF","#2F8CBF","#2F8EBF","#2F8FBF","#2F90BF","#2F92BE","#2F93BE","#2F94BD","#3096BD","#3097BC","#3098BC","#319ABB","#319BBB","#329CBA","#329EBA","#339FB9","#33A0B9","#34A2B8","#35A3B8","#35A4B7","#36A6B7","#37A7B6","#38A8B6","#38A9B5","#39ABB5","#3AACB4","#3BADB4","#3CAEB3","#3DB0B3","#3EB1B3","#3FB2B2","#40B3B2","#41B5B1","#42B6B1","#44B7B1","#45B8B0","#46BAB0","#47BBB0","#49BCB0","#4ABDAF","#4BBEAF","#4DC0AF","#4EC1AF","#50C2AE","#52C3AE","#53C4AE","#55C5AE","#56C7AE","#58C8AE","#5AC9AE","#5CCAAE","#5ECBAE","#60CCAE","#61CDAE","#63CEAE","#65CFAE","#67D0AF","#6AD1AF","#6CD3AF","#6ED4AF",
     "#70D5B0","#72D6B0","#74D7B1","#77D8B1","#79D9B1","#7BDAB2","#7EDBB3","#80DCB3","#82DCB4","#85DDB4","#87DEB5","#8ADFB6","#8CE0B7","#8FE1B8","#91E2B8","#94E3B9","#97E4BA","#99E5BB","#9CE5BC","#9FE6BD","#A1E7BF","#A4E8C0","#A7E9C1","#A9E9C2","#ACEAC4","#AFEBC5","#B2ECC6","#B4EDC8","#B7EDC9","#BAEECB","#BDEFCC","#BFEFCE","#C2F0CF","#C5F1D1","#C8F2D3","#CAF2D4","#CDF3D6","#D0F4D8","#D3F4DA","#D5F5DC","#D8F6DE","#DBF6E0","#DDF7E2","#E0F8E4","#E3F8E6","#E5F9E8","#E8F9EA","#EBFAEC","#EDFBEF","#F0FBF1","#F2FCF3","#F5FDF5","#F8FDF8","#FAFEFA","#FDFEFD","#FFFFFF"])
export laguna
# Lacerta colormap by Peter Karpov (black – blue – green – white).
# `const`: type-stable module-level binding (non-const globals are `Any`-typed).
const lacerta = parse.(Color,
    ["#000000","#03000B","#050014","#06011A","#080120","#090124","#0A0228","#0B022C","#0B032F","#0C0432","#0D0435","#0D0538","#0E063A","#0E063C","#0F073F","#0F0841","#100943","#100944","#110A46","#110B48","#110C49","#120D4B","#120E4C","#120F4E","#130F4F","#131050","#131151","#141253","#141354","#141455","#141556","#151657","#151758","#151859","#15195A","#161A5B","#161C5C","#161D5D","#161E5E","#171F5E","#17205F","#172160","#172261","#182361","#182462","#182662","#182763","#182864","#192964","#192A65","#192B65","#192C65","#1A2E66","#1A2F66","#1A3067","#1A3167","#1B3267","#1B3368","#1B3468","#1B3668","#1C3768","#1C3869","#1C3969","#1C3A69","#1D3B69","#1D3D69","#1D3E69","#1D3F6A","#1E406A","#1E416A","#1E426A","#1E446A","#1F456A","#1F466A","#1F476A","#20486A","#20496A","#204A6A","#214C6A","#214D6A","#214E6A","#224F6A","#22506A","#22516A","#23536A","#23546A","#24556A","#24566A","#245769","#255869","#255969","#265B69","#265C69","#275D69","#275E69","#285F69","#286068","#286268","#296368","#296468","#2A6568","#2A6667","#2B6767","#2C6867","#2C6A67","#2D6B67","#2D6C66","#2E6D66","#2E6E66","#2F6F66","#307065","#307265","#317365","#327465","#327564","#337664","#347764","#347864","#357A63","#367B63","#367C63","#377D62","#387E62","#397F62","#398061","#3A8261","#3B8361","#3C8460","#3D8560","#3D8660","#3E875F","#3F885F","#40895F","#418B5E","#428C5E","#438D5E","#448E5D","#458F5D","#46905C","#47915C","#48925C","#49945B","#4A955B","#4B965A","#4C975A","#4D9859","#4E9959","#4F9A59","#509B58","#519D58","#529E57","#539F57","#55A056","#56A156","#57A255","#58A355","#5AA454","#5BA554","#5CA653","#5DA853","#5FA952","#60AA52","#61AB51","#63AC51","#64AD50","#66AE50","#67AF4F","#68B04F","#6AB14E","#6BB24D","#6DB34D","#6EB44C","#70B64C","#72B74B","#73B84B","#75B94A","#76BA49","#78BB49","#7ABC48","#7BBD47","#7DBE47","#7FBF46","#81C045","#82C145","#84C244","#86C343","#88C443","#8AC542","#8CC641","#8EC741","#8FC840","#91C93F","#93CA3F","#95CB3E","#98CC3E","#9ACC3F","#9DCD41","#A0CE42","#A2CF44","#A5CF46",
     "#A8D048","#AAD149","#ADD24B","#B0D24D","#B2D34F","#B5D451","#B7D554","#BAD556","#BCD658","#BFD75A","#C1D75D","#C3D85F","#C6D962","#C8DA64","#CADA67","#CDDB69","#CFDC6C","#D1DD6F","#D3DD72","#D5DE74","#D8DF77","#DAE07A","#DCE07D","#DEE181","#DFE284","#E1E387","#E3E38A","#E5E48E","#E7E591","#E8E694","#EAE798","#ECE89C","#EDE89F","#EFE9A3","#F0EAA7","#F1EBAA","#F3ECAE","#F4EDB2","#F5EEB6","#F6EFBA","#F7EFBE","#F8F0C2","#F9F1C6","#FAF2CA","#FBF3CF","#FBF4D3","#FCF5D7","#FDF6DB","#FDF7E0","#FEF8E4","#FEF9E9","#FEFBED","#FFFCF1","#FFFDF6","#FFFEFA","#FFFFFF"])
export lacerta
# Hesperia colormap by Peter Karpov.
# `const`: type-stable module-level binding (non-const globals are `Any`-typed).
const hesperia = parse.(Color,
    ["#000000","#02000D","#040016","#05001D","#070023","#080128","#09012C","#0A0130","#0C0134","#0D0137","#0E023A","#0F023D","#100240","#110242","#120344","#130346","#140348","#15044A","#16044C","#17044E","#19044F","#1A0551","#1B0552","#1C0654","#1D0655","#1E0657","#1F0758","#20075A","#22075B","#23085D","#24085E","#250960","#270961","#280A63","#290A64","#2B0B65","#2C0B67","#2D0C68","#2F0C69","#300D6B","#320D6C","#330E6D","#340E6E","#360F6F","#370F71","#391072","#3A1173","#3C1174","#3D1275","#3F1276","#401377","#421478","#431479","#45157A","#46167B","#48167C","#49177D","#4B187D","#4C187E","#4E197F","#4F1A80","#511A80","#531B81","#541C82","#561D83","#571D83","#591E84","#5A1F84","#5C2085","#5D2086","#5F2186","#612287","#622387","#642388","#652488","#672588","#692689","#6A2789","#6C288A","#6D288A","#6F298A","#702A8B","#722B8B","#742C8B","#752D8B","#772D8C","#782E8C","#7A2F8C","#7B308C","#7D318C","#7F328D","#80338D","#82348D","#83348D","#85358D","#86368D","#88378D","#89388D","#8B398D","#8C3A8D","#8E3B8D","#8F3C8D","#913D8D","#933E8D","#943F8D","#96408D","#97418C","#99428C","#9A438C","#9C438C","#9D448C","#9E458C","#A0468B","#A1478B","#A3488B","#A4498B","#A64A8A","#A74B8A","#A94C8A","#AA4D89","#AC4F89","#AD5089","#AE5188","#B05288","#B15388","#B35487","#B45587","#B65687","#B75786","#B85886","#BA5985","#BB5A85","#BC5B84","#BE5C84","#BF5D83","#C15E83","#C25F82","#C36182","#C56281","#C66381","#C76480","#C9657F","#CA667F","#CB677E","#CD687E","#CE697D","#CF6B7C","#D06C7C","#D26D7B","#D36E7A","#D46F7A","#D57079","#D77178","#D87378","#D97477","#DA7576","#DC7675","#DD7775","#DE7874","#DF7A73","#E07B72","#E27C72","#E37D71","#E37F70","#E48070","#E58270","#E6836F","#E6856F","#E7866E","#E8876E","#E8896E","#E98A6D","#EA8C6D","#EA8D6D","#EB8F6C","#EB906C","#EC926C","#ED936C","#ED956C","#EE966B","#EE976B","#EF996B","#EF9A6B","#F09C6B","#F09D6B","#F19F6B","#F1A06B","#F2A26B","#F2A36B","#F3A56B","#F3A66B","#F4A86C","#F4A96C","#F5AB6C","#F5AC6C","#F5AD6D","#F6AF6D","#F6B06D","#F7B26E","#F7B36E",
     "#F7B56F","#F8B66F","#F8B870","#F8B971","#F9BB71","#F9BC72","#F9BE73","#FABF74","#FAC174","#FAC275","#FAC376","#FBC577","#FBC679","#FBC87A","#FBC97B","#FCCB7C","#FCCC7E","#FCCE7F","#FCCF80","#FCD082","#FDD284","#FDD385","#FDD587","#FDD689","#FDD88B","#FDD98D","#FEDA8F","#FEDC91","#FEDD94","#FEDF96","#FEE098","#FEE19B","#FEE39E","#FEE4A1","#FEE6A3","#FEE7A7","#FFE8AA","#FFEAAD","#FFEBB0","#FFECB4","#FFEEB7","#FFEFBB","#FFF0BF","#FFF1C3","#FFF3C7","#FFF4CC","#FFF5D0","#FFF6D5","#FFF7D9","#FFF9DE","#FFFAE3","#FFFBE9","#FFFCEE","#FFFDF3","#FFFEF9","#FFFFFF"])
export hesperia
## --- Other colormaps
# CubeHelix colormap by Stephen Cobeldick.
# `const`: type-stable module-level binding (non-const globals are `Any`-typed).
const cubehelix = parse.(Color, ["#000000","#020102","#030103","#050205","#070206","#080308","#0A030A","#0B040C","#0C050E","#0E050F","#0F0611","#100713","#110815","#120817","#130919","#140A1B","#150B1D","#160C1F","#160D21","#170E23","#180F25","#181027","#191129","#19122B","#19132D","#1A142F","#1A1631","#1A1733","#1A1835","#1B1A36","#1B1B38","#1B1C3A","#1B1E3B","#1B1F3D","#1A213E","#1A2240","#1A2441","#1A2543","#1A2744","#192845","#192A46","#192C47","#192D48","#182F49","#18314A","#18324B","#17344C","#17364C","#17374D","#16394D","#163B4E","#163D4E","#163F4E","#16404E","#15424E","#15444F","#15464E","#15474E","#15494E","#154B4E","#154D4E","#154E4D","#15504D","#15524C","#16534C","#16554B","#16574B","#17584A","#175A49","#185B48","#195D48","#195E47","#1A6046","#1B6145","#1C6344","#1D6443","#1E6542","#1F6741","#206840","#22693F","#236A3E","#256B3D","#266C3C","#286D3B","#2A6E3A","#2B6F39","#2D7038","#2F7137","#317236","#337335","#357435","#387434","#3A7533","#3C7632","#3F7632","#417731","#447731","#467830","#497830","#4C792F","#4E792F","#51792F","#54792F","#577A2F","#5A7A2F","#5D7A2F","#607A2F","#637A2F","#667A30","#697B30","#6C7B31","#6F7B31","#727B32","#757B33","#787B34","#7B7A35","#7E7A36","#817A37","#847A38","#877A3A","#8A7A3B","#8D7A3D","#907A3E","#937A40","#967A42","#997944","#9C7946","#9F7948","#A1794A","#A4794C","#A7794F","#A97951","#AC7954","#AE7956","#B17959","#B3795B","#B5795E","#B77961","#B97964","#BC7967","#BE796A","#BF796D","#C17A70","#C37A73","#C57A76","#C67A79","#C87B7C","#C97B7F","#CA7C83","#CC7C86","#CD7D89","#CE7D8C","#CF7E8F","#D07E93","#D17F96","#D18099","#D2809C","#D381A0","#D382A3","#D383A6","#D484A9","#D485AC","#D486AF","#D487B2","#D588B5","#D589B8","#D48ABB","#D48CBE","#D48DC1","#D48EC3","#D490C6","#D391C9","#D392CB","#D294CE","#D295D0","#D297D2","#D198D4","#D09AD7","#D09CD9","#CF9DDB","#CF9FDD","#CEA1DF","#CDA2E0","#CCA4E2","#CCA6E4","#CBA8E5","#CAA9E7","#CAABE8","#C9ADE9","#C8AFEA","#C8B1EC","#C7B2ED","#C6B4EE","#C6B6EE","#C5B8EF","#C5BAF0","#C4BCF1","#C4BDF1","#C3BFF2","#C3C1F2","#C2C3F2","#C2C5F3","#C2C6F3","#C2C8F3","#C1CAF3","#C1CCF3","#C1CDF3","#C1CFF3","#C1D1F3","#C2D2F3","#C2D4F3","#C2D6F3","#C2D7F3","#C3D9F3","#C3DAF2","#C4DCF2","#C4DDF2","#C5DFF2","#C6E0F1","#C6E1F1","#C7E3F1","#C8E4F0","#C9E5F0","#CAE7F0","#CBE8F0","#CCE9EF","#CDEAEF","#CFEBEF","#D0ECEF","#D1EDEF","#D3EEEF","#D4EFEF","#D6F0EF","#D7F1EF","#D9F2EF","#DBF3EF","#DCF3EF","#DEF4EF","#E0F5F0","#E2F6F0","#E3F6F0","#E5F7F1","#E7F8F1","#E9F8F2","#EBF9F3","#EDFAF4","#EFFAF4","#F0FBF5","#F2FBF6","#F4FCF7","#F6FCF8","#F8FDFA","#FAFDFB","#FBFEFC","#FDFEFE","#FFFFFF"])
export cubehelix
# Cubehelix variant with parameters 0.5, -1, 1.
# `const`: type-stable module-level binding (non-const globals are `Any`-typed).
const cubelinearl = parse.(Color, ["#000000","#020102","#030103","#050205","#070206","#080308","#0A030A","#0B040B","#0D050D","#0E050F","#100611","#110713","#120714","#140816","#150918","#16091A","#170A1C","#180B1E","#1A0C20","#1B0C22","#1C0D24","#1D0E25","#1D0F28","#1E102A","#1F112B","#20122D","#21132F","#211431","#221533","#231635","#231737","#241839","#24193B","#251A3D","#251B3F","#261C41","#261E42","#271F44","#272046","#272148","#272249","#28244B","#28254D","#28264F","#282850","#282952","#282A53","#282C55","#292D56","#292E58","#293059","#29315B","#29335C","#28345D","#28365F","#283760","#283961","#283A62","#283C63","#283D64","#283F65","#284166","#284267","#284468","#274569","#27476A","#27496B","#274A6B","#274C6C","#274E6D","#274F6D","#27516E","#27526E","#27546F","#27566F","#26576F","#265970","#265B70","#275C70","#275E70","#276071","#276171","#276371","#276471","#276671","#276871","#286971","#286B71","#286C71","#286E70","#296F70","#297170","#297370","#2A7470","#2A766F","#2B776F","#2B796F","#2C7A6E","#2D7C6E","#2D7D6D","#2E7E6D","#2F806D","#30816C","#30836C","#31846B","#32856B","#33876A","#34886A","#358969","#368A69","#378B68","#398D68","#3A8E67","#3B8F67","#3C9066","#3E9165","#3F9365","#419464","#429564","#439663","#459763","#469862","#489962","#4A9A62","#4B9B61","#4D9C61","#4F9C60","#519D60","#529E60","#549F5F","#56A05F","#58A05F","#5AA15F","#5CA25E","#5EA35E","#60A35E","#62A45E","#64A45E","#67A55E","#69A65E","#6BA75E","#6DA75E","#6FA85E","#71A85E","#74A95F","#76A95F","#78AA5F","#7AAA5F","#7DAB60","#7FAB60","#81AB61","#84AC61","#86AC62","#88AD62","#8BAD63","#8DAD63","#8FAE64","#92AE65","#94AF66","#96AF67","#99AF68","#9BB069","#9EB06A","#A0B06B","#A2B06C","#A4B16D","#A7B16E","#A9B16F","#ABB270","#ADB272","#B0B273","#B2B374","#B4B376","#B6B477","#B8B479","#BAB47A","#BDB57C","#BFB57D","#C1B57F","#C3B681","#C5B683","#C7B684","#C9B786","#CAB788","#CCB88A","#CEB88C","#D0B88E","#D2B990","#D3B991","#D5BA93","#D7BA95","#D8BB97","#DABB9A","#DCBC9C","#DDBC9E","#DFBDA0","#E0BDA2","#E1BEA4","#E3BFA6","#E4BFA8","#E5C0AA","#E7C1AC","#E8C1AE","#E9C2B1","#EAC3B3","#EBC4B5","#ECC4B7","#EDC5B9","#EEC6BB","#EFC7BD","#F0C8C0","#F1C9C2","#F1C9C4","#F2CAC6","#F3CBC8","#F4CCCA","#F4CDCC","#F5CECE","#F6CFD0","#F6D0D2","#F7D2D4","#F7D3D6","#F8D4D7","#F8D5D9","#F8D6DB","#F9D7DD","#F9D8DF","#FADAE0","#FADBE2","#FADCE4","#FADDE6","#FBDFE7","#FBE0E9","#FBE1EA","#FBE2EB","#FCE4ED","#FCE5EE","#FCE7EF","#FCE8F1","#FCE9F2","#FCEBF3","#FDECF4","#FDEDF6","#FDEFF7","#FDF0F8","#FDF2F9","#FDF3F9","#FDF5FA","#FEF6FB","#FEF8FC","#FEF9FD","#FEFBFD","#FEFCFE","#FFFDFE","#FFFFFF"])
export cubelinearl
# Cubehelix variant with parameters 1, -1, 1.
# `const`: type-stable module-level binding (non-const globals are `Any`-typed).
const cubeviridis = parse.(Color, ["#000000","#020101","#040101","#050202","#070302","#090303","#0B0404","#0D0405","#0F0506","#110606","#120607","#140708","#160709","#18080A","#1A080C","#1C090D","#1D090E","#1F0A0F","#210B11","#230B12","#240C13","#260C15","#280D16","#2A0E18","#2B0E1A","#2D0F1B","#2E0F1D","#30101E","#321020","#331122","#351124","#361225","#381327","#391329","#3A142B","#3C152D","#3D152F","#3E1631","#3F1733","#411735","#421837","#431939","#441A3B","#451B3D","#461B3F","#471C41","#481D43","#491E45","#4A1F48","#4A1F4A","#4B204C","#4C214E","#4D2250","#4D2352","#4E2454","#4F2556","#4F2658","#50275B","#50285D","#51295F","#512B61","#512C63","#522D65","#522E67","#522F69","#52306B","#52326D","#53336F","#533471","#533673","#533775","#533877","#533978","#533B7A","#533C7C","#533E7E","#533F7F","#524181","#524283","#524484","#524586","#524687","#524889","#514A8A","#514B8B","#514D8D","#514E8E","#505090","#505291","#505392","#4F5593","#4F5694","#4F5895","#4E5A96","#4E5C97","#4E5D98","#4E5F99","#4D619A","#4D629B","#4C649B","#4C669C","#4C679D","#4C699D","#4B6B9E","#4B6D9E","#4B6E9F","#4B709F","#4A729F","#4A74A0","#4A75A0","#4A77A0","#4A79A1","#4A7AA1","#4A7CA1","#4A7EA1","#4A80A1","#4A81A1","#4A83A1","#4A85A1","#4A86A1","#4A88A1","#4A89A1","#4A8BA0","#4A8DA0","#4B8EA0","#4B90A0","#4B929F","#4C939F","#4C959F","#4C969E","#4D989E","#4E999E","#4E9B9D","#4F9C9D","#4F9E9C","#509F9C","#51A09B","#52A29B","#52A39A","#53A59A","#54A699","#55A799","#56A998","#57AA98","#58AB97","#59AC97","#5AAE96","#5BAF96","#5DB095","#5EB195","#5FB294","#60B394","#62B593","#63B693","#65B792","#66B892","#68B991","#69BA91","#6BBA91","#6CBB90","#6EBC90","#70BD90","#71BE8F","#73BF8F","#75C08F","#77C18F","#79C18F","#7BC28E","#7CC38E","#7EC48E","#80C48E","#82C58E","#84C68E","#86C68E","#88C78E","#8AC88E","#8DC88F","#8FC98F","#91C98F","#93CA8F","#95CB90","#97CB90","#99CC90","#9BCC91","#9DCD91","#A0CD92","#A2CE93","#A4CE93","#A6CF94","#A8CF95","#AAD095","#ACD096","#AFD197","#B1D198","#B3D199","#B5D29A","#B7D29B","#B9D39C","#BBD39D","#BDD39E","#C0D49F","#C2D4A1","#C3D5A2","#C5D5A3","#C7D6A5","#C9D6A6","#CBD6A8","#CDD7A9","#CFD7AA","#D1D8AC","#D3D8AD","#D4D9AF","#D6DAB1","#D8DAB2","#DADBB4","#DBDBB6","#DDDCB8","#DFDCB9","#E0DDBB","#E2DDBD","#E3DEBF","#E4DEC1","#E6DFC2","#E7E0C4","#E9E0C6","#EAE1C8","#EBE2CA","#ECE2CC","#EEE3CE","#EFE4D0","#F0E5D2","#F1E6D4","#F2E6D6","#F3E7D8","#F4E8DA","#F5E9DC","#F6EADE","#F7EBDF","#F7ECE1","#F8EDE3","#F9EEE5","#F9EFE7","#FAF0E9","#FBF1EB","#FBF2ED","#FCF3EE","#FCF4F0","#FCF5F2","#FDF6F4","#FDF7F5","#FEF9F7","#FEFAF9","#FEFBFA","#FEFCFC","#FFFEFD","#FFFFFF"])
export cubeviridis
# Cubehelix variant with parameters 0.25, -0.67, 1.5.
# `const`: type-stable module-level binding (non-const globals are `Any`-typed).
const cubelacerta = parse.(Color, ["#000000","#020102","#030105","#050207","#060209","#07030B","#09030E","#0A0410","#0B0413","#0C0515","#0E0617","#0F061A","#10071C","#11081E","#120921","#130923","#140A25","#140B28","#150C2A","#160D2C","#170E2F","#170F31","#181033","#191036","#191138","#1A123A","#1A133C","#1B143F","#1B1541","#1C1743","#1C1845","#1C1947","#1D1A49","#1D1B4B","#1D1C4E","#1D1E50","#1E1F52","#1E2054","#1E2156","#1E2257","#1E245A","#1E255B","#1E275D","#1E285F","#1E2961","#1E2B62","#1E2C64","#1E2D66","#1E2F68","#1E3069","#1E326B","#1E336C","#1E356E","#1D366F","#1D3871","#1D3972","#1D3B74","#1D3C75","#1C3E76","#1C3F77","#1C4179","#1C437A","#1B447B","#1B467C","#1B477D","#1B497E","#1A4B7F","#1A4C80","#1A4E81","#1A5082","#195183","#195383","#195584","#185685","#185886","#185A86","#185C87","#185D87","#175F88","#176188","#176289","#176489","#176689","#17678A","#17698A","#176B8A","#166C8B","#166E8B","#16708B","#16718B","#16738B","#16758B","#16778B","#16788B","#177A8B","#177B8B","#177D8B","#177F8B","#17808B","#17828B","#18848B","#18858A","#18878A","#18888A","#198A8A","#198B89","#1A8D89","#1A8F89","#1B9088","#1B9288","#1C9387","#1C9587","#1D9687","#1E9786","#1E9986","#1F9B85","#209C85","#209D84","#219F84","#22A083","#23A183","#24A382","#25A482","#25A581","#27A781","#28A880","#29A97F","#2AAA7F","#2BAC7E","#2CAD7E","#2DAE7D","#2EAF7D","#30B07C","#31B17B","#32B27B","#34B47A","#35B57A","#37B679","#38B779","#3AB878","#3BB978","#3DBA77","#3EBB77","#40BC77","#42BD76","#43BE76","#45BE75","#47C075","#48C075","#4AC174","#4CC274","#4EC374","#50C473","#52C473","#53C573","#55C673","#57C772","#59C772","#5BC872","#5DC972","#5FC972","#61CA72","#63CB72","#66CB72","#68CC72","#6ACD72","#6CCD72","#6ECE72","#70CE73","#72CF73","#75CF73","#77D074","#79D074","#7BD174","#7ED175","#80D275","#82D276","#84D376","#87D377","#89D377","#8BD478","#8ED478","#90D579","#92D57A","#94D57B","#97D67B","#99D67C","#9BD77D","#9ED77E","#A0D77F","#A2D880","#A4D881","#A7D882","#A9D983","#ABD985","#ADDA86","#AFDA87","#B1DA88","#B4DB8A","#B6DB8B","#B8DB8C","#BADC8E","#BCDC8F","#BEDC91","#C0DD92","#C2DD94","#C4DD95","#C6DE97","#C8DE99","#CADF9B","#CCDF9C","#CEDF9E","#D0E0A0","#D2E0A2","#D4E1A3","#D5E1A5","#D7E1A7","#D9E2A9","#DBE2AB","#DCE3AD","#DEE3AF","#E0E4B1","#E1E4B4","#E3E5B6","#E4E5B8","#E6E6BA","#E7E7BC","#E9E7BE","#EAE8C1","#EBE8C3","#ECE9C5","#EEEAC7","#EFEAC9","#F0EBCC","#F1ECCE","#F2ECD0","#F3EDD3","#F4EED5","#F5EED7","#F6EFDA","#F7F0DC","#F8F1DE","#F9F2E1","#F9F3E3","#FAF4E6","#FBF5E8","#FBF6EA","#FCF6EC","#FCF7EF","#FDF8F1","#FDF9F4","#FEFAF6","#FEFCF8","#FEFDFA","#FFFEFD","#FFFFFF"])
export cubelacerta
# Cubehelix variant with parameters 0.75, -0.67, 1.5.
# `const`: type-stable module-level binding (non-const globals are `Any`-typed).
const cubelaguna = parse.(Color, ["#000000","#020001","#050102","#070103","#090104","#0B0206","#0E0207","#100208","#120309","#14030B","#16030C","#19040E","#1B040F","#1D0411","#1F0512","#210514","#230516","#250617","#270619","#29061B","#2B071D","#2D071F","#2E0821","#300822","#320824","#340927","#350928","#370A2A","#390A2C","#3A0B2E","#3C0B31","#3D0C33","#3F0C35","#400D37","#420E39","#430E3B","#440F3E","#460F40","#471042","#481144","#491147","#4A1249","#4B134B","#4D134E","#4E1450","#4F1552","#501654","#511657","#521759","#52185B","#53195E","#541A60","#551B62","#551C65","#561D67","#571E69","#571E6C","#581F6E","#582070","#592172","#592275","#5A2377","#5A2579","#5B267B","#5B277E","#5B2880","#5C2982","#5C2A84","#5C2B86","#5C2D89","#5C2E8B","#5C2F8D","#5C308F","#5D3291","#5D3393","#5D3495","#5D3697","#5D3799","#5C389B","#5C3A9D","#5C3B9F","#5C3CA1","#5C3EA2","#5C3FA4","#5C41A6","#5B42A8","#5B44A9","#5B45AB","#5B47AD","#5B48AE","#5A4AB0","#5A4BB1","#5A4DB3","#594FB4","#5950B6","#5852B7","#5853B9","#5855BA","#5757BB","#5758BC","#575ABE","#565CBF","#565DC0","#565FC1","#5561C2","#5562C3","#5464C4","#5466C5","#5468C6","#5369C7","#536BC8","#536DC9","#526EC9","#5270CA","#5272CB","#5174CB","#5176CC","#5177CD","#5079CD","#507BCE","#507CCE","#4F7ECF","#4F80CF","#4F82CF","#4F83D0","#4F85D0","#4E87D0","#4E89D1","#4E8AD1","#4E8CD1","#4E8ED1","#4E90D1","#4E91D1","#4E93D1","#4E95D1","#4E96D1","#4E98D1","#4E9AD1","#4E9CD1","#4E9DD1","#4E9FD1","#4EA0D1","#4EA2D1","#4FA4D0","#4FA5D0","#4FA7D0","#4FA9D0","#50AACF","#50ACCF","#50ADCF","#51AFCE","#51B0CE","#52B2CE","#52B3CD","#53B5CD","#53B6CD","#54B8CC","#55B9CC","#55BACB","#56BCCB","#57BDCA","#58BFCA","#58C0C9","#59C1C9","#5AC3C8","#5BC4C8","#5CC5C7","#5DC7C7","#5EC8C7","#5FC9C6","#60CAC6","#61CBC5","#63CDC5","#64CEC4","#65CFC4","#67D0C3","#68D1C3","#69D2C3","#6AD3C2","#6CD4C2","#6DD5C1","#6FD6C1","#70D7C1","#72D8C0","#73DAC0","#75DBC0","#77DBBF","#78DCBF","#7ADDBF","#7BDEBE","#7DDFBE","#7FE0BE","#81E1BE","#83E1BE","#84E2BE","#86E3BE","#88E4BE","#8AE4BE","#8CE5BE","#8EE6BE","#90E7BE","#92E7BE","#94E8BE","#96E8BE","#98E9BE","#9AEABF","#9CEABF","#9EEBBF","#A0EBC0","#A2ECC0","#A4ECC1","#A7EDC1","#A9EDC2","#ABEEC2","#ADEEC3","#AFEFC3","#B1EFC4","#B4F0C5","#B6F0C5","#B8F1C6","#BAF1C7","#BCF2C8","#BEF2C9","#C1F3CA","#C3F3CB","#C5F3CC","#C7F4CD","#C9F4CE","#CBF5CF","#CEF5D0","#D0F5D2","#D2F6D3","#D4F6D4","#D6F6D6","#D8F7D7","#DAF7D9","#DCF7DA","#DEF8DC","#E0F8DD","#E2F8DF","#E4F9E1","#E6F9E2","#E8FAE4","#EAFAE6","#ECFAE8","#EEFBEA","#F0FBEC","#F1FBEE","#F3FCF0","#F5FCF2","#F7FDF4","#F9FDF6","#FAFEF8","#FCFEFA","#FDFEFD","#FFFFFF"])
export cubelaguna
# Linear-Luminosity colormap by Matteo Niccoli.
# `const`: type-stable module-level binding (non-const globals are `Any`-typed).
const linearl = parse.(Color,
    ["#040404","#0A0308","#0D040B","#10050E","#120510","#150612","#160713","#180815","#1A0816","#1B0918","#1C0A19","#1E0B1A","#1F0C1B","#200C1C","#210D1D","#230E1F","#240E20","#250F20","#260F21","#271022","#281123","#291124","#2A1226","#2B1326","#2C1327","#2E1429","#2E142D","#2E1532","#2D1537","#2D153C","#2D1640","#2D1743","#2D1747","#2D184B","#2D184D","#2D1951","#2D1954","#2C1A57","#2C1B5A","#2D1B5C","#2D1C5F","#2C1D62","#2C1D64","#2C1E67","#2C1F6A","#2C1F6D","#2C206E","#2C2171","#2C2274","#2C2276","#2A2379","#282678","#262877","#242A78","#222C78","#212E78","#202F78","#1F3179","#1E327A","#1E337B","#1D347B","#1D357D","#1C377D","#1C387E","#1B397F","#1C3A80","#1C3B81","#1B3C81","#1B3D83","#1B3E84","#1B3F85","#1C4086","#1B4187","#1B4288","#1B4489","#1B458A","#194788","#164986","#154A85","#144C83","#114E81","#104F80","#0F517E","#0E527D","#0A547B","#0A557A","#095778","#085877","#075976","#065B75","#045C73","#045E72","#045F72","#036070","#01626F","#01636E","#00646D","#00656C","#00676B","#00686A","#006969","#006B68","#006C65","#006E64","#006F63","#007062","#007260","#00735F","#00745D","#00765C","#00775A","#007859","#007958","#007B56","#007C55","#007D53","#007F52","#008050","#00814F","#00834D","#00844B","#008549","#008648","#008846","#008944","#008A42","#008B41","#008D40","#008E3F","#008F3D","#00913C","#00923C","#00933A","#009539","#009638","#009737","#009935","#009A34","#009B33","#009D32","#009E30","#009F2F","#00A02D","#00A22C","#00A32A","#00A429","#00A527","#00A724","#00A822","#00A91F","#00AA17","#00A908","#09AA00","#14AB00","#1DAC00","#23AD00","#28AE00","#2DAF00","#30B000","#34B100","#37B200","#3BB300","#3DB400","#40B500","#42B600","#44B700","#47B800","#49B900","#4CBA00","#4EBB00","#4FBC00","#51BD00","#53BE00","#55BF00","#57C000","#5CC000","#63C100","#6AC100","#72C100","#77C200","#7DC200","#82C200","#87C300","#8CC300","#91C300","#95C400","#99C400","#9DC500","#A1C500","#A5C500","#A9C600","#ACC600","#B0C700","#B4C700","#B8C700","#BAC800","#BEC900","#C1C900","#C5C900","#C8CA00",
     "#C9C918","#CBCA33","#CECA41","#CFCB4D","#D1CB57","#D4CB5F","#D5CC67","#D7CD6D","#DACD74","#DBCE79","#DDCF7F","#DFCF84","#E2CF8A","#E3D08F","#E5D193","#E7D197","#E8D29B","#EBD39F","#EDD3A4","#EED4A8","#F0D4AC","#F3D5AF","#F3D6B3","#F5D6B7","#F8D7BA","#F8D8BD","#F8DAC1","#F7DBC3","#F7DCC6","#F7DEC9","#F8DFCC","#F7E0CE","#F7E2D1","#F7E3D3","#F7E5D6","#F7E6D8","#F7E7DA","#F7E8DC","#F8EAE0","#F7EBE1","#F7ECE5","#F7EEE7","#F7EFE8","#F8F0EB","#F8F2ED","#F7F3EF","#F8F4F1","#F8F6F4","#F8F7F6","#F8F8F8","#F9F9F9","#FBFBFB","#FCFCFC","#FDFDFD","#FEFEFE","#FFFFFF"])
export linearl
# YlCn two-sided colormap for +/- data
ylcn = parse.(Color, ["#7AFEFF","#78FCFF","#76FAFF","#74F8FF","#72F6FF","#71F4FF","#6FF2FF","#6DF0FF","#6BEFFF","#69EDFF","#67EBFF","#65E9FF","#63E7FF","#61E5FF","#5FE3FF","#5DE1FF","#5BDFFF","#5ADDFF","#58DBFF","#56D9FF","#54D7FF","#52D5FF","#50D3FF","#4ED1FF","#4CCFFF","#4ACDFF","#48CBFF","#46C9FF","#44C7FF","#43C5FF","#41C3FF","#3FC1FF","#3DBFFF","#3BBDFF","#39BBFF","#37BAFF","#35B8FF","#33B6FF","#31B4FF","#2FB2FF","#2DB0FF","#2CAEFF","#2AACFF","#28AAFF","#26A8FF","#24A6FF","#22A4FF","#20A2FF","#1EA0FF","#1C9EFF","#1A9CFF","#189AFF","#1798FF","#1596FF","#1394FF","#1192FF","#0F90FF","#0D8EFF","#0B8CFF","#098AFF","#0788FF","#0586FF","#0384FF","#0183FF","#0181FF","#037FFD","#057CFB","#077AF9","#0978F8","#0B76F6","#0D74F4","#0F72F2","#1170F0","#136EEE","#156CEC","#176AEA","#1968E8","#1B66E6","#1D64E4","#1F62E2","#2160E0","#235EDE","#255CDC","#275ADA","#2958D9","#2C56D7","#2E54D5","#3052D3","#3250D1","#344ECF","#364CCD","#384ACB","#3A48C9","#3C46C7","#3E44C5","#4042C3","#4240C1","#443EBF","#463CBD","#483ABB","#4A38B9","#4C36B8","#4E34B6","#5032B4","#5230B2","#552EB0","#572CAE","#5929AC","#5B27AA","#5D25A8","#5F23A6","#6121A4","#631FA2","#651DA0","#671B9E","#69199C","#6B179A","#6D1598","#6F1397","#711195","#730F93","#750D91","#770B8F","#79098D","#7B078B","#7D0589","#800387","#820185","#840183","#860381","#88057F","#89077D","#8B097B","#8D0C79","#8F0E77","#911075","#931273","#951471","#97166F","#99186C","#9B1A6A","#9D1C68","#9F1E66","#A12164","#A32362","#A52560","#A7275E","#A9295C","#AB2B5A","#AD2D58","#AF2F56","#B13154","#B33352","#B53550","#B7384E","#B93A4B","#BB3C49","#BD3E47","#BE4045","#C04243","#C24441","#C4463F","#C6483D","#C84A3B","#CA4D39","#CC4F37","#CE5135","#D05333","#D25531","#D4572F","#D6592D","#D85B2A","#DA5D28","#DC5F26","#DE6224","#E06422","#E26620","#E4681E","#E66A1C","#E86C1A","#EA6E18","#EC7016","#EE7214","#F07412","#F27610","#F3790E","#F57B0C","#F77D0A","#F97F07","#FB8105","#FD8303","#FF8501","#FF8702","#FF8904","#FF8B06","#FF8D08","#FF8F0A","#FF910C
","#FF930E","#FF9510","#FF9612","#FF9814","#FF9A16","#FF9C18","#FF9E1A","#FFA01C","#FFA21E","#FFA420","#FFA622","#FFA824","#FFAA26","#FFAC28","#FFAD2A","#FFAF2C","#FFB12E","#FFB330","#FFB532","#FFB734","#FFB936","#FFBB38","#FFBD3A","#FFBF3C","#FFC13E","#FFC240","#FFC442","#FFC644","#FFC846","#FFCA48","#FFCC4A","#FFCE4C","#FFD04E","#FFD250","#FFD452","#FFD654","#FFD856","#FFD958","#FFDB5A","#FFDD5C","#FFDF5E","#FFE160","#FFE362","#FFE564","#FFE766","#FFE968","#FFEB6A","#FFED6C","#FFEF6E","#FFF070","#FFF272","#FFF474","#FFF676","#FFF878","#FFFA7A","#FFFC7C","#FFFE7E","#FFFF80"])
export ylcn
# Fire colormap
# 256 colors (120 + 136): pale warm white -> saturated red (r=1 while g,b
# ramp 0.9 -> 0) -> near-black (r fades 0.992 -> 0.05 with g,b at 0)
fire = RGB{N0f8}.(
    vcat(fill(1,120),range(0.992,0.05,length=136)), # r
    vcat(range(0.9,0,length=120),fill(0,136)), # g
    vcat(range(0.9,0,length=120),fill(0,136)) #b
)
export fire
# Water colormap
# 256 colors (136 + 120): pale cool white -> saturated blue (b=1 while r,g
# ramp 0.9 -> 0) -> near-black (b fades 0.992 -> 0.05 with r,g at 0)
water = RGB{N0f8}.(
    vcat(range(0.9,0,length=136),fill(0,120)), # r
    vcat(range(0.9,0,length=136),fill(0,120)), # g
    vcat(fill(1,136),range(0.992,0.05,length=120)) #b
)
export water
# Distinguishable colors for plot lines
# The first seven match the default Matlab line-color order; the rest extend
# the palette with additional mutually-distinguishable hues
lines = parse.(Color, ["#0072BD","#D95319","#EDB120","#7E2F8E","#77AC30","#4DBEEE","#A2142F", "#23366d", "#e73b20", "#741d2d", "#0b6402", "#102ca8", "#545257", "#40211f", "#bf7336", "#afc037", ])
export lines
# Various one-color ramps
# Interpolation coordinate for linterp1 below: anchors are 1 = white,
# 2 = full hue, 3 = black. 256 unevenly-spaced samples (1 + 130 + 125),
# starting at 1.07 and ending at 2.9 so the ramps skip pure white and
# pure black at the ends
color_x = [1.07; range(1.2,2,length=130); range(2+1/120,2.9,length=125)]
# Each ramp linearly interpolates white -> named hue -> black at color_x
reds = linterp1(1:3, parse.(Color, ["#FFFFFF", "#FF0000", "#000000",]), color_x)
oranges = linterp1(1:3, parse.(Color, ["#FFFFFF", "#FF8f00", "#000000",]), color_x)
greens = linterp1(1:3, parse.(Color, ["#FFFFFF", "#00AA66", "#000000",]), color_x)
cyans = linterp1(1:3, parse.(Color, ["#FFFFFF", "#00AAFF", "#000000",]), color_x)
blues = linterp1(1:3, parse.(Color, ["#FFFFFF", "#0000FF", "#000000",]), color_x)
violets = linterp1(1:3, parse.(Color, ["#FFFFFF", "#8000F0", "#000000",]), color_x)
purples = linterp1(1:3, parse.(Color, ["#FFFFFF", "#800080", "#000000",]), color_x)
magentas = linterp1(1:3, parse.(Color, ["#FFFFFF", "#F00080", "#000000",]), color_x)
grays = linterp1(1:3, parse.(Color, ["#FFFFFF", "#333333", "#000000",]), color_x)
export reds, oranges, greens, cyans, blues, violets, purples, magentas, grays
# Consistent mineral color dictionary
# Maps lowercase mineral (or phase) names to a fixed Color, so that plots of
# mineral data use the same color for a given mineral everywhere
mineralcolors=Dict{String,Color}()
# Olivine group
mineralcolors["olivine"] = parse(Color, "#5b9d00")
mineralcolors["forsterite"] = parse(Color, "#5bad00")
mineralcolors["fayalite"] = parse(Color, "#6b8d00")
# Garnet group
mineralcolors["garnet"] = parse(Color, "#741d2d")
mineralcolors["pyrope"] = parse(Color, "#9a1d36")
mineralcolors["almandine"] = parse(Color, "#ae1921")
mineralcolors["grossular"] = parse(Color, "#953d31")
mineralcolors["spessartine"] = parse(Color, "#ef5702")
mineralcolors["andradite"] = parse(Color, "#393125")
# Epidote group
mineralcolors["epidote"] = parse(Color, "#afc037")
mineralcolors["zoisite"] = parse(Color, "#93871d")
mineralcolors["clinozoisite"] = parse(Color, "#93871d")
# Pyroxenes and pyroxenoids
mineralcolors["pyroxene"] = parse(Color, "#506B20")
mineralcolors["orthopyroxene"] = parse(Color, "#7e5933")
mineralcolors["enstatite"] = parse(Color, "#37350e")
mineralcolors["ferrosilite"] = parse(Color, "#242d2c")
mineralcolors["clinopyroxene"] = parse(Color, "#227d0e")
mineralcolors["diopside"] = parse(Color, "#227d0e")
mineralcolors["chrome diopside"] = parse(Color, "#0b6402")
mineralcolors["hedenbergite"] = parse(Color, "#58634b")
mineralcolors["acmite"] = parse(Color, "#979141")
mineralcolors["jadeite"] = parse(Color, "#008621")
mineralcolors["omphacite"] = parse(Color, "#478233")
mineralcolors["rhodonite"] = parse(Color, "#c21a0d")
mineralcolors["wollastonite"] = parse(Color, "#c1b0a2")
# Amphiboles
mineralcolors["amphibole"] = parse(Color, "#4F6518")
mineralcolors["clinoamphibole"] = parse(Color, "#4F6500")
mineralcolors["orthoamphibole"] = parse(Color, "#4F6535")
mineralcolors["riebeckite"] = parse(Color, "#215d76")
mineralcolors["glaucophane"] = parse(Color, "#23366d")
mineralcolors["tremolite"] = parse(Color, "#588010")
mineralcolors["pargasite"] = parse(Color, "#078014")
mineralcolors["grunerite"] = parse(Color, "#917b58")
mineralcolors["anthophyllite"] = parse(Color, "#a9bdcc")
# Micas and other sheet silicates
mineralcolors["muscovite"] = parse(Color, "#b294a9")
mineralcolors["white mica"] = parse(Color, "#b294a9")
mineralcolors["biotite"] = parse(Color, "#4f3114")
mineralcolors["annite"] = parse(Color, "#4f3114")
mineralcolors["phlogopite"] = parse(Color, "#684a36")
mineralcolors["pyrophyllite"] = parse(Color, "#dfbf7d")
mineralcolors["chlorite"] = parse(Color, "#5d8d71")
mineralcolors["talc"] = parse(Color, "#92a1a1")
# Feldspars and feldspathoids
mineralcolors["feldspar"] = parse(Color, "#70b0c0")
mineralcolors["ternary feldspar"] = parse(Color, "#70b0c0")
mineralcolors["microcline"] = parse(Color, "#00afa9")
mineralcolors["orthoclase"] = parse(Color, "#ef9e90")
mineralcolors["k-feldspar"] = parse(Color, "#ef9e90")
mineralcolors["albite"] = parse(Color, "#70b0c0")
mineralcolors["anorthite"] = parse(Color, "#7f9fad")
mineralcolors["nepheline"] = parse(Color, "#b1bac9")
mineralcolors["leucite"] = parse(Color, "#e8c383")
mineralcolors["sodalite"] = parse(Color, "#202f94")
# Silica and other silicates
mineralcolors["quartz"] = parse(Color, "#803c92")
mineralcolors["chloritoid"] = parse(Color, "#769a94")
mineralcolors["cordierite"] = parse(Color, "#435477")
mineralcolors["sapphirine"] = parse(Color, "#27374f")
mineralcolors["staurolite"] = parse(Color, "#6a472e")
mineralcolors["kyanite"] = parse(Color, "#4b7bc2")
mineralcolors["andalusite"] = parse(Color, "#dc9992")
mineralcolors["sillimanite"] = parse(Color, "#d1d4d4")
# Common accessory phases
mineralcolors["apatite"] = parse(Color, "#277e85")
mineralcolors["monazite"] = parse(Color, "#912b1d")
mineralcolors["xenotime"] = parse(Color, "#73240d")
mineralcolors["allanite"] = parse(Color, "#503c6c")
mineralcolors["sphene"] = parse(Color, "#9CD356")
mineralcolors["zircon"] = parse(Color, "#0079a5")
# Oxides, spinels, and hydroxides
mineralcolors["spinel"] = parse(Color, "#ad2b4c")
mineralcolors["ulvospinel"] = parse(Color, "#545257")
mineralcolors["hercynite"] = parse(Color, "#454444")
mineralcolors["magnetite"] = parse(Color, "#1d2523")
mineralcolors["ilmenite"] = parse(Color, "#282a27")
mineralcolors["hematite"] = parse(Color, "#40211f")
mineralcolors["rutile"] = parse(Color, "#360216")
mineralcolors["corundum"] = parse(Color, "#8c3464")
mineralcolors["goethite"] = parse(Color, "#796367")
mineralcolors["brucite"] = parse(Color, "#cdc646")
# Carbonates
mineralcolors["calcite"] = parse(Color, "#f6b472")
mineralcolors["dolomite"] = parse(Color, "#eccdc3")
mineralcolors["siderite"] = parse(Color, "#6c462d")
mineralcolors["rhodochrosite"] = parse(Color, "#cc0153")
mineralcolors["malachite"] = parse(Color, "#068671")
mineralcolors["azurite"] = parse(Color, "#102ca8")
# Sulfides and other ore minerals
mineralcolors["pyrite"] = parse(Color, "#cab360")
mineralcolors["proustite"] = parse(Color, "#94120a")
mineralcolors["pyrargyrite"] = parse(Color, "#6f0921")
mineralcolors["crocoite"] = parse(Color, "#e73b20")
mineralcolors["orpiment"] = parse(Color, "#bf7336")
# Non-mineral phases
mineralcolors["fluid"] = parse(Color, "#4DBEEE")
mineralcolors["melt"] = parse(Color, "#A2142F")
export mineralcolors
# White and black endpoints for the mineral-color ramps below
w = RGB{N0f8}(1.0,1.0,1.0)
k = RGB{N0f8}(0,0,0)
# 256 interpolation points (1 + 140 + 115) over anchors 1 = white,
# 2 = mineral color, 3 = black, clipped at 1.11 and 2.8 to avoid the
# pure white/black endpoints
color_x = [1.11; range(1.2,2,length=140); range(2+1/120,2.8,length=115)]
# One-mineral colormaps: white -> mineral color -> black
almandines = linterp1(1:3, [w, mineralcolors["almandine"], k], color_x)
spessartines = linterp1(1:3, [w, mineralcolors["spessartine"], k], color_x)
pargasites = linterp1(1:3, [w, mineralcolors["pargasite"], k], color_x)
malachites = linterp1(1:3, [w, mineralcolors["malachite"], k], color_x)
azurites = linterp1(1:3, [w, mineralcolors["azurite"], k], color_x)
quartzes = linterp1(1:3, [w, mineralcolors["quartz"], k], color_x)
"""
    AllColormaps

Simple container struct collecting all of the colormaps defined in this
file, one (untyped) field per colormap. Instantiated once as the exported
`colormaps` object below, so colormaps can be accessed as e.g.
`colormaps.viridis`.
"""
struct AllColormaps
    hesperia
    magma
    inferno
    plasma
    viridis
    cubeviridis
    lacerta
    cubelacerta
    linearl
    cubelinearl
    laguna
    cubelaguna
    cividis
    ylcn
    water
    fire
    reds
    oranges
    greens
    cyans
    blues
    violets
    purples
    magentas
    grays
    cubehelix
    lines
end
# Single exported instance bundling every colormap; argument order must
# match the field order of the AllColormaps struct exactly
colormaps = AllColormaps(
    hesperia,
    magma,
    inferno,
    plasma,
    viridis,
    cubeviridis,
    lacerta,
    cubelacerta,
    linearl,
    cubelinearl,
    laguna,
    cubelaguna,
    cividis,
    ylcn,
    water,
    fire,
    reds,
    oranges,
    greens,
    cyans,
    blues,
    violets,
    purples,
    magentas,
    grays,
    cubehelix,
    lines
)
export colormaps
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | code | 291 | ## --- Custom display functions
# Custom pretty-printing for colormaps: print the container name, then each
# field name followed by the (typically graphical) display of that colormap
function display(x::AllColormaps)
    println("AllColormaps:")
    foreach(fieldnames(AllColormaps)) do field
        println(" $field")
        display(getfield(x, field))
    end
end
## ---
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | code | 2813 | ## --- Map colormaps to images
"""
```julia
imsc(A::AbstractArray, colormap::AbstractVector=viridis, cmin=nanminimum(A), cmax=nanmaximum(A))
```
Convert a matrix `A` to an image (an array of Colors.jl colors) using the
specified `colormap` (default `viridis`), optionally scaled between `cmin`
and `cmax`.
### Examples
```julia
julia> A = rand(3,3)
3×3 Matrix{Float64}:
0.39147 0.553489 0.351628
0.331786 0.343836 0.824674
0.639233 0.558113 0.965627
julia> img = imsc(A) # N.B. will display as image if `using ImageInTerminal`
3×3 Array{RGB{N0f8},2} with eltype ColorTypes.RGB{FixedPointNumbers.N0f8}:
RGB{N0f8}(0.282,0.137,0.455) … RGB{N0f8}(0.278,0.051,0.376)
RGB{N0f8}(0.267,0.004,0.329) RGB{N0f8}(0.431,0.808,0.345)
RGB{N0f8}(0.133,0.553,0.553) RGB{N0f8}(0.992,0.906,0.145)
julia> using Images; save("img.png", img) # Save to file as PNG
julia> using Plots; plot(0:3, 0:3, img) # Plot with specified x and y axes
```
"""
function imsc(A::AbstractArray, colormap::AbstractVector=viridis, cmin=nanminimum(A), cmax=nanmaximum(A))
Nc = length(colormap)
crange = cmax - cmin
return A .|> x -> colormap[isnan(x) ? 1 : ceil(UInt, min(max(Nc*(x-cmin)/crange, 1), Nc))]
end
export imsc
"""
```julia
imsci(A::AbstractArray, colormap::AbstractVector=viridis, cmin=nanminimum(A), cmax=nanmaximum(A))
```
Convert a matrix `A` to an indirect array image (an IndirectArray of Colors.jl
colors) using the specified `colormap` (default `viridis`), optionally scaled
between `cmin` and `cmax`.
As `imsc`, but returns an IndirectArray; slightly more space efficient for
small colormaps, but with computational cost.
### Examples
```julia
julia> A = rand(3,3)
3×3 Matrix{Float64}:
0.39147 0.553489 0.351628
0.331786 0.343836 0.824674
0.639233 0.558113 0.965627
julia> img = imsci(A)
3×3 IndirectArrays.IndirectArray{RGB{N0f8}, 2, UInt64, Matrix{UInt64}, Vector{RGB{N0f8}}}:
RGB{N0f8}(0.282,0.137,0.455) … RGB{N0f8}(0.278,0.051,0.376)
RGB{N0f8}(0.267,0.004,0.329) RGB{N0f8}(0.431,0.808,0.345)
RGB{N0f8}(0.133,0.553,0.553) RGB{N0f8}(0.992,0.906,0.145)
julia> using Images; save("img.png", img) # Save to file as PNG
julia> using Plots; plot(0:3, 0:3, img) # Plot with specified x and y axes
```
"""
function imsci(A::AbstractArray,colormap::AbstractArray=viridis,cmin::Number=nanminimum(A),cmax::Number=nanmaximum(A))
Nc = length(colormap)
crange = cmax - cmin
return IndirectArray(A .|> x -> isnan(x) ? 1 : ceil(UInt, min(max(Nc*(x-cmin)/crange, 1), Nc)), colormap)
end
export imsci
## -- End of File
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | code | 38201 | ## --- Parse a delimited string
"""
```julia
delim_string_parse!(result, str, delim, [T];
\toffset::Integer=0,
\tmerge::Bool=false,
\tundefval=NaN)
```
Parse a delimited string `str` with delimiter `delim` into values of type `T`
and return the answers in a pre-allocated `result` array provided as input.
If `T` is not specified explicitly, the `eltype` of the `result` array will
be used by default.
Optional keyword arguments and defaults:
offset::Integer=0
Start writing the parsed results into `result` at index `1+offset`
merge::Bool=false
Merge repeated delimiters?
undefval=NaN
A value to subsitute for any value that cannot be `parse`d to type `T`.
See also `delim_string_parse` for a non-in-place version that will automatically
allocate a result array.
### Examples
```julia
julia> A = zeros(100);
julia> n = delim_string_parse!(A, "1,2,3,4,5", ',', Float64)
5
julia> A[1:n]
5-element Vector{Float64}:
1.0
2.0
3.0
4.0
5.0
```
"""
function delim_string_parse!(result::Array, str::AbstractString, delim::Char, T::Type=eltype(result); offset::Integer=0, merge::Bool=false, undefval=NaN)
# Ignore initial delimiter
last_delim_pos = 0
if ~isempty(str) && first(str) == delim
last_delim_pos = 1
end
# Cycle through string parsing text betweeen delims
delim_pos = 0
n = offset
if merge
for i ∈ eachindex(str)
if str[i] == delim
delim_pos = i
if delim_pos > last_delim_pos+1
n += 1
parsed = nothing
if delim_pos > last_delim_pos+1
parsed = tryparse(T, str[(last_delim_pos+1):(delim_pos-1)])
end
result[n] = isnothing(parsed) ? T(undefval) : parsed
end
last_delim_pos = delim_pos
end
end
else
for i ∈ eachindex(str)
if str[i] == delim
delim_pos = i
if delim_pos > last_delim_pos
n += 1
parsed = nothing
if delim_pos > last_delim_pos+1
parsed = tryparse(T, str[(last_delim_pos+1):(delim_pos-1)])
end
result[n] = isnothing(parsed) ? T(undefval) : parsed
last_delim_pos = delim_pos
end
end
end
end
# Check for final value after last delim
if length(str) > last_delim_pos
n += 1
parsed = tryparse(T, str[(last_delim_pos+1):length(str)])
result[n] = isnothing(parsed) ? T(undefval) : parsed
end
# Return the number of result values
return n-offset
end
export delim_string_parse!
"""
```julia
delim_string_parse(str, delim, T;
\tmerge::Bool=false,
\tundefval=NaN)
```
Parse a delimited string `str` with delimiter `delim` into values of type `T`
and return the answers as an array with eltype `T`
Optional keyword arguments and defaults:
merge::Bool=false
Merge repeated delimiters?
undefval=NaN
A value to subsitute for any value that cannot be `parse`d to type `T`.
See also `delim_string_parse!` for an in-place version.
### Examples
```julia
julia> delim_string_parse("1,2,3,4,5", ',', Float64)
5-element Vector{Float64}:
1.0
2.0
3.0
4.0
5.0
```
"""
function delim_string_parse(str::AbstractString, delim::Char, T::Type=Float64; merge::Bool=false, undefval=NaN)
# Allocate an array to hold our parsed results
result = Array{T}(undef,ceil(Int,length(str)/2))
# Parse the string
n = delim_string_parse!(result, str, delim, T; merge=merge, undefval=undefval)
# Return the result values
return result[1:n]
end
export delim_string_parse
"""
```julia
delim_string_function(f, str, delim, T;
\tmerge::Bool=false,
```
Parse a delimited string `str` with delimiter `delim` into substrings that will
then be operated upon by function `f`. The results of `f` will be returned
in an array with eltype `T`.
### Examples
```julia
julia> delim_string_function(x -> delim_string_parse(x, ',', Int32, undefval=0), "1,2,3,4\n5,6,7,8\n9,10,11,12\n13,14,15,16", '\n', Array{Int32,1})
4-element Vector{Vector{Int32}}:
[1, 2, 3, 4]
[5, 6, 7, 8]
[9, 10, 11, 12]
[13, 14, 15, 16]
```
"""
function delim_string_function(f::Function, str::AbstractString, delim::Char, T::Type; merge::Bool=false)
# Max number of delimted values
ndelims = 2
for i ∈ eachindex(str)
if str[i] == delim
ndelims += 1
end
end
# Allocate output array
result = Array{T}(undef,ceil(Int,ndelims))
# Ignore initial delimiter
last_delim_pos = 0
if first(str) == delim
last_delim_pos = 1
end
# Cycle through string parsing text betweeen delims
delim_pos = 0
n = 0
if merge
for i ∈ eachindex(str)
if str[i] == delim
delim_pos = i
if delim_pos > last_delim_pos+1
n += 1
if delim_pos > last_delim_pos+1
result[n] = f(str[(last_delim_pos+1):(delim_pos-1)])
end
end
last_delim_pos = delim_pos
end
end
else
for i ∈ eachindex(str)
if str[i] == delim
delim_pos = i
if delim_pos > last_delim_pos
n += 1
if delim_pos > last_delim_pos+1
result[n] = f(str[(last_delim_pos+1):(delim_pos-1)])
end
last_delim_pos = delim_pos
end
end
end
end
# Check for final value after last delim
if length(str)>last_delim_pos
n += 1
result[n] = f(str[(last_delim_pos+1):length(str)])
end
# Return the result values
return result[1:n]
end
export delim_string_function
"""
```julia
parsedlm(str::AbstractString, delimiter::Char, T::Type=Float64; rowdelimiter::Char='\\n')
```
Parse a string delimited by both row and column into a single (2-D) matrix. Default column delimiter is newline.
Similar to `readdlm`, but operating on a string instead of a file.
### Examples
```julia
julia> parsedlm("1,2,3\n4,5,6\n7,8,9\n", ',', Float64)
3×3 Matrix{Float64}:
1.0 2.0 3.0
4.0 5.0 6.0
7.0 8.0 9.0
julia> parsedlm("1,2,3,4\n5,6,7,8\n9,10,11,12\n13,14,15,16", ',', Int64)
4×4 Matrix{Int64}:
1 2 3 4
5 6 7 8
9 10 11 12
13 14 15 16
```
"""
function parsedlm(str::AbstractString, delimiter::Char, ::Type{T}=Float64; rowdelimiter::Char='\n') where {T}
# Count rows, and find maximum number of delimiters per row
numcolumns = maxcolumns = maxrows = 0
cₗ = delimiter
for c in str
(c == delimiter) && (numcolumns += 1)
if c == rowdelimiter
maxrows += 1
numcolumns += 1
# See if we have a new maximum, and reset the counters
(numcolumns > maxcolumns) && (maxcolumns = numcolumns)
numcolumns=0
end
cₗ = c
end
# If the last line isn't blank, add one more to the row counter
(cₗ != rowdelimiter) && (maxrows += 1)
# Allocate space for the imported array and fill with emptyval
parsedmatrix = emptys(T, maxrows, maxcolumns)
maxchars = length(str)
kₗ = kₙ = firstindex(str) # Last and next delimiter position
@inbounds for i = 1:maxrows
for j = 1:maxcolumns
c = str[kₙ]
while (kₙ < maxchars) && (c !== delimiter) && (c !== rowdelimiter)
kₙ = nextind(str, kₙ)
c = str[kₙ]
end
if kₙ>kₗ
# Parse the string
k = (c===delimiter || c===rowdelimiter) ? prevind(str,kₙ) : kₙ
parsed = tryparse(T, str[kₗ:k])
isnothing(parsed) || (parsedmatrix[i,j] = parsed)
end
# If we're at the end of the string, move on
(kₙ == maxchars) && break
# Step over the delimiter
kₗ = kₙ = nextind(str, kₙ)
# If we've hit a row delimiter, move to next row
(str[kₙ] == rowdelimiter) && break
end
end
return parsedmatrix
end
export parsedlm
## --- Classifying imported datasets
"""
```julia
isnumeric(x)
```
Return `true` if `x` can be parsed as a number, else `false`
### Examples
```julia
julia> StatGeochem.isnumeric(1)
true
julia> StatGeochem.isnumeric("1")
true
julia> StatGeochem.isnumeric("0.5e9")
true
julia> StatGeochem.isnumeric("foo")
false
```
"""
isnumeric(x) = false
isnumeric(x::Number) = true
isnumeric(x::AbstractString) = tryparse(Float64,x) !== nothing
"""
```julia
nonnumeric(x)
```
Return true for if `x` is not missing but cannot be parsed as a number
### Examples
```julia
julia> StatGeochem.nonnumeric(1)
false
julia> StatGeochem.nonnumeric("1")
false
julia> StatGeochem.nonnumeric("0.5e9")
false
julia> StatGeochem.nonnumeric("foo")
true
```
"""
nonnumeric(x) = true
nonnumeric(x::Number) = false
nonnumeric(x::Missing) = false
nonnumeric(x::AbstractString) = (tryparse(Float64,x) === nothing) && (x != "")
## --- Transforming imported datasets
"""
```julia
floatify(x, T::Type=Float64)
```
Convert `x` to a floating-point number (default `Float64`) by any means necessary
### Examples
```julia
julia> StatGeochem.floatify(5)
5.0
julia> StatGeochem.floatify("5")
5.0
julia> StatGeochem.floatify("0x05")
5.0
julia> StatGeochem.floatify("0.5e1")
5.0
```
"""
floatify(x, T::Type{<:AbstractFloat}=Float64) = T(NaN)
floatify(x::Number, T::Type{<:AbstractFloat}=Float64) = T(x)
floatify(x::AbstractString, T::Type{<:AbstractFloat}=Float64) = (n = tryparse(T,x)) !== nothing ? n : T(NaN)
# Format one imported column: with standardize=true, coerce the whole column
# to floats or strings depending on which is the majority; with
# standardize=false, only unify columns that are already homogeneously typed
columnformat(x, standardize::Bool=true, floattype=Float64) = _columnformat(x, Val(standardize), floattype)
function _columnformat(x, ::Val{true}, floattype)
    # Majority vote: mostly-numeric columns become floats, otherwise strings
    mostlynumeric = sum(isnumeric.(x)) >= sum(nonnumeric.(x))
    return mostlynumeric ? floatify.(x, floattype) : string.(x)
end
function _columnformat(x, ::Val{false}, floattype)
    if all(xi -> isa(xi, AbstractString), x)
        return string.(x)
    elseif all(xi -> isa(xi, AbstractFloat), x)
        return float.(x)
    elseif all(xi -> isa(xi, Integer), x)
        return Integer.(x)
    else
        # Mixed-type column: leave it untouched
        return x
    end
end
"""
```julia
sanitizevarname(s::AbstractString)
```
Modify an input string `s` to transform it into an acceptable variable name.
### Examples
```julia
julia> StatGeochem.sanitizevarname("foo")
"foo"
julia> StatGeochem.sanitizevarname("523foo")
"_523foo"
julia> StatGeochem.sanitizevarname("Length (μm)")
"Length_μm"
```
"""
function sanitizevarname(s::AbstractString)
s = replace(s, r"[\[\](){}]" => "") # Remove parentheses entirely
s = replace(s, r"^([0-9])" => s"_\1") # Can't begin with a number
s = replace(s, r"([\0-\x1F -/:-@\[-`{-~])" => s"_") # Everything else becomes an underscore
return s
end
sanitizevarname(s::Symbol) = s
# Convert any iterable of name-like values to a tuple of Symbols,
# short-circuiting when the input is already a Symbol tuple
symboltuple(x::NTuple{N, Symbol}) where {N} = x
symboltuple(x::NTuple{N}) where {N} = map(Symbol, x)
symboltuple(x) = Tuple(Symbol(s) for s in x)
# Convert any iterable of name-like values to a Vector{String},
# short-circuiting when the input is already a String vector
stringarray(x::Vector{String}) = x
stringarray(x::NTuple{N, String}) where {N} = collect(x)
stringarray(x) = [String(s) for s in x]
"""
```julia
TupleDataset(d::Dict, elements=keys(d))
```
Convert a dict-based dataset to a tuple-based dataset.
See also `DictDataset`
### Examples
```julia
julia> d
Dict{String, Vector{Float64}} with 2 entries:
"Yb" => [0.823733, 0.0531003, 0.47996, 0.560998, 0.001816, 0.455064, 0.694017, 0.737816, 0.0755015, 0.46098 …
"La" => [0.440947, 0.937551, 0.464318, 0.694184, 0.253974, 0.521292, 0.857979, 0.0545946, 0.716639, 0.597616…
julia> t = TupleDataset(d)
NamedTuple with 2 elements:
Yb = Vector{Float64}(100,) [0.8237334494155881 ... 0.012863893327602627]
La = Vector{Float64}(100,) [0.44094669199955616 ... 0.5371416189174069]
```
"""
function TupleDataset(d::Dict, elements=haskey(d,"elements") ? d["elements"] : keys(d))
symbols = symboltuple(sanitizevarname.(elements))
return NamedTuple{symbols}(d[e] for e in elements)
end
export TupleDataset
"""
```julia
DictDataset(t::NamedTuple, elements=keys(t))
```
Convert a tuple-based dataset to a dict-based dataset.
See also `TupleDataset`
### Examples
```julia
julia> t
NamedTuple with 2 elements:
La = Vector{Float64}(100,) [0.6809734028326375 ... 0.30665937715972313]
Yb = Vector{Float64}(100,) [0.8851029525168138 ... 0.866246147690925]
julia> d = DictDataset(t)
Dict{String, Vector{Float64}} with 2 entries:
"Yb" => [0.885103, 0.284384, 0.351527, 0.643542, 0.631274, 0.653966, 0.968414, 0.00204819, 0.0655173, 0.5343…
"La" => [0.680973, 0.35098, 0.0198742, 0.139642, 0.0703337, 0.0328973, 0.639431, 0.245205, 0.424142, 0.48889…
```
"""
function DictDataset(t::NamedTuple, elements=keys(t))
d = Dict(String(e) => t[Symbol(e)] for e in elements)
end
export DictDataset
"""
```julia
elementify(data::AbstractArray, [elements=data[1,:]];
\timportas=:Dict,
\tstandardize::Bool=true,
\tfloattype=Float64,
\tskipstart::Integer=1,
\tskipnameless::Bool=true
)
```
Convert a flat array `data` into a Named Tuple (`importas=:Tuple`) or
Dictionary (`importas=:Dict`) with each column as a variable.
Tuples are substantially more efficient, so should be favored where possible.
### Examples
```julia
julia> A = ["La" "Ce" "Pr"; 1.5 1.1 1.0; 3.7 2.9 2.5]
3×3 Matrix{Any}:
"La" "Ce" "Pr"
1.5 1.1 1.0
3.7 2.9 2.5
julia> elementify(A, importas=:Tuple)
NamedTuple with 3 elements:
La = Vector{Float64}(2,) [1.5 ... 3.7]
Ce = Vector{Float64}(2,) [1.1 ... 2.9]
Pr = Vector{Float64}(2,) [1.0 ... 2.5]
julia> elementify(A, importas=:Dict)
Dict{String, Union{Vector{Float64}, Vector{String}}} with 4 entries:
"Ce" => [1.1, 2.9]
"Pr" => [1.0, 2.5]
"elements" => ["La", "Ce", "Pr"]
"La" => [1.5, 3.7]
```
"""
function elementify(data::AbstractArray;
importas=:Tuple,
skipstart::Integer=1,
standardize::Bool=true,
floattype=Float64,
skipnameless::Bool=true,
sumduplicates::Bool=false
)
elementify(data, data[firstindex(data),:];
importas=importas,
skipstart=skipstart,
standardize=standardize,
floattype=floattype,
skipnameless=skipnameless,
sumduplicates=sumduplicates)
end
function elementify(data::AbstractArray, elements;
importas=:Tuple,
skipstart::Integer=0,
standardize::Bool=true,
floattype=Float64,
skipnameless::Bool=true,
sumduplicates::Bool=false
)
if importas === :Dict || importas === :dict
# Output as dictionary
if standardize
# Constrain types somewhat for a modicum of type-stability
if 1+skipstart == size(data,1)
result = Dict{String,Union{Vector{String}, String, Float64}}()
else
result = Dict{String,Union{Vector{String}, Vector{Float64}}}()
end
else
result = Dict{String, Any}()
end
# Process elements array
elements = stringarray(elements)
if skipnameless
elements = filter(!isempty, elements)
end
result["elements"] = isa(elements, Vector) ? elements : collect(elements)
# Parse the input array, minus empty-named columns
i₀ = firstindex(data) + skipstart
for j ∈ eachindex(elements)
if skipstart == size(data,1)-1
column = data[end,j]
else
column = data[i₀:end,j]
end
if !haskey(result, elements[j])
result[elements[j]] = columnformat(column, standardize, floattype)
else
lastcol = result[elements[j]]
treat_as_numbers = ((sum(isnumeric.(column)) >= sum(nonnumeric.(column))) || (sum(isnumeric.(lastcol)) >= sum(nonnumeric.(lastcol))))
if treat_as_numbers
if sumduplicates
@info "Duplicate key $(elements[j]) found, summing"
result[elements[j]] = nanadd(floatify.(lastcol, floattype), floatify.(column, floattype))
else
@info "Duplicate key $(elements[j]) found, averaging"
result[elements[j]] = nanadd(floatify.(lastcol, floattype), floatify.(column, floattype)) ./ 2.0
end
else
n = 1
while haskey(result, elements[j]*string(n))
n+=1
end
@info "Duplicate key $(elements[j]) found, replaced with $(elements[j]*string(n))"
elements[j] = elements[j]*string(n)
result[elements[j]] = columnformat(column, standardize, floattype)
end
end
end
# Return only unique elements, since dictionary keys must be unique
result["elements"] = unique(elements)
return result
elseif importas==:Tuple || importas==:tuple || importas==:NamedTuple
# Import as NamedTuple (more efficient future default)
t = Bool[(skipnameless && e !== "") for e in elements]
elements = sanitizevarname.(elements[t])
i₀ = firstindex(data) + skipstart
values = (columnformat(data[i₀:end, j], standardize, floattype) for j in findall(vec(t)))
return NamedTuple{symboltuple(elements)}(values)
end
end
export elementify
"""
```julia
unelementify(dataset, elements;
\tfloatout::Bool=false,
\tfloattype=Float64,
\tfindnumeric::Bool=false,
\tskipnan::Bool=false,
\trows=:
)
```
Convert a Dict or Named Tuple of vectors into a 2-D array with variables as columns
### Examples
```julia
julia> D
NamedTuple with 3 elements:
La = Vector{Float64}(2,) [1.5 ... 3.7]
Ce = Vector{Float64}(2,) [1.1 ... 2.9]
Pr = Vector{Float64}(2,) [1.0 ... 2.5]
julia> unelementify(D)
3×3 Matrix{Any}:
"La" "Ce" "Pr"
1.5 1.1 1.0
3.7 2.9 2.5
```
"""
function unelementify(dataset::Dict, elements=sort(collect(keys(dataset)));
floatout::Bool=false,
floattype=Float64,
findnumeric::Bool=false,
skipnan::Bool=false,
rows=:
)
# Find the elements in the input dict if they exist and aren't otherwise specified
if any(elements .== "elements")
elements = stringarray(dataset["elements"])
end
# Figure out how many are numeric (if necessary), so we can export only
# those if `findnumeric` is set
if findnumeric
is_numeric_element = Array{Bool}(undef,length(elements))
for i ∈ eachindex(elements)
is_numeric_element[i] = sum(isnumeric.(dataset[elements[i]])) > sum(nonnumeric.(dataset[elements[i]]))
end
elements = elements[is_numeric_element]
end
# Generate output array
if floatout
# Allocate output Array{Float64}
result = Array{Float64}(undef, length(dataset[first(elements)][rows]), length(elements))
# Parse the input dict. No column names if `floatout` is set
for i ∈ eachindex(elements)
result[:,i] = floatify.(dataset[elements[i]][rows], floattype)
end
else
# Allocate output Array{Any}
result = Array{Any}(undef, length(dataset[first(elements)][rows])+1, length(elements))
# Parse the input dict
for i ∈ eachindex(elements)
# Column name goes in the first row, everything else after that
result[1,i] = elements[i]
result[2:end,i] .= dataset[elements[i]][rows]
# if `skipnan` is set, replace each NaN in the output array with
# an empty string ("") such that it is empty when printed to file
# with dlmwrite or similar
if skipnan
for n = 2:length(result[:,i])
if isa(result[n,i], AbstractFloat) && isnan(result[n,i])
result[n,i] = ""
end
end
end
end
end
return result
end
# NamedTuple flavor of `unelementify`: flatten a tuple-based dataset into a 2-d
# array with one column per variable (variable names in the first row, unless
# `floatout` is set).
function unelementify(dataset::NamedTuple, elements=keys(dataset);
        floatout::Bool=false,
        floattype=Float64,
        findnumeric::Bool=false,
        skipnan::Bool=false,
        rows=:
    )
    # Normalize the element list to a tuple of Symbols
    elements = symboltuple(elements)
    # Optionally keep only variables whose values are mostly numeric
    if findnumeric
        elements = filter(x -> sum(isnumeric.(dataset[x])) > sum(nonnumeric.(dataset[x])), elements)
    end
    nrows = length(dataset[first(elements)][rows])
    if floatout
        # Pure numeric output: no header row, everything coerced to `floattype`
        out = Array{floattype}(undef, nrows, length(elements))
        for col ∈ eachindex(elements)
            out[:,col] = floatify.(dataset[elements[col]][rows], floattype)
        end
    else
        # Mixed output: first row holds the variable names as strings
        out = Array{Any}(undef, nrows+1, length(elements))
        for col ∈ eachindex(elements)
            out[1,col] = string(elements[col])
            out[2:end,col] .= dataset[elements[col]][rows]
            # If `skipnan` is set, replace each NaN with an empty string ("")
            # so that it prints as an empty cell with dlmwrite or similar
            if skipnan
                for row = 2:size(out,1)
                    if out[row,col] isa AbstractFloat && isnan(out[row,col])
                        out[row,col] = ""
                    end
                end
            end
        end
    end
    return out
end
export unelementify
## --- Concatenating / stacking datasets

# `emptys(T, dims...)`: build an array of "empty" placeholder values appropriate
# for element type `T` -- `NaN` for numbers (converted to `T` when `T` is a
# concrete float type), `""` for strings, and `missing` for everything else
emptys(::Type{T}, dims...) where T <: AbstractFloat = fill(T(NaN), dims...)
emptys(::Type{T}, dims...) where T <: Number = fill(NaN, dims...)
emptys(::Type{T}, dims...) where T <: AbstractString = fill("", dims...)
emptys(::Type, dims...) = fill(missing, dims...)
"""
```julia
concatenatedatasets(d1::NamedTuple, d2::NamedTuple,... ;[elements::Vector{Symbol}])
concatenatedatasets(d1::AbstractDict, d2::AbstractDict,... ;[elements::Vector{String}])
```
Vertically concatenate two or more Dict- or Tuple-based datasets, variable-by-variable.
Optionally, a list of variables to include may be specified in `elements`
### Examples
```julia
julia> d1 = Dict("La" => rand(5), "Yb" => rand(5))
Dict{String, Vector{Float64}} with 2 entries:
  "Yb" => [0.221085, 0.203369, 0.0657271, 0.124606, 0.0975556]
  "La" => [0.298578, 0.481674, 0.888624, 0.632234, 0.564491]
julia> d2 = Dict("La" => rand(5), "Ce" => rand(5))
Dict{String, Vector{Float64}} with 2 entries:
  "Ce" => [0.0979752, 0.108585, 0.718315, 0.771128, 0.698499]
  "La" => [0.538215, 0.633298, 0.981322, 0.908532, 0.77754]
julia> concatenatedatasets(d1,d2)
Dict{String, Vector{Float64}} with 3 entries:
  "Ce" => [NaN, NaN, NaN, NaN, NaN, 0.0979752, 0.108585, 0.718315, 0.771128, 0.698499]
  "Yb" => [0.221085, 0.203369, 0.0657271, 0.124606, 0.0975556, NaN, NaN, NaN, NaN, NaN]
  "La" => [0.298578, 0.481674, 0.888624, 0.632234, 0.564491, 0.538215, 0.633298, 0.981322, 0.908532, 0.77754]
```
"""
function concatenatedatasets(args...; kwargs...)
    # Varargs entry point: collect the datasets into a tuple and delegate
    return concatenatedatasets((args...,); kwargs...)
end
# Fold a tuple of datasets down to a single dataset by pairwise concatenation
function concatenatedatasets(dst::Tuple; kwargs...)
    combined = dst[1]
    for k in 2:length(dst)
        combined = concatenatedatasets(combined, dst[k]; kwargs...)
    end
    return combined
end
function concatenatedatasets(d1::AbstractDict, d2::AbstractDict; elements=String[])
    # Concatenating with an empty dataset is a no-op
    isempty(d1) && return d2
    isempty(d2) && return d1
    # Keys to consider: prefer an explicit "elements" metadata field if present
    k1 = haskey(d1,"elements") ? d1["elements"] : sort(collect(keys(d1)))
    k2 = haskey(d2,"elements") ? d2["elements"] : sort(collect(keys(d2)))
    allkeys = k1 ∪ k2
    included = isempty(elements) ? allkeys : elements ∩ allkeys
    # Sizes used to pad any variable that appears in only one of the two datasets
    s1, s2 = size(d1[first(k1)]), size(d2[first(k2)])
    out = typeof(d1)(e => vcombine(d1,d2,e,s1,s2) for e in included)
    # Propagate the "elements" metadata field if d1 carried one
    haskey(d1,"elements") && (out["elements"] = included)
    return out
end
function concatenatedatasets(d1::NamedTuple, d2::NamedTuple; elements=Symbol[])
    # Concatenating with an empty dataset is a no-op
    isempty(d1) && return d2
    isempty(d2) && return d1
    # Keys to consider
    allkeys = keys(d1) ∪ keys(d2)
    included = isempty(elements) ? allkeys : elements ∩ allkeys
    # Sizes used to pad any variable that appears in only one of the two datasets
    s1, s2 = size(d1[first(keys(d1))]), size(d2[first(keys(d2))])
    return NamedTuple{(included...,)}(vcombine(d1,d2,e,s1,s2) for e in included)
end
# Vertically concatenate field `e` of two datasets, padding with `emptys` of the
# appropriate element type when the field is present in only one of the two
function vcombine(d1, d2, e, s1=size(d1[first(keys(d1))]), s2=size(d2[first(keys(d2))]))
    in1, in2 = haskey(d1,e), haskey(d2,e)
    if in1 && !in2
        # Present only in d1: pad the bottom
        return vcat(d1[e], emptys(eltype(d1[e]), s2))
    elseif in2 && !in1
        # Present only in d2: pad the top
        return vcat(emptys(eltype(d2[e]), s1), d2[e])
    else
        return vcat(d1[e], d2[e])
    end
end
export concatenatedatasets
## --- Hashing of imported datasets

# Truncate `x` to its leading `digits` significant decimal digits, regardless of
# scale (e.g. rescale(1234.56, 2) == 12.0); zero and nonfinite inputs pass
# through with a plain truncation
function rescale(x::Number, digits::Integer=1)
    shift = if isfinite(x) && !iszero(x)
        digits - 1 - floor(Int, log10(abs(x)))
    else
        0
    end
    return trunc(x * 10.0^shift)
end

# Reduce a value to a `digits`-significant-figure hashable token: numbers are
# truncated via `rescale` and reinterpreted as raw bits, everything else falls
# back to an ordinary `hash`
prehash(x, digits::Integer) = hash(x)
prehash(x::Number, digits::Integer) = prehash(Float64(x), digits)
prehash(x::Float64, digits::Integer) = reinterpret(UInt64, rescale(x, digits))
"""
```julia
hashdataset(ds::Union{Dict, NamedTuple}; digits::Number=3, elements=keys(ds))
```
Calculate a hash value for each row of a dataset.
By default, this considers only the first 3 `digits` of each number, regardless of scale.
### Examples
```julia
julia> ds = (La=rand(5), Yb=rand(5)/10)
NamedTuple with 2 elements:
La = Vector{Float64}(5,) [0.580683620945775 ... 0.23810020661332487]
Yb = Vector{Float64}(5,) [0.014069255862588826 ... 0.067367584177675]
julia> hashdataset(ds)
5-element Vector{UInt64}:
0x89a02fa88348e07c
0x181e78f0ad2af144
0xa3811bd05cca4743
0xfcfe1b6edf0c81cf
0x647868efa9352972
```
"""
function hashdataset(ds::Union{Dict, NamedTuple}; digits::Number=3, elements=keys(ds))
    # Collect the element names into a tuple so they are indexable below
    # (the default `keys(::Dict)` is a `Base.KeySet`, which has no `getindex`)
    es = Tuple(elements)
    # Nothing to hash if no elements were requested
    isempty(es) && return UInt64[]
    # All hashed columns must share the same indices
    I = eachindex(ds[first(es)])
    for e in es
        @assert eachindex(ds[e]) == I
    end
    hashes = similar(ds[first(es)], UInt64)
    for i in eachindex(hashes)
        # Hash the tuple of rounded row values across all requested columns
        dt = ntuple(j -> prehash(ds[es[j]][i], digits), length(es))
        hashes[i] = hash(dt)
    end
    return hashes
end
export hashdataset
## --- Renormalization of imported datasets

"""
```julia
renormalize!(A::AbstractArray; dim, total=1.0)
```
Normalize an array `A` in place such that it sums to `total`. Optionally may
specify a dimension `dim` along which to normalize.
"""
function renormalize!(A::AbstractArray; dim=:, total=1.0)
    # NaN-ignoring sum (scalar for dim=:, otherwise reduced along dim)
    s = NaNStatistics._nansum(A, dim)
    A .*= total ./ s
end
"""
```julia
renormalize!(dataset, [elements]; total=1.0)
```
Normalize in-place a (i.e., compositional) `dataset` defined by a `Dict` or
`NamedTuple` of one-dimensional numerical arrays, such that all the `elements`
(i.e., variables -- by default all keys in the dataset) sum to a given `total`
(by default, `1.0`).
Note that the arrays representing each element or variable are assumed to be
of uniform length
"""
function renormalize!(dataset::Union{Dict,NamedTuple}, elements=keys(dataset); total=1.0)
    # Row-wise running total across all elements, treating NaNs as zero.
    # Note that this assumes all variables in the dataset are the same length!
    rowsum = zeros(size(dataset[first(keys(dataset))]))
    for e in elements
        v = dataset[e]
        @. rowsum += ifelse(isnan(v), 0, v)
    end
    # Avoid dividing by zero: rows with no data renormalize to NaN
    rowsum[rowsum .== 0] .= NaN
    for e in elements
        dataset[e] .*= total ./ rowsum
    end
    return dataset
end
export renormalize!
## --- High-level import/export functions

"""
    guessdelimiter(s::AbstractString)

Guess the delimiter character from a file name/path `s` based on its extension:
`','` for `.csv`, `'\\t'` for `.tsv`, `'|'` for `.psv`, and `'\\t'` for
anything else.
"""
function guessdelimiter(s::AbstractString)
    # `endswith` avoids the byte-indexing pitfalls of `s[end-3:end]` (which can
    # throw a StringIndexError for non-ASCII file names) and transparently
    # handles strings shorter than the extension
    if endswith(s, ".csv")
        ','
    elseif endswith(s, ".tsv")
        '\t'
    elseif endswith(s, ".psv")
        '|'
    else
        '\t'
    end
end
"""
```julia
function importdataset(filepath, [delim];
\timportas=:Dict,
\telements=nothing,
\tstandardize::Bool=true,
\tfloattype=Float64,
\tskipstart::Integer=0,
\tskipnameless::Bool=true,
\tmindefinedcolumns::Integer=0
)
```
Import a delimited file specified by `filepath` with delimiter `delim` as a
dataset in the form of either a `Dict` or a `NamedTuple`.
Possible keyword arguments include:
\timportas
Specify the format of the imported dataset. Options include `:Dict` and `:Tuple`
\telements
Specify the names to be used for each element (i.e., column) of the dataset.
Default value (`nothing`) will cause `elements` to be read from the first row of the file
\tstandardize
Convert columns to uniform type wherever possible. Boolean; `true` by default.
\tfloattype
Preferred floating-point type for numerical data. `Float64` by default.
\tskipstart
Ignore this many rows at the start of the input file (useful if input file has
a header or other text before the column names). `0` by default.
\tskipnameless
Skip columns with no column name. Boolean; `true` by default
\tmindefinedcolumns
Skip rows with fewer than this number of delimiters. `0` by default.
"""
function importdataset(filepath::AbstractString, delim::AbstractChar=guessdelimiter(filepath);
        importas=:Dict,
        elements=nothing,
        standardize::Bool=true,
        floattype=Float64,
        skipstart::Integer=0,
        skipnameless::Bool=true,
        mindefinedcolumns::Integer=0
    )
    # Read file, checking for (and skipping past) any leading byte-order mark
    io = open(filepath, "r")
    if read(io, Char) == '\ufeff'
        @warn """Skipping hidden \'\\ufeff\' (U+FEFF) character at start of input file.
        This character is often added to CSV files by Microsoft Excel (and some other
        Microsoft products) as what appears to be what we might call an "extension",
        which would would cause file parsing to fail if we didn't manually remove it.
        Try using open software like LibreOffice instead of Excel to make this warning go away.
        """
    else
        # No BOM: rewind so the first character just read is not lost
        seekstart(io)
    end
    data = readdlm(io, delim, skipstart=skipstart)
    close(io)
    # Exclude rows with fewer than `mindefinedcolumns` columns
    if mindefinedcolumns > 0
        definedcolumns = vec(sum(.~ isempty.(data), dims=2))
        t = definedcolumns .>= mindefinedcolumns
        data = data[t,:]
    end
    # Parse the flat cell array into a Dict- or Tuple-based dataset
    if isnothing(elements)
        # Column names are read from the first row of the file
        return elementify(data,
            importas=importas,
            standardize=standardize,
            floattype=floattype,
            skipnameless=skipnameless
        )
    else
        # Column names are supplied by the caller
        return elementify(data, elements,
            importas=importas,
            standardize=standardize,
            floattype=floattype,
            skipnameless=skipnameless
        )
    end
end
export importdataset
"""
```julia
exportdataset(dataset, [elements], filepath, delim;
\tfloatout::Bool=false,
\tfindnumeric::Bool=false,
\tskipnan::Bool=true,
\tdigits::Integer,
\tsigdigits::Integer
\trows=:
)
```
Convert a dict or named tuple of vectors into a 2-D array with variables as columns
Export a `dataset` (in the form of either a `Dict` or a `NamedTuple`),
optionally specifying which `elements` to export, as a delimited ASCII text file
with the name specified by `filepath` and delimiter `delim`.
Possible keyword arguments include:
\tdigits
\tsigdigits
Specify a number of absolute or significant digits to which to round the printed output.
Default is no rounding.
\tskipnan
Leave `NaN`s as empty cells in the delimited output file. Boolean; `true` by default.
\tfloatout
Force all output to be represented as a floating-point number, or else `NaN`.
Boolean; `false` by default.
\tfindnumeric
Export only numeric columns. Boolean; `false` by default.
\trows
specify which rows of the dataset to export. Default `:` exports all rows.
"""
function exportdataset(dataset::Union{Dict,NamedTuple}, filepath::AbstractString, delim::AbstractChar=guessdelimiter(filepath);
        floatout::Bool=false,
        findnumeric::Bool=false,
        skipnan::Bool=true,
        digits::Integer=0,
        sigdigits::Integer=0,
        rows=:
    )
    # Flatten the dataset to a 2-d cell array, one column per variable
    out = unelementify(dataset,
        floatout=floatout,
        findnumeric=findnumeric,
        skipnan=skipnan,
        rows=rows
    )
    # Optionally round numeric entries (absolute digits first, then significant digits)
    if digits > 0
        map!(x -> x isa Number ? round(x, digits=digits) : x, out, out)
    end
    if sigdigits > 0
        map!(x -> x isa Number ? round(x, sigdigits=sigdigits) : x, out, out)
    end
    # Print to file
    return writedlm(filepath, out, delim)
end
# As above, but exporting only the variables listed in `elements`
function exportdataset(dataset::Union{Dict,NamedTuple}, elements::Array, filepath::AbstractString, delim::AbstractChar=guessdelimiter(filepath);
        floatout::Bool=false,
        findnumeric::Bool=false,
        skipnan::Bool=true,
        digits::Integer=0,
        sigdigits::Integer=0,
        rows=:
    )
    # Flatten the requested variables to a 2-d cell array, one column per variable
    out = unelementify(dataset, elements,
        floatout=floatout,
        findnumeric=findnumeric,
        skipnan=skipnan,
        rows=rows
    )
    # Optionally round numeric entries (absolute digits first, then significant digits)
    if digits > 0
        map!(x -> x isa Number ? round(x, digits=digits) : x, out, out)
    end
    if sigdigits > 0
        map!(x -> x isa Number ? round(x, sigdigits=sigdigits) : x, out, out)
    end
    # Print to file
    return writedlm(filepath, out, delim)
end
export exportdataset
## --- End of File
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | code | 10004 | ## --- Simple linear interpolations
# Scalar linear interpolation with linear extrapolation beyond the knots
function _linterp1(x, y, xq::Number, extrapolate::Symbol)
    @assert extrapolate === :Linear
    # Index of the knot at or left of xq, clamped so both endpoints are in bounds
    k = searchsortedfirst(x, xq, Base.Order.ForwardOrdering()) - 1
    below = clamp(k, firstindex(x), lastindex(x) - 1)
    above = below + 1
    # Fractional position within the bracketing interval (outside [0,1] when
    # extrapolating)
    t = (xq - x[below]) / (x[above] - x[below])
    return t*y[above] + (1-t)*y[below]
end
# Scalar linear interpolation returning the constant `extrapolate` outside the knots
function _linterp1(x, y, xq::Number, extrapolate::Number)
    lo, hi = firstindex(x), lastindex(x) - 1
    k = searchsortedfirst(x, xq, Base.Order.ForwardOrdering()) - 1
    # Result type: promote y's eltype with the type of an x-ratio so that the
    # in-range and out-of-range branches agree
    Tx = promote_type(eltype(x), eltype(xq))
    T = promote_type(eltype(y), Base.promote_op(/, Tx, Tx))
    if lo <= k <= hi
        below, above = k, k + 1
        t = (xq - x[below]) / (x[above] - x[below])
        return t*y[above] + (1-t)*y[below]
    elseif k < lo && x[lo] == xq
        # xq coincides exactly with the first knot
        return T(y[lo])
    else
        return T(extrapolate)
    end
end
# Vectorized linear interpolation: allocate a result array of the appropriate
# promoted type and delegate to the in-place kernel
function _linterp1(x, y, xq::AbstractArray, extrapolate)
    Tx = promote_type(eltype(x), eltype(xq))
    T = promote_type(eltype(y), Base.promote_op(/, Tx, Tx))
    return _linterp1!(similar(xq, T, size(xq)), x, y, xq, extrapolate)
end
# Allocate a scratch knot-index buffer when the caller does not supply one
function _linterp1!(yq, x, y, xq::AbstractArray, extrapolate)
    return _linterp1!(yq, ones(Int, length(xq)), x, y, xq, extrapolate)
end
# In-place linear interpolation with linear extrapolation (fast DenseArray path)
function _linterp1!(yq, knot_index, x::DenseArray, y::DenseArray, xq::AbstractArray, extrapolate::Symbol)
    @assert extrapolate === :Linear
    lo, hi = firstindex(x), lastindex(x) - 1
    # Left-knot index of each query point
    searchsortedfirst_vec!(knot_index, x, xq)
    knot_index .-= 1
    # Clamp so that extrapolation reuses the first/last interval
    @inbounds @fastmath for i ∈ eachindex(knot_index)
        knot_index[i] = min(max(knot_index[i], lo), hi)
    end
    @inbounds @fastmath for i ∈ eachindex(knot_index, xq, yq)
        below = knot_index[i]
        above = below + 1
        t = (xq[i] - x[below])/(x[above] - x[below])
        yq[i] = t*y[above] + (1-t)*y[below]
    end
    return yq
end
# Fallback method for non-dense arrays: linear extrapolation, no @fastmath
function _linterp1!(yq, knot_index, x, y, xq::AbstractArray, extrapolate::Symbol)
    @assert extrapolate === :Linear
    lo, hi = firstindex(x), lastindex(x) - 1
    # Left-knot index of each query point
    searchsortedfirst_vec!(knot_index, x, xq)
    knot_index .-= 1
    # Clamp so that extrapolation reuses the first/last interval
    @inbounds for i ∈ eachindex(knot_index)
        knot_index[i] = min(max(knot_index[i], lo), hi)
    end
    @inbounds for i ∈ eachindex(knot_index, xq, yq)
        below = knot_index[i]
        above = below + 1
        t = (xq[i] - x[below])/(x[above] - x[below])
        yq[i] = t*y[above] + (1-t)*y[below]
    end
    return yq
end
# In-place linear interpolation with constant extrapolation
function _linterp1!(yq, knot_index, x, y, xq::AbstractArray, extrapolate::Number)
    lo, hi = firstindex(x), lastindex(x) - 1
    searchsortedfirst_vec!(knot_index, x, xq)
    knot_index .-= 1
    @inbounds for i ∈ eachindex(knot_index)
        k = knot_index[i]
        if lo <= k <= hi
            # In range: interpolate within the bracketing interval
            below, above = k, k + 1
            t = (xq[i] - x[below])/(x[above] - x[below])
            yq[i] = t*y[above] + (1-t)*y[below]
        elseif k < lo && x[lo] == xq[i]
            # Query coincides exactly with the first knot
            yq[i] = y[lo]
        else
            yq[i] = extrapolate
        end
    end
    return yq
end
# Vectorization-friendly searchsortedfirst implementation from Interpolations.jl
# https://github.com/JuliaMath/Interpolations.jl
# Find the first index in v[lo:hi] with v[ind] >= xx, assuming v is sorted.
# Strategy: short linear scan near `lo` (callers typically query points close to
# the previous hit), then exponentially growing steps to bracket xx, then an
# ordinary binary search within the bracketed window.
Base.@propagate_inbounds function searchsortedfirst_exp_left(v, xx, lo, hi)
    # Linear scan of the first five candidates
    for i in 0:4
        ind = lo + i
        ind > hi && return ind
        xx <= v[ind] && return ind
    end
    # Exponential (galloping) phase: double the step until v[ind] >= xx
    n = 3
    tn2 = 2^n
    tn2m1 = 2^(n-1)
    ind = lo + tn2
    while ind <= hi
        # Bracketed: binary-search the window between the last two probes
        xx <= v[ind] && return searchsortedfirst(v, xx, lo + tn2 - tn2m1, ind, Base.Order.Forward)
        tn2 *= 2
        tn2m1 *= 2
        ind = lo + tn2
    end
    # Ran off the end of v: binary-search the final partial window
    return searchsortedfirst(v, xx, lo + tn2 - tn2m1, hi, Base.Order.Forward)
end
# Fill `ix` with searchsortedfirst(v, x[i]) for every query x[i]; when the
# queries themselves are sorted, each search restarts from the previous hit
function searchsortedfirst_vec!(ix::StridedVector, v::AbstractVector, x::AbstractVector)
    @assert firstindex(v) === 1
    if issorted(x)
        # Sorted queries: each search can begin where the last one ended
        start = 1
        hi = length(v)
        @inbounds for i ∈ eachindex(x, ix)
            found = searchsortedfirst_exp_left(v, x[i], start, hi)
            ix[i] = found
            start = min(found, hi)
        end
    else
        # Unsorted queries: fall back to independent binary searches
        ix .= searchsortedfirst.(Ref(v), x)
    end
    return ix
end
## --- Linear interpolation, top-level functions

"""
```julia
yq = linterp1(x::AbstractArray, y::AbstractArray, xq; extrapolate=:Linear)
```
Simple linear interpolation in one dimension. Given a vector of knots `x`
and values `y`, find the corresponding `y` values at position(s) `xq`.
Knots `x` must be sorted in increasing order.
If the optional keyword argument `extrapolate` is set to `:Linear` (default),
`xq` values outside the range of `x` will be extrapolated using a linear
extrapolation of the closest two `x`-`y` pairs. Otherwise, if `extrapolate`
is set to a `Number` (e.g., `0`, or `NaN`), that number will be used
instead.
### Examples
```julia
julia> linterp1(1:10, 1:10, 5.5)
5.5
julia> linterp1(1:10, 1:10, 0.5:10.5)
11-element Vector{Float64}:
  0.5
  1.5
  2.5
  3.5
  4.5
  5.5
  6.5
  7.5
  8.5
  9.5
 10.5
```
"""
function linterp1(x::AbstractArray, y::AbstractArray, xq; extrapolate=:Linear)
    # Sorted knots are required for the binary searches in the kernels below
    if !issorted(x)
        error("knot-vector `x` must be sorted in increasing order")
    end
    return _linterp1(x, y, xq, extrapolate)
end
export linterp1
"""
```julia
linterp1!(yq::StridedArray, x::AbstractArray, y::AbstractArray, xq; extrapolate=:Linear, knot_index=ones(Int, length(xq)))
```
In-place variant of `linterp1`.
"""
function linterp1!(yq::StridedArray, x::AbstractArray, y::AbstractArray, xq; extrapolate=:Linear, knot_index::AbstractVector{Int}=ones(Int, length(xq)))
    # Sorted knots are required for the binary searches in the kernels below
    if !issorted(x)
        error("knot-vector `x` must be sorted in increasing order")
    end
    return _linterp1!(yq, knot_index, x, y, xq, extrapolate)
end
export linterp1!
"""
```julia
yq = linterp1s(x::AbstractArray, y::AbstractArray, xq; extrapolate=:Linear)
```
As as `linterp1` (simple linear interpolation in one dimension), but will sort
the knots `x` and values `y` pairwise if `x` if not already sorted in
increasing order.
### Examples
```julia
julia> linterp1s(10:-1:1, 1:10, 5.5)
5.5
julia> linterp1s(10:-1:1, 1:10, 0.5:10.5)
11-element Vector{Float64}:
 10.5
  9.5
  8.5
  7.5
  6.5
  5.5
  4.5
  3.5
  2.5
  1.5
  0.5
```
"""
function linterp1s(x::AbstractArray, y::AbstractArray, xq; extrapolate=:Linear)
    # Permutation that puts the knots in increasing order
    perm = sortperm(x)
    return _linterp1(x[perm], y[perm], xq, extrapolate)
end
export linterp1s
"""
```julia
linterp1s!(yq::StridedArray, x::StridedArray, y::StridedArray, xq; extrapolate=:Linear)
linterp1s!(yq::StridedArray, knot_index::StridedArray{Int}, x::StridedArray, y::StridedArray, xq::AbstractArray; extrapolate=:Linear)
```
In-place variant of `linterp1s`.
Will sort `x` and permute `y` to match, before interpolating at `xq` and storing the result in `yq`.
An optional temporary working array `knot_index = similar(xq, Int)` may be provided to fully eliminate allocations.
"""
function linterp1s!(yq::StridedArray, x::StridedArray, y::StridedArray, xq; extrapolate=:Linear)
    @assert length(xq) === length(yq)
    @assert eachindex(x) === eachindex(y)
    # N.B. this mutates both inputs: x is sorted in place and y permuted to match
    vsort!(y, x) # Sort x and permute y to match
    return _linterp1!(yq, x, y, xq, extrapolate)
end
function linterp1s!(yq::StridedArray, knot_index::StridedArray{Int}, x::StridedArray, y::StridedArray, xq::AbstractArray; extrapolate=:Linear)
    @assert eachindex(knot_index) === eachindex(yq)
    @assert eachindex(x) === eachindex(y)
    @assert length(yq) === length(xq)
    # N.B. this mutates both inputs: x is sorted in place and y permuted to match
    vsort!(y, x) # Sort x and permute y to match
    return _linterp1!(yq, knot_index, x, y, xq, extrapolate)
end
export linterp1s!
# Linearly interpolate vector `y` at (possibly fractional) index `i`; indices
# outside [firstindex, lastindex) -- including exactly lastindex -- return
# `extrapolate` instead
function linterp_at_index(y::AbstractArray, i::Number, extrapolate=float(eltype(y))(NaN))
    (firstindex(y) <= i < lastindex(y)) || return extrapolate
    below = floor(Int, i)
    frac = i - below
    return frac*y[below+1] + (1-frac)*y[below]
end
export linterp_at_index
## --- Resize and interpolate arrays of colors

# Linearly interpolate an array of colors at positions `xq`, channel by channel
function linterp1(x::AbstractArray, image::AbstractArray{<:Color}, xq)
    # Interpolate each color channel independently
    r = linterp1(x, [c.r for c in image], xq)
    g = linterp1(x, [c.g for c in image], xq)
    b = linterp1(x, [c.b for c in image], xq)
    # Reassemble the interpolated channels into colors
    return RGB.(r, g, b)
end
# Resample a colormap `cmap` to exactly `n` colors by linear interpolation
function resize_colormap(cmap::AbstractArray{<:Color}, n::Integer)
    len = length(cmap)
    # Fewer than two requested colors: just return the first color
    n < 2 && return cmap[1:1]
    return linterp1(1:len, cmap, collect(range(1, len, length=n)))
end
export resize_colormap
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | code | 24428 | ## --- Dealing with different number representations
"""
```julia
nearest(T, x)
```
Convert `x` to the nearest representable value in type T, rounding if inexact
### Examples
```julia
julia> nearest(Int, 1234.56)
1235
julia> nearest(Int, Inf)
9223372036854775807
julia> nearest(Int, -Inf)
-9223372036854775808
````
"""
function nearest(::Type{T}, x) where T <: Number
if x > typemax(T)
typemax(T)
elseif x < typemin(T)
typemin(T)
else
T(x)
end
end
function nearest(::Type{T}, x) where T <: Integer
if x > typemax(T)
typemax(T)
elseif x < typemin(T)
typemin(T)
else
round(T, x)
end
end
export nearest
## --- Determining reported precision of numbers

# Upper bound on the number of decimal digits representable by a type of this
# size: sizeof(T) bytes * 8 bits * log10(2) ≈ sizeof(T) * 2.408...
maxdigits(T::Type) = ceil(Int, sizeof(T)*2.408239965311849)
# Special cases where the size-based bound above is not appropriate
maxdigits(::Type{BigFloat}) = 78
maxdigits(::Type{Float64}) = 16
maxdigits(::Type{Float32}) = 8
maxdigits(::Type{Float16}) = 4
maxdigits(::Type{Int64}) = 19
maxdigits(::Type{Int32}) = 10
maxdigits(::Type{Int16}) = 5
maxdigits(::Type{Int8}) = 3
maxdigits(::Type{Bool}) = 1

"""
```julia
sigdigits(d)
```
Determine the number of decimal significant figures of a number `d`.
### Examples
```julia
julia> sigdigits(1000)
1
julia> sigdigits(1001)
4
julia> sigdigits(1001.1245)
8
```
"""
function sigdigits(d::T) where T <: Number
    isfinite(d) || return 0
    rtol = 10.0^-maxdigits(T)
    # Smallest number of significant figures that reproduces d to within
    # the type's representable precision
    for n in 0:maxdigits(T)-1
        isapprox(d, round(d, sigdigits=n); rtol) && return n
    end
    return maxdigits(T)
end
sigdigits(d::Irrational) = Inf
export sigdigits
"""
```julia
leastsigfig(d)
```
Return the order of magnitude of the least significant decimal digit of a number `d`.
### Examples
```julia
julia> leastsigfig(1000)
1000.0
julia> leastsigfig(1001)
1.0
julia> leastsigfig(1001.1234)
0.0001
```
"""
function leastsigfig(d)
    # Zero and nonfinite values pass through (times 1.0 to ensure a float result)
    (iszero(d) || !isfinite(d)) && return 1.0*d
    return 10.0^(floor(Int, log10(abs(d))) - sigdigits(d) + 1)
end
export leastsigfig
## --- Fast inverse square-root

"""
```julia
fast_inv_sqrt(x)
```
The infamous fast inverse square root of `x`, in 32 and 64 bit versions.
Can be up to 10x faster than base `1/sqrt(x)`, though with nontrivial loss
of precision. The implementations here are good to about 4 ppm.
"""
function fast_inv_sqrt(x::Float64)
    x_2 = 0.5 * x
    # Bit-level trick: integer-subtract the right-shifted bit pattern of x from a
    # magic constant (here expressed as the Float64 sharing that bit pattern) to
    # get a first approximation of 1/sqrt(x)
    result = Base.sub_int(9.603007803048109e153, Base.lshr_int(x,1)) # Floating point magic
    # Two iterations of Newton's method refine the approximation (~4 ppm)
    result *= ( 1.5 - (x_2 * result * result )) # Newton's method
    result *= ( 1.5 - (x_2 * result * result )) # Newton's method (again)
    return result
end
function fast_inv_sqrt(x::Float32)
    x_2 = 0.5f0 * x
    # Same bit-level trick, with the 32-bit magic constant
    result = Base.sub_int(1.321202f19, Base.lshr_int(x,1)) # Floating point magic
    result *= ( 1.5f0 - (x_2 * result * result) ) # Newton's method
    result *= ( 1.5f0 - (x_2 * result * result) ) # Newton's method (again)
    return result
end
export fast_inv_sqrt
## --- Remove non-positive numbers

# Replace every element of `a` that is not strictly positive (including NaNs,
# which fail the `> 0` comparison) with NaN, in place
function positive!(a::DenseArray{<:AbstractFloat})
    map!(x -> x > 0 ? x : oftype(x, NaN), a, a)
    return a
end
export positive!
## --- Rescale an AbstractArray between a new minimum and maximum

"""
```julia
rescale(y, min::Number=0, max::Number=1)
```
Rescale a collection of numbers `y` between a new minimum `min` and new maximum `max`
### Examples
```julia
julia> rescale(1:5)
5-element Vector{Float64}:
 0.0
 0.25
 0.5
 0.75
 1.0
julia> rescale(1:5, -1, 0)
5-element Vector{Float64}:
 -1.0
 -0.75
 -0.5
 -0.25
 0.0
```
"""
function rescale(y::AbstractArray, min::Number=0, max::Number=1)
    # Shift to a zero minimum...
    obsmin = nanminimum(y)
    y = float.(y) .- obsmin
    # ...scale to a unit range...
    obsmax = nanmaximum(y)
    y ./= obsmax
    # ...then scale before shifting (`y*(max-min) + min`, not `(y+min)*(max-min)`)
    # so that results span [min, max] even when max-min != 1
    y .*= (max-min)
    y .+= min
end
function rescale(y::AbstractRange, min::Number=0, max::Number=1)
    obsmin = minimum(y)
    y = float(y) .- obsmin
    obsmax = maximum(y)
    y = y ./ obsmax
    # Scale before shifting so results span [min, max] even when max-min != 1
    y = y .* (max-min)
    y = y .+ min
end
function rescale(y, min::Number=0, max::Number=1)
    obsmin = nanminimum(y)
    y = float.(y) .- obsmin
    obsmax = nanmaximum(y)
    y = y ./ obsmax
    # Scale before shifting so results span [min, max] even when max-min != 1
    y = y .* (max-min)
    y = y .+ min
end
export rescale
## --- Some mathematical constants
const SQRT2 = sqrt(2)       # √2
const SQRT2PI = sqrt(2*pi)  # √(2π), the Gaussian normalization constant
const INVSQRT2 = 1/sqrt(2)  # 1/√2
# Shorthand union: a number or an array of numbers (used for broadcast methods)
const AN = Union{AbstractArray{<:Number},Number}
## --- Gaussian distribution functions

"""
```julia
normpdf(mu,sigma,x)
```
Probability density function of the Normal (Gaussian) distribution
``ℯ^{-(x-μ)^2 / (2σ^2)} / σ√2π``
with mean `mu` and standard deviation `sigma`, evaluated at `x`
"""
@inline function normpdf(mu,sigma,x)
    return exp(-(x-mu)*(x-mu) / (2*sigma*sigma)) / (SQRT2PI*sigma)
end
@inline function normpdf(mu::Number,sigma::Number,x::Number)
    return exp(-(x-mu)*(x-mu) / (2*sigma*sigma)) / (SQRT2PI*sigma)
end
# Broadcasting method for any mix of scalar and array arguments
function normpdf(mu::AN,sigma::AN,x::AN)
    return @fastmath @. exp(-(x-mu)*(x-mu) / (2*sigma*sigma)) / (SQRT2PI*sigma)
end
export normpdf
"""
```julia
normpdf_ll(mu, sigma, x)
```
Fast log likelihood proportional to the natural logarithm of the probability
density function of a Normal (Gaussian) distribution with mean `mu` and
standard deviation `sigma`, evaluated at `x`.
If `x`, [`mu`, and `sigma`] are given as arrays, the sum of the log likelihood
over all `x` will be returned.
See also `normpdf`, `normlogpdf`
"""
@inline normpdf_ll(mu,sigma,x) = -(x-mu)*(x-mu) / (2*sigma*sigma)
# Scalar mu and sigma, array x: hoist the loop-invariant 1/(2σ²) factor
function normpdf_ll(mu::Number,sigma::Number,x::AbstractArray)
    inv_s2 = 1/(2*sigma*sigma)
    ll = zero(typeof(inv_s2))
    @inbounds @fastmath @simd ivdep for i ∈ eachindex(x)
        ll -= (x[i]-mu)*(x[i]-mu) * inv_s2
    end
    return ll
end
# Array mu, scalar sigma
function normpdf_ll(mu::AbstractArray,sigma::Number,x::AbstractArray)
    inv_s2 = 1/(2*sigma*sigma)
    ll = zero(typeof(inv_s2))
    @inbounds @fastmath @simd ivdep for i ∈ eachindex(x, mu)
        ll -= (x[i]-mu[i])*(x[i]-mu[i]) * inv_s2
    end
    return ll
end
# Scalar mu, array sigma
function normpdf_ll(mu::Number,sigma::AbstractArray,x::AbstractArray)
    ll = zero(float(eltype(sigma)))
    @inbounds @fastmath @simd ivdep for i ∈ eachindex(x, sigma)
        ll -= (x[i]-mu)*(x[i]-mu) / (2*sigma[i]*sigma[i])
    end
    return ll
end
# Array mu and sigma
function normpdf_ll(mu::AbstractArray,sigma::AbstractArray,x::AbstractArray)
    ll = zero(float(eltype(sigma)))
    @inbounds @fastmath @simd ivdep for i ∈ eachindex(x, mu, sigma)
        ll -= (x[i]-mu[i])*(x[i]-mu[i]) / (2*sigma[i]*sigma[i])
    end
    return ll
end
export normpdf_ll
"""
```julia
normlogpdf(mu, sigma, x)
```
The natural logarithm of the probability density function of a Normal (Gaussian)
distribution with mean `mu` and standard deviation `sigma`, evaluated at `x`.
If `x`, [`mu`, and `sigma`] are given as arrays, the sum of the log probability density
over all `x` will be returned.
See also `normpdf`, `normlogpdf`
"""
@inline normlogpdf(mu,sigma,x) = -(x-mu)*(x-mu) / (2*sigma*sigma) - log(SQRT2PI*sigma)
# Scalar mu and sigma: the -log(σ√2π) term is constant, so it is applied once
# (times the number of observations) after the summation loop
function normlogpdf(mu::Number,sigma::Number,x::AbstractArray)
    inv_s2 = 1/(2*sigma*sigma)
    ll = zero(typeof(inv_s2))
    @inbounds @fastmath @simd ivdep for i ∈ eachindex(x)
        ll -= (x[i]-mu)*(x[i]-mu) * inv_s2
    end
    return ll - length(x)*log(SQRT2PI*sigma)
end
# Array mu, scalar sigma: normalization term still constant
function normlogpdf(mu::AbstractArray,sigma::Number,x::AbstractArray)
    inv_s2 = 1/(2*sigma*sigma)
    ll = zero(typeof(inv_s2))
    @inbounds @fastmath @simd ivdep for i ∈ eachindex(x, mu)
        ll -= (x[i]-mu[i])*(x[i]-mu[i]) * inv_s2
    end
    return ll - log(SQRT2PI*sigma)*length(x)
end
# Array sigma: the normalization term varies per element, so it is accumulated
# inside the loop
function normlogpdf(mu::Number,sigma::AbstractArray,x::AbstractArray)
    ll = zero(float(eltype(sigma)))
    @inbounds @fastmath @simd ivdep for i ∈ eachindex(x, sigma)
        ll -= (x[i]-mu)*(x[i]-mu) / (2*sigma[i]*sigma[i]) + log(SQRT2PI*sigma[i])
    end
    return ll
end
# Array mu and sigma
function normlogpdf(mu::AbstractArray,sigma::AbstractArray,x::AbstractArray)
    ll = zero(float(eltype(sigma)))
    @inbounds @fastmath @simd ivdep for i ∈ eachindex(x, mu, sigma)
        ll -= (x[i]-mu[i])*(x[i]-mu[i]) / (2*sigma[i]*sigma[i]) + log(SQRT2PI*sigma[i])
    end
    return ll
end
export normlogpdf
"""
```julia
normcdf(mu,sigma,x)
```
Cumulative distribution function of the Normal (Gaussian) distribution
``1/2 + erf(\frac{x-μ}{σ√2})/2``
with mean `mu` and standard deviation `sigma`, evaluated at `x`.
"""
@inline function normcdf(mu,sigma,x)
    return 0.5 + 0.5 * erf((x-mu) / (sigma*SQRT2))
end
@inline function normcdf(mu::Number,sigma::Number,x::Number)
    return 0.5 + 0.5 * erf((x-mu) / (sigma*SQRT2))
end
# Broadcasting method for any mix of scalar and array arguments
function normcdf(mu::AN,sigma::AN,x::AN)
    return @fastmath @. 0.5 + 0.5 * erf((x-mu) / (sigma*SQRT2))
end
export normcdf
"""
```julia
normcdf_ll(mu, sigma, x)
```
Fast log likelihood proportional to the natural logarithm of the cumulative
distribution function of a Normal (Gaussian) distribution with mean `mu` and
standard deviation `sigma`, evaluated at `x`.
If `x`, [`mu`, and `sigma`] are given as arrays, the sum of the log likelihood
over all `x` will be returned.
See also `normcdf`
"""
@inline function normcdf_ll(xₛ::Number)
    # Standardized argument xₛ = (x-μ)/σ
    if xₛ < -1.0
        # Far left tail: use the scaled complement erfcx to preserve accuracy
        # where the plain CDF would underflow
        return log(0.5*erfcx(-xₛ * INVSQRT2)) - 0.5*abs2(xₛ)
    else
        return log1p(-0.5*erfc(xₛ * INVSQRT2))
    end
end
# Sum of log-CDF over an array of pre-standardized values
function normcdf_ll(xₛ::AbstractArray)
    ll = zero(float(eltype(xₛ)))
    @inbounds for i ∈ eachindex(xₛ)
        ll += normcdf_ll(xₛ[i])
    end
    return ll
end
@inline function normcdf_ll(mu::Number, sigma::Number, x::Number)
    xₛ = (x - mu) / sigma
    return normcdf_ll(xₛ)
end
# Scalar mu and sigma: hoist the loop-invariant 1/σ factor
function normcdf_ll(mu::Number,sigma::Number,x::AbstractArray)
    inv_sigma = 1/sigma
    ll = zero(typeof(inv_sigma))
    @inbounds for i ∈ eachindex(x)
        xₛ = (x[i] - mu) * inv_sigma
        ll += normcdf_ll(xₛ)
    end
    return ll
end
# Array mu, scalar sigma
function normcdf_ll(mu::AbstractArray,sigma::Number,x::AbstractArray)
    inv_sigma = 1/sigma
    ll = zero(typeof(inv_sigma))
    @inbounds for i ∈ eachindex(x)
        xₛ = (x[i] - mu[i]) * inv_sigma
        ll += normcdf_ll(xₛ)
    end
    return ll
end
# Scalar mu, array sigma
function normcdf_ll(mu::Number,sigma::AbstractArray,x::AbstractArray)
    ll = zero(float(eltype(sigma)))
    @inbounds for i ∈ eachindex(x)
        xₛ = (x[i] - mu) / sigma[i]
        ll += normcdf_ll(xₛ)
    end
    return ll
end
# Array mu and sigma
function normcdf_ll(mu::AbstractArray,sigma::AbstractArray,x::AbstractArray)
    ll = zero(float(eltype(sigma)))
    @inbounds for i ∈ eachindex(x)
        xₛ = (x[i] - mu[i]) / sigma[i]
        ll += normcdf_ll(xₛ)
    end
    return ll
end
export normcdf_ll
"""
```julia
normcdf_ll!(mu, sigma, x)
```
Fast log likelihood proportional to the natural logarithm of the cumulative
distribution function of a Normal (Gaussian) distribution with mean `mu` and
standard deviation `sigma`, evaluated at `x`.
As `normcdf_ll`, but in-place (using `x` as a buffer).
"""
function normcdf_ll!(xₛ::AbstractArray)
    # Overwrite each entry with its log-CDF, then sum in a second pass
    @inbounds for i ∈ eachindex(xₛ)
        xₛ[i] = normcdf_ll(xₛ[i])
    end
    ll = zero(float(eltype(xₛ)))
    @inbounds @fastmath for i ∈ eachindex(xₛ)
        ll += xₛ[i]
    end
    return ll
end
# Scalar mu and sigma: hoist the loop-invariant 1/σ factor
function normcdf_ll!(mu::Number,sigma::Number,x::AbstractArray)
    inv_sigma = 1/sigma
    @inbounds for i ∈ eachindex(x)
        xₛ = (x[i] - mu) * inv_sigma
        x[i] = normcdf_ll(xₛ)
    end
    ll = zero(typeof(inv_sigma))
    @inbounds @fastmath for i ∈ eachindex(x)
        ll += x[i]
    end
    return ll
end
# Array mu, scalar sigma
function normcdf_ll!(mu::AbstractArray,sigma::Number,x::AbstractArray)
    inv_sigma = 1/sigma
    @inbounds for i ∈ eachindex(x)
        xₛ = (x[i] - mu[i]) * inv_sigma
        x[i] = normcdf_ll(xₛ)
    end
    ll = zero(typeof(inv_sigma))
    @inbounds @fastmath for i ∈ eachindex(x)
        ll += x[i]
    end
    return ll
end
# Scalar mu, array sigma
function normcdf_ll!(mu::Number,sigma::AbstractArray,x::AbstractArray)
    @inbounds for i ∈ eachindex(x)
        xₛ = (x[i] - mu) / sigma[i]
        x[i] = normcdf_ll(xₛ)
    end
    ll = zero(float(eltype(sigma)))
    @inbounds @fastmath for i ∈ eachindex(x)
        ll += x[i]
    end
    return ll
end
# Array mu and sigma
function normcdf_ll!(mu::AbstractArray,sigma::AbstractArray,x::AbstractArray)
    @inbounds for i ∈ eachindex(x)
        xₛ = (x[i] - mu[i]) / sigma[i]
        x[i] = normcdf_ll(xₛ)
    end
    ll = zero(float(eltype(sigma)))
    @inbounds @fastmath for i ∈ eachindex(x)
        ll += x[i]
    end
    return ll
end
export normcdf_ll!
"""
```julia
normcdf!(result,mu,sigma,x)
```
In-place version of `normcdf`
"""
function normcdf!(result::DenseArray, mu::Number, sigma::Number, x::AbstractArray)
    T = eltype(result)
    # Hoist the loop-invariant 1/(σ√2) factor, in the result's element type
    scale = one(T)/(sigma*T(SQRT2))
    half = T(0.5)
    @inbounds @fastmath for i ∈ eachindex(x,result)
        result[i] = half + half * erf((x[i]-mu) * scale)
    end
    return result
end
export normcdf!
"""
```julia
norm_quantile(F::Number)
```
How far away from the mean (in units of sigma) should we expect proportion
F of the samples to fall in a standard Gaussian (Normal[0,1]) distribution
"""
@inline function norm_quantile(F)
    return SQRT2*erfinv(2*F-1)
end
export norm_quantile
"""
```julia
norm_width(N::Number)
```
How dispersed (in units of sigma) should we expect a sample of N numbers
drawn from a standard Gaussian (Normal[0,1]) distribution to be?
"""
@inline function norm_width(N)
    # Expected spread between the 1/(2N) and 1 - 1/(2N) quantiles
    return 2*norm_quantile(1 - 1/(2N))
end
export norm_width
"""
```julia
normproduct(μ1, σ1, μ2, σ2)
```
The integral of the product of two normal distributions N[μ1,σ1] * N[μ2,σ2].
This is itself just another Normal distribution! Specifically, one with
variance σ1^2 + σ2^2, evaluated at distance |μ1-μ2| from the mean
"""
function normproduct(μ1, σ1, μ2, σ2)
    # Combined distribution has variance σ1² + σ2², evaluated at μ2
    return normpdf(μ1, sqrt.(σ1.*σ1 + σ2.*σ2), μ2)
end
export normproduct
"""
```julia
normproduct_ll(μ1, σ1, μ2, σ2)
```
Fast log likelihood proportional to the integral of N[μ1,σ1] * N[μ2,σ2]
As `normlogproduct`, but using the fast log likelihood of a Normal distribution
(i.e., without the preexponential terms).
"""
normproduct_ll(μ1, σ1, μ2, σ2) = normpdf_ll(μ1, sqrt.(σ1.*σ1 + σ2.*σ2), μ2)
export normproduct_ll
"""
```julia
normlogproduct(μ1, σ1, μ2, σ2)
```
The logarithm of the integral of N[μ1,σ1] * N[μ2,σ2]
"""
normlogproduct(μ1, σ1, μ2, σ2) = normlogpdf(μ1, sqrt.(σ1.*σ1 + σ2.*σ2), μ2)
export normlogproduct
## --- Geometry
"""
```julia
inpolygon(x,y,point)
```
Check if a 2D polygon defined by the arrays `x`, `y` contains a given `point`.
Returns boolean (true or false)
### Examples
```julia
julia> x = [0, 1, 1, 0];
julia> y = [0, 0, 1, 1];
julia> inpolygon(x, y, (0.5,0.5))
true
julia> inpolygon(x, y, (0.5,1.5))
false
```
"""
function inpolygon(x,y,point)
# Check that we have the right kind of input data
if length(x) != length(y)
error("polygon must have equal number of x and y points\n")
end
if length(x) < 3
error("polygon must have at least 3 points\n")
end
if length(point) != 2
error("point must be an ordered pair (x,y)\n")
end
# Extract x and y data of point
point_x = point[1]
point_y = point[2]
# For first point, previous point is last
x_here = x[end]
y_here = y[end]
# Ensure we are not sitting parallel to a vertex by infinitessimally moving the point
if y_here == point_y
point_y = nextfloat(float(point_y))
end
if x_here == point_x
point_x = nextfloat(float(point_x))
end
# Check how many times a line projected right along x-axis from point intersects the polygon
intersections = 0
@inbounds for i ∈ eachindex(x)
# Recycle our vertex
x_last = copy(x_here)
y_last = copy(y_here)
# Get a new vertex
x_here = x[i]
y_here = y[i]
# Ensure we are not sitting parallel to a vertex by infinitessimally moving the point
if y_here == point_y
point_y = nextfloat(float(point_y))
end
if x_here == point_x
point_x = nextfloat(float(point_x))
end
if y_last > point_y && y_here > point_y
# If both ys above point, no intersection
continue
elseif y_last < point_y && y_here < point_y
# If both ys below point, no intersection
continue
elseif x_last < point_x && x_here < point_x
# If both x's left of point, no intersection
continue
elseif x_last > point_x && x_here > point_x
# By elimination
# We have one y above and y below our point
# If both y's are right of line, then definite intersection
intersections += 1
continue
else
# By elimination
# One y above and one y below
# One x to the right and one x to the left
# We must project
dy = y_here - y_last
if abs(dy) > 0
dx = x_here - x_last
inv_slope = dx / dy
x_proj = x_last + (point_y - y_last) * inv_slope
if x_proj > point_x
intersections += 1
end
end
end
end
# If number of intersections is odd, point is in the polygon
return Bool(mod(intersections,2))
end
export inpolygon
"""
```julia
(columns, rows) = find_grid_inpolygon(grid_x, grid_y, poly_x, poly_y)
```
Find the indexes of grid points that fall within a polygon for a grid with
cell centers given by grid_x (j-columns of grid) and grid_y (i-rows of grid).
Returns a list of rows and columns in the polygon
### Examples
```julia
julia> grid_x = -1.5:1/3:1.5;
julia> grid_y = -1.5:1/3:1.5;
julia> cols,rows = find_grid_inpolygon(gridx, gridy, [-.4,.4,.4,-.4],[.4,.4,-.4,-.4])
([5, 5, 6, 6], [5, 6, 5, 6])
julia> grid_x[cols]
4-element Vector{Float64}:
-0.16666666666666666
-0.16666666666666666
0.16666666666666666
0.16666666666666666
julia> grid_y[rows]
4-element Vector{Float64}:
-0.16666666666666666
0.16666666666666666
-0.16666666666666666
0.16666666666666666
"""
function find_grid_inpolygon(grid_x, grid_y, poly_x, poly_y)
# Check that we have the right kind of input data
if length(poly_x) != length(poly_y)
error("polygon must have equal number of x and y points\n")
end
if length(poly_x) < 3
error("polygon must have at least 3 points\n")
end
# Find maximum x and y range of polygon
(xmin, xmax) = extrema(poly_x)
(ymin, ymax) = extrema(poly_y)
# Find the matrix indices within the range of the polygon (if any)
column_inrange = findall((grid_x .>= xmin) .& (grid_x .<= xmax))
row_inrange = findall((grid_y .>= ymin) .& (grid_y .<= ymax))
# Keep a list of matrix indexes in the polygon
row = Array{Int}(undef,length(column_inrange) * length(row_inrange))
column = Array{Int}(undef,length(column_inrange) * length(row_inrange))
n = 0
for j ∈ eachindex(column_inrange)
for i ∈ eachindex(row_inrange)
point = (grid_x[column_inrange[j]], grid_y[row_inrange[i]])
if inpolygon(poly_x, poly_y, point)
n += 1
row[n] = row_inrange[i]
column[n] = column_inrange[j]
end
end
end
return (column[1:n], row[1:n])
end
export find_grid_inpolygon
"""
```julia
arcdistance(latᵢ,lonᵢ,lat,lon)
```
Calculate the distance on a sphere between the point (`latᵢ`,`lonᵢ`) and any
number of points in (`lat`,`lon`).
Latitude and Longitude should be specified in decimal degrees
"""
function arcdistance(latᵢ,lonᵢ,lat,lon)
@assert eachindex(latᵢ) == eachindex(lonᵢ)
@assert eachindex(lat) == eachindex(lon)
# Argument for acos()
arg = @. sin(latᵢ * pi/180) * sin(lat * pi/180) + cos(latᵢ*pi/180) * cos(lat * pi/180)*cos((lonᵢ - lon) * pi/180)
# Avoid domain errors from imprecise sine and cosine math
@inbounds for i in eachindex(arg)
if arg[i] < -1
arg[i] = -1
elseif arg[i] > 1
arg[i] = 1
end
end
# Calculate angular distance
theta = 180/pi .* acos.(arg)
return theta
end
export arcdistance
"""
```julia
minarcdistance(latᵢ,lonᵢ,lat,lon)
```
Return the smallest non-`NaN` arcdistance (i.e. distance on a sphere in arc degrees)
between a given point (`latᵢ[i]`,`lonᵢ[i]`) and any point in (`lat`,`lon`)
for each `i` in `eachindex(latᵢ, lonᵢ)`.
Latitude and Longitude should be specified in decimal degrees
"""
function minarcdistance(latᵢ,lonᵢ,lat,lon)
@assert eachindex(latᵢ) == eachindex(lonᵢ)
@assert eachindex(lat) == eachindex(lon)
# Precalculate some shared factors
sli = sin.(latᵢ .* pi/180)
sl = sin.(lat .* pi/180)
cli = cos.(latᵢ*pi/180)
cl = cos.(lat .* pi/180)
thetamin = fill(NaN, size(latᵢ))
@inbounds for i in eachindex(latᵢ)
for j in eachindex(lon)
arg = sli[i] * sl[j] + cli[i] * cl[j] * cos((lonᵢ[i] - lon[j]) * pi/180)
if arg < -1
arg = -1.0
elseif arg > 1
arg = 1.0
end
θᵢⱼ = 180/pi * acos(arg)
if !(θᵢⱼ >= thetamin[i])
thetamin[i] = θᵢⱼ
end
end
end
return thetamin
end
export minarcdistance
## --- Linear regression
"""
```julia
(a,b) = linreg(x::AbstractVector, y::AbstractVector)
```
Returns the coefficients for a simple linear least-squares regression of
the form `y = a + bx`
### Examples
```
julia> a, b = linreg(1:10, 1:10)
2-element Vector{Float64}:
-1.19542133983862e-15
1.0
julia> isapprox(a, 0, atol = 1e-12)
true
julia> isapprox(b, 1, atol = 1e-12)
true
```
"""
function linreg(x::AbstractVector{T}, y::AbstractVector{<:Number}) where {T<:Number}
A = similar(x, length(x), 2)
A[:,1] .= one(T)
A[:,2] .= x
return A\y
end
export linreg
## --- End of File
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | code | 577 | module StatGeochemBase
using NaNStatistics
using VectorizedStatistics
using SpecialFunctions: erf, erfc, erfcx, erfinv
const Collection{T} = Union{DenseArray{<:T}, AbstractRange{<:T}, NTuple{N,T}} where N
include("Math.jl")
using Colors: Color, RGBX, RGB, N0f8
include("Interpolations.jl")
include("ArrayStats.jl")
using IndirectArrays: IndirectArray
include("Images.jl")
include("Colormaps.jl")
import Base.display
include("Display.jl") # Custom pretty-printing
using DelimitedFiles
include("Import.jl")
end
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | code | 339 | using StatGeochemBase
using NaNStatistics
using Test
@testset "Math" begin include("testMath.jl") end
@testset "Images" begin include("testImages.jl") end
@testset "Import" begin include("testImport.jl") end
@testset "ArrayStats" begin include("testArrayStats.jl") end
@testset "Interpolations" begin include("testInterpolations.jl") end
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | code | 3689 | ## --- test ArrayStats.jl
# Type wrangling
a = Any[false, 0, 1.0]
@test unionize(a) isa Vector{Union{Bool, Int, Float64}}
@test unionize(a) == a
@test unionize(1:10) === 1:10
# Copying
src = rand(100)
t = src .< 0.5
dest = fill(NaN, count(t))
copyat!(dest, src, t)
@test dest == src[t]
reversecopyat!(dest, src, t)
@test dest == reverse!(src[t])
# Sorting, counting, matching
A = rand(1:100., 100); B = sort(A)
@test A[1:count_unique!(A)] == unique(B)
@test findnth(fill(true, 50), 25) == 25
@test findmatches(40:60, 1:100) == 40:60
@test findmatches(50, 1:100) == 50
@test findclosest(3.6, 1:10) == 4
@test findclosest(-1, 1:10) == 1
@test findclosest(11, 1:10) == 10
@test findclosest(3.6, 10:-1:1) == 7
@test findclosest(-1, 10:-1:1) == 10
@test findclosest(11, 10:-1:1) == 1
@test findclosest(3.6, [10, 3, 8, 6, 9, 2, 4, 7, 5, 1]) == 7
@test findclosest(-1, [10, 3, 8, 6, 9, 2, 4, 7, 5, 1]) == 10
@test findclosest(11, [10, 3, 8, 6, 9, 2, 4, 7, 5, 1]) == 1
@test findclosest(3.3:5.3, 1:10) == 3:5
@test findclosest(3.3:5.3, 10:-1:1) == 8:-1:6
@test findclosest(3.3:5.3, [10, 3, 8, 6, 9, 2, 4, 7, 5, 1]) == [2,7,9]
@test findclosestbelow(3.6, 1:10) == 3
@test findclosestbelow(3.6, 10:-1:1) == 8
@test findclosestbelow(3.6, [10, 3, 8, 6, 9, 2, 4, 7, 5, 1]) == 2
@test findclosestbelow(-1, 1:10) == 0
@test findclosestbelow(-1, 10:-1:1) == 0
@test findclosestbelow(-1, [10, 3, 8, 6, 9, 2, 4, 7, 5, 1]) == 0
@test findclosestbelow(11, 1:10) == 10
@test findclosestbelow(11, 10:-1:1) == 1
@test findclosestbelow(11, [10, 3, 8, 6, 9, 2, 4, 7, 5, 1]) == 1
@test findclosestbelow(3.3:5.3, 1:10) == 3:5
@test findclosestbelow((3.3:5.3...,), 1:10) == 3:5
@test findclosestbelow(3.3:5.3, 10:-1:1) == 11 .- (3:5)
@test findclosestbelow((3.3:5.3...,), 10:-1:1) == 11 .- (3:5)
@test findclosestabove(3.6, 1:10) == 4
@test findclosestabove(3.6, 10:-1:1) == 7
@test findclosestabove(3.6, [10, 3, 8, 6, 9, 2, 4, 7, 5, 1]) == 7
@test findclosestabove(11, 1:10) == 11
@test findclosestabove(11, 10:-1:1) == 11
@test findclosestabove(11, [10, 3, 8, 6, 9, 2, 4, 7, 5, 1]) == 11
@test findclosestabove(0, 1:10) == 1
@test findclosestabove(0, 10:-1:1) == 10
@test findclosestabove(0, [10, 3, 8, 6, 9, 2, 4, 7, 5, 1]) == 10
@test findclosestabove(3.3:5.3, 1:10) == 4:6
@test findclosestabove((3.3:5.3...,), 1:10) == 4:6
@test findclosestbelow(3.6, 10:-1:1) == 11 - 3
@test findclosestabove(3.6, 10:-1:1) == 11 - 4
@test findclosestabove(3.3:5.3, 10:-1:1) == 11 .- (4:6)
@test findclosestabove((3.3:5.3...,), 10:-1:1) == 11 .- (4:6)
x = fill(1, 50)
@test findclosestunequal(x, 25) == 25
x[end] = 2
@test findclosestunequal(x, 25) == 50
x[1] = 0
@test findclosestunequal(x, 25) == 1
# Interpolation
@test cntr(0:2:100) == 1:2:99
# Integration
@test trapezoidalquadrature(1:10, fill(1,10)) == 9
@test trapz(collect(1:10.), ones(10)) == 9
@test midpointquadrature(1:10, ones(10)) == 10
# Distributions
A = draw_from_distribution(ones(100), 10000)::AbstractArray
@test length(A) == 10000
@test isapprox(nanmean(A), 0.5, atol=0.08)
@test isapprox(nanstd(A), sqrt(1/12), atol=0.08)
# Strings
@test contains("JuliaLang is pretty cool!", "Julia")
@test !contains("JuliaLang is pretty cool!", "julia")
@test containsi("JuliaLang is pretty cool!", "Julia")
@test containsi("JuliaLang is pretty cool!", "julia")
@test !containsi("JuliaLang is pretty cool!", "tomatoes")
## ---
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | code | 632 | ## --- Images.jl
using Colors: Color
cmap = resize_colormap(viridis, 10)
@test length(cmap) == 10
@test isa(cmap, Array{<:Color,1})
matrix = rand(10,10)
# Specifiying limits
img1 = imsc(matrix, viridis, 0, 1)
@test isa(img1, Array{<:Color,2})
img2 = imsci(matrix, viridis, 0, 1)
@test isa(img2, AbstractArray{<:Color,2})
@test all(img1 .== img2)
# Auto-ranging
img1 = imsc(matrix, viridis)
@test isa(img1, Array{<:Color,2})
img2 = imsci(matrix, viridis)
@test isa(img2, AbstractArray{<:Color,2})
@test all(img1 .== img2)
# Other
#@test display(colormaps) != NaN
## ---
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | code | 7786 | ## --- String parsing functions
@test parsedlm("1,2,3\n4,5,6\n7,8,9\n", ',', Float64) == reshape(1:9,3,3)'
@test parsedlm("1,2,3,4\n5,6,7,8\n9,10,11,12\n13,14,15,16", ',', Int64) == reshape(1:16,4,4)'
A = delim_string_function(x -> delim_string_parse(x, ',', Float32),
"1,2,3,4\n5,6,7,8\n9,10,11,12\n13,14,15,16", '\n', Array{Float32,1})
@test isa(A, Array{Array{Float32,1},1})
@test all([A[i][j] == (i-1)*4 + j for i=1:4, j=1:4])
A = delim_string_function(x -> delim_string_parse(x, ',', Int64, merge=true, undefval=0),
"\n1,2,3,,4\n5,6,,7,8\n9,10,,,,11,12\n\n\n13,14,15,16", '\n', Array{Int64,1}, merge=true)
@test all([A[i][j] == (i-1)*4 + j for i=1:4, j=1:4])
## --- Elementify/unelementify functions
elements = ["U" "Lv" "Te" "O" "Er" "W" "Re" "j" "asdf" "Zr" "Al" "S" "K" "V" "N" "Ga" "I"]
data = vcat(elements, hcat(rand(1000, length(elements)-1), string.(rand("abcdefghijklmnopqrstuvwxyz0123456789",1000))))
datatuple = elementify(data,importas=:Tuple)::NamedTuple
datadict = elementify(data,importas=:Dict)::Dict
@test isa(display(datatuple), Nothing)
@test isa(datatuple, NamedTuple)
@test unelementify(datatuple) == data
@test unelementify(DictDataset(datatuple)::Dict, elements) == data
@test isa(datadict, Dict)
@test unelementify(datadict) == data
@test unelementify(TupleDataset(datadict, elements)::NamedTuple) == data
# Test adding or averaging option for numeric elements
addtest = ["a" "b" "a";1 2 3]
avg = elementify(addtest, importas=:Dict)
add = elementify(addtest, importas=:Dict, sumduplicates=true)
@test avg["elements"] == avg["elements"]
@test avg["a"] == 2
@test add["a"] == 4
## --- Import / export functions
@test exportdataset(datatuple, "tupledataset.csv", ',') == nothing
@test importdataset("tupledataset.csv", importas=:Tuple) == datatuple
@test importdataset("tupledataset.csv", ',', importas=:Tuple, elements=elements, skipstart=1) == datatuple
@test importdataset("tupledataset.csv", ',', importas=:Tuple, elements=(elements...,), skipstart=1) == datatuple
@test exportdataset(datatuple, "tupledataset.tsv") == nothing
@test importdataset("tupledataset.tsv", '\t', importas=:Tuple) == datatuple
@test exportdataset(datatuple, "tupledataset.csv", ',', digits=6) == nothing
@test importdataset("tupledataset.csv", ',', importas=:Tuple).Lv == round.(datatuple.Lv, digits=6)
@test exportdataset(datatuple, "tupledataset.csv", ',', sigdigits=5) == nothing
@test importdataset("tupledataset.csv", ',', importas=:Tuple).Lv == round.(datatuple.Lv, sigdigits=5)
@test exportdataset(datadict, datadict["elements"], "dictdataset.csv", ',') == nothing
@test importdataset("dictdataset.csv", importas=:Dict, mindefinedcolumns=2) == datadict
@test importdataset("dictdataset.csv", ',', importas=:Dict, elements=elements, skipstart=1) == datadict
@test importdataset("dictdataset.csv", ',', importas=:Dict, elements=(elements...,), skipstart=1) == datadict
@test StatGeochemBase.guessdelimiter("foobar.csv") == ','
@test StatGeochemBase.guessdelimiter("foobar.tsv") == '\t'
## -- Normalization functions
dataarray = rand(1000, length(elements))
data = vcat(elements, dataarray)
datatuple = elementify(data,importas=:Tuple)::NamedTuple
datadict = elementify(data,importas=:Dict)::Dict
renormalize!(dataarray, total=100)
@test nansum(dataarray) ≈ 100
renormalize!(dataarray, dim=1, total=100)
@test all(nansum(dataarray, dims=1) .≈ 100)
renormalize!(dataarray, dim=2, total=100)
@test all(nansum(dataarray, dims=2) .≈ 100)
# Renormalization functions on NamedTuple-based dataset
datatuple = elementify(unelementify(datatuple, findnumeric=true), importas=:Tuple)
renormalize!(datatuple, total=100)
@test all(sum(unelementify(datatuple, floatout=true),dims=2) .≈ 100)
# Renormalization functions on Dict-based dataset
datadict = elementify(unelementify(datadict, findnumeric=true), importas=:Dict)
renormalize!(datadict, datadict["elements"], total=100.)
@test all(sum(unelementify(datadict, floatout=true),dims=2) .≈ 100)
# Internal standardization functions
@test isnan(StatGeochemBase.floatify("asdf"))
@test StatGeochemBase.floatify("12345", Float64) === 12345.0
@test StatGeochemBase.floatify("12345", Float32) === 12345f0
@test StatGeochemBase.floatify(12345, Float64) === 12345.0
@test StatGeochemBase.floatify(12345, Float32) === 12345f0
@test isa(StatGeochemBase.columnformat(["asdf","qwer","zxcv"], false), Array{String,1})
@test isa(StatGeochemBase.columnformat([1f0, 2f0, 3f0], false), Array{Float32,1})
@test isa(StatGeochemBase.columnformat([1., 2., 3.], false), Array{Float64,1})
@test isa(StatGeochemBase.columnformat([0x01,0x02,0x03], false), Array{UInt8,1})
@test isa(StatGeochemBase.columnformat([1,2,3], false), Array{Int64,1})
@test all(StatGeochemBase.columnformat([0x01,2,"3"], false) .=== [0x01,2,"3"])
@test StatGeochemBase.columnformat([0x01,2,"3"], true) == [1,2,3]
@test StatGeochemBase.columnformat(["asdf","qwer","zxcv"], true) == ["asdf","qwer","zxcv"]
@test StatGeochemBase.columnformat(["","","zxcv"], true) == ["","","zxcv"]
@test isequal(StatGeochemBase.columnformat(["","","5"], true), [NaN, NaN, 5.0])
@test StatGeochemBase.isnumeric(missing) == false
@test StatGeochemBase.nonnumeric(missing) == false
@test StatGeochemBase.isnumeric("") == false
@test StatGeochemBase.nonnumeric("") == false
@test StatGeochemBase.isnumeric("5") == true
@test StatGeochemBase.nonnumeric("5") == false
@test StatGeochemBase.isnumeric('x') == false
@test StatGeochemBase.nonnumeric('x') == true
@test StatGeochemBase.isnumeric(NaN) == true
@test StatGeochemBase.nonnumeric(NaN) == false
@test StatGeochemBase.symboltuple((:foo, :bar, :baz)) === (:foo, :bar, :baz)
@test StatGeochemBase.symboltuple(("foo", "bar", "baz")) === (:foo, :bar, :baz)
@test StatGeochemBase.symboltuple([:foo, :bar, :baz]) === (:foo, :bar, :baz)
@test StatGeochemBase.symboltuple(["foo", "bar", "baz"]) === (:foo, :bar, :baz)
@test StatGeochemBase.stringarray((:foo, :bar, :baz)) == ["foo", "bar", "baz"]
@test StatGeochemBase.stringarray(("foo", "bar", "baz")) == ["foo", "bar", "baz"]
@test StatGeochemBase.stringarray([:foo, :bar, :baz]) == ["foo", "bar", "baz"]
@test StatGeochemBase.stringarray(["foo", "bar", "baz"]) == ["foo", "bar", "baz"]
@test isequal(StatGeochemBase.emptys(Any,3), [missing, missing, missing])
@test isequal(StatGeochemBase.emptys(String,3), ["", "", ""])
@test all(StatGeochemBase.emptys(Float16,3) .=== Float16[NaN, NaN, NaN])
@test all(StatGeochemBase.emptys(Float64,3) .=== [NaN, NaN, NaN])
@test all(StatGeochemBase.emptys(Int64,3) .=== [NaN, NaN, NaN])
## --- Concatenating and merging datasets
d2 = concatenatedatasets(datadict, datadict)
@test isa(d2, Dict)
d2array = unelementify(d2, floatout=true)
@test isa(d2array, Array{Float64,2})
@test size(d2array) == (2000, length(datadict["elements"]))
A = ["La" "Ce" "Pr" "ID"; 1.5 1.1 1.0 "x9"; 3.7 2.9 2.5 "SJ21-12"]
B = ["La" "Yb"; 1.5 1.1; 1.0 3.7; 2.9 2.5]
a = elementify(A, importas=:Tuple)
b = elementify(B, importas=:Tuple)
d = concatenatedatasets(a,b)
@test isa(d, NamedTuple)
@test isequal(d.La, [1.5, 3.7, 1.5, 1.0, 2.9])
@test isequal(d.Yb, [NaN, NaN, 1.1, 3.7, 2.5])
@test isequal(d.ID, ["x9", "SJ21-12", "", "", ""])
darray = unelementify(d, floatout=true)
@test isa(darray, Array{Float64,2})
@test size(darray) == (5, length(keys(d)))
d = concatenatedatasets(a,b,a,b,a; elements=(:La,))
@test isa(d, NamedTuple)
@test isequal(d.La, [a.La; b.La; a.La; b.La; a.La])
@test hashdataset(d) == [0x69f0025597bf6523, 0xe8341bcc0a64d447, 0x69f0025597bf6523, 0x6eb8871cf9477895, 0x4f3831d3feae830b, 0x69f0025597bf6523, 0xe8341bcc0a64d447, 0x69f0025597bf6523, 0x6eb8871cf9477895, 0x4f3831d3feae830b, 0x69f0025597bf6523, 0xe8341bcc0a64d447]
## --- Clean up
rm("dictdataset.csv")
rm("tupledataset.csv")
rm("tupledataset.tsv")
## --- End of File
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | code | 2661 | ## --- test Interpolations.jl
# Interpolation
@test linterp1(1:10, 1:10, 1, extrapolate=NaN) == 1
@test linterp1(1:10, 1:10, 10, extrapolate=NaN) == 10
@test linterp1(1:10, 1:10, 5.5) == 5.5
@test linterp1(1:10, 1:10, 1:10, extrapolate=NaN) == 1:10
@test linterp1(1:10, collect(1:10.), 3:7) == 3:7
@test linterp1(1:10,21:30,5:0.5:6) == [25.0, 25.5, 26.0]
@test linterp1s(10:-1:1,21:30,5:0.5:6) == [26.0, 25.5, 25.0]
@test linterp_at_index(1:100,10) == 10
# Extrapolation
@test linterp1(1:10, 1:10, 15) == 15 # Default is to extrapolate
@test linterp1(1:10, 1:10, 15, extrapolate=-5) == -5
@test linterp1(1:10, 1:10, 5, extrapolate=-5) == 5
@test isnan(linterp1(1:10, 1:10, 15, extrapolate=NaN))
@test linterp1(1:10,1:10,0:11) == 0:11 # Default is to extrapolate
@test linterp1(1:10,1:10,0:11, extrapolate=:Linear) == 0:11
@test linterp1(1:10,1:10,0.5:10.5, extrapolate=:Linear) == 0.5:10.5
@test linterp1(1:10,1:10,0.5:10.5, extrapolate=-5) == [-5; 1.5:9.5; -5]
@test all(linterp1(1:10,1:10,0.5:10.5, extrapolate=NaN) .=== [NaN; 1.5:9.5; NaN])
@test isnan(linterp_at_index(1:100,-10))
@test linterp_at_index(1:100,-10, 0) == 0
# In-place
xq = 3:7
@test linterp1!(similar(xq, Float64), 1:10, collect(1:10.), xq) == 3:7
xq = 5:0.5:6
@test linterp1!(similar(xq), 1:10, 21:30, xq) == [25.0, 25.5, 26.0]
xq = 5:0.5:6
@test linterp1s!(similar(xq), collect(1:10), collect(21:30), xq) == [25.0, 25.5, 26.0]
@test linterp1s!(similar(xq), collect(10:-1:1), collect(21:30), xq) == [26.0, 25.5, 25.0]
xq = 5:0.5:7
@test linterp1s!(similar(xq), rand(Int,length(xq)), collect(10:-1:1), collect(21:30), xq) == [26.0, 25.5, 25.0, 24.5, 24]
xq = 0:0.01:1
x = rand(200)
y = rand(200)
yq = linterp1s(x, y, xq)
@test linterp1s!(similar(xq), x, y, xq) ≈ yq
xq = 0:11
@test linterp1!(similar(xq, Float64), 1:10, 1:10, xq, extrapolate=:Linear) == 0:11
xq = 0.5:10.5
@test isequal(linterp1!(similar(xq), 1:10, 1:10, xq, extrapolate=NaN), [NaN; 1.5:9.5; NaN])
# Test consistency of sorting against Base
x = rand(10)*10
y = rand(10)*10
perm = sortperm(x)
xs = x[perm]
yx = y[perm]
xq = 0:0.01:10
yq = similar(xq)
knot_index = rand(Int,length(xq))
linterp1s!(yq, knot_index, x, y, xq)
@test yq == linterp1(xs, yx, xq)
x = rand(1000)*10
y = rand(1000)*10
perm = sortperm(x)
xs = x[perm]
yx = y[perm]
xq = 0:0.01:10
yq = similar(xq)
knot_index = rand(Int,length(xq))
linterp1s!(yq, knot_index, x, y, xq)
@test yq == linterp1(xs, yx, xq)
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | code | 5565 | ## --- Special functions
@test isapprox(fast_inv_sqrt(5.0), 1/sqrt(5.0), atol=1e-6)
@test isapprox(fast_inv_sqrt(5f0), 1/sqrt(5f0), atol=1e-6)
@test nearest(Int64, 3.3) === 3
@test nearest(Float64, 1//3) === 1/3
@test all(x->!(x<=0), positive!(randn(100)))
@test rescale(0:10) ≈ 0:0.1:1
@test rescale(collect(0:10)) ≈ 0:0.1:1
@test all(rescale((0:10...,)) .≈ ((0:0.1:1)...,))
@test rescale(0:10, -1, 0) ≈ -1:0.1:0
@test rescale(collect(0:10), -1, 0) ≈ -1:0.1:0
@test all(rescale((0:10...,), -1, 0) .≈ ((-1:0.1:0)...,))
## --- Significant figures
@test sigdigits(1/3) === 16
@test sigdigits(0.11) === 2
@test sigdigits(0.111) === 3
@test sigdigits(0.1111) === 4
@test sigdigits(0.11111) === 5
@test sigdigits(0.111111) === 6
@test sigdigits(0.1111111) === 7
@test sigdigits(0.11111111) === 8
@test sigdigits(0.111111111) === 9
@test sigdigits(0.1111111111) === 10
@test sigdigits(0.11111111111) === 11
@test sigdigits(0.111111111111) === 12
@test sigdigits(0.1111111111111) === 13
@test sigdigits(0.11111111111111) === 14
@test sigdigits(0.111111111111111) === 15
@test sigdigits(0.1111111111111111) === 16
for T in (BigInt, Int64, Int32, Int16, Int8, UInt64, UInt32, UInt16, UInt8)
@test sigdigits(T(100)) === 1
@test sigdigits(T(101)) === 3
@test leastsigfig(T(100)) === 100.
@test leastsigfig(T(101)) === 1.
end
@test sigdigits(big"1000.") === sigdigits(1000.) === sigdigits(Float32(1000.)) === sigdigits(Float16(1000.)) === 1
@test sigdigits(big"1000.5") === sigdigits(1000.5) === sigdigits(Float32(1000.5)) === 5
@test leastsigfig(big"1000.") === leastsigfig(1000.) === leastsigfig(Float32(1000.)) === leastsigfig(Float16(1000.)) === 1000.
@test leastsigfig(big"1000.5") === leastsigfig(1000.5) === leastsigfig(Float32(1000.5)) === 0.1
@test sigdigits(NaN) == sigdigits(Inf) == sigdigits(0) == 0
@test isnan(leastsigfig(NaN))
@test leastsigfig(Inf) == Inf
@test leastsigfig(0) == 0
## --- Linear regression
@test linreg(1:10, 1:10) ≈ [0, 1]
## --- Distributions
@test normpdf.(0, 1,-1:1) ≈ [0.24197072451914337, 0.3989422804014327, 0.24197072451914337]
@test normpdf.(1:10, 1:10, 1:10) ≈ normpdf(collect.((1:10, 1:10, 1:10))...)
@test normpdf_ll.(0,1,-5:5) == -(-5:5).^2/2
r = collect(-5:5)
@test normpdf_ll(0,1,r) == normpdf_ll(0,ones(11),r) == normpdf_ll(zeros(11),ones(11),r) == sum(normpdf_ll.(0,1,r))
@test normpdf_ll(ones(10),1,collect(1:10)) == normpdf_ll(collect(1:10),1,ones(10)) ≈ -142.5 # Test for symmetry
@test normlogpdf.(0,1,-5:5) == -(-5:5).^2/2 .- log(sqrt(2pi))
r = collect(-5:5)
@test normlogpdf(0,1,r) == normlogpdf(0,ones(11),r) == normlogpdf(zeros(11),ones(11),r) == sum(normlogpdf.(0,1,r))
@test normlogpdf(ones(10),1,collect(1:10)) == normlogpdf(collect(1:10),1,ones(10)) ≈ -151.68938533204673 # Test for symmetry
@test normcdf(1,1,1) == 0.5
result = zeros(5)
normcdf!(result, 0, 1, -2:2)
@test result ≈ normcdf(0,1,-2:2) ≈ normcdf.(0,1,-2:2) ≈ [0.02275013194817921, 0.15865525393145707, 0.5, 0.8413447460685429, 0.9772498680518208]
@test normcdf.(1:10, 1:10, 1:10) == normcdf(collect.((1:10, 1:10, 1:10))...) == fill(0.5, 10)
@test normcdf_ll.(0,1,-5:5) ≈ [-15.064998393988725, -10.360101486527292, -6.607726221510349, -3.7831843336820317, -1.841021645009264, -0.6931471805599453, -0.17275377902344985, -0.023012909328963486, -0.0013508099647481923, -3.1671743377489226e-5, -2.866516129637633e-7]
r = collect(-5:5)
@test normcdf_ll(r) ≈ -38.54732871798976
@test normcdf_ll(r) == normcdf_ll(0,1,r) == normcdf_ll(0,ones(11),r) == normcdf_ll(zeros(11),ones(11),r) == sum(normcdf_ll.(0,1,r))
@test normcdf_ll(zeros(10),1,collect(1:10)) == normcdf_ll(-collect(1:10),1,zeros(10)) ≈ -0.19714945770002004 # Test for symmetry
@test normcdf_ll!(float.(r)) ≈ -38.54732871798976
@test normcdf_ll!(float.(r)) == normcdf_ll!(0,1,float.(r)) == normcdf_ll!(0,ones(11),float.(r)) == normcdf_ll!(zeros(11),ones(11),float.(r))
@test normcdf_ll!(zeros(10),1,collect(1:10.)) == normcdf_ll!(-collect(1:10),1,zeros(10)) ≈ -0.19714945770002004 # Test for symmetry
@test normproduct(0,1,0,1) === normpdf(0,sqrt(2),0) === 0.28209479177387814
@test normproduct_ll(0,1,0,1) === normpdf_ll(0,sqrt(2),0) === 0.0
@test normlogproduct(0,1,0,1) === normlogpdf(0,sqrt(2),0) === -log(sqrt(2pi)*sqrt(2))
@test [-2,0,2] ≈ norm_quantile.([0.022750131948, 0.5, 0.977249868052])
@test norm_quantile.(0:0.25:1) ≈ [-Inf, -0.6744897501960818, 0.0, 0.6744897501960818, Inf]
@test isapprox(norm_width(390682215445)/2, 7, atol=1e-5)
## -- Geometry
@test inpolygon([-1,0,1,0],[0,1,0,-1],[0,0])
@test !inpolygon([-1,0,1,0],[0,1,0,-1],[0,10])
@test inpolygon([-1,0,1,0],[0,1,0,-1],prevfloat.([0.5,0.5]))
@test !inpolygon([-1,0,1,0],[0,1,0,-1],nextfloat.([0.5,0.5]))
@test inpolygon([-1,1,1,-1],[1,1,-1,-1],(0,0))
@test !inpolygon([-1,1,1,-1],[1,1,-1,-1],(1.1,1))
i,j = find_grid_inpolygon(-1.5:1/3:1.5, -1.5:1/3:1.5, [-.75,.75,.75,-.75],[.75,.75,-.75,-.75])
@test sort([i j], dims=2) == [4 4; 4 5; 4 6; 4 7; 4 5; 5 5; 5 6; 5 7; 4 6; 5 6; 6 6; 6 7; 4 7; 5 7; 6 7; 7 7]
@test arcdistance(0,100,[30,0,0],[100,100,95]) ≈ [30,0,5]
@test minarcdistance(0,100,[30,0,0],[100,100,95]) ≈ fill(0)
@test minarcdistance([1,0,1,2,3,4],[101,100,100,100,100,100],[30,0,0],[100,100,95]) ≈ [1.414177660951948,0,1,2,3,4]
## ---
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | docs | 1381 | # StatGeochemBase
[![Docs][docs-dev-img]][docs-dev-url]
[![CI][ci-img]][ci-url]
[![CI-julia-nightly][ci-nightly-img]][ci-nightly-url]
[![codecov.io][codecov-img]][codecov-url]
A set of statistical, geochemical, and geochronological functions common to [Chron.jl](https://github.com/brenhinkeller/Chron.jl) and [StatGeochem.jl](https://github.com/brenhinkeller/StatGeochem.jl)
Depends upon [NaNStatistics.jl](https://github.com/brenhinkeller/NaNStatistics.jl) for NaN-ignoring summary statistics, histograms, and binning.
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://brenhinkeller.github.io/StatGeochemBase.jl/stable/
[docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[docs-dev-url]: https://brenhinkeller.github.io/StatGeochemBase.jl/dev/
[ci-img]: https://github.com/brenhinkeller/StatGeochemBase.jl/workflows/CI/badge.svg
[ci-url]: https://github.com/brenhinkeller/StatGeochemBase.jl/actions/workflows/CI.yml
[ci-nightly-img]:https://github.com/brenhinkeller/StatGeochemBase.jl/workflows/CI%20(Julia%20nightly)/badge.svg
[ci-nightly-url]:https://github.com/brenhinkeller/StatGeochemBase.jl/actions/workflows/CI-julia-nightly.yml
[codecov-img]: https://codecov.io/gh/brenhinkeller/StatGeochemBase.jl/branch/main/graph/badge.svg
[codecov-url]: http://codecov.io/github/brenhinkeller/StatGeochemBase.jl?branch=main
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 0.6.6 | 57663205a3b08c4e5fdd398ed1f08cc5a1c318e2 | docs | 216 | ```@meta
CurrentModule = StatGeochemBase
```
# StatGeochemBase
Documentation for [StatGeochemBase](https://github.com/brenhinkeller/StatGeochemBase.jl).
```@index
```
```@autodocs
Modules = [StatGeochemBase]
```
| StatGeochemBase | https://github.com/brenhinkeller/StatGeochemBase.jl.git |
|
[
"MIT"
] | 3.0.0 | cb117510f2ba3d831439f56a5a3c00170cbf7a8d | code | 1017 | using Literate
using Dates
# TODO: Remove items from `SKIPFILE` as soon as they run on the latest stable
ONLYSTATIC = []
EXAMPLE_DIRS = ["Tutorials",]
SKIPFILE = [
"t03_eop.jl", "t04_lighttime.jl", "t05_multithread.jl"
]
# Replace the DATEOFTODAY placeholder in a tutorial source with the
# current timestamp, returning the updated text.
function update_date(content)
    return replace(content, "DATEOFTODAY" => Dates.DateTime(now()))
end
for edir in EXAMPLE_DIRS
gen_dir = joinpath(@__DIR__, "src", edir, "gen")
example_dir = joinpath(@__DIR__, "src", edir)
for example in filter!(x -> endswith(x, ".jl"), readdir(example_dir))
if example in SKIPFILE
continue
end
input = abspath(joinpath(example_dir, example))
script = Literate.script(input, gen_dir)
code = strip(read(script, String))
mdpost(str) = replace(str, "@__CODE__" => code)
Literate.markdown(
input, gen_dir,
preprocess=update_date,
postprocess=mdpost,
documenter=!(example in ONLYSTATIC)
)
end
end
| FrameTransformations | https://github.com/JuliaSpaceMissionDesign/FrameTransformations.jl.git |
|
[
"MIT"
] | 3.0.0 | cb117510f2ba3d831439f56a5a3c00170cbf7a8d | code | 1313 | using Documenter, FrameTransformations
using Pkg
# True when running in a CI environment (e.g. GitHub Actions).
const CI = get(ENV, "CI", "false") == "true"
if CI
    # Install the extra packages required by the docs build (the tutorials and
    # the Literate-based page generation) into the CI docs environment.
    Pkg.add("Ephemerides")
    Pkg.add("StaticArrays")
    Pkg.add("ReferenceFrameRotations")
    Pkg.add("JSMDUtils")
    Pkg.add("JSMDInterfaces")
    Pkg.add("Literate")
    Pkg.add("Dates")
    Pkg.add("Tempo")
end
# Render the Literate.jl tutorials to markdown before building the site.
include("generate.jl")
makedocs(;
    authors="JSMD Development Team",
    sitename="FrameTransformations.jl",
    modules=[FrameTransformations],
    format=Documenter.HTML(; prettyurls=CI, highlights=["yaml"], ansicolor=true),
    pages=[
        "Home" => "index.md",
        "Tutorials" => [
            "01 - Frame System" => "Tutorials/gen/t00_frames.md",
            "02 - Rotation" => "Tutorials/gen/t01_rotation.md",
            "03 - Axes" => "Tutorials/gen/t02_axes.md",
            "04 - Points" => "Tutorials/gen/t03_points.md"
        ],
        "API" => [
            "Public API" => [
                "Axes" => "API/axes_api.md",
                "Points" => "API/point_api.md",
                "Directions" => "API/dir_api.md",
                "Frames" => "API/frames_api.md"
            ],
        ],
    ],
    clean=true,
    checkdocs=:none
)
# Deploy only from CI, where the gh-pages push credentials are available.
if CI
    deploydocs(;
        repo="github.com/JuliaSpaceMissionDesign/FrameTransformations.jl", branch="gh-pages"
    )
end
| FrameTransformations | https://github.com/JuliaSpaceMissionDesign/FrameTransformations.jl.git |
|
[
"MIT"
] | 3.0.0 | cb117510f2ba3d831439f56a5a3c00170cbf7a8d | code | 5400 | # # [Frame System Overview](@id tutorial_00_frames)
# _This example was generated on DATEOFTODAY._
# The core object of `FrameTransformations` is the [`FrameSystem`](@ref), which provides
# the capability to compute relative position, orientation and their time derivatives up to
# order 3 (jerk), between standard and user-defined point and axes. It works by creating two
# separate graphs that silently store and manage all the parent-child relationships between
# the user-registered axes and points, in the form of `FramePointNode` and `FrameAxesNode`.
# These two objects define two precise entities:
# - **Axes**: defines an orientation in space. These are related to each other by means of a
# [`Rotation`](@ref) transformation which relates one axes to a parent axes in
# a certain time interval.
# - **Points**: defines a location in space. These are related to each other by
# means of a [`Translation`](@ref) transformation which relates one point to a parent point in a
# particular axes in a certain time interval.
# Additionally, it is possible to create `Direction`s, as vector valued functions that could
# be used to define custom frames.
#-
#md # !!! note
#md # A single [`FrameSystem`](@ref) instance simultaneously handles both the axes and
#md # point graphs, regardless of what the user registers in it. For instance, if no
#md # points are added, the point graph will remain empty. The same applies for directions.
#-
# Additionally, any node can have several childs, each with different transformations with
# respect to the parent node. However, they shall be **registered** within the
# [`FrameSystem`](@ref) before being used in a transformation or as parents of other nodes.
# ## Basic Constructors
# The creation of a generic [`FrameSystem`](@ref) requires the definition of the maximum
# desired transformation order and of its `DataType`, which in most applications is a `Float64`.
# The transformation order is always one greater than the maximum desired time derivative.
# For instance, if the user only desires to compute position and velocity components (i.e.,
# order 1 time-derivative), the transformation order to be used is 2. Thus, the maximum
# allowed transformation order is 4.
# In this example, we highlight the most basic way to initialise a [`FrameSystem`](@ref):
using FrameTransformations
using Tempo
F = FrameSystem{2,Float64}()
# From this example, you can see that within the frame system there are both point and axes
# graphs. However, at the moment they are completely empty since the graph was just created.
# Each [`FrameSystem`](@ref) object is assigned a reference timescale that is used to perform
# computations with epochs and to parse ephemeris files. The default timescale is the
# `BarycentricDynamicalTime`, however, the user is free to select the most suited timescale
# for his applications. In this example, we set the `InternationalAtomicTime` as the reference scale.
F = FrameSystem{2,Float64,InternationalAtomicTime}()
# ## Graph Inspection
# Once a [`FrameSystem`](@ref) is constructed (and populated) there are many routines devoted
# to inspect its content. As already said, there are three main *objects* that are contained
# in the `FrameSystem`: **points**, **axes** and **directions**. For each of them series of
# utility functions are made available in order to check for the presence of a registered point:
has_point(F, 1)
# a registered axes:
has_axes(F, 1)
# or a registered direction:
has_direction(F, :Root)
# Additionally, the possibility to get a dictionary containing all name-id relationships is
# made available for axes, via the [`axes_alias`](@ref) method:
axes_alias(F)
# and points, via the [`points_alias`](@ref) method:
points_alias(F)
# Finally, the `FrameSystem` order and timescale might be retrieved via the associated methods:
order(F)
#-
FrameTransformations.timescale(F)
# Refer to the [API](@ref frames_api) for additional details.
# ## Basic Usage
#md # !!! note
#md # Work in progress
# ## Ephemerides Support
# In certain scenarios, the transformations require usage of binary ephemeris kernels, e.g.,
# the JPL's DE440 files. To support this applications, this package has an interface relying
# on [JSMDInterfaces.jl](https://github.com/JuliaSpaceMissionDesign/JSMDInterfaces.jl)
# `AbstractEphemerisProvider`s. Currently, this package is shipped with extension for the
# following two ephemeris readers:
# * [Ephemerides.jl](https://github.com/JuliaSpaceMissionDesign/Ephemerides.jl)
# * [CalcephEphemeris.jl](https://github.com/JuliaSpaceMissionDesign/CalcephEphemeris.jl)
# Once the desired ephemeris provider is created, it can be used to register points or axes.
# In this example we begin loading an old DE421 kernels to pass to the ephemeris reader.
using Ephemerides, Downloads
url = "https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/a_old_versions/de421.bsp";
E = EphemerisProvider(Downloads.download(url));
F = FrameSystem{2,Float64}()
# Before registering any node, a set of root axes and a root node shall be anyway registered.
add_axes_icrf!(F)
add_point!(F, :SSB, 0, 1)
# Points from the `EphemerisProvider` can be now registered.
add_point_ephemeris!(F, E, :Sun, 10)
add_point_ephemeris!(F, E, :EMB, 3)
# Here the parent point will be inferred from the ephemeris.
F | FrameTransformations | https://github.com/JuliaSpaceMissionDesign/FrameTransformations.jl.git |
|
[
"MIT"
] | 3.0.0 | cb117510f2ba3d831439f56a5a3c00170cbf7a8d | code | 1521 | # # [Rotations](@id tutorial_01_rotation)
# _This example was generated on DATEOFTODAY._
# Before diving into the creation of the axes graph, it is worth highlighting that transformations
# that express the relative orientation or its time-derivatives between two generic set of
# axes are represented by a [`Rotation`](@ref) object, which stores a Direction Cosine Matrix
# (DCM) and its derivatives. This package leverages the already available
# [ReferenceFrameRotations.jl](https://github.com/JuliaSpace/ReferenceFrameRotations.jl)
# to define the DCM objects.
# A time-fixed rotation between two axes and its derivative can then be expressed as follows:
using StaticArrays
using FrameTransformations
using ReferenceFrameRotations
dcm = angle_to_dcm(π / 3, :Z)
δdcm = DCM(0I)
R = Rotation(dcm, δdcm)
#-
R[1]
#-
R[2]
# A rotation object is returned by all the rotation functions that are applied to the `FrameSystem`.
# It provide overloads to the basic algebraic operations so that multiplication and inversions
# can be efficiently computed leveraging the properties of rotation matrixes.
# For example, to rotate a generic vector `v`, we can simply do:
v = [1.0, -6.0, 3.0, 0.0, 5.0, 0]
R * v
# For a static vector `sv`:
sv = SA[1.0, -6.0, 3.0, 0.0, 5.0, 0]
R * sv
# And for a [`Translation`](@ref)
t = Translation(1.0, -6.0, 3.0, 0.0, 5.0, 0)
R * t
# The inverse can instead be taken as:
inv(R)
# See the [Rotation API](@ref rotation_api) for more information on this object. | FrameTransformations | https://github.com/JuliaSpaceMissionDesign/FrameTransformations.jl.git |
|
[
"MIT"
] | 3.0.0 | cb117510f2ba3d831439f56a5a3c00170cbf7a8d | code | 7740 | # # [Axes](@id tutorial_01_axes)
# _This example was generated on DATEOFTODAY._
# To compute relative orientations, `FrameTransformations` provides the capability to define
# custom and standard reference axes (e.g., the ITRF) and arbitrarily connect them through
# the [`FrameSystem`](@ref) In turn, this allows the computation of the relative orientation
# and its derivatives (up to order 3) between any two registered axes.
# At the time being, the following types of axes are supported:
# - **Inertial axes**: these are the only ones which can be used as root axes to initialise
# the axes graph.
# - **Fixed offset axes**: they have a constant orientation with respect to their parent axes.
# - **Rotating axes**: the orientation of these axes depends only on time and is computed
# through the custom functions provided by the user.
# - **Ephemeris axes**: these are constructed by extracting the Euler rotation angles and their
# derivatives from the binary PCK kernels that are loaded within the [`FrameSystem`](@ref).
#-
#md # !!! note
#md # This package provides a dedicated function to register each type of supported axes.
#md # Additionally, higher-level functions to automatically register standard astronomical
#md # reference axes are also provided, e.g., [`add_axes_ecl2000!`](@ref).
#-
# ## Graph Initialisation
# In this section we will display how to create a frame system to compute generic axes rotation.
# First of all, we need to load both this package and an ephemeris reader.
# The latter will be used to compute the orientation of the Moon's Principal Axes (PA) 440,
# whose Euler angles are defined in binary PCK kernels and to retrieve the positions of the
# planets. In this example, [Ephemerides.jl](https://github.com/JuliaSpaceMissionDesign/Ephemerides.jl)
# package and download the kernels from NAIF's website.
using FrameTransformations
using Ephemerides
url_pck = "https://naif.jpl.nasa.gov/pub/naif/generic_kernels/pck/moon_pa_de421_1900-2050.bpc";
url_spk = "https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/a_old_versions/de421.bsp";
const EPH = EphemerisProvider([download(url_spk), download(url_pck)])
const F = FrameSystem{3,Float64}()
# To initialise the axes graph, a set of root axes must be initially registered.
# These will serve as the uppermost node of the graph and have no parents, meaning their
# orientation is not specified. Only inertial axes can be used as root axes of the
# [`FrameSystem`](@ref).
# In this example, we will use the `ICRF` as our base root inertial axes.
add_axes!(F, :ICRF, AXESID_ICRF)
# Once a set of root axes has been registered, any other type of axes can be added to the system.
#md # !!! note
#md # For standard applications, it is good practice that the axes's IDs are as in agreement
#md # with NAIF's numbering system. A list of IDs for the most common axes is provided in
#md # this package.
#md # !!! note
#md # The frame system uses an integer system based on the user-defined IDs to compute
#md # the transformations between axes and points.
# Inertial axes are those that are fixed with respect to the star background.
# They are the only ones that can be used as root axes in the frame system but can also be
# defined through a relative orientation with respect to another set of inertial axis.
# ## [Inertial Axes](@id ine_axes)
# In this example, we register the `GCRF` as a set of inertial axes with respect to
# the `ICRF`. We assume that the two frames are equivalent, thus:
using ReferenceFrameRotations
using LinearAlgebra
fun(t) = DCM(1.0I)
add_axes_projected!(F, :GCRF, AXESID_GCRF, :ICRF, fun)
R = rotation6(F, AXESID_ICRF, AXESID_GCRF, 1.0)
#-
R[1]
#-
R[2]
# Since it is an inertial frame, the time derivative of the rotation is null.
# ## [Fixed-offset Axes](@id fox_axes)
# Fixed-offset axes have a constant orientation with respect to their parent axes in time.
# We previously saw that inertial axes can also be used to define axes with a fixed orientation
# with respect to their parents. However, while inertial axes do not rotate with respect to
# the star background, fixed offset axes are only constant with respect to their parent axes,
# but might be rotating with respect to some other inertial axes.
# In this example, we register `FOX` as a set of axes with a fixed rotation of `π/4` around
# the Z-axis with respect to the `ICRF`.
rot = angle_to_dcm(π / 4, :Z)
add_axes_fixedoffset!(F, :FOX, 2, AXESID_ICRF, rot)
# The state rotation matrix can then be obtained as:
R = rotation6(F, :ICRF, :FOX, 86400)
#-
R[1]
#-
R[2]
# Since `FOX` has a constant orientation with respect to the `ICRF`, the time derivative of
# the rotation matrix `R[2]` is, in fact, null. For further information see the
# [`add_axes_fixedoffset!`](@ref) documentation.
# ## [Rotating Axes](@id rot_axes)
# Rotating axes are generic, time-dependent, non-inertial axes. In order to register this
# kind of axes, a function (and optionally its derivatives) that expresses the relative
# orientation of this axes must be defined. This function shall return a Direction Cosine
# Matrix (DCM), available from [ReferenceFrameRotations.jl](https://github.com/JuliaSpace/ReferenceFrameRotations.jl).
fun(t) = angle_to_dcm(-t, :Z)
add_axes_rotating!(F, :ROX, 3, :ICRF, fun)
# If we now compute the orientation between the `FOX` and `ROX` at `π/4` we obtain an identity
# rotation, since the orientation of `ROX` is directed in the opposite direction of `FOX`.
R = rotation6(F, 2, 3, π / 4)
#-
R[1]
# Notice that, although we only provided a function that expresses the relative orientation,
# the frame system has automatically computed its time-derivative via Automatic Differentiation
# (AD) of `fun`.
#-
R2 = rotation6(F, 1, 3, π / 4)
#-
R2[2]
# This becomes particularly useful for rapid prototyping or when the manual differentiation
# requires a lot of time. The functions for higher-order derivatives, must return the original
# DCM and its derivatives up to their orders. For example:
using JSMDUtils.Math
fun(t) = angle_to_dcm(-t, :Z)
dfun(t) = (angle_to_dcm(-t, :Z), Math.angle_to_δdcm([-t, -1], :Z))
add_axes_rotating!(F, :ROX2, 4, :ICRF, fun, dfun)
R2 = rotation6(F, 1, 3, π / 4)
#-
R2[2]
# We can see the results are in agreement with the previous example.
# For more details, see [`add_axes_rotating!`](@ref) documentation.
# ## Ephemeris Axes
# Ephemeris axes are a type of time-dependent axes which are built by means of Euler angles
# contained within a binary PCK ephemeris kernel. For example, in practice these are used
# to express the orientation of high-accuracy Lunar body-fixed frames (i.e., the Principal
# Axes) or the Earth's ITRF.
#md # !!! note
#md # To properly compute the orientation of these axes, the ephemeris provider used
#md # must contain the necessary PCK kernels.
#md # Additionally, in this case the ID of the registered axes must match the ID
#md # contained in the PCK kernels.
# In this example, the ephemeris provider `EPH` has loaded the DE421
# PCK kernel containing the orientation of the Moon's Principal Axes (PA421). NAIF's system
# has assigned to such set of axes the ID `31006`. If a different ID was assigned to the
# `MoonPA`, the function would have thrown an error.
# The function also requires the user to specify the rotation sequence to convert the Euler
# angles to a proper rotation matrix.
FrameTransformations.add_axes_ephemeris!(F, EPH, :MOONPA, 31006, :ZXZ)
R = rotation6(F, :ICRF, :MOONPA, 86400.0)
#-
R[1]
#-
R[2]
# For further information see the [`add_axes_ephemeris!`](@ref) documentation.
| FrameTransformations | https://github.com/JuliaSpaceMissionDesign/FrameTransformations.jl.git |
|
[
"MIT"
] | 3.0.0 | cb117510f2ba3d831439f56a5a3c00170cbf7a8d | code | 5477 | # # [Points Creation and Translations](@id tutorial_02_points)
# _This example was generated on DATEOFTODAY._
# Similarly to [axes](@ref tutorial_01_axes), `FrameTransformations` also provides the
# capability to define custom and standard reference points (e.g., the Solar System
# Barycenter) and arbitrarily connect them through the [`FrameSystem`](@ref). In turn, this
# allows the computation of the relative position and its derivatives (up to order 3) between
# any two registered points and express it in any known set of axes.
# At the time being, the following types of points are supported:
# - **Root point**: it is the root of the point graph.
# - **Fixed points**: are those whose positions have a constant offset with respect their
# parent point in a given set of axes.
# - **Dynamical points**: the position of these points depends only on time and is computed
# through custom user-defined functions.
# - **Ephemeris points**: are those whose state-vector is retrieved from binary SPK kernels
# (e.g., DE440) that are loaded within the [`FrameSystem`](@ref).
#md # !!! note
#md # This package provides a dedicated function to register each type of supported points.
# ## Graph Initialisation
# In this section we will display how to create a frame system to compute generic points
# transformation. Differently from the axes graph, each register point is also associated
# to a set of axes. Hence, this tutorial assumes the reader to already be familiar with the
# different types of axes and their definitions.
# We then can go ahead and initialise the graph.
using StaticArrays
using FrameTransformations
F = FrameSystem{2,Float64}()
# ## Root Point
# To initialise the point graph, we first need to define a root point. This, in turn, must
# be associated to an arbitrary set of axes. Therefore, we begin by definining a generic
# `SatFrame`, here considered as inertial, and then register a root point, called
# `SC` in our graph.
# A root point can be registered using the [`add_point!`](@ref) function:
add_axes!(F, :SatFrame, -1)
add_point!(F, :SC, -10000, :SatFrame)
#md # !!! tip
#md # For standard applications, it is good practice that the points's IDs are as in
#md # agreement with NAIF's numbering system. This becomes mandatory to properly read
#md # JPL's SPK kernels.
#md # !!! note
#md # The frame system uses an integer system based on the user-defined IDs to compute
#md # the transformations between axes and points. The name and acronym of the point are
#md # only used as aliases to provide a user-friendly interface to the transformations
#md # and do not have any other meaning.
# ## Fixed Points
# Fixed points have a constant relative position vector with respect to their parent points
# in a given set of axes. Similarly to fixed-offset axes, these points are fixed w.r.t. their
# parents but might be moving with respect to others.
# In this example, we use the [`add_point_fixedoffset!`](@ref) function to register the location
# of an antenna and two solar panels, which are generally fixed in the satellite body-fixed
# frame. To do so, we define a position offset in the form of a 3-elements vector with respect
# to the `SC`.
sa_offset_left = [1.0, 0.0, 0.0]
sa_offset_right = [-1.0, 0.0, 0.0]
an_offset = [0.0, 0.0, -1.0]
add_point_fixedoffset!(F, :SolArrLeft, -10101, :SC, :SatFrame, sa_offset_left)
add_point_fixedoffset!(F, :SolArrRight, -10102, :SC, :SatFrame, sa_offset_right)
add_point_fixedoffset!(F, :Antenna, -10001, :SC, :SatFrame, an_offset)
# As a result the graph is now populated with the new points and we can finally compute
# their relative positions and velocities with the proper transformation functions:
#-
vector3(F, :SolArrLeft, :SC, :SatFrame, 123.0)
#-
vector6(F, :Antenna, :SolArrRight, :SatFrame, 456.0)
# As expected, since these points are fixed, the relative velocity vector is null.
# ## Dynamical Points
# Dynamical points are generic time-dependent points whose position vector (and optionally
# its derivatives) are only function of time. However, differently from ephemeris points,
# their position is computed through user-defined functions.
fun(t) = SA[cos(t), sin(t), 0.0]
add_point_dynamical!(F, :TimedAppendage, -10003, :SolArrLeft, :SatFrame, fun)
#-
vector3(F, :TimedAppendage, :SC, :SatFrame, π / 3)
#md # !!! note
#md # To avoid allocations, `fun` should return a static array.
# Similarly to rotating-axes, if the user only provides the function to compute the relative
# position, the remaining derivatives are automatically retrievied via automatic
# differentiation of `fun`. On the other hand, if those functions are specified, they must
# return a single vector that stacks all the components. For instance, for the first order
# derivative of `fun`, the function should return a 6-elements vector containing the
# relative position and velocity. For example:
fun(t) = SA[cos(t), sin(t), 0]
dfun(t) = SA[cos(t), sin(t), 0, -sin(t), cos(t), 0]
add_point_dynamical!(F, :TimedAppendage2, -10004, :SolArrLeft, :SatFrame, fun, dfun)
#-
vector6(F, :TimedAppendage2, :SC, :SatFrame, π / 3)
# We can again see that the results are in agreement with the previous example.
# For more details, consult the [`add_point_dynamical!`](@ref) documentation.
# ## Ephemeris Points
# Refer to the [frames tutorial](@ref tutorial_00_frames)'s *Ephemeris Support* section. | FrameTransformations | https://github.com/JuliaSpaceMissionDesign/FrameTransformations.jl.git |
|
[
"MIT"
] | 3.0.0 | cb117510f2ba3d831439f56a5a3c00170cbf7a8d | code | 2272 | module EphemeridesExt
import FrameTransformations: add_point_ephemeris!, add_axes_ephemeris!
using FrameTransformations: FrameSystem,
FramePointFunctions, Translation, add_point!,
FrameAxesFunctions, Rotation, add_axes!,
check_point_ephemeris, check_axes_ephemeris,
angles_to_rot3, angles_to_rot6, angles_to_rot9, angles_to_rot12
using Ephemerides: EphemerisProvider,
ephem_vector3, ephem_vector6, ephem_vector9, ephem_vector12,
ephem_rotation3, ephem_rotation6, ephem_rotation9, ephem_rotation12
"""
add_point_ephemeris!(fr::FrameSystem{O, T}, eph::EphemerisProvider,
name::Symbol, id::Int) where {O, T}
Add a point from `Ephemerides.jl` provider.
"""
function add_point_ephemeris!(
fr::FrameSystem{O,T}, eph::EphemerisProvider, name::Symbol, id::Int
) where {O,T}
pid, axid = check_point_ephemeris(fr, eph, id)
funs = FramePointFunctions{O,T}(
t -> Translation{O}(ephem_vector3(eph, pid, id, t)),
t -> Translation{O}(ephem_vector6(eph, pid, id, t)),
t -> Translation{O}(ephem_vector9(eph, pid, id, t)),
t -> Translation{O}(ephem_vector12(eph, pid, id, t))
)
return add_point!(fr, name, id, axid, funs, pid)
end
"""
add_axes_ephemeris!(fr::FrameSystem{O, T}, eph::EphemerisProvider,
name::Symbol, id::Int) where {O, T}
Add an axes from `Ephemerides.jl` provider.
"""
function add_axes_ephemeris!(
fr::FrameSystem{O,T}, eph::EphemerisProvider, name::Symbol, id::Int, rot_seq::Symbol
) where {O,T}
# Check and retrieve the parent ID for the given axes
pid = check_axes_ephemeris(fr, eph, id)
if rot_seq in (:ZYX, :XYX, :XYZ, :XZX, :XZY, :YXY, :YXZ, :YZX, :YZY, :ZXY, :ZXZ, :ZYZ)
funs = FrameAxesFunctions{O,T}(
t -> Rotation{O}(angles_to_rot3(ephem_rotation3(eph, pid, id, t), rot_seq)),
t -> Rotation{O}(angles_to_rot6(ephem_rotation6(eph, pid, id, t), rot_seq)),
t -> Rotation{O}(angles_to_rot9(ephem_rotation9(eph, pid, id, t), rot_seq)),
t -> Rotation{O}(angles_to_rot12(ephem_rotation12(eph, pid, id, t), rot_seq))
)
else
throw(ArgumentError("The rotation sequence :$rot_seq is not valid."))
end
return add_axes!(fr, name, id, funs, pid)
end
end | FrameTransformations | https://github.com/JuliaSpaceMissionDesign/FrameTransformations.jl.git |
|
[
"MIT"
] | 3.0.0 | cb117510f2ba3d831439f56a5a3c00170cbf7a8d | code | 4092 | module FrameTransformations
using LinearAlgebra
using StaticArrays
using ReferenceFrameRotations
using FunctionWrappers: FunctionWrapper
using FunctionWrappersWrappers: FunctionWrappersWrapper
using JSMDUtils.Math: D¹, D², D³, arcsec2rad,
unitvec, δunitvec, δ²unitvec, δ³unitvec,
cross3, cross6, cross9, cross12,
angle_to_δdcm, _3angles_to_δdcm, _3angles_to_δ²dcm, _3angles_to_δ³dcm
using JSMDInterfaces.Graph: AbstractJSMDGraphNode,
add_edge!, add_vertex!, get_path, has_vertex
using SMDGraphs: MappedNodeGraph, SimpleGraph, MappedGraph,
get_mappedid, get_mappednode, get_node, get_path
import SMDGraphs: get_node_id
using Tempo: AbstractTimeScale, Epoch, j2000s, BarycentricDynamicalTime, ftype, CENTURY2SEC
using JSMDInterfaces.Ephemeris: AbstractEphemerisProvider,
ephem_position_records, ephem_available_points,
ephem_orient_records, ephem_available_axes
using JSMDInterfaces.Interface: @interface
using JSMDInterfaces.Bodies: body_rotational_elements, ∂body_rotational_elements,
∂²body_rotational_elements, ∂³body_rotational_elements
using IERSConventions: iers_bias, iers_obliquity,
iers_rot3_gcrf_to_itrf, iers_rot6_gcrf_to_itrf,
iers_rot9_gcrf_to_itrf, iers_rot12_gcrf_to_itrf,
iers_rot3_gcrf_to_mod, iers_rot3_gcrf_to_tod, iers_rot3_gcrf_to_gtod,
iers_rot3_gcrf_to_pef, iers_rot3_gcrf_to_cirf, iers_rot3_gcrf_to_tirf,
IERSModel, iers2010a, iers2010b, iers1996
using ForwardDiff
using JSMDUtils.Autodiff: JSMDDiffTag, derivative
# ==========================================================================================
# Core
# ==========================================================================================
# Low-level types and aliases
export Translation, Rotation
include("Core/translation.jl")
include("Core/rotation.jl")
include("Core/ad.jl")
# Frame system
export FrameSystem,
order, timescale, points_graph, axes_graph, points_alias, axes_alias, directions,
has_axes, has_point, has_direction,
point_id, axes_id
include("Core/nodes.jl")
include("Core/graph.jl")
# Helper functions
export add_axes!, add_axes_projected!, add_axes_rotating!, add_axes_fixedoffset!,
add_point!, add_point_dynamical!, add_point_fixedoffset!,
add_direction!, add_axes_alias!, add_point_alias!,
add_point_ephemeris!, add_axes_ephemeris!
include("Core/axes.jl")
include("Core/points.jl")
include("Core/directions.jl")
# Transformations
export rotation3, rotation6, rotation9, rotation12,
vector3, vector6, vector9, vector12,
direction3, direction6, direction9, direction12
include("Core/transform.jl")
# ==========================================================================================
# Definitions
# ==========================================================================================
export AXESID_ICRF, AXESID_GCRF,
AXESID_ECL2000, AXESID_EME2000,
AXESID_MOONME_DE421, AXESID_MOONPA_DE421, AXESID_MOONPA_DE440
include("Definitions/index.jl")
export add_axes_icrf!, add_axes_gcrf!, add_axes_eme2000!, add_axes_ecl2000!
include("Definitions/celestial.jl")
include("Definitions/ecliptic.jl")
export add_point_ephemeris!
include("Definitions/ephemeris.jl")
export add_axes_frozen!
include("Definitions/frozen.jl")
export add_axes_itrf!, add_axes_cirf!, add_axes_tirf!,
add_axes_mod!, add_axes_tod!, add_axes_gtod!, add_axes_pef!
include("Definitions/terrestrial.jl")
export add_axes_bci2000!, add_axes_bcrtod!
include("Definitions/planetary.jl")
export add_axes_pa440!, add_axes_pa421!, add_axes_me421!
include("Definitions/lunar.jl")
export add_axes_topocentric!, add_point_surface!
include("Definitions/topocentric.jl")
export add_direction_position!, add_direction_velocity!, add_direction_orthogonal!,
add_direction_fixed!
include("Definitions/directions.jl")
export add_axes_twodir!
include("Definitions/axesfromdir.jl")
export add_axes_fixed_quaternion!, add_axes_fixed_angles!, add_axes_fixed_angleaxis!
include("Definitions/attitude.jl")
end | FrameTransformations | https://github.com/JuliaSpaceMissionDesign/FrameTransformations.jl.git |
|
[
"MIT"
] | 3.0.0 | cb117510f2ba3d831439f56a5a3c00170cbf7a8d | code | 1377 | const TagAD1{T} = ForwardDiff.Tag{JSMDDiffTag,T}
# Single-partial ForwardDiff dual number carrying the JSMD autodiff tag (see `TagAD1`).
const DualAD1{T} = ForwardDiff.Dual{TagAD1{T},T,1}
# ------------------------------------------------------------------------------------------
# Points
# Signature of a point transform function: epoch (scalar) -> `Translation` of order O.
const FramePointFunSignature{O,T} = FunctionWrapper{Translation{O,T},Tuple{T}}
# Wrapper holding one `FunctionWrapper` per accepted input type: the plain
# numeric type `T` and its single-partial dual counterpart `DualAD1{T}`.
const FramePointFunWrapper{O,T} = FunctionWrappersWrapper{
    Tuple{
        FramePointFunSignature{O,T},
        FramePointFunSignature{O,DualAD1{T}}
    },true
}
# Build a `FramePointFunWrapper` by wrapping `fun` once for each supported input
# type: the plain numeric type `T` and its single-partial dual `DualAD1{T}`.
function FramePointFunWrapper{O,T}(fun::Function) where {O,T}
    wrp = FramePointFunSignature{O,T}(fun)
    wrp_dual = FramePointFunSignature{O,DualAD1{T}}(fun)
    return FramePointFunWrapper{O,T}((wrp, wrp_dual))
end
# ------------------------------------------------------------------------------------------
# Axes
# Signature of an axes transform function: epoch (scalar) -> `Rotation` of order O.
const FrameAxesFunSignature{O,T} = FunctionWrapper{Rotation{O,T},Tuple{T}}
# Wrapper holding one `FunctionWrapper` per accepted input type: the plain
# numeric type `T` and its single-partial dual counterpart `DualAD1{T}`.
const FrameAxesFunWrapper{O,T} = FunctionWrappersWrapper{
    Tuple{
        FrameAxesFunSignature{O,T},
        FrameAxesFunSignature{O,DualAD1{T}}
    },true
}
# Build a `FrameAxesFunWrapper` by wrapping `fun` once for each supported input
# type: the plain numeric type `T` and its single-partial dual `DualAD1{T}`.
function FrameAxesFunWrapper{O,T}(fun::Function) where {O,T}
    wrp = FrameAxesFunSignature{O,T}(fun)
    wrp_dual = FrameAxesFunSignature{O,DualAD1{T}}(fun)
    return FrameAxesFunWrapper{O,T}((wrp, wrp_dual))
end
|
[
"MIT"
] | 3.0.0 | cb117510f2ba3d831439f56a5a3c00170cbf7a8d | code | 7021 | """
add_axes!(frames, name::Symbol, id::Int, class::Int, funs, parentid)
Add a new axes node to `frames`.
### Inputs
- `frames` -- Target frame system
- `name` -- Axes name, must be unique within `frames`
- `id` -- Axes ID, must be unique within `frames`
- `funs` -- `FrameAxesFunctions` object storing the functions to compute the DCM and,
eventually, its time derivatives.
- `parentid` -- Axes ID of the parent axes. Not required only for the root axes.
!!! warning
This is a low-level function and is NOT meant to be directly used. Instead, to add a set of
axes to the frame system, see [`add_axes_projected!`](@ref), [`add_axes_rotating!`](@ref)
and [`add_axes_fixedoffset!`](@ref).
"""
function add_axes!(
frames::FrameSystem{O,T}, name::Symbol, id::Int,
funs::FrameAxesFunctions{O,T}=FrameAxesFunctions{O,T}(),
parentid=nothing
) where {O,T<:Number}
if has_axes(frames, id)
# Check if a set of axes with the same ID is already registered within
# the given frame system
throw(
ArgumentError(
"Axes with ID $id are already registered in the frame system."
),
)
end
if name in map(x -> x.name, axes_graph(frames).nodes)
# Check if axes with the same name also do not already exist
throw(
ArgumentError(
"Axes with name=$name are already registered in the frame system."
),
)
end
if !isnothing(parentid)
# Check if the root axes is not present
isempty(axes_graph(frames)) && throw(ArgumentError("Missing root axes."))
# Check if the parent axes are registered in frame
if !has_axes(frames, parentid)
throw(
ArgumentError(
"The specified parent axes with ID $parentid are not " *
"registered in the frame system.",
),
)
end
else
# Check if axes are already present
!isempty(axes_graph(frames)) && throw(ArgumentError("Root axes already registed."))
# Root axes
parentid = id
end
# Create point
node = FrameAxesNode{O,T}(name, id, parentid, funs)
# Insert new point in the graph
add_axes!(frames, node)
# Connect the new axes to the parent axes in the graph
!isnothing(parentid) && add_edge!(axes_graph(frames), parentid, id)
return nothing
end
"""
add_axes_fixedoffset!(frames, name::Symbol, id::Int, parent, dcm:DCM)
Add axes `name` with id `id` to `frames` with a fixed-offset from `parent`.
Fixed offset axes have a constant orientation with respect to their `parent` axes,
represented by `dcm`, a Direction Cosine Matrix (DCM).
### See also
See also [`add_axes!`](@ref).
"""
function add_axes_fixedoffset!(
frames::FrameSystem{O,T}, name::Symbol, id::Int, parent, dcm::DCM{T}
) where {O,T}
funs = FrameAxesFunctions{O,T}(t -> Rotation{O}(dcm))
add_axes!(frames, name, id, funs, axes_id(frames, parent))
end
"""
add_axes_projected!(frames, name, id, parent, fun)
Add inertial axes `name` and id `id` as a set of projected axes to `frames`. The axes relation
to the `parent` axes are given by a `fun`.
Projected axes are similar to rotating axis, except that all the positions, velocity, etc ...
are rotated by the 0-order rotation (i.e. the derivatives of the rotation matrix are null,
despite the rotation depends on time).
### See also
See also [`add_axes!`](@ref).
"""
function add_axes_projected!(
frames::FrameSystem{O,T}, name::Symbol, id::Int, parent, fun::Function
) where {O,T}
funs = FrameAxesFunctions{O,T}(t -> Rotation{O}(fun(t)))
add_axes!(frames, name, id, funs, axes_id(frames, parent))
end
"""
add_axes_rotating!(frames, name::Symbol, id::Int, parent, fun, δfun=nothing,
δ²fun=nothing, δ³fun=nothing)
Add `axes` as a set of rotating axes to `frames`. The orientation of these axes depends only
on time and is computed through the custom functions provided by the user.
The input functions must accept only time as argument and their outputs must be as follows:
- `fun`: return a Direction Cosine Matrix (DCM).
- `δfun`: return the DCM and its 1st order time derivative.
- `δ²fun`: return the DCM and its 1st and 2nd order time derivatives.
- `δ³fun`: return the DCM and its 1st, 2nd and 3rd order time derivatives.
If `δfun`, `δ²fun` or `δ³fun` are not provided, they are computed via automatic differentiation.
!!! warning
It is expected that the input functions and their outputs have the correct signature. This
function does not perform any checks on the output types.
"""
function add_axes_rotating!(
frames::FrameSystem{O,T}, name::Symbol, id::Int, parent, fun,
δfun=nothing, δ²fun=nothing, δ³fun=nothing,
) where {O,T}
for (order, fcn) in enumerate([δfun, δ²fun, δ³fun])
if (O < order + 1 && !isnothing(fcn))
@warn "ignoring $fcn, frame system order is less than $(order+1)"
end
end
funs = FrameAxesFunctions{O,T}(
t -> Rotation{O}(fun(t)),
# First derivative
if isnothing(δfun)
t -> Rotation{O}(fun(t), D¹(fun, t))
else
t -> Rotation{O}(δfun(t))
end,
# Second derivative
if isnothing(δ²fun)
(
if isnothing(δfun)
t -> Rotation{O}(fun(t), D¹(fun, t), D²(fun, t))
else
t -> Rotation{O}(δfun(t)..., D²(fun, t))
end
)
else
t -> Rotation{O}(δ²fun(t))
end,
# Third derivative
if isnothing(δ³fun)
(
if isnothing(δ²fun)
(
if isnothing(δfun)
t -> Rotation{O}(fun(t), D¹(fun, t), D²(fun, t), D³(fun, t))
else
t -> Rotation{O}(δfun(t)..., D²(δfun, t)...)
end
)
else
t -> Rotation{O}(δ²fun(t)..., D³(fun, t))
end
)
else
t -> Rotation{O}(δ³fun(t))
end,
)
return add_axes!(frames, name, id, funs, axes_id(frames, parent))
end
"""
add_axes_alias!(frames, target, alias::Symbol)
Add a name `alias` to a `target` axes registered in `frames`.
"""
function add_axes_alias!(frames::FrameSystem{O,T}, target, alias::Symbol) where {O,T}
if !has_axes(frames, target)
throw(
ErrorException(
"no axes with ID $target registered in the given frame system"
)
)
end
if alias in keys(axes_alias(frames))
throw(
ErrorException(
"axes with name $alias already present in the given frame system"
)
)
end
push!(axes_alias(frames), Pair(alias, axes_id(frames, target)))
nothing
end | FrameTransformations | https://github.com/JuliaSpaceMissionDesign/FrameTransformations.jl.git |
|
[
"MIT"
] | 3.0.0 | cb117510f2ba3d831439f56a5a3c00170cbf7a8d | code | 2698 |
"""
add_direction!(frames, name::Symbol, axes, fun, δfun=nothing, δ²fun=nothing, δ³fun=nothing)
Add a new direction node to `frames`. The orientation of these direction depends only
on time and is computed through the custom functions provided by the user.
The input functions must accept only time as argument and their outputs must be as follows:
- `fun`: return a direction vector.
- `δfun`: return a direction vector and its 1st order time derivative.
- `δ²fun`: return a direction vector and its 1st and 2nd order time derivatives.
- `δ³fun`: return a direction vector and its 1st, 2nd and 3rd order time derivatives.
If `δfun`, `δ²fun` or `δ³fun` are not provided, they are computed via automatic differentiation.
!!! warning
It is expected that the input functions and their outputs have the correct signature. This
function does not perform any checks on the output types.
"""
function add_direction!(
frames::FrameSystem{O,N}, name::Symbol, axes, fun::Function,
δfun=nothing, δ²fun=nothing, δ³fun=nothing
) where {O,N}
for (order, fcn) in enumerate([δfun, δ²fun, δ³fun])
if (O < order + 1 && !isnothing(fcn))
@warn "ignoring $fcn, frame system order is less than $(order+1)"
end
end
funs = DirectionFunctions{O,N}(
t -> Translation{O}(fun(t)),
# First derivative
if isnothing(δfun)
t -> Translation{O}(vcat(fun(t), D¹(fun, t)))
else
t -> Translation{O}(δfun(t))
end,
# Second derivative
if isnothing(δ²fun)
(
if isnothing(δfun)
t -> Translation{O}(vcat(fun(t), D¹(fun, t), D²(fun, t)))
else
t -> Translation{O}(vcat(δfun(t), D²(fun, t)))
end
)
else
t -> Translation{O}(δ²fun(t))
end,
# Third derivative
if isnothing(δ³fun)
(
if isnothing(δ²fun)
(
if isnothing(δfun)
t -> Direction{O}(vcat(fun(t), D¹(fun, t), D²(fun, t), D³(fun, t)))
else
t -> Direction{O}(vcat(δfun(t), D²(fun, t), D³(fun, t)))
end
)
else
t -> Direction{O}(vcat(δ²fun(t), D³(fun, t)))
end
)
else
t -> Translation{O}(δ³fun(t))
end,
)
axid = axes_id(frames, axes)
dir = DirectionDefinition{O,N}(name, length(directions(frames)) + 1, axid, funs)
push!(directions(frames), Pair(name, dir))
nothing
end
| FrameTransformations | https://github.com/JuliaSpaceMissionDesign/FrameTransformations.jl.git |
|
[
"MIT"
] | 3.0.0 | cb117510f2ba3d831439f56a5a3c00170cbf7a8d | code | 5549 |
"""
    AliasGraph{G, A}

Wrap a graph `G` together with an alias map `A` resolving user-defined names
(symbols) to the integer node IDs stored in the graph.
"""
struct AliasGraph{G,A}
    graph::G
    alias::A
end
"""
FrameSystem{O, T, S}
A `FrameSystem` instance manages a collection of user-defined `FramePointNode`,
`FrameAxesNode` and `DirectionDefinition` objects, enabling computation of arbitrary transformations
between them. It is created by specifying the maximum transformation order `O`, the outputs
datatype `T` and an `AbstractTimeScale` instance `S`.
The following transformation orders are accepted:
- **1**: position
- **2**: position and velocity
- **3**: position, velocity and acceleration
- **4**: position, velocity, acceleration and jerk
---
FrameSystem{O, T, S}()
Create a new, empty `FrameSystem` object of order `O`, datatype `T` and timescale `S`.
The parameter `S` can be dropped, in case the default (`BarycentricDynamicalTime`) is used.
"""
struct FrameSystem{O,T<:Number,S<:AbstractTimeScale}
points::AliasGraph{PointsGraph{O,T},Dict{Symbol,Int}}
axes::AliasGraph{AxesGraph{O,T},Dict{Symbol,Int}}
dir::Dict{Symbol,DirectionDefinition{O,T}}
end
# Create an empty frame system with no registered points, axes or directions.
function FrameSystem{O,T,S}() where {O,T,S}
    return FrameSystem{O,T,S}(
        AliasGraph(MappedGraph(FramePointNode{O,T}), Dict{Symbol,Int}()),
        AliasGraph(MappedGraph(FrameAxesNode{O,T}), Dict{Symbol,Int}()),
        # Fully-typed dictionary matching the `dir` field, avoiding the
        # construction (and conversion) of an intermediate Dict{Any,Any}.
        Dict{Symbol,DirectionDefinition{O,T}}()
    )
end

# Convenience constructor defaulting the timescale to TDB.
@inline FrameSystem{O,T}() where {O,T} = FrameSystem{O,T,BarycentricDynamicalTime}()
# One-line summary displaying the order, the numeric type and the timescale.
function Base.summary(io::IO, ::FrameSystem{O,T,S}) where {O,T,S}
    return println(io, string("FrameSystem{", O, ", ", T, ", ", S, "}"))
end
"""
order(frames::FrameSystem{O}) where O
Return the frame system order `O`.
"""
@inline order(::FrameSystem{O}) where {O} = O
"""
timescale(frames::FrameSystem{O, T, S}) where {O, T, S}
Return the frame system order timescale `S`.
"""
@inline timescale(::FrameSystem{O,T,S}) where {O,T,S} = S
"""
points_graph(frames::FrameSystem)
Return the frame system points graph.
"""
@inline points_graph(f::FrameSystem) = f.points.graph
"""
axes_graph(frames::FrameSystem)
Return the frame system axes graph.
"""
@inline axes_graph(f::FrameSystem) = f.axes.graph
"""
points_alias(f::FrameSystem)
Return the registered points aliases map.
"""
@inline points_alias(f::FrameSystem) = f.points.alias
"""
axes_alias(f::FrameSystem)
Return the registered axes aliases map.
"""
@inline axes_alias(f::FrameSystem) = f.axes.alias
"""
directions(f::FrameSystem)
Return the direction dictionary.
"""
@inline directions(f::FrameSystem) = f.dir
"""
point_id(f::FrameSystem, id)
Get the `id` associate to a point.
"""
@inline point_id(::FrameSystem, id::Int) = id
@inline point_id(f::FrameSystem, name::Symbol) = points_alias(f)[name]
"""
axes_id(f::FrameSystem, id)
Get the `id` associate to an axes.
"""
@inline axes_id(::FrameSystem, id::Int) = id
@inline axes_id(f::FrameSystem, name::Symbol) = axes_alias(f)[name]
"""
add_point!(fs::FrameSystem{O, T}, p::FramePointNode{O, T}) where {O,T}
Add point to the frame system.
"""
function add_point!(fs::FrameSystem{O,T}, p::FramePointNode{O,T}) where {O,T}
push!(fs.points.alias, Pair(p.name, p.id))
return add_vertex!(fs.points.graph, p)
end
"""
add_axes!(fs::FrameSystem{O, T}, ax::FrameAxesNode{O, T}) where {O,T}
Add axes to the frame system.
"""
function add_axes!(fs::FrameSystem{O,T}, ax::FrameAxesNode{O,T}) where {O,T}
push!(fs.axes.alias, Pair(ax.name, ax.id))
return add_vertex!(fs.axes.graph, ax)
end
"""
has_point(frames::FrameSystem, id)
Check if `id` point is within `frames`.
"""
@inline has_point(f::FrameSystem, id) = has_vertex(points_graph(f), point_id(f, id))
"""
has_axes(frames::FrameSystem, ax)
Check if `ax` axes is within `frames`.
"""
@inline has_axes(f::FrameSystem, ax) = has_vertex(axes_graph(f), axes_id(f, ax))
"""
has_axes(frames::FrameSystem, name::Symbol)
Check if `name` direction is within `frames`.
"""
@inline has_direction(f::FrameSystem, name::Symbol) = haskey(f.dir, name)
# ---
# Formatting & printing
# One-line textual representation of a point node, used by `prettyprint`.
_fmt_node(n::FramePointNode) = " $(n.name)(id=$(n.id), axesid=$(n.axesid))"

# One-line textual representation of an axes node, used by `prettyprint`.
_fmt_node(n::FrameAxesNode) = " $(n.name)(id=$(n.id))"
# Print the graph `g` as a tree rooted at the first registered node.
function prettyprint(g::Union{AxesGraph,PointsGraph})
    if !isempty(g.nodes)
        println(_fmt_node(g.nodes[1]))
        _print_frame_graph(g, get_node_id(g.nodes[1]), 2, " ", " │ ")
    end
end

# Recursively print the children of node `pid`, scanning the node list from
# index `idx`; `last` accumulates the parent indentation and `prefix` carries
# the branch decoration of the current level.
function _print_frame_graph(g, pid::Int, idx::Int, last::String, prefix::String)
    for i in idx:length(g.nodes)
        if g.nodes[i].parentid == pid
            # Use a tee when another sibling follows, a corner otherwise.
            prefix = (i < length(g.nodes) && g.nodes[i+1].parentid == pid) ? " |" : " └"
            println(last * prefix * "── " * _fmt_node(g.nodes[i]))
            _print_frame_graph(
                g, get_node_id(g.nodes[i]), i, last * prefix * " ", last * prefix)
        end
    end
end
# Multi-line display of a frame system: header with the element counts,
# followed by the pretty-printed points and axes trees and the direction list.
function Base.show(io::IO, g::FrameSystem{O,T,S}) where {O,T,S}
    println(
        io,
        "FrameSystem{$O, $T, $S} with $(length(points_graph(g).nodes))"
        *
        " points, $(length(axes_graph(g).nodes)) axes and $(length(g.dir)) directions"
    )
    if !isempty(points_graph(g).nodes)
        printstyled(io, "\nPoints: \n"; bold=true)
        prettyprint(points_graph(g))
    end
    if !isempty(axes_graph(g).nodes)
        printstyled(io, "\nAxes: \n"; bold=true)
        prettyprint(axes_graph(g))
    end
    if !isempty(directions(g))
        printstyled(io, "\nDirections: \n"; bold=true)
        for d in values(directions(g))
            println(" └── $(d.name)(id=$(d.id))")
        end
    end
end
|
[
"MIT"
] | 3.0.0 | cb117510f2ba3d831439f56a5a3c00170cbf7a8d | code | 6310 | # ------------------------------------------------------------------------------------------
# POINTS
# ------------------------------------------------------------------------------------------
# ------
# Functions
# Container of the `O` update functions of a point: entry `i` computes the
# `i`-th order Translation (position, +velocity, +acceleration, +jerk).
struct FramePointFunctions{O,T}
    fun::NTuple{O,FramePointFunWrapper{O,T}}
end

# Access the i-th order update function.
Base.getindex(pf::FramePointFunctions, i) = pf.fun[i]
# Build a FramePointFunctions whose order is inferred from the number of inputs:
# each function is wrapped in a FramePointFunWrapper at compile time.
@generated function FramePointFunctions{T}(funs::Function...) where {T}
    O = length(funs)
    expr = Expr(:call, :tuple)
    for i in 1:O
        push!(
            expr.args,
            Expr(
                :call, Expr(:curly, :FramePointFunWrapper, O, T), Expr(:ref, :funs, i)
            )
        )
    end
    pexpr = Expr(:call, Expr(:curly, :FramePointFunctions, O, T), expr)
    return quote
        @inbounds $(pexpr)
    end
end

# Build an order-O FramePointFunctions from at least O functions; the extra
# ones (if any) are ignored.
@generated function FramePointFunctions{O,T}(funs::Function...) where {O,T}
    O > length(funs) && throw(ArgumentError("required at least $O functions."))
    expr = Expr(:call, :tuple)
    for i in 1:O
        push!(
            expr.args,
            Expr(
                :call, Expr(:curly, :FramePointFunWrapper, O, T), Expr(:ref, :funs, i)
            )
        )
    end
    pexpr = Expr(:call, Expr(:curly, :FramePointFunctions, O, T), expr)
    return quote
        @inbounds $(pexpr)
    end
end

# Build an order-O FramePointFunctions reusing the same function at all orders.
@generated function FramePointFunctions{O,T}(fun::Function) where {O,T}
    expr = Expr(:call, :tuple)
    for _ in 1:O
        push!(
            expr.args,
            Expr(
                :call, Expr(:curly, :FramePointFunWrapper, O, T), :fun
            )
        )
    end
    pexpr = Expr(
        :call,
        Expr(:curly, :FramePointFunctions, O, T),
        expr
    )
    return quote
        Base.@_inline_meta
        $(pexpr)
    end
end
# Default container: a null Translation at all orders.
FramePointFunctions{O,T}() where {O,T} = FramePointFunctions{O,T}(t -> Translation{O,T}())
# ------
# Node
"""
FramePointNode{O, T} <: AbstractJSMDGraphNode
Define a frame system point.
### Fields
- `name` -- point name
- `id` -- ID of the point
- `parentid` -- ID of the parent point
- `axesid` -- ID of the axes in which the point coordinates are expressed
- `f` -- `FramePointFunctions` container
"""
struct FramePointNode{O,T<:Number} <: AbstractJSMDGraphNode
name::Symbol
id::Int
parentid::Int
axesid::Int
# internals
f::FramePointFunctions{O,T}
end
get_node_id(p::FramePointNode{O,T}) where {O,T} = p.id
function Base.show(io::IO, p::FramePointNode{O,T}) where {O,T}
    # The parent is displayed only for non-root nodes (root: parentid == id).
    parent = p.parentid == p.id ? "" : ", parent=$(p.parentid)"
    return println(io, "FramePointNode{$O, $T}(name=$(p.name), id=$(p.id), axesid=$(p.axesid)$parent)")
end

# Graph of frame points, mapping node IDs to `FramePointNode` objects.
const PointsGraph{O,T} = MappedNodeGraph{FramePointNode{O,T},SimpleGraph{Int}}
# ------------------------------------------------------------------------------------------
# AXES
# ------------------------------------------------------------------------------------------
# ------
# Functions
# Container of the `O` update functions of a set of axes: entry `i` computes
# the `i`-th order Rotation.
struct FrameAxesFunctions{O,T}
    fun::NTuple{O,FrameAxesFunWrapper{O,T}}
end

# Access the i-th order update function.
Base.getindex(pf::FrameAxesFunctions, i) = pf.fun[i]
# Build a FrameAxesFunctions whose order is inferred from the number of inputs:
# each function is wrapped in a FrameAxesFunWrapper at compile time.
@generated function FrameAxesFunctions{T}(funs::Function...) where {T}
    O = length(funs)
    expr = Expr(:call, :tuple)
    for i in 1:O
        push!(
            expr.args,
            Expr(
                :call,
                Expr(:curly, :FrameAxesFunWrapper, O, T),
                Expr(:ref, :funs, i)
            )
        )
    end
    pexpr = Expr(
        :call,
        Expr(:curly, :FrameAxesFunctions, O, T),
        expr
    )
    return quote
        @inbounds $(pexpr)
    end
end

# Build an order-O FrameAxesFunctions from at least O functions; the extra
# ones (if any) are ignored.
@generated function FrameAxesFunctions{O,T}(funs::Function...) where {O,T}
    O > length(funs) && throw(ArgumentError("required at least $O functions."))
    expr = Expr(:call, :tuple)
    for i in 1:O
        push!(
            expr.args,
            Expr(
                :call,
                Expr(:curly, :FrameAxesFunWrapper, O, T),
                Expr(:ref, :funs, i)
            )
        )
    end
    pexpr = Expr(
        :call,
        Expr(:curly, :FrameAxesFunctions, O, T),
        expr
    )
    return quote
        @inbounds $(pexpr)
    end
end

# Build an order-O FrameAxesFunctions reusing the same function at all orders.
@generated function FrameAxesFunctions{O,T}(fun::Function) where {O,T}
    expr = Expr(:call, :tuple)
    for _ in 1:O
        push!(
            expr.args,
            Expr(
                :call, Expr(:curly, :FrameAxesFunWrapper, O, T), :fun
            )
        )
    end
    pexpr = Expr(
        :call,
        Expr(:curly, :FrameAxesFunctions, O, T),
        expr
    )
    return quote
        Base.@_inline_meta
        $(pexpr)
    end
end
# Default container: the identity rotation at all orders.
FrameAxesFunctions{O,T}() where {O,T} = FrameAxesFunctions{O,T}(t -> Rotation{O,T}(one(T)I))
# ------
# Node
"""
FrameAxesNode{O, T} <: AbstractJSMDGraphNode
Define a set of axes.
### Fields
- `name` -- axes name
- `id` -- axes ID (equivalent of NAIFId for axes)
- `parentid` -- ID of the parent axes
- `f` -- `FrameAxesFunctions` container
"""
struct FrameAxesNode{O,T<:Number} <: AbstractJSMDGraphNode
name::Symbol
id::Int
parentid::Int
# internals
f::FrameAxesFunctions{O,T}
end
get_node_id(ax::FrameAxesNode{O,T}) where {O,T} = ax.id
function Base.show(io::IO, ax::FrameAxesNode{O,T}) where {O,T}
    # The parent is displayed only for non-root nodes (root: parentid == id).
    parent = ax.parentid == ax.id ? "" : ", parent=$(ax.parentid)"
    return println(io, "FrameAxesNode{$O, $T}(name=$(ax.name), id=$(ax.id)$parent)")
end

# Graph of frame axes, mapping node IDs to `FrameAxesNode` objects.
const AxesGraph{O,T} = MappedNodeGraph{FrameAxesNode{O,T},SimpleGraph{Int}}
# ------------------------------------------------------------------------------------------
# DIRECTIONS
# ------------------------------------------------------------------------------------------
# Directions reuse the point update-function container: each entry builds a
# Translation holding the direction vector and its time derivatives.
const DirectionFunctions{O,T} = FramePointFunctions{O,T}

"""
    DirectionDefinition{O, T}

Define a new direction.

### Fields
- `name` -- direction name
- `id` -- direction ID
- `axesid` -- ID of the axes in which the direction is expressed
- `f` -- `DirectionFunctions` container
"""
struct DirectionDefinition{O,T}
    name::Symbol
    id::Int
    axesid::Int
    # internals
    f::DirectionFunctions{O,T}
end

function Base.show(io::IO, d::DirectionDefinition{O,T}) where {O,T}
    return println(io, "DirectionDefinition{$O, $T}(name=$(d.name), id=$(d.id), axesid=$(d.axesid))")
end
|
[
"MIT"
"""
    add_point!(frames, name, id, axes, funs, parentid=nothing)

Create and add a new point node `name` to `frames` based on the input parameters.

### Inputs
- `frames` -- Target frame system
- `name` -- Point name, must be unique within `frames`
- `id` -- Point ID, must be unique within `frames`
- `axes` -- ID/Name of the axes in which the state vector of the point is expressed.
- `funs` -- `FramePointFunctions` object storing the functions to update the state
            vectors of the point.
- `parentid` -- NAIF ID of the parent point. Not required only for the root point.

!!! warning
    This is a low-level function and is NOT meant to be directly used. Instead, to add a point
    to the frame system, see [`add_point_dynamical!`](@ref) and [`add_point_fixedoffset!`](@ref).
"""
function add_point!(
    frames::FrameSystem{O,T}, name::Symbol, id::Int, axes,
    funs::FramePointFunctions{O,T}=FramePointFunctions{O,T}(), parentid=nothing
) where {O,T<:Number}
    if has_point(frames, id)
        # Check point with the same id already registered
        throw(
            ArgumentError(
                "A point with ID $id is already registered in the input frame system.",
            ),
        )
    end
    # Check point with the same name does not already exist
    if name in map(x -> x.name, points_graph(frames).nodes)
        throw(
            ArgumentError(
                "A point with name=$name is already registered in the input frame system"
            ),
        )
    end
    # Check if the given axes are known in the FrameSystem
    axesid = axes_id(frames, axes)
    if !has_axes(frames, axesid)
        throw(
            ArgumentError(
                "Axes with ID $axesid are not registered in the input frame system"
            ),
        )
    end
    if isnothing(parentid)
        # If a root-point exists, check that a parent has been specified
        if !isempty(points_graph(frames))
            throw(
                ArgumentError(
                    "A parent point is required because the input frame system " *
                    "already contains a root-point.",
                ),
            )
        end
        parentid = id # Root-point has parentid = id
    else
        # Check that the parent point is registered in frames
        if !has_point(frames, parentid)
            throw(
                ArgumentError(
                    "The specified parent point with id $parentid is not " *
                    "registered in the input frame system.",
                ),
            )
        end
    end
    # Creates point node
    pnt = FramePointNode{O,T}(name, id, parentid, axesid, funs)
    # Insert new point in the graph
    add_point!(frames, pnt)
    # Connect the new point to the parent point in the graph
    !isnothing(parentid) && add_edge!(points_graph(frames), parentid, id)
    return nothing
end
"""
add_point_fixedoffset!(frames, name, id, parent, axes, offset::AbstractVector)
Add `point` as a fixed-offset point to `frames`.
Fixed points are those whose positions have a constant `offset` with respect their `parent`
points in the given set of `axes`. Thus, points eligible for this class must have null
velocity and acceleration with respect to `parent`.
"""
function add_point_fixedoffset!(
frames::FrameSystem{O,T}, name::Symbol, id::Int, parent, ax,
offset::AbstractVector{N}
) where {O,N,T}
if length(offset) != 3
throw(
DimensionMismatch(
"The offset vector should have length 3, but has $(length(offset))."
),
)
end
tr = Translation{O}(SVector(offset...))
funs = FramePointFunctions{O,T}(t -> tr)
return add_point!(
frames, name, id, axes_id(frames, ax), funs, point_id(frames, parent)
)
end
"""
add_point_dynamical!(frames, name, id, parent, axes, fun, δfun=nothing, δ²fun=nothing, δ³fun=nothing)
Add `point` as a time point to `frames`. The state vector for these points depends only on
time and is computed through the custom functions provided by the user.
The input functions must accept only time as argument and their outputs must be as follows:
- **fun**: return a 3-elements vector: position
- **δfun**: return a 6-elements vector: position and velocity
- **δ²fun**: return a 9-elements vector: position, velocity and acceleration
- **δ³fun**: return a 12-elements vector: position, velocity, acceleration and jerk
If `δfun`, `δ²fun` or `δ³fun` are not provided, they are computed with automatic differentiation.
!!! warning
It is expected that the input functions and their ouputs have the correct signature. This
function does not perform any checks on whether the returned vectors have the appropriate
dimensions.
"""
function add_point_dynamical!(
frames::FrameSystem{O,T}, name::Symbol, id::Int, parent, ax, fun,
δfun=nothing, δ²fun=nothing, δ³fun=nothing
) where {O,T}
for (order, fcn) in enumerate([δfun, δ²fun, δ³fun])
if (O < order + 1 && !isnothing(fcn))
@warn "ignoring $fcn, frame system order is less than $(order+1)"
end
end
funs = FramePointFunctions{O,T}(
t -> Translation{O}(fun(t)),
# First derivative
if isnothing(δfun)
t -> Translation{O}(vcat(fun(t), D¹(fun, t)))
else
t -> Translation{O}(δfun(t))
end,
# Second derivative
if isnothing(δ²fun)
(
if isnothing(δfun)
t -> Translation{O}(vcat(fun(t), D¹(fun, t), D²(fun, t)))
else
t -> Translation{O}(vcat(δfun(t), D²(fun, t)))
end
)
else
t -> Translation{O}(δ²fun(t))
end,
# Third derivative
if isnothing(δ³fun)
(
if isnothing(δ²fun)
(
if isnothing(δfun)
t -> Translation{O}(vcat(fun(t), D¹(fun, t), D²(fun, t), D³(fun, t)))
else
t -> Translation{O}(vcat(δfun(t), D²(fun, t), D³(fun, t)))
end
)
else
t -> Translation{O}(vcat(δ²fun(t), D³(fun, t)))
end
)
else
t -> Translation{O}(δ³fun(t))
end,
)
return add_point!(
frames, name, id, axes_id(frames, ax), funs, point_id(frames, parent)
)
end
"""
add_point_alias!(frames, target, alias::Symbol)
Add a name `alias` to a `target` point registered in `frames`.
"""
function add_point_alias!(frames::FrameSystem{O,N}, target, alias::Symbol) where {O,N}
if !has_point(frames, target)
throw(
ErrorException(
"no point with ID $target registered in the given frame system"
)
)
end
if alias in keys(points_alias(frames))
throw(
ErrorException(
"point with name $alias already present in the given frame system"
)
)
end
push!(points_alias(frames), Pair(alias, point_id(frames, target)))
nothing
end | FrameTransformations | https://github.com/JuliaSpaceMissionDesign/FrameTransformations.jl.git |
|
[
"MIT"
] | 3.0.0 | cb117510f2ba3d831439f56a5a3c00170cbf7a8d | code | 13662 |
# ------------------------------------------------------------------------------------------
# PROMOTIONS
# ------------------------------------------------------------------------------------------
# Returns the inner datatype of a given DCM
dcm_eltype(::Union{DCM{T},Type{DCM{T}}}) where {T} = T

# Returns a promoted type for a given tuple of DCMs. The promotion expression
# is assembled at compile time by folding `promote_type` over the tuple params.
@generated function promote_dcm_eltype(::Union{T,Type{T}}) where {T<:Tuple}
    t = Union{}
    for i in 1:length(T.parameters)
        # unwrapva handles trailing Vararg parameters.
        tmp = dcm_eltype(Base.unwrapva(T.parameters[i]))
        t = :(promote_type($t, $tmp))
    end
    return quote
        Base.@_inline_meta
        $t
    end
end
# ------------------------------------------------------------------------------------------
# TYPE DEF
# ------------------------------------------------------------------------------------------
"""
Rotation{O, N}
A container to efficiently compute `O`-th order rotation matrices of type `N` between two
set of axes. It stores the Direction Cosine Matrix (DCM) and its time derivatives up to
the (`O`-1)-th order. Since this type is immutable, the data must be provided upon
construction and cannot be mutated later.
The rotation of state vector between two set of axes is computed with an ad-hoc overload
of the product operator. For example, a 3rd order Rotation object `R`, constructed from the
DCM `A` and its time derivatives `δA` and `δ²A` rotates a vector `v` = `[p, v, a]` as:
`̂v = [A*p, δA*p + A*v, δ²A*p + 2δA*v + A*a]`
A `Rotation` object `R` can always be converted to an `SMatrix` or an `MMatrix` by invoking
the proper constructor.
### Examples
```julia-repl
julia> A = angle_to_dcm(π/3, :Z)
DCM{Float64}:
0.5 0.866025 0.0
-0.866025 0.5 0.0
0.0 0.0 1.0
julia> R = Rotation(A);
julia> SM = SMatrix(R)
3×3 SMatrix{3, 3, Float64, 9} with indices SOneTo(3)×SOneTo(3):
0.5 0.866025 0.0
-0.866025 0.5 0.0
0.0 0.0 1.0
julia> MM = MMatrix(R)
3×3 MMatrix{3, 3, Float64, 9} with indices SOneTo(3)×SOneTo(3):
0.5 0.866025 0.0
-0.866025 0.5 0.0
0.0 0.0 1.0
```
---
Rotation(dcms::DCM...)
Create a `Rotation` object from a Direction Cosine Matrix (DCM) and any of its time
derivatives. The rotation order is inferred from the number of inputs, while the rotation
type is obtained by promoting the DCMs types.
### Examples
```julia-repl
julia> A = angle_to_dcm(π/3, :Z);
julia> δA = DCM(0.0I);
julia> δ²A = DCM(0.0I);
julia> R = Rotation(A, δA, δ²A);
julia> typeof(R)
Rotation{3, Float64}
julia> R2 = Rotation(A, δA, δ²A, DCM(0.0im*I));
julia> typeof(R2)
Rotation{4, ComplexF64}
```
---
Rotation{O}(dcms::DCM...) where O
Create a `Rotation` object of order `O`. If the number of `dcms` is smaller than `O`, the
remaining slots are filled with null DCMs, otherwise if the number of inputs is greater than
`O`, only the first `O` DCMs are used.
!!! warning
Usage of this constructor is not recommended as it may yield unexpected results to
    inexperienced users.
---
Rotation{O}(u::UniformScaling{N}) where {O, N}
Rotation{O, N}(u::UniformScaling) where {O, N}
Create an `O`-order identity `Rotation` object of type `N` with identity position rotation
and null time derivatives.
### Examples
```julia-repl
julia> Rotation{1}(1.0I)
Rotation{1, Float64}(([1.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0],))
julia> Rotation{1, Int64}(I)
Rotation{1, Int64}(([1 0 0; 0 1 0; 0 0 1],))
```
---
Rotation{S1}(rot::Rotation{S2, N}) where {S1, S2, N}
Rotation{S1, N}(R::Rotation{S2}) where {S1, S2, N}
Transform a `Rotation` object of order `S2` to order `S1` and type `N`. The behaviour of
these functions depends on the values of `S1` and `S2`:
- `S1` < `S2`: Only the first `S1` components of `rot` are considered.
- `S1` > `S2`: The missing orders are filled with null DCMs.
### Examples
```julia-repl
julia> A = angle_to_dcm(π/3, :Z);
julia> B = angle_to_dcm(π/4, π/6, :XY);
julia> R1 = Rotation(A, B);
julia> order(R1)
2
julia> R2 = Rotation{1}(R1);
julia> order(R2)
1
julia> R2[1] == A
true
julia> R3 = Rotation{3}(R1);
julia> R3[3]
DCM{Float64}:
0.0 0.0 0.0
0.0 0.0 0.0
0.0 0.0 0.0
```
---
Rotation(m::DCM{N}, ω::AbstractVector) where N
Create a 2nd order `Rotation` object of type `N` to rotate between two set of axes `a` and
`b` from a Direction Cosine Matrix (DCM) and the angular velocity vector `ω` of `b` with
respect to `a`, expressed in `b`
"""
struct Rotation{S,T} <: AbstractArray{T,1}
    m::NTuple{S,DCM{T}}
    # Inner constructor: the element type is obtained by promoting the types
    # of all the input DCMs.
    function Rotation(tup::NTuple{S,Any}) where {S}
        T = promote_dcm_eltype(tup)
        return new{S,T}(tup)
    end
end

"""
    order(R::Rotation{O}) where O

Return the rotation order O.
"""
@inline order(::Rotation{S,<:Any}) where {S} = S

# Julia API: a Rotation behaves as a length-S vector of DCMs.
Base.size(::Rotation{S,<:Any}) where {S} = (S,)
Base.getindex(R::Rotation, i) = R.m[i]
Base.length(::Rotation{S}) where {S} = S
# ------------------------------------------------------------------------------------------
# CONSTRUCTORS
# ------------------------------------------------------------------------------------------
# Varargs constructor
function Rotation(args::Vararg{Any,S}) where {S}
    return Rotation(args)
end

# Constructor with filter and auto-fill of missing DCMS: keeps the first S
# inputs and pads with null DCMs of the promoted element type.
@generated function Rotation{S}(dcms::DCM...) where {S}
    D = length(dcms)
    T = Expr(:call, :promote_dcm_eltype, :dcms)
    expr = Expr(:call, :tuple)
    for i in 1:min(S, D)
        push!(expr.args, Expr(:ref, :dcms, i))
    end
    for _ in 1:(S-D)
        push!(expr.args, Expr(:call, :DCM, Expr(:call, :(*), Expr(:call, :zero, T), :I)))
    end
    return quote
        @inbounds Rotation($(expr))
    end
end

# Same as above, but starting from a tuple of DCMs of known type T.
@generated function Rotation{S1}(dcms::NTuple{S2,DCM{T}}) where {S1,S2,T}
    expr = Expr(:call, :tuple)
    for i in 1:min(S1, S2)
        push!(expr.args, Expr(:ref, :dcms, i))
    end
    for _ in 1:(S1-S2)
        push!(expr.args, Expr(:call, :DCM, Expr(:call, :(*), Expr(:call, :zero, T), :I)))
    end
    return quote
        @inbounds Rotation($(expr))
    end
end
# Constructor for S-order identity rotations: identity DCM at order 1,
# null DCMs for all the derivative orders.
@generated function Rotation{S}(::UniformScaling{T}) where {S,T}
    expr = Expr(:call, :tuple)
    for i in 1:S
        if i == 1
            push!(expr.args, Expr(:call, :DCM, Expr(:call, :(*), Expr(:call, :one, T), :I)))
        else
            push!(expr.args, Expr(:call, :DCM, Expr(:call, :(*), Expr(:call, :zero, T), :I)))
        end
    end
    return quote
        @inbounds Rotation($(expr))
    end
end

# As above, with an explicitly requested element type T.
@generated function Rotation{S,T}(::UniformScaling) where {S,T}
    expr = Expr(:call, :tuple)
    for i in 1:S
        if i == 1
            push!(expr.args, Expr(:call, :DCM, Expr(:call, :(*), Expr(:call, T, 1), :I)))
        else
            push!(expr.args, Expr(:call, :DCM, Expr(:call, :(*), Expr(:call, T, 0), :I)))
        end
    end
    return quote
        @inbounds Rotation($(expr))
    end
end
# Convert a Rotation to a different order: truncate when S1 < S2, pad with
# null DCMs when S1 > S2.
@generated function Rotation{S1}(rot::Rotation{S2,T}) where {S1,S2,T}
    expr = Expr(:call, :tuple)
    for i in 1:min(S1, S2)
        push!(expr.args, Expr(:ref, :rot, i))
    end
    for _ in 1:(S1-S2)
        push!(expr.args, Expr(:call, :DCM, Expr(:call, :(*), Expr(:call, :zero, T), :I)))
    end
    return quote
        @inbounds Rotation($(expr))
    end
end

# Convert a rotation to a different order and type: each kept DCM is
# broadcast-converted to T via the `T.(rot[i])` expression.
@generated function Rotation{S1,T}(rot::Rotation{S2}) where {S1,S2,T}
    expr = Expr(:call, :tuple)
    for i in 1:min(S1, S2)
        push!(expr.args, Expr(:., T, Expr(:tuple, Expr(:ref, :rot, i))))
    end
    for _ in 1:(S1-S2)
        push!(expr.args, Expr(:call, :DCM, Expr(:call, :(*), Expr(:call, :zero, T), :I)))
    end
    return quote
        @inbounds Rotation($(expr))
    end
end

# 2nd-order Rotation from a DCM and the angular velocity ω of the target axes
# with respect to the parent, expressed in the target axes.
function Rotation(m::DCM{T}, ω::AbstractVector) where {T}
    dm = DCM(ddcm(m, SVector(ω)))
    return Rotation((m, dm))
end

# Same-order conversion is the identity.
@inline Rotation{S}(rot::Rotation{S}) where {S} = rot
# ------------------------------------------------------------------------------------------
# TYPE CONVERSIONS
# ------------------------------------------------------------------------------------------
# Convert a Rotation to a tuple laid out (column-major) as the full 3S×3S
# block lower-triangular matrix of its DCMs and derivatives.
@generated function Base.Tuple(rot::Rotation{S,T}) where {S,T}
    expr = Expr(:call, :tuple)
    for j in 1:(3S)
        Oⱼ = (j - 1) ÷ 3 + 1
        for i in 1:(3S)
            Oᵢ = (i - 1) ÷ 3 + 1
            if Oⱼ > Oᵢ
                # Blocks above the diagonal are null.
                push!(expr.args, Expr(:call, :zero, T))
            else
                row = i - 3 * (Oᵢ - 1)
                col = j - 3 * (Oⱼ - 1)
                rom = Oᵢ - Oⱼ + 1
                push!(
                    expr.args,
                    Expr(
                        :ref,
                        Expr(:ref, :rot, rom), row, col
                    )
                )
            end
        end
    end
    return quote
        Base.@_inline_meta
        @inbounds $(expr)
    end
end

# Generic Rotation-to-StaticArrays conversions (e.g. SMatrix, MMatrix).
@inline function (::Type{SA})(rot::Rotation) where {SA<:StaticArray}
    return SA(Tuple(rot))
end
# ------------------------------------------------------------------------------------------
# OPERATIONS
# ------------------------------------------------------------------------------------------
# Inverse
"""
inv(rot::Rotation)
Compute the invese of the rotation object `rot`. The operation is efficiently performed by
taking the transpose of each rotation matrix within `rot`.
"""
Base.inv(rot::Rotation) = _inverse_rotation(rot)
@generated function _inverse_rotation(rot::Rotation{S,T}) where {S,T}
expr = Expr(:call, :Rotation,)
for i in 1:S
push!(
expr.args, Expr(:call, :adjoint, Expr(:ref, :rot, i))
)
end
return quote
@inbounds $(expr)
end
end
# Product between Rotations: only same-order rotations can be composed.
@inline Base.:*(r1::Rotation{S,<:Any}, r2::Rotation{S,<:Any}) where {S} = _compose_rotation(r1, r2)
function Base.:*(::Rotation{S1,<:Any}, ::Rotation{S2,<:Any}) where {S1,S2}
    throw(DimensionMismatch("Cannot multiply two `Rotation` types of order $S1 and $S2"))
end

# General Leibniz rule: the i-th derivative of A*B is the binomial-weighted
# sum of products of the lower-order derivatives of A and B.
@generated function _compose_rotation(A::Rotation{S,<:Any}, B::Rotation{S,<:Any}) where {S}
    expr = Expr(:call, :Rotation)
    for i in 1:S
        sum_expr = Expr(:call, :+)
        for j in 1:i
            c = binomial(i - 1, j - 1)
            aᵢ = Expr(:ref, :A, i - j + 1)
            bᵢ = Expr(:ref, :B, j)
            push!(sum_expr.args, Expr(:call, :*, c, aᵢ, bᵢ))
        end
        push!(expr.args, sum_expr)
    end
    return quote
        @inbounds $(expr)
    end
end
@inline Base.:*(A::Rotation{S}, v::Translation{S}) where {S} = _apply_rotation(A, v)
@inline Base.:*(A::Rotation{S}, v::AbstractVector{T}) where {S,T} = _apply_rotation(A, v)
@inline function Base.:*(::Rotation{S1}, ::Translation{S2}) where {S1,S2}
throw(DimensionMismatch("Cannot apply Rotation of order $S1 to Translation of order $S2"))
end
# Product between Rotation and a Translation
# Apply a rotation (and its time derivatives) to a Translation of the same order.
@generated function _apply_rotation(R::Rotation{S,Nr}, v::Translation{S,Nv}) where {S,Nr,Nv}
    # Apply rotation on a translation vector with the same size
    #
    #              n
    #   z(n) = R(n)*v(1) + ∑ binomial(n, k) * R(n-k) * v(k)
    #             k=1
    expr = Expr(:call, :tuple)
    for i in 1:S
        # Each output block is a binomial-weighted sum (general Leibniz rule)
        sumexpr = Expr(:call, :+)
        push!(expr.args, sumexpr)
        # Leading term: the i-th rotation derivative applied to the base vector
        push!(sumexpr.args, Expr(:call, :*, Expr(:ref, :R, i), Expr(:ref, :v, 1)))
        for j in 1:i-1
            push!(
                sumexpr.args,
                Expr(
                    :call, :*,
                    binomial(i - 1, j),
                    Expr(:ref, :R, i - j),
                    Expr(:ref, :v, j + 1),
                )
            )
        end
    end
    return quote
        Base.@_inline_meta
        @inbounds Translation($(expr))
    end
end
# Product between a Rotation and a plain static vector of 3S elements.
@inline Base.:*(A::Rotation, v::SVector) = _apply_rotation(A, v)

# Compute product between Rotation and a "proper" SVector: the vector is wrapped into a
# Translation, rotated by the Translation kernel, and converted back to an SVector.
@generated function _apply_rotation(R::Rotation{Sr,Nr}, v::SVector{Sv,Nv}) where {Sr,Sv,Nr,Nv}
    # Size compatibility is validated once, at code-generation time
    if Sv != 3Sr
        throw(
            DimensionMismatch(
                "Cannot apply Rotation of order $Sr to a $(Sv) vector",
            )
        )
    end
    expr = Expr(
        :call,
        Expr(:curly, :SVector, Sv, Nv),
        Expr(
            :call,
            :_apply_rotation, :R,
            Expr(:call, Expr(:curly, :Translation, Sr), :v)
        )
    )
    return quote
        Base.@_inline_meta
        @inbounds $expr
    end
end
# Function to compute product between Rotation and a generic vector
# Product between a Rotation and a generic AbstractVector, interpreted as S stacked
# 3-element blocks. Output block i accumulates A[i-j+1] * b[block j] for j = 1..i.
# NOTE: the original closing `end` of this function carried extraction junk that broke
# parsing; the code itself is unchanged.
@generated function _apply_rotation(A::Rotation{S,Na}, b::AbstractVector{Nb}) where {S,Na,Nb}
    # One sum expression per scalar component of the output (3 components per block)
    exprs = [[Expr(:call, :+) for _ in 1:3] for _ in 1:S]
    for i in 1:S
        for j in 1:i
            for k in 1:3
                mi = i - j + 1
                push!(
                    exprs[i][k].args,
                    StaticArrays.combine_products([
                        :(A[$mi][$k, $w] * b[3*($j-1)+$w]) for w in 1:3
                    ]),
                )
            end
        end
    end
    sa = 3 * S
    retexpr = :(@inbounds return similar_type(b, T, Size($sa))(tuple($((exprs...)...))))
    return quote
        Base.@_inline_meta
        length(b) != $sa && throw(
            DimensionMismatch(
                "Cannot multiply `Rotation` of size ($($sa), $($sa)) and a $(size(b)) vector",
            ),
        )
        T = Base.promote_op(LinearAlgebra.matprod, Na, Nb)
        $retexpr
    end
end
# Programmatic definition of the order-specific transformation API. For each supported
# order (1 to 4) this loop defines, via `@eval`:
#   - `rotationN`  : axes-to-axes rotation (N = 3*order elements),
#   - `vectorN`    : point-to-point state vector of N elements,
#   - `directionN` : direction vector of N elements,
# together with their low-level helpers (`_rotationN`, `_vectorN`, the forward/backward
# graph-traversal kernels and the direction kernels).
# NOTE: the original `for` header carried extraction junk that broke parsing; the code
# itself is unchanged.
for (order, axfun, _axfun, pfun, _pfun, _pfwd, _pbwd, dfun) in zip(
    (1, 2, 3, 4),
    (:rotation3, :rotation6, :rotation9, :rotation12),
    (:_rotation3, :_rotation6, :_rotation9, :_rotation12),
    (:vector3, :vector6, :vector9, :vector12),
    (:_vector3, :_vector6, :_vector9, :_vector12),
    (:_vector3_forward, :_vector6_forward, :_vector9_forward, :_vector12_forward),
    (:_vector3_backward, :_vector6_backward, :_vector9_backward, :_vector12_backward),
    (:direction3, :direction6, :direction9, :direction12)
)

    # --------------------------------------------------------------------------------------
    # Axes transformations
    # --------------------------------------------------------------------------------------

    @eval begin

        # Epochs with a timescale different from the frame system's one are rejected.
        @inline function ($axfun)(
            ::FrameSystem{<:Any,<:Any,S1}, from, to, ::Epoch{S2}
        ) where {S1,S2}
            throw(ArgumentError("Incompatible epoch timescale: expected $S1, found $S2."))
        end

        """
            $($axfun)(fr::FrameSystem, from, to, ep::Epoch)

        Compute the rotation that transforms a $(3*$order)-elements state vector from one
        specified set of axes to another at a given epoch.

        Requires a frame system of order ≥ $($order).

        ### Inputs
        - `fr` -- The `FrameSystem` container object
        - `from` -- ID or instance of the axes to transform from
        - `to` -- ID or instance of the axes to transform to
        - `ep` -- `Epoch` of the rotation. Its timescale must match that of the frame system.

        ### Output
        A [`Rotation`](@ref) object of order $($order).
        """
        @inline function ($axfun)(
            fr::FrameSystem{<:Any,<:Any,S}, from, to, ep::Epoch{S}
        ) where {S}
            return $(axfun)(fr, from, to, j2000s(ep))
        end

        """
            $($axfun)(fr::FrameSystem, from, to, t::Number)

        Compute the rotation that transforms a $(3*$order)-elements state vector from one
        specified set of axes to another at a given time `t`, expressed in seconds since
        `J2000`.
        """
        function ($axfun)(fr::FrameSystem{O,T}, from, to, t::Number) where {O,T}
            return $(_axfun)(fr, from, to, t)
        end

        # Low level function to compute the rotation
        function ($_axfun)(fr::FrameSystem{O,T}, from, to, t::Number) where {O,T}
            if O < $order
                throw(
                    ErrorException(
                        "insufficient frame system order: " *
                        "transformation requires at least order $($order).",
                    ),
                )
            end

            fromid = axes_id(fr, from)
            toid = axes_id(fr, to)

            # The rotation between a set of axes and itself is the identity
            fromid == toid && return Rotation{$order}(T(1) * I)

            # Check to ensure that the two axes are stored in the frame system
            for id in (fromid, toid)
                if !has_axes(fr, id)
                    throw(
                        ErrorException(
                            "axes with ID $id are not registered in the frame system."
                        )
                    )
                end
            end

            return $(_axfun)(fr, get_path(axes_graph(fr), fromid, toid), t)
        end

        # Low-level function to parse a path of axes and chain their rotations
        @inbounds function ($_axfun)(fr::FrameSystem, path::Vector{Int}, t::Number)
            f1 = get_mappednode(axes_graph(fr), path[1])
            f2 = get_mappednode(axes_graph(fr), path[2])

            rot = $(_axfun)(f1, f2, t)

            for i in 2:(length(path)-1)
                f1 = f2
                f2 = get_mappednode(axes_graph(fr), path[i+1])

                rot = $(_axfun)(f1, f2, t) * rot
            end

            return rot
        end

        # Low-level function to compute the rotation between two axes
        @inline function ($_axfun)(from::FrameAxesNode, to::FrameAxesNode, t::Number)
            return if from.id == to.parentid
                $(_axfun)(to, t)
            else
                # The rotation is stored on the child node: it must be inverted when
                # traversing the edge from child to parent.
                inv($(_axfun)(from, t))
            end
        end

        @inline function ($_axfun)(ax::FrameAxesNode, t::Number)
            R = ax.f[$order](t)
            return Rotation{$order}(R)
        end
    end

    # --------------------------------------------------------------------------------------
    # Points transformations
    # --------------------------------------------------------------------------------------

    @eval begin

        # Epochs with a timescale different from the frame system's one are rejected.
        @inline function ($pfun)(
            ::FrameSystem{<:Any,<:Any,S1}, from, to, axes, ::Epoch{S2}
        ) where {S1,S2}
            throw(ArgumentError("Incompatible epoch timescale: expected $S1, found $S2."))
        end

        """
            $($pfun)(fr::FrameSystem, from, to, axes, ep::Epoch)

        Compute $(3*$order)-elements state vector of a target point relative to
        an observing point, in a given set of axes, at the desired epoch `ep`.

        Requires a frame system of order ≥ $($order).

        ### Inputs
        - `fr` -- The `FrameSystem` container object
        - `from` -- ID or instance of the observing point
        - `to` -- ID or instance of the target point
        - `axes` -- ID or instance of the output state vector axes
        - `ep` -- `Epoch` of the observer. Its timescale must match that of the frame system.
        """
        @inline function ($pfun)(
            fr::FrameSystem{<:Any,<:Any,S}, from, to, axes, ep::Epoch{S}
        ) where {S}
            return $(pfun)(fr, from, to, axes, j2000s(ep))
        end

        """
            $($pfun)(fr, from, to, axes, t::Number)

        Compute $(3*$order)-elements state vector of a target point relative to
        an observing point, in a given set of axes, at the desired time `t` expressed in
        seconds since `J2000`.
        """
        function ($pfun)(fr::FrameSystem{O,T}, from, to, ax, t::Number) where {O,T}
            if O < $order
                throw(
                    ErrorException(
                        "insufficient frame system order: " *
                        "transformation requires at least order $($order).",
                    ),
                )
            end

            fromid = point_id(fr, from)
            toid = point_id(fr, to)
            axid = axes_id(fr, ax)

            # The state vector of a point relative to itself is identically zero
            fromid == toid && return @SVector zeros(T, 3 * $order)

            # Check to ensure that the two points are registered
            for id in (fromid, toid)
                if !has_point(fr, id)
                    throw(
                        ErrorException("point with ID $id is not registered in the frame system.")
                    )
                end
            end

            # Check that the output axes are registered
            if !has_axes(fr, axid)
                throw(
                    ErrorException("axes with ID $axid are not registered in the frame system.")
                )
            end

            return SVector($(_pfun)(fr, get_path(points_graph(fr), fromid, toid), axid, t))
        end

        function ($_pfun)(fr::FrameSystem, path::Vector{Int}, axes::Int, t::Number)
            @inbounds p1 = get_mappednode(points_graph(fr), path[1])
            @inbounds p2 = get_mappednode(points_graph(fr), path[end])

            if length(path) == 2
                # This handles all the cases where you don't need to chain any transformations
                axid, tr = ($_pfun)(p1, p2, t)
                if axid != axes
                    return $(axfun)(fr, axid, axes, t) * tr
                end
                return tr
            elseif axes == p1.axesid
                # backward pass
                return $(_pbwd)(fr, p2, path, t)
            elseif axes == p2.axesid
                # forward pass
                return $(_pfwd)(fr, p1, path, t)
            else
                # Optimising this transformation would probably demand a significant
                # portion of time with respect to the time required by the whole transformation
                # therefore forward pass is used without any optimisation
                return $(axfun)(fr, p2.axesid, axes, t) * $(_pfwd)(fr, p1, path, t)
            end
        end

        @inbounds function ($_pfwd)(fr::FrameSystem, p1::FramePointNode, path::Vector{Int}, t::Number)
            p2 = get_mappednode(points_graph(fr), path[2])

            axid, tr = ($_pfun)(p1, p2, t)

            for i in 2:(length(path)-1)
                p1 = p2
                p2 = get_mappednode(points_graph(fr), path[i+1])

                ax2id, tr2 = ($_pfun)(p1, p2, t)

                # Rotates previous vector to p2's axes
                if ax2id != axid
                    tr = ($axfun)(fr, axid, ax2id, t) * tr
                end

                axid = ax2id
                tr += tr2
            end

            return tr
        end

        @inbounds function ($_pbwd)(fr::FrameSystem, p1::FramePointNode, path::Vector{Int}, t::Number)
            p2 = get_mappednode(points_graph(fr), path[end-1])

            axid, tr = ($_pfun)(p1, p2, t)

            for i in 2:(length(path)-1)
                p1 = p2
                p2 = get_mappednode(points_graph(fr), path[end-i])

                ax2id, tr2 = ($_pfun)(p1, p2, t)

                # Rotates previous vector to p2's axes
                if ax2id != axid
                    tr = ($axfun)(fr, axid, ax2id, t) * tr
                end

                axid = ax2id
                tr += tr2
            end

            # The backward pass accumulates the vector in the opposite direction
            return -tr
        end

        @inbounds function ($_pfun)(from::FramePointNode, to::FramePointNode, t::Number)
            if from.id == to.parentid
                return to.axesid, $(_pfun)(to, t)
            else
                return from.axesid, -$(_pfun)(from, t)
            end
        end

        @inbounds function ($_pfun)(p::FramePointNode, t::Number)
            tr = p.f[$order](t)
            return Translation{$order}(tr)
        end
    end

    # --------------------------------------------------------------------------------------
    # Directions transformations
    # --------------------------------------------------------------------------------------

    @eval begin

        """
            $($dfun)(frames::FrameSystem, name::Symbol, axes, ep::Epoch)

        Compute the direction vector `name` of order $(3*$order) at epoch `ep` expressed in
        the `axes` frame.

        Requires a frame system of order ≥ $($order).
        """
        @inline function ($dfun)(
            frames::FrameSystem{<:Any,<:Any,S}, name::Symbol, axes, ep::Epoch{S}
        ) where {S}
            return $(dfun)(frames, name, axes, j2000s(ep))
        end

        """
            $($dfun)(frames::FrameSystem, name::Symbol, axes, t::Number)

        Compute the direction vector `name` of order $(3*$order) at epoch `t`, where `t` is
        expressed in seconds since `J2000`.

        Requires a frame system of order ≥ $($order).
        """
        function ($dfun)(frames::FrameSystem{O,N}, name::Symbol, axes, t::Number) where {O,N}
            if O < $order
                throw(
                    ErrorException(
                        "Insufficient frame system order: " *
                        "transformation requires at least order $($order).",
                    ),
                )
            end

            if !has_direction(frames, name)
                throw(
                    ErrorException(
                        "No direction with name $(name) registered in the frame system."
                    )
                )
            end

            node = directions(frames)[name]
            stv = Translation{$order}(node.f[$order](t))

            thisaxid = node.axesid
            axid = axes_id(frames, axes)

            # Rotate the direction into the requested output axes when needed
            if thisaxid != axid
                stv = ($axfun)(frames, thisaxid, axid, t) * stv
            end

            D = 3 * $order
            return @views SVector(stv)[1:D]
        end
    end
end
# ------------------------------------------------------------------------------------------
# PROMOTIONS
# ------------------------------------------------------------------------------------------
# Static 3-element vector alias: translations are stored as stacks of these blocks.
const SVector3{T} = SVector{3,T}

# Extract the element type of an `SVector3` instance or type.
svector3_eltype(::Union{SVector3{T},Type{SVector3{T}}}) where {T} = T
# Returns a promoted type for a given tuple of SVector3
@generated function promote_svector3_eltype(::Union{T,Type{T}}) where {T<:Tuple}
    # Fold `promote_type` over the element types of every tuple entry, starting from
    # `Union{}`; the fold is built as a nested expression evaluated in the caller.
    t = Union{}
    for i in 1:length(T.parameters)
        # `unwrapva` strips a trailing Vararg so its element type can be inspected
        tmp = svector3_eltype(Base.unwrapva(T.parameters[i]))
        t = :(promote_type($t, $tmp))
    end
    return quote
        Base.@_inline_meta
        $t
    end
end
# ------------------------------------------------------------------------------------------
# TYPE DEF
# ------------------------------------------------------------------------------------------
"""
Translation{O, N}
A container to efficiently compute `O`-th order translation vectors of type `N` between two
points or a direction. It stores the translation vector and its time derivatives up to
the (`O`-1)-th order. Since this type is immutable, the data must be provided upon
construction and cannot be mutated later.
!!! todo
Add constructors details.
### See also
See also [`Rotation`](@ref).
"""
struct Translation{S,T<:Number} <: AbstractArray{T,1}
v::NTuple{S,SVector3{T}}
function Translation(tup::NTuple{S,Any}) where {S}
T = promote_svector3_eltype(tup)
return new{S,T}(tup)
end
end
# Compact one-line description reporting the order and the numeric type.
Base.summary(io::IO, ::Translation{S,N}) where {S,N} = print(io, "Translation{$S, $N}")
"""
order(t::Translation{O}) where O
Return the translation order O.
"""
order(::Translation{S,<:Any}) where {S} = S
# Julia API
Base.size(::Translation{S,<:Any}) where {S} = (S,)
Base.getindex(t::Translation, i) = t.v[i]
Base.length(::Translation{S}) where {S} = S
Base.keys(t::Translation) = keys(t.v)
# ------------------------------------------------------------------------------------------
# CONSTRUCTORS
# ------------------------------------------------------------------------------------------
# Varargs constructor: builds a Translation from 3*O scalar components.
# NOTE: `Vararg{Number,S}` replaces the deprecated `Vararg{<:Number,S}` form (wrapping
# `Vararg` directly in a `UnionAll` is deprecated); accepted inputs are unchanged.
function Translation(args::Vararg{Number,S}) where {S}
    O, r = divrem(S, 3)
    if r != 0
        throw(
            DimensionMismatch(
                "Cannot initialize a Translation with $S arguments: shall be divisible by 3.")
        )
    end
    # Promote all the scalar inputs to a common numeric type
    T = promote_type(typeof.(args)...)
    # Pack each consecutive triplet of arguments into a static 3-element block
    return Translation(ntuple(i -> SVector3{T}(@views(args[((i-1)*3+1):(3*i)])), O))
end
# Zero constructor: all the 3S scalar components are initialised to zero(T).
@generated function Translation{S,T}() where {S,T}
    zeroed = Expr(:call, :Translation, (zero(T) for _ in 1:3*S)...)
    return quote
        Base.@_inline_meta
        $zeroed
    end
end
# Partial constructor: forwards the L provided scalar components and zero-pads the
# remaining ones up to the requested order O (3*O components in total).
# NOTE: `Vararg{Number,L}` replaces the deprecated `Vararg{<:Number,L}` form; the
# previous "# Empty constructor" comment was also misleading.
@generated function Translation{O,T}(args::Vararg{Number,L}) where {O,L,T}
    expr = Expr(:call, :Translation)
    for i in 1:L
        push!(
            expr.args, Expr(:ref, :args, i)
        )
    end
    for _ in 1:(3*O-L)
        push!(expr.args, zero(T))
    end
    return quote
        Base.@_inline_meta
        $expr
    end
end
# Convert to a different order: common blocks are forwarded component-wise, missing
# SVector3 blocks are auto-filled with zeros (when S1 < S2 the extra blocks of `tr`
# are simply truncated).
@generated function Translation{S1}(tr::Translation{S2,T}) where {S1,S2,T}
    expr = Expr(:call, :Translation)
    for i in 1:min(S1, S2)
        v = Expr(:ref, :tr, i)
        for j in 1:3
            push!(expr.args, Expr(:ref, v, j))
        end
    end
    # Zero-padding (the range is empty whenever S1 ≤ S2)
    for _ in 1:3*(S1-S2)
        push!(expr.args, zero(T))
    end
    return quote
        Base.@_inline_meta
        @inbounds $(expr)
    end
end

# Same-order conversion is the identity
@inline Translation{S}(tr::Translation{S}) where {S} = tr
# ------------------------------------------------------------------------------------------
# TYPE CONVERSIONS
# ------------------------------------------------------------------------------------------
# Convert to Tuple
@generated function Base.Tuple(tr::Translation{S,N}) where {S,N}
    # Flatten all the S blocks into a single 3S-element tuple expression.
    parts = [Expr(:ref, Expr(:ref, :tr, blk), cmp) for blk in 1:S for cmp in 1:3]
    flat = Expr(:call, :tuple, parts...)
    return quote
        Base.@_inline_meta
        @inbounds $flat
    end
end
# Generic convert to SVector
# Construct any StaticArray subtype from a Translation by flattening it to a tuple first.
@inline (::Type{SA})(tr::Translation) where {SA<:StaticArray} = SA(Tuple(tr))
# Constructor from SVector: the input length must be a multiple of 3; shorter inputs
# are zero-padded up to 3*St components, longer ones are truncated.
@generated function Translation{St}(v::SVector{Sv,T}) where {St,Sv,T}
    # The length check happens once, at code-generation time
    if rem(Sv, 3) != 0
        throw(
            DimensionMismatch(
                "Cannot create Translation from vector with size $(Sv), shall be divisible by 3."
            )
        )
    end
    expr = Expr(:call, :Translation)
    # Forward the available components (at most 3*St of them)...
    for i in 1:min(Sv, 3 * St)
        push!(expr.args, Expr(:ref, :v, i))
    end
    # ...and zero-pad up to the requested order (empty range when Sv ≥ 3*St)
    for _ in 1:(3*St-Sv)
        push!(expr.args, zero(T))
    end
    return quote
        Base.@_inline_meta
        @inbounds $(expr)
    end
end
# Constructor from Vector
# Constructor from a generic vector: route through the static-vector constructor.
Translation{S}(v::AbstractVector{T}) where {S,T} = Translation{S}(SVector(v...))
# ------------------------------------------------------------------------------------------
# OPERATIONS
# ------------------------------------------------------------------------------------------
# Comparing translations of different orders is treated as a programming error.
Base.:(==)(t1::Translation{S1}, t2::Translation{S2}) where {S1,S2} =
    throw(
        DimensionMismatch("Cannot compare two Translations with different orders.")
    )

# Same-order translations are equal iff every stored block matches.
function Base.:(==)(t1::Translation{S}, t2::Translation{S}) where {S}
    return all(k -> t1[k] == t2[k], eachindex(t1))
end
# Addition between translations of (possibly) different orders: the common low-order
# blocks are added element-wise, while the exceeding blocks of the higher-order operand
# are copied unchanged (as if the missing blocks of the shorter operand were zero).
@generated function Base.:(+)(t1::Translation{S1}, t2::Translation{S2}) where {S1,S2}
    expr = Expr(:call, :tuple)
    if S1 ≥ S2
        for i in 1:S2
            push!(
                expr.args, Expr(:call, :(+), Expr(:ref, :t1, i), Expr(:ref, :t2, i))
            )
        end
        # Tail blocks only present in t1
        for i in S2+1:S1
            push!(expr.args, Expr(:ref, :t1, i))
        end
    else
        for i in 1:S1
            push!(
                expr.args, Expr(:call, :(+), Expr(:ref, :t1, i), Expr(:ref, :t2, i))
            )
        end
        # Tail blocks only present in t2
        for i in S1+1:S2
            push!(expr.args, Expr(:ref, :t2, i))
        end
    end
    trexpr = Expr(:call, :Translation, expr)
    return quote
        Base.@_inline_meta
        @inbounds $trexpr
    end
end
# Unary minus: negate every stored block.
@generated function Base.:(-)(t::Translation{S}) where {S}
    negated = [Expr(:call, :(-), Expr(:ref, :t, k)) for k in 1:S]
    trexpr = Expr(:call, :Translation, Expr(:call, :tuple, negated...))
    return quote
        Base.@_inline_meta
        @inbounds $trexpr
    end
end
# Subtraction between translations of (possibly) different orders: the common low-order
# blocks are subtracted element-wise, while the exceeding blocks are copied from t1 or
# negated from t2, as if the missing blocks of the shorter operand were zero.
@generated function Base.:(-)(t1::Translation{S1}, t2::Translation{S2}) where {S1,S2}
    expr = Expr(:call, :tuple)
    if S1 ≥ S2
        for i in 1:S2
            push!(
                expr.args, Expr(:call, :(-), Expr(:ref, :t1, i), Expr(:ref, :t2, i))
            )
        end
        for i in S2+1:S1
            push!(expr.args, Expr(:ref, :t1, i))
        end
    else
        for i in 1:S1
            push!(
                expr.args, Expr(:call, :(-), Expr(:ref, :t1, i), Expr(:ref, :t2, i))
            )
        end
        for i in S1+1:S2
            # BUG FIX: these tail blocks come from the subtrahend and must be negated
            # (0 - t2[i]); the previous implementation copied them with the wrong sign.
            push!(expr.args, Expr(:call, :(-), Expr(:ref, :t2, i)))
        end
    end
    trexpr = Expr(:call, :Translation, expr)
    return quote
        Base.@_inline_meta
        @inbounds $trexpr
    end
end
"""
add_axes_fixed_quaternion!(frames::FrameSystem, name::Symbol, id::Int, parent, q::Quaternion)
Add axes `name` with id `id` to `frames` with a fixed-offset from `parent`.
Fixed offset axes have a constant orientation with respect to their `parent` axes,
represented by the quaternion `q`.
### See also
See also [`add_axes_fixedoffset!`](@ref).
"""
function add_axes_fixed_quaternion!(
    frames::FrameSystem, name::Symbol, id::Int, parent, q::Quaternion
)
    # Convert the quaternion to a direction cosine matrix and register a fixed offset.
    dcm = quat_to_dcm(q)
    return add_axes_fixedoffset!(frames, name, id, parent, dcm)
end
"""
add_axes_fixed_angles!(frames, name::Symbol, id::Int, parent, θ::AbstractVector{N}, seq::Symbol)
Add axes `name` with id `id` to `frames` with a fixed-offset from `parent`.
Fixed offset axes have a constant orientation with respect to their `parent` axes,
represented by Euler angles `θ`.
The rotation sequence is defined by `seq` specifying the rotation axes. The possible
values depend on the number of rotations as follows:
- **1 rotation** (`θ₁`): `:X`, `:Y`, or `:Z`.
- **2 rotations** (`θ₁`, `θ₂`): `:XY`, `:XZ`, `:YX`, `:YZ`, `:ZX`, or `:ZY`.
- **3 rotations** (`θ₁`, `θ₂`, `θ₃`): `:XYX`, `XYZ`, `:XZX`, `:XZY`, `:YXY`, `:YXZ`, `:YZX`,
`:YZY`, `:ZXY`, `:ZXZ`, `:ZYX`, or `:ZYZ`
### See also
See also [`add_axes_fixedoffset!`](@ref).
"""
function add_axes_fixed_angles!(
    frames::FrameSystem, name::Symbol, id::Int, parent, θ::AbstractVector{N}, seq::Symbol
) where {N}
    # Build the constant offset rotation from the Euler angles and register the axes.
    dcm = angle_to_dcm(θ..., seq)
    return add_axes_fixedoffset!(frames, name, id, parent, dcm)
end
"""
add_axes_fixed_angleaxis!(frames, name::Symbol, id::Int, parent, ϕ::Number, v::AbstractVector{N})
Add axes `name` with id `id` to `frames` with a fixed-offset from `parent`.
Fixed offset axes have a constant orientation with respect to their `parent` axes,
represented by Euler angle `ϕ` [rad] and Euler axis `v`.
### See also
See also [`add_axes_fixedoffset!`](@ref).
"""
function add_axes_fixed_angleaxis!(
    frames::FrameSystem, name::Symbol, id::Int, parent, ϕ::Number, v::AbstractVector{N}
) where {N}
    # Normalise the rotation axis before building the direction cosine matrix.
    # NOTE: the original closing `end` carried extraction junk that broke parsing.
    naxis = unitvec(v)
    return add_axes_fixedoffset!(frames, name, id, parent, angleaxis_to_dcm(ϕ, naxis))
end
"""
function add_axes_twodir!(frames::FrameSystem{O,T}, name::Symbol, id, parent,
dir1::Symbol, dir2::Symbol, seq::Symbol; project::Bool=false) where {O,T}
Add a set of axes to `frames` based on two directions.
A right-handed coordinate system is generated based on the specified sequence direction (`seq`),
which determines the order in which the vectors are used to define the basis.
The `project` flag specifies whether the resulting axes are inertial or not.
### See also
See also [`add_axes_projected!`](@ref) and [`add_axes_rotating!`](@ref).
"""
function add_axes_twodir!(
    frames::FrameSystem{O,T}, name::Symbol, id, parent, dir1::Symbol, dir2::Symbol, seq::Symbol;
    project::Bool=false
) where {O,T}
    # Check that both directions are registered in the frame system
    if !(has_direction(frames, dir1))
        throw(
            ArgumentError("No direction with name $dir1 available.")
        )
    end
    if !(has_direction(frames, dir2))
        throw(
            ArgumentError("No direction with name $dir2 available.")
        )
    end
    # Check that the parent axes are registered in the frame system
    if !(has_axes(frames, parent))
        throw(
            # BUG FIX: `pid` was undefined here and raised an UndefVarError instead of
            # the intended message; interpolate the actual `parent` argument.
            ArgumentError("No axes with id $parent available.")
        )
    end

    # Time-dependent DCM built from the two directions resolved in the parent axes
    fun = t -> twodir_to_dcm(
        direction3(frames, dir1, parent, t), direction3(frames, dir2, parent, t), seq
    )

    if project
        add_axes_projected!(frames, name, id, parent, fun)
    else
        add_axes_rotating!(frames, name, id, parent, fun)
    end
end
# Build a (non-normalised) right-handed triad (u, v, w) from two vectors `a` and `b`.
# `fc` is the binary product used to generate the orthogonal components (a cross
# product in practice — TODO confirm with callers). The first axis named in `seq` is
# aligned with `a`, while `b` fixes the plane containing the second named axis.
function _two_dir_basis(a::AbstractVector, b::AbstractVector,
    seq::Symbol, fc::Function)
    if seq == :XY
        w = fc(a, b)
        v = fc(w, a)
        u = a
    elseif seq == :YX
        w = fc(b, a)
        u = fc(a, w)
        v = a
    elseif seq == :XZ
        v = fc(b, a)
        w = fc(a, v)
        u = a
    elseif seq == :ZX
        v = fc(a, b)
        u = fc(v, a)
        w = a
    elseif seq == :YZ
        u = fc(a, b)
        w = fc(u, a)
        v = a
    elseif seq == :ZY
        u = fc(b, a)
        v = fc(a, u)
        w = a
    else
        throw(ArgumentError("Invalid rotation sequence $seq."))
    end
    return u, v, w
end
# Build the DCM whose rows are the orthonormal right-handed basis obtained from the two
# (non-parallel) directions `a` and `b` according to the axes sequence `seq`.
# NOTE: the original closing `end` carried extraction junk that broke parsing.
function twodir_to_dcm(a::AbstractVector, b::AbstractVector, seq::Symbol)
    ut, vt, wt = _two_dir_basis(a, b, seq, cross3)
    # Normalise each basis vector before assembling the matrix (column-major fill,
    # hence (u, v, w) end up as the rows of the DCM).
    u, v, w = unitvec(ut), unitvec(vt), unitvec(wt)
    @inbounds dcm = DCM((u[1], v[1], w[1], u[2], v[2], w[2], u[3], v[3], w[3]))
    return dcm
end
"""
add_axes_icrf!(frames::FrameSystem)
Add the International Celestial Reference Frame (ICRF) as the root axes of the frames graph.
The axes are automatically named `ICRF` and assigned the $(AXESID_ICRF) ID.
### See also
See also [`add_axes!`](@ref), [`add_axes_gcrf!`](@ref) and [`AXESID_ICRF`](@ref).
"""
@inline function add_axes_icrf!(frames::FrameSystem)
    # The ICRF must be the first set of axes inserted in the graph.
    isempty(axes_graph(frames)) ||
        throw(ArgumentError("The ICRF can only be defined as a set of root axes."))
    return add_axes!(frames, :ICRF, AXESID_ICRF)
end
"""
add_axes_gcrf!(frames::FrameSystem)
Add the Geocentric Celestial Reference Frame (GCRF) to the frames graph. The axes are
automatically named `GCRF` and assigned the $(AXESID_GCRF) ID. These axes can only
be defined as a set of root axes or as child of the ICRF (ID = $(AXESID_ICRF)).
### See also
See also [`add_axes_icrf!`](@ref) and [`AXESID_GCRF`](@ref).
"""
function add_axes_gcrf!(frames::FrameSystem)
    if has_axes(frames, AXESID_ICRF)
        # The ICRF is present: attach the GCRF to it with an identity rotation.
        return add_axes_fixedoffset!(
            frames, :GCRF, AXESID_GCRF, AXESID_ICRF, DCM(1.0I)
        )
    end
    if isempty(axes_graph(frames))
        # Empty graph: the GCRF becomes the root set of axes.
        return add_axes!(frames, :GCRF, AXESID_GCRF)
    end
    throw(
        ArgumentError(
            "The GCRF can only be defined with respect to the ICRF (ID =" *
            " $(AXESID_ICRF)) or as a set of root axes."
        )
    )
end
"""
DCM_ICRF_TO_EME2000
DCM for the rotation from the International Celestial Reference Frame (`ICRF`) and the
Mean Equator and Equinox of J2000.0 (`EME2000`). This corresponds to the `J2000` frame in
the SPICE toolkit.
!!! note
The frame bias is here computed using the IAU 2006 Precession model, similarly to ESA's
GODOT. Some other software libraries, such as Orekit, use the frame bias of the IAU 2000
precession model. The two definitions differ of about 1 arcsecond.
Moreover, according to [Hilton](https://www.aanda.org/articles/aa/pdf/2004/02/aa3851.pdf)
there are multiple possibilities to define the proper rotation between the ICRS and
the EME2000. The transformation implemented here correspond to Eq. 6 using the parameters
in Table 3, line 1 (RIERS).
### References
- Hilton, James L., and Catherine Y. Hohenkerk. -- Rotation matrix from the mean
dynamical equator and equinox at J2000. 0 to the ICRS. -- Astronomy & Astrophysics
513.2 (2004): 765-770. DOI: [10.1051/0004-6361:20031552](https://www.aanda.org/articles/aa/pdf/2004/02/aa3851.pdf)
- [SOFA docs](https://www.iausofa.org/2021_0512_C/sofa/sofa_pn_c.pdf)
"""
const DCM_ICRF_TO_EME2000 = iers_bias(iers2010a, 0)  # IAU 2010A frame bias at t = 0 (J2000)
"""
add_axes_eme2000!(frames, name::Symbol=:EME2000, parentid::Int=AXESID_ICRF,
id::Int = AXESID_EME2000)
Add Mean Equator Mean Equinox of J2000 axes to `frames`. Custom `name`, `id` and `parentid`
can be assigned by the user.
### See also
See also [`DCM_ICRF_TO_EME2000`](@ref).
"""
function add_axes_eme2000!(
    frames::FrameSystem, name::Symbol=:EME2000, parentid::Int=AXESID_ICRF, id::Int=AXESID_EME2000,
)
    # NOTE: the original closing `end` carried extraction junk that broke parsing;
    # the logic below is unchanged.
    if parentid == AXESID_ICRF || parentid == AXESID_GCRF
        # The ICRF/GCRF -> EME2000 rotation is the constant frame bias matrix.
        dcm = DCM_ICRF_TO_EME2000
    else
        throw(
            ArgumentError(
                "Mean Equator, Mean Equinox of J2000 (EME2000) axes can only be defined " *
                "w.r.t. the ICRF (ID = $(AXESID_ICRF)) or the GCRF (ID = $(AXESID_GCRF))."
            ),
        )
    end

    # Warn, but do not fail, when a non-standard ID is aliased to these axes.
    if id != AXESID_EME2000
        @warn "$name is aliasing an ID that is not the standard EME2000 ID" *
              " ($(AXESID_EME2000))."
    end

    return add_axes_fixedoffset!(frames, name, id, parentid, dcm)
end