licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 1.0.10 | bb37e1a43eb7c52d6a12cfe29c60b518bda742aa | code | 9946 | using PovertyAndInequalityMeasures
using DataFrames
using Test
using CSV
#
# These tests mostly try to replicate examples from
# World Bank 'Handbook on poverty and inequality'
# by Haughton and Khandker
# http://documents.worldbank.org/curated/en/488081468157174849/Handbook-on-poverty-and-inequality
#
# ...
# NOTE(review): `Amount` is declared but never used anywhere in this test
# file — presumably a leftover experiment; confirm before removing.
primitive type Amount <: AbstractFloat 64 end

const TOL = 0.00001; # absolute tolerance for approximate ("nearly equal") comparisons
"
This just creates an array which is `times` vcat copies of `a`
"
function vcn( a :: Array{Float64}, times :: Int64 )
nrows = size( a )[1]
ncols = size( a )[2]
newrows = nrows*times
out = zeros( Float64, newrows, ncols )
p = 0
for row in 1:nrows
for i in 1:times
p += 1
out[p,:] .= a[row,:]
end
end
out
end
# empty ignore-list used as the default for `comparedics`
NONE=Array{Symbol,1}();
"""
    removeIgnored(d, ignore)

Delete every key listed in `ignore` from `d` (mutates `d`) and return `d`.
Keys not present in `d` are silently skipped.
"""
function removeIgnored( d :: Dict{ Symbol, <:Any}, ignore::Array{Symbol,1} ):: Dict{ Symbol, <:Any}
    # delete! is a no-op for absent keys, so no haskey guard is needed
    foreach( k -> delete!( d, k ), ignore )
    return d
end
"
element - by element compare of the type of dicts we use for poverty and inequality output
"
function comparedics( left :: Dict{ Symbol, <:Any}, right :: Dict{ Symbol, <:Any}, ignore::Array{Symbol,1} = NONE ) :: Bool
left = removeIgnored( left, ignore )
right = removeIgnored( right, ignore )
lk = keys( left )
if lk != keys( right )
return false
end
for k in lk
# try catch here in case types are way off
try
if !( left[k] ≈ right[k] )
l = left[k]
r = right[k]
print( "comparison failed '$k' : left = $l right = $r")
return false
end
catch e
return false
end
end
return true
end
@testset "WB Chapter 4 - Poverty " begin
country_a = [ 1.0 100; 1.0 100; 1 150; 1 150 ]
country_b = copy( country_a )
country_b[1:2,2] .= 124
country_c = [ 1.0 100; 1.0 110; 1 150; 1 160 ]
#
# Ch 4 doesn't discuss weighting issues, so
# we'll add some simple checks for that.
# a_2 and b_2 should be the same as country_a and _b,
# but with 2 obs of weight 2 rather than 4 of weight 1
#
country_a_2 = [2.0 100; 2.0 150 ]
country_b_2 = [2.0 124; 2.0 150 ]
# d should be a big version of a and also produce same result
country_d = vcn( country_a, 50 )
# attempt to blow things up with huge a clone
country_d = vcn( country_c, 100_000 )
# very unbalanced copy of dataset 1 with 10,000 weight1 1:2 and 2 weight 10,000 7:10
country_e = vcn( country_c[1:2,:], 10_000 )
cx = copy(country_c[3:4,:])
cx[:,1] .= 10_000
country_e = vcat( country_e, cx )
line = 125.0
growth = 0.05
country_a_pov = make_poverty( country_a, line, growth )
print("country A " );println( country_a_pov )
country_a_2_pov = make_poverty( country_a_2, line,growth )
country_b_pov = make_poverty( country_b, line, growth )
country_c_pov = make_poverty( country_c, line, growth )
print("country C " );println( country_c_pov )
country_d_pov = make_poverty( country_d, line, growth )
print("country D " );println( country_d_pov )
country_e_pov = make_poverty( country_e, line, growth )
print("country E " );println( country_e_pov )
@test povs_equal( country_a_pov, country_a_2_pov )
@test povs_equal( country_c_pov, country_e_pov )
# @test povs_equal( country_c_pov, country_d_pov )
# test dataframes same as A
country_a_df = DataFrame( weight=[2.0, 2.0], income=[100.0, 150])
country_a_pov_df = make_poverty( country_a_df, line, growth, :weight, :income )
@test povs_equal( country_a_pov_df, country_a_pov )
# numbers from WP ch. 4
@test country_a_pov.headcount ≈ 0.5
@test country_b_pov.headcount ≈ 0.5
@test country_b_pov.gap ≈ 1.0/250.0
@test country_c_pov.watts ≈ 0.0877442307
# some of these are hand-calculations, for from Ada version
@test isapprox( country_c_pov.gap, 0.080000, atol = TOL )
@test isapprox( country_c_pov.foster_greer_thorndyke[ 1 ], 0.5000000, atol = TOL ) # pov level
@test isapprox( country_c_pov.foster_greer_thorndyke[ 2 ], 0.1984059, atol = TOL )
@test isapprox( country_c_pov.foster_greer_thorndyke[ 3 ], 0.0800000, atol = TOL )
@test isapprox( country_c_pov.foster_greer_thorndyke[ 4 ], 0.0327530, atol = TOL )
@test isapprox( country_c_pov.foster_greer_thorndyke[ 5 ], 0.0136000, atol = TOL )
@test isapprox( country_c_pov.foster_greer_thorndyke[ 6 ], 0.0057192, atol = TOL )
@test isapprox( country_c_pov.sen , 0.0900000, atol = TOL )
@test isapprox( country_c_pov.shorrocks , 0.0625000, atol = TOL )
@test isapprox( country_c_pov.watts , 0.0877442, atol = TOL )
@test isapprox( country_c_pov.time_to_exit , 1.7548846, atol = TOL )
@test isapprox( country_c_pov.gini_amongst_poor , 0.0238095, atol = TOL )
@test isapprox( country_c_pov.poverty_gap_gini , 0.5625000, atol = TOL )
end # poverty testset
#
# reproduce WB Table 6.3 with various combinations of weights & data
# note the published table has errors:
#
# 1. uses log10 not ln for theil
# 2. has N in wrong place for ge(2) - outside bracket
#
@testset "WB Chapter 6 - Inequality " begin
    # [weight income] pairs from WB Table 6.3
    c1 = [1.0 10; 1 15; 1 20; 1 25; 1 40; 1 20; 1 30; 1 35; 1 45; 1 90 ]
    # these next are copies of c1 intended
    # to check we haven't screwed up the weighting
    c2 = vcn( c1, 2 )
    c3 = copy( c1 )
    c3[:,1] .= 10_000.0
    c4 = copy( c1 )
    c4[:,1] .= 2.0
    # very unbalanced copy of dataset 1: 100,000 weight-1 copies of rows 1:6,
    # plus rows 7:10 once each with weight 100,000
    c64k = vcn( c1[1:6,:], 100_000 )
    cx = copy(c1[7:10,:])
    cx[:,1] .= 100_000
    c64k = vcat( c64k, cx )
    iq1 = make_inequality( c1 )
    iq2 = make_inequality( c2 )
    iq3 = make_inequality( c3 )
    iq4 = make_inequality( c4 )
    iq64k = make_inequality( c64k )
    # weighting and multiplying should make no difference
    println( "iq1");println( iq1 )
    println( "iq2");println( iq2 )
    println( "iq3");println( iq3 )
    println( "iq64k");println( iq64k )
    # test from dataframe
    cdf = DataFrame(income=c1[:,2], weight=c1[:,1])
    iqdf = make_inequality( cdf, :weight, :income)
    @test ineqs_equal( iqdf, iq1 )
    @test ineqs_equal( iq1 , iq64k, include_populations = false )
    @test ineqs_equal( iq1 , iq2, include_populations = false )
    @test ineqs_equal( iq1 , iq3, include_populations = false )
    @test ineqs_equal( iq1 , iq4, include_populations = false )
    # reference values (corrected Table 6.3 / hand calculations)
    @test isapprox( iq1.gini , 0.3272727, atol = TOL )
    @test isapprox( iq1.theil_l, 0.1792203, atol = TOL )
    @test isapprox( iq1.theil_t, 0.1830644, atol = TOL )
    @test isapprox( iq1.generalised_entropy[ 1 ], 0.1883288, atol = TOL )
    @test isapprox( iq1.generalised_entropy[ 2 ], 0.1954897, atol = TOL )
    @test isapprox( iq1.generalised_entropy[ 3 ], 0.2047211, atol = TOL )
    @test isapprox( iq1.generalised_entropy[ 4 ], 0.2162534, atol = TOL )
    @test isapprox( iq1.generalised_entropy[ 5 ], 0.2303812, atol = TOL )
    @test isapprox( iq1.generalised_entropy[ 6 ], 0.2474728, atol = TOL )
    @test isapprox( iq1.atkinson[ 1 ], 0.0446396, atol = TOL )
    @test isapprox( iq1.atkinson[ 2 ], 0.0869155, atol = TOL )
    @test isapprox( iq1.atkinson[ 3 ], 0.1267328, atol = TOL )
    @test isapprox( iq1.atkinson[ 4 ], 0.1640783, atol = TOL )
    @test isapprox( iq1.atkinson[ 5 ], 0.1989991, atol = TOL )
    @test isapprox( iq1.atkinson[ 6 ], 0.2315817, atol = TOL )
    @test isapprox( iq1.atkinson[ 7 ], 0.2619332, atol = TOL )
    @test isapprox( iq1.atkinson[ 8 ], 0.2901688, atol = TOL )
    @test isapprox( iq1.atkinson[ 9 ], 0.3164032, atol = TOL )
    @test isapprox( iq1.hoover, 0.2363636, atol = TOL )
    print( iq1 )
end # inequality testset
hbai_dir = "/mnt/data/hbai/tab/"
if isdir(hbai_dir)
# HBAI example if available
# load each year & jam varnames to lower case
hbai = CSV.File("$(hbai_dir)h1819.tab")|>DataFrame
lcnames = Symbol.(lowercase.(string.(names(hbai))))
rename!(hbai, lcnames)
# make scottish subset
positives = hbai[(hbai.s_oe_ahc .> 0.0 ),:]
regions = Vector{InequalityMeasures}(undef,0)
nations = Vector{InequalityMeasures}(undef,0)
scot = positives[(positives.gvtregn .== 12),:]
wal = positives[(positives.gvtregn .== 11),:]
nire = positives[(positives.gvtregn .== 13),:]
eng = positives[(positives.gvtregn .< 11),:]
for reg in 1:13
println("region $reg ")
if reg != 3
rd = positives[(positives.gvtregn .== reg),:]
ineq_ahc = make_inequality( rd,:gs_newpp,:s_oe_ahc )
push!(regions, ineq_ahc )
end
end
# theil decomp isn't exact if <0 incomes included
uk_ineq_ahc = make_inequality( positives,:gs_newpp,:s_oe_ahc )
sco_ineq_ahc = make_inequality( scot,:gs_newpp,:s_oe_ahc )
push!( nations, sco_ineq_ahc )
wal_ineq_ahc = make_inequality( wal,:gs_newpp,:s_oe_ahc )
push!( nations, wal_ineq_ahc )
nire_ineq_ahc = make_inequality( nire,:gs_newpp,:s_oe_ahc )
push!( nations, nire_ineq_ahc )
eng_ineq_ahc = make_inequality( eng,:gs_newpp,:s_oe_ahc )
push!( nations, eng_ineq_ahc )
dt_regions = add_decomposed_theil( uk_ineq_ahc, regions )
dt_nations = add_decomposed_theil( uk_ineq_ahc, nations )
pvline_ahc = positives.mdoeahc[1]*0.6
pvline_bhc = positives.mdoebhc[1]*0.6
eng_pov_ahc = make_poverty( eng, pvline_ahc, 0.02, :gs_newpp,:s_oe_ahc )
sco_pov_ahc = make_poverty( scot, pvline_ahc, 0.02, :gs_newpp,:s_oe_ahc )
end
@testset "Decile Tests" begin
n = 1000
r = rand(n)
rs = sort(r)
rc = cumsum(rs)
d = DataFrame( w=fill(1.0,n), i=r)
eq = make_inequality(d,:w,:i )
@test rc[100]/100 ≈ eq.deciles[1,4]
@test rs[100] ≈ eq.deciles[1,3]
@test (rc[1000]-rc[900])/100 ≈ eq.deciles[10,4]
@test rs[1000] ≈ eq.deciles[10,3]
@test rs[900] ≈ eq.deciles[9,3]
end | PovertyAndInequalityMeasures | https://github.com/grahamstark/PovertyAndInequalityMeasures.jl.git |
|
[
"MIT"
] | 1.0.10 | bb37e1a43eb7c52d6a12cfe29c60b518bda742aa | docs | 954 | # Poverty And Inequality Measures
This package allows you to generate various standard measures of poverty and inequality from a sample dataset.
The measures are mostly taken from chs. 4-6 of the World Bank's [Handbook on poverty and inequality](http://documents.worldbank.org/curated/en/488081468157174849/Handbook-on-poverty-and-inequality).
[](https://grahamstark.github.io/PovertyAndInequalityMeasures.jl/stable)
[](https://grahamstark.github.io/PovertyAndInequalityMeasures.jl/dev)
[](https://travis-ci.com/grahamstark/PovertyAndInequalityMeasures.jl)
[](https://codecov.io/gh/grahamstark/PovertyAndInequalityMeasures.jl)
| PovertyAndInequalityMeasures | https://github.com/grahamstark/PovertyAndInequalityMeasures.jl.git |
|
[
"MIT"
] | 1.0.10 | bb37e1a43eb7c52d6a12cfe29c60b518bda742aa | docs | 2520 | ```@meta
CurrentModule = PovertyAndInequalityMeasures
```
# PovertyAndInequalityMeasures
This generates various measures of poverty and inequality from a sample dataset.
The measures are mostly taken from chs. 4-6 of the World Bank's [Handbook on Poverty and Inequality](biblio.md).
See the [test case for worked examples](https://github.com/grahamstark/PovertyAndInequalityMeasures.jl/tree/master/test)
Poverty measures are:
* `headcount`;
* `gap`;
* `Foster Greer Thorndyke`, for each of the values in `foster_greer_thorndyke_alphas` - note that α=0 is headcount and α=1 is gap;
* `Watts`;
* `time to exit`, for the supplied growth rate;
* `Shorrocks`;
* `Sen`.
See WB ch. 4 on these measures.
Inequality Measures Are:
* `Gini`;
* `Atkinson`, for each value in `atkinson_es`;
* `Theil`;
* `generalised_entropy`;
* `Hoover`;
* `Theil`;
* `Palma`.
See World Bank chs. 5 and 6, and Cobham and Sumner on the Palma. Also returned by the inequality function are:
* `total_income`
* `total_population`
* `average_income`
* `deciles`.
There's also a small `binify` routine which chops a dataset up
into chunks of cumulative income and population suitable for drawing [Lorenz Curves](https://en.wikipedia.org/wiki/Lorenz_curve).
## Index
```@index
```
```@autodocs
Modules = [PovertyAndInequalityMeasures]
[:constant, :type, :function]
```
## TODO
* better decomposable indices;
* having separate dataframe/array versions seems complicated.
## Bibliography
Cobham Alex, and Sumner Andy. “Is Inequality All about the Tails?: The Palma Measure of Income Inequality.” Significance 11, no. 1 (February 19, 2014): 10–13. [https://doi.org/10.1111/j.1740-9713.2014.00718.x](https://doi.org/10.1111/j.1740-9713.2014.00718.x).
Haughton, Jonathan, and Shahidur R. Khandker. ‘Handbook on Poverty and Inequality’. The World Bank, 27 March 2009. [http://documents.worldbank.org/curated/en/488081468157174849/Handbook-on-poverty-and-inequality](http://documents.worldbank.org/curated/en/488081468157174849/Handbook-on-poverty-and-inequality).
Preston, Ian. ‘Inequality and Income Gaps’. IFS Working Paper. Institute for Fiscal Studies, 5 December 2006. [https://econpapers.repec.org/paper/ifsifsewp/06_2f25.htm](https://econpapers.repec.org/paper/ifsifsewp/06_2f25.htm).
Reed, Howard, and Graham Stark. ‘Tackling Child Poverty Delivery Plan - Forecasting Child Poverty in Scotland’. Scottish Government, 9 March 2018. [http://www.gov.scot/Publications/2018/03/2911/0](http://www.gov.scot/Publications/2018/03/2911/0)``.
| PovertyAndInequalityMeasures | https://github.com/grahamstark/PovertyAndInequalityMeasures.jl.git |
|
[
"MIT"
] | 0.4.0 | 81022321115ba46e0a256913860b95d01373ce91 | code | 866 | # Use
#
# DOCUMENTER_DEBUG=true julia --color=yes make.jl local [nonstrict] [fixdoctests]
#
# for local builds.
using Documenter
using EncodedArrays
# Doctest setup: make `using EncodedArrays` implicit in all jldoctest blocks
DocMeta.setdocmeta!(
    EncodedArrays,
    :DocTestSetup,
    :(using EncodedArrays);
    recursive=true,
)

makedocs(
    sitename = "EncodedArrays",
    modules = [EncodedArrays],
    format = Documenter.HTML(
        # pretty URLs break local file:// browsing, so disable for local builds
        prettyurls = !("local" in ARGS),
        canonical = "https://oschulz.github.io/EncodedArrays.jl/stable/"
    ),
    pages = [
        "Home" => "index.md",
        "API" => "api.md",
        "LICENSE" => "LICENSE.md",
    ],
    # "fixdoctests" rewrites failing doctest output in place
    doctest = ("fixdoctests" in ARGS) ? :fix : true,
    # "nonstrict" lets the build pass despite broken links / doc errors
    linkcheck = !("nonstrict" in ARGS),
    strict = !("nonstrict" in ARGS),
)

deploydocs(
    repo = "github.com/oschulz/EncodedArrays.jl.git",
    forcepush = true,
    push_preview = true,
)
| EncodedArrays | https://github.com/oschulz/EncodedArrays.jl.git |
|
[
"MIT"
] | 0.4.0 | 81022321115ba46e0a256913860b95d01373ce91 | code | 302 | # This file is a part of EncodedArrays.jl, licensed under the MIT License (MIT).
__precompile__(true)

# Top-level module: the encoded-array interface (encoded_array.jl),
# variable-length integer IO helpers (varlen_io.jl) and the
# varlen-diff codec built on them (varlen_diff_codec.jl).
module EncodedArrays

using ArraysOfArrays
using BitOperations
using FillArrays
using StructArrays

include("encoded_array.jl")
include("varlen_io.jl")
include("varlen_diff_codec.jl")

end # module
| EncodedArrays | https://github.com/oschulz/EncodedArrays.jl.git |
|
[
"MIT"
] | 0.4.0 | 81022321115ba46e0a256913860b95d01373ce91 | code | 13909 | # This file is a part of EncodedArrays.jl, licensed under the MIT License (MIT).
"""
abstract type AbstractArrayCodec <: Codecs.Codec end
Abstract type for arrays codecs.
Subtypes must implement the [`AbstractEncodedArray`](@ref) API.
Most coded should use [`EncodedArray`](@ref) as the concrete subtype of
`AbstractArrayCodec`. Codecs that use a custom subtype of
`AbstractEncodedArray` must implement
EncodedArrays.encarraytype(::Type{<:AbstractArrayCodec},::Type{<:AbstractArray{T,N}})::Type{<:AbstractEncodedArray{T,N}}
"""
abstract type AbstractArrayCodec end
export AbstractArrayCodec
import Base.|>

"""
    |>(A::AbstractArray{T}, codec::AbstractArrayCodec)::AbstractEncodedArray

Encode `A` using `codec` and return an [`AbstractEncodedArray`](@ref). The
default implementation returns an [`EncodedArray`](@ref).
"""
function |>(A::AbstractArray{T}, codec::AbstractArrayCodec) where T
    encoded = Vector{UInt8}()
    encode_data!(encoded, codec, A)
    EncodedArray{T}(codec, size(A), encoded)
end


# Make AbstractArrayCodec behave as a scalar for broadcasting
@inline Base.Broadcast.broadcastable(codec::AbstractArrayCodec) = (codec,)
"""
encode_data!(encoded::AbstractVector{UInt8}, codec::AbstractArrayCodec, data::AbstractArray)
Will resize `encoded` as necessary to fit the encoded data.
Returns `encoded`.
"""
function encode_data! end
"""
decode_data!(data::AbstractArray, codec::AbstractArrayCodec, encoded::AbstractVector{UInt8})
Depending on `codec`, may or may not resize `decoded` to fit the size of the
decoded data. Codecs may require `decoded` to be of correct size (e.g. to
improved performance or when the size/shape of the decoded data cannot be
easily inferred from the encoded data.
Returns `data`.
"""
function decode_data! end
"""
AbstractEncodedArray{T,N} <: AbstractArray{T,N}
Abstract type for arrays that store their elements in encoded/compressed form.
In addition to the standard `AbstractArray` API, an `AbstractEncodedArray`
must support the functions
* `EncodedArrays.getcodec(A::EncodedArray)`: Returns the codec.
* `Base.codeunits(A::EncodedArray)`: Returns the internal encoded data
representation.
Encoded arrays will typically be created via
A_enc = (codec::AbstractArrayCodec)(A::AbstractArray)
or
A_enc = AbstractEncodedArray(undef, codec::AbstractArrayCodec)
append!(A_enc, B::AbstractArray)
Decoding happens via standard array conversion or assignment:
A_dec = Array(A)
A_dec = convert(Array,A)
A_dec = A[:]
A_dec = Array{T,N}(undef, size(A_enc)...)
A_dec[:] = A_enc
"""
abstract type AbstractEncodedArray{T,N} <: AbstractArray{T,N} end
export AbstractEncodedArray
import Base.==
==(A::AbstractArray, B::AbstractEncodedArray) = A == Array(B)
==(A::AbstractEncodedArray, B::AbstractArray) = Array(A) == Array(B)
==(A::AbstractEncodedArray, B::AbstractEncodedArray) = Array(A) == Array(B)
"""
EncodedArrays.getcodec(A::AbstractEncodedArray)::AbstractArrayCodec
Returns the codec used to encode/compress A.
"""
function getcodec end
"""
EncodedArray{T,N,C,DV} <: AbstractEncodedArray{T,N}
Concrete type for [`AbstractEncodedArray`](@ref)s.
Constructor:
```julia
EncodedArray{T}(
codec::AbstractArrayCodec,
size::NTuple{N,Integer},
encoded::AbstractVector{UInt8}
)
```
Codecs using `EncodedArray` only need to implement
[`EncodedArrays.encode_data!`](@ref) and [`EncodedArrays.decode_data!`](@ref).
If length of the decoded data can be inferred from the encoded data,
a constructor
EncodedArray{T,N}(codec::MyCodec,encoded::AbstractVector{UInt8})
should also be defined. By default, two `EncodedArray`s that have the same
codec and size are assumed to be equal if and only if their code units are
equal.
Generic methods for the rest of the [`AbstractEncodedArray`](@ref) API are
already provided for `EncodedArray`.
"""
struct EncodedArray{T,N,C<:AbstractArrayCodec,DV<:AbstractVector{UInt8}} <: AbstractEncodedArray{T,N}
codec::C
size::NTuple{N,Int}
encoded::DV
end
export EncodedArray
EncodedArray{T}(
codec::AbstractArrayCodec,
size::NTuple{N,Integer},
encoded::AbstractVector{UInt8}
) where {T,N} = EncodedArray{T, N, typeof(codec),typeof(encoded)}(codec, size, encoded)
"""
    EncodedArray{T}(codec::AbstractArrayCodec, length::Integer, encoded::AbstractVector{UInt8})

Construct a one-dimensional encoded array of element type `T` with decoded
length `length`.
"""
EncodedArray{T}(
    codec::AbstractArrayCodec,
    length::Integer,
    encoded::AbstractVector{UInt8}
    # Bug fix: the original body referenced an undefined variable `len`
    # (the parameter is named `length`) and omitted the `N` type parameter,
    # so this method always threw on use.
) where {T} = EncodedArray{T, 1, typeof(codec), typeof(encoded)}(codec, (Int(length),), encoded)
# Re-wrap with a different data-vector type parameter `DV`.
EncodedArray{T,N,C,DV}(A::EncodedArray{T,N,C}) where {T,N,C,DV} = EncodedArray{T,N,C,DV}(A.codec, A.size, A.encoded)

Base.convert(::Type{EncodedArray{T,N,C,DV}}, A::EncodedArray{T,N,C}) where {T,N,C,DV} = EncodedArray{T,N,C,DV}(A)

@inline Base.size(A::EncodedArray) = A.size

@inline getcodec(A::EncodedArray) = A.codec

@inline Base.codeunits(A::EncodedArray) = A.encoded

# ToDo: Base.iscontiguous

# Decode into a freshly allocated Array; the target element type `T` may
# differ from the stored element type `U`.
function Base.Array{T,N}(A::EncodedArray{U,N}) where {T,N,U}
    B = Array{T,N}(undef, size(A)...)
    decode_data!(B, getcodec(A), codeunits(A))
end

Base.Array{T}(A::EncodedArray{U,N}) where {T,N,U} = Array{T,N}(A)
Base.Array(A::EncodedArray{T,N}) where {T,N} = Array{T,N}(A)
Base.Vector(A::EncodedArray{T,1}) where {T} = Array{T,1}(A)
Base.Matrix(A::EncodedArray{T,2}) where {T} = Array{T,2}(A)

Base.convert(::Type{Array{T,N}}, A::EncodedArray) where {T,N} = Array{T,N}(A)
Base.convert(::Type{Array{T}}, A::EncodedArray) where {T} = Array{T}(A)
Base.convert(::Type{Array}, A::EncodedArray) = Array(A)
Base.convert(::Type{Vector}, A::EncodedArray) = Vector(A)
Base.convert(::Type{Matrix}, A::EncodedArray) = Matrix(A)

Base.IndexStyle(A::EncodedArray) = IndexLinear()

# getindex decodes the whole array first, then slices — random access into
# an encoded array always costs a full decode.
function _getindex(A::EncodedArray, idxs::AbstractVector{Int})
    B = collect(A)
    if idxs == eachindex(IndexLinear(), A)
        B
    else
        B[idxs]
    end
end

_getindex(A::EncodedArray, i::Int) = collect(A)[i]

Base.@propagate_inbounds Base.getindex(A::EncodedArray, idxs) =
    _getindex(A, Base.to_indices(A, (idxs,))...)

# Decode `B` straight into (a view of) destination `A`.
@inline function _setindex!(A::AbstractArray, B::EncodedArray, idxs::AbstractVector{Int})
    @boundscheck let n = length(idxs), len_B = length(eachindex(B))
        n == len_B || Base.throw_setindex_mismatch(B, (n,))
    end
    if idxs == eachindex(A) || idxs == axes(A)
        # fast path: decode directly into A, no view needed
        decode_data!(A, getcodec(B), codeunits(B))
    else
        decode_data!(view(A, idxs), getcodec(B), codeunits(B))
    end
    A
end

Base.@propagate_inbounds function Base.setindex!(A::AbstractArray, B::EncodedArray, idxs::Colon)
    _setindex!(A, B, Base.to_indices(A, (idxs,))...)
end

@inline Base.@propagate_inbounds function Base.setindex!(A::Array, B::EncodedArray, idxs::AbstractVector{Int})
    @boundscheck checkbounds(A, idxs)
    _setindex!(A, B, Base.to_indices(A, (idxs,))...)
end

# Grow `A` by the decoded length of `B`, then decode into the new tail.
function _append!(A::AbstractVector, B::EncodedArray)
    n = length(eachindex(B))
    from = lastindex(A) + 1
    to = lastindex(A) + n
    resize!(A, to + 1 - firstindex(A))
    A[from:to] = B
    A
end

Base.append!(A::AbstractVector, B::EncodedArray) = _append!(A, B)
Base.append!(A::Vector, B::EncodedArray) = _append!(A, B)

# # ToDo (compatible with ElasticArrays.ElasticArray):
# Base.append!(A::AbstractArray{T,N}, B::EncodedArray) where {T,N} = ...

@inline function Base.copyto!(dest::AbstractArray, src::EncodedArray)
    @boundscheck if length(eachindex(dest)) < length(eachindex(src))
        throw(BoundsError())
    end
    decode_data!(dest, getcodec(src), codeunits(src))
end

# # ToDo:
# Base.copyto!(dest::AbstractArray, destoffs, src::EncodedArray, srcoffs, N) = ...

import Base.==

# Same codec + same shape ⇒ compare raw code units (by design, see the
# EncodedArray docstring); otherwise decode both and compare element-wise.
function ==(A::EncodedArray, B::EncodedArray)
    if getcodec(A) == getcodec(B) && size(A) == size(B)
        codeunits(A) == codeunits(B)
    else
        Array(A) == Array(B)
    end
end
"""
VectorOfEncodedArrays{T,N,...}
A vector of encoded arrays.
The code units of all entries are stored in contiguous fashion using
an `ArraysOfArray.VectorOfArrays`. All element arrays are encoded using the
same codec.
"""
struct VectorOfEncodedArrays{
T, N,
C <: AbstractArrayCodec,
VS <: AbstractVector{<:NTuple{N,<:Integer}},
VOA <: VectorOfArrays
} <: AbstractVector{EncodedArray{T,N,C,Array{UInt8,1}}}
codec::C
innersizes::VS
encoded::VOA
end
export VectorOfEncodedArrays
VectorOfEncodedArrays{T}(codec::AbstractArrayCodec, innersizes::AbstractVector{<:NTuple{N,<:Integer}}, encoded::VectorOfArrays) where {T,N} =
VectorOfEncodedArrays{T,N,typeof(codec),typeof(innersizes),typeof(encoded)}(codec, innersizes, encoded)
@inline Base.size(A::VectorOfEncodedArrays) = size(A.encoded)
@inline Base.getindex(A::VectorOfEncodedArrays{T}, i::Int) where T =
EncodedArray{T}(A.codec, A.innersizes[i], A.encoded[i])
@inline Base.getindex(A::VectorOfEncodedArrays{T}, idxs::Union{AbstractArray,Colon}) where T =
VectorOfEncodedArrays{T}(A.codec, A.innersizes[idxs], A.encoded[idxs])
@inline Base.IndexStyle(::Type{<:VectorOfEncodedArrays}) = IndexLinear()
const BroadcastedEncodeVectorOfArrays{T,N,C<:AbstractArrayCodec} = Base.Broadcast.Broadcasted{
<:Base.Broadcast.AbstractArrayStyle{1},
Tuple{Base.OneTo{Int}},
typeof(|>),
<:Tuple{
VectorOfArrays{T,N},
Union{Tuple{C},Ref{C}}
}
}
@inline _get_1st_or_ith(A, i::Int) = (length(A) == 1) ? A[1] : A[i]
function _bcast_enc_impl(::Type{T}, ::Val{N}, ::Type{C}, data_arg, codec_arg) where {T,N,C}
idxs_tuple = Base.Broadcast.combine_axes(data_arg, codec_arg)
@assert length(idxs_tuple) == 1
idxs = idxs_tuple[1]
codec = only(codec_arg)
n = length(idxs)
size_vec = Vector{NTuple{N,Int}}(undef, n)
encoded_vec = VectorOfVectors{UInt8}()
sizehint!(encoded_vec.elem_ptr, n + 1)
sizehint!(encoded_vec.kernel_size, n)
for i in idxs
data = _get_1st_or_ith(data_arg, i)
size_vec[i] = size(data)
# ToDo: Improve, eliminate temporary memory allocation:
tmp_encoded = encode_data!(Vector{UInt8}(), codec, data)
push!(encoded_vec, tmp_encoded)
end
return VectorOfEncodedArrays{T}(codec, size_vec, encoded_vec)
end
function Base.copy(instance::BroadcastedEncodeVectorOfArrays{T,N,C}) where {T,N,C}
data_arg = instance.args[1]
codec_arg = instance.args[2]
_bcast_enc_impl(T, Val{N}(), C, data_arg, codec_arg)
end
const BroadcastedDecodeVectorOfArrays{T,M,C<:AbstractArrayCodec} = Base.Broadcast.Broadcasted{
Base.Broadcast.DefaultArrayStyle{1},
Tuple{Base.OneTo{Int}},
typeof(collect),
<:Tuple{VectorOfEncodedArrays{T,M,C}}
}
function _bcast_dec_impl(::Type{T}, ::Val{N}, ::Type{C}, encoded_data) where {T,N,C}
result = VectorOfArrays{T,N}()
@inbounds for i in eachindex(encoded_data)
x = encoded_data[i]
push!(result, Fill(typemax(T), length(x)))
copyto!(last(result), x)
end
result
end
function Base.copy(instance::BroadcastedDecodeVectorOfArrays{T,N,C}) where {T,N,C}
_bcast_dec_impl(T, Val{N}(), C, instance.args[1])
end
# ToDo: SerialArrayCodec with decode_next, encode_next!, pos_type(codec),
# finalize_codeunits!
# ToDo: Custom broadcasting over encoded array.
"""
VectorOfEncodedSimilarArrays{T,M,C,...}
A vector of encoded arrays that have the same original size.
The code units of all entries are stored in contiguous fashion using
an `ArraysOfArray.VectorOfArrays`. All element arrays are encoded using the
same codec.
"""
struct VectorOfEncodedSimilarArrays{
T, M,
C <: AbstractArrayCodec,
VOA <: VectorOfArrays
} <: AbstractArrayOfSimilarArrays{T,M,1}
codec::C
innersize::Dims{M}
encoded::VOA
end
export VectorOfEncodedSimilarArrays
VectorOfEncodedSimilarArrays{T}(codec::AbstractArrayCodec, innersize::Dims{M}, encoded::VectorOfArrays) where {T,M} =
VectorOfEncodedSimilarArrays{T,M,typeof(codec),typeof(encoded)}(codec, innersize, encoded)
@inline Base.size(A::VectorOfEncodedSimilarArrays) = size(A.encoded)
@inline Base.getindex(A::VectorOfEncodedSimilarArrays{T}, i::Int) where T =
EncodedArray{T}(A.codec, A.innersize, A.encoded[i])
@inline Base.getindex(A::VectorOfEncodedSimilarArrays{T}, idxs::Union{AbstractArray,Colon}) where T =
VectorOfEncodedSimilarArrays{T}(A.codec, A.innersize, A.encoded[idxs])
@inline Base.IndexStyle(::Type{<:VectorOfEncodedSimilarArrays}) = IndexLinear()
ArraysOfArrays.innersize(A::VectorOfEncodedSimilarArrays) = A.innersize
const BroadcastedEncodeVectorOfSimilarArrays{T,M,C<:AbstractArrayCodec} = Base.Broadcast.Broadcasted{
<:Base.Broadcast.AbstractArrayStyle{1},
Tuple{Base.OneTo{Int}},
typeof(|>),
<:Tuple{
AbstractArrayOfSimilarArrays{T,M,1},
Union{Tuple{C},Ref{C}}
}
}
function Base.copy(instance::BroadcastedEncodeVectorOfSimilarArrays{T,M,C}) where {T,M,C}
data_arg = instance.args[1]
codec_arg = instance.args[2]
voea = _bcast_enc_impl(T, Val{M}(), C, data_arg, codec_arg)
codec = voea.codec
encoded = voea.encoded
VectorOfEncodedSimilarArrays{T}(codec, innersize(data_arg), encoded)
end
const BroadcastedDecodeVectorOfSimilarArrays{T,M,C<:AbstractArrayCodec} = Base.Broadcast.Broadcasted{
Base.Broadcast.DefaultArrayStyle{1},
Tuple{Base.OneTo{Int}},
typeof(collect),
<:Tuple{VectorOfEncodedSimilarArrays{T,M,C}}
}
function _decode_data_noret!(args...)
decode_data!(args...)
return nothing
end
function _bcast_dec_impl(encoded_arrays::VectorOfEncodedSimilarArrays{T,M,C}) where {T,M,C}
codec = encoded_arrays.codec
elsz = encoded_arrays.innersize
encoded_data = encoded_arrays.encoded
n = length(encoded_data)
decoded_data = similar(flatview(encoded_data), T, elsz..., n)
result = VectorOfSimilarArrays(decoded_data)
_decode_data_noret!.(result, Ref(codec), encoded_data)
return result
end
function Base.copy(instance::BroadcastedDecodeVectorOfSimilarArrays)
_bcast_dec_impl(instance.args[1])
end
| EncodedArrays | https://github.com/oschulz/EncodedArrays.jl.git |
|
[
"MIT"
] | 0.4.0 | 81022321115ba46e0a256913860b95d01373ce91 | code | 1840 | # This file is a part of EncodedArrays.jl, licensed under the MIT License (MIT).
"""
VarlenDiffArrayCodec <: AbstractArrayCodec
"""
struct VarlenDiffArrayCodec <: AbstractArrayCodec
end
export VarlenDiffArrayCodec
# function _encode(io::IO, codec::VarlenDiffArrayCodec, x::T, last_x::T) where {T <: Integer}
# write_autozz_varlen(io, x - last_x)
# end
#
#
# function _decode(io::IO, codec::VarlenDiffArrayCodec, last_x::T) where {T <: Integer}
# read_autozz_varlen(io, T) + last_x
# end
# function _length(io::IO, codec::VarlenDiffArrayCodec, T::Type{<:Integer})
# n::Int = 0
# last_x::T = 0
# while !eof(io)
# x = _decode(io, T, last_x)
# last_x = x
# n += 1
# end
# n
# end
"""
    encode_data!(encoded, codec::VarlenDiffArrayCodec, data)

Delta-encode `data`: each element is written as the difference to its
predecessor (implicit initial value zero) via `write_autozz_varlen`.
`encoded` is resized to fit and returned.
"""
function encode_data!(encoded::AbstractVector{UInt8}, codec::VarlenDiffArrayCodec, data::AbstractVector{T}) where {T}
    output = IOBuffer()
    last_x::T = zero(T)
    @inbounds for x in data
        # differences are computed in the signed domain so they can be
        # zig-zag encoded (write_autozz_varlen dispatches on Signed)
        dx = signed(x) - signed(last_x)
        write_autozz_varlen(output, dx)
        last_x = x
    end
    tmp = take!(output)
    resize!(encoded, length(eachindex(tmp)))
    copyto!(encoded, tmp)
    encoded
end
"""
    decode_data!(data, codec::VarlenDiffArrayCodec, encoded)

Reverse of `encode_data!` for `VarlenDiffArrayCodec`: read variable-length
zig-zag deltas and accumulate them, starting from zero. Since the decoded
length is not stored in `encoded`, `data` is grown while reading and
trimmed to the exact length at the end. Returns `data`.
"""
function decode_data!(data::AbstractVector{T}, codec::VarlenDiffArrayCodec, encoded::AbstractVector{UInt8}) where {T}
    input = IOBuffer(encoded)
    last_x::T = zero(T)
    i = firstindex(data)
    while !eof(input)
        if lastindex(data) < i
            if isempty(data)
                # first guess: one element per encoded byte (each element
                # takes at least one byte, so this is an upper bound)
                resize!(data, length(eachindex(encoded)))
            else
                # otherwise grow geometrically to amortise resizing
                resize!(data, 2 * (i - firstindex(data)))
            end
        end
        dx = read_autozz_varlen(input, typeof(signed(zero(T))))
        x = last_x + dx
        last_x = x
        data[i] = x
        i += 1
    end
    # trim surplus capacity left over from the growth strategy
    if i <= lastindex(data)
        resize!(data, i - firstindex(data))
    end
    data
end
| EncodedArrays | https://github.com/oschulz/EncodedArrays.jl.git |
|
[
"MIT"
] | 0.4.0 | 81022321115ba46e0a256913860b95d01373ce91 | code | 2085 | # This file is a part of EncodedArrays.jl, licensed under the MIT License (MIT).
"""
read_varlen(io::IO, T::Type{<:Unsigned})
Read an unsigned variable-length integer value of type `T` from `io`. If
the next value encoded in x is too large to be represented by `T`, an
exception is thrown.
See [`EncodedArrays.write_varlen`](@ref).
"""
@inline function read_varlen(io::IO, T::Type{<:Unsigned})
maxPos = 8 * sizeof(T)
x::T = 0
pos::Int = 0
while true
(pos >= maxPos) && throw(ErrorException("Overflow during decoding of variable-length encoded number."))
b = read(io, UInt8)
x = x | (T(b & 0x7f) << pos)
if ((b & 0x80) == 0)
return x
else
pos += 7
end
end
end
"""
write_varlen(io::IO, x::Unsigned)
Write unsigned integer value `x` to IO using variable-length coding. Data
is written in LSB fashion in units of one byte. The highest bit of each byte
indicates if more bytes will need to be read, the 7 lower bits contain the
next 7 bits of x.
"""
@inline function write_varlen(io::IO, x::Unsigned)
T = typeof(x)
rest::T = x
done::Bool = false
while !(done)
new_rest = rest >>> 7;
a = UInt8(rest & 0x7F)
b = (new_rest == 0) ? a : a | UInt8(0x80)
write(io, b)
rest = new_rest;
done = (rest == 0)
end
nothing
end
"""
read_autozz_varlen(io::IO, ::Type{<:Integer})
Read an integer of type `T` from `io`, using zig-zag decoding depending on
whether `T` is signed or unsigned.
"""
function read_autozz_varlen end
@inline read_autozz_varlen(io::IO, T::Type{<:Unsigned}) = read_varlen(io, T)
@inline read_autozz_varlen(io::IO, T::Type{<:Signed}) = zigzagdec(read_varlen(io, unsigned(T)))
"""
write_autozz_varlen(io::IO, x::Integer)
Write integer value `x` to `io`, using zig-zag encoding depending on
whether the type of x is signed or unsigned.
"""
@inline write_autozz_varlen(io::IO, x::Unsigned) = write_varlen(io, x)
@inline write_autozz_varlen(io::IO, x::Signed) = write_varlen(io, zigzagenc(x))
| EncodedArrays | https://github.com/oschulz/EncodedArrays.jl.git |
|
[
"MIT"
] | 0.4.0 | 81022321115ba46e0a256913860b95d01373ce91 | code | 501 | # This file is a part of EncodedArrays.jl, licensed under the MIT License (MIT).
import Test
import EncodedArrays
import Documenter
# Top-level test suite: unit tests for each source file, plus the
# package's jldoctest blocks run via Documenter.
Test.@testset "Package EncodedArrays" begin
    include("test_encoded_array.jl")
    include("test_varlen_io.jl")
    include("test_varlen_diff_codec.jl")

    # doctests
    Documenter.DocMeta.setdocmeta!(
        EncodedArrays,
        :DocTestSetup,
        :(using EncodedArrays);
        recursive=true,
    )
    Documenter.doctest(EncodedArrays)
end # testset
| EncodedArrays | https://github.com/oschulz/EncodedArrays.jl.git |
|
[
"MIT"
] | 0.4.0 | 81022321115ba46e0a256913860b95d01373ce91 | code | 4353 | # This file is a part of EncodedArrays.jl, licensed under the MIT License (MIT).
using EncodedArrays
using Test
using ArraysOfArrays
@testset "encoded_array" begin
data = rand(Int16(-1000):Int16(2000), 21)
codec = VarlenDiffArrayCodec()
data_enc = data |> codec
@testset "ctors and conversions" begin
@test @inferred(Array(data_enc)) == data
@test typeof(Array(data_enc)) == Array{eltype(data),1}
@test @inferred(Array{Int16}(data_enc)) == data
@test @inferred(Array{Int16,1}(data_enc)) == data
@test @inferred(Vector(data_enc)) == data
@test @inferred(Vector{Int16}(data_enc)) == data
@test typeof(Vector{Int16}(data_enc)) == Vector{Int16}
@test @inferred(Vector{Int32}(data_enc)) == data
@test typeof(Vector{Int32}(data_enc)) == Vector{Int32}
@test @inferred(convert(Array, data_enc)) == data
@test typeof(convert(Array, data_enc)) == Array{eltype(data),1}
@test @inferred(convert(Array{Int16}, data_enc)) == data
@test @inferred(convert(Array{Int16,1}, data_enc)) == data
@test @inferred(convert(Vector, data_enc)) == data
@test @inferred(convert(Vector{Int16}, data_enc)) == data
@test typeof(convert(Vector{Int16}, data_enc)) == Vector{Int16}
@test @inferred(convert(Vector{Int32}, data_enc)) == data
@test typeof(convert(Vector{Int32}, data_enc)) == Vector{Int32}
end
@testset "collect" begin
@test @inferred(collect(data_enc)) == data
@test typeof(collect(data_enc)) == typeof(data)
end
@testset "getindex" begin
@test @inferred(data_enc[:]) == data
@test typeof(data_enc[:]) == Array{eltype(data),1}
@test @inferred(data_enc[:]) == data
@test @inferred(data_enc[1:21]) == data
@test @inferred(data_enc[5:15]) == data[5:15]
@test @inferred(data_enc[7]) == data[7]
end
@testset "setindex!" begin
tmp = zero.(data)
@test (tmp[:] = data_enc) == data
tmp = vcat(zero.(data), zero.(data))
tmp2 = copy(tmp)
tmp[10:30] = data_enc
tmp2[10:30] = data
@test tmp == tmp2
end
@testset "equality" begin
@test @inferred data == data_enc
@test @inferred data_enc == data
@test @inferred data_enc == data_enc
end
@testset "append!" begin
A = similar(data, 0)
@test @inferred(append!(A, data_enc)) === A
@test A == data
A = data[1:4]
@test @inferred(append!(A, data_enc)) === A
@test A == vcat(data[1:4], data)
end
@testset "append!" begin
A = similar(data)
@test @inferred(copyto!(A, data_enc)) === A
@test A == data
@test_throws BoundsError @inferred(copyto!(similar(data, 5), data_enc))
end
@testset "VectorOfEncodedArrays" begin
codec = VarlenDiffArrayCodec()
data_orig = VectorOfArrays([cumsum(rand(-5:5, rand(1:100))) for i in 1:10])
data_enc = @inferred(broadcast(|>, data_orig, codec))
@test data_enc isa VectorOfEncodedArrays
@test (a -> collect(a)).(data_enc) == data_orig
data_dec = @inferred(broadcast(collect, data_enc) )
@test data_dec isa VectorOfArrays
@test data_dec == data_orig
@test @inferred(data_enc[2]) isa EncodedArray
@test @inferred(collect(data_enc[2])) == data_orig[2]
@test @inferred(data_enc[2:5]) isa VectorOfEncodedArrays
@test @inferred(broadcast(collect, data_enc[2:5])) == data_orig[2:5]
end
@testset "VectorOfEncodedSimilarArrays" begin
codec = VarlenDiffArrayCodec()
data_orig = VectorOfSimilarArrays([cumsum(rand(-5:5, 100)) for i in 1:10])
data_enc = @inferred(broadcast(|>, data_orig, codec))
@test data_enc isa VectorOfEncodedSimilarArrays
@test (a -> collect(a)).(data_enc) == data_orig
data_dec = @inferred(broadcast(collect, data_enc) )
@test data_dec isa VectorOfSimilarArrays
@test data_dec == data_orig
@test @inferred(data_enc[2]) isa EncodedArray
@test @inferred(collect(data_enc[2])) == data_orig[2]
@test @inferred(data_enc[2:5]) isa VectorOfEncodedSimilarArrays
@test @inferred(broadcast(collect, data_enc[2:5])) == data_orig[2:5]
end
end # testset
| EncodedArrays | https://github.com/oschulz/EncodedArrays.jl.git |
|
[
"MIT"
] | 0.4.0 | 81022321115ba46e0a256913860b95d01373ce91 | code | 1587 | # This file is a part of EncodedArrays.jl, licensed under the MIT License (MIT).
using EncodedArrays
using Test
using BitOperations
@testset "varlen_diff_codec" begin
a = zigzagdec(UInt64(0x4c6b5f94758))
data = [a, a + 3, a - 2]
codec = VarlenDiffArrayCodec()
encoded = Vector{UInt8}()
EncodedArrays.encode_data!(encoded, codec, data)
@test encoded == [0xd8, 0x8e, 0xe5, 0xaf, 0xeb, 0x98, 0x01, 0x06, 0x09]
data_dec = view(similar(data), :) # Use view to test that decoder doesn't resize
@test EncodedArrays.decode_data!(data_dec, codec, encoded) === data_dec
@test data_dec == data
data_dec = Vector{Int64}()
@test EncodedArrays.decode_data!(data_dec, codec, encoded) === data_dec
@test data_dec == data
data_dec = Vector{Int64}(undef, 2)
@test EncodedArrays.decode_data!(data_dec, codec, encoded) === data_dec
@test data_dec == data
data_dec = Vector{UInt64}()
@test EncodedArrays.decode_data!(data_dec, codec, encoded) === data_dec
@test data_dec == data
b = unsigned(a)
data = [b, b + 3, b - 2]
encoded = Vector{UInt8}()
EncodedArrays.encode_data!(encoded, codec, data)
@test encoded == [0xd8, 0x8e, 0xe5, 0xaf, 0xeb, 0x98, 0x01, 0x06, 0x09]
data_dec = view(similar(data), :) # Use view to test that decoder doesn't resize
@test EncodedArrays.decode_data!(data_dec, codec, encoded) === data_dec
@test data_dec == data
data_dec = Vector{Int64}()
@test EncodedArrays.decode_data!(data_dec, codec, encoded) === data_dec
@test data_dec == data
end # testset
| EncodedArrays | https://github.com/oschulz/EncodedArrays.jl.git |
|
[
"MIT"
] | 0.4.0 | 81022321115ba46e0a256913860b95d01373ce91 | code | 1635 | # This file is a part of EncodedArrays.jl, licensed under the MIT License (MIT).
using EncodedArrays
using Test
using BitOperations
@testset "varlen_io" begin
function test_encdec(T_read::Type{<:Integer}, f_read::Function, f_write::Function, x::Integer, encoded::AbstractVector{UInt8})
buf_out = IOBuffer()
@inferred f_write(buf_out, x)
enc_data = take!(buf_out)
@test enc_data == encoded
buf_in = IOBuffer(enc_data)
dec_x = @inferred f_read(buf_in, T_read)
@test typeof(dec_x) == T_read
@test dec_x == x
@test eof(buf_in)
end
@testset "read_varlen, write_varlen" begin
f_read = EncodedArrays.read_varlen
f_write = EncodedArrays.write_varlen
test_encdec(UInt64, f_read, f_write, UInt64(0x00), [0x00])
test_encdec(UInt64, f_read, f_write, UInt64(0x7f), [0x7f])
test_encdec(UInt64, f_read, f_write, UInt64(0x80), [0x80, 0x01])
test_encdec(UInt64, f_read, f_write, UInt64(0x4c6b5f94759), [0xd9, 0x8e, 0xe5, 0xaf, 0xeb, 0x98, 0x01])
@test_throws ErrorException test_encdec(UInt32, f_read, f_write, UInt64(0x4c6b5f94759), [0xd9, 0x8e, 0xe5, 0xaf, 0xeb, 0x98, 0x01])
end
@testset "read_autozz_varlen, write_autozz_varlen" begin
f_read = EncodedArrays.read_autozz_varlen
f_write = EncodedArrays.write_autozz_varlen
test_encdec(UInt64, f_read, f_write, UInt64(0x4c6b5f94759), [0xd9, 0x8e, 0xe5, 0xaf, 0xeb, 0x98, 0x01])
test_encdec(Int64, f_read, f_write, zigzagdec(UInt64(0x4c6b5f94759)), [0xd9, 0x8e, 0xe5, 0xaf, 0xeb, 0x98, 0x01])
end
end # testset
| EncodedArrays | https://github.com/oschulz/EncodedArrays.jl.git |
|
[
"MIT"
] | 0.4.0 | 81022321115ba46e0a256913860b95d01373ce91 | docs | 1232 | # EncodedArrays.jl
[](https://oschulz.github.io/EncodedArrays.jl/stable)
[](https://oschulz.github.io/EncodedArrays.jl/dev)
[](LICENSE.md)
[](https://github.com/oschulz/EncodedArrays.jl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/oschulz/EncodedArrays.jl)
EncodedArrays provides an API for arrays that store their elements in
encoded/compressed form. This package is meant to be lightweight and only
implements a simple codec `VarlenDiffArrayCodec`. As codec implementations are
often complex and have various dependencies, more advanced codecs should
be implemented in separate packages.
## Documentation
* [Documentation for stable version](https://oschulz.github.io/EncodedArrays.jl/stable)
* [Documentation for development version](https://oschulz.github.io/EncodedArrays.jl/dev)
| EncodedArrays | https://github.com/oschulz/EncodedArrays.jl.git |
|
[
"MIT"
] | 0.4.0 | 81022321115ba46e0a256913860b95d01373ce91 | docs | 301 | # API
## Modules
```@index
Order = [:module]
```
## Types and constants
```@index
Order = [:type, :constant]
```
## Functions and macros
```@index
Order = [:macro, :function]
```
# Documentation
```@autodocs
Modules = [EncodedArrays]
Order = [:module, :type, :constant, :macro, :function]
```
| EncodedArrays | https://github.com/oschulz/EncodedArrays.jl.git |
|
[
"MIT"
] | 0.4.0 | 81022321115ba46e0a256913860b95d01373ce91 | docs | 1292 | # EncodedArrays.jl
EncodedArrays provides an API for arrays that store their elements in encoded/compressed form. This package is meant to be lightweight and only implements a simple codec `VarlenDiffArrayCodec`. As codec implementations are often complex and have various dependencies, more advanced codecs should
be implemented in separate packages.
Random access on an encoded array will typically be very inefficient, but linear access may be efficient (depending on the codec). Accessing the whole array contents at once — e.g. via `collect(A)`, `A[:]`, `getindex`, or copying/appending/conversion to a regular array — must be efficient.
This package defines two central abstract types, [`AbstractEncodedArray`](@ref) and [`AbstractArrayCodec`](@ref). It also defines a concrete type [`EncodedArray`](@ref) that implements most of the API and only leaves [`EncodedArrays.encode_data!`](@ref) and [`EncodedArrays.decode_data!`](@ref) for a new codec to implement.
Custom broadcasting optimizations are not implemented yet but will likely be added in the future.
| EncodedArrays | https://github.com/oschulz/EncodedArrays.jl.git |
|
[
"MIT"
] | 0.1.1 | edef04bbcbb872baa5da9950d372386700d35bc4 | code | 4763 | module PolygenicRiskScores
## CSV parsing
using CSV
using DataFrames, DataFramesMeta
using Dates, Distributions, Statistics, Random, LinearAlgebra, Printf
using HDF5
include("parse_genet.jl")
include("gigrnd.jl")
include("mcmc_gtb.jl")
## argument parsing
using ArgParse
# Command-line option schema (ArgParse), consumed by `main`. Option names
# mirror the reference Python implementation (PRScs.py) where possible.
settings = ArgParseSettings()
@add_arg_table! settings begin
    "--ref_dir"
        help = "Path to the reference panel directory"
        required = true
    "--bim_prefix"
        help = "Directory and prefix of the bim file for the validation set"
        required = true
    "--sst_file"
        help = "Path to summary statistics file"
        required = true
    "--sst_missing"
        help = "Indicator for missing data in sumstats file (eg 'NA')"
        default = ""
    # `a` and `b` are the prior shape parameters; `phi` is the global
    # shrinkage parameter (omit for the fully-Bayesian "auto" mode).
    "--a"
        arg_type = Float64
        default = 1.0
    "--b"
        arg_type = Float64
        default = 0.5
    "--phi"
        arg_type = Float64
    "--n_gwas"
        help = "Sample size of the GWAS"
        arg_type = Int
        required = true
    "--n_iter"
        help = "Number of MCMC iterations to perform"
        arg_type = Int
        default = 1000
    "--n_burnin"
        help = "Number of MCMC burn-in iterations"
        arg_type = Int
        default = 500
    "--thin"
        arg_type = Int
        default = 5
    "--out_dir"
        help = "Output file directory and prefix"
        required = true
    "--out_header"
        help = "Write header to output file"
        action = :store_true
    "--out_delim"
        help = "Output file delimiter"
        default = '\t'
    "--out_path"
        help = "Output file path (overrides --out_dir)"
    "--chrom"
        help = "Chromosomes to process"
        default = "1:22"
    # When set, report standardized betas instead of per-allele betas.
    "--beta_std"
        action = :store_true
    "--seed"
        help = "RNG seed for MCMC"
        arg_type = Int
    "--quiet"
        help = "Disable all unnecessary printing"
        action = :store_true
    # NOTE(review): `--hostsfile` is accepted but not read anywhere in this
    # file — confirm whether parallel processing is still planned.
    "--hostsfile"
        help = "Hostsfile to use for parallel processing"
        default = nothing
end
"""
    main()

Command-line entry point. Parses `ARGS` (see `settings`), loads the
reference-panel SNP table and the validation `.bim` file restricted to the
requested chromosomes, then runs the per-chromosome pipeline `_main`.
"""
function main()
    opts = parse_args(ARGS, settings)
    verbose = !opts["quiet"]
    # `--chrom` is evaluated as a Julia expression, e.g. "1:22" or "21".
    # NOTE(review): this `eval`s user-supplied text — acceptable for a CLI
    # tool, but must not be exposed to untrusted input.
    chroms = eval(Meta.parse(opts["chrom"]))
    verbose && @info "Selecting chromosomes $chroms"
    ref_dir = opts["ref_dir"]
    verbose && @info "Parsing reference file: $ref_dir/snpinfo_1kg_hm3"
    t = now()
    ref_df = parse_ref(ref_dir * "/snpinfo_1kg_hm3", chroms)
    verbose && @info "$(nrow(ref_df)) SNPs in reference file ($(round(now()-t, Dates.Second)))"
    bim_prefix = opts["bim_prefix"]
    verbose && @info "Parsing BIM file: $(bim_prefix*".bim")"
    t = now()
    vld_df = parse_bim(bim_prefix, chroms)
    verbose && @info "$(nrow(vld_df)) SNPs in BIM file ($(round(now()-t, Dates.Second)))"
    for chrom in chroms
        _main(chrom, ref_df, vld_df, opts; verbose=verbose)
    end
end
"""
    _main(chrom, ref_df, vld_df, opts; verbose=false)

Process a single chromosome: parse the GWAS summary statistics, load the
reference LD blocks, run the MCMC, and write the posterior effect sizes to
a delimited text file (path from `--out_path`, or derived from `--out_dir`
and the model parameters).
"""
function _main(chrom, ref_df, vld_df, opts; verbose=false)
    sst_file = opts["sst_file"]
    verbose && @info "(Chromosome $chrom) Parsing summary statistics file: $sst_file"
    t = now()
    sst_df = parse_sumstats(ref_df[ref_df.CHR .== chrom,:], vld_df[vld_df.CHR .== chrom,:], sst_file, opts["n_gwas"]; verbose=verbose, missingstring=opts["sst_missing"])
    verbose && @info "(Chromosome $chrom) $(nrow(sst_df)) SNPs in summary statistics file ($(round(now()-t, Dates.Second)))"
    verbose && @info "(Chromosome $chrom) Parsing reference LD"
    t = now()
    ld_blk, blk_size = parse_ldblk(opts["ref_dir"], sst_df, chrom)
    verbose && @info "(Chromosome $chrom) Completed parsing reference LD ($(round(now()-t, Dates.Second)))"
    verbose && @info "(Chromosome $chrom) Initiating MCMC"
    t = now()
    beta_est = mcmc(opts["a"], opts["b"], opts["phi"], sst_df, opts["n_gwas"], ld_blk, blk_size, opts["n_iter"], opts["n_burnin"], opts["thin"], chrom, opts["beta_std"], opts["seed"]; verbose=verbose)
    verbose && @info "(Chromosome $chrom) Completed MCMC ($(round(now()-t, Dates.Second)))"
    verbose && @info "(Chromosome $chrom) Writing posterior effect sizes"
    # Output file name mirrors the reference implementation:
    # <out_dir>_pst_eff_a<a>_b<b>_phi<phi|auto>_chr<chrom>.txt
    eff_file = if opts["out_path"] === nothing
        out_path = opts["out_dir"]
        phi = opts["phi"]
        phi_str = phi === nothing ? "auto" : @sprintf("%1.0e", phi)
        out_path * @sprintf("_pst_eff_a%d_b%.1f_phi%s_chr%d.txt", opts["a"], opts["b"], phi_str, chrom)
    else
        opts["out_path"]
    end
    t = now()
    # Assemble the output table: CHR, SNP, BP, A1, A2, BETA (fixed-format).
    out_df = sst_df[:, [:SNP, :BP, :A1, :A2]]
    out_df[!, :CHR] .= chrom
    out_df.BETA = map(b->@sprintf("%.6e", b), beta_est)
    out_df = select(out_df, [:CHR, :SNP, :BP, :A1, :A2, :BETA])
    CSV.write(eff_file, out_df; header=opts["out_header"], delim=opts["out_delim"])
    verbose && @info "(Chromosome $chrom) finished writing posterior effect sizes ($(round(now()-t, Dates.Second)))"
end
end # module
| PolygenicRiskScores | https://github.com/fauconab/PolygenicRiskScores.jl.git |
|
[
"MIT"
] | 0.1.1 | edef04bbcbb872baa5da9950d372386700d35bc4 | code | 2071 | # Ported from PRScs/gigrnd.py
# ψ(x) used by the GIG rejection sampler `gigrnd` (log of the target on the
# transformed scale).
psi(x, alpha, lam) = -alpha * (cosh(x) - 1) - lam * (exp(x) - x - 1)
# ψ′(x): derivative of `psi` with respect to x.
dpsi(x, alpha, lam) = -alpha * sinh(x) - lam * (exp(x) - 1)
"""
    g(x, sd, td, f1, f2)

Piecewise dominating function used by the GIG rejection sampler `gigrnd`:
one on the central interval `[-sd, td]`, `f1` on the right tail (`x > td`)
and `f2` on the left tail (`x < -sd`).

Fixes over the original: the central branch returned the `Int` literal `1`
while the tails return floats (type-unstable), and a NaN `x` matched no
branch, leaving the result variable undefined. The branches are now
exhaustive and the return type is the promoted type of `f1`/`f2`.
"""
function g(x, sd, td, f1, f2)
    if -sd <= x <= td
        return one(promote_type(typeof(f1), typeof(f2)))
    elseif x > td
        return f1
    else
        # Covers x < -sd; NaN also falls through here instead of erroring.
        return f2
    end
end
"""
    gigrnd(p, a, b)

Draw one random variate from the three-parameter generalized inverse
Gaussian distribution GIG(p, a, b). Ported from PRS-CS's `gigrnd.py`.

The sampler reduces to the two-parameter form GIG(lam, omega) with
`omega = sqrt(a*b)`, builds a three-piece dominating function (uniform
center plus two exponential tails — see `psi`, `dpsi`, `g`), rejection
samples on the log scale, and maps the accepted draw back to the
(p, a, b) parameterization.
"""
function gigrnd(p, a, b)
    # setup -- sample from the two-parameter version gig(lam,omega)
    p = float(p); a = float(a); b = float(b)
    lam = p
    omega = sqrt(a*b)
    # GIG(-lam, omega) draws are reciprocals of GIG(lam, omega) draws, so
    # reduce to lam >= 0 and invert at the end.
    if lam < 0
        lam = -lam
        swap = true
    else
        swap = false
    end
    alpha = sqrt(omega^2+lam^2)-lam
    # find t: right boundary of the central (uniform) piece
    x = -psi(1, alpha, lam)
    if (x >= 1/2) && (x <= 2)
        t = 1
    elseif x > 2
        t = sqrt(2/(alpha+lam))
    elseif x < 1/2
        t = log(4/(alpha+2*lam))
    end
    # find s: left boundary of the central piece
    x = -psi(-1, alpha, lam)
    if (x >= 1/2) && (x <= 2)
        s = 1
    elseif x > 2
        s = sqrt(4/(alpha*cosh(1)+lam))
    elseif x < 1/2
        if alpha == 0
            s = 1/lam
        else
            s = min(1/lam, log(1+1/alpha+sqrt(1/alpha^2+2/alpha)))
        end
    end
    # find auxiliary parameters of the two exponential tail pieces
    eta = -psi(t, alpha, lam)
    zeta = -dpsi(t, alpha, lam)
    theta = -psi(-s, alpha, lam)
    xi = dpsi(-s, alpha, lam)
    p = 1/xi
    r = 1/zeta
    td = t-r*eta
    sd = s-p*theta
    q = td+sd
    # random variate generation: propose from the three-piece mixture
    # (uniform center, mass q; right tail, mass r; left tail, mass p) and
    # accept with probability target/envelope.
    while true
        U = rand()
        V = rand()
        W = rand()
        if U < q/(p+q+r)
            rnd = -sd+q*V
        elseif U < (q+r)/(p+q+r)
            rnd = td-r*log(V)
        else
            rnd = -sd+p*log(V)
        end
        f1 = exp(-eta-zeta*(rnd-t))
        f2 = exp(-theta+xi*(rnd+s))
        if W*g(rnd, sd, td, f1, f2) <= exp(psi(rnd, alpha, lam))
            break
        end
    end
    # transform back to the three-parameter version gig(p,a,b)
    # (`rnd` is a function-level local — it is assigned below at function
    # scope, so the loop assignments above update the same variable.)
    rnd = exp(rnd)*(lam/omega+sqrt(1+lam^2/omega^2))
    if swap
        rnd = 1/rnd
    end
    rnd = rnd/sqrt(a/b)
    return rnd
end
| PolygenicRiskScores | https://github.com/fauconab/PolygenicRiskScores.jl.git |
|
[
"MIT"
] | 0.1.1 | edef04bbcbb872baa5da9950d372386700d35bc4 | code | 2445 | # Ported from PRCcs/src/mcmc_gtb.py
"""
    mcmc(a, b, phi, sst_df, n, ld_blk, blk_size, n_iter, n_burnin, thin,
         chrom, beta_std, seed; verbose=false)

Gibbs sampler for PRS-CS posterior SNP effect sizes (ported from
PRScs/mcmc_gtb.py). Returns the posterior-mean effect per SNP, averaged
over every `thin`-th iteration after `n_burnin` burn-in iterations.
Passing `phi === nothing` enables the fully-Bayesian update of the global
shrinkage parameter.
"""
function mcmc(a, b, phi, sst_df, n, ld_blk, blk_size, n_iter, n_burnin, thin, chrom, beta_std, seed; verbose=false)
    # seed
    if seed !== nothing
        Random.seed!(seed)
    end
    # derived stats
    beta_mrg = copy(sst_df.BETA)
    maf = copy(sst_df.MAF)
    n_pst = (n_iter-n_burnin)/thin
    p = length(sst_df.SNP)
    n_blk = length(ld_blk)
    # initialization
    beta = zeros(p)
    psi = ones(p)
    sigma = 1.0
    if phi === nothing
        phi = 1.0
        phi_updt = true
    else
        phi_updt = false
    end
    beta_est = zeros(p)
    psi_est = zeros(p)
    sigma_est = 0.0
    phi_est = 0.0
    # MCMC
    for itr in 1:n_iter
        if itr % 100 == 0
            verbose && @info "(Chromosome $chrom) MCMC iteration $itr"
        end
        # Sample beta block-by-block from its Gaussian full conditional,
        # via the Cholesky factor of (LD + diag(1/psi)). `quad` accumulates
        # the quadratic form needed for the sigma update below.
        mm = 1; quad = 0.0
        for kk in 1:n_blk
            if blk_size[kk] == 0
                continue
            else
                idx_blk = mm:(mm+blk_size[kk]-1)
                dinvt = ld_blk[kk] .+ Diagonal(1.0 ./ psi[idx_blk])
                dinvt_chol = cholesky(dinvt).U
                beta_tmp = (transpose(dinvt_chol) \ beta_mrg[idx_blk]) .+ sqrt(sigma/n) .* randn(length(idx_blk))
                beta[idx_blk] = dinvt_chol \ beta_tmp
                quad += dot(transpose(beta[idx_blk]) * dinvt, beta[idx_blk])
                mm += blk_size[kk]
            end
        end
        # Sample the residual variance sigma (inverse-gamma full
        # conditional); `max` guards against a negative scale.
        err = max(n/2.0*(1.0-2.0*sum(beta.*beta_mrg)+quad), n/2.0*sum(beta .^ 2 ./ psi))
        sigma = 1.0/rand(Gamma((n+p)/2.0, 1.0/err))
        # Sample the local shrinkage parameters psi (GIG full conditional),
        # truncated above at 1.
        delta = rand.(Gamma.(a+b, 1.0 ./ (psi .+ phi)))
        for jj in 1:p
            psi[jj] = gigrnd(a-0.5, 2.0*delta[jj], n*beta[jj]^2/sigma)
        end
        psi[psi .> 1] .= 1.0
        # Optionally sample the global shrinkage parameter phi.
        if phi_updt
            w = rand(Gamma(1.0, 1.0/(phi+1.0)))
            phi = rand(Gamma(p*b+0.5, 1.0/(sum(delta)+w)))
        end
        # posterior: accumulate running means over retained draws
        if (itr>n_burnin) && (itr % thin == 0)
            beta_est = beta_est + beta/n_pst
            psi_est = psi_est + psi/n_pst
            sigma_est = sigma_est + sigma/n_pst
            phi_est = phi_est + phi/n_pst
        end
    end
    # convert standardized beta to per-allele beta
    if !beta_std
        beta_est ./= sqrt.(2.0 .* maf .* (1.0 .- maf))
    end
    # print estimated phi
    if phi_updt && verbose
        @info @sprintf("Estimated global shrinkage parameter: %1.2e", phi_est)
    end
    return beta_est
end
| PolygenicRiskScores | https://github.com/fauconab/PolygenicRiskScores.jl.git |
|
[
"MIT"
# Load the reference-panel SNP table (e.g. `snpinfo_1kg_hm3`) and keep only
# rows whose chromosome is in `chroms`. Allele columns are normalized to
# `Char` via `tochar` (multi-character alleles become `missing`).
function parse_ref(ref_file::String, chroms::UnitRange)
    df = CSV.File(ref_file; types=Dict(:A1=>Char,:A2=>Char)) |> DataFrame
    df.A1 = tochar.(df.A1)
    df.A2 = tochar.(df.A2)
    # Fail fast if the file did not parse with the expected column types.
    @assert df.CHR isa Vector{Int}
    @assert df.BP isa Vector{Int}
    @assert df.MAF isa Vector{T} where T<:Real
    filter!(row->row.CHR in chroms, df)
    return df
end
# Single-chromosome convenience method.
parse_ref(ref_file::String, chrom::Integer) =
    parse_ref(ref_file, chrom:chrom)
"""
    tochar(x) -> Union{Char,Missing}

Normalize an allele entry to a single `Char`. A one-character string
becomes its character; longer strings (e.g. indels) become `missing`; any
other value (already a `Char`, or `missing`) is passed through and
converted via the declared return type.

Fixes over the original: `first.(x)` was a spurious broadcast (strings
broadcast as scalars, so it worked only by accident) and only `String` was
accepted — `SubString` and other `AbstractString`s fell through to a
failing `Char` conversion.
"""
function tochar(x)::Union{Char,Missing}
    if x isa AbstractString
        # `length` counts characters, so this is Unicode-safe.
        return length(x) == 1 ? first(x) : missing
    else
        return x
    end
end
# Load a PLINK `.bim` file (no header; columns CHR/SNP/POS/BP/A1/A2) for
# the validation set and keep only rows on the requested chromosomes.
function parse_bim(bim_file::String, chroms::UnitRange)
    header = [:CHR, :SNP, :POS, :BP, :A1, :A2]
    df = CSV.File(bim_file*".bim"; header=header, types=Dict(:A1=>Char,:A2=>Char)) |> DataFrame
    df.A1 = tochar.(df.A1)
    df.A2 = tochar.(df.A2)
    @assert df.CHR isa Vector{Int}
    filter!(row->row.CHR in chroms, df)
    return df
end
# Single-chromosome convenience method.
parse_bim(bim_file::String, chrom::Integer) =
    parse_bim(bim_file, chrom:chrom)
# Watson–Crick complement of a nucleotide character (strand flip).
# Dispatching on `Val(base)` means an unrecognized base raises a
# `MethodError` rather than silently passing through.
nuc_map(base::Char) = nuc_map(Val(base))
nuc_map(::Val{'A'}) = 'T'
nuc_map(::Val{'T'}) = 'A'
nuc_map(::Val{'C'}) = 'G'
nuc_map(::Val{'G'}) = 'C'
# All strand/order-equivalent representations of each (SNP, A1, A2) triple:
# the original, the allele-swapped version, and the strand-flipped
# (complemented) versions of both. Used to match SNPs across files that may
# report alleles on either strand or in either order.
# NOTE(review): `first.` is a no-op when A1/A2 already hold `Char`s (see
# `tochar`) — confirm these columns are always Char here.
function permute_snps(df)
    unique(vcat(
        df[:,[:SNP,:A1,:A2]],
        DataFrame(SNP=df.SNP,
            A1=df.A2,
            A2=df.A1),
        DataFrame(SNP=df.SNP,
            A1=nuc_map.(first.(df.A1)),
            A2=nuc_map.(first.(df.A2))),
        DataFrame(SNP=df.SNP,
            A1=nuc_map.(first.(df.A2)),
            A2=nuc_map.(first.(df.A1)))
    ))
end
# Intersect the validation, reference, and summary-statistics SNP sets,
# allowing allele-order swaps and strand flips (via `permute_snps`).
function join_snps(ref_df, vld_df, sst_df; verbose=false)
    # TODO: Be more efficient, don't allocate all this memory
    vld_snps = vld_df[:,[:SNP,:A1,:A2]]
    ref_snps = permute_snps(ref_df)
    sst_snps = permute_snps(sst_df)
    snps = innerjoin(vld_snps, ref_snps, sst_snps, on=[:SNP,:A1,:A2], makeunique=true)
    verbose && @info "$(nrow(snps)) common SNPs"
    return snps
end
norm_ppf(x) = quantile(Normal(), x)
"""
    parse_sumstats(ref_df, vld_df, sst_file, n_subj; verbose=false, missingstring="")

Read the GWAS summary-statistics file, keep SNPs common to the reference
panel and validation set (allowing allele swaps and strand flips), and
return a DataFrame with one row per retained reference SNP containing the
standardized effect size (BETA), minor-allele frequency (MAF), and a
strand/order flip indicator (FLP in {1, -1}).
"""
function parse_sumstats(ref_df, vld_df, sst_file, n_subj; verbose=false, missingstring="")
    sst_df = CSV.File(sst_file; missingstring=missingstring, types=Dict(:A1=>Char,:A2=>Char)) |> DataFrame
    sst_df.A1 = tochar.(sst_df.A1)
    sst_df.A2 = tochar.(sst_df.A2)
    # Keep only single-nucleotide alleles with non-missing P and BETA.
    nucs = Set(['A','C','T','G'])
    filter!(row->(row.A1 in nucs) && (row.A2 in nucs), sst_df)
    filter!(row->!(row.P isa Missing) && !(row.BETA isa Missing), sst_df)
    sst_df.P = convert(Vector{Float64}, sst_df.P)
    sst_df.BETA = convert(Vector{Float64}, sst_df.BETA)
    snps = join_snps(ref_df, vld_df, sst_df; verbose=verbose)
    # Sorting is required by the binary searches in `findsnp`.
    sort!(snps, [:SNP, :A1, :A2])
    n_sqrt = sqrt(n_subj)
    sst_eff = Dict{String,Float64}()
    for row in Tables.namedtupleiterator(sst_df)
        # Effect sign: +1 when the sumstats alleles match the joined
        # orientation (possibly strand-flipped), -1 when swapped; SNPs not
        # in the joined set are skipped.
        if hassnp(snps, (row.SNP,row.A1,row.A2)) ||
            hassnp(snps, (row.SNP,nuc_map.(row.A1),nuc_map.(row.A2)))
            effect_sign = 1
        elseif hassnp(snps, (row.SNP,row.A2,row.A1)) ||
            hassnp(snps, (row.SNP,nuc_map.(row.A2),nuc_map.(row.A1)))
            effect_sign = -1
        else
            continue
        end
        if hasproperty(row, :BETA)
            beta = row.BETA
        elseif hasproperty(row, :OR)
            # Odds ratios are converted to log-odds effect sizes.
            beta = log(row.OR)
        end
        # Standardized effect derived from the p-value (clamped away from
        # zero so the normal quantile stays finite), signed by direction.
        p = max(row.P, 1e-323)
        beta_std = effect_sign*sign(beta)*abs(norm_ppf(p/2))/n_sqrt
        sst_eff[row.SNP] = beta_std
    end
    _sst_df = DataFrame(SNP=String[],CHR=Int[],BP=Int[],BETA=Float64[],A1=Char[],A2=Char[],MAF=Float64[],FLP=Int[])
    for (idx,row) in enumerate(Tables.namedtupleiterator(ref_df))
        haskey(sst_eff, row.SNP) || continue
        SNP = row.SNP
        CHR = row.CHR
        BP = row.BP
        BETA = sst_eff[row.SNP]
        A1,A2 = row.A1,row.A2
        # Record the reference alleles' orientation relative to the joined
        # set; MAF is complemented when the alleles are swapped.
        if hassnp(snps, (SNP,A1,A2))
            MAF = row.MAF
            FLP = 1
        elseif hassnp(snps, (SNP,A2,A1))
            A1, A2 = A2, A1
            MAF = 1-row.MAF
            FLP = -1
        elseif hassnp(snps, (SNP,nuc_map(A1),nuc_map(A2)))
            A1, A2 = nuc_map(A1), nuc_map(A2)
            MAF = row.MAF
            FLP = 1
        elseif hassnp(snps, (SNP,nuc_map(A2),nuc_map(A1)))
            A1, A2 = nuc_map(A2), nuc_map(A1)
            MAF = 1-row.MAF
            FLP = -1
        else
            # NOTE(review): when no orientation matches, MAF/FLP keep the
            # values from the previous loop iteration before the push! —
            # the FIXME below suggests this row should likely be skipped.
            verbose && @warn "(Chromosome $CHR) Didn't find ($SNP,$A1,$A2) in snps"
            # FIXME: Skip?
        end
        push!(_sst_df, (SNP=SNP,CHR=CHR,BP=BP,BETA=BETA,A1=A1,A2=A2,MAF=MAF,FLP=FLP))
    end
    return _sst_df
end
# Locate (snp, a1, a2) in `snps`, which MUST be sorted by [:SNP, :A1, :A2]
# (see `parse_sumstats`). Performs three nested binary searches — on SNP
# id, then A1 within that range, then A2 — and returns the absolute row
# index, or `nothing` if absent.
function findsnp(snps, (snp,a1,a2))
    SNP_range = binary_range_search(snps, snp, :SNP)
    SNP_range === nothing && return nothing
    SNP_L, SNP_R = SNP_range
    SNP_sub = snps[SNP_L:SNP_R,:]
    A1_range = binary_range_search(SNP_sub, a1, :A1)
    A1_range === nothing && return nothing
    A1_L, A1_R = A1_range
    A1_sub = SNP_sub[A1_L:A1_R,:]
    A2_range = binary_range_search(A1_sub, a2, :A2)
    A2_range === nothing && return nothing
    A2_L, A2_R = A2_range
    # Internal invariant: assumes each (SNP, A1, A2) triple is unique in
    # `snps` — TODO confirm against the join/`unique` in `permute_snps`.
    @assert A2_L == A2_R
    # Convert sub-frame-relative positions back to an absolute row index;
    # valid because each sub-frame is a contiguous slice of its parent.
    return SNP_L + (A1_L-1) + (A2_L-1)
end
hassnp(snps, row) = findsnp(snps, row) !== nothing
# TODO: Allow warm-restarts
# Binary search for `x` in column `col` of `snps` (the column must be
# sorted). Returns the inclusive index range (L, R) covering ALL rows equal
# to `x`, found by locating one match and widening linearly in both
# directions; returns `nothing` if `x` is absent.
function binary_range_search(snps, x, col)
    _snps = snps[!,col]
    L = 1
    R = nrow(snps)
    while true
        (L > R) && return nothing
        M = floor(Int, (L+R)/2)
        _x = _snps[M]
        if _x == x
            # Found one match at M; widen to the full run of equal keys.
            L,R = M,M
            snps_rows = nrow(snps)
            while (L > 1) && (_snps[L - 1] == x)
                L -= 1
            end
            while R < (snps_rows) && (_snps[R + 1] == x)
                R += 1
            end
            return L,R
        elseif _x < x
            L = M+1
        elseif _x > x
            R = M-1
        end
    end
end
"""
    parse_ldblk(ldblk_dir, sst_df, chrom) -> (ld_blk, blk_size)

Read the reference LD matrices for `chrom` from
`ldblk_dir * "/ldblk_1kg_chr<chrom>.hdf5"`, restrict each block to the SNPs
present in `sst_df`, and flip correlation signs according to `sst_df.FLP`.

Returns a vector of per-block LD matrices (an empty `0×0` matrix for blocks
with no retained SNPs) and the vector of per-block retained-SNP counts.

Changes over the original: the per-SNP `findfirst`/`in` scans (O(blocks ×
block_size × n_snps)) are replaced by a single hash lookup table; the HDF5
file handle is now closed after reading; and the dead locals `mm`/`idx_blk`
(computed but never used) were removed.
"""
function parse_ldblk(ldblk_dir, sst_df, chrom)
    chr_name = ldblk_dir * "/ldblk_1kg_chr" * string(chrom) * ".hdf5"
    hdf_chr = h5open(chr_name, "r")
    n_blk = length(hdf_chr)
    ld_blk = [read(hdf_chr["blk_"*string(blk)]["ldblk"]) for blk in 1:n_blk]
    snp_blk = Vector{String}[]
    for blk in 1:n_blk
        push!(snp_blk, read(hdf_chr["blk_"*string(blk)]["snplist"]))
    end
    # All datasets are read; release the file handle (the original leaked it).
    close(hdf_chr)
    # Map each summary-stats SNP id to its row index. `get!` keeps the FIRST
    # occurrence, matching the original `findfirst` semantics if an id were
    # ever duplicated.
    snp_pos = Dict{String,Int}()
    for (jj, snp) in enumerate(sst_df.SNP)
        get!(snp_pos, snp, jj)
    end
    blk_size = Int[]
    _ld_blk = Matrix{Float64}[]
    for blk in 1:n_blk
        # (position within this LD block, row in sst_df) for retained SNPs.
        idx = [(ii, snp_pos[snp]) for (ii, snp) in enumerate(snp_blk[blk]) if haskey(snp_pos, snp)]
        push!(blk_size, length(idx))
        if !isempty(idx)
            # Sign-flip matrix: entry (i,j) is FLP_i * FLP_j.
            flip = [sst_df.FLP[jj] for jj in last.(idx)]
            flipM = flip' .* flip
            rows = first.(idx)
            _blk = Matrix{Float64}(undef, length(idx), length(idx))
            for icol in eachindex(rows), irow in eachindex(rows)
                _blk[irow, icol] = ld_blk[blk][rows[irow], rows[icol]] * flipM[irow, icol]
            end
            push!(_ld_blk, _blk)
        else
            push!(_ld_blk, Matrix{Float64}(undef, 0, 0))
        end
    end
    return _ld_blk, blk_size
end
| PolygenicRiskScores | https://github.com/fauconab/PolygenicRiskScores.jl.git |
|
[
"MIT"
] | 0.1.1 | edef04bbcbb872baa5da9950d372386700d35bc4 | code | 3046 | using Test
using Printf
using CSV, DataFrames, Tables
using HypothesisTests
# Location of the PRS-CS test fixtures. If JULIA_PRS_TEST_DATA_PATH is not
# set, fetch the reference Python implementation, link its test data, and
# download/unpack the 1kg EUR LD reference panel into a fresh temp dir.
PRS_DATA_PATH = get(ENV, "JULIA_PRS_TEST_DATA_PATH", nothing)
if PRS_DATA_PATH === nothing
    PRS_DATA_PATH = mktempdir()
    cd(PRS_DATA_PATH) do
        isdir("PRScs") || run(`git clone https://github.com/getian107/PRScs`)
        isfile("test.bim") || run(`ln -s PRScs/test_data/test.bim .`)
        isfile("sumstats.txt") || run(`ln -s PRScs/test_data/sumstats.txt .`)
        if !isdir("ldblk_1kg_eur")
            run(`wget -q https://www.dropbox.com/s/mt6var0z96vb6fv/ldblk_1kg_eur.tar.gz`)
            run(`tar xf ldblk_1kg_eur.tar.gz`)
        end
    end
end
"""
    test_harness(; a=1, b=0.5, phi=1e-02, chr=22, niter=1000, out_header=false)

Run the Julia port and the reference Python PRScs implementation with the
same inputs, then verify: both runs succeed (phi given or "auto"), both
output files exist (covering --out_dir naming) and are well formed, and
the posterior effect sizes (BETA) do not differ significantly (t-test).
"""
function test_harness(;a=1,b=0.5,phi=1e-02,chr=22,niter=1000,out_header=false)
    # Stringify parameters exactly as they appear in the output file names.
    a = repr(a)
    b = repr(b)
    phi = @sprintf("%1.0e", phi)
    chr = repr(chr)
    niter = repr(niter)
    cmd_jl = `julia --project -e "using PolygenicRiskScores; PolygenicRiskScores.main()" -- --ref_dir=$PRS_DATA_PATH/ldblk_1kg_eur --bim_prefix=$PRS_DATA_PATH/test --sst_file=$PRS_DATA_PATH/sumstats.txt --n_gwas=200000 --chrom=$chr --phi=$phi --n_iter=$niter --out_dir=$PRS_DATA_PATH/output_jl`
    if out_header
        push!(cmd_jl.exec, "--out_header")
    end
    run(cmd_jl)
    run(`python3 $PRS_DATA_PATH/PRScs/PRScs.py --ref_dir=$PRS_DATA_PATH/ldblk_1kg_eur --bim_prefix=$PRS_DATA_PATH/test --sst_file=$PRS_DATA_PATH/sumstats.txt --n_gwas=200000 --chrom=$chr --phi=$phi --n_iter=$niter --out_dir=$PRS_DATA_PATH/output_py`)
    output_jl = "output_jl_pst_eff_a$(a)_b$(b)_phi$(phi)_chr$(chr).txt"
    output_py = "output_py_pst_eff_a$(a)_b$(b)_phi$(phi)_chr$(chr).txt"
    beta_jl=nothing
    beta_py=nothing
    for (kind, output_path) in ((:jl, output_jl), (:py, output_py))
        output_path = joinpath(PRS_DATA_PATH, output_path)
        @test isfile(output_path)
        @test stat(output_path).size > 30_000
        header = [:CHR, :SNP, :BP, :A1, :A2, :BETA]
        if out_header
            f = CSV.File(output_path)
        else
            f = CSV.File(output_path; header=header)
        end
        for (idx,col) in enumerate(header)
            if out_header && kind == :jl
                # BUGFIX: `Tables.columnnames` is a function and must be
                # applied to the table before indexing; the original
                # `Tables.columnnames[idx]` indexed the function object.
                @test Tables.columnnames(f)[idx] == col
            end
            @test length(getproperty(f, col)) == 1000
        end
        if kind == :jl
            beta_jl = f.BETA
        else
            beta_py = f.BETA
        end
    end
    # FIXME: Add accuracy/comparison tests
    # Do a t-test to make sure the beta results are not significantly different
    @test pvalue(EqualVarianceTTest(beta_jl,beta_py)) > 0.01
    rm(joinpath(PRS_DATA_PATH, output_jl))
    rm(joinpath(PRS_DATA_PATH, output_py))
end
@testset "Basic Options" begin
# Default run
test_harness()
end
# run julia once with seed X
# get hash of output file
# run julia again with seed X
# test that hash of new output file == old output file
| PolygenicRiskScores | https://github.com/fauconab/PolygenicRiskScores.jl.git |
|
[
"MIT"
] | 0.1.1 | edef04bbcbb872baa5da9950d372386700d35bc4 | docs | 962 | # PolygenicRiskScores.jl (previously PRS.jl)
PolygenicRiskScores.jl is a port of [PRS-CS](https://github.com/getian107/PRScs) to Julia.
## Usage
Using the test data from PRS-CS at ~/prs-data, the following invocation should work when run
in the root directory of PolygenicRiskScores.jl:
```
julia --project -e "using PolygenicRiskScores; PolygenicRiskScores.main()" -- --ref_dir=~/prs-data/ldblk_1kg_eur --bim_prefix=~/prs-data/test --sst_file=~/prs-data/sumstats.txt --n_gwas=200000 --chrom=22 --phi=1e-2 --n_iter=1000 --out_dir=~/prs-data/output_jl
```
## Multi-threaded CSV reading
Julia's CSV reader supports multi-threaded CSV reading. In order to enable this, we need to load Julia with more than one thread. This is done through the `JULIA_NUM_THREADS` environment variable:
```
JULIA_NUM_THREADS=8 julia --project -e "using PolygenicRiskScores; PolygenicRiskScores.main()" ...
```
The above code would run PolygenicRiskScores.jl with 8 threads.
| PolygenicRiskScores | https://github.com/fauconab/PolygenicRiskScores.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 1424 | name: CompatHelper
# Nightly CompatHelper job: opens PRs that bump [compat] bounds when
# dependencies release new versions; can also be triggered manually.
on:
  schedule:
    - cron: 0 0 * * *
  workflow_dispatch:
permissions:
  contents: write
  pull-requests: write
jobs:
  CompatHelper:
    runs-on: ubuntu-latest
    steps:
      # Reuse a preinstalled Julia if present; otherwise install one.
      - name: Check if Julia is already available in the PATH
        id: julia_in_path
        run: which julia
        continue-on-error: true
      - name: Install Julia, but only if it is not already available in the PATH
        uses: julia-actions/setup-julia@v1
        with:
          version: '1'
          arch: ${{ runner.arch }}
        if: steps.julia_in_path.outcome != 'success'
      - name: "Add the General registry via Git"
        run: |
          import Pkg
          ENV["JULIA_PKG_SERVER"] = ""
          Pkg.Registry.add("General")
        shell: julia --color=yes {0}
      - name: "Install CompatHelper"
        run: |
          import Pkg
          name = "CompatHelper"
          uuid = "aa819f21-2bde-4658-8897-bab36330d9b7"
          version = "3"
          Pkg.add(; name, uuid, version)
        shell: julia --color=yes {0}
      - name: "Run CompatHelper"
        run: |
          import CompatHelper
          CompatHelper.main()
        shell: julia --color=yes {0}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          # Deploy key so CompatHelper's PRs can trigger CI workflows.
          COMPATHELPER_PRIV: ${{ secrets.DOCUMENTER_KEY }}
# COMPATHELPER_PRIV: ${{ secrets.COMPATHELPER_PRIV }} | PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 3002 | using PlanetOrbits
using Plots
using Colors
# Script that renders the package logo (static SVG/PNG) and an animated GIF
# of two planets plus a moon orbiting a star, in the Julia logo colors.
logocolors = Colors.JULIA_LOGO_COLORS
# Inner circular orbit (green planet).
orbit1 = orbit(
    a = 0.8,
    i = 0.0,
    e = 0.0,
    ω = 0.0,
    Ω = 0.0,
    τ = 0.7,
    plx=1000,
    M = 1.0,
)
# Outer eccentric orbit (red planet).
orbit2 = orbit(
    a = 1.269,
    i = 0.0,
    e = 0.16,
    ω = 120,
    Ω = 0.0,
    τ = 0.8,
    plx=1000,
    M = 1.0,
)
# Period ratio check (value shown interactively, not used below).
period(orbit1)/period(orbit2)
#
# Static logo: star at origin, both planets at epoch t = 0, plus a moon
# circling the red planet.
p = plot(xlims=(-1600,1300), ylims=(-1300,1600), size=(300,300), framestyle=:none, legend=nothing, margin=-20Plots.mm, background=:transparent)
scatter!([0], [0], color=logocolors.blue, markersize=20, markerstrokewidth=1, markerstrokecolor="#222")
plot!(orbit1, color=logocolors.green, linewidth=2.5)
scatter!([raoff(orbit1, 0)], [decoff(orbit1, 0)], color=logocolors.green, markersize=13, markerstrokewidth=1, markerstrokecolor="#222")
plot!(orbit2, color=logocolors.red, linewidth=2.5)
x = raoff(orbit2, 0)
y = decoff(orbit2, 0)
scatter!([x],[y], color=logocolors.red, markersize=9, markerstrokewidth=1, markerstrokecolor="#222")
moon = orbit(
    # a = 0.2,
    a = 0.274,
    i = 0,
    e = 0.0,
    ω = 120,
    Ω = 0.0,
    τ = 0.0,
    plx=1000,
    M = 1.0,
)
# The moon's path is drawn relative to the red planet by offsetting its
# orbit solution by the planet's (x, y).
νs = range(0, 2π, length=100)
xs = raoff.(PlanetOrbits.orbitsolve_ν.(moon, νs)) .+ x
ys = decoff.(PlanetOrbits.orbitsolve_ν.(moon, νs)) .+ y
plot!(xs,ys, color=logocolors.purple, linewidth=2.0)
i = 2
scatter!(xs[i:i],ys[i:i], color=logocolors.purple, markersize=6, markerstrokewidth=1, markerstrokecolor="#222")
savefig("docs/src/assets/logo.svg")
savefig("docs/src/assets/logo.png")
p
##
# Animated version: one frame per time step over a full outer-planet period.
anim = @animate for t in range(0, period(orbit2), length=120)
    p = plot(xlims=(-1730,1400), ylims=(-1450,1700), size=(350,350), framestyle=:none, legend=nothing, margin=-20Plots.mm, background=:white)
    plot!(orbit1, color=logocolors.green, linewidth=2.5)
    x0 = raoff(orbit1, t)
    y0 = decoff(orbit1, t)
    scatter!([x0], [y0], color=logocolors.green, markersize=9, markerstrokewidth=1, markerstrokecolor="#222")
    plot!(orbit2, color=logocolors.red, linewidth=2.5)
    x = raoff(orbit2, t)
    y = decoff(orbit2, t)
    scatter!([x],[y], color=logocolors.red, markersize=13, markerstrokewidth=1, markerstrokecolor="#222")
    moon = orbit(
        # a = 0.2,
        a = 0.274,
        i = 0,
        e = 0.0,
        ω = 120,
        Ω = 0.0,
        τ = 0.0,
        plx=1000,
        M = 1.0,
    )
    νs = range(0, 2π, length=100)
    xs = raoff.(PlanetOrbits.orbitsolve_ν.(moon, νs)) .+ x
    ys = decoff.(PlanetOrbits.orbitsolve_ν.(moon, νs)) .+ y
    plot!(xs,ys, color=logocolors.purple, linewidth=2.0)
    xm = raoff(moon, t)+x
    ym = decoff(moon, t)+y
    scatter!([xm], [ym], color=logocolors.purple, markersize=6, markerstrokewidth=1, markerstrokecolor="#222")
    # Star position balances the planets' weighted offsets so the system
    # barycenter stays fixed (weights from the marker sizes cubed).
    star_x = -(x0*9^3 + x*13^3)/40^3 # + xm*6^2
    star_y = -(y0*9^3 + y*13^3)/40^3 # + ym*6^2
    scatter!([star_x], [star_y], color=logocolors.blue, markersize=20, markerstrokewidth=1, markerstrokecolor="#222")
end
gif(anim, "docs/src/assets/logo.gif", fps=30)
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 355 | using Documenter, PlanetOrbits
ENV["GKSwstype"] = "100" # GR in documeter env variable
include("pages.jl")
makedocs(
sitename="PlanetOrbits.jl",
pages=pages,
format = Documenter.HTML(
prettyurls = get(ENV, "CI", nothing) == "true"
)
)
deploydocs(
repo = "github.com/sefffal/PlanetOrbits.jl.git",
devbranch = "master"
)
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 526 | pages = [
"Home" => "index.md",
# "Getting Started" => "getting-started.md",
"Tutorials" => [
"Introduction" => "introduction.md",
"Plotting" => "plots.md",
"Conversions" => "conversions.md",
"Hyperbolic" => "hyperbolic.md",
"Image Warping" => "image-warping.md",
],
"Documentation" => [
"API" => "api.md",
"Conventions" => "conventions.md",
"Kepler Solvers" => "kepler.md",
]
] | PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 60835 | ### A Pluto.jl notebook ###
# v0.17.5
using Markdown
using InteractiveUtils
# This Pluto notebook uses @bind for interactivity. When running this notebook outside of Pluto, the following 'mock version' of @bind gives bound variables a default value (instead of an error).
# Pluto-generated shim: outside Pluto, `@bind` assigns the widget's initial
# value (or `missing`) to the bound variable instead of erroring.
macro bind(def, element)
quote
# Look up AbstractPlutoDingetjes' initial_value hook if that package is loaded;
# otherwise fall back to a function returning `missing`.
local iv = try Base.loaded_modules[Base.PkgId(Base.UUID("6e696c72-6542-2067-7265-42206c756150"), "AbstractPlutoDingetjes")].Bonds.initial_value catch; b -> missing; end
local el = $(esc(element))
# Prefer Base.get when the element supports it, else use the initial-value hook.
global $(esc(def)) = Core.applicable(Base.get, el) ? Base.get(el) : iv(el)
el
end
end
# ╔═╡ 1793f5fa-7f30-4a1d-baef-b06436e1fc71
using Revise, PlutoUI, PlanetOrbits, Plots; theme(:dao)
# ╔═╡ 872e19d4-c071-40bb-a091-22e48e85e2a6
md"""
# Orbit Playground
A Pluto notebook for visualizing orbits
"""
# ╔═╡ ce86c343-b6e4-4833-935f-ee0392d1ee89
md"""
*Specify the Keplerian elements to visualize using the text fields and sliders below.*
"""
# ╔═╡ 79f416a6-7c52-4d57-8bdd-1f84dc04d7e8
md"Gravitational Parameter of the star ($M_⊙$)"
# ╔═╡ 60c8b78b-1b70-42d0-8fe6-7e40b0cdd4a2
M = 1;
# ╔═╡ 124025b9-f7de-4395-a5d3-1dc6ed4a67f7
md"Paralax Distance (mas)"
# ╔═╡ 9da00dbb-c645-4bb9-a26c-5f148efb36cd
plx = 24.;
# ╔═╡ 800b91ae-d6c6-479f-afd7-d07d7b207cd3
md"Epoch of periastron passage as fraction of orbital period [0,1]. Reference epoch: 58849."
# ╔═╡ f1ef0015-d671-450f-80cf-dc6651460998
tp = 0.;
# ╔═╡ c179bd9e-e392-4622-92a5-d64f442e2840
md"""
Drag the numbers to adjust the orbit:
a = $(@bind a Scrubbable(0.1:0.1:50, default=5))au,
i = $(@bind i Scrubbable(0:1:180.0, default=0))°,
Ω = $(@bind Ω Scrubbable(0:2:360.0, default=0))°,
e = $( @bind e Scrubbable(0:0.01:0.9, default=0)),
ω = $(@bind ω Scrubbable(0:2:360.0, default=0))°,
"""
# ╔═╡ 3f38e8ca-2286-427f-8816-3b8b6cc78c74
elem = VisualOrbitDeg(a,i,e,tp,M,ω,Ω,plx)
# ╔═╡ 596e2d59-203c-4e69-985b-f8a82624ef6c
md"""
-----
Time range in MJD (modified Juian days). Increase `length` to increase the resolution of the plots.
"""
# ╔═╡ 465e92c8-1004-47d6-ac4e-69172afad2b0
ts = 58849 .+ range(0, 2period(elem), step=2period(elem)÷150)
# ╔═╡ dde3ae5c-c51d-4efd-a824-3e360981a228
md"""
Time= $(@bind t Scrubbable(ts)) mjd
"""
#$(@bind t NumberField(ts, default=first(ts))) time [mjd]
# ╔═╡ 36bc055e-3b5b-41a0-863a-53b78a6328d9
# Cell text for the CSV-export section.
# Fix: both interpolations previously used `first(t)` (t is a scalar), so the
# text read "for t = X to X". The exported CSV covers the full time grid `ts`,
# so show its first and last epochs instead.
md"""
### CSV Export
Click below to download a CSV with the ΔRA, ΔDEC, and RV values
for t = $(round(Int, first(ts))) to $(round(Int, last(ts))) (mjd)
"""
# ╔═╡ e8d619bc-e37d-437a-96fb-0995aed2f823
begin
# Solve the orbit at every epoch in `ts`.
posn = kep2cart.(elem, ts)
# Unpack per-epoch components. Index meanings inferred from the plot labels in
# the cells below: 1 = ΔRA (mas), 2 = ΔDEC (mas), 4 = radial velocity (km/s).
# NOTE(review): 3 is presumably the line-of-sight coordinate — confirm against kep2cart.
ra = [p[1] for p in posn]
dec = [p[2] for p in posn]
los = [p[3] for p in posn]
rv = [p[4] for p in posn]
# Sample one full revolution uniformly in true anomaly ν for a smooth orbit trace.
posn_ν = PlanetOrbits.kep2cart_ν.(elem, range(-π, π, length=90))
ra_ν = [p[1] for p in posn_ν]
dec_ν = [p[2] for p in posn_ν]
end;
# ╔═╡ 593a177f-bf0b-4b05-9408-745793ab2536
begin
# Top panel: projected orbit plot (RA axis flipped to match sky convention).
p1 = plot(xflip=true, aspectratio=1, legend=:topleft, size=(650,400), fontfamily="", fmt=:svg)
scatter!([0], [0], label="star", marker=:star, color=3, ms=10)
# Axis half-width: largest projected separation over the sampled epochs, padded 5%.
l = maximum(sqrt.(ra.^2 .+ dec.^2))*1.05
plot!(ra_ν, dec_ν;label="orbit",color=1)
xlims!(-1.4l,1.4l)
ylims!(-l,l)
# Current planet position at the scrubbed epoch `t`.
scatter!([raoff(elem, t)], [decoff(elem, t)], label="planet", ms=10, color=2)
xlabel!("ΔRA - mas")
ylabel!("ΔDEC - mas")
end
# ╔═╡ 14b4f565-2f8d-4e70-a79c-8926db3b0ca7
begin
# Bottom panel: planet radial velocity over the full epoch grid.
# Symmetric y-limits padded 25%, with a floor of ±1.5 so flat curves stay visible.
yl = max(abs.(extrema(rv))...)*1.25
if yl < 1.5
yl = 1.5
end
ylims=(-yl,yl)
p2 = plot(ts, rv; legend=:none, ylims, size=(650,200), fontfamily="", fmt=:svg, margin=10Plots.mm)
# Marker at the currently scrubbed epoch `t`.
scatter!([t], [radvel(elem, t)], ms=10)
xlabel!("t - mjd")
# ylabel!("\$\\mathrm{RV_{planet} - km/s}\$")
ylabel!("RVplanet - km/s")
end
# ╔═╡ 30c618b6-bd58-4335-bc55-23c16317011d
let
# Create a super simple CSV export:
# header row, then one "ra, dec, rv" line per epoch in `ts`.
csv = "dRA (mas), dDEC (mas), RV (km/s)\n"*join((
join(p, ", ")
for p in zip(ra,dec,rv)
), "\n")
# File name encodes the current orbital elements for traceability.
DownloadButton(csv, "orbit-a-$a-i-$i-e-$e-ω-$ω-Ω-$Ω-tp-$tp-M-$M.csv")
end
# ╔═╡ 9dd26db3-e443-46f3-8e18-21eb37b4d5b6
begin
# Dead exploratory code (apparent on-sky speed); kept commented out by the author.
# dradt = diff(ra)./step(ts).*PlanetOrbits.year2days
# ddecdt = diff(dec)./step(ts).*PlanetOrbits.year2days
# app_speed = sqrt.(dradt.^2 .+ ddecdt.^2)
# k = kep2cart.(elem, ts)
# app_speed = sqrt.(getproperty.(posn_ν, :ẋ).^2 .+ getproperty.(posn_ν, :ẏ).^2)
end;
# ╔═╡ 00000000-0000-0000-0000-000000000001
# Pluto-managed notebook environment: embedded Project.toml (machine-generated,
# do not edit by hand — Pluto rewrites it when packages change).
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
PlanetOrbits = "e969fe19-5388-4377-ad0e-889377a5e1a0"
Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
PlutoUI = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"
[compat]
PlanetOrbits = "~0.2.0"
Plots = "~1.25.6"
PlutoUI = "~0.7.29"
Revise = "~3.3.1"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
julia_version = "1.7.1"
manifest_format = "2.0"
[[deps.AbstractFFTs]]
deps = ["ChainRulesCore", "LinearAlgebra"]
git-tree-sha1 = "6f1d9bc1c08f9f4a8fa92e3ea3cb50153a1b40d4"
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
version = "1.1.0"
[[deps.AbstractPlutoDingetjes]]
deps = ["Pkg"]
git-tree-sha1 = "8eaf9f1b4921132a4cff3f36a1d9ba923b14a481"
uuid = "6e696c72-6542-2067-7265-42206c756150"
version = "1.1.4"
[[deps.Adapt]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "af92965fb30777147966f58acb05da51c5616b5f"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.3.3"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
[[deps.ArnoldiMethod]]
deps = ["LinearAlgebra", "Random", "StaticArrays"]
git-tree-sha1 = "f87e559f87a45bece9c9ed97458d3afe98b1ebb9"
uuid = "ec485272-7323-5ecc-a04f-4719b315124d"
version = "0.1.0"
[[deps.ArrayInterface]]
deps = ["Compat", "IfElse", "LinearAlgebra", "Requires", "SparseArrays", "Static"]
git-tree-sha1 = "1ee88c4c76caa995a885dc2f22a5d548dfbbc0ba"
uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9"
version = "3.2.2"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[deps.AstroLib]]
deps = ["Dates", "DelimitedFiles", "LinearAlgebra", "Printf", "StaticArrays"]
git-tree-sha1 = "283b723fa46dcfdaa758aa66e1b28fb25104ba1b"
uuid = "c7932e45-9af1-51e7-9da9-f004cd3a462b"
version = "0.4.1"
[[deps.AstroTime]]
deps = ["Dates", "EarthOrientation", "ItemGraphs", "LeapSeconds", "MacroTools", "MuladdMacro", "Reexport"]
git-tree-sha1 = "b3217075a2453321b304746f64311e748f9725a7"
uuid = "c61b5328-d09d-5e37-a9a8-0eb41c39009c"
version = "0.7.0"
[[deps.AxisAlgorithms]]
deps = ["LinearAlgebra", "Random", "SparseArrays", "WoodburyMatrices"]
git-tree-sha1 = "66771c8d21c8ff5e3a93379480a2307ac36863f7"
uuid = "13072b0f-2c55-5437-9ae7-d433b7a33950"
version = "1.0.1"
[[deps.AxisArrays]]
deps = ["Dates", "IntervalSets", "IterTools", "RangeArrays"]
git-tree-sha1 = "d127d5e4d86c7680b20c35d40b503c74b9a39b5e"
uuid = "39de3d68-74b9-583c-8d2d-e117c070f3a9"
version = "0.4.4"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.Bzip2_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "19a35467a82e236ff51bc17a3a44b69ef35185a2"
uuid = "6e34b625-4abd-537c-b88f-471c36dfa7a0"
version = "1.0.8+0"
[[deps.CEnum]]
git-tree-sha1 = "215a9aa4a1f23fbd05b92769fdd62559488d70e9"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.4.1"
[[deps.CFITSIO]]
deps = ["CFITSIO_jll"]
git-tree-sha1 = "4379a2dac795014534b9895a45889aa658fca213"
uuid = "3b1b4be9-1499-4b22-8d78-7db3344d1961"
version = "1.4.0"
[[deps.CFITSIO_jll]]
deps = ["Artifacts", "JLLWrappers", "LibCURL_jll", "Libdl", "Pkg"]
git-tree-sha1 = "2fabb5fc48d185d104ca7ed7444b475705993447"
uuid = "b3e40c51-02ae-5482-8a39-3ace5868dcf4"
version = "3.49.1+0"
[[deps.Cairo_jll]]
deps = ["Artifacts", "Bzip2_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"]
git-tree-sha1 = "4b859a208b2397a7a623a03449e4636bdb17bcf2"
uuid = "83423d85-b0ee-5818-9007-b63ccbeb887a"
version = "1.16.1+1"
[[deps.Calculus]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "f641eb0a4f00c343bbc32346e1217b86f3ce9dad"
uuid = "49dc2e85-a5d0-5ad3-a950-438e2897f1b9"
version = "0.5.1"
[[deps.CatIndices]]
deps = ["CustomUnitRanges", "OffsetArrays"]
git-tree-sha1 = "a0f80a09780eed9b1d106a1bf62041c2efc995bc"
uuid = "aafaddc9-749c-510e-ac4f-586e18779b91"
version = "0.2.2"
[[deps.ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "926870acb6cbcf029396f2f2de030282b6bc1941"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.11.4"
[[deps.ChangesOfVariables]]
deps = ["ChainRulesCore", "LinearAlgebra", "Test"]
git-tree-sha1 = "bf98fa45a0a4cee295de98d4c1462be26345b9a1"
uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
version = "0.1.2"
[[deps.Clustering]]
deps = ["Distances", "LinearAlgebra", "NearestNeighbors", "Printf", "SparseArrays", "Statistics", "StatsBase"]
git-tree-sha1 = "75479b7df4167267d75294d14b58244695beb2ac"
uuid = "aaaa29a8-35af-508c-8bc3-b662a17a0fe5"
version = "0.14.2"
[[deps.CodeTracking]]
deps = ["InteractiveUtils", "UUIDs"]
git-tree-sha1 = "9aa8a5ebb6b5bf469a7e0e2b5202cf6f8c291104"
uuid = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
version = "1.0.6"
[[deps.ColorSchemes]]
deps = ["ColorTypes", "Colors", "FixedPointNumbers", "Random"]
git-tree-sha1 = "6b6f04f93710c71550ec7e16b650c1b9a612d0b6"
uuid = "35d6a980-a343-548e-a6ea-1d62b119f2f4"
version = "3.16.0"
[[deps.ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "024fe24d83e4a5bf5fc80501a314ce0d1aa35597"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.11.0"
[[deps.ColorVectorSpace]]
deps = ["ColorTypes", "FixedPointNumbers", "LinearAlgebra", "SpecialFunctions", "Statistics", "TensorCore"]
git-tree-sha1 = "3f1f500312161f1ae067abe07d13b40f78f32e07"
uuid = "c3611d14-8923-5661-9e6a-0046d554d3a4"
version = "0.9.8"
[[deps.Colors]]
deps = ["ColorTypes", "FixedPointNumbers", "Reexport"]
git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40"
uuid = "5ae59095-9a9b-59fe-a467-6f913c188581"
version = "0.12.8"
[[deps.Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "44c37b4636bc54afac5c574d2d02b625349d6582"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "3.41.0"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
[[deps.ComponentArrays]]
deps = ["ArrayInterface", "LinearAlgebra", "Requires"]
git-tree-sha1 = "76495e7a7e47abc3771d70c782d5f6e66f114d36"
uuid = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66"
version = "0.10.5"
[[deps.ComputationalResources]]
git-tree-sha1 = "52cb3ec90e8a8bea0e62e275ba577ad0f74821f7"
uuid = "ed09eef8-17a6-5b46-8889-db040fac31e3"
version = "0.3.2"
[[deps.Contour]]
deps = ["StaticArrays"]
git-tree-sha1 = "9f02045d934dc030edad45944ea80dbd1f0ebea7"
uuid = "d38c429a-6771-53c6-b99e-75d170b6e991"
version = "0.5.7"
[[deps.CoordinateTransformations]]
deps = ["LinearAlgebra", "StaticArrays"]
git-tree-sha1 = "681ea870b918e7cff7111da58791d7f718067a19"
uuid = "150eb455-5306-5404-9cee-2592286d6298"
version = "0.6.2"
[[deps.CustomUnitRanges]]
git-tree-sha1 = "1a3f97f907e6dd8983b744d2642651bb162a3f7a"
uuid = "dc8bdbbb-1ca9-579f-8c36-e416f6a65cce"
version = "1.0.2"
[[deps.DataAPI]]
git-tree-sha1 = "cc70b17275652eb47bc9e5f81635981f13cea5c8"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.9.0"
[[deps.DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "3daef5523dd2e769dad2365274f760ff5f282c7d"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.11"
[[deps.DataValueInterfaces]]
git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6"
uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464"
version = "1.0.0"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[deps.DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[deps.DensityInterface]]
deps = ["InverseFunctions", "Test"]
git-tree-sha1 = "80c3e8639e3353e5d2912fb3a1916b8455e2494b"
uuid = "b429d917-457f-4dbc-8f4c-0cc954292b1d"
version = "0.4.0"
[[deps.DirectImages]]
deps = ["AxisArrays", "ColorSchemes", "CoordinateTransformations", "Distributions", "FITSIO", "ImageAxes", "ImageCore", "ImageFiltering", "ImageMetadata", "ImageShow", "ImageTransformations", "Images", "Interpolations", "LinearAlgebra", "Measures", "OffsetArrays", "OrderedCollections", "Printf", "RecipesBase", "Requires", "StaticArrays", "Statistics", "StatsBase"]
git-tree-sha1 = "a71357416cc81fed1d523fabc8cf5733b2068d73"
uuid = "d0ffc639-4e6f-47d5-8eab-0368fa958583"
version = "0.3.1"
[[deps.PlanetOrbits]]
deps = ["AstroLib", "AstroTime", "ChainRulesCore", "ComponentArrays", "CoordinateTransformations", "Dates", "DirectImages", "Distributions", "KissMCMC", "LinearAlgebra", "NamedTupleTools", "Random", "RecipesBase", "Requires", "StaticArrays"]
git-tree-sha1 = "7e381d76b2f1545a927abac05e4a467414beabb3"
uuid = "e969fe19-5388-4377-ad0e-889377a5e1a0"
version = "0.2.0"
[[deps.Distances]]
deps = ["LinearAlgebra", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "3258d0659f812acde79e8a74b11f17ac06d0ca04"
uuid = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
version = "0.10.7"
[[deps.Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[deps.Distributions]]
deps = ["ChainRulesCore", "DensityInterface", "FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SparseArrays", "SpecialFunctions", "Statistics", "StatsBase", "StatsFuns", "Test"]
git-tree-sha1 = "97e9e9d0b8303bae296f3bdd1c2b0065dcb7e7ef"
uuid = "31c24e10-a181-5473-b8eb-7969acd0382f"
version = "0.25.38"
[[deps.DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.8.6"
[[deps.Downloads]]
deps = ["ArgTools", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
[[deps.DualNumbers]]
deps = ["Calculus", "NaNMath", "SpecialFunctions"]
git-tree-sha1 = "84f04fe68a3176a583b864e492578b9466d87f1e"
uuid = "fa6b7ba4-c1ee-5f82-b5fc-ecf0adba8f74"
version = "0.6.6"
[[deps.EarCut_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "3f3a2501fa7236e9b911e0f7a588c657e822bb6d"
uuid = "5ae413db-bbd1-5e63-b57d-d24a61df00f5"
version = "2.2.3+0"
[[deps.EarthOrientation]]
deps = ["Dates", "DelimitedFiles", "LeapSeconds", "OptionalData", "RemoteFiles"]
git-tree-sha1 = "d1081912769ed7d6712e1757059c7f67762ff36f"
uuid = "732a3c5d-d6c0-58bc-adb1-1b51709a25e2"
version = "0.7.1"
[[deps.EllipsisNotation]]
deps = ["ArrayInterface"]
git-tree-sha1 = "3fe985505b4b667e1ae303c9ca64d181f09d5c05"
uuid = "da5c29d0-fa7d-589e-88eb-ea29b0a81949"
version = "1.1.3"
[[deps.Expat_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "b3bfd02e98aedfa5cf885665493c5598c350cd2f"
uuid = "2e619515-83b5-522b-bb60-26c02a35a201"
version = "2.2.10+0"
[[deps.FFMPEG]]
deps = ["FFMPEG_jll"]
git-tree-sha1 = "b57e3acbe22f8484b4b5ff66a7499717fe1a9cc8"
uuid = "c87230d0-a227-11e9-1b43-d7ebe4e7570a"
version = "0.4.1"
[[deps.FFMPEG_jll]]
deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "LAME_jll", "Libdl", "Ogg_jll", "OpenSSL_jll", "Opus_jll", "Pkg", "Zlib_jll", "libass_jll", "libfdk_aac_jll", "libvorbis_jll", "x264_jll", "x265_jll"]
git-tree-sha1 = "d8a578692e3077ac998b50c0217dfd67f21d1e5f"
uuid = "b22a6f82-2f65-5046-a5b2-351ab43fb4e5"
version = "4.4.0+0"
[[deps.FFTViews]]
deps = ["CustomUnitRanges", "FFTW"]
git-tree-sha1 = "cbdf14d1e8c7c8aacbe8b19862e0179fd08321c2"
uuid = "4f61f5a4-77b1-5117-aa51-3ab5ef4ef0cd"
version = "0.3.2"
[[deps.FFTW]]
deps = ["AbstractFFTs", "FFTW_jll", "LinearAlgebra", "MKL_jll", "Preferences", "Reexport"]
git-tree-sha1 = "463cb335fa22c4ebacfd1faba5fde14edb80d96c"
uuid = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341"
version = "1.4.5"
[[deps.FFTW_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "c6033cc3892d0ef5bb9cd29b7f2f0331ea5184ea"
uuid = "f5851436-0d7a-5f13-b9de-f02708fd171a"
version = "3.3.10+0"
[[deps.FITSIO]]
deps = ["CFITSIO", "Printf", "Reexport", "Tables"]
git-tree-sha1 = "e6033823834ec0070125120d4d4a1234f1826a47"
uuid = "525bcba6-941b-5504-bd06-fd0dc1a4d2eb"
version = "0.16.12"
[[deps.FileIO]]
deps = ["Pkg", "Requires", "UUIDs"]
git-tree-sha1 = "67551df041955cc6ee2ed098718c8fcd7fc7aebe"
uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549"
version = "1.12.0"
[[deps.FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
[[deps.FillArrays]]
deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"]
git-tree-sha1 = "8756f9935b7ccc9064c6eef0bff0ad643df733a3"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "0.12.7"
[[deps.FixedPointNumbers]]
deps = ["Statistics"]
git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc"
uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
version = "0.8.4"
[[deps.Fontconfig_jll]]
deps = ["Artifacts", "Bzip2_jll", "Expat_jll", "FreeType2_jll", "JLLWrappers", "Libdl", "Libuuid_jll", "Pkg", "Zlib_jll"]
git-tree-sha1 = "21efd19106a55620a188615da6d3d06cd7f6ee03"
uuid = "a3f928ae-7b40-5064-980b-68af3947d34b"
version = "2.13.93+0"
[[deps.Formatting]]
deps = ["Printf"]
git-tree-sha1 = "8339d61043228fdd3eb658d86c926cb282ae72a8"
uuid = "59287772-0a20-5a39-b81b-1366585eb4c0"
version = "0.4.2"
[[deps.FreeType2_jll]]
deps = ["Artifacts", "Bzip2_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"]
git-tree-sha1 = "87eb71354d8ec1a96d4a7636bd57a7347dde3ef9"
uuid = "d7e528f0-a631-5988-bf34-fe36492bcfd7"
version = "2.10.4+0"
[[deps.FriBidi_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "aa31987c2ba8704e23c6c8ba8a4f769d5d7e4f91"
uuid = "559328eb-81f9-559d-9380-de523a88c83c"
version = "1.0.10+0"
[[deps.GLFW_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libglvnd_jll", "Pkg", "Xorg_libXcursor_jll", "Xorg_libXi_jll", "Xorg_libXinerama_jll", "Xorg_libXrandr_jll"]
git-tree-sha1 = "0c603255764a1fa0b61752d2bec14cfbd18f7fe8"
uuid = "0656b61e-2033-5cc2-a64a-77c0f6c09b89"
version = "3.3.5+1"
[[deps.GR]]
deps = ["Base64", "DelimitedFiles", "GR_jll", "HTTP", "JSON", "Libdl", "LinearAlgebra", "Pkg", "Printf", "Random", "RelocatableFolders", "Serialization", "Sockets", "Test", "UUIDs"]
git-tree-sha1 = "4a740db447aae0fbeb3ee730de1afbb14ac798a1"
uuid = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71"
version = "0.63.1"
[[deps.GR_jll]]
deps = ["Artifacts", "Bzip2_jll", "Cairo_jll", "FFMPEG_jll", "Fontconfig_jll", "GLFW_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pixman_jll", "Pkg", "Qt5Base_jll", "Zlib_jll", "libpng_jll"]
git-tree-sha1 = "aa22e1ee9e722f1da183eb33370df4c1aeb6c2cd"
uuid = "d2c73de3-f751-5644-a686-071e5b155ba9"
version = "0.63.1+0"
[[deps.GeometryBasics]]
deps = ["EarCut_jll", "IterTools", "LinearAlgebra", "StaticArrays", "StructArrays", "Tables"]
git-tree-sha1 = "58bcdf5ebc057b085e58d95c138725628dd7453c"
uuid = "5c1252a2-5f33-56bf-86c9-59e7332b4326"
version = "0.4.1"
[[deps.Gettext_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "XML2_jll"]
git-tree-sha1 = "9b02998aba7bf074d14de89f9d37ca24a1a0b046"
uuid = "78b55507-aeef-58d4-861c-77aaff3498b1"
version = "0.21.0+0"
[[deps.Glib_jll]]
deps = ["Artifacts", "Gettext_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Libiconv_jll", "Libmount_jll", "PCRE_jll", "Pkg", "Zlib_jll"]
git-tree-sha1 = "a32d672ac2c967f3deb8a81d828afc739c838a06"
uuid = "7746bdde-850d-59dc-9ae8-88ece973131d"
version = "2.68.3+2"
[[deps.Graphics]]
deps = ["Colors", "LinearAlgebra", "NaNMath"]
git-tree-sha1 = "1c5a84319923bea76fa145d49e93aa4394c73fc2"
uuid = "a2bd30eb-e257-5431-a919-1863eab51364"
version = "1.1.1"
[[deps.Graphite2_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "344bf40dcab1073aca04aa0df4fb092f920e4011"
uuid = "3b182d85-2403-5c21-9c21-1e1f0cc25472"
version = "1.3.14+0"
[[deps.Graphs]]
deps = ["ArnoldiMethod", "Compat", "DataStructures", "Distributed", "Inflate", "LinearAlgebra", "Random", "SharedArrays", "SimpleTraits", "SparseArrays", "Statistics"]
git-tree-sha1 = "d727758173afef0af878b29ac364a0eca299fc6b"
uuid = "86223c79-3864-5bf0-83f7-82e725a168b6"
version = "1.5.1"
[[deps.Grisu]]
git-tree-sha1 = "53bb909d1151e57e2484c3d1b53e19552b887fb2"
uuid = "42e2da0e-8278-4e71-bc24-59509adca0fe"
version = "1.0.2"
[[deps.HTTP]]
deps = ["Base64", "Dates", "IniFile", "Logging", "MbedTLS", "NetworkOptions", "Sockets", "URIs"]
git-tree-sha1 = "0fa77022fe4b511826b39c894c90daf5fce3334a"
uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3"
version = "0.9.17"
[[deps.HarfBuzz_jll]]
deps = ["Artifacts", "Cairo_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "Graphite2_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg"]
git-tree-sha1 = "129acf094d168394e80ee1dc4bc06ec835e510a3"
uuid = "2e76f6c2-a576-52d4-95c1-20adfe4de566"
version = "2.8.1+1"
[[deps.Hyperscript]]
deps = ["Test"]
git-tree-sha1 = "8d511d5b81240fc8e6802386302675bdf47737b9"
uuid = "47d2ed2b-36de-50cf-bf87-49c2cf4b8b91"
version = "0.0.4"
[[deps.HypertextLiteral]]
git-tree-sha1 = "2b078b5a615c6c0396c77810d92ee8c6f470d238"
uuid = "ac1192a8-f4b3-4bfe-ba22-af5b92cd3ab2"
version = "0.9.3"
[[deps.IOCapture]]
deps = ["Logging", "Random"]
git-tree-sha1 = "f7be53659ab06ddc986428d3a9dcc95f6fa6705a"
uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89"
version = "0.2.2"
[[deps.IfElse]]
git-tree-sha1 = "debdd00ffef04665ccbb3e150747a77560e8fad1"
uuid = "615f187c-cbe4-4ef1-ba3b-2fcf58d6d173"
version = "0.1.1"
[[deps.ImageAxes]]
deps = ["AxisArrays", "ImageBase", "ImageCore", "Reexport", "SimpleTraits"]
git-tree-sha1 = "c54b581a83008dc7f292e205f4c409ab5caa0f04"
uuid = "2803e5a7-5153-5ecf-9a86-9b4c37f5f5ac"
version = "0.6.10"
[[deps.ImageBase]]
deps = ["ImageCore", "Reexport"]
git-tree-sha1 = "b51bb8cae22c66d0f6357e3bcb6363145ef20835"
uuid = "c817782e-172a-44cc-b673-b171935fbb9e"
version = "0.1.5"
[[deps.ImageContrastAdjustment]]
deps = ["ImageCore", "ImageTransformations", "Parameters"]
git-tree-sha1 = "0d75cafa80cf22026cea21a8e6cf965295003edc"
uuid = "f332f351-ec65-5f6a-b3d1-319c6670881a"
version = "0.3.10"
[[deps.ImageCore]]
deps = ["AbstractFFTs", "ColorVectorSpace", "Colors", "FixedPointNumbers", "Graphics", "MappedArrays", "MosaicViews", "OffsetArrays", "PaddedViews", "Reexport"]
git-tree-sha1 = "9a5c62f231e5bba35695a20988fc7cd6de7eeb5a"
uuid = "a09fc81d-aa75-5fe9-8630-4744c3626534"
version = "0.9.3"
[[deps.ImageDistances]]
deps = ["Distances", "ImageCore", "ImageMorphology", "LinearAlgebra", "Statistics"]
git-tree-sha1 = "7a20463713d239a19cbad3f6991e404aca876bda"
uuid = "51556ac3-7006-55f5-8cb3-34580c88182d"
version = "0.2.15"
[[deps.ImageFiltering]]
deps = ["CatIndices", "ComputationalResources", "DataStructures", "FFTViews", "FFTW", "ImageBase", "ImageCore", "LinearAlgebra", "OffsetArrays", "Reexport", "SparseArrays", "StaticArrays", "Statistics", "TiledIteration"]
git-tree-sha1 = "15bd05c1c0d5dbb32a9a3d7e0ad2d50dd6167189"
uuid = "6a3955dd-da59-5b1f-98d4-e7296123deb5"
version = "0.7.1"
[[deps.ImageIO]]
deps = ["FileIO", "Netpbm", "OpenEXR", "PNGFiles", "QOI", "Sixel", "TiffImages", "UUIDs"]
git-tree-sha1 = "816fc866edd8307a6e79a575e6585bfab8cef27f"
uuid = "82e4d734-157c-48bb-816b-45c225c6df19"
version = "0.6.0"
[[deps.ImageMagick]]
deps = ["FileIO", "ImageCore", "ImageMagick_jll", "InteractiveUtils", "Libdl", "Pkg", "Random"]
git-tree-sha1 = "5bc1cb62e0c5f1005868358db0692c994c3a13c6"
uuid = "6218d12a-5da1-5696-b52f-db25d2ecc6d1"
version = "1.2.1"
[[deps.ImageMagick_jll]]
deps = ["Artifacts", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pkg", "Zlib_jll", "libpng_jll"]
git-tree-sha1 = "d43c324d044dc8256e1470a561ee7a0cf5e122c9"
uuid = "c73af94c-d91f-53ed-93a7-00f77d67a9d7"
version = "6.9.12+1"
[[deps.ImageMetadata]]
deps = ["AxisArrays", "ImageAxes", "ImageBase", "ImageCore"]
git-tree-sha1 = "36cbaebed194b292590cba2593da27b34763804a"
uuid = "bc367c6b-8a6b-528e-b4bd-a4b897500b49"
version = "0.9.8"
[[deps.ImageMorphology]]
deps = ["ImageCore", "LinearAlgebra", "Requires", "TiledIteration"]
git-tree-sha1 = "5581e18a74a5838bd919294a7138c2663d065238"
uuid = "787d08f9-d448-5407-9aad-5290dd7ab264"
version = "0.3.0"
[[deps.ImageQualityIndexes]]
deps = ["ImageContrastAdjustment", "ImageCore", "ImageDistances", "ImageFiltering", "OffsetArrays", "Statistics"]
git-tree-sha1 = "1d2d73b14198d10f7f12bf7f8481fd4b3ff5cd61"
uuid = "2996bd0c-7a13-11e9-2da2-2f5ce47296a9"
version = "0.3.0"
[[deps.ImageSegmentation]]
deps = ["Clustering", "DataStructures", "Distances", "Graphs", "ImageCore", "ImageFiltering", "ImageMorphology", "LinearAlgebra", "MetaGraphs", "RegionTrees", "SimpleWeightedGraphs", "StaticArrays", "Statistics"]
git-tree-sha1 = "36832067ea220818d105d718527d6ed02385bf22"
uuid = "80713f31-8817-5129-9cf8-209ff8fb23e1"
version = "1.7.0"
[[deps.ImageShow]]
deps = ["Base64", "FileIO", "ImageBase", "ImageCore", "OffsetArrays", "StackViews"]
git-tree-sha1 = "d0ac64c9bee0aed6fdbb2bc0e5dfa9a3a78e3acc"
uuid = "4e3cecfd-b093-5904-9786-8bbb286a6a31"
version = "0.3.3"
[[deps.ImageTransformations]]
deps = ["AxisAlgorithms", "ColorVectorSpace", "CoordinateTransformations", "ImageBase", "ImageCore", "Interpolations", "OffsetArrays", "Rotations", "StaticArrays"]
git-tree-sha1 = "b4b161abc8252d68b13c5cc4a5f2ba711b61fec5"
uuid = "02fcd773-0e25-5acc-982a-7f6622650795"
version = "0.9.3"
[[deps.Images]]
deps = ["Base64", "FileIO", "Graphics", "ImageAxes", "ImageBase", "ImageContrastAdjustment", "ImageCore", "ImageDistances", "ImageFiltering", "ImageIO", "ImageMagick", "ImageMetadata", "ImageMorphology", "ImageQualityIndexes", "ImageSegmentation", "ImageShow", "ImageTransformations", "IndirectArrays", "IntegralArrays", "Random", "Reexport", "SparseArrays", "StaticArrays", "Statistics", "StatsBase", "TiledIteration"]
git-tree-sha1 = "11d268adba1869067620659e7cdf07f5e54b6c76"
uuid = "916415d5-f1e6-5110-898d-aaa5f9f070e0"
version = "0.25.1"
[[deps.Imath_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "87f7662e03a649cffa2e05bf19c303e168732d3e"
uuid = "905a6f67-0a94-5f89-b386-d35d92009cd1"
version = "3.1.2+0"
[[deps.IndirectArrays]]
git-tree-sha1 = "012e604e1c7458645cb8b436f8fba789a51b257f"
uuid = "9b13fd28-a010-5f03-acff-a1bbcff69959"
version = "1.0.0"
[[deps.Inflate]]
git-tree-sha1 = "f5fc07d4e706b84f72d54eedcc1c13d92fb0871c"
uuid = "d25df0c9-e2be-5dd7-82c8-3ad0b3e990b9"
version = "0.1.2"
[[deps.IniFile]]
deps = ["Test"]
git-tree-sha1 = "098e4d2c533924c921f9f9847274f2ad89e018b8"
uuid = "83e8ac13-25f8-5344-8a64-a9f2b223428f"
version = "0.5.0"
[[deps.IntegralArrays]]
deps = ["ColorTypes", "FixedPointNumbers", "IntervalSets"]
git-tree-sha1 = "00019244715621f473d399e4e1842e479a69a42e"
uuid = "1d092043-8f09-5a30-832f-7509e371ab51"
version = "0.1.2"
[[deps.IntelOpenMP_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "d979e54b71da82f3a65b62553da4fc3d18c9004c"
uuid = "1d5cc7b8-4909-519e-a0f8-d0f5ad9712d0"
version = "2018.0.3+2"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[deps.Interpolations]]
deps = ["AxisAlgorithms", "ChainRulesCore", "LinearAlgebra", "OffsetArrays", "Random", "Ratios", "Requires", "SharedArrays", "SparseArrays", "StaticArrays", "WoodburyMatrices"]
git-tree-sha1 = "b15fc0a95c564ca2e0a7ae12c1f095ca848ceb31"
uuid = "a98d9a8b-a2ab-59e6-89dd-64a1c18fca59"
version = "0.13.5"
[[deps.IntervalSets]]
deps = ["Dates", "EllipsisNotation", "Statistics"]
git-tree-sha1 = "3cc368af3f110a767ac786560045dceddfc16758"
uuid = "8197267c-284f-5f27-9208-e0e47529a953"
version = "0.5.3"
[[deps.InverseFunctions]]
deps = ["Test"]
git-tree-sha1 = "a7254c0acd8e62f1ac75ad24d5db43f5f19f3c65"
uuid = "3587e190-3f89-42d0-90ee-14403ec27112"
version = "0.1.2"
[[deps.IrrationalConstants]]
git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.1.1"
[[deps.ItemGraphs]]
deps = ["LightGraphs"]
git-tree-sha1 = "e363e8bbeb44dc32c711a9c3f7e7323601050727"
uuid = "d5eda45b-7e79-5788-9687-2c6ab7b96158"
version = "0.4.0"
[[deps.IterTools]]
git-tree-sha1 = "fa6287a4469f5e048d763df38279ee729fbd44e5"
uuid = "c8e1da08-722c-5040-9ed9-7db0dc04731e"
version = "1.4.0"
[[deps.IteratorInterfaceExtensions]]
git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856"
uuid = "82899510-4779-5014-852e-03e436cf321d"
version = "1.0.0"
[[deps.JLD2]]
deps = ["DataStructures", "FileIO", "MacroTools", "Mmap", "Pkg", "Printf", "Reexport", "TranscodingStreams", "UUIDs"]
git-tree-sha1 = "09ef0c32a26f80b465d808a1ba1e85775a282c97"
uuid = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
version = "0.4.17"
[[deps.JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "22df5b96feef82434b07327e2d3c770a9b21e023"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.4.0"
[[deps.JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "8076680b162ada2a031f707ac7b4953e30667a37"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.2"
[[deps.JpegTurbo_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "d735490ac75c5cb9f1b00d8b5509c11984dc6943"
uuid = "aacddb02-875f-59d6-b918-886e6ef4fbf8"
version = "2.1.0+0"
[[deps.JuliaInterpreter]]
deps = ["CodeTracking", "InteractiveUtils", "Random", "UUIDs"]
git-tree-sha1 = "a2366b16704ffe78be1831341e6799ab2f4f07d2"
uuid = "aa1ae85d-cabe-5617-a682-6adf51b2e16a"
version = "0.9.0"
[[deps.KissMCMC]]
deps = ["ProgressMeter", "StaticArrays", "Statistics"]
git-tree-sha1 = "2a650312bcf33cda265a392f0681b6dbaedbb2b8"
uuid = "79d62d8d-4dfd-5781-bc85-ce78e0ac132a"
version = "0.2.1"
[[deps.LAME_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "f6250b16881adf048549549fba48b1161acdac8c"
uuid = "c1c5ebd0-6772-5130-a774-d5fcae4a789d"
version = "3.100.1+0"
[[deps.LZO_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "e5b909bcf985c5e2605737d2ce278ed791b89be6"
uuid = "dd4b983a-f0e5-5f8d-a1b7-129d4a5fb1ac"
version = "2.10.1+0"
[[deps.LaTeXStrings]]
git-tree-sha1 = "f2355693d6778a178ade15952b7ac47a4ff97996"
uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f"
version = "1.3.0"
[[deps.Latexify]]
deps = ["Formatting", "InteractiveUtils", "LaTeXStrings", "MacroTools", "Markdown", "Printf", "Requires"]
git-tree-sha1 = "a8f4f279b6fa3c3c4f1adadd78a621b13a506bce"
uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316"
version = "0.15.9"
[[deps.LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
[[deps.LeapSeconds]]
deps = ["Dates"]
git-tree-sha1 = "0e5be6875ee72468bc12221d32ba1021c5d224fe"
uuid = "2f5f767c-a11e-5269-a972-637d4b97c32d"
version = "1.1.0"
[[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
[[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
[[deps.LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[deps.Libffi_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "0b4a5d71f3e5200a7dff793393e09dfc2d874290"
uuid = "e9f186c6-92d2-5b65-8a66-fee21dc1b490"
version = "3.2.2+1"
[[deps.Libgcrypt_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgpg_error_jll", "Pkg"]
git-tree-sha1 = "64613c82a59c120435c067c2b809fc61cf5166ae"
uuid = "d4300ac3-e22c-5743-9152-c294e39db1e4"
version = "1.8.7+0"
[[deps.Libglvnd_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll", "Xorg_libXext_jll"]
git-tree-sha1 = "7739f837d6447403596a75d19ed01fd08d6f56bf"
uuid = "7e76a0d4-f3c7-5321-8279-8d96eeed0f29"
version = "1.3.0+3"
[[deps.Libgpg_error_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "c333716e46366857753e273ce6a69ee0945a6db9"
uuid = "7add5ba3-2f88-524e-9cd5-f83b8a55f7b8"
version = "1.42.0+0"
[[deps.Libiconv_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "42b62845d70a619f063a7da093d995ec8e15e778"
uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531"
version = "1.16.1+1"
[[deps.Libmount_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "9c30530bf0effd46e15e0fdcf2b8636e78cbbd73"
uuid = "4b2f31a3-9ecc-558c-b454-b3730dcb73e9"
version = "2.35.0+0"
[[deps.Libtiff_jll]]
deps = ["Artifacts", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Pkg", "Zlib_jll", "Zstd_jll"]
git-tree-sha1 = "340e257aada13f95f98ee352d316c3bed37c8ab9"
uuid = "89763e89-9b03-5906-acba-b20f662cd828"
version = "4.3.0+0"
[[deps.Libuuid_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "7f3efec06033682db852f8b3bc3c1d2b0a0ab066"
uuid = "38a345b3-de98-5d2b-a5d3-14cd9215e700"
version = "2.36.0+0"
[[deps.LightGraphs]]
deps = ["ArnoldiMethod", "DataStructures", "Distributed", "Inflate", "LinearAlgebra", "Random", "SharedArrays", "SimpleTraits", "SparseArrays", "Statistics"]
git-tree-sha1 = "432428df5f360964040ed60418dd5601ecd240b6"
uuid = "093fc24a-ae57-5d10-9952-331d41423f4d"
version = "1.3.5"
[[deps.LinearAlgebra]]
deps = ["Libdl", "libblastrampoline_jll"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[deps.LogExpFunctions]]
deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "e5718a00af0ab9756305a0392832c8952c7426c1"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.6"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[deps.LoweredCodeUtils]]
deps = ["JuliaInterpreter"]
git-tree-sha1 = "f46e8f4e38882b32dcc11c8d31c131d556063f39"
uuid = "6f1432cf-f94c-5a45-995e-cdbf5db27b0b"
version = "2.2.0"
[[deps.MKL_jll]]
deps = ["Artifacts", "IntelOpenMP_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg"]
git-tree-sha1 = "5455aef09b40e5020e1520f551fa3135040d4ed0"
uuid = "856f044c-d86e-5d09-b602-aeab76dc8ba7"
version = "2021.1.1+2"
[[deps.MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.9"
[[deps.MappedArrays]]
git-tree-sha1 = "e8b359ef06ec72e8c030463fe02efe5527ee5142"
uuid = "dbb5928d-eab1-5f90-85c2-b9b0edb7c900"
version = "0.4.1"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[deps.MbedTLS]]
deps = ["Dates", "MbedTLS_jll", "Random", "Sockets"]
git-tree-sha1 = "1c38e51c3d08ef2278062ebceade0e46cefc96fe"
uuid = "739be429-bea8-5141-9913-cc70e7f3736d"
version = "1.0.3"
[[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
[[deps.Measures]]
git-tree-sha1 = "e498ddeee6f9fdb4551ce855a46f54dbd900245f"
uuid = "442fdcdd-2543-5da2-b0f3-8c86c306513e"
version = "0.3.1"
[[deps.MetaGraphs]]
deps = ["Graphs", "JLD2", "Random"]
git-tree-sha1 = "2af69ff3c024d13bde52b34a2a7d6887d4e7b438"
uuid = "626554b9-1ddb-594c-aa3c-2596fe9399a5"
version = "0.7.1"
[[deps.Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "bf210ce90b6c9eed32d25dbcae1ebc565df2687f"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.0.2"
[[deps.Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[deps.MosaicViews]]
deps = ["MappedArrays", "OffsetArrays", "PaddedViews", "StackViews"]
git-tree-sha1 = "b34e3bc3ca7c94914418637cb10cc4d1d80d877d"
uuid = "e94cdb99-869f-56ef-bcf0-1ae2bcbe0389"
version = "0.3.3"
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
[[deps.MuladdMacro]]
git-tree-sha1 = "c6190f9a7fc5d9d5915ab29f2134421b12d24a68"
uuid = "46d2c3a1-f734-5fdb-9937-b9b9aeba4221"
version = "0.2.2"
[[deps.NaNMath]]
git-tree-sha1 = "f755f36b19a5116bb580de457cda0c140153f283"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "0.3.6"
[[deps.NamedTupleTools]]
git-tree-sha1 = "63831dcea5e11db1c0925efe5ef5fc01d528c522"
uuid = "d9ec5142-1e00-5aa0-9d6a-321866360f50"
version = "0.13.7"
[[deps.NearestNeighbors]]
deps = ["Distances", "StaticArrays"]
git-tree-sha1 = "16baacfdc8758bc374882566c9187e785e85c2f0"
uuid = "b8a86587-4115-5ab1-83bc-aa920d37bbce"
version = "0.4.9"
[[deps.Netpbm]]
deps = ["FileIO", "ImageCore"]
git-tree-sha1 = "18efc06f6ec36a8b801b23f076e3c6ac7c3bf153"
uuid = "f09324ee-3d7c-5217-9330-fc30815ba969"
version = "1.0.2"
[[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
[[deps.OffsetArrays]]
deps = ["Adapt"]
git-tree-sha1 = "043017e0bdeff61cfbb7afeb558ab29536bbb5ed"
uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881"
version = "1.10.8"
[[deps.Ogg_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "887579a3eb005446d514ab7aeac5d1d027658b8f"
uuid = "e7412a2a-1a6e-54c0-be00-318e2571c051"
version = "1.3.5+1"
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
[[deps.OpenEXR]]
deps = ["Colors", "FileIO", "OpenEXR_jll"]
git-tree-sha1 = "327f53360fdb54df7ecd01e96ef1983536d1e633"
uuid = "52e1d378-f018-4a11-a4be-720524705ac7"
version = "0.3.2"
[[deps.OpenEXR_jll]]
deps = ["Artifacts", "Imath_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"]
git-tree-sha1 = "923319661e9a22712f24596ce81c54fc0366f304"
uuid = "18a262bb-aa17-5467-a713-aee519bc75cb"
version = "3.1.1+0"
[[deps.OpenLibm_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
[[deps.OpenSSL_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "648107615c15d4e09f7eca16307bc821c1f718d8"
uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95"
version = "1.1.13+0"
[[deps.OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
[[deps.OptionalData]]
git-tree-sha1 = "d047cc114023e12292533bb822b45c23cb51d310"
uuid = "fbd9d27c-2d1c-5c1c-99f2-7497d746985d"
version = "1.0.0"
[[deps.Opus_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "51a08fb14ec28da2ec7a927c4337e4332c2a4720"
uuid = "91d4177d-7536-5919-b921-800302f37372"
version = "1.3.2+0"
[[deps.OrderedCollections]]
git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.4.1"
[[deps.PCRE_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "b2a7af664e098055a7529ad1a900ded962bca488"
uuid = "2f80f16e-611a-54ab-bc61-aa92de5b98fc"
version = "8.44.0+0"
[[deps.PDMats]]
deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"]
git-tree-sha1 = "ee26b350276c51697c9c2d88a072b339f9f03d73"
uuid = "90014a1f-27ba-587c-ab20-58faa44d9150"
version = "0.11.5"
[[deps.PNGFiles]]
deps = ["Base64", "CEnum", "ImageCore", "IndirectArrays", "OffsetArrays", "libpng_jll"]
git-tree-sha1 = "6d105d40e30b635cfed9d52ec29cf456e27d38f8"
uuid = "f57f5aa1-a3ce-4bc8-8ab9-96f992907883"
version = "0.3.12"
[[deps.PaddedViews]]
deps = ["OffsetArrays"]
git-tree-sha1 = "03a7a85b76381a3d04c7a1656039197e70eda03d"
uuid = "5432bcbf-9aad-5242-b902-cca2824c8663"
version = "0.5.11"
[[deps.Parameters]]
deps = ["OrderedCollections", "UnPack"]
git-tree-sha1 = "34c0e9ad262e5f7fc75b10a9952ca7692cfc5fbe"
uuid = "d96e819e-fc66-5662-9728-84c9c7592b0a"
version = "0.12.3"
[[deps.Parsers]]
deps = ["Dates"]
git-tree-sha1 = "d7fa6237da8004be601e19bd6666083056649918"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.1.3"
[[deps.Pixman_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "b4f5d02549a10e20780a24fce72bea96b6329e29"
uuid = "30392449-352a-5448-841d-b1acce4e97dc"
version = "0.40.1+0"
[[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
[[deps.PkgVersion]]
deps = ["Pkg"]
git-tree-sha1 = "a7a7e1a88853564e551e4eba8650f8c38df79b37"
uuid = "eebad327-c553-4316-9ea0-9fa01ccd7688"
version = "0.1.1"
[[deps.PlotThemes]]
deps = ["PlotUtils", "Requires", "Statistics"]
git-tree-sha1 = "a3a964ce9dc7898193536002a6dd892b1b5a6f1d"
uuid = "ccf2f8ad-2431-5c83-bf29-c5338b663b6a"
version = "2.0.1"
[[deps.PlotUtils]]
deps = ["ColorSchemes", "Colors", "Dates", "Printf", "Random", "Reexport", "Statistics"]
git-tree-sha1 = "68604313ed59f0408313228ba09e79252e4b2da8"
uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043"
version = "1.1.2"
[[deps.Plots]]
deps = ["Base64", "Contour", "Dates", "Downloads", "FFMPEG", "FixedPointNumbers", "GR", "GeometryBasics", "JSON", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "PlotThemes", "PlotUtils", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs", "UnicodeFun", "Unzip"]
git-tree-sha1 = "db7393a80d0e5bef70f2b518990835541917a544"
uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
version = "1.25.6"
[[deps.PlutoUI]]
deps = ["AbstractPlutoDingetjes", "Base64", "ColorTypes", "Dates", "Hyperscript", "HypertextLiteral", "IOCapture", "InteractiveUtils", "JSON", "Logging", "Markdown", "Random", "Reexport", "UUIDs"]
git-tree-sha1 = "7711172ace7c40dc8449b7aed9d2d6f1cf56a5bd"
uuid = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
version = "0.7.29"
[[deps.Preferences]]
deps = ["TOML"]
git-tree-sha1 = "2cf929d64681236a2e074ffafb8d568733d2e6af"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.2.3"
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[deps.ProgressMeter]]
deps = ["Distributed", "Printf"]
git-tree-sha1 = "afadeba63d90ff223a6a48d2009434ecee2ec9e8"
uuid = "92933f4c-e287-5a05-a399-4b506db050ca"
version = "1.7.1"
[[deps.QOI]]
deps = ["ColorTypes", "FileIO", "FixedPointNumbers"]
git-tree-sha1 = "18e8f4d1426e965c7b532ddd260599e1510d26ce"
uuid = "4b34888f-f399-49d4-9bb3-47ed5cae4e65"
version = "1.0.0"
[[deps.Qt5Base_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Fontconfig_jll", "Glib_jll", "JLLWrappers", "Libdl", "Libglvnd_jll", "OpenSSL_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libxcb_jll", "Xorg_xcb_util_image_jll", "Xorg_xcb_util_keysyms_jll", "Xorg_xcb_util_renderutil_jll", "Xorg_xcb_util_wm_jll", "Zlib_jll", "xkbcommon_jll"]
git-tree-sha1 = "ad368663a5e20dbb8d6dc2fddeefe4dae0781ae8"
uuid = "ea2cea3b-5b76-57ae-a6ef-0a8af62496e1"
version = "5.15.3+0"
[[deps.QuadGK]]
deps = ["DataStructures", "LinearAlgebra"]
git-tree-sha1 = "78aadffb3efd2155af139781b8a8df1ef279ea39"
uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc"
version = "2.4.2"
[[deps.Quaternions]]
deps = ["DualNumbers", "LinearAlgebra"]
git-tree-sha1 = "adf644ef95a5e26c8774890a509a55b7791a139f"
uuid = "94ee1d12-ae83-5a48-8b1c-48b8ff168ae0"
version = "0.4.2"
[[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[deps.Random]]
deps = ["SHA", "Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[deps.RangeArrays]]
git-tree-sha1 = "b9039e93773ddcfc828f12aadf7115b4b4d225f5"
uuid = "b3c3ace0-ae52-54e7-9d0b-2c1406fd6b9d"
version = "0.3.2"
[[deps.Ratios]]
deps = ["Requires"]
git-tree-sha1 = "01d341f502250e81f6fec0afe662aa861392a3aa"
uuid = "c84ed2f1-dad5-54f0-aa8e-dbefe2724439"
version = "0.4.2"
[[deps.RecipesBase]]
git-tree-sha1 = "6bf3f380ff52ce0832ddd3a2a7b9538ed1bcca7d"
uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
version = "1.2.1"
[[deps.RecipesPipeline]]
deps = ["Dates", "NaNMath", "PlotUtils", "RecipesBase"]
git-tree-sha1 = "37c1631cb3cc36a535105e6d5557864c82cd8c2b"
uuid = "01d81517-befc-4cb6-b9ec-a95719d0359c"
version = "0.5.0"
[[deps.Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[deps.RegionTrees]]
deps = ["IterTools", "LinearAlgebra", "StaticArrays"]
git-tree-sha1 = "4618ed0da7a251c7f92e869ae1a19c74a7d2a7f9"
uuid = "dee08c22-ab7f-5625-9660-a9af2021b33f"
version = "0.3.2"
[[deps.RelocatableFolders]]
deps = ["SHA", "Scratch"]
git-tree-sha1 = "cdbd3b1338c72ce29d9584fdbe9e9b70eeb5adca"
uuid = "05181044-ff0b-4ac5-8273-598c1e38db00"
version = "0.1.3"
[[deps.RemoteFiles]]
deps = ["Dates", "FileIO", "HTTP"]
git-tree-sha1 = "54527375d877a64c55190fb762d584f927d6d7c3"
uuid = "cbe49d4c-5af1-5b60-bb70-0a60aa018e1b"
version = "0.4.2"
[[deps.Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.3.0"
[[deps.Revise]]
deps = ["CodeTracking", "Distributed", "FileWatching", "JuliaInterpreter", "LibGit2", "LoweredCodeUtils", "OrderedCollections", "Pkg", "REPL", "Requires", "UUIDs", "Unicode"]
git-tree-sha1 = "2f9d4d6679b5f0394c52731db3794166f49d5131"
uuid = "295af30f-e4ad-537b-8983-00126c2a3abe"
version = "3.3.1"
[[deps.Rmath]]
deps = ["Random", "Rmath_jll"]
git-tree-sha1 = "bf3188feca147ce108c76ad82c2792c57abe7b1f"
uuid = "79098fc4-a85e-5d69-aa6a-4863f24498fa"
version = "0.7.0"
[[deps.Rmath_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "68db32dff12bb6127bac73c209881191bf0efbb7"
uuid = "f50d1b31-88e8-58de-be2c-1cc44531875f"
version = "0.3.0+0"
[[deps.Rotations]]
deps = ["LinearAlgebra", "Quaternions", "Random", "StaticArrays", "Statistics"]
git-tree-sha1 = "2fa87d198bc5356c649b92109ed3ce46ee1eb89d"
uuid = "6038ab10-8711-5258-84ad-4b1120ba62dc"
version = "1.1.1"
[[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
[[deps.Scratch]]
deps = ["Dates"]
git-tree-sha1 = "0b4b7f1393cff97c33891da2a0bf69c6ed241fda"
uuid = "6c6a2e73-6563-6170-7368-637461726353"
version = "1.1.0"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[deps.Showoff]]
deps = ["Dates", "Grisu"]
git-tree-sha1 = "91eddf657aca81df9ae6ceb20b959ae5653ad1de"
uuid = "992d4aef-0814-514b-bc4d-f2e9a6c4116f"
version = "1.0.3"
[[deps.SimpleTraits]]
deps = ["InteractiveUtils", "MacroTools"]
git-tree-sha1 = "5d7e3f4e11935503d3ecaf7186eac40602e7d231"
uuid = "699a6c99-e7fa-54fc-8d76-47d257e15c1d"
version = "0.9.4"
[[deps.SimpleWeightedGraphs]]
deps = ["Graphs", "LinearAlgebra", "Markdown", "SparseArrays", "Test"]
git-tree-sha1 = "a6f404cc44d3d3b28c793ec0eb59af709d827e4e"
uuid = "47aef6b3-ad0c-573a-a1e2-d07658019622"
version = "1.2.1"
[[deps.Sixel]]
deps = ["Dates", "FileIO", "ImageCore", "IndirectArrays", "OffsetArrays", "REPL", "libsixel_jll"]
git-tree-sha1 = "8fb59825be681d451c246a795117f317ecbcaa28"
uuid = "45858cf5-a6b0-47a3-bbea-62219f50df47"
version = "0.1.2"
[[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[deps.SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "b3363d7460f7d098ca0912c69b082f75625d7508"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.0.1"
[[deps.SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[deps.SpecialFunctions]]
deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "e08890d19787ec25029113e88c34ec20cac1c91e"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "2.0.0"
[[deps.StackViews]]
deps = ["OffsetArrays"]
git-tree-sha1 = "46e589465204cd0c08b4bd97385e4fa79a0c770c"
uuid = "cae243ae-269e-4f55-b966-ac2d0dc13c15"
version = "0.1.1"
[[deps.Static]]
deps = ["IfElse"]
git-tree-sha1 = "7f5a513baec6f122401abfc8e9c074fdac54f6c1"
uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3"
version = "0.4.1"
[[deps.StaticArrays]]
deps = ["LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "2ae4fe21e97cd13efd857462c1869b73c9f61be3"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.3.2"
[[deps.Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[deps.StatsAPI]]
git-tree-sha1 = "d88665adc9bcf45903013af0982e2fd05ae3d0a6"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.2.0"
[[deps.StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "51383f2d367eb3b444c961d485c565e4c0cf4ba0"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.33.14"
[[deps.StatsFuns]]
deps = ["ChainRulesCore", "InverseFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"]
git-tree-sha1 = "bedb3e17cc1d94ce0e6e66d3afa47157978ba404"
uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c"
version = "0.9.14"
[[deps.StructArrays]]
deps = ["Adapt", "DataAPI", "StaticArrays", "Tables"]
git-tree-sha1 = "2ce41e0d042c60ecd131e9fb7154a3bfadbf50d3"
uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
version = "0.6.3"
[[deps.SuiteSparse]]
deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"]
uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9"
[[deps.TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
[[deps.TableTraits]]
deps = ["IteratorInterfaceExtensions"]
git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39"
uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c"
version = "1.0.1"
[[deps.Tables]]
deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "TableTraits", "Test"]
git-tree-sha1 = "bb1064c9a84c52e277f1096cf41434b675cd368b"
uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
version = "1.6.1"
[[deps.Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
[[deps.TensorCore]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "1feb45f88d133a655e001435632f019a9a1bcdb6"
uuid = "62fd8b95-f654-4bbd-a8a5-9c27f68ccd50"
version = "0.1.1"
[[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[deps.TiffImages]]
deps = ["ColorTypes", "DataStructures", "DocStringExtensions", "FileIO", "FixedPointNumbers", "IndirectArrays", "Inflate", "OffsetArrays", "PkgVersion", "ProgressMeter", "UUIDs"]
git-tree-sha1 = "991d34bbff0d9125d93ba15887d6594e8e84b305"
uuid = "731e570b-9d59-4bfa-96dc-6df516fadf69"
version = "0.5.3"
[[deps.TiledIteration]]
deps = ["OffsetArrays"]
git-tree-sha1 = "5683455224ba92ef59db72d10690690f4a8dc297"
uuid = "06e1c1a7-607b-532d-9fad-de7d9aa2abac"
version = "0.3.1"
[[deps.TranscodingStreams]]
deps = ["Random", "Test"]
git-tree-sha1 = "216b95ea110b5972db65aa90f88d8d89dcb8851c"
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
version = "0.9.6"
[[deps.URIs]]
git-tree-sha1 = "97bbe755a53fe859669cd907f2d96aee8d2c1355"
uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4"
version = "1.3.0"
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[deps.UnPack]]
git-tree-sha1 = "387c1f73762231e86e0c9c5443ce3b4a0a9a0c2b"
uuid = "3a884ed6-31ef-47d7-9d2a-63182c4928ed"
version = "1.0.2"
[[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[deps.UnicodeFun]]
deps = ["REPL"]
git-tree-sha1 = "53915e50200959667e78a92a418594b428dffddf"
uuid = "1cfade01-22cf-5700-b092-accc4b62d6e1"
version = "0.4.1"
[[deps.Unzip]]
git-tree-sha1 = "34db80951901073501137bdbc3d5a8e7bbd06670"
uuid = "41fe7b60-77ed-43a1-b4f0-825fd5a5650d"
version = "0.1.2"
[[deps.Wayland_jll]]
deps = ["Artifacts", "Expat_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg", "XML2_jll"]
git-tree-sha1 = "3e61f0b86f90dacb0bc0e73a0c5a83f6a8636e23"
uuid = "a2964d1f-97da-50d4-b82a-358c7fce9d89"
version = "1.19.0+0"
[[deps.Wayland_protocols_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "66d72dc6fcc86352f01676e8f0f698562e60510f"
uuid = "2381bf8a-dfd0-557d-9999-79630e7b1b91"
version = "1.23.0+0"
[[deps.WoodburyMatrices]]
deps = ["LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "de67fa59e33ad156a590055375a30b23c40299d3"
uuid = "efce3f68-66dc-5838-9240-27a6d6f5f9b6"
version = "0.5.5"
[[deps.XML2_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "Zlib_jll"]
git-tree-sha1 = "1acf5bdf07aa0907e0a37d3718bb88d4b687b74a"
uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a"
version = "2.9.12+0"
[[deps.XSLT_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgcrypt_jll", "Libgpg_error_jll", "Libiconv_jll", "Pkg", "XML2_jll", "Zlib_jll"]
git-tree-sha1 = "91844873c4085240b95e795f692c4cec4d805f8a"
uuid = "aed1982a-8fda-507f-9586-7b0439959a61"
version = "1.1.34+0"
[[deps.Xorg_libX11_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll", "Xorg_xtrans_jll"]
git-tree-sha1 = "5be649d550f3f4b95308bf0183b82e2582876527"
uuid = "4f6342f7-b3d2-589e-9d20-edeb45f2b2bc"
version = "1.6.9+4"
[[deps.Xorg_libXau_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "4e490d5c960c314f33885790ed410ff3a94ce67e"
uuid = "0c0b7dd1-d40b-584c-a123-a41640f87eec"
version = "1.0.9+4"
[[deps.Xorg_libXcursor_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXfixes_jll", "Xorg_libXrender_jll"]
git-tree-sha1 = "12e0eb3bc634fa2080c1c37fccf56f7c22989afd"
uuid = "935fb764-8cf2-53bf-bb30-45bb1f8bf724"
version = "1.2.0+4"
[[deps.Xorg_libXdmcp_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "4fe47bd2247248125c428978740e18a681372dd4"
uuid = "a3789734-cfe1-5b06-b2d0-1dd0d9d62d05"
version = "1.1.3+4"
[[deps.Xorg_libXext_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"]
git-tree-sha1 = "b7c0aa8c376b31e4852b360222848637f481f8c3"
uuid = "1082639a-0dae-5f34-9b06-72781eeb8cb3"
version = "1.3.4+4"
[[deps.Xorg_libXfixes_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"]
git-tree-sha1 = "0e0dc7431e7a0587559f9294aeec269471c991a4"
uuid = "d091e8ba-531a-589c-9de9-94069b037ed8"
version = "5.0.3+4"
[[deps.Xorg_libXi_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll", "Xorg_libXfixes_jll"]
git-tree-sha1 = "89b52bc2160aadc84d707093930ef0bffa641246"
uuid = "a51aa0fd-4e3c-5386-b890-e753decda492"
version = "1.7.10+4"
[[deps.Xorg_libXinerama_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll"]
git-tree-sha1 = "26be8b1c342929259317d8b9f7b53bf2bb73b123"
uuid = "d1454406-59df-5ea1-beac-c340f2130bc3"
version = "1.1.4+4"
[[deps.Xorg_libXrandr_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll"]
git-tree-sha1 = "34cea83cb726fb58f325887bf0612c6b3fb17631"
uuid = "ec84b674-ba8e-5d96-8ba1-2a689ba10484"
version = "1.5.2+4"
[[deps.Xorg_libXrender_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"]
git-tree-sha1 = "19560f30fd49f4d4efbe7002a1037f8c43d43b96"
uuid = "ea2f1a96-1ddc-540d-b46f-429655e07cfa"
version = "0.9.10+4"
[[deps.Xorg_libpthread_stubs_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "6783737e45d3c59a4a4c4091f5f88cdcf0908cbb"
uuid = "14d82f49-176c-5ed1-bb49-ad3f5cbd8c74"
version = "0.1.0+3"
[[deps.Xorg_libxcb_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "XSLT_jll", "Xorg_libXau_jll", "Xorg_libXdmcp_jll", "Xorg_libpthread_stubs_jll"]
git-tree-sha1 = "daf17f441228e7a3833846cd048892861cff16d6"
uuid = "c7cfdc94-dc32-55de-ac96-5a1b8d977c5b"
version = "1.13.0+3"
[[deps.Xorg_libxkbfile_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"]
git-tree-sha1 = "926af861744212db0eb001d9e40b5d16292080b2"
uuid = "cc61e674-0454-545c-8b26-ed2c68acab7a"
version = "1.1.0+4"
[[deps.Xorg_xcb_util_image_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"]
git-tree-sha1 = "0fab0a40349ba1cba2c1da699243396ff8e94b97"
uuid = "12413925-8142-5f55-bb0e-6d7ca50bb09b"
version = "0.4.0+1"
[[deps.Xorg_xcb_util_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll"]
git-tree-sha1 = "e7fd7b2881fa2eaa72717420894d3938177862d1"
uuid = "2def613f-5ad1-5310-b15b-b15d46f528f5"
version = "0.4.0+1"
[[deps.Xorg_xcb_util_keysyms_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"]
git-tree-sha1 = "d1151e2c45a544f32441a567d1690e701ec89b00"
uuid = "975044d2-76e6-5fbe-bf08-97ce7c6574c7"
version = "0.4.0+1"
[[deps.Xorg_xcb_util_renderutil_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"]
git-tree-sha1 = "dfd7a8f38d4613b6a575253b3174dd991ca6183e"
uuid = "0d47668e-0667-5a69-a72c-f761630bfb7e"
version = "0.3.9+1"
[[deps.Xorg_xcb_util_wm_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"]
git-tree-sha1 = "e78d10aab01a4a154142c5006ed44fd9e8e31b67"
uuid = "c22f9ab0-d5fe-5066-847c-f4bb1cd4e361"
version = "0.4.1+1"
[[deps.Xorg_xkbcomp_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxkbfile_jll"]
git-tree-sha1 = "4bcbf660f6c2e714f87e960a171b119d06ee163b"
uuid = "35661453-b289-5fab-8a00-3d9160c6a3a4"
version = "1.4.2+4"
[[deps.Xorg_xkeyboard_config_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xkbcomp_jll"]
git-tree-sha1 = "5c8424f8a67c3f2209646d4425f3d415fee5931d"
uuid = "33bec58e-1273-512f-9401-5d533626f822"
version = "2.27.0+4"
[[deps.Xorg_xtrans_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "79c31e7844f6ecf779705fbc12146eb190b7d845"
uuid = "c5fb5394-a638-5e4d-96e5-b29de1b5cf10"
version = "1.4.0+3"
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
[[deps.Zstd_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "cc4bf3fdde8b7e3e9fa0351bdeedba1cf3b7f6e6"
uuid = "3161d3a3-bdf6-5164-811a-617609db77b4"
version = "1.5.0+0"
[[deps.libass_jll]]
deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "HarfBuzz_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"]
git-tree-sha1 = "5982a94fcba20f02f42ace44b9894ee2b140fe47"
uuid = "0ac62f75-1d6f-5e53-bd7c-93b484bb37c0"
version = "0.15.1+0"
[[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl", "OpenBLAS_jll"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
[[deps.libfdk_aac_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "daacc84a041563f965be61859a36e17c4e4fcd55"
uuid = "f638f0a6-7fb0-5443-88ba-1cc74229b280"
version = "2.0.2+0"
[[deps.libpng_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"]
git-tree-sha1 = "94d180a6d2b5e55e447e2d27a29ed04fe79eb30c"
uuid = "b53b4c65-9356-5827-b1ea-8c7a1a84506f"
version = "1.6.38+0"
[[deps.libsixel_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "78736dab31ae7a53540a6b752efc61f77b304c5b"
uuid = "075b6546-f08a-558a-be8f-8157d0f608a5"
version = "1.8.6+1"
[[deps.libvorbis_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Ogg_jll", "Pkg"]
git-tree-sha1 = "c45f4e40e7aafe9d086379e5578947ec8b95a8fb"
uuid = "f27f6e37-5d2b-51aa-960f-b287f2bc3b7a"
version = "1.3.7+0"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
[[deps.x264_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "4fea590b89e6ec504593146bf8b988b2c00922b2"
uuid = "1270edf5-f2f9-52d2-97e9-ab00b5d0237a"
version = "2021.5.5+0"
[[deps.x265_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "ee567a171cce03570d77ad3a43e90218e38937a9"
uuid = "dfaa095f-4041-5dcd-9319-2fabd8486b76"
version = "3.5.0+0"
[[deps.xkbcommon_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Wayland_jll", "Wayland_protocols_jll", "Xorg_libxcb_jll", "Xorg_xkeyboard_config_jll"]
git-tree-sha1 = "ece2350174195bb31de1a63bea3a41ae1aa593b6"
uuid = "d8fb68d0-12a3-5cfd-a85a-d49703b185fd"
version = "0.9.1+5"
"""
# ╔═╡ Cell order:
# ╟─872e19d4-c071-40bb-a091-22e48e85e2a6
# ╠═1793f5fa-7f30-4a1d-baef-b06436e1fc71
# ╟─ce86c343-b6e4-4833-935f-ee0392d1ee89
# ╟─79f416a6-7c52-4d57-8bdd-1f84dc04d7e8
# ╠═60c8b78b-1b70-42d0-8fe6-7e40b0cdd4a2
# ╟─124025b9-f7de-4395-a5d3-1dc6ed4a67f7
# ╠═9da00dbb-c645-4bb9-a26c-5f148efb36cd
# ╟─800b91ae-d6c6-479f-afd7-d07d7b207cd3
# ╠═f1ef0015-d671-450f-80cf-dc6651460998
# ╟─c179bd9e-e392-4622-92a5-d64f442e2840
# ╟─dde3ae5c-c51d-4efd-a824-3e360981a228
# ╟─593a177f-bf0b-4b05-9408-745793ab2536
# ╟─14b4f565-2f8d-4e70-a79c-8926db3b0ca7
# ╟─3f38e8ca-2286-427f-8816-3b8b6cc78c74
# ╟─36bc055e-3b5b-41a0-863a-53b78a6328d9
# ╟─30c618b6-bd58-4335-bc55-23c16317011d
# ╟─596e2d59-203c-4e69-985b-f8a82624ef6c
# ╠═465e92c8-1004-47d6-ac4e-69172afad2b0
# ╠═e8d619bc-e37d-437a-96fb-0995aed2f823
# ╟─9dd26db3-e443-46f3-8e18-21eb37b4d5b6
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 790 | using PlanetOrbits, Distributions, DataFrames
# Template orbit from which the synthetic astrometry is generated.
orb_template = orbit(
    M   = 1.5,
    a   = 7.6,
    i   = deg2rad(30),
    e   = 0.6,
    Ω   = 0.7,
    ω   = 1.4,
    tp  = 0.,
    plx = 100.0,
)
# Observation epochs (MJD), first listed explicitly...
epoch = [
    mjd("2020-01-01"),
    mjd("2021-01-01"),
    mjd("2022-01-01"),
]
# ...then replaced by an equivalent yearly range.
epoch = range(start=mjd("2020-01-01"), step=365, length=3)
# Evaluate the template orbit at every epoch to build the astrometry table.
astrom = DataFrame(;
    epoch,
    ra  = raoff.(orb_template, epoch),
    dec = decoff.(orb_template, epoch),
    # Alternatively, position angle / separation columns instead of ra/dec:
    # pa=posangle.(orb_template, epoch),# .+ σ_pa .* randn.(),
    # sep=projectedseparation.(orb_template, epoch) .+ σ_sep .* randn.(),
)
# The table displays automatically at a REPL so values can be copied from there.
## Optional: save to file
using CSV
CSV.write("astrometry.csv", astrom)
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 2412 | """
This extension package adds rule support for Enzyme.
There are a few useful rules we can define for the sake of performance
but the most important thing is a rule for the Kepler solver.
"""
module PlanetOrbitsEnzymeExt
# Extension module hooking PlanetOrbits into Enzyme's custom-rule machinery.
# At present it only loads the two packages: every rule below is a
# commented-out sketch kept as a starting point for future work. The most
# important rule to add would be one for the iterative Kepler solver, whose
# derivative has the closed form dE/dM = 1/(1 - e*cos(E)) once the primal
# eccentric anomaly E is known (see PlanetOrbitsForwardDiffExt elsewhere in
# this package for the equivalent ForwardDiff rules).
using PlanetOrbits, Enzyme
# NOTE(review): the sketch below writes cos(Ω)/sin(Ω); the implicit
# derivative of Kepler's equation is in terms of the eccentric anomaly E,
# not the longitude of ascending node Ω — confirm before enabling.
# @scalar_rule kepler_solver(M, e) @setup(u = 1 - e*cos(Ω)) (1 / u,sin(Ω) / u)
# These could be added to improve performance. The units would have to be adapted.
# @scalar_rule raoff(o::OrbitSolution) pmra(o)
# @scalar_rule decoff(o::OrbitSolution) pmdec(o)
# @scalar_rule propmotionanom(o::OrbitSolution) acceleration(o)
# @scalar_rule orbitsolve_ν(elem::VisualOrbit{T}, t; tref=58849)
# Sketches of EnzymeRules reverse-mode rules for kepler_solver:
# EnzymeRules.reverse(::EnzymeRules.Config, func::EnzymeRules.Annotation{typeof(PlanetOrbits.kepler_solver)}, dret::Active, tape, args::EnzymeRules.Annotation...) = 1
# function EnzymeRules.reverse(
#     # config::EnzymeRules.ConfigWidth{1},
#     config,
#     # func::typeof(PlanetOrbits.kepler_solver)
#     func,
#     args...
#     # ::Type{<:Active},
#     # ::Any,
#     # ma::Active,
#     # e::Active
# )
#     @show config func ma e
#     if needs_primal(config)
#         return AugmentedReturn(func.val(x.val),nothing, nothing)
#     else
#         return AugmentedReturn(nothing, nothing, nothing)
#     end
# end
# function EnzymeRules.reverse(
#     conf::Config,
#     func::Annotation{typeof(f)},
#     ann::Type{<:Annotation},
#     tape,
#     args::Annotation...
# )
#     @show conf func ann tape args
# end
# Sketches of forward-mode rules covering the Duplicated/BatchDuplicated
# activity combinations:
# function forward(::Const{typeof(PlanetOrbits.kepler_solver)}, ::Type{<:DuplicatedNoNeed}, x::Duplicated)
#     return 10+2*x.val*x.dval
# end
# function forward(::Const{typeof(PlanetOrbits.kepler_solver)}, ::Type{<:BatchDuplicatedNoNeed}, x::BatchDuplicated{T, N}) where {T, N}
#     return NTuple{N, T}(1000+2*x.val*dv for dv in x.dval)
# end
# function forward(func::Const{typeof(PlanetOrbits.kepler_solver)}, ::Type{<:Duplicated}, x::Duplicated)
#     return Duplicated(func.val(x.val), 100+2*x.val*x.dval)
# end
# function forward(func::Const{typeof(PlanetOrbits.kepler_solver)}, ::Type{<:BatchDuplicated}, x::BatchDuplicated{T, N}) where {T,N}
#     return BatchDuplicated(func.val(x.val), NTuple{N, T}(10000+2*x.val*dv for dv in x.dval))
# end
# function forward(::Const{Core.typeof(f_ip)}, ::Type{<:Const}, x::Duplicated)
#     ld = x.val[1]
#     x.val[1] *= ld
#     x.dval[1] *= 2 * ld + 10
#     return nothing
# end
end
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 2544 | module PlanetOrbitsForwardDiffExt
using PlanetOrbits
using ForwardDiff: ForwardDiff, Dual, value, partials
using Roots
# Current primal value takes 60ns to calculate.
# Derivative tracing through function takes 94ns.
# This seems too slow! Once the primal is known, we should only
# need a single sin or cos and some division/multiplication.
# Hacky: we have an annoying method ambiguity between ::Dual being specific and ::SomeSolver being specific.
# the compiler doesn't know which one to choose! We want it to choose these ones.
# The best way I have found so far is to iterate at compile time through all subtypes and define gradient
# rules for each. This won't work for new solvers added by a user.
"""
    define_partials_for_solver(T::Symbol)

For the solver type `PlanetOrbits.T` (e.g. `:Goat`, `:RootsMethod`), define
methods of `PlanetOrbits.kepler_solver` that accept ForwardDiff `Dual`
numbers for the mean anomaly `M` and/or the eccentricity `e`.

Rather than tracing derivatives through the solver iterations, each method
computes the primal eccentric anomaly `EA` once and then applies the
closed-form implicit derivatives of Kepler's equation M = EA - e*sin(EA):

    ∂EA/∂M = 1 / (1 - e*cos(EA))
    ∂EA/∂e = sin(EA) / (1 - e*cos(EA))

NOTE(review): these formulas assume a bound orbit (e < 1); the `@error`
branches below flag the unsupported hyperbolic case rather than throwing.
"""
function define_partials_for_solver(T::Symbol)
    # Case 1: only M carries derivative information -> propagate ∂/∂M.
    @eval function PlanetOrbits.kepler_solver(M::Dual{T}, e::Real, method::PlanetOrbits.$T) where T
        if value(e) >= 1
            @error "diff rules need to be updated for unbound orbits. Review implicit derivative of hyperbolic keplers eqn."
        end
        EA = PlanetOrbits.kepler_solver(value(M),e,method)
        # Denominator of both implicit derivatives: 1 - e*cos(EA).
        temp = 1 - e*cos(EA)
        return Dual{T}(
            EA,
            partials(M)/temp
        )
    end
    # Case 2: only e carries derivative information -> propagate ∂/∂e.
    @eval function PlanetOrbits.kepler_solver(M::Real, e::Dual{T}, method::PlanetOrbits.$T) where T
        if value(e) >= 1
            @error "diff rules need to be updated for unbound orbits. Review implicit derivative of hyperbolic keplers eqn."
        end
        EA = PlanetOrbits.kepler_solver(M,value(e),method)
        sea, cea = sincos(EA)
        temp = 1 - value(e)*cea
        return Dual{T}(
            EA,
            partials(e)*sea/temp
        )
    end
    # Case 3: both M and e are Duals with the same tag -> sum both contributions.
    @eval function PlanetOrbits.kepler_solver(M::Dual{T}, e::Dual{T}, method::PlanetOrbits.$T) where T
        if value(e) >= 1
            @error "diff rules need to be updated for unbound orbits. Review implicit derivative of hyperbolic keplers eqn."
        end
        EA = PlanetOrbits.kepler_solver(value(M),value(e),method)
        sea, cea = sincos(EA)
        temp = 1 - value(e)*cea
        return Dual{T}(
            EA,
            partials(M)/temp + partials(e)*sea/temp,
        )
    end
end
define_partials_for_solver(:Goat)
define_partials_for_solver(:RootsMethod)
# define_partials_for_solver(:Markley)
# Shocker! Currently it's faster to diff through the Markley algorithm than it is to run it and then compute
# a single `sincos` call. SIMD is amazing!
# We leave this implementation here for the future in case these performance tradeoffs change.
end | PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 573 | #=
This file contains plot recipes for the Makie.jl
ecosystem. This way you can do e.g.:
lines(elems)
=#
module PlanetOrbitsMakieExt
using PlanetOrbits, Makie
function Makie.convert_single_argument(elem::AbstractOrbit)
    # We trace out in equal steps of eccentric anomaly instead of time for a
    # smooth curve, regardless of eccentricity.
    npoints = 90
    eccanoms = range(-2π, 2π, length=npoints)
    solns = orbitsolve_eccanom.(elem, eccanoms)
    # (A stale, unused `νs = range(-π, π, length=90)` local was removed here.)
    return map(solns) do sol
        # Project each solution onto the sky plane [mas offsets].
        return Makie.Point2f(raoff(sol), decoff(sol))
    end
end
end | PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 25627 | """
# PlanetOrbits
A package for calculating orbits in the context of direct imaging,
astrometry, and radial velocity.
"""
module PlanetOrbits
# ---------------------------------------------------
# Imports
# ---------------------------------------------------
using LinearAlgebra
using StaticArrays
# ---------------------------------------------------
# Constants
# ---------------------------------------------------
# radians <-> milliarcseconds
const rad2mas = 2.06264806e8
const mas2rad = 4.848136816903219e-9
# radians <-> arcseconds
const rad2as = 206265
const as2rad = 4.848132257047972e-6
# parsecs <-> astronomical units
const pc2au = 206265
const au2pc = 1/pc2au
# astronomical units <-> metres (IAU definition)
const au2m = 1.495978707e11
const m2au = 1/au2m
# days <-> seconds
const day2sec = 86400
const sec2day = 1/day2sec
# years <-> days
const year2day_tropical = 365.2422
const day2year_tropical = 1/year2day_tropical
const year2day_julian = 365.2500 # IAU definition
const day2year_julian = 1/year2day_julian
# years <-> seconds. Tropical definition, ie on earth on average
const year2sec_tropical = 3.1556926e7
const sec2year_tropical = 1/year2sec_tropical
# years <-> seconds. IAU defintion, ie using Julian years
const year2sec_julian = year2day_julian*day2sec
const sec2year_julian = 1/year2sec_julian
# Exact "unit" definition of a jupiter mass, in terms of IAU solar mass.
const mjup2msol_IAU = 1.2668653e17/1.3271244e20 # 0.0009545942339693249
# This constant accounts for the fact that the IAU definition of an AU and a solar mass
# do not result in an orbital period of one Julian year.
# From Gilles Otten (thank you for tracking this down!):
# If G*M_sun has been determined as 1.3271244 × 1e20 m^3*s^−2 and 1 AU is 149 597 870 700 meter
# by definition then a hypothetical planet around a 1 M_sun system at a semimajor axis of the
# definition of 1 AU has a period of sqrt(4*pi^2/(1.3271244e20)*(149597870700)^3)/86400=
# 365.2568983840419 julian days
const kepler_year_to_julian_day_conversion_factor = 365.2568983840419 # julian days
# ---------------------------------------------------
# Type Hierarchy
# ---------------------------------------------------
"""
AbstractOrbit
Represents a orbit. Contains all the information to
calculate the location of a planet at a given time,
true anomaly, eccentric anomaly, or mean anomaly.
Different concrete implementations of AbstractOrbit
contain varying amounts of information.
Basic information about the orbit can be queried using
functions like `period(orbit)`.
Orbits can be solved using functions like `orbitsolve(orb)`.
See: `RadialVelocityOrbit`, `KepOrbit`, `VisualOrbit`
"""
abstract type AbstractOrbit{T} end
export AbstractOrbit
"""
AbstractOrbitSolution
Represents the solution of an orbit. Contains all
the information of an AbstractOrbit, plus information
necessary to uniquely locate a planet.
The solution can be queried using a variety of functions
such as `radvel(solution)`.
The API for creating orbit solutions it not considered public
as the fields may change between minor versions. Instead,
create solutions only through the public `orbitsolve` and
`orbitsolve_...` functions.
"""
abstract type AbstractOrbitSolution end
export AbstractOrbitSolution
# Return the orbit solution type for a given orbit type.
# Register for each orbit type.
function _solution_type end
_solution_type(o::Any) = _solution_type(typeof(o))
# ---------------------------------------------------
# System Properties
# ---------------------------------------------------
"""
period(orbit)
Period of an orbit [days].
Note: a 1 AU (IAU) orbit around a 1Msun (IAU) star has a period just over 1 julian year.
"""
function period end
export period
"""
totalmass(orbit)
Total mass of the system in solar masses
"""
function totalmass end
export totalmass
"""
distance(orbit)
Distance to the system [pc].
"""
function distance end
export distance
"""
meanmotion(orbit)
Mean motion [rad/julian year].
Note: a 1 AU (IAU) orbit around a 1Msun (IAU) star has a period just over 1 julian year.
"""
function meanmotion end
export meanmotion
"""
eccentricity(orbit)
Eccentricity of an orbit, between 0 and 1.
"""
function eccentricity end
export eccentricity
"""
inclination(orbit)
Inclination of an orbit, if available [rad].
"""
function inclination end
export inclination
"""
semimajoraxis(orbit)
Semi-major axis of an orbit, if available [au].
"""
function semimajoraxis end
export semimajoraxis
"""
periastron(elements)
Compute the MJD of periastron passage most recently after the reference epoch tref specified in the orbit.
N.B. mjd of 58849 = 2020-01-01
"""
function periastron end
export periastron
"""
semiamplitude(orbit)
Radial velocity semiamplitude [m/s].
"""
function semiamplitude end
export semiamplitude
# ---------------------------------------------------
# Solve Orbit in Cartesian Coordinates
# ---------------------------------------------------
"""
orbitsolve(orbit, t, method=Auto())
Given an orbit object and a time `t` in days, get the position and
velocity of the secondary body (e.g. planet around a star).
This will output a struct that is a subtype of `AbstractOrbitSolution` which
we can then query with `raoff`, `decoff`, `radvel`, etc.
You can also calculate those quanitities individually (see their docstrings)
but if you need more than one, it is most efficient to save the orbit solution
once.
Note: these calculations use the small angle approximation, so are only accurate when
the star is much further way from the observer than the secondary is from the primary.
See also: `orbitsolve_ν`, `orbitsolve_meananom`, `orbitsolve_eccanom`, `projectedseparation`, `raoff`, `decoff`, `radvel`, `propmotionanom`.
"""
function orbitsolve end
export orbitsolve, orbitsolve_ν, orbitsolve_meananom, orbitsolve_eccanom
# ---------------------------------------------------
# Orbital Position and Motion
# ---------------------------------------------------
"""
raoff(orbit, t)
Get the offset [mas] from the primary body in Right Ascension
at the time `t` [days].
raoff(o)
Get the offset [mas] from the primary body in Right Ascension
from an instance of `AbstractOrbitSolution`.
"""
function raoff end
export raoff
"""
decoff(orbit, t)
Get the offset [mas] from the primary body in Declination
at the time `t` [days].
decoff(orbit, t)
Get the offset [mas] from the primary body in Declination
from an instance of `AbstractOrbitSolution`.
"""
function decoff end
export decoff
"""
posx(orbit, t)
Get the offset [AU] from the primary body at the time `t` [days].
posx(orbit, t)
Same as above, but from an instance of `AbstractOrbitSolution`.
"""
function posx end
"""
posy(orbit, t)
Get the offset [AU] from the primary body at the time `t` [days].
posy(o)
Same as above, but from an instance of `AbstractOrbitSolution`.
"""
function posy end
"""
posz(orbit, t)
Get the offset [AU] from the primary body at the time `t` [days].
posz(o)
Same as above, but from an instance of `AbstractOrbitSolution`.
"""
function posz end
"""
posangle(orbit, t)
Calculate the position angle [rad] of the secondary about its primary
from our perspective at the time `t` [days].
posangle(o)
Calculate the position angle [rad] of the secondary about its primary
from our perspective from an instance of `AbstractOrbitSolution`.
posangle(elem, t, M_planet)
Calculate the position angle [rad] of the secondary about its primary
from our perspective at the time `t` [days].
In this case only, the value of M_planet can be arbitrary.
posangle(o, M_planet)
Calculate the position angle [rad] of the **primary**
from our perspective from an instance of `AbstractOrbitSolution`.
In this case only, the value of M_planet can be arbitrary.
"""
function posangle(o::AbstractOrbitSolution)
    # Position angle [rad] of the secondary about its primary.
    # atan(x, y) — this argument order is *correct* in this package's
    # conventions (angle measured from north towards east).
    return atan(posx(o), posy(o))
end
export posangle
"""
projectedseparation(orbit, t)
Calculate the projected separation [mas] of the secondary from its
primary at the time `t` [days].
projectedseparation(o)
Calculate the projected separation [mas] of the secondary from its
primary from an instance of `AbstractOrbitSolution`.
"""
function projectedseparation(o::AbstractOrbitSolution)
    # Euclidean norm of the on-sky offsets [mas].
    # `hypot` replaces sqrt(x^2 + y^2): same value, but immune to
    # intermediate overflow/underflow for extreme offsets.
    return hypot(raoff(o), decoff(o))
end
export projectedseparation
"""
pmra(orbit, t)
Get the instantaneous proper motion anomaly [mas/julian year] in right-ascension of
the *secondary* at the time `t` [days].
pmra(o)
Get the instantaneous proper motion anomaly [mas/julian year] in right-ascension of
the *secondary* from an instance of `AbstractOrbitSolution`.
pmra(elem, t, M_planet)
Get the instantaneous proper motion anomaly [mas/julian year] in right-ascension of
the *primary* in at the time `t` [days]. The units of `M_planet`
and `elem.M` must match.
pmra(o, M_planet)
Same as above, but from an orbit solution.
"""
function pmra end
"""
pmdec(orbit, t)
Get the instantaneous proper motion anomaly [mas/julian year] in declination of
the *secondary* at the time `t` [days].
pmdec(o)
Get the instantaneous proper motion anomaly [mas/julian year] in declination of
the *secondary* from an instance of `AbstractOrbitSolution`.
pmdec(elem, t, M_planet)
Get the instantaneous proper motion anomaly [mas/julian year] in declination of
the *primary* in at the time `t` [days]. The units of `M_planet`
and `elem.M` must match.
pmdec(o, M_planet)
Same as above, but from an orbit solution.
"""
function pmdec end
export pmra, pmdec
function ra end
function dec end
"""
radvel(orbit, t)
Get the relative radial velocity [m/s] of the
*secondary* vs the *primary* along the line of
sight (positive meaning moving away) at the time `t` [days].
radvel(o)
Get the relative radial velocity [m/s] of the
*secondary* vs the *primary* along the line of
sight (positive meaning moving away) from an instance of `AbstractOrbitSolution`.
radvel(elem, t, M_planet)
Get the absolute radial velocity [m/s] of the
*primary* long the line of
sight (positive meaning moving away) at the time `t` [days]. The units of `M_planet`
and `elem.M` must match.
radvel(o, M_planet)
Get the absolute radial velocity [m/s] of the *primary* along the
line of sight (positive meaning moving away) from an `AbstractOrbitSolution`. The units of `M_planet`
and `elem.M` must match.
"""
function radvel end
export radvel
"""
accra(orbit, t)
Get the instantaneous acceleration [mas/julian year^2] in the right-ascension direction of
the *secondary* at the time `t` [days].
accra(o)
Get the instantaneous acceleration [mas/julian year^2] in the right-ascension direction of
the *secondary* from an instance of `AbstractOrbitSolution`.
accra(elem, t, M_planet)
Get the instantaneous acceleration [mas/julian year^2] in the right-ascension direction of
the *primary* in at the time `t` [days]. The units of `M_planet`
and `elem.M` must match.
accra(o)
Get the instantaneous acceleration [mas/julian year^2] in the right-ascension direction of
the *primary* from an instance of `AbstractOrbitSolution`. The units of
`M_planet` and `elem.M` must match.
"""
function accra end
"""
accdec(orbit, t)
Get the instantaneous acceleration [mas/julian year^2] in the right-ascension direction of
the *secondary* at the time `t` [days].
accdec(o)
Get the instantaneous acceleration [mas/julian year^2] in the right-ascension direction of
the *secondary* from an instance of `AbstractOrbitSolution`.
accdec(elem, t, M_planet)
Get the instantaneous acceleration [mas/julian year^2] in the right-ascension direction of
the *primary* in at the time `t` [days]. The units of `M_planet`
and `elem.M` must match.
accdec(o)
Get the instantaneous acceleration [mas/julian year^2] in the right-ascension direction of
the *primary* from an instance of `AbstractOrbitSolution`. The units of
`M_planet` and `elem.M` must match.
"""
function accdec end
export accra, accdec
"""
trueanom(orbit, t)
Get the true anomaly [radians] of the *secondary*
at the time `t` [days].
trueanom(o)
Get the true anomaly [radians] of the *secondary*
from an instance of `AbstractOrbitSolution`.
"""
trueanom(os::AbstractOrbitSolution) = os.ν
trueanom(os::AbstractOrbitSolution, mass::Number) = trueanom(os) # Same for primary and secondary
"""
eccanom(orbit, t)
Get the eccentric anomaly [radians] of the *secondary*
at the time `t` [days].
eccanom(o)
Get the eccentric anomaly [radians] of the *secondary*
from an instance of `AbstractOrbitSolution`.
Note that for hyperbolic orbits, eccentric anomaly is not defined and the hyperbolic anomaly is returned instead.
"""
eccanom(os::AbstractOrbitSolution) = os.EA
eccanom(os::AbstractOrbitSolution, mass::Number) = eccanom(os) # Same for primary and secondary
"""
meananom(orbit, t)
Get the mean anomaly [radians] of the *secondary*
at the time `t` [days].
meananom(o)
Get the mean anomaly [radians] of the *secondary*
from an instance of `AbstractOrbitSolution`.
"""
function meananom(os::AbstractOrbitSolution)
    # Kepler's equation, applied in the appropriate regime:
    #   bound   (e < 1):  M = E - e*sin(E)
    #   unbound (e ≥ 1):  M = e*sinh(H) - H   (H = hyperbolic anomaly)
    EA = eccanom(os)
    e = os.elem.e
    return e < 1 ? EA - e * sin(EA) : e * sinh(EA) - EA
end
meananom(os::AbstractOrbitSolution, mass::Number) = meananom(os) # Same for primary and secondary
export trueanom, eccanom, meananom
"""
periapsis(orbit)
Return the periapsis of an orbit in AU.
Keywords: periastron, perihelion, perigee
"""
function periapsis(o::AbstractOrbit)
    # Closest approach distance [AU]: a(1-e) for bound orbits, written with
    # the sign made explicit, -a(e-1), for the unbound case.
    a = semimajoraxis(o)
    e = eccentricity(o)
    return e < 1 ? a * (1 - e) : -a * (e - 1)
end
"""
apoapsis(orbit)
Return the apoapsis of an orbit in AU.
Keywords: apoastron, apohelion, apogee
"""
function apoapsis(o::AbstractOrbit)
    # Farthest distance [AU]: a(1+e) for bound orbits; sign flipped for the
    # unbound case per this package's hyperbolic-orbit convention.
    a = semimajoraxis(o)
    e = eccentricity(o)
    return e < 1 ? a * (1 + e) : -a * (1 + e)
end
"""
semiminoraxis(orbit)
Return the semi-minor axis of an orbit in AU.
"""
function semiminoraxis(o::AbstractOrbit)
    # b = a√(1-e²) for an ellipse; b = a√(e²-1) for a hyperbola.
    a = semimajoraxis(o)
    e = eccentricity(o)
    return e < 1 ? a * sqrt(1 - e^2) : a * sqrt(e^2 - 1)
end
export periapsis, apoapsis, semiminoraxis
# Internal function used by each orbit type to map mean anomaly to true anomaly
function _trueanom_from_eccanom end
# Define iterate and length = 1 so that we can broadcast over elements.
Base.length(::AbstractOrbit) = 1
Base.iterate(elem::AbstractOrbit) = (elem, nothing)
Base.iterate(::AbstractOrbit, ::Nothing) = nothing
# ---------------------------------------------------
# Kepler Equation Solvers
# ---------------------------------------------------
abstract type AbstractSolver end
"""
PlanetOrbits.Auto()
Automatic choice of Kepler solver algorithm.
Currently defaults to PlanetOrbits.Markley()
"""
struct Auto <: AbstractSolver end
include("kepsolve-goat.jl")
include("kepsolve-markley.jl")
"""
PlanetOrbits.RootsMethod(method::Roots.PlanetOrbits.Roots.AbstractUnivariateZeroMethod, kwargs...)
Wraps a root finding method from Roots.jl. Requires Roots to be loaded first.
You can also pass keyword arguments that will be forwarded to Roots to control
the tolerance.
Examples:
```julia
method = PlanetOrbits.RootsMethod(Roots.Newton())
method = PlanetOrbits.RootsMethod(Roots.Thukral5B())
method = PlanetOrbits.RootsMethod(Roots.Bisection())
method = PlanetOrbits.RootsMethod(Roots.A42())
method = PlanetOrbits.RootsMethod(Roots.Newton(), rtol=1e-3, atol=1e-3)
```
"""
struct RootsMethod{M,K} <: AbstractSolver
method::M
kwargs::K
end
RootsMethod(method; kwargs...) = RootsMethod(method, kwargs)
include("kepsolve-roots.jl")
# Fallback kepler solver function.
# If algorithm is unspecified, select the best one here.
# With no method argument, defer to the automatic selection below.
kepler_solver(MA, e) = kepler_solver(MA, e, Auto())
function kepler_solver(MA, e, ::Auto)
    if e < 1
        # Bound orbits: Markley's non-iterative solver.
        kepler_solver(MA, e, Markley())
    else
        # Unbound orbits: iterative root finding on the hyperbolic equation.
        # Halley() converged slightly faster than Newton() for hyperbolic orbits
        kepler_solver(MA, e, RootsMethod(Roots.Halley()))
        # kepler_solver(MA, e, RootsMethod(Roots.Bisection()))
    end
end
# Solve the orbit at time `t` [days]: time -> mean anomaly -> eccentric
# anomaly (Kepler's equation, via `method`) -> true anomaly -> full solution.
function orbitsolve(elem::AbstractOrbit, t, method::AbstractSolver=Auto())
    # Epoch of periastron passage
    tₚ = periastron(elem)
    # Promote integer times so that `oftype(t, year2day_julian)` below
    # produces a floating-point constant rather than truncating.
    if t isa Integer
        t = float(t)
    end
    # Mean anomaly
    MA = meanmotion(elem)/oftype(t, year2day_julian) * (t - tₚ)
    # Compute eccentric anomaly
    EA = kepler_solver(MA, eccentricity(elem), method)
    # Calculate true anomaly
    ν = _trueanom_from_eccanom(elem, EA)
    return orbitsolve_ν(elem, ν, EA, t) # optimization: Don't have to recalculate EA and t.
end
"""
orbitsolve_ν(elem, ν, EA)
Solve an orbit from a given true anomaly [rad].
See `orbitsolve` for the same function accepting a given time.
Can optionally pass eccentric anomaly (EA) if already computed.
"""
function orbitsolve_ν end
"""
orbitsolve_meananom(elements, MA)
Same as `orbitsolve`, but solves orbit for a given mean anomaly instead of time.
"""
function orbitsolve_meananom(elem::AbstractOrbit, MA)
    # Compute eccentric anomaly
    EA = kepler_solver(MA, eccentricity(elem))
    # Calculate true anomaly
    # NOTE(review): this inlines the elliptical EA->ν mapping via `elem.ν_fact`,
    # whereas `orbitsolve_eccanom` uses the generic `_trueanom_from_eccanom`.
    # Confirm whether this method should do the same, for consistency and for
    # orbit types without a `ν_fact` field (cf. the `VisualOrbit` override).
    ν = 2*atan(elem.ν_fact*tan(EA/2))
    return orbitsolve_ν(elem, ν, EA)
end
"""
orbitsolve_eccanom(elements, EA)
Same as `orbitsolve`, but solves orbit for a given eccentric anomaly instead of time.
"""
function orbitsolve_eccanom(elem::AbstractOrbit, EA)
    # Map the eccentric anomaly to true anomaly, then build the full solution.
    return orbitsolve_ν(elem, _trueanom_from_eccanom(elem, EA))
end
function radvel(o::AbstractOrbitSolution)
    # Radial velocity [m/s]: K * (cos(ν+ω) + e*cosω), using the precomputed
    # semi-amplitude K and trig terms stored on the elements/solution.
    return o.elem.K * (o.cosν_ω + o.elem.ecosω)
end
function _time_from_EA(sol::AbstractOrbitSolution, EA;)
    # The elliptical/hyperbolic time-from-anomaly logic was previously
    # duplicated verbatim here and in the AbstractOrbit method; delegate to
    # the orbit method so the two call paths cannot drift apart.
    return _time_from_EA(sol.elem, EA)
end
# Given an eccentric anomaly, calculate *a* time at which the body
# would be at that location.
function _time_from_EA(elem::AbstractOrbit, EA;)
    # Epoch of periastron passage [days]
    tₚ = periastron(elem)
    # Kepler's equation gives the mean anomaly from the eccentric (bound)
    # or hyperbolic (unbound) anomaly. Only this expression differed between
    # the two previously-duplicated branches; everything else is shared.
    if eccentricity(elem) < 1
        MA = EA - eccentricity(elem) * sin(EA)
    else
        MA = eccentricity(elem) * sinh(EA) - EA
    end
    # Invert MA = meanmotion(elem)/year2day_julian * (t - tₚ) for t [days].
    return MA/meanmotion(elem)*oftype(EA, year2day_julian) + tₚ

    # # ---- Worked math for elliptical case ---
    # # ν/2 = atan(elem.ν_fact*tan(EA/2))
    # # tan(ν/2) = elem.ν_fact*tan(EA/2)
    # # tan(ν/2)/elem.ν_fact = tan(EA/2)
    # # atan(tan(ν/2)/elem.ν_fact) = (EA/2)
    # # atan(tan(ν/2)/elem.ν_fact)*2 = EA
    # # EA = atan(tan(ν/2)/elem.ν_fact)*2
    # # Compute eccentric anomaly
    # MA = EA - elem.e * sin(EA)
    # # Epoch of periastron passage
    # tₚ = periastron(elem)
    # # MA = meanmotion(elem)/oftype(t, year2day) * (t - tₚ)
    # # MA / meanmotion(elem) * year2day = (t - tₚ)
    # # MA / meanmotion(elem) * year2day + tₚ = t
    # t = MA / meanmotion(elem) * year2day + tₚ - tref
end
include("orbit-keplerian.jl")
include("orbit-visual.jl")
include("orbit-absolute.jl")
include("orbit-thiele-innes.jl")
include("orbit-radvel.jl")
include("orbit-cartesian.jl")
# Specialization for VisualOrbit: a visual orbit wraps an underlying orbit,
# so the precomputed `ν_fact` lives on `elem.parent` rather than `elem`.
function orbitsolve_meananom(elem::VisualOrbit, MA)
    # Compute eccentric anomaly
    EA = kepler_solver(MA, eccentricity(elem))
    # Calculate true anomaly
    ν = 2*atan(elem.parent.ν_fact*tan(EA/2))
    return orbitsolve_ν(elem, ν, EA)
end
"""
Get the position in the x direction in astronomical units.
"""
function posx(o::Union{OrbitSolutionKep, OrbitSolutionCartesian})
    a = semimajoraxis(o.elem)
    # Degenerate a == 0 orbit: the body coincides with the primary.
    iszero(a) && return zero(typeof(a))
    # [AU]
    return o.r*(o.cosν_ω*o.elem.sinΩ + o.sinν_ω*o.elem.cosi*o.elem.cosΩ)
end
"""
Get the position in the y direction in astronomical units.
"""
function posy(o::Union{OrbitSolutionKep, OrbitSolutionCartesian})
    a = semimajoraxis(o.elem)
    # Degenerate a == 0 orbit: the body coincides with the primary.
    iszero(a) && return zero(typeof(a))
    # [AU]
    return o.r*(o.cosν_ω*o.elem.cosΩ - o.sinν_ω*o.elem.cosi*o.elem.sinΩ)
end
"""
Get the position in the z direction in astronomical units.
"""
function posz(o::Union{OrbitSolutionKep, OrbitSolutionCartesian})
    a = semimajoraxis(o.elem)
    # Degenerate a == 0 orbit: the body coincides with the primary.
    iszero(a) && return zero(typeof(a))
    # [AU]
    return o.r*(o.sinν_ω*o.elem.sini)
end
export posx, posy, posz
"""
Get the velocity in the x direction in astronomical units / julian year.
"""
function velx(o::Union{OrbitSolutionKep, OrbitSolutionCartesian})
    # [AU/julian year]
    return o.elem.J*(o.elem.cosi_cosΩ*(o.cosν_ω + o.elem.ecosω) - o.elem.sinΩ*(o.sinν_ω + o.elem.esinω))
end
"""
Get the velocity in the y direction in astronomical units / julian year.
"""
function vely(o::Union{OrbitSolutionKep, OrbitSolutionCartesian})
    # [AU/julian year]
    return -o.elem.J*(o.elem.cosi_sinΩ*(o.cosν_ω + o.elem.ecosω) + o.elem.cosΩ*(o.sinν_ω + o.elem.esinω))
end
"""
Get the velocity in the z direction in astronomical units / julian year.
"""
function velz(o::Union{OrbitSolutionKep, OrbitSolutionCartesian, OrbitSolutionRadialVelocity})
    # Convert the radial velocity [m/s] into [AU/julian year].
    return radvel(o) * m2au * year2sec_julian
end
export velx, vely, velz
"""
orbit(...)
Construct an orbit from the provided keyword arguments. Will automatically select
a subclass of AbstractOrbit based on the information provided. This is a convenience
function that is not type stable and should not be used in performance sensitive
contexts. Instead, call one of the concrete constructors `KepOrbit`, `VisualOrbit`,
or `RadialVelocityOrbit` directly.
This function logs the kind of elements created so that it's easy to select the correct
constructor.
Required arguments:
- a: semi-major axis [AU]
- M: gravitational parameter [M⊙]
Optional arguments:
- tp: epoch of periastron passage, default=0
- e: eccentricity, default=0
- ω: argument of periapsis [rad], default=0
- i: inclination [rad]
- Ω: longitude of ascending node [rad]
- plx: parallax [mas]; defines the distance to the primary
"""
function orbit(;kwargs...)
    # Pick the concrete orbit type from which keywords are present.
    T = supportedorbit(kwargs)
    # Fill in defaults for optional parameters the constructors expect.
    haskey(kwargs, :e) || (kwargs = (;kwargs..., e=0, ω=0))
    haskey(kwargs, :tp) || (kwargs = (;kwargs..., tp=0))
    return T(;kwargs...)
end
# Function to return what orbit type is supported based on precence
# or absence of properties
function supportedorbit(kwargs)
    # Base orbit type, selected by which defining keywords are present.
    OrbitType = if haskey(kwargs, :x) && haskey(kwargs, :vx)
        CartesianOrbit          # state vector supplied
    elseif haskey(kwargs, :A)
        ThieleInnesOrbit        # Thiele-Innes constants supplied
    elseif haskey(kwargs, :i)
        KepOrbit                # full Keplerian elements supplied
    else
        RadialVelocityOrbit     # minimal RV-only parameterization
    end
    # Wrapper layer: absolute astrometry (rv) or parallax-based projection (plx).
    if haskey(kwargs, :rv)
        return AbsoluteVisual{OrbitType}
    elseif haskey(kwargs, :plx) && OrbitType !== ThieleInnesOrbit
        return Visual{OrbitType}
    else
        return OrbitType
    end
end
export orbit
# Define fallbacks for all accessor functions.
# If the user calls f(elems, t, args...) we compute the
# AbstractOrbitSolution for them.
# Accessor functions that operate on an orbit solution; each gets a
# convenience method `f(orbit, t, args...)` generated below that first
# solves the orbit at time `t` [days] and then forwards to the
# solution-based method.
fun_list = (
    :trueanom,
    :eccanom,
    :meananom,
    :posx,
    :posy,
    :posz,
    :raoff,
    :decoff,
    :posangle,
    :projectedseparation,
    :propmotionanom,
    :velx,
    :vely,
    :velz,
    :radvel,
    :pmra,
    :pmdec,
    :ra,
    :dec,
    :accra,
    :accdec,
    :acceleration,
)
for fun in fun_list
    # Convenience overload: solve at `t`, then delegate to the solution method.
    @eval function ($fun)(orbit::AbstractOrbit, t::Real, args...)
        return ($fun)(orbitsolve(orbit, t), args...)
    end
end
# Define versions that compute the quantity on of the primary instead of
# the secondary
# Quantities whose primary-body ("reflex") counterpart is obtained by scaling
# the secondary's value by -M_planet/M_total.
mass_fun_list = (
    :posx,
    :posy,
    :posz,
    :radvel,
    :raoff,
    :decoff,
    :pmra,
    :pmdec,
    :accra,
    :accdec,
    :propmotionanom,
    :acceleration,
)
for fun in mass_fun_list
    # f(solution, M_planet): the primary moves opposite the secondary,
    # scaled by the mass ratio — hence the negative sign.
    @eval function ($fun)(o::AbstractOrbitSolution, M_planet)
        quantity = ($fun)(o)
        M_tot = totalmass(o.elem)
        return -M_planet/M_tot*quantity
    end
end
function projectedseparation(o::AbstractOrbitSolution, M_planet)
    # Separation of the primary about the barycenter: scaled by the mass
    # ratio but always positive, so no sign flip here.
    return M_planet/totalmass(o.elem) * projectedseparation(o)
end
function posangle(o::AbstractOrbitSolution, M_planet)
    # Position angle of the *primary* about the barycenter.
    # atan(x, y) — this argument order is *correct* in this package's conventions.
    return atan(posx(o, M_planet), posy(o, M_planet))
end
# ---------------------------------------------------
# Addional & Optional Features
# ---------------------------------------------------
include("recipes-plots.jl")
include("time.jl")
include("chain-rules.jl")
# include("transformation.jl")
include("precompile.jl")
end # module
# ---------------------------------------------------
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 1067 |
# Using implicit differentiation, I found that the derivatives of eccentric anomaly
# have closed form solutions once the primal value is known.
# By providing those here, upstream automatic differentiation libraries will be able
# to efficiently diff through Kepler's equation.
using ChainRulesCore
# In `@scalar_rule`, `Ω` refers by ChainRulesCore convention to the primal
# *output* — here the eccentric anomaly EA. Implicit differentiation of
# Kepler's equation M = EA - e*sin(EA) gives:
#   ∂EA/∂M = 1/(1 - e*cos(EA))   and   ∂EA/∂e = sin(EA)/(1 - e*cos(EA))
@scalar_rule PlanetOrbits.kepler_solver(M, e) @setup(u = 1 - e*cos(Ω)) (1 / u,sin(Ω) / u)
# We have analytic gradients for these already calculated. But with the above defintion
# they don't have much of an effect.
# @scalar_rule raoff(o::OrbitSolution) pmra(o)
# @scalar_rule decoff(o::OrbitSolution) pmdec(o)
# @scalar_rule propmotionanom(o::OrbitSolution) acceleration(o)
# @scalar_rule orbitsolve_ν(elem::VisualOrbit{T}, t; tref=58849)
# DiffRules.@define_diffrule PlanetOrbits.kepler_solver(M, e) = :(
# EA = PlanetOrbits.kepler_solver($M,$e);
# temp = 1 - e*cos(EA);
# d_dM = 1 / temp
# ),
# :(
# EA = PlanetOrbits.kepler_solver($M,$e);
# temp = 1 - e*cos(EA);
# d_de = sin(EA) / temp
# )
# end | PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 3903 | using StaticArrays
"""
PlanetOrbits.Goat()
Kepler solver implementation from https://arxiv.org/abs/2103.15829 and https://github.com/oliverphilcox/Keplers-Goat-Herd
It is here for comparison purposes only. In general, Markley() is more performant and accurate.
"""
struct Goat <: AbstractSolver end
# Implementation from https://arxiv.org/abs/2103.15829 and https://github.com/oliverphilcox/Keplers-Goat-Herd
# Solve Kepler's equation 𝓁 = E - e*sin(E) for the eccentric anomaly E by
# contour integration over a circle of radius e/2 in the complex plane
# (Philcox et al. 2021, "Kepler's Goat Herd"). Assumes a bound orbit.
function kepler_solver(𝓁, e, ::Goat)
    # This function implements the 🐐 GOAT algorithm for
    # solving Kepler's equation. It is approximately
    # 4x faster than the other methods implemented
    # here.
    # Circular orbit: E == 𝓁 exactly.
    if isapprox(e, 0)
        return 𝓁
    end
    # At integer multiples of π, sin(E) vanishes, so again E == 𝓁.
    if isapprox(rem(𝓁,π), 0)
        return 𝓁
    end
    N_it = 15
    N_points = N_it-2
    N_fft = (N_it)*2
    radius = e / 2
    # Generate e^{ikx} sampling points and precompute real and imaginary parts
    # Keep these on the stack inside an MVector -> no allocations
    exp2R = @MVector zeros(typeof(e), N_points)
    exp2I = @MVector zeros(typeof(e), N_points)
    exp4R = @MVector zeros(typeof(e), N_points)
    exp4I = @MVector zeros(typeof(e), N_points)
    coshI = @MVector zeros(typeof(e), N_points)
    sinhI = @MVector zeros(typeof(e), N_points)
    ecosR = @MVector zeros(typeof(e), N_points)
    esinR = @MVector zeros(typeof(e), N_points)
    @inbounds for j in 1:N_points
        freq = 2π*j/N_fft
        cf = cos(freq)
        sf = sin(freq)
        exp2R[j] = cf
        exp2I[j] = sf
        exp4R[j] = cf*cf-sf*sf
        exp4I[j] = 2.0*cf*sf
        coshI[j] = cosh(radius*exp2I[j])
        sinhI[j] = sinh(radius*exp2I[j])
        ecosR[j] = e*cos(radius*exp2R[j])
        esinR[j] = e*sin(radius*exp2R[j])
    end
    esinRadius = e*sin(radius)
    ecosRadius = e*cos(radius)
    # Define contour center for each ell and precompute sin(center), cos(center)
    if 𝓁 < π
        center = 𝓁 + e/2
    else
        center = 𝓁 - e/2
    end
    sinC = sin(center)
    cosC = cos(center)
    output = center
    # Accumulate Fourier coefficients
    # NB: we halve the range by symmetry, absorbing factor of 2 into ratio
    #######
    # Separate out j = 0 piece, which is simpler
    # Compute z in real and imaginary parts (zI = 0 here)
    zR = center + radius
    # Compute e*sin(zR) from precomputed quantities
    tmpsin = sinC*ecosRadius+cosC*esinRadius # sin(zR)
    # Compute f(z(x)) in real and imaginary parts (fxI = 0)
    fxR = zR - tmpsin - 𝓁
    # Add to array, with factor of 1/2 since an edge
    ft_gx2 = 0.5/fxR
    ft_gx1 = 0.5/fxR
    #######
    # Compute for j = 1 to N_points
    @inbounds @simd for j in 1:N_points
        # Compute z in real and imaginary parts
        zR = center + radius*exp2R[j]
        zI = radius*exp2I[j]
        # Compute f(z(x)) in real and imaginary parts
        # can use precomputed cosh / sinh / cos / sin for this!
        tmpcosh = coshI[j] # cosh(zI)
        tmpsinh = sinhI[j] # sinh(zI)
        tmpsin = sinC*ecosR[j]+cosC*esinR[j] # e sin(zR)
        tmpcos = cosC*ecosR[j]-sinC*esinR[j] # e cos(zR)
        fxR = zR - tmpsin*tmpcosh-𝓁
        fxI = zI - tmpcos*tmpsinh
        # Compute 1/f(z) and append to array
        ftmp = fxR*fxR+fxI*fxI
        fxR /= ftmp
        fxI /= ftmp
        ft_gx2 += (exp4R[j]*fxR+exp4I[j]*fxI)
        ft_gx1 += (exp2R[j]*fxR+exp2I[j]*fxI)
    end
    #######
    # Separate out j = N_it piece, which is simpler
    # Compute z in real and imaginary parts (zI = 0 here)
    zR = center - radius
    # Compute sin(zR) from precomputed quantities
    tmpsin = sinC*ecosRadius-cosC*esinRadius # sin(zR)
    # Compute f(z(x)) in real and imaginary parts (fxI = 0 here)
    fxR = zR - tmpsin-𝓁
    # Add to sum, with 1/2 factor for edges
    ft_gx2 += 0.5/fxR
    ft_gx1 += -0.5/fxR
    #######
    # Compute E(ell)
    output += radius*ft_gx2/ft_gx1;
    return output
end
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 3650 |
"""
PlanetOrbits.Markley()
Kepler solver implementation from AstroLib, based on Markley (1995)
Celestial Mechanics and Dynamical Astronomy, 63, 101 (DOI:10.1007/BF00691917).
"""
struct Markley <: AbstractSolver end
# Simplified replacement for rem2pi that wraps an angle into roughly [-π, π]
# by repeated shifts of 2π. It probably does not handle Inf/NaN and exact
# boundary values perfectly, but unlike Base.rem2pi it works with Enzyme.
function myrem2pi(θ)
    while θ > π
        θ -= 2π
    end
    while θ < -π
        θ += 2π
    end
    return θ
end
# The following function is taken directly from AstroLib.jl
# We remove one invariant check we handle elsewhere and also
# force inlining for about a 5% speedup.
# We also supply analytic gradients for use in autodiff packages.
#
# Solves Kepler's equation EA - e*sin(EA) = M for the eccentric anomaly with
# no iteration: a starting approximation (equations 5-20 of Markley 1995)
# followed by high-order corrections (equations 21-29). Valid for 0 <= e <= 1.
@inline function kepler_solver(_M::Real, e::Real, ::Markley)
    # We already handle this invariant
    # @assert 0 <= e <= 1 "eccentricity must be in the range [0, 1]"
    # M must be in the range [-pi, pi], see Markley (1995), page 2.
    M = rem2pi(_M, RoundNearest)
    # M = myrem2pi(_M)
    # Work in a common float type so the return type is concrete.
    T = float(promote_type(typeof(M), typeof(e)))
    if iszero(M) || iszero(e)
        # Trivial cases (circular orbit or zero mean anomaly): EA == M.
        return T(M)
    end
    pi2 = abs2(T(pi))
    # equation (20)
    α = (3 * pi2 + 8 * (pi2 - pi * abs(M)) / (5 * (1 + e)))/(pi2 - 6)
    # equation (5)
    d = 3 * (1 - e) + α * e
    # equation (9)
    q = 2 * α * d * (1 - e) - M * M
    # equation (10)
    r = 3 * α * d * (d - 1 + e) * M + M * M * M
    # equation (14)
    w = cbrt(abs2(abs(r) + sqrt(q * q * q + r * r)))
    # equation (15): starting approximation for the eccentric anomaly
    E1 = (2 * r * w / @evalpoly(w, q * q, q, 1) + M)/d
    # equation (26) & equation (27)
    f2, f3 = e .* sincos(E1)
    # equation (21)
    f0 = E1 - f2 - M
    # equation (25)
    f1 = 1 - f3
    # equation (22)
    δ3 = -f0 / (f1 - f0 * f2 / (2 * f1))
    # equation (23)
    δ4 = -f0 / @evalpoly(δ3, f1, f2 / 2, f3 / 6)
    # equations (24) and (28)
    δ5 = -f0 / @evalpoly(δ4, f1, f2 / 2, f3 / 6, - f2 / 24)
    return E1 + δ5 # equation 29
end
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 2967 | using Roots
# Dispatch between the closed-orbit and hyperbolic Roots.jl-based solvers.
@inline function PlanetOrbits.kepler_solver(_MA::Real, e::Real, method::RootsMethod)
    # Hyperbolic orbits are unbounded, so the raw mean anomaly is passed
    # straight through; elliptic orbits first wrap it into [-π, π] as the
    # closed-orbit solver expects.
    e < 1 || return hyperbolic_kepler_solver_roots(_MA, e, method)
    return kepler_solver_roots(rem2pi(_MA, RoundNearest), e, method)
end
# Solve Kepler's equation  EA - e*sin(EA) = MA  (e < 1) for the eccentric
# anomaly EA using a root-finding algorithm from Roots.jl. `MA` must already
# be wrapped into [-π, π]. Derivatives up to fifth order are supplied so
# higher-order derivative-based methods can be selected.
@inline function kepler_solver_roots(MA::Real, e::Real, method::RootsMethod)
    # Standard Kepler's equation and derivatives
    kep1(EA) = EA - MA - e*sin(EA)
    kep1′(EA) = 1 - e*cos(EA)
    kep1′′(EA) = e*sin(EA)
    kep1′′′(EA) = e*cos(EA)
    kep1′′′′(EA) = -e*sin(EA)
    kep1′′′′′(EA) = -e*cos(EA)
    # BUGFIX: the tuple previously repeated the fourth derivative (kep1′′′′)
    # in the last slot, leaving the defined fifth derivative unused.
    fs = (kep1, kep1′, kep1′′, kep1′′′, kep1′′′′, kep1′′′′′)
    if e == 0
        # Circular orbit: EA == MA exactly.
        return MA
    end
    if typeof(method.method) <: Roots.AbstractBracketingMethod
        # Bracketing methods need an interval containing the root.
        if e < 0.7
            initial = (MA - e, MA + e)
        else
            # High eccentricity: bracket around Machin's analytic start point.
            initial = machin(e, MA) .+ (-0.05, 0.05)
        end
    else
        # Derivative/secant-style methods need only a starting guess.
        if e < 0.5
            initial = MA
        else
            if -π < MA < 0 || π < MA
                initial = MA - e
            else
                initial = MA + e
            end
        end
    end
    EA = Roots.find_zero(fs, initial, method.method; method.kwargs...)
    return EA
end
# Solve the hyperbolic Kepler equation  e*sinh(H) - H = MA  using a
# root-finding algorithm from Roots.jl.
# Note: we keep the "eccentric anomaly" / "EA" notation but this is
# really hyperbolic anomaly.
@inline function hyperbolic_kepler_solver_roots(MA::Real, e::Real, method::RootsMethod)
    # Hyperbolic Kepler's equation and derivatives
    keph(EA) = - EA - MA + e*sinh(EA)
    keph′(EA) = e*cosh(EA) - 1
    keph′′(EA) = e*sinh(EA)
    keph′′′(EA) = e*cosh(EA)
    keph′′′′(EA) = e*sinh(EA)
    keph′′′′′(EA) = e*cosh(EA)
    # BUGFIX: the tuple previously repeated the fourth derivative (keph′′′′)
    # in the last slot, leaving the defined fifth derivative unused.
    fs = (keph, keph′, keph′′, keph′′′, keph′′′′, keph′′′′′)
    if typeof(method.method) <: Roots.AbstractBracketingMethod
        # Bracketing interval guaranteed to contain the root.
        initial = (MA - e, MA + e)
    else
        if abs(MA) < 100
            if -π < MA < 0 || π < MA
                initial = MA - e
            else
                initial = MA + e
            end
        else
            # For very large |MA| the root grows roughly like log|MA|;
            # start the iteration there.
            initial = sign(MA)*log(abs(MA))
        end
    end
    EA = Roots.find_zero(fs, initial, method.method; method.kwargs...)
    return EA
end
# Functions to find a good starting point for iteration,
# specifically with Newton's method.
# Functions by J Cook: https://www.johndcook.com/blog/2022/11/02/keplers-equation-python/
# These are currently unused. For Newton's method, we are faster
# just running more iterations from a simple guess than calculating
# all these sqrt and cbrt to get a good starting point.
# This will solve the special form of the cubic we need.
# Cardano's formula for the depressed cubic  a*s^3 + c*s + d = 0
# (no quadratic term), returning one real root. Assumes the discriminant
# term under the square root is non-negative.
function solve_cubic(a, c, d)
    pp = c / a
    qq = d / a
    disc = sqrt(qq^2 / 4 + pp^3 / 27)
    halfq = qq / 2
    return cbrt(-halfq - disc) + cbrt(-halfq + disc)
end
# Machin's starting point for Newton's method on Kepler's equation.
# See johndcook.com/blog/2022/11/01/kepler-newton/
function machin(e, M)
    ncoef = sqrt(5 + sqrt(16 + 9/e))
    # Coefficients of the depressed cubic whose root seeds the guess.
    acoef = ncoef*(e*(ncoef^2 - 1) + 1)/6
    s = solve_cubic(acoef, ncoef*(1 - e), -M)
    return ncoef*asin(s)
end
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 15261 | """
AbsoluteVisual{OrbitType}(..., ref_epoch=, ra=, dec=, plx=, rv=, pmra=, pmdec=)
This wraps another orbit object to add parallax, proper motion, and
RV fields, at a given reference epoch.
Like a Visual{OrbitType} this allows for calculating projected quantities,
eg. separation in milliarcseconds.
What this type additionally does is correct for the star's 3D motion through
space (RV and proper motion) and differential light travel-time compared to a
reference epoch when calculating various quantities.
This becomes necessary when computing eg. RVs over a long time period.
ra : degrees
dec : degrees
plx : mas
pmra : mas/yr
pmdec : mas/yr
rv : m/s
ref_epoch : days
TODO: account for viewing angle differences and differential light travel
time between a planet and its host.
"""
struct AbsoluteVisualOrbit{T<:Number,O<:AbstractOrbit} <: AbstractOrbit{T}
    parent::O    # wrapped orbit providing the Keplerian motion
    ref_epoch::T # reference epoch [MJD days]
    ra::T        # right ascension at ref_epoch [deg]
    dec::T       # declination at ref_epoch [deg]
    plx::T       # parallax at ref_epoch [mas]
    rv::T        # systemic radial velocity at ref_epoch [m/s]
    pmra::T      # proper motion in RA (includes cos(dec) factor; see compensate_star_3d_motion) [mas/yr]
    pmdec::T     # proper motion in Dec at ref_epoch [mas/yr]
    dist::T      # distance derived from plx in the constructor [AU]
end
# TODO: distance vs time
# Distance to the system in parsecs (elem.dist is stored in AU; au2pc
# converts). The time argument is currently ignored — the secular change in
# distance is not applied here yet (see TODO above).
distance(elem::AbsoluteVisualOrbit, t::Number) = elem.dist*au2pc
"""
AbsoluteVisual{OrbitType}(..., ref_epoch=, ra=, dec=, plx=, rv=, pmra=, pmdec=)
This wraps another orbit object to add parallax, proper motion, and
RV fields, at a given reference epoch.
Like a Visual{OrbitType} this allows for calculating projected quantities,
eg. separation in milliarcseconds.
What this type additionally does is correct for the star's 3D motion through
space (RV and proper motion) and differential light travel-time compared to a
reference epoch when calculating various quantities.
This becomes necessary when computing eg. RVs over a long time period.
ra : degrees
dec : degrees
parallax : mas
pmra : mas/yr
pmdec : mas/yr
rv : m/s
ref_epoch : years
TODO: account for viewing angle differences and differential light travel
time between a planet and its host.
"""
const AbsoluteVisual{OrbitType} = AbsoluteVisualOrbit{T,OrbitType} where T
function AbsoluteVisual{OrbitType}(;ref_epoch, ra, dec, plx, rv, pmra, pmdec, kargs...,) where {OrbitType}
dist = 1000/plx * pc2au # distance [AU]
parent = OrbitType(;kargs...)
T = _parent_num_type(parent)
T = promote_type(T, typeof(ref_epoch), typeof(ra), typeof(dec), typeof(plx), typeof(rv), typeof(pmra), typeof(pmdec))
return AbsoluteVisualOrbit{T,OrbitType{T}}(parent, ref_epoch, ra, dec, plx, rv, pmra, pmdec, dist)
end
# Positional-argument variant wrapping an already-constructed orbit object.
function AbsoluteVisual(parent::AbstractOrbit, ref_epoch, ra, dec, plx, rv, pmra, pmdec,)
    # 1000/plx gives parsecs; pc2au converts to AU.
    dist = 1000/plx * pc2au # distance [AU]
    T = _parent_num_type(parent)
    # TODO: we could have a conversion error here if the parent orbit uses a more restrictive number type and we cant convert these new properties to match
    return AbsoluteVisualOrbit{T,typeof(parent)}(parent, ref_epoch, ra, dec, plx, rv, pmra, pmdec, dist)
end
export AbsoluteVisual
# Orbit solution wrapper pairing the parent orbit's solution with the
# 3D-motion compensation quantities computed for the requested epoch.
struct OrbitSolutionAbsoluteVisual{TEl<:AbstractOrbit,TSol<:AbstractOrbitSolution,T<:Number,TComp<:NamedTuple} <: AbstractOrbitSolution
    elem::TEl          # the AbsoluteVisualOrbit this solution belongs to
    sol::TSol          # solution of the wrapped (parent) orbit
    t::T               # requested epoch [days]
    compensated::TComp # output of compensate_star_3d_motion at this epoch
end
"""
This function calculates how to account for stellar 3D motion
when comparing measurements across epochs (epoch1 vs epoch2).
Typically `epoch1` is your reference epoch, `epoch2` is your measurement
epoch, and the remaining parameters are parameters you are hoping to fit.
You use this function to calculate their compensated values, and compare
these to data at `epoch2`.
Will also calculates light travel time, returning updated epochs
(epoch2a) due to change in distance between epoch1 and epoch2.
epoch2 will be when the light was detected, epoch2a will be the
"emitted" time accounting for the different positions between epoch1
and epoch 2.
Original Author: Eric Nielsen
"""
function compensate_star_3d_motion(elem::AbsoluteVisualOrbit,epoch2_days::Number)
ra1 = elem.ra # degrees
dec1 = elem.dec # degrees
parallax1 = elem.plx # mas
pmra1 = elem.pmra # mas/yr
pmdec1 = elem.pmdec # mas/yr
rv1 = elem.rv/1000 # m/s -> km/s
epoch1_days = elem.ref_epoch # MJD
# Be very careful now about assuming there are 365.245 days per year.
# We want to use that value when using a number like proper motion in mas/yr.
# We don't want to make that assumption when using delta years as a time unit,
# since they are each either 365 or 366 days.
# epoch1 = epoch1_days*days_per_average_year
# epoch2 = epoch2_days*days_per_average_year
# Guard against same epoch
# TODO: could just return arguments with appropriate units
if epoch1_days == epoch2_days
epoch2_days += eps(epoch2_days)
end
T = promote_type(
typeof(ra1),
typeof(dec1),
typeof(parallax1),
typeof(pmra1),
typeof(pmdec1),
typeof(rv1),
typeof(epoch1_days),
typeof(epoch2_days)
)
mydtor = convert(T, π / 180)
my206265 = convert(T, 180 / π * 60 * 60)
sec2year = convert(T, 365.25 * 24 * 60 * 60)
pc2km = convert(T, 3.08567758149137e13)
distance1 = convert(T, 1000 / parallax1)
# convert RV to pc/year, convert delta RA and delta Dec to radians/year
# These are differential quantities originally expressed per average-length-year.
# We want them in units per day, which always have the same length
dra1 = pmra1 / 1000 / my206265 / cos(dec1 * mydtor)
ddec1 = pmdec1 / 1000 /my206265
ddist1 = rv1 / pc2km * sec2year
# convert first epoch to x,y,z and dx,dy,dz
sin_ra1, cos_ra1 = sincos(ra1*mydtor)
sin_dec1, cos_dec1 = sincos(dec1*mydtor)
x₁ = cos_ra1 * cos_dec1 * distance1
y₁ = sin_ra1 * cos_dec1 * distance1
z₁ = sin_dec1 * distance1
# Excellent. Now dx,dy,dz,which are constants
dx = -1 * sin_ra1 * cos_dec1 * distance1 * dra1 -
cos_ra1 * sin_dec1 * distance1 * ddec1 +
cos_ra1 * cos_dec1 * ddist1
dy = 1 * cos_ra1 * cos_dec1 * distance1 * dra1 -
sin_ra1 * sin_dec1 * distance1 * ddec1 +
sin_ra1 * cos_dec1 * ddist1
dz = 1 * cos_dec1 * distance1 * ddec1 + sin_dec1 * ddist1
# be careful here with units:
delta_time_jyear = (epoch2_days - epoch1_days)/year2day_julian
x₂ = x₁ + dx * delta_time_jyear#(epoch2-epoch1)
y₂ = y₁ + dy * delta_time_jyear#(epoch2-epoch1)
z₂ = z₁ + dz * delta_time_jyear#(epoch2-epoch1)
# And done. Now we just need to go backward.
distance2 = sqrt(x₂^2 + y₂^2 + z₂^2)
if distance2 == 0
distance2 += eps(distance2)
end
parallax2 = 1000/distance2
ra2 = ((atan(y₂,x₂)/mydtor + 360) % 360)
arg = z₂ / distance2
if 1.0 < arg < 1.0 + sqrt(eps(1.0))
arg = 1.0
end
dec2 = asin(arg) / mydtor
ddist2 = 1 / sqrt(x₂^2 + y₂^2 + z₂^2) * (x₂ * dx + y₂ * dy + z₂ * dz)
dra2 = 1 / (x₂^2 + y₂^2) * (-1 * y₂ * dx + x₂ * dy)
ddec2 = 1 / (distance2 * sqrt(1 - z₂^2 / distance2^2)) * (-1 * z₂ * ddist2 / distance2 + dz)
pmra2 = dra2 * my206265 * 1000 * cos(dec2 * mydtor)
pmdec2 = ddec2 * 1000 * my206265
rv2 = ddist2 * pc2km / sec2year
# light travel time
delta_time = (distance2 - distance1) * 3.085677e13 / 2.99792e5 # in seconds
# epoch2a = epoch2 - delta_time/3.154e7
epoch2a_days = epoch2_days - delta_time*sec2day
distance2_pc = distance2 * pc2au
return (;
distance2_pc,
parallax2,
ra2,
dec2,
ddist2,
dra2,
ddec2,
pmra2,
pmdec2,
rv2=rv2*1000,
delta_time,
epoch1_days,
# epoch2,
# epoch2a,
epoch2a_days,
x₁,
y₁,
z₁,
x₂,
y₂,
z₂,
)
end
# We have to override the generic `orbitsolve` for this case, as we have to adjust
# for light travel time here.
function orbitsolve(elem::AbsoluteVisualOrbit, t, method::AbstractSolver=Auto())
    # Epoch of periastron passage [days]
    tₚ = periastron(elem)
    # Ensure the requested epoch is a float before the arithmetic below.
    if t isa Integer
        t = float(t)
    end
    # Correct for the star's 3D space motion and light travel time relative
    # to the reference epoch before solving Kepler's equation.
    compensated = compensate_star_3d_motion(elem, t)
    # Mean anomaly at the light-travel-corrected epoch
    MA = meanmotion(elem)/oftype(t, year2day_julian) * (compensated.epoch2a_days - tₚ)
    # Compute eccentric anomaly
    EA = kepler_solver(MA, eccentricity(elem), method)
    # Calculate true anomaly
    ν = _trueanom_from_eccanom(elem, EA)
    return orbitsolve_ν(elem, ν, EA, t, compensated)
end
# Solve the wrapped orbit at a given true anomaly, attaching the 3D-motion
# compensation quantities for the corresponding epoch.
function orbitsolve_ν(
    elem::AbsoluteVisualOrbit,
    ν,
    # TODO: EA_from_ν is no longer accurate here, since the light travel
    # time can vary vs time, we can't determine time from nu directly.
    EA=EA_from_ν(elem.parent, ν),
    t=_time_from_EA(elem, EA),
    compensated::NamedTuple=compensate_star_3d_motion(elem,t);
    kwargs...
)
    # TODO: asking for a solution at a given ν is no longer well-defined,
    # as it will vary over time and not repeat over each orbital period.
    # Solve the parent orbit at the light-travel-corrected epoch.
    sol = orbitsolve_ν(elem.parent, ν, EA, compensated.epoch2a_days; kwargs...)
    return OrbitSolutionAbsoluteVisual(elem, sol, t, compensated)
end
# The solution time is the time we asked for, not the true time accounting for light travel.
# (The light-travel-corrected epoch is available as os.compensated.epoch2a_days.)
soltime(os::OrbitSolutionAbsoluteVisual) = os.t
# Forward these functions to the underlying orbit object
solution_fun_list = (
    :trueanom,
    :eccanom,
    :meananom,
    :posx,
    :posy,
    :posz,
    :posangle,
    :velx,
    :vely,
    :velz,
)
for fun in solution_fun_list
    # TODO-1: several of these need to handle the varying parallax correctly
    # TODO-2: several more need to account for chaning viewing angle and planet light-travel time.
    # Each generated method unwraps the AbsoluteVisual solution and delegates
    # to the parent orbit's solution.
    @eval function ($fun)(os::OrbitSolutionAbsoluteVisual, args...)
        return ($fun)(os.sol, args...)
    end
end
# Forward these orbit-level accessors to the wrapped (parent) orbit.
orbit_fun_list = (
    :eccentricity,
    :periastron,
    :period,
    :inclination,
    :semimajoraxis,
    :totalmass,
    :meanmotion,
    :semiamplitude,
    :_trueanom_from_eccanom,
)
for fun in orbit_fun_list
    # Generated delegation methods: the AbsoluteVisual wrapper adds no
    # information for these quantities.
    @eval function ($fun)(elem::AbsoluteVisualOrbit, args...)
        return ($fun)(elem.parent, args...)
    end
end
function radvel(os::OrbitSolutionAbsoluteVisual)
    # Keplerian RV of the wrapped solution, plus the secular change of the
    # system RV between the reference epoch and this solution's epoch.
    rv_kep = radvel(os.sol)
    rv_drift = os.compensated.rv2 - os.elem.rv
    return rv_kep + rv_drift
end
function raoff(o::OrbitSolutionAbsoluteVisual)
    # Project the Cartesian x offset onto the sky using the epoch-specific
    # (compensated) distance rather than the reference-epoch distance.
    x_au = posx(o) # [AU]
    au2mas = rad2as*oftype(x_au, 1e3)/o.compensated.distance2_pc
    return x_au*au2mas # [mas]
end
function decoff(o::OrbitSolutionAbsoluteVisual)
    # Same projection as raoff, applied to the Cartesian y offset.
    y_au = posy(o) # [AU]
    au2mas = rad2as*oftype(y_au, 1e3)/o.compensated.distance2_pc
    return y_au*au2mas # [mas]
end
function pmra(o::OrbitSolutionAbsoluteVisual)
    # Keplerian sky-plane velocity in RA, plus the secular change in the
    # system proper motion between the reference and solution epochs.
    par = o.elem.parent
    vx_au_yr = par.J*(par.cosi_cosΩ*(o.sol.cosν_ω + par.ecosω) - par.sinΩ*(o.sol.sinν_ω + par.esinω)) # [AU/year]
    to_mas = rad2as*oftype(vx_au_yr, 1e3)/o.compensated.distance2_pc
    return vx_au_yr*to_mas + (o.compensated.pmra2 - o.elem.pmra) # [mas/year]
end
function pmdec(o::OrbitSolutionAbsoluteVisual)
    # Keplerian sky-plane velocity in Dec, plus the secular proper-motion drift.
    par = o.elem.parent
    vy_au_yr = -par.J*(par.cosi_sinΩ*(o.sol.cosν_ω + par.ecosω) + par.cosΩ*(o.sol.sinν_ω + par.esinω)) # [AU/year]
    to_mas = rad2as*oftype(vy_au_yr, 1e3)/o.compensated.distance2_pc
    return vy_au_yr*to_mas + (o.compensated.pmdec2 - o.elem.pmdec) # [mas/year]
end
# The non-Keplerian deviation due to the system's 3D motion is applied
# additively on top of the mass-ratio-scaled (reflex) Keplerian quantity.
function radvel(o::OrbitSolutionAbsoluteVisual, M_planet)
    # Scale the relative-orbit RV by -M_planet/M_total to get the host's
    # reflex motion, then add the secular RV drift.
    reflex = -M_planet/totalmass(o.elem)*radvel(o.sol)
    return reflex + (o.compensated.rv2 - o.elem.rv)
end
function pmra(o::OrbitSolutionAbsoluteVisual, M_planet)
    # Reflex proper motion in RA (mass-ratio-scaled), plus the secular drift.
    par = o.elem.parent
    vx_au_yr = par.J*(par.cosi_cosΩ*(o.sol.cosν_ω + par.ecosω) - par.sinΩ*(o.sol.sinν_ω + par.esinω)) # [AU/year]
    to_mas = rad2as*oftype(vx_au_yr, 1e3)/o.compensated.distance2_pc
    reflex = -M_planet/totalmass(o.elem)*(vx_au_yr*to_mas) # [mas/year]
    return reflex + (o.compensated.pmra2 - o.elem.pmra)
end
function pmdec(o::OrbitSolutionAbsoluteVisual, M_planet)
    # Reflex proper motion in Dec (mass-ratio-scaled), plus the secular drift.
    par = o.elem.parent
    vy_au_yr = -par.J*(par.cosi_sinΩ*(o.sol.cosν_ω + par.ecosω) + par.cosΩ*(o.sol.sinν_ω + par.esinω)) # [AU/year]
    to_mas = rad2as*oftype(vy_au_yr, 1e3)/o.compensated.distance2_pc
    reflex = -M_planet/totalmass(o.elem)*(vy_au_yr*to_mas) # [mas/year]
    return reflex + (o.compensated.pmdec2 - o.elem.pmdec)
end
function accra(o::OrbitSolutionAbsoluteVisual)
    # Acceleration is not implemented for AbsoluteVisual orbits yet; the
    # candidate implementation is kept below for reference.
    throw(NotImplementedException())
    # if eccentricity(o.elem) >= 1
    #     @warn "acceleration not tested for ecc >= 1 yet. Results are likely wrong."
    # end
    # ẍcart = -o.elem.parent.A*(1 + o.sol.ecosν)^2 * (o.elem.parent.cosi_cosΩ*o.sol.sinν_ω + o.elem.parent.sinΩ*o.sol.cosν_ω) # [AU/year^2]
    # cart2angle = rad2as*oftype(ẍcart, 1e3)/o.compensated.distance2_pc
    # ẍang = ẍcart*cart2angle # [mas/year^2]
    # return ẍang
end
function accdec(o::OrbitSolutionAbsoluteVisual)
    # Acceleration is not implemented for AbsoluteVisual orbits yet.
    # BUGFIX: the throw below was previously commented out, so this method
    # silently returned `nothing`; throw explicitly for consistency with
    # `accra`. The candidate implementation is kept below for reference.
    throw(NotImplementedException())
    # if eccentricity(o.elem) >= 1
    #     @warn "acceleration not tested for ecc >= 1 yet. Results are likely wrong."
    # end
    # ÿcart = o.elem.parent.A*(1 + o.sol.ecosν)^2 * (o.elem.parent.cosi_sinΩ*o.sol.sinν_ω - o.elem.parent.cosΩ*o.sol.cosν_ω) # [AU/year^2]
    # cart2angle = rad2as*oftype(ÿcart, 1e3)/o.compensated.distance2_pc
    # ÿang = ÿcart*cart2angle # [mas/year^2]
    # return ÿang
end
"""
PlanetOrbits.ra(orbit, t)
Get the instantaneous position of a companion in degrees of RA and Dec.
For the relative position, see `raoff`.
"""
function ra(o::OrbitSolutionAbsoluteVisual, M_planet)
# Already solved at correct epoch accoutning for light travel time
# difference wrt. reference epoch.
kep_offset_mas = raoff(o, M_planet)
total = o.compensated.ra2 + kep_offset_mas/60/60/1000
return total
end
"""
PlanetOrbits.dec(orbit, t)
Get the instantaneous position of a companion in degrees of RA and Dec.
For the relative position, see `decoff`.
"""
function dec(o::OrbitSolutionAbsoluteVisual, M_planet)
# Already solved at correct epoch accoutning for light travel time
# difference wrt. reference epoch.
kep_offset_mas = decoff(o, M_planet)
total = o.compensated.dec2 + kep_offset_mas/60/60/1000
return total
end
# Pretty printing
# Shows the wrapped orbit's display first, then appends the
# AbsoluteVisual-specific astrometric parameters. The triple-quoted string
# below is user-visible output.
function Base.show(io::IO, mime::MIME"text/plain", elem::AbsoluteVisual)
    show(io, mime, elem.parent)
    print(io, """\
AbsoluteVisual
──────────────────────────
reference epoch [days] = $(round(elem.ref_epoch, digits=1))
plx [mas] = $(round(elem.plx, digits=3))
ra [°] = $(round(elem.ra, digits=3))
dec [°] = $(round(elem.dec, digits=3))
pmra [mas/yr] = $(round(elem.pmra, digits=3))
pmdec [mas/yr] = $(round(elem.pmdec, digits=3))
rv [m/s] = $(round(elem.rv, digits=3))
──────────────────────────
""")
end
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 12651 |
"""
This constructor assumes that 1 year, 1 AU, and 1 solar mass are compatible. According
to IAU definitions, they are not. Use with care where high precision is needed.
"""
struct CartesianOrbit{T<:Number} <: AbstractOrbit{T}
# Note: these position and velocity values are in *barycentric* coordinates
x::T # AU (increasing to the left)
y::T # AU (increasing upwards)
z::T # AU (increasing away)
vx::T # AU/yr
vy::T # AU/yr
vz::T # AU/yr
M::T # Host mass (solar masses)
# Orbital properties
a::T
e::T
i::T
ω::T
Ω::T
tp::T
# Physical constants
T::T
n::T
ν_fact::T
p::T
# Geometric factors
cosi::T
sini::T
cosΩ::T
sinΩ::T
ecosω::T
esinω::T
cosi_cosΩ::T
cosi_sinΩ::T
# Semiamplitudes
J::T
K::T
A::T
function CartesianOrbit(x, y, z, vx, vy, vz, M, tref=0; tol=1e-8)
# tref is the epoch at which these values are provided
# This code was adapted from a combination of:
# https://github.com/spencerw/keplerorbit/blob/master/KeplerOrbit/KeplerOrbit.py (MIT license)
# https://github.com/esa/pykep/blob/403a7dfe8ed3ff19b43bcbd6e6856de7f820cf55/src/third_party/cspice/oscelt.c#L429 (public domain)
# https://github.com/poliastro/poliastro/blob/21fd7719e89a7d22b4eac63141a60a7f1b01768c/src/poliastro/core/elements.py#L279 (MIT license)
# TODO: This constructor assumes that 1 year, 1 AU, and 1 solar mass are compatible. According
# to IAU definitions, they are not. Use with care where high precision is needed.
if M isa Integer
M = float(M)
end
x, y, z, vx, vy, vz, M, tref = promote(x, y, z, vx, vy, vz, M, tref)
T = typeof(x)
# Unit vectors
i⃗ = @SVector(T[1.0, 0.0, 0.0])
j⃗ = @SVector(T[0.0, 1.0, 0.0])
k⃗ = @SVector(T[0.0, 0.0, 1.0])
# Position vector
r⃗ = @SVector([ x, y, z])
r = norm(r⃗)
if r == 0
error("0 position vector")
end
# Velocity vector
v⃗ = @SVector([vx, vy, vz]) ./ 2π # TODO: track this down!
v = norm(v⃗)
if r == 0
error("0 velocity vector")
end
# Angular momentum vector
h⃗ = r⃗ × v⃗
h = norm(h⃗)
if h == 0
error("velocity and position vectors are parallel (degenerate case)")
end
# Eccentricity vector
tmp⃗ = v⃗ × h⃗
e⃗ = tmp⃗ / M - r⃗ / r
e = norm(e⃗)
# Equivalent:
# e⃗ = ((v⃗ ⋅ v⃗ - M / r) * r⃗ - (r⃗ ⋅ v⃗) * v⃗) / M
# e = norm(e⃗)
n⃗ = k⃗ × h⃗
n = norm(n⃗)
oneminusesq = (1 - e^2)
# Inclination
i = π - acos((k⃗ ⋅ h⃗) / h)
if e < 1
ν_fact = √((1+e)/(1-e)) # true anomaly prefactor
else
ν_fact = √((1+e)/(e-1)) # true anomaly prefactor
end
circular = e < tol
equatorial = abs(i) < tol
F2ν(F) = 2atan(ν_fact*tanh(F/2))
# TODO:
# These cases are extremely ugly and messy.
# They are correct, but should be refactored.
if equatorial && !circular
Ω = 0
Ω += π/2
ω = rem2pi(atan(e⃗[2], e⃗[1]), RoundDown)
ν = atan((h⃗ ⋅ (e⃗ × r⃗)) / h, r⃗ ⋅ e⃗)
p = h^2 / M
a = p / (1 - (e^2))
if a > 0
e_se = (r⃗⋅v⃗) / sqrt(M*a)
e_ce = r*v^2 / M - 1
ν = 2atan(ν_fact*tan(atan(e_se, e_ce) / 2))
a = p/oneminusesq
period_days = √(a^3/M)*kepler_year_to_julian_day_conversion_factor
period_yrs = period_days/year2day_julian
meanmotion = 2π/period_yrs # mean motion
else
e_sh = (r⃗ ⋅ v⃗) / sqrt(-M*a)
e_ch = r * v^2 / M - 1
ν = F2ν(log((e_ch + e_sh) / (e_ch - e_sh)) / 2)
period_days = Inf
meanmotion = 2π * √(M/-a^3) *kepler_year_to_julian_day_conversion_factor/year2day_julian # mean motion
end
ω = pi-ω
if e < 1
EA = 2atan(tan(ν/2)/ν_fact)
MA = EA - e*sin(EA)
else
EA = 2atanh(tan(ν/2)/ν_fact)
MA = e*sinh(EA) -EA
end
tp = -MA / meanmotion * PlanetOrbits.year2day_julian + tref
elseif !equatorial && circular
e = oftype(e, 0)
e⃗ = e⃗ * oftype(e, 0)
Ω = rem2pi(atan(n⃗[2], n⃗[1]), RoundDown)
ω = 0
# Argument of latitude
ν = atan((r⃗ ⋅ (h⃗ × n⃗)) / h, r⃗ ⋅ n⃗)
p = h^2 / M
a = p/oneminusesq
period_days = √(a^3/M)*kepler_year_to_julian_day_conversion_factor
period_yrs = period_days/year2day_julian
meanmotion = 2π/period_yrs # mean motion
Ω = 3π/2 - Ω
# Remaining calculation: determine tp
MA = EA = 2atan(tan(ν/2)/ν_fact)
tp = -MA / meanmotion * PlanetOrbits.year2day_julian + tref
elseif equatorial && circular
e = oftype(e, 0)
e⃗ = e⃗ * oftype(e, 0)
Ω = 0
ω = 0
ν = rem2pi(atan(r⃗[2], r⃗[1]), RoundDown)
p = h^2 / M
a = p/oneminusesq
period_days = √(a^3/M)*kepler_year_to_julian_day_conversion_factor
period_yrs = period_days/year2day_julian
meanmotion = 2π/period_yrs # mean motion
# Ω = 3π/2 - Ω
# ω = 0#π/2
# @show Ω
# Ω -= 3π/2
Ω = -π/2
MA = EA = 2atan(tan(ν/2)/ν_fact)
tp = MA / meanmotion * year2day_julian + tref
else
p = h^2 / M
a = p / (1 - (e^2))
# elliptical or hyperbolic
if a > 0
e_se = (r⃗⋅v⃗) / sqrt(M*a)
e_ce = r*v^2 / M - 1
ν = 2atan(ν_fact*tan(atan(e_se, e_ce) / 2))
a = p/oneminusesq
period_days = √(a^3/M)*kepler_year_to_julian_day_conversion_factor
period_yrs = period_days/year2day_julian
meanmotion = 2π/period_yrs # mean motion
else
e_sh = (r⃗ ⋅ v⃗) / sqrt(-M*a)
e_ch = r * v^2 / M - 1
ν = F2ν(log((e_ch + e_sh) / (e_ch - e_sh)) / 2)
period_days = Inf
meanmotion = 2π * √(M/-a^3)*kepler_year_to_julian_day_conversion_factor/year2day_julian
end
px = r⃗ ⋅ n⃗
py = (r⃗ ⋅ (h⃗ × n⃗)) / h
ω = rem2pi(atan(py, px) - ν, RoundNearest)
if n⃗[2] >= 0
Ω = 3pi/2 - acos(n⃗[1]/n);
else
Ω = acos(n⃗[1]/n) -pi/2
end
# Remaining calculation: determine tp
# Need mean anomaly
if e < 1
EA = 2atan(tan(ν/2)/ν_fact)
MA = EA - e*sin(EA)
else
EA = 2atanh(tan(ν/2)/ν_fact)
MA = e*sinh(EA) -EA
end
tp = -MA / meanmotion * year2day_julian + tref
end
Ω += pi
# Geometric factors involving rotation angles
sini, cosi = sincos(i)
sinω, cosω = sincos(ω)
sinΩ, cosΩ = sincos(Ω)
ecosω = e * cosω
esinω = e * sinω
cosi_cosΩ = cosi * cosΩ
cosi_sinΩ = cosi * sinΩ
if e < 1
J = ((2π * a) / period_yrs)*kepler_year_to_julian_day_conversion_factor/year2day_julian / √oneminusesq # horizontal velocity semiamplitude [AU/year]
K = J * au2m * sec2year_julian * sini # radial velocity semiamplitude [m/s]
A = ((4π^2 * a) / period_yrs^2) / oneminusesq^2 # horizontal acceleration semiamplitude [AU/year^2]
else
J = -((2π*a)/√(M/-a^3))*kepler_year_to_julian_day_conversion_factor/year2day_julian / √(-oneminusesq) # horizontal velocity semiamplitude [AU/year]
K = J*au2m*sec2year_julian*sini # radial velocity semiamplitude [m/s]
# TODO: acceleration not verified for ecc >= 1 yet. Results will be silently wrong.
A = ((4π^2 * a)/(M/-a^3))*kepler_year_to_julian_day_conversion_factor/year2day_julian / oneminusesq^2 # horizontal acceleration semiamplitude [AU/year^2]
end
orbit = new{typeof(M)}(
# Passed parameters that define the elements
x, y, z, vx, vy, vz, M,
# Converted campbell elements
a, e, i, ω, Ω, tp,
# Cached calcuations
period_days, meanmotion, ν_fact, p,
# Geometric factors
cosi, sini, cosΩ, sinΩ, ecosω, esinω, cosi_cosΩ, cosi_sinΩ,
# Semiamplitudes
J, K, A
)
return orbit
end
end
CartesianOrbit(;x, y, z, vx, vy, vz, M, tref=0, tol=1e-8, kwargs...) = CartesianOrbit(x, y, z, vx, vy, vz, M, tref; tol)
# Clamp a value that floating-point roundoff pushed marginally outside
# [-1, 1] back onto ±1 (e.g. before asin/acos). Values well outside the
# tolerance window, and all values inside [-1, 1], pass through unchanged.
#
# BUGFIX: the original negative branch compared `abs(arg) < -1`, which is
# unreachable, and the positive branch dropped the sign — so an input just
# below -1 was "cleaned" to +1. Use copysign to preserve the sign.
function cleanroundoff(arg)
    # Due to round off, we can sometimes end up just a tiny bit greater than 1 or less than -1.
    # In that case, apply a threshold of 1.
    if 1 < abs(arg) < 1 + 3eps()
        arg = copysign(one(arg), arg)
    end
    return arg
end
# Accessors forwarding the cached fields of a CartesianOrbit.
period(o::CartesianOrbit) = o.T        # orbital period [days] (Inf if hyperbolic)
meanmotion(o::CartesianOrbit) = o.n    # mean motion [rad/year]
eccentricity(o::CartesianOrbit) = o.e
totalmass(o::CartesianOrbit) = o.M     # host mass [M⊙]
inclination(o::CartesianOrbit) = o.i   # [rad]
semimajoraxis(o::CartesianOrbit) = o.a # [AU]
function _trueanom_from_eccanom(o::CartesianOrbit, EA)
    # ν_fact was prepared in the constructor for whichever regime applies,
    # so only the half-angle function differs: tan for closed orbits, tanh
    # for hyperbolic ones (where EA is really the hyperbolic anomaly).
    halfangle = o.e < 1 ? tan(EA/2) : tanh(EA/2)
    return 2*atan(o.ν_fact*halfangle)
end
# Epoch of periastron passage [days] (derived in the constructor).
periastron(elem::CartesianOrbit) = elem.tp
# Radial velocity semiamplitude K [m/s] (precomputed in the constructor).
semiamplitude(elem::CartesianOrbit) = elem.K
"""
Represents a `CartesianOrbit` evaluated to some position.
"""
struct OrbitSolutionCartesian{T<:Number,TEl<:CartesianOrbit} <: AbstractOrbitSolution
elem::TEl
ν::T
EA::T
sinν_ω::T
cosν_ω::T
ecosν::T
r::T
t::T
function OrbitSolutionCartesian(elem, ν, EA, sinν_ω, cosν_ω, ecosν, r, t)
promoted = promote(ν, EA, sinν_ω, cosν_ω, ecosν, r, t)
return new{eltype(promoted),typeof(elem)}(elem, promoted...)
end
end
export CartesianOrbit
# The epoch [days] at which this solution was evaluated.
soltime(os::OrbitSolutionCartesian) = os.t
# Solve the orbit at a given true anomaly, producing an OrbitSolutionCartesian.
function orbitsolve_ν(elem::CartesianOrbit, ν, EA=2atan(tan(ν / 2) / elem.ν_fact), t=_time_from_EA(elem, EA))
    # Cache the rotation/projection terms the solution object needs.
    s, c = sincos(elem.ω + ν)
    ecν = elem.e * cos(ν)
    separation = elem.p / (1 + ecν)
    return OrbitSolutionCartesian(elem, ν, EA, s, c, ecν, separation, t)
end
# TODO: we can accelerate this since we already know some parameters
"""
Convert an existing orbit object to a CartesianOrbit.
"""
function CartesianOrbit(os::AbstractOrbitSolution; tol=1e-8)
    # Evaluate the barycentric state vector of the solution, then rebuild the
    # orbit from that state at the solution's epoch.
    position = (PlanetOrbits.posx(os), PlanetOrbits.posy(os), PlanetOrbits.posz(os))
    velocity = (PlanetOrbits.velx(os), PlanetOrbits.vely(os), PlanetOrbits.velz(os))
    return CartesianOrbit(position..., velocity..., totalmass(os.elem), soltime(os); tol)
end
#=
o = orbit(a=1.0,i=0,ω=π/2,e=0.5,Ω=0,M=1,plx=100.,τ=0.0)
##
x = -1.05
y = 3.782338790704024e-16
z = -2.5048146051777413e-17
vx = -3.490253699036788e-16 * 2pi
vy = -0.9464377445249709 * 2pi
vz = 0.09496052074620637 * 2pi
oc = CartesianOrbit(x,y,z,vx,vy,vz,1,0)
sc = orbitsolve(oc, 0)
x = PlanetOrbits.posx(sc)
y = PlanetOrbits.posy(sc)
z = PlanetOrbits.posz(sc)
vx = PlanetOrbits.velx(sc)
vy = PlanetOrbits.vely(sc)
vz = PlanetOrbits.velz(sc)
oc2= CartesianOrbit(x,y,z,vx,vy,vz,1,0)
plot(orbitsolve(oc,0));plot!(orbitsolve(oc2,0))
# i appears to be going backwards
o = orbit(
a = 1,
i = π/4,
Ω = 0.001,
ω = 0.001,
e = 0.5,
τ = 0.5,
M = 1,
tref=0
)
t = 0
oc3 = CartesianOrbit(orbitsolve(o,t))
oc4 = CartesianOrbit(orbitsolve(oc3,t))
oc5 = CartesianOrbit(orbitsolve(oc4,t))
o.ω, oc3.ω, oc4.ω,oc5.ω
o.Ω, oc3.Ω, oc4.Ω,oc5.Ω
o.i, oc3.i, oc4.i,oc5.i
o.τ, oc3.τ, oc4.τ,oc5.τ
meananom(o,t), meananom(oc3,t), meananom(oc4,t),meananom(oc5,t)
plot(orbitsolve(o,t),label="o", lw=2, ls=:dash, color=1)
plot!(orbitsolve(oc3,t), label="oc3", color=2)
plot!(orbitsolve(oc4,t), label="oc4", color=3)
plot!(orbitsolve(oc5,t), label="oc5", color=4)
I think there are three things left:
* something about the dates / times stamping of CartesianOrbit is not making sense.
=#
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 7496 |
"""
KepOrbit(
a, # semi-major axis [AU]
e, # eccentricity
i, # inclination [rad]
ω, # argument of periapsis [rad]
Ω, # longitude of ascending node [rad]
tp, # epoch of periastron passage at MJD=0
M, # mass of primary [M⊙]
)
Represents the Keplerian elements of a secondary body orbiting a primary.
Use the traditional Campbell parameterization.
Values can be specified by keyword argument or named tuple for convenience.
"""
struct KepOrbit{T<:Number} <: AbstractOrbit{T}
# Orbital properties
a::T
e::T
i::T
ω::T
Ω::T
tp::T
M::T
# Physical constants
T::T
n::T
ν_fact::T
p::T
# Geometric factors
cosi::T
sini::T
cosΩ::T
sinΩ::T
ecosω::T
esinω::T
cosi_cosΩ::T
cosi_sinΩ::T
# Semiamplitudes
J::T
K::T
A::T
# Inner constructor to enforce invariants and pre-calculate
# constants from the orbital elements
function KepOrbit(a, e, i, ω, Ω, tp, M)
# Enforce invariants on user parameters
# a = max(a, zero(a))
# e = max(zero(e), min(e, one(e)))
M = max(M, zero(M))
i = rem(i, π, RoundDown)
Ω = rem2pi(Ω, RoundDown)
if e >= 1 && a > 0
@warn "Negative semi-major is required for hyperbolic (e>1) orbits. Flipping sign (maxlog=1)." maxlog=1
a = -a
end
# Pre-calculate factors to be re-used by orbitsolve
# Physical constants of system and orbit
if e < 1
period_days = √(a^3/M)*kepler_year_to_julian_day_conversion_factor
period_yrs = period_days/year2day_julian
n = 2π/period_yrs # mean motion
else
period_days = Inf
# TODO: Need to confirm where this 2pi is coming from
# mean motion
n = 2π * √(M/-a^3)*kepler_year_to_julian_day_conversion_factor/year2day_julian
# n = √(M/-a^3) # mean motion
end
if e < 1
ν_fact = √((1 + e)/(1 - e)) # true anomaly prefactor
else
ν_fact = √((1 + e)/(e - 1)) # true anomaly prefactor
end
oneminusesq = (1 - e^2)
p = a*oneminusesq # semi-latus rectum [AU]
# Get type of parameters
T = promote_type(
typeof(a), typeof(e), typeof(i), typeof(ω),
typeof(Ω), typeof(tp), typeof(M),
)
# The user might pass in integers, but it makes no sense to do these
# calculations on integers. Assume they mean to use floats.
if T <: Integer
T = promote_type(T, Float64)
end
# Geometric factors involving rotation angles
sini, cosi = sincos(i)
sinω, cosω = sincos(ω)
sinΩ, cosΩ = sincos(Ω)
ecosω = e*cosω
esinω = e*sinω
cosi_cosΩ = cosi*cosΩ
cosi_sinΩ = cosi*sinΩ
if e < 1
# Velocity and acceleration semiamplitudes
J = ((2π*a)/period_yrs) / √oneminusesq # horizontal velocity semiamplitude [AU/year]
K = J*au2m*sec2year_julian*sini # radial velocity semiamplitude [m/s]
A = ((4π^2 * a)/period_yrs^2) / oneminusesq^2 # horizontal acceleration semiamplitude [AU/year^2]
else
J = -((2π*a)/√(M/-a^3)) / √(-oneminusesq) # horizontal velocity semiamplitude [AU/year]
K = J*au2m*sec2year_julian*sini # radial velocity semiamplitude [m/s]
# TODO: acceleration not verified for ecc >= 1 yet. Results will be silently wrong.
A = ((4π^2 * a)/(M/-a^3)) / oneminusesq^2 # horizontal acceleration semiamplitude [AU/year^2]
end
new{T}(
# Passed parameters that define the elements
a, e, i, ω, Ω, tp, M,
# Cached calcuations
period_days, n, ν_fact, p,
# Geometric factors
cosi, sini, cosΩ, sinΩ, ecosω, esinω, cosi_cosΩ, cosi_sinΩ,
# Semiamplitudes
J, K, A
)
end
end
# Allow arguments to be specified by keyword
# (extra keywords are accepted and ignored so callers can pass a superset).
KepOrbit(;a, e, i, ω, Ω, tp, M, kwargs...) = KepOrbit(a, e, i, ω, Ω, tp, M)
export KepOrbit
# Convert between numeric element types of KepOrbit by casting the seven
# defining Campbell elements; cached quantities are recomputed by the
# constructor.
Base.convert(::Type{KepOrbit{T2}}, orb::KepOrbit{T1}) where {T1<:Number,T2<:Number} = KepOrbit(
    T2(orb.a),
    T2(orb.e),
    T2(orb.i),
    T2(orb.ω),
    T2(orb.Ω),
    T2(orb.tp),
    T2(orb.M)
)
"""
astuple(elements)
Return the parameters of a KepOrbit value as a tuple.
"""
function astuple(elem::KepOrbit)
return (;elem.a, elem.e, elem.i, elem.ω, elem.Ω, elem.tp, elem.M)
end
export astuple
# Pretty printing
# Multi-line REPL display: angles shown in degrees, plus the derived period
# and mean motion alongside the defining elements.
Base.show(io::IO, ::MIME"text/plain", elem::KepOrbit) = print(
io, """
    $(typeof(elem))
    ─────────────────────────
    a   [au ] = $(round(elem.a, sigdigits=3))
    e         = $(round(elem.e, sigdigits=8))
    i   [°  ] = $(round(rad2deg(elem.i), sigdigits=3))
    ω   [°  ] = $(round(rad2deg(elem.ω), sigdigits=3))
    Ω   [°  ] = $(round(rad2deg(elem.Ω), sigdigits=3))
    tp   [day] = $(round(elem.tp, sigdigits=3))
    M   [M⊙ ] = $(round(elem.M, sigdigits=3))
    period      [yrs ] : $(round(period(elem)*day2year_julian, digits=1))
    mean motion [°/yr] : $(round(rad2deg(meanmotion(elem)), sigdigits=3))
    ──────────────────────────
    """
)
# Compact single-line display (used e.g. inside collections).
Base.show(io::IO, elem::KepOrbit) = print(io,
"KepOrbit($(round(elem.a, sigdigits=3)), $(round(elem.e, sigdigits=3)), $(round(elem.i, sigdigits=3)), "*
"$(round(elem.ω, sigdigits=3)), $(round(elem.Ω, sigdigits=3)), $(round(elem.tp, sigdigits=3)), "*
"$(round(elem.M, sigdigits=3)))"
)
"""
Represents a `KepOrbit` evaluated to some position.
"""
struct OrbitSolutionKep{T<:Number,TEl<:KepOrbit} <: AbstractOrbitSolution
elem::TEl
ν::T
EA::T
sinν_ω::T
cosν_ω::T
ecosν::T
r::T
t::T
function OrbitSolutionKep(elem, ν, EA, sinν_ω, cosν_ω, ecosν, r, t)
promoted = promote(ν, EA, sinν_ω, cosν_ω, ecosν, r, t)
return new{eltype(promoted),typeof(elem)}(elem, promoted...)
end
end
_solution_type(::Type{KepOrbit}) = OrbitSolutionKep
# Accessors for the cached/derived orbital properties.
period(elem::KepOrbit) = elem.T          # orbital period [days]
meanmotion(elem::KepOrbit) = elem.n      # mean motion [rad/yr]
eccentricity(o::KepOrbit) = o.e
totalmass(o::KepOrbit) = o.M             # total system mass [M⊙]
inclination(o::KepOrbit) = o.i           # [rad]
semimajoraxis(o::KepOrbit) = o.a         # [au]
# Convert an eccentric anomaly to a true anomaly for this orbit.
# For hyperbolic orbits (e ≥ 1) the prefactor ν_fact was built with (e - 1)
# in the constructor, and the half-angle tangent becomes a hyperbolic tangent.
function _trueanom_from_eccanom(o::KepOrbit, EA)
    half = o.e < 1 ? tan(EA/2) : tanh(EA/2)
    return 2*atan(o.ν_fact*half)
end
# Epoch of periastron passage (as stored; same units as tp).
periastron(elem::KepOrbit) = elem.tp
# Radial velocity semiamplitude K [m/s], cached at construction.
semiamplitude(elem::KepOrbit) = elem.K
# ----------------------------------------------------------------------------------------------------------------------
# Solve Orbit in Cartesian Coordinates
# ----------------------------------------------------------------------------------------------------------------------
# Invert the true anomaly → eccentric anomaly mapping.
function EA_from_ν(elem::KepOrbit, ν)
    ratio = tan(ν/2)/elem.ν_fact
    if elem.e < 1
        return 2atan(ratio)
    end
    # Hyperbolic branch: clamp values a hair above 1 (floating point noise)
    # back to exactly 1 so atanh stays within its domain.
    if 1 < ratio < 1 + sqrt(eps(ratio))
        ratio = oftype(ratio, 1)
    end
    return 2atanh(ratio)
end
"""
    orbitsolve_ν(elem::KepOrbit, ν, [EA], [t])

Solve the orbit at true anomaly `ν` [rad]. The eccentric anomaly `EA` and
time `t` are derived from `ν` by default, but may be supplied to avoid
recomputation. Returns an `OrbitSolutionKep`.
"""
function orbitsolve_ν(elem::KepOrbit, ν, EA=EA_from_ν(elem, ν), t=_time_from_EA(elem, EA))
    # Cache sin/cos of (ω + ν) for the downstream projection formulas.
    sinν_ω, cosν_ω = sincos(elem.ω + ν)
    ecosν = elem.e*cos(ν)
    # Orbit equation: separation from the focus at this true anomaly.
    r = elem.p/(1 + ecosν)
    return OrbitSolutionKep(elem, ν, EA, sinν_ω, cosν_ω, ecosν, r, t)
end
# Time at which this solution was evaluated.
soltime(os::OrbitSolutionKep) = os.t
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 4198 | """
RadialVelocityOrbit(a, e, ω, tp, M)
Represents an orbit of a planet with only the information
retrievable from radial velocity measurements.
That is, without inclination, longitude of ascending node,
or distance to the system.
"""
struct RadialVelocityOrbit{T<:Number} <: AbstractOrbit{T}
    a::T       # semi-major axis [au]
    e::T       # eccentricity
    ω::T       # argument of periapsis [rad]
    tp::T      # epoch of periastron passage
    M::T       # total mass [M⊙]
    # Physical constants
    T::T       # period [days]
    n::T       # mean motion [rad/yr]
    ν_fact::T  # true anomaly prefactor √((1+e)/(1-e))
    # Geometric factors
    ecosω::T
    # Semiamplitude
    K::T       # radial velocity semiamplitude [m/s]
    # Inner constructor to enforce invariants and pre-calculate
    # constants from the orbital elements
    # NOTE(review): `tref` only participates in type promotion; it is not
    # stored — confirm whether it was meant to be used.
    function RadialVelocityOrbit(a, e, ω, tp, M, tref=58849)
        # Enforce invariants on user parameters
        M = max(M, zero(M))
        # Pre-calculate factors to be re-used by orbitsolve
        # Physical constants of system and orbit (Kepler's third law)
        period_days = √(a^3/M)*kepler_year_to_julian_day_conversion_factor
        period_yrs = period_days/year2day_julian
        n = 2π/period_yrs # mean motion
        ν_fact = √((1 + e)/(1 - e)) # true anomaly prefactor
        # Get type of parameters
        T = promote_type(
            typeof(a), typeof(e), typeof(ω),
            typeof(tp), typeof(M), typeof(tref)
        )
        # The user might pass in integers, but it makes no sense to do these
        # calculations on integers. Assume they mean to use floats.
        if T <: Integer
            T = promote_type(T, Float64)
        end
        # Geometric factors involving rotation angles
        ecosω = e*cos(ω)
        # Velocity and acceleration semiamplitudes
        J = ((2π*a)/period_yrs) / √(1 - e^2) # horizontal velocity semiamplitude [AU/year]
        K = J*au2m*sec2year_julian # radial velocity semiamplitude [m/s]
        new{T}(
            # Passed parameters that define the elements
            a, e, ω, tp, M,
            # Cached calcuations
            period_days, n, ν_fact,
            # Geometric factors
            ecosω,
            # Semiamplitude
            K
        )
    end
end
# Allow arguments to be specified by keyword
RadialVelocityOrbit(;a, e, ω, tp, M, kwargs...) = RadialVelocityOrbit(a, e, ω, tp, M)
export RadialVelocityOrbit
# Accessors for cached/derived properties.
period(elem::RadialVelocityOrbit) = elem.T    # [days]
meanmotion(elem::RadialVelocityOrbit) = elem.n # [rad/yr]
eccentricity(o::RadialVelocityOrbit) = o.e
totalmass(o::RadialVelocityOrbit) = o.M       # [M⊙]
semimajoraxis(o::RadialVelocityOrbit) = o.a   # [au]
# Eccentric anomaly → true anomaly (elliptical form).
_trueanom_from_eccanom(o::RadialVelocityOrbit, EA) =2*atan(o.ν_fact*tan(EA/2))
periastron(o::RadialVelocityOrbit) = o.tp
semiamplitude(elem::RadialVelocityOrbit) = elem.K # [m/s]
# Pretty printing
# Multi-line REPL display; angles in degrees, derived quantities at the bottom.
Base.show(io::IO, ::MIME"text/plain", elem::RadialVelocityOrbit) = print(
io, """
    $(typeof(elem))
    ─────────────────────────
    a   [au ] = $(round(elem.a, sigdigits=3))
    e         = $(round(elem.e, sigdigits=8))
    ω   [°  ] = $(round(rad2deg(elem.ω), sigdigits=3))
    tp         = $(round(elem.tp, sigdigits=3))
    M   [M⊙ ] = $(round(elem.M, sigdigits=3))
    ──────────────────────────
    period      [yrs ] : $(round(period(elem)*day2year_julian, digits=1))
    mean motion [°/yr] : $(round(rad2deg(meanmotion(elem)), sigdigits=3))
    semiamplitude₂ [m/s] : $(round(semiamplitude(elem), digits=1))
    ──────────────────────────
    """
)
"""
orbitsolve_ν(elem, ν)
Solve a keplerian orbit from a given true anomaly [rad].
See orbitsolve for the same function accepting a given time.
"""
function orbitsolve_ν(elem::RadialVelocityOrbit, ν, EA=2atan(tan(ν/2)/elem.ν_fact), t=_time_from_EA(elem, EA))
cosν_ω = cos(elem.ω + ν)
return OrbitSolutionRadialVelocity(elem, ν, EA, cosν_ω, t)
end
"""
Represents a `RadialVelocityOrbit` evaluated to some position.
"""
struct OrbitSolutionRadialVelocity{T<:Number,TEl<:RadialVelocityOrbit} <: AbstractOrbitSolution
elem::TEl
ν::T
EA::T
cosν_ω::T
t::T
function OrbitSolutionRadialVelocity(elem, ν, EA, cosν_ω, t)
promoted = promote(ν, EA, cosν_ω, t)
return new{eltype(promoted),typeof(elem)}(elem, promoted...)
end
end
export OrbitSolutionRadialVelocity
soltime(os::OrbitSolutionRadialVelocity) = os.t
_solution_type(::Type{RadialVelocityOrbit}) = OrbitSolutionRadialVelocity
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 9478 | """
ThieleInnesOrbit(e, tp, M, plx, A, B, F, G)
Represents a visual orbit of a planet using Thiele-Innes
orbital elements. Convertable to and from a VisualOrbit.
This parameterization does not have the issue that traditional
angular parameters have where the argument of periapsis and
longitude of ascending node become undefined for circular and face
on orbits respectively.
!!! warning
There is a remaining bug in this implementation for pi <= Ω < 2pi
"""
struct ThieleInnesOrbit{T<:Number} <: AbstractOrbit{T}
    # Orbital properties
    e::T    # eccentricity
    tp::T   # epoch of periastron passage
    M::T    # total mass [M⊙]
    plx::T  # parallax [mas]
    A::T    # Thiele-Innes constants [mas]
    B::T
    F::T
    G::T
    # Constants (derived at construction)
    C::T       # out-of-plane Thiele-Innes constants
    H::T
    T::T       # period [days]
    n::T       # mean motion
    ν_fact::T  # true anomaly prefactor
    # Inner constructor to enforce invariants and pre-calculate
    # constants from the orbital elements
    function ThieleInnesOrbit(e, tp, M, plx, A, B, F, G)
        e, tp, M, plx, A, B, F, G = promote(e, tp, M, plx, A, B, F, G)
        T = typeof(e)
        # TODO: confirm these following lines are necessary for us to calculate
        # the period and mean motion (that's all we really need)
        u = (A^2 + B^2 + F^2 + G^2)/2
        v = A*G - B * F
        α = sqrt(u + sqrt((u+v)*(u-v)))  # angular semi-major axis [mas]
        a = α/plx
        if e > 1
            a = -a
            @warn "Support for hyperbolic Thiele-Innes orbits is not yet complete. Results will be silently wrong."
        end
        # Recover ω±Ω from the Thiele-Innes constants, resolving the atan
        # quadrant ambiguity using the required signs of sin(ω±Ω).
        ω_p_Ω = atan((B-F),(A+G))
        ω_m_Ω = atan((B+F),(G-A))
        # sign of ω_p_Ω: sin(ω_p_Ω) same sign as (B-F)
        # sign of ω_m_Ω: sin(ω_m_Ω) same sign as (-B-F)
        if sign(sin(ω_p_Ω)) != sign(B-F)
            ω_p_Ω = ω_p_Ω + pi
        end
        if sign(sin(ω_m_Ω)) != sign(-B-F)
            ω_m_Ω = ω_m_Ω + pi
        end
        ω_p_Ω = rem2pi(ω_p_Ω, RoundDown)
        ω_m_Ω = rem2pi(ω_m_Ω, RoundDown)
        ω = (ω_p_Ω + ω_m_Ω)/2
        Ω = (ω_p_Ω - ω_m_Ω)/2
        # TODO: we have an error with the following orbit orbit(M=1, e=0.4, a=1, i=2, Ω=4, ω=1, tp=0, plx=10.0);
        # Error seems to happen when Ω + ω > 2pi
        # The issue is that ω ends up off by pi.
        if Ω < 0
            ω += π
            Ω += π
        end
        ω = rem2pi(ω, RoundDown)
        # Inclination: use whichever of the two equivalent expressions is
        # better conditioned (larger denominator).
        s,c = sincos(ω-Ω)
        d₁ = abs((A+G)*c)
        d₂ = abs((F-B)*s)
        s2,c2 = sincos(ω+Ω)
        if d₁ >= d₂
            i = 2atan(sqrt(abs((A-G)*c2)/d₁))
        else
            i = 2atan(sqrt(abs((B+F)*s2)/d₂))
        end
        C = a*sin(ω)*sin(i)
        H = a*cos(ω)*sin(i)
        # Pre-calculate factors to be re-used by orbitsolve
        # Physical constants of system and orbit
        if e < 1
            period_days = √(a^3/M)*kepler_year_to_julian_day_conversion_factor
            period_yrs = period_days/year2day_julian
            n = 2π/period_yrs # mean motion
        else
            # BUGFIX: this previously assigned `period = Inf`, leaving
            # `period_days` undefined and raising UndefVarError in `new` below.
            period_days = Inf
            # TODO: Need to confirm where this 2pi is coming from
            n = 2π * √(M/-a^3)*kepler_year_to_julian_day_conversion_factor/year2day_julian
            # n = √(M/-a^3) # mean motion
        end
        if e < 1
            ν_fact = √((1 + e)/(1 - e)) # true anomaly prefactor
        else
            ν_fact = √((1 + e)/(e - 1)) # true anomaly prefactor
        end
        new{T}(e, tp, M, plx, A, B, F, G, C, H, period_days, n, ν_fact)
    end
end
# Allow arguments to be specified by keyword.
ThieleInnesOrbit(;e, tp, M, plx, A, B, F, G, kwargs...) = ThieleInnesOrbit(e, tp, M, plx, A, B, F, G)
export ThieleInnesOrbit
period(elem::ThieleInnesOrbit) = elem.T     # [days]
meanmotion(elem::ThieleInnesOrbit) = elem.n
eccentricity(o::ThieleInnesOrbit) = o.e
totalmass(o::ThieleInnesOrbit) = o.M        # [M⊙]
# Semi-major axis [au], recovered from the Thiele-Innes constants:
# α (angular semi-major axis, mas) divided by the parallax (mas).
function semimajoraxis(o::ThieleInnesOrbit)
    u = (o.A^2 + o.B^2 + o.F^2 + o.G^2)/2
    v = o.A*o.G - o.B*o.F
    α = sqrt(u + sqrt((u+v)*(u-v)))
    return α/o.plx
end
"""
    inclination(o::ThieleInnesOrbit)

Recover the inclination [rad] from the Thiele-Innes constants.
"""
function inclination(o::ThieleInnesOrbit)
    # TODO: test
    # Recover ω±Ω, resolving the atan quadrant ambiguity using the required
    # signs of sin(ω±Ω).
    ω_p_Ω = atan((o.B-o.F),(o.A+o.G))
    ω_m_Ω = atan((o.B+o.F),(o.G-o.A))
    # sign of ω_p_Ω: sin(ω_p_Ω) same sign as (B-F)
    # sign of ω_m_Ω: sin(ω_m_Ω) same sign as (-B-F)
    if sign(sin(ω_p_Ω)) != sign(o.B-o.F)
        ω_p_Ω = ω_p_Ω + pi
    end
    if sign(sin(ω_m_Ω)) != sign(-o.B-o.F)
        ω_m_Ω = ω_m_Ω + pi
    end
    ω_p_Ω = rem2pi(ω_p_Ω, RoundDown)
    ω_m_Ω = rem2pi(ω_m_Ω, RoundDown)
    ω = (ω_p_Ω + ω_m_Ω)/2
    Ω = (ω_p_Ω - ω_m_Ω)/2
    # (Removed a stray no-op `Ω, ω` expression left over from debugging.)
    if Ω < 0
        ω += π
        Ω += π
    end
    s,c = sincos(ω-Ω)
    d₁ = abs((o.A+o.G)*c)
    d₂ = abs((o.F-o.B)*s)
    s2,c2 = sincos(ω+Ω)
    # Use whichever of the two equivalent expressions is better conditioned.
    if d₁ >= d₂
        i = 2atan(sqrt(abs((o.A-o.G)*c2)/d₁))
    else
        i = 2atan(sqrt(abs((o.B+o.F)*s2)/d₂))
    end
    return i
end
# Convert eccentric anomaly to true anomaly. For hyperbolic orbits the
# prefactor ν_fact was built with (e - 1) in the constructor, and the
# half-angle tangent becomes a hyperbolic tangent.
_trueanom_from_eccanom(o::ThieleInnesOrbit, EA) =
    2*atan(o.ν_fact*(o.e < 1 ? tan(EA/2) : tanh(EA/2)))
periastron(o::ThieleInnesOrbit) = o.tp
"""
    semiamplitude(o::ThieleInnesOrbit)

Radial velocity semiamplitude K [m/s], derived from the recovered
semi-major axis and inclination.
"""
function semiamplitude(o::ThieleInnesOrbit)
    # TODO: test implementation
    oneminusesq = (1 - eccentricity(o)^2)
    a = semimajoraxis(o)
    sini = sin(inclination(o))
    # period(o) is in days, so period(o)*day2year_julian is the period in
    # years. BUGFIX: the previous expression `(2π*a)/period(o)*day2year_julian`
    # divided by the period in *days* and then multiplied by the day→year
    # factor (off by day2year_julian^2). Parenthesize to divide by years.
    J = ((2π*a)/(period(o)*day2year_julian)) / √oneminusesq # horizontal velocity semiamplitude [AU/year]
    K = J*au2m*sec2year_julian*sini # radial velocity semiamplitude [m/s]
    return K
end
# Distance to the system [au] from the parallax [mas].
distance(o::ThieleInnesOrbit) = 1000/o.plx * pc2au
"""
Represents a `ThieleInnesOrbit` evaluated to some position.
"""
struct OrbitSolutionThieleInnes{T<:Number,TEl<:ThieleInnesOrbit} <: AbstractOrbitSolution
elem::TEl
ν::T
EA::T
x::T
y::T
ẋ::T
ẏ::T
t::T
function OrbitSolutionThieleInnes(elem, ν, EA, x, y,ẋ, ẏ,t)
promoted = promote(ν, EA, x, y,ẋ,ẏ, t)
return new{eltype(promoted),typeof(elem)}(elem, promoted...)
end
end
# Solve the orbit at true anomaly ν, computing the elliptical rectangular
# coordinates (x, y) and their time derivatives used with the Thiele-Innes
# constants.
function orbitsolve_ν(elem::ThieleInnesOrbit, ν, EA=2atan(tan(ν/2)/elem.ν_fact), t=_time_from_EA(elem, EA))
    # https://arxiv.org/ftp/arxiv/papers/1008/1008.3416.pdf
    sea, cea = sincos(EA)
    x = cea - elem.e
    if elem.e < 1
        y = sea * sqrt(1 - elem.e^2)
        ẏ = √(1-elem.e^2)*elem.n*cos(EA)/(1-elem.e*cos(EA))
    else
        # TODO: this is just a guess
        y = sea * sqrt(elem.e^2 - 1)
        ẏ = √(elem.e^2-1)*elem.n*cos(EA)/(elem.e*cos(EA)-1)
    end
    ẋ = -elem.n*sin(EA)/(1-elem.e*cos(EA))
    return OrbitSolutionThieleInnes(elem, ν, EA, x, y, ẋ, ẏ, t)
end
function raoff(sol::OrbitSolutionThieleInnes)
sol.x*sol.elem.B + sol.y*sol.elem.G
end
function decoff(sol::OrbitSolutionThieleInnes)
sol.x*sol.elem.A + sol.y*sol.elem.F
end
# Radial velocity not currently right. Z position is correct.
function radvel(sol::OrbitSolutionThieleInnes)
(sol.ẋ*sol.elem.C + sol.ẏ*sol.elem.H)*au2m*sec2year_julian
end
function posx(sol::OrbitSolutionThieleInnes)
raoff(sol)/sol.elem.plx
end
function posy(sol::OrbitSolutionThieleInnes)
decoff(sol)/sol.elem.plx
end
function posz(sol::OrbitSolutionThieleInnes)
(sol.x*sol.elem.C + sol.y*sol.elem.H)
end
function pmra(sol::OrbitSolutionThieleInnes)
sol.ẋ*sol.elem.B + sol.ẏ*sol.elem.G
end
function pmdec(sol::OrbitSolutionThieleInnes)
sol.ẋ*sol.elem.A + sol.ẏ*sol.elem.F
end
# Convert a Visual{KepOrbit} to the Thiele-Innes parameterization by
# computing the A, B, F, G constants [mas] from the Campbell elements.
function ThieleInnesOrbit(orbit::Visual{KepOrbit{T1},T2}) where {T1,T2}
    a = semimajoraxis(orbit)
    α = a*orbit.plx  # angular semi-major axis [mas]
    elem = orbit.parent
    A = α*( elem.cosΩ*cos(elem.ω)-elem.sinΩ*sin(elem.ω)*elem.cosi)
    B = α*( elem.sinΩ*cos(elem.ω)+elem.cosΩ*sin(elem.ω)*elem.cosi)
    F = α*(-elem.cosΩ*sin(elem.ω)-elem.sinΩ*cos(elem.ω)*elem.cosi)
    G = α*(-elem.sinΩ*sin(elem.ω)+elem.cosΩ*cos(elem.ω)*elem.cosi)
    ThieleInnesOrbit(
        eccentricity(orbit),
        periastron(orbit),
        totalmass(orbit),
        1000/distance(orbit),  # distance [pc] back to parallax [mas]
        A, B, F, G
    )
end
# Convert a ThieleInnesOrbit back to Campbell elements wrapped in a
# Visual{KepOrbit}. Mirrors the angle-recovery logic in the ThieleInnesOrbit
# constructor and `inclination`.
function Visual{KepOrbit}(o::ThieleInnesOrbit)
    u = (o.A^2 + o.B^2 + o.F^2 + o.G^2)/2
    v = o.A*o.G - o.B * o.F
    α = sqrt(u + sqrt((u+v)*(u-v)))  # angular semi-major axis [mas]
    ω_p_Ω = atan((o.B-o.F),(o.A+o.G))
    ω_m_Ω = atan((o.B+o.F),(o.G-o.A))
    # sign of ω_p_Ω: sin(ω_p_Ω) same sign as (B-F)
    # sign of ω_m_Ω: sin(ω_m_Ω) same sign as (-B-F)
    if sign(sin(ω_p_Ω)) != sign(o.B-o.F)
        ω_p_Ω = ω_p_Ω + pi
    end;
    if sign(sin(ω_m_Ω)) != sign(-o.B-o.F)
        ω_m_Ω = ω_m_Ω + pi
    end;
    ω_p_Ω = rem2pi(ω_p_Ω, RoundDown)
    ω_m_Ω = rem2pi(ω_m_Ω, RoundDown)
    ω = (ω_p_Ω + ω_m_Ω)/2
    Ω = (ω_p_Ω - ω_m_Ω)/2
    if Ω < 0
        ω += π
        Ω += π
    end
    # Inclination: pick the better-conditioned of two equivalent expressions.
    s,c = sincos(ω-Ω)
    d₁ = abs((o.A+o.G)*c)
    d₂ = abs((o.F-o.B)*s)
    s2,c2 = sincos(ω+Ω)
    if d₁ >= d₂
        i = 2atan(sqrt(abs((o.A-o.G)*c2)/d₁))
    else
        i = 2atan(sqrt(abs((o.B+o.F)*s2)/d₂))
    end
    a = α/o.plx  # semi-major axis [au]
    return Visual{KepOrbit}(;a, o.e, i, ω, Ω, o.tp, o.M, o.plx)
end
# Pretty printing
# Multi-line REPL display of the Thiele-Innes constants and derived quantities.
Base.show(io::IO, ::MIME"text/plain", elem::ThieleInnesOrbit) = print(
io, """
    $(typeof(elem))
    ─────────────────────────
    A   [mas] = $(round(elem.A, sigdigits=3))
    B   [mas] = $(round(elem.B, sigdigits=3))
    F   [mas] = $(round(elem.F, sigdigits=3))
    G   [mas] = $(round(elem.G, sigdigits=3))
    e         = $(round(elem.e, sigdigits=8))
    tp         = $(round(elem.tp, sigdigits=3))
    M   [M⊙ ] = $(round(elem.M, sigdigits=3))
    period      [yrs ] : $(round(period(elem)*day2year_julian, digits=1))
    mean motion [°/yr] : $(round(rad2deg(meanmotion(elem)), sigdigits=3))
    ──────────────────────────
    """
)
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 5081 | """
Visual{OrbitType}(..., plx=...)
This wraps another orbit to add the parallax distance field `plx`,
thus allowing projected quantities to be calculated.
It forwards everything else to the parent orbit.
For example, the KepOrbit type supports calculating x and y positions in AU.
A Visual{KepOrbit} additionally supports calculating projected
right ascension and declination offsets.
Note: the `ThieleInnesOrbit` type does not need to be wrapped in `Visual`
as it the Thiele-Innes constants are already expressed in milliarcseconds and
thus it always requires a `plx` value.
"""
struct VisualOrbit{T<:Number,O<:AbstractOrbit} <: AbstractOrbit{T}
    parent::O  # the wrapped orbit (positions in au)
    plx::T     # parallax [mas]
    dist::T    # distance [au], cached as 1000/plx * pc2au at construction
end
# Distance to the system in parsecs.
distance(elem::VisualOrbit) = elem.dist*au2pc
"""
Visual{OrbitType}(..., plx=...)
This wraps another orbit to add the parallax distance field `plx`,
thus allowing projected quantities to be calculated.
It forwards everything else to the parent orbit.
For example, the KepOrbit type supports calculating x and y positions in AU.
A Visual{KepOrbit} additionally supports calculating projected
right ascension and declination offsets.
!!! note
The `ThieleInnesOrbit` type does not need to be wrapped in `Visual`
as it the Thiele-Innes constants are already expressed in milliarcseconds and
thus it always requires a `plx` value.
"""
const Visual{OrbitType} = VisualOrbit{T,OrbitType} where T
_parent_num_type(orbit::AbstractOrbit{T}) where T = T
# Construct the wrapped orbit from keyword arguments, then attach the
# parallax and the cached distance (1000/plx converts mas → pc, pc2au → au).
function Visual{OrbitType}(;plx, args...,) where {OrbitType}
    inner = OrbitType(;args...)
    T = _parent_num_type(inner)
    return VisualOrbit{T,OrbitType{T}}(inner, plx, 1000/plx * pc2au)
end
# Wrap an already-constructed orbit with a parallax.
function Visual(parent::AbstractOrbit, plx,)
    T = _parent_num_type(parent)
    return VisualOrbit{T,typeof(parent)}(parent, plx, 1000/plx * pc2au)
end
export Visual
# A solution of the wrapped orbit paired with the Visual orbit so that
# projected (angular) quantities can be computed.
struct OrbitSolutionVisual{TEl<:AbstractOrbit,TSol<:AbstractOrbitSolution} <: AbstractOrbitSolution
    elem::TEl  # the VisualOrbit
    sol::TSol  # the parent orbit's solution
end
# Solve the parent orbit and wrap the result.
function orbitsolve_ν(elem::VisualOrbit, ν, args...; kwargs...)
    sol = orbitsolve_ν(elem.parent, ν, args...; kwargs...)
    return OrbitSolutionVisual(elem, sol)
end
soltime(os::OrbitSolutionVisual) = soltime(os.sol)
# Forward these functions to the underlying orbit object
# Solution-level accessors are delegated to the wrapped solution …
solution_fun_list = (
    :trueanom,
    :eccanom,
    :meananom,
    :posx,
    :posy,
    :posz,
    :posangle,
    :velx,
    :vely,
    :velz,
    :radvel,
)
for fun in solution_fun_list
    @eval function ($fun)(os::OrbitSolutionVisual, args...)
        return ($fun)(os.sol, args...)
    end
end
# … and orbit-level accessors are delegated to the parent orbit.
orbit_fun_list = (
    :eccentricity,
    :periastron,
    :period,
    :inclination,
    :semimajoraxis,
    :totalmass,
    :meanmotion,
    :semiamplitude,
    :_trueanom_from_eccanom,
)
for fun in orbit_fun_list
    @eval function ($fun)(elem::VisualOrbit, args...)
        return ($fun)(elem.parent, args...)
    end
end
# Right ascension offset [mas]: in-plane x position [au] converted to an
# angle at the system's cached distance.
function raoff(o::OrbitSolutionVisual)
    x_au = posx(o)
    cart2angle = rad2as*oftype(x_au, 1e3)/o.elem.dist
    return x_au*cart2angle
end
# Declination offset [mas]: in-plane y position [au] converted to an
# angle at the system's cached distance.
function decoff(o::OrbitSolutionVisual)
    y_au = posy(o)
    cart2angle = rad2as*oftype(y_au, 1e3)/o.elem.dist
    return y_au*cart2angle
end
# Proper motion in right ascension [mas/yr].
function pmra(o::OrbitSolutionVisual)
    ẋcart = o.elem.parent.J*(o.elem.parent.cosi_cosΩ*(o.sol.cosν_ω + o.elem.parent.ecosω) - o.elem.parent.sinΩ*(o.sol.sinν_ω + o.elem.parent.esinω)) # [AU/year]
    cart2angle = rad2as*oftype(ẋcart, 1e3)/o.elem.dist
    ẋang = ẋcart*cart2angle # [mas/year]
    return ẋang
end
# Proper motion in declination [mas/yr].
function pmdec(o::OrbitSolutionVisual)
    ẏcart = -o.elem.parent.J*(o.elem.parent.cosi_sinΩ*(o.sol.cosν_ω + o.elem.parent.ecosω) + o.elem.parent.cosΩ*(o.sol.sinν_ω + o.elem.parent.esinω)) # [AU/year]
    cart2angle = rad2as*oftype(ẏcart, 1e3)/o.elem.dist
    ẏang = ẏcart*cart2angle # [mas/year]
    return ẏang
end
# Acceleration in right ascension [mas/yr²].
function accra(o::OrbitSolutionVisual)
    if eccentricity(o.elem) >= 1
        @warn "acceleration not tested for ecc >= 1 yet. Results are likely wrong."
    end
    ẍcart = -o.elem.parent.A*(1 + o.sol.ecosν)^2 * (o.elem.parent.cosi_cosΩ*o.sol.sinν_ω + o.elem.parent.sinΩ*o.sol.cosν_ω) # [AU/year^2]
    cart2angle = rad2as*oftype(ẍcart, 1e3)/o.elem.dist
    ẍang = ẍcart*cart2angle # [mas/year^2]
    return ẍang
end
# Acceleration in declination [mas/yr²].
function accdec(o::OrbitSolutionVisual)
    if eccentricity(o.elem) >= 1
        @warn "acceleration not tested for ecc >= 1 yet. Results are likely wrong."
    end
    ÿcart = o.elem.parent.A*(1 + o.sol.ecosν)^2 * (o.elem.parent.cosi_sinΩ*o.sol.sinν_ω - o.elem.parent.cosΩ*o.sol.cosν_ω) # [AU/year^2]
    cart2angle = rad2as*oftype(ÿcart, 1e3)/o.elem.dist
    ÿang = ÿcart*cart2angle # [mas/year^2]
    return ÿang
end
# Pretty printing
# Show the wrapped orbit, then append the parallax / distance lines.
function Base.show(io::IO, mime::MIME"text/plain", elem::Visual)
    show(io, mime, elem.parent)
    print(io, """\
    plx [mas] = $(round(elem.plx, sigdigits=3))
    distance    [pc  ] : $(round(distance(elem), digits=1))
    ──────────────────────────
    """)
end
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 1298 | using PrecompileTools
@setup_workload begin
    @compile_workload begin
        # Exercise each main orbit type once so their methods are precompiled:
        # o1: bare KepOrbit; o2: Visual orbit (has plx); o3: Thiele-Innes
        # orbit; o4: CartesianOrbit built from a solved KepOrbit.
        o1 = orbit(
            a = 1,
            i = π/4,
            Ω = 0.001,
            ω = 0.001,
            e = 0.5,
            tp = 365/2,
            M = 1,
            tref=0,
        )
        o2 = orbit(
            a = 1,
            i = π/4,
            Ω = 0.001,
            ω = 0.001,
            e = 0.5,
            tp = 365/2,
            M = 1,
            tref=0,
            plx=100.
        )
        o3 = orbit(
            A = 500,
            B = 600,
            F = -500,
            G = 300,
            e = 0.5,
            tp = 365/2,
            M = 1,
            tref=0,
            plx=100.
        )
        o4 = CartesianOrbit(orbitsolve(o1, 0.))
        # CartesianOrbit(orbitsolve(o2, 0.))
        # CartesianOrbit(orbitsolve(o3, 0.))
        # Common accessors for every orbit type.
        for o in (o1,o2,o3,o4)
            totalmass(o)
            period(o)
            meanmotion(o)
            eccentricity(o)
            periastron(o)
            semiamplitude(o)
            radvel(o,0.0)
            posangle(o,0.0)
        end
        # Projected (angular) quantities only exist for orbits with parallax.
        for o in (o2,o3)
            distance(o)
            raoff(o, 0.0)
            raoff(o, 0.0)
            decoff(o, 0.0)
            pmra(o, 0.0)
            pmdec(o, 0.0)
        end
    end
end
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 9242 | #=
This file contains plot recipes for the Plots.jl
ecosystem. This way you can do e.g.:
plot(elems)
=#
# Plotting recipes for orbital elements
using RecipesBase
# Plotting an orbit directly: solve it at a reference anomaly/epoch and
# delegate to the AbstractOrbitSolution recipe below.
@recipe function f(elem::AbstractOrbit)
    os = orbitsolve_ν(elem, 0)
    line_z --> nothing
    solmarker --> false
    os
end
@recipe function f(elem::AbsoluteVisualOrbit)
    # For absolute orbits, solve at the stored reference epoch instead.
    os = orbitsolve(elem, elem.ref_epoch)
    line_z --> nothing
    solmarker --> false
    os
end
# Recipe for an array of orbits. Same as sigle orbit,
# but scale down transparency as we add more orbits.
@recipe function f(elems::AbstractArray{<:AbstractOrbit})
    label --> ""
    seriesalpha --> 30/length(elems)
    for elem in elems
        @series begin
            elem
        end
    end
end
# Same treatment for an array of orbit solutions.
@recipe function f(oses::AbstractArray{<:AbstractOrbitSolution})
    label --> ""
    seriesalpha --> 30/length(oses)
    for os in oses
        @series begin
            os
        end
    end
end
# Default pair of variables to plot for each solution type.
default_plotkind(::OrbitSolutionCartesian) = (:x, :y)
default_plotkind(::OrbitSolutionKep) = (:x, :y)
default_plotkind(::OrbitSolutionThieleInnes) = :astrometry
default_plotkind(::OrbitSolutionRadialVelocity) = :radvel
default_plotkind(::OrbitSolutionVisual) = :astrometry
default_plotkind(::OrbitSolutionAbsoluteVisual) = :astrometry
# Plotting recipes for orbital elements
using RecipesBase
# Main plot recipe: plot any combination of orbit-solution variables
# (e.g. (:raoff, :decoff), (:t, :radvel), 3-tuples for 3D plots).
@recipe function f(os::AbstractOrbitSolution)
    # Variables to plot
    kind = get(plotattributes, :kind, default_plotkind(os))
    if kind == :astrometry
        kind = (:raoff, :decoff)
    end
    if kind isa Symbol
        kind = (:t, kind)
    end
    if length(kind) < 2
        error("Requires at least two variables to create plot")
    end
    posvars = (:x,:y,:z)
    astromvars = (:raoff, :decoff)
    astromvelvars = (:pmra, :pmdec)
    astromaccvars = (:accra, :accdec)
    ravars = (:x, :raoff, :pmra)
    timevars = (:t, :ν, :trueanom, :meananom, :eccanom)
    unwrapvars = (timevars..., :posangle)
    spatial_vars = (posvars..., astromvars..., astromvelvars..., astromaccvars...)
    if all(∈(spatial_vars), kind)
        # Set equal aspect ratio when plotting all spatial coordinates
        aspect_ratio --> 1
    end
    # Map each plottable symbol to (axis label, unit, accessor function).
    resolver = (;
        t=("t", "mjd", (sol,args...)->_time_from_EA(sol,eccanom(sol))),
        ν=("ν", "rad", trueanom,),
        trueanom=("ν", "rad", trueanom,),
        meananom=("mean.anom.", "rad", meananom,),
        eccanom=("ecc.anom", "rad", eccanom,),
        x=("x", "au", posx,),
        y=("y", "au", posy,),
        z=("z", "au", posz,),
        velx=("∂x/δt", "au/yr", velx),
        vely=("∂y/δt", "au/yr", vely),
        velz=("∂z/δt", "au/yr", velz),
        raoff=("Δra", "mas", raoff,),
        decoff=("Δdec", "mas", decoff,),
        pmra=("∂ra/∂t", "mas/yr", pmra,),
        # BUGFIX: pmdec/accdec previously displayed copy-pasted "ra" labels.
        pmdec=("∂dec/∂t", "mas/yr", pmdec,),
        accra=("∂²ra/∂t²", "mas/yr²", accra,),
        accdec=("∂²dec/∂t²", "mas/yr²", accdec,),
        radvel=("radvel", "m/s", radvel,),
        posangle=("posangle", "mas", posangle,),
        projectedseparation=("projectedseparation", "mas", projectedseparation,),
    )
    xl, xu, xf = resolver[kind[1]]
    yl, yu, yf = resolver[kind[2]]
    xguide --> "$xl [$xu]"
    yguide --> "$yl [$yu]"
    if length(kind) >= 3
        zl, zu, zf = resolver[kind[3]]
        zguide --> "$zl [$zu]"
    end
    bodies = get(plotattributes, :body, :secondary)
    if bodies isa Symbol
        bodies = (bodies,)
    end
    # Number of sample points along the curve.
    L = get(plotattributes, :orbitsteps, 90)
    for body in bodies
        # We trace out in equal steps of eccentric anomaly instead of time for a smooth
        # curve, regardless of eccentricity.
        # When the independent variable is a timevar (angle or time) we want
        # two cycles, otherwise we just do one complete orbit
        if kind[1] == :t || !isfinite(period(os.elem))
            if isfinite(period(os.elem))
                # bound orbit case
                default_tspan = (soltime(os)-period(os.elem), soltime(os)+period(os.elem))
                tspan = get(plotattributes, :tspan, default_tspan)
                tstart, tstop = extrema(tspan)
                ea_start = eccanom(orbitsolve(os.elem, tstart))
                ea_stop = eccanom(orbitsolve(os.elem, tstop))
                # Extend the eccentric anomaly range (bounded to 10 turns) so
                # it spans the full requested tspan.
                # BUGFIX: these guards were `i > 10`, so with i starting at 0
                # the loops never executed.
                i = 0
                while i < 10 && _time_from_EA(os.elem, ea_start) > tstart + 0.000001
                    ea_start -= 2π
                    i += 1
                end
                i = 0
                while i < 10 && _time_from_EA(os.elem, ea_stop) < tstop - 0.000001
                    ea_stop += 2π
                    i += 1
                end
                # if ea_stop < ea_start
                #     ea_stop += 2π
                # end
                eccanoms = range(
                    ea_start,
                    ea_stop,
                    length=L,
                )
            else
                # non-elliptical case
                # NOTE(review): this default span scales by meanmotion — confirm units.
                default_tspan = (soltime(os)-5*365*meanmotion(os.elem), soltime(os)+5*365*meanmotion(os.elem))
                tspan = get(plotattributes, :tspan, default_tspan)
                tstart, tstop = extrema(tspan)
                eccanoms = range(
                    eccanom(orbitsolve(os.elem, tstart)),
                    eccanom(orbitsolve(os.elem, tstop)),
                    length=L,
                )
            end
        elseif kind[1] ∈ timevars
            eccanoms = range(-2π, 2π, length=L)
            xticks --> (range(-2π, 2π, step=π/2), ["-2π", "-3π/2", "-π", "-π/2", "0", "+π/2", "+π", "+3π/2", "+2π"])
        else
            # Otherwise we are plotting two variables against each other and don't need
            # to consider multiple cycles
            eccanoms = range(eccanom(os), eccanom(os)+2π, length=L)
            line_z --> -eccanoms
            colorbar --> nothing
        end
        if get(plotattributes, :timestep, false) || os isa OrbitSolutionAbsoluteVisual
            if kind[1] == :t
                tspan = get(plotattributes, :tspan, (soltime(os)-period(os.elem), soltime(os)+2period(os.elem)))
                solns = orbitsolve.(os.elem, range(tspan..., length=L))
            else
                solns = orbitsolve.(os.elem, range(soltime(os), soltime(os)+period(os.elem), length=L))
            end
        else
            solns = orbitsolve_eccanom.(os.elem, eccanoms)
        end
        # Evaluate the chosen accessors; the :primary branch needs the mass
        # ratio supplied via the `mass` attribute.
        if body == :secondary
            x = xf(os)
            xs = xf.(solns)
            y = yf(os)
            ys = yf.(solns)
            if length(kind) >= 3
                z = zf(os)
                zs = zf.(solns)
            end
        elseif body == :primary
            x = xf(os, plotattributes[:mass])
            xs = xf.(solns, plotattributes[:mass])
            y = yf(os, plotattributes[:mass])
            ys = yf.(solns, plotattributes[:mass])
            if length(kind) >= 3
                z = zf(os, plotattributes[:mass])
                zs = zf.(solns, plotattributes[:mass])
            end
        else
            error("Unrecognized body $body. Pass body=:primary or :secondary")
        end
        # We almost always want to reverse the RA coordinate to match how we
        # see it in the sky.
        if kind[1] ∈ ravars
            xflip --> true
        end
        if kind[2] ∈ ravars
            yflip --> true
        end
        if length(kind) >= 3 && kind[3] ∈ ravars
            zflip --> true
        end
        # Prevent wrapped lines
        if kind[1] ∈ unwrapvars && isfinite(period(os.elem))
            P = kind[1]==:t ? period(os.elem) : 2π
            unwrap!(xs, P)
            xs .-= P
        end
        @series begin
            label --> string(body)
            if haskey(plotattributes, :seriescolor)
                line_z := nothing
            end
            if isdefined(Main, :Plots) && isdefined(Main.Plots, :palette) && get(plotattributes, :solmarker, true)
                # We would like to create a nice semi-transparent
                # gray gradient. But palette isn't in PlotRecipes so
                # we fall back to this hacky way of getting it
                if body == :secondary
                    seriescolor --> Main.Plots.palette(["#444444ff", "#44444433"],10)
                else
                    seriescolor --> Main.Plots.palette(["#BB4444ff", "#BB444433"],10)
                end
            end
            if length(kind) >= 3
                xs, ys, zs
            else
                xs, ys
            end
        end
        # Marker at the solution's own position.
        if get(plotattributes, :solmarker, true)
            @series begin
                seriestype --> :scatter
                label --> ""
                if body == :secondary
                    seriescolor --> :gray
                else
                    seriescolor --> "#BB4444"
                end
                if length(kind) >= 3
                    [x], [y], [z]
                else
                    [x], [y]
                end
            end
        end
    end
end
# https://discourse.julialang.org/t/equivalent-of-matlabs-unwrap/44882/4?
# In-place phase unwrap: make each element continue smoothly from its
# predecessor by shifting it by the nearest multiple of `period`.
function unwrap!(x, period = 2π)
    p = convert(eltype(x), period)
    prev = first(x)
    @inbounds for idx in eachindex(x)
        prev = prev + rem(x[idx] - prev, p, RoundNearest)
        x[idx] = prev
    end
end
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 1898 |
using Dates: Dates, DateTime, Date
using AstroTime: AstroTime
export mjd
"""
mjd("2020-01-01")
Get the modfied julian day of a date, or in general a UTC
timestamp.
"""
function mjd(timestamp::AbstractString)
return timestamp |>
AstroTime.TTEpoch |> # Switched from UTC to Terrestrial Time epoch
AstroTime.modified_julian |>
AstroTime.days |>
AstroTime.value;
end
"""
mjd(Date("2020-01-01"))
Get the modfied julian day of a Date or DateTime object.
"""
function mjd(date_or_datetime::Union{Date,DateTime})
return date_or_datetime |>
AstroTime.TTEpoch |> # Switched from UTC to Terrestrial Time epoch
AstroTime.modified_julian |>
AstroTime.days |>
AstroTime.value;
end
"""
mjd()
Get the current modified julian day of right now.
"""
function mjd()
return Dates.now() |>
AstroTime.TTEpoch |>
AstroTime.modified_julian |>
AstroTime.days |>
AstroTime.value;
end
export mjd
"""
years2mjd()
Convert from decimal years (e.g. 1995.25) into modified
julian date, rounded to closest second
"""
function years2mjd(decimal_years)
yr_floor = floor(decimal_years)
yr_obj = Dates.Date(yr_floor,1,1)
days = (decimal_years - yr_floor) * Dates.daysinyear(yr_obj)
days_floor = floor(days)
ep = AstroTime.TTEpoch(
Dates.DateTime(yr_floor) + Dates.Day(days_floor) + Dates.Second(round((days-days_floor)*60*60*24))
)
return AstroTime.value(AstroTime.modified_julian(ep))
end
export years2mjd
"""
mjd2date(modified_julian)
Get a Date value from a modfied julian day, rounded to closest day
## Examples
```julia
julia> mjd2date(59160.8)
2020-11-08
```
"""
function mjd2date(days)
return DateTime(
Dates.DateTime("1858-11-17") +
Dates.Day(floor(days)) +
Dates.Second(round((days-floor(days))*60*60*24))
)
end
export mjd2date | PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 8647 | #=
This file defines a Coordinate Transform that allows us to project
images forward and backwards in time according to Kepler's laws.
The currently solution is incomplete but workable for inclined
orbital planes.
=#
#=
Goals:
I want to be able to enter orbital parameters and get a coordinate transformations
object.
I guess this object must encode the delta T and the image platescale.
Main use case is specifying everything except semi-major axis.
Then, I want a convenience function for taking an image, orbital parameters, and dt
and returning a new image.
Finally, I want convenience functions for doing this to a sequence of images and
returning a stack (custom stacking function of course). Stacking function must
look back into contrast map from original image. Reverse the dt at current location
and lookup via projected separation.
This is *sort* of inverse to what we have now. We need to lookup pixels, and see
where those pixels were before.
Finally, I want to wrap this up with some animation functions. We should be
able to input an image and step through dt.
Or input multiple images and fade between them.
=#
struct OrbitalTransformation{T<:Number}
    # Orbital properties
    i::T    # inclination [rad]
    e::T    # eccentricity
    M::T    # total mass [M⊙]
    ω::T    # argument of periapsis [rad]
    Ω::T    # longitude of ascending node [rad]
    plx::T  # parallax [mas]
    # Image properties
    platescale::T  # [mas/px]
    # Time properties
    dt::T          # time offset to project by
    # Cached constants for these elements.
    dist::T
    ν_fact::T
    cos_Ω::T
    sin_Ω::T
    cos_i::T
    sin_i::T
    # Inner constructor to inforce invariants and pre-calculate a few
    # constants for these elements.
    function OrbitalTransformation(i, e, M, ω, Ω, plx, platescale, dt)
        # Enforce invariants on user parameters
        e = max(zero(e), min(e, one(e)))
        M = max(M, zero(M))
        plx = max(plx, zero(plx))
        # Pre-calculate some factors that will be re-used when calculating kep2cart at any time
        # Distance in AU
        dist = 1/(plx/1000) * pc2au
        # Compute period (days)
        # Factor in calculating the true anomaly
        ν_fact = √((1+e)/(1-e))
        # Only circular orbits are supported by this transform at present.
        if e != 0
            error("eccentric transformations are not currently working correctly")
        end
        T = promote_type(
            typeof(i),
            typeof(e),
            typeof(M),
            typeof(ω),
            typeof(Ω),
            typeof(plx),
            typeof(platescale),
            typeof(dt),
        )
        # The user might pass in integers, but it makes no sense to do these
        # calculations on integers. Assume they mean to use floats.
        if T <: Integer
            T = promote_type(T, Float64)
        end
        sin_Ω, cos_Ω = sincos(Ω)
        sin_i, cos_i = sincos(i)
        new{T}(
            # Passed parameters that define the elements
            i,
            e,
            M,
            ω,
            Ω,
            plx,
            platescale,
            dt,
            # Cached calcuations
            dist,
            ν_fact,
            # Geometric factors
            cos_Ω,
            sin_Ω,
            cos_i,
            sin_i,
        )
    end
end
# Allow arguments to be specified by keyword.
OrbitalTransformation(;i, e, M, ω, Ω, plx, platescale, dt) = OrbitalTransformation(i, e, M, ω, Ω, plx, platescale, dt)
# And by a named tuple without splatting
OrbitalTransformation(nt::NamedTuple) = OrbitalTransformation(nt.i, nt.e, nt.M, nt.ω, nt.Ω, nt.plx, nt.platescale, nt.dt)
export OrbitalTransformation
"""
    (ot::OrbitalTransformation)(dist_proj_px)

Treat the input pixel offset from the star as a test particle on the orbit
described by `ot` and return the pixel offset that particle has after `ot.dt`
days, in the same units.

Input and output are 2-vectors ordered `(y, x)` — note the destructuring
`(y₀, x₀) = dist_proj_au` below and the returned `SVector(y, x)`. The pixel
offset is converted to AU at the system distance, de-projected to recover the
true anomaly and semi-major axis implied by that position, advanced in mean
anomaly, and re-projected back to pixels.

Returns the zero vector for an input exactly at the star (singularity with a
trivial solution).
"""
function (ot::OrbitalTransformation{T})(dist_proj_px) where T
    # Given x and y, solve for a and mean anomaly. Or I guess eccentric anomaly? and then work back?
    # nvm we solve for true anomaly
    # Convert pixel coordinates into AU
    # dist = 1/(ot.plx/1000) * pc2au
    dist_proj_mas = dist_proj_px*ot.platescale
    dist_proj_as = dist_proj_mas/1e3
    dist_proj_rad = dist_proj_as / rad2as
    dist_proj_au = tan.(dist_proj_rad) .* ot.dist
    # Note the (y, x) ordering of the input vector.
    (y₀,x₀) = dist_proj_au
    # cos_i = cos(ot.i)
    # sin_Ω, cos_Ω = sincos(ot.Ω)
    # ν_fact = √((1+ot.e)/(1-ot.e))
    r₀′ = √(x₀^2 + y₀^2)
    # Singularity at the origin that has a trivial solution
    if r₀′ ≈ 0
        return SVector{2,typeof(x₀)}(0.0, 0.0)
    end
    # Derive true anomaly from position and known orbital properties.
    # Worked derivation (kept for reference):
    # y/x = (sin_Ω*cos(θ) + cos_Ω*cos_i*sin(θ)) /
    #       (cos_Ω*cos(θ) + sin_Ω*cos_i*sin(θ))
    # y/x = (sin_Ω + cos_Ω*cos_i*tan(θ)) /
    #       (cos_Ω + sin_Ω*cos_i*tan(θ))
    # y*(cos_Ω + sin_Ω*cos_i*tan(θ)) = x*(sin_Ω + cos_Ω*cos_i*tan(θ))
    # y*cos_Ω + y*sin_Ω*cos_i*tan(θ) = x*sin_Ω + x*cos_Ω*cos_i*tan(θ)
    # y*sin_Ω*cos_i*tan(θ) - x*cos_Ω*cos_i*tan(θ) = x*sin_Ω - y*cos_Ω
    # tan(θ)*(y*sin_Ω*cos_i - x*cos_Ω*cos_i) = x*sin_Ω - y*cos_Ω
    # tan(θ) = (x*sin_Ω - y*cos_Ω)/(y*sin_Ω*cos_i - x*cos_Ω*cos_i)
    # ω + ν = atan((x*sin_Ω - y*cos_Ω)/(y*sin_Ω*cos_i - x*cos_Ω*cos_i))
    # ν₀ = atan(x₀*sin_Ω - y₀*cos_Ω, y₀*sin_Ω*cos_i - x₀*cos_Ω*cos_i) - ω
    # ν₀ = atan(
    #     (x₀*sin_Ω - y₀*cos_Ω ),
    #     (y₀*sin_Ω*cos_i - x₀*cos_Ω*cos_i)
    # ) - ω
    # x = r*(sin_Ω*cos_ω_ν + cos_Ω*sin_ω_ν*cos_i)
    # y = r*(cos_Ω*cos_ω_ν - sin_Ω*sin_ω_ν*cos_i)
    # y/x = r*(cos_Ω*cos_ω_ν - sin_Ω*sin_ω_ν*cos_i)/r*(sin_Ω*cos_ω_ν + cos_Ω*sin_ω_ν*cos_i)
    # y/x = (cos_Ω*cos_ω_ν - sin_Ω*sin_ω_ν*cos_i)/
    #       (sin_Ω*cos_ω_ν + cos_Ω*sin_ω_ν*cos_i)
    # y/x = (A*cos_ω_ν - B*sin_ω_ν*C)/   # C = cos_i
    #       (B*cos_ω_ν + A*sin_ω_ν*C)
    # y/x = (A*cos_θ - B*sin_θ*C)/
    #       (B*cos_θ + A*sin_θ*C)
    # Substitute:
    # tan θ = sin θ / cos θ
    # sin θ = tan θ * cos θ
    # y/x = (A*cos_θ - B*tan θ * cos θ *C)/
    #       (B*cos_θ + A*tan θ * cos θ *C)
    # y/x = (A - B*tan θ * C)/
    #       (B + A*tan θ * C)
    # y * (B + A*tan θ * C) = x * (A - B*tan θ * C)
    # y * (B + A*Z) = x * (A - B*Z)   # Z = tan θ * C
    # By + A*Zy = Ax - B*Zx
    # (Ay + Bx)Z = Ax - By
    # Expand again
    # Z = (cos_Ω*x - sin_Ω*y)/(cos_Ω*y + sin_Ω*x)
    # tan θ * C = (cos_Ω*x - sin_Ω*y)/(cos_Ω*y + sin_Ω*x)
    # tan ω_ν * C = (cos_Ω*x - sin_Ω*y)/(cos_Ω*y + sin_Ω*x)
    # tan ω_ν = (cos_Ω*x - sin_Ω*y)/(cos_Ω*y + sin_Ω*x)/C
    # tan ω_ν = (cos_Ω*x - sin_Ω*y)/(cos_Ω*y + sin_Ω*x) / cos_i
    # ω+ν = atan(cos_Ω*x - sin_Ω*y, (cos_Ω*y + sin_Ω*x)/cos_i)
    # ν = atan(cos_Ω*x - sin_Ω*y, (cos_Ω*y + sin_Ω*x)/cos_i) - ω
    # Code:
    # ν₀ = atan(cos_Ω*x₀ - sin_Ω*y₀, (cos_Ω*y₀ + sin_Ω*x₀)/cos_i) - ot.ω
    # De-project from the sky plane into the orbital plane
    y₀′ = (ot.cos_Ω*x₀ - ot.sin_Ω*y₀)/ot.cos_i
    x₀′ = (ot.cos_Ω*y₀ + ot.sin_Ω*x₀)
    # Calculate true anomaly of initial position
    ν₀ = atan(y₀′, x₀′) - ot.ω
    # From those, get the initial eccentric anomaly
    EA₀ = 2atan(ot.ν_fact*tan(ν₀/2))
    # The inverse of Kepler's equation is closed form.
    # This gives us the initial mean anomaly
    MA₀ = EA₀ - ot.e*sin(EA₀)
    # Orbital radius
    # After we have done the inclination re-projection,
    # x₀′ and y₀′ give the separation from the star in AU
    # and so we can calculate the initial separation as follows
    r₀ = √(x₀′^2 + y₀′^2)
    # Since we now know the eccentric anomaly via the true anomaly,
    # and we have the separation, we can calculate the semi-major axis.
    a = r₀/(1-ot.e*cos(EA₀))
    # Calculate mean motion for this semi-major axis
    m = 2π/√(a^3/ot.M)
    # Advance mean anomaly by dt.
    # NOTE(review): the (-1) factor reverses time here — presumably because
    # image warping applies this transform as a pullback (output pixel →
    # source pixel), so "forward dt" means looking up where the pixel came
    # from. Confirm this sign convention.
    MA = MA₀ + m/convert(T, year2day) * (-1)* ot.dt
    # And finally solve for eccentric anomaly as usual
    EA = kepler_solver(MA, ot.e, Markley())
    ν = convert(T,2)*atan(ot.ν_fact*tan(EA/convert(T,2)))
    sin_ω_ν, cos_ω_ν = sincos(ot.ω+ν)
    # sin_Ω, cos_Ω = sincos(ot.Ω)
    r = a*(one(T)-ot.e*cos(EA))
    # Re-project the advanced position back onto the sky plane (AU)
    y = r*(ot.cos_Ω*cos_ω_ν - ot.sin_Ω*sin_ω_ν*ot.cos_i)
    x = r*(ot.sin_Ω*cos_ω_ν + ot.cos_Ω*sin_ω_ν*ot.cos_i)
    # TODO: move to tests
    # Sanity check: with dt == 0 the transform must be (approximately) the identity.
    if ot.dt == 0
        xtol = isapprox(x,x₀,atol=1e-2)
        ytol = isapprox(y,y₀,atol=1e-2)
        if !xtol || !ytol
            @show x₀ x y₀ y r₀ r a MA MA₀ EA ν₀ ν
            error("Output != input despite dt=0")
        end
    end
    # z = r*(sin(ot.i)*sin(ot.ω+ν))
    # Convert back from AU to pixel offsets, preserving the (y, x) ordering.
    coords_AU = SVector(y,x)
    dist_proj_rad = atan.(coords_AU, ot.dist)
    dist_proj_mas = dist_proj_rad .* convert(eltype(dist_proj_rad),rad2as*1e3) # radians -> mas
    dist_proj_px = dist_proj_mas./ot.platescale
    return dist_proj_px
end
# The inverse of an orbital transformation is the same orbit propagated the
# other way in time: negate dt and keep every other element unchanged.
function Base.inv(ot::OrbitalTransformation)
    return OrbitalTransformation(
        ot.i, ot.e, ot.M, ot.ω, ot.Ω,
        ot.plx, ot.platescale, -ot.dt,
    )
end
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | code | 12405 | # ----------------------------------------------------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------------------------------------------------
using Test
using PlanetOrbits
using ForwardDiff
using FiniteDiff
# ----------------------------------------------------------------------------------------------------------------------
# Constants and Helper Functions
# ----------------------------------------------------------------------------------------------------------------------
# Shared fixtures for the test file. Marked `const`: non-const globals are
# untyped in Julia and these values never change.
# 10 steps per day for one year
const one_year_range = 0.0:0.1:365.24
# Relative tolerance for certain tests
const rtol = 1e-4
# Absolute tolerance for certain tests
const atol = 1e-6
# ----------------------------------------------------------------------------------------------------------------------
# Tests
# ----------------------------------------------------------------------------------------------------------------------
## Close to an idealized face-on Earth with circular orbit at 1 pc
# Due to IAU definitions, values don't match exactly
@testset "Earth, i = 0, e = 0, d = 1 pc" begin
    idealearth = orbit(
        a = 1.0,
        e = 0.0,
        i = 0.0,
        ω = 0.0,
        Ω = 0.0,
        tp = 0.0,
        M = 1.0,
        plx = 1000.0
    )
    # Test basic orbit properties
    @test period(idealearth) ≈ PlanetOrbits.year2day_julian rtol=rtol
    @test distance(idealearth) ≈ 1.0 rtol=rtol
    @test meanmotion(idealearth) ≈ 2π rtol=rtol
    @test periastron(idealearth) ≈ 0.0
    # Face-on: no line-of-sight motion, so no RV signal
    @test semiamplitude(idealearth) ≈ 0.0
    # Orbit solutions at quarters of the orbit (true anomaly 0, π/2, π, 3π/2)
    oq1 = PlanetOrbits.orbitsolve_ν(idealearth, 0.0)
    oq2 = PlanetOrbits.orbitsolve_ν(idealearth, π/2)
    oq3 = PlanetOrbits.orbitsolve_ν(idealearth, π)
    oq4 = PlanetOrbits.orbitsolve_ν(idealearth, 3π/2)
    # 1 AU at 1 pc subtends 1 arcsecond = 1000 mas, hence the 1000 expectations.
    # Test orbit values at first quarter
    @test raoff(oq1) ≈ 0.0 atol=atol
    @test decoff(oq1) ≈ 1000.0 rtol=rtol
    @test posangle(oq1) ≈ 0.0 atol=atol
    @test projectedseparation(oq1) ≈ 1000.0 rtol=rtol
    @test sign(pmra(oq1)) == +1
    @test pmdec(oq1) ≈ 0.0 atol=atol
    @test radvel(oq1) ≈ 0.0 atol=atol
    @test accra(oq1) ≈ 0.0 atol=atol
    # Acceleration always points back toward the star
    @test sign(accdec(oq1)) == -1
    # Test orbit values at second quarter
    @test raoff(oq2) ≈ 1000.0 rtol=rtol
    @test decoff(oq2) ≈ 0.0 atol=atol
    @test posangle(oq2) ≈ π/2 rtol=rtol
    @test projectedseparation(oq2) ≈ 1000.0 rtol=rtol
    @test pmra(oq2) ≈ 0.0 atol=atol
    @test sign(pmdec(oq2)) == -1
    @test radvel(oq2) ≈ 0.0 atol=atol
    @test sign(accra(oq2)) == -1
    @test accdec(oq2) ≈ 0.0 atol=atol
    # Test orbit values at third quarter
    @test raoff(oq3) ≈ 0.0 atol=atol
    @test decoff(oq3) ≈ -1000.0 rtol=rtol
    @test posangle(oq3) ≈ π rtol=rtol
    @test projectedseparation(oq3) ≈ 1000.0 rtol=rtol
    @test sign(pmra(oq3)) == -1
    @test pmdec(oq3) ≈ 0.0 atol=atol
    @test radvel(oq3) ≈ 0.0 atol=atol
    @test accra(oq3) ≈ 0.0 atol=atol
    @test sign(accdec(oq3)) == +1
    # Test orbit values at fourth quarter
    @test raoff(oq4) ≈ -1000.0 rtol=rtol
    @test decoff(oq4) ≈ 0.0 atol=atol
    @test posangle(oq4) ≈ -π/2 rtol=rtol
    @test projectedseparation(oq4) ≈ 1000.0 rtol=rtol
    @test pmra(oq4) ≈ 0.0 atol=atol
    @test sign(pmdec(oq4)) == +1
    @test radvel(oq4) ≈ 0.0 atol=atol
    @test sign(accra(oq4)) == +1
    @test accdec(oq4) ≈ 0.0 atol=atol
    # Compare velocities and accelerations: opposite quarters must be
    # antisymmetric for a circular orbit.
    @test pmra(oq1) ≈ -pmra(oq3) rtol=rtol
    @test pmdec(oq2) ≈ -pmdec(oq4) rtol=rtol
    @test accdec(oq1) ≈ -accdec(oq3) rtol=rtol
    @test accra(oq2) ≈ -accra(oq4) rtol=rtol
end
## Idealized edge-on Earth with circular orbit at 1 pc
@testset "Earth, i = 90, e = 0, d = 1 pc" begin
    idealearth = orbit(
        a = 1.0,
        e = 0.0,
        i = π/2,
        ω = 0.0,
        Ω = 0.0,
        tp = 0.0,
        M = 1.0,
        plx = 1000.0
    )
    # Test basic orbit properties
    @test period(idealearth) ≈ PlanetOrbits.year2day_julian rtol=rtol
    @test distance(idealearth) ≈ 1.0 rtol=rtol
    @test meanmotion(idealearth) ≈ 2π rtol=rtol
    @test periastron(idealearth) == 0.0
    # Edge-on: the full orbital speed (~29.79 km/s for Earth) appears as the
    # RV semi-amplitude, in m/s.
    @test semiamplitude(idealearth) ≈ 29785.89 rtol=1e-3
    # Orbit solutions at quarters of the orbit (true anomaly 0, π/2, π, 3π/2)
    oq1 = PlanetOrbits.orbitsolve_ν(idealearth, 0.0)
    oq2 = PlanetOrbits.orbitsolve_ν(idealearth, π/2)
    oq3 = PlanetOrbits.orbitsolve_ν(idealearth, π)
    oq4 = PlanetOrbits.orbitsolve_ν(idealearth, 3π/2)
    # With Ω = 0 and i = 90°, all on-sky motion is along the Dec axis,
    # so every RA offset below is expected to be zero.
    # Test orbit values at first quarter
    @test raoff(oq1) ≈ 0.0 atol=atol
    @test decoff(oq1) ≈ 1000.0 rtol=rtol
    @test projectedseparation(oq1) ≈ 1000.0 rtol=rtol
    @test pmra(oq1) ≈ 0.0 atol=atol
    @test pmdec(oq1) ≈ 0.0 atol=atol
    @test radvel(oq1) ≈ 29785.89 rtol=1e-3
    @test accra(oq1) ≈ 0.0 atol=atol
    @test sign(accdec(oq1)) == -1
    # Test orbit values at second quarter (conjunction: zero separation)
    @test raoff(oq2) ≈ 0.0 atol=atol
    @test decoff(oq2) ≈ 0.0 atol=atol
    @test projectedseparation(oq2) ≈ 0.0 atol=atol
    @test pmra(oq2) ≈ 0.0 atol=atol
    @test sign(pmdec(oq2)) == -1
    @test radvel(oq2) ≈ 0.0 atol=atol
    @test accra(oq2) ≈ 0.0 atol=atol
    @test accdec(oq2) ≈ 0.0 atol=atol
    # Test orbit values at third quarter
    @test raoff(oq3) ≈ 0.0 atol=atol
    @test decoff(oq3) ≈ -1000.0 rtol=rtol
    @test projectedseparation(oq3) ≈ 1000.0 rtol=rtol
    @test pmra(oq3) ≈ 0.0 atol=atol
    @test pmdec(oq3) ≈ 0.0 atol=atol
    @test radvel(oq3) ≈ -29785.89 rtol=1e-3
    @test accra(oq3) ≈ 0.0 atol=atol
    @test sign(accdec(oq3)) == +1
    # Test orbit values at fourth quarter (the other conjunction)
    @test raoff(oq4) ≈ 0.0 atol=atol
    @test decoff(oq4) ≈ 0.0 atol=atol
    @test projectedseparation(oq4) ≈ 0.0 atol=atol
    @test pmra(oq4) ≈ 0.0 atol=atol
    @test sign(pmdec(oq4)) == +1
    @test radvel(oq4) ≈ 0.0 atol=atol
    @test sign(accra(oq4)) == +1
    @test accdec(oq4) ≈ 0.0 atol=atol
    # Compare velocities and accelerations (antisymmetry of opposite quarters)
    @test pmdec(oq2) ≈ -pmdec(oq4) rtol=rtol
    @test accdec(oq1) ≈ -accdec(oq3) rtol=rtol
end
## Test varying eccentricity
@testset "Eccentricity" begin
    # Basic eccentric orbit
    eccentric_1AU_1Msun_1pc = orbit(
        a = 1.0, # AU
        e = 0.5,
        i = 0.0,
        ω = 0.0,
        Ω = 0.0,
        tp = 0.0,
        M = 1.0, # M_sun
        plx = 1000.0, # 1000 mas == 1pc
    )
    xs = raoff.(eccentric_1AU_1Msun_1pc, one_year_range)
    ys = decoff.(eccentric_1AU_1Msun_1pc, one_year_range)
    ps = projectedseparation.(eccentric_1AU_1Msun_1pc, one_year_range)
    @test period(eccentric_1AU_1Msun_1pc) ≈ 1.0*PlanetOrbits.year2day_julian rtol=rtol
    @test distance(eccentric_1AU_1Msun_1pc) == 1
    # Mean motion should be the same
    @test PlanetOrbits.meanmotion(eccentric_1AU_1Msun_1pc) ≈ 2π rtol=rtol
    # The separation should now be varying
    # By definition of eccentricity 0.5, 1AU and 1PC:
    # apoastron a(1+e) = 1.5 AU = 1500 mas, periastron a(1-e) = 0.5 AU = 500 mas
    @test maximum(ps) ≈ 1500 rtol=rtol
    @test minimum(ps) ≈ 500 rtol=rtol
    # When argument of periapsis and periastron are both zero, periastron should be in the East, apoastron in the West
    # NOTE(review): the expectations below are on `ys` (Dec offsets), placing
    # periastron at +Dec (North) and apoastron at -Dec (South); the East/West
    # wording above looks stale — confirm the intended axis.
    @test maximum(ys) ≈ 500 rtol=rtol
    @test minimum(ys) ≈ -1500 rtol=rtol
    # Rotate Ω by 90°
    ecc_rot_Ω = orbit(
        a = 1.0, # AU
        e = 0.5,
        i = 0.0,
        ω = 0.0,
        Ω = deg2rad(90),
        tp = 0.0,
        M = 1.0, # M_sun
        plx = 1000.0, # 1000 mas == 1pc
    )
    xs = raoff.(ecc_rot_Ω, one_year_range)
    ys = decoff.(ecc_rot_Ω, one_year_range)
    # Recall, East is left in the sky.
    # We have rotated 90 degrees CCW.
    @test minimum(xs) ≈ -1500 rtol=rtol
    @test maximum(xs) ≈ 500 rtol=rtol
    # Rotate ω by 90° (comment previously said τ, but the code varies ω)
    ecc_rot_ω = orbit(
        a = 1.0, # AU
        e = 0.5,
        i = 0.0,
        ω = deg2rad(90.0),
        Ω = 0.0,
        tp = 0.0,
        M = 1.0, # M_sun
        plx = 1000.0, # 1000 mas == 1pc
    )
    xs = raoff.(ecc_rot_ω, one_year_range)
    ys = decoff.(ecc_rot_ω, one_year_range)
    # Recall, East is left in the sky.
    # We have rotated 90 degrees CCW.
    @test minimum(xs) ≈ -1500 rtol=rtol
    @test maximum(xs) ≈ 500 rtol=rtol
    # Rotate both Ω and ω (opposite directions: the rotations should cancel
    # back onto the Dec axis)
    ecc_rot_Ωτ = orbit(
        a = 1.0, # AU
        e = 0.5,
        i = 0.0,
        ω = deg2rad(-90),
        Ω = deg2rad(90),
        tp = 0.0,
        M = 1.0, # M_sun
        plx = 1000.0, # 1000 mas == 1pc
    )
    xs = raoff.(ecc_rot_Ωτ, one_year_range)
    ys = decoff.(ecc_rot_Ωτ, one_year_range)
    # Recall, East is left in the sky.
    # We have rotated 90 degrees CCW.
    @test maximum(ys) ≈ 500 rtol=rtol
    @test minimum(ys) ≈ -1500 rtol=rtol
    # Highly eccentric
    ecc09 = orbit(
        a = 1.0, # AU
        e = 0.9,
        i = 0.0,
        ω = 0.0,
        Ω = 0.0,
        tp = 0.0,
        M = 1.0, # M_sun
        plx = 1000.0, # 1000 mas == 1pc
    )
    xs = raoff.(ecc09, one_year_range)
    ys = decoff.(ecc09, one_year_range)
    ps = projectedseparation.(ecc09, one_year_range)
    # Loosen the tolerance on these
    # NOTE(review): rtol=1e-4 below equals the global `rtol`, so nothing is
    # actually loosened here — confirm intent.
    @test maximum(ps) ≈ 1900 rtol=1e-4
    @test minimum(ps) ≈ 100 rtol=1e-4
    # Extremely eccentric
    # (note: the `ecc09` binding is reused here for e = 1 - 1e-3)
    ecc09 = orbit(
        a = 1.0, # AU
        e = 1-1e-3,
        i = 0.0,
        ω = 0.0,
        Ω = 0.0,
        tp = 0.0,
        M = 1.0, # M_sun
        plx = 1000.0, # 1000 mas == 1pc
    )
    xs = raoff.(ecc09, one_year_range)
    ys = decoff.(ecc09, one_year_range)
    ps = projectedseparation.(ecc09, one_year_range)
    @test maximum(ps) ≈ 1999 rtol=1e-4
    # Loosen the tolerance on these even more (periastron flies by very quickly)
    @test minimum(ps) ≈ 1 rtol=1e1
end
## Test chain rules
@testset "Chain Rules" begin
    # These derivative checks are broken at MA === 0 when e > 0, so the MA
    # range (and the e range of the second loop) starts slightly above zero.
    # Fix one argument of the Kepler solver and differentiate w.r.t. the other:
    solve_fixing_MA(MA) = e -> PlanetOrbits.kepler_solver(MA, e)
    solve_fixing_e(e) = MA -> PlanetOrbits.kepler_solver(MA, e)
    # d(EA)/d(MA): analytic ForwardDiff rule vs. finite differences.
    for e in 0:0.1:0.9, MA in 0.001:0.1:2π
        numeric = FiniteDiff.finite_difference_derivative(solve_fixing_e(e), MA)
        analytic = ForwardDiff.derivative(solve_fixing_e(e), MA)
        @test numeric ≈ analytic rtol=rtol
    end
    # d(EA)/d(e): same comparison, differentiating w.r.t. eccentricity.
    for e in 0.001:0.1:0.9, MA in 0.001:0.1:2π
        numeric = FiniteDiff.finite_difference_derivative(solve_fixing_MA(MA), e)
        analytic = ForwardDiff.derivative(solve_fixing_MA(MA), e)
        @test numeric ≈ analytic rtol=rtol
    end
end
## Test analytic derivatives match numeric derivatives
@testset "PMA & Accel." begin
    # Check analytic derivative properties against ForwardDiff over a big
    # range of orbits. Previously t, i, ω, and Ω were iterated but never used
    # (the orbit hardcoded i = ω = Ω = 0 and evaluated everything at t=100.0),
    # so only a single face-on geometry was actually exercised. They are now
    # passed through.
    for t in 0.:35:356.,
        a in 0.1:0.2:3,
        e in 0:0.1:0.9,
        i in deg2rad.([-45, 0, 45, 90, ]),
        ω in deg2rad.([-45, 0, 45, 90, ]),
        Ω in deg2rad.([-45, 0, 45, 90, ])

        elems = orbit(;
            a,
            e,
            i,
            ω,
            Ω,
            tp = 0.0,
            M = 1.0,
            plx = 1000.0, # 1000 mas <-> 1pc
        )
        # Evaluate away from t == tp (mean anomaly exactly zero), where AD
        # through the Kepler solver is known to be broken for e > 0
        # (see the "Chain Rules" testset above).
        t_eval = t + 10.0
        # Proper motion must equal the time-derivative of position
        # (converted from per-day to per-year).
        @test pmra(elems, t_eval) ≈ ForwardDiff.derivative(
            t->raoff(elems, t),
            t_eval
        )*PlanetOrbits.year2day_julian
        @test pmdec(elems, t_eval) ≈ ForwardDiff.derivative(
            t->decoff(elems, t),
            t_eval
        )*PlanetOrbits.year2day_julian
        # Acceleration must equal the time-derivative of proper motion.
        @test accra(elems, t_eval) ≈ ForwardDiff.derivative(
            t->pmra(elems, t),
            t_eval
        )*PlanetOrbits.year2day_julian
        @test accdec(elems, t_eval) ≈ ForwardDiff.derivative(
            t->pmdec(elems, t),
            t_eval
        )*PlanetOrbits.year2day_julian
    end
end
@testset "Orbit selection" begin
    # The orbit() factory should pick the most capable orbit type that the
    # supplied keyword arguments allow.
    rv_only = orbit(;a=1.0, e=0.0, ω=0.0, tp=0.0, M=1.0)
    @test rv_only isa RadialVelocityOrbit
    with_geometry = orbit(;a=1.0, e=0.0, ω=0.0, tp=0.0, M=1.0, i=0.1, Ω=0.0)
    @test with_geometry isa KepOrbit
    with_parallax = orbit(;a=1.0, e=0.0, ω=0.0, tp=0.0, M=1.0, i=0.1, Ω=0.0, plx=100.0)
    @test with_parallax.parent isa KepOrbit
    thiele_innes = orbit(;A=100.0, B=100.0, F=100.0, G=-100.0, e=0.5, tp=0.0, M=1.0, plx=100.0)
    @test thiele_innes isa ThieleInnesOrbit
end
@testset "Conventions" begin
    # Idealized Earth analogue used to pin down the unit conventions:
    # a = 1 AU, M = 1 Msun, at 1 pc.
    IAU_earth = orbit(
        a = 1.0,
        e = 0.0,
        i = 0.0,
        ω = 0.0,
        Ω = 0.0,
        tp = 0.0,
        M = 1.0,
        plx = 1000.0
    )
    # Expected period in days — presumably the value implied by the IAU
    # nominal constants (GM_sun, AU, day); confirm against the derivation.
    @test period(IAU_earth) ≈ 365.2568983840419 rtol=1e-15 atol=1e-15
    # Mean motion is expressed per Julian year (365.25 days), hence the ratio.
    @test meanmotion(IAU_earth) ≈ 2pi*365.2500000000/365.2568983840419 rtol=1e-15 atol=1e-15
end
# ----------------------------------------------------------------------------------------------------------------------
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | docs | 1232 | <img height=150 src="https://github.com/sefffal/PlanetOrbits.jl/blob/master/docs/src/assets/logo.png"/>
# PlanetOrbits.jl
[](https://sefffal.github.io/PlanetOrbits.jl/dev)
[](https://codecov.io/gh/sefffal/PlanetOrbits.jl)
Tools for solving simple Keplerian orbits.
The primary use case is mapping orbital elements into e.g. Cartesian coordinates at different times.
A Plots.jl recipe is included for easily plotting orbits.
One can for instance calculate an orbit around a star in 3D, a projected position in the sky, a radial velocity curve, or stellar astrometric accleration over time.
It's a great tool for visualizing different orbits (see examples) and generating nice animations (e.g. with Plots or Luxor.jl).
This package has been designed for good performance and composability with a wide range of packages in the Julia ecosystem, including ForwardDiff.
It forms the backbone of [Octofitter.jl](https://github.com/sefffal/Octofitter.jl), a modelling framework for all kinds of exoplanet data.
See documentation at https://sefffal.github.io/PlanetOrbits.jl/dev
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | docs | 10365 | # API Documentation
The following tables show what functions are supported for what kind of orbit. If you're not sure yet what kind of orbit to use, just use the [`orbit`](@ref) function!
✅ indiciates that a function is available, and ❌ indicates it is not due to the orbit not storing sufficient information. ⚠️ indicates that it could be supoprted, but is not yet implemented.
## Required Parameters
The following table specifies what properties are required to construct each orbit type. Based on this information, different orbit types have different capabilities (described in following tables).
| property | meaning | KepOrbit | Visual{KepOrbit} | AbsoluteVisual{KepOrbit} | ThieleInnesOrbit | RadialVelocityOrbit | CartesianOrbit | Visual{CartesianOrbit} |
|---------- | ------------------- |---------- |------------------ |----------------------- |------------------ |--------------------- |---------------- |------------------------|
| M | | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ |
| plx | | | ✔️ | ✔️ | ✔️ | | | ✔️ |
| tp | | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ |
| tref | | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ |
| e | | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | | |
| i | | ✔️ | ✔️ | ✔️ | | | | |
| ω | | ✔️ | ✔️ | ✔️ | | ✔️ | | |
| Ω | | ✔️ | ✔️ | ✔️ | | | | |
| A | | | | | ✔️ | | | |
| B | | | | | ✔️ | | | |
| F | | | | | ✔️ | | | |
| G | | | | | ✔️ | | | |
| x | | | | | | | ✔️ | ✔️ |
| y | | | | | | | ✔️ | ✔️ |
| z | | | | | | | ✔️ | ✔️ |
| vx | | | | | | | ✔️ | ✔️ |
| vy | | | | | | | ✔️ | ✔️ |
| vz | | | | | | | ✔️ | ✔️ |
| ref_epoch | | | | ✔️ | | | | |
| ra | | | | ✔️ | | | | |
| dec | | | | ✔️ | | | | |
| rv | | | | ✔️ | | | | |
| pmra | | | | ✔️ | | | | |
| pmdec | | | | ✔️ | | | | |
## Properties of Orbits
You can use these functions like `totalmass(orbit)`.
| Function | KepOrbit | Visual{KepOrbit} | ThieleInnesOrbit | RadialVelocityOrbit | CartesianOrbit | Visual{CartesianOrbit} |
|---------- |---------- |------------------ |------------------ |--------------------- |---------------- |------------------------ |
| [`totalmass`](@ref) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| [`period`](@ref) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| [`distance`](@ref) | ❌ | ✅ | ✅ | ❌ | ❌ | ✅ |
| [`meanmotion`](@ref) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| [`eccentricity`](@ref) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| [`inclination`](@ref) | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |
| [`semimajoraxis`](@ref) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| [`periastron`](@ref) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| [`semiamplitude`](@ref) | ✅ | ✅ | ⚠️ | ✅ | ✅ | ✅ |
## Properties of Orbit Solutions
You can use these functions like `sol = orbitsolve(orbit,mjd("2020-01")); posx(sol)` or `posx(orbit, mjd("2020-01"))`.
| Function | KepOrbit | Visual{KepOrbit} | ThieleInnesOrbit | RadialVelocityOrbit | CartesianOrbit | Visual{CartesianOrbit} |
|---------- |---------- |------------------ |------------------ |--------------------- |---------------- |------------------------ |
| [`meananom`](@ref) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| [`trueanom`](@ref) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| [`eccanom`](@ref) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| [`posx`](@ref) | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |
| [`posy`](@ref) | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |
| [`posz`](@ref) | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |
| [`velx`](@ref) | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |
| [`vely`](@ref) | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |
| [`velz`](@ref) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| [`raoff`](@ref) | ❌ | ✅ | ✅ | ❌ | ❌ | ✅ |
| [`decoff`](@ref) | ❌ | ✅ | ✅ | ❌ | ❌ | ✅ |
| [`radvel`](@ref) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| [`posangle`](@ref) | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |
| [`pmra`](@ref) | ❌ | ✅ | ✅ | ❌ | ❌ | ✅ |
| [`pmdec`](@ref) | ❌ | ✅ | ✅ | ❌ | ❌ | ✅ |
| [`accra`](@ref) | ❌ | ✅ | ❌ | ❌ | ❌ | ⚠️ |
| [`accdec`](@ref) | ❌ | ✅ | ❌ | ❌ | ❌ | ⚠️ |
## Documentation
```@autodocs
Modules = [PlanetOrbits]
```
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | docs | 1761 |
# Units & Conventions
The main constructor, [`Visual{KepOrbit}`](@ref), accepts the following parameters:
- `a`: Semi-major axis in astronomical units (AU)
- `i`: Inclination in radians
- `e`: Eccentricity in the range [0, 1)
- `tp`: Epoch of periastron passage in days (specifically, referenced to modified julian date)
- `M`: Gravitational parameter of the central body, expressed in units of Solar mass.
- `ω`: Argument of periastron, radians.
- `Ω`: Longitude of the ascending node, radians.
- `plx`: Distance to the system expressed in milliarcseconds of parallax.
Parameters can either be specified by position or as keyword arguments (but not a mix).

This diagram shows a circular (gray), inclined (blue), and inclined eccentric (green) orbits described using the conventions of this package.
The $x$ variable increases to the left in the plane of the sky, consistent with right ascension that increases towards the East. The $y$ coordinate increases upwards towards the North. The $z$ coordinate increases away from the observer.
The ascending node is measured CCW in the plane of the sky starting from the $y$ (North) axis.
The location of the planet along its ellipse is measured along the green ellipse starting from periastron.
The parameter $tp$ describes a date at which the planet made its closest approach to the star, and therefore sets the location of the planet at a given time.
For bound (circular or elliptical) orbits there are infinitely many equivalent `tp` values, related by $t_p\prime = t_p i P$ where $P$ is the period of the planet.
See this PDF for a detailed derivation of projected position, velocity, and acceleration from these coordinates: [Derivation.pdf](assets/orbit_coordinate_notes.pdf)
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | docs | 1691 | # Converting Between Orbit Types
You can convert between several of the supported orbit types.
Examples:
```@example 1
using PlanetOrbits, Plots
# Specify a Visual{KepOrbit}
orb_vis = Visual{KepOrbit}(M=1, e=0.4, a=1, i=2, Ω=3, ω=1, tp=0, plx=10.0);
# Convert to Thiele-Innes
orb_ti = ThieleInnesOrbit(orb_vis)
# Convert back to Visual{KepOrbit}
orb_vis2 = Visual{KepOrbit}(orb_ti)
# Convert to a CartesianOrbit (specified by position and velocity)
# We have to solve the orbit at a particular time (or true anomally, mean anomally, etc)
# Then we can use that solution to construct a CartesianOrbit
orb_vis_sol = orbitsolve(orb_vis,0)
orb_cart = CartesianOrbit(orb_vis_sol; tol=1e-4) # default is 1e-8
# Solve each orbit at the same date
time = mjd("2023-01-01")
sol_vis = orbitsolve(orb_vis, time)
sol_ti = orbitsolve(orb_ti, time)
sol_vis2 = orbitsolve(orb_vis2, time)
sol_cart = orbitsolve(orb_cart, time)
plot( aspectratio=1, legend=:none,)
xlims!(-1.5,1.5)
ylims!(-1.5,1.5)
zlims!(-1.5,1.5)
plot!(sol_vis, color=1, lw=10, ms=10, kind=(:x,:y,:z))
plot!(sol_ti, color=2, lw=6, ms=6, kind=(:x,:y,:z))
plot!(sol_vis2, color=3, lw=3, ms=3, kind=(:x,:y,:z))
plot!(sol_cart, color=4, lw=1, ms=1, kind=(:x,:y,:z))
scatter!([0], [0], [0], marker=:circle, color=:white, ms=6)
```
When converting to a [`CartesianOrbit`](@ref), the `tol` parameter controls how near-equitorial and near-circular orbits are handled.
If eccentricity is below `tol`, then it is zeroed and the orbit is treated as circular (changing how `ω` is set).
If the absolute value of inclination is below `tol`, then it is zeroed and the orbit is treated as equatorial (changing how `Ω` is set). | PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | docs | 1255 | # Getting Started
The first step to using PlanetOrbits.jl is to install Julia. If you're used to Python, don't worry --- Julia is easy to install, and you won't need to code anything other than changing your input data.
## Installing Julia
Visit the [julialang.org](https://julialang.org/downloads/) Downloads page, and select the latest stable version for your operating system. Currently, this is 1.7.0. Click the `[help]` links next to your operating system if you require more detailed instructions.
## Installing PlanetOrbits
Normally, Julia packages are installed from the General registry. Since PlanetOrbits isn't quite ready for prime time, it requires one extra step to add an additional registry.
1. Start julia in a terminal by running `julia`
2. Type `]` to enter package-mode (see Julia documentation for more details)
3. Type `up` to setup the General registry if this is your first time using Julia.
4. Type `registry add https://github.com/sefffal/DirectRegistry`
5. Type `add PlanetOrbits`
If you would like to visualize your results, you can also install the Plots package:
6. Type `add Plots`
This will take a little while to download all the required packages and precompile for your system.
## Plotting your first orbit | PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | docs | 782 | # Hyperbolic Orbits
PlanetOrbits.jl has preliminary support for hyperbolic orbits.
They are currently supported with [`KepOrbit`](@ref) and [`CartesianOrbit`](@ref) but not [`ThieleInnesOrbit`](@ref).
```@example 1
using PlanetOrbits, Plots
# Specify orbit with a Campbell parameters (KepOrbit)
orb = orbit(M=1, e=1.1, a=1, i=2, Ω=3, ω=1, tp=mjd("2024-01-01"));
sol = orbitsolve(orb, mjd("2024-3-01"))
plot(sol, tspan=mjd("2024-01-01") .+ (-300,100))
```
```@example 1
using PlanetOrbits, Plots
# Specify orbit with a state vector (CartesianOrbit)
orb = orbit(
x = 1.0,
y = 0.3,
z = 0.001,
vx = 0,
vy = 9,
vz = 0.0,
M = 1,
tref = mjd("2024-01-01")
)
sol = orbitsolve(orb, mjd("2024-3-01"))
plot(sol, tspan=mjd("2024-01-01") .+ (-300,100))
``` | PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | docs | 2015 |
# Image Warping
!!! warning
This functionality is currently not working and could use some attention in the form of pull request.
If you have an image of a system, you can warp the image as if each pixel were a test particle following Kepler's laws.
This is an easy way to see what a disk or a system of planets would look like at a time other than when it was captured.
To make this possible, PlanetOrbits.jl can create `OrbitalTransformation` objects. These follow the conventions set out
in CoordinateTransformations.jl and are compatible with ImageTransformations.jl.
Example:
```julia
using ImageTransformations, AstroImages, CoordinateTransformations
ot = OrbitalTransformation(
i = 0.3,
e = 0.0,
M = 1.0,
ω = 0.5,
Ω = 0.5,
plx = 300.0,
platescale=10.0, # mas/px
dt = 3*365.25 # days forward in time
)
img = load("input.fits")
tform_centered = ImageTransformations.recenter(ot, ImageTransformations.center(img))
img_future = warp(img, tform_centered, axes(img))
# Display with AstroImages.jl
imview([img; img_future], cmap=:seaborn_icefire_gradient)
```
**Before, and After Orbital Transformation**

Note the arguments `platescale` and `dt` are required, but `a` and `τ` are not. The position of the pixel in X/Y space uniquely determines the semi-major axis and epoch of periastron passage when the rest of the orbital parameters are known. `platescale` in units of milliarseconds/pixel is necessary to get the overall scale of the transform correct. This is because an orbital transformation is **not** linear (and therefore, care must be taken when composing an OrbitalTransformation with other CoordinateTransformations). Scaling an image will change the amount of rotation that occurs at each separation. `dt` is the the amount of time in days to project the image forward. It can also be negative to project the image into the past.
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | docs | 2341 |
# PlanetOrbits.jl
[](https://github.com/sefffal/PlanetOrbits.jl)
Tools for solving Keplerian orbits, especially in the context of exoplanet detection and modelling.
The functions of this package allow one to propagate two-body orbits using a variety of orbital basis sets (classic Campbell, Thiele-Innes, and Cartesian state-vectors.).
A fully featured Plots.jl recipe is included for easily plotting different orbit properties in space or time, or against other variables.
Bound circular and elliptical orbits are fully supported. Support for hyperbolic orbits is experimental.
Among other uses, it can be used to calculate the projected positions of planets, radial velocity, and proper motion anomaly.
It is also a great tool for visualizing different orbits (see examples) and generating nice animations (e.g. with Plots or Luxor.jl).
This package has been designed for good performance and composability with a wide range of packages in the Julia ecosystem.
Automatic differentiation with ForwardDiff, Enzyme, and Zygote are supported.
A variety of Kepler solvers are provided. Arbitrary precision can be achieved by specifying orbits using Julia's built in `BigFloat` datatype and using a solver with user-specified tolerance.
To fit orbits to observations, see [Octofitter.jl](https://github.com/sefffal/Octofitter.jl).
See also [AstroImages.jl](https://github.com/JuliaAstro/AstroImages.jl).
## Attribution
If you find this package useful in your research, please cite the following [paper](https://dx.doi.org/10.3847/1538-3881/acf5cc) (open-access link).
This software package contains calculations that are adapted from various open source packages, including:
* NASA/JPL SPICE (public domain)
* keplerorbit.py by Spencer Wallace (MIT license)
* poliastro (MIT license)
* Orbitize by Blunt et al. (BSD 3-Clause License)
* RadVel by Fulton et al. (MIT license)
These codes were useful references in the development of this package but are not redistributed.
### Tutorials
```@contents
Pages = ["introdcution.md", "plots.md", "image-warping.md"]
Depth = 5
```
### Documentation
```@contents
Pages = ["api.md", "conventions.md", "kepler.md"]
Depth = 5
```
```@raw html
<video src="assets/51-eri-orbit.mp4" autoplay loop width=300 height=300>
```
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | docs | 8787 | # Introduction
```@setup 1
# Set a plot theme so that the plots appear nicely on dark or light documenter themes
using Plots
theme(:default;
framestyle=:box,
)
```
This package is structured around a representation of an orbit ([`PlanetOrbits.AbstractOrbit`](@ref)), and a representation of a "solved" orbit ([`PlanetOrbits.AbstractOrbitSolution`](@ref)).
You start by creating an orbit with known information, e.g. the semi-major axis and eccentricity. You can then query information from this orbit, like its orbital period, mean motion, or periastron (closest approach). Then, you can "solve" the orbit one or more times for a given time, eccentric anomaly, or true anomaly.
Let's see how this works.
```@example 1
using PlanetOrbits, Plots
orb = orbit(
a=1.0, # semi major axis (AU)
M=1.0, # primary mass (solar masses)
)
```
The [`orbit`](@ref) function accepts many combinations of orbital parameters and returns a subtype of [`PlanetOrbits.AbstractOrbit`](@ref).
We can now query some basic properties about the orbit:
```@example 1
period(orb) # orbital period (days)
```
```@example 1
meanmotion(orb) # Mean motion (radians/yr)
```
```@example 1
periastron(orb) # Epoch of periastron passage (MJD)
```
```@example 1
semiamplitude(orb) # radial velocity semi-amplitude (m/s)
```
We can plot the orbit (more on this in [Plotting](@ref)):
```@example 1
plot(orb)
```
And we can solve the orbit for a given orbital location
```@example 1
sol = orbitsolve_ν(orb, 0.1) # true anomaly (radians)
sol = orbitsolve_eccanom(orb, 0.1) # eccentric anomaly (radians)
sol = orbitsolve_meananom(orb, 0.1) # mean anomaly (radians)
```
When constructing an orbit, the location of the planet along its orbit can be specified by `tp`. This is the (or a) time the planet made its closest approach to the star.
```@example 1
orb = orbit(
a=1.0, # semi major axis (AU)
M=1.0, # primary mass (solar masses)
tp=mjd("2020-04-15"),
);
```
We can now meaningfully solve the location of the planet at a specific time:
```@example 1
t = mjd("2020-07-15") # date as in modified julian days.
sol = orbitsolve(orb, t) # can optionally pass `tref=...`
```
We can query specifics at this solution:
```@example 1
trueanom(sol) # true anomaly (radians)
```
```@example 1
eccanom(sol) # eccentric anomaly (radians)
```
```@example 1
plot(sol) # defaults to kind=:radvel for RadialVelocityOrbit
```
Notice that we now see a marker at the location found by [`orbitsolve`](@ref).
We can create an orbit with some eccentricity. If not specified, eccentricity and the argument or periapsis default to 0 for any orbit type.
```@example 1
orb = orbit(
a=1.0, # semi major axis (AU)
M=1.0, # primary mass (solar masses)
tp=mjd("2020-04-15"),
# New:
e=0.6, # eccentricity
ω=2.5, # argument of periapsis (radians)
)
plot(orb) # defaults to kind=:radvel for RadialVelocityOrbit
```
!!! warning "ω convention"
The convention used in this package is that ω, the argument of periapsis, refers to the **secondary** body. This is in contrast to the typical standard adopted in the radial velocity literature where ω refers to the primary. You can convert by adding or subtracting 180°.
Since we only provided very minimal information to the `orbit` function, we've been receiving a [`RadialVelocityOrbit`](@ref). This object contains sufficient information to calculate the above radial velocity plots, orbital period, etc., but not the 3D position in space.
Let's create a new orbit with a specified inclination and longitude of ascending node.
```@example 1
orb = orbit(
a=1.0, # semi major axis (AU)
M=1.0, # primary mass (solar masses)
tp=mjd("2020-04-15"),
e=0.6, # eccentricity
ω=2.5, # argument of periapsis (radians)
# New:
i=0.6, # inclination (radians)
    Ω=2.3,     # longitude of ascending node (radians)
)
```
This time we received a full [`KepOrbit`](@ref). This has the necessary information to solve
the orbit in 2/3D.
```@example 1
plot(orb) # defaults to kind=(:x,:y) for KepOrbit
```
```@example 1
plot(orb, kind=(:x,:y,:z))
```
!!! note "Cartesian convention"
The convention used in this package is that x increases to the left (just like right-ascension), and the z increases away from the observer.
We can solve for a time or location as usual.
```@example 1
sol = orbitsolve(orb, mjd("2025-01"))
```
```@example 1
eccanom(sol) # eccentric anomaly (radians)
```
We can also query the cartesian position of the planet in AU:
```@example 1
PlanetOrbits.posx(sol)
```
```@example 1
PlanetOrbits.posy(sol)
```
```@example 1
PlanetOrbits.posz(sol)
```
```@example 1
plot(sol)
```
```@example 1
plot(sol, kind=:x)
```
We can still of course calculate the radial velocity as well.
```@example 1
radvel(sol)
```
```@example 1
plot(sol, kind=:radvel)
```
Finally, we'll specify the parallax distance to the system. This will allow us to plot orbits with angular units as they would appear in the sky from the Earth.
```@example 1
orb = orbit(
a=1.0, # semi major axis (AU)
M=1.0, # primary mass (solar masses)
tp=mjd("2020-04-15"),
e=0.6, # eccentricity
ω=2.5, # argument of periapsis (radians)
i=0.6, # inclination (radians)
    Ω=2.3,     # longitude of ascending node (radians)
# New:
plx=100.0 # parallax distance (milliarcseconds)
)
```
```@example 1
sol = orbitsolve(orb, 0.0)
plot(sol)
```
```@example 1
posangle(sol) # position angle offset from barycentre (milliarcseconds)
```
```@example 1
projectedseparation(sol) # separation from barycentre (milliarcseconds)
```
```@example 1
raoff(sol) # right ascension offset from barycentre (milliarcseconds)
```
```@example 1
decoff(sol) # declination offset from barycentre (milliarcseconds)
```
```@example 1
raoff(sol) # right ascension offset from barycentre (milliarcseconds)
```
```@example 1
decoff(sol) # declination offset from barycentre (milliarcseconds)
```
```@example 1
pmra(sol) # instantaneous right ascension velocity from barycentre (milliarcseconds/year)
```
```@example 1
pmdec(sol) # instantaneous declination velocity from barycentre (milliarcseconds/year)
```
```@example 1
accra(sol) # instantaneous right ascension acceleration from barycentre (milliarcseconds/year^2)
```
```@example 1
accdec(sol) # instantaneous declination acceleration from barycentre (milliarcseconds/year^2)
```
## Performance
The [`orbit`](@ref) function is a convenience only for interactive use. It is inefficient since it is not type-stable. Instead, one should use one of the orbit constructors directly.
For example, instead of
```@example 1
orb = orbit(
a=1.0, # semi major axis (AU)
M=1.0, # primary mass (solar masses)
tp=mjd("2020-04-15"),
e=0.6, # eccentricity
ω=2.5, # argument of periapsis (radians)
i=0.6, # inclination (radians)
    Ω=2.3,     # longitude of ascending node (radians)
) # Not type stable
```
Use:
```@example 1
orb = KepOrbit(
a=1.0, # semi major axis (AU)
M=1.0, # primary mass (solar masses)
tp=mjd("2020-04-15"),
e=0.6, # eccentricity
ω=2.5, # argument of periapsis (radians)
i=0.6, # inclination (radians)
    Ω=2.3,     # longitude of ascending node (radians)
plx=100.0 # parallax distance (milliarcseconds)
) # Type stable
```
This will prevent unnecessary allocations in some cases.
## Convenience
All functions described above that apply to orbit solutions can be called directly on an orbit along with a time in days:
```@example 1
orb = orbit(
a=1.0, # semi major axis (AU)
M=1.0, # primary mass (solar masses),
)
radvel(orb, mjd("2025-01-01"))
```
If you need to calculate many different properties, e.g. both x and y position at a given time/location, it is more efficient to calculate the orbit solution a single time and query the result as needed.
## Host calculations
The above calculations treat the planet as a test particle and calculate their displacement/velocity/etc. compared to the two-particle system's barycentre. If you wish to calculate the same properties for the host object, you can additionally supply the mass of the planet.
```@example 1
orb = orbit(
a=1.0, # semi major axis (AU)
M=1.0, # primary mass (solar masses)
i=0.5,
Ω=2.5,
plx=100.0
)
sol = orbitsolve(orb, mjd("2025-01-01"))
# Secondary radial velocity
radvel(sol)
```
```@example 1
# Primary radial velocity
radvel(sol, 0.1) # 0.1 solar mass secondary
```
The following show pairs of results for the secondary and the primary:
```@example 1
PlanetOrbits.posx(sol), PlanetOrbits.posx(sol, 0.1)
```
```@example 1
radvel(sol), radvel(sol, 0.1)
```
```@example 1
raoff(sol), raoff(sol, 0.1)
```
```@example 1
accra(sol), accra(sol, 0.1)
```
```@example 1
projectedseparation(sol), projectedseparation(sol, 0.1)
```
```@example 1
posangle(sol), posangle(sol, 0.1)
```
| PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | docs | 2936 | # Kepler Solvers
The heart of this package is being able to take a set of Keplerian elements and output relative positions, velocities, etc.
This normally requires solving Kepler's equation numerically. This package supports a multitude of solver algorithms that can be passed to [`orbitsolve`](@ref):
* [`PlanetOrbits.Auto`](@ref)
* [`PlanetOrbits.Markley`](@ref)
* [`PlanetOrbits.Goat`](@ref)
* [`PlanetOrbits.RootsMethod`](@ref)
The last of these, `RootsMethod`, allows one to substitute any algorithm from the Roots.jl package. These include many different classical and modern root finding algorithms, and they can operate at any chosen precision, including arbitrary precision BigFloats. Using big floats with, for example, `Roots.Thukral5B` and a tight tolerance, allows you to solve orbits up to arbitrary precision.
The default choice is `Auto`, which currently selects `Markley` for all cases. The Markley algorithm is very fast, reasonably accurate, and always converges, making it a good default choice.
The Markley algorithm is a tweaked version of the algorithm from [AstroLib.jl](http://juliaastro.github.io/AstroLib.jl/stable/ref/#AstroLib.kepler_solver). It is non-iterative and converges with less than 1e-15 relative error across the full range of e between 0 and 1.
On my laptop, this solves for a single eccentric anomaly in just 71 ns.
Since it is implemented in pure Julia, there is no overhead from calling into a C or Cython compiled function and no need for vectorization.
## Examples
```@example 1
using PlanetOrbits, BenchmarkTools
orb = orbit(a=1.2, e=0.1, M=1.0, ω=1.4, τ=0.5)
t = mjd("2025-06-23")
@benchmark orbitsolve(orb, t, PlanetOrbits.Markley())
```
```@example 1
@benchmark orbitsolve(orb, t, PlanetOrbits.Goat())
```
```@example 1
using Roots
@benchmark orbitsolve(orb, t, PlanetOrbits.RootsMethod(Roots.Newton()))
```
```@example 1
using Roots
@benchmark orbitsolve(orb, t, PlanetOrbits.RootsMethod(Roots.Thukral3B()))
```
```@example 1
@benchmark orbitsolve(orb, t, PlanetOrbits.RootsMethod(Roots.A42()))
```
```@example 1
@benchmark orbitsolve(orb, t, PlanetOrbits.RootsMethod(Roots.Bisection()))
```
```@example 1
@benchmark orbitsolve(orb, t, PlanetOrbits.RootsMethod(Roots.SuperHalley()))
```
```@example 1
@benchmark orbitsolve(orb, t, PlanetOrbits.RootsMethod(Roots.Brent()))
```
```@example 1
@benchmark orbitsolve(orb, t, PlanetOrbits.RootsMethod(Roots.Order2()))
```
```@example 1
@benchmark orbitsolve(orb, t, PlanetOrbits.RootsMethod(Roots.AlefeldPotraShi()))
```
## High precision
You can solve Kepler's equation in high precision using big floats and tightening the tolerance on the solver.
```@example 1
orb_big = orbit(a=big(1.2), e=big(0.1), M=big(1.0), ω=big(1.4), τ=big(0.5))
sol = orbitsolve(orb_big, big(t), PlanetOrbits.RootsMethod(Roots.Thukral5B(),rtol=1e-30,atol=1e-30,))
radvel(sol)
```
## Comparison

 | PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | docs | 3048 | # Plotting
This package defines Plots.jl recipes for orbits and orbit solutions.
At its most basic, you can simply call `plot` on the object. The kind
of plot will be based on the type of object. You can pass the `kind`
argument to control what plot is generated.
Examples:
```@example 1
using PlanetOrbits, Plots
orb = orbit(
a=1.0,
M=1.0,
)
plot(orb) # defaults to kind=:radvel
```
```@example 1
orb = orbit(
a=1.0,
M=1.0,
i=0.5,
Ω=4.5
)
plot(orb) # defaults to kind=(:x,:y)
```
```@example 1
orb = orbit(
a=1.0,
M=1.0,
i=0.5,
Ω=4.5,
plx=120.0,
tp=mjd("2024-03-01")
)
plot(orb) # defaults to kind=:astrom
```
We can override:
```@example 1
plot(orb, kind=:radvel)
```
```@example 1
plot(orb, kind=(:x,:y))
```
```@example 1
plot(orb, kind=(:x,:z))
```
```@example 1
plot(orb, kind=(:x,:y,:z))
```
They all work on a given orbit solution as well:
```@example 1
sol = orbitsolve(orb, mjd("2020-01-01"))
plot(sol, kind=:radvel)
```
```@example 1
plot(sol, kind=(:x,:y))
```
```@example 1
plot(sol, kind=(:x,:z))
```
```@example 1
plot(sol, kind=(:x,:y,:z))
```
Note, in GR the position of the marker in 3D plots is incorrect. Try the `plotly()` backend instead.
The plots are generated by tracing out the orbit in equal steps of mean anomaly.
Compared to taking equal steps in time, this gives smooth lines even for very highly
eccentric orbits.
The plot recipe sets the axes to have default aspect ratios and flips the right-ascension (horizontal) axis to increasing towards the left as it does when viewed in the plane of the sky.
## Plotting multiple orbits
If you have an array of Keplerian orbits, you can plot them all in one go:
```@example 1
elements = [orbit(a=16+0.3i, i=deg2rad(i), e=0.25+0.001i, τ=0, M=1, ω=0, Ω=120, plx=35) for i in 1:1:90]
plot(elements, color=1)
```
This recipe scales down the opacity slightly so that you can see where the orbits overlap. Override by passing `alpha=1`.
## Animations
You can use the Plots.jl `@gif` and `@animate` macros to create animations using a for loop.
```@example 1
orb = orbit(a=1.2, e=0.4, M=1.0, ω=π/2, τ=0.0, i=π/4, Ω=0,plx=100)
@gif for t in range(0, period(orb),length=30)
sol = orbitsolve(orb,t)
plot(
plot(sol,kind=(:raoff,:decoff),body=(:primary,:secondary),mass=0.2,legend=false,title="astrometry"),
plot(sol,kind=(:pmra,:pmdec),body=(:primary,:secondary),mass=0.2,title="proper motion anomaly"),
plot(sol,kind=(:accra,:accdec),body=(:primary,:secondary),mass=0.2,legend=false,title="astrometric acceleration"),
plot(sol,kind=(:t,:radvel), tspan=(-300,300), body=(:primary,:secondary),mass=0.2,legend=false,title="radial velocity"),
lims=:symmetric,
framestyle=:box, titlefontsize=10,guidefontsize=7,tickfontsize=7
)
end
```
## Logo
To get more ideas for plotting, check out [this example](https://github.com/sefffal/PlanetOrbits.jl/blob/master/docs/logo.jl) which generates an animated version of the logo for this page.
 | PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 0.10.1 | 370c3ac3c000dd3ac975681521dcfe687efc1f24 | docs | 1158 |
# Symbolic Manipulation
The Symbolics.jl package works fairly well out of the box with PlanetOrbits.jl.
You can create fully or partially symbolic [`KepOrbit`](@ref) and/or solve for orbits
at a time or true anomaly given by a symbolic `t`.
This could come in use in a few scenarios. For example, if you have an orbit with all parameters known except inclination, you could construct a set of elements with `i` as a symbolic variable.
Solving the orbit using [`orbitsolve`](@ref) would then return a solution with simplified symbolic expressions of `i` that can be evaluated very efficiently for different values.
N.B. this approach is quite messy for a symbolic `e` since Kepler's equation is transcendental.
There is some support for using the Symbolics.jl package. You can create symbolic variables and trace most of the functions defined in this package to get symbolic expressions.
This is a little slow, and I'm not sure of the applications, but it's neat that it works.
```julia
using Symbolics
@variables t
expr = radvel(elements, t);
```
This works with the VisualOrbit constructors as well if you want to create
a full symbolic set of elements. | PlanetOrbits | https://github.com/sefffal/PlanetOrbits.jl.git |
|
[
"MIT"
] | 1.0.8 | 6d4c314981c5f1df153c23b69dfa295c5286e247 | code | 7155 | """
KissSmoothing: Easily smooth your data!
exports `denoise`, `fit_rbf`, `fit_nspline`, `fit_sine_series`, and `lsq_denoise`; for further help look at each function's docstring.
"""
module KissSmoothing
using FFTW: dct, idct
using Statistics: mean
using LinearAlgebra: factorize, I
using SparseArrays: sparse
"""
denoise(V::Array; factor=1.0, rtol=1e-12, dims=ndims(V), verbose = false)
smooth data in `V` according to:
`V` : data to smooth
`factor` : smoothing intensity
`rtol` : relative tolerance on how precise the smoothing intensity is determined
`dims` : array dimension being smoothed along
`verbose` : enables some printing of internal info
returns a tuple (S,N) where:
`S` : is the smoothed data
`N` : is the extracted noise
in particular `S + N` reconstructs the original data `V`.
"""
function denoise(
V::AbstractArray{Float64,N};
factor::Float64 = 1.0,
rtol::Float64 = 1e-12,
dims::Int64 = N,
verbose::Bool = false,
) where {N}
buf = IOBuffer()
K1 = sqrt(2 / pi)
K2 = sqrt(2) * K1
#K3 = sqrt(6)*K1
#K4 = sqrt(20)*K1
lV = size(V, dims)
if lV < 4
return copy(V), zero(V)
end
iV = dct(V)
stri = map(i -> ifelse(i == dims, lV, 1), 1:ndims(V))
X = map(abs2, reshape(LinRange(0.0, 1.0, lV), stri...))
d = factor * mean(abs, diff(V, dims = dims)) * (K1 / K2)
σt = 0.5
σd = 0.25
f = zero(V)
for iter = 1:60
σ = sqrt(lV) * σt / (1 - σt)
if !isfinite(σ)
break
end
f .= idct(iV .* exp.(-X .* σ))
c = mapreduce((x, y) -> abs(x - y), +, f, V) / length(V)
Δ = d - c
σt += σd * sign(Δ)
σd /= 2
if verbose
println(buf, iter, " ", σ, " ", Δ)
end
if abs(d - c) < rtol * d
break
end
end
if verbose
print(String(take!(buf)))
end
f, V .- f
end
# Thin-plate spline kernel evaluated on the squared distance between the
# points `x` and `y`. A tiny positive offset keeps `log` finite when the two
# points coincide.
function tps(x::AbstractArray{Float64}, y::AbstractArray{Float64})
    tiny = nextfloat(0.0)
    sqdist = tiny + mapreduce((a, b) -> abs2(a - b), +, x, y)
    return sqdist * log(sqdist)
end
# Fitted thin-plate radial-basis-function model produced by `fit_rbf`.
# Callable: `(net::RBF)(X)` evaluates the model at the rows of `X`.
struct RBF{G<:AbstractArray{Float64},C<:AbstractArray{Float64}}
    Γ::G # fitted coefficients for the kernel, constant and linear columns
    C::C # control points (one row per centre)
end
# Assemble the design matrix for a thin-plate RBF fit: one mean-centred kernel
# column per control point, followed by a constant column and the raw inputs
# (the linear polynomial tail).
function evalPhi(xs::AbstractArray{Float64}, cp::AbstractArray{Float64})
    npts = size(xs, 1)
    nvars = size(xs, 2)
    nctrl = size(cp, 1)
    design = zeros(npts, nctrl + 1 + nvars)
    @views for row = 1:npts
        avg = 0.0
        for col = 1:nctrl
            kern = tps(xs[row, :], cp[col, :])
            design[row, col] = kern
            avg += kern
        end
        avg /= nctrl
        # Centre the kernel columns around their per-row mean.
        for col = 1:nctrl
            design[row, col] -= avg
        end
        design[row, nctrl+1] = 1.0
        for col = 1:nvars
            design[row, nctrl+1+col] = xs[row, col]
        end
    end
    return design
end
# Evaluate the fitted RBF model at the rows of `X` by rebuilding the design
# matrix against the stored control points and applying the coefficients.
function (net::RBF)(X::AbstractArray{Float64})
    evalPhi(X, net.C) * net.Γ
end
"""
fit_rbf(xv::Array, yv::Array, cp::Array)
fit thin-plate radial basis function according to:
`xv` : array NxP, N number of training points, P number of input variables
`yv` : array NxQ, N number of training points, Q number of output variables
`cp` : array KxP, K number of control points, P number of input variables
returns a callable RBF object.
"""
function fit_rbf(
xv::AbstractArray{Float64},
yv::AbstractArray{Float64},
cp::AbstractArray{Float64},
)
RBF(evalPhi(xv, cp) \ yv, collect(cp))
end
# Truncated-cubic difference used in the natural cubic spline basis:
# d_k(x) = ((x - n1)_+^3 - (x - nK)_+^3) / (nK - n1).
function basis_d(x, n1, nK)
    x1p = max(x - n1, zero(x))
    x2p = max(x - nK, zero(x))
    return ((x1p)^3 - (x2p)^3) / (nK - n1)
end

"""
    basis_N(x, xi, k)

Evaluate the `k`-th natural cubic spline basis function with knots `xi` at `x`.
Knot coordinates are internally rescaled to [0, 1]; `k = K-1` yields the
constant term and `k = K` the linear term. Throws an error unless
`1 ≤ k ≤ length(xi)`.
"""
function basis_N(x, xi, k::Int)
    K = length(xi)
    if k < 1 || k > K
        error("order must be between 1 and K = length(xi)")
    end
    if k == K - 1
        return one(x)
    end
    sx = (x - xi[1]) / (xi[end] - xi[1])
    if k == K
        return sx
    end
    # Knot positions rescaled to [0, 1] to match `sx`.
    nxi_k = (xi[k] - xi[1]) / (xi[end] - xi[1])
    nxi_em1 = (xi[end-1] - xi[1]) / (xi[end] - xi[1])
    # BUG FIX: in normalised coordinates the last knot is 1, not xi[end].
    # The original passed the raw knot value `xi[end]` as the upper knot of the
    # second truncated-cubic term, which distorts the basis whenever xi[end] ≠ 1.
    return basis_d(sx, nxi_k, 1) - basis_d(sx, nxi_em1, 1)
end
"""
fit_nspline(xv::Vector, yv::Vector, cp::Vector)
fit natural cubic splines basis function according to:
`xv` : array N, N number of training points
`yv` : array N, N number of training points
`cp` : array K, K number of control points
returns a callable function.
"""
function fit_nspline(
x::AbstractVector{Float64},
y::AbstractVector{Float64},
xi::AbstractVector{Float64},
)
issorted(xi) || error("Knots \"xi\" must be sorted.")
N = length(x)
K = length(xi)
M = zeros(N, K)
scal = 1 / sqrt(N)
for i = 1:N, j = 1:K
M[i, j] = basis_N(x[i], xi, j) / scal
end
C = M \ (y ./ scal)
function fn(x)
s = zero(x)
for i in eachindex(C)
s += basis_N(x, xi, i) * C[i]
end
return s
end
end
"""
fit_sine_series(X::Vector,Y::Vector, basis_elements::Integer; lambda = 0.0, order=3)
fit Y ~ 1 + X + Σ sin(.) by minimising Σ (Y - f(x))^2 + lambda * ∫(Δ^order F)^2
`X` : array N, N number of training points.
`Y` : array N, N number of training points.
`basis_elements` : number of sine terms.
Keyword arguments:
`lambda` : intensity of regularization.
`order` : derivative to regularise.
returns a callable function.
"""
function fit_sine_series(X::AbstractVector{<:Real},Y::AbstractVector{<:Real}, basis_elements::Integer; lambda = 0.0, order::Int64=3)
lx, hx = extrema(X)
T = @. (X - lx)/(hx-lx)*pi
M = zeros(length(X),2+basis_elements)
for i in eachindex(X)
M[i, 1] = 1
M[i, 2] = T[i]
for k in 1:basis_elements
M[i, 2+k] = sin(k*T[i])
end
end
phi = M'M
for i=3:2+basis_elements
phi[i,i] += lambda*(i-2)^(2order)
end
C = phi \ (M'*Y)
return function fn(x)
t = (x - lx)/(hx-lx)*pi
s = C[1] + C[2]*t
for k in 1:basis_elements
s += C[2+k]*sin(k*t)
end
s
end
end
"""
lsq_denoise(S::AbstractVector{<:Real}; order::Integer=3, strength::Real = NaN)
denoise a sequence S by penalising its order-th finite differences in a least squares regression
`S` : sequence.
Keyword arguments:
`order` : finite differencing order.
`strength` : intensity of penalisation on the derivative in [0, Inf[, if unspecified it is auto-determined.
return the filtered sequence.
"""
function lsq_denoise(S::AbstractVector{<:Real}; order::Integer=3, strength::Real = NaN)
D = sparse(1.0I, length(S), length(S))
for _ in 1:order
D = diff(D, dims=1)
end
C = D'D
smoother(t) = factorize(I + (t/(1-t))*C)\S
if isfinite(strength)
return smoother(strength/(1+strength))
end
X = collect(S)
for _ in 1:order
X = diff(X)
end
noise = sum(x -> x^2/length(X), X) / binomial(2*order,order)
t = 0.5
dt = 0.25
while true
V = smoother(t)
pnoise = mapreduce((x,y) -> abs2(x-y)/length(V), +, V, S)
t += dt*sign(noise-pnoise)
dt /= 2
if abs(pnoise-noise) < 1e-5 * noise || dt < 1e-16
break
end
end
smoother(t)
end
export denoise, fit_rbf, RBF, fit_nspline, fit_sine_series, lsq_denoise
end # module
| KissSmoothing | https://github.com/francescoalemanno/KissSmoothing.jl.git |
|
[
"MIT"
] | 1.0.8 | 6d4c314981c5f1df153c23b69dfa295c5286e247 | code | 2550 | using KissSmoothing
using Test
import Random
@testset "Goodness of Smoothing" begin
rn0 = Random.MersenneTwister(1337)
for N in [100, 200, 400, 800]
for α in [0.01, 0.05, 0.1]
x = LinRange(0, 1, N)
y = identity.(x)
n = randn(rn0, length(x)) .* α
yr = y .+ n
ys, yn = denoise(yr)
c1 = sum(abs2, yr .- y)
c2 = sum(abs2, ys .- y)
@test c2 < c1
s1 = sum(abs2, n)
s2 = sum(abs2, yn)
@test abs(log2(s2 / s1)) < 1
end
end
end
@testset "Too few points" begin
O = [1.0, 2.0, 10.0]
S, N = denoise(O)
@test all(O .== S)
@test all(N .== 0)
@test length(S) == 3
@test length(N) == 3
end
@testset "Infinite smoothing" begin
O = sign.(sin.(1:1000)) .+ 1
S, N = denoise(O, factor = Inf)
@test all(S .≈ 1)
@test all(abs.(N) .≈ 1)
end
@testset "Verbose" begin
O = sign.(sin.(1:1000)) .+ 1
S, N = denoise(O, factor = 0.0, verbose = true)
@test all(abs.(S .- O) .< 1e-10)
end
@testset "Fit 1D RBF" begin
for μ in LinRange(-100,100,5)
t = LinRange(0,2pi,150)
y = sin.(t).+ μ .+ t
fn = fit_rbf(t,y,LinRange(0,2pi,50))
pred_y = fn(t)
error = sqrt(sum(abs2, pred_y .- y)/length(t))
@test error < 0.0001
end
end
@testset "Fit Sine Series" begin
for μ in LinRange(-100,100,5)
t = LinRange(0,2pi,150)
y = sin.(t) .+ μ .* t
fn = fit_sine_series(t,y,50, order = 3, lambda = 0.00001)
pred_y = fn.(t)
error = sqrt(sum(abs2, pred_y .- y)/length(t))
@test error < 0.0002
end
end
@testset "Fit NSpline" begin
for μ in LinRange(-100,100,5)
t = LinRange(0,2pi,150)
y = sin.(t).+μ
fn = fit_nspline(t,y,LinRange(0,2pi,50))
pred_y = fn.(t)
error = sqrt(sum(abs2, pred_y .- y)/length(t))
@test error < 0.0002
end
@test_throws ErrorException KissSmoothing.basis_N(Float64[],Float64[],1)
end
@testset "LSQ Denoise" begin
rn1 = Random.MersenneTwister(1337)
for μ in LinRange(-100,100,5)
t = LinRange(0,2pi,150)
y = sin.(t)
ny = y .+ randn(rn1,length(t))*0.5
errorn = sqrt(sum(abs2, ny .- y)/length(t))
sy = lsq_denoise(ny)
errors = sqrt(sum(abs2, sy .- y)/length(t))
sym = lsq_denoise(ny; strength = 3000.0)
errorsm = sqrt(sum(abs2, sym .- y)/length(t))
@test errors < errorn
@test errorsm < errorn
end
end
| KissSmoothing | https://github.com/francescoalemanno/KissSmoothing.jl.git |
|
[
"MIT"
] | 1.0.8 | 6d4c314981c5f1df153c23b69dfa295c5286e247 | docs | 5741 | # KissSmoothing.jl
This package implements a denoising procedure, a Radial Basis Function estimation procedure and a Natural Cubic Splines estimation procedure.
## Denoising
denoise(V::Array; factor=1.0, rtol=1e-12, dims=ndims(V), verbose = false)
smooth data in `V` according to:
`V` : data to smooth
`factor` : smoothing intensity
`rtol` : relative tolerance on how precise the smoothing intensity is determined
`dims` : array dimension being smoothed
`verbose` : enables some printing of internal info
returns a tuple (S,N) where:
`S` : is the smoothed data
`N` : is the extracted noise
in particular `S + N` reconstructs the original data `V`.
### Example
```julia
begin
using KissSmoothing, Statistics, LinearAlgebra
using PyPlot
figure(figsize=(5,4))
for (i,s) in enumerate(2 .^ LinRange(-1.5,1.5,4))
# generating a simple sinusoidal signal
X = LinRange(0, pi, 1000)
Y = sin.(X.^2)
# generate it's noise corrupted version
TN = std(Y).*randn(length(X))./7 .*s
raw_S = Y .+ TN
# using this package function to extract signal S and noise N
S, N = denoise(raw_S)
subplot(2, 2, i)
plot(X,raw_S, color="gray",lw=0.8, label="Y noisy")
plot(X,Y,color="red",label="Y true")
plot(X,S,color="blue", ls ="dashed",label="Y smoothed")
xlabel("X")
ylabel("Y")
i==1 && legend()
end
tight_layout()
savefig("test.png")
end
```

### Multidimensional example
```julia
using KissSmoothing, Statistics, LinearAlgebra
using PyPlot
figure(figsize=(5,4))
for (i,s) in enumerate(2 .^ LinRange(-1.5,1.5,4))
# generating a simple circle dataset
X = LinRange(0,10pi,1000)
Y = sin.(X) .+ randn(length(X))./7 .*s
Z = cos.(X) .+ randn(length(X))./7 .*s
M = [Y Z]
O = [sin.(X) cos.(X)]
# using this package function to extract signal S and noise N
S, N = denoise(M, dims=1)
subplot(2,2,i)
scatter(M[:,1],M[:,2], color="gray",s=2,label="noisy")
plot(S[:,1],S[:,2], color="red",lw=1.5,label="smoothed")
plot(O[:,1],O[:,2], color="blue",lw=1.0,label="true")
i==1 && legend()
xlabel("X")
ylabel("Y")
end
tight_layout()
savefig("test_multi.png")
```

## RBF Estimation
fit_rbf(xv::Array, yv::Array, cp::Array)
fit thin-plate radial basis function according to:
`xv` : array NxP, N number of training points, P number of input variables
`yv` : array NxQ, N number of training points, Q number of output variables
`cp` : array KxP, K number of control points, P number of input variables
returns a callable RBF object.
### Example
```julia
using PyPlot, KissSmoothing
t = LinRange(0,2pi,1000)
ty = sin.(t)
y = ty .+ randn(length(t)) .*0.05
fn = fit_rbf(t,y,LinRange(0,2pi,20))
scatter(t, y, color="gray",s=2,label="noisy")
plot(t, fn(t), color="red",lw=1.5,label="rbf estimate")
plot(t,ty, color="blue",lw=1.0,label="true")
xlabel("X")
ylabel("Y")
legend()
tight_layout()
savefig("rbf.png")
```

## NSplines Estimation
fit_nspline(xv::Vector, yv::Vector, cp::Vector)
fit natural cubic splines basis function according to:
`xv` : array N, N number of training points
`yv` : array N, N number of training points
`cp` : array K, K number of control points
returns a callable function.
### Example
```julia
using PyPlot, KissSmoothing
t = LinRange(0,pi,1000)
ty = sin.(t.^2)
y = ty .+ randn(length(t)) .*0.05
fn = fit_nspline(t,y,LinRange(0,2pi,20))
scatter(t, y, color="gray",s=2,label="noisy")
plot(t, fn.(t), color="red",lw=1.5,label="nspline estimate")
plot(t,ty, color="blue",lw=1.0,label="true")
xlabel("X")
ylabel("Y")
legend()
tight_layout()
savefig("nspline.png")
```

## Sine Series Estimation
fit_sine_series(X::Vector,Y::Vector, basis_elements::Integer; lambda = 0.0, order=3)
fit Y ~ 1 + X + Σ sin(.) by minimising Σ (Y - f(x))^2 + lambda * ∫(Δ^order F)^2
`X` : array N, N number of training points.
`Y` : array N, N number of training points.
`basis_elements` : number of sine terms.
Keyword arguments:
`lambda` : intensity of regularization.
`order` : derivative to regularise.
returns a callable function.
### Example
```julia
using PyPlot, KissSmoothing
fg(x) = sin(x^2) + x
x = collect(LinRange(0,pi,250))
xc = identity.(x)
filter!(x->(x-1)^2>0.1, x)
filter!(x->(x-2)^2>0.1, x)
y = fg.(x) .+ randn(length(x)) .*0.05
fn = fit_sine_series(x,y,20, lambda = 0.00001)
scatter(x, y, color="gray",s=2,label="noisy")
plot(xc, fn.(xc), color="red",lw=1.,label="fit")
plot(xc,fg.(xc), color="blue",lw=0.7,label="true")
xlabel("X")
ylabel("Y")
legend()
tight_layout()
savefig("sine_fit.png")
```

## Least Squares Denoising
lsq_denoise(S::AbstractVector{<:Real}; order::Integer=3, strength::Real = NaN)
denoise a sequence S by penalising its order-th finite differences in a least squares regression
`S` : sequence.
Keyword arguments:
`order` : finite differencing order.
`strength` : intensity of penalisation on the derivative, if unspecified it is auto-determined.
return the filtered sequence.
### Example
```julia
using PyPlot, KissSmoothing
fg(x) = sin(x^2) + x
x = collect(LinRange(0,pi,150))
y = fg.(x) .+ randn(length(x)) .*0.1
plot(y, color="red",lw=1.,label="noisy seq")
plot(lsq_denoise(y,order=2), color="blue",lw=1.,label="filtered")
xlabel("X")
ylabel("Y")
legend()
tight_layout()
savefig("lsq_denoise.png")
```

| KissSmoothing | https://github.com/francescoalemanno/KissSmoothing.jl.git |
|
[
"MIT"
] | 0.1.5 | 6cc9d682755680e0f0be87c56392b7651efc2c7b | code | 6965 | __precompile__()
module Unrolled
using MacroTools
using MacroTools: prewalk, postwalk
export @unroll, @code_unrolled
export unrolled_reduce, unrolled_filter, unrolled_intersect, unrolled_setdiff,
unrolled_union, unrolled_in, unrolled_any, unrolled_all, unrolled_map,
unrolled_foreach
# Stub; the actual method is defined via `@generated` further below.
function unrolled_filter end

""" `type_length(::Type)` returns the length of sequences of that type (only makes sense
for sequence-like types, obviously). """
function type_length end

"""
`type_size(::Type, dim)` returns the size of an array in the specified dimension.
"""
function type_size end

include("range.jl")

# Maps each `@unroll`-ed function to the function that generates its expansion,
# so that `@code_unrolled` can look the expansion up later.
const expansion_funs = Dict{Function, Function}()
# Unroll `loop` when the static length of `niter_type` is known via
# `type_length`; if no `type_length` method applies, the loop is emitted as-is.
macro unroll_loop(niter_type::Type, loop)
    local niter
    @assert(@capture(loop, for var_ in seq_ loopbody__ end),
            "Internal error in @unroll_loop")
    try
        niter = type_length(niter_type)
    catch e
        # Don't unroll the loop, we can't figure out its length
        if isa(e, MethodError)
            return esc(loop)
        else rethrow() end
    end
    # Re-dispatch to the Int method now that the iteration count is known.
    esc(:($Unrolled.@unroll_loop($niter, for $var in $seq
        $(loopbody...)
        end)))
end
# Fully unrolled form: repeats the loop body `niter` times, with `var` bound
# (via `let`) to the successive elements `seq[1] … seq[niter]`.
macro unroll_loop(niter::Int, loop)
    @assert(@capture(loop, for var_ in seq_ loopbody__ end),
            "Internal error in @unroll_loop")
    esc(quote
        $([:(let $var = $seq[$i]; $(loopbody...) end) for i in 1:niter]...)
        nothing
    end)
end

# Unrolls `for var in 1:niter` loops where `niter` is a literal integer in the
# loop expression itself.
macro unroll_loop(loop::Expr)
    @assert(@capture(loop, for var_ in 1:niter_ loopbody__ end),
            "Internal error in @unroll_loop")
    esc(quote $([:(let $var = $i; $(loopbody...) end) for i in 1:niter]...) end)
end
# Tuples and NamedTuples expose their length statically through the type.
type_length(tup::Type{T}) where {T<:Union{Tuple,NamedTuple}} = length(tup.types)
# Default fall-back
type_length(typ::Type) = length(typ)
# Default fall-back for the per-dimension size query.
type_size(typ::Type, i) = size(typ, i)

""" `function_argument_name(arg_expr)`

Returns the name (as a symbol) of this argument, where arg_expr is whatever can
be put in a function definition's argument list (eg. `len::Int=5`) """
function_argument_name(arg_expr) = MacroTools.splitarg(arg_expr)[1]
"""
    @unroll function f(args...) ... end

Turn the function definition into a `@generated` function in which every
`@unroll`-marked `for` loop over one of `f`'s arguments (or over
`1:length(arg)` / `1:size(arg, dim)`) is expanded at compile time using the
static length of the argument's type. Also registers an expansion function in
`expansion_funs` so the generated code can be inspected with `@code_unrolled`.
"""
macro unroll(fundef)
    # This macro will turn the function definition into a generated function.
    di = splitdef(fundef)
    @assert !haskey(di, :params) "`@unroll` only supports parametric functions using `where ...` notation"
    fname = di[:name]
    args = di[:args]
    kwargs = get(di, :kwargs, [])
    body = di[:body]
    # Give anonymous positional arguments generated names so they can be
    # referenced inside the generated function body.
    arg_vars = [a===nothing ? gensym() : a for a in map(function_argument_name, args)]
    kwarg_vars = map(function_argument_name, kwargs)
    all_args = [arg_vars; kwarg_vars]
    # Each helper below produces an interpolation node ($) so that, inside the
    # generated function, the *type* of the argument is spliced in.
    function seq_type(seq_var)
        @assert(seq_var in all_args,
                "Can only unroll a loop over one of the function's arguments")
        return Expr(:($), seq_var)
    end
    function seq_type_length(seq_var)
        @assert(seq_var in all_args,
                "Can only unroll a loop over one of the function's arguments")
        return Expr(:($), Expr(:call, :($Unrolled.type_length), seq_var))
    end
    function seq_type_size(seq_var, dim)
        @assert(seq_var in all_args,
                "Can only unroll a loop over one of the function's arguments")
        @assert(dim isa Integer,
                "Dimension argument must be an integer")
        return Expr(:($), Expr(:call, :($Unrolled.type_size), seq_var, dim))
    end
    process(x) = x
    # Rewrite `@unroll for ...` expressions into `@unroll_loop` invocations.
    function process(expr::Expr)
        if expr.args[1]==Symbol("@unroll")
            what = expr.args[3]
            @match what begin
                for var_ in 1:length(seq_) loopbody__ end =>
                    :($Unrolled.@unroll_loop(for $var in 1:$(seq_type_length(seq));
                                             $(loopbody...) end))
                for var_ in 1:size(seq_, dim_) loopbody__ end =>
                    :($Unrolled.@unroll_loop(for $var in 1:$(seq_type_size(seq, dim));
                                             $(loopbody...) end))
                for var_ in seq_ loopbody__ end =>
                    :($Unrolled.@unroll_loop($(seq_type(seq)),
                                             for $var in $seq; $(loopbody...) end))
                any_ => error("Cannot @unroll $what")
            end
        else
            expr
        end
    end
    # We walk over every expression in the function body, and replace the `@unroll`
    # loops with macros that will perform the actual unrolling (we use intermediate macros
    # for sanity)
    expansion = postwalk(process, body)
    exp_fun = Symbol(fname, :_unrolled_expansion_, gensym()) # gensym to support multiple methods
    return esc(quote
        # The expansion function (for easy calling)
        Base.@__doc__ function $exp_fun($(all_args...))
            $(Expr(:quote, expansion))
        end
        @generated function $fname($(args...); $(kwargs...)) where {$(di[:whereparams]...)}
            $exp_fun($(all_args...))
        end
        $Unrolled.expansion_funs[$fname] = $exp_fun
    end)
end
"""
    @code_unrolled f(args...)

Show the code that `@unroll` will generate for the call `f(args...)`
(analogous to `@code_lowered`): the arguments are evaluated, their types are
fed to `f`'s stored expansion function, and the result is macro-expanded.
"""
macro code_unrolled(expr)
    @assert(@capture(expr, f_(args__)))
    ar = gensym()  # temp holding the evaluated arguments in caller scope
    esc(quote
        $ar = [$(args...)]
        macroexpand(@__MODULE__, $Unrolled.expansion_funs[$f](map(typeof, $ar)...))
    end)
end
################################################################################
"""
    unrolled_map(f, seq) -> Tuple

Fully-unrolled `map` over a fixed-length sequence: generates
`(f(seq[1]), f(seq[2]), ..., f(seq[N]))` where `N = type_length(typeof(seq))`.
"""
@generated function unrolled_map(f, seq)
    :(tuple($((:(f(seq[$i])) for i in 1:type_length(seq))...)))
end
"""
    unrolled_map(f, seq1, seq2) -> Tuple

Two-argument unrolled `map`; both sequences must have the same type-level
length (checked at generation time).
"""
@generated function unrolled_map(f::F, seq1, seq2) where F
    @assert type_length(seq1) == type_length(seq2)
    :(tuple($((:(f(seq1[$i], seq2[$i])) for i in 1:type_length(seq1))...)))
end
# Unrolled `foreach`: apply `f` to each element for its side effects;
# returns nothing useful.
@unroll function unrolled_foreach(f, seq)
    @unroll for x in seq; f(x) end
end
"""
    unrolled_reduce(f, v0, seq)

Unrolled reduction. Generates the nested expression
`f(seq[N], f(seq[N-1], ... f(seq[1], v0)))` — i.e. the accumulator is passed
as the *second* argument of `f`, starting from `v0` and the first element.
"""
@generated function unrolled_reduce(f, v0, seq)
    niter = type_length(seq)
    # Recursively build the nested call expression.
    expand(i) = i == 0 ? :v0 : :(f(seq[$i], $(expand(i-1))))
    return expand(niter)
end
# Build the expansion for `unrolled_filter`: for each index, splat either the
# 1-tuple `(tup[i],)` or the empty tuple `()` depending on `f(tup[i])`, so the
# result is a tuple of exactly the elements that pass the predicate.
function _unrolled_filter(f, tup)
    :($([Expr(:(...), :(f(tup[$i]) ? (tup[$i],) : ()))
         for i in 1:type_length(tup)]...),)
end
# `unrolled_filter(f, tup)`: filter a tuple without a runtime loop. When the
# predicate folds at compile time the result is a compile-time constant.
@generated unrolled_filter(f, tup) = _unrolled_filter(f, tup)
"""
    unrolled_intersect(tup1, tup2)

Tuple intersection: the elements of `tup1` that also occur in `tup2`,
in `tup1` order.
"""
unrolled_intersect(tup1, tup2) = unrolled_filter(el -> el in tup2, tup1)

"""
    unrolled_setdiff(tup1, tup2)

Tuple set difference: the elements of `tup1` that do not occur in `tup2`,
in `tup1` order.
"""
unrolled_setdiff(tup1, tup2) = unrolled_filter(el -> !(el in tup2), tup1)

"""
    unrolled_union(tups...)

Tuple union: concatenate the inputs, dropping elements of the second tuple
already present in the first. With more than two tuples the result is
accumulated via `unrolled_reduce` (whose accumulator is the *second*
argument), so later tuples end up at the front of the result.
"""
unrolled_union() = ()
unrolled_union(tup1) = tup1
unrolled_union(tup1, tup2) = (tup1..., unrolled_setdiff(tup2, tup1)...)
unrolled_union(tup1, tup2, tupn...) =
    unrolled_reduce(unrolled_union, tup1, (tup2, tupn...))
""" `unrolled_in(obj, tup)` is like `in`. Beware that its return type is not
always known - see #21322 """
@inline @unroll function unrolled_in(obj, tup)
@unroll for x in tup
if obj == x
return true
end
end
return false
end
# Unrolled `all`: true iff the predicate holds for every element
# (short-circuits on the first failure).
@unroll function unrolled_all(f, tup)
    @unroll for x in tup
        if !f(x)
            return false
        end
    end
    return true
end
# Unrolled `any`: true iff the predicate holds for at least one element
# (short-circuits on the first success).
@unroll function unrolled_any(f, tup)
    @unroll for x in tup
        if f(x)
            return true
        end
    end
    return false
end
end # module
| Unrolled | https://github.com/cstjean/Unrolled.jl.git |
|
[
"MIT"
] | 0.1.5 | 6cc9d682755680e0f0be87c56392b7651efc2c7b | code | 1926 | export FixedRange, FixedEnd, @fixed_range
""" `FixedRange{A, B}()` is like `UnitRange{Int64}(A, B)`, but the bounds are encoded
in the type. """
struct FixedRange{A, B} end
""" `FixedRange{2, FixedEnd()}()` behaves like a type-stable 2:end """
struct FixedEnd{N} end
FixedEnd() = FixedEnd{0}()
Unrolled.type_length(::Type{FixedRange{A, B}}) where {A, B} = B - A + 1
Base.length(fr::FixedRange{A, B}) where {A, B} = B - A + 1
Base.maximum(fr::FixedRange{A, B}) where {A, B} = B::Int
Base.minimum(fr::FixedRange{A, B}) where {A, B} = A::Int
Base.getindex(fr::FixedRange, i::Int) = minimum(fr) + i - 1
Base.iterate(fr::FixedRange, state=minimum(fr)) =
state > maximum(fr) ? nothing : (state, state+1)
Base.lastindex(fr::FixedRange) = length(fr)
# Resolve a bound at generation time: `FixedEnd{N}` becomes `length(seq) - N`
# (from the sequence *type*), a plain Int is passed through unchanged.
replace_end(::FixedEnd{N}, ::Type{SEQ}) where {N, SEQ} = type_length(SEQ) - N
replace_end(n::Int, ::Type) = n
# Indexing a tuple or array with a FixedRange yields a tuple built by the
# generated `_getindex`, so the result length is known to the compiler.
Base.getindex(seq::NTuple{N, Any}, fr::FixedRange{A, B}) where {N, A, B} = _getindex(seq, fr)
Base.getindex(seq::AbstractArray, fr::FixedRange{A, B}) where {A, B} = _getindex(seq, fr)
@generated _getindex(seq, ::FixedRange{A, B}) where {A, B} =
    :(tuple($((:(seq[$i]) for i in replace_end(A, seq):replace_end(B, seq))...)))
""" `@fixed_range 3:10` behaves like the standard range `3:10`, but is stored within
the type system, so that `some_tuple[@fixed_range 3:10]` is type-stable. Also supports
`some_tuple[@fixed_range 3:end-5]` """
macro fixed_range(r::Expr)
process(x::Int) = x
process(x::Symbol) = x === :end ? :($Unrolled.FixedEnd()) : x
function process(x::Expr)
@assert @capture(x, en_-m_) "`@fixed_range` macro cannot handle $x"
@assert en === :end
:(FixedEnd{$m}())
end
expand(a, b) =
:($Unrolled.FixedRange{$(process(a)), $(process(b))}())
@match r begin
a_:b_ => esc(expand(a, b))
s_[a_:b_] => esc(:($s[$(expand(a, b))]))
any_ => error("Bad @fixed_range")
end
end
| Unrolled | https://github.com/cstjean/Unrolled.jl.git |
|
[
"MIT"
] | 0.1.5 | 6cc9d682755680e0f0be87c56392b7651efc2c7b | code | 3617 | using Unrolled
using Test
using StaticArrays
using MacroTools
using QuickTypes: type_parameters
# Check that we can extract argument names from every arg-list form
# (plain, annotated, defaulted, keyword).
@capture(:(function foo(a, b::Int, c=2; d::Int=4) end),
         (function foo(args__; kwargs__) end))
@test map(Unrolled.function_argument_name, vcat(args, kwargs)) == [:a, :b, :c, :d]
# Basic @unroll smoke test: an unrolled sum over positional and keyword args.
@unroll function my_sum(ss)
    total = zero(eltype(ss))
    @unroll for x in ss
        total += x
    end
    return total
end
# Keyword-argument variant. NOTE(review): the default `ss::Tuple=1` is not a
# Tuple; tests always pass `ss` explicitly, so the default is never exercised
# — TODO confirm intent.
@unroll function my_sum(; ss::Tuple=1) # test kwargs
    total = zero(eltype(ss))
    @unroll for x in ss
        total += x
    end
    return total
end
@test my_sum((1,2,3)) == 6
@test my_sum([1,2,3]) == 6
@test my_sum(SVector(1,2,3)) == 6
@test my_sum(; ss=(1,2,3)) == 6
# Had to disable this test on 0.7. @test_throws looks broken?
# @test_throws AssertionError @eval @unroll function my_sum(ss)
#     total = zero(eltype(ss))
#     @unroll for x in ss[1:end-1]
#         total += x
#     end
#     return total
# end
# Unrolling only works over a function's own arguments, so slicing must be
# done by a wrapper and the unrolled work by a helper.
@unroll function _do_sum(sub_seq) # helper for my_sum_but_last
    total = zero(eltype(sub_seq))
    @unroll for x in sub_seq
        total += x
    end
    return total
end
# Sum every number in seq except the last one
function my_sum_but_last(seq)
    return _do_sum(seq[1:end-1])
end
@test my_sum_but_last((1,20,3)) == 21
# Check that unrolled_union & co are correct and type-stable
struct MyVal{T} end
# Otherwise it's not type-stable. Perhaps it should be defined in Base for all
# singleton types. (Defining Base.in for our own MyVal type is fine — no piracy.)
@generated Base.in(val::MyVal, tup::Tuple) = val in type_parameters(tup)
@test (@inferred(unrolled_union((MyVal{1}(), MyVal{2}()), (MyVal{2}(), MyVal{0}()))) ==
       (MyVal{1}(), MyVal{2}(), MyVal{0}()))
@test (@inferred(unrolled_intersect((MyVal{1}(), MyVal{2}()), (MyVal{2}(), MyVal{0}())))==
       (MyVal{2}(),))
@test (@inferred(unrolled_setdiff((MyVal{1}(), MyVal{2}()), (MyVal{2}(), MyVal{0}()))) ==
       (MyVal{1}(),))
# Multi-tuple union: later tuples accumulate at the front (reduce convention).
@test (@inferred(unrolled_union((MyVal{1}(), MyVal{2}()),
                                (MyVal{2}(), MyVal{0}()),
                                (MyVal{10}(),))) ==
       (MyVal{10}(), MyVal{2}(), MyVal{0}(), MyVal{1}()))
@test @inferred(unrolled_reduce((+), 0, unrolled_map(abs, (1,2,-3,7)))) == 13
# Mixed-eltype tuple; trailing `7...` splats the number 7 (numbers iterate as
# a single element).
const tupl = (1,2,3,4,5.0,6,7...)
@test @inferred(getindex(tupl, FixedRange{4, 5}())) == (4, 5.0)
@test @inferred(getindex(tupl, FixedRange{4, FixedEnd{1}()}())) == (4, 5.0, 6)
f(tupl) = @fixed_range(tupl[4:end-1])
@test @inferred(f(tupl)) == (4, 5.0, 6)
# @unroll with a `where` type parameter used inside the body.
@unroll function summer(tup, t::Type{T}) where T
    s = zero(T)
    @unroll for x in tup
        s += x
    end
    s
end
@test summer((1,2,3), Float64) === 6.0
# unrolled_foreach with a do-block closure mutating outer state.
x = [0.0]
unrolled_foreach((1,2,3, 1.0)) do y
    x[1] += y
end
@test x[1] == 7.0
# Issue #6: regression test that @unroll handles a `Val` type parameter used
# as a boolean inside the body. NOTE(review): this function is only *defined*,
# never called — it references undefined names (`vol_matrix`, `dt`, the two
# `sim_gbm_*_step` helpers) and the `if log_scale` branches appear swapped
# relative to their names. TODO confirm this is intentional (expansion-only
# test) before relying on it.
@unroll function sim_gbm(state, sim_T, drift, vol, ts::Tuple, ::Val{log_scale}) where log_scale
    log_scale && (state = log.(state))
    @unroll for _ in 1:length(ts)
        if log_scale
            state = sim_gbm_euler_step(state, drift, vol_matrix, dt(ts))
        else
            state = sim_gbm_log_scale_euler_step(state, drift, vol_matrix, dt(ts))
        end
    end
    return log_scale ? exp.(state) : state
end
# Unrolling with a sized argument: the size lives entirely in the type
# parameter, so `@unroll for i = 1:size(cis, 2)` can be expanded statically.
struct CartesianIndexSpace{dims} <: AbstractArray{Int, 2}; end
Base.size(cis::Type{CartesianIndexSpace{dims}}) where {dims} = dims
Base.size(cis::Type{<:CartesianIndexSpace}, i::Integer) = size(cis)[i]
Base.size(cis::CartesianIndexSpace) = size(typeof(cis))
@unroll function do_count(cis)
    n = 0
    @unroll for i = 1:size(cis, 2)
        n += 1
    end
    n
end
@test do_count(CartesianIndexSpace{(1,4)}()) == 4
| Unrolled | https://github.com/cstjean/Unrolled.jl.git |
|
[
"MIT"
] | 0.1.5 | 6cc9d682755680e0f0be87c56392b7651efc2c7b | docs | 3887 | # Unrolled
[](https://travis-ci.org/cstjean/Unrolled.jl)
[](https://coveralls.io/github/cstjean/Unrolled.jl?branch=master)
[](http://codecov.io/github/cstjean/Unrolled.jl?branch=master)
Unrolled.jl provides functions to unroll loops on sequences whose length is known at
compile-time (mostly `Tuple` and [`StaticArrays`](https://github.com/JuliaArrays/StaticArrays.jl)). This can significantly improve performance and type-stability.
# The `@unroll` macro
```julia
julia> using Unrolled
julia> @unroll function my_sum(seq)
# More on why we need @unroll twice later.
total = zero(eltype(seq))
@unroll for x in seq
total += x
end
return total
end
my_sum_unrolled_expansion_ (generic function with 1 method)
julia> my_sum((1, 2, 3))
6
```
To see what code will be executed,
```julia
# Tuples are unrolled
julia> @code_unrolled my_sum((1,2,3))
quote
total = zero(eltype(seq))
begin
let x = seq[1]
total += x
end
let x = seq[2]
total += x
end
let x = seq[3]
total += x
end
end
return total
end
# But not vectors, since their length is not part of Vector{Int}
julia> @code_unrolled my_sum([1,2,3])
quote
total = zero(eltype(seq))
for x = seq
total += x
end
return total
end
```
All types for which `length` is implemented will be unrolled (this includes the fixed-size
vectors from [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl) and
[FixedSizeArrays.jl](https://github.com/SimonDanisch/FixedSizeArrays.jl))
## Usage
`@unroll` works by generating (at compile-time) a separate function for each type
combination. This is why we need (at least) two `@unroll`:
- One in front of the `function` definition
- One in front of each `for` loop to be unrolled
`@unroll` can only unroll loops over the arguments of the function. For instance, this
is an error:
```julia
# Sum every number in seq except the last one
@unroll function my_sum_but_last(seq)
total = zero(eltype(seq))
@unroll for x in seq[1:end-1] # Bad!
total += x
end
return total
end
```
An easy work-around is to use a helper function
```julia
@unroll function _do_sum(sub_seq) # helper for my_sum_but_last
total = zero(eltype(sub_seq))
@unroll for x in sub_seq
total += x
end
return total
end
# Sum every number in seq except the last one
my_sum_but_last(seq) = _do_sum(seq[1:end-1])
my_sum_but_last((1,20,3)) # 21
```
As a special case, `@unroll` also supports iteration over `1:some_argument`
```julia
@unroll function foo(tup)
@unroll for x in 1:length(tup)
println(x)
end
end
foo((:a, :b, :c))
> 1
> 2
> 3
```
# Unrolled functions
Unrolled.jl also provides the following unrolled functions, defined on `Tuple`s only.
```
unrolled_map, unrolled_reduce, unrolled_in, unrolled_any, unrolled_all, unrolled_foreach
```
and
```
unrolled_filter, unrolled_intersect, unrolled_union, unrolled_setdiff
```
The functions in this second group will only perform well when the computations can be
performed entirely at compile-time (using the types). For example,
`unrolled_filter(x->isa(x, Int), some_tuple)`.
In this other example, `unrolled_filter` is compiled to a constant:
```julia
using Unrolled, Base.Test
@generated positive{N}(::Val{N}) = N > 0
@inferred unrolled_filter(positive, (Val{1}(), Val{3}(), Val{-1}(), Val{5}()))
```
# Note on `Val`
In my experience, `Val` objects are more type-stable than `Val` types. Favor
`Val{:x}()` over `Val{:x}`.
| Unrolled | https://github.com/cstjean/Unrolled.jl.git |
|
[
"MIT"
] | 0.3.3 | 90ec6d3d0e421f2ae60250e9692346c0d0fa233e | code | 2400 | """
Least-squares spectral estimation toolbox.
For help, see README.md at https://github.com/baggepinnen/LPVSpectral.jl and
[Fredrik Bagge Carlson, Anders Robertsson, Rolf Johansson: "Linear Parameter-Varying Spectral Decomposition". In: 2017 American Control Conference 2017.]
(http://lup.lub.lu.se/record/ac32368e-e199-44ff-b76a-36668ac7d595)
Available at: http://lup.lub.lu.se/record/ac32368e-e199-44ff-b76a-36668ac7d595
This module provide the functions
```
ls_spectral
tls_spectral
ls_windowpsd
ls_windowcsd
ls_cohere
ls_spectral_lpv
ls_sparse_spectral_lpv
ls_windowpsd_lpv
basis_activation_func
```
and re-exports the following from DSP.jl
```
export periodogram, welch_pgram, Windows
```
Periodogram types and SpectralExt type can be plotted using `plot(x::SpectralExt)`
"""
module LPVSpectral
using LinearAlgebra, Statistics, Printf
using DSP, FFTW, StatsBase
using RecipesBase
"""
LPV Spectral estimation result type.
See `ls_spectral_lpv` for additional help.
An object of this type can be plotted if `Plots.jl` is installed. Use regular Plots-syntax, with the additional attributes
```
normalization= :none / :sum / :max
normdim = :freq / :v # Only applies if normalization= :sum or :max
dims = 2 or 3 (default = 2)
```
Fields:
```
Y::AbstractVector
X::AbstractVector
V::AbstractVector
w
Nv
λ
coulomb::Bool
normalize::Bool
x # The estimated parameters
Σ # Covariance of the estimated parameters
```
"""
# LPV spectral-estimation result; see the docstring above and
# `ls_spectral_lpv` for details. NOTE(review): `w`, `Nv`, `λ`, `x`, `Σ` are
# untyped fields — parametrizing them would aid inference, but constructors
# throughout the package rely on the current positional layout.
struct SpectralExt
    Y::AbstractVector  # output signal
    X::AbstractVector  # sample locations
    V::AbstractVector  # scheduling signal
    w                  # frequency vector
    Nv                 # number of basis functions
    λ                  # regularization parameter used
    coulomb::Bool      # discontinuity at v = 0 assumed?
    normalize::Bool    # normalized basis functions used?
    x                  # estimated parameters
    Σ                  # covariance of the estimated parameters (or nothing)
end
include("utilities.jl")
include("windows.jl")
include("mel.jl")
include("plotting.jl")
include("lsfft.jl")
include("autocov.jl")
# Functions
export ls_spectral,
tls_spectral,
ls_sparse_spectral,
ls_windowpsd,
ls_windowcsd,
ls_cohere,
ls_spectral_lpv,
ls_sparse_spectral_lpv,
ls_windowpsd_lpv,
basis_activation_func,
SpectralExt,
psd,
detrend,
detrend!,
autocov,
autocor
export melspectrogram, mfcc, mel
# Re-export
export Windows, Windows2, Windows3, mapwindows
# ComplexNormal
export ComplexNormal
export cn_V2ΓC,cn_V2ΓC,cn_Vxx,cn_Vyy,cn_Vxy,cn_Vyx,cn_fVxx,cn_fVyy,cn_fVxy,
cn_fVyx,cn_Vs,cn_V,cn_fV,Σ,pdf,affine_transform, rand
using Requires
# Conditional dependency via Requires.jl: the sparse estimators in lasso.jl
# are only loaded once the user also loads ProximalOperators (identified by
# its UUID), keeping it an optional dependency.
function __init__()
    @require ProximalOperators = "a725b495-10eb-56fe-b38b-717eba820537" include("lasso.jl")
end
end # module
| LPVSpectral | https://github.com/baggepinnen/LPVSpectral.jl.git |
|
[
"MIT"
] | 0.3.3 | 90ec6d3d0e421f2ae60250e9692346c0d0fa233e | code | 7169 | function autofun(f,t::AbstractVector,h::AbstractVector{<:Vector{T}},maxlag;kwargs...) where T<:Number
res = map(zip(t,h)) do (t,h)
f(t,h,maxlag;kwargs...)
end
Tau = getindex.(res,1)
Y = getindex.(res,2)
τ = reduce(vcat, Tau)
y = reduce(vcat, Y)
perm = @views sortperm(τ)
return τ[perm],y[perm]
end
# Vector-of-records convenience methods: pool the ACF/autocorrelation of
# several (t, y) records into one sorted (lags, values) pair via `autofun`.
StatsBase.autocov(t::AbstractVector,h::AbstractVector{<:Vector{T}},maxlag;kwargs...) where T<:Number = autofun(autocov,t,h,maxlag;kwargs...)
StatsBase.autocor(t::AbstractVector,h::AbstractVector{<:Vector{T}},maxlag;kwargs...) where T<:Number = autofun(autocor,t,h,maxlag;kwargs...)
"""
τ, acf = autocov(t::AbstractVector, y::AbstractVector{T}, maxlag; normalize=false) where T <: Number
Calculate the autocovariance function for a signal `y` sampled at sample times `t`. This is useful if the signal is sampled non-equidistantly.
If `y` and `t` are vectors of vectors, the acf for all data is estimated and concatenated in the output, which is always two vectors of numbers.
The returned vectors are samples of the acf at lags `τ`. `τ` contains differences between entries in `t`, whereas `lags` supplied to the standard metohd of `autocov` are the differences between *indices* in `y` to compare. The result will in general be noisier that the stadard `autocov` and may require further estimation, e.g., by means of gaussian process regression or quantile regression. The following should hold `mean(acf[τ.==0]) ≈ var(y)`.
#Arguments:
- `t`: Vector of sample locations/times
- `y`: signal
- `maxlag`: Specifies the maximum difference between two values in `t` for which to calculate the ACF
- `normalize`: If false (default) the behaviour mimics the standard method from StatsBase and the ACF decreases automatically for higher lags, even if the theoretical ACF does not. If `true`, the theoretical ACF is estimated. `normalize = true` leads to sharper spectral estimates when taking the fft of the ACF. Note that the variance in the estimated ACF will be high for large lags if `true`.
"""
function StatsBase.autocov(t::AbstractVector{tT},y::AbstractVector{T},maxlag::Number; normalize=false) where {T<:Number, tT<:Number}
    # Non-uniform sampling falls back to the pairwise-product "cloud" estimate.
    isequidistant(t) || return _autocov(t,y,maxlag;normalize=normalize)
    ly = length(y)
    length(t) == ly || throw(ArgumentError("t and y must be the same length"))
    # Upper bound on the number of (i, i+j) pairs: (ly² + ly)/2.
    acf = zeros(T, (ly^2+ly)÷2)
    τ = zeros(tT, (ly^2+ly)÷2)
    k = 1
    @inbounds for i in eachindex(y)
        for j in 0:length(y)-1
            i+j > ly && continue
            τi = abs(t[i+j]-t[i])
            τi > maxlag && continue
            τ[k] = τi
            # For lag index j the covariance is the inner product of y with its
            # shifted self; the denominator switches between the StatsBase-like
            # (ly) and the unbiased (ly - (j-1)) normalization via `normalize`.
            c = dot(y, 1:ly-j, y, 1+j:ly) / (ly-normalize*(j-1)) # the -1 is important, it corresponds to `corrected=true` for `var`. The variance increases for large lags and this becomes extra important close to lag=length(y)-1
            acf[k] = c
            k += 1
        end
    end
    # Only the first k-1 slots were filled; sort those by lag.
    perm = @views sortperm(τ[1:k-1])
    # Degenerate (constant) signal: the ACF is identically zero.
    if all(x->x==y[1], y) || var(y) < eps()
        return τ[perm], zeros(T,length(perm))
    end
    return τ[perm],acf[perm]
end
"""
τ, acf = autocor(t::AbstractVector, y::AbstractVector{T}, maxlag; normalize=false) where T <: Number
Calculate the auto correlation function for a signal `y` sampled at sample times `t`. This is useful if the signal is sampled non-equidistantly.
If `y` and `t` are vectors of vectors, the acf for all data is estimated and concatenated in the output, which is always two vectors of numbers.
The returned vectors are samples of the acf at lags `τ`. `τ` contains differences between entries in `t`, whereas `lags` supplied to the standard metohd of `autocov` are the differences between *indices* in `y` to compare. The result will in general be noisier that the stadard `autocov` and may require further estimation, e.g., by means of gaussian process regression or quantile regression. The following should hold `mean(acf[τ.==0]) ≈ var(y)`.
#Arguments:
- `t`: Vector of sample locations/times
- `y`: signal
- `maxlag`: Specifies the maximum difference between two values in `t` for which to calculate the ACF
- `normalize`: If false (default) the behaviour mimics the standard method from StatsBase and the ACF decreases automatically for higher lags, even if the theoretical ACF does not. If `true`, the theoretical ACF is estimated. `normalize = true` leads to sharper spectral estimates when taking the fft of the ACF. Note that the variance in the estimated ACF will be high for large lags if `true`.
"""
function StatsBase.autocor(t::AbstractVector{tT},y::AbstractVector{T},maxlag::Number; normalize=false) where {T<:Number, tT<:Number}
    # Non-uniform sampling falls back to the pairwise-product "cloud" estimate.
    isequidistant(t) || return _autocor(t,y,maxlag;normalize=normalize)
    ly = length(y)
    length(t) == ly || throw(ArgumentError("t and y must be the same length"))
    acf = zeros(T, (ly^2+ly)÷2)
    τ = zeros(tT, (ly^2+ly)÷2)
    k = 1
    # Normalizer: uncentered signal energy ⟨y, y⟩, so lag 0 maps to ≈ 1.
    dd = dot(y,y)
    @inbounds for i in eachindex(y)
        for j in 0:length(y)-1
            i+j > ly && continue
            τi = abs(t[i+j]-t[i])
            τi > maxlag && continue
            τ[k] = τi
            # NOTE(review): uses (j-0) here vs (j-1) in `autocov` above —
            # presumably intentional (no `corrected` analogue), TODO confirm.
            c = dot(y, 1:ly-j, y, 1+j:ly) / (dd*(ly - normalize*(j-0))/ly)
            acf[k] = c
            k += 1
        end
    end
    perm = @views sortperm(τ[1:k-1])
    # Degenerate (zero-energy) signal: define the autocorrelation as all ones.
    if dd < eps()
        return τ[perm], ones(T,length(perm))
    end
    return τ[perm],acf[perm]
end
# Ranges answer directly from their step (must be positive).
isequidistant(v::Union{<:UnitRange, <:StepRange, <:StepRangeLen}) = step(v) > 0

"""
    isequidistant(v) -> Bool

Return `true` if the sample points in `v` are uniformly spaced with a positive
first step, comparing each consecutive difference against the first one with a
relative tolerance of `20d*eps()`. Vectors with fewer than two points are
trivially equidistant (previously this raised a `BoundsError` on `v[2]`).
"""
function isequidistant(v)
    length(v) < 2 && return true   # 0 or 1 sample: nothing to compare
    d = v[2] - v[1]
    d > 0 || return false          # non-increasing first step ⇒ not equidistant
    for i in 3:length(v)
        abs(abs(v[i]-v[i-1])-d) < 20d*eps() || return false
    end
    true
end
"""
    τ, acf = _autocov(t, y, maxlag; normalize=false)

Raw autocovariance "cloud" for a non-equidistantly sampled signal: for every
ordered pair `(i, j)` with `i ≤ j` and time separation `|t[j]-t[i]| ≤ maxlag`,
record the lag and the product `y[i]*y[j]`. Lags are returned sorted
ascending. A constant (or negligible-variance) signal yields all-zero values.
The `normalize` keyword is accepted for interface symmetry but unused here.
"""
function _autocov(t::AbstractVector{tT},y::AbstractVector{T},maxlag::Number; normalize=false) where {T<:Number, tT<:Number}
    n = length(y)
    length(t) == n || throw(ArgumentError("t and y must be the same length"))
    npairs = (n^2 + n) ÷ 2          # upper bound: all pairs with i ≤ j
    prods = zeros(T, npairs)
    lags = zeros(tT, npairs)
    m = 0                           # pairs actually kept
    for i = 1:n, j = i:n
        lag = abs(t[j] - t[i])
        lag > maxlag && continue
        m += 1
        lags[m] = lag
        prods[m] = y[i] * y[j]
    end
    order = @views sortperm(lags[1:m])
    # Degenerate (constant) signal: covariance is identically zero.
    if all(x -> x == y[1], y) || var(y) < eps()
        return lags[order], zeros(T, length(order))
    end
    return lags[order], prods[order]
end
"""
    τ, acf = _autocor(t, y, maxlag; normalize=false)

Raw autocorrelation "cloud" for a non-equidistantly sampled signal: pairwise
products `y[i]*y[j] / var(y)` recorded against the time separation
`|t[j]-t[i]| ≤ maxlag`, sorted by lag. Every zero-lag sample is pinned to
exactly 1; a negligible-variance signal yields all ones. The `normalize`
keyword is accepted for interface symmetry but unused here.
"""
function _autocor(t::AbstractVector{tT},y::AbstractVector{T},maxlag::Number; normalize=false) where {T<:Number, tT<:Number}
    n = length(y)
    length(t) == n || throw(ArgumentError("t and y must be the same length"))
    npairs = (n^2 + n) ÷ 2          # upper bound: all pairs with i ≤ j
    vals = zeros(T, npairs)
    lags = zeros(tT, npairs)
    m = 0                           # pairs actually kept
    denom = var(y)                  # corrected sample variance as normalizer
    for i = 1:n, j = i:n
        lag = abs(t[j] - t[i])
        lag > maxlag && continue
        m += 1
        lags[m] = lag
        vals[m] = y[i] * y[j] / denom
    end
    order = @views sortperm(lags[1:m])
    denom < eps() && return lags[order], ones(T, length(order))
    vals[lags .== 0] .= 1           # pin every zero-lag sample to exactly 1
    return lags[order], vals[order]
end
| LPVSpectral | https://github.com/baggepinnen/LPVSpectral.jl.git |
|
[
"MIT"
] | 0.3.3 | 90ec6d3d0e421f2ae60250e9692346c0d0fa233e | code | 5658 | using .ProximalOperators
"""
ls_sparse_spectral_lpv(Y,X,V,w,Nv::Int; λ = 1, coulomb = false, normalize=true)
Perform LPV spectral estimation using the method presented in
Bagge Carlson et al. "Linear Parameter-Varying Spectral Decomposition."
modified to include a sparsity-promoting L1 group-lasso penalty on the coefficients.
The groups are based on frequency, meaning a solution in which either all parameters
For a particular frequency are zero, or all are non-zero.
This is useful in the identification of frequency components among a large set of possible frequencies.
See the paper or README For additional details.
- `Y` output
- `X` sample locations
- `V` scheduling signal
- `w` frequency vector
- `Nv` number of basis functions
- `λ` Regularization parameter
- `coulomb` Assume discontinuity at `v=0` (useful For signals where, e.g., Coulomb friction might cause issues.)\n
- `normalize` Use normalized basis functions (See paper For details).
See `?ADMM` for keyword arguments to control the solver.
See also `psd`, `ls_spectral_lpv` and `ls_windowpsd_lpv`
"""
function ls_sparse_spectral_lpv(y::AbstractVector{S}, X::AbstractVector{S}, V::AbstractVector{S},
                        w, Nv::Integer;
                        λ             = 1,
                        coulomb       = false,
                        normalize     = true,
                        kwargs...) where S
    w = w[:]
    T = length(y)
    Nf = length(w)
    # Basis-function activations over the scheduling signal V.
    K = basis_activation_func(V,Nv,normalize,coulomb)
    # One regressor row per sample: Fourier atoms modulated by the activations.
    M(w,X,V) = vec(vec(exp.(im.*w.*X))*K(V)')'
    As = zeros(complex(S),T,ifelse(coulomb,2,1)*Nf*Nv)
    for n = 1:T
        As[n,:] = M(w,X[n],V[n])
    end
    x = zeros(S, 2size(As,2)) # Initialize with standard least squares
    inds = reshape(1:length(x), Nf, :)'[:] # Permute parameters so that groups are adjacent
    inds = vcat(inds...)
    # x = [real.(params); imag.(params)][inds]
    # Real-valued design matrix, columns permuted to group by frequency.
    Φ = [real.(As) imag.(As)][:,inds]
    proxf = ProximalOperators.LeastSquares(Φ,y,iterative=true)
    # One L2-norm (group-lasso) penalty per frequency, over its 2Nv coefficients.
    gs = ntuple(f->NormL2(S(λ)), Nf)
    indsg = ntuple(f->((f-1)*2Nv+1:f*2Nv, ) ,Nf)
    proxg = SlicedSeparableSum(gs, indsg)
    local x, z
    try
        x,z = ADMM(x, proxf, proxg; kwargs...)
    catch e
        # Allow Ctrl-C to abort the solver but keep the current iterate.
        if e isa InterruptException
            @info "Aborting"
            z = copy(x)
        else
            rethrow(e)
        end
    end
    z = z[sortperm(inds)] # Sortperm is inverse of inds
    params = complex.(z[1:end÷2], z[end÷2+1:end])
    SpectralExt(y, X, V, w, Nv, λ, coulomb, normalize, params, nothing)
end
"""`x,f = ls_sparse_spectral(y,t,f=default_freqs(t), [window::AbstractVector]; λ=1,
proxg = ProximalOperators.NormL1(λ),
kwargs...)`
perform spectral estimation using the least-squares method with (default) a L1-norm penalty on the
Fourier coefficients, change kwarg `proxg` to e.g. `NormL0(λ)` for a different behavior or ` proxg = IndBallL0(4)` if the number of frequencies is known in advance. Promotes a sparse spectrum. See `?ADMM` for keyword arguments to control the solver.
`y` is the signal to be analyzed
`t` is the sampling points
`f` is a vector of frequencies
"""
function ls_sparse_spectral(y::AbstractArray{T},t,f=default_freqs(t);
                            init  = false,
                            λ     = T(1),
                            proxg = NormL1(λ),
                            kwargs...) where T
    A,zerofreq = get_fourier_regressor(T.(t),T.(f))
    # `init=true` warm-starts from the regularized least-squares solution,
    # otherwise start from zero coefficients.
    params = init ? fourier_solve(A,y,zerofreq,λ) : fill(zero(T), length(f)) # Initialize with standard least squares
    # Stack real and imaginary parts; the zero frequency has no sine part.
    if zerofreq === nothing
        x = [real.(params); imag.(params)]
    else
        x = [real.(params); imag.(params[2:end])]
    end
    proxf = ProximalOperators.LeastSquares(A,y, iterative=true)
    x,z = ADMM(x, proxf, proxg; kwargs...)
    # Convert the stacked real vector back to complex Fourier coefficients.
    params = fourier2complex(z, zerofreq)
    params, f
end
# Weighted variant: `W` holds per-sample weights; the data-fit prox becomes
# the quadratic 0.5 x'A'WA x - (A'Wy)'x instead of a plain least-squares term.
function ls_sparse_spectral(y::AbstractArray{T},t,f, W;
                            init  = false,
                            λ     = T(1),
                            proxg = NormL1(T(λ)),
                            kwargs...) where T
    A,zerofreq = get_fourier_regressor(t,f)
    params = init ? fourier_solve(A,y,zerofreq,λ) : fill(zero(T), length(f)) # Initialize with standard least squares
    if zerofreq === nothing
        x = [real.(params); imag.(params)]
    else
        x = [real.(params); imag.(params[2:end])]
    end
    Wd = Diagonal(W)
    Q = A'Wd*A
    q = A'Wd*y
    proxf = ProximalOperators.Quadratic(Q, q, iterative=true)
    # proxf = ProximalOperators.LeastSquares(A,Wd*y, iterative=true)
    x,z = ADMM(x, proxf, proxg; kwargs...)
    params = fourier2complex(z, zerofreq)
    params, f
end
"""
ADMM(x,proxf,proxg;
iters = 10000, # ADMM maximum number of iterations
tol = 1e-5, # ADMM tolerance
printerval = 100, # Print this often
cb(x,z) = nothing, # Callback function
μ = 0.05`) # ADMM tuning parameter. If results oscillate, lower this value.
"""
function ADMM(x::AbstractArray{T},proxf,proxg;
iters = 10000,
tol = 1e-5,
printerval = 100,
cb = nothing,
μ = T(0.05)) where T
@assert 0 ≤ μ ≤ 1 "μ should be ≤ 1"
μ = T(μ)
z = copy(x)
u = zeros(T,size(x))
tmp = similar(u)
for i = 1:iters
tmp .= z.-u
prox!(x, proxf, tmp, μ)
tmp .= x .+ u
prox!(z, proxg, tmp, μ)
tmp .= x .- z
u .+= tmp
nxz = norm(tmp)
if i % printerval == 0
@printf("%d ||x-z||₂ %.10f\n", i, nxz)
if cb != nothing
cb(x,z)
end
end
if nxz < tol
@printf("%d ||x-z||₂ %.10f\n", i, nxz)
@info("||x-z||₂ ≤ tol")
break
end
end
x,z
end
| LPVSpectral | https://github.com/baggepinnen/LPVSpectral.jl.git |
|
[
"MIT"
] | 0.3.3 | 90ec6d3d0e421f2ae60250e9692346c0d0fa233e | code | 9611 | # default_freqs(t, nw=1) = LinRange(0,0.5/mean(diff(t))-1/length(t)/2,length(t)÷2÷nw)
function default_freqs(n::Int,fs=1)
f = FFTW.rfftfreq(n,fs)
0:f.multiplier:f[end]
end
default_freqs(t::AbstractVector,fs=1/mean(diff(t))) = default_freqs(length(t),fs)
default_freqs(t::AbstractVector,n::Int) = default_freqs(t[1:n])
# function default_freqs(t, nw=1)
# fs = 1/mean(diff(t))
# N = length(t)
# l = N/2/nw
# range(0,stop=0.5fs-1/N/2, length=floor(Int,l))
# end
"""
    check_freq(f) -> Union{Int,Nothing}

Return the index of the zero frequency in `f`, or `nothing` when absent.
Throws an `ArgumentError` unless the zero frequency (when present) is the
first element — a layout the Fourier regressor construction relies on.
"""
function check_freq(f)
    z = findfirst(iszero, f)
    if z !== nothing && z != 1
        throw(ArgumentError("If zero frequency is included it must be the first frequency"))
    end
    return z
end
"""
    A, zerofreq = get_fourier_regressor(t, f)

Build the real-valued Fourier regressor for sample times `t` and frequencies
`f`: column `k ≤ length(f)` holds `cos(2π f[k] t)` and the trailing columns
hold `-sin(2π f[k] t)`; the sine column of the zero frequency (identically
zero) is omitted. All entries are scaled by `1/√(2 length(f))` so the
resulting spectrum matches the power scaling of `DSP.periodogram`. Also
returns `zerofreq` — the index of the zero frequency (or `nothing`) — as
validated by `check_freq`.
"""
function get_fourier_regressor(t::AbstractArray{T},f::AbstractArray{T}) where T
    zerofreq = check_freq(f)             # throws unless zero freq is absent or first
    N = length(t)
    Nf = length(f)
    ncols = zerofreq === nothing ? 2Nf : 2Nf - 1
    A = zeros(T, N, ncols)
    # Offset from a cos column to its sin column; one less when the zero
    # frequency (always index 1, per check_freq) has no sin column.
    sinoffset = zerofreq === nothing ? Nf : Nf - 1
    twoπ = T(2π)
    scale = 1/(sqrt(2*length(f)))        # match DSP.periodogram's power scaling
    for col = 1:Nf
        @inbounds for row = 1:N
            θ = twoπ*f[col]*t[row]
            A[row, col] = cos(θ)*scale
            if col != zerofreq
                A[row, col+sinoffset] = -sin(θ)*scale
            end
        end
    end
    A, zerofreq
end
"""`x,f = ls_spectral(y,t,f=(0:((length(y)-1)/2))/length(y); λ=0)`
perform spectral estimation using the least-squares method
`y` is the signal to be analyzed
`t` is the sampling points
`f` is a vector of frequencies
The difference between `ls_spectral` and `rfft` is `abs.(rfft) = √(2π)abs.(x)`
See also `ls_sparse_spectral` `tls_spectral`
"""
function ls_spectral(y,t,f=default_freqs(t); λ=1e-10, verbose=false)
    A, zerofreq = get_fourier_regressor(t,f)
    # Ridge-regularized least squares, returned as complex coefficients.
    x = fourier_solve(A,y,zerofreq,λ)
    verbose && @info("Condition number: $(round(cond(A'A), digits=2))\n")
    x, f
end
"""`x,f = ls_spectral(y,t,f,W::AbstractVector)`
`W` is a vector of weights, same length as `y`, for weighted least-squares
"""
function ls_spectral(y,t,f,W::AbstractVector; verbose=false, λ=1e-10)
    A, zerofreq = get_fourier_regressor(t,f)
    Wd = Diagonal(W)
    # Weighted ridge normal equations: (A'WA + λI) x = A'W y.
    x = (A'Wd*A + λ*I)\(A'Wd)*y
    verbose && @info("Condition number: $(round(cond(A'*Wd*A), digits=2))\n")
    fourier2complex(x,zerofreq), f
end
"""`x,f = tls_spectral(y,t,f)`
Perform total least-squares spectral estimation using the SVD-method. See `ls_spectral` for additional help
"""
function tls_spectral(y,t,f=default_freqs(t)[1:end-1])
zerofreq = check_freq(f)
A, zerofreq = get_fourier_regressor(t,f)
AA = [A y]
# s = svd(AA, full=true)
_,_,Vt = LAPACK.gesvd!('S','S',AA)
n = size(A,2)
V21 = Vt[n+1,1:n]
V22 = Vt[n+1,n+1]
x = -V21/V22
fourier2complex(x,zerofreq), f
end
"""`S,f = ls_windowpsd(y,t,freqs; nw = 8, noverlap = -1, window_func=rect, estimator=ls_spectral, kwargs...)`
perform widowed spectral estimation using the least-squares method.
`window_func` defaults to `Windows.rect`
`estimator` is the spectral estimatio function to use, default is `ls_spectral`. For sparse estimation, try
`estimator = ls_sparse_spectral` See `ls_sparse_spectral` for more help. `kwargs` are passed to `estimator`.
See `ls_spectral` for additional help.
"""
function ls_windowpsd(y,t,freqs=nothing; nw = 8, noverlap = -1, window_func=rect, estimator=ls_spectral, kwargs...)
    n = length(y)÷nw
    freqs === nothing && (freqs = default_freqs(t,n))
    windows = Windows2(y,t,n,noverlap,window_func)
    nw = length(windows)       # actual window count may differ due to overlap
    S = zeros(eltype(y), length(freqs))
    noverlap = windows.ys.noverlap
    poverlap = noverlap/n      # currently unused except by the @show below
    # Average the squared spectra over all windows (Welch-style).
    for (yi,ti) in windows
        x = estimator(yi,ti,freqs,windows.W; kwargs...)[1]
        S .+= abs2.(x)
    end
    # @show (1+poverlap),nw
    return S./nw^2, freqs
end
"""`ls_windowcsd(y,u,t,freqs; nw = 10, noverlap = -1, window_func=rect, estimator=ls_spectral, kwargs...)`
Perform windowed cross spectral density estimation using the least-squares method.
`y` and `u` are the two signals to be analyzed and `t::AbstractVector` are their sampling points
`window_func` defaults to `Windows.rect`
`estimator` is the spectral estimatio function to use, default is `ls_spectral`. For sparse estimation, try
`estimator = ls_sparse_spectral` See `ls_sparse_spectral` for more help.
See `ls_spectral` for additional help.
"""
function ls_windowcsd(y,u,t,freqs=nothing; nw = 10, noverlap = -1, window_func=rect, estimator=ls_spectral, kwargs...)
    n = length(y)÷nw
    freqs === nothing && (freqs = default_freqs(t,n))
    S = zeros(ComplexF64,length(freqs))
    # Both signals are windowed identically so the windows stay aligned.
    windowsy = Windows2(y,t,n,noverlap,window_func)
    windowsu = Windows2(u,t,n,noverlap,window_func)
    nw = length(windowsy)      # actual window count may differ due to overlap
    noverlap = windowsy.ys.noverlap
    poverlap = max(noverlap,1)/n   # currently unused downstream
    for ((y,t), (u,_)) in zip(windowsy, windowsu)
        xy = estimator(y,t,freqs,windowsy.W; kwargs...)[1]
        xu = estimator(u,t,freqs,windowsu.W; kwargs...)[1]
        # Cross spectrum
        S += xy.*conj.(xu)
    end
    return S./nw, freqs
end
# function lscohere(y,u,t,freqs, nw = 10, noverlap = -1)
# Syu = lswindowcsd(y,u,t,freqs, nw, noverlap)
# Syy = lswindowpsd(y, t,freqs, nw, noverlap)
# Suu = lswindowpsd(u, t,freqs, nw, noverlap)
# Sch = (abs(Syu).^2)./(Suu.*Syy);
# end
"""`ls_cohere(y,u,t,freqs; nw = 10, noverlap = -1, estimator=ls_spectral, kwargs...)`
Perform spectral coherence estimation using the least-squares method.
`estimator` is the spectral estimatio function to use, default is `ls_spectral`. For sparse estimation, try
`estimator = ls_sparse_spectral` See `ls_sparse_spectral` for more help.
See also `ls_windowcsd` and `ls_spectral` for additional help.
"""
function ls_cohere(y,u,t,freqs=nothing; nw = 10, noverlap = -1, estimator=ls_spectral, kwargs...)
    n = length(y)÷nw
    freqs === nothing && (freqs = default_freqs(t,n))
    Syy = zeros(length(freqs))          # accumulated auto-spectrum of y
    Suu = zeros(length(freqs))          # accumulated auto-spectrum of u
    Syu = zeros(ComplexF64,length(freqs))  # accumulated cross-spectrum
    # Windows3 yields aligned (y, t, u) windows; a Hanning taper is applied.
    windows = Windows3(y,t,u,n,noverlap,hanning)
    for (y,t,u) in windows
        xy = estimator(y,t,freqs,windows.W; kwargs...)[1]
        xu = estimator(u,t,freqs,windows.W; kwargs...)[1]
        # Cross spectrum
        Syu .+= xy.*conj.(xu)
        Syy .+= abs2.(xy)
        Suu .+= abs2.(xu)
    end
    # Magnitude-squared coherence: |Syu|² / (Suu Syy) ∈ [0, 1] per frequency.
    Sch = abs2.(Syu)./(Suu.*Syy)
    return Sch, freqs
end
# Gaussian (RBF) basis-function activation centered at `vc` with width
# parameter `gamma` (larger gamma ⇒ narrower kernel).
@inline _K(V, vc, gamma) = exp.(-gamma * (V .- vc) .^ 2)

# As `_K`, but rescaled in place so the activations sum to one.
@inline function _K_norm(V, vc, gamma)
    activation = _K(V, vc, gamma)
    activation ./= sum(activation)
end

# Gaussian activation zeroed whenever `V` and `vc` lie on opposite sides of
# zero — models a discontinuity at v = 0 (e.g. Coulomb friction).
@inline _Kcoulomb(V, vc, gamma) = _K(V, vc, gamma) .* (sign.(V) .== sign.(vc))

# Normalized variant of `_Kcoulomb` (activations sum to one).
@inline function _Kcoulomb_norm(V, vc, gamma)
    activation = _Kcoulomb(V, vc, gamma)
    activation ./= sum(activation)
end
"""psd(se::SpectralExt)
Compute the power spectral density For a SpectralExt object
See also `ls_windowpsd_lpv`
"""
function psd(se::SpectralExt)
rp = LPVSpectral.reshape_params(copy(se.x),length(se.w))
return abs2.(sum(rp,dims=2))
end
"""
`ls_spectral_lpv(Y,X,V,w,Nv::Int; λ = 1e-8, coulomb = false, normalize=true)`
Perform LPV spectral estimation using the method presented in
Bagge Carlson et al. "Linear Parameter-Varying Spectral Decomposition."
See the paper For additional details.
`Y` output\n
`X` sample locations\n
`V` scheduling signal\n
`w` frequency vector\n
`Nv` number of basis functions\n
`λ` Regularization parameter\n
`coulomb` Assume discontinuity at `v=0` (useful for signals where, e.g., Coulomb friction might cause issues.)\n
`normalize` Use normalized basis functions (See paper for details).
The method will issue a warning If less than 90% of the variance in `Y` is described by the estimated model. If this is the case, try increasing either the number of frequencies or the number of basis functions per frequency. Alternatively, try lowering the regularization parameter `λ`.
See also `psd`, `ls_sparse_spectral_lpv` and `ls_windowpsd_lpv`
"""
function ls_spectral_lpv(Y::AbstractVector,X::AbstractVector,V::AbstractVector,w,Nv::Integer; λ = 1e-8, coulomb = false, normalize=true)
w = w[:]
N = length(Y)
Nf = length(w)
K = basis_activation_func(V,Nv,normalize,coulomb)
M(w,X,V) = vec(vec(exp.(im*w.*X))*K(V)')'
A = zeros(ComplexF64,N, ifelse(coulomb,2,1)*Nf*Nv)
for n = 1:N
A[n,:] = M(w,X[n],V[n])
end
params = real_complex_bs(A,Y,λ)
real_params = [real.(params); imag.(params)]
AA = [real.(A) imag.(A)]
e = AA*real_params-Y
Σ = var(e)*inv(AA'AA + λ*I)
fva = 1-var(e)/var(Y)
fva < 0.9 && @warn("Fraction of variance explained = $(fva)")
SpectralExt(Y, X, V, w, Nv, λ, coulomb, normalize, params, Σ)
end
"""ls_windowpsd_lpv(Y::AbstractVector,X::AbstractVector,V::AbstractVector,w,Nv::Integer, nw::Int=10, noverlap=0; kwargs...)
Perform windowed psd estimation using the LPV method. A rectangular window is always used.
See `?ls_spectral_lpv` for additional help.
"""
function ls_windowpsd_lpv(Y::AbstractVector,X::AbstractVector,V::AbstractVector,w,Nv::Integer, nw::Int=10, noverlap=0; kwargs...)
S = zeros(length(w))
windows = Windows3(Y,X,V,length(Y)÷nw,noverlap,rect) # ones produces a rectangular window
for (y,x,v) in windows
x = ls_spectral_lpv(y,x,v,w,Nv; kwargs...)
rp = reshape_params(x.x,length(w))
S += abs2.(sum(rp,dims=2))
end
return S
end
| LPVSpectral | https://github.com/baggepinnen/LPVSpectral.jl.git |
|
[
"MIT"
] | 0.3.3 | 90ec6d3d0e421f2ae60250e9692346c0d0fa233e | code | 4799 | fft_frequencies(fs::Real, nfft::Int) = LinRange(0f0, fs / 2f0, (nfft >> 1) + 1)
"""
MelSpectrogram{T, F, Ti} <: DSP.Periodograms.TFR{T}
# Arguments:
- `power::Matrix{T}`
- `mels::F`
- `time::Ti`
"""
struct MelSpectrogram{T, F,Ti} <: DSP.Periodograms.TFR{T}
power::Matrix{T}
mels::F
time::Ti
end
DSP.freq(tfr::MelSpectrogram) = tfr.mels
Base.time(tfr::MelSpectrogram) = tfr.time
"""
MFCC{T, F, Ti} <: DSP.Periodograms.TFR{T}
# Arguments:
- `mfcc::Matrix{T}`
- `number::F`
- `time::Ti`
"""
struct MFCC{T, F, Ti} <: DSP.Periodograms.TFR{T}
mfcc::Matrix{T}
number::F
time::Ti
end
DSP.freq(tfr::MFCC) = tfr.number
Base.time(tfr::MFCC) = tfr.time
"""
    hz_to_mel(frequencies)

Convert frequencies in Hz to the mel scale: linear below 1 kHz, logarithmic
above. Accepts a scalar or a collection and always returns an array
(a 0-dimensional array for scalar input, hence the `[1]` at call sites).
"""
function hz_to_mel(frequencies)
    f_min = 0f0
    f_sp = 200f0 / 3                      # Hz per mel in the linear region
    min_log_hz = 1000f0                   # crossover to the logarithmic region
    min_log_mel = (min_log_hz - f_min) / f_sp
    logstep = log(6.4f0) / 27f0           # step size in the log region
    map(collect(frequencies)) do f
        if f >= min_log_hz
            min_log_mel + log(f / min_log_hz) / logstep
        else
            (f - f_min) / f_sp
        end
    end
end
"""
    mel_to_hz(mels)

Inverse of `hz_to_mel`: convert mel values back to Hz. Accepts a scalar or a
collection and always returns an array (0-dimensional for scalar input).
"""
function mel_to_hz(mels)
    f_min = 0f0
    f_sp = 200f0 / 3                      # Hz per mel in the linear region
    min_log_hz = 1000f0                   # crossover to the exponential region
    min_log_mel = (min_log_hz - f_min) / f_sp
    logstep = log(6.4f0) / 27f0
    map(collect(mels)) do m
        if m >= min_log_mel
            min_log_hz * exp(logstep * (m - min_log_mel))
        else
            f_min + f_sp * m
        end
    end
end
# Return `nmels` frequencies (in Hz) equally spaced on the mel scale between
# `fmin` and `fmax`.
function mel_frequencies(nmels::Int = 128, fmin::Real = 0.0f0, fmax::Real = 11025f0)
    mel_lo = hz_to_mel(fmin)[1]
    mel_hi = hz_to_mel(fmax)[1]
    mel_to_hz(LinRange(mel_lo, mel_hi, nmels))
end
"""
M = mel(fs::Real, nfft::Int; nmels::Int = 128, fmin::Real = 0f0, fmax::Real = fs/2f0)
Returns a Mel matrix `M` such that `M*f` is a mel spectrogram if `f` is a vector of spectrogram powers, e.g.,
```julia
M*abs2.(rfft(sound))
```
"""
function mel(fs::Real, nfft::Int; nmels::Int = 128, fmin::Real = 0f0, fmax::Real = fs/2f0)
weights = zeros(Float32, nmels, (nfft >> 1) + 1)
fftfreqs = fft_frequencies(fs, nfft)
melfreqs = mel_frequencies(nmels + 2, fmin, fmax)
enorm = 2f0 ./ (melfreqs[3:end] - melfreqs[1:nmels])
for i in 1:nmels
lower = (fftfreqs .- melfreqs[i]) ./ (melfreqs[i+1] - melfreqs[i])
upper = (melfreqs[i+2] .- fftfreqs) ./ (melfreqs[i+2] - melfreqs[i+1])
weights[i, :] = max.(0, min.(lower, upper)) * enorm[i]
end
weights
end
"""
melspectrogram(s, n=div(length(s), 8), args...; fs=1, nmels::Int=128, fmin::Real=0.0f0, fmax::Real=fs / 2.0f0, window=hanning, kwargs...)
DOCSTRING
#Arguments:
- `s`: signal
- `n`: number of points in each window
- `args`: are sent to `spectrogram`
- `fs`: sample frequency
- `nmels`: number of mel frequencies
- `fmin`: minimum freq
- `fmax`: maximum freq
- `window`: window function, defaults to hanning
- `kwargs`: are sent to `spectrogram`
"""
function melspectrogram(S::DSP.Periodograms.Spectrogram; fs=1, nmels::Int = 128, fmin::Real = 0f0, fmax::Real = fs / 2f0)
n = 2size(S.power,1)-1
data = mel(fs, n; nmels=nmels, fmin=fmin, fmax=fmax) * S.power
nframes = size(data, 2)
MelSpectrogram(data, LinRange(hz_to_mel(fmin)[1], hz_to_mel(fmax)[1], nmels), S.time)
end
function melspectrogram(s, n=div(length(s), 8), args...; fs=1, nmels::Int = 128, fmin::Real = 0f0, fmax::Real = fs / 2f0, window=hanning, kwargs...)
S = DSP.spectrogram(s, n, args...; fs=fs, window=window, kwargs...)
melspectrogram(S,fs=fs,nmels=nmels,fmin=fmin,fmax=fmax)
end
"""
mfcc(s, args...; nmfcc::Int=20, nmels::Int=128, window=hanning, kwargs...)
Compute the [Mel-frequency cepstral coefficients (MFCCs)](https://en.wikipedia.org/wiki/Mel-frequency_cepstrum)
# Arguments:
- `s`: signal
- `args`: are sent to `spectrogram`
- `nmfcc`: number of coeffs
- `nmels`: number of mel frequencies
- `window`: window function, defaults to hanning
- `kwargs`: are sent to `spectrogram`
"""
function mfcc(s, args...; nmfcc::Int = 20, nmels::Int = 128, window=hanning, kwargs...)
if nmfcc >= nmels
error("number of mfcc components should be less than the number of mel frequency bins")
end
M = melspectrogram(s, args...; nmels=nmels, window=window, kwargs...)
mfcc = dct_matrix(nmfcc, nmels) * power(M)
for frame in 1:size(mfcc, 2)
mfcc[:, frame] /= norm(mfcc[:, frame])
end
MFCC(mfcc, 1:nmfcc, time(M))
end
"""returns the DCT filters"""
function dct_matrix(nfilters::Int, ninput::Int)
basis = Array{Float32}(undef, nfilters, ninput)
samples = (1f0:2f0:2ninput) * π / 2ninput
for i = 1:nfilters
basis[i, :] = cos.(i * samples)
end
basis *= sqrt(2f0/ninput)
basis
end
| LPVSpectral | https://github.com/baggepinnen/LPVSpectral.jl.git |
|
[
"MIT"
] | 0.3.3 | 90ec6d3d0e421f2ae60250e9692346c0d0fa233e | code | 4861 | function meshgrid(a,b)
grid_a = [i for i in a, j in b]
grid_b = [j for i in a, j in b]
grid_a, grid_b
end
# Plot recipe: draw a periodogram-style TFR as power vs. frequency on a
# logarithmic power axis.
@recipe function plot_periodogram(p::DSP.Periodograms.TFR)
    title --> "Periodogram"
    yscale --> :log10
    xguide --> "Frequency"
    p.freq, p.power
end
# Plot recipe: draw a spectrogram as a heatmap with a logarithmic frequency
# axis. `compression` gives the quantiles at which log-power is clipped (see
# `compress`) to improve contrast. The first frequency bin (DC) is dropped
# because zero breaks the log axis.
@recipe function plot_spectrogram(p::DSP.Periodograms.Spectrogram; compression=(0.005,1))
    seriestype := :heatmap
    title --> "Spectrogram"
    yscale --> :log10
    yguide --> "Frequency"
    xguide --> "Time [s]"
    p.time, p.freq[2:end], compress(log.(p.power)[2:end,:], compression)
end
# Plot recipe for MelSpectrogram: heatmap of clipped log-power against time
# and Hz (mel axis converted back with `mel_to_hz`); the first band is dropped
# because the log frequency axis cannot show 0 Hz.
@recipe function mel(h::MelSpectrogram; compression=(0.005,1))
    seriestype := :heatmap
    xguide --> "Time [s]"
    yguide --> "Frequency [Hz]"
    # yticks --> yr
    # xticks --> xr
    title --> "Mel Spectrogram"
    yscale := :log10
    freqs = (mel_to_hz(h.mels)[2:end])
    h.time, freqs, compress(log.(h.power)[2:end,:], compression)
end
"""
    compress(x, q)

Clamp the values of `x` to an empirical quantile range. A scalar `q` is
interpreted symmetrically (e.g. `0.01` and `0.99` both clip the extreme 1%
tails); a two-element `q` specifies the lower and upper quantiles directly.
"""
function compress(x, q)
    lohi = if q isa Number
        qq = q < 0.5 ? q : 1 - q
        (qq, 1 - qq)
    else
        (min(q...), max(q...))
    end
    limits = quantile(vec(x), lohi)
    clamp.(x, limits...)
end
# @userplot SchedFunc
# Plot recipe for a SpectralExt: plot the estimated amplitude functions A(ω, v)
# (and optionally the phase φ(ω, v)) against the scheduling variable v, one
# series per frequency. If a parameter covariance is available, Monte-Carlo
# confidence ribbons are drawn by sampling from the complex-normal posterior.
@recipe function plot_schedfunc(se::SpectralExt; normalization=:none, normdim=:freq, dims=2, bounds=true, nMC = 5_000, phase = false, mcmean=false)
    xi,V,X,w,Nv,coulomb,normalize = se.x,se.V,se.X,se.w,se.Nv,se.coulomb,se.normalize
    Nf = length(w)
    x = reshape_params(xi,Nf) # Nf × Nv matrix of complex parameters
    ax = abs.(x)
    px = angle.(x)
    K = basis_activation_func(V,Nv,normalize,coulomb)
    fg,vg = meshgrid(w,LinRange(minimum(V),maximum(V),Nf == 100 ? 101 : 100)) # to guarantee that the broadcast below always works
    F = zeros(size(fg))          # amplitude surface over (ω, v)
    FB = zeros(size(fg)...,nMC)  # Monte-Carlo amplitude samples
    P = zeros(size(fg))          # phase surface over (ω, v)
    PB = zeros(size(fg)...,nMC)  # Monte-Carlo phase samples
    bounds = bounds && se.Σ != nothing # bounds require a covariance estimate (NOTE(review): `!==` would be more idiomatic)
    if bounds
        cn = ComplexNormal(se.x,se.Σ)
        zi = LPVSpectral.rand(cn,nMC) # Draw several random parameters from the posterior distribution
    end
    # Evaluate amplitude/phase (and their MC samples) on the (ω, v) grid
    for j = 1:size(fg,1)
        for i = 1:size(vg,2)
            ϕ = K(vg[j,i]) # Kernel activation vector
            F[j,i] = abs(dot(x[j,:],ϕ))
            P[j,i] = angle(dot(x[j,:],ϕ))
            if bounds
                for iMC = 1:nMC
                    zii = zi[iMC,j:Nf:end][:] # parameters of frequency j in MC sample iMC
                    FB[j,i,iMC] = abs(dot(zii,ϕ))
                    if phase
                        PB[j,i,iMC] = angle(dot(zii,ϕ))
                    end
                end
            end
        end
    end
    # Empirical lower/upper (10%/90%) quantiles and mean of the MC samples
    FB = sort(FB,dims=3)
    lim = 10
    FBl = FB[:,:,nMC ÷ lim]
    FBu = FB[:,:,nMC - (nMC ÷ lim)]
    FBm = dropdims(mean(FB,dims=3),dims=3)
    PB = sort(PB,dims=3)
    PBl = PB[:,:,nMC ÷ lim]
    PBu = PB[:,:,nMC - (nMC ÷ lim)]
    PBm = dropdims(mean(PB,dims=3),dims=3)
    # Optional normalization of the amplitude surface along frequency or v
    nd = normdim == :freq ? 1 : 2
    normalizer = 1.
    if normalization == :sum
        normalizer = sum(F, dims=nd)/size(F,nd)
    elseif normalization == :max
        normalizer = maximum(F, dims=nd)
    end
    F = F./normalizer
    # Strip custom keywords so they are not forwarded to the plotting backend
    delete!(plotattributes, :normalization)
    delete!(plotattributes, :normdim)
    if dims == 3
        delete!(plotattributes, :dims)
        yguide --> "\$v\$"
        xguide --> "\$\\omega\$"
        # zguide := "\$f(v)\$"
        for i = 1:Nf
            @series begin
                seriestype := path3d # NOTE(review): unquoted — relies on Plots exporting `path3d`; possibly intended as :path3d
                fg[i,:],vg[i,:],F[i,:]
            end
        end
    else
        for i = 1:Nf
            xguide --> "\$v\$"
            yguide --> "\$A(v)\$"
            title --> "Estimated functional dependece \$A(v)\$\n"# Normalization: $normalization, along dim $normdim")#, zlabel="\$f(v)\$")
            @series begin
                label --> "ω = $(round(fg[i,1]/pi,sigdigits=1))π"
                m = mcmean && bounds ? FBm[i,:] : F[i,:] # plot MC mean or point estimate
                if bounds
                    # fillrange := FBu[i,:]
                    ribbon := (-FBl[i,:] .+ m, FBu[i,:] .- m)
                end
                vg[i,:],m
            end
        end
        if phase
            for i = 1:Nf
                xguide --> "\$v\$"
                yguide --> "\$\\phi(v)\$"
                linestyle := :dashdot
                @series begin
                    label --> "\$\\phi\$"
                    fillalpha := 0.1
                    pi = P[i,:] # NOTE(review): shadows Base.pi within this block
                    if bounds
                        ribbon := (-PBl[i,:] .+ pi, PBu[i,:] .- pi)
                    end
                    vg[i,:],pi
                end
            end
        end
    end
    delete!(plotattributes, :phase)
    delete!(plotattributes, :bounds)
    delete!(plotattributes, :nMC)
    delete!(plotattributes, :mcmean)
    nothing
end
# Plot recipe hook for series type :spectralext: extracts the parameter vector
# from a SpectralExt (passed as `y`) and yields the squared magnitudes of the
# parameters, reshaped to one column per basis function.
@recipe function plot_spectralext(::Type{Val{:spectralext}}, x, y, z)
    xi,w = y.x, y.w
    title --> "Spectrum"
    Nf = length(w)
    x = reshape_params(xi,Nf)
    ax = abs2.(x)
    px = angle.(x) # NOTE(review): computed but unused
    ax
end
| LPVSpectral | https://github.com/baggepinnen/LPVSpectral.jl.git |
|
[
"MIT"
] | 0.3.3 | 90ec6d3d0e421f2ae60250e9692346c0d0fa233e | code | 5864 | function detrend!(x::Vector, order=0, t = 1:length(x))
x[:] .-= mean(x)
if order == 1
k = x\t
x[:] .-= k*t
end
x
end
"""detrend(x, order=0, t = 1:length(x))
Removes the trend of order `order`, i.e, mean and, if order=1, the slope of the signal `x`
If `order` = 1, then the sampling points of `x` can be supplied as `t` (default: `t = 1:length(x)`)
"""
function detrend(x,args...)
y = copy(x)
detrend!(y,args...)
end
"""basis_activation_func(V,Nv,normalize,coulomb)
Returns a func ϕ(v) ∈ ℜ(Nv) that calculates the activation of `Nv` basis functions spread out to cover V nicely. If coulomb is true, then we get twice the number of basis functions, 2Nv
"""
function basis_activation_func(V,Nv,normalize,coulomb)
if coulomb # If Coulomb setting is activated, double the number of basis functions and clip the activation at zero velocity (useful for data that exhibits a discontinuity at v=0, like coulomb friction)
vc = range(0, stop=maximum(abs.(V)), length=Nv+2)
vc = vc[2:end-1]
vc = [-vc[end:-1:1]; vc]
Nv = 2Nv
gamma = Nv/(abs(vc[1]-vc[end]))
K = normalize ? V -> _Kcoulomb_norm(V,vc,gamma) : V -> _Kcoulomb(V,vc,gamma) # Use coulomb basis function instead
else
vc = range(minimum(V), stop=maximum(V), length=Nv)
gamma = Nv/(abs(vc[1]-vc[end]))
K = normalize ? V -> _K_norm(V,vc,gamma) : V -> _K(V,vc,gamma)
end
end
"""`ridgereg(A,b,λ)`
Accepts `λ` to solve the ridge regression problem using the formulation `[A;λI]\\[b;0]. λ should be given with the same dimension as the columns of A, i.e. if λ represents an inverse standard deviation, then 1/λ = σ, not 1/λ = σ²`"""
function ridgereg(A,b,λ)
n = size(A,2)
[A; λ*I]\[b;zeros(n)]
end
"""`real_complex_bs(A,b, λ=0)`
Replaces the backslash operator For complex arguments. Expands the A-matrix into `[real(A) imag(A)]` and performs the computation using real arithmetics. Optionally accepts `λ` to solve the ridge regression problem using the formulation `[A;λI]\\[b;0]. λ should be given with the same dimension as the columns of A, i.e. if λ represents a standard deviation, then λ = σ, not λ = σ²`
"""
function real_complex_bs(A,b, λ=0)
T,n = size(A)
Ar = [real(A) imag(A)]
xr = λ > 0 ? [Ar; λ*I]\[b;zeros(2n)] : Ar\b
x = complex.(xr[1:n], xr[n+1:end])
end
# Solve the (optionally ridge-regularized) real-valued Fourier regression
# problem via an SVD factorization and convert the stacked real solution to
# complex coefficients, handling a possible zero-frequency entry
# (see `fourier2complex`).
function fourier_solve(A, y, zerofreq, λ=0)
    ncols = size(A, 2)
    xr = λ > 0 ? svd([A; λ * I]) \ [y; zeros(ncols)] : svd(A) \ y
    fourier2complex(xr, zerofreq)
end
# Convert a stacked real solution [real; imag] into a complex vector. If
# `zerofreq` is an index, the entry at that position is a real DC coefficient:
# it is removed before pairing the halves and re-inserted (as a real-valued
# complex number) afterwards.
function fourier2complex(x, zerofreq)
    half = length(x) ÷ 2
    zerofreq === nothing && return complex.(x[1:half], x[half+1:end])
    dc = x[zerofreq]
    rest = deleteat!(copy(x), zerofreq)
    z = complex.(rest[1:half], rest[half+1:end])
    insert!(z, zerofreq, dc)
    z
end
""" Returns params as a [nω × N] matrix"""
reshape_params(x,Nf) = reshape(x, Nf,round(Int,length(x)/Nf))
## Complex Normal
import Base.rand
# Complex normal distribution CN(m, Γ, C) with mean `m`, covariance `Γ`
# (stored as a Cholesky factorization) and pseudo-covariance / relation
# matrix `C`.
mutable struct ComplexNormal{T<:Complex}
    m::AbstractVector{T}   # mean vector
    Γ::Cholesky            # covariance, as a Cholesky factorization
    C::Symmetric{T}        # pseudo-covariance (relation) matrix
end
# Fit a ComplexNormal to paired samples: rows of `X` are the real parts and
# rows of `Y` the imaginary parts of the observations.
function ComplexNormal(X::AbstractVecOrMat,Y::AbstractVecOrMat)
    @assert size(X) == size(Y)
    mc = complex.(mean(X,dims=1)[:], mean(Y,dims=1)[:])
    V = Symmetric(cov([X Y])) # joint real covariance of [X Y]
    Γ,C = cn_V2ΓC(V)
    ComplexNormal(mc,Γ,C)
end
# Fit from complex samples by splitting into real and imaginary parts.
function ComplexNormal(X::AbstractVecOrMat{T}) where T<:Complex
    ComplexNormal(real.(X),imag.(X))
end
# Construct from a stacked real mean [mx; my] and a joint real covariance `V`.
function ComplexNormal(m::AbstractVector{T},V::AbstractMatrix{T}) where T<:Real
    n = Int(length(m)/2)
    mc = complex.(m[1:n], m[n+1:end])
    Γ,C = cn_V2ΓC(V)
    ComplexNormal(mc,Γ,C)
end
# Construct from a complex mean and a joint real covariance `V`.
function ComplexNormal(mc::AbstractVector{Tc},V::AbstractMatrix{Tr}) where {Tr<:Real, Tc<:Complex}
    Γ,C = cn_V2ΓC(V)
    ComplexNormal(mc,Γ,C)
end
# Convert the real joint covariance V of [x; y] (x = real part, y = imaginary
# part) into the complex covariance Γ = Vxx + Vyy + i(Vyx - Vxy), stored as a
# Cholesky factorization, and the pseudo-covariance C = Vxx - Vyy + i(Vyx + Vxy).
function cn_V2ΓC(V::Symmetric{T}) where T<:Real
    n = size(V,1)÷2
    Vxx = V[1:n,1:n]
    Vyy = V[n+1:end,n+1:end]
    Vxy = V[1:n,n+1:end]
    Vyx = V[n+1:end,1:n]
    Γ = cholesky(complex.(Vxx + Vyy, Vyx - Vxy))
    C = Symmetric(complex.(Vxx - Vyy, Vyx + Vxy))
    Γ,C
end
# Accept a plain real matrix by wrapping it in Symmetric first.
cn_V2ΓC(V::AbstractMatrix{T}) where {T<:Real} = cn_V2ΓC(Symmetric(V))
# Recover the real block covariances of [x; y] from (Γ, C):
#   Vxx = Re(Γ+C)/2, Vyy = Re(Γ-C)/2, Vxy = Im(-Γ+C)/2, Vyx = Im(Γ+C)/2.
# The `cn_V*` variants return Cholesky factorizations, the `cn_fV*` variants
# the plain matrices. `Matrix(Γ)` reconstructs the full matrix from the
# Cholesky factorization.
@inline cn_Vxx(Γ,C) = cholesky(real.(Matrix(Γ)+C)/2)
@inline cn_Vyy(Γ,C) = cholesky(real.(Matrix(Γ)-C)/2)
@inline cn_Vxy(Γ,C) = cholesky(imag.(-Matrix(Γ)+C)/2)
@inline cn_Vyx(Γ,C) = cholesky(imag.(Matrix(Γ)+C)/2)
@inline cn_fVxx(Γ,C) = real.(Matrix(Γ)+C)/2
@inline cn_fVyy(Γ,C) = real.(Matrix(Γ)-C)/2
@inline cn_fVxy(Γ,C) = imag.(-Matrix(Γ)+C)/2
@inline cn_fVyx(Γ,C) = imag.(Matrix(Γ)+C)/2
@inline cn_Vs(Γ,C) = cn_Vxx(Γ,C),cn_Vyy(Γ,C),cn_Vxy(Γ,C),cn_Vyx(Γ,C)
# Full joint real covariance [Vxx Vxy; Vyx Vyy] and its Cholesky factorization.
@inline cn_fV(Γ,C) = [cn_fVxx(Γ,C) cn_fVxy(Γ,C); cn_fVyx(Γ,C) cn_fVyy(Γ,C)]
@inline cn_V(Γ,C) = cholesky(Hermitian(cn_fV(Γ,C)))
@inline Σ(cn::ComplexNormal) = Matrix(cn.Γ) # TODO: check this
# Forward all helpers so they can be called directly on a ComplexNormal.
for f in [:cn_Vxx,:cn_Vyy,:cn_Vxy,:cn_Vyx,:cn_fVxx,:cn_fVyy,:cn_fVxy,:cn_fVyx,:cn_Vs,:cn_V,:cn_fV]
    @eval ($f)(cn::ComplexNormal) = ($f)(cn.Γ,cn.C)
end
"""
`f(cn::ComplexNormal, z)`
Probability density Function `f(z)` for a complex normal distribution.
This can probably be more efficiently implemented
"""
function pdf(cn::ComplexNormal, z)
k = length(cn.m)
R = conj(cn.C)'*inv(cn.Γ)
P = Matrix(cn.Γ)-R*cn.C # conj(Γ) = Γ for Γ::Cholesky
cm = conj(cn.m)
cz = conj(z)
zmm = z-cn.m
czmm = cz-cm
ld = [czmm' zmm']
rd = [zmm; czmm]
S = [Matrix(cn.Γ) cn.C;conj(cn.C) Matrix(cn.Γ)] # conj(Γ) = Γ for Γ::Cholesky
1/(π^k*sqrt(det(cn.Γ)*det(P))) * exp(-0.5* ld*(S\rd))
end
# Affine transform of a complex normal: if z ~ CN(m, Γ, C) then
# A*z + b ~ CN(A*m + b, A*Γ*A', A*C*Aᵀ).
affine_transform(cn::ComplexNormal, A,b) = ComplexNormal(A*cn.m+b, cholesky(Hermitian(A*Matrix(cn.Γ)*conj(A'))), Symmetric(A*cn.C*A'))
# Draw `s` samples from `cn`. Returns an `s × n` complex matrix where each row
# is one sample: standard normal draws are colored by the upper Cholesky
# factor of the joint real covariance and shifted by the mean.
function rand(cn::ComplexNormal,s::Integer)
    L = cn_V(cn).U # upper Cholesky factor of the joint real covariance
    m = [real(cn.m); imag(cn.m)]
    n = length(cn.m)
    z = (m' .+ randn(s,2n)*L)
    return complex.(z[:,1:n],z[:,n+1:end])
end
| LPVSpectral | https://github.com/baggepinnen/LPVSpectral.jl.git |
|
[
"MIT"
] | 0.3.3 | 90ec6d3d0e421f2ae60250e9692346c0d0fa233e | code | 3244 | abstract type AbstractWindows end
# Number of windows produced by the split.
Base.length(w::AbstractWindows) = length(w.ys);
# Materialize the iterator: a vector of tuples of copied window segments.
Base.collect(W::AbstractWindows) = [copy.(w) for w in W]
# Window2 ==========================================================
# Iterable that splits a signal `y` and its time vector `t` into (possibly
# overlapping) segments, together with a window-weight vector `W`.
struct Windows2 <: AbstractWindows
    y::AbstractVector  # the full signal
    t::AbstractVector  # the full time vector
    ys                 # split view over y (presumably DSP.arraysplit — TODO confirm)
    ts                 # split view over t
    W                  # window weights of length n (not applied to the data)
    """
        Windows2(y, t, n=length(y)÷8, noverlap=-1, window_func=rect)

    noverlap = -1 sets the noverlap to n/2

    Iterating `w::Windows2` produces `(y,t)` of specified length. The window is *not* applied to the signal, instead the window array is available as `w.W`.

    # Arguments:
    - `y`: Signal
    - `t`: Time vector
    - `n`: Number of datapoints per window
    - `noverlap`: Overlap between windows
    - `window_func`: Function to create a vector of weights, e.g., `DSP.hanning, DSP.rect` etc.
    """
    function Windows2(y::AbstractArray{T},t,n::Int=length(y)>>3, noverlap::Int=n>>1,window_func=rect) where T
        noverlap < 0 && (noverlap = n>>1) # -1 (or any negative) means half-window overlap
        N = length(y)
        @assert N == length(t) "y and t has to be the same length"
        W = T.(window_func(n)) # weights converted to the element type of y
        ys = arraysplit(y,n,noverlap)
        ts = arraysplit(t,n,noverlap)
        new(y,t,ys,ts,W)
    end
end
# Iterate over aligned (y, t) window pairs; the state is the window index.
function Base.iterate(w::Windows2, state=1)
    state > length(w) && return nothing
    ((w.ys[state],w.ts[state]), state+1)
end
"""
mapwindows(f, W::AbstractWindows)
Apply a Function `f` over all windows represented by `W::Windows2`.
`f` must take `(y,t)->ŷ` where `y` and `ŷ` have the same length.
"""
function mapwindows(f::Function, W::AbstractWindows)
res = map(f,W)
merge(res,W)
end
mapwindows(f::Function, args...) = mapwindows(f, Windows2(args...))
# Reassemble a full-length signal from per-window results `yf` by overlap-add:
# each window's contribution is summed into its position, and every sample is
# divided by the number of windows covering it (an average in overlap regions).
function Base.merge(yf::AbstractVector{<:AbstractVector},w::Windows2)
    ym = zeros(eltype(yf[1]), length(w.y))  # accumulated output
    counts = zeros(Int, length(w.y))        # coverage count per sample
    dpw = length(w.ys[1])                   # datapoints per window
    inds = 1:dpw
    for i in eachindex(w.ys)
        ym[inds] .+= yf[i]
        counts[inds] .+= 1
        inds = inds .+ (dpw-w.ys.noverlap)  # advance by the hop size
        inds = inds[1]:min(inds[end], length(ym)) # clip the last window to the signal end
    end
    ym ./= max.(counts,1) # max avoids division by zero for uncovered samples
end
# Window3 ==========================================================
"""
Windows3(y, t, v, n=length(y)÷8, noverlap=-1, window_func=rect)
noverlap = -1 sets the noverlap to n/2
#Arguments:
- `y`: Signal
- `t`: Time vector
- `v`: Auxiliary vector
- `n`: Number of datapoints per window
- `noverlap`: Overlap between windows
- `window_func`: Function to apply over window
"""
struct Windows3 <: AbstractWindows
y::AbstractVector
t::AbstractVector
v::AbstractVector
ys
ts
vs
W
function Windows3(y::AbstractVector,t::AbstractVector,v::AbstractVector,n::Int=length(y)>>3, noverlap::Int=n>>1,window_func::Function=rect)
N = length(y)
@assert N == length(t) == length(v) "y, t and v has to be the same length"
noverlap < 0 && (noverlap = n>>1)
W = window_func(n)
ys = arraysplit(y,n,noverlap)
ts = arraysplit(t,n,noverlap)
vs = arraysplit(v,n,noverlap)
new(y,t,v,ys,ts,vs,W)
end
end
# Iterate over aligned (y, t, v) window triples; the state is the window index.
function Base.iterate(w::Windows3, state=1)
    state > length(w.ys) && return nothing
    ((w.ys[state],w.ts[state],w.vs[state]), state+1)
end
| LPVSpectral | https://github.com/baggepinnen/LPVSpectral.jl.git |
|
[
"MIT"
] | 0.3.3 | 90ec6d3d0e421f2ae60250e9692346c0d0fa233e | code | 10071 | using LPVSpectral
using Test, LinearAlgebra, Statistics, Random, StatsBase, ProximalOperators
using Plots, DSP
Random.seed!(0)
# write your own tests here
# Generate a test signal: a sum of cosines at frequencies `w`, each with an
# amplitude (and, when `modphase` is true, also a phase) modulated by the
# corresponding function in `f` evaluated at a scheduling variable v ∈ [0, 1].
# Returns (y, v, x, frequency_matrix, dependence_matrix).
function generate_signal(f,w,N, modphase=false)
    x = sort(10rand(N))                   # random, increasing sample points in [0, 10]
    v = range(0, stop=1, length=N)        # scheduling variable
    nf = length(f)
    # N × nw functional dependencies, cycling through f if fewer funcs than freqs
    dependence_matrix = Float64[f[mod(i-1, nf)+1](vi) for vi in v, i in eachindex(w)]
    # N × nw carrier cosines, optionally phase-modulated by the dependence
    frequency_matrix = [cos(w[j]*x[i] - 0.5modphase*dependence_matrix[i,j]) for i in eachindex(x), j in eachindex(w)]
    y = vec(sum(dependence_matrix .* frequency_matrix, dims=2)) # sum over frequencies
    y .+= 0.1 .* randn(N)                 # measurement noise
    y, v, x, frequency_matrix, dependence_matrix
end
@testset "LPVSpectral" begin
@info "Testing LPVSpectral"
@testset "Windows" begin
@info "Testing Windows"
y = 1:100
t = 1:100
W = Windows2(y,t,10,0)
@test length(W) == 10
@test first(W) == (1:10,1:10)
res = mapwindows(W) do (y,t)
-y
end
@test res == -1:-1:-100
W = Windows2(y,t,10,1)
@test length(W) == 11
cW = collect(W)
@test cW[1] == (1:10,1:10)
@test cW[2] == (10:19,10:19)
res = mapwindows(W) do (y,t)
-y
end
@test res == -1:-1:-100
y = 1:100
t = 1:100
W = Windows3(y,t,t,10,0)
@test length(W) == 10
@test first(W) == (1:10,1:10,1:10)
W = Windows3(y,t,t,10,1)
@test length(W) == 11
cW = collect(W)
@test cW[1] == (1:10,1:10,1:10)
@test cW[2] == (10:19,10:19,10:19)
end
@testset "Mel" begin
@info "Testing Mel"
M = mel(1,256)
@test size(M,1) == 128
@test size(M,2) == 256÷2+1
M = mel(1000,256, fmin=100)
sum(M[:,1:26]) == 0
y = randn(1000)
M = melspectrogram(y)
@test length(freq(M)) == 128
@test size(M.power) == (128,14)
@test length(time(M)) == 14
plot(M)
M = mfcc(y)
@test length(freq(M)) == 20
@test size(M.mfcc) == (20,14)
@test length(time(M)) == 14
end
@testset "LPV methods" begin
@info "testing LPV methods"
N = 500 # Number of training data points
f = [v->2v^2, v->2/(5v+1), v->3exp(-10*(v-0.5)^2),] # Functional dependences on the scheduling variable
w = 2pi*[2,10,20] # Frequency vector
w_test = 2π*collect(2:2:25)
Y,V,X,frequency_matrix, dependence_matrix = generate_signal(f,w,N,true)
λ = 0.02 # Regularization parmater
normal = true # Use normalized basis functions
Nv = 50 # Number of basis functions
se = ls_spectral_lpv(Y,X,V,w_test,Nv; λ = λ, normalize = normal) # Perform LPV spectral estimation
@show plot(se, phase=true)
windowpsd = ls_windowpsd_lpv(Y,X,V,w_test,Nv; λ = λ, normalize = normal)
spectrum_lpv = psd(se)
si = sortperm(spectrum_lpv[:],rev=true)
@test Set(si[1:3]) == Set([1,5,10])
si = sortperm(windowpsd[:],rev=true)
@test Set(si[1:3]) == Set([1,5,10])
end
@testset "detrend" begin
    @info "testing detrend"
    # detrend (out-of-place) must remove the mean without mutating its input;
    # detrend! (in-place) must mutate.
    tre = [1,2,3]
    @test detrend(tre) == [-1,0,1]
    @test tre == [1,2,3] # Shouldn't have changed
    detrend!(tre)
    @test tre == [-1,0,1] # Should have changed
end
## Test ComplexNormal
@testset "Complex Normal" begin
@info "testing Complex Normal"
n = 10
n2 = 5
a = randn(n)
A = randn(n,n)
A = A'A + I
b = randn(n2)
B = randn(n2,n2)
B = B'B + I
X = randn(n,n2)
Y = randn(n,n2)
cn = ComplexNormal(X,Y)
@test size(cn.m) == (n2,)
@test size(cn.Γ) == (n2,n2)
@test size(cn.C) == (n2,n2)
# affine_transform(cn, B, b)
pdf(cn,b)
pdf(cn,im*b)
@test isa(cn_Vxx(A,A), Cholesky)
@test isa(cn_fVxx(A,A), Matrix)
@test issymmetric(cn_fVxx(A,A))
cn_V(A,0.1A)
cn = ComplexNormal(a,A)
cn = ComplexNormal(im*b,A)
A = randn(4,4);
A = A'A
x = randn(1000,3)
y = randn(1000,3)
cn = ComplexNormal(x,y)
z = rand(cn,1000000);
cn2 = ComplexNormal(z)
@test norm(Matrix(cn.Γ)-Matrix(cn2.Γ)) < 0.01
@test norm(Matrix(cn.C)-Matrix(cn2.C)) < 0.01
end
@testset "ls methods" begin
@info "testing ls methods"
T = 100
t = 0:0.1:T-0.1
f = LPVSpectral.default_freqs(t)
@test f[1] == 0
@test f[end] == 5
@test length(f) == 10T÷2+1
# f2 = LPVSpectral.default_freqs(t,10)
# @test f2[1] == 0
# @test f2[end] == 0.5-1/length(t)/2
# @test length(f2) == T÷2÷10
@test LPVSpectral.check_freq(f) == 1
@test_throws ArgumentError LPVSpectral.check_freq([1,0,2])
A, z = LPVSpectral.get_fourier_regressor(t,f)
@test size(A) == (10T,2length(f)-1)
Base.isapprox(t1::Tuple{Float64,Int64}, t2::Tuple{Float64,Int64}; atol) = all(t -> isapprox(t[1],t[2],atol=atol), zip(t1,t2))
y = sin.(2pi .* t)
x,freqs = ls_spectral(y,t)
@test findmax(abs2.(x)) ≈ (2.0length(freqs), 101) atol=1e-4
W = ones(length(y))
x,_ = ls_spectral(y,t,f,W)
@test findmax(abs2.(x)) ≈ (2.0length(freqs), 101) atol=1e-4
x,freqs = tls_spectral(y,t)
@test findmax(abs2.(x)) ≈ (2.0length(freqs), 101) atol=1e-4
x,freqs = ls_windowpsd(y,t,noverlap=0)
@test findmax(x)[2] == 13
x,freqs = ls_windowpsd(y,t,nw=16,noverlap=0)
@test findmax(abs.(x))[2] == 7
x,freqs = ls_windowcsd(y,y,t,noverlap=0)
@test findmax(abs.(x)) ≈ (2.0length(freqs), 11) atol=1e-4
x,_ = ls_cohere(y,y,t)
@test all(x .== 1)
x,freqs = ls_cohere(y,y .+ 0.5 .*randn.(),t,nw=8,noverlap=-1)
@show mean(x)
@test findmax(x) ≈ (1.0, 14) atol=0.15
@test mean(x) < 0.25
end
@testset "plots" begin
@info "testing plots"
y = randn(1000)
@show plot(periodogram(y))
@show plot(spectrogram(y))
@show plot(melspectrogram(y))
@show plot(periodogram(filtfilt(ones(4), [4], y)))
@show plot(welch_pgram(y))
end
@testset "Lasso" begin
    @info "Testing Lasso"
    # Sparse (L1 / group-lasso) spectral estimation tests live in a separate file.
    include("test_lasso.jl")
end
@testset "autocov" begin
@info "Testing autocov"
y = repeat([1.,0.,-1.],100)
τ,acf = autocov(1:length(y), y, Inf)
acf0 = autocov(y, demean=false)
acfh = [mean(acf[τ.==i]) for i = 0:length(acf0)-1]
@test acfh ≈ acf0 rtol=0.01
@test norm(acfh - acf0) < 0.01
τ,acf = autocor(1:length(y), y, Inf)
acf0 = autocor(y, demean=false)
acfh = [mean(acf[τ.==i]) for i = 0:length(acf0)-1]
@test acfh ≈ acf0 rtol=0.01
@test norm(acfh - acf0) < 0.1
y = randn(100)
τ,acf = autocor(1:length(y), y, Inf)
acf0 = autocor(y, demean=false)
acfh = [mean(acf[τ.==i]) for i = 0:length(acf0)-1]
@test acfh ≈ acf0 rtol=0.01
@test norm(acfh - acf0) < 0.1
y = randn(10)
τ,acf = autocov(1:length(y), y, Inf)
acf0 = autocov(y, demean=false)
acfh = [mean(acf[τ.==i]) for i = 0:length(acf0)-1]
@test acfh ≈ acf0 rtol=0.01
@test norm(acfh - acf0) < 0.2
y = randn(10)
τ,acf = autocor(1:length(y), y, Inf)
acf0 = autocor(y, demean=false)
acfh = [mean(acf[τ.==i]) for i = 0:length(acf0)-1]
@test acfh ≈ acf0 rtol=0.03
@test norm(acfh - acf0) < 0.2
y = [randn(10) for _ in 1:10]
t = reshape(1.0:100,10,10)
t = [t[:,i] for i in 1:size(t,2)]
τ,acf = autocov(t, y, Inf)
acf0 = mean(autocov.(y, demean=false))
acfh = [mean(acf[τ.==i]) for i = 0:length(acf0)-1]
@test acfh ≈ acf0 rtol=0.01
@test norm(acfh - acf0) < 0.2
τ,acf = autocor(t, y, Inf)
acf0 = mean(autocor.(y, demean=false))
acfh = [mean(acf[τ.==i]) for i = 0:length(acf0)-1]
@test acfh ≈ acf0 rtol=0.01
@test norm(acfh - acf0) < 0.2
# See what happens if one series is contant
y = zeros(10)
τ,acf = autocor(1:10, y, Inf)
@test all(acf .== 1)
using LPVSpectral: _autocov, _autocor, isequidistant
@test isequidistant(1:5)
@test isequidistant(1:2:10)
@test !isequidistant(reverse(1:2:10))
@test isequidistant(collect(1:5))
@test isequidistant(collect(1:2:10))
@test isequidistant(collect(1:0.33:10))
res = map(1:10) do _
t = 100rand(100)
@test !isequidistant(t)
t0 = 0:99
y = sin.(0.05 .* t)
y0 = sin.(0.05 .* t0)
τ0, acf0 = autocor(t0, y0, Inf, normalize=true)
τ,acf = autocor(t, y, Inf)
acff = filtfilt(ones(200),[200], acf)
@test count(τ .== 0) == length(y)
# plot(τ[1:10:end],acf[1:10:end])
# plot!(τ0[1:10:end],acf0[1:10:end])
# plot!(τ0[1:10:end],acff[1:10:end])
mean(abs2,acf0-acff) < 0.05
end
@test mean(res) > 0.7
res = map(1:10) do _
t = 100rand(100)
@test !isequidistant(t)
t0 = 0:99
y = sin.(0.05 .* t)
y0 = sin.(0.05 .* t0)
τ0, acf0 = autocov(t0, y0, Inf, normalize=true)
τ,acf = autocov(t, y, Inf)
acff = filtfilt(ones(200),[200], acf)
@test count(τ .== 0) == length(y)
# plot(τ[1:10:end],acf[1:10:end])
# plot!(τ0[1:10:end],acf0[1:10:end])
# plot!(τ0[1:10:end],acff[1:10:end])
mean(abs2,acf0-acff) < 0.025
end
@test mean(res) > 0.7
end
end
| LPVSpectral | https://github.com/baggepinnen/LPVSpectral.jl.git |
|
[
"MIT"
] | 0.3.3 | 90ec6d3d0e421f2ae60250e9692346c0d0fa233e | code | 4329 | using LPVSpectral, Plots, DSP, ProximalOperators
@testset "lasso" begin
function generate_signal(f,w,N, modphase=false)
x = sort(10rand(N)) # Sample points
v = range(0, stop=1, length=N) # Scheduling variable
# generate output signal
dependence_matrix = Float64[f[(i-1)%length(f)+1](v) for v in v, i in eachindex(w)] # N x nw
frequency_matrix = [cos(w*x -0.5modphase*(dependence_matrix[i,j])) for (i,x) in enumerate(x), (j,w) in enumerate(w)] # N x nw
y = sum(dependence_matrix.*frequency_matrix,dims=2)[:] # Sum over all frequencies
y += 0.1randn(size(y))
y,v,x,frequency_matrix, dependence_matrix
end
N = 500 # Number of training data points
f = [v->2v^2, v->2/(5v+1), v->3exp(-10*(v-0.5)^2),] # Functional dependences on the scheduling variable
w = 2π*[2,10,20] # Frequency vector
w_test = 2π*(2:2:25) # Test Frequency vector, set w_test = w For a nice Function visualization
Y,V,X,frequency_matrix, dependence_matrix = generate_signal(f,w,N, true)
λ = 0.02 # Regularization parameter
λs = 5 # Regularization parameter group-lasso
normal = true # Use normalized basis functions
Nv = 50 # Number of basis functions
zeronan(x) = ifelse(x==0, NaN, x)
callback(x,z) = ()#plot(max.(abs2.([complex.(x[1:end÷2], x[end÷2+1:end]) complex.(z[1:end÷2], z[end÷2+1:end])]), 1e-20), reuse=true, show=true)
ses = ls_sparse_spectral_lpv(Y,X,V,w_test,Nv; λ = λs, normalize = normal, tol=1e-8, printerval=100, iters=2000, cb=callback) # Perform LPV spectral estimation
se = ls_spectral_lpv(Y,X,V,w_test,Nv; λ = 0.02, normalize = normal)
xs = LPVSpectral.ls_sparse_spectral(Y,X,1:0.1:25; λ=20, tol=1e-9, printerval=100, iters=9000, μ=0.00001,cb=callback)
# xsi = LPVSpectral.ls_sparse_spectral(Y,X,1:0.1:25; proxg=IndBallL0(6), λ=0.5, tol=1e-9, printerval=100, iters=9000, μ=0.0001,cb=callback)
xsw = ls_windowpsd(Y,X,1:0.5:22; nw=2, estimator=ls_sparse_spectral, λ=0.2, tol=1e-10, printerval=10000, iters=60000, μ=0.0001)
# plot(X,[Y V], linewidth=[1 2], lab=["\$y_t\$" "\$v_t\$"], xlabel=L"$x$ (sampling points)", title=L"Test signal $y_t$ and scheduling signal $v_t$", legend=true, xlims=(0,10), grid=false, c=[:cyan :blue])
plot(se; normalization=:none, dims=2, l=:solid, c = :orange, fillalpha=0.5, nMC = 100, fillcolor=:orange, linewidth=2, bounds=true, lab=reshape(["Est. \$\\omega = $(round(w/π))\\pi \$" for w in w_test],1,:), phase = false)
plot!(ses; normalization=:none, dims=2, l=:solid, c = :green, linewidth=2, lab=reshape(["Est. \$\\omega = $(round(w/π))\\pi \$" for w in w_test],1,:), phase = false)
plot!(V,dependence_matrix, title="Functional dependencies \$A(\\omega,v)\$", xlabel="\$v\$", ylabel="\$A(\\omega,v)\$", c = [:blue], l=:dot, linewidth=2,lab=reshape(["True \$\\omega = $(round(w/π))\\pi\$" for w in w],1,:), grid=false)
## Plot regular spectrum
# spectrum_lpv = psd(se) # Calculate power spectral density
# spectrum_lpvs = psd(ses) # Calculate sparse power spectral density
# fs = N/(X[end]-X[1]) # This is the (approximate) sampling freqency of the generated signal
# spectrum_per = DSP.periodogram(Y, fs=fs)
# spectrum_welch = DSP.welch_pgram(Y, fs=fs)
# plot(2π*collect(spectrum_per.freq), spectrum_per.power, lab="Periodogram", l=:path, m=:none, yscale=:log10, c=:cyan, legend=:bottomright)
# plot!(2π*collect(spectrum_welch.freq), spectrum_welch.power, lab="Welch", l=:path, m=:none, yscale=:log10, linewidth=2, c=:blue)
# plot!(2π*(1:0.1:25), zeronan.(abs2.(xs)), lab="sparse", l=:path, m=:none, yscale=:log10, linewidth=2, c=:magenta)
# plot!(w_test,spectrum_lpv/fs, xlabel=L"\omega [rad/s]", ylabel="Spectral density", ylims=(-Inf,Inf), grid=false, lab="LPV", l=:scatter, m=:o, yscale=:log10, c=:orange)
# plot!(w_test,zeronan.(spectrum_lpvs)./fs, lab="Sparse LPV", l=:scatter, m=:x, c=:green)
# plot!(2π*(1:0.1:25), max.(abs2.(xsi), 1e-15), lab="sparse ind ball", l=:path, m=:none, yscale=:log10, linewidth=2, c=:yellow)
# plot!(2π*(1:0.5:22), zeronan.(abs2.(xsw)), lab="sparse windowed", l=:path, m=:none, yscale=:log10, linewidth=2, c=:orange)
# savetikz("/local/home/fredrikb/phdthesis/spectralest/figs/spectrum_gen2.tex")
end
| LPVSpectral | https://github.com/baggepinnen/LPVSpectral.jl.git |
|
[
"MIT"
] | 0.3.3 | 90ec6d3d0e421f2ae60250e9692346c0d0fa233e | docs | 9553 | # LPVSpectral
[](https://github.com/baggepinnen/LPVSpectral.jl/actions)
[](https://codecov.io/gh/baggepinnen/LPVSpectral.jl)
A toolbox for least-squares spectral estimation, sparse spectral estimation and Linear Parameter-Varying (LPV) spectral estimation. Contains an implementation of the spectral estimation method presented in
[Bagge Carlson et al. "Linear Parameter-Varying Spectral Decomposition." 2017 American Control Conference.](http://lup.lub.lu.se/record/ac32368e-e199-44ff-b76a-36668ac7d595)
```bibtex
@inproceedings{bagge2017spectral,
title = {Linear Parameter-Varying Spectral Decomposition},
author = {Bagge Carlson, Fredrik and Robertsson, Anders and Johansson, Rolf},
booktitle = {2017 American Control Conference (ACC)},
year = {2017},
}
```
Extensions (sparse estimation methods) to the above article were developed in
[Bagge Carlson, F.](https://www.control.lth.se/staff/fredrik-bagge-carlson/), ["Machine Learning and System Identification for Estimation in Physical Systems"](https://lup.lub.lu.se/search/publication/ffb8dc85-ce12-4f75-8f2b-0881e492f6c0) (PhD Thesis 2018).
```bibtex
@thesis{bagge2018,
title = {Machine Learning and System Identification for Estimation in Physical Systems},
author = {Bagge Carlson, Fredrik},
keyword = {Machine Learning,System Identification,Robotics,Spectral estimation,Calibration,State estimation},
month = {12},
type = {PhD Thesis},
number = {TFRT-1122},
institution = {Dept. Automatic Control, Lund University, Sweden},
year = {2018},
url = {https://lup.lub.lu.se/search/publication/ffb8dc85-ce12-4f75-8f2b-0881e492f6c0},
}
```
## Installation
`import Pkg; Pkg.add("LPVSpectral")`
# List of functions
This package provides tools for general least-squares spectral analysis, check out the functions
```
ls_spectral # Least-squares spectral analysis
ls_sparse_spectral # Least-squares sparse (L0) spectral analysis (uses ADMM)
tls_spectral # Total Least-squares spectral analysis
ls_windowpsd # Windowed Least-squares spectral analysis (sparse estimates available, see kwarg `estimator`)
ls_windowcsd # Windowed Least-squares cross-spectral density estimation (sparse estimates available, see kwarg `estimator`)
ls_cohere # Least-squares cross coherence estimation (sparse estimates available, see kwarg `estimator`)
ls_spectral_lpv # LPV spectral decomposition
ls_sparse_spectral_lpv # LPV spectral decomposition with group-lasso penalty on frequencies (uses ADMM)
ls_windowpsd_lpv # Windowed power spectral density estimation with LPV method
mel # Compute Mel projection matrix
melspectrogram # Standard Mel spectrogram
mfcc # Mel cepstrum spectrogram
```
The functions that estimate sparse spectra require the user to manually import `using ProximalOperators`.
All functions have docstrings available in the REPL. The general pattern is
```julia
x,f = ls_XXX(y,t,f=default_freqs(t) [, W]; kwargs...)
```
where `x` are the complex Fourier coefficients and `f` are the frequency points. If no frequency vector is supplied, the default is to assume a sample time of 1 and use an equidistant grid from 0 to 0.5 of `length(t)÷2`.
`W` is an optional weight vector of `length(y)` for weighted least-squares estimation. Some methods accept keyword arguments, these methods are `ls_windowpsd, ls_windowcsd, ls_cohere` and the keywords and their defaults are
`nw = 10, noverlap = -1, window_func=rect, estimator=ls_spectral`.
# Sparse spectral estimation
We provide a number of ways to estimate sparse spectra. These functions require the user to manually load `using ProximalOperators`.
## L₁ regularized spectral estimation
Minimize ||y-Ax||₂² + λ||x||₁ where x are the Fourier coefficients. Promotes a sparse spectrum
```julia
x = ls_sparse_spectral(y,t,ω; proxg=NormL1(λ), tol=1e-9, printerval=1000, iters=30000, μ=0.000001)
```
## L₀ regularized spectral estimation
Minimize ||y-Ax||₂² + λ||x||₀ where x are the Fourier coefficients. Promotes a sparse spectrum
```julia
x = ls_sparse_spectral(y,t,ω; proxg=NormL0(λ), tol=1e-9, printerval=1000, iters=30000, μ=0.000001)
```
## L₀ constrained spectral estimation
Minimize ||y-Ax||₂² s.t. ||x||₀ ≦ r where x are the Fourier coefficients. Enforces an `r`-sparse spectrum
```julia
x = ls_sparse_spectral(y,t,ω; proxg=IndBallL0(r), tol=1e-9, printerval=1000, iters=30000, μ=0.000001)
```
## Sparse LPV spectral estimation
See detailed example below and Bagge 2018.
```julia
se = ls_sparse_spectral_lpv(Y,X,V,ω_test,Nv; λ = 0.1, normalize = normal, tol=1e-8, printerval=100, iters=6000)
```
# LPV spectral estimation
We demonstrate the usage of the package with a simple example using simulated data, details can be found in the paper.
## Signal generation
```julia
using LPVSpectral, Plots, LaTeXStrings, DSP
"""
`y,v,x = generate_signal(f,w,N)`
`f` is a vector of functions `f(v)` that determine the functional dependence of the spectrum upon the velocity, one function for each frequency in `w`; both the amplitude and the phase are determined from these functions.
`w` is a vector of frequencies for which to estimate the spectrum
`y,v,x` are output signal, sample points and scheduling variable respectively
"""
function generate_signal(f,w,N, modphase=false)
x = sort(10rand(N)) # Sample points
v = range(0, stop=1, length=N) # Scheduling variable
# generate output signal
dependence_matrix = Float64[f[(i-1)%length(f)+1](v) for v in v, i in eachindex(w)] # N x nw
frequency_matrix = [cos(w*x -0.5modphase*(dependence_matrix[i,j])) for (i,x) in enumerate(x), (j,w) in enumerate(w)] # N x nw
y = sum(dependence_matrix.*frequency_matrix,dims=2)[:] # Sum over all frequencies
y += 0.1randn(size(y))
y,v,x,frequency_matrix, dependence_matrix
end
N = 500 # Number of training data points
f = [v->2v^2, v->2/(5v+1), v->3exp(-10*(v-0.5)^2),] # Functional dependences on the scheduling variable
w = 2π.*[2,10,20] # Frequency vector
w_test = 2π.*(2:2:25) # Test Frequency vector, set w_test = w for a nice function visualization
Y,V,X,frequency_matrix, dependence_matrix = generate_signal(f,w,N, true)
```
## Signal analysis
We now make use of the spectral estimation method presented in the paper:
```julia
# Options for spectral estimation
λ = 0.02 # Regularization parameter
λs = 1 # Regularization parameter group-lasso
normal = true # Use normalized basis functions
Nv = 50 # Number of basis functions
se = ls_spectral_lpv(Y,X,V,w_test,Nv; λ = λ, normalize = normal) # Perform LPV spectral estimation
ses = ls_sparse_spectral_lpv(Y,X,V,w_test,Nv; λ = λs, normalize = normal, tol=1e-8, printerval=100, iters=6000) # Same as above but with a group-lasso penalty on frequencies, promoting a solution with a sparse set of frequencies. Can be used to identify a sparse spectrum, i.e. to find w among w_test.
```
All that remains now is to visualize the result, along with the result of standard spectral estimation methods.
```julia
plot(X,[Y V], linewidth=[1 2], lab=["\$y_t\$" "\$v_t\$"], xlabel=L"$x$ (sampling points)", title=L"Test signal $y_t$ and scheduling signal $v_t$", legend=true, xlims=(0,10), grid=false, c=[:cyan :blue])
plot(se; normalization=:none, dims=2, l=:solid, c = [:red :green :blue], fillalpha=0.5, nMC = 5000, fillcolor=[RGBA(1,.5,.5,.5) RGBA(.5,1,.5,.5) RGBA(.5,.5,1,.5)], linewidth=2, bounds=true, lab=reshape(["Est. \$\\omega = $(round(w/π))\\pi \$" for w in w_test],1,:), phase = false)
plot!(V,dependence_matrix, title=L"Functional dependencies $A(\omega,v)$", xlabel=L"$v$", ylabel=L"$A(\omega,v)$", c = [:red :green :blue], l=:dot, linewidth=2,lab=reshape(["True \$\\omega = $(round(w/π))\\pi\$" for w in w],1,:), grid=false)
# Plot regular spectrum
spectrum_lpv = psd(se) # Calculate power spectral density
spectrum_lpvs = psd(ses) # Calculate sparse power spectral density
fs = N/(X[end]-X[1]) # This is the (approximate) sampling frequency of the generated signal
spectrum_per = DSP.periodogram(Y, fs=fs)
spectrum_welch = DSP.welch_pgram(Y, fs=fs)
plot(2π*collect(spectrum_per.freq), spectrum_per.power, lab="Periodogram", l=:path, m=:none, yscale=:log10, c=:cyan)
plot!(2π*collect(spectrum_welch.freq), spectrum_welch.power, lab="Welch", l=:path, m=:none, yscale=:log10, linewidth=2, c=:blue)
plot!(w_test,spectrum_lpv/fs, xlabel=L"$\omega$ [rad/s]", ylabel="Spectral density", ylims=(-Inf,Inf), grid=false, lab="LPV", l=:scatter, m=:o, yscale=:log10, c=:orange)
plot!(w_test,spectrum_lpvs/fs, lab="Sparse LPV", l=:scatter, m=:o, c=:green)
```



When the three frequencies in w have been identified, `w_test` can be replaced by `w` for a nicer plot. As indicated by the last figure, the sparse estimate using group-lasso is better at identifying the three frequency components present (with a small bias in the estimation of the true frequencies).
# Plotting
This package defines a recipe for plotting of periodogram types from `DSP.jl`. You can thus type
```julia
using LPVSpectral, DSP, Plots
plot(periodogram(y))
plot(welch_pgram(y))
plot(melspectrogram(y)) # melspectrogram, mel, mfcc are defined in this package
```
| LPVSpectral | https://github.com/baggepinnen/LPVSpectral.jl.git |
|
[
"MIT"
] | 0.4.3 | 8a5633b506ea4406eb15de8d6a6416abd358c22a | code | 2908 | __precompile__()
module DocSeeker
export searchdocs
using StringDistances, Hiccup, Requires
using REPL: stripmd
import Markdown
# TODO: figure out how to get something useable out of `DocObj.sig`
# TODO: figure out how to save `sig` and not kill serialization
# A lightweight record describing one documented (or merely defined) binding.
# Instances are serialized into the on-disk doc "database" (see static.jl /
# create_db.jl), so every field must be serialization-friendly.
struct DocObj
    name::String        # name of the binding, e.g. "sin"
    mod::String         # name of the module that owns the binding
    typ::String         # rough kind of the binding, e.g. "Function", "DataType", "Keyword"
    # sig::Any
    text::String        # plain-text docstring ("" if the binding has no docs)
    html::Markdown.MD   # parsed docstring, used for rich rendering
    path::String        # source file of the definition, or "<unknown>"
    line::Int           # line number of the definition (0 if unknown)
    exported::Bool      # whether the binding is exported from its module
end
# Hash over the identifying fields of a `DocObj`. The parsed `html` field is
# deliberately left out (it is derived from the same docstring as `text`),
# keeping this consistent with `==` so equal objects hash equally.
function Base.hash(s::DocObj, h::UInt)
    hash(s.name, hash(s.mod, hash(s.typ, hash(s.text, hash(s.exported, hash(s.line,
        hash(s.path, hash(:DocObj, h))))))))
end
# Structural equality over the same fields that `hash` uses; the parsed `html`
# field is not compared (it is derived from the same docstring as `text`).
function Base.:(==)(a::DocObj, b::DocObj)
    isequal(a.name, b.name) && isequal(a.mod, b.mod) && isequal(a.typ, b.typ) &&
        isequal(a.text, b.text) && isequal(a.path, b.path) && isequal(a.line, b.line) &&
        isequal(a.exported, b.exported)
end
# TODO: better string preprocessing.
"""
    score(needle::AbstractString, s::DocObj, mod::String = "Main", name_only::Bool = false) -> Float64

Score the binding `s` against the search query `needle`; returns a value
between 0 and 1, higher meaning a better match.

The score blends a fuzzy match of `needle` against the binding's name
(JaroWinkler) with a fuzzy match against its docstring text (TokenSet/Jaro),
then applies small multiplicative penalties for case mismatches, missing
docs, unexported bindings, and a module mismatch. With `name_only = true`
only the case-insensitive name similarity is used.
"""
function score(needle::AbstractString, s::DocObj, mod::String = "Main", name_only::Bool = false)
    # empty query: everything in the requested module matches perfectly,
    # everything else not at all
    isempty(needle) && return mod == s.mod ? 1.0 : 0.0
    score = 0.0
    needles = split(needle, ' ')
    # name similarity is only computed for single-word queries
    binding_score = length(needles) > 1 ? 0.0 : compare(needle, s.name, JaroWinkler())
    c_binding_score = length(needles) > 1 ? 0.0 : compare(lowercase(needle), lowercase(s.name), JaroWinkler())
    if name_only
        score = c_binding_score
    else
        docs_score = compare(lowercase(needle), lowercase(s.text), TokenSet(Jaro()))
        # bonus for exact case-insensitive binding match
        binding_weight = c_binding_score == 1.0 ? 0.95 : 0.7
        score += binding_weight*c_binding_score + (1 - binding_weight)*docs_score
    end
    # penalty if cases don't match
    binding_score < c_binding_score && (score *= 0.98)
    # penalty if binding has no docs
    isempty(s.text) && (score *= 0.85)
    # penalty if binding isn't exported
    s.exported || (score *= 0.99)
    # penalty if module doesn't match
    mod ≠ "Main" && mod ≠ s.mod && (score *= 0.75)
    return score
end
# Console rendering: the compact form prints a single "Mod.name @path:line"
# header line; the text/plain form additionally prints the plain-text docs
# after a blank separator line.
function Base.show(io::IO, d::DocObj)
    header = string(d.mod, '.', d.name, " @$(d.path):$(d.line)")
    println(io, header)
end

function Base.show(io::IO, ::MIME"text/plain", d::DocObj)
    show(io, d)
    println(io)
    println(io, d.text)
end
include("introspective.jl")
include("finddocs.jl")
include("static.jl")
include("documenter.jl")
# Module initializer: must be named `__init__` to be run by Julia when the
# package is loaded. The previous `__init()__` typo meant the initializer was
# never registered, so the Atom-specific rendering hook below was never
# installed.
function __init__()
    # improved rendering if used in Atom:
    @require Atom="c52e3926-4ff0-5f6e-af25-54175e0327b1" begin
        function Atom.render(i::Atom.Inline, d::DocObj)
            Atom.render(i, Atom.Tree(span(span(".syntax--support.syntax--function", d.name),
                                          span(" @ $(d.path):$(d.line)")), [Atom.render(i, Atom.renderMD(d.html))]))
        end
    end
end
end # module
| DocSeeker | https://github.com/JunoLab/DocSeeker.jl.git |
|
[
"MIT"
] | 0.4.3 | 8a5633b506ea4406eb15de8d6a6416abd358c22a | code | 623 | using Serialization, Pkg, DocSeeker
db_path(pkg, env) = joinpath(@__DIR__, "..", "db", string(pkg, "-", hash(env), ".db"))
# Build the doc database file for a single package in the project environment
# `env` and serialize it to `db_path(pkg, env)`.
# "Base" is special-cased (no `using` necessary); any other package is loaded
# into `Main` first, which is why this script runs in a throwaway worker
# process (see `docdb_wrapper` in static.jl).
function create_db(pkg, env)
    sympkg = Symbol(pkg)
    db = db_path(pkg, env)
    cd(env) do
        open(db, "w+") do io
            if pkg == "Base"
                serialize(io, DocSeeker.alldocs(Base))
            else
                # load the package and hand the module object to `alldocs`
                mod = Main.eval(quote
                    using $sympkg
                    $sympkg
                end)
                serialize(io, DocSeeker.alldocs(mod))
            end
        end
    end
end
# Script entry point: invoked by `docdb_wrapper` as
# `julia create_db.jl <package> <environment>`.
pkg, env = ARGS
Pkg.activate(env)
create_db(pkg, env)
| DocSeeker | https://github.com/JunoLab/DocSeeker.jl.git |
|
[
"MIT"
"""
    load_documenter_docs(pkg) -> Vector{Tuple{String,String}}

Collect the Documenter markdown sources (or the README fallback) of package
`pkg` as `(path, content)` tuples; empty if no documentation is found.
"""
function load_documenter_docs(pkg)
    docs = docsdir(pkg)
    # return a correctly-typed empty vector (previously an untyped `Any[]`,
    # inconsistent with the `getfiles` return type)
    isempty(docs) && return Tuple{String, String}[]
    getfiles(docs)
end
"""
    getfiles(path, files = Tuple{String, String}[]) -> Vector{Tuple{String,String}}

Recursively collect all markdown files below `path` as `(filepath, content)`
tuples, appending to (and returning) `files`. If `path` is not a directory,
`files` is returned unchanged.
"""
function getfiles(path, files = Tuple{String, String}[])
    isdir(path) || return files
    for entry in readdir(path)
        entry = joinpath(path, entry)
        if isfile(entry) && splitext(entry)[2] == ".md"
            # read as a String -- the old `read(entry)` returned raw bytes
            # (Vector{UInt8}), which cannot be pushed into a
            # Vector{Tuple{String,String}}
            push!(files, (entry, read(entry, String)))
        elseif isdir(entry)
            getfiles(entry, files)
        end
    end
    files
end
"""
    searchfiles(needle, files::Vector{Tuple{String,String}}) -> (scores, files)

Score every `(path, content)` tuple in `files` against the query `needle`
and return the (at most 20) best scores together with the matching files,
best first.
"""
function searchfiles(needle, files::Vector{Tuple{String, String}})
    scores = Float64[]
    for (path, content) in files
        # StringDistances API: `compare(s1, s2, dist)` -- the distance is the
        # *last* argument, consistent with `score` in the main module (the old
        # `compare(dist, s1, s2)` order is the pre-0.6 API)
        push!(scores, compare(needle, content, TokenSet(Jaro())))
    end
    p = sortperm(scores, rev=true)[1:min(20, length(scores))]
    scores[p], files[p]
end
end
searchfiles(needle, pkg::String) = searchfiles(load_documenter_docs(pkg))
"""
    searchfiles(needle)

Search the Documenter docs of every installed package for `needle`.
"""
function searchfiles(needle)
    files = Tuple{String, String}[]
    # enumerate installed packages via the Pkg API (`Pkg.dir` was removed in
    # Julia 1.0); mirrors the package-detection logic in static.jl
    pkgs = isdefined(Pkg, :dependencies) ?
        getfield.(values(Pkg.dependencies()), :name) :
        collect(keys(Pkg.installed()))
    for pkg in pkgs
        append!(files, load_documenter_docs(pkg))
    end
    searchfiles(needle, files)
end
| DocSeeker | https://github.com/JunoLab/DocSeeker.jl.git |
|
[
"MIT"
] | 0.4.3 | 8a5633b506ea4406eb15de8d6a6416abd358c22a | code | 3492 | using URIParser
jlhome() = ccall(:jl_get_julia_home, Any, ())
# Resolve `files...` relative to the julia installation: prefer the source
# tree layout (two levels above the binary), fall back to the release layout
# (`<prefix>/share/julia`).
function basepath(files...)
    fromsource = joinpath(jlhome(), "..", "..")
    fromrelease = joinpath(jlhome(), "..", "share", "julia")
    root = isdir(fromsource) ? fromsource : fromrelease
    normpath(joinpath(root, files...))
end
"""
    pkgrootpath(pkg) -> Union{String,Nothing}

Return the root directory of package `pkg`, or `nothing` if the package is
not installed.
"""
function pkgrootpath(pkg)
    pkgpath = Base.find_package(pkg)
    # package not installed: `find_package` returns `nothing`, which the old
    # `isfile(pkgpath)` check turned into a MethodError
    pkgpath === nothing && return nothing
    isfile(pkgpath) || return nothing
    return normpath(joinpath(dirname(pkgpath), ".."))
end
"""
    docsdir(pkg) -> String

Find the directory containing the documentation for package `pkg`. Will fall back to
returning a package's README.md. Returns an empty `String` if no indication of documentation
is found.
"""
function docsdir(pkg)
    # special case Base: its docs ship with the julia installation
    lowercase(pkg) == "base" && return joinpath(basepath("doc"), "src")
    pkgpath = pkgrootpath(pkg)
    pkgpath === nothing && return ""
    # Documenter.jl default:
    docpath = joinpath(pkgpath, "docs", "src")
    isdir(docpath) && return docpath
    # other possibility:
    docpath = joinpath(pkgpath, "doc", "src")
    isdir(docpath) && return docpath
    # fallback to readme
    readmepath = joinpath(pkgpath, "README.md")
    return isfile(readmepath) ? readmepath : ""
end
# Path of a package's README.md; "" for Base or uninstalled packages.
function readmepath(pkg)
    lowercase(pkg) == "base" && return ""
    root = pkgrootpath(pkg)
    # package not installed
    root === nothing && return ""
    return joinpath(root, "README.md")
end
"""
    docsurl(pkg) -> String

Return the most likely candidate for a package's online documentation or an empty string.
Determined by collecting documentation-looking links from the README
(`finddocsURL`) and picking the one with the dominant host (`baseURL`).
"""
docsurl(pkg) = baseURL(finddocsURL(pkg))
"""
    baseURL(links::Vector{Markdown.Link}) -> String

Find the most common host among `links` and return the URL of the first link
that actually has that host. Returns an empty string for an empty input.
"""
function baseURL(links::Vector{Markdown.Link})
    isempty(links) && return ""
    length(links) == 1 && return links[1].url
    # find most common host
    hosts = String[URI(link.url).host for link in links]
    candidates = unique(hosts)
    counts = [count(==(host), hosts) for host in candidates]
    besthost = candidates[argmax(counts)]
    # TODO: better heuristic for choosing the right path
    # NOTE: previously this indexed `links` with a permutation of the *unique*
    # hosts, which could return a link whose host is not the most common one
    i = findfirst(==(besthost), hosts)
    return links[i].url
end
"""
    finddocsURL(pkg) -> Vector{Markdown.Link}

Search `pkg`s readme for links to documentation.
"""
function finddocsURL(pkg)
    # Base's docs live at a fixed, well-known URL
    lowercase(pkg) == "base" && return [Markdown.Link("", "https://docs.julialang.org")]
    pkgpath = pkgrootpath(pkg)
    doclinks = Markdown.Link[]
    pkgpath === nothing && return doclinks
    readmepath = joinpath(pkgpath, "README.md")
    isfile(readmepath) || return doclinks
    md = Markdown.parse(String(read(joinpath(pkgpath, "README.md"))))
    # prefer proper markdown links; fall back to bare URLs in the text
    links = findlinks(md)
    isempty(links) && (links = findplainlinks(md))
    # keep only links that look like documentation
    for link in links
        if isdoclink(link)
            push!(doclinks, link)
        end
    end
    doclinks
end
# Extract bare `http(s)://...` URLs from the rendered markdown text as
# synthetic links (text == url). Uses `eachmatch`, as `matchall` was removed
# in Julia 1.0.
function findplainlinks(md)
    text = Markdown.plain(md)
    [Markdown.Link(m.match, m.match) for m in eachmatch(r"(https?:\/\/[^\s]+)\b", text)]
end
# Heuristic: does this link (its text or URL) look like it points at
# documentation? Uses `occursin` -- `contains` did not exist in Julia 1.0-1.4.
function isdoclink(link::Markdown.Link)
    p = lowercase(Markdown.plaininline(link))
    # TODO: could be a bit smarter about this
    occursin("docs", p) || occursin("documentation", p) ||
        occursin("/stable", p) || occursin("/latest", p)
end
# Collect all Markdown.Link nodes of a parsed markdown document. Only
# paragraphs are descended into; every other node type is ignored via the
# catch-all fallback.
function findlinks(mdobj)
    found = Markdown.Link[]
    for item in mdobj.content
        findlinks(item, found)
    end
    found
end

function findlinks(mdobj::Markdown.Paragraph, links)
    foreach(item -> findlinks(item, links), mdobj.content)
end

findlinks(mdobj, links) = nothing
findlinks(mdobj::Markdown.Link, links) = push!(links, mdobj)
| DocSeeker | https://github.com/JunoLab/DocSeeker.jl.git |
|
[
"MIT"
# In-memory cache for `alldocs`: `time` is the `time()` stamp of the last
# refresh and `cache` the docs collected then.
mutable struct GlobalCache
    time::Float64
    cache::Vector{DocObj}
end
const CACHE = GlobalCache(0.0, DocObj[])
CACHETIMEOUT = 30 # s; `alldocs` serves results from the cache for this long
MAX_RETURN_SIZE = 20 # how many results to return at most
"""
    searchdocs(needle::AbstractString; loaded = true, mod = Main,
               maxreturns = MAX_RETURN_SIZE, exportedonly = false,
               name_only = false)

Fuzzily search the documentation of all bindings for `needle` and return a
vector of `(score, DocObj)` tuples, best matches first.

With `loaded = true` only modules loaded in the current session are searched
(via `alldocs`); otherwise the on-disk doc database created by
`createdocsdb()` is used (via `loaddocsdb`).
"""
function searchdocs(needle::AbstractString; loaded::Bool = true, mod::Module = Main,
                    maxreturns::Int = MAX_RETURN_SIZE, exportedonly::Bool = false,
                    name_only::Bool = false)
    loaded ? dynamicsearch(needle, mod, exportedonly, maxreturns, name_only) :
             dynamicsearch(needle, mod, exportedonly, maxreturns, name_only, loaddocsdb())
end
# TODO:
# We may want something like `CodeTools.getmodule` here, so that we can accept `mod` as `String`:
# - then we can correctly score bindings even in unloaded packages
# - it would make `isdefined` checks below more robust -- currently it won't work when e.g.
#   we try to find `Atom.JunoDebugger.isdebugging` given `mod == Atom`
"""
    dynamicsearch(needle, mod = Main, exportedonly = false,
                  maxreturns = MAX_RETURN_SIZE, name_only = false,
                  docs = alldocs(mod)) -> Vector{Tuple{Float64,DocObj}}

Score every binding in `docs` against `needle` (in parallel) and return up to
`maxreturns` `(score, DocObj)` tuples, best first. Results are filtered to
bindings reachable from `mod` and, with `exportedonly = true`, to exported
bindings only.
"""
function dynamicsearch(needle::AbstractString, mod::Module = Main,
                       exportedonly::Bool = false, maxreturns::Int = MAX_RETURN_SIZE,
                       name_only::Bool = false, docs::Vector{DocObj} = alldocs(mod))
    # return a correctly-typed empty vector (previously `DocObj[]`, which gave
    # this function an inconsistent return type on the empty path)
    isempty(docs) && return Tuple{Float64,DocObj}[]
    scores = zeros(size(docs))
    modstr = string(mod)
    # scoring is independent per binding -> parallelize
    Threads.@threads for i in eachindex(docs)
        scores[i] = score(needle, docs[i], modstr, name_only)
    end
    perm = sortperm(scores, rev=true)
    out = [(scores[p], docs[p]) for p in perm]
    # choose the cheapest applicable filter predicate up front
    f = if exportedonly
        if mod == Main
            x -> x[2].exported
        else
            let mod = mod, modstr = modstr
                x -> begin
                    # filters out unexported bindings
                    x[2].exported &&
                    # filters bindings that can be reached from `mod`
                    (
                        isdefined(mod, Symbol(x[2].mod)) ||
                        modstr == x[2].mod # needed since submodules are not defined in themselves
                    )
                end
            end
        end
    else
        if mod == Main
            x -> true
        else
            let mod = mod, modstr = modstr
                x -> begin
                    # filters bindings that can be reached from `mod`
                    isdefined(mod, Symbol(x[2].mod)) ||
                    modstr == x[2].mod # needed since submodules are not defined in themselves
                end
            end
        end
    end
    filter!(f, out)
    return out[1:min(length(out), maxreturns)]
end
# Collect the names defined in (transitively) all loaded modules.
# Returns a Dict mapping each visited module to the Set of symbols it defines;
# with `exportedonly = true` only exported names are recorded.
# NOTE(review): the first loop walks `Base.loaded_modules_array()` and thereby
# shadows the `mod` argument, so *every* loaded module is visited regardless
# of the entry point -- acknowledged as crude by the comment below.
function modulebindings(mod, exportedonly = false, binds = Dict{Module, Set{Symbol}}(), seenmods = Set{Module}())
    # This does fairly stupid things, but whatever. Works for now.
    for mod in Base.loaded_modules_array()
        mod in seenmods && continue
        push!(seenmods, mod)
        modulebindings(mod, exportedonly, binds, seenmods)
    end
    for name in names(mod, all=!exportedonly, imported=!exportedonly)
        # skip compiler-generated (gensym'd) names
        startswith(string(name), '#') && continue
        if isdefined(mod, name) && !Base.isdeprecated(mod, name)
            obj = getfield(mod, name)
            !haskey(binds, mod) && (binds[mod] = Set{Symbol}())
            push!(binds[mod], name)
            # recurse into submodules we haven't seen yet
            if (obj isa Module) && !(obj in seenmods)
                push!(seenmods, obj)
                modulebindings(obj, exportedonly, binds, seenmods)
            end
        end
    end
    return binds
end
"""
    alldocs(topmod = Main) -> Vector{DocObj}

Find all docstrings in all currently loaded Modules.

Results are cached in `CACHE` for `CACHETIMEOUT` seconds, since a full crawl
of every loaded module is expensive.
"""
function alldocs(topmod = Main)::Vector{DocObj}
    # serve from the cache while it is fresh
    time() - CACHE.time < CACHETIMEOUT && return CACHE.cache
    results = DocObj[]
    # all bindings
    modbinds = modulebindings(topmod, false)
    # exported bindings only
    exported = modulebindings(topmod, true)
    # loop over all loaded modules
    for mod in keys(modbinds)
        parentmod = parentmodule(mod)
        meta = Docs.meta(mod)
        # loop over all names handled by the docsystem
        for b in keys(meta)
            # kick everything out that is handled by the docsystem
            haskey(modbinds, mod) && delete!(modbinds[mod], b.var)
            haskey(exported, mod) && delete!(exported[mod], b.var)
            # exported either from the module itself or from its parent
            expb = (haskey(exported, mod) && (b.var in exported[mod])) ||
                   (haskey(exported, parentmod) && (b.var in exported[parentmod]))
            multidoc = meta[b]
            # one DocObj per documented signature
            for sig in multidoc.order
                d = multidoc.docs[sig]
                md = Markdown.parse(join(d.text, ' '))
                text = stripmd(md)
                path = d.data[:path] == nothing ? "<unknown>" : d.data[:path]
                dobj = DocObj(string(b.var), string(b.mod), string(determinetype(b.mod, b.var)),
                              # sig,
                              text, md, path, d.data[:linenumber], expb)
                push!(results, dobj)
            end
        end
        # resolve everything that is not caught by the docsystem
        for name in modbinds[mod]
            # (kept for the commented-out method lookup below)
            b = Docs.Binding(mod, name)
            # figure out how to do this properly...
            expb = (haskey(exported, mod) && (name in exported[mod])) ||
                   (haskey(exported, parentmod) && (name in exported[parentmod]))
            if isdefined(mod, name) && !Base.isdeprecated(mod, name) && name != :Vararg
                # HACK: For now we don't need this -> free 50% speedup.
                # bind = getfield(mod, name)
                # meths = methods(bind)
                # if !isempty(meths)
                #   for m in meths
                #     dobj = DocObj(string(name), string(mod), string(determinetype(mod, name)),
                #                   "", Hiccup.div(), m.file, m.line, expb)
                #     push!(results, dobj)
                #   end
                # else
                #   dobj = DocObj(string(name), string(mod), string(determinetype(mod, name)),
                #                 "", Markdown.parse(""), "<unknown>", 0, expb)
                #   push!(results, dobj)
                # end
                # undocumented binding: record it with empty docs
                dobj = DocObj(string(name), string(mod), string(determinetype(mod, name)),
                              "", Markdown.parse(""), "<unknown>", 0, expb)
                push!(results, dobj)
            end
        end
    end
    # language keywords are documented outside the normal docsystem
    append!(results, keywords())
    results = unique(results)
    # update cache
    CACHE.time = time()
    CACHE.cache = results
    return results
end
# Turn Julia's keyword documentation (`Docs.keywords`) into `DocObj`s so that
# keywords like `for` or `try` show up in search results as well.
function keywords()
    kwdocs = DocObj[]
    for (kw, doc) in Docs.keywords
        parsed = Markdown.parse(join(doc.text, ' '))
        push!(kwdocs, DocObj(string(kw), "Base", "Keyword", stripmd(parsed), parsed, "", 0, true))
    end
    return kwdocs
end
# Rough classification of the binding `var` in module `mod`: "Function" for
# callables, "DataType" for parametric type constructors (UnionAll), otherwise
# the name of the value's type; "" for undefined or deprecated bindings.
function determinetype(mod, var)
    if !isdefined(mod, var) || Base.isdeprecated(mod, var)
        return ""
    end
    binding = getfield(mod, var)
    if binding isa Function
        return "Function"
    elseif binding isa UnionAll
        return "DataType"
    else
        return string(typeof(binding))
    end
end
| DocSeeker | https://github.com/JunoLab/DocSeeker.jl.git |
|
[
"MIT"
] | 0.4.3 | 8a5633b506ea4406eb15de8d6a6416abd358c22a | code | 2439 | using Pkg
using Base.Iterators: flatten
using Serialization: serialize, deserialize
include("utils.jl")
const PROGRESS_ID = "docseeker_progress"
DOCDBCACHE = DocObj[]
# Worker behind `createdocsdb`: builds the doc database for every installed
# package (plus Base) by running one `create_db.jl` subprocess per package,
# reporting progress via `@info ... progress=... _id=PROGRESS_ID` log records.
function _createdocsdb()
    @info "Docs" progress=0 _id=PROGRESS_ID
    PKGSDONE[] = 0
    try
        # `Pkg.dependencies` exists on newer Julia versions, `Pkg.installed`
        # is the older equivalent
        pkgs = if isdefined(Pkg, :dependencies)
            getfield.(values(Pkg.dependencies()), :name)
        else
            collect(keys(Pkg.installed()))
        end
        pushfirst!(pkgs, "Base")
        ondone = (i, el) -> progress_callback(i, el, pkgs)
        run_queued(docdb_wrapper, pkgs, ondone = ondone)
    catch err
        @error err
    finally
        # always close the progress bar, even on failure
        @info "" progress=1 _id=PROGRESS_ID
    end
end
const PKGSDONE = Ref(0)
# Emit one progress log record after package `el` has finished; `i` is unused
# but kept for the `run_queued` callback signature.
function progress_callback(i, el, pkgs)
    done = (PKGSDONE[] += 1)
    total = length(pkgs)
    @info "Docs: $el ($done/$total)" progress=done/total _id=PROGRESS_ID
end
# Build the `(command, kwargs)` pair that `run_queued` forwards to
# `run_with_timeout`: a fresh julia process running create_db.jl for `pkg` in
# the currently active project environment, logging into the db directory.
function docdb_wrapper(pkg)
    project_env = dirname(Base.active_project())
    worker = joinpath(@__DIR__, "create_db.jl")
    julia_exe = first(Base.julia_cmd())
    command = `$julia_exe --compiled-modules=no -O0 $worker $pkg $project_env`
    log = joinpath(@__DIR__, "..", "db", string(pkg, "-", hash(project_env), ".log"))
    return command, Dict(:log => log)
end
"""
    createdocsdb()

Asynchronously create a "database" of all local docstrings of the installed
packages. This is done by loading all packages and using introspection to
retrieve the docstrings -- the obvious limitation is that only packages that
actually load without errors are considered.
"""
function createdocsdb()
    dbdir = joinpath(@__DIR__, "..", "db")
    # wipe any previous database and log files before regenerating
    for file in readdir(dbdir)
        if endswith(file, ".db") || endswith(file, ".log")
            rm(joinpath(dbdir, file))
        end
    end
    @async _createdocsdb()
    nothing
end
"""
    loaddocsdb() -> Vector{DocObj}

Retrieve the docstrings from the "database" created by [`createdocsdb()`](@ref).

Throws an `ErrorException` when the database is empty or has not been
generated yet (e.g. while [`createdocsdb()`](@ref) is still running).
"""
function loaddocsdb()
    global DOCDBCACHE
    # lazily populate the in-memory cache from the on-disk files
    isempty(DOCDBCACHE) && (DOCDBCACHE = _loaddocsdb())
    length(DOCDBCACHE) == 0 &&
        throw(ErrorException("Please regenerate the doc cache by calling `DocSeeker.createdocsdb()`."))
    DOCDBCACHE
end
# Deserialize all per-package `.db` files into a single deduplicated vector.
# Files that fail to deserialize (e.g. written by an incompatible Julia
# version, or currently being written) are deliberately skipped -- best effort.
function _loaddocsdb()
    dbdir = joinpath(@__DIR__, "..", "db")
    docs = DocObj[]
    for file in readdir(dbdir)
        endswith(file, ".db") || continue
        try
            append!(docs, deserialize(joinpath(dbdir, file)))
        catch err
            # @error err, file
        end
    end
    unique!(docs)
    return docs
end
| DocSeeker | https://github.com/JunoLab/DocSeeker.jl.git |
|
[
"MIT"
"""
    run_with_timeout(command; log=stdout, timeout=10*60, name="", wait_time=1,
                     verbose=true, kill_timeout=60, ondone=() -> nothing)

Run `command` asynchronously, forwarding its output to `log` (an IO or a file
path). The process is terminated when it prints nothing for `timeout` seconds
(any output resets the clock) and force-killed `kill_timeout` seconds later if
it ignores the termination request. `ondone()` is invoked once the monitoring
loop finishes. Returns `(process, task)`.
"""
function run_with_timeout(
        command; log=stdout, timeout = 10*60, name = "",
        wait_time = 1, verbose = true, kill_timeout = 60,
        # NOTE: the default used to be `identity`, which cannot be called with
        # zero arguments and thus always threw when no callback was supplied
        ondone = () -> nothing
    )
    out_io = IOBuffer()
    err_io = IOBuffer()
    out_file, err_file = "", ""
    # Julia < 1.1 cannot redirect process output into an IOBuffer, so spill
    # into temp files there instead
    if VERSION < v"1.1"
        out_file, out_io = mktemp()
        err_file, err_io = mktemp()
    end
    pipe = pipeline(command, stdout = out_io, stderr = err_io)
    process = run(pipe, wait = false)
    if VERSION < v"1.1"
        out_io = open(out_file)
        err_io = open(err_file)
    end
    timeout_start = time()
    task = @async begin
        logfallback = false
        io = try
            log isa String ? open(log, "w") : log
        catch err
            @error "Error opening logfile, falling back to stdout" error=err
            logfallback = true
            stdout
        end
        try
            tstart = time()
            verbose && @info("starting $name")
            while process_running(process)
                elapsed = (time() - timeout_start)
                if elapsed > timeout
                    verbose && @info("Terminating $name")
                    kill(process)
                    # Handle scenarios where SIGTERM is blocked/ignored/handled by the process
                    start_time = time()
                    while process_running(process)
                        if time() - start_time > kill_timeout
                            verbose && @info("Killing $name")
                            # force kill; `kill` takes the signal as a
                            # *positional* argument (there is no `signum`
                            # keyword) -- 9 == SIGKILL
                            kill(process, 9)
                        end
                        sleep(5)
                    end
                    break
                end
                # NOTE: fixed the swapped destructuring here -- `out_io` holds
                # stdout and `err_io` holds stderr
                outstr, errstr = readstr_buffer.((out_io, err_io))
                is_silent = length(errstr) == 0 && length(outstr) == 0
                isempty(outstr) || println(io, outstr)
                isempty(errstr) || println(io, errstr)
                # if something printed reset timeout
                if !is_silent
                    timeout_start = time()
                end
                sleep(wait_time)
            end
            ondone()
            verbose && @info("$name completed in $(round(time() - tstart, digits=1)) seconds")
        catch err
            @error "Error while running $(name) with timeout." error=err
        finally
            # drain whatever output is left and close the log file if we opened it
            outstr, errstr = readstr_buffer.((out_io, err_io))
            isempty(outstr) || println(io, outstr)
            isempty(errstr) || println(io, errstr)
            flush(io)
            if log isa String && !logfallback
                close(io)
            end
        end
    end
    return process, task
end
"""
    run_queued(f, arr; sleeptime=1, processes=3, ondone=(i, el) -> nothing)

Process the elements of `arr` with at most `processes` concurrent worker
processes. For each element, `f(el)` must return `(command, kwargs)` which is
forwarded to `run_with_timeout`; `ondone(i, el)` is called when element `el`
(at index `i`) has finished. Blocks until all spawned processes have exited.
"""
function run_queued(f, arr; sleeptime = 1, processes = 3,
                    # NOTE: the default used to be `identity`, which cannot be
                    # called with two arguments and thus always threw when no
                    # callback was supplied
                    ondone = (i, el) -> nothing)
    process_queue = []
    for (i, el) in enumerate(arr)
        # throttle: wait until a slot in the queue frees up
        while length(process_queue) >= processes
            filter!(process_running, process_queue)
            sleep(sleeptime)
        end
        args = f(el)
        process, task = run_with_timeout(first(args); args[2]..., name=el, ondone = () -> ondone(i, el), verbose=false)
        push!(process_queue, process)
    end
    for proc in process_queue
        wait(proc)
    end
end
# Drain the captured process output as a String: temp-file backed streams are
# read to EOF, in-memory buffers are emptied via `take!`.
readstr_buffer(x::IOStream) = read(x, String)
readstr_buffer(x::Base.GenericIOBuffer{Array{UInt8,1}}) = String(take!(x))
| DocSeeker | https://github.com/JunoLab/DocSeeker.jl.git |
|
[
"MIT"
] | 0.4.3 | 8a5633b506ea4406eb15de8d6a6416abd358c22a | code | 775 | import DocSeeker: dynamicsearch
# Test helper: true iff every name in `desired` appears among the names of the
# first `N` search results (each result is a `(score, DocObj)` tuple).
function firstN(matches, desired, N = 3)
    names = [m[2].name for m in matches[1:N]]
    return all(d -> d in names, desired)
end
# Smoke tests against the live doc system of the current session.
@testset "dynamicsearch" begin
    @test firstN(dynamicsearch("sine"), ["sin", "sind", "asin"], 20)
    @test firstN(dynamicsearch("regular expression"), ["match", "eachmatch", "replace"], 20)
    @test dynamicsearch("Real")[1][2].name == "Real"
    @test length(dynamicsearch("Real")[1][2].text) > 0
    @test dynamicsearch("regex")[1][2].name == "Regex"
    # `Base.download` must show up exactly once (i.e. no duplicate DocObjs)
    let downloadsearch = dynamicsearch("download")
        dfound = 0
        for d in downloadsearch
            if d[2].name == "download" && d[2].mod == "Base"
                dfound += 1
            end
        end
        @test dfound == 1
    end
end
| DocSeeker | https://github.com/JunoLab/DocSeeker.jl.git |
|
[
"MIT"
] | 0.4.3 | 8a5633b506ea4406eb15de8d6a6416abd358c22a | code | 327 | import DocSeeker: baseURL, finddocsURL, readmepath
@testset "finddocs" begin
    @testset "finddocsURL" begin
        # Base is special-cased to the official manual URL
        @test baseURL(finddocsURL("base")) == "https://docs.julialang.org"
    end
    @testset "readmepath" begin
        # relies on this file living in the package's test/ directory
        @test readmepath("DocSeeker") == abspath(joinpath(@__DIR__, "..", "README.md"))
    end
end
| DocSeeker | https://github.com/JunoLab/DocSeeker.jl.git |
|
[
"MIT"
] | 0.4.3 | 8a5633b506ea4406eb15de8d6a6416abd358c22a | code | 79 | using DocSeeker
using Test
include("dynamicsearch.jl")
include("finddocs.jl")
| DocSeeker | https://github.com/JunoLab/DocSeeker.jl.git |
|
[
"MIT"
] | 0.4.3 | 8a5633b506ea4406eb15de8d6a6416abd358c22a | docs | 1715 | # DocSeeker
[](https://travis-ci.org/JunoLab/DocSeeker.jl)
DocSeeker.jl provides utilities for handling documentation in local (so far) packages.
### Usage
The main entry point is `searchdocs(needle::AbstractString)`:
```julia
searchdocs("sin")
```
will return a vector of tuples of scores and their corresponding match. Scores are numbers
between 0 and 1, and represent the quality of a given match. Matches are `DocObj`, which
accumulate lots of metadata about a binding (e.g. name, type, location etc.).
`searchdocs` takes three keyword arguments:
- `loaded::Bool = true` will search only packages in the current session, while `loaded = false` will search in *all* locally installed packages (actually only those in `Pkg.installed()`). Requires a call to `DocSeeker.createdocsdb()` beforehand.
- `mod::Module = Main` will filter out bindings that can't be reached from the given module -- by default every loaded package will be searched.
- `maxreturns::Int = DocSeeker.MAX_RETURN_SIZE = 20` will specify the maximum number of the results
- `exportedonly::Bool = false` will search all names a module has, while `exportedonly = true` only takes exported names into consideration.
- `name_only::Bool = false` will respect equalities between `needle` and both a binding's name and its doc text, while `name_only = true` only respects a equality to a binding's name.
Re-generation of the cache that powers the search in all installed packages can be triggered
via `DocSeeker.createdocsdb()` (async, so no worries about killing you julia session). For now,
there is *no* automatic re-generation, though that'll be implemented soon.
| DocSeeker | https://github.com/JunoLab/DocSeeker.jl.git |
|
[
"MIT"
] | 1.0.0 | eacd96906188a977d9cd4b00c48552cdd46e5f47 | code | 2384 | module CancellationTokens
export CancellationTokenSource, get_token, is_cancellation_requested, cancel, OperationCanceledException
include("event.jl")
@enum CancellationTokenSourceStates NotCanceledState=1 NotifyingState=2 NotifyingCompleteState=3
# Producer side of the cancellation handshake: holds the tri-state lifecycle
# (`NotCanceledState` -> `NotifyingState` -> `NotifyingCompleteState`), an
# optional timeout `Timer`, and a lazily created kernel `Event` that waiting
# tasks block on.
mutable struct CancellationTokenSource
    _state::CancellationTokenSourceStates
    _timer::Union{Nothing,Timer}
    _kernel_event::Union{Nothing,Event} # TODO Event is Julia > 1.1, make it work on 1.0
    function CancellationTokenSource()
        return new(NotCanceledState, nothing, nothing)
    end
end
# Construct a source that cancels itself automatically after
# `timespan_in_seconds` seconds.
function CancellationTokenSource(timespan_in_seconds::Real)
    source = CancellationTokenSource()
    source._timer = Timer(_ -> _internal_notify(source), timespan_in_seconds)
    return source
end
# Transition the source into the canceled state exactly once: stop a pending
# timeout timer, wake all waiters via the kernel event, then mark notification
# complete. Calls after cancellation are no-ops thanks to the initial state
# check.
function _internal_notify(x::CancellationTokenSource)
    if x._state==NotCanceledState
        x._state = NotifyingState
        if x._timer!==nothing
            close(x._timer)
            x._timer = nothing
        end
        if x._kernel_event!==nothing
            notify(x._kernel_event)
            x._kernel_event = nothing
        end
        x._state = NotifyingCompleteState
    end
end
"""
    cancel(x::CancellationTokenSource)

Request cancellation: all tokens derived from `x` become canceled and any
task waiting on them is woken up. Safe to call more than once.
"""
function cancel(x::CancellationTokenSource)
    _internal_notify(x)
    return
end

# `true` once cancellation has been requested (notifying or complete).
is_cancellation_requested(x::CancellationTokenSource) = x._state > NotCanceledState

# Lazily create the kernel event that waiters block on.
# NOTE(review): if this runs concurrently with `_internal_notify` on another
# thread, a freshly created event might never be notified; under cooperative
# single-threaded task scheduling there is no yield point in between, so this
# is presumably safe -- verify before relying on it across threads.
function _waithandle(x::CancellationTokenSource)
    if x._kernel_event===nothing
        x._kernel_event = Event()
    end
    return x._kernel_event
end
# CancellationToken: the consumer-side handle -- a thin immutable wrapper that
# only exposes querying and waiting, not cancelling.
struct CancellationToken
    _source::CancellationTokenSource
end

# Derive a token from a source.
get_token(x::CancellationTokenSource) = CancellationToken(x)

is_cancellation_requested(x::CancellationToken) = is_cancellation_requested(x._source)

_waithandle(x::CancellationToken) = _waithandle(x._source)

# Block the current task until cancellation is requested; returns immediately
# if it already was.
function Base.wait(x::CancellationToken)
    if is_cancellation_requested(x)
        return
    else
        wait(_waithandle(x))
    end
end
# OperationCanceledException
# Thrown by cancellable operations (e.g. `sleep(sec, token)`) when the supplied
# token is cancelled before the operation completes.
struct OperationCanceledException <: Exception
_token::CancellationToken
end
# The token whose cancellation triggered this exception.
get_token(x::OperationCanceledException) = x._token
"""
    CancellationTokenSource(tokens::CancellationToken...)

Create a linked source that cancels as soon as any of `tokens` is cancelled.
Each link is a background task that waits on one token.
"""
function CancellationTokenSource(tokens::CancellationToken...)
    linked = CancellationTokenSource()
    for tok in tokens
        @async begin
            wait(tok)
            _internal_notify(linked)
        end
    end
    return linked
end
include("augment_base.jl")
end # module
| CancellationTokens | https://github.com/davidanthoff/CancellationTokens.jl.git |
|
[
"MIT"
] | 1.0.0 | eacd96906188a977d9cd4b00c48552cdd46e5f47 | code | 571 | function Base.sleep(sec::Real, token::CancellationToken)
# Create a cancel source with a timeout
timer_src = CancellationTokenSource(sec)
timer_token = get_token(timer_src)
# Create a cancel source that cancels either if the timeout source cancels,
# or when the passed token cancels
combined = CancellationTokenSource(timer_token, token)
# Wait for the combined source to cancel
wait(get_token(combined))
if is_cancellation_requested(timer_src)
return
else
throw(OperationCanceledException(token))
end
end
| CancellationTokens | https://github.com/davidanthoff/CancellationTokens.jl.git |
|
[
"MIT"
] | 1.0.0 | eacd96906188a977d9cd4b00c48552cdd46e5f47 | code | 1016 | @static if VERSION < v"1.1"
# Backport of a level-triggered event for Julia < 1.1: once `set`, it stays set
# and all current/future waiters return immediately.
mutable struct Event
# protects q and set
lock::Base.Threads.Mutex
# tasks currently parked in wait(e)
q::Vector{Task}
# whether the event has been notified
set::Bool
# TODO: use a Condition with its paired lock
Event() = new(Base.Threads.Mutex(), Task[], false)
end
# Block the calling task until the event is set (backport for Julia < 1.1).
function Base.wait(e::Event)
# fast path: already set, no locking needed
e.set && return
lock(e.lock)
while !e.set
ct = current_task()
push!(e.q, ct)
# release the lock before parking so notify() can make progress
unlock(e.lock)
try
wait()
catch
# interrupted while parked: remove ourselves from the queue, then rethrow
filter!(x->x!==ct, e.q)
rethrow()
end
lock(e.lock)
end
unlock(e.lock)
return nothing
end
# Set the event and schedule every parked waiter (backport for Julia < 1.1).
# Subsequent notify calls are no-ops because `set` is latched.
function Base.notify(e::Event)
lock(e.lock)
if !e.set
e.set = true
for t in e.q
schedule(t)
end
empty!(e.q)
end
unlock(e.lock)
return nothing
end
elseif VERSION < v"1.2"
using Base.Threads: Event
else
using Base: Event
end
| CancellationTokens | https://github.com/davidanthoff/CancellationTokens.jl.git |
|
[
"MIT"
] | 1.0.0 | eacd96906188a977d9cd4b00c48552cdd46e5f47 | code | 741 | using Test: get_test_counts
using CancellationTokens
using Test
@testset "CancellationTokens" begin
src = CancellationTokenSource()
cancel(src)
wait(get_token(src))
src = CancellationTokenSource()
@async begin
sleep(0.1)
cancel(src)
end
wait(get_token(src))
src = CancellationTokenSource(0.1)
wait(get_token(src))
src = CancellationTokenSource()
@async begin
sleep(0.1)
cancel(src)
end
wait(get_token(src))
src = CancellationTokenSource()
sleep(0.1, get_token(src))
src = CancellationTokenSource()
@async begin
sleep(0.1)
cancel(src)
end
@test_throws OperationCanceledException sleep(20.0, get_token(src))
end
| CancellationTokens | https://github.com/davidanthoff/CancellationTokens.jl.git |
|
[
"MIT"
] | 1.0.0 | eacd96906188a977d9cd4b00c48552cdd46e5f47 | docs | 383 | # CancellationTokens
A Julia implementation of .Net's Cancellation Framework. See [here](https://devblogs.microsoft.com/pfxteam/net-4-cancellation-framework/) and [here](https://docs.microsoft.com/en-us/dotnet/standard/threading/cancellation-in-managed-threads) for details.
The package is currently _not_ thread safe, so it should only be used with single threaded tasks for now.
| CancellationTokens | https://github.com/davidanthoff/CancellationTokens.jl.git |
|
[
"MIT"
] | 1.0.0 | ffa3df6df789a731b70a237846f283802218333e | code | 610 | __precompile__()
module InvariantCausal
using Printf
using LinearAlgebra
import Statistics: var, cov, quantile, mean, median
export causalSearch, screen_lasso, screen_HOLP, two_sample_chow, sukhatme_fisher_test, conditional_inv_test_chow, conditional_inv_test_logistic,
GaussianSEM, simulate, causes, cov, random_gaussian_SEM, random_noise_intervened_SEM
include("conditionalInvTests.jl")
include("causalSearch.jl")
include("SEM.jl")
include("screening.jl")
# Run the package's full (slow) test suite from the test directory.
function _test_full()
include(joinpath(@__DIR__, "..", "test", "test_full.jl"))
end
end | InvariantCausal | https://github.com/richardkwo/InvariantCausal.jl.git |
|
[
"MIT"
] | 1.0.0 | ffa3df6df789a731b70a237846f283802218333e | code | 3970 | using StatsBase: sample
using UnicodePlots: spy
using Printf
abstract type SEM end
"""
    GaussianSEM(B, err_var)

Linear Gaussian structural equation model `X = B X + ε` with independent noise
`ε_i ~ N(0, err_var[i])`. `B` must be square and `err_var` strictly positive;
`p` (the number of variables) is derived from `err_var`.
"""
struct GaussianSEM <: SEM
p ::Int64
B ::Matrix{Float64}
err_var ::Vector{Float64}
function GaussianSEM(B, err_var)
@assert size(B, 1) == size(B, 2) == length(err_var)
@assert all(err_var .> 0)
p = length(err_var)
new(p, B, err_var)
end
end
# Pretty-print a GaussianSEM: dimension, sparsity pattern of B (unicode spy
# plot), and the noise variances.
function Base.show(io::IO, sem::GaussianSEM)
    println(io, "Gaussian SEM with $(sem.p) variables:")
    print(io, "B = \n")
    print(io, spy(sem.B))
    print(io, "σ² = $(sem.err_var)")
end
"""
simulate(sem, [n])
simulate(sem, [do_variables, do_values], [n])
Simulate from a Gaussian SEM `sem`. `n` is the sample size.
do-interventions can be performed by specifying vectors of `do_variables` and `do_values`.
"""
function simulate(sem::GaussianSEM)
    # one draw: solve (I - B) x = ε with ε_i ~ N(0, err_var[i])
    noise = sqrt.(sem.err_var) .* randn(sem.p)
    return (I - sem.B) \ noise
end
# One draw under do-interventions: each variable in `do_variables` is pinned to
# the matching entry of `do_values` by zeroing its structural equation and
# replacing its noise term with the intervention value.
function simulate(sem::GaussianSEM, do_variables::Vector{Int64}, do_values::Vector{Float64})
    @assert length(do_variables) == length(do_values)
    noise = sqrt.(sem.err_var) .* randn(sem.p)
    noise[do_variables] .= do_values
    B_do = copy(sem.B)
    B_do[do_variables, :] .= 0
    return (I - B_do) \ noise
end
# Draw `n` i.i.d. samples, one per row (n × p matrix).
# Fix: fill a preallocated matrix instead of `vcat(map(...)'...)`, which splats
# n row vectors into vcat — slow and allocation-heavy for large n.
function simulate(sem::GaussianSEM, n::Int64)
    X = Matrix{Float64}(undef, n, sem.p)
    for i in 1:n
        X[i, :] = simulate(sem)
    end
    return X
end
# Draw `n` i.i.d. samples under the given do-intervention, one per row.
# Fix: preallocate instead of splatting n row vectors into vcat (see simulate(sem, n)).
function simulate(sem::GaussianSEM, do_variables::Vector{Int64}, do_values::Vector{Float64}, n::Int64)
    X = Matrix{Float64}(undef, n, sem.p)
    for i in 1:n
        X[i, :] = simulate(sem, do_variables, do_values)
    end
    return X
end
# Direct causes (structural parents) of variable `i`: indices j with B[i, j] ≠ 0.
function causes(sem::SEM, i::Int64)
    @assert 1 <= i <= sem.p
    return findall(!iszero, sem.B[i, :])
end
# Covariance of X = (I - B)^{-1} ε, i.e. S diag(err_var) S' with S = (I - B)^{-1}.
function cov(sem::GaussianSEM)
    S = inv(I - sem.B)
    return S * Diagonal(sem.err_var) * S'
end
"""
random_gaussian_SEM(p, k; [lb=-2, ub=2, var_min=0.5, var_max=2])
Generate a random-graph acyclic SEM with `p` variables and `k` average degree, and random coefficients.
* `lb`, `ub`: coeff ~ unif[`lb`, `ub`] with random sign
* `var_min`, `var_max`: var of error ~ unif[`var.min`, `var.max`]
"""
function random_gaussian_SEM(p::Int64, k::Int64; lb=-2, ub=2, var_min=0.5, var_max=2)
B = zeros(p, p)
# mark candidate edges: each entry kept with probability 2k/(p-1)
B[rand(p, p) .< 2k / (p-1)] .= 1
# keep only strictly-lower-triangular entries (UpperTriangular includes the
# diagonal), so the implied graph is acyclic
B[UpperTriangular(B).!=0] .= 0
m = sum(B.==1)
# coefficients ~ Unif[lb, ub] scaled by a random sign
B[B.==1] .= (rand(m) * (ub - lb) .+ lb) .* sign.(randn(m))
err_var = rand(p) * (var_max - var_min) .+ var_min
# randomly permute variables to hide the topological order
_order = sample(1:p, p, replace=false)
B = B[_order, _order]
return GaussianSEM(B, err_var)
end
"""
random_noise_intervened_SEM(sem::GaussianSEM, [p_intervened=2, noise_multiplier_min=0.5, noise_multiplier_max=2., avoid=[],
prob_coeff_unchanged=2/3, lb=-2, ub=2])
Produce a new SEM based on original SEM by changing coefficients and noise variances.
* `p_intervened`: randomly choose `p_intervened` variables to intervene; will avoid those specified in `avoid`
* [`noise_multiplier_min`, `noise_multiplier_max`]: interval that noise multiplier is uniformly sampled from
* `prob.coeff.unchanged`: probability that coefficient is not changed
* `[lb, ub]`: if to change, coefficient is drawn uniformly from this interval with random sign
Return: `sem_new`, `intervened_variables`
"""
function random_noise_intervened_SEM(sem::GaussianSEM;
p_intervened=2, noise_multiplier_min=0.5, noise_multiplier_max=2., avoid=[],
prob_coeff_unchanged=2/3, lb=-2, ub=2)
B = copy(sem.B)
p = sem.p
err_var = copy(sem.err_var)
# pick the variables to intervene on, never touching those in `avoid`
vars = sample(setdiff(collect(1:p), avoid), p_intervened, replace=false)
for i in vars
# scale the noise variance by a uniform draw from [min, max]
noise_multiplier = rand() * (noise_multiplier_max - noise_multiplier_min) + noise_multiplier_min
err_var[i] = err_var[i] * noise_multiplier
if rand() > prob_coeff_unchanged
# resample the nonzero incoming coefficients of variable i from Unif[lb, ub]
_J = (1:p)[B[i, :].!=0]
B[i, _J] .= rand(length(_J)) * (ub - lb) .+ lb
end
end
return GaussianSEM(B, err_var), vars
end
| InvariantCausal | https://github.com/richardkwo/InvariantCausal.jl.git |
|
[
"MIT"
] | 1.0.0 | ffa3df6df789a731b70a237846f283802218333e | code | 14307 | using DataStructures: PriorityQueue, enqueue!, dequeue_pair!
using DataFrames: DataFrame, columnindex
using CategoricalArrays: CategoricalArray
"""
Result of `causalSearch`: the estimated causal set `S` (intersection of all
accepted invariant sets), per-variable confidence intervals, traces of the
intervals and p-values over tested sets, and status flags.
"""
struct CausalSearchResult
S ::Union{Vector{Int64}, Vector{Symbol}}
confint ::Matrix{Float64}
trace_confint_min ::Matrix{Float64}
trace_confint_max ::Matrix{Float64}
trace_p_values ::Vector{Float64}
α ::Float64
p ::Int64
variables_considered ::Union{Vector{Int64}, Vector{Symbol}}
selection_only ::Bool
model_reject ::Bool
# full result: S with valid confidence intervals
function CausalSearchResult(S, confint, trace_confint_min, trace_confint_max, trace_p_values, α, p, variables_considered; selection_only=false)
new(collect(S), confint, trace_confint_min, trace_confint_max, trace_p_values, α, p, variables_considered, selection_only, false)
end
# selection-only result: no valid intervals, report (-Inf, Inf) for members of S
function CausalSearchResult(S, trace_confint_min, trace_confint_max, trace_p_values, α, p, variables_considered; selection_only=true)
confint = Matrix{Float64}(undef, length(S), 2)
confint[:, 1] .= -Inf
confint[:, 2] .= Inf
new(collect(S), confint, trace_confint_min, trace_confint_max, trace_p_values, α, p, variables_considered, selection_only, false)
end
# model-rejected result: every candidate set was rejected, S is empty
function CausalSearchResult(trace_confint_min, trace_confint_max, trace_p_values, α, p, variables_considered; selection_only=true)
new(Vector{Int64}(), Matrix{Float64}(undef, 0, 2), trace_confint_min, trace_confint_max, trace_p_values, α, p, variables_considered, selection_only, true)
end
end
# Pretty-print a CausalSearchResult: rejection status, estimated causal
# variables, and (unless selection_only) their confidence intervals.
function Base.show(io::IO, result::CausalSearchResult)
    if result.model_reject
        printstyled("\n * The whole model is rejected! i.e., Y | (all variables) is not invariant.\n", color=:light_magenta, bold=true)
    elseif isempty(result.S)
        # no causal variable found
        printstyled("\n * Found no causal variable (empty intersection).\n", color=:light_magenta, bold=true)
    else
        printstyled("\n * Causal variables include: $(result.S)\n", color=:green, bold=true)
        if result.selection_only
            printstyled(" * No confidence intervals produced in selection_only mode\n", color=:light_blue)
        else
            println(@sprintf("\n %-10s \t %-3s %% \t\t %-3s %%", "variable", result.α * 100, (1 - result.α) * 100))
            for i in result.S
                if isa(i, Int64)
                    j = i
                else
                    # fix: `find` was removed in Julia 1.0 — locate the confint row
                    # corresponding to this symbolic variable with findfirst
                    j = findfirst(isequal(i), result.variables_considered)
                end
                println(@sprintf(" %-10s \t% -04.4f \t% -04.4f", i, result.confint[j, 1], result.confint[j, 2]))
            end
        end
    end
    if length(result.variables_considered) == result.p
        println("\n ⋅ Variables considered include 1:$(result.p)")
    else
        println("\n ⋅ Variables considered include $(result.variables_considered)")
    end
end
"""
causalSearch(X, y, env, [ S=1:size(X,2) ];
α=0.01, method="chow", p_max=8, screen="auto", verbose=true,
selection_only=false, iterate_all=false, n_max_for_exact=5000, max_num_true_causes=Inf)
causalSearch(df::DataFrame, target::(Int64 or Symbol), env; ...)
Searching over subsets in `X[,S]` for direct causes of `y`
# Arguments
* `X`: either an n x p matrix or a `DataFrames.DataFrame`
* `y`: vector of n (`X` and `y` can be alternatively specified by `df` and its column `target`)
* `env`: environment indicators (rows of X): 1, 2, ...
* `S`: set of variables (col indices of X) to search, can be a Vector or a Set
* `α`: significance level (e.g. 0.01)
* `method`:
+ `"chow"` for Gaussian linear regression, combined two-sample chow test
+ `"logistic-LR"` for logistic regression (`y` consists of 0 and 1), combined likelihood-ratio test
+ `"logistic-SF"` for logistic regression (`y` consists of 0 and 1), combined Sukhatme-Fisher test
* `p_max`: maximum number of variables to consider.
will method in `screen` to screen out `p_max` number of variables if `p_max < |S|`.
(set to `Inf` if want no screening)
* `screen`:
+ `"lasso"`: with lasso (from glmnet) solution path (see `screen_lasso`)
+ `"HOLP"`: "High dimensional ordinary least squares projection" method of Wang & Leng, only when p ≧ n (see `screen_HOLP`)
+ `"auto"`: use `"HOLP"` when p > n, and `"lasso"` otherwise
* `verbose`: if true, will print each subset tested
* `selection_only`: if true, will prune supersets of an invariant set;
but not able to produce valid confidence intervals
* `iterate_all`: if true, will iterate over all subsets to ensure validity of confidence intervals
(if model is not rejected)
* `n_max_for_exact`: maximum number of observations of an environment for exact testing;
otherwise a subsample of n_max rows will be used
* `max_num_true_causes`: maximum number of true causal variables; if specified to smaller than `|S|`,
it will skip testing subsets with bigger size than `max_num_true_causes`.
"""
function causalSearch(X::Union{Matrix{Float64}, DataFrame}, y::Vector{Float64}, env::Vector{Int64}, S=1:size(X,2);
α=0.01, method="chow", p_max=8, screen="auto", verbose=true,
selection_only=false, iterate_all=false,
n_max_for_exact=5000, max_num_true_causes=Inf)
@assert size(X, 1) == length(y) == length(env)
# --- set up the model family implied by `method` ---
if method=="chow"
model = "linear"
if isa(X, DataFrame)
X = Matrix{Float64}(X) # note: current linear fitting has to work with Matrix{Float64}
end
elseif method=="logistic-LR" || method=="logistic-SF"
model = "logistic"
# combine into a DataFrame (note: GLM.jl has to work with DataFrame)
@assert all((y.==1) .| (y.==0))
df = DataFrame(isa(X, DataFrame) ? hcat(X, y, makeunique=true) : hcat(X, y))
for _col in propertynames(df)
if isa(df[!, _col], CategoricalArray)
@assert length(unique(df[!, _col])) == 2 "categorical variable $_col should be recoded to binary"
end
end
target = propertynames(df)[end] # target is the last column
else
error("method must be one of: `chow`, `logistic-LR`, `logistic-SF`")
end
S = collect(S)
S = unique(S)
p = size(X, 2)
# --- optional variable screening down to p_max candidates (linear model only) ---
if p_max < length(S)
@assert model=="linear" "screening unsupported for GLM"
q = length(S)
if screen == "auto"
if q <= size(X, 1)
screen = "lasso"
else
screen = "HOLP"
end
end
if screen == "lasso"
S = S[screen_lasso(X[:, S], y, p_max)]
elseif screen == "HOLP"
S = S[screen_HOLP(X[:, S], y, p_max)]
else
error("screen must be one of: `auto`, `lasso`, `HOLP`")
end
printstyled("$(length(S)) variables are screened out from $q variables with $screen: $S\n", color=:blue)
end
variables_considered = S[:]
if max_num_true_causes < length(S)
printstyled("the size of |S| is restricted to ≦ $max_num_true_causes \n", color=:blue)
else
max_num_true_causes = length(S)
end
if iterate_all
selection_only = false
end
# --- accumulators: accepted sets, running intersection of accepted sets,
#     and running union of their confidence intervals ---
accepted_sets = Dict{Union{Vector{Symbol},Vector{Int64}}, Float64}()
n_tested_sets = 0
running_intersection = S
running_confintervals = zeros(p, 2)
running_confintervals[:, 1] .= Inf
running_confintervals[:, 2] .= -Inf
_trace_confint_max = Vector{Vector{Float64}}()
_trace_confint_min = Vector{Vector{Float64}}()
trace_p_values = Vector{Float64}()
n_env = maximum(env)
if method == "chow"
for i in 1:n_env
ni = sum(env.==i)
if ni > n_max_for_exact
printstyled(@sprintf("environment %d has %d obs, subsample of %d is used\n", i, ni, n_max_for_exact), color=:blue)
end
end
end
println(@sprintf "Causal invariance search across %d environments with at α=%s (|S| = %d, method = %s, model = %s)\n" n_env α length(S) method model)
max_num_true_causes < length(S) && printstyled("|S| is restricted to subsets with size ≦ $max_num_true_causes.\n", color=:blue)
# priority queue: S -> -p.value (so sets with higher p-values are tested sooner)
candidate_sets = PriorityQueue{Union{Set{Int64}, Set{Symbol}}, Float64}()
enqueue!(candidate_sets, Set{typeof(S[1])}()=>0.)
size_current_set = 0
# --- breadth-first search over subsets, level by level on subset size ---
while size_current_set <= max_num_true_causes
base_sets = Dict{Union{Set{Int64}, Set{Symbol}}, Float64}() # set -> (-p_value)
while !isempty(candidate_sets)
# dequeue the set with highest p-value
_S, neg_p_value = dequeue_pair!(candidate_sets)
_S_vec = collect(_S) # convert from Set{Int64} to Vector{Int64}
# skip supersets under `selection_only`
selection_only && length(running_intersection)!=length(S) && issubset(running_intersection, _S) && continue
# test conditional invariance
n_tested_sets += 1
if method == "chow"
rej, p_value, conf_intervals = conditional_inv_test_chow(X[:,_S_vec], y, env, n_env, α=α, n_max_for_exact=n_max_for_exact)
elseif method == "logistic-LR"
# target is the last column of df
rej, p_value, conf_intervals = conditional_inv_test_logistic(df, target, _S_vec, env, n_env, α=α,
add_intercept=true, method="logistic-LR")
elseif method == "logistic-SF"
rej, p_value, conf_intervals = conditional_inv_test_logistic(df, target, _S_vec, env, n_env, α=α,
add_intercept=true, method="logistic-SF")
end
base_sets[_S] = -p_value
push!(trace_p_values, p_value)
if !rej
# _S is an invariant set
accepted_sets[_S_vec] = p_value
# running ∩
running_intersection = intersect(running_intersection, _S_vec)
# running ∪
conf_intervals_padded = zeros(size(running_confintervals)) # unincluded variables have [0,0] as confint
if isa(_S_vec, Vector{Int64})
conf_intervals_padded[_S_vec, :] = conf_intervals
else
_idx_vec = [columnindex(df, z) for z in _S_vec]
conf_intervals_padded[_idx_vec, :] = conf_intervals
end
running_confintervals[:, 1] = min.(running_confintervals[:, 1], conf_intervals_padded[:, 1])
running_confintervals[:, 2] = max.(running_confintervals[:, 2], conf_intervals_padded[:, 2])
# keep trace
push!(_trace_confint_min, conf_intervals_padded[:, 1])
push!(_trace_confint_max, conf_intervals_padded[:, 2])
if verbose
println(@sprintf "S = %-40s: p-value = %-1.4f [%1s] ⋂ = %s" (isempty(_S_vec) ? "[]" : _S_vec) p_value (rej ? " " : "*") running_intersection)
end
if isempty(running_intersection) && (!iterate_all)
break # empty, terminate
end
else
if verbose
println(@sprintf "S = %-40s: p-value = %-1.4f [%1s] ⋂ = %s" (isempty(_S_vec) ? "[]" : _S_vec) p_value (rej ? " " : "*") running_intersection)
end
end
end
isempty(running_intersection) && (!iterate_all) && break # have to break twice
# generate sets with size ++
for (_base_S, _neg_p_value) in base_sets
for i in setdiff(S, _base_S)
S_new = union(_base_S, Set([i]))
# skipping supersets of running intersection in `selection_only` mode
selection_only && length(running_intersection)!=length(S) && issubset(running_intersection, S_new) && continue
if haskey(candidate_sets, S_new)
candidate_sets[S_new] = min(candidate_sets[S_new], _neg_p_value)
else
candidate_sets[S_new] = _neg_p_value
end
end
end
size_current_set += 1
end
println("\nTested $n_tested_sets sets: $(length(accepted_sets)) sets are accepted.")
trace_confint_min = hcat(_trace_confint_min...)'
trace_confint_max = hcat(_trace_confint_max...)'
# --- assemble the result, distinguishing model rejection / selection-only / full ---
if isempty(accepted_sets)
# model rejected
return CausalSearchResult(trace_confint_min, trace_confint_max, trace_p_values, α, p, variables_considered; selection_only = selection_only)
elseif selection_only
return CausalSearchResult(running_intersection, trace_confint_min, trace_confint_max, trace_p_values, α, p, variables_considered; selection_only = true)
else
return CausalSearchResult(running_intersection, running_confintervals, trace_confint_min, trace_confint_max, trace_p_values, α, p, variables_considered; selection_only = false)
end
end
function causalSearch(df::DataFrame, target::Union{Int64, Symbol}, env::Vector{Int64};
α=0.01, method="chow", screen="auto", p_max=8, verbose=true,
selection_only=false, iterate_all=false,
n_max_for_exact=5000, max_num_true_causes=Inf)
if isa(target, Int64)
target = propertynames(df)[target]
end
S = setdiff(propertynames(df), [target])
X = df[:, S]
y = df[!, target]
causalSearch(X, y, env, S, α=α, method=method, screen=screen, p_max=p_max, verbose=verbose,
selection_only=selection_only, iterate_all=iterate_all,
n_max_for_exact=n_max_for_exact, max_num_true_causes=max_num_true_causes)
end | InvariantCausal | https://github.com/richardkwo/InvariantCausal.jl.git |
|
[
"MIT"
] | 1.0.0 | ffa3df6df789a731b70a237846f283802218333e | code | 8666 | using Distributions: cdf, FDist, TDist, Chisq, Bernoulli
using StatsBase: sample
using DataFrames: DataFrame
using StatsModels: @formula
using GLM: glm, loglikelihood, coef, confint, predict
"""
conditional_inv_test_chow(X, y, env, n_env; α=0.01)
Test H0: `y` | `X` is invariant under environments by combining leave-one-out chow test.
* `X`: n x p matrix of covariates
* `y`: vector of response
* `env`: vector of environments (1:n_env), corresponding to rows of X
* `n_env`: number of environments
* `α`: significance level
* `n_max_for_exact`: maximum number of observations of an environment for exact testing;
otherwise a subsample of n_max rows will be used
Return: `rej`, `p_value`, `conf_intervals`
* `rej`: false if invariant
* `conf_intervals`: p x 2 matrix, cols = (min, max)
"""
function conditional_inv_test_chow(X::Matrix{Float64}, y::Vector{Float64}, env::Vector{Int64}, n_env::Int64;
                                   α=0.01, n_max_for_exact=5000)
    @assert n_env >= 1
    p_values = ones(n_env)
    p = size(X, 2)
    n_all = size(X, 1)
    for i in 1:n_env
        @assert sum(env.==i) > 0
        # fit on (-env), test on (env)
        idx_in = (1:n_all)[env.==i]
        idx_out = (1:n_all)[env.!=i]
        if length(idx_in) > n_max_for_exact
            # subsample the held-out environment to bound the n2 × n2 cost in the chow test
            idx_in = sample(idx_in, n_max_for_exact, replace=false)
        end
        p_values[i] = two_sample_chow(X[idx_out, :], X[idx_in, :], y[idx_out], y[idx_in];
                                      α=α, add_intercept=true)
        # fix: dropped the spurious single-argument `min(...)` wrapper around the threshold
        if p_values[i] < α / n_env
            break # early termination: Bonferroni-corrected rejection is already certain
        end
    end
    # Bonferroni correction
    p_value = min(minimum(p_values) * n_env, 1.)
    if p_value < α
        # reject
        reject = true
        conf_intervals = zeros(p, 2) # min = max = 0.
    else
        # not reject
        reject = false
        # pool all data and run regression
        X = hcat(X, ones(size(X,1)))
        β = (X' * X) \ (X' * y)
        σ = sqrt(sum((y - X * β).^2) / (n_all - p - 1))
        prec_X = diag(inv(X' * X))
        qt = quantile(TDist(n_all - p - 1), 1 - α / (2 * p))
        conf_left = β - qt * σ * sqrt.(prec_X)
        conf_right = β + qt * σ * sqrt.(prec_X)
        # note: the union of (1-α) conf ints have coverage (1-2α) (Theorem 2 of PBM)
        conf_intervals = hcat(conf_left[1:p], conf_right[1:p])
    end
    return reject, p_value, conf_intervals
end
"""
two_sample_chow(X1, X2, y1, y2; [α=0.01, add_intercept=true])
Two-sample Chow's test of H0: two linear regressions `y1 ~ X1` and `y2 ~ X2` have
the same linear coefficients, assuming Gaussian errors with equal variances.
Will fit on `X1`` and test residual on `X2`.
Choose `X2` to be the sample with smaller n since n2 x n2 matrix inversion is performed.
"""
function two_sample_chow(X1::Matrix{Float64}, X2::Matrix{Float64},
y1::Vector{Float64}, y2::Vector{Float64}; α=0.01, add_intercept=true)
n1 = size(X1, 1)
n2 = size(X2, 1)
if add_intercept
X1 = hcat(X1, ones(n1))
X2 = hcat(X2, ones(n2))
end
p = size(X1, 2)
# fit on X1
β = zeros(p)
try
β = (X1' * X1) \ (X1' * y1)
catch _err
printstyled("encountered $_err in least square for Chow's test\n", color=:light_red)
# NOTE(review): returning p = 0 here forces rejection whenever the solve
# fails (e.g. singular X1'X1) — presumably intentional conservatism; confirm
return 0.
end
# residuals of the X1-fit evaluated on the second sample
res2 = y2 - X2 * β
Σ_res = diagm(ones(n2)) + X2 * ((X1' * X1) \ X2') # inv(A) * B = A \ B
σ2 = var(y1 - X1 * β) * (n1 - 1) / (n1 - p) # should use dof = (n - p) as denominator
chow_stat = res2' * (Σ_res \ res2) / (σ2 * n2) # inv(A) * B = A \ B
# F distribution
ν1 = n2
ν2 = n1 - size(X1, 2)
p_value = 1 - cdf(FDist(ν1, ν2), chow_stat)
return p_value
end
"""
conditional_inv_test_logistic(df, :target, S, env, n_env; α=0.01, add_intercept=true, method="logistic-LR")
Test `H0: y | X` is invariant under environments by combining leave-one-out likelihood ratio test.
The model is logistic regression specified by `fmla`.
* `df`: a DataFrames.DataFrame
* `target`: target variable in df (Symbol)
* `S`: covariates to condition on (`X = df[:,S]`)
* `env`: vector of environments (1:n_env), corresponding to rows of X
* `n_env`: number of environments
* `α`: significance level
* `add_intercept`: add `+ 1` or not in formula
* `method`:
+ `logistic-LR`: likelihood ratio test
+ `logistic-SF`: test equal mean and variance of prediction errors with Sukhatme-Fisher
Return: `rej`, `p_value`, `conf_intervals`
* `rej`: false if invariant
* `conf_intervals`: p x 2 matrix, cols = (min, max)
"""
function conditional_inv_test_logistic(df::DataFrame, target::Symbol, S::Vector{Symbol},
env::Vector{Int64}, n_env::Int64; α=0.01, add_intercept=true, method="logistic-LR")
@assert n_env >= 1
@assert !(target in S)
# add_intercept (Bool) contributes 1 extra degree of freedom when true
p = length(S) + add_intercept
n_all = size(df, 1)
p_values = ones(n_env)
fmla = get_formula(df, target, S, add_intercept=add_intercept)
# fit on pooled data
fit0 = glm(fmla, df, Bernoulli())
# iterate over environments
for i in 1:n_env
@assert sum(env.==i) > 0
if method == "logistic-LR"
# fit separately
fit1 = glm(fmla, df[env.==i, :], Bernoulli())
fit2 = glm(fmla, df[env.!=i, :], Bernoulli())
# log likelihood ratio = 2 log (p(bigger model) / p(smaller model))
lr = 2 * (loglikelihood(fit1) + loglikelihood(fit2) - loglikelihood(fit0))
p_values[i] = 1 - cdf(Chisq(p), lr)
elseif method == "logistic-SF"
# predict and test equal mean of residuals
if p > 1
p_hat = predict(fit0, df)
else
p_hat = predict(fit0)
end
# Pearson residuals of the pooled fit
res = (df[!, target] - p_hat) ./ sqrt.(p_hat .* (1 .- p_hat))
p_values[i] = sukhatme_fisher_test(res[env.==i], res[env.!=i])
else
error("method undefined")
end
if p_values[i] < α / n_env
break # early termination
end
end
# Bonferroni correction
p_value = min(minimum(p_values) * n_env, 1.)
if p_value < α
# reject
reject = true
conf_intervals = zeros(length(S), 2) # min = max = 0.
else
# not reject
reject = false
# the pooled fit is accepted
if length(S) == 0
conf_intervals = zeros(0, 2)
elseif add_intercept
conf_intervals = confint(fit0)[2:end, :] # intercept is always the 1st row
else
conf_intervals = confint(fit0)
end
end
return reject, p_value, conf_intervals
end
# Convenience overload: translate column indices to column names and delegate.
function conditional_inv_test_logistic(df::DataFrame, target::Symbol, S::Vector{Int64},
                                       env::Vector{Int64}, n_env::Int64; α=0.01, add_intercept=true, method="logistic-LR")
    S_names = propertynames(df)[S]
    return conditional_inv_test_logistic(df, target, S_names, env, n_env;
                                         α=α, add_intercept=add_intercept, method=method)
end
"""
get_formula(df::DataFrame, target::Symbol, S=setdiff(names(df), [target]); add_intercept=true)
Generate formula of `y ~ .` type for `df`, where `y` is specified by `target`.
"""
function get_formula(df::DataFrame, target::Symbol, S=setdiff(names(df), [target]); add_intercept=true)
# empty covariate set: intercept-only model
if length(S)==0
return @eval @formula($target ~ 1)
end
# @eval splices the runtime symbols into the @formula macro call, since
# @formula itself needs literal column names at parse time
if add_intercept
return @eval @formula($target ~ +(1, $(S...)))
else
# `+ 0` suppresses the intercept term
return @eval @formula($target ~ +($(S...), 0))
end
end
"""
sukhatme_fisher_test(x, y)
Sukhatme-Fisher test of H0: vectors x and y are two independent normal samples with equal mean and variance
See Perng, S. K., and Ramon C. Littell. "A test of equality of two normal population means and variances."
Journal of the American Statistical Association 71.356 (1976): 968-971.
Return: p-value
"""
function sukhatme_fisher_test(x::Vector{Float64}, y::Vector{Float64})
m = length(x)
n = length(y)
ss1 = var(x) * (m - 1)
ss2 = var(y) * (n - 1)
T = (mean(y) - mean(x)) / sqrt((m + n) * (ss1 + ss2) / ((m + n - 2) * (m * n)))
F = (ss2 * (m-1)) / (ss1 * (n-1))
g = FDist(n-1, m-1)
if F >= median(g)
H = 2 * (1 - cdf(g, F))
else
H = 2 * cdf(g, F)
end
W = - 2 * log(H)
t = TDist(m + n - 2)
w = Chisq(2)
Q = - 2 * log(2 * (1 - cdf(t, abs(T)))) - 2 * log(1 - cdf(w, W))
q = Chisq(4)
return 1 - cdf(q, Q)
end | InvariantCausal | https://github.com/richardkwo/InvariantCausal.jl.git |
|
[
"MIT"
] | 1.0.0 | ffa3df6df789a731b70a237846f283802218333e | code | 1523 | using Printf
using GLMNet: glmnet
"""
screen_lasso(X, y, pmax)
Screening out `pmax` variables by lasso (glmnet) on `X` and `y`.
"""
function screen_lasso(X::Matrix{Float64}, y::Vector{Float64}, pmax::Int64)
fit = glmnet(X, y)
p = size(X, 2)
@assert pmax <= p
betas = fit.betas
# number of active variables at each point of the lasso path
n_vars = sum(betas.!=0, dims=1)[:]
S = Set{Int64}()
# walk the path from sparse to dense, admitting variables in the order they enter
for n in unique(n_vars)
Z = betas[:, n_vars.==n]
non_zeros = sum(Z.!=0, dims=2)[:]
vars = (1:p)[non_zeros .> 0]
new_vars = setdiff(vars, S)
Z = Z[new_vars, :]
if length(S) + length(new_vars) > pmax
# must break ties
# rank entrants by |coefficient| at the densest fit of this path segment
z = abs.(Z[:, end])
idx = sort(1:length(z), by = i -> z[i], rev = true)
new_vars = new_vars[idx[1:(pmax - length(S))]]
end
union!(S, new_vars)
length(S) == pmax && break
end
return sort(collect(S))
end
"""
screen_HOLP(X, y, pmax)
Screen out `pmax` variables with HOLP projection.
See: Wang, Xiangyu, and Chenlei Leng. "High dimensional ordinary least squares projection for screening variables."
Journal of the Royal Statistical Society: Series B (Statistical Methodology) 78.3 (2016): 589-611.
"""
function screen_HOLP(X::Matrix{Float64}, y::Vector{Float64}, pmax::Int64)
n, p = size(X)
@assert p >= n
@assert pmax < p
_X = X .- mean(X, 1)
_X = _X ./ std(_X, 1)
_y = y - mean(y)
β = _X' * ((_X * _X' + 10 * eye(n)) \ _y)
return sortperm(abs.(β), rev=true)[1:pmax]
end | InvariantCausal | https://github.com/richardkwo/InvariantCausal.jl.git |
|
[
"MIT"
] | 1.0.0 | ffa3df6df789a731b70a237846f283802218333e | code | 152 | #!/usr/bin/env julia
using InvariantCausal
using Test
println("Starting tests")
@time begin
include("test_regression.jl")
include("test_search.jl")
end | InvariantCausal | https://github.com/richardkwo/InvariantCausal.jl.git |
|
[
"MIT"
] | 1.0.0 | ffa3df6df789a731b70a237846f283802218333e | code | 8242 | using InvariantCausal
using DelimitedFiles
using Test
include(joinpath(@__DIR__, "test_search.jl"))
include(joinpath(@__DIR__, "test_regression.jl"))
X = readdlm(joinpath(@__DIR__, "X1.dat"))
env = Vector{Int}(X[:,1])
X = X[:,2:end]
S = 1:size(X,2)
@time @testset "causal search" begin
result = map(i -> causalSearch(X, X[:, i], env, setdiff(S,i) , α=0.01), S)
@test result[2].S == [5]
@test result[3].S == [5]
@test result[7].S == [4] || result[7].S == []
for i in [1, 4, 5, 6]
@test result[i].model_reject == true
end
end
@time @testset "causal search with selection_only" begin
result = map(i -> causalSearch(X, X[:, i], env, setdiff(S,i), α=0.01, selection_only=true), S)
@test result[2].S == [5]
@test result[3].S == [5]
@test result[7].S == [4] || result[7].S == []
for i in [1, 4, 5, 6]
@test result[i].model_reject == true
end
end
@time @testset "causal search with limited # of true causes" begin
result = map(i -> causalSearch(X, X[:, i], env, setdiff(S,i), α=0.01, max_num_true_causes=3), S)
@test result[2].S == [5]
@test result[3].S == [5]
@test result[7].S == [4] || result[7].S == []
for i in [1, 4, 5, 6]
@test result[i].model_reject == true
end
end
@time @testset "causal search with subsampling" begin
result = map(i -> causalSearch(X, X[:, i], env, setdiff(S,i), α=0.01, n_max_for_exact=90), S)
@test result[2].S == [5]
@test result[3].S == [5] || result[3].S == []
@test result[7].S == [4] || result[7].S == []
for i in [1, 4, 5, 6]
@test result[i].model_reject == true
end
end
# Sample one random simulation setting from `setting_configs` (a Dict of value
# pools), build the observational SEM and its noise-intervened variants for the
# remaining environments, and return everything in a Dict.
function generate_setting(setting_configs; n_environments=2)
n_int = rand(setting_configs["n_int"])
n_obs = rand(setting_configs["n_obs"])
p = rand(setting_configs["p"])
k = rand(setting_configs["k"])
lb_obs = rand(setting_configs["lb_obs"])
ub_obs = lb_obs + rand(setting_configs["ub_lb_delta_obs"])
_var_1 = rand(setting_configs["err_var_min"])
# NOTE(review): _var_2 is drawn from "err_var_min" as well — possibly meant
# "err_var_max" (both pools happen to be identical in the tests); confirm
_var_2 = rand(setting_configs["err_var_min"])
err_var_min = min(_var_1, _var_2)
err_var_max = max(_var_1, _var_2)
noise_multiplier_min = rand(setting_configs["noise_multiplier_min"])
if rand() < setting_configs["prob_fixed_noise_multiplier"]
noise_multiplier_max = noise_multiplier_min
else
noise_multiplier_max = noise_multiplier_min + rand(setting_configs["noise_multiplier_max_min_delta"])
end
_l = rand(setting_configs["lb_int"])
_u = rand(setting_configs["ub_int"])
lb_int = min(_l, _u)
ub_int = max(_l, _u)
if rand() < setting_configs["prob_int_single"]
n_int_variables = 1
frac_int_variables = 1 / p
else
frac_int_variables = rand(setting_configs["frac_int_variables"])
n_int_variables = floor(Int64, p * frac_int_variables)
end
prob_coefficient_unchanged = setting_configs["prob_coefficient_unchanged"]
target = rand(1:p)
SEMs = Vector{GaussianSEM}(undef, n_environments)
env = repeat([1], inner=n_obs)
intervened_variables = Vector{Vector{Int64}}(undef, n_environments)
intervened_variables[1] = []
# environment 1 is observational; environments 2..n are noise-intervened copies
SEMs[1] = random_gaussian_SEM(p, k,
lb=lb_obs, ub=ub_obs, var_min=err_var_min, var_max=err_var_max)
true_β = SEMs[1].B[target, setdiff(1:p, target)]
true_causes = (1:(p-1))[true_β .!= 0]
for j in 2:n_environments
SEMs[j], intervened_variables[j] = random_noise_intervened_SEM(SEMs[1], p_intervened=n_int_variables,
noise_multiplier_min=noise_multiplier_min, noise_multiplier_max=noise_multiplier_max,
avoid=[target], prob_coeff_unchanged=prob_coefficient_unchanged,
lb=lb_int, ub=ub_int)
env = vcat(env, repeat([j], inner=n_int))
end
return Dict(
"n_environments" => n_environments,
"env" => env,
"SEMs" => SEMs,
"intervened_variables" => intervened_variables,
"target" => target,
"true_causes" => true_causes,
"true_β" => true_β,
"n_int" => n_int,
"n_obs" => n_obs,
"p" => p,
"k" => k,
"lb_obs" => lb_obs,
"ub_obs" => ub_obs,
"err_var_min" => err_var_min,
"err_var_max" => err_var_max,
"noise_multiplier_min" => noise_multiplier_min,
"noise_multiplier_max" => noise_multiplier_max,
"lb_int" => lb_int,
"ub_int" => ub_int,
"prob_coefficient_unchanged" => prob_coefficient_unchanged,
"n_int_variables" => n_int_variables,
"frac_int_variables" => frac_int_variables
)
end
@time @testset "random instances" begin
    # Candidate values for every knob of the random-problem generator; one value
    # per knob is drawn inside generate_setting.
    setting_configs = Dict(
        "n_int" => [100, 200, 300, 400, 500],
        "n_obs" => [100, 200, 300, 400, 500],
        "p" => collect(5:9),
        "k" => collect(1:4),
        "lb_obs" => collect(0.1:0.1:2),
        "ub_lb_delta_obs" => collect(0.1:0.1:1),
        "err_var_min" => collect(0.1:0.1:2),
        "err_var_max" => collect(0.1:0.1:2),
        "noise_multiplier_min" => collect(0.1:0.1:4),
        "noise_multiplier_max_min_delta" => collect(0.1:0.1:2),
        "lb_int" => collect(0.1:0.1:2),
        "ub_int" => collect(0.1:0.1:2),
        "prob_fixed_noise_multiplier" => 1/3,
        "prob_coefficient_unchanged" => 2/3,
        "prob_int_single" => 1/6,
        "frac_int_variables" => 1. ./ collect(1.1:0.1:3)
    )
    for trial in 1:10
        for n_environments in [2, 3, 5]
            # Draw one observational SEM plus (n_environments - 1) intervened SEMs
            # and a target variable with its true parents/coefficients.
            setting = generate_setting(setting_configs, n_environments=n_environments)
            env = setting["env"]
            p = setting["p"]
            target = setting["target"]
            true_β = setting["true_β"]
            true_causes = setting["true_causes"]
            # Simulate observational data, then stack interventional samples
            # from each remaining environment below it.
            SEM_obs = setting["SEMs"][1]
            X = simulate(SEM_obs, setting["n_obs"])
            for j in 2:n_environments
                X = vcat(X, simulate(setting["SEMs"][j], setting["n_int"]))
            end
            # Response is the target column; predictors are all other columns.
            y = X[:, target]
            X = X[:, setdiff(1:p, target)]
            result = causalSearch(X, y, env, verbose=false)
            println("S = $(result.S), truth = $true_causes")
            # ICP guarantees coverage: the selected set is a subset of the true
            # causes, and each confidence interval covers the true coefficient.
            @test issubset(result.S, true_causes)
            for k in result.S
                @test result.confint[k, 1] < true_β[k] < result.confint[k, 2]
            end
        end
    end
end
@time @testset "random instances, many variables with screening" begin
    # Same generator knobs as the small-p testset, but with p in 20:50 so that
    # causalSearch must pre-screen variables (p_max) before the subset search.
    setting_configs = Dict(
        "n_int" => [100, 200, 300, 400, 500],
        "n_obs" => [100, 200, 300, 400, 500],
        "p" => collect(20:50),
        "k" => collect(1:3),
        "lb_obs" => collect(0.1:0.1:2),
        "ub_lb_delta_obs" => collect(0.1:0.1:1),
        "err_var_min" => collect(0.1:0.1:2),
        "err_var_max" => collect(0.1:0.1:2),
        "noise_multiplier_min" => collect(0.1:0.1:4),
        "noise_multiplier_max_min_delta" => collect(0.1:0.1:2),
        "lb_int" => collect(0.1:0.1:2),
        "ub_int" => collect(0.1:0.1:2),
        "prob_fixed_noise_multiplier" => 1/3,
        "prob_coefficient_unchanged" => 2/3,
        "prob_int_single" => 1/6,
        "frac_int_variables" => 1. ./ collect(1.1:0.1:3)
    )
    for trial in 1:10
        for n_environments in [2, 3, 5]
            # Draw one observational SEM plus (n_environments - 1) intervened SEMs
            # and a target variable with its true parents/coefficients.
            setting = generate_setting(setting_configs, n_environments=n_environments)
            env = setting["env"]
            p = setting["p"]
            target = setting["target"]
            true_β = setting["true_β"]
            true_causes = setting["true_causes"]
            # Simulate observational data, then stack interventional samples
            # from each remaining environment below it.
            SEM_obs = setting["SEMs"][1]
            X = simulate(SEM_obs, setting["n_obs"])
            for j in 2:n_environments
                X = vcat(X, simulate(setting["SEMs"][j], setting["n_int"]))
            end
            # Response is the target column; predictors are all other columns.
            y = X[:, target]
            X = X[:, setdiff(1:p, target)]
            result = causalSearch(X, y, env, verbose=false, p_max=8)
            # performance only guaranteed if lasso selected all the causes
            println("S = $(result.S), truth = $true_causes")
            if issubset(true_causes, result.variables_considered)
                @test issubset(result.S, true_causes)
            end
        end
    end
end
|
[
"MIT"
] | 1.0.0 | ffa3df6df789a731b70a237846f283802218333e | code | 964 | using Distributions
using Test
using Statistics
@time @testset "linear reg chow test" begin
    # Pooled linear model over three equal-sized environments.
    n_predictors = 7
    n_per_env = 500
    n_envs = 3
    env_labels = repeat(1:n_envs, inner=n_per_env)
    design = randn(n_per_env * n_envs, n_predictors)
    coefs = randn(n_predictors)
    response = design * coefs .- 1. + randn(n_per_env * n_envs)
    # Same coefficients everywhere: the Chow-based invariance test must accept.
    rejected, p_value, conf_intervals = InvariantCausal.conditional_inv_test_chow(design, response, env_labels, n_envs)
    @test rejected == false
    # Halve the coefficients in environment 1 only, breaking invariance:
    # the test must now reject.
    response[1:n_per_env] = design[1:n_per_env, :] * (coefs / 2) .- 1. + randn(n_per_env)
    rejected, p_value, conf_intervals = InvariantCausal.conditional_inv_test_chow(design, response, env_labels, n_envs)
    @test rejected == true
end
@time @testset "chow test" begin
    # Monte-Carlo calibration check: under H0 (identical coefficients in both
    # samples) the two-sample Chow p-values should be uniform, so the rejection
    # rate at level 0.05 should be close to 0.05.
    n_reps = 100
    p_values = [begin
        n = 300
        p = 7
        X1 = randn(2 * n, p)
        X2 = randn(n, p)
        beta = randn(p)
        y1 = X1 * beta .+ 1 + randn(2 * n)
        y2 = X2 * beta .+ 1 + randn(n)
        two_sample_chow(X1, X2, y1, y2)
    end for _ in 1:n_reps]
    # Allow two binomial standard errors around the nominal level.
    @test abs(mean(p_values .< 0.05) - 0.05) < 2 * sqrt(0.05 * 0.95 / n_reps)
end
| InvariantCausal | https://github.com/richardkwo/InvariantCausal.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.