licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | code | 4983 | using Crystalline, Test
using Crystalline: corep_orthogonality_factor
using LinearAlgebra: dot
@testset "Point groups" begin

@testset "Find a point group for every space group" begin
    # every space group must have an isomorphic "parent" point group in the stored set
    for D in 1:3
        for sgnum in 1:MAX_SGNUM[D]
            G = spacegroup(sgnum, D) # pass `D`: it was previously omitted, so the D=1,2
                                     # iterations silently queried 3D space groups
            pg = Crystalline.find_parent_pointgroup(G)
            @test pg !== nothing
        end
    end
end

@testset "Matching operator-sorting between symmorphic space groups & point groups" begin
    for D in 1:3
        for sgnum in 1:MAX_SGNUM[D]
            G = spacegroup(sgnum, D) # pass `D` (see above)
            # get isogonal point group of G: strip away translational parts and retain
            # unique rotation parts; does not change ordering (except for stripping out
            # non-unique rotational parts) and retains conventional lattice vectors
            isogonal_G = pointgroup(G)
            # find a matching point group from those stored obtained directly from
            # pointgroup(iuclab, D), via find_parent_pointgroup
            pg = Crystalline.find_parent_pointgroup(G)
            # compare operator sorting and setting; equivalent in the database
            @test pg == isogonal_G
        end
    end
end

@testset "Schoenflies notation: space vs. point group" begin
    # the Schoenflies label of the parent point group must prefix the space group label
    for sgnum in 1:MAX_SGNUM[3]
        sg = spacegroup(sgnum, Val(3))
        pg = Crystalline.find_parent_pointgroup(sg)
        sglab = schoenflies(sgnum)
        pglab = schoenflies(pg)
        @test pglab == sglab[1:lastindex(pglab)]
    end
end

@testset "Generators" begin
    # the stored generators must generate the full point group, whether looked up by
    # IUC label or by point-group number
    for (Dv, gtype) in ((Val(1), PointGroup{1}), (Val(2), PointGroup{2}), (Val(3), PointGroup{3}))
        D = typeof(Dv).parameters[1]
        for iuclab in Crystalline.PG_IUCs[D]
            ops1 = sort!(pointgroup(iuclab, Dv))
            ops2 = sort!(generate(generators(iuclab, gtype)))
            @test ops1 ≈ ops2
        end
        for pgnum in 1:length(Crystalline.PG_NUM2IUC[D])
            ops1 = sort!(pointgroup(pgnum, Dv))
            ops2 = sort!(generate(generators(pgnum, gtype)))
            @test ops1 ≈ ops2
        end
    end
end

@testset "Irrep orthogonality" begin
## 2nd orthogonality theorem (characters) [automatically includes 1st orthog. theorem]:
#     ∑ᵢ χᵢ⁽ᵃ⁾* χᵢ⁽ᵇ⁾ = δ_ab f N_op⁽ᵃ⁾
# for irreps a and b in the same point group (with i running over the
# N_op = N_op⁽ᵃ⁾ = N_op⁽ᵇ⁾ elements). `f` incorporates a multiplicative factor due to
# the conversion to "physically real" irreps; see `corep_orthogonality_factor(..)`.
# (NB: extraction had collapsed the distinct a/b loop identifiers into identical
# mojibaked names; they are restored as `_a`/`_b` suffixes below.)
@testset "2nd orthogonality theorem" begin
    for D in 1:3
        for pgiuc in Crystalline.PG_IUCs[D] # qualified: `PG_IUCs` is not exported
            pgirs0 = pgirreps(pgiuc, Val(D))
            Nop = order(first(pgirs0))
            # check both "ordinary" irreps and "physically real" irreps (coreps)
            for irtype in (identity, realify)
                pgirs = irtype(pgirs0)
                for (a, pgir_a) in enumerate(pgirs)
                    chi_a = characters(pgir_a)
                    f = corep_orthogonality_factor(pgir_a)
                    for (b, pgir_b) in enumerate(pgirs)
                        chi_b = characters(pgir_b)
                        # ∑ᵢ χᵢ⁽ᵃ⁾* χᵢ⁽ᵇ⁾ == δ_ab f |g|
                        @test (dot(chi_a, chi_b) ≈ (a==b)*f*Nop) atol=1e-12
                    end
                end
            end
        end
    end
end # @testset "2nd orthogonality theorem"

## Great orthogonality theorem of irreps:
#     ∑ᵢ [Dᵢ⁽ᵃ⁾]ₙₘ* [Dᵢ⁽ᵇ⁾]ⱼₖ = δ_ab δ_nj δ_mk N_op⁽ᵃ⁾/dim(D⁽ᵃ⁾)
# for irreps a and b in the same point group (with i running over the
# N_op = N_op⁽ᵃ⁾ = N_op⁽ᵇ⁾ elements)
# NB: cannot test this for coreps (see notes in `test/irreps_reality.jl`)
@testset "Great orthogonality theorem" begin
    αβγ = nothing # no free parameters: evaluate irreps at the point itself
    for D in 1:3
        for pgiuc in Crystalline.PG_IUCs[D] # qualified: `PG_IUCs` is not exported
            pgirs = pgirreps(pgiuc, Val(D))
            Nop = order(first(pgirs))
            for (a, pgir_a) in enumerate(pgirs)
                D_a = pgir_a(αβγ) # vector of irrep matrices in (a)
                dim_a = size(first(D_a), 1)
                for (b, pgir_b) in enumerate(pgirs)
                    D_b = pgir_b(αβγ) # vector of irrep matrices in (b)
                    dim_b = size(first(D_b), 1)
                    δab = (a==b)
                    for n in Base.OneTo(dim_a), j in Base.OneTo(dim_b) # rows of each irrep
                        δab_nj = δab*(n==j)
                        for m in Base.OneTo(dim_a), k in Base.OneTo(dim_b) # cols of each irrep
                            δab_nj_mk = δab_nj*(m==k)
                            # compute ∑ᵢ [Dᵢ⁽ᵃ⁾]ₙₘ* [Dᵢ⁽ᵇ⁾]ⱼₖ
                            g_orthog = sum(conj(D_a[i][n,m])*D_b[i][j,k] for i in Base.OneTo(Nop))
                            # test equality to δ_ab δ_nj δ_mk N_op/dim(D⁽ᵃ⁾)
                            @test isapprox(g_orthog, δab_nj_mk*Nop/dim_a, atol=1e-12)
                        end
                    end
                end
            end
        end
    end
end # @testset "Great orthogonality theorem"
end # @testset "Irrep orthogonality"
end # @testset "Point groups | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | code | 1743 | using Crystalline, Test
# Top-level test driver: each `include` runs one thematic test file; grouping them under
# a single outer testset yields one consolidated pass/fail summary at the end.
@testset "Crystalline" begin
# basic symmetry operations
include("symops.jl")
include("SquareStaticMatrices.jl")
include("groups_xyzt_vs_coded.jl")
include("generators_xyzt_vs_coded.jl")
# Bravais.jl
include("niggli.jl")
include("basisvecs.jl")
# abstractvecs
include("kvecs.jl")
include("wyckoff.jl")
# symmetry vectors
include("symmetryvectors.jl")
# show, notation, and cached info
include("show.jl")
include("notation.jl")
include("orders.jl")
# group checks
include("littlegroup_orders.jl")
include("pointgroup.jl")
# loading irreps from .jld files vs parsing of ISOTROPY
include("parsed_vs_loaded_littlegroup_irreps.jl")
# multiplication tables and irreps
include("irreps_orthogonality.jl")
include("chartable.jl")
include("multtable.jl")
include("irreps_reality.jl")
include("lgirreps_vs_pgirreps_at_Gamma.jl")
# compatibility
include("compatibility.jl")
# conjugacy classes and irreps
include("conjugacy.jl")
# lattices
include("lattices.jl")
# additional k-vectors in Φ-Ω ("special" representation domain vectors)
include("holosymmetric.jl")
# band representations & site symmetry groups
include("classification.jl") # => does topo classification agree w/ Adrian?
include("bandrep.jl") # => do k-vectors match (Bilbao's bandreps vs ISOTROPY)?
include("calc_bandreps.jl") # => tests of /src/calc_bandreps.jl
include("isomorphic_parent_pointgroup.jl") # => do we find the same "parent" point
# groups as Bilbao?
# magnetic space groups
include("mspacegroup.jl")
end | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | code | 13029 | using Crystalline, Test
using LinearAlgebra: dot
# ---------------------------------------------------------------------------------------- #
# test print with nicely printed diff on failures (from https://github.com/invenia/PkgTemplates.jl/blob/master/test/runtests.jl)
using DeepDiffs: deepdiff
# Print a colored, human-readable diff of `a` vs. `b` (via DeepDiffs.deepdiff) to aid
# debugging of failing string-comparison tests.
# NOTE(review): this temporarily forces `Base.have_color = true` via `@eval` so the diff
# is colorized even in non-color sessions; the previous value is restored in `finally`.
# Mutating `Base` state this way is a test-only hack and is not thread-safe.
function print_diff(a, b)
old = Base.have_color
@eval Base have_color = true
try
println(deepdiff(a, b))
finally
@eval Base have_color = $old
end
end
# Register a `@test` comparing two strings; on mismatch, first print a colored diff
# (via `print_diff`) and then fail with a readable `:expected == :observed` assertion.
function test_show(expected::AbstractString, observed::AbstractString)
    expected == observed && return @test true # fast path: strings agree
    print_diff(expected, observed)
    @test :expected == :observed # deliberately-failing, self-describing assertion
end
test_tp_show(v, observed::AbstractString) = test_show(repr(MIME"text/plain"(), v), observed)
# ---------------------------------------------------------------------------------------- #
@testset "`show` overloads" begin
# -------------------------------
# DirectBasis
# -------------------------------
Rs = DirectBasis([1,0,0], [0,1,0], [0,0,1]) # cubic
str = """
DirectBasis{3} (cubic):
[1.0, 0.0, 0.0]
[0.0, 1.0, 0.0]
[0.0, 0.0, 1.0]"""
test_tp_show(Rs, str)
Rs = DirectBasis([1,0,0], [-0.5, β(3)/2, 0.0], [0, 0, 1.5]) # hexagonal
str = """
DirectBasis{3} (hexagonal):
[1.0, 0.0, 0.0]
[-0.5, 0.8660254037844386, 0.0]
[0.0, 0.0, 1.5]"""
test_tp_show(Rs, str)
Rsβ² = directbasis(183, Val(3))
@test Rs[1] β Rsβ²[1] && Rs[2] β Rsβ²[2]
@test abs(dot(Rs[1], Rsβ²[3])) < 1e-14
@test abs(dot(Rs[2], Rsβ²[3])) < 1e-14
@test abs(dot(Rs[3], Rsβ²[3])) > 1e-1
Gs = reciprocalbasis(Rs)
str = """
ReciprocalBasis{3} (hexagonal):
[6.283185307179586, 3.6275987284684357, -0.0]
[0.0, 7.255197456936871, 0.0]
[0.0, -0.0, 4.1887902047863905]"""
test_tp_show(Gs, str)
# -------------------------------
# SymOperation
# -------------------------------
str = """
1 ββββββββββββββββββββββββββββββββ (x,y,z)
β 1 0 0 β· 0 β
β 0 1 0 β 0 β
β 0 0 1 β΅ 0 β"""
test_tp_show(S"x,y,z", str)
str = """
{-3ββββββΊ|0,Β½,β
} ββββββββ (z,-x+1/2,y+1/3)
β 0 0 1 β· 0 β
β -1 0 0 β 1/2 β
β 0 1 0 β΅ 1/3 β"""
test_tp_show(S"z,-x+1/2,y+1/3", str)
str = """
3β» βββββββββββββββββββββββββββββ (-x+y,-x)
β -1 1 β· 0 β
β -1 0 β΅ 0 β"""
test_tp_show(S"y-x,-x", str)
str = """
3-element Vector{SymOperation{3}}:
1
2βββ
{3ββββ»|0,0,β
}"""
test_tp_show([S"x,y,z", S"-x,z,y", S"y,z,x+1/3"], str)
str = """
4Γ4 Matrix{SymOperation{3}}:
1 2βββ 2βββ 2βββ
2βββ 1 2βββ 2βββ
2βββ 2βββ 1 2βββ
2βββ 2βββ 2βββ 1"""
sg = spacegroup(16)
test_tp_show(sg .* permutedims(sg), str)
# -------------------------------
# MultTable
# -------------------------------
str = """
6Γ6 MultTable{SymOperation{3}}:
ββββββββ¬ββββββββββββββββββββββββββββββββββββββββββ
β 1 3ββββΊ 3ββββ» 2βββ 6ββββ» 6ββββΊ
ββββββββΌββββββββββββββββββββββββββββββββββββββββββ
1 β 1 3ββββΊ 3ββββ» 2βββ 6ββββ» 6ββββΊ
3ββββΊ β 3ββββΊ 3ββββ» 1 6ββββ» 6ββββΊ 2βββ
3ββββ» β 3ββββ» 1 3ββββΊ 6ββββΊ 2βββ 6ββββ»
2βββ β 2βββ 6ββββ» 6ββββΊ 1 3ββββΊ 3ββββ»
6ββββ» β 6ββββ» 6ββββΊ 2βββ 3ββββΊ 3ββββ» 1
6ββββΊ β 6ββββΊ 2βββ 6ββββ» 3ββββ» 1 3ββββΊ
ββββββββ΄ββββββββββββββββββββββββββββββββββββββββββ
"""
test_tp_show(MultTable(pointgroup("6")), str)
str = """
4Γ4 MultTable{SymOperation{3}}:
βββββββββββββββ¬ββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
β 1 {2βββ|0,0,Β½} -1 {mβββ|0,0,Β½}
βββββββββββββββΌββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
1 β 1 {2βββ|0,0,Β½} -1 {mβββ|0,0,Β½}
{2βββ|0,0,Β½} β {2βββ|0,0,Β½} 1 {mβββ|0,0,Β½} -1
-1 β -1 {mβββ|0,0,Β½} 1 {2βββ|0,0,Β½}
{mβββ|0,0,Β½} β {mβββ|0,0,Β½} -1 {2βββ|0,0,Β½} 1
βββββββββββββββ΄ββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
"""
test_tp_show(MultTable(spacegroup(13)), str)
# -------------------------------
# KVec
# -------------------------------
for v in (KVec, RVec)
test_tp_show(v("0,0,.5+u"), "[0, 0, 1/2+Ξ±]")
test_tp_show(v("1/2+Ξ±,Ξ²+Ξ±,1/4"), "[1/2+Ξ±, Ξ±+Ξ², 1/4]")
test_tp_show(v("Ξ²,-Ξ±"), "[Ξ², -Ξ±]")
@test repr(MIME"text/plain"(), v("y,Ξ³,u")) == repr(MIME"text/plain"(), v("Ξ²,w,x"))
end
# -------------------------------
# AbstractGroup
# -------------------------------
str = """
PointGroup{3} β21 (6) with 6 operations:
1
3ββββΊ
3ββββ»
2βββ
6ββββ»
6ββββΊ"""
test_tp_show(pointgroup("6", Val(3)), str)
str = """
SpaceGroup{3} β213 (P4β32) with 24 operations:
1
{2βββ|Β½,0,Β½}
{2βββ|0,Β½,Β½}
{2βββ|Β½,Β½,0}
3ββββΊ
{3ββββββΊ|Β½,Β½,0}
{3βββββ»|Β½,0,Β½}
{3ββββββΊ|0,Β½,Β½}
3ββββ»
{3βββββΊ|0,Β½,Β½}
{3ββββββ»|Β½,Β½,0}
{3ββββββ»|Β½,0,Β½}
{2βββ|ΒΎ,ΒΌ,ΒΌ}
{2ββββ|ΒΎ,ΒΎ,ΒΎ}
{4ββββ»|ΒΌ,ΒΌ,ΒΎ}
{4ββββΊ|ΒΌ,ΒΎ,ΒΌ}
{4ββββ»|ΒΎ,ΒΌ,ΒΌ}
{2βββ|ΒΌ,ΒΎ,ΒΌ}
{2ββββ|ΒΎ,ΒΎ,ΒΎ}
{4ββββΊ|ΒΌ,ΒΌ,ΒΎ}
{4ββββΊ|ΒΎ,ΒΌ,ΒΌ}
{2βββ|ΒΌ,ΒΌ,ΒΎ}
{4ββββ»|ΒΌ,ΒΎ,ΒΌ}
{2ββββ|ΒΎ,ΒΎ,ΒΎ}"""
test_tp_show(spacegroup(213, Val(3)), str)
str = """
SiteGroup{2} β17 (p6mm) at 2b = [1/3, 2/3] with 6 operations:
1
{3βΊ|1,1}
{3β»|0,1}
{mββ|1,1}
mββ
{mββ|0,1}"""
sg = spacegroup(17,Val(2))
wps = wyckoffs(17, Val(2))
test_tp_show(sitegroup(sg, wps[end-1]), str)
# -------------------------------
# LGIrrep
# -------------------------------
str = """
4-element Collection{LGIrrep{3}} for β16 (P222) at Ξ = [0, 0, 0]:
Ξβ ββ¬βββββββββββββββββββββββββββββββββββββββββββββ
ββ 1: ββββββββββββββββββββββββββββββββ (x,y,z)
β 1
β
ββ 2βββ: βββββββββββββββββββββββββββ (x,-y,-z)
β 1
β
ββ 2βββ: βββββββββββββββββββββββββββ (-x,y,-z)
β 1
β
ββ 2βββ: βββββββββββββββββββββββββββ (-x,-y,z)
β 1
ββββββββββββββββββββββββββββββββββββββββββββββ
Ξβ ββ¬βββββββββββββββββββββββββββββββββββββββββββββ
ββ 1: ββββββββββββββββββββββββββββββββ (x,y,z)
β 1
β
ββ 2βββ: βββββββββββββββββββββββββββ (x,-y,-z)
β -1
β
ββ 2βββ: βββββββββββββββββββββββββββ (-x,y,-z)
β -1
β
ββ 2βββ: βββββββββββββββββββββββββββ (-x,-y,z)
β 1
ββββββββββββββββββββββββββββββββββββββββββββββ
Ξβ ββ¬βββββββββββββββββββββββββββββββββββββββββββββ
ββ 1: ββββββββββββββββββββββββββββββββ (x,y,z)
β 1
β
ββ 2βββ: βββββββββββββββββββββββββββ (x,-y,-z)
β 1
β
ββ 2βββ: βββββββββββββββββββββββββββ (-x,y,-z)
β -1
β
ββ 2βββ: βββββββββββββββββββββββββββ (-x,-y,z)
β -1
ββββββββββββββββββββββββββββββββββββββββββββββ
Ξβ ββ¬βββββββββββββββββββββββββββββββββββββββββββββ
ββ 1: ββββββββββββββββββββββββββββββββ (x,y,z)
β 1
β
ββ 2βββ: βββββββββββββββββββββββββββ (x,-y,-z)
β -1
β
ββ 2βββ: βββββββββββββββββββββββββββ (-x,y,-z)
β 1
β
ββ 2βββ: βββββββββββββββββββββββββββ (-x,-y,z)
β -1
ββββββββββββββββββββββββββββββββββββββββββββββ"""
test_tp_show(lgirreps(16)["Ξ"], str)
str = """
4-element Collection{LGIrrep{3}}:
#undef
#undef
#undef
#undef"""
test_tp_show(similar(lgirreps(16)["Ξ"]), str)
str = """
Ξβ
ββ¬βββββββββββββββββββββββββββββββββββββββββββββ
ββ 1: ββββββββββββββββββββββββββββββββ (x,y,z)
β β‘ 1 0 β€
β β£ 0 1 β¦
β
ββ {2βββ|0,Β½,ΒΌ}: βββββββββββ (x,-y+1/2,-z+1/4)
β β‘ 1 0 β€
β β£ 0 -1 β¦
β
ββ {2βββ|0,Β½,ΒΌ}: βββββββββββ (-x,y+1/2,-z+1/4)
β β‘ -1 0 β€
β β£ 0 1 β¦
β
ββ 2βββ: βββββββββββββββββββββββββββ (-x,-y,z)
β β‘ -1 0 β€
β β£ 0 -1 β¦
β
ββ -4ββββΊ: βββββββββββββββββββββββββ (y,-x,-z)
β β‘ 0 1 β€
β β£ -1 0 β¦
β
ββ -4ββββ»: βββββββββββββββββββββββββ (-y,x,-z)
β β‘ 0 -1 β€
β β£ 1 0 β¦
β
ββ {mβββ|0,Β½,ΒΌ}: βββββββββββ (-y,-x+1/2,z+1/4)
β β‘ 0 -1 β€
β β£ -1 0 β¦
β
ββ {mββββ|0,Β½,ΒΌ}: ββββββββββββ (y,x+1/2,z+1/4)
β β‘ 0 1 β€
β β£ 1 0 β¦
ββββββββββββββββββββββββββββββββββββββββββββββ"""
test_tp_show(lgirreps(122)["Ξ"][end], str)
str = """
Ξβ ββ¬βββββββββββββββββββββββββββββββββββββββββββββ
ββ 1: ββββββββββββββββββββββββββββββββ (x,y,z)
β 1
β
ββ 2βββ: βββββββββββββββββββββββββββ (-x,-y,z)
β -1
β
ββ 3ββββΊ: βββββββββββββββββββββββββ (-y,x-y,z)
β exp(-0.6667iΟ)
β
ββ 3ββββ»: ββββββββββββββββββββββββ (-x+y,-x,z)
β exp(0.6667iΟ)
β
ββ 6ββββΊ: ββββββββββββββββββββββββββ (x-y,x,z)
β exp(-0.3333iΟ)
β
ββ 6ββββ»: βββββββββββββββββββββββββ (y,-x+y,z)
β exp(0.3333iΟ)
ββββββββββββββββββββββββββββββββββββββββββββββ"""
test_tp_show(lgirreps(168)["Ξ"][end], str)
# -------------------------------
# PGIrrep
# -------------------------------
str = """
ΞβΞβ ββ¬βββββββββββββββββββββββββββββββββββββββββββββ
ββ 1: ββββββββββββββββββββββββββββββββ (x,y,z)
β β‘ 1 0 β€
β β£ 0 1 β¦
β
ββ 3ββββΊ: βββββββββββββββββββββββββ (-y,x-y,z)
β β‘ -0.5+0.866im 0 β€
β β£ 0 -0.5-0.866im β¦
β
ββ 3ββββ»: ββββββββββββββββββββββββ (-x+y,-x,z)
β β‘ -0.5-0.866im 0 β€
β β£ 0 -0.5+0.866im β¦
β
ββ 2βββ: βββββββββββββββββββββββββββ (-x,-y,z)
β β‘ -1 0 β€
β β£ 0 -1 β¦
β
ββ 6ββββ»: βββββββββββββββββββββββββ (y,-x+y,z)
β β‘ 0.5-0.866im 0 β€
β β£ 0 0.5+0.866im β¦
β
ββ 6ββββΊ: ββββββββββββββββββββββββββ (x-y,x,z)
β β‘ 0.5+0.866im 0 β€
β β£ 0 0.5-0.866im β¦
ββββββββββββββββββββββββββββββββββββββββββββββ"""
pgirs = pgirreps("6")
pgirsβ² = realify(pgirs)
test_tp_show(pgirsβ²[end], str)
@test summary(pgirs) == "6-element Collection{PGIrrep{3}}"
@test summary(pgirsβ²) == "4-element Collection{PGIrrep{3}}"
# -------------------------------
# CharacterTable
# -------------------------------
str = """
CharacterTable{3} for β21 (6):
ββββββββ¬ββββββββββββββββββββ
β Ξβ Ξβ ΞβΞβ
ΞβΞβ
ββββββββΌββββββββββββββββββββ
1 β 1 1 2 2
3ββββΊ β 1 1 -1 -1
3ββββ» β 1 1 -1 -1
2βββ β 1 -1 2 -2
6ββββ» β 1 -1 -1 1
6ββββΊ β 1 -1 -1 1
ββββββββ΄ββββββββββββββββββββ
"""
test_tp_show(characters(pgirsβ²), str)
str = """
CharacterTable{3} for β230 (Ia-3d) at P = [1/2, 1/2, 1/2]:
ββββββββββββββββββ¬ββββββββββββββββββββββ
β Pβ Pβ Pβ
ββββββββββββββββββΌββββββββββββββββββββββ
1 β 2 2 4
{2βββ|0,0,Β½} β 0 0 0
{2βββ|Β½,0,0} β 0 0 0
{2βββ|0,Β½,0} β 0 0 0
3ββββΊ β -1 -1 1
3ββββ» β -1 -1 1
{3βββββΊ|Β½,0,0} β -1im -1im 1im
{3βββββ»|0,Β½,0} β 1im 1im -1im
{3ββββββ»|0,Β½,0} β -1im -1im 1im
{3ββββββΊ|0,0,Β½} β 1im 1im -1im
{3ββββββ»|0,0,Β½} β -1im -1im 1im
{3ββββββΊ|Β½,0,0} β 1im 1im -1im
{-4ββββΊ|ΒΌ,ΒΌ,ΒΎ} β -1+1im 1-1im 0
{-4ββββ»|ΒΎ,ΒΌ,ΒΌ} β 1-1im -1+1im 0
{-4ββββΊ|ΒΎ,ΒΌ,ΒΌ} β -1+1im 1-1im 0
{-4ββββ»|ΒΌ,ΒΎ,ΒΌ} β 1-1im -1+1im 0
{-4ββββΊ|ΒΌ,ΒΎ,ΒΌ} β -1+1im 1-1im 0
{-4ββββ»|ΒΌ,ΒΌ,ΒΎ} β 1-1im -1+1im 0
{mβββ|ΒΎ,ΒΌ,ΒΌ} β 0 0 0
{mββββ|ΒΌ,ΒΌ,ΒΌ} β 0 0 0
{mβββ|ΒΌ,ΒΌ,ΒΎ} β 0 0 0
{mββββ|ΒΌ,ΒΌ,ΒΌ} β 0 0 0
{mβββ|ΒΌ,ΒΎ,ΒΌ} β 0 0 0
{mββββ|ΒΌ,ΒΌ,ΒΌ} β 0 0 0
ββββββββββββββββββ΄ββββββββββββββββββββββ
"""
test_tp_show(characters(lgirreps(230)["P"]), str)
# -------------------------------
# BandRepSet and BandRep
# -------------------------------
brs = bandreps(42, 3)
str = """
BandRepSet (β42): 6 BandReps, sampling 17 LGIrreps (spin-1 w/ TR)
βββββ¬ββββββββββββββββββββββββ
β 4a 4a 4a 4a 8b 8b
β Aβ Aβ Bβ Bβ A B
βββββΌββββββββββββββββββββββββ
Ξβ β 1 Β· Β· Β· 1 Β·
Ξβ β Β· 1 Β· Β· 1 Β·
Ξβ β Β· Β· Β· 1 Β· 1
Ξβ β Β· Β· 1 Β· Β· 1
Tβ β 1 Β· Β· Β· Β· 1
Tβ β Β· 1 Β· Β· Β· 1
Tβ β Β· Β· Β· 1 1 Β·
Tβ β Β· Β· 1 Β· 1 Β·
Yβ β 1 Β· Β· Β· Β· 1
Yβ β Β· 1 Β· Β· Β· 1
Yβ β Β· Β· Β· 1 1 Β·
Yβ β Β· Β· 1 Β· 1 Β·
Zβ β 1 Β· Β· Β· 1 Β·
Zβ β Β· 1 Β· Β· 1 Β·
Zβ β Β· Β· Β· 1 Β· 1
Zβ β Β· Β· 1 Β· Β· 1
Lβ β 1 1 1 1 2 2
βββββΌββββββββββββββββββββββββ
ΞΌ β 1 1 1 1 2 2
βββββ΄ββββββββββββββββββββββββ
KVecs: Ξ, T, Y, Z, L"""
test_tp_show(brs, str)
test_tp_show(brs[1], "1-band BandRep (AββG at 4a):\n [Ξβ, Tβ, Yβ, Zβ, Lβ]")
test_tp_show(brs[end], "2-band BandRep (BβG at 8b):\n [Ξβ+Ξβ, Tβ+Tβ, Yβ+Yβ, Zβ+Zβ, 2Lβ]")
end # @testset | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | code | 834 | using Test
using Crystalline
@testset "(Abstract)SymmetryVectors" begin
# band representations of space group 221 (Pm-3m) serve as fixtures for the arithmetic
brs = calc_bandreps(221) # ::Collection{NewBandRep{3}}
# addition of symmetry vectors
@test brs[1] + brs[2] == SymmetryVector(brs[1]) + SymmetryVector(brs[2])
@test Vector(brs[1] + brs[2]) == Vector(brs[1]) + Vector(brs[2])
# multiplication of symmetry vectors
@test brs[1] + brs[1] == 2brs[1]
@test -brs[1] == 2brs[1] - 3brs[1]
@test zero(brs[1]) == 0*brs[1]
@test brs[3]*7 == 7*brs[3]
# type-stable summation
@test sum(brs[1:1]) isa SymmetryVector{3}
@test sum(brs[1:2]) isa SymmetryVector{3}
# printing SymmetryVector that have zero-contents
n = SymmetryVector(brs[1])
@test n*0 == zero(n)
# NOTE(review): the expected string below appears mojibaked by text extraction
# (subscripted irrep labels such as Mᵢ/Xᵢ/Γᵢ/Rᵢ) — verify against actual `string` output
@test string(zero(n)) == "[0Mα΅’, 0Xα΅’, 0Ξα΅’, 0Rα΅’] (0 bands)"
end # @testset "(Abstract)SymmetryVectors" | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | code | 6717 | using Crystalline, Test
@testset "Symmetry operations" begin
@testset "Basics" begin
# Space group β1
sg = spacegroup(1, Val(3))
@test order(sg) == 1
@test dim(sg) == 3
op = sg[1]
@test matrix(op) == [1.0 0.0 0.0 0.0; 0.0 1.0 0.0 0.0; 0.0 0.0 1.0 0.0]
@test xyzt(op) == "x,y,z"
# Space group β146
sg = spacegroup(146, Val(3))
@test order(sg) == 9
@test dim(sg) == 3
op = sg[9]
@test matrix(op) β [-1.0 1.0 0.0 1/3; -1.0 0.0 0.0 2/3; 0.0 0.0 1.0 2/3]
@test xyzt(op) == "-x+y+1/3,-x+2/3,z+2/3"
# Plane group β7
sg = spacegroup(7, 2) # keep as 2 (rather than Val(2)) intentionally, to test...
@test order(sg) == 4
@test dim(sg) == 2
op = sg[2]
@test matrix(op) β [-1.0 0.0 0.0; 0.0 -1.0 0.0]
@test xyzt(op) == "-x,-y"
# Round-trippability of constructors
op = SymOperation{3}("-y,x,z+1/2")
@test op == SymOperation("-y,x,z+1/2")
@test op == S"-y,x,z+1/2"
@test op == SymOperation(rotation(op), translation(op))
@test op == SymOperation(matrix(op)) # SMatrix
@test op == SymOperation(Matrix(op)) # Matrix
# identity operation
@test S"x,y,z" == one(S"y,z,x") == one(SymOperation{3})
end
@testset "Parsing cornercases" begin
# allow some forms of whitespace in xyzt string parsing (issue #29): spaces after
# commas, around signs, and around "+1/3"-style translation terms must all be tolerated
@test SymOperation("x,y,z") == SymOperation("x, y, z")
@test SymOperation("x,-y,+z") == SymOperation(" x, - y, +z")
@test SymOperation("x,-y+x,+z-1/2") == SymOperation("x, - y + x, +z - 1/2")
@test SymOperation(" x,-y+x+1/3,+z-1/2") == SymOperation(" x, - y + x + 1/3, +z - 1/2")
end
@testset "Conversion between xyzt and matrix forms" begin
for D = 1:3
Dα΅ = Val(D)
@testset "D = $(D)" begin
for sgnum in 1:MAX_SGNUM[D]
@testset "β$sgnum" begin
sg = spacegroup(sgnum, Dα΅)
for op in sg
@test Crystalline.xyzt2matrix(xyzt(op), Dα΅) β matrix(op) # xyzt->matrix (`β` due to possible rounding errors and precision loss on round-trip)
@test Crystalline.matrix2xyzt(matrix(op)) == xyzt(op) # matrix->xyzt
end
end
end
end
end
end
@testset "Composition" begin
sg = spacegroup(230, Val(3)) # random space group
# test associativity (with and without modular arithmetic)
# NOTE(review): the three distinct operator names (presumably g₁, g₂, g₃) were collapsed
# into identical mojibaked identifiers by text extraction — restore before running
gβ, gβ, gβ = sg[5:7]
@test gβ*(gβ*gβ) == (gβ*gβ)*gβ
# `compose(…, false)` composes without reducing translations mod lattice vectors
@test compose(compose(gβ, gβ, false), gβ, false) == compose(gβ, compose(gβ, gβ, false), false)
end
@testset "Operators in differents bases" begin
# NOTE(review): several tokens in this block are mojibaked by extraction (`opβ²` for a
# primed name, `β` for `≈`-style comparisons elsewhere in the file) — restore before use
sgnum = 110 # bravais type "tI"
cntr = centering(sgnum, 3) # 'I'
csg = spacegroup(sgnum, Val(3)) # conventional basis
psg = SpaceGroup{3}(sgnum, primitivize.(csg, cntr, false)) # primitive basis
# compute a random possible basis for tI bravais type
cRs = directbasis(sgnum, Val(3)) # conventional basis
pRs = primitivize(cRs, cntr) # primitive basis
# check that the cartesian representation of the space group operations, obtained
# from `csg` & `cRs` versus `psg` and `pRs` agree (as it should)
cartRs_from_cRs = cartesianize(csg, cRs)
cartRs_from_pRs = cartesianize(psg, pRs)
@test all(isapprox.(cartRs_from_cRs, cartRs_from_pRs, atol=1e-12))
# `isapprox` with centering: ops differing by an 'R' centering translation compare equal
# only when that centering is supplied
op = S"-y+2/3,x-y+1/3,z+1/3" # {3₀₀₁⁺|⅔,⅓,⅓} (in a rhombohedral setting) — TODO confirm
opβ² = op * SymOperation{3}(Bravais.centeringtranslation('R',Val(3)))
@test isapprox(op, opβ², 'R', true)
@test !isapprox(op, opβ², 'P', true)
# `isapprox` with `modw = false`: integer-lattice-translation copies only compare equal
# when translations are reduced modulo the lattice
op = S"x,-y,z"
opβ² = S"x-3,-y+10,z-5"
@test !isapprox(op, opβ², 'I', false)
@test !isapprox(op, opβ², 'P', false)
@test isapprox(op, opβ², 'P', true)
@test isapprox(op, opβ², 'I', true)
end
@testset "Groups created from generators" begin # (`generate` default sorts by `seitz`)
# generate plane group (17) p6mm from a 6-fold rotation and a mirror
gens = SymOperation.(["x-y,x", "-x+y,y"])
sg = spacegroup(17, Val(2))
@test sort!(generate(gens)) == sort!(sg)
# generate site symmetry group of Wyckoff position 2b in p6mm
ops = SymOperation.(
["x,y","-y+1,x-y+1", "-x+y,-x+1", # {1|0}, {3⁺|1,1}, {3⁻|0,1}
"-y+1,-x+1", "-x+y,y", "x,x-y+1"]) # three mirrors w/ translations (1,1), (0,0), (0,1)
gens = ops[[2,6]]
@test sort!(generate(gens, modΟ=false), by=xyzt) == sort!(ops, by=xyzt)
# generate space group with nonsymmorphic operations
sg = spacegroup(180, Val(3)) # P6₂22
gens = sg[[6,8]] # a 6-fold screw {6₀₀₁⁺|0,0,⅓} and a 2-fold rotation — TODO confirm
@test sort!(generate(gens)) β sort!(sg)
# generators do not specify a finite group under "non-modulo" composition: the
# translation grows without bound, so `generate` must bail out via `Nmax`
@test_throws OverflowError generate(SymOperation.(["x,y+1,z"]); modΟ=false, Nmax=50)
end
@testset "Generators" begin
# the stored space-group generators must generate the full space group in every dimension
for (Dα΅, gtype) in ((Val(1), SpaceGroup{1}), (Val(2), SpaceGroup{2}), (Val(3), SpaceGroup{3}))
D = typeof(Dα΅).parameters[1]
for sgnum in 1:MAX_SGNUM[D]
ops1 = sort!(spacegroup(sgnum, Dα΅))
ops2 = sort!(generate(generators(sgnum, gtype)))
@test ops1 β ops2
end
end
end
@testset "Error types and domain checking" begin
# out-of-range space-group numbers or dimensions must throw `DomainError`
@test_throws DomainError spacegroup(231, 3) # 3D space groups number 1–230
@test_throws DomainError spacegroup(-1, 2)
@test_throws DomainError spacegroup(2, 0) # dimension must be 1, 2, or 3
@test_throws DomainError spacegroup(41, 5)
# malformed xyzt strings must throw `ArgumentError`
@test_throws ArgumentError SymOperation{2}("x,z")
@test_throws ArgumentError SymOperation("x,z")
@test_throws ArgumentError SymOperation("x,Γ·z")
@test_throws ArgumentError SymOperation("x ,z") # don't allow spaces *after* entries
# correct syntax but wrong number of coordinates for the requested dimension
@test_throws DimensionMismatch SymOperation{3}("x,y+z")
end
@testset "Checking symmorphic space groups" begin
# we do a memoized look-up for `issymmorph(::Integer, ::Integer)`: ensure that it
# agrees with explicit calculations
for D in 1:3
for sgnum in 1:MAX_SGNUM[D]
@test issymmorph(sgnum, D) == issymmorph(spacegroup(sgnum, D))
end
end
end
end | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | code | 2984 | using Crystalline, Test, PrettyTables
debug = false
if !isdefined(Main, :LGIRS)
LGIRS = lgirreps.(1:MAX_SGNUM[3], Val(3)) # loaded from our saved .jld2 files
end
@testset "Order of space group, Bilbao vs. ISOTROPY, in primitivized basis" begin
    # ISOTROPY's operator listings exclude "trivial" centering-translation copies of
    # operators; verify that Bilbao's conventional-setting operators, once reduced to a
    # unique set via the primitive basis, have the same group order.
    for sgnum = 1:230
        cntr = centering(sgnum, 3)
        # sgops from Bilbao (BCD)
        ops = operations(spacegroup(sgnum))
        # go to primitive basis → reduce to unique set → go back to conventional basis
        # (a stray bare `reduce_ops` expression — dead code — was removed here)
        ops_reduced = reduce_ops(ops, cntr, true)
        if debug # `debug` is a module-level flag set at the top of this file
            if length(ops) != length(ops_reduced) # a trivial translation set was removed
                println(sgnum, ": ", length(ops), " → ", length(ops_reduced))
            end
        end
        Nops_ISO = length(operations(LGIRS[sgnum][1][1]))
        # ISOTROPY's spacegroup data must indeed exclude trivial translation sets
        @test Nops_ISO == length(ops_reduced)
    end
end
let count = 0, failures = Int[]
for sgnum = 1:230
cntr = centering(sgnum, 3)
# sgops from Bilbao (BCD)
ops_BCD = operations(spacegroup(sgnum))
ops_BCDβ² = reduce_ops(ops_BCD, cntr, true) # go to primitive basis β reduce to unique
# set β go back to conventional basis
# sgops from ISOTROPY (via Ξ point)
ops_ISO = operations(LGIRS[sgnum][1][1])
# sorting according to seitz notation
sorted_idx_BCD = sortperm(seitz.(ops_BCDβ²))
sorted_idx_ISO = sortperm(seitz.(ops_ISO))
ops_BCDβ² = ops_BCDβ²[sorted_idx_BCD]
ops_ISO = ops_ISO[sorted_idx_ISO]
# extracting various (sorted) metrics of the sgops
seitz_BCD = seitz.(ops_BCDβ²)
seitz_ISO = seitz.(ops_ISO)
matrix_BCD = matrix.(ops_BCDβ²)
matrix_ISO = matrix.(ops_ISO)
Ο_BCD = translation.(ops_BCDβ²)
Ο_ISO = translation.(ops_ISO)
# comparisons of (sorted) sgops across BCD and ISO
BCD_vs_ISO = seitz_BCD .== seitz_ISO # 6 disagreements (trivial differences of **primitive** lattice translations)
# print some stuff if BCD and ISO sgops sets not equivalent
if any(!, BCD_vs_ISO)
count += 1
push!(failures, sgnum)
dΟ = Ο_BCD .- Ο_ISO
P = Crystalline.primitivebasismatrix(cntr, Val(3))
dΟ_primitive = [P\dΟ_i for dΟ_i in dΟ]
if debug
println("\nsgnum=$(sgnum) ($(bravaistype(sgnum, 3))):")
pretty_table(stdout,
[seitz_BCD seitz_ISO BCD_vs_ISO dΟ dΟ_primitive];
header = ["BCD", "ISOTROPY", "==?", "dΟ (conventional basis)", "dΟ (primitive basis)"],
highlighters = Highlighter((d,i,j)->!d[i,3], Crayon(background=:red)))
end
end
end
print("\n$(count)/230 disagreements, for space groups: ")
join(stdout, failures, ", "); println()
end | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | code | 2230 | using Test, Crystalline
using Crystalline: constant, free
@testset "SiteGroup" begin
# NOTE(review): several operators/identifiers below are mojibaked by extraction
# (`β₯` for `≥`, `β` for `≈`, `rvβ²`/`wpβ²`/`opΚ°`/`opα΅` for primed/superscripted
# names) — restore before running
neg_error_tol = 1e-15 # tolerate tiny negative coordinates from floating-point rounding
for D in 1:3
Dα΅ = Val(D)
for sgnum in 1:MAX_SGNUM[D]
sg = spacegroup(sgnum, Dα΅)
wps = wyckoffs(sgnum, Dα΅)
for wp in wps
g = sitegroup(sg, wp)
@test g isa SiteGroup
rv = parent(wp)
# test that ops in `g` leave the Wyckoff position `wp` invariant
for op in g
rvβ² = op*rv
@test isapprox(rv, rvβ², nothing, false) # isapprox(::RVec, ::RVec)
wpβ² = op*wp
@test isapprox(wp, wpβ², nothing, false) # isapprox(::WyckoffPosition, ::WyckoffPosition)
end
# test that all the constant parts of the positions in the Wyckoff orbit
# all have coordinates in [0,1)
wpreps = orbit(g) # wyckoff position representatives
@test all(wpreps) do wp
all(xyz->xyzβ₯(-neg_error_tol) && xyz<1, constant(wp))
end
# test that `g` and `cosets(g)` furnishes a left-coset decomposition of `sg`
ops = [opΚ°*opα΅ for opΚ° in cosets(g) for opα΅ in g];
@test sort!(ops, by=xyzt) β sort(sg, by=xyzt)
end
end
end
end
@testset "Maximal Wyckoff positions" begin
for sgnum in 1:MAX_SGNUM[3]
sg = spacegroup(sgnum, Val(3))
sitegs = sitegroups(sg)
max_sitegs = findmaximal(sitegs)
max_wps = position.(max_sitegs)
# the band representations should include all maximal wyckoff positions;
# check consistency against that
max_wps_brs_str = getfield.(bandreps(sgnum, 3).bandreps, Ref(:wyckpos))
@test sort(unique(max_wps_brs_str)) == sort(label.(max_wps))
end
# type-stability: `wyckoffs` must be concretely inferrable for each `Val` dimension
@test (@inferred Vector{WyckoffPosition{1}} wyckoffs(1, Val(1))) isa Vector{WyckoffPosition{1}}
@test (@inferred Vector{WyckoffPosition{2}} wyckoffs(1, Val(2))) isa Vector{WyckoffPosition{2}}
@test (@inferred Vector{WyckoffPosition{3}} wyckoffs(1, Val(3))) isa Vector{WyckoffPosition{3}}
end
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | docs | 4272 | # Crystalline.jl
[![Documentation (stable)][docs-stable-img]][docs-stable-url] [![Documentation (dev)][docs-dev-img]][docs-dev-url] [![Build status][ci-status-img]][ci-status-url] [![Coverage][coverage-img]][coverage-url]
Tools for crystalline symmetry analysis implemented in the Julia language.
This package provides access to the symmetry operations of crystalline point groups, space groups, Wyckoff positions, their irreducible representations and band representations, as well as tools for their associated manipulation.
## Installation
The package can be installed via Julia's package manager:
```julia
julia> using Pkg; Pkg.add("Crystalline")
julia> using Crystalline
```
## Functionality
Crystalline.jl provides several functionalities for line groups, plane groups, and space groups, as well as crystallographic point groups.
Example use includes:
```julia
# construct a 3D `SymOperation` from its triplet form
julia> S"x,-y,-z"
2βββ βββββββββββββββββββββββββββ (x,-y,-z)
β 1 0 0 β· 0 β
β 0 -1 0 β 0 β
β 0 0 -1 β΅ 0 β
# load the `SymOperation`s of the 3D space group β16 in a conventional setting
julia> sg = spacegroup(16, Val(3))
SpaceGroup{3} β16 (P222) with 4 operations:
1
2βββ
2βββ
2βββ
# load a dictionary of small irreps and their little groups for space group β16,
# indexed by their k-point labels; then inspect the small irreps at the A point
julia> lgirs = lgirreps(16, Val(3))
julia> lgirs["A"]
2-element Collection{LGIrrep{3}} for β16 (P222) at A = [Ξ±, 0, 1/2]:
Aβ ββ¬βββββββββββββββββββββββββββββββββββββββββββββ
ββ 1: ββββββββββββββββββββββββββββββββ (x,y,z)
β 1
β
ββ 2βββ: βββββββββββββββββββββββββββ (x,-y,-z)
β 1
ββββββββββββββββββββββββββββββββββββββββββββββ
Aβ ββ¬βββββββββββββββββββββββββββββββββββββββββββββ
ββ 1: ββββββββββββββββββββββββββββββββ (x,y,z)
β 1
β
ββ 2βββ: βββββββββββββββββββββββββββ (x,-y,-z)
β -1
ββββββββββββββββββββββββββββββββββββββββββββββ
# construct the character table for the small irreps at the Ξ point
julia> characters(lgirs["Ξ"])
CharacterTable{3} for β16 (P222) at Ξ = [0, 0, 0]:
βββββββ¬ββββββββββββββββ
β Ξβ Ξβ Ξβ Ξβ
βββββββΌββββββββββββββββ
1 β 1 1 1 1
2βββ β 1 -1 1 -1
2βββ β 1 -1 -1 1
2βββ β 1 1 -1 -1
βββββββ΄ββββββββββββββββ
```
Additional functionality includes e.g. point group operations (`pointgroup`) and irreps (`pgirreps`), elementary band representations (`bandreps`), Wyckoff positions (`wyckoffs`), conjugacy classes (`classes`), class-specific characters (`classcharacters`), group generators (`generators`), subperiodic groups (`subperiodicgroup`), 3D magnetic space groups (`mspacegroup`), and physically real irreps (`realify`).
In addition, Bravais lattice utilities and conventions are accessible via the lightweight stand-alone sub-package [Bravais.jl](https://github.com/thchr/Crystalline.jl/tree/master/Bravais).
For a full description of the public API, see the [documentation][docs-dev-url].
### Current limitations
At present, the package's emphasis is on spinless systems (i.e., double groups and spinful irreps are not implemented).
## API stability
Crystalline.jl is a research package in active development: breaking changes are likely (but will respect semantic versioning).
## Citation
If you find this package useful in your research, please cite our paper:
- T. Christensen, H.C. Po, J.D. Joannopoulos, & M. SoljaΔiΔ, *Location and topology of the fundamental gap in photonic crystals*, [Phys. Rev. X **12**, 021066 (2022)](https://doi.org/10.1103/PhysRevX.12.021066).
In addition, please cite any earlier works explicitly referenced in the documentation of individual functions.
[ci-status-img]: https://github.com/thchr/Crystalline.jl/workflows/CI/badge.svg
[ci-status-url]: https://github.com/thchr/Crystalline.jl/actions
[docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[docs-dev-url]: https://thchr.github.io/Crystalline.jl/dev
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://thchr.github.io/Crystalline.jl/stable
[coverage-img]: https://codecov.io/gh/thchr/Crystalline.jl/branch/master/graph/badge.svg
[coverage-url]: https://codecov.io/gh/thchr/Crystalline.jl
| Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | docs | 1002 | We vendor the entirety of https://github.com/wildart/SmithNormalForm.jl because it is not, and will not (see [SmithNormalForm.jl #2](https://github.com/wildart/SmithNormalForm.jl/issues/2)) be, registered in Julia's General registry.
However, in order for Crystalline to be registered in the General Registry, we cannot depend on an unregistered package, so we need to vendor it ourselves.
## git subtree
We can use git's subtree functionality to pull down SmithNormalForm.jl and also keep it up-to-date:
Specifically, SmithNormalForm.jl's git repo was added following the strategy in https://www.atlassian.com/git/tutorials/git-subtree, with the commands:
```sh
git remote add -f wildart-snf https://github.com/wildart/SmithNormalForm.jl.git
git subtree add --prefix .vendor/SmithNormalForm wildart-snf master --squash
```
and we can check for updates (and integrate them) via:
```sh
git fetch wildart-snf master
git subtree pull --prefix .vendor/SmithNormalForm wildart-snf master --squash
```
| Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | docs | 1242 | # Smith Normal Form
[](https://travis-ci.org/wildart/SmithNormalForm.jl)
[](https://coveralls.io/github/wildart/SmithNormalForm.jl?branch=master)
The [Smith normal form](https://en.wikipedia.org/wiki/Smith_normal_form) decomposition over integer domain implementation in Julia.
## Installation
For Julia 1.1+, add [BoffinStuff](https://github.com/wildart/BoffinStuff.git) registry in the package manager, and proceed with the installation:
```
pkg> registry add https://github.com/wildart/BoffinStuff.git
pkg> add SmithNormalForm
```
## Example
```julia
julia> using SmithNormalForm, LinearAlgebra
julia> M = [2 4 4; -6 6 12; 10 -4 -16]
3Γ3 Array{Int64,2}:
2 4 4
-6 6 12
10 -4 -16
julia> F = smith(M)
Smith normal form:
[2 0 0; 0 6 0; 0 0 12]
julia> F.S
3Γ3 Array{Int64,2}:
1 0 0
-3 1 0
5 -2 1
julia> F.T
3Γ3 Array{Int64,2}:
1 2 2
0 3 4
0 1 1
julia> diagm(F)
3Γ3 Array{Int64,2}:
2 0 0
0 6 0
0 0 12
julia> F.S*diagm(F)*F.T
3Γ3 Array{Int64,2}:
2 4 4
-6 6 12
10 -4 -16
```
| Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | docs | 1415 | # Bravais.jl
[![Documentation (stable)][docs-stable-img]][docs-stable-url] [![Documentation (dev)][docs-dev-img]][docs-dev-url] [![Build status][ci-status-img]][ci-status-url]
Tools for treating lattice bases, crystal systems, and Bravais types.
---
Bravais.jl is developed as a light-weight shared utility package for [Crystalline.jl](https://github.com/thchr/Crystalline.jl) and [Brillouin.jl](https://github.com/thchr/Brillouin.jl), intended to give access to a set of systematic conventions and tools related to point lattices as they arise in crystallography and space group theory.
See the associated [documentation][docs-dev-url] for a specification of available methods and public API.
## Citation
If you find this package useful in your research, consider citing our arXiv paper:
- T. Christensen, H.C. Po, J.D. Joannopoulos, & M. SoljaΔiΔ, *Location and topology of the fundamental gap in photonic crystals*, [arXiv:2106.10267 (2021)](https://arxiv.org/abs/2106.10267)
[ci-status-img]: https://github.com/thchr/Crystalline.jl/workflows/CI/badge.svg
[ci-status-url]: https://github.com/thchr/Crystalline.jl/actions
[docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[docs-dev-url]: https://thchr.github.io/Crystalline.jl/dev/bravais/
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://thchr.github.io/Crystalline.jl/stable/bravais/ | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | docs | 103 | 1D and 2D data is not explicitly stored: both can be transferred directly from a subset of the 3D data. | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | docs | 1412 | # ISOTROPY ISO-IR dataset
The files `CIR_data.txt` and `PIR_data.txt` are sourced from the
[ISOTROPY software's ISO-IR dataset](https://stokes.byu.edu/iso/irtables.php)
and contain data needed to generate *space group* irreps.
In ISOTROPY, the data files are extracted in Fortran using associated files
`CIR_data.f` and `PIR_data.f` (which we do not include here).
We extract the data files using Julia instead (see `build/ParseIsotropy.jl`).
Two datasets are included in ISOTROPY, one for "ordinary" irreps (`CIR_data.txt`)
and one for "physically real" irreps/coreps in a real form (`PIR_data.txt`).
In practice, we only use the `CIR_data.txt` dataset in Crystalline.
(The `PIR_data.txt` dataset can be used via `build/ParseIsotropy.jl` to obtain
physically real _space group_ (i.e. not little group) irreps, however.)
## References
Included below is the original header of the `CIR_data.txt` and `PIR_data.txt`
files (omitted in the included files for ease of parsing):
> ISO-IR: Complex Irreducible Representations of the 230 Crystallographic Space Groups
> 2011 Version
> Harold T. Stokes and Branton J. Campbell, 2013
The corresponding journal reference is:
- H. T. Stokes, B. J. Campbell, and R. Cordes, "Tabulation of Irreducible Representations of the Crystallographic Space Groups and Their Superspace Extensions", [Acta Cryst. A. **69**, 388-395 (2013)](https://doi.org/10.1107/S0108767313007538). | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | docs | 451 | # Public API
---
```@meta
CurrentModule = Crystalline
```
## Exported types
```@autodocs
Modules = [Crystalline]
Private = false
Order = [:type]
```
## Exported methods
```@autodocs
Modules = [Crystalline]
Private = false
Order = [:function]
```
## Exported macros
```@autodocs
Modules = [Crystalline]
Private = false
Order = [:macro]
```
## Exported constants
```@autodocs
Modules = [Crystalline]
Private = false
Order = [:constant]
``` | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | docs | 1808 | # Elementary band representations
Crystalline.jl provides an interface to access the elementary band representations (EBRs) hosted by the Bilbao Crystallographic Server's [BANDREP](https://www.cryst.ehu.es/cgi-bin/cryst/programs/bandrep.pl) program via [`bandreps`](@ref).
Please cite the original research (listed in the associated docstrings).
As an example, we can obtain the all inequivalent EBRs in space group 219 (F-43c) with:
```@example ebrs
using Crystalline
brs = bandreps(219, 3) # space group 219 (dimension 3)
```
which returns a `BandRepSet`, which itself is an `AbstractVector` of `BandRep`s. This allows us to index into `brs` easily:
```@example ebrs
brs[1] # obtain the EBR induced by Wyckoff position 8a with irrep A
```
By default, `bandreps` returns the spinless EBRs with time-reversal symmetry.
This behavior can be controlled with the keyword arguments `spinful` (default, `false`) and `timereversal` (default, `true`).
By default, only minimal paths are included in the sampling of **k**-vectors; additional paths can be obtained by setting the keyword argument `allpaths = true` (default, `false`).
The distinct topological classes identifiable from symmetry can be calculated via [`classification`](@ref), which uses the Smith normal form's principal factors:
```@example ebrs
classification(brs)
```
Which demonstrates that the symmetry indicator group of spinless particles with time-reversal symmetry in space group 219 is trivial.
## Topology and associated bases
The [`SymmetryBases.jl`](https://github.com/thchr/SymmetryBases.jl) package provides tools to analyze topology of symmetry vectors and compute associated Hilbert bases.
## API
```@meta
CurrentModule = Crystalline
```
```@docs; canonical=false
bandreps
classification
nontrivial_factors
basisdim
``` | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | docs | 1286 | # Bravais.jl
Bravais types, basis systems, and transformations between conventional and primitive settings.
## API
```@meta
CurrentModule = Bravais
```
### Types
```@docs
AbstractBasis
DirectBasis
ReciprocalBasis
AbstractPoint
DirectPoint
ReciprocalPoint
```
### Crystal systems & Bravais types
```@docs
crystalsystem
bravaistype
centering
```
### Basis construction
```@docs
crystal
directbasis
reciprocalbasis
nigglibasis
```
### Transformations
```@docs
primitivebasismatrix
transform
primitivize
conventionalize
cartesianize
cartesianize!
latticize
latticize!
```
### Miscellaneous
```@docs
volume
metricmatrix
```
## Crystalline.jl extensions of Bravais.jl functions
```@meta
CurrentModule = Crystalline
```
### `SymOperation`
```@docs
transform(::SymOperation, ::AbstractMatrix{<:Real}, ::Union{AbstractVector{<:Real}, Nothing}, ::Bool=true)
primitivize(::SymOperation, ::Char, ::Bool)
conventionalize(::SymOperation, ::Char, ::Bool)
```
### `AbstractVec`
```@docs
transform(::Crystalline.AbstractVec, ::AbstractMatrix{<:Real})
primitivize(::Crystalline.AbstractVec, ::Char)
conventionalize(::Crystalline.AbstractVec, ::Char)
```
### `AbstractFourierLattice`
```@docs
primitivize(::AbstractFourierLattice, ::Char)
conventionalize(::AbstractFourierLattice, ::Char)
``` | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | docs | 3093 | # Groups
All groups in Crystalline are concrete instances of the abstract supertype [`Crystalline.AbstractGroup{D}`](@ref), referring to a group in `D` dimensions. `Crystalline.AbstractGroup{D}` is itself a subtype of `AbstractVector{SymOperation{D}}`.
Crystalline currently supports six group types: [`SpaceGroup`](@ref), [`PointGroup`](@ref), [`LittleGroup`](@ref), [`SubperiodicGroup`](@ref), [`SiteGroup`](@ref), and [`MSpaceGroup`](@ref).
## Example: space groups
The one, two, and three-dimensional space groups are accessible via [`spacegroup`](@ref), which takes the space group number `sgnum` and dimension `D` as input (ideally, the dimension is provided as a `Val{D}` for the sake of type stability) and returns a `SpaceGroup{D}` structure:
```@example spacegroup
using Crystalline
D = 3 # dimension
sgnum = 16 # space group number (β€2 in 1D, β€17 in 2D, β€230 in 3D)
sg = spacegroup(sgnum, D) # where practical, `spacegroup` should be called with a `Val{D}` dimension to ensure type stability; here we have D::Int instead for simplicity
```
By default, the returned operations are given in the conventional setting of the International Tables of Crystallography, Volume A (ITA). Conversion to a primitive basis (in the CDML setting) can be accomplished via [`primitivize`](@ref).
In addition to space groups, Crystalline.jl provides access to the operations of point groups ([`pointgroup`](@ref)), little groups ([`littlegroups`](@ref)), subperiodic groups ([`subperiodicgroup`](@ref); including rod, layer, and frieze groups), site symmetry groups ([`sitegroup`](@ref) and [`sitegroups`](@ref)), and magnetic space groups ([`mspacegroup`](@ref)).
### Multiplication tables
We can compute the multiplication table of a space group (under the previously defined notion of operator composition) using [`MultTable`](@ref):
```@example spacegroup
MultTable(sg)
```
Alternatively, exploiting overloading of the `*`-operator, "raw" multiplication tables can be constructed via a simple outer product:
```@example spacegroup
sg .* permutedims(sg) # equivalent to `reshape(kron(sg, sg), (length(sg), length(sg)))`
```
### Symmorphic vs. nonsymmorphic space groups
To determine whether a space group is symmorphic or not, use [`issymmorph`](@ref) taking either a `SpaceGroup`, `LittleGroup`, or `SubperiodicGroup` (or a `SpaceGroup` identified by its number and dimensionality; in this case, using tabulated look-up).
To test whether a given `SymOperation` is symmorphic in a given centering setting, use [`issymmorph(::SymOperation, ::Char)`](@ref)
## Group generators
Generators of `SpaceGroup`s, `PointGroup`s, and `SubperiodicGroup`s are accessible via [`generators`](@ref), e.g.:
```@example spacegroup
ops = generators(sgnum, SpaceGroup{D})
```
To generate a group from a list of generators, we can use the [`generate`](@ref) method. As an example, we can verify that `ops` in fact returns symmetry operations identical to those in `sg`:
```@example spacegroup
generate(ops)
```
## Magnetic space groups
Magnetic space groups are accessible via [`mspacegroup`](@ref).
| Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | docs | 552 | # Crystalline.jl
---
Documentation for [Crystalline.jl](https://github.com/thchr/Crystalline.jl) and [Bravais.jl](https://github.com/thchr/Crystalline.jl/tree/master/Bravais).
!!! note
Crystalline.jl remains a work-in-progress research package.
Breaking changes are likely (but will respect [semver](https://semver.org/) conventions).
```@contents
Pages = ["operations.md",
"groups.md",
"irreps.md",
"bravais.md",
"bandreps.md",
"lattices.md",
"api.md",
"internal-api.md"]
``` | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | docs | 311 | # Internal API
This page lists unexported functionality from Crystalline, that may be of interest to developers.
---
```@meta
CurrentModule = Crystalline
```
## Unexported, internal functionality
```@autodocs
Modules = [Crystalline]
Private = true
Public = false
Order = [:type, :function, :constant]
``` | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | docs | 6610 | # Irreducible representations
Crystalline.jl provides easy access to crystallographic point group irreps, site symmetry group irreps, and the little group irreps of space groups.
Currently, we only provide access to spinless (or "single-valued") irreps.
## Point group irreps
Irreps for the crystallographic point groups are accessible via [`pgirreps`](@ref), with the point group specified either by IUC label and dimensionality.
As an example, we may load the irreps of the 6mm (Cβα΅₯ in Schoenflies notation; see also [`schoenflies(::PointGroup)`](@ref)) point group in 3D.
```@example pgirs
using Crystalline
pgirs = pgirreps("6mm", Val(3))
```
Frequently, the character table of the associated irreps is more informative than the irrep matrices themselves. We can construct this table using [`characters`](@ref), which returns a `CharacterTable`:
```@example pgirs
characters(pgirs)
```
The characters are functions of the conjugacy class (i.e., the characters of operations in the same conjugacy class are equal). Thus, a more compact representation of the character table can be achieved by a class-resolved table, achievable via [`classcharacters`](@ref):
```@example pgirs
classcharacters(pgirs)
```
### Notation
The default point group irrep labeling follows the Bilbao Crystallographic Server's labeling, which in turn follows the 1963 labelling of Koster, Dimmock, Wheeler, & Statz [^2] (which is also followed e.g. by CDML [^1] labeling as well as Bradley and Cracknell's book).
Associated Mulliken (or "spectroscopist's") notation can be obtained via `mulliken`.
## Little group irreps
Little group irreps, sometimes called ''small'' irreps, are accessible via [`lgirreps`](@ref) and provided with CDML [^1] labels (courtesy of ISOTROPY).
As an example, we can obtain the irreps of space group 183 (P6mm; the trivial 3D extension of plane group 17, which in turn is the space group extension of point group 6mm from above) by:
```@example lgirs
using Crystalline
lgirsd = lgirreps(183, Val(3))
```
which returns a dictionary of `LGIrrep`s, indexed by k-labels given as `String`s, corresponding to different little groups.
In general, we include all the little groups included in ISOTROPY; unfortunately, there is no strict guarantee that this includes a full listing of all inequivalent irreps (although it is typically true). The listing typically contains both special points, lines, and planes (and also always the general point).
We can inspect the little group irreps of any particular **k**-point, accessing it via its canonical label.
As before, we can inspect the associated character tables to get an overview of the irreps:
```@example lgirs
lgirs = lgirsd["Ξ"] # little group irreps at the Ξ point
characters(lgirs)
```
## Space group irreps
We currently do not provide access to "full" space group irreps. They can, however, be readily built by induction from little group irreps. Specifically, every little group irrep $D_{\mathbf{k}}^\alpha$ associated with the little group $G_{\mathbf{k}}$, induces a space group irrep, sometimes denoted ${}^*D_{\mathbf{k}}^{\alpha}$ or $D^{\alpha}_{\mathbf{k}}\uparrow G$, in the full space group $G$:[^Inui]
```math
[{}^*D_{\mathbf{k}}^{\alpha}(g)]_{ij}
=
\begin{cases}
D_{\mathbf{k}}^{\alpha}(h_i^{-1}gh_j) & \text{if }h_i^{-1}gh_j \in G_{\mathbf{k}}\\
\boldsymbol{0}_{d_{\mathbf{k}}^{\alpha}\times d_{\mathbf{k}}^{\alpha}} & \text{otherwise}
\end{cases},
```
where $d_{\mathbf{k}}^{\alpha}$ is the dimension of the little group irrep $D^{\alpha}_{\mathbf{k}}$, $\boldsymbol{0}_{d_{\mathbf{k}}^{\alpha}\times d_{\mathbf{k}}^{\alpha}}$ is a $d_{\mathbf{k}}^{\alpha}\times d_{\mathbf{k}}^{\alpha}$ zero matrix, and $h_i$ and $h_j$ iterate over the (left) coset representatives of $G_{\mathbf{k}}$ in $G$ (of which there are $|\mathrm{star}\{\mathbf{k}\}|$, i.e., the order of the star of $\mathbf{k}$). The induced irrep ${}^*D_{\mathbf{k}}^{\alpha}$ is consequently a $d_{\mathbf{k}}^{\alpha}|\mathrm{star}\{\mathbf{k}\}|\times d_{\mathbf{k}}^{\alpha}|\mathrm{star}\{\mathbf{k}\}|$ matrix.
[^Inui]: Inui, Tanabe, & Onodera, *Group Theory and its Applications in Physics*, Springer (1990). Section 11.9.
## Site symmetry irreps
To obtain irreps associated with a given site symmetry group (see [`SiteGroup`](@ref)), use [`siteirreps`](@ref) which obtains the irreps associated with the site symmetry group by identifying a "parent" point group which is isomorphic to the provided site symmetry group, and then returning a suitable permutation of the point group's irreps.
## Time-reversal symmetry & "physically real" irreps
Irreps returned in Crystalline.jl do not assume time-reversal symmetry by default.
To incorporate time-reversal symmetry (or, equivalently, to obtain associated "physically real" irreps - or, more technically, co-representations), which may cause irreps to "stick together", see [`realify`](@ref) (which takes a vector of `PGIrrep`s or `LGIrrep`s).
As an example, the Ξβ, Ξβ, Ξβ
, and Ξβ irreps of point group 6 (Cβ) are intrinsically complex in the absence of time-reversal symmetry:
```@example realirs
using Crystalline
pgirs = pgirreps("6", Val(3))
characters(pgirs)
```
When time-reversal symmetry is incorporated, the irreps stick together pairwise and have real characters:
```@example realirs
pgirsβ² = realify(pgirs)
characters(pgirsβ²)
```
To inspect the reality type of a given irrep, see [`reality`](@ref).
Possible types are `REAL`, `COMPLEX`, and `PSEUDOREAL` (the latter does not arise for point groups):
```@example realirs
label.(pgirs) .=> reality.(pgirs)
```
The reality type can be computed ab initio via [`calc_reality`](@ref), using the Frobenius criterion for `PGIrrep`s and `SiteIrrep`s and the Herring criterion for `LGIrrep`s.
## Data sources
Point group irreps are obtained from the Bilbao Crystallographic Server's [Representations PG program](https://www.cryst.ehu.es/cgi-bin/cryst/programs/representations_point.pl?tipogrupo=spg) and little group irreps of space groups are obtained from [ISOTROPY's 2011 ISO-IR dataset](https://stokes.byu.edu/iso/irtables.php).
If these functionalities are used in published research, please cite the original publications (listed in associated function docstrings).
[^1]: Cracknell, A.P., Davies, B.L., Miller, S.C., & Love, W.F., *Kronecker Product Tables, Vol. 1. General Introduction and Tables of Irreducible Representations of Space Groups*, New York: IFI/Plenum (1979).
[^2]: Koster, G.F., Dimmock, J.O., Wheeler, R.G., & Statz, H., *Properties of the Thirty-two Point Groups*, Cambridge: MIT Press (1963). | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | docs | 308 | # Isosurfaces with space group symmetry
```@meta
CurrentModule = Crystalline
```
```@docs; canonical=false
UnityFourierLattice
ModulatedFourierLattice
levelsetlattice
modulate
primitivize(::AbstractFourierLattice, ::Char)
conventionalize(::AbstractFourierLattice, ::Char)
AbstractFourierLattice(::Any)
``` | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | docs | 3183 | # Symmetry operations
A [`SymOperation{D}`](@ref) is a representation of a spatial symmetry operation $g=\{\mathbf{W}|\mathbf{w}\}$, composed of a rotational $\mathbf{W}$ and a translation part $\mathbf{w}$.
The rotational and translation parts are assumed to share the same basis setting; by default, operations returned by Crystalline.jl are in the conventional setting of the International Tables of Crystallography, Volume A (ITA).
`SymOperation`s can be constructed in two ways, either by explicitly specifying the $\mathbf{W}$ and $\mathbf{w}$:
```@example operations
using Crystalline, StaticArrays
W, w = (@SMatrix [1 0 0; 0 0 1; 0 1 0]), (@SVector [0, 0.5, 0])
op = SymOperation(W, w)
```
or by its equivalent triplet form
```julia
op = SymOperation{3}("x,z+1/2,y")
```
There is also a string macro accessor `@S_str` that allows triplet input via `S"x,z+1/2,y"`.
In the above output, three equivalent notations for the symmetry operation are given: first, the Seitz notation {mββββ|0,Β½,0}, then the triplet notation (x,z+1/2,y), and finally the explicit matrix notation.
## Components
The rotation and translation parts $\mathbf{W}$ and $\mathbf{w}$ of a `SymOperation{D}` $\{\mathbf{W}|\mathbf{w}\}$ can be accessed via [`rotation`](@ref) and [`translation`](@ref), returning an `SMatrix{D, D, Float64}` and an `SVector{D, Float64}`, respectively.
The "augmented" matrix $[\mathbf{W}|\mathbf{w}]$ can similarly be obtained via [`matrix`](@ref).
## Operator composition
Composition of two operators $g_1$ and $g_2$ is defined by
```math
g_1 \circ g_2 = \{\mathbf{W}_1|\mathbf{w}_1\} \circ \{\mathbf{W}_2|\mathbf{w}_2\} = \{\mathbf{W}_1\mathbf{W}_2|\mathbf{w}_1 + \mathbf{W}_1\mathbf{w}_2\}
```
We can compose two `SymOperation`s in Crystalline via:
```@example operations
op1 = S"z,x,y" # 3ββββΊ
op2 = S"z,y,x" # mββββ
op1 * op2
```
which is accessed by an overloaded call to `Base.*`, i.e. the multiplication operator (this enables us to also call derived methods of `*`, such as integer powers (e.g., `S"-y,x-y,z"^3 == S"x,y,z"`)).
Note that composition is taken modulo integer lattice translations by default, such that
```@example operations
op2β² = S"z,y,x+1" # {mββββ|001}
op1 * op2β² # equivalent to compose(op1, op2β², true)
```
rather than `S"x+1,z,y"`, which is the result of direct application of the above composition rule.
To compute "unreduced" composition, the more precise [`compose`](@ref) variant of `*` can be used with an optional third argument `false`:
```@example operations
compose(op1, op2β², false)
```
## Operator inverses
The operator inverse is defined as $\{\mathbf{W}|\mathbf{w}\}^{-1} = \{\mathbf{W}^{-1}|-\mathbf{W}^{-1}\mathbf{w}\}$ and can be computed via
```@example operations
inv(op1) # inv(3ββββΊ)
```
## Action of symmetry operators
A `SymOperation` can act on vectors in direct ([`RVec`](@ref)) or reciprocal ([`KVec`](@ref)) space.
When acting in reciprocal space, translation parts of a `SymOperation` have no effect.
# Magnetic symmetry operations
Magnetic symmetry operations that may incorporate composition with an anti-unitary time-reversal operation can be created via [`MSymOperation`](@ref) (see also [`mspacegroup`](@ref)). | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.6.4 | b4ac59b6877535177e89e1fe1b0ff5d0c2e903de | docs | 66 | We only include _crystallographic_ point groups in this tabulation | Crystalline | https://github.com/thchr/Crystalline.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 923 | using Distributed
# Demo driver setup: spawn one additional worker process and load the
# package everywhere so the (distributed) fit below can run on all workers.
addprocs(1)
@everywhere using Revise  # Revise: development convenience (code hot-reloading)
@everywhere using VersatileHDPMixtureModels
"""
    generate_data(dim, groups_count, sample_count, var, α = 10, γ = 1)

Draw grouped cluster counts from a CRF-based HDP prior (concentrations `α`, `γ`)
and generate grouped Gaussian data of dimension `dim` with variance `var`.
Returns a `(pts, labels)` tuple.
"""
function generate_data(dim, groups_count, sample_count, var, α = 10, γ = 1)
    prior_draws = hdp_prior_crf_draws(sample_count, groups_count, α, γ)
    # the second element of the CRF draws supplies the per-group counts
    # expected by the generator (cf. its name)
    return generate_grouped_gaussian_from_hdp_group_counts(prior_draws[2], dim, var)
end
"""
    results_stats(pred_dict, gt_dict)

Average the `mutualinfo` score between predicted and ground-truth labelings
over the groups `1:length(pred_dict)`.
"""
function results_stats(pred_dict, gt_dict)
    ngroups = length(pred_dict)
    # init = 0 reproduces the original's 0/0 => NaN result for empty input
    total = mapreduce(+, 1:ngroups; init = 0) do k
        mutualinfo(pred_dict[k], gt_dict[k])
    end
    return total / ngroups
end
"""
    run_and_compare(pts, labels, gdim, iters = 100)

Fit an HDP model with a default NIW prior (global dimension `gdim`, local
dimension 0) to `pts` for `iters` iterations and return the average mutual
information against the ground-truth `labels`.
"""
function run_and_compare(pts, labels, gdim, iters = 100)
    # local prior is unused here since the local dimension is 0
    global_prior, _ = create_default_priors(gdim, 0, :niw)
    fitted = hdp_fit(pts, 10, 1, global_prior, iters)
    # the fitted model is the first element of the tuple returned by hdp_fit
    predictions = get_model_global_pred(fitted[1])
    return results_stats(labels, predictions)
end
# Demo: 3D data, 4 groups, sample_count = 100, variance = 100.0
gdim = 3
pts,labels = generate_data(gdim,4,100,100.0)
run_and_compare(pts,labels,gdim) | VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 3078 | include("../CODE/ds.jl")
# Run configuration for a vHDP mixture fit on the Bees dataset.
# Values here are read as globals by the code under ../CODE.
include("../CODE/ds.jl")
include("../CODE/distributions/niw.jl")
using LinearAlgebra
#Global Setting
use_gpu = false
use_darrays = false #Only relevant if use_gpu = false
random_seed = nothing # nothing => RNG is not seeded
#Data Loading specifics
data_path = "../DATA/Bees/"
data_prefix = "XXX" # file-name prefix of the data files — TODO confirm expected naming
groups_count = 4 # number of data groups to load
global_preprocessing = nothing # optional preprocessing hooks; nothing => disabled
local_preprocessing = nothing
#Model Parameters
iterations = 100 # number of sampler iterations
hard_clustering = false
total_dim = 10
local_dim = 7
# α, γ: HDP concentration parameters (presumably α group-level, γ top-level — confirm)
α = 10.0
γ = 1000000000000.0
global_weight = 1.0
local_weight= 1.0
initial_global_clusters = 1
initial_local_clusters = 1
use_dict_for_global = false
ignore_local = true # presumably skips modeling of local parts — confirm
# NOTE(review): split_stop / argmax_sample_stop look like iteration cutoffs for
# split proposals and argmax (hard) sampling near the end of the run — confirm.
split_stop = 5
argmax_sample_stop = 5
glob_dim = 6
# NIW prior over the global (shared) feature dimensions
global_hyper_params = niw_hyperparams(1.0,
zeros(glob_dim),
glob_dim+3,
Matrix{Float64}(I, glob_dim, glob_dim)*0.1)
local_mult = 0.1 # scale of the local NIW scatter matrices
count_list = [1,1,1,1] # local dimensionality per group
# NOTE(review): total_dim = 10 and local_dim = 7 do not match glob_dim = 6 plus
# the 1-dimensional local parts in count_list — some of these fields may be
# unused by the current code path; confirm.
# One local NIW prior per group, sized by count_list
local_hyper_params = [niw_hyperparams(1.0, zeros(i),i+3,Matrix{Float64}(I, i, i)*local_mult) for i in count_list]
# local_hyper_params = [niw_hyperparams(1.0,
# zeros(3),
# 6,
# Matrix{Float64}(I, 3, 3)*local_mult),
# niw_hyperparams(1.0,
# zeros(6),
# 9.0,
# Matrix{Float64}(I, 6, 6)*local_mult),
# niw_hyperparams(1.0,
# zeros(9),
# 12.0,
# Matrix{Float64}(I, 9, 9)*local_mult),
# niw_hyperparams(1.0,
# zeros(12),
# 15.0,
# Matrix{Float64}(I, 12, 12)*local_mult),
# niw_hyperparams(1.0,
# zeros(15),
# 18.0,
# Matrix{Float64}(I, 15, 15)*local_mult),
# niw_hyperparams(1.0,
# zeros(18),
# 21.0,
# Matrix{Float64}(I, 18, 18)*local_mult),
# niw_hyperparams(1.0,
# zeros(21),
# 24.0,
# Matrix{Float64}(I, 21, 21)*local_mult),
# niw_hyperparams(1.0,
# zeros(24),
# 27.0,
# Matrix{Float64}(I, 24, 24)*local_mult),
# niw_hyperparams(1.0,
# zeros(27),
# 30.0,
# Matrix{Float64}(I, 27, 27)*local_mult)]
# local_hyper_params = [niw_hyperparams(1.0,
# zeros(3),
# 100.0,
# Matrix{Float64}(I, 3, 3)*10.0),
# niw_hyperparams(1.0,
# zeros(6),
# 100.0,
# Matrix{Float64}(I, 6, 6)*10.0),
# niw_hyperparams(1.0,
# zeros(9),
# 100.0,
# Matrix{Float64}(I, 9, 9)*10.0),
# niw_hyperparams(1.0,
# zeros(12),
# 100.0,
# Matrix{Float64}(I, 12, 12)*10.0),
# niw_hyperparams(1.0,
# zeros(15),
# 100.0,
# Matrix{Float64}(I, 15, 15)*10.0),
# niw_hyperparams(1.0,
# zeros(18),
# 100.0,
# Matrix{Float64}(I, 18, 18)*10.0),
# niw_hyperparams(1.0,
# zeros(21),
# 100.0,
# Matrix{Float64}(I, 21, 21)*10.0),
# niw_hyperparams(1.0,
# zeros(24),
# 100.0,
# Matrix{Float64}(I, 24, 24)*10.0),
# niw_hyperparams(1.0,
# zeros(27),
# 100.0,
# Matrix{Float64}(I, 27, 27)*10.0)]
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 868 | using VersatileHDPMixtureModels
nips_path = "docword.nips.txt" # UCI bag-of-words ("docword") file for the NIPS corpus
# Parse a UCI bag-of-words "docword" file into a Dict mapping each document id
# to a 1×N Float32 row matrix that repeats every word id by its in-document
# count.
#
# File format: three header lines (D, W, NNZ — skipped because they contain a
# single token each), then lines of "docID wordID count". Documents are
# assumed to appear in contiguous runs, as in the UCI files.
function text_file_to_dict(path)
    data_dict = Dict{Int,Any}()   # typed keys (was an untyped Dict())
    cur_doc = 0                   # id of the document currently being accumulated
    open(path) do file
        cur_vec = Int[]           # word ids (with multiplicity) for cur_doc
        for ln in eachline(file)
            items = [parse(Int64, x) for x in split(ln)]
            # Header lines hold a single number; data lines hold exactly three.
            if length(items) < 3
                continue
            end
            if cur_doc != items[1]
                # Flush the finished document as a 1×N Float32 row matrix.
                data_dict[cur_doc] = Float32.(cur_vec)[:, :]'
                cur_vec = Int[]
                cur_doc = items[1]
            end
            # Repeat the word id `count` times (was a push! loop).
            append!(cur_vec, fill(items[2], items[3]))
        end
        data_dict[cur_doc] = Float32.(cur_vec)[:, :]'
    end
    # Drop the empty sentinel entry created before the first real document.
    delete!(data_dict, 0)
    return data_dict
end
# Load the NIPS corpus and fit a (classic) HDP topic model.
nips_data = text_file_to_dict(nips_path)
# Symmetric Dirichlet prior over the 12419-word NIPS vocabulary.
nips_gprior = topic_modeling_hyper(Float64.(ones(12419))*0.1)
### Unlike the HDP, here to promote splits you need to increase both Ξ³ and Ξ±.
model = hdp_fit(nips_data,100.0,100.0,nips_gprior,80,1,10)
# Posterior per-topic average-word summary from the fitted model state.
avg_word = VersatileHDPMixtureModels.calc_avg_word(model[1])
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
__precompile__()
# Package top-level module: versatile HDP mixture models with Gaussian,
# multinomial and topic-modeling components sharing global clusters.
module VersatileHDPMixtureModels
using Distributed
using StatsBase
using Distributions
using SpecialFunctions
using LinearAlgebra
using JLD2
using Clustering
using Random
using NPZ
using Base
using PDMats
#DS:
include("ds.jl")
#Distributions:
include("distributions/mv_gaussian.jl")
include("distributions/mv_group_gaussian.jl")
include("distributions/multinomial_dist.jl")
include("distributions/topic_modeling_dist.jl")
include("distributions/compact_mnm.jl")
#Priors:
include("priors/multinomial_prior.jl")
include("priors/topic_modeling_prior.jl")
include("priors/niw.jl")
include("priors/niw_stable_var.jl")
include("priors/bayes_network_model.jl")
include("priors/compact_mnm_prior.jl")
#Rest:
include("params_base.jl")
include("utils.jl")
include("shared_actions.jl")
include("local_clusters_actions.jl")
include("global_clusters_actions.jl")
include("crf_hdp.jl")
include("gaussian_generator.jl")
include("hdp_shared_features.jl")
#Data Generators:
export
generate_sph_gaussian_data,
generate_gaussian_data,
generate_mnmm_data,
generate_grouped_mnm_data,
generate_grouped_gaussian_data,
create_mnmm_data,
create_gaussian_data,
create_grouped_gaussian_data,
create_grouped_mnmm_data,
hdp_prior_crf_draws,
generate_grouped_gaussian_from_hdp_group_counts,
#Model DS:
hdp_shared_features,
multinomial_dist,
mv_gaussian,
mv_group_gaussian,
topic_modeling_dist,
bayes_network_model,
niw_hyperparams,
niw_stable_hyperparams,
topic_modeling_hyper,
compact_mnm_hyper,
compact_mnm_dist,
#Functions
hdp_fit,
vhdp_fit,
create_default_priors,
get_model_global_pred,
create_global_labels
end # module
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
# A dish in the Chinese Restaurant Franchise: a shared (global) component.
mutable struct Dish
    count::Int64                        # number of tables (across all restaurants) serving this dish
    cluster_params::cluster_parameters  # component distribution + sufficient statistics
end
# A table inside one restaurant: a group-local seating of points.
mutable struct Table
    count::Int64       # number of points seated at this table
    dish_index::Int64  # index of the Dish this table serves
end
# One restaurant = one data group, with its points and per-point table labels.
mutable struct Restaurant
    tables::Vector{Table}
    points::AbstractArray{Float64,2}   # D×N points of the group
    labels::AbstractArray{Int64,2}     # N×1 table index per point
end
# Full Chinese-Restaurant-Franchise model state.
mutable struct Crf_model
    dishes::Vector{Dish}
    restaurants::Vector{Restaurant}
    prior::distribution_hyper_params   # shared prior for all dishes
end
# Build a fresh Dish with empty sufficient statistics: the component
# distribution is drawn from the prior, and the dish starts with count 1.
function create_new_dish(prior)
    empty_suff = create_sufficient_statistics(prior, [])
    posterior = calc_posterior(prior, empty_suff)
    sampled_dist = sample_distribution(prior)
    params = cluster_parameters(prior, sampled_dist, empty_suff, posterior)
    return Dish(1, params)
end
# Redraw the dish's component distribution from its posterior given the
# points currently assigned to it. `pts_vec` is a vector of per-table point
# matrices (may be empty, in which case the prior is used).
function resample_dish!(pts_vec,dish)
    if length(pts_vec) == 0
        # No data: resample the distribution from the prior.
        suff = create_sufficient_statistics(dish.cluster_params.hyperparams,[])
        post = calc_posterior(dish.cluster_params.hyperparams,suff)
        dist = sample_distribution(dish.cluster_params.hyperparams)
        cluster_params = cluster_parameters(dish.cluster_params.hyperparams,dist,suff,post)
        dish.cluster_params = cluster_params
    else
        # Concatenate all tables' points and resample from the posterior.
        all_pts = reduce(hcat,pts_vec)
        # NOTE(review): `hyperparams` is passed twice here while other call
        # sites use a 2-argument form — confirm this matches the intended
        # create_sufficient_statistics method signature.
        suff = create_sufficient_statistics(dish.cluster_params.hyperparams,dish.cluster_params.hyperparams,all_pts)
        post = calc_posterior(dish.cluster_params.hyperparams,suff)
        dist = sample_distribution(post)
        cluster_params = cluster_parameters(dish.cluster_params.hyperparams,dist,suff,post)
        dish.cluster_params = cluster_params
    end
end
# Build the initial CRF model: each restaurant (group) starts with a single
# table seating all of its points, and every table serves one shared dish.
function init_crf_model(data, prior)
    # Typed vector (was an untyped []); also removed an unused `dishes = []`.
    restaurants = Vector{Restaurant}()
    for i = 1:length(data)
        v = data[i]
        # One table containing every point of the group; all labels point to it.
        table = Table(size(v, 2), 1)
        restaurant = Restaurant([table], v, ones(size(v, 2), 1))
        push!(restaurants, restaurant)
    end
    first_dish = create_new_dish(prior)
    first_dish.count = length(data)  # amount of tables at init (one per group)
    model = Crf_model([first_dish], restaurants, prior)
    return model
end
# CRP seating probabilities for a restaurant: one entry per existing table
# (∝ its occupancy) plus a final entry for opening a new table (∝ Ξ±).
function get_tables_probability_vec(tables, Ξ±)
    # Collect counts as Float64 so a non-integer Ξ± can be appended — the
    # original Vector{Int} comprehension made push!(counts, Ξ±) throw an
    # InexactError for any fractional concentration parameter.
    counts = Float64[x.count for x in tables]
    push!(counts, Ξ±)
    # Hoist the normalizer: it was recomputed inside the comprehension (O(n²)).
    total = sum(counts)
    return counts ./ total
end
# Sample a dish assignment for the block of points `pts`: dish weights are
# proportional to table counts (Ξ³ for the candidate `new_dish`), combined with
# the log-likelihood of the points under each dish's distribution.
# `cur_dish > 0` excludes the block's current dish from its own count.
# Returns an index in 1:(length(dishes)+1); the last index means "new dish".
function sample_dish(dishes, pts, Ξ³, new_dish, cur_dish = -1)
    # Float64 so that a fractional Ξ³ can be appended without InexactError.
    counts = Float64[x.count for x in dishes]
    if cur_dish > 0
        counts[cur_dish] -= 1
    end
    push!(counts, Ξ³)
    total = sum(counts)             # hoisted out of the comprehension
    weights = counts ./ total
    dists = [x.cluster_params.distribution for x in dishes]
    push!(dists, new_dish.cluster_params.distribution)
    # BUGFIX: was `zeros(length(size(pts,2)), …)` which always produced a
    # single row; one likelihood row per point is needed when `pts` holds
    # several columns (as in the table-resampling pass).
    parr = zeros(size(pts, 2), length(dists))
    for (k, v) in enumerate(dists)
        log_likelihood!(reshape((@view parr[:, k]), :, 1), pts, v)
    end
    parr = sum(parr, dims = 1)[:, :] + reshape(log.(weights), 1, :)
    labels = [0][:, :]
    sample_log_cat_array!(labels, parr)
    return labels[1, 1]
end
# Sample a table assignment for `pts` given per-table distributions `dists`
# and CRP seating probabilities `weights` (last entry = new table).
function sample_table(dists, weights, pts)
    # BUGFIX: was `zeros(length(size(pts,2)), …)` == always 1 row; use one
    # likelihood row per point so multi-column inputs are handled correctly.
    parr = zeros(size(pts, 2), length(dists))
    for (k, v) in enumerate(dists)
        log_likelihood!(reshape((@view parr[:, k]), :, 1), pts, v)
    end
    parr = sum(parr, dims = 1) + reshape(log.(weights), 1, :)
    labels = [0][:, :]
    sample_log_cat_array!(labels, parr)
    return labels[1, 1]
end
# Fit an HDP to grouped `data` via Chinese-Restaurant-Franchise Gibbs
# sampling. Ξ± is the table (per-restaurant) concentration, Ξ³ the dish
# (global) concentration. Each iteration: (1) resample every dish's
# distribution, (2) reseat every point, (3) reassign every table's dish.
function crf_hdp_fit(data,Ξ±,Ξ³,prior,iters)
    model = init_crf_model(data,prior)
    #Each iteration is a single pass on all points
    for iter=1:iters
        println("Iter " * string(iter) * " Dish count: " * string(length(model.dishes)))
        #Resample dishes distribution
        # `cur_new_dish` is a prior draw that acts as the candidate "new dish"
        # throughout this sweep; it is replaced whenever it gets instantiated.
        cur_new_dish = create_new_dish(prior)
        for (dish_index,dish) in enumerate(model.dishes)
            pts = []
            # Gather the points of every table (in every restaurant) serving
            # this dish, then redraw the dish's distribution from them.
            for restaurant in model.restaurants
                for (table_index,table) in enumerate(restaurant.tables)
                    if table.dish_index == dish_index
                        push!(pts, restaurant.points[:,restaurant.labels[:] .== table_index])
                    end
                end
            end
            resample_dish!(pts,dish)
        end
        #Samples points
        for restaurant in model.restaurants
            # println([x.count for x in restaurant.tables])
            for i=1:size(restaurant.points,2)
                point = restaurant.points[:,i][:,:]
                prev_table = restaurant.labels[i,1]
                # Remove the point from its table before computing seating probs.
                restaurant.tables[prev_table].count -= 1
                weights = get_tables_probability_vec(restaurant.tables,Ξ±)
                # Pre-sample the dish a hypothetical new table would serve.
                new_table_dish = sample_dish(model.dishes, point,Ξ³,cur_new_dish)
                dists = [model.dishes[x.dish_index].cluster_params.distribution for x in restaurant.tables]
                if new_table_dish > length(model.dishes)
                    push!(dists, cur_new_dish.cluster_params.distribution)
                else
                    push!(dists,model.dishes[new_table_dish].cluster_params.distribution)
                end
                try
                    log.(weights)
                catch e
                    println("error print:")
                    println([x.count for x in restaurant.tables])
                    # NOTE(review): rethrow([e]) wraps the exception in an
                    # array — plain rethrow() would preserve it; confirm intent.
                    rethrow([e])
                end
                new_table = sample_table(dists,weights,point)
                restaurant.labels[i,1] = new_table
                if new_table > length(restaurant.tables)
                    # The point opened a new table (serving `new_table_dish`).
                    push!(restaurant.tables,Table(1,new_table_dish))
                    if new_table_dish > length(model.dishes)
                        # The new table also instantiated a brand-new dish.
                        cur_new_dish.count = 1
                        push!(model.dishes, cur_new_dish)
                        cur_new_dish = create_new_dish(prior)
                    else
                        model.dishes[new_table_dish].count +=1
                    end
                else
                    restaurant.tables[new_table].count += 1
                end
                if restaurant.tables[prev_table].count == 0
                    # Old table is now empty; its dish loses one table.
                    model.dishes[restaurant.tables[prev_table].dish_index].count -= 1
                end
            end
        end
        #Samples tables
        for restaurant in model.restaurants
            for (index,table) in enumerate(restaurant.tables)
                if table.count == 0
                    continue
                end
                # All points seated at this table move together to a new dish.
                point = restaurant.points[:,(restaurant.labels .== index)[:]]
                prev_dish = table.dish_index
                new_table_dish = sample_dish(model.dishes, point,Ξ³,cur_new_dish,prev_dish)
                model.dishes[prev_dish].count -=1
                if new_table_dish > length(model.dishes)
                    cur_new_dish.count = 1
                    push!(model.dishes, cur_new_dish)
                    cur_new_dish = create_new_dish(prior)
                else
                    model.dishes[new_table_dish].count +=1
                end
                # println("old dish:" * string(prev_dish) * " new dish:" * string(new_table_dish))
                table.dish_index = new_table_dish
            end
        end
    end
    return model
end
# Map each restaurant index to its per-point dish labels, resolving each
# point's table assignment to that table's dish (point -> table -> dish).
function get_dish_labels_from_model(model::Crf_model)
    result = Dict()
    for (rest_idx, rest) in enumerate(model.restaurants)
        result[rest_idx] = [rest.tables[rest.labels[p,1]].dish_index for p = 1:size(rest.points, 2)]
    end
    return result
end
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
# Abstract interfaces implemented by each concrete prior / likelihood family.
abstract type distribution_hyper_params end
#Suff statistics must contain N which is the number of points associated with the cluster
abstract type sufficient_statistics end
# A concrete parameter draw from a prior or posterior distribution.
abstract type distibution_sample end
import Base.copy
# All fixed hyperparameters of a model run.
struct model_hyper_params
    global_hyper_params::distribution_hyper_params
    local_hyper_params             # per-group prior (or vector of priors); untyped by design
    Ξ±::Float64                     # local (per-group) DP concentration
    Ξ³::Float64                     # global DP concentration
    Ξ·::Float64
    global_weight::Float64
    local_weight::Float64
    total_dim::Int64               # total data dimensionality
    local_dim::Int64               # first local dimension index (globals are 1:local_dim-1)
end
# One component's full parameter bundle: prior, current draw, sufficient
# statistics, and posterior hyperparameters.
mutable struct cluster_parameters
    hyperparams::distribution_hyper_params
    distribution::distibution_sample
    suff_statistics::sufficient_statistics
    posterior_hyperparams::distribution_hyper_params
end
# A cluster with two sub-clusters (left/right) used for split/merge moves.
mutable struct splittable_cluster_params
    cluster_params::cluster_parameters
    cluster_params_l::cluster_parameters
    cluster_params_r::cluster_parameters
    lr_weights::AbstractArray{Float64, 1}          # mixture weights of the two sub-clusters
    splittable::Bool                               # eligible for a split/merge proposal
    logsublikelihood_hist::AbstractArray{Float64,1} # recent sub-likelihoods (convergence check)
end
# A shared (global) cluster over the global feature dimensions.
mutable struct global_cluster
    cluster_params::splittable_cluster_params
    total_dim::Int64
    local_dim::Int64
    points_count::Int64
    clusters_count::Int64                        # number of local clusters mapped to it
    clusters_sub_counts::AbstractArray{Int64, 1} # local clusters per sub-cluster [left, right]
end
# A group-local cluster, linked to exactly one global cluster.
mutable struct local_cluster
    cluster_params::splittable_cluster_params
    total_dim::Int64
    local_dim::Int64
    points_count::Int64
    global_weight::Float64
    local_weight::Float64
    globalCluster::Int64
    globalCluster_subcluster::Int64 #1 for left, 2 for right
    global_suff_stats::AbstractArray{Int64, 1}  # point counts per global sub-cluster
end
# One data group: its points, labels, and local clusters.
mutable struct local_group
    model_hyperparams::model_hyper_params
    points::AbstractArray{Float64,2}            # D×N group points
    labels::AbstractArray{Int64,2}              # N×1 local cluster per point
    labels_subcluster::AbstractArray{Int64,2}   # N×1 sub-cluster (left/right) per point
    local_clusters::Vector{local_cluster}
    weights::Vector{Float64}
    group_num::Int64
end
# Lightweight snapshot of a group's mutable state (used to ship results
# back from workers).
mutable struct local_group_stats
    labels::AbstractArray{Int64,2}
    labels_subcluster::AbstractArray{Int64,2}
    local_clusters::Vector{local_cluster}
end
# Full model state of the shared-features HDP sampler.
mutable struct hdp_shared_features
    model_hyperparams::model_hyper_params
    groups_dict::Dict                          # group number => local_group
    global_clusters::Vector{global_cluster}
    weights::AbstractArray{Float64, 1}         # global cluster mixture weights
end
# Deep-copy a local cluster, including all nested parameter objects.
copy_local_cluster(c::local_cluster) = deepcopy(c)
# Overwrite a group's mutable state (labels, sub-labels, clusters) with the
# values carried by a `local_group_stats` snapshot.
function update_group_from_stats!(group::local_group, stats::local_group_stats)
    group.labels = stats.labels
    group.labels_subcluster = stats.labels_subcluster
    group.local_clusters = stats.local_clusters
end
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 9582 | using NPZ
using Distributions
using LinearAlgebra
using Distributed
using StatsBase
using Distributions
using SpecialFunctions
using LinearAlgebra
using Random
# Draw N points in D dimensions from a K-component *spherical* Gaussian
# mixture with Dirichlet mixture weights.
# Returns (points D×N, labels, means D×K, covariances D×D×K).
function generate_sph_gaussian_data(N::Int64, D::Int64, K::Int64)
    x = randn(D,N)
    tpi = rand(Dirichlet(ones(K)))
    tzn = rand(Multinomial(N,tpi))   # per-component point counts
    tz = zeros(N)                    # per-point component label
    tmean = zeros(D,K)
    tcov = zeros(D,D,K)
    ind = 1
    println(tzn)
    for i=1:length(tzn)
        indices = ind:ind+tzn[i]-1   # contiguous slots for component i
        tz[indices] .= i
        tmean[:,i] .= rand(MvNormal(zeros(D), 100*Matrix{Float64}(I, D, D)))
        # Isotropic covariance: one InverseGamma draw scales the identity.
        tcov[:,:,i] .= rand(InverseGamma((D+2)/2,1))*Matrix{Float64}(I, D, D)
        # T = chol(slice(tcov,:,:,i))
        # x[:,indices] = broadcast(+, T*x[:,indices], tmean[:,i]);
        d = MvNormal(tmean[:,i], tcov[:,:,i])
        for j=indices
            x[:,j] = rand(d)
        end
        ind += tzn[i]
    end
    x, tz, tmean, tcov
end
# Draw N points in D dimensions from a K-component full-covariance Gaussian
# mixture (InverseWishart covariances, Dirichlet weights).
# Returns (points D×N, labels, means D×K, covariances D×D×K).
function generate_gaussian_data(N::Int64, D::Int64, K::Int64)
    x = randn(D,N)
    tpi = rand(Dirichlet(ones(K)))
    tzn = rand(Multinomial(N,tpi))   # per-component point counts
    tz = zeros(N)
    tmean = zeros(D,K)
    tcov = zeros(D,D,K)
    ind = 1
    println(tzn)
    for i=1:length(tzn)
        indices = ind:ind+tzn[i]-1
        tz[indices] .= i
        tmean[:,i] .= rand(MvNormal(zeros(D), 100*Matrix{Float64}(I, D, D)))
        tcov[:,:,i] .= rand(InverseWishart(D+2, Matrix{Float64}(I, D, D)))
        # T = chol(slice(tcov,:,:,i))
        # x[:,indices] = broadcast(+, T'*x[:,indices], tmean[:,i]);
        d = MvNormal(tmean[:,i], tcov[:,:,i])
        for j=indices
            x[:,j] = rand(d)
        end
        ind += tzn[i]
    end
    x, tz, tmean, tcov
end
# Draw N D-dimensional multinomial observations (`trials` draws each) from a
# K-component multinomial mixture with uniform component assignment.
# Each component's probability vector is Dirichlet with one boosted entry.
# Returns (counts D×N, labels, component probability vectors D×K).
function generate_mnmm_data(N::Int64, D::Int64, K::Int64, trials::Int64)
    clusters = zeros(D,K)
    x = zeros(D,N)
    labels = rand(1:K,(N,))
    for i=1:K
        alphas = rand(1:20,(D,))
        alphas[i] = rand(30:100)   # boost coordinate i to differentiate components
        clusters[:,i] = rand(Dirichlet(alphas))
    end
    for i=1:N
        x[:,i] = rand(Multinomial(trials,clusters[:,labels[i]]))
    end
    return x, labels, clusters
end
# Generate grouped multinomial data: the first D_global dimensions come from
# globally shared components; the remaining D_local dimensions are drawn from
# group-specific multinomials. Returns (group=>points dict, group=>labels dict)
# where labels are N×2 [global component, local component].
function generate_grouped_mnm_data(N::Int64, D_global::Int64, D_local, K_global::Int64, K_local::Int64, groups_count::Int64, rand_local::Bool, trials::Int64)
    global_weights = rand(Dirichlet(ones(K_global)))
    pts_dict = Dict()
    labels_dict = Dict()
    clusters_g = zeros(D_global,K_global)
    # Shared global components: sparse Dirichlet vectors with a few boosted entries.
    for i=1:length(global_weights)
        alphas = ones(D_global)*2
        alphas[rand(1:D_global,Int(floor(D_global/40)))] .= rand(trials/2:trials)
        clusters_g[:,i] = rand(Dirichlet(alphas))
    end
    for j=1:groups_count
        x = zeros(D_global + D_local,N)
        x_labels = zeros(size(x,2),2)
        # Optionally randomize the number of local components per group.
        if rand_local
            local_k = rand(1:K_local*2)
        else
            local_k = K_local
        end
        # Each local component points at one shared global component.
        clusters_tzn = sample(1:length(global_weights),ProbabilityWeights(global_weights),local_k)
        local_weights = rand(Dirichlet(ones(local_k)*100))
        clusters_l = zeros(D_global+D_local,local_k)
        ind = 1
        group_ind = 1
        group_tzn = rand(Multinomial(N,local_weights))
        # group_tzn = rand(Multinomial(N,ones(local_k)))
        for i=1:length(local_weights)
            indices = ind:ind+group_tzn[i]-1
            # println(indices)
            alphas = ones(D_local)*2
            alphas[rand(1:D_local,Int(floor(D_local/1.25)))] .= rand(trials/2:trials)
            # local_part = rand(Dirichlet(alphas))
            # clusters_l[:,i] .= cat(clusters_g[:,clusters_tzn[i]], alphas, dims = [1])
            pvector = rand(Dirichlet(alphas))
            for a=indices
                # Global block from the shared component, local block group-specific.
                x[1:D_global,a] = rand(Multinomial(trials,clusters_g[:,clusters_tzn[i]]))
                x[D_global+1:end,a] = rand(Multinomial(trials,pvector))
                x_labels[a,1] = clusters_tzn[i]
                x_labels[a,2] = i
            end
            ind += group_tzn[i]
        end
        pts_dict[j] = x
        labels_dict[j] = x_labels
    end
    return pts_dict, labels_dict
end
# Generate grouped Gaussian data: the first D_global dimensions come from
# globally shared Gaussians; the remaining D_local dimensions are drawn from
# group-specific Gaussians. When `hdp` is true the local dimensions are
# zeroed out (classic-HDP setting). Returns (group=>points, group=>labels)
# where labels are N×2 [global component, local component].
function generate_grouped_gaussian_data(N::Int64, D_global::Int64, D_local, K_global::Int64, K_local::Int64, groups_count::Int64, rand_local::Bool, var_size, hdp=false)
    global_weights = rand(Dirichlet(ones(K_global)))
    pts_dict = Dict()
    labels_dict = Dict()
    tmean = zeros(D_global,K_global)
    tcov = zeros(D_global,D_global,K_global)
    # Shared global components.
    for i=1:length(global_weights)
        tmean[:,i] .= rand(MvNormal(zeros(D_global), var_size*Matrix{Float64}(I, D_global, D_global)))
        tcov[:,:,i] .= rand(InverseWishart(D_global+2, Matrix{Float64}(I, D_global, D_global)))
    end
    for j=1:groups_count
        x = randn(D_global + D_local,N)
        x_labels = zeros(size(x,2),2)
        if rand_local
            local_k = rand(K_local-2:K_local+2)
        else
            local_k = K_local
        end
        # Each local component is attached to one shared global component.
        clusters_tzn = sample(1:length(global_weights),ProbabilityWeights(global_weights),local_k)
        local_weights = rand(Dirichlet(ones(local_k)*100))
        group_mean = zeros(D_global+D_local,local_k)
        group_cov = zeros(D_global+D_local,D_global+D_local,local_k)
        ind = 1
        group_ind = 1
        group_tzn = rand(Multinomial(N,local_weights))
        # group_tzn = rand(Multinomial(N,ones(local_k)))
        for i=1:length(local_weights)
            indices = ind:ind+group_tzn[i]-1
            g_mean = rand(MvNormal(zeros(D_local), var_size*Matrix{Float64}(I, D_local, D_local)))
            g_cov = rand(InverseWishart(D_local+2, Matrix{Float64}(I, D_local, D_local)))
            # Block-diagonal composition of the global and local parts.
            group_mean[:,i] .= cat(tmean[:,clusters_tzn[i]], g_mean, dims = [1])
            group_cov[:,:,i] .= cat(tcov[:,:,clusters_tzn[i]], g_cov, dims = [1,2])
            d = MvNormal(group_mean[:,i], group_cov[:,:,i])
            # NOTE: the loop variable `j` below shadows the group index `j`.
            for j=indices
                x[:,j] = rand(d)
                x_labels[j,1] = clusters_tzn[i]
                x_labels[j,2] = i
            end
            ind += group_tzn[i]
        end
        if hdp
            x[D_global+1:end,:] .= 0
        end
        pts_dict[j] = x
        labels_dict[j] = x_labels
    end
    return pts_dict, labels_dict
end
# Generate two large multinomial-mixture datasets and save them as .npy files.
function create_mnmm_data()
    samples, _, _ = generate_mnmm_data(10^6, 100, 6, 1000)
    npzwrite("data/mnmm/1milD100K6.npy", samples')
    samples, _, _ = generate_mnmm_data(10^6, 100, 60, 100)
    npzwrite("data/mnmm/1milD100K60.npy", samples')
end
# Generate a 2-D Gaussian-mixture dataset and save it as a .npy file.
function create_gaussian_data()
    samples, _, _ = generate_gaussian_data(10^5, 2, 20)
    npzwrite("data/2d-1mil/samples_100k1.npy", samples')
end
# Generate grouped Gaussian data with a fixed configuration and dump each
# group's points and labels to .npy files.
# Configuration: 10000 samples, 2 global dims, 1 local dim, 4 global / 5 local
# components, 5 groups, randomized local count, variance 60, HDP mode on.
function create_grouped_gaussian_data()
    out_dir = "data/3d-gaussians/"
    prefix = "gg"
    pts, labels = generate_grouped_gaussian_data(10000, 2, 1, 4, 5, 5, true, 60, true)
    for (group, mat) in pts
        npzwrite(out_dir * prefix * string(group) * ".npy", mat')
    end
    for (group, mat) in labels
        npzwrite(out_dir * prefix * "_labels" * string(group) * ".npy", mat)
    end
end
# Generate grouped multinomial data with a fixed configuration and dump each
# group's points and labels to .npy files.
# Configuration: 10000 samples, 100 global + 100 local dims, 10 global /
# 20 local components, 4 groups, fixed local count, 1000 trials.
function create_grouped_mnmm_data()
    out_dir = "data/200d-mnm/"
    prefix = "g"
    pts, labels = generate_grouped_mnm_data(10000, 100, 100, 10, 20, 4, false, 1000)
    for (group, mat) in pts
        npzwrite(out_dir * prefix * string(group) * ".npy", mat')
    end
    for (group, mat) in labels
        npzwrite(out_dir * prefix * "_labels" * string(group) * ".npy", mat)
    end
end
# One CRP draw: sample an index in 1:(length(tables)+1) where entry i is
# proportional to tables[i] and the last entry (a new table) to Ξ±.
# BUGFIX: the original `push!(tables, Ξ±)` mutated the *caller's* vector;
# hdp_prior_crf_draws passes `global_tables_count` directly, so every draw
# appended a spurious Ξ³ entry to the global counts. Build a fresh vector.
function single_crp_draw(tables, Ξ±)
    probs = vcat(tables, Ξ±)
    total = sum(probs)
    table_probs = [x / total for x in probs]
    return sample(ProbabilityWeights(table_probs))
end
# Draw a full Chinese-Restaurant-Franchise configuration from the HDP prior:
# N points per group, J groups, Ξ± table concentration, Ξ³ dish concentration.
# Returns (global mixture, group mixtures, per-group table counts,
# per-group table->dish assignments).
function hdp_prior_crf_draws(N,J,Ξ±,Ξ³)
    groups_tables_counts = Dict()
    # Stage 1: seat N customers per group (local CRPs).
    for j=1:J
        table_counts = []
        for i=1:N
            # NOTE(review): `table_counts[:]` passes a copy; verify that
            # single_crp_draw does not mutate its argument, since the global
            # stage below passes the live `global_tables_count` vector.
            point_table = single_crp_draw(table_counts[:],Ξ±)
            if point_table == length(table_counts)+1
                push!(table_counts,1)
            else
                table_counts[point_table] += 1
            end
        end
        groups_tables_counts[j] = table_counts
    end
    # Stage 2: assign each table a dish via the top-level CRP, interleaving
    # the groups (one table per group per round) until all are assigned.
    global_tables_count = []
    groups_tables_assignments = Dict([i=>[] for i=1:J])
    cur_group_table = Dict([i=>1 for i=1:J])
    is_done = [false for i=1:J]
    while any(is_done .== false)
        for j=1:J
            group_assignments = groups_tables_assignments[j]
            i = cur_group_table[j]
            if i > length(groups_tables_counts[j])
                is_done[j] = true
                continue
            end
            table_assignment = single_crp_draw(global_tables_count,Ξ³)
            if table_assignment == length(global_tables_count)+1
                push!(global_tables_count,1)
            else
                global_tables_count[table_assignment] += 1
            end
            push!(group_assignments,table_assignment)
            groups_tables_assignments[j] = group_assignments
            cur_group_table[j]+=1
        end
    end
    # Normalize the table counts into mixtures.
    global_mixture = [x / sum(global_tables_count) for x in global_tables_count]
    groups_mixtures = Dict()
    for j=1:J
        local_mixture = zeros(length(global_mixture))
        for i = 1:length(global_mixture)
            if i in groups_tables_assignments[j]
                # Total customers in group j sitting at tables serving dish i.
                local_mixture[i] = sum(groups_tables_counts[j][groups_tables_assignments[j] .== i])
            end
        end
        groups_mixtures[j] = local_mixture
    end
    return global_mixture,groups_mixtures,groups_tables_counts,groups_tables_assignments
end
# Given per-group component counts (e.g. from hdp_prior_crf_draws), draw a
# shared set of Gaussian components and sample each group's points from them.
# Returns (group=>points dim×N, group=>labels).
function generate_grouped_gaussian_from_hdp_group_counts(group_counts, dim, var_size)
    pts_dict = Dict()
    labels_dict = Dict()
    K = length(group_counts[1])   # number of shared components
    tmean = zeros(dim,K)
    tcov = zeros(dim,dim,K)
    J = length(group_counts)
    components = []
    # Draw the shared components once.
    for i=1:length(group_counts[1])
        tmean[:,i] .= rand(MvNormal(zeros(dim), var_size*Matrix{Float64}(I, dim, dim)))
        tcov[:,:,i] .= rand(InverseWishart(dim+2, Matrix{Float64}(I, dim, dim)))
        push!(components,MvNormal(tmean[:,i], tcov[:,:,i]))
    end
    # Sample each group's points according to its per-component counts.
    for j=1:J
        points = reduce(hcat,[rand(components[i],Int(group_counts[j][i])) for i=1:length(group_counts[j])])
        labels = reduce(vcat,[Int.(ones(Int(group_counts[j][i])))*i for i=1:length(group_counts[j])])
        pts_dict[j] = points
        labels_dict[j] = labels
    end
    return pts_dict, labels_dict
end
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
# Build the initial global cluster from *all* points of all groups (their
# global dimensions 1:local_dim-1), counting how many local clusters map to
# cluster `cluster_index`.
function create_first_global_cluster(hyperparams::model_hyper_params, groups_dict::Dict, cluster_index::Int64)
    pts = Vector{AbstractArray{Float64,2}}()
    sub_labels = Vector{AbstractArray{Int64,2}}()
    pts_to_groups = Vector{AbstractArray{Int64,1}}()
    count = 0
    for (k,v) in groups_dict
        push!(pts,(@view v.points[1:hyperparams.local_dim-1,:]))
        push!(sub_labels,v.labels_subcluster)
        # Record the owning group number of each point.
        push!(pts_to_groups,ones(Int64,(size(pts[end],2)))*k)
        for c in v.local_clusters
            if c.globalCluster == cluster_index
                count +=1
            end
        end
    end
    # Start from the prior (empty sufficient statistics), then update with data.
    suff = create_sufficient_statistics(hyperparams.global_hyper_params, [])
    # suff = nothing
    post = hyperparams.global_hyper_params
    dist = sample_distribution(post)
    cp = cluster_parameters(hyperparams.global_hyper_params, dist, suff, post)
    cpl = deepcopy(cp)
    cpr = deepcopy(cp)
    splittable = splittable_cluster_params(cp,cpl,cpr,[0.5,0.5], false, ones(20).*-Inf)
    all_pts = reduce(hcat,pts)
    all_sub_labels = reduce(vcat,sub_labels)[:]
    all_pts_to_group = reduce(vcat,pts_to_groups)[:]
    update_splittable_cluster_params!(splittable,all_pts[1:hyperparams.local_dim-1,:],
        all_sub_labels,
        true, all_pts_to_group)
    # println(splittable)
    # println(count)
    cluster = global_cluster(splittable, hyperparams.total_dim, hyperparams.local_dim,splittable.cluster_params.suff_statistics.N,count,[1,1])
    return cluster
end
# Convert a vector of unnormalized log-probabilities into normalized
# log-probabilities (log-softmax), using max-subtraction for stability.
# BUGFIX: the original exponentiated the *sum* of the shifted values instead
# of summing the exponentials, then divided the (negative) log values by
# their sum and took log again — producing NaNs (log of negatives) for
# generic input. This computes x_i - logsumexp(x) directly.
function get_p_for_point(x)
    shifted = x .- maximum(x)
    return shifted .- log(sum(exp.(shifted)))
end
# Sample (or, if `final`, argmax) a global-cluster label for every local
# cluster of group `group_num`. Relies on the globals `groups_dict` and
# `global_clusters_vector` (distributed-worker state).
function sample_group_cluster_labels(group_num::Int64, weights::AbstractArray{Float64, 1},final::Bool)
    group = groups_dict[group_num]
    points = group.points
    # Per-point log-likelihood under each global cluster (global dims only).
    parr = zeros(length(group.labels), length(global_clusters_vector))
    for (k,v) in enumerate(global_clusters_vector)
        local_dim = v.local_dim
        log_likelihood!((@view parr[:,k]), points[1 : local_dim-1,:],v.cluster_params.cluster_params.distribution, group_num)
    end
    clusters_parr = zeros(length(group.local_clusters),length(global_clusters_vector))
    labels = zeros(Int64,length(group.local_clusters),1)
    for (i,c) in enumerate(group.local_clusters)
        # Sum the log-likelihoods of this local cluster's points per global cluster.
        relevant_arr = parr[(@view group.labels[:]) .== i,:]
        clusters_parr[i,:] .= sum(relevant_arr, dims = 1)[:] + log.(weights)
        sum_arr_trick = exp.(clusters_parr[i,:])   # NOTE(review): unused value
        # Max-subtraction before exponentiation for numerical stability.
        testarr = clusters_parr[i,:] .- maximum(clusters_parr[i,:])
        testarr = exp.(testarr)
        if final
            labels[i,1] = argmax(clusters_parr[i,:][:])
        else
            labels[i,1] = sample(1:length(global_clusters_vector), ProbabilityWeights(testarr[:]))
        end
    end
    return labels
end
# Sample (or argmax when `final`) the global cluster for a single local
# cluster `i` of `group`, weighting likelihoods by the global mixture.
function sample_cluster_label(group::local_group, cluster::local_cluster ,i, weights::AbstractArray{Float64, 1},final::Bool)
    # Global-dimension slice of the points assigned to local cluster i.
    points = group.points[1 : cluster.local_dim - 1, @view (group.labels .== i)[:]]
    parr = zeros(size(points,2), length(global_clusters_vector))
    for (k,v) in enumerate(global_clusters_vector)
        log_likelihood!((@view parr[:,k]),points,v.cluster_params.cluster_params.distribution,group.group_num)
    end
    weights = reshape(weights,1,:)
    sum_arr = sum(parr,dims = 1)
    sum_arr .+= log.(weights)
    sum_arr_trick = exp.(sum_arr)   # NOTE(review): unused value
    # Max-subtraction before exponentiation for numerical stability.
    testarr = sum_arr .- maximum(sum_arr)
    testarr = exp.(testarr)
    if final
        cluster.globalCluster = argmax(sum_arr[:])
    else
        cluster.globalCluster = sample(1:length(global_clusters_vector), ProbabilityWeights(testarr[:]))
    end
    # println("sum arr: " *string(sum_arr) * " parr:" * string(sum_arr_trick) * " testarr:" * string(testarr) * " choosen: " * string(cluster.globalCluster))
    return cluster.globalCluster
end
# Resample every local cluster's global label across all groups (distributed
# via @spawnat when the global `mp` flag is set, otherwise threaded), then
# refresh each global cluster's `clusters_count`.
function sample_clusters_labels!(model::hdp_shared_features, final::Bool)
    labels_dict = Vector{Array}(undef,length(model.groups_dict))
    groups_count = zeros(length(global_clusters_vector))
    wvector = model.weights
    if mp
        @sync for (k,v) in model.groups_dict
            # Round-robin over workers by group number.
            @async labels_dict[k] = @spawnat ((k % nworkers())+2) sample_group_cluster_labels(k, wvector, final)
            # labels_dict[k] = Dict()
            # for (i,c) in enumerate(v.local_clusters)
            #     # labels_dict[k][i] = @spawn sample_cluster_label(c, v.points[1 : v.model_hyperparams.local_dim - 1, (@view (v.labels .== i)[:])], model.weights, final)
            #     labels_dict[k][i] = sample_cluster_label(v,c, i, model.weights, final)
            # end
        end
    else
        Threads.@threads for k=1:length(model.groups_dict)
            labels_dict[k] = sample_group_cluster_labels(k, wvector, final)
        end
    end
    #at ((k % num_of_workers)+1)
    # Collect results (futures in the mp case) and update counts.
    for (k,v) in model.groups_dict
        fetched_labels = fetch(labels_dict[k])
        for (i,c) in enumerate(v.local_clusters)
            c.globalCluster = fetched_labels[i,1]
            groups_count[c.globalCluster] += 1
        end
    end
    for (i,v) in enumerate(groups_count)
        global_clusters_vector[i].clusters_count = v
    end
end
# Sample the global sub-cluster (left/right) assignment of every local
# cluster in every group, then refresh each global cluster's sub-counts.
function sample_sub_clusters!(model::hdp_shared_features)
    labels_dict = Dict()
    for v in global_clusters_vector
        v.clusters_sub_counts = [0,0]
    end
    for (k,v) in model.groups_dict
        # BUGFIX: the scratch vector must have one slot per *local cluster* of
        # this group; it was sized by the number of groups, overflowing when a
        # group has more clusters than there are groups. Vector{Any} so it can
        # hold both Futures (mp path) and plain results (threaded path).
        labels_dict[k] = Vector{Any}(undef,length(v.local_clusters))
        if mp
            for (i,c) in enumerate(v.local_clusters)
                labels_dict[k][i] = @spawnat ((k % num_of_workers)+1) sample_cluster_sub_label(c, v.points[1 : v.model_hyperparams.local_dim - 1, (@view (v.labels .== i)[:])])
            end
        else
            Threads.@threads for i=1:length(v.local_clusters)
                # BUGFIX: `local_clusters` was an undefined name here; the
                # group's own cluster vector is `v.local_clusters`.
                c = v.local_clusters[i]
                labels_dict[k][i] = sample_cluster_sub_label(c, v.points[1 : v.model_hyperparams.local_dim - 1, (@view (v.labels .== i)[:])])
            end
        end
    end
    # Collect (fetch is a no-op for non-Future values) and accumulate counts.
    for (k,v) in model.groups_dict
        for (i,c) in enumerate(v.local_clusters)
            c.globalCluster_subcluster = fetch(labels_dict[k][i])
            global_clusters_vector[c.globalCluster].clusters_sub_counts[c.globalCluster_subcluster] += 1
        end
    end
end
# Execute a split: the right sub-cluster of cluster `index` becomes a new
# global cluster stored at `new_index`, while the left sub-cluster takes over
# the original slot.
function split_cluster!(model::hdp_shared_features, index::Int64, new_index::Int64)
    parent = global_clusters_vector[index]
    child = deepcopy(parent)
    Ξ³ = model.model_hyperparams.Ξ³
    child.cluster_params = create_splittable_from_params(parent.cluster_params.cluster_params_r, Ξ³)
    parent.cluster_params = create_splittable_from_params(parent.cluster_params.cluster_params_l, Ξ³)
    child.points_count = child.cluster_params.cluster_params.suff_statistics.N
    parent.points_count = parent.cluster_params.cluster_params.suff_statistics.N
    global_clusters_vector[new_index] = child
end
# Execute a merge: cluster `index_r` is folded into cluster `index_l`; the
# right slot is emptied (N = 0, clusters_count = 0) but kept in the vector.
function merge_clusters!(model::hdp_shared_features,index_l::Int64, index_r::Int64)
    new_splittable_cluster = merge_clusters_to_splittable(global_clusters_vector[index_l].cluster_params.cluster_params, global_clusters_vector[index_r].cluster_params.cluster_params, model.model_hyperparams.Ξ±)
    global_clusters_vector[index_l].cluster_params = new_splittable_cluster
    global_clusters_vector[index_l].clusters_count += global_clusters_vector[index_r].clusters_count
    global_clusters_vector[index_l].cluster_params.splittable = true
    # Empty the absorbed cluster in place.
    global_clusters_vector[index_r].cluster_params.cluster_params.suff_statistics.N = 0
    global_clusters_vector[index_r].cluster_params.splittable = true
    global_clusters_vector[index_r].clusters_count = 0
end
# Metropolis–Hastings split decision for one global cluster: writes 1 into
# `should_split` (a 1-element view) when the split is accepted. The Hastings
# ratio combines the marginal likelihoods of the two sub-clusters against the
# whole cluster with the per-group assignment-count terms.
function should_split!(should_split::AbstractArray{Float64,1},
        cluster_params::splittable_cluster_params,
        groups_dict::Dict,
        Ξ±::Float64,
        Ξ³::Float64,
        c_count::Int64,
        lr_count::AbstractArray{Int64,1},
        index::Int64,
        glob_weight::Float64,
        final::Bool)
    cpl = cluster_params.cluster_params_l
    cpr = cluster_params.cluster_params_r
    cp = cluster_params.cluster_params
    # Never split on the final pass or when a sub-cluster is empty.
    if final || cpl.suff_statistics.N == 0 ||cpr.suff_statistics.N == 0 #||lr_count[1] == 0 || lr_count[2] == 0
        should_split .= 0
        return
    end
    log_likihood_l = log_marginal_likelihood(cpl.hyperparams, cpl. posterior_hyperparams, cpl.suff_statistics)
    log_likihood_r = log_marginal_likelihood(cpr.hyperparams, cpr. posterior_hyperparams, cpr.suff_statistics)
    log_likihood = log_marginal_likelihood(cp.hyperparams, cp. posterior_hyperparams, cp.suff_statistics)
    # Log Hastings ratio of the split proposal.
    log_HR = log(Ξ³) + logabsgamma(cpl.suff_statistics.N)[1] + log_likihood_l + logabsgamma(cpr.suff_statistics.N)[1] + log_likihood_r -(logabsgamma(cp.suff_statistics.N)[1] + log_likihood) +
        cp.suff_statistics.N*log(glob_weight)-cpl.suff_statistics.N*log(glob_weight*cluster_params.lr_weights[1])-cpr.suff_statistics.N*log(glob_weight*cluster_params.lr_weights[2])
    # Add the per-group local-cluster assignment contribution.
    log_HR += get_groups_split_log_likelihood(groups_dict,
        index,
        cluster_params.lr_weights[1],
        cluster_params.lr_weights[2],
        glob_weight,
        Ξ±)
    if log_HR > log(rand())
        should_split .= 1
    end
end
# Metropolis–Hastings merge decision for the cluster pair (i, j): writes 1
# into `should_merge` when accepted. On the final pass, accepts whenever the
# ratio exceeds log(0.5).
function should_merge!(should_merge::AbstractArray{Float64,1},
        cpl::cluster_parameters,
        cpr::cluster_parameters,
        groups_dict::Dict,
        Ξ±::Float64,
        c1_count::Int64,
        c2_count::Int64,
        i::Int64,
        j::Int64,
        wi::Float64,
        wj::Float64,
        final::Bool)
    # Hypothetical merged cluster: pooled sufficient statistics + posterior.
    new_suff = aggregate_suff_stats(cpl.suff_statistics, cpr.suff_statistics)
    cp = cluster_parameters(cpl.hyperparams, cpl.distribution, new_suff,cpl.posterior_hyperparams)
    cp.posterior_hyperparams = calc_posterior(cp.hyperparams, cp.suff_statistics)
    log_likihood_l = log_marginal_likelihood(cpl.hyperparams, cpl.posterior_hyperparams, cpl.suff_statistics)
    log_likihood_r = log_marginal_likelihood(cpr.hyperparams, cpr.posterior_hyperparams, cpr.suff_statistics)
    log_likihood = log_marginal_likelihood(cp.hyperparams, cp.posterior_hyperparams, cp.suff_statistics)
    # Log Hastings ratio of the merge proposal.
    log_HR = -log(Ξ±) + logabsgamma(Ξ±)[1] -logabsgamma(wi*Ξ±)[1] -logabsgamma(wj*Ξ±)[1] + logabsgamma(cp.suff_statistics.N)[1] -logabsgamma(cp.suff_statistics.N + Ξ±)[1] + logabsgamma(cpl.suff_statistics.N + wi*Ξ±)[1]-logabsgamma(cpl.suff_statistics.N)[1] -
        logabsgamma(cpr.suff_statistics.N)[1] + logabsgamma(cpr.suff_statistics.N + wj*Ξ±)[1]+
        log_likihood- log_likihood_l- log_likihood_r
    if (log_HR > log(rand())) || (final && log_HR > log(0.5))
        should_merge .= 1
    end
end
# Per-group contribution of a split proposal: for every local cluster mapped
# to `global_cluster_index`, add the Dirichlet/gamma terms implied by its
# left/right sub-cluster point counts. The global flag `is_tp` selects an
# alternative (topic-modeling) variant of the terms.
function get_groups_split_log_likelihood(groups_dict::Dict,
        global_cluster_index::Int64,
        lweight::Float64,
        rweight::Float64,
        glob_weight::Float64,
        Ξ³::Float64)
    total_likelihood = 0.0
    for (k,group) in groups_dict
        lcount = 0.0
        rcount = 0.0
        for c in group.local_clusters
            if c.globalCluster == global_cluster_index
                # Sub-cluster point counts of this local cluster.
                lcount = c.global_suff_stats[1]
                rcount = c.global_suff_stats[2]
                if is_tp == false
                    total_likelihood += (logabsgamma(Ξ³ * glob_weight)[1] - logabsgamma(Ξ³ * glob_weight + lcount+ rcount)[1] +
                        logabsgamma(Ξ³ * glob_weight * lweight + lcount)[1] - logabsgamma(Ξ³ * glob_weight * lweight)[1] +
                        logabsgamma(Ξ³ * glob_weight * rweight + rcount)[1] - logabsgamma(Ξ³ * glob_weight * rweight)[1])
                else
                    total_likelihood += (logabsgamma(Ξ³ * glob_weight)[1] - logabsgamma( glob_weight + lcount+ rcount)[1] +
                        logabsgamma(Ξ³ * glob_weight * lweight + lcount)[1] - logabsgamma(glob_weight * lweight)[1] +
                        logabsgamma(Ξ³ * glob_weight * rweight + rcount)[1] - logabsgamma(glob_weight * rweight)[1])
                end
            end
        end
    end
    # println(total_likelihood)
    return total_likelihood
end
# Per-group contribution of a merge proposal between clusters i and j;
# returns the *negated* total (it enters the merge ratio with opposite sign).
function get_groups_merge_log_likelihood(groups_dict::Dict,
        global_cluster_i::Int64,
        global_cluster_j::Int64,
        weight_i::Float64,
        weight_j::Float64,
        Ξ³::Float64)
    total_likelihood = 0.0
    for (k,group) in groups_dict
        lcount = 0.0
        rcount = 0.0
        # Accumulate the groups' point counts in each of the two clusters.
        for c in group.local_clusters
            if c.globalCluster == global_cluster_i
                lcount += c.cluster_params.cluster_params_l.suff_statistics.N
            end
            if c.globalCluster == global_cluster_j
                rcount += c.cluster_params.cluster_params_r.suff_statistics.N
            end
        end
        # NOTE(review): `(weight_i+weight_j)[1]` is scalar indexing of a
        # Float64 (a no-op) — likely a leftover; confirm the intended formula.
        total_likelihood += logabsgamma(Ξ³ * (weight_i+weight_j))[1] - logabsgamma(Ξ³ * (weight_i+weight_j)[1] + lcount+ rcount)[1] +
            logabsgamma(Ξ³ * weight_i + lcount)[1] - logabsgamma(Ξ³ * weight_i)[1] +
            logabsgamma(Ξ³ * weight_j + rcount)[1] - logabsgamma(Ξ³ * weight_j)[1]
    end
    return -total_likelihood
end
# Run the split test on every splittable global cluster, grow the cluster
# vector, and execute all accepted splits. Returns the indices touched
# (each accepted split contributes its old and new index).
function check_and_split!(model::hdp_shared_features, final::Bool)
    split_arr= zeros(length(global_clusters_vector))
    for (index,cluster) in enumerate(global_clusters_vector)
        # println("index: " * string(index) * " splittable: " * string(cluster.cluster_params.splittable))
        if cluster.cluster_params.splittable == true
            should_split!((@view split_arr[index,:]),
                cluster.cluster_params,
                model.groups_dict,
                model.model_hyperparams.Ξ±,
                model.model_hyperparams.Ξ³,
                cluster.clusters_count,
                cluster.clusters_sub_counts,
                index,
                model.weights[index],
                final)
            # NOTE(review): `split_arr[index,:]` is a 1-element Vector, so
            # `== 1` is always false and this break never fires — the loop
            # always evaluates every cluster (multiple splits per call). If a
            # single split per pass was intended this should compare
            # `split_arr[index,1] == 1`; confirm which behavior is desired.
            if split_arr[index,:] == 1
                break
            end
        end
    end
    new_index = length(global_clusters_vector) + 1
    # Grow the vector by the number of accepted splits, then execute them.
    resize!(global_clusters_vector,Int64(length(global_clusters_vector) + sum(split_arr)))
    indices = Vector{Int64}()
    for i=1:length(split_arr)
        if split_arr[i,1] == 1
            split_cluster!(model,i, new_index)
            push!(indices,i)
            push!(indices,new_index)
            new_index += 1
        end
    end
    return indices
end
"""
    check_and_merge!(model::hdp_shared_features, final::Bool)

Propose merging pairs of global clusters. When the global
`use_mean_for_merge` is defined and true, each cluster `i` is tested only
against its nearest later neighbour (Euclidean distance between posterior
means); otherwise all pairs `(i, j>i)` are scanned. At most one merge is
performed per call (`merge_clusters!`); returns the merged pair's indices,
or an empty vector when nothing merged.
"""
function check_and_merge!(model::hdp_shared_features, final::Bool)
    mergable = zeros(1)
    indices = Vector{Int64}()
    distance_matrix = zeros(length(global_clusters_vector),length(global_clusters_vector))
    if (@isdefined use_mean_for_merge) && use_mean_for_merge == true
        for i=1:length(global_clusters_vector)-1
            for j=i+1:length(global_clusters_vector)
                distance_matrix[i,j] = norm(global_clusters_vector[i].cluster_params.cluster_params.distribution.ΞΌ - global_clusters_vector[j].cluster_params.cluster_params.distribution.ΞΌ)
            end
            # `argmin` over the slice `i+1:end` is relative to the slice, so
            # add `i` to recover the absolute cluster index. (Fixed: the
            # offset was missing, pointing the merge test at the wrong
            # cluster.)
            indice_to_check = argmin(distance_matrix[i,i+1:end]) + i
            if (global_clusters_vector[i].cluster_params.splittable == true && global_clusters_vector[indice_to_check].cluster_params.splittable == true) || final
                should_merge!(mergable, global_clusters_vector[i].cluster_params.cluster_params,
                    global_clusters_vector[indice_to_check].cluster_params.cluster_params,model.groups_dict,
                    model.model_hyperparams.Ξ³,global_clusters_vector[i].clusters_count,
                    global_clusters_vector[indice_to_check].clusters_count,i,indice_to_check,model.weights[i], model.weights[indice_to_check], final)
            end
            if mergable[1] == 1
                merge_clusters!(model, i, indice_to_check)
                push!(indices,i)
                push!(indices,indice_to_check)
                break
            end
        end
    else
        for i=1:length(global_clusters_vector)
            for j=i+1:length(global_clusters_vector)
                if (global_clusters_vector[i].cluster_params.splittable == true && global_clusters_vector[j].cluster_params.splittable == true) || final
                    should_merge!(mergable, global_clusters_vector[i].cluster_params.cluster_params,
                        global_clusters_vector[j].cluster_params.cluster_params,model.groups_dict,
                        model.model_hyperparams.Ξ³,global_clusters_vector[i].clusters_count,
                        global_clusters_vector[j].clusters_count,i,j,model.weights[i], model.weights[j], final)
                end
                if mergable[1] == 1
                    merge_clusters!(model, i, j)
                    push!(indices,i)
                    push!(indices,j)
                    break
                end
            end
            if mergable[1] == 1
                break
            end
        end
    end
    return indices
end
"""
    update_suff_stats_posterior!(model::hdp_shared_features, indices::AbstractArray{Int64,1})

Recompute sufficient statistics and posterior hyperparameters of every
global cluster whose index is in `indices`. For each such cluster, the
global-feature points, subcluster labels and originating group ids of all
local clusters assigned to it are gathered (as views) across all groups,
then `update_splittable_cluster_params` is applied. Also refreshes
`clusters_sub_counts`: each contributing local cluster casts a majority
vote for the global-left (sub-labels <= 2) or global-right (> 2) side.
Mutates entries of the module-level `global_clusters_vector` in place.
"""
function update_suff_stats_posterior!(model::hdp_shared_features, indices::AbstractArray{Int64,1})
    local_dim = model.model_hyperparams.local_dim
    # Per-global-cluster accumulators, only allocated for requested indices.
    pts_vector_dict = Dict()
    sub_labels_vector_dict = Dict()
    clusters_count_dict = Dict()
    pts_to_groups = Dict()
    for i=1:length(global_clusters_vector)
        if i in indices
            pts_vector_dict[i] = Vector{AbstractArray{Float64,2}}()
            sub_labels_vector_dict[i] = Vector{AbstractArray{Int64,1}}()
            pts_to_groups[i] = Vector{AbstractArray{Int64,1}}()
            clusters_count_dict[i] = 0
        end
    end
    # Gather, per global cluster, views of the global features (rows
    # 1:local_dim-1) of every point assigned to it, together with the
    # points' subcluster labels and group ids.
    for (k,v) in model.groups_dict
        for (i,c) in enumerate(v.local_clusters)
            if c.globalCluster in indices
                push!(pts_vector_dict[c.globalCluster], (@view v.points[1:local_dim-1,(@view (v.labels .== i)[:])]))
                sub_labels = (@view v.labels_subcluster[(@view (v.labels .== i)[:])])
                push!(sub_labels_vector_dict[c.globalCluster], sub_labels)
                push!(pts_to_groups[c.globalCluster],ones(Int64,size(sub_labels,1))*k)
            end
        end
    end
    cluster_params_dict = Dict()
    begin
        @sync for (index,cluster) in enumerate(global_clusters_vector)
            if index in indices
                if size(pts_vector_dict[index],1) > 0
                    #pts = reshape(CatView(Tuple(pts_vector_dict[index])),local_dim - 1,:)
                    cluster.clusters_sub_counts = [0,0]
                    pts = reduce(hcat,pts_vector_dict[index])
                    # Each contributing local cluster votes for the side
                    # (global-left vs global-right) holding most of its points.
                    for sublabel in sub_labels_vector_dict[index]
                        if sum(sublabel .<= 2) >= sum(sublabel .> 2)
                            cluster.clusters_sub_counts[1] += 1
                        else
                            cluster.clusters_sub_counts[2] += 1
                        end
                    end
                    sublabels = reduce(vcat,sub_labels_vector_dict[index])
                    pts_group= reduce(vcat,pts_to_groups[index])
                    #sublabels = CatView(Tuple(sub_labels_vector_dict[index]))
                    # println(cluster.clusters_sub_counts)
                    cluster_params_dict[index] = update_splittable_cluster_params(cluster.cluster_params, pts , sublabels, true, pts_group)
                end
            end
        end
        # Second pass: install the (possibly asynchronously computed) results.
        for (index,cluster) in enumerate(global_clusters_vector)
            if index in indices
                if size(pts_vector_dict[index],1) > 0
                    cluster.cluster_params = fetch(cluster_params_dict[index])
                    #println(cluster.cluster_params)
                end
            end
        end
    end
end
"""
    sample_global_clusters_params!(model::hdp_shared_features)

Sample new distribution parameters for every global cluster from its
posterior, then resample the global mixture weights from a Dirichlet whose
parameters are the clusters' point counts plus the concentration Ξ³
(the trailing Ξ³ component is dropped from the stored weights).
"""
function sample_global_clusters_params!(model::hdp_shared_features)
    points_count = Vector{Float64}()
    for cluster in global_clusters_vector
        #push!(points_count, cluster.clusters_count)
        push!(points_count, cluster.cluster_params.cluster_params.suff_statistics.N)
        sample_cluster_params!(cluster.cluster_params, model.model_hyperparams.Ξ³, cluster.clusters_sub_counts)
    end
    push!(points_count, model.model_hyperparams.Ξ³)
    model.weights = rand(Dirichlet(points_count))[1:end-1]
    # println("Weights:" * string(model.weights))
    # println("Samples: " * string([x.cluster_params.cluster_params.distribution for x in global_clusters_vector]))
end
"""
    remove_empty_clusters!(model::hdp_shared_features)

Drop every global cluster whose `clusters_count` is zero and compact the
survivors, shifting each local cluster's `globalCluster` index down by the
number of removed clusters that preceded it. Rebinds the module-level
`global_clusters_vector` only when something was removed.
"""
function remove_empty_clusters!(model::hdp_shared_features)
    removed = Int[]
    kept = Vector{global_cluster}()
    for (idx, cl) in enumerate(global_clusters_vector)
        if cl.clusters_count == 0
            push!(removed, idx)
        else
            push!(kept, cl)
        end
    end
    isempty(removed) && return
    # Re-map surviving global-cluster indices inside every group.
    for (_, grp) in model.groups_dict
        for c in grp.local_clusters
            c.globalCluster -= sum(removed .< c.globalCluster)
        end
    end
    global global_clusters_vector = kept
end
"""
    local_split!(model::hdp_shared_features, index::Int64, new_index::Int64)

Promote the left/right subclusters of global cluster `index` into two full
global clusters: the left subcluster replaces the original in place while
the right one is written into slot `new_index` of `global_clusters_vector`
(which must already be sized to hold it). The new cluster starts with a
zero local-cluster count; point counts are refreshed from the sufficient
statistics.
"""
function local_split!(model::hdp_shared_features, index::Int64, new_index::Int64)
    parent = global_clusters_vector[index]
    right_child = deepcopy(parent)
    right_child.clusters_count = 0
    # Build the right child before overwriting the parent's params.
    right_child.cluster_params =
        create_splittable_from_params(parent.cluster_params.cluster_params_r, model.model_hyperparams.Ξ³)
    parent.cluster_params =
        create_splittable_from_params(parent.cluster_params.cluster_params_l, model.model_hyperparams.Ξ³)
    right_child.points_count = right_child.cluster_params.cluster_params.suff_statistics.N
    parent.points_count = parent.cluster_params.cluster_params.suff_statistics.N
    global_clusters_vector[new_index] = right_child
end
"""
    split_new_clusters_from_local!(model::hdp_shared_features)

Materialize global clusters proposed by group-level splits: any local
cluster whose `globalCluster` index exceeds the current length of
`global_clusters_vector` is collected (keyed by the offset beyond the
current count), a new global cluster is created for each key via
`local_split!`, and the collected local clusters are re-pointed at the new
index. Returns the indices of all affected (source and new) global clusters
so their sufficient statistics can be re-evaluated; counts, empty-cluster
removal and weights are refreshed when anything changed.
"""
function split_new_clusters_from_local!(model::hdp_shared_features)
    cur_count = length(global_clusters_vector)
    new_clusters_to_global = Dict() #keys are global clusters to split
    for (i,group) in model.groups_dict
        for c in group.local_clusters
            new_global = c.globalCluster
            if new_global > cur_count #new cluster
                # println(new_global)
                if haskey(new_clusters_to_global, new_global - cur_count)
                    push!(new_clusters_to_global[new_global - cur_count], c) #The last2 clusters are in the new global
                else
                    new_clusters_to_global[new_global - cur_count] = [c]
                end
            end
        end
    end
    new_index = Int64(length(global_clusters_vector)) +1
    # println(keys(new_clusters_to_global))
    resize!(global_clusters_vector,Int64(length(global_clusters_vector) + length(new_clusters_to_global)))
    indicies_to_re_evaluate = Vector{Int64}()
    for (k,v) in new_clusters_to_global
        local_split!(model,k,new_index)
        # global_clusters_vector[k].clusters_count += length(v) / 2
        # global_clusters_vector[new_index].clusters_count += length(v)
        for group in v
            group.globalCluster = new_index
        end
        push!(indicies_to_re_evaluate,k)
        push!(indicies_to_re_evaluate,new_index)
        new_index += 1
    end
    if length(indicies_to_re_evaluate) > 0
        update_pts_count!(model)
        remove_empty_clusters!(model)
        update_weights!(model)
    end
    return indicies_to_re_evaluate
end
"""
    update_pts_count!(model::hdp_shared_features)

Recount how many local clusters (across all groups) are assigned to each
global cluster and store the totals in the clusters' `clusters_count`.
"""
function update_pts_count!(model::hdp_shared_features)
    tally = zeros(length(global_clusters_vector))
    for (_, grp) in model.groups_dict
        for lc in grp.local_clusters
            tally[lc.globalCluster] += 1
        end
    end
    # println(tally)
    for (idx, cl) in enumerate(global_clusters_vector)
        cl.clusters_count = tally[idx]
    end
end
"""
    update_weights!(model::hdp_shared_features)

Resample the global mixture weights from a Dirichlet parameterized by each
global cluster's local-cluster count plus the concentration Ξ³; the trailing
Ξ³ component is dropped from the stored weights.
"""
function update_weights!(model::hdp_shared_features)
    dirichlet_params = Float64[c.clusters_count for c in global_clusters_vector]
    push!(dirichlet_params, model.model_hyperparams.Ξ³)
    model.weights = rand(Dirichlet(dirichlet_params))[1:end-1]
end
"""
    update_partial_pts_count!(model::hdp_shared_features)

Like `update_pts_count!`, but only tallies local clusters whose
`globalCluster` index is strictly below the current cluster count.
NOTE(review): the `<` (rather than `<=`) also skips assignments to the
*last* existing cluster, not only out-of-range ones — confirm this is
intentional.
"""
function update_partial_pts_count!(model::hdp_shared_features)
    counts = zeros(length(global_clusters_vector))
    for (k,v) in model.groups_dict
        for c in v.local_clusters
            if c.globalCluster < length(counts)
                counts[c.globalCluster] += 1
            end
        end
    end
    for (k,v) in enumerate(global_clusters_vector)
        v.clusters_count = counts[k]
    end
end
#Global Setting
use_gpu = false
use_darrays = false #Only relevant if use_gpu = false
random_seed = 1234567
random_seed = nothing # overrides the line above; set an Int here for reproducible runs
#Data Loading specifics
data_path = "data/multi-d-gaussians//"
data_prefix = "g" # group files are named <prefix><group index>
groups_count = 9
global_preprocessing = nothing
local_preprocessing = nothing
#Model Parameters
iterations = 100
hard_clustering = false
total_dim = 3 # dimensionality of each point
local_dim = 3 # first index of the group-local feature block
Ξ± = 500.0 # local (per-group) DP concentration
Ξ³ = 10000.0 # global DP concentration
global_weight = 1.0
local_weight= 1.0
initial_global_clusters = 1
initial_local_clusters = 1 #this is per group
use_dict_for_global = false
split_delays = false
ignore_local = false # when true, only global features drive the likelihood
split_stop = 10 # stop proposing splits/merges this many iterations before the end
argmax_sample_stop = 10 # take argmax labels (no sampling) this many iterations before the end
use_mean_for_merge = false
# Normal-inverse-Wishart priors: (ΞΊ, ΞΌ, Ξ½, Ξ¨)
global_hyper_params = niw_hyperparams(1.0,
    zeros(2),
    20.0,
    Matrix{Float64}(I, 2, 2)*1)
local_hyper_params = niw_hyperparams(1.0,
    zeros(1),
    20.0,
    Matrix{Float64}(I, 1, 1)*1)
# #Model Parameters
# iterations = 10
#
# total_dim = 3
# local_dim = 3
#
# Ξ± = 4.0
# Ξ³ = 2.0
# global_weight = 1.0
# local_weight= 1.0
#
#
# global_hyper_params = niw_hyperparams(5.0,
#     ones(3)*2.5,
#     10.0,
#     [[0.8662817 0.78323282 0.41225376];[0.78323282 0.74170384 0.50340258];[0.41225376 0.50340258 0.79185577]])
#
# local_hyper_params = niw_hyperparams(1.0,
#     [217.0,510.0],
#     10.0,
#     Matrix{Float64}(I, 2, 2)*0.5)
"""
    init_model(swap_axes; data = nothing, model_params = nothing)

Construct an `hdp_shared_features` model. Points are either passed in via
`data` (dict of group id => point matrix) or loaded from the configured
`data_path`; `swap_axes` optionally permutes the dimensions of loaded data.
Hyperparameters come from `model_params` or from the configuration globals.
Each group gets random initial labels and 4-way subcluster labels, plus its
own (possibly group-specific) hyperparameter bundle. When the global `mp`
flag is set, groups are distributed round-robin over the workers.
"""
function init_model(swap_axes; data = nothing , model_params = nothing)
    if random_seed !== nothing
        # Seed every worker so distributed runs are reproducible.
        @eval @everywhere Random.seed!($random_seed)
    end
    if data === nothing
        points_dict = load_data(data_path, groups_count, prefix = data_prefix)
        if swap_axes !== nothing
            points_dict = axes_swapper(points_dict, swap_axes)
        end
        preprocessing!(points_dict, local_dim, global_preprocessing, local_preprocessing)
    else
        points_dict = data
    end
    if model_params === nothing
        model_hyperparams = model_hyper_params(global_hyper_params, local_hyper_params, Ξ±, Ξ³, Ξ·,
            global_weight, local_weight, total_dim, local_dim)
    else
        model_hyperparams = model_params
    end
    blocal_hyper_params = model_hyperparams.local_hyper_params
    groups_dict = Dict()
    for (k, v) in points_dict
        # Random initial hard labels and 4-way subcluster labels per point.
        labels = rand(1:initial_local_clusters, (size(v, 2), 1))
        labels_subcluster = rand(1:4, (size(v, 2), 1))
        weights = ones(initial_local_clusters) * (1 / initial_local_clusters)
        local_clusters = local_cluster[]
        if isa(blocal_hyper_params, Array)
            # Per-group local prior: build a hyperparameter bundle for group k.
            localised_params = model_hyper_params(model_hyperparams.global_hyper_params,
                blocal_hyper_params[k], model_hyperparams.Ξ±,
                model_hyperparams.Ξ³, model_hyperparams.Ξ·, model_hyperparams.global_weight,
                model_hyperparams.local_weight, model_hyperparams.total_dim, model_hyperparams.local_dim)
        else
            localised_params = model_hyperparams
        end
        groups_dict[k] = local_group(localised_params, v, labels, labels_subcluster, local_clusters, Float64[], k)
    end
    # Topic-modeling runs are flagged globally so samplers can special-case them.
    if isa(model_hyperparams.global_hyper_params, topic_modeling_hyper)
        global is_tp = true
    else
        global is_tp = false
    end
    @eval global groups_dict = $groups_dict
    if mp
        # Distribute groups round-robin over the available workers.
        num_of_workers = nworkers()
        for w in workers()
            @spawnat w global groups_dict = Dict()
        end
        @sync for (index, group) in groups_dict
            @spawnat ((index % num_of_workers) + 2) set_group(index, group)
        end
    end
    return hdp_shared_features(model_hyperparams, groups_dict, global_cluster[], Float64[])
end
"""
    init_first_clusters!(hdp_model::hdp_shared_features)

Create `initial_local_clusters` local clusters per group and
`initial_global_clusters` global clusters, then publish the global cluster
vector to every worker.
"""
function init_first_clusters!(hdp_model::hdp_shared_features)
    for (k,v) in hdp_model.groups_dict
        v.local_clusters = []
        for i=1:initial_local_clusters
            push!(v.local_clusters, create_first_local_cluster(v, initial_global_clusters))
        end
    end
    global_c = []
    for i=1:initial_global_clusters
        push!(global_c,create_first_global_cluster(hdp_model.model_hyperparams, hdp_model.groups_dict, i))
    end
    global global_clusters_vector = global_c
    # Broadcast the initial global clusters to all workers.
    for w in workers()
        @eval @spawnat $w global global_clusters_vector = $global_c
    end
    # @eval @everywhere global global_clusters_vector = $global_c
end
"""
    hdp_shared_features(model_params, swap_axes = nothing; multiprocess = false)

Run the full Gibbs sampler using the configuration file at `model_params`
(`include`d, so it must define the globals used below: `iterations`,
`argmax_sample_stop`, `split_stop`, priors, etc.). Returns
`(model, posterior_history, word_ll_history, topic_count)`.

NOTE(review): `mp` is set from `multiprocess` only *after* `init_model`
(which reads `mp`) has run — confirm the intended ordering.
"""
function hdp_shared_features(model_params, swap_axes = nothing;multiprocess = false)
    cur_dir = pwd()
    include(model_params)
    cd(cur_dir)
    hdp_model = init_model(swap_axes)
    init_first_clusters!(hdp_model)
    groups_stats = Dict()
    global num_of_workers = nworkers()
    if @isdefined split_delays
        @everywhere global split_delays = split_delays
    else
        @everywhere global split_delays = true
    end
    @everywhere global hard_clustering = hard_clustering
    global posterior_history = []
    global word_ll_history = []
    global topic_count = []
    global mp = multiprocess
    for i=1:iterations
        println("Iteration: " * string(i))
        println("Global Counts: " * string([x.clusters_count for x in global_clusters_vector]))
        final = false
        no_more_splits = false
        if i > iterations - argmax_sample_stop # K is assumed settled; a low-probability random split late in the run could hurt, so switch to argmax labels.
            final = true
        end
        if i >= iterations - split_stop
            no_more_splits = true
        end
        model_iteration(hdp_model,final,no_more_splits)
    end
    hdp_model.global_clusters = global_clusters_vector
    return hdp_model, posterior_history, word_ll_history, topic_count
end
"""
    model_iteration(hdp_model, final, no_more_splits, burnout = 5)

One Gibbs sweep: sample global cluster parameters and weights, run each
group's local step (distributed via `@spawnat` when `mp`, otherwise
threaded), merge the per-group stats back, sample global labels, refresh
sufficient statistics and the posterior trace, and — unless
`no_more_splits` — propose global split and merge moves, propagating
accepted moves to every group.

NOTE(review): the `burnout` argument is shadowed by the hard-coded
`burnout_period = 5` below — confirm intent.
"""
function model_iteration(hdp_model,final, no_more_splits,burnout = 5)
    groups_stats = Vector{local_group_stats}(undef,length(groups_dict))
    @everywhere global burnout_period = 5
    sample_global_clusters_params!(hdp_model)
    global global_clusters_vector = global_clusters_vector
    refs= Dict()
    if mp
        # Push the freshly sampled global clusters to every worker and wait.
        for w in workers()
            refs[w] = remotecall(set_global_clusters_vector, w, global_clusters_vector)
        end
        for w in workers()
            fetch(refs[w])
        end
    end
    begin
        if mp
            @sync for (index,group) in hdp_model.groups_dict
                lc = group.local_clusters
                groups_stats[index] = @spawnat ((index % num_of_workers)+2) group_step(index,lc, final)
            end
        else
            Threads.@threads for index=1:length(groups_dict)
                group=groups_dict[index]
                lc = group.local_clusters
                groups_stats[index] = group_step(index,lc, final)
            end
        end
        # Fold each group's step results back into the model.
        for index=1:length(groups_dict)
            update_group_from_stats!(hdp_model.groups_dict[index], fetch(groups_stats[index]))
        end
    end
    sample_clusters_labels!(hdp_model, (hard_clustering ? true : final))
    remove_empty_clusters!(hdp_model)
    update_suff_stats_posterior!(hdp_model,collect(1:length(global_clusters_vector)))
    hdp_model.global_clusters = global_clusters_vector
    push!(posterior_history,calc_global_posterior(hdp_model))
    # if isa(hdp_model.model_hyperparams.global_hyper_params, topic_modeling_hyper)
    #     word_ll = calc_avg_word(hdp_model)
    #     println("Per Word LL:" * string(word_ll))
    #     push!(word_ll_history,word_ll)
    #     push!(topic_count,length(global_clusters_vector))
    # end
    if no_more_splits == false
        # println(length((global_clusters_vector)))
        # Splits return (old, new) index pairs interleaved.
        indices = check_and_split!(hdp_model, final)
        i = 1
        while i < length(indices)
            for (index,group) in hdp_model.groups_dict
                split_global_cluster!(group,indices[i],indices[i+1])
            end
            i+= 2
        end
        # println(length((global_clusters_vector)))
        indices = check_and_merge!(hdp_model, final)
        if length(indices) > 0
            for (index,group) in hdp_model.groups_dict
                merge_global_cluster!(group,indices[1],indices[2])
            end
        end
        remove_empty_clusters!(hdp_model)
        if length(indices) > 0
            println("merged:" * string(indices))
        end
    end
end
"""
    create_default_priors(gdim, ldim, prior_type::Symbol)

Return a `(global_prior, local_prior)` pair with default hyperparameters:
normal-inverse-Wishart priors (zero mean, identity scale, Ξ½ = dim + 3) when
`prior_type == :niw`, otherwise symmetric multinomial priors with
concentration 500 per entry.
"""
function create_default_priors(gdim, ldim, prior_type::Symbol)
    if prior_type == :niw
        global_prior = niw_hyperparams(1.0, zeros(gdim), gdim + 3,
            Matrix{Float64}(I, gdim, gdim) * 1)
        local_prior = niw_hyperparams(1.0, zeros(ldim), ldim + 3,
            Matrix{Float64}(I, ldim, ldim) * 1)
        return global_prior, local_prior
    end
    return multinomial_hyper(ones(gdim) * 500.0), multinomial_hyper(ones(ldim) * 500.0)
end
"""
    swap_axes_worker(swap_vec)

Worker-side helper: permute the point dimensions of every group in this
process's `groups_dict` according to the permutation `swap_vec`.
Defined on all processes via `@everywhere`.
"""
@everywhere function swap_axes_worker(swap_vec)
    for (k,v) in groups_dict
        v.points = v.points[swap_vec,:]
    end
end
"""
    create_swap_vec(dim, glob_mapping, index)

Build a dimension permutation that moves dimension `index`, together with
every dimension `k` for which `glob_mapping[k] == 1`, to the front
(preserving order), and pushes the remaining dimensions to the back in
reverse encounter order. Returns `(swap_vec, reverse_swap_vec)` as `Int`
vectors, where `reverse_swap_vec` is the inverse permutation of `swap_vec`.
"""
function create_swap_vec(dim, glob_mapping, index)
    perm = zeros(Int, dim)
    inv_perm = zeros(Int, dim)
    front = 1
    back = dim
    for (k, flag) in enumerate(glob_mapping)
        if k == index || flag == 1
            perm[front] = k
            inv_perm[k] = front
            front += 1
        else
            perm[back] = k
            inv_perm[k] = back
            back -= 1
        end
    end
    return perm, inv_perm
end
"""
    calc_global_posterior(hdp_model::hdp_shared_features, ismnm = false)

Log posterior of the whole model: `log Ξ³ - logΞ(N)` (N = total point count)
plus each group's posterior contribution, plus — for every non-empty global
cluster — its log marginal likelihood and a `log Ξ³ + logΞ(N_k)` CRP term.
`ismnm` is currently unused: the per-group term is always added.
"""
function calc_global_posterior(hdp_model::hdp_shared_features, ismnm = false)
    pts_count = 0.0
    log_posterior = log(hdp_model.model_hyperparams.Ξ³)
    for (k,group) in hdp_model.groups_dict
        pts_count += size(group.points,2)
        # if ismnm
        log_posterior+= calc_group_posterior(group)
        # end
    end
    log_posterior-= logabsgamma(pts_count)[1]
    for cluster in hdp_model.global_clusters
        if cluster.cluster_params.cluster_params.suff_statistics.N == 0
            continue
        end
        #posterior_param = update_posterior_evidence(model.hyper, model.clusters_params.sufficient_stats, index)
        log_posterior += log_marginal_likelihood(cluster.cluster_params.cluster_params.hyperparams,
            cluster.cluster_params.cluster_params.posterior_hyperparams,
            cluster.cluster_params.cluster_params.suff_statistics)
        log_posterior += log(hdp_model.model_hyperparams.Ξ³) + logabsgamma(cluster.cluster_params.cluster_params.suff_statistics.N)[1]
        # println(cluster.cluster_params.cluster_params.suff_statistics)
    end
    return log_posterior
end
"""
    calc_avg_word(hdp_model::hdp_shared_features)

Average per-word log-likelihood for topic-modeling runs: each group's
empirical topic proportions (from its global predictions) are mixed with
the topics' word distributions (`distribution.Ξ±`, treated as log word
probabilities), weighted by the group's word counts, and normalized by the
total word count. Assumes `points` hold integer word ids — TODO confirm
against the `topic_modeling_hyper` data layout.
"""
function calc_avg_word(hdp_model::hdp_shared_features)
    # return 0
    global_preds = get_model_global_pred(hdp_model)
    # Per-group topic mixture: normalized counts of global labels.
    group_mixtures = Dict([k => [x/ sum(counts(v,length(hdp_model.global_clusters))) for x in counts(v,length(hdp_model.global_clusters))] for (k,v) in global_preds])
    # word_count = Dict([k => counts(Int.(v.points[:]), length(hdp_model.model_hyperparams.global_hyper_params.Ξ±)) for (k,v) in hdp_model.groups_dict])
    cluster_dists = [x.cluster_params.cluster_params.distribution.Ξ± for x in hdp_model.global_clusters]
    total_likelihood = 0.0
    total_points = 0
    for (k,v) in group_mixtures
        # Column per topic: log word distribution + log topic proportion.
        parr = zeros(length(hdp_model.model_hyperparams.global_hyper_params.Ξ±),length(v))
        for (index,part) in enumerate(v)
            parr[:,index] = cluster_dists[index] .+ log(part)
        end
        parr = exp.(parr)
        parr[isnan.(parr)] .= 0
        # Marginal word probability: sum over topics, back to log space.
        wordp = sum(parr,dims = 2)
        wordp = log.(wordp)
        wordp[isnan.(wordp)] .= 0
        rel_pts = hdp_model.groups_dict[k].points
        word_counts = counts(Int.(rel_pts), length(hdp_model.model_hyperparams.global_hyper_params.Ξ±))
        cluster_ll = wordp .* word_counts
        cluster_ll[isnan.(cluster_ll)] .= 0
        total_points += sum(word_counts)
        # println(any(isnan.(cluster_ll)))
        total_likelihood += sum(cluster_ll)
    end
    # println("ll: " * string(total_likelihood) * " pts: " * string(total_points))
    return total_likelihood / total_points
end
"""
    calc_group_posterior(group::local_group)

Log posterior contribution of a single group: `log Ξ± - logΞ(#points)` plus
the log marginal likelihood of every non-empty local cluster.
"""
function calc_group_posterior(group::local_group)
    lp = log(group.model_hyperparams.Ξ±) - logabsgamma(size(group.points, 2))[1]
    for c in group.local_clusters
        params = c.cluster_params.cluster_params
        params.suff_statistics.N == 0 && continue
        lp += log_marginal_likelihood(params.hyperparams,
                                      params.posterior_hyperparams,
                                      params.suff_statistics)
    end
    return lp
end
"""
    k_mean_likelihood(likehood_rating, k)

Cluster the scalar likelihood ratings into `k` groups with 1-D k-means,
seeding the first `k-1` centers on an even grid starting from the minimum
rating. Returns `(centers, assignments)`.
NOTE(review): `k_means[k] = 0` pins the top seed at zero rather than at
`max_likelihood`; this only makes sense for non-positive (log-)likelihoods —
confirm intent.
"""
function k_mean_likelihood(likehood_rating,k)
    min_likelihood = minimum(likehood_rating)
    max_likelihood = maximum(likehood_rating)
    k_means = zeros(k)
    k_means[1] = min_likelihood
    k_means[k] = 0
    k_interval = abs(max_likelihood - min_likelihood) / k
    for i=2:k-1
        k_means[i] = k_means[i-1] + k_interval
    end
    centers = reshape(k_means,1,k)
    R = kmeans!(reshape(likehood_rating,1,:), centers; maxiter=100)
    return R.centers, assignments(R)
end
"""
    hdp_fit(data, Ξ±, Ξ³, prior, iters, initial_custers = 1, burnout = 5; multiprocess = false)

Convenience wrapper around `vhdp_fit` for a pure-HDP run: all `dim`
dimensions are treated as global (`gdim == dim`, empty local block),
`prior` is used directly as the global prior, and `Ξ·` is set to `Ξ±`.
"""
function hdp_fit(data, Ξ±,Ξ³,prior,iters, initial_custers = 1,burnout = 5;multiprocess=false)
    dim = size(data[1],1)
    gdim = dim
    # Only the (0-dimensional) local prior is needed here; `prior` itself is
    # the global prior. (The previously computed global default was unused.)
    _, lprior = create_default_priors(gdim, dim - gdim, :niw)
    return vhdp_fit(data, gdim, Ξ±, Ξ³, Ξ±, prior, lprior, iters, initial_custers, burnout, multiprocess = multiprocess)
end
"""
    vhdp_fit(data, gdim, Ξ±, Ξ³, Ξ·, prior::Symbol, iters, initial_custers = 1, burnout = 5; multiprocess = false)

Fit with default priors of the requested family (`:niw` for
normal-inverse-Wishart, anything else for multinomial), built for a
`gdim`-dimensional global block and a `(dim - gdim)`-dimensional local one.
"""
function vhdp_fit(data,gdim, Ξ±,Ξ³,Ξ·,prior::Symbol,iters, initial_custers = 1,burnout = 5;multiprocess=false)
    total_dim = size(data[1], 1)
    global_prior, local_prior = create_default_priors(gdim, total_dim - gdim, prior)
    return vhdp_fit(data, gdim, Ξ±, Ξ³, Ξ·, global_prior, local_prior, iters,
                    initial_custers, burnout, multiprocess = multiprocess)
end
"""
    vhdp_fit(data, gdim, Ξ±, Ξ³, Ξ·, gprior::distribution_hyper_params, lprior, iters, initial_custers = 1, burnout = 5; multiprocess = false)

Fit the versatile-HDP model: the first `gdim` dimensions of each group's
points are modelled by shared global clusters (prior `gprior`), the rest by
group-local clusters (prior `lprior`). Runs `iters` Gibbs sweeps and returns
`(model, total_time, posterior_history, word_ll_history, topic_count)`.
Sets several module-level globals (seed, initial cluster counts, `mp`,
histories) as the sampler's shared state.
"""
function vhdp_fit(data,gdim, Ξ±,Ξ³,Ξ·,gprior::distribution_hyper_params,lprior,iters, initial_custers = 1,burnout = 5;multiprocess=false)
    global random_seed = nothing
    global initial_local_clusters = initial_custers
    global initial_global_clusters = initial_custers
    global mp = multiprocess
    dim = size(data[1],1)
    model_hyperparams = model_hyper_params(gprior,lprior,Ξ±,Ξ³,Ξ·,1.0,1.0,dim,gdim + 1)
    model = init_model(nothing; data = data , model_params = model_hyperparams)
    global posterior_history = []
    global word_ll_history = []
    global topic_count = []
    @everywhere global split_delays = true
    global burnout_period = burnout
    if mp
        # Propagate the burnout period to every worker.
        for w in workers()
            @spawnat w set_burnout(burnout)
        end
    end
    global num_of_workers = nworkers()
    iter = 1
    total_time = 0
    init_first_clusters!(model)
    for i=1:iters
        tic = time()
        model_iteration(model,false,false,burnout)
        toc = time() -tic
        println("Iteration: " * string(i) * "|| Global Counts: " * string([x.clusters_count for x in global_clusters_vector]) * "|| iter time: " * string(toc))
        total_time+= toc
    end
    model.global_clusters = global_clusters_vector
    return model, total_time, posterior_history,word_ll_history,topic_count
end
"""
    remove_all_zeros_from_data(data)

Return a copy of `data` (a dict of group id => feature Γ— sample matrix)
with every feature row that is zero across *all* groups removed. The input
dict is left untouched.
"""
function remove_all_zeros_from_data(data)
    concatenated_vals = reduce(hcat, values(data))
    # Keep a row if any group has a non-zero entry in it.
    keep_rows = [i for i in 1:size(concatenated_vals, 1) if any(concatenated_vals[i, :] .!= 0)]
    new_data = deepcopy(data)
    for (k, v) in data
        new_data[k] = v[keep_rows, :]
    end
    return new_data
end
"""
    get_model_global_pred(model)

Map each group key to the global-cluster label vector produced by
`create_global_labels` for that group.
"""
function get_model_global_pred(model)
    return Dict(k => create_global_labels(grp) for (k, grp) in model.groups_dict)
end
"""
    results_stats(pred_dict, gt_dict)

Average normalized mutual information between predicted and ground-truth
label vectors over groups `1:length(pred_dict)`.
"""
function results_stats(pred_dict, gt_dict)
    total_nmi = 0.0  # Float64 accumulator keeps the loop type-stable
    for i = 1:length(pred_dict)
        total_nmi += mutualinfo(pred_dict[i], gt_dict[i])
    end
    return total_nmi / length(pred_dict)
end
"""
    create_first_local_cluster(group::local_group, max_global::Int64 = 1)

Build the initial local cluster for `group`: a prior-sampled distribution,
sufficient statistics computed from the group's local features and current
sub-labels, identical left/right subcluster copies with equal weights, and
a uniformly random global-cluster assignment in `1:max_global`.
"""
function create_first_local_cluster(group::local_group, max_global::Int64 = 1)
    suff = create_sufficient_statistics(group.model_hyperparams.local_hyper_params, [])
    post = group.model_hyperparams.local_hyper_params
    dist = sample_distribution(post)
    cp = cluster_parameters(group.model_hyperparams.local_hyper_params, dist, suff, post)
    # Left/right subclusters start as exact copies of the parent.
    cpl = deepcopy(cp)
    cpr = deepcopy(cp)
    splittable = splittable_cluster_params(cp, cpl, cpr, [0.5, 0.5], false, ones(20) .* -Inf)
    update_splittable_cluster_params!(splittable,
        (@view group.points[group.model_hyperparams.local_dim:end, :]),
        (@view group.labels_subcluster[:]), false)
    cluster = local_cluster(splittable, group.model_hyperparams.total_dim,
        group.model_hyperparams.local_dim, splittable.cluster_params.suff_statistics.N,
        1.0, 1.0, rand(1:max_global), rand(1:2), [])
    return cluster
end
"""
    sample_sub_clusters!(group::local_group, final::Bool)

Resample the 4-way subcluster label of every point in each of the group's
local clusters, combining the cluster's own local left/right subclusters
with the left/right subclusters of its assigned global cluster.
"""
function sample_sub_clusters!(group::local_group, final::Bool)
    for (i,v) in enumerate(group.local_clusters)
        create_subclusters_labels!(reshape((@view group.labels_subcluster[group.labels .== i]),:,1),
            (@view group.points[:,(@view (group.labels .== i)[:])]), v.cluster_params, global_clusters_vector[v.globalCluster].cluster_params, v.local_dim, group.group_num, final)
    end
end
"""
    create_subclusters_labels!(labels, points, cluster_params, global_cluster_params, local_dim, group_num, final)

Sample joint (local, global) subcluster labels in place. Columns 5/6 of the
work matrix hold the global-feature log-likelihood under the global
cluster's left/right subclusters (plus their log weights); columns 1/4 hold
the local-feature log-likelihood under the local left/right subclusters
(skipped when the global `ignore_local` is true). These are combined into
four joint options:
1 = (local-L, global-L), 2 = (local-R, global-L),
3 = (local-L, global-R), 4 = (local-R, global-R).
Labels are sampled categorically from the log-probabilities, or argmax'ed
when `final` is true.
"""
function create_subclusters_labels!(labels::AbstractArray{Int64,2},
        points::AbstractArray{Float64,2},
        cluster_params::splittable_cluster_params,
        global_cluster_params::splittable_cluster_params,
        local_dim::Int64,
        group_num::Int64, final::Bool)
    if size(labels,1) == 0
        return
    end
    parr = zeros(length(labels), 6)
    # Global features live in rows 1:local_dim-1, local features in local_dim:end.
    log_likelihood!((@view parr[:,5]),(@view points[1:local_dim-1, : ]),global_cluster_params.cluster_params_l.distribution, group_num)
    log_likelihood!((@view parr[:,6]),(@view points[1:local_dim-1, : ]),global_cluster_params.cluster_params_r.distribution, group_num)
    if ignore_local == false
        log_likelihood!((@view parr[:,1]),(@view points[local_dim:end, : ]),cluster_params.cluster_params_l.distribution)
        log_likelihood!((@view parr[:,4]),(@view points[local_dim:end, : ]),cluster_params.cluster_params_r.distribution)
    end
    # println(global_cluster_params.cluster_params_l.distribution)
    # println(global_cluster_params.cluster_params_r.distribution)
    parr[:,5] .+= log(global_cluster_params.lr_weights[1])
    parr[:,6] .+= log(global_cluster_params.lr_weights[2])
    parr[:,1] .+= log(cluster_params.lr_weights[1])
    parr[:,4] .+= log(cluster_params.lr_weights[2])
    # Combine into the four joint (local, global) options.
    parr[:,3] .= parr[:,1]+ parr[:,6]
    parr[:,1] .+= parr[:,5]
    parr[:,2] .= parr[:,5] + parr[:,4]
    parr[:,4] .+= parr[:,6]
    if final
        labels .= mapslices(argmax, parr, dims= [2])
    else
        sample_log_cat_array!(labels,parr[:,1:4])
    end
end
"""
    get_local_cluster_likelihood!(parr, points, cluster, group_num)

Fill `parr` with each point's log-likelihood under `cluster`: the global
features' (rows `1:local_dim-1`) likelihood under the cluster's assigned
global cluster, scaled by `1/global_weight`, plus — unless the global
`ignore_local` is set — the local features' likelihood scaled by
`1/local_weight`.
"""
function get_local_cluster_likelihood!(parr::AbstractArray{Float64,2}, points::AbstractArray{Float64,2}, cluster::local_cluster, group_num::Int64)
    global_p = zeros(size(parr))
    local_dim = cluster.local_dim
    log_likelihood!(global_p, (@view points[1 : local_dim-1,:]),global_clusters_vector[cluster.globalCluster].cluster_params.cluster_params.distribution, group_num)
    if ignore_local == false
        log_likelihood!(parr, (@view points[local_dim:end,:]),cluster.cluster_params.cluster_params.distribution)
    end
    parr .*= (1 / cluster.local_weight)
    parr .+= (global_p .* (1 / cluster.global_weight))
end
"""
    sample_labels!(group::local_group, final::Bool)

Convenience wrapper: resample the cluster label of every point in `group`
by delegating to the full `sample_labels!` method.
"""
sample_labels!(group::local_group, final::Bool) =
    sample_labels!(group.labels, group.points, group.local_clusters, group.weights, final, group.group_num)
"""
    sample_labels!(labels, points, local_clusters, weights, final, group_num)

Resample (in place) the local-cluster assignment of every point. Each
column of the log-probability matrix holds one cluster's combined data
log-likelihood plus its log mixing weight; labels are drawn categorically
from these log-probabilities, or taken as the per-row argmax when `final`.
"""
function sample_labels!(labels::AbstractArray{Int64,2},
                        points::AbstractArray{Float64,2},
                        local_clusters::Vector{local_cluster},
                        weights::Vector{Float64},
                        final::Bool,
                        group_num::Int64)
    logp = zeros(length(labels), length(local_clusters))
    for (col, cluster) in enumerate(local_clusters)
        get_local_cluster_likelihood!(reshape((@view logp[:, col]), :, 1), points, cluster, group_num)
    end
    for (col, w) in enumerate(weights)
        logp[:, col] .+= log(w)
    end
    if final
        labels .= mapslices(argmax, logp, dims = [2])
    else
        sample_log_cat_array!(labels, logp)
    end
end
"""
    update_local_cluster_params!(cluster, points, sub_labels)

Refresh `cluster`'s local-feature sufficient statistics from its currently
assigned `points` and 4-way `sub_labels`: odd sub-labels (1, 3) feed the
left local subcluster, even ones (2, 4) the right. Sub-labels <= 2 vs > 2
are tallied into `global_suff_stats` as the global left/right counts.
Posterior hyperparameters of the aggregate and both subclusters are
recomputed. Mutates `cluster` in place.
"""
function update_local_cluster_params!(cluster::local_cluster,
        points::AbstractArray{Float64,2},
        sub_labels::AbstractArray{Int64,1})
    splittable_cluster = cluster.cluster_params
    cpl = splittable_cluster.cluster_params_l
    cpr = splittable_cluster.cluster_params_r
    cp = splittable_cluster.cluster_params
    local_dim = cluster.local_dim
    gc = cluster.globalCluster
    gc_params = global_clusters_vector[gc].cluster_params
    # gl_suff_statistics = create_sufficient_statistics(gc_params.cluster_params_l.hyperparams,
    #     gc_params.cluster_params_l.posterior_hyperparams,
    #     (@view points[1:local_dim-1,(@view (sub_labels .<= 2)[:])]),
    #     ones(sum((@view (sub_labels .<= 2)[:]))))
    # gr_suff_statistics = create_sufficient_statistics(gc_params.cluster_params_r.hyperparams,
    #     gc_params.cluster_params_r.posterior_hyperparams,
    #     (@view points[1:local_dim-1,(@view (sub_labels .> 2)[:])]),
    #     nes(sum((@view (sub_labels .> 2)[:]))))
    # Local-feature stats: odd sub-labels -> left subcluster, even -> right.
    cpl.suff_statistics = create_sufficient_statistics(cpl.hyperparams, cpl.posterior_hyperparams,@view points[local_dim: end,sub_labels .% 2 .== 1])
    cpr.suff_statistics = create_sufficient_statistics(cpr.hyperparams, cpr.posterior_hyperparams,@view points[local_dim: end,sub_labels .% 2 .== 0])
    # Global left/right point counts (sub-labels 1-2 vs 3-4).
    l_count = sum(sub_labels .<= 2)
    r_count = sum(sub_labels .> 2)
    cp.suff_statistics = aggregate_suff_stats(cpl.suff_statistics, cpr.suff_statistics)
    cp.posterior_hyperparams = calc_posterior(cp.hyperparams, cp.suff_statistics)
    cpl.posterior_hyperparams = calc_posterior(cpl.hyperparams, cpl.suff_statistics)
    cpr.posterior_hyperparams = calc_posterior(cpr.hyperparams, cpr.suff_statistics)
    cluster.global_suff_stats = [l_count,r_count]
    cluster.cluster_params = splittable_cluster
    # cluster.global_suff_stats = [gl_suff_statistics, gr_suff_statistics]
end
"""
    update_suff_stats_posterior!(group::local_group)

Recompute sufficient statistics and posterior hyperparameters for every
local cluster in `group` from its currently assigned points and sub-labels.
"""
function update_suff_stats_posterior!(group::local_group)
    for (idx, cluster) in enumerate(group.local_clusters)
        mask = (group.labels .== idx)
        cluster_pts = @view group.points[:, (@view mask[:])]
        cluster_sub_labels = @view group.labels_subcluster[mask]
        update_local_cluster_params!(cluster, cluster_pts, cluster_sub_labels)
    end
end
"""
    update_suff_stats_posterior!(group::local_group, clusters::AbstractArray{Int64,1})

Partial variant: refresh sufficient statistics and posteriors only for the
local clusters whose index appears in `clusters`, also updating their
`points_count`. Clusters that end up with zero points are left as-is.
"""
function update_suff_stats_posterior!(group::local_group, clusters::AbstractArray{Int64,1})
    local_dim = group.model_hyperparams.local_dim
    for (index,cluster) in enumerate(group.local_clusters)
        if index in clusters
            pts = @view group.points[1 : end, (@view (group.labels .== index)[:])]
            sub_labels = @view group.labels_subcluster[(@view (group.labels .== index)[:]),:]
            cluster.points_count = size(pts,2)
            if cluster.points_count > 0
                update_splittable_cluster_params!(cluster.cluster_params,
                    pts[local_dim:end,:], (@view sub_labels[:]),false)
            end
        end
    end
end
# function split_cluster!(group::local_group, cluster::local_cluster, index::Int64, new_index::Int64)
# labels = @view group.labels[group.labels .== index]
# sub_labels = @view group.labels_subcluster[group.labels .== index]
# labels[sub_labels .== 2] .= new_index
# labels[sub_labels .== 3] .= new_index+1
# labels[sub_labels .== 4] .= new_index+2
# g_split = copy_local_cluster(cluster)
# l_split = copy_local_cluster(cluster)
# lg_split = copy_local_cluster(cluster)
# l_split.cluster_params = create_splittable_from_params(cluster.cluster_params.cluster_params_r, group.model_hyperparams.Ξ±)
# lg_split.cluster_params = create_splittable_from_params(cluster.cluster_params.cluster_params_r, group.model_hyperparams.Ξ±)
# cluster.cluster_params = create_splittable_from_params(cluster.cluster_params.cluster_params_l, group.model_hyperparams.Ξ±)
# g_split.cluster_params = create_splittable_from_params(cluster.cluster_params.cluster_params_l, group.model_hyperparams.Ξ±)
# l_split.points_count = length(labels[sub_labels .== 2])
# cluster.points_count = length(labels[sub_labels .== 1])
# g_split.points_count = length(labels[sub_labels .== 3])
# lg_split.points_count = length(labels[sub_labels .== 4])
# sub_labels .= rand(1:4,length(sub_labels))
# g_split.globalCluster = cluster.globalCluster + length(global_clusters_vector)
# lg_split.globalCluster = g_split.globalCluster
# cluster.globalCluster_subcluster = rand(1:2)
# g_split.globalCluster_subcluster = rand(1:2)
# lg_split.globalCluster_subcluster = rand(1:2)
# l_split.globalCluster_subcluster = rand(1:2)
# # new_sub_labels = @view sub_labels[sub_labels .==2]
# # sub_labels = @view sub_labels[sub_labels .==1]
# # if size(sub_labels,1) > 0
# # create_subclusters_labels!(reshape(sub_labels,:,1),(@view group.points[group.model_hyperparams.local_dim: end,(@view (group.labels .== index)[:])]), cluster.cluster_params)
# # end
# # if size(new_sub_labels,1) > 0
# # create_subclusters_labels!(reshape(new_sub_labels,:,1),(@view group.points[group.model_hyperparams.local_dim: end,(@view (group.labels .== new_index)[:])]), new_cluster.cluster_params)
# # end
# group.local_clusters[new_index] = l_split
# group.local_clusters[new_index+1] = g_split
# group.local_clusters[new_index+2] = lg_split
# end
"""
    split_cluster_local!(group, cluster, index, new_index)

Apply an accepted *local* split of local cluster `index`: points whose
sub-label marks the local-right half (2 or 4) move to `new_index`, the
left/right local subcluster parameters are promoted into two full clusters,
and sub-labels are resampled within each global half (labels in {1,2} stay
in {1,2}, labels in {3,4} stay in {3,4}) so the global-side assignment is
preserved.
"""
function split_cluster_local!(group::local_group, cluster::local_cluster, index::Int64, new_index::Int64)
    labels = @view group.labels[group.labels .== index]
    sub_labels = @view group.labels_subcluster[group.labels .== index]
    labels[sub_labels .== 2] .= new_index
    labels[sub_labels .== 4] .= new_index
    l_split = copy_local_cluster(cluster)
    l_split.cluster_params = create_splittable_from_params(cluster.cluster_params.cluster_params_r, group.model_hyperparams.Ξ·)
    cluster.cluster_params = create_splittable_from_params(cluster.cluster_params.cluster_params_l, group.model_hyperparams.Ξ·)
    l_split.points_count = length(labels[sub_labels .== 2]) + length(labels[sub_labels .== 4])
    cluster.points_count = length(labels[sub_labels .== 1]) + length(labels[sub_labels .== 3])
    # Resample local l/r within each global half (<=2 stays <=2, >2 stays >2).
    sub_labels[(x -> x ==1 || x==2).(sub_labels)] .= rand(1:2,length((@view sub_labels[(x -> x ==1 || x==2).(sub_labels)])))
    sub_labels[(x -> x ==3 || x==4).(sub_labels)] .= rand(3:4,length((@view sub_labels[(x -> x ==3 || x==4).(sub_labels)])))
    group.local_clusters[new_index] = l_split
end
# Split a local cluster along its *global* subclusters: points with sub-label
# 3 or 4 move to a new local cluster at `new_index`, which is pointed at the
# global cluster `new_global_index`.
function split_cluster_global!(group::local_group, cluster::local_cluster, index::Int64, new_index::Int64, new_global_index::Int64)
labels = @view group.labels[group.labels .== index]
sub_labels = @view group.labels_subcluster[group.labels .== index]
labels[sub_labels .== 3] .= new_index
labels[sub_labels .== 4] .= new_index
g_split = copy_local_cluster(cluster)
g_split.points_count = length(labels[sub_labels .== 3]) + length(labels[sub_labels .== 4])
cluster.points_count = length(labels[sub_labels .== 1]) + length(labels[sub_labels .== 2])
# Re-randomize while preserving the local (odd/even) half:
# 1/3 draw from {1,3}, 2/4 draw from {2,4} (step-2 ranges).
sub_labels[(x -> x ==1 || x==3).(sub_labels)] .= rand(1:2:3,length((@view sub_labels[(x -> x ==1 || x==3).(sub_labels)])))
sub_labels[(x -> x ==2 || x==4).(sub_labels)] .= rand(2:2:4,length((@view sub_labels[(x -> x ==2 || x==4).(sub_labels)])))
g_split.globalCluster = new_global_index
group.local_clusters[new_index] = g_split
end
# function split_cluster!(group::local_group, cluster::local_cluster, index::Int64, new_index::Int64)
# labels = @view group.labels[group.labels .== index]
# sub_labels = @view group.labels_subcluster[group.labels .== index]
# labels[sub_labels .== 2] .= new_index
# labels[sub_labels .== 3] .= new_index+1
# labels[sub_labels .== 2] .= new_index+2
# new_cluster = copy_local_cluster(cluster)
# new_cluster.cluster_params = create_splittable_from_params(cluster.cluster_params.cluster_params_r, group.model_hyperparams.Ξ·)
# cluster.cluster_params = create_splittable_from_params(cluster.cluster_params.cluster_params_l, group.model_hyperparams.Ξ±)
# new_cluster.points_count = new_cluster.cluster_params.cluster_params.suff_statistics.N
# cluster.points_count = cluster.cluster_params.cluster_params.suff_statistics.N
# new_sub_labels = @view sub_labels[sub_labels .==2]
# sub_labels = @view sub_labels[sub_labels .==1]
# if size(sub_labels,1) > 0
# create_subclusters_labels!(reshape(sub_labels,:,1),(@view group.points[group.model_hyperparams.local_dim: end,(@view (group.labels .== index)[:])]), cluster.cluster_params)
# end
# if size(new_sub_labels,1) > 0
# create_subclusters_labels!(reshape(new_sub_labels,:,1),(@view group.points[group.model_hyperparams.local_dim: end,(@view (group.labels .== new_index)[:])]), new_cluster.cluster_params)
# end
# group.local_clusters[new_index] = new_cluster
# end
# Merge local cluster `index_r` into `index_l`: combine their parameters into
# one splittable cluster, zero out the absorbed cluster (it is physically
# removed later by remove_empty_clusters!), and remap sub-labels so the former
# left/right clusters become the l/r subclusters of the merged one.
function merge_clusters!(group::local_group,index_l::Int64, index_r::Int64)
new_splittable_cluster = merge_clusters_to_splittable(group.local_clusters[index_l].cluster_params.cluster_params, group.local_clusters[index_r].cluster_params.cluster_params, group.model_hyperparams.Ξ·)
group.local_clusters[index_l].cluster_params = new_splittable_cluster
group.local_clusters[index_l].points_count += group.local_clusters[index_r].points_count
# Emptied cluster is marked unsplittable so split/merge proposals skip it.
group.local_clusters[index_r].points_count = 0
group.local_clusters[index_r].cluster_params.cluster_params.suff_statistics.N = 0
group.local_clusters[index_r].cluster_params.splittable = false
# println("merging " * string(index_l) * " with " * string(index_r))
# Left-cluster points become sub-label 1/3, right-cluster points 2/4,
# preserving each point's global half (<=2 vs >2).
for i=1:size(group.labels_subcluster,1)
if group.labels[i] == index_l
if group.labels_subcluster[i] <= 2
group.labels_subcluster[i] = 1
else
group.labels_subcluster[i] = 3
end
elseif group.labels[i] == index_r
if group.labels_subcluster[i] <= 2
group.labels_subcluster[i] = 2
else
group.labels_subcluster[i] = 4
end
end
end
group.labels[@view (group.labels .== index_r)[:]] .= index_l
end
"""
    should_split_local!(should_split, cluster_params, global_params, global_subcluster_suff, Ξ±, final)

Metropolis-Hastings accept/reject decision for splitting a local cluster that
also carries global subcluster statistics. Sets `should_split .= 1` on accept;
always rejects (`.= 0`) on the final pass or when either local subcluster is
empty. `global_subcluster_suff` holds the (left, right, combined) global
sufficient statistics.
"""
function should_split_local!(should_split::AbstractArray{Float64,1},
        cluster_params::splittable_cluster_params, global_params::splittable_cluster_params,
        global_subcluster_suff::Vector{sufficient_statistics}, Ξ±::Float64, final::Bool)
    cpl = cluster_params.cluster_params_l
    cpr = cluster_params.cluster_params_r
    cp = cluster_params.cluster_params
    cpgl = global_params.cluster_params_l
    cpgr = global_params.cluster_params_r
    cpg = global_params.cluster_params
    # Degenerate splits (an empty side) and the final pass are never accepted.
    if final || cpl.suff_statistics.N == 0 || cpr.suff_statistics.N == 0
        should_split .= 0
        return
    end
    # Marginal likelihoods of the local subclusters, their global
    # counterparts, and the unsplit cluster.
    log_likihood_l = log_marginal_likelihood(cpl.hyperparams, cpl.posterior_hyperparams, cpl.suff_statistics)
    log_likihood_r = log_marginal_likelihood(cpr.hyperparams, cpr.posterior_hyperparams, cpr.suff_statistics)
    log_likihood_gl = log_marginal_likelihood(cpgl.hyperparams, cpgl.posterior_hyperparams, global_subcluster_suff[1])
    log_likihood_gr = log_marginal_likelihood(cpgr.hyperparams, cpgr.posterior_hyperparams, global_subcluster_suff[2])
    log_likihood = log_marginal_likelihood(cp.hyperparams, cp.posterior_hyperparams, cp.suff_statistics)
    log_likihood_g = log_marginal_likelihood(cpg.hyperparams, cpg.posterior_hyperparams, global_subcluster_suff[3])
    # log Hastings ratio of the split proposal.
    # (Stray debug `println(log_likihood_l)` removed from the original.)
    log_HR = (log(Ξ±) + logabsgamma(cpl.suff_statistics.N + cpgl.suff_statistics.N)[1] + log_likihood_l +
        logabsgamma(cpr.suff_statistics.N + cpgr.suff_statistics.N)[1] + log_likihood_r +
        log_likihood_gl + log_likihood_gr -
        (logabsgamma(cp.suff_statistics.N + cpg.suff_statistics.N)[1] + log_likihood + log_likihood_g))
    if log_HR > log(rand())
        should_split .= 1
    end
end
# Metropolis-Hastings split decision for a purely local cluster: sets
# should_split[1] = 1 on accept. Rejects on the final pass or empty subclusters
# unless `is_zero_dim` (no local features), in which case the likelihood terms
# are zeroed and only the count/Ξ± terms decide.
function should_split_local!(should_split::AbstractArray{Float64,1},
cluster_params::splittable_cluster_params, Ξ±::Float64, final::Bool, is_zero_dim = false)
cpl = cluster_params.cluster_params_l
cpr = cluster_params.cluster_params_r
cp = cluster_params.cluster_params
# println("bob")
if (final || cpl.suff_statistics.N == 0 ||cpr.suff_statistics.N == 0) && is_zero_dim == false
should_split .= 0
return
end
log_likihood_l = log_marginal_likelihood(cpl.hyperparams,cpl.posterior_hyperparams, cpl.suff_statistics)
log_likihood_r = log_marginal_likelihood(cpr.hyperparams,cpr.posterior_hyperparams, cpr.suff_statistics)
log_likihood = log_marginal_likelihood(cp.hyperparams, cp.posterior_hyperparams, cp.suff_statistics)
# Zero-dimensional clusters carry no local likelihood contribution.
if is_zero_dim
log_likihood_l = 0
log_likihood_r = 0
log_likihood = 0
end
# log Hastings ratio of the split proposal.
log_HR = (log(Ξ±) + logabsgamma(cpl.suff_statistics.N)[1] + log_likihood_l +
logabsgamma(cpr.suff_statistics.N)[1] + log_likihood_r -
(logabsgamma(cp.suff_statistics.N)[1] + log_likihood))
if log_HR > log(rand())
should_split .= 1
end
end
# Metropolis-Hastings merge decision for clusters cpl and cpr: sets
# should_merge[1] = 1 on accept. On the final pass the merge is instead
# accepted deterministically whenever log_HR > log(0.5).
function should_merge!(should_merge::AbstractArray{Float64,1},lr_weights::AbstractArray{Float64, 1}, cpl::cluster_parameters,cpr::cluster_parameters, Ξ±::Float64, final::Bool)
# Posterior of the hypothetical merged cluster built from both suff stats.
new_suff = aggregate_suff_stats(cpl.suff_statistics, cpr.suff_statistics)
cp = cluster_parameters(cpl.hyperparams, cpl.distribution, new_suff,cpl.posterior_hyperparams)
cp.posterior_hyperparams = calc_posterior(cp.hyperparams, cp.suff_statistics)
log_likihood_l = log_marginal_likelihood(cpl.hyperparams, cpl.posterior_hyperparams, cpl.suff_statistics)
log_likihood_r = log_marginal_likelihood(cpr.hyperparams, cpr.posterior_hyperparams, cpr.suff_statistics)
log_likihood = log_marginal_likelihood(cp.hyperparams, cp.posterior_hyperparams, cp.suff_statistics)
# log Hastings ratio of the merge (inverse of the split ratio, with the
# lr_weights entering through the Dirichlet terms).
log_HR = -log(Ξ±) + logabsgamma(Ξ±)[1] -logabsgamma(lr_weights[1]*Ξ±)[1] -logabsgamma(lr_weights[2]*Ξ±)[1] + logabsgamma(cp.suff_statistics.N)[1] -logabsgamma(cp.suff_statistics.N + Ξ±)[1] + logabsgamma(cpl.suff_statistics.N + lr_weights[1]*Ξ±)[1]-logabsgamma(cpl.suff_statistics.N)[1] -
logabsgamma(cpr.suff_statistics.N)[1] + logabsgamma(cpr.suff_statistics.N + lr_weights[2]*Ξ±)[1]+
log_likihood- log_likihood_l- log_likihood_r
if (log_HR > log(rand())) || (final && log_HR > log(0.5))
should_merge .= 1
end
end
"""
    check_and_split!(group, final)

Run the local split proposal on every splittable cluster in `group`; accepted
splits append new clusters at the end of `group.local_clusters`. Returns the
indices of the newly created clusters.
"""
function check_and_split!(group::local_group, final::Bool)
    # split_arr[i] == 1 marks cluster i as accepted for splitting.
    split_arr = zeros(length(group.local_clusters))
    for (index, cluster) in enumerate(group.local_clusters)
        if cluster.cluster_params.splittable == true
            # split_arr[index,:] is a 1-element view (trailing-dim indexing),
            # matching the AbstractArray{Float64,1} output parameter.
            should_split_local!((@view split_arr[index,:]), cluster.cluster_params,
                group.model_hyperparams.Ξ·, final, false)
        end
    end
    new_index = length(group.local_clusters) + 1
    indices = Vector{Int64}()
    # Grow the cluster vector; new slots are filled below by split_cluster_local!.
    resize!(group.local_clusters, Int64(length(group.local_clusters) + sum(split_arr)))
    for i = 1:length(split_arr)
        if split_arr[i] == 1
            push!(indices, new_index)
            split_cluster_local!(group, group.local_clusters[i], i, new_index)
            new_index += 1
        end
    end
    return indices
end
# Run pairwise merge proposals within every set of local clusters that share a
# global cluster, and return the indices of the clusters that absorbed a merge.
function check_and_merge!(group::local_group, final::Bool)
    merged_indices = Vector{Int64}()
    # Only clusters under the same global cluster are merge candidates.
    for candidate_set in values(create_mergable_dict(group))
        merged_indices = vcat(merged_indices, check_and_merge!(group, candidate_set, final))
    end
    return merged_indices
end
# Propose merges between all ordered pairs of splittable clusters in
# `indices`; an accepted merge folds cluster j into cluster i. Returns the
# indices of absorbing clusters (may contain duplicates).
function check_and_merge!(group::local_group, indices::Vector{Int64}, final::Bool)
# 1-element flag written by should_merge! (1.0 = accept).
mergable = zeros(1)
ret_indices = Vector{Int64}()
for i=1:length(indices)
for j=i+1:length(indices)
# merge_clusters! clears the splittable flag of both participants, so
# clusters already merged this sweep are skipped by this guard.
if (group.local_clusters[indices[i]].cluster_params.splittable == true && group.local_clusters[indices[j]].cluster_params.splittable == true)
should_merge!(mergable,group.local_clusters[indices[j]].cluster_params.lr_weights, group.local_clusters[indices[i]].cluster_params.cluster_params,
group.local_clusters[indices[j]].cluster_params.cluster_params, group.model_hyperparams.Ξ·, final)
end
if mergable[1] == 1
merge_clusters!(group, indices[i], indices[j])
push!(ret_indices, indices[i])
end
mergable[1] = 0
end
end
return ret_indices
end
"""
    should_split_global!(should_split, cluster_params, points, sublabels, Ξ±, final, group_num)

Split decision along the *global* subcluster axis: compares the pointwise
log-likelihood of `points` under the combined distribution against the
left (`sublabels .<= 2`) and right (`sublabels .> 2`) subcluster
distributions. Sets `should_split .= 1` on accept, `.= 0` when either side is
empty. `Ξ±` and `final` are currently unused but kept for interface parity.
"""
function should_split_global!(should_split::AbstractArray{Float64,1},
        cluster_params::splittable_cluster_params,
        points::AbstractArray{Float64,2},
        sublabels::AbstractArray{Int64,2},
        Ξ±::Float64,
        final::Bool,
        group_num::Int64)
    parr = zeros(size(points,2), 1)
    sleft = @view (sublabels .<=2)[:]
    sright = @view (sublabels .>2)[:]
    lcount = sum(sleft)
    rcount = sum(sright)
    # A split with an empty side can never be accepted.
    if lcount == 0 || rcount == 0
        should_split .= 0
        return
    end
    parr_left = zeros(lcount, 1)
    parr_right = zeros(rcount, 1)
    # Bug fix: pass views so log_likelihood! writes into the arrays themselves.
    # The original passed plain slices (`parr[:,1]`), which are copies, so the
    # computed likelihoods were silently discarded; the @view form matches the
    # usage in sample_labels!.
    log_likelihood!((@view parr[:,1]),points,cluster_params.cluster_params.distribution,group_num)
    log_likelihood!((@view parr_left[:,1]),points[:,sleft],cluster_params.cluster_params_l.distribution,group_num)
    log_likelihood!((@view parr_right[:,1]),points[:,sright],cluster_params.cluster_params_r.distribution,group_num)
    sum_all = sum(parr,dims = 1)[1]
    sum_left = sum(parr_left,dims = 1)[1]
    sum_right = sum(parr_right,dims = 1)[1]
    # log Hastings ratio of the global split proposal.
    h_ratio = sum_left + logabsgamma(lcount)[1] + sum_right + logabsgamma(rcount)[1] - sum_all - logabsgamma(lcount + rcount)[1]
    if h_ratio > log(rand())
        should_split .= 1
    end
end
# Run the global split proposal on every splittable local cluster; accepted
# splits create a new local cluster attached to a provisional new global
# cluster id (current globalCluster + number of existing global clusters).
# Returns the indices of the newly created local clusters.
function check_and_split_global!(group::local_group, final::Bool)
split_arr= zeros(length(group.local_clusters))
for (index,cluster) in enumerate(group.local_clusters)
if cluster.cluster_params.splittable == true
# should_split_local!((@view split_arr[index,:]), cluster.cluster_params,
#     global_clusters_vector[cluster.globalCluster].cluster_params,
#     cluster.global_subcluster_suff, group.model_hyperparams.Ξ±,final)
# Global features are rows 1:local_dim-1 of the points matrix.
# NOTE(review): `cluster.local_dim` -- elsewhere local_dim lives on
# model_hyperparams; confirm the field exists on local_cluster.
should_split_global!((@view split_arr[index,:]),
global_clusters_vector[cluster.globalCluster].cluster_params,
group.points[1:cluster.local_dim-1,(@view (group.labels .== index)[:])],
(@view group.labels_subcluster[(@view (group.labels .== index)[:]),:]),
group.model_hyperparams.Ξ±,final, group.group_num)
# split_arr[index,:] .= 1
#break #This ensures 1 split per iteration
end
end
new_index = length(group.local_clusters) + 1
global_count = length(global_clusters_vector)
indices = Vector{Int64}()
resize!(group.local_clusters,Int64(length(group.local_clusters) + sum(split_arr)))
for i=1:length(split_arr)
if split_arr[i] == 1
# push!(indices, new_index)
# push!(indices, new_index+1)
# push!(indices, new_index+2)
# split_cluster!(group, group.local_clusters[i],i,new_index)
# new_index += 3
push!(indices, new_index)
# println("new g cluster:" * string(group.local_clusters[i].globalCluster + global_count))
split_cluster_global!(group, group.local_clusters[i],i,new_index,group.local_clusters[i].globalCluster + global_count)
new_index += 1
println("Global single split")
end
end
return indices
end
"""
    create_mergable_dict(group)

Map each global-cluster id to the vector of local-cluster indices in `group`
assigned to it. Used to restrict merge proposals to clusters sharing a global
cluster.
"""
function create_mergable_dict(group::local_group)
    # Value type pinned to Vector{Int64}; key type left open since the
    # globalCluster field's type is declared elsewhere.
    clusters_dict = Dict{Any,Vector{Int64}}()
    for (index, cluster) in enumerate(group.local_clusters)
        # get! inserts an empty bucket on first sight of this global id.
        push!(get!(clusters_dict, cluster.globalCluster, Int64[]), index)
    end
    return clusters_dict
end
# Resample every local cluster's parameters, then redraw the group's mixture
# weights from a Dirichlet over the per-cluster point counts with an extra Ξ±
# pseudo-count for the (dropped) "new cluster" slot.
function sample_clusters!(group::local_group)
    counts = Float64[]
    for cluster in group.local_clusters
        push!(counts, sample_cluster_params!(cluster.cluster_params, group.model_hyperparams.Ξ±, true))
    end
    # Trailing Ξ± represents the unborn cluster; its weight is discarded below.
    push!(counts, group.model_hyperparams.Ξ±)
    group.weights = rand(Dirichlet(counts))[1:end-1]
end
# Drop local clusters whose sufficient-statistic count N is zero, shifting the
# label ids of all later clusters down so `group.labels` stays consistent with
# the compacted cluster vector.
function remove_empty_clusters!(group::local_group)
new_vec = Vector{local_cluster}()
removed = 0
for (index,cluster) in enumerate(group.local_clusters)
if cluster.cluster_params.cluster_params.suff_statistics.N == 0
# println("test" * string(index))
# `index - removed` is this cluster's id after earlier removals; all
# labels above it slide down by one.
group.labels[group.labels .> index - removed] .-= 1
removed += 1
else
push!(new_vec,cluster)
end
end
group.local_clusters = new_vec
end
# Propagate a global-cluster split into this group: every local cluster
# pointing at `global_cluster` is split, its 3/4 sub-labeled points moving to
# a new local cluster attached to `new_global_index`.
function split_global_cluster!(group::local_group,global_cluster::Int64, clusters_count::Int64, new_global_index::Int64)
clusters_count = 0
for cluster in group.local_clusters
if cluster.globalCluster == global_cluster
clusters_count += 1
end
end
new_index = length(group.local_clusters) + 1
resize!(group.local_clusters,Int64(length(group.local_clusters) + clusters_count))
# NOTE(review): the loop also visits the slots appended by resize!, but each
# is filled by split_cluster_global! before iteration reaches it (all matches
# sit at the original indices), and the new clusters carry new_global_index,
# so they are not re-split.
for (i,cluster) in enumerate(group.local_clusters)
if cluster.globalCluster == global_cluster
split_cluster_global!(group, cluster, i,new_index,new_global_index)
new_index += 1
end
end
end
# Propagate a global-cluster merge into this group: local clusters under
# `merged_index` are redirected to `global_cluster`; sub-labels are remapped so
# survivors occupy the 1/2 half and absorbed clusters the 3/4 half.
function merge_global_cluster!(group::local_group,global_cluster::Int64, merged_index::Int64)
# NOTE(review): clusters_count is computed but never used below.
clusters_count = 0
for cluster in group.local_clusters
if cluster.globalCluster == global_cluster
clusters_count += 1
end
end
for (i,cluster) in enumerate(group.local_clusters)
if cluster.globalCluster == global_cluster
# Surviving global cluster: collapse its points into the 1/2 half.
sublabels = @view group.labels_subcluster[group.labels .== i]
sublabels[(x -> x ==3 || x==4).(sublabels)] .-= 2
elseif cluster.globalCluster == merged_index
# Absorbed cluster: move its points into the 3/4 half and repoint it.
sublabels = @view group.labels_subcluster[group.labels .== i]
sublabels[(x -> x ==1 || x==2).(sublabels)] .+= 2
cluster.globalCluster = global_cluster
end
end
end
"""
    group_step(group_num, local_clusters, final)

One full sampling sweep for a group: resample cluster parameters and weights,
resample point labels and sub-labels, refresh sufficient statistics, compact
empty clusters, and (unless `final` or `ignore_local`) run local split/merge
proposals. Returns the group's updated `local_group_stats`.
"""
function group_step(group_num::Number, local_clusters::Vector{local_cluster}, final::Bool)
    group = groups_dict[group_num]
    group.local_clusters = local_clusters
    sample_clusters!(group)
    # hard_clustering forces argmax label assignment every sweep.
    sample_labels!(group, (hard_clustering ? true : final))
    sample_sub_clusters!(group, false)
    update_suff_stats_posterior!(group)
    remove_empty_clusters!(group)
    if final == false && ignore_local == false
        check_and_split!(group, final)
        # Returned merge indices are not needed here (dead binding removed).
        check_and_merge!(group, final)
    end
    remove_empty_clusters!(group)
    return local_group_stats(group.labels, group.labels_subcluster, group.local_clusters)
end
# Register (or replace) a group's state under its id in the global dictionary.
set_group(group_num, group) = (groups_dict[group_num] = group)
# Set the module-wide burn-out window length used by sample_cluster_params!.
set_burnout(burnout) = (global burnout_period = burnout)
# Install the shared vector of global clusters as a module-wide global.
set_global_clusters_vector(g_vector) = (global global_clusters_vector = g_vector)
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 641 | #Global Setting
# Module-wide configuration flags, read as globals throughout the sampler.
use_gpu = false # GPU execution toggle -- presumably gates CUDA paths; TODO confirm
use_darrays = false #Only relevant if use_gpu = false
hard_clustering = false # when true, labels are assigned by argmax instead of sampled (see group_step)
ignore_local = false # when true, group_step skips local split/merge proposals
global_weight = 1.0 # weight of the global likelihood term -- TODO confirm usage site
local_weight= 1.0 # weight of the local likelihood term -- TODO confirm usage site
split_delays = false # referenced only in commented-out burn-in logic in sample_cluster_params!
# #Model Parameters
# iterations = 10
#
# total_dim = 3
# local_dim = 3
#
# Ξ± = 4.0
# Ξ³ = 2.0
# global_weight = 1.0
# local_weight= 1.0
#
#
# global_hyper_params = niw_hyperparams(5.0,
# ones(3)*2.5,
# 10.0,
# [[0.8662817 0.78323282 0.41225376];[0.78323282 0.74170384 0.50340258];[0.41225376 0.50340258 0.79185577]])
#
# local_hyper_params = niw_hyperparams(1.0,
# [217.0,510.0],
# 10.0,
# Matrix{Float64}(I, 2, 2)*0.5)
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
"""
    create_subclusters_labels!(labels, points, cluster_params)

Sample a left/right subcluster label (1 or 2) for every point from the
weight-adjusted log-likelihoods of the cluster's l/r subcluster distributions.
No-op when `labels` is empty.
"""
function create_subclusters_labels!(labels::AbstractArray{Int64,2}, points::AbstractArray{Float64,2}, cluster_params::splittable_cluster_params)
    if size(labels, 1) == 0
        return
    end
    lr_arr = zeros(length(labels), 2)
    # Bug fix: pass views so log_likelihood! writes into lr_arr itself. The
    # original passed plain slices (`lr_arr[:,1]`), which are copies, so the
    # computed likelihoods were silently discarded; the @view form matches the
    # usage in sample_labels!.
    log_likelihood!((@view lr_arr[:, 1]), points, cluster_params.cluster_params_l.distribution)
    log_likelihood!((@view lr_arr[:, 2]), points, cluster_params.cluster_params_r.distribution)
    lr_arr[:, 1] .+= log(cluster_params.lr_weights[1])
    lr_arr[:, 2] .+= log(cluster_params.lr_weights[2])
    sample_log_cat_array!(labels, lr_arr)
end
# Sample a cluster label per point from already-sampled per-cluster
# distributions (`clusters_samples`: cluster id => distribution).
# assumes keys of clusters_samples are exactly 1:length(clusters_samples) --
# TODO confirm against callers.
function sample_labels!(labels::AbstractArray{Int64,2},points::AbstractArray{Float64,2},clusters_samples::Dict,final::Bool)
parr = zeros(length(labels), length(clusters_samples))
for (k,v) in clusters_samples
log_likelihood!((@view parr[:,k]),points,v)
end
# 3-arg sample_log_cat_array! variant (with `final`) is defined elsewhere.
sample_log_cat_array!(labels,parr,final)
end
# Wrap cluster parameters into a splittable cluster: two freshly sampled l/r
# subcluster distributions (deep copies of `params`) and Dirichlet(Ξ±/2, Ξ±/2)
# l/r weights. The new cluster starts with splittable = false and an all -Inf
# likelihood history.
function create_splittable_from_params(params::cluster_parameters, Ξ±::Float64)
params_l = deepcopy(params)
params_l.distribution = sample_distribution(params.posterior_hyperparams)
params_r = deepcopy(params)
params_r.distribution = sample_distribution(params.posterior_hyperparams)
#params_l = cluster_parameters(params.distribution_hyper_params, sample_distribution(params.posterior_hyperparams), params.suff_stats, params.posterior_hyperparams)
#params_r = cluster_parameters(params.distribution_hyper_params, sample_distribution(params.posterior_hyperparams), params.suff_stats, params.posterior_hyperparams)
lr_weights = rand(Dirichlet([Ξ± / 2, Ξ± / 2]))
# History length 20 must cover burnout_period (sample_cluster_params! indexes
# it with burnout_period) -- TODO confirm.
return splittable_cluster_params(params,params_l,params_r,lr_weights, false, ones(20).*-Inf)
end
# Combine two clusters into one splittable cluster: the originals become its
# l/r subclusters and the merged posterior is computed from their aggregated
# sufficient statistics. The l/r weights are drawn from a Dirichlet with
# pseudo-counts N + Ξ±/2 per side.
function merge_clusters_to_splittable(cpl::cluster_parameters,cpr::cluster_parameters, Ξ±::Float64)
    merged_suff = aggregate_suff_stats(cpl.suff_statistics, cpr.suff_statistics)
    merged_posterior = calc_posterior(cpl.hyperparams, merged_suff)
    weights = rand(Dirichlet([cpl.suff_statistics.N + (Ξ± / 2), cpr.suff_statistics.N + (Ξ± / 2)]))
    merged = cluster_parameters(cpl.hyperparams, cpl.distribution, merged_suff, merged_posterior)
    # Starts unsplittable with an all -Inf likelihood history.
    return splittable_cluster_params(merged, cpl, cpr, weights, false, fill(-Inf, 20))
end
# Plain (non-HDP) Metropolis-Hastings split decision: sets should_split[1] = 1
# on accept; always rejects on the final pass or when a subcluster is empty.
function should_split!(should_split::AbstractArray{Float64,1}, cluster_params::splittable_cluster_params, Ξ±::Float64, final::Bool)
cpl = cluster_params.cluster_params_l
cpr = cluster_params.cluster_params_r
cp = cluster_params.cluster_params
if final || cpl.suff_statistics.N == 0 ||cpr.suff_statistics.N == 0
should_split .= 0
return
end
log_likihood_l = log_marginal_likelihood(cpl.hyperparams, cpl. posterior_hyperparams, cpl.suff_statistics)
log_likihood_r = log_marginal_likelihood(cpr.hyperparams, cpr. posterior_hyperparams, cpr.suff_statistics)
log_likihood = log_marginal_likelihood(cp.hyperparams, cp. posterior_hyperparams, cp.suff_statistics)
# log Hastings ratio of the split proposal.
log_HR = log(Ξ±) + logabsgamma(cpl.suff_statistics.N)[1] + log_likihood_l + logabsgamma(cpr.suff_statistics.N)[1] + log_likihood_r-(logabsgamma(cp.suff_statistics.N)[1] + log_likihood)
if log_HR > log(rand())
should_split .= 1
end
end
"""
    should_split!(cluster_params, Ξ±, final)

Compute and return the log Hastings ratio of splitting `cluster_params` into
its l/r subclusters (diagnostic variant of the in-place method above).
Returns `-Inf` when `final` is set or either subcluster is empty, since such a
split is impossible.
"""
function should_split!(cluster_params::splittable_cluster_params, Ξ±::Float64, final::Bool)
    cpl = cluster_params.cluster_params_l
    cpr = cluster_params.cluster_params_r
    cp = cluster_params.cluster_params
    # Bug fix: the original referenced an undefined `should_split` variable on
    # this branch, raising UndefVarError; return -Inf (impossible split) instead.
    if final || cpl.suff_statistics.N == 0 || cpr.suff_statistics.N == 0
        return -Inf
    end
    log_likihood_l = log_marginal_likelihood(cpl.hyperparams, cpl.posterior_hyperparams, cpl.suff_statistics)
    log_likihood_r = log_marginal_likelihood(cpr.hyperparams, cpr.posterior_hyperparams, cpr.suff_statistics)
    log_likihood = log_marginal_likelihood(cp.hyperparams, cp.posterior_hyperparams, cp.suff_statistics)
    log_HR = log(Ξ±) + logabsgamma(cpl.suff_statistics.N)[1] + log_likihood_l +
        logabsgamma(cpr.suff_statistics.N)[1] + log_likihood_r -
        (logabsgamma(cp.suff_statistics.N)[1] + log_likihood)
    return log_HR
end
# Recompute sufficient statistics and posterior hyperparameters for a
# splittable cluster and its l/r subclusters, in place. In global mode the
# points partition by sub-label half (<=2 left, >2 right) and per-point group
# ids are forwarded; in local mode the partition is by sub-label parity
# (odd = left, even = right).
function update_splittable_cluster_params!(splittable_cluser::splittable_cluster_params,
points::AbstractArray{Float64,2},
sub_labels::AbstractArray{Int64,1},
is_global::Bool,
pts_to_groups = -1)
cpl = splittable_cluser.cluster_params_l
cpr = splittable_cluser.cluster_params_r
cp = splittable_cluser.cluster_params
if is_global
cp.suff_statistics = create_sufficient_statistics(cp.hyperparams,cp.posterior_hyperparams, points, pts_to_groups)
pts_gl = @view pts_to_groups[(@view (sub_labels .<= 2)[:])]
pts_gr = @view pts_to_groups[(@view (sub_labels .> 2)[:])]
cpl.suff_statistics = create_sufficient_statistics(cpl.hyperparams,cpl.posterior_hyperparams, (@view points[:,(@view (sub_labels .<= 2)[:])]),pts_gl)
cpr.suff_statistics = create_sufficient_statistics(cpr.hyperparams,cpr.posterior_hyperparams, (@view points[:,(@view (sub_labels .> 2)[:])]),pts_gr)
else
cp.suff_statistics = create_sufficient_statistics(cp.hyperparams,cp.posterior_hyperparams, points)
cpl.suff_statistics = create_sufficient_statistics(cpl.hyperparams,cpl.posterior_hyperparams, @view points[:,(@view (sub_labels .% 2 .== 1)[:])])
cpr.suff_statistics = create_sufficient_statistics(cpr.hyperparams,cpr.posterior_hyperparams, @view points[:,(@view (sub_labels .% 2 .== 0)[:])])
end
cp.posterior_hyperparams = calc_posterior(cp.hyperparams, cp.suff_statistics)
cpl.posterior_hyperparams = calc_posterior(cpl.hyperparams, cpl.suff_statistics)
cpr.posterior_hyperparams = calc_posterior(cpr.hyperparams, cpr.suff_statistics)
end
# Same computation as update_splittable_cluster_params! but also returns the
# (mutated) splittable cluster, for use where the result is passed onward.
function update_splittable_cluster_params(splittable_cluser::splittable_cluster_params,
points::AbstractArray{Float64,2},
sub_labels::AbstractArray{Int64,1},
is_global::Bool,
pts_to_groups = -1)
cpl = splittable_cluser.cluster_params_l
cpr = splittable_cluser.cluster_params_r
cp = splittable_cluser.cluster_params
# println(sum((@view (sub_labels .> 2)[:])))
if is_global
# Global mode: partition by sub-label half and forward per-point group ids.
cp.suff_statistics = create_sufficient_statistics(cp.hyperparams,cp.posterior_hyperparams, points, pts_to_groups)
pts_gl = @view pts_to_groups[(@view (sub_labels .<= 2)[:])]
pts_gr = @view pts_to_groups[(@view (sub_labels .> 2)[:])]
cpl.suff_statistics = create_sufficient_statistics(cpl.hyperparams, cpl.posterior_hyperparams, (@view points[:,(@view (sub_labels .<= 2)[:])]),pts_gl)
cpr.suff_statistics = create_sufficient_statistics(cpr.hyperparams, cpr.posterior_hyperparams,(@view points[:,(@view (sub_labels .> 2)[:])]),pts_gr)
else
# Local mode: partition by sub-label parity (odd = left, even = right).
cp.suff_statistics = create_sufficient_statistics(cp.hyperparams,cp.posterior_hyperparams,points)
cpl.suff_statistics = create_sufficient_statistics(cpl.hyperparams, cpl.posterior_hyperparams,@view points[:,(@view (sub_labels .% 2 .== 1)[:])])
cpr.suff_statistics = create_sufficient_statistics(cpr.hyperparams, cpr.posterior_hyperparams,@view points[:,(@view (sub_labels .% 2 .== 0)[:])])
end
begin
cp.posterior_hyperparams = calc_posterior(cp.hyperparams, cp.suff_statistics)
cpl.posterior_hyperparams = calc_posterior(cpl.hyperparams, cpl.suff_statistics)
cpr.posterior_hyperparams = calc_posterior(cpr.hyperparams, cpr.suff_statistics)
end
return splittable_cluser
end
# Resample the cluster's and its l/r subclusters' distributions from their
# posteriors, redraw the l/r weights from Dirichlet(N_l + Ξ±/2, N_r + Ξ±/2), and
# push the new l+r marginal likelihood into a sliding burn-out history: the
# cluster becomes splittable once the history's weighted mean stops improving
# over the newest entry (or immediately when is_zero_dim). Returns the
# cluster's point count N.
function sample_cluster_params!(params::splittable_cluster_params, Ξ±::Float64, is_zero_dim = false)
points_count = Vector{Float64}()
params.cluster_params.distribution = sample_distribution(params.cluster_params.posterior_hyperparams)
params.cluster_params_l.distribution = sample_distribution(params.cluster_params_l.posterior_hyperparams)
params.cluster_params_r.distribution = sample_distribution(params.cluster_params_r.posterior_hyperparams)
push!(points_count, params.cluster_params_l.suff_statistics.N)
push!(points_count, params.cluster_params_r.suff_statistics.N)
points_count .+= Ξ±/2
params.lr_weights = rand(Dirichlet(points_count))
log_likihood_l = log_marginal_likelihood(params.cluster_params_l.hyperparams,params.cluster_params_l.posterior_hyperparams, params.cluster_params_l.suff_statistics)
log_likihood_r = log_marginal_likelihood(params.cluster_params_r.hyperparams,params.cluster_params_r.posterior_hyperparams, params.cluster_params_r.suff_statistics)
# params.logsublikelihood_hist[1:4] = params.logsublikelihood_hist[2:5]
# params.logsublikelihood_hist[5] = log_likihood_l + log_likihood_r
# logsublikelihood_now = 0.0
# for i=1:5
#     logsublikelihood_now += params.logsublikelihood_hist[i] *0.20
# end
# if logsublikelihood_now != -Inf && logsublikelihood_now - params.logsublikelihood_hist[5] < 1e-2 # propogate abs change to other versions?
#     params.splittable = true
# end
# println(split_delays)
# logsublikelihood_now = (log_likihood_l+log_likihood_r) / (params.cluster_params_l.suff_statistics.N+ params.cluster_params_r.suff_statistics.N)
# if logsublikelihood_now - params.logsublikelihood_hist[1] < 0
#     params.splittable = true
# end
# params.logsublikelihood_hist[1] = logsublikelihood_now
#
# if split_delays == false
#     params.splittable = true
# end
# Shift the burn-out window and append the newest l+r marginal likelihood.
# `burnout_period` is a module-wide global (see set_burnout).
params.logsublikelihood_hist[1:burnout_period-1] = params.logsublikelihood_hist[2:burnout_period]
params.logsublikelihood_hist[burnout_period] = log_likihood_l + log_likihood_r
logsublikelihood_now = 0.0
for i=1:burnout_period
logsublikelihood_now += params.logsublikelihood_hist[i] *(1/(burnout_period-0.1))
end
if logsublikelihood_now != -Inf && logsublikelihood_now - params.logsublikelihood_hist[burnout_period] < 1e-2 # propogate abs change to other versions?
# println(params.logsublikelihood_hist)
params.splittable = true
end
if is_zero_dim == true
params.splittable = true
end
return params.cluster_params.suff_statistics.N
end
# Variant of sample_cluster_params! taking a `counts` argument.
# NOTE(review): `counts` is accepted but never used in this method.
function sample_cluster_params!(params::splittable_cluster_params, Ξ±::Float64, counts::AbstractArray{Int64,1})
points_count = [params.cluster_params_l.suff_statistics.N, params.cluster_params_r.suff_statistics.N]
params.cluster_params.distribution = sample_distribution(params.cluster_params.posterior_hyperparams)
params.cluster_params_l.distribution = sample_distribution(params.cluster_params_l.posterior_hyperparams)
params.cluster_params_r.distribution = sample_distribution(params.cluster_params_r.posterior_hyperparams)
points_count .+= Ξ±/2
# println(points_count)
params.lr_weights = rand(Dirichlet(points_count))
log_likihood_l = log_marginal_likelihood(params.cluster_params_l.hyperparams,params.cluster_params_l.posterior_hyperparams, params.cluster_params_l.suff_statistics)
log_likihood_r = log_marginal_likelihood(params.cluster_params_r.hyperparams,params.cluster_params_r.posterior_hyperparams, params.cluster_params_r.suff_statistics)
# params.logsublikelihood_hist[1:4] = params.logsublikelihood_hist[2:5]
# params.logsublikelihood_hist[5] = log_likihood_l + log_likihood_r
# logsublikelihood_now = 0.0
# for i=1:5
#     logsublikelihood_now += params.logsublikelihood_hist[i] *0.20
# end
# if logsublikelihood_now != -Inf && logsublikelihood_now - params.logsublikelihood_hist[5] < 1e-2 # propogate abs change to other versions?
#     params.splittable = true
# end
# Shift the burn-out window and append the newest l+r marginal likelihood;
# splittable once the window's weighted mean stops improving over the newest.
params.logsublikelihood_hist[1:burnout_period-1] = params.logsublikelihood_hist[2:burnout_period]
params.logsublikelihood_hist[burnout_period] = log_likihood_l + log_likihood_r
logsublikelihood_now = 0.0
for i=1:burnout_period
logsublikelihood_now += params.logsublikelihood_hist[i] *(1/(burnout_period-0.1))
end
if logsublikelihood_now != -Inf && logsublikelihood_now - params.logsublikelihood_hist[burnout_period] < 1e-2 # propogate abs change to other versions?
# println(params.logsublikelihood_hist)
params.splittable = true
end
return params.cluster_params.suff_statistics.N
end
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 8335 | # We expects the data to be in npy format, return a dict of {group: items}, each file is a different group
"""
    load_data(path, groupcount; prefix="", swapDimension=true)

Load `groupcount` npy files named `path * prefix * i * ".npy"` into a dict of
group id => array, replacing NaNs with 0.0. When `swapDimension` is set the
arrays are (lazily) transposed so samples lie along columns.
"""
function load_data(path::String, groupcount::Number; prefix::String="", swapDimension::Bool = true)
    groups_dict = Dict()
    for i = 1:groupcount
        arr = npzread(path * prefix * string(i) * ".npy")
        # Zero out NaNs (broadcast replaces the original element-wise loop).
        arr[isnan.(arr)] .= 0.0
        groups_dict[i] = swapDimension ? transpose(arr) : arr
    end
    return groups_dict
end
# Preprocessing on the samples, global_preprocessing and local_preprocessing are functions. only same dimesions input output are supported atm
function preprocessing!(samples_dict,local_dim::Number, global_preprocessing, local_preprocessing)
gp = x->Base.invokelatest(global_preprocessing,x)
lp = x->Base.invokelatest(local_preprocessing,x)
if global_preprocessing == nothing && local_preprocessing == nothing
return
end
for (k,v) in samples_dict
if global_preprocessing != nothing
samples_dict[k][1 : local_dim-1,:] = mapslices(gp,v[1 : local_dim-1,:], dims= [2])
end
if local_preprocessing != nothing
samples_dict[k][local_dim : end,:] = mapslices(lp,v[local_dim : end,:], dims= [2])
end
end
end
# Column-wise dot products: write dot(a[:, j], b[:, j]) into r[j] for the
# first length(r) columns. `a` and `b` must share the covered shape.
function dcolwise_dot!(r::AbstractArray, a::AbstractMatrix, b::AbstractMatrix)
    for j in 1:length(r)
        acc = zero(promote_type(eltype(a), eltype(b)))
        for i in axes(a, 1)
            @inbounds acc += a[i, j] * b[i, j]
        end
        r[j] = acc
    end
end
# Build a callback that stores distributed-computation results into `arr`.
# The callback expects `b` to be an (index, value) pair and always returns -1;
# exceptions are printed rather than propagated.
# NOTE(review): `show(a)`/`show(b)` on the success path look like leftover
# debugging output -- confirm before relying on quiet operation.
function distributer_factory(arr::AbstractArray)
function distributer!(a,b)
#for arg in args
try
show(a)
show(b)
arr[b[1]] = b[2]
catch y
show(a)
show(b)
println("Exception: ", y)
end
return -1
#end
end
return distributer!
end
# Note that we expect the log_likelihood_array to be in rows (samples) x columns (clusters) , this is due to making it more efficent that way.
# function sample_log_cat_array!(labels::AbstractArray{Int64,2}, log_likelihood_array::AbstractArray{Float64,2})
# # println("lsample log cat" * string(log_likelihood_array))
# max_log_prob_arr = maximum(log_likelihood_array, dims = 2)
# log_likelihood_array .-= max_log_prob_arr
# map!(exp,log_likelihood_array,log_likelihood_array)
# # println("lsample log cat2" * string(log_likelihood_array))
# #sum_prob_arr = sum(log_likelihood_array, dims =[2])
# sum_prob_arr = (cumsum(log_likelihood_array, dims =2))
# randarr = rand(length(labels)) .* sum_prob_arr[:,(size(sum_prob_arr,2))]
# sum_prob_arr .-= randarr
# sum_prob_arr[sum_prob_arr .< 0] .= maxintfloat()
# #replace!(x -> x < 0 ? maxintfloat() : x, sum_prob_arr)
# labels .= mapslices(argmin, sum_prob_arr, dims= [2])
# end
# Sample one label per row of `log_likelihood_array` (rows = samples, columns
# = clusters): NaNs are floored to -Inf, rows are max-shifted, exponentiated
# and normalized, then a label is drawn per row. Mutates both arguments.
function sample_log_cat_array!(labels::AbstractArray{Int64,2}, log_likelihood_array::AbstractArray{Float64,2})
# println("lsample log cat" * string(log_likelihood_array))
log_likelihood_array[isnan.(log_likelihood_array)] .= -Inf #Numerical errors arent fun
max_log_prob_arr = maximum(log_likelihood_array, dims = 2)
log_likelihood_array .-= max_log_prob_arr
map!(exp,log_likelihood_array,log_likelihood_array)
# println("lsample log cat2" * string(log_likelihood_array))
sum_prob_arr = sum(log_likelihood_array, dims =[2])
log_likelihood_array ./= sum_prob_arr
# `sample` / `ProbabilityWeights` come from StatsBase.
for i=1:length(labels)
labels[i,1] = sample(1:size(log_likelihood_array,2), ProbabilityWeights(log_likelihood_array[i,:]))
end
end
"""
    sample_log_cat(logcat_array)

Draw an index from the categorical distribution whose log-probabilities are
`logcat_array` (softmax sampling via inverse CDF). Mutates the input array
in place (it is overwritten with shifted, exponentiated weights).
"""
function sample_log_cat(logcat_array::AbstractArray{Float64, 1})
    # Max-shift for numerical stability before exponentiating.
    max_logprob::Float64 = maximum(logcat_array)
    for i=1:length(logcat_array)
        logcat_array[i] = exp(logcat_array[i]-max_logprob)
    end
    # Reuse the single sum (the original recomputed sum() for `u`).
    sum_logprob::Float64 = sum(logcat_array)
    i::Int64 = 1
    c::Float64 = logcat_array[1]
    u::Float64 = rand()*sum_logprob
    while c < u && i < length(logcat_array)
        c += logcat_array[i += 1]
    end
    return i
end
# Zero-point fallback: build empty sufficient statistics by delegating with an
# empty 0x0 matrix (the hyperparams argument fills both prior slots).
function create_sufficient_statistics(dist::distribution_hyper_params, pts::Array{Any,1})
return create_sufficient_statistics(dist,dist, Array{Float64}(undef, 0, 0))
end
"""
    get_labels_histogram(labels)

Count occurrences of each distinct label and return them as a vector of
`label => count` pairs sorted by label.
"""
function get_labels_histogram(labels)
    hist_dict = Dict{eltype(labels),Int64}()
    for v in labels
        # get avoids the original's haskey-then-insert double lookup.
        hist_dict[v] = get(hist_dict, v, 0) + 1
    end
    return sort(collect(hist_dict), by=x->x[1])
end
# Translate per-point local-cluster labels into global-cluster labels using
# each local cluster's globalCluster pointer; output has the shape of
# group.labels.
function create_global_labels(group::local_group)
    local_to_global = Dict()
    for (local_idx, cluster) in enumerate(group.local_clusters)
        local_to_global[local_idx] = cluster.globalCluster
    end
    return [local_to_global[lbl] for lbl in group.labels]
end
# Debug helper: print each local cluster's global-cluster id, then its
# global-subcluster id.
function print_global_sub_cluster(group::local_group)
    println(map(cluster -> cluster.globalCluster, group.local_clusters))
    println(map(cluster -> cluster.globalCluster_subcluster, group.local_clusters))
end
# Debug helper: for every group, print the global-cluster ids of its local
# clusters.
function print_groups_global_clusters(model::hdp_shared_features)
    for (group_key, grp) in model.groups_dict
        cluster_ids = [c.globalCluster for c in grp.local_clusters]
        println("Group: " * string(group_key) * "Global Clusters: " * string(cluster_ids))
    end
end
# Reorder the rows (axes) of every array in the dict according to
# `axes_swap_vector`, in place; returns the same dict.
function axes_swapper(groups_pts_dict::Dict, axes_swap_vector)
    for key in keys(groups_pts_dict)
        groups_pts_dict[key] = groups_pts_dict[key][axes_swap_vector, :]
    end
    return groups_pts_dict
end
"""
    create_params_jld(jld_path, ...)

Serialize the full set of model/run parameters into a JLD file at `jld_path`
using the `@save` macro (from the JLD/JLD2 package; each variable is stored
under its own name). Used to snapshot an experiment configuration.
"""
function create_params_jld(jld_path,
        random_seed,
        data_path,
        data_prefix,
        groups_count,
        global_preprocessing,
        local_preprocessing,
        iterations,
        hard_clustering,
        total_dim,
        local_dim,
        split_stop,
        argmax_sample_stop,
        Ξ±,
        Ξ³,
        global_weight,
        local_weight,
        initial_global_clusters,
        initial_local_clusters,
        global_hyper_params,
        local_hyper_params)
    # @save writes every listed binding into the JLD file keyed by its name.
    @save jld_path random_seed data_path data_prefix groups_count global_preprocessing local_preprocessing iterations hard_clustering total_dim local_dim split_stop argmax_sample_stop Ξ± Ξ³ global_weight local_weight initial_global_clusters initial_local_clusters global_hyper_params local_hyper_params
end
"""
    print_params_to_files(file_path, ...)

Write a human-readable `name = value` line for each run parameter to
`file_path` (truncating any existing file).

Fix: uses the `open(...) do io` form so the file handle is closed even if a
write throws (the original leaked `io` on error).
"""
function print_params_to_files(file_path,
        random_seed,
        iterations,
        hard_clustering,
        split_stop,
        argmax_sample_stop,
        Ξ±,
        Ξ³,
        global_weight,
        local_weight,
        initial_global_clusters,
        initial_local_clusters,
        global_hyper_params,
        local_hyper_params,
        global_multiplier = 0,
        local_multiplier = 0)
    open(file_path, "w+") do io
        println(io, "random_seed = ", random_seed)
        println(io, "iterations = ", iterations)
        println(io, "hard_clustering = ", hard_clustering)
        println(io, "split_stop = ", split_stop)
        println(io, "argmax_sample_stop = ", argmax_sample_stop)
        println(io, "Ξ± = ", Ξ±)
        println(io, "Ξ³ = ", Ξ³)
        println(io, "global_weight = ", global_weight)
        println(io, "local_weight = ", local_weight)
        println(io, "initial_global_clusters = ", initial_global_clusters)
        println(io, "initial_local_clusters = ", initial_local_clusters)
        println(io, "global_hyper_params = ", global_hyper_params)
        println(io, "local_hyper_params = ", local_hyper_params)
        println(io, "global_multiplier = ", global_multiplier)
        println(io, "local_multiplier = ", local_multiplier)
    end
end
"""
    get_node_leaders_dict()

Group worker process ids by physical node: the first worker seen on each node
becomes that node's "leader" key, and the value collects all workers that
share the node (as reported by `procs(pid)`).
"""
function get_node_leaders_dict()
    leader_dict = Dict()
    current_leader = 2
    leader_dict[current_leader] = []
    for pid in workers()
        if pid in procs(current_leader)
            # Same node as the current leader.
            push!(leader_dict[current_leader], pid)
        else
            # First worker on a new node becomes its leader.
            current_leader = pid
            leader_dict[current_leader] = [pid]
        end
    end
    return leader_dict
end
"""
    assign_group_leaders(groups_count, leader_dict)

Assign each of the `groups_count` groups to a node-leader pid in round-robin
order over `keys(leader_dict)`. Returns a `Vector{Float64}` of leader pids.
"""
function assign_group_leaders(groups_count, leader_dict)
    group_leaders = collect(keys(leader_dict))
    nleaders = length(group_leaders)
    return Float64[group_leaders[i % nleaders + 1] for i = 1:groups_count]
end
"""
    log_multivariate_gamma(x, D)

Compute the log of the multivariate gamma function log Gamma_D(x), used in
Wishart/NIW normalizing constants. Relies on `logabsgamma` (SpecialFunctions).
"""
function log_multivariate_gamma(x::Number, D::Number)
    total::Float64 = D * (D - 1) / 4 * log(pi)
    for k = 1:D
        total += logabsgamma(x + (1 - k) / 2)[1]
    end
    return total
end
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 583 | include("../ds.jl")
using LinearAlgebra
using Distributions
# using PDMats
"""
Sampled multinomial-style distribution in "compact" form: `Ξ±` holds per-category
log-probabilities, indexed directly by integer-valued observations.
"""
struct compact_mnm_dist <: distibution_sample
    Ξ±::AbstractArray{Float64,1}  # log category probabilities
end
#topic_modeling_dist
"""
    log_likelihood!(r, x, dist::compact_mnm_dist, group = -1)

For each column `i` of `x`, sum the log category probabilities `Ξ±[x[j,i]]`
over the nonzero entries of the column and store the result in `r[i]`.
Entries of `x` are category indices encoded as floats; zeros are skipped.

Fix: replaced the pointless scalar broadcast `Int64.(x[j,i])` with a plain
`Int64(...)` conversion, and accumulates into a local before storing.
"""
function log_likelihood!(r::AbstractArray, x::AbstractArray, distibution_sample::compact_mnm_dist, group::Int64 = -1)
    # Nothing to do for an empty parameter vector (leaves r untouched).
    isempty(distibution_sample.Ξ±) && return
    @inbounds for i in eachindex(r)
        acc = 0.0
        for j = 1:size(x, 1)
            v = x[j, i]
            if v > 0
                acc += distibution_sample.Ξ±[Int64(v)]
            end
        end
        r[i] = acc
    end
    return
end
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 265 | struct multinomial_dist <: distibution_sample
Ξ±::AbstractArray{Float64,1}
end
#Multinomial
# Multinomial log-likelihood: each column of x is a count vector, so the
# column's log-likelihood (up to a constant) is the dot product with log Ξ±.
function log_likelihood!(r::AbstractArray, x::AbstractArray, distibution_sample::multinomial_dist, group::Int64 = -1)
    scores = distibution_sample.Ξ±' * x
    r .= scores[1, :]
end
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 551 | struct mv_gaussian <: distibution_sample
ΞΌ::AbstractArray{Float64,1}
Ξ£::AbstractArray{Float64,2}
invΞ£::AbstractArray{Float64,2}
logdetΞ£::Float64
# mvn::MvNormal
end
# Column-wise quadratic form with an inverse: out[i] <- x[:,i]' * (a \ x[:,i]),
# delegating the per-column dot products to the package helper dcolwise_dot!.
dinvquad!(out, a, x) = dcolwise_dot!(out, x, a \ x)
"""
    log_likelihood!(r, x, dist::mv_gaussian, group = -1)

Write into `r[i]` the multivariate-normal log density of column `i` of `x`.

Fixes two issues in the original:
- the normalizer used `length(Sigma)` (the number of matrix ENTRIES, D^2)
  where the dimension D = `size(Sigma, 1)` belongs in `D*log(2pi)`;
- `logdet(Sigma)` was recomputed although the struct caches `logdetΞ£`.
"""
function log_likelihood!(r::AbstractArray, x::AbstractArray, distibution_sample::mv_gaussian, group::Int64 = -1)
    z = x .- distibution_sample.ΞΌ
    # r[i] <- (x_i - mu)' * inv(Sigma) * (x_i - mu)
    dcolwise_dot!(r, z, distibution_sample.invΞ£ * z)
    D = size(distibution_sample.Ξ£, 1)
    r .= -((D * Float64(log(2Ο)) + distibution_sample.logdetΞ£) / 2) .- r
end
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 494 | struct mv_group_gaussian <: distibution_sample
ΞΌ::Vector{AbstractArray{Float64,1}}
Ξ£::AbstractArray{Float64,2}
invΞ£::AbstractArray{Float64,2}
logdetΞ£::Float64
end
"""
    log_likelihood!(r, x, dist::mv_group_gaussian, group = 1)

Per-group Gaussian log density: like the `mv_gaussian` method but uses the
group-specific mean `ΞΌ[group]` with a covariance shared across groups.

Fixes (mirroring the `mv_gaussian` method): use the dimension `size(Ξ£, 1)`
instead of `length(Ξ£)` = D^2 in the normalizer, and use the cached `logdetΞ£`
instead of recomputing `logdet(Ξ£)`.
"""
function log_likelihood!(r::AbstractArray, x::AbstractArray, distibution_sample::mv_group_gaussian, group::Int64 = 1)
    z = x .- distibution_sample.ΞΌ[group]
    dcolwise_dot!(r, z, distibution_sample.invΞ£ * z)
    D = size(distibution_sample.Ξ£, 1)
    r .= -((D * Float64(log(2Ο)) + distibution_sample.logdetΞ£) / 2) .- r
end
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 392 | struct topic_modeling_dist <: distibution_sample
Ξ±::AbstractArray{Float64,1}
end
#topic_modeling_dist
"""
    log_likelihood!(r, x, dist::topic_modeling_dist, group = -1)

Each entry of `x` is a single word/category index; `r[i]` receives the
corresponding log probability `Ξ±[x[i]]`.

Fix: replaced the pointless scalar broadcast `Int64.(x[i])` with `Int64(x[i])`.
"""
function log_likelihood!(r::AbstractArray, x::AbstractArray, distibution_sample::topic_modeling_dist, group::Int64 = -1)
    # Empty parameter vector: leave r untouched (matches original behavior).
    isempty(distibution_sample.Ξ±) && return
    @inbounds for i in eachindex(r)
        r[i] = distibution_sample.Ξ±[Int64(x[i])]
    end
    return
end
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 5665 | struct bayes_network_model <: distribution_hyper_params
ΞΊ::Float64
m::AbstractArray{Float64}
Ξ½::Float64
Ο::AbstractArray{Float64}
count::Int64
ms::Vector{AbstractArray{Float64}}
Ξ»::Float64
end
"""
Sufficient statistics for the Bayes-network (grouped Gaussian) model.
"""
mutable struct bayes_network_sufficient_statistics <: sufficient_statistics
    N::Float64                                   # total number of points
    Ngroups::Vector{Float64}                     # per-group point counts
    points_sum::AbstractArray{Float64,1}         # sum of mean-centered points
    mu_vector::Vector{AbstractArray{Float64,1}}  # per-group means
    S::AbstractArray{Float64,2}                  # scatter of mean-centered points
end
"""
    calc_posterior(prior::bayes_network_model, suff_statistics)

Conjugate NIW-style update of the Bayes-network hyperparameters; returns
`prior` unchanged when no points were observed.
"""
function calc_posterior(prior::bayes_network_model, suff_statistics::bayes_network_sufficient_statistics)
    suff_statistics.N == 0 && return prior
    ΞΊ_n = prior.ΞΊ + suff_statistics.N
    Ξ½_n = prior.Ξ½ + suff_statistics.N
    m_n = (prior.m .* prior.ΞΊ + suff_statistics.points_sum) / ΞΊ_n
    Ο_n = (prior.Ξ½ * prior.Ο + prior.ΞΊ * prior.m * prior.m' - ΞΊ_n * m_n * m_n' + suff_statistics.S) / Ξ½_n
    # Symmetrize to suppress round-off asymmetry before the posdef check.
    Ο_n = Matrix(Hermitian(Ο_n))
    if !isposdef(Ο_n)
        # Debug output retained from the original implementation.
        println(Ο_n)
        println(m_n)
        println(ΞΊ_n * m_n * m_n')
        println(suff_statistics)
    end
    return bayes_network_model(ΞΊ_n, m_n, Ξ½_n, Ο_n, prior.count, suff_statistics.mu_vector, prior.Ξ»)
end
"""
    sample_distribution(hyperparams::bayes_network_model)

Draw a shared covariance from the inverse-Wishart and one mean per group from
`MvNormal(ms[i], Ξ£/ΞΊ)`, returning the resulting `mv_group_gaussian`.

Fix: the original also drew an unused mean `ΞΌ ~ MvNormal(m, Ξ£/ΞΊ)`; that dead
draw has been removed.
"""
function sample_distribution(hyperparams::bayes_network_model)
    Ξ£ = rand(Distributions.InverseWishart(hyperparams.Ξ½, hyperparams.Ξ½ * hyperparams.Ο))
    mu_vector = Vector{AbstractArray{Float64,1}}()
    for i = 1:hyperparams.count
        push!(mu_vector, rand(Distributions.MvNormal(hyperparams.ms[i], Ξ£ / hyperparams.ΞΊ)))
    end
    return mv_group_gaussian(mu_vector, Ξ£, inv(Ξ£), logdet(Ξ£))
end
"""
    create_sufficient_statistics(hyper, posterior, points, point_to_group = 0)

Build `bayes_network_sufficient_statistics` for `points` (columns are points),
where `point_to_group` maps each column to its group. Per-group means are
estimated jointly via a smoothness-regularized least squares
(`create_mu_vector`), and the scatter `S` / `points_sum` are accumulated over
mean-centered points.
"""
function create_sufficient_statistics(hyper::bayes_network_model,
    posterior::bayes_network_model,
    points::AbstractArray{Float64,2},
    point_to_group = 0)
    # Empty input: zero counts, prior means, zero scatter.
    if size(points,2) == 0
        return bayes_network_sufficient_statistics(0,
            [0 for i=1:hyper.count],
            zeros(length(hyper.m)),
            [hyper.m for i=1:hyper.count],
            zeros(length(hyper.m),length(hyper.m)))
    end
    # Split points by group (views; no copy yet).
    pts_dict = Dict()
    for i=1:hyper.count
        pts_dict[i] = @view points[:,point_to_group .== i]
    end
    mu_vector = create_mu_vector(pts_dict, posterior.Ο,hyper.Ξ»)
    dim = size(posterior.Ο,1)
    S = zeros(dim,dim)
    points_sum = zeros(dim)
    Ngroups = Vector{Float64}()
    N = 0
    for i=1:hyper.count
        pts = Array(pts_dict[i])  # materialize the view for BLAS ops below
        if size(pts,2) == 0
            push!(Ngroups,0)
        else
            # Center by the group's estimated mean before accumulating.
            movedPts = pts .- mu_vector[i]
            S += movedPts * movedPts'
            points_sum += sum(movedPts, dims = 2)
            push!(Ngroups,size(pts,2))
            N += size(pts,2)
        end
    end
    return bayes_network_sufficient_statistics(N,Ngroups,points_sum[:],mu_vector,S)
end
"""
    log_marginal_likelihood(hyper, posterior_hyper, suff_stats)

Log marginal likelihood of the data under the NIW-style prior.

Fix: the last term was parenthesized as
`(D/2)*(log(ΞΊ0) - (D/2)*log(ΞΊn))`; the correct form (matching the `niw`
implementation in this package and the standard NIW marginal likelihood) is
`(D/2)*log(ΞΊ0) - (D/2)*log(ΞΊn)`.
"""
function log_marginal_likelihood(hyper::bayes_network_model, posterior_hyper::bayes_network_model, suff_stats::bayes_network_sufficient_statistics)
    D = size(suff_stats.S, 1)
    logpi = log(pi)
    return -suff_stats.N * D / 2 * logpi +
           log_multivariate_gamma(posterior_hyper.Ξ½ / 2, D) -
           log_multivariate_gamma(hyper.Ξ½ / 2, D) +
           (hyper.Ξ½ / 2) * logdet(hyper.Ο * hyper.Ξ½) -
           (posterior_hyper.Ξ½ / 2) * logdet(posterior_hyper.Ο * posterior_hyper.Ξ½) +
           (D / 2) * log(hyper.ΞΊ) - (D / 2) * log(posterior_hyper.ΞΊ)
end
"""
    aggregate_suff_stats(suff_l, suff_r)

Merge two `bayes_network_sufficient_statistics`: per-group means are combined
as count-weighted averages, counts/scatters are summed.

Fixes: removed the dead local `N = 0`, and guarded the weighted mean against
0/0 when a group is empty on both sides (previously produced NaN means; the
left mean — initialized to the prior mean — is kept instead).
"""
function aggregate_suff_stats(suff_l::bayes_network_sufficient_statistics, suff_r::bayes_network_sufficient_statistics)
    new_suff = deepcopy(suff_l)
    for i = 1:length(new_suff.Ngroups)
        total = new_suff.Ngroups[i] + suff_r.Ngroups[i]
        if total > 0
            wl = new_suff.Ngroups[i] / total
            wr = suff_r.Ngroups[i] / total
            new_suff.mu_vector[i] = (new_suff.mu_vector[i] .* wl) + (suff_r.mu_vector[i] .* wr)
        end
        new_suff.Ngroups[i] += suff_r.Ngroups[i]
        new_suff.N += suff_r.Ngroups[i]
    end
    new_suff.S += suff_r.S
    new_suff.points_sum += suff_r.points_sum
    return new_suff
end
"""
    create_mu_vector(points::Dict, Ξ£, Ξ»)

Estimate one mean per group by solving the smoothness-regularized least
squares system assembled by `create_matrix_for_least_squares`.

Fix: solves the normal equations with `\\` (a factorization) instead of
explicitly forming `inv(A'A)`, which is slower and numerically worse.
"""
function create_mu_vector(points::Dict, Ξ£::AbstractArray{Float64}, Ξ»::Float64)
    group_count = length(keys(points))
    A, b = create_matrix_for_least_squares(points, Ξ£, Ξ», group_count)
    xhat = (A' * A) \ (A' * b)
    dim = size(Ξ£, 1)
    mu_matrix = reshape(xhat, dim, group_count)
    return [mu_matrix[:, i] for i = 1:group_count]
end
"""
    create_matrix_for_least_squares(points::Dict, Ξ£, Ξ», group_count)

Assemble the design matrix `A` and right-hand side for the whitened,
smoothness-regularized least-squares estimate of per-group means.
`points` maps group index (1..group_count) to a `dim x n_i` matrix (or `-1`
for an absent group).

Fixes:
- the original referenced an undefined variable `groups_count` when sizing
  `A` (always raised `UndefVarError`); it now uses the `group_count` argument;
- groups are laid out in key order `1:group_count` so the data agrees with
  `points_count` (the original iterated `values(points)`, whose order is not
  guaranteed to match the keys);
- removed the dead locals `b`/`tmp` in the whitening loop.
"""
function create_matrix_for_least_squares(points::Dict, Ξ£::AbstractArray{Float64}, Ξ»::Float64, group_count::Int64)
    points_count = zeros(group_count)
    points_sum::Int64 = 0
    dim = size(Ξ£, 1)
    for i = 1:length(points)
        points_count[i] = (points[i] == -1 ? 0 : size(points[i], 2))
        points_sum += points_count[i]
    end
    # Stack the group data (in key order) followed by one zero vector per
    # group for the smoothness rows, then flatten column-major.
    group_pts = [points[i] for i = 1:length(points) if points[i] != -1]
    points_arr = vcat(group_pts, [zeros(dim) for _ = 1:group_count])
    points_arr = reduce(hcat, points_arr)
    points_arr = points_arr[:]
    A = zeros(length(points_arr), group_count * dim)
    # Whitening transform from the Cholesky factor of Ξ£.
    L = cholesky(Ξ£).U
    Linv = Matrix(inv(L))
    for i = 1:points_sum
        points_arr[(i-1)*dim+1:i*dim] = Linv * points_arr[(i-1)*dim+1:i*dim]
    end
    # Data rows: each point of group i contributes a whitened identity block
    # against that group's mean columns.
    count = 1
    for i = 1:group_count
        for j = 1:points_count[i]
            A[count:count+dim-1, dim*(i-1)+1:dim*i] = Linv
            count += dim
        end
    end
    # NOTE(review): the original named this `Ξ»sqrt` but computes Ξ»^2 —
    # confirm the intended penalty scale; value kept as-is.
    Ξ»_weight = Ξ»^2
    count += dim  # skip the first ΞΌ: it has no smoothness predecessor
    for i = 1:(size(A, 2)-dim)
        A[count, i] = -Ξ»_weight
        A[count, i+dim] = Ξ»_weight
        count += 1
    end
    return A, points_arr
end
# Build a bayes_network_model from NIW hyperparameters, replicating the NIW
# mean as the initial mean of each of the `count` groups.
function create_bayes_model_hyper_from_niw(niw::niw_hyperparams, Ξ», count)
    group_means = [niw.m for _ = 1:count]
    return bayes_network_model(niw.ΞΊ, niw.m, niw.Ξ½, niw.Ο, count, group_means, Ξ»)
end
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 1821 | include("../ds.jl")
"""
Dirichlet hyperparameters for the compact multinomial model.
"""
struct compact_mnm_hyper <: distribution_hyper_params
    Ξ±::AbstractArray{Float64,1}  # Dirichlet concentration parameters
end
"""
Sufficient statistics for the compact multinomial model.
"""
mutable struct compact_mnm_sufficient_statistics <: sufficient_statistics
    N::Float64                         # number of points
    points_sum::AbstractArray{Int64,1} # per-category counts
end
# Conjugate Dirichlet update: add the observed category counts to the prior
# concentrations; an empty statistic leaves the prior unchanged.
function calc_posterior(prior::compact_mnm_hyper, suff_statistics::compact_mnm_sufficient_statistics)
    suff_statistics.N == 0 && return prior
    return compact_mnm_hyper(prior.Ξ± + suff_statistics.points_sum)
end
# Draw category probabilities from the Dirichlet and return them in log space.
function sample_distribution(hyperparams::compact_mnm_hyper)
    weights = rand(Dirichlet(hyperparams.Ξ±))
    return compact_mnm_dist(log.(weights))
end
# Tally the integer-coded observations in `points` into per-category counts
# (via StatsBase.counts); empty input yields all-zero counts.
function create_sufficient_statistics(hyper::compact_mnm_hyper, posterior::compact_mnm_hyper, points::AbstractArray{Float64,2}, pts_to_group = 0)
    isempty(points) &&
        return compact_mnm_sufficient_statistics(size(points, 2), zeros(Int64, length(hyper.Ξ±)))
    category_counts = counts(Int.(points), length(hyper.Ξ±))
    return compact_mnm_sufficient_statistics(size(points, 2), category_counts)
end
# Log of the multivariate gamma function log Gamma_D(x).
# NOTE(review): duplicate of the definition in the shared utils file.
function log_multivariate_gamma(x::Number, D::Number)
    total::Float64 = D * (D - 1) / 4 * log(pi)
    for k = 1:D
        total += logabsgamma(x + (1 - k) / 2)[1]
    end
    return total
end
"""
    log_marginal_likelihood(hyper, posterior_hyper, suff_stats)

Dirichlet-multinomial log marginal likelihood expressed through the prior and
posterior concentrations.

Fix: removed the unused locals `D` and `logpi`.
"""
function log_marginal_likelihood(hyper::compact_mnm_hyper, posterior_hyper::compact_mnm_hyper, suff_stats::compact_mnm_sufficient_statistics)
    return logabsgamma(sum(hyper.Ξ±))[1] - logabsgamma(sum(posterior_hyper.Ξ±))[1] +
           sum((x -> logabsgamma(x)[1]).(posterior_hyper.Ξ±) - (x -> logabsgamma(x)[1]).(hyper.Ξ±))
end
# Merge two compact-multinomial statistics by summing counts.
aggregate_suff_stats(suff_l::compact_mnm_sufficient_statistics, suff_r::compact_mnm_sufficient_statistics) =
    compact_mnm_sufficient_statistics(suff_l.N + suff_r.N, suff_l.points_sum + suff_r.points_sum)
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 1538 | struct multinomial_hyper <: distribution_hyper_params
Ξ±::AbstractArray{Float64,1}
end
"""
Sufficient statistics for the multinomial model.
"""
mutable struct multinomial_sufficient_statistics <: sufficient_statistics
    N::Float64                           # number of points (columns)
    points_sum::AbstractArray{Float64,1} # per-category count totals
    S  # unused scatter placeholder (always 0 in this code path); untyped — TODO confirm before tightening
end
# Conjugate Dirichlet update for the multinomial model; empty statistics
# return the prior untouched.
function calc_posterior(prior::multinomial_hyper, suff_statistics::multinomial_sufficient_statistics)
    suff_statistics.N == 0 && return prior
    return multinomial_hyper(prior.Ξ± + suff_statistics.points_sum)
end
# Sample category probabilities from the Dirichlet prior; store in log space.
function sample_distribution(hyperparams::multinomial_hyper)
    weights = rand(Dirichlet(hyperparams.Ξ±))
    return multinomial_dist(log.(weights))
end
# Sum the per-category count columns of `points`; the scatter slot `S` is
# unused by this model and stored as 0.
function create_sufficient_statistics(hyper::multinomial_hyper, posterior::multinomial_hyper, points::AbstractArray{Float64,2}, pts_to_group = 0)
    col_totals = sum(points, dims = 2)[:]
    return multinomial_sufficient_statistics(size(points, 2), col_totals, 0)
end
"""
    log_marginal_likelihood(hyper, posterior_hyper, suff_stats)

Dirichlet-multinomial log marginal likelihood via prior/posterior
concentrations.

Fix: removed the unused locals `D` and `logpi`.
"""
function log_marginal_likelihood(hyper::multinomial_hyper, posterior_hyper::multinomial_hyper, suff_stats::multinomial_sufficient_statistics)
    return logabsgamma(sum(hyper.Ξ±))[1] - logabsgamma(sum(posterior_hyper.Ξ±))[1] +
           sum((x -> logabsgamma(x)[1]).(posterior_hyper.Ξ±) - (x -> logabsgamma(x)[1]).(hyper.Ξ±))
end
# Merge two multinomial statistics by summing every field.
aggregate_suff_stats(suff_l::multinomial_sufficient_statistics, suff_r::multinomial_sufficient_statistics) =
    multinomial_sufficient_statistics(suff_l.N + suff_r.N, suff_l.points_sum + suff_r.points_sum, suff_l.S + suff_r.S)
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 2761 | struct niw_hyperparams <: distribution_hyper_params
ΞΊ::Float64
m::AbstractArray{Float64}
Ξ½::Float64
Ο::AbstractArray{Float64}
end
"""
Sufficient statistics for the Normal-Inverse-Wishart model.
"""
mutable struct niw_sufficient_statistics <: sufficient_statistics
    N::Float64                           # number of points
    points_sum::AbstractArray{Float64,1} # sum of the points (columns)
    S::AbstractArray{Float64,2}          # uncentered scatter matrix X*X'
end
"""
    calc_posterior(prior::niw_hyperparams, suff_statistics::niw_sufficient_statistics)

Conjugate NIW posterior update; returns `prior` unchanged when no points were
observed.
"""
function calc_posterior(prior::niw_hyperparams, suff_statistics::niw_sufficient_statistics)
    suff_statistics.N == 0 && return prior
    ΞΊ_n = prior.ΞΊ + suff_statistics.N
    Ξ½_n = prior.Ξ½ + suff_statistics.N
    m_n = (prior.m .* prior.ΞΊ + suff_statistics.points_sum) / ΞΊ_n
    Ο_n = (prior.Ξ½ * prior.Ο + prior.ΞΊ * prior.m * prior.m' - ΞΊ_n * m_n * m_n' + suff_statistics.S) / Ξ½_n
    # Symmetrize to suppress round-off asymmetry before the posdef check.
    Ο_n = Matrix(Hermitian(Ο_n))
    if !isposdef(Ο_n)
        # Debug output retained from the original implementation.
        println(Ο_n)
        println(m_n)
        println(ΞΊ_n * m_n * m_n')
        println(suff_statistics)
        println(prior)
    end
    return niw_hyperparams(ΞΊ_n, m_n, Ξ½_n, Ο_n)
end
#function calc_posterior(prior:: niw_hyperparams, suff_statistics::niw_sufficient_statistics)
# ΞΊ = prior.ΞΊ + suff_statistics.N
# m = (ΞΊ * prior.m + suff_statistics.points_sum) / ΞΊ
# Ξ½ = prior.Ξ½ + suff_statistics.N
# Ο = prior.Ο + suff_statistics.S + prior.ΞΊ*prior.m*prior.m'-ΞΊ*m*m'
# return niw_hyperparams(ΞΊ,m,Ξ½,Ο)
#end
# Draw (mean, covariance) from the NIW prior: covariance from the
# inverse-Wishart, then mean from MvNormal(m, Ξ£/ΞΊ); cache inv and logdet.
function sample_distribution(hyperparams::niw_hyperparams)
    cov_mat = rand(Distributions.InverseWishart(hyperparams.Ξ½, hyperparams.Ξ½ * hyperparams.Ο))
    mean_vec = rand(Distributions.MvNormal(hyperparams.m, cov_mat / hyperparams.ΞΊ))
    return mv_gaussian(mean_vec, cov_mat, inv(cov_mat), logdet(cov_mat))
end
"""
    create_sufficient_statistics(hyper, posterior, points, pts_to_group = 0)

Compute NIW sufficient statistics (count, column sum, scatter `X*X'`) for the
columns of `points`.

Fixes: removed commented-out debug prints and the pointless
`@view sum(...)[:]` (a view of a freshly allocated temporary).
"""
function create_sufficient_statistics(hyper::niw_hyperparams, posterior::niw_hyperparams, points::AbstractArray{Float64,2}, pts_to_group = 0)
    if size(points, 2) == 0
        return niw_sufficient_statistics(size(points, 2),
            zeros(length(hyper.m)),
            zeros(length(hyper.m), length(hyper.m)))
    end
    pts = Array(points)  # materialize for the BLAS product below
    points_sum = sum(pts, dims = 2)[:]
    S = pts * pts'
    return niw_sufficient_statistics(size(points, 2), points_sum, S)
end
"""
    log_marginal_likelihood(hyper, posterior_hyper, suff_stats)

Log marginal likelihood of the data under the NIW prior, expressed through the
prior and posterior hyperparameters (standard conjugate-Gaussian formula).
"""
function log_marginal_likelihood(hyper::niw_hyperparams, posterior_hyper::niw_hyperparams, suff_stats::niw_sufficient_statistics)
    D = length(suff_stats.points_sum)
    logpi = log(pi)
    # -N*D/2*log(pi) + log Ξ_D(Ξ½_n/2) - log Ξ_D(Ξ½_0/2)
    #   + (Ξ½_0/2)*logdet(Ξ½_0 Ο_0) - (Ξ½_n/2)*logdet(Ξ½_n Ο_n)
    #   + (D/2)*(log ΞΊ_0 - log ΞΊ_n)
    return -suff_stats.N*D/2*logpi +
        log_multivariate_gamma(posterior_hyper.Ξ½/2, D)-
        log_multivariate_gamma(hyper.Ξ½/2, D) +
         (hyper.Ξ½/2)*logdet(hyper.Ο*hyper.Ξ½)-
        (posterior_hyper.Ξ½/2)*logdet(posterior_hyper.Ο*posterior_hyper.Ξ½) +
         (D/2)*(log(hyper.ΞΊ))-(D/2)*log(posterior_hyper.ΞΊ)
end
# Merge two NIW statistics by summing count, point sums and scatter.
aggregate_suff_stats(suff_l::niw_sufficient_statistics, suff_r::niw_sufficient_statistics) =
    niw_sufficient_statistics(suff_l.N + suff_r.N, suff_l.points_sum + suff_r.points_sum, suff_l.S + suff_r.S)
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 2861 | struct niw_stable_hyperparams <: distribution_hyper_params
ΞΊ::Float64
m::AbstractArray{Float64}
Ξ½::Float64
Ο::AbstractArray{Float64}
end
"""
Sufficient statistics for the "stable" NIW variant (identity covariance draws).
"""
mutable struct niw_stable_sufficient_statistics <: sufficient_statistics
    N::Float64                           # number of points
    points_sum::AbstractArray{Float64,1} # sum of the points (columns)
    S::AbstractArray{Float64,2}          # uncentered scatter matrix X*X'
end
"""
    calc_posterior(prior::niw_stable_hyperparams, suff_statistics)

Conjugate NIW posterior update for the "stable" variant; returns `prior`
unchanged when no points were observed.
"""
function calc_posterior(prior:: niw_stable_hyperparams, suff_statistics::niw_stable_sufficient_statistics)
    if suff_statistics.N == 0
        return prior
    end
    ΞΊ = prior.ΞΊ + suff_statistics.N
    Ξ½ = prior.Ξ½ + suff_statistics.N
    m = (prior.m.*prior.ΞΊ + suff_statistics.points_sum) / ΞΊ
    Ο = (prior.Ξ½ * prior.Ο + prior.ΞΊ*prior.m*prior.m' -ΞΊ*m*m'+ suff_statistics.S) / Ξ½
    # Symmetrize to suppress round-off asymmetry before the posdef check.
    Ο = Matrix(Hermitian(Ο))
    if isposdef(Ο) == false
        # Debug output: dump the offending quantities for inspection.
        println(Ο)
        println(m)
        println(ΞΊ*m*m')
        println(suff_statistics)
        println(prior)
    end
    return niw_stable_hyperparams(ΞΊ,m,Ξ½,Ο)
end
#function calc_posterior(prior:: niw_hyperparams, suff_statistics::niw_sufficient_statistics)
# ΞΊ = prior.ΞΊ + suff_statistics.N
# m = (ΞΊ * prior.m + suff_statistics.points_sum) / ΞΊ
# Ξ½ = prior.Ξ½ + suff_statistics.N
# Ο = prior.Ο + suff_statistics.S + prior.ΞΊ*prior.m*prior.m'-ΞΊ*m*m'
# return niw_hyperparams(ΞΊ,m,Ξ½,Ο)
#end
"""
    sample_distribution(hyperparams::niw_stable_hyperparams)

Draw a Gaussian from the NIW prior, then replace the sampled covariance with
the identity matrix ("stable" variant): the mean is drawn using the sampled
Ξ£, but the returned distribution always has covariance I.
"""
function sample_distribution(hyperparams::niw_stable_hyperparams)
    Ξ£ = rand(Distributions.InverseWishart(hyperparams.Ξ½, hyperparams.Ξ½* hyperparams.Ο))
    ΞΌ = rand(Distributions.MvNormal(hyperparams.m, Ξ£/hyperparams.ΞΊ))
    # Intentional: discard the Wishart draw for the returned covariance.
    Ξ£ = Matrix{Float64}(I, length(ΞΌ), length(ΞΌ)) * 1.0
    return mv_gaussian(ΞΌ,Ξ£,inv(Ξ£),logdet(Ξ£))
end
"""
    create_sufficient_statistics(hyper, posterior, points, pts_to_group = 0)

Compute NIW-stable sufficient statistics (count, column sum, scatter `X*X'`).

Fix: removed the pointless `@view sum(...)[:]` (a view of a freshly allocated
temporary), matching the cleanup in the plain NIW implementation.
"""
function create_sufficient_statistics(hyper::niw_stable_hyperparams, posterior::niw_stable_hyperparams, points::AbstractArray{Float64,2}, pts_to_group = 0)
    if size(points, 2) == 0
        return niw_stable_sufficient_statistics(size(points, 2),
            zeros(length(hyper.m)),
            zeros(length(hyper.m), length(hyper.m)))
    end
    pts = Array(points)  # materialize for the BLAS product below
    points_sum = sum(pts, dims = 2)[:]
    S = pts * pts'
    return niw_stable_sufficient_statistics(size(points, 2), points_sum, S)
end
"""
    log_marginal_likelihood(hyper, posterior_hyper, suff_stats)

NIW log marginal likelihood for the "stable" variant.

Fix: the last term was parenthesized as `(D/2)*(log(ΞΊ0) - (D/2)*log(ΞΊn))`;
the correct form (matching the plain `niw` implementation in this package) is
`(D/2)*log(ΞΊ0) - (D/2)*log(ΞΊn)`.
"""
function log_marginal_likelihood(hyper::niw_stable_hyperparams, posterior_hyper::niw_stable_hyperparams, suff_stats::niw_stable_sufficient_statistics)
    D = length(suff_stats.points_sum)
    logpi = log(pi)
    return -suff_stats.N * D / 2 * logpi +
           log_multivariate_gamma(posterior_hyper.Ξ½ / 2, D) -
           log_multivariate_gamma(hyper.Ξ½ / 2, D) +
           (hyper.Ξ½ / 2) * logdet(hyper.Ο * hyper.Ξ½) -
           (posterior_hyper.Ξ½ / 2) * logdet(posterior_hyper.Ο * posterior_hyper.Ξ½) +
           (D / 2) * log(hyper.ΞΊ) - (D / 2) * log(posterior_hyper.ΞΊ)
end
# Merge two NIW-stable statistics by summing every field.
aggregate_suff_stats(suff_l::niw_stable_sufficient_statistics, suff_r::niw_stable_sufficient_statistics) =
    niw_stable_sufficient_statistics(suff_l.N + suff_r.N, suff_l.points_sum + suff_r.points_sum, suff_l.S + suff_r.S)
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | code | 1746 | struct topic_modeling_hyper <: distribution_hyper_params
Ξ±::AbstractArray{Float64,1}
end
"""
Sufficient statistics for the topic-modeling distribution.
"""
mutable struct topic_modeling_sufficient_statistics <: sufficient_statistics
    N::Float64                         # number of observations
    points_sum::AbstractArray{Int64,1} # per-word/category counts
end
# Conjugate Dirichlet update for the topic model; empty statistics return
# the prior untouched.
function calc_posterior(prior::topic_modeling_hyper, suff_statistics::topic_modeling_sufficient_statistics)
    suff_statistics.N == 0 && return prior
    return topic_modeling_hyper(prior.Ξ± + suff_statistics.points_sum)
end
# Draw word/category probabilities from the Dirichlet; store in log space.
function sample_distribution(hyperparams::topic_modeling_hyper)
    weights = rand(Dirichlet(hyperparams.Ξ±))
    return topic_modeling_dist(log.(weights))
end
# Tally integer-coded observations into per-category counts (StatsBase.counts);
# empty input yields all-zero counts.
function create_sufficient_statistics(hyper::topic_modeling_hyper, posterior::topic_modeling_hyper, points::AbstractArray{Float64,2}, pts_to_group = 0)
    isempty(points) &&
        return topic_modeling_sufficient_statistics(size(points, 2), zeros(Int64, length(hyper.Ξ±)))
    category_counts = counts(Int.(points), length(hyper.Ξ±))
    return topic_modeling_sufficient_statistics(size(points, 2), category_counts)
end
"""
    log_marginal_likelihood(hyper, posterior_hyper, suff_stats)

Log marginal likelihood for the topic-modeling Dirichlet prior. `N` is
recovered from the total concentration increment.

Fix: removed the unused locals `D` and `logpi`. The `points_sum`-weighted
prior term is kept exactly as in the original; NOTE(review): confirm this
weighting against the intended Dirichlet-multinomial formula.
"""
function log_marginal_likelihood(hyper::topic_modeling_hyper, posterior_hyper::topic_modeling_hyper, suff_stats::topic_modeling_sufficient_statistics)
    N = Int(ceil(sum(posterior_hyper.Ξ± - hyper.Ξ±)))
    return sum((x -> logabsgamma(x)[1]).(posterior_hyper.Ξ±) -
               suff_stats.points_sum .* (x -> logabsgamma(x)[1]).(hyper.Ξ±)) +
           logabsgamma(sum(hyper.Ξ±))[1] - logabsgamma(N + sum(hyper.Ξ±))[1]
end
# Merge two topic-modeling statistics by summing counts.
aggregate_suff_stats(suff_l::topic_modeling_sufficient_statistics, suff_r::topic_modeling_sufficient_statistics) =
    topic_modeling_sufficient_statistics(suff_l.N + suff_r.N, suff_l.points_sum + suff_r.points_sum)
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 0.1.1 | 1eee6b029f0312479bad622c48c5902458eb3888 | docs | 3537 | # VersatileHDPMixtureModels.jl
This package is the code for our UAI '20 paper titled "Scalable and Flexible Clustering of Grouped Data via Parallel and Distributed Sampling in Versatile Hierarchical Dirichlet Processes". <br>
[Paper](https://www.cs.bgu.ac.il/~orenfr/papers/Dinari_UAI_2020.pdf),
[Supplemental Material](https://www.cs.bgu.ac.il/~orenfr/papers/Dinari_UAI_2020_supmat.pdf) <br>
### What can it do?
This package makes it possible to perform inference in the *vHDPMM* setting, as described in the paper; alternatively, it can perform inference in the *HDPMM* setting.
### Quick Start
1. Get Julia from [here](https://julialang.org/), any version above 1.1.0 should work, install, and run it.
2. Add the package `]add VersatileHDPMixtureModels`.
3. Add some processes and use the package:
```
using Distributed
addprocs(2)
@everywhere using VersatileHDPMixtureModels
```
4. Now you can start using it!
* For the HDP Version:
```
# Sample some data from a CRF PRIOR:
# We sample 3D data, 4 Groups, with $\alpha=10,\gamma=1$. and variance of 100 between the components means.
crf_prior = hdp_prior_crf_draws(100,3,10,1)
pts,labels = generate_grouped_gaussian_from_hdp_group_counts(crf_prior[2],3,100.0)
#Create the priors we opt to use:
#As we want HDP, we set the local prior dimension to 0, and the global prior dimension to 3
gprior, lprior = create_default_priors(3,0,:niw)
#Run the model:
model = hdp_fit(pts,10,1,gprior,100)
#Get results:
model_results = get_model_global_pred(model[1]) # Get global components assignments
##
```
* Running the vHDP full setting:
```
#Generate some data:
#We generate gaussian data, 20K pts each group, Global Dim= 2, Local Dim = 1, 3 Global components, 5 Local in each group, 10 groups:
pts,labels = generate_grouped_gaussian_data(20000, 2, 1, 3, 5, 10, false, 25.0, false)
#Create Priors:
g_prior, l_prior = create_default_priors(2,1,:niw)
#Run the model:
vhdpmm_results = vhdp_fit(pts,2,100.0,1000.0,100.0,g_prior,l_prior,50)
#Get global and local assignments for the points:
vhdpmm_global = Dict([i=> create_global_labels(vhdpmm_results[1].groups_dict[i]) for i=1:length(pts)])
vhdpmm_local = Dict([i=> vhdpmm_results[1].groups_dict[i].labels for i=1:length(pts)])
```
### Examples:
[Coseg with super pixels](https://nbviewer.jupyter.org/github/BGU-CS-VIL/VersatileHDPMixtureModels.jl/blob/master/examples/Coseg.ipynb) <br>
[vHDP as HDP](https://nbviewer.jupyter.org/github/BGU-CS-VIL/VersatileHDPMixtureModels.jl/blob/master/examples/vHDPasHDPGMM.ipynb) <br>
[Missing data experiment](https://nbviewer.jupyter.org/github/BGU-CS-VIL/VersatileHDPMixtureModels.jl/blob/master/examples/MissingData.ipynb) <br>
[Synthethic data experiemnt](https://nbviewer.jupyter.org/github/BGU-CS-VIL/VersatileHDPMixtureModels.jl/blob/master/examples/SynthethicData.ipynb)
### License
This software is released under the MIT License (included with the software). Note, however, that if you are using this code (and/or the results of running it) to support any form of publication (e.g., a book, a journal paper, a conference paper, a patent application, etc.) then we request you will cite our paper:
```
@inproceedings{dinari2020vhdp,
title={Scalable and Flexible Clustering of Grouped Data via Parallel and Distributed Sampling in Versatile Hierarchical {D}irichlet Processes},
author={Dinari, Or and Freifeld, Oren},
booktitle={UAI},
year={2020}
}
```
### Misc
For any questions: dinari at post.bgu.ac.il
Contributions, feature requests, suggestions, etc. are welcome.
| VersatileHDPMixtureModels | https://github.com/BGU-CS-VIL/VersatileHDPMixtureModels.jl.git |
|
[
"MIT"
] | 1.4.2 | fc0abb338eb8d90bc186ccf0a47c90825952c950 | code | 553 | using CFITSIO
using Documenter
# Enable doctests in CFITSIO docstrings by injecting `using CFITSIO` as setup.
DocMeta.setdocmeta!(CFITSIO, :DocTestSetup, :(using CFITSIO); recursive=true)
# `pages.jl` defines the `pages` variable passed to makedocs below.
include("pages.jl")
makedocs(;
    modules=[CFITSIO],
    authors="JuliaAstro",
    repo="https://github.com/JuliaAstro/CFITSIO.jl/blob/{commit}{path}#L{line}",
    sitename="CFITSIO.jl",
    format=Documenter.HTML(;
        # Pretty URLs only on CI, so local builds remain browsable from disk.
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://juliaastro.github.io/CFITSIO.jl",
        assets=String[],
    ),
    pages=pages
)
# Publish the built docs to the gh-pages branch of the repository.
deploydocs(;
    repo="github.com/JuliaAstro/CFITSIO.jl",
)
| CFITSIO | https://github.com/JuliaAstro/CFITSIO.jl.git |
|
[
"MIT"
] | 1.4.2 | fc0abb338eb8d90bc186ccf0a47c90825952c950 | code | 60 | pages=[
"Home" => "index.md",
]
requiredmods = Symbol[]
| CFITSIO | https://github.com/JuliaAstro/CFITSIO.jl.git |
|
[
"MIT"
] | 1.4.2 | fc0abb338eb8d90bc186ccf0a47c90825952c950 | code | 68537 | module CFITSIO
using CFITSIO_jll
export FITSFile,
FITSMemoryHandle,
fits_assert_open,
fits_clobber_file,
fits_close_file,
fits_copy_image_section,
fits_create_ascii_tbl,
fits_create_binary_tbl,
fits_create_diskfile,
fits_create_file,
fits_create_img,
fits_delete_file,
fits_delete_key,
fits_delete_record,
fits_delete_rows,
fits_file_mode,
fits_file_name,
fits_get_hdrspace,
fits_get_hdu_num,
fits_get_hdu_type,
fits_delete_hdu,
fits_get_img_dim,
fits_get_img_equivtype,
fits_get_img_size,
fits_get_img_type,
fits_get_num_cols,
fits_get_num_hdus,
fits_get_num_rows,
fits_get_rowsize,
fits_get_colnum,
fits_get_coltype,
fits_get_eqcoltype,
fits_get_version,
fits_read_tdim,
fits_hdr2str,
fits_insert_img,
fits_insert_rows,
fits_movabs_hdu,
fits_movrel_hdu,
fits_movnam_hdu,
fits_open_data,
fits_open_diskfile,
fits_open_file,
fits_open_image,
fits_open_table,
fits_open_memfile,
fits_read_col,
fits_read_descript,
fits_read_keyn,
fits_read_key_str,
fits_read_key_lng,
fits_read_keys_lng,
fits_read_keyword,
fits_read_pix,
fits_read_pixnull,
fits_read_record,
fits_read_subset,
fits_resize_img,
fits_update_key,
fits_write_col,
fits_write_date,
fits_write_comment,
fits_write_history,
fits_write_key,
fits_write_pix,
fits_write_pixnull,
fits_write_subset,
fits_write_null_img,
fits_write_record,
fits_write_tdim,
libcfitsio_version,
cfitsio_typecode,
bitpix_from_type,
type_from_bitpix
@enum FileMode R = 0 RW = 1
"""
cfitsio_typecode(::Type) -> Cint
Return the CFITSIO type code for the given Julia type
"""
cfitsio_typecode
"""
bitpix_from_type(::Type) -> Cint
Return the FITS BITPIX code for the given Julia type
"""
bitpix_from_type
"""
type_from_bitpix(::Integer) -> Type
Return the Julia type from the FITS BITPIX code
"""
type_from_bitpix
# Map Julia element types to the CFITSIO "datatype" codes (TBYTE=11,
# TSBYTE=12, TLOGICAL=14, TSTRING=16, ...), generating one
# `cfitsio_typecode` method per pair.
for (T, code) in (
    (UInt8, 11),
    (Int8, 12),
    (Bool, 14),
    (String, 16),
    (Cushort, 20),
    (Cshort, 21),
    (Cuint, 30),
    (Cint, 31),
    (UInt64, 80),
    (Int64, 81),
    (Float32, 42),
    (Float64, 82),
    (ComplexF32, 83),
    (ComplexF64, 163),
)
    @eval cfitsio_typecode(::Type{$T}) = Cint($code)
end
# Bidirectional mapping between Julia types and FITS BITPIX image codes:
# generates `bitpix_from_type` and the `Val`-dispatched `type_from_bitpix`.
for (T, code) in ((UInt8, 8), # BYTE_IMG
    (Int16, 16), # SHORT_IMG
    (Int32, 32), # LONG_IMG
    (Int64, 64), # LONGLONG_IMG
    (Float32, -32), # FLOAT_IMG
    (Float64, -64), # DOUBLE_IMG
    (Int8, 10), # SBYTE_IMG
    (UInt16, 20), # USHORT_IMG
    (UInt32, 40), # ULONG_IMG
    (UInt64, 80)) # ULONGLONG_IMG
    local value = Cint(code)
    @eval begin
        bitpix_from_type(::Type{$T}) = $value
        type_from_bitpix(::Val{$value}) = $T
    end
end
# Accept any integer BITPIX code by lifting it into a Val for dispatch.
type_from_bitpix(code::Integer) = type_from_bitpix(Val(Cint(code)))
# Above, we don't define a method for Clong because it is either Cint (Int32)
# or Int64 depending on the platform, and those methods are already defined.
# Culong is either UInt64 or Cuint depending on platform.
# -----------------------------------------------------------------------------
# FITSFile type
"""
Handle to an open CFITSIO `fitsfile`. The raw pointer is closed automatically
by a finalizer; a NULL pointer marks a file that has already been closed.
"""
mutable struct FITSFile
    ptr::Ptr{Cvoid}  # CFITSIO fitsfile*; C_NULL once closed
    # Register the finalizer so the C handle is released on GC.
    FITSFile(ptr::Ptr{Cvoid}) = finalizer(fits_close_file, new(ptr))
end
# FITS wants to be able to update the ptr, so keep them
# in a mutable struct
"""
In-memory file handle used by `fits_open_memfile`; CFITSIO may update both
the buffer pointer and size, hence the mutable struct.
"""
mutable struct FITSMemoryHandle
    ptr::Ptr{Cvoid}  # pointer to the memory buffer
    size::Csize_t    # current buffer size in bytes
end
FITSMemoryHandle() = FITSMemoryHandle(C_NULL, 0)
# -----------------------------------------------------------------------------
# error messaging
# Raise a descriptive error if `f` refers to a file that was already closed
# (closed handles carry a NULL pointer).
function fits_assert_open(f::FITSFile)
    f.ptr == C_NULL &&
        error("attempt to access a FITS file that has been closed previously")
    return nothing
end
# Raise an error if the FITS file contains no HDUs at all.
function fits_assert_nonempty(f::FITSFile)
    fits_get_num_hdus(f) == 0 && error("No HDU found in FITS file")
    return nothing
end
"""
Exception wrapping a nonzero CFITSIO status code, carrying the offending
filename (if known) plus the short and full CFITSIO error messages.
"""
struct CFITSIOError{T} <: Exception
    filename :: T        # file being processed, or `nothing`
    errcode :: Cint      # CFITSIO status code
    errmsgshort :: String  # 30-char summary from ffgerr
    errmsgfull :: String   # drained error-message stack from ffgmsg
end
# Render a CFITSIOError: headline with optional filename and code/summary,
# followed by the detailed message stack when present.
function Base.showerror(io::IO, c::CFITSIOError)
    print(io, "CFITSIO has encountered an error")
    c.filename === nothing || print(io, " while processing ", c.filename)
    println(io, ". Error code ", c.errcode, ": ", c.errmsgshort)
    isempty(c.errmsgfull) && return
    println(io, "Detailed error message follows: ")
    print(io, c.errmsgfull)
end
# Translate a CFITSIO status code into its 30-character summary via ffgerr.
function fits_get_errstatus(status::Cint)
    buf = Vector{UInt8}(undef, 31)
    ccall((:ffgerr, libcfitsio), Cvoid, (Cint, Ptr{UInt8}), status, buf)
    return unsafe_string(pointer(buf))
end
"""
    fits_read_errmsg()

Drain CFITSIO's internal error-message stack (via `ffgmsg`) and return the
messages joined by newlines; returns `""` when the stack is empty.

Fix: removed the dead `msgstr = ""` initialization (it was immediately
overwritten) and folded the duplicated `ccall` into a single loop.
"""
function fits_read_errmsg()
    buf = Vector{UInt8}(undef, 80)
    ccall((:ffgmsg, libcfitsio), Cvoid, (Ptr{UInt8},), buf)
    msg = unsafe_string(pointer(buf))
    errstr = msg
    # ffgmsg returns "" once the stack is exhausted.
    while msg != ""
        ccall((:ffgmsg, libcfitsio), Cvoid, (Ptr{UInt8},), buf)
        msg = unsafe_string(pointer(buf))
        errstr *= '\n' * msg
    end
    return errstr
end
# Throw a CFITSIOError for any nonzero CFITSIO status; no-op on success.
function fits_assert_ok(status::Cint, filename = nothing)
    status == 0 && return nothing
    throw(CFITSIOError(filename,
        status,
        fits_get_errstatus(status),
        fits_read_errmsg()))
end
# FITS headers only permit ASCII; reject non-ASCII strings early.
fits_assert_isascii(str::String) =
    !isascii(str) && error("FITS file format accepts ASCII strings only")
# Version number of the linked CFITSIO library, as reported by ffvers.
fits_get_version() = ccall((:ffvers, libcfitsio), Cfloat, (Ref{Cfloat},), 0.0)
# -----------------------------------------------------------------------------
# Utility function
# N-tuples of all zeros / all ones of element type T (used for default
# first-pixel and increment arguments in the pixel-IO wrappers).
zerost(::Type{T}, n) where {T} = ntuple(i -> zero(T), n)
onest(::Type{T}, n) where {T} = ntuple(i -> one(T), n)
# -----------------------------------------------------------------------------
# file access & info functions
"""
fits_create_file(filename::AbstractString)
Create and open a new empty output `FITSFile`. This methods uses the
[extended file name syntax](https://heasarc.gsfc.nasa.gov/docs/software/fitsio/c/c_user/node83.html)
to create the file.
See also [`fits_create_diskfile`](@ref) which does not use the extended filename parser.
"""
function fits_create_file(filename::AbstractString)
ptr = Ref{Ptr{Cvoid}}()
status = Ref{Cint}(0)
ccall(
(:ffinit, libcfitsio),
Cint,
(Ref{Ptr{Cvoid}}, Ptr{UInt8}, Ref{Cint}),
ptr,
filename,
status,
)
fits_assert_ok(status[], filename)
FITSFile(ptr[])
end
"""
fits_create_diskfile(filename::AbstractString)
Create and open a new empty output `FITSFile`. Unlike [`fits_create_file`](@ref), this function does
not use an extended filename parser and treats the string as is as the filename.
"""
function fits_create_diskfile(filename::AbstractString)
    ptr = Ref{Ptr{Cvoid}}()      # receives the new fitsfile* handle
    status = Ref{Cint}(0)
    # `ffdkinit` creates the file treating `filename` literally (no extended
    # filename parsing).
    ccall(
        (:ffdkinit, libcfitsio),
        Cint,
        (Ref{Ptr{Cvoid}}, Ptr{UInt8}, Ref{Cint}),
        ptr,
        filename,
        status,
    )
    fits_assert_ok(status[], filename)
    FITSFile(ptr[])
end
"""
fits_clobber_file(filename::AbstractString)
Like [`fits_create_file`](@ref), but overwrites `filename` if it exists.
"""
# A leading '!' tells cfitsio's extended filename parser to overwrite any
# existing file of the same name.
function fits_clobber_file(filename::AbstractString)
    return fits_create_file(string('!', filename))
end
"""
fits_open_data(filename::String, [mode = 0])
Open an existing data file (like [`fits_open_file`](@ref)) and move to the first HDU
containing either an image or a table.
## Modes:
* 0 : Read only (equivalently denoted by `CFITSIO.R`)
* 1 : Read-write (equivalently denoted by `CFITSIO.RW`)
"""
fits_open_data
"""
fits_open_file(filename::String, [mode = 0])
Open an existing data file.
## Modes:
* 0 : Read only (equivalently denoted by `CFITSIO.R`)
* 1 : Read-write (equivalently denoted by `CFITSIO.RW`)
This function uses the extended filename syntax to open the file. See also [`fits_open_diskfile`](@ref)
that does not use the extended filename parser and uses `filename` as is as the name of the file.
"""
fits_open_file
"""
fits_open_diskfile(filename::String, [mode = 0])
Open an existing data file.
## Modes:
* 0 : Read only (equivalently denoted by `CFITSIO.R`)
* 1 : Read-write (equivalently denoted by `CFITSIO.RW`)
This function does not use the extended filename parser, and uses `filename` as is as the name
of the file that is to be opened. See also [`fits_open_file`](@ref) which uses the extended filename syntax.
"""
fits_open_diskfile
"""
fits_open_image(filename::String, [mode = 0])
Open an existing data file (like [`fits_open_file`](@ref)) and move to the first
HDU containing an image.
## Modes:
* 0 : Read only (equivalently denoted by `CFITSIO.R`)
* 1 : Read-write (equivalently denoted by `CFITSIO.RW`)
"""
fits_open_image
"""
fits_open_table(filename::String, [mode = 0])
Open an existing data file (like [`fits_open_file`](@ref)) and move to the first
HDU containing either an ASCII or a binary table.
## Modes:
* 0 : Read only (equivalently denoted by `CFITSIO.R`)
* 1 : Read-write (equivalently denoted by `CFITSIO.RW`)
"""
fits_open_table
# Generate the family of file-opening wrappers. Each maps directly onto the
# corresponding CFITSIO routine; they differ only in which HDU they position
# to after opening (data / image / table) and in whether the extended
# filename parser is used (`ffopen` vs `ffdkopn`).
for (a, b) in (
    (:fits_open_data, "ffdopn"),
    (:fits_open_file, "ffopen"),
    (:fits_open_image, "ffiopn"),
    (:fits_open_table, "fftopn"),
    (:fits_open_diskfile, "ffdkopn"),
)
    @eval begin
        function ($a)(filename::AbstractString, mode = 0)
            ptr = Ref{Ptr{Cvoid}}()   # receives the opened fitsfile* handle
            status = Ref{Cint}(0)
            ccall(
                ($b, libcfitsio),
                Cint,
                (Ref{Ptr{Cvoid}}, Ptr{UInt8}, Cint, Ref{Cint}),
                ptr,
                filename,
                mode,       # 0 = read-only, 1 = read-write
                status,
            )
            fits_assert_ok(status[], filename)
            FITSFile(ptr[])
        end
    end
end
# filename is ignored by the C library
function fits_open_memfile(data::Vector{UInt8}, mode = 0, filename = "")
# Only reading is supported right now
@assert Int(mode) == 0 "only reading is supported currently, so mode must be 0 or CFITSIO.R. Received mode = $mode"
ptr = Ref{Ptr{Cvoid}}(C_NULL)
status = Ref{Cint}(0)
handle = FITSMemoryHandle(pointer(data), length(data))
dataptr = Ptr{Ptr{Cvoid}}(pointer_from_objref(handle))
sizeptr = Ptr{Csize_t}(dataptr + sizeof(Ptr{Cvoid}))
ccall(
("ffomem", libcfitsio),
Cint,
(
Ptr{Ptr{Cvoid}},
Ptr{UInt8},
Cint,
Ptr{Ptr{UInt8}},
Ptr{Csize_t},
Csize_t,
Ptr{Cvoid},
Ptr{Cint},
),
ptr,
filename,
mode,
dataptr,
sizeptr,
2880,
C_NULL,
status,
)
fits_assert_ok(status[])
FITSFile(ptr[]), handle
end
"""
fits_close_file(f::FITSFile)
Close a previously opened FITS file.
"""
fits_close_file
"""
fits_delete_file(f::FITSFile)
Close an opened FITS file (like [`fits_close_file`](@ref)) and removes it
from the disk.
"""
fits_delete_file
# Generate fits_close_file / fits_delete_file; both release the handle, the
# latter also removes the file from disk.
for (a, b) in ((:fits_close_file, "ffclos"), (:fits_delete_file, "ffdelt"))
    @eval begin
        function ($a)(f::FITSFile)
            # fits_close_file() is called during garbage collection, but file
            # may already be closed by user, so we need to check if it is open.
            if (ptr = f.ptr) != C_NULL
                f.ptr = C_NULL # avoid closing twice even if an error occurs
                status = Ref{Cint}(0)
                ccall(($b, libcfitsio), Cint, (Ptr{Cvoid}, Ref{Cint}), ptr, status)
                fits_assert_ok(status[])
            end
        end
    end
end
# Hook into Base so the generic `close(f)` works on FITS files.
Base.close(f::FITSFile) = fits_close_file(f)
"""
fits_file_name(f::FITSFile)
Return the name of the file associated with object `f`.
"""
function fits_file_name(f::FITSFile)
    fits_assert_open(f)
    value = Vector{UInt8}(undef, 1025)  # buffer for the NUL-terminated filename
    status = Ref{Cint}(0)
    ccall(
        (:ffflnm, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Ptr{UInt8}, Ref{Cint}),
        f.ptr,
        value,
        status,
    )
    fits_assert_ok(status[])
    unsafe_string(pointer(value))
end
"""
fits_file_mode(f::FITSFile)
Return the I/O mode of the FITS file, where 0 indicates a read-only mode and 1 indicates a read-write mode.
"""
function fits_file_mode(f::FITSFile)
    fits_assert_open(f)
    result = Ref{Cint}(0)   # receives the I/O mode (0 = read-only, 1 = read-write)
    status = Ref{Cint}(0)
    ccall(
        ("ffflmd", libcfitsio),
        Cint,
        (Ptr{Cvoid}, Ref{Cint}, Ref{Cint}),
        f.ptr,
        result,
        status,
    )
    fits_assert_ok(status[])
    result[]
end
# -----------------------------------------------------------------------------
# header access functions
"""
fits_get_hdrspace(f::FITSFile) -> (keysexist, morekeys)
Return the number of existing keywords (not counting the END keyword)
and the amount of space currently available for more keywords.
"""
function fits_get_hdrspace(f::FITSFile)
    fits_assert_open(f)
    keysexist = Ref{Cint}(0)  # number of keywords currently in the header
    morekeys = Ref{Cint}(0)   # free keyword slots before the header must grow
    status = Ref{Cint}(0)
    ccall(
        (:ffghsp, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Ref{Cint}, Ref{Cint}, Ref{Cint}),
        f.ptr,
        keysexist,
        morekeys,
        status,
    )
    fits_assert_ok(status[])
    (keysexist[], morekeys[])
end
# Read keyword `keyname` from the current HDU as a string.
# Returns `(value, comment)`. Wraps `ffgkys`.
function fits_read_key_str(f::FITSFile, keyname::String)
    fits_assert_open(f)
    value = Vector{UInt8}(undef, 71)    # 70-char value field + NUL
    comment = Vector{UInt8}(undef, 71)  # 70-char comment field + NUL
    status = Ref{Cint}(0)
    ccall(
        (:ffgkys, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Ptr{UInt8}, Ptr{UInt8}, Ptr{UInt8}, Ref{Cint}),
        f.ptr,
        keyname,
        value,
        comment,
        status,
    )
    fits_assert_ok(status[])
    unsafe_string(pointer(value)), unsafe_string(pointer(comment))
end
# Read keyword `keyname` from the current HDU as a `Clong` integer.
# Returns `(value, comment)`. Wraps `ffgkyj`.
function fits_read_key_lng(f::FITSFile, keyname::String)
    fits_assert_open(f)
    value = Ref{Clong}(0)
    comment = Vector{UInt8}(undef, 71)  # 70-char comment field + NUL
    status = Ref{Cint}(0)
    ccall(
        (:ffgkyj, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Ptr{UInt8}, Ref{Clong}, Ptr{UInt8}, Ref{Cint}),
        f.ptr,
        keyname,
        value,
        comment,
        status,
    )
    fits_assert_ok(status[])
    value[], unsafe_string(pointer(comment))
end
# Read the values of indexed keywords `keyname``nstart` … `keyname``nmax`
# (e.g. NAXIS1, NAXIS2, …) as `Clong`s. Returns `(values, nfound)`; entries
# beyond `nfound` are left uninitialized. Wraps `ffgknj`.
function fits_read_keys_lng(f::FITSFile, keyname::String, nstart::Integer, nmax::Integer)
    fits_assert_open(f)
    value = Vector{Clong}(undef, nmax - nstart + 1)
    nfound = Ref{Cint}(0)
    status = Ref{Cint}(0)
    ccall(
        (:ffgknj, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Ptr{UInt8}, Cint, Cint, Ptr{Clong}, Ref{Cint}, Ref{Cint}),
        f.ptr,
        keyname,
        nstart,
        nmax,
        value,
        nfound,
        status,
    )
    fits_assert_ok(status[])
    value, nfound[]
end
"""
fits_read_keyword(f::FITSFile, keyname::String) -> (value, comment)
Return the specified keyword value and comment (as a tuple of strings);
throw an error if the keyword is not found.
"""
function fits_read_keyword(f::FITSFile, keyname::String)
    fits_assert_open(f)
    value = Vector{UInt8}(undef, 71)    # 70-char value field + NUL
    comment = Vector{UInt8}(undef, 71)  # 70-char comment field + NUL
    status = Ref{Cint}(0)
    # `ffgkey` returns the raw (unparsed) value string of the keyword.
    ccall(
        (:ffgkey, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Ptr{UInt8}, Ptr{UInt8}, Ptr{UInt8}, Ref{Cint}),
        f.ptr,
        keyname,
        value,
        comment,
        status,
    )
    fits_assert_ok(status[])
    unsafe_string(pointer(value)), unsafe_string(pointer(comment))
end
"""
fits_read_record(f::FITSFile, keynum::Int) -> String
Return the nth header record in the CHU. The first keyword in the
header is at `keynum = 1`.
"""
function fits_read_record(f::FITSFile, keynum::Integer)
    fits_assert_open(f)
    card = Vector{UInt8}(undef, 81)  # 80-char FITS card + NUL terminator
    status = Ref{Cint}(0)
    ccall(
        (:ffgrec, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Cint, Ptr{UInt8}, Ref{Cint}),
        f.ptr,
        keynum,
        card,
        status,
    )
    fits_assert_ok(status[])
    unsafe_string(pointer(card))
end
"""
fits_read_keyn(f::FITSFile, keynum::Int) -> (name, value, comment)
Return the nth header record in the CHU. The first keyword in the header is at `keynum = 1`.
"""
function fits_read_keyn(f::FITSFile, keynum::Integer)
    fits_assert_open(f)
    keyname = Vector{UInt8}(undef, 9)   # 8-char keyword name + NUL
    value = Vector{UInt8}(undef, 71)    # 70-char value field + NUL
    comment = Vector{UInt8}(undef, 71)  # 70-char comment field + NUL
    status = Ref{Cint}(0)
    ccall(
        (:ffgkyn, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Cint, Ptr{UInt8}, Ptr{UInt8}, Ptr{UInt8}, Ref{Cint}),
        f.ptr,
        keynum,
        keyname,
        value,
        comment,
        status,
    )
    fits_assert_ok(status[])
    (
        unsafe_string(pointer(keyname)),
        unsafe_string(pointer(value)),
        unsafe_string(pointer(comment)),
    )
end
"""
fits_write_key(f::FITSFile, keyname::String, value, comment::String)
Write a keyword of the appropriate data type into the CHU.
"""
function fits_write_key(
    f::FITSFile,
    keyname::String,
    value::Union{Real,String},
    comment::String,
)
    fits_assert_open(f)
    fits_assert_isascii(keyname)
    fits_assert_isascii(comment)
    # `ffpky` takes the value as a void*: strings pass through directly,
    # Bool is widened to a Cint, other numerics are passed as raw bytes of a
    # one-element array matching the cfitsio type code below.
    cvalue = isa(value, String) ? value :
             isa(value, Bool) ? Cint[value] : reinterpret(UInt8, [value])
    status = Ref{Cint}(0)
    ccall(
        (:ffpky, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Cint, Ptr{UInt8}, Ptr{UInt8}, Ptr{UInt8}, Ref{Cint}),
        f.ptr,
        cfitsio_typecode(typeof(value)),
        keyname,
        cvalue,
        comment,
        status,
    )
    fits_assert_ok(status[])
end
# Write (or update) the DATE keyword in the CHU with the current system time.
# Wraps `ffpdat`.
function fits_write_date(f::FITSFile)
    fits_assert_open(f)
    status = Ref{Cint}(0)
    ccall((:ffpdat, libcfitsio), Cint, (Ptr{Cvoid}, Ref{Cint}), f.ptr, status)
    fits_assert_ok(status[])
end
# Append a COMMENT card containing `comment` to the CHU. Wraps `ffpcom`.
function fits_write_comment(f::FITSFile, comment::String)
    fits_assert_open(f)
    fits_assert_isascii(comment)
    status = Ref{Cint}(0)
    ccall(
        (:ffpcom, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Ptr{UInt8}, Ref{Cint}),
        f.ptr,
        comment,
        status,
    )
    fits_assert_ok(status[])
end
# Append a HISTORY card containing `history` to the CHU. Wraps `ffphis`.
function fits_write_history(f::FITSFile, history::String)
    fits_assert_open(f)
    fits_assert_isascii(history)
    status = Ref{Cint}(0)
    ccall(
        (:ffphis, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Ptr{UInt8}, Ref{Cint}),
        f.ptr,
        history,
        status,
    )
    fits_assert_ok(status[])
end
# update key: if already present, update it, otherwise add it.
# Generate fits_update_key methods for String/Bool/Integer values; each pair
# maps the Julia value type to the C argument type expected by the
# corresponding CFITSIO "update key" routine.
for (a, T, S) in (
    ("ffukys", :String, :(Ptr{UInt8})),
    ("ffukyl", :Bool, :Cint),
    ("ffukyj", :Integer, :Int64),
)
    @eval begin
        function fits_update_key(
            f::FITSFile,
            key::String,
            value::$T,
            comment::Union{String,Ptr{Cvoid}} = C_NULL,
        )
            fits_assert_open(f)
            isa(value, String) && fits_assert_isascii(value)
            isa(comment, String) && fits_assert_isascii(comment)
            status = Ref{Cint}(0)
            ccall(
                ($a, libcfitsio),
                Cint,
                (Ptr{Cvoid}, Ptr{UInt8}, $S, Ptr{UInt8}, Ref{Cint}),
                f.ptr,
                key,
                value,
                comment,       # C_NULL keeps the existing comment untouched
                status,
            )
            fits_assert_ok(status[])
        end
    end
end
# fits_update_key method for floating-point values. Wraps `ffukyd`.
function fits_update_key(
    f::FITSFile,
    key::String,
    value::AbstractFloat,
    comment::Union{String,Ptr{Cvoid}} = C_NULL,
)
    fits_assert_open(f)
    isa(comment, String) && fits_assert_isascii(comment)
    status = Ref{Cint}(0)
    ccall(
        ("ffukyd", libcfitsio),
        Cint,
        (Ptr{Cvoid}, Ptr{UInt8}, Cdouble, Cint, Ptr{UInt8}, Ref{Cint}),
        f.ptr,
        key,
        value,
        -15,       # decimals: negative means "use exponential, 15 sig. digits"
        comment,
        status,
    )
    fits_assert_ok(status[])
end
# fits_update_key method for `nothing`: writes a keyword with an undefined
# (empty) value. Wraps `ffukyu`.
function fits_update_key(
    f::FITSFile,
    key::String,
    value::Nothing,
    comment::Union{String,Ptr{Cvoid}} = C_NULL,
)
    fits_assert_open(f)
    isa(comment, String) && fits_assert_isascii(comment)
    status = Ref{Cint}(0)
    ccall(
        ("ffukyu", libcfitsio),
        Cint,
        (Ptr{Cvoid}, Ptr{UInt8}, Ptr{UInt8}, Ref{Cint}),
        f.ptr,
        key,
        comment,
        status,
    )
    fits_assert_ok(status[])
end
"""
fits_write_record(f::FITSFile, card::String)
Write a user specified keyword record into the CHU.
"""
function fits_write_record(f::FITSFile, card::String)
    fits_assert_open(f)
    fits_assert_isascii(card)
    status = Ref{Cint}(0)
    # `ffprec` appends the 80-character card verbatim to the CHU.
    ccall(
        (:ffprec, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Ptr{UInt8}, Ref{Cint}),
        f.ptr,
        card,
        status,
    )
    fits_assert_ok(status[])
end
"""
fits_delete_record(f::FITSFile, keynum::Int)
Delete the keyword record at the specified index.
"""
function fits_delete_record(f::FITSFile, keynum::Integer)
    fits_assert_open(f)
    status = Ref{Cint}(0)
    # `ffdrec` removes the keynum-th card (1-based) from the CHU.
    ccall((:ffdrec, libcfitsio), Cint, (Ptr{Cvoid}, Cint, Ref{Cint}), f.ptr, keynum, status)
    fits_assert_ok(status[])
end
"""
fits_delete_key(f::FITSFile, keyname::String)
Delete the keyword named `keyname`.
"""
function fits_delete_key(f::FITSFile, keyname::String)
    fits_assert_open(f)
    status = Ref{Cint}(0)
    # `ffdkey` removes the card whose keyword name matches `keyname`.
    ccall(
        (:ffdkey, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Ptr{UInt8}, Ref{Cint}),
        f.ptr,
        keyname,
        status,
    )
    fits_assert_ok(status[])
end
"""
fits_hdr2str(f::FITSFile, nocomments::Bool=false)
Return the header of the CHDU as a string. If `nocomments` is `true`, comment
cards are stripped from the output.
"""
function fits_hdr2str(f::FITSFile, nocomments::Bool = false)
    fits_assert_open(f)
    status = Ref{Cint}(0)
    header = Ref{Ptr{UInt8}}(C_NULL)  # receives a cfitsio-allocated buffer
    nkeys = Ref{Cint}(0)
    ccall(
        (:ffhdr2str, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Cint, Ref{Ptr{UInt8}}, Cint, Ptr{Ptr{UInt8}}, Ref{Cint}, Ref{Cint}),
        f.ptr,
        nocomments,
        C_NULL,    # exclist: no keywords excluded...
        0,         # ...so nexc = 0
        header,
        nkeys,
        status,
    )
    # Check the status *before* dereferencing `header`: on failure cfitsio
    # may never have set the pointer (previously this raced ahead to
    # unsafe_string on a possibly-undefined pointer).
    fits_assert_ok(status[])
    result = unsafe_string(header[])
    # free header pointer allocated by cfitsio (result is a copy);
    # fffree returns a C `int`, so the return type is Cint (the previous
    # `Ref{Cint}` declaration was incorrect).
    ccall((:fffree, libcfitsio), Cint, (Ptr{UInt8}, Ref{Cint}), header[], status)
    fits_assert_ok(status[])
    result
end
# -----------------------------------------------------------------------------
# HDU info functions and moving the current HDU
# Map CFITSIO's integer HDU-type code to a descriptive symbol; any code
# outside 0–2 yields :unknown.
function hdu_int_to_type(hdu_type_int)
    hdu_type_int == 0 && return :image_hdu
    hdu_type_int == 1 && return :ascii_table
    hdu_type_int == 2 && return :binary_table
    return :unknown
end
"""
fits_movabs_hdu(f::FITSFile, hduNum::Integer)
Change the current HDU to the value specified by `hduNum`, and return a symbol
describing the type of the HDU.
Possible symbols are: `image_hdu`, `ascii_table`, or `binary_table`.
The value of `hduNum` must range between 1 and the value returned by
[`fits_get_num_hdus`](@ref).
"""
fits_movabs_hdu
"""
fits_movrel_hdu(f::FITSFile, hduNum::Integer)
Change the current HDU by moving forward or backward by `hduNum` HDUs
(positive means forward), and return the same as [`fits_movabs_hdu`](@ref).
"""
fits_movrel_hdu
# Generate fits_movabs_hdu (absolute HDU index) and fits_movrel_hdu
# (relative offset); both return the type of the destination HDU.
for (a, b) in ((:fits_movabs_hdu, "ffmahd"), (:fits_movrel_hdu, "ffmrhd"))
    @eval begin
        function ($a)(f::FITSFile, hduNum::Integer)
            fits_assert_open(f)
            hdu_type = Ref{Cint}(0)   # receives the destination HDU's type code
            status = Ref{Cint}(0)
            ccall(
                ($b, libcfitsio),
                Cint,
                (Ptr{Cvoid}, Cint, Ref{Cint}, Ref{Cint}),
                f.ptr,
                hduNum,
                hdu_type,
                status,
            )
            fits_assert_ok(status[])
            hdu_int_to_type(hdu_type[])
        end
    end
end
"""
fits_movnam_hdu(f::FITSFile, extname::String, extver::Integer=0,
hdu_type_int::Integer=-1)
Change the current HDU by moving to the (first) HDU which has the specified
extension type and EXTNAME and EXTVER keyword values (or HDUNAME and HDUVER keywords).
If `extver` is 0 (the default) then the EXTVER keyword is ignored and the first HDU
with a matching EXTNAME (or HDUNAME) keyword will be found. If `hdu_type_int`
is -1 (the default) only the extname and extver values will be used to locate the
correct extension. If no matching HDU is found in the file, the current HDU will
remain unchanged.
"""
function fits_movnam_hdu(
    f::FITSFile,
    extname::String,
    extver::Integer = 0,     # 0: ignore EXTVER, match on name alone
    hdu_type::Integer = -1,  # -1: any HDU type
)
    fits_assert_open(f)
    status = Ref{Cint}(0)
    ccall(
        (:ffmnhd, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Cint, Ptr{UInt8}, Cint, Ref{Cint}),
        f.ptr,
        hdu_type,
        extname,
        extver,
        status,
    )
    fits_assert_ok(status[])
end
# Return the 1-based index of the current HDU. `ffghdn` cannot fail, so no
# status argument is involved.
function fits_get_hdu_num(f::FITSFile)
    fits_assert_open(f)
    hdunum = Ref{Cint}(0)
    ccall((:ffghdn, libcfitsio), Cint, (Ptr{Cvoid}, Ref{Cint}), f.ptr, hdunum)
    hdunum[]
end
# Return the type of the current HDU as a symbol
# (:image_hdu, :ascii_table, :binary_table or :unknown). Wraps `ffghdt`.
function fits_get_hdu_type(f::FITSFile)
    fits_assert_open(f)
    hdutype = Ref{Cint}(0)
    status = Ref{Cint}(0)
    ccall(
        (:ffghdt, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Ref{Cint}, Ref{Cint}),
        f.ptr,
        hdutype,
        status,
    )
    fits_assert_ok(status[])
    hdu_int_to_type(hdutype[])
end
"""
fits_delete_hdu(f::FITSFile)
Delete the HDU from the FITS file and shift the following HDUs forward. If `f` is the primary HDU in the file
then it'll be replaced by a null primary HDU with no data and minimal header information.
Return a symbol to indicate the type of the new current HDU.
Possible symbols are: `image_hdu`, `ascii_table`, or `binary_table`.
"""
function fits_delete_hdu(f::FITSFile)
    fits_assert_open(f)
    status = Ref{Cint}(0)
    hdutype = Ref{Cint}(0)
    # `ffdhdu` has C signature (fitsfile*, int*, int*): declare the output
    # argument as Ref{Cint}, consistent with the other HDU wrappers (the
    # previous Ptr{Cvoid} declaration only worked because Ref converts to a
    # raw pointer).
    ccall(
        (:ffdhdu, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Ref{Cint}, Ref{Cint}),
        f.ptr,
        hdutype,
        status,
    )
    fits_assert_ok(status[])
    hdu_int_to_type(hdutype[])
end
# -----------------------------------------------------------------------------
# image HDU functions
"""
fits_get_img_size(f::FITSFile)
Get the dimensions of the image.
"""
fits_get_img_size
# Generate single-Cint-output image queries: bitpix type, equivalent bitpix
# type (accounting for BSCALE/BZERO), and number of image dimensions.
for (a, b) in (
    (:fits_get_img_type, "ffgidt"),
    (:fits_get_img_equivtype, "ffgiet"),
    (:fits_get_img_dim, "ffgidm"),
)
    @eval function ($a)(f::FITSFile)
        fits_assert_open(f)
        result = Ref{Cint}(0)
        status = Ref{Cint}(0)
        ccall(
            ($b, libcfitsio),
            Cint,
            (Ptr{Cvoid}, Ref{Cint}, Ref{Cint}),
            f.ptr,
            result,
            status,
        )
        fits_assert_ok(status[])
        result[]
    end
end
"""
fits_create_img(f::FITSFile, T::Type, naxes::Vector{<:Integer})
Create a new primary array or IMAGE extension with the specified data type `T` and size `naxes`.
"""
function fits_create_img(f::FITSFile, ::Type{T}, naxes::Vector{<:Integer}) where {T}
    fits_assert_open(f)
    status = Ref{Cint}(0)
    # `ffcrimll` is the 64-bit ("long long") variant, so axis lengths are
    # passed as Int64 regardless of platform Clong size.
    ccall(
        (:ffcrimll, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Cint, Cint, Ptr{Int64}, Ref{Cint}),
        f.ptr,
        bitpix_from_type(T),
        length(naxes),
        convert(Vector{Int64}, naxes),
        status,
    )
    fits_assert_ok(status[])
end
# This method accepts a tuple of pixels instead of a vector
function fits_create_img(f::FITSFile, ::Type{T}, naxes::NTuple{N,Integer}) where {T,N}
    # Guard against a closed file, consistent with the Vector{<:Integer}
    # method above (this check was previously missing from this method).
    fits_assert_open(f)
    status = Ref{Cint}(0)
    # A Ref to an NTuple of Int64 is layout-compatible with the C long long
    # array that `ffcrimll` expects.
    naxesr = Ref(convert(NTuple{N,Int64}, naxes))
    ccall(
        (:ffcrimll, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Cint, Cint, Ptr{NTuple{N,Int64}}, Ref{Cint}),
        f.ptr,
        bitpix_from_type(T),
        N,
        naxesr,
        status,
    )
    fits_assert_ok(status[])
end
"""
fits_create_img(f::FITSFile, A::AbstractArray)
Create a new primary array or IMAGE extension with the element type and size of `A`,
that is capable of storing the entire array `A`.
"""
fits_create_img(f::FITSFile, a::AbstractArray) = fits_create_img(f, eltype(a), size(a))  # eltype/size fully determine the image
"""
fits_insert_img(f::FITSFile, T::Type, naxes::Union{Vector{<:Integer}, Tuple{Vararg{Integer}}})
Insert a new image extension immediately following the CHDU, or insert a new Primary Array
at the beginning of the file.
"""
function fits_insert_img(f::FITSFile, T::Type, naxes::Vector{<:Integer})
    fits_assert_open(f)
    status = Ref{Cint}(0)
    # `ffiimgll` inserts the image extension after the CHDU (64-bit axes).
    ccall(
        (:ffiimgll, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Cint,
            Ptr{Int64},
            Ref{Cint},
        ),
        f.ptr,
        bitpix_from_type(T),
        length(naxes),
        convert(Vector{Int64}, naxes),
        status,
    )
    fits_assert_ok(status[])
end
# Tuple-of-axes variant; a Ref{NTuple{N,Int64}} is layout-compatible with
# the C long long array expected by `ffiimgll`.
function fits_insert_img(f::FITSFile, T::Type, naxes::NTuple{N,Integer}) where {N}
    fits_assert_open(f)
    status = Ref{Cint}(0)
    naxesr = Ref(map(Int64, naxes))
    ccall(
        (:ffiimgll, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Cint,
            Ptr{NTuple{N,Int64}},
            Ref{Cint},
        ),
        f.ptr,
        bitpix_from_type(T),
        N,
        naxesr,
        status,
    )
    fits_assert_ok(status[])
end
fits_insert_img(f::FITSFile, a::AbstractArray) = fits_insert_img(f, eltype(a), size(a))  # sized to hold all of `a`
"""
fits_write_pix(f::FITSFile, fpixel::Union{Vector{<:Integer}, Tuple{Vararg{Integer}}}, nelements::Integer, data::StridedArray)
Write `nelements` pixels from `data` into the FITS file starting from the pixel `fpixel`.
!!! note
`data` needs to be stored contiguously in memory.
See also: [`fits_write_pixnull`](@ref)
"""
function fits_write_pix(
    f::FITSFile,
    fpixel::Vector{<:Integer},  # 1-based coordinates of the first pixel
    nelements::Integer,
    data::StridedArray,
)
    fits_assert_open(f)
    status = Ref{Cint}(0)
    # `ffppxll` is the 64-bit pixel-coordinate variant.
    ccall(
        (:ffppxll, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Ptr{Int64},
            Int64,
            Ptr{Cvoid},
            Ref{Cint},
        ),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        convert(Vector{Int64}, fpixel),
        nelements,
        data,
        status,
    )
    fits_assert_ok(status[])
end
# This method accepts a tuple of pixels instead of a vector
function fits_write_pix(
    f::FITSFile,
    fpixel::NTuple{N,Integer},
    nelements::Integer,
    data::StridedArray,
) where {N}
    fits_assert_open(f)
    status = Ref{Cint}(0)
    # Ref{NTuple} passes the coordinates without allocating a Vector.
    fpixelr = Ref(convert(NTuple{N,Int64}, fpixel))
    ccall(
        (:ffppxll, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Cint, Ptr{NTuple{N,Int64}}, Int64, Ptr{Cvoid}, Ref{Cint}),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        fpixelr,
        nelements,
        data,
        status,
    )
    fits_assert_ok(status[])
end
"""
fits_write_pix(f::FITSFile, data::StridedArray)
Write the entire array `data` into the FITS file.
!!! note
`data` needs to be stored contiguously in memory.
See also: [`fits_write_pixnull`](@ref), [`fits_write_subset`](@ref)
"""
function fits_write_pix(f::FITSFile, data::StridedArray)
    # Write the whole array starting at pixel (1,1,...,1).
    fits_write_pix(f, onest(Int64, ndims(data)), length(data), data)
end
# cfitsio expects the null value to be of the same type as the eltype of data
# It may also be C_NULL or nothing
# We check if it is a number and convert it to the correct eltype, otherwise leave it alone
# Coerce a real-valued null sentinel to the data eltype (cfitsio expects the
# null value to match the array's element type); any non-real value (e.g.
# C_NULL or nothing) passes through unchanged.
function _maybeconvert(::Type{ET}, nullval::Real) where {ET<:Real}
    return convert(ET, nullval)
end
_maybeconvert(::Type, nullval) = nullval
"""
fits_write_pixnull(f::FITSFile, fpixel::Union{Vector{<:Integer}, Tuple{Vararg{Integer}}}, nelements::Integer, data::StridedArray, nulval)
Write `nelements` pixels from `data` into the FITS file starting from the pixel `fpixel`.
The argument `nulval` specifies the values that are to be considered as "null values", and replaced
by appropriate numbers corresponding to the element type of `data`.
!!! note
`data` needs to be stored contiguously in memory.
See also: [`fits_write_pix`](@ref)
"""
function fits_write_pixnull(
    f::FITSFile,
    fpixel::Vector{<:Integer},
    nelements::Integer,
    data::StridedArray,
    nulval,   # values equal to this are written as FITS nulls
)
    fits_assert_open(f)
    status = Ref{Cint}(0)
    ccall(
        (:ffppxnll, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Ptr{Int64},
            Int64,
            Ptr{Cvoid},
            Ptr{Cvoid},
            Ref{Cint},
        ),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        convert(Vector{Int64}, fpixel),
        nelements,
        data,
        Ref(_maybeconvert(eltype(data), nulval)),  # null value, coerced to eltype
        status,
    )
    fits_assert_ok(status[])
end
# Tuple-of-coordinates variant of fits_write_pixnull.
function fits_write_pixnull(
    f::FITSFile,
    fpixel::NTuple{N,Integer},
    nelements::Integer,
    data::StridedArray,
    nulval,
) where {N}
    fits_assert_open(f)
    status = Ref{Cint}(0)
    fpixelr = Ref(convert(NTuple{N,Int64}, fpixel))
    ccall(
        (:ffppxnll, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Ptr{NTuple{N,Int64}},
            Int64,
            Ptr{Cvoid},
            Ptr{Cvoid},
            Ref{Cint},
        ),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        fpixelr,
        nelements,
        data,
        Ref(_maybeconvert(eltype(data), nulval)),
        status,
    )
    fits_assert_ok(status[])
end
"""
fits_write_pixnull(f::FITSFile, data::StridedArray, nulval)
Write the entire array `data` into the FITS file.
The argument `nulval` specifies the values that are to be considered as "null values", and replaced
by appropriate numbers corresponding to the element type of `data`.
!!! note
`data` needs to be stored contiguously in memory.
See also: [`fits_write_pix`](@ref)
"""
function fits_write_pixnull(f::FITSFile, data::StridedArray, nulval)
    # Write the whole array starting at pixel (1,1,...,1).
    fits_write_pixnull(f, onest(Int64, ndims(data)), length(data), data, nulval)
end
"""
fits_write_subset(f::FITSFile, fpixel::V, lpixel::V, data::StridedArray) where {V<:Union{Vector{<:Integer}, Tuple{Vararg{Integer}}}}
Write a rectangular section of the FITS image. The number of pixels to be written will be computed from the
first and last pixels (specified as the `fpixel` and `lpixel` arguments respectively).
!!! note
The section to be written out must be contiguous in memory, so all the dimensions aside from
the last one must span the entire axis range.
The arguments `fpixel` and `lpixel` must account for this.
See also: [`fits_write_pix`](@ref)
"""
function fits_write_subset(
    f::FITSFile,
    fpixel::Vector{<:Integer},  # first corner of the section (1-based)
    lpixel::Vector{<:Integer},  # last corner of the section (inclusive)
    data::StridedArray,
)
    fits_assert_open(f)
    status = Ref{Cint}(0)
    # `ffpss` uses platform `long` coordinates (not the 64-bit variant).
    ccall(
        (:ffpss, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Ptr{Clong},
            Ptr{Clong},
            Ptr{Cvoid},
            Ref{Cint},
        ),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        convert(Vector{Clong}, fpixel),
        convert(Vector{Clong}, lpixel),
        data,
        status,
    )
    fits_assert_ok(status[])
end
# Tuple-of-coordinates variant of fits_write_subset.
function fits_write_subset(
    f::FITSFile,
    fpixel::NTuple{N,Integer},
    lpixel::NTuple{N,Integer},
    data::StridedArray,
) where {N}
    fits_assert_open(f)
    status = Ref{Cint}(0)
    # Convert both corners to Refs of Clong tuples in one pass.
    fpixelr, lpixelr = map((fpixel, lpixel)) do x
        Ref(convert(NTuple{N,Clong}, x))
    end
    ccall(
        (:ffpss, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Ptr{NTuple{N,Clong}},
            Ptr{NTuple{N,Clong}},
            Ptr{Cvoid},
            Ref{Cint},
        ),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        fpixelr,
        lpixelr,
        data,
        status,
    )
    fits_assert_ok(status[])
end
# Read `nelements` pixels starting at `fpixel` into `data`, substituting
# `nullval` for any FITS null. Returns nonzero if any null was encountered.
function fits_read_pix(
    f::FITSFile,
    fpixel::Vector{<:Integer},
    nelements::Integer,
    nullval,
    data::StridedArray,
)
    fits_assert_open(f)
    fits_assert_nonempty(f)
    anynull = Ref{Cint}(0)
    status = Ref{Cint}(0)
    ccall(
        (:ffgpxvll, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Ptr{Int64},
            Int64,
            Ptr{Cvoid},
            Ptr{Cvoid},
            Ref{Cint},
            Ref{Cint},
        ),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        convert(Vector{Int64}, fpixel),
        nelements,
        Ref(_maybeconvert(eltype(data), nullval)),  # substitution value for nulls
        data,
        anynull,
        status,
    )
    fits_assert_ok(status[])
    anynull[]
end
# This method accepts a tuple of pixels instead of a vector
# Tuple-of-coordinates variant of the null-substituting fits_read_pix.
function fits_read_pix(
    f::FITSFile,
    fpixel::NTuple{N,Integer},
    nelements::Integer,
    nullval,
    data::StridedArray,
) where {N}
    fits_assert_open(f)
    fits_assert_nonempty(f)
    anynull = Ref{Cint}(0)
    status = Ref{Cint}(0)
    fpixelr = Ref(convert(NTuple{N,Int64}, fpixel))
    ccall(
        (:ffgpxvll, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Ptr{NTuple{N,Int64}},
            Int64,
            Ptr{Cvoid},
            Ptr{Cvoid},
            Ref{Cint},
            Ref{Cint},
        ),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        fpixelr,
        nelements,
        Ref(_maybeconvert(eltype(data), nullval)),
        data,
        anynull,
        status,
    )
    fits_assert_ok(status[])
    anynull[]
end
"""
    fits_read_pix(f::FITSFile, fpixel::Union{Vector{<:Integer}, Tuple{Vararg{Integer}}}, nelements::Integer, [nulval], data::StridedArray)
Read `nelements` pixels from the FITS file into `data` starting from the pixel `fpixel`.
If the optional argument `nulval` is specified and is non-zero, any null value present in the array will be
replaced by it.
!!! note
`data` needs to be stored contiguously in memory.
See also: [`fits_read_pixnull`](@ref), [`fits_read_subset`](@ref)
"""
function fits_read_pix(
    f::FITSFile,
    fpixel::Vector{<:Integer},
    nelements::Integer,
    data::StridedArray,
)
    fits_assert_open(f)
    fits_assert_nonempty(f)
    anynull = Ref{Cint}(0)   # set nonzero by cfitsio if any null was read
    status = Ref{Cint}(0)
    ccall(
        (:ffgpxvll, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Ptr{Int64},
            Int64,
            Ptr{Cvoid},
            Ptr{Cvoid},
            Ref{Cint},
            Ref{Cint},
        ),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        convert(Vector{Int64}, fpixel),
        nelements,
        C_NULL,   # no null substitution: nulls are left as-is
        data,
        anynull,
        status,
    )
    fits_assert_ok(status[])
    anynull[]
end
# This method accepts a tuple of pixels instead of a vector
# Tuple-of-coordinates variant of fits_read_pix without null substitution.
function fits_read_pix(
    f::FITSFile,
    fpixel::NTuple{N,Integer},
    nelements::Integer,  # widened from ::Int for consistency with the sibling methods
    data::StridedArray,
) where {N}
    fits_assert_open(f)
    fits_assert_nonempty(f)
    anynull = Ref{Cint}(0)   # set nonzero by cfitsio if any null was read
    status = Ref{Cint}(0)
    fpixelr = Ref(convert(NTuple{N,Int64}, fpixel))
    ccall(
        (:ffgpxvll, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Cint, Ptr{NTuple{N,Int64}}, Int64, Ptr{Cvoid}, Ptr{Cvoid}, Ref{Cint}, Ref{Cint}),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        fpixelr,
        nelements,
        C_NULL,   # no null substitution
        data,
        anynull,
        status,
    )
    fits_assert_ok(status[])
    anynull[]
end
"""
fits_read_pix(f::FITSFile, data::StridedArray, [nulval])
Read `length(data)` pixels from the FITS file into `data` starting from the first pixel.
The optional argument `nulval`, if specified and non-zero, is used to replace any null value present in the array.
!!! note
`data` needs to be stored contiguously in memory.
See also: [`fits_read_pixnull`](@ref)
"""
function fits_read_pix(f::FITSFile, data::StridedArray)
    # Fill the whole array starting at pixel (1,1,...,1); nulls left as-is.
    fits_read_pix(f, onest(Int64, ndims(data)), length(data), data)
end
function fits_read_pix(f::FITSFile, data::StridedArray, nulval)
    # Same, but substitute `nulval` for any null pixels.
    fits_read_pix(f, onest(Int64, ndims(data)), length(data), nulval, data)
end
"""
fits_read_pixnull(f::FITSFile, fpixel::Union{Vector{<:Integer}, Tuple{Vararg{Integer}}}, nelements::Integer, data::StridedArray, nullarray::Array{UInt8})
Read `nelements` pixels from the FITS file into `data` starting from the pixel `fpixel`.
At output, the indices of `nullarray` where `data` has a corresponding null value are set to `1`.
!!! note
`data` needs to be stored contiguously in memory.
See also: [`fits_read_pix`](@ref)
"""
function fits_read_pixnull(f::FITSFile,
        fpixel::Vector{<:Integer},
        nelements::Integer,
        data::StridedArray,
        nullarray::Array{UInt8},  # set to 1 where the corresponding pixel was null
    )
    fits_assert_open(f)
    fits_assert_nonempty(f)
    if length(data) != length(nullarray)
        error("data and nullarray must have the same number of elements")
    end
    anynull = Ref{Cint}(0)
    status = Ref{Cint}(0)
    # `ffgpxfll` reports nulls via the flag array instead of substituting.
    ccall(
        (:ffgpxfll, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Ptr{Int64},
            Int64,
            Ptr{Cvoid},
            Ptr{UInt8},
            Ref{Cint},
            Ref{Cint},
        ),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        convert(Vector{Int64}, fpixel),
        nelements,
        data,
        nullarray,
        anynull,
        status,
    )
    fits_assert_ok(status[])
    anynull[]
end
# Tuple-of-coordinates variant of fits_read_pixnull.
function fits_read_pixnull(f::FITSFile,
        fpixel::NTuple{N,Integer},
        nelements::Integer,
        data::StridedArray,
        nullarray::Array{UInt8},
    ) where {N}
    fits_assert_open(f)
    fits_assert_nonempty(f)
    if length(data) != length(nullarray)
        error("data and nullarray must have the same number of elements")
    end
    anynull = Ref{Cint}(0)
    status = Ref{Cint}(0)
    fpixelr = Ref(convert(NTuple{N,Int64}, fpixel))
    ccall(
        (:ffgpxfll, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Ptr{NTuple{N,Int64}},
            Int64,
            Ptr{Cvoid},
            Ptr{UInt8},
            Ref{Cint},
            Ref{Cint},
        ),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        fpixelr,
        nelements,
        data,
        nullarray,
        anynull,
        status,
    )
    fits_assert_ok(status[])
    anynull[]
end
"""
fits_read_pixnull(f::FITSFile, data::StridedArray, nullarray::Array{UInt8})
Read `length(data)` pixels from the FITS file into `data` starting from the first pixel.
At output, the indices of `nullarray` where `data` has a corresponding null value are set to `1`.
!!! note
`data` needs to be stored contiguously in memory.
See also: [`fits_read_pix`](@ref)
"""
function fits_read_pixnull(f::FITSFile, data::StridedArray, nullarray::Array{UInt8})
    # Fill the whole array starting at pixel (1,1,...,1), flagging nulls.
    fits_read_pixnull(f, onest(Int64, ndims(data)), length(data), data, nullarray)
end
"""
fits_read_subset(f::FITSFile, fpixel::V, lpixel::V, inc::V, [nulval], data::StridedArray) where {V<:Union{Vector{<:Integer}, Tuple{Vararg{Integer}}}}
Read a rectangular section of the FITS image. The number of pixels to be read will be computed from the
first and last pixels (specified as the `fpixel` and `lpixel` arguments respectively). The argument `inc` specifies the
step-size in pixels along each dimension.
If the optional argument `nulval` is specified and is non-zero, null values in `data` will be replaced by it.
!!! note
`data` needs to be stored contiguously in memory, and will be populated contiguously with the
pixels that are read in.
See also: [`fits_read_pix`](@ref)
"""
function fits_read_subset(
    f::FITSFile,
    fpixel::Vector{<:Integer},  # first corner of the section (1-based)
    lpixel::Vector{<:Integer},  # last corner of the section (inclusive)
    inc::Vector{<:Integer},     # pixel stride along each dimension
    data::StridedArray,
)
    fits_assert_open(f)
    fits_assert_nonempty(f)
    anynull = Ref{Cint}(0)
    status = Ref{Cint}(0)
    ccall(
        (:ffgsv, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Ptr{Clong},
            Ptr{Clong},
            Ptr{Clong},
            Ptr{Cvoid},
            Ptr{Cvoid},
            Ref{Cint},
            Ref{Cint},
        ),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        convert(Vector{Clong}, fpixel),
        convert(Vector{Clong}, lpixel),
        convert(Vector{Clong}, inc),
        C_NULL,   # no null substitution
        data,
        anynull,
        status,
    )
    fits_assert_ok(status[])
    anynull[]
end
# Variant that substitutes `nulval` for any null pixels encountered.
function fits_read_subset(
    f::FITSFile,
    fpixel::Vector{<:Integer},
    lpixel::Vector{<:Integer},
    inc::Vector{<:Integer},
    nulval,
    data::StridedArray,
)
    fits_assert_open(f)
    fits_assert_nonempty(f)
    anynull = Ref{Cint}(0)
    status = Ref{Cint}(0)
    ccall(
        (:ffgsv, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Ptr{Clong},
            Ptr{Clong},
            Ptr{Clong},
            Ptr{Cvoid},
            Ptr{Cvoid},
            Ref{Cint},
            Ref{Cint},
        ),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        convert(Vector{Clong}, fpixel),
        convert(Vector{Clong}, lpixel),
        convert(Vector{Clong}, inc),
        Ref(_maybeconvert(eltype(data), nulval)),  # substitution value for nulls
        data,
        anynull,
        status,
    )
    fits_assert_ok(status[])
    anynull[]
end
# Tuple-of-coordinates variant without null substitution.
function fits_read_subset(
    f::FITSFile,
    fpixel::NTuple{N,Integer},
    lpixel::NTuple{N,Integer},
    inc::NTuple{N,Integer},
    data::StridedArray,
) where {N}
    fits_assert_open(f)
    fits_assert_nonempty(f)
    anynull = Ref{Cint}(0)
    status = Ref{Cint}(0)
    # Convert all three coordinate tuples to Refs of Clong tuples in one pass.
    fpixelr, lpixelr, incr = map((fpixel, lpixel, inc)) do x
        Ref(convert(NTuple{N,Clong}, x))
    end
    ccall(
        (:ffgsv, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Ptr{NTuple{N,Clong}},
            Ptr{NTuple{N,Clong}},
            Ptr{NTuple{N,Clong}},
            Ptr{Cvoid},
            Ptr{Cvoid},
            Ref{Cint},
            Ref{Cint},
        ),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        fpixelr,
        lpixelr,
        incr,
        C_NULL,   # no null substitution
        data,
        anynull,
        status,
    )
    fits_assert_ok(status[])
    anynull[]
end
# NTuple-coordinate method of `fits_read_subset` with null substitution:
# undefined pixels are returned as `nulval`.
function fits_read_subset(
    f::FITSFile,
    fpixel::NTuple{N,Integer},
    lpixel::NTuple{N,Integer},
    inc::NTuple{N,Integer},
    nulval,
    data::StridedArray,
) where {N}
    fits_assert_open(f)
    fits_assert_nonempty(f)
    anynull = Ref{Cint}(0)  # set nonzero by cfitsio if any null pixels were read
    status = Ref{Cint}(0)
    # Convert each tuple to NTuple{N,Clong} and box it for the ccall below.
    fpixelr, lpixelr, incr = map((fpixel, lpixel, inc)) do x
        Ref(convert(NTuple{N,Clong}, x))
    end
    ccall(
        (:ffgsv, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Ptr{NTuple{N,Clong}},
            Ptr{NTuple{N,Clong}},
            Ptr{NTuple{N,Clong}},
            Ptr{Cvoid},
            Ptr{Cvoid},
            Ref{Cint},
            Ref{Cint},
        ),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        fpixelr,
        lpixelr,
        incr,
        Ref(_maybeconvert(eltype(data), nulval)),  # value written in place of nulls
        data,
        anynull,
        status,
    )
    fits_assert_ok(status[])
    anynull[]  # nonzero if any null values were encountered
end
"""
    fits_copy_image_section(fin::FITSFile, fout::FITSFile, section::String)

Copy a rectangular section of an image from `fin` and write it to a new FITS primary image or
image extension in `fout`. The section specifier (e.g. `"1:20,1:10"`) is described on the
[`CFITSIO website`](https://heasarc.gsfc.nasa.gov/docs/software/fitsio/c/c_user/node97.html).
"""
function fits_copy_image_section(fin::FITSFile, fout::FITSFile, section::String)
    fits_assert_open(fin)
    fits_assert_nonempty(fin)  # the source must contain an image HDU
    fits_assert_open(fout)
    status = Ref{Cint}(0)
    ccall(
        (:fits_copy_image_section, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{UInt8}, Ref{Cint}),
        fin.ptr,
        fout.ptr,
        section,
        status,
    )
    fits_assert_ok(status[])
end
"""
    fits_write_null_img(f::FITSFile, firstelem::Integer, nelements::Integer)

Set a stretch of elements to the appropriate null value, starting from the pixel number `firstelem`
and extending over `nelements` pixels.
"""
function fits_write_null_img(f::FITSFile, firstelem::Integer, nelements::Integer)
    fits_assert_open(f)
    status = Ref{Cint}(0)
    ccall(
        (:ffpprn, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Clonglong, Clonglong, Ref{Cint}),
        f.ptr,
        firstelem,
        nelements,
        status,
    )
    fits_assert_ok(status[])
end
"""
    fits_resize_img(f::FITSFile, T::Type, naxis::Integer, sz::Union{Vector{<:Integer}, Tuple{Vararg{Integer}}})

Modify the size, dimensions and optionally the element type of the image in `f`. The new image
will have an element type `T`, be a `naxis`-dimensional image with size `sz`.
If the new image is larger than the existing one, it will be zero-padded at the end.
If the new image is smaller, existing image data will be truncated.

    fits_resize_img(f::FITSFile, sz::Union{Vector{<:Integer}, Tuple{Vararg{Integer}}})

Resize the image to the new size `sz`. The element type is preserved, and the number of dimensions
is set equal to `length(sz)`.

    fits_resize_img(f::FITSFile, T::Type)

Change the element type of the image to `T`, leaving the size unchanged.

!!! note
    This method reinterprets the data instead of coercing the elements.

# Example
```jldoctest
julia> f = fits_clobber_file(tempname());

julia> a = [1 2; 3 4];

julia> fits_create_img(f, a);

julia> fits_write_pix(f, a);

julia> fits_get_img_size(f)
2-element Vector{Int64}:
 2
 2

julia> fits_resize_img(f, [3,3]);

julia> fits_get_img_size(f)
2-element Vector{Int64}:
 3
 3

julia> b = similar(a, (3,3));

julia> fits_read_pix(f, b); b
3×3 Matrix{Int64}:
 1  4  0
 3  0  0
 2  0  0

julia> fits_resize_img(f, [4]);

julia> b = similar(a, (4,));

julia> fits_read_pix(f, b); b
4-element Vector{Int64}:
 1
 3
 2
 4
```
"""
function fits_resize_img(f::FITSFile, T::Type, naxis::Integer, sz::Vector{<:Integer})
    fits_assert_open(f)
    fits_assert_nonempty(f)
    status = Ref{Cint}(0)
    ccall(
        (:ffrsim, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Cint, Cint, Ptr{Clong}, Ref{Cint}),
        f.ptr,
        bitpix_from_type(T),  # element type encoded as a FITS BITPIX code
        naxis,
        convert(Vector{Clong}, sz),
        status,
    )
    fits_assert_ok(status[])
end
# NTuple-size method: avoids allocating a Vector for the axis lengths.
function fits_resize_img(f::FITSFile, T::Type, naxis::Integer, sz::NTuple{N,Integer}) where {N}
    fits_assert_open(f)
    fits_assert_nonempty(f)
    status = Ref{Cint}(0)
    szr = Ref(convert(NTuple{N,Clong}, sz))  # box the tuple so it can be passed to C
    ccall(
        (:ffrsim, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Cint, Cint, Ptr{NTuple{N,Clong}}, Ref{Cint}),
        f.ptr,
        bitpix_from_type(T),  # element type encoded as a FITS BITPIX code
        naxis,
        szr,
        status,
    )
    fits_assert_ok(status[])
end
# Resize the image to `sz` while keeping its element type; the new
# dimensionality is implied by `length(sz)`.
function fits_resize_img(f::FITSFile, sz::Union{Vector{<:Integer}, Tuple{Vararg{Integer}}})
    fits_assert_open(f)
    fits_assert_nonempty(f)
    current_type = type_from_bitpix(fits_get_img_type(f))
    return fits_resize_img(f, current_type, length(sz), sz)
end
# Change only the element type of the image to `T`; the current size and
# dimensionality are queried from the file and passed through unchanged.
function fits_resize_img(f::FITSFile, T::Type)
    fits_assert_open(f)
    fits_assert_nonempty(f)
    current_size = fits_get_img_size(f)
    return fits_resize_img(f, T, fits_get_img_dim(f), current_size)
end
# -----------------------------------------------------------------------------
# ASCII/binary table HDU functions
# The three fields are: ttype, tform, tunit (CFITSIO's terminology),
# i.e. (column name, format/repeat code, unit string).
const ColumnDef = Tuple{String,String,String}
"""
    fits_create_binary_tbl(f::FITSFile, numrows::Integer, coldefs::Array{ColumnDef},
                           extname::String)

Append a new HDU containing a binary table. The meaning of the parameters is the same
as in a call to [`fits_create_ascii_tbl`](@ref).

In general, one should pick this function for creating tables in a new HDU,
as binary tables require less space on the disk and are more efficient to read and write.
(Moreover, a few datatypes are not supported in ASCII tables).
"""
fits_create_binary_tbl
"""
    fits_create_ascii_tbl(f::FITSFile, numrows::Integer, coldefs::Array{CFITSIO.ColumnDef},
                          extname::String)

Append a new HDU containing an ASCII table.

The table will have `numrows` rows (this parameter can be set to zero), each
initialized with the default value. In order to create a table, the programmer
must specify the characteristics of each column. The columns are specified by the
`coldefs` variable, which is an array of tuples.
Each tuple must have three string fields:

1. The name of the column.
2. The data type and the repetition count. It must be a string made by a number
   (the repetition count) followed by a letter specifying the type (in the example
   above, `D` stands for `Float64`, `E` stands for `Float32`, `A` stands for `Char`).
   Refer to the CFITSIO documentation for more information about the syntax of this
   parameter.
3. The measure unit of this field. This is used only as a comment.

The value of `extname` sets the "extended name" of the table, i.e., a string
that in some situations can be used to refer to the HDU itself.

Note that, unlike for binary tables, CFITSIO puts some limitations to the
types that can be used in an ASCII table column. Refer to the CFITSIO manual
for further information.

See also [`fits_create_binary_tbl`](@ref) for a similar function which
creates binary tables.
"""
fits_create_ascii_tbl
# Generate `fits_create_binary_tbl` (tbltype = 2) and `fits_create_ascii_tbl`
# (tbltype = 1); both call cfitsio's `ffcrtb`, which dispatches on the
# table-type argument.
for (a, b) in ((:fits_create_binary_tbl, 2), (:fits_create_ascii_tbl, 1))
    @eval begin
        function ($a)(
            f::FITSFile,
            numrows::Integer,
            coldefs::Array{ColumnDef},
            extname::String,
        )
            fits_assert_open(f)
            # Ensure that extension name, column names and units are
            # ASCII, as these get written to the file. We don't
            # need to check that tform is ASCII because presumably
            # cfitsio will throw an appropriate error if it doesn't
            # recognize the tform string.
            fits_assert_isascii(extname)
            for coldef in coldefs
                fits_assert_isascii(coldef[1])
                fits_assert_isascii(coldef[3])
            end
            # get length and convert coldefs to three arrays of Ptr{Uint8}
            # NOTE(review): the `pointer` calls below are not wrapped in
            # GC.@preserve; this relies on `coldefs` (and the strings inside)
            # staying rooted for the duration of the ccall — confirm.
            ntype = length(coldefs)
            ttype = [pointer(x[1]) for x in coldefs]
            tform = [pointer(x[2]) for x in coldefs]
            tunit = [pointer(x[3]) for x in coldefs]
            status = Ref{Cint}(0)
            ccall(
                ("ffcrtb", libcfitsio),
                Cint,
                (
                    Ptr{Cvoid},
                    Cint,
                    Int64,
                    Cint,
                    Ptr{Ptr{UInt8}},
                    Ptr{Ptr{UInt8}},
                    Ptr{Ptr{UInt8}},
                    Ptr{UInt8},
                    Ref{Cint},
                ),
                f.ptr,
                $b,  # table type: 2 = binary, 1 = ASCII
                numrows,
                ntype,
                ttype,
                tform,
                tunit,
                extname,
                status,
            )
            fits_assert_ok(status[])
        end
    end
end
"""
    fits_get_num_hdus(f::FITSFile)

Return the number of HDUs in the file.
"""
fits_get_num_hdus
# Generate simple single-output query functions that share the calling
# pattern `routine(f.ptr, out, status)`.
for (a, b, T) in (
    (:fits_get_num_cols, "ffgncl", :Cint),
    (:fits_get_num_hdus, "ffthdu", :Cint),
    (:fits_get_rowsize, "ffgrsz", :Clong),
)
    @eval begin
        function ($a)(f::FITSFile)
            fits_assert_open(f)
            result = Ref{$T}(0)
            status = Ref{Cint}(0)
            ccall(
                ($b, libcfitsio),
                Cint,
                (Ptr{Cvoid}, Ref{$T}, Ref{Cint}),
                f.ptr,
                result,
                status,
            )
            fits_assert_ok(status[])
            result[]
        end
    end
end
"""
    fits_get_colnum(f::FITSFile, tmplt::String; case_sensitive::Bool = true)

Return the number (1-based) of the column in the current table HDU whose name
matches the template string `tmplt`. Set `case_sensitive = false` for a
case-insensitive match.
"""
function fits_get_colnum(f::FITSFile, tmplt::String; case_sensitive::Bool = true)
    fits_assert_open(f)
    result = Ref{Cint}(0)
    status = Ref{Cint}(0)
    # Second argument is case-sensitivity of search: 0 = case-insensitive
    # 1 = case-sensitive (the Bool converts implicitly to Cint)
    ccall(
        ("ffgcno", libcfitsio),
        Cint,
        (Ptr{Cvoid}, Cint, Ptr{UInt8}, Ref{Cint}, Ref{Cint}),
        f.ptr,
        case_sensitive,
        tmplt,
        result,
        status,
    )
    fits_assert_ok(status[])
    return result[]
end
# The following block are all functions that have separate variants for Clong
# and 64-bit integers in cfitsio. Rather than providing both of these, we
# provide only one according to the native integer type on the platform.
# NOTE: these are plain (non-const) top-level bindings; they are consumed only
# by the `@eval` blocks below while the package is being loaded.
if promote_type(Int, Clong) == Clong
    T = Clong
    ffgtdm = "ffgtdm"
    ffgnrw = "ffgnrw"
    ffptdm = "ffptdm"
    ffgtcl = "ffgtcl"
    ffeqty = "ffeqty"
    ffgdes = "ffgdes"
    ffgisz = "ffgisz"
else
    # Platforms where Clong is narrower than Int: use the "ll"-suffixed
    # 64-bit cfitsio entry points and Int64 throughout.
    T = Int64
    ffgtdm = "ffgtdmll"
    ffgnrw = "ffgnrwll"
    ffptdm = "ffptdmll"
    ffgtcl = "ffgtclll"
    ffeqty = "ffeqtyll"
    ffgdes = "ffgdesll"
    ffgisz = "ffgiszll"
end
"""
    fits_get_coltype(f::FITSFile, colnum::Integer)

Provided that the current HDU contains either an ASCII or binary table, return
information about the column at position `colnum` (counting from 1).

Return is a tuple containing

- `typecode`: CFITSIO integer type code of the column.
- `repcount`: Repetition count for the column.
- `width`: Width of an individual element.
"""
fits_get_coltype
# Methods generated with the platform-appropriate integer type `T` and the
# matching cfitsio routine names (plain or "ll"-suffixed) selected above.
@eval begin
    function fits_get_coltype(ff::FITSFile, colnum::Integer)
        fits_assert_open(ff)
        typecode = Ref{Cint}(0)
        repcnt = Ref{$T}(0)
        width = Ref{$T}(0)
        status = Ref{Cint}(0)
        ccall(
            ($ffgtcl, libcfitsio),
            Cint,
            (Ptr{Cvoid}, Cint, Ref{Cint}, Ref{$T}, Ref{$T}, Ref{Cint}),
            ff.ptr,
            colnum,
            typecode,
            repcnt,
            width,
            status,
        )
        fits_assert_ok(status[])
        return Int(typecode[]), Int(repcnt[]), Int(width[])
    end
    # Like `fits_get_coltype`, but queries the *equivalent* column type
    # (cfitsio ffeqty); see the cfitsio manual for how it differs.
    function fits_get_eqcoltype(ff::FITSFile, colnum::Integer)
        fits_assert_open(ff)
        typecode = Ref{Cint}(0)
        repcnt = Ref{$T}(0)
        width = Ref{$T}(0)
        status = Ref{Cint}(0)
        ccall(
            ($ffeqty, libcfitsio),
            Cint,
            (Ptr{Cvoid}, Cint, Ref{Cint}, Ref{$T}, Ref{$T}, Ref{Cint}),
            ff.ptr,
            colnum,
            typecode,
            repcnt,
            width,
            status,
        )
        fits_assert_ok(status[])
        return Int(typecode[]), Int(repcnt[]), Int(width[])
    end
    # Return the axis lengths of the current image HDU as a Vector.
    function fits_get_img_size(f::FITSFile)
        fits_assert_open(f)
        ndim = fits_get_img_dim(f)
        naxes = Vector{$T}(undef, ndim)
        status = Ref{Cint}(0)
        ccall(
            ($ffgisz, libcfitsio),
            Cint,
            (Ptr{Cvoid}, Cint, Ptr{$T}, Ref{Cint}),
            f.ptr,
            ndim,
            naxes,
            status,
        )
        fits_assert_ok(status[])
        naxes
    end
    # Tuple-returning variant: the caller supplies the dimensionality `N`
    # through `Val(N)`, avoiding the Vector allocation.
    function fits_get_img_size(f::FITSFile, ::Val{N}) where {N}
        # Consistency fix: guard against a closed file like every other
        # method in this block (previously missing, so a closed handle was
        # passed straight to cfitsio).
        fits_assert_open(f)
        naxes = Ref(zerost($T, N))  # `zerost` (defined elsewhere): zero-filled NTuple
        status = Ref{Cint}(0)
        ccall(
            ($ffgisz, libcfitsio),
            Cint,
            (Ptr{Cvoid}, Cint, Ptr{NTuple{N,$T}}, Ref{Cint}),
            f.ptr,
            N,
            naxes,
            status,
        )
        fits_assert_ok(status[])
        naxes[]
    end
    function fits_get_num_rows(f::FITSFile)
        fits_assert_open(f)
        result = Ref{$T}(0)
        status = Ref{Cint}(0)
        ccall(
            ($ffgnrw, libcfitsio),
            Cint,
            (Ptr{Cvoid}, Ref{$T}, Ref{Cint}),
            f.ptr,
            result,
            status,
        )
        fits_assert_ok(status[])
        return Int(result[])
    end
    # `fits_read_tdim` returns the dimensions of a table column in a
    # binary table. Normally this information is given by the TDIMn
    # keyword, but if this keyword is not present then this routine
    # returns `[r]` with `r` equals to the repeat count in the TFORM
    # keyword.
    function fits_read_tdim(ff::FITSFile, colnum::Integer)
        fits_assert_open(ff)
        naxes = Vector{$T}(undef, 99) # 99 is the maximum allowed number of axes
        naxis = Ref{Cint}(0)
        status = Ref{Cint}(0)
        ccall(
            ($ffgtdm, libcfitsio),
            Cint,
            (Ptr{Cvoid}, Cint, Cint, Ref{Cint}, Ptr{$T}, Ref{Cint}),
            ff.ptr,
            colnum,
            length(naxes),
            naxis,
            naxes,
            status,
        )
        fits_assert_ok(status[])
        return naxes[1:naxis[]]
    end
    # Write the TDIMn keyword describing the dimensions of column `colnum`.
    function fits_write_tdim(ff::FITSFile, colnum::Integer, naxes::Array{$T})
        fits_assert_open(ff)
        status = Ref{Cint}(0)
        ccall(
            ($ffptdm, libcfitsio),
            Cint,
            (Ptr{Cvoid}, Cint, Cint, Ptr{$T}, Ref{Cint}),
            ff.ptr,
            colnum,
            length(naxes),
            naxes,
            status,
        )
        fits_assert_ok(status[])
    end
    # Return `(repeat, offset)` for row `rownum` of column `colnum`
    # (cfitsio ffgdes).
    function fits_read_descript(f::FITSFile, colnum::Integer, rownum::Integer)
        fits_assert_open(f)
        repeat = Ref{$T}(0)
        offset = Ref{$T}(0)
        status = Ref{Cint}(0)
        ccall(
            ($ffgdes, libcfitsio),
            Cint,
            (Ptr{Cvoid}, Cint, Int64, Ref{$T}, Ref{$T}, Ref{Cint}),
            f.ptr,
            colnum,
            rownum,
            repeat,
            offset,
            status,
        )
        fits_assert_ok(status[])
        return Int(repeat[]), Int(offset[])
    end
end
"""
    fits_read_col(f, colnum, firstrow, firstelem, data)

Read data from one column of an ASCII/binary table and convert the data into the
specified type `T`.

### Arguments ###

* `f::FITSFile`: the file to be read.
* `colnum::Integer`: the column number, where the value of the first column is `1`.
* `firstrow::Integer`: the elements to be read start from this row.
* `firstelem::Integer`: specifies which is the first element to be read, when each
  cell contains more than one element (i.e., the "repetition count" of the field is
  greater than one).
* `data::Array`: at the end of the call, this will be filled with the elements read
  from the column. The length of the array gives the overall number of elements.

See also [`fits_write_col`](@ref).
"""
# String-column method: reads fixed-width character cells into temporary byte
# buffers and converts them to Julia `String`s in `data`.
function fits_read_col(
    f::FITSFile,
    colnum::Integer,
    firstrow::Integer,
    firstelem::Integer,
    data::Array{String},
)
    fits_assert_open(f)
    # get width: number of characters in each string
    typecode, repcount, width = fits_get_eqcoltype(f, colnum)
    # ensure that data are strings, otherwise cfitsio will try to write
    # formatted strings, which have widths given by fits_get_col_display_width
    # not by the repeat value from fits_get_coltype.
    abs(typecode) == 16 || error("not a string column")  # 16 == TSTRING type code
    # create an array of character buffers of the correct width
    buffers = [Vector{UInt8}(undef, width) for i in 1:length(data)]
    # Call the CFITSIO function
    anynull = Ref{Cint}(0)
    status = Ref{Cint}(0)
    ccall(
        (:ffgcvs, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Int64,
            Int64,
            Int64,
            Ptr{UInt8},
            Ptr{Ptr{UInt8}},
            Ref{Cint},
            Ref{Cint},
        ),
        f.ptr,
        colnum,
        firstrow,
        firstelem,
        length(data),
        " ",  # nulstr argument substituted for undefined cells
        buffers,
        anynull,
        status,
    )
    fits_assert_ok(status[])
    # Create strings out of the buffers, terminating at null characters.
    # Note that `String(x)` does not copy the buffer x.
    for i in 1:length(data)
        zeropos = something(findfirst(isequal(0x00), buffers[i]), 0)
        data[i] = (zeropos >= 1) ? String(buffers[i][1:(zeropos-1)]) : String(buffers[i])
    end
end
# Generic numeric method: reads `length(data)` elements starting at
# (`firstrow`, `firstelem`) directly into `data`.
function fits_read_col(
    f::FITSFile,
    colnum::Integer,
    firstrow::Integer,
    firstelem::Integer,
    data::Array,
)
    fits_assert_open(f)
    anynull = Ref{Cint}(0)  # set nonzero by cfitsio if null values were read
    status = Ref{Cint}(0)
    ccall(
        (:ffgcv, libcfitsio),
        Cint,
        (
            Ptr{Cvoid},
            Cint,
            Cint,
            Int64,
            Int64,
            Int64,
            Ptr{Cvoid},
            Ptr{Cvoid},
            Ref{Cint},
            Ref{Cint},
        ),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        colnum,
        firstrow,
        firstelem,
        length(data),
        C_NULL,  # no null-value substitution
        data,
        anynull,
        status,
    )
    fits_assert_ok(status[])
end
"""
    fits_write_col(f, colnum, firstrow, firstelem, data)

Write some data in one column of a ASCII/binary table.

If there is no room for the elements, new rows will be created. (It is therefore
useless to call [`fits_insert_rows`](@ref) if you only need to *append* elements
to the end of a table.)

* `f::FITSFile`: the file in which data will be written.
* `colnum::Integer`: the column number, where the value of the first column is `1`.
* `firstrow::Integer`: the data will be written from this row onwards.
* `firstelem::Integer`: specifies the position in the row where the first element
  will be written.
* `data::Array`: contains the elements that are to be written to the column of the table.
"""
# String-column method: every string is checked to be ASCII before being
# handed to cfitsio, since it is written verbatim into the file.
function fits_write_col(
    f::FITSFile,
    colnum::Integer,
    firstrow::Integer,
    firstelem::Integer,
    data::Array{String},
)
    fits_assert_open(f)
    for el in data
        fits_assert_isascii(el)
    end
    status = Ref{Cint}(0)
    ccall(
        (:ffpcls, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Cint, Int64, Int64, Int64, Ptr{Ptr{UInt8}}, Ref{Cint}),
        f.ptr,
        colnum,
        firstrow,
        firstelem,
        length(data),
        data,
        status,
    )
    fits_assert_ok(status[])
end
# Generic numeric method: writes `length(data)` elements starting at
# (`firstrow`, `firstelem`).
function fits_write_col(
    f::FITSFile,
    colnum::Integer,
    firstrow::Integer,
    firstelem::Integer,
    data::Array,
)
    fits_assert_open(f)
    status = Ref{Cint}(0)
    ccall(
        (:ffpcl, libcfitsio),
        Cint,
        (Ptr{Cvoid}, Cint, Cint, Int64, Int64, Int64, Ptr{Cvoid}, Ref{Cint}),
        f.ptr,
        cfitsio_typecode(eltype(data)),
        colnum,
        firstrow,
        firstelem,
        length(data),
        data,
        status,
    )
    fits_assert_ok(status[])
end
"""
    fits_insert_rows(f::FITSFile, firstrow::Integer, nrows::Integer)

Insert a number of rows equal to `nrows` after the row number `firstrow`.

The elements in each row are initialized to their default value: you can
modify them later using [`fits_write_col`](@ref).

Since the first row is at position 1, in order to insert rows *before*
the first one `firstrow` must be equal to zero.
"""
fits_insert_rows
"""
    fits_delete_rows(f::FITSFile, firstrow::Integer, nrows::Integer)

Delete `nrows` rows, starting from the one at position `firstrow`. The index of
the first row is 1.
"""
fits_delete_rows
# Generate `fits_insert_rows` ("ffirow") and `fits_delete_rows` ("ffdrow"),
# which share an identical calling convention.
for (a, b) in ((:fits_insert_rows, "ffirow"), (:fits_delete_rows, "ffdrow"))
    @eval begin
        function ($a)(f::FITSFile, firstrow::Integer, nrows::Integer)
            fits_assert_open(f)
            status = Ref{Cint}(0)
            ccall(
                ($b, libcfitsio),
                Cint,
                (Ptr{Cvoid}, Int64, Int64, Ref{Cint}),
                f.ptr,
                firstrow,
                nrows,
                status,
            )
            fits_assert_ok(status[])
        end
    end
end
"""
    libcfitsio_version() -> VersionNumber

Return the version of the underlying CFITSIO library

# Example
```julia
julia> libcfitsio_version()
v"3.37.0"
```
"""
function libcfitsio_version(version = fits_get_version())
    # fits_get_version reports the version as a float (e.g. 3.341f0).
    # Decode its digits into a proper version number: 3.341 -> v"3.34.1".
    scaled = round(Int, 1000 * version)
    major, rest = divrem(scaled, 1000)
    minor, patch = divrem(rest, 10)
    return VersionNumber(major, minor, patch)
end
end # module
| CFITSIO | https://github.com/JuliaAstro/CFITSIO.jl.git |
|
[
"MIT"
] | 1.4.2 | fc0abb338eb8d90bc186ccf0a47c90825952c950 | code | 25796 | using CFITSIO
using Test
using Aqua
# Run `fn` on a freshly created FITS file inside a temporary directory.
# If `fn` leaves the file open, a dummy image is written before deleting it,
# since the library errors out when closing a still-empty FITS file.
function tempfitsfile(fn)
    mktempdir() do dir
        filename = joinpath(dir, "temp.fits")
        fitsfile = fits_clobber_file(filename)
        fn(fitsfile)
        if fitsfile.ptr != C_NULL  # file handle still open after `fn`
            # write some data to file to avoid errors on closing
            data = ones(1)
            fits_create_img(fitsfile, data)
            fits_write_pix(fitsfile, data)
            fits_delete_file(fitsfile)
        end
    end
end
# Aqua.jl package-quality checks (method ambiguities, undefined exports,
# stale dependencies, etc.).
@testset "project quality" begin
    Aqua.test_all(CFITSIO)
end
# `create_test_file` : Create a simple FITS file for testing, with the
# given header string added after the required keywords. The length of
# `header` must be a multiple of 80. The purpose of creating such
# files is to test the parsing of non-standard FITS keyword records
# (non-standard files can't be created with cfitsio).
function create_test_file(fname::AbstractString, header::String)
    if length(header) % 80 != 0
        error("length of header must be multiple of 80")
    end
    stdhdr = "SIMPLE = T / file does conform to FITS standard BITPIX = -64 / number of bits per data pixel NAXIS = 2 / number of data axes NAXIS1 = 10 / length of data axis 1 NAXIS2 = 10 / length of data axis 2 EXTEND = T / FITS dataset may contain extensions "
    endline = "END "
    data = fill(0., (10, 10)) # 10x10 array of big-endian Float64 zeros
    # Use the do-block form so the stream is closed even if a write throws
    # (the previous explicit open/close pair leaked the handle on error).
    open(fname, "w") do f
        # write header
        write(f, stdhdr)
        write(f, header)
        write(f, endline)
        # pad the header up to the FITS block size (2880 bytes) with spaces
        block_position = (length(stdhdr) + length(header) + length(endline)) % 2880
        padding = (block_position == 0) ? 0 : 2880 - block_position
        write(f, " "^padding)
        # write data
        write(f, data)
        # pad the data block with zero bytes
        block_position = sizeof(data) % 2880
        padding = (block_position == 0) ? 0 : 2880 - block_position
        write(f, fill(0x00, (padding,)))
    end
    return nothing
end
# Write a Healpix map `pixels` to `filename` as a FITS binary table: a minimal
# primary HDU followed by a BINTABLE extension holding a single "SIGNAL"
# column plus the pixelization keywords read back by `readhealpix`.
function writehealpix(filename, pixels, nside, ordering, coordsys)
    # Column format: 1E = one Float32 per cell, 1D = one Float64 per cell.
    if eltype(pixels) == Float32
        tform = "1E"
    elseif eltype(pixels) == Float64
        tform = "1D"
    else
        # Fail early: previously any other eltype left `tform` undefined and
        # surfaced later as a confusing UndefVarError.
        error("writehealpix: unsupported pixel eltype $(eltype(pixels)); expected Float32 or Float64")
    end
    file = fits_clobber_file(filename)
    try
        fits_create_img(file, Int16, Int[])  # empty primary HDU
        fits_write_date(file)
        fits_movabs_hdu(file, 1)
        fits_create_binary_tbl(file, length(pixels), [("SIGNAL", tform, "")], "BINTABLE")
        fits_write_key(file, "PIXTYPE", "HEALPIX", "HEALPIX pixelization")
        fits_write_key(file, "ORDERING", ordering, "Pixel ordering scheme (either RING or NESTED)")
        fits_write_key(file, "NSIDE", nside, "Resolution parameter for HEALPIX")
        fits_write_key(file, "COORDSYS", coordsys, "Pixelization coordinate system")
        fits_write_comment(file, "G = galactic, E = ecliptic, C = celestial = equatorial")
        fits_write_col(file, 1, 1, 1, pixels)
    finally
        fits_close_file(file)  # always close, even if a write fails
    end
end
# Read back a Healpix map written by `writehealpix`, returning
# `(pixels, nside, ordering, coordsys)`.
function readhealpix(filename)
    file = fits_open_file(filename)
    try
        hdutype = fits_movabs_hdu(file, 2)  # move to the binary-table extension
        tform, tform_comment = fits_read_key_str(file, "TFORM1")
        if tform == "1E"
            T = Float32
        elseif tform == "1D"
            T = Float64
        else
            # Fail early with a clear message; previously an unexpected TFORM1
            # left `T` undefined and raised UndefVarError further down.
            error("readhealpix: unsupported TFORM1 $tform; expected \"1E\" or \"1D\"")
        end
        naxes, naxis_comment = fits_read_key_lng(file, "NAXIS")
        naxis, nfound = fits_read_keys_lng(file, "NAXIS", 1, naxes)
        nside, nside_comment = fits_read_key_lng(file, "NSIDE")
        npix = 12*nside*nside  # pixel count implied by the NSIDE keyword
        ordering, ordering_comment = fits_read_key_str(file, "ORDERING")
        coordsys, coordsys_comment = fits_read_key_str(file, "COORDSYS")
        pixels = zeros(T, npix)
        fits_read_col(file, 1, 1, 1, pixels)
        return pixels, nside, ordering, coordsys
    finally
        fits_close_file(file)  # always close, even if a read fails
    end
end
@testset "CFITSIO.jl" begin
@testset "file name and mode" begin
mktempdir() do dir
filename = joinpath(dir, "temp.fits")
f = fits_clobber_file(filename)
@test fits_file_name(f) == filename
@test fits_file_mode(f) == 1
# Write some data to the file
a = ones(2,2)
fits_create_img(f, eltype(a), [size(a)...])
fits_write_pix(f, a)
close(f)
f = fits_open_file(filename, 0)
@test fits_file_mode(f) == 0 == Int(CFITSIO.R)
close(f)
f = fits_open_file(filename, CFITSIO.R)
@test fits_file_mode(f) == 0 == Int(CFITSIO.R)
close(f)
f = fits_open_file(filename, 1)
@test fits_file_mode(f) == 1 == Int(CFITSIO.RW)
close(f)
f = fits_open_file(filename, CFITSIO.RW)
@test fits_file_mode(f) == 1 == Int(CFITSIO.RW)
close(f)
end
end
@testset "types" begin
for (T, code) in (
(UInt8, 11),
(Int8, 12),
(Bool, 14),
(String, 16),
(Cushort, 20),
(Cshort, 21),
(Cuint, 30),
(Cint, 31),
(UInt64, 80),
(Int64, 81),
(Float32, 42),
(Float64, 82),
(ComplexF32, 83),
(ComplexF64, 163),
)
@test cfitsio_typecode(T) == Cint(code)
end
for (T, code) in ((UInt8, 8), # BYTE_IMG
(Int16, 16), # SHORT_IMG
(Int32, 32), # LONG_IMG
(Int64, 64), # LONGLONG_IMG
(Float32, -32), # FLOAT_IMG
(Float64, -64), # DOUBLE_IMG
(Int8, 10), # SBYTE_IMG
(UInt16, 20), # USHORT_IMG
(UInt32, 40), # ULONG_IMG
(UInt64, 80)) # ULONGLONG_IMG
@test bitpix_from_type(T) == code
@test type_from_bitpix(Cint(code)) == type_from_bitpix(Val(Cint(code))) == T
@test type_from_bitpix(Int16(code)) == T
end
@test_throws MethodError type_from_bitpix(7)
@test_throws MethodError type_from_bitpix("BITPIX")
end
# test reading/writing Healpix maps as FITS binary tables using the Libcfitsio interface
mktempdir() do dir
filename = joinpath(dir, "temp.fits")
for T in (Float32, Float64)
nside = 4
npix = 12*nside*nside
pixels = rand(T, npix)
ordering = "NESTED"
coordsys = "G"
writehealpix(filename, pixels, nside, ordering, coordsys)
@test readhealpix(filename) == (pixels, nside, ordering, coordsys)
end
end
@testset "Miscellaneous" begin
# test that this function works and returns the right type.
@test typeof(libcfitsio_version()) === VersionNumber
# test it parses a number as intended.
@test libcfitsio_version(3.341) === VersionNumber(3, 34, 1)
@test libcfitsio_version(3.41f0) === VersionNumber(3, 41, 0)
end
@testset "hdu operations" begin
tempfitsfile() do f
# Create a few HDUs
a = ones(2,2)
fits_create_img(f, eltype(a), [size(a)...])
fits_write_pix(f, a)
a = ones(3,3)
fits_create_img(f, eltype(a), [size(a)...])
fits_write_pix(f, a)
a = ones(4,4)
fits_create_img(f, eltype(a), [size(a)...])
fits_write_pix(f, a)
@test fits_get_num_hdus(f) == 3
for i in 1:3
fits_movabs_hdu(f, i)
@test fits_get_hdu_num(f) == i
@test fits_get_hdu_type(f) == :image_hdu
end
for i = 1:2
fits_movrel_hdu(f, -1)
@test fits_get_hdu_num(f) == 3 - i
end
fits_movabs_hdu(f, 2)
fits_delete_hdu(f)
@test fits_get_num_hdus(f) == 2
@test fits_get_hdu_num(f) == 2
@test fits_get_img_size(f) == [4,4]
fits_movabs_hdu(f, 1)
fits_delete_hdu(f)
@test fits_get_num_hdus(f) == 2
@test fits_get_hdu_num(f) == 1
@test fits_get_img_dim(f) == 0
end
@testset "insert image" begin
tempfitsfile() do f
a = ones(2,2); b = similar(a)
fits_insert_img(f, a)
fits_write_pix(f, a)
fits_read_pix(f, b)
@test b == a
@test fits_get_num_hdus(f) == 1
a .*= 2
fits_insert_img(f, eltype(a), [size(a)...])
fits_write_pix(f, a)
fits_read_pix(f, b)
@test b == a
@test fits_get_num_hdus(f) == 2
fits_movabs_hdu(f, 1)
a .*= 2
fits_insert_img(f, a)
fits_write_pix(f, a)
fits_read_pix(f, b)
@test b == a
@test fits_get_num_hdus(f) == 3
# test that the HDU is added in the middle
fits_movabs_hdu(f, 1)
fits_read_pix(f, b)
@test b == ones(2,2)
fits_movabs_hdu(f, 3)
fits_read_pix(f, b)
@test b == ones(2,2) .* 2
fits_movabs_hdu(f, 2)
fits_read_pix(f, b)
@test b == ones(2,2) .* 4
end
end
end
@testset "image type/size" begin
tempfitsfile() do f
a = ones(2,2)
fits_create_img(f, eltype(a), [size(a)...])
fits_write_pix(f, a)
@test fits_get_img_dim(f) == 2
@test fits_get_img_size(f) == [2, 2]
@test fits_get_img_type(f) == -64
@test fits_get_img_equivtype(f) == -64
a = ones(Int64, 2,2)
fits_create_img(f, eltype(a), [size(a)...])
fits_write_pix(f, a)
@test fits_get_img_dim(f) == 2
@test fits_get_img_size(f) == [2, 2]
@test fits_get_img_type(f) == 64
@test fits_get_img_equivtype(f) == 64
end
end
@testset "read/write subset" begin
tempfitsfile() do f
a = rand(10,10)
b = similar(a)
fits_create_img(f, a)
fits_write_pix(f, a)
fits_read_subset(f, [1,1], [size(a)...], [1,1], b)
@test a == b
fits_write_subset(f, [1,1], [size(a,1),4], a)
c = zeros(eltype(a), size(a,1), 4)
fits_read_subset(f, [1,1], [size(a,1),4], [1,1], c)
@test a[:, 1:4] == c
a[:,1] .= NaN
fits_write_subset(f, [1,1], [size(a,1),4], a)
c .= 0
fits_read_subset(f, [1,1], [size(a,1),4], [1,1], 100.0, c)
@test all(==(100), c[:, 1])
@test c[:, 2:4] == a[:,2:4]
fits_read_subset(f, [1,1], [size(a,1),4], [1,1], 0.0, c)
@test all(isnan, c[:, 1])
@test c[:, 2:4] == a[:,2:4]
tempfitsfile() do f2
a = rand(20,20)
fits_create_img(f, a)
fits_write_pix(f, a)
fits_copy_image_section(f, f2, "1:20,1:10")
b = similar(a, 20, 10)
fits_read_pix(f2, b)
close(f2)
@test a[1:20, 1:10] == b
end
end
end
@testset "error message" begin
mktempdir() do dir
filename = joinpath(dir, "temp.fits")
try
f = fits_clobber_file(filename)
fits_close_file(f)
catch e
@test e isa CFITSIO.CFITSIOError
@test e isa Exception # bugfix test as CFITSIOError didn't subtype Exception in #3
io = IOBuffer()
Base.showerror(io, e)
errstr = String(take!(io))
@test occursin(r"Error code"i, errstr)
@test occursin(r"Error message"i, errstr)
finally
rm(filename, force=true)
end
end
end
@testset "closed file errors" begin
tempfitsfile() do f
# write arbitrary data to the file
a = ones(2,2)
fits_create_img(f, eltype(a), [size(a)...])
fits_write_pix(f, a)
close(f)
# check that closed files throw a julia error and don't segfault
for fn in [fits_file_name, fits_file_mode, fits_get_hdrspace, fits_write_date,
fits_hdr2str, fits_get_hdu_num, fits_get_hdu_type, fits_get_img_type,
fits_get_img_equivtype, fits_get_img_dim, fits_get_num_cols, fits_get_num_hdus,
fits_get_rowsize, fits_get_img_size, fits_get_img_size, fits_get_num_rows,
fits_delete_hdu,
]
@test_throws Exception fn(f)
end
for fn in [fits_read_key_str, fits_read_key_lng, fits_read_keyword, fits_write_comment,
fits_write_history, fits_write_record, fits_delete_key, fits_movnam_hdu, fits_get_colnum,
]
@test_throws Exception fn(f, "abc")
end
for fn in [fits_read_record, fits_read_keyn, fits_delete_record,
fits_movabs_hdu, fits_movrel_hdu, fits_get_coltype,
fits_get_eqcoltype, fits_read_tdim, ]
@test_throws Exception fn(f, 1)
end
for fn in [fits_insert_rows, fits_delete_rows, fits_read_descript, CFITSIO.fits_write_null_img]
@test_throws Exception fn(f, 1, 2)
end
for fn in [fits_write_pix, fits_read_pix]
@test_throws Exception fn(f, a)
end
for fn in [fits_write_pix, fits_read_pix]
@test_throws Exception fn(f, [1,1], length(a), a)
end
@test_throws Exception fits_read_pix(f, ones(Int, ndims(a)), length(a), zero(eltype(a)), a)
@test_throws Exception fits_read_keys_lng(f, "a", 1, 2)
for fn in [fits_write_key, fits_update_key]
@test_throws Exception fn(f, "a", 1, "b")
end
for fn in [fits_read_col, fits_write_col]
@test_throws Exception fn(f, 1, 1, 1, ["abc"])
@test_throws Exception fn(f, 1, 1, 1, ["abc", 1])
end
for fn in [fits_create_binary_tbl, fits_create_ascii_tbl]
@test_throws Exception fn(f, 1, [("name", "3D", "c")], "extname")
end
@test_throws Exception fits_update_key(f, "a", 1.0, "b")
@test_throws Exception fits_update_key(f, "a", nothing, "b")
@test_throws Exception fits_write_tdim(f, 1, [1, 2])
@test_throws Exception fits_read_subset(f, [1,1], [2,2], [1,1], a)
@test_throws Exception fits_write_subset(f, [1,1], [2,2], a)
@test_throws Exception fits_create_img(f, Int64, [2,3])
tempfitsfile() do f2
fits_create_img(f2, eltype(a), [size(a)...])
fits_write_pix(f2, a)
close(f2)
@test_throws Exception fits_copy_image_section(f, f2, "1:2")
@test_throws Exception fits_copy_image_section(f2, f, "1:2")
end
end
end
@testset "null values" begin
tempfitsfile() do f
# all values are nullified
a = ones(2,2)
nullarray = similar(a, UInt8)
fits_create_img(f, eltype(a), [size(a)...])
fits_write_pixnull(f, ones(Int, ndims(a)), length(a), a, 1.0)
fits_read_pix(f, a)
@test all(isnan, a)
# one values is nullified
a .= [1 2; 3 4]
fits_write_pixnull(f, a, 3.0)
fits_read_pix(f, a)
@test isnan(a[2,1])
@test !isnan(a[2,2]) && all(!isnan, a[1,:])
# one values is nullified, but replaced while being read back in
a .= [1 2; 3 4]
fits_write_pixnull(f, a, 3.0)
fits_read_pix(f, a, 3.0)
@test !any(isnan, a)
@test a[2,1] == 3.0
fits_write_pixnull(f, a, 3.0)
fits_read_pix(f, a, 0.0)
@test isnan(a[2,1])
fits_read_pix(f, a, C_NULL)
@test isnan(a[2,1])
# get the indices of null values
nullarray .= 0
fits_read_pixnull(f, a, nullarray)
@test nullarray[2,1] == 1
@test iszero(nullarray[2,2]) && all(iszero, nullarray[1,:])
nullarray .= 0
fits_read_pixnull(f, a, vec(nullarray))
@test nullarray[2,1] == 1
@test iszero(nullarray[2,2]) && all(iszero, nullarray[1,:])
@test_throws Exception fits_read_pixnull(f, a, similar(nullarray, 1))
# don't treat null values as special while writing out
a .= [1 2; 3 4]
fits_write_pixnull(f, a, C_NULL)
fits_read_pix(f, a)
@test !any(isnan, a)
a .= [1 2; NaN 4]
fits_write_pixnull(f, a, C_NULL)
fits_read_pix(f, a)
@test isnan(a[2,1])
@test !isnan(a[2,2]) && all(!isnan, a[1,:])
# test that tuples and vectors of pixels behave identically
a .= [1 2; NaN 4]
nullarray .= 0
nullarray2 = similar(nullarray)
fits_write_pixnull(f, a, C_NULL)
b = similar(a)
fits_read_pixnull(f, [1,1], length(b), b, nullarray)
fits_read_pixnull(f, (1,1), length(b), b, nullarray2)
@test nullarray == nullarray2
# Read in data by replacing null values with the specified value
for nullval in Any[7, 7.0], fpixel in Any[[1,1], (1,1)]
b .= 0
fits_read_pix(f, fpixel, length(b), nullval, b)
@test b[2,1] == nullval
end
for fpixel in Any[[1,1], (1,1)]
# Write data first by treating the value 2 as null
fits_write_pixnull(f, fpixel, length(b), a, 2)
b .= 0
fits_read_pix(f, fpixel, length(b), b)
@test isnan(b[1,2])
@test isnan(b[2,1])
# replace the null value by a specified one
for nullval in Any[7, 7.0]
b .= 0
fits_read_pix(f, fpixel, length(b), nullval, b)
@test b[2,1] == nullval
@test b[1,2] == nullval
end
end
# set a stretch of values to null (NaN in this case)
# for the test we set all values to null
fits_write_null_img(f, 1, length(a))
fits_read_pix(f, a)
@test all(isnan, a)
end
end
@testset "empty file" begin
tempfitsfile() do f
a = zeros(2,2)
@test_throws ErrorException fits_read_pix(f, a)
@test_throws ErrorException fits_read_pix(f, a, 1)
@test_throws ErrorException fits_read_pixnull(f, a, similar(a, UInt8))
end
end
@testset "non-Int64 types" begin
tempfitsfile() do f
# write arbitrary data to the file
a = ones(2,2)
fits_create_img(f, eltype(a), [size(a)...])
fits_write_pix(f, Int32[1,1], big(length(a)), a)
b = similar(a)
fits_read_pix(f, Int32[1,1], Int128(length(b)), b)
@test b == a
end
end
@testset "resize image" begin
tempfitsfile() do f
a = [1 2; 3 4];
fits_create_img(f, a);
fits_write_pix(f, a);
# resizing may take dimensions as a vector, a tuple, or change rank
fits_resize_img(f, [3,3]);
@test fits_get_img_size(f) == [3,3]
fits_resize_img(f, (4,4));
@test fits_get_img_size(f) == [4,4]
fits_resize_img(f, (7,));
@test fits_get_img_size(f) == [7]
# resizing by type changes BITPIX while keeping the dimensions
fits_resize_img(f, Float64);
@test type_from_bitpix(fits_get_img_type(f)) == Float64
fits_write_pix(f, Float64.(1:7))
b = zeros(Float64, 7)
fits_read_pix(f, b)
@test b == 1:7
end
end
@testset "tuples vs vectors" begin
tempfitsfile() do f
# every API that accepts pixel coordinates or sizes should treat
# tuple and vector arguments identically; each sub-testset performs
# the same operation both ways and compares results
a = Float64[1 3; 2 4]
b = similar(a); c = similar(a);
@testset "create" begin
fits_create_img(f, eltype(a), size(a))
fits_write_pix(f, a)
fits_read_pix(f, b)
fits_create_img(f, eltype(a), [size(a)...])
fits_write_pix(f, a)
fits_read_pix(f, c)
@test b == c
end
@testset "write" begin
fits_write_pix(f, [1,1], length(a), a)
fits_read_pix(f, b)
fits_write_pix(f, (1,1), length(a), a)
fits_read_pix(f, c)
@test b == c
end
@testset "size" begin
# Val(2) requests the size as an NTuple{2} instead of a vector
@test fits_get_img_size(f, Val(2)) == (2,2)
end
@testset "read" begin
@testset "full image" begin
fits_read_pix(f, b)
@test b == a
# test that vectors and tuples of pixels behave identically
fits_read_pix(f, [1,1], length(b), b)
fits_read_pix(f, (1,1), length(c), c)
@test b == c
end
@testset "subset" begin
b .= 0
# test that vectors and tuples of pixels behave identically
fits_read_subset(f, (1,1), (2,1), (1,1), @view b[:,1])
fits_read_subset(f, [1,1], [2,1], [1,1], @view b[:,2])
@test @views b[:,1] == b[:,2]
end
end
@testset "subset" begin
fits_create_img(f, eltype(a), (size(a,1),))
fits_write_subset(f, [1,1], [2,1], a)
fits_read_pix(f, @view b[:,1])
fits_write_subset(f, (1,1), (2,1), a)
fits_read_pix(f, @view b[:, 2])
@test @views b[:,1] == b[:,2]
b .= 0
fits_write_subset(f, [1,1], [2,1], a)
fits_read_subset(f, [1,1], [2,1], [1,1], @view b[:,1])
fits_read_subset(f, (1,1), (2,1), (1,1), @view b[:,2])
@test @views b[:,1] == b[:,2]
b .= 0
# null values (NaN) in the written data are replaced by 4 on read
fits_write_subset(f, [1,1], [2,1], replace(a, 2 => NaN))
fits_read_subset(f, [1,1], [2,1], [1,1], 4, @view b[:,1])
fits_read_subset(f, (1,1), (2,1), (1,1), 4, @view b[:,2])
@test @views b[:,1] == b[:,2]
@test b[2,1] == b[2,2] == 4
end
end
end
@testset "extended filename parser" begin
filename = tempname()
a = [1 3; 2 4]
b = similar(a)
try
# for filenames that don't contain extended format specifications,
# the diskfile functions are equivalent to the file ones
for createfn in [fits_create_file, fits_create_diskfile]
f = createfn(filename)
fits_create_img(f, a)
fits_write_pix(f, a)
close(f)
# Extended format: flipping the image
f = fits_open_file(filename*"[*,-*]")
fits_read_pix(f, b)
@test a[:, [2,1]] == b
close(f)
f = fits_open_file(filename*"[-*,*]")
fits_read_pix(f, b)
@test a[[2,1], :] == b
close(f)
# without extended format
f = fits_open_diskfile(filename)
fits_read_pix(f, b)
@test a == b
close(f)
rm(filename)
end
finally
# guarantee cleanup even if an assertion above throws
rm(filename, force = true)
end
# the diskfile functions may include [] in the filenames,
# which the extended parser in fits_create_file would misinterpret
filename2 = filename * "[abc].fits"
@test_throws CFITSIO.CFITSIOError fits_create_file(filename2)
try
f = fits_create_diskfile(filename2)
fits_create_img(f, a)
fits_write_pix(f, a)
fits_read_pix(f, b)
@test a == b
close(f)
f = fits_open_diskfile(filename2)
fits_read_pix(f, b)
@test a == b
close(f)
@test_throws CFITSIO.CFITSIOError fits_open_file(filename2)
finally
rm(filename2, force = true)
end
end
@testset "stdout/stdin streams" begin
# We redirect the output streams to a temp file to avoid cluttering the output
# At present this doesn't work completely, as there is some output from fits_create_img
# that is not captured
mktemp() do _, io
redirect_stdout(io) do
# "-" writes FITS data to stdout; "stdout.gz" additionally gzips it
for fname in ["-", "stdout.gz"]
f = fits_create_file(fname);
for a in Any[[1 2; 3 4], Float64[1 2; 3 4]]
b = similar(a)
fits_create_img(f, a)
fits_write_pix(f, a)
fits_read_pix(f, b)
@test a == b
end
close(f)
end
end
end
end
end
| CFITSIO | https://github.com/JuliaAstro/CFITSIO.jl.git |
|
[
"MIT"
] | 1.4.2 | fc0abb338eb8d90bc186ccf0a47c90825952c950 | docs | 1158 | # CFITSIO.jl
[](https://github.com/JuliaAstro/CFITSIO.jl/actions)
[](https://juliaci.github.io/NanosoldierReports/pkgeval_badges/report.html)
[](https://codecov.io/gh/JuliaAstro/CFITSIO.jl)
[](https://JuliaAstro.github.io/CFITSIO.jl/stable)
[](https://JuliaAstro.github.io/CFITSIO.jl/dev)
## C-style interface to CFITSIO functions
- Function names closely mirror the C interface (e.g., `fits_open_file()`).
- Functions operate on `FITSFile`, a thin wrapper for `fitsfile` C struct
(`FITSFile` has concept of "current HDU", as in CFITSIO).
- Note that the wrapper functions *do* check the return status from CFITSIO
and throw an error with the appropriate message.
For more information and usage examples, please visit the [documentation](https://JuliaAstro.github.io/CFITSIO.jl/dev).
| CFITSIO | https://github.com/JuliaAstro/CFITSIO.jl.git |
|
[
"MIT"
] | 1.4.2 | fc0abb338eb8d90bc186ccf0a47c90825952c950 | docs | 6448 | ```@meta
CurrentModule = CFITSIO
```
# CFITSIO.jl
[](https://github.com/juliaastro/CFITSIO.jl)
[](https://github.com/JuliaAstro/CFITSIO.jl/actions)
[](https://juliaci.github.io/NanosoldierReports/pkgeval_badges/report.html)
[](https://codecov.io/gh/JuliaAstro/CFITSIO.jl)
This module provides an interface familiar to users of the [CFITSIO](http://heasarc.gsfc.nasa.gov/fitsio/) C library. It can be used with
```julia
using CFITSIO
```
The functions exported by this module operate on `FITSFile` objects,
which is a thin wrapper around a pointer to a CFITSIO `fitsfile`. For
the most part, the functions are thin wrappers around the CFITSIO
routines of the same names. Typically, they:
* Convert from Julia types to C types as necessary.
* Check the returned status value and raise an appropriate exception if
non-zero.
The following tables give the correspondences between CFITSIO "types",
the BITPIX keyword and Julia types.
## Type Conversions
### CFITSIO Types
| CODE | CFITSIO | Julia |
|----------------------: |-------------- |------------------ |
| | int | `Cint` |
| | long | `Clong` |
| | LONGLONG | `Int64` |
### FITS BITPIX
| CODE | CFITSIO | Julia |
|----------------------: |-------------- |------------------ |
| 8 | BYTE_IMG | `UInt8` |
| 16 | SHORT_IMG | `Int16` |
| 32 | LONG_IMG | `Int32` |
| 64 | LONGLONG_IMG | `Int64` |
| -32 | FLOAT_IMG | `Float32` |
| -64 | DOUBLE_IMG | `Float64` |
### CFITSIO Aliases
| CODE | CFITSIO | Julia | Comments |
|----------------------: |-------------- |------------------ |:-------------------------------------------------------- |
| 10 | SBYTE_IMG | `Int8` | written as: BITPIX = 8, BSCALE = 1, BZERO = -128 |
| 20 | USHORT_IMG | `UInt16` | written as: BITPIX = 16, BSCALE = 1, BZERO = 32768 |
| 40 | ULONG_IMG | `UInt32` | written as: BITPIX = 32, BSCALE = 1, BZERO = 2147483648 |
| 80 | ULONGLONG_IMG | `UInt64` | written as: BITPIX = 64, BSCALE = 1, BZERO = 9223372036854775808 |
### FITS Table Data Types
| CODE | CFITSIO | Julia |
|----------------------: |-------------- |------------------- |
| 1 | TBIT | |
| 11 | TBYTE | `Cuchar`, `UInt8` |
| 12 | TSBYTE | `Cchar`, `Int8` |
| 14 | TLOGICAL | `Bool ` |
| 16 | TSTRING | `String ` |
| 20 | TUSHORT | `Cushort` |
| 21 | TSHORT | `Cshort` |
| 30 | TUINT | `Cuint` |
| 31 | TINT | `Cint` |
| 40 | TULONG | `Culong` |
| 41 | TLONG | `Clong` |
| 42 | TFLOAT | `Cfloat` |
| 80 | TULONGLONG | `UInt64` |
| 81 | TLONGLONG | `Int64` |
| 82 | TDOUBLE | `Cdouble` |
| 83 | TCOMPLEX | `Complex{Cfloat}` |
| 163 | TDBLCOMPLEX | `Complex{Cdouble}` |
```@docs
bitpix_from_type
type_from_bitpix
cfitsio_typecode
```
## File access
```@docs
fits_create_file
fits_create_diskfile
fits_clobber_file
fits_open_file
fits_open_diskfile
fits_open_table
fits_open_image
fits_open_data
fits_close_file
fits_delete_file
fits_file_name
fits_file_mode
```
## HDU Routines
The functions described in this section change the current
HDU and to find their number and type. The following is a short
example which shows how to use them:
```julia
num = fits_get_num_hdus(f)
println("Number of HDUs in the file: ", num)
for i = 1:num
hdu_type = fits_movabs_hdu(f, i)
println(i, ") hdu_type = ", hdu_type)
end
```
```@docs
fits_get_num_hdus
fits_movabs_hdu
fits_movrel_hdu
fits_movnam_hdu
fits_delete_hdu
```
## Header Keyword Routines
```@docs
fits_get_hdrspace
fits_read_keyword
fits_read_record
fits_read_keyn
fits_write_key
fits_write_record
fits_delete_record
fits_delete_key
fits_hdr2str
```
## Image HDU Routines
```@docs
fits_get_img_size
fits_create_img
fits_insert_img
fits_write_pix
fits_write_pixnull
fits_write_subset
fits_read_pix
fits_read_pixnull
fits_read_subset
fits_copy_image_section
fits_write_null_img
fits_resize_img
```
## Table Routines
There are two functions to create a new HDU table extension:
`fits_create_ascii_table` and `fits_create_binary_table`. In general,
one should pick the second as binary tables require less space on the
disk and are more efficient to read and write. (Moreover, a few
datatypes are not supported in ASCII tables). In order to create a
table, the programmer must specify the characteristics of each column
by passing an array of tuples. Here is an example:
```julia
f = fits_create_file("!new.fits")
coldefs = [("SPEED", "1D", "m/s"),
("MASS", "1E", "kg"),
("PARTICLE", "20A", "Name")]
fits_create_binary_tbl(f, 10, coldefs, "PARTICLE")
```
This example creates a table with room for 10 entries, each of them
describing the characteristics of a particle: its speed, its mass, and
its name (codified as a 20-character string). See the documentation of
`fits_create_ascii_tbl` for more details.
```@docs
fits_create_ascii_tbl
fits_create_binary_tbl
fits_get_coltype
fits_insert_rows
fits_delete_rows
fits_read_col
fits_write_col
```
## Miscellaneous
```@docs
libcfitsio_version
```
| CFITSIO | https://github.com/JuliaAstro/CFITSIO.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 931 | using Documenter
using ClearSky
DocMeta.setdocmeta!(ClearSky, :DocTestSetup, :(using ClearSky); recursive=true)
makedocs(;
modules=[ClearSky],
authors="Mark Baum",
repo="https://github.com/wordsworthgroup/ClearSky.jl/blob/{commit}{path}#{line}",
sitename="ClearSky.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://wordsworthgroup.github.io/ClearSky.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
"Absorption Data" => "absorption_data.md",
"Line Shapes" => "line_shapes.md",
"Gas Objects" => "gas_objects.md",
"Atmospheric Profiles" => "atmospheric_profiles.md",
"Modeling" => "modeling.md",
"Orbits & Insolation" => "orbits_insolation.md"
],
)
deploydocs(;
repo="github.com/wordsworthgroup/ClearSky.jl",
devbranch="main",
versions=["stable"=>"v^"]
)
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 525 | module ClearSky
using Base: tail
using Base.Threads: @threads
using QuadGK: gauss
using Cubature
using BasicInterpolators
using ScalarRadau
#order matters
include("constants.jl")
include("util.jl")
include("molparam.jl")
include("par.jl")
include("faddeyeva.jl")
include("line_shapes.jl")
include("gases.jl")
include("collision_induced_absorption.jl")
include("atmospherics.jl")
include("radiation.jl")
include("modeling_core.jl")
include("modeling.jl")
include("orbits.jl")
include("insolation.jl")
using .Faddeyeva
end
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 15281 | #minimum pressure in temperature profiles and floor for hydrostatic profile
const PMIN = 1e-9
#-------------------------------------------------------------------------------
#constructing generalized hydrostatic pressure profiles and inverting for z
export scaleheight, hydrostatic, altitude
export Hydrostatic
"""
scaleheight(g, ΞΌ, T)
Evaluate the [atmospheric scale height](https://en.wikipedia.org/wiki/Scale_height),
``\\frac{RT}{ΞΌg}``
where ``R`` is the [universial gas constant](https://en.wikipedia.org/wiki/Gas_constant).
# Arguments
* `g`: gravitational acceleration [m/s``^s``]
* `ΞΌ`: mean molar mass [kg/mole]
* `T`: temperature [K]
"""
#e-folding distance of pressure in an isothermal hydrostatic atmosphere, R*T/(mu*g)
scaleheight(g, ΞΌ, T)::Float64 = π*T/(ΞΌ*g)
#parameterized hydrostatic relation in log coordinates
#right-hand side of the ODE d(lnP)/dz = -mu*g/(R*T), integrated by radau
function dlnPdz(z, lnP, param::Tuple)::Float64
#unpack parameters
Pβ, g, fT, fΞΌ = param
#evaluate temperature and mean molar mass [kg/mole]
P = exp(lnP)
#below the pressure floor, freeze the integration (zero derivative)
if P < PMIN
return 0.0
end
P = min(P, Pβ) #don't allow tiny numerical dips below Pβ
T = fT(P)
ΞΌ = fΞΌ(T, P)
#evaluate derivative
-ΞΌ*g/(π*T)
end
"""
hydrostatic(z, Pβ, g, fT, fΞΌ)
Compute the hydrostatic pressure [Pa] at a specific altitude using arbitrary atmospheric profiles of temperature and mean molar mass
# Arguments
* `z`: altitude [m] to compute pressure at
* `Pβ`: surface pressure [Pa]
* `g`: gravitational acceleration [m/s``^2``]
* `fT`: temperature [K] as a function of pressure, `fT(P)`
* `fΞΌ`: mean molar mass [kg/mole] as a function of pressure and temperature `fΞΌ(T,P)`
"""
function hydrostatic(z, Psurf, g, fT::F, fmu::G)::Float64 where {F,G}
    #validate inputs with thrown exceptions rather than @assert,
    #which can be disabled at higher optimization levels
    z >= 0 || throw(ArgumentError("cannot compute pressure at negative altitude $z m"))
    Psurf > PMIN || throw(ArgumentError("surface pressure must be greater than $PMIN Pa, got $Psurf Pa"))
    #parameters forwarded to the ODE right-hand side dlnPdz
    param = (Psurf, g, fT, fmu)
    #integrate in log-pressure coordinates for numerical stability
    lnP = radau(dlnPdz, log(Psurf), 0, z, param)
    #convert back from log coordinates
    exp(lnP)
end
"""
altitude(P, Pβ, g, fT, fΞΌ)
Compute the altitude [m] at which a specific hydrostatic pressure occurs using arbitrary atmospheric profiles of temperature and mean molar mass
# Arguments
* `P`: pressure [Pa] to compute altitude at
* `Pβ`: surface pressure [Pa]
* `g`: gravitational acceleration [m/s``^2``]
* `fT`: temperature [K] as a function of pressure, `fT(P)`
* `fΞΌ`: mean molar mass [kg/mole] as a function of pressure and temperature `fΞΌ(T,P)`
"""
function altitude(P, Psurf, g, fT::F, fmu::G)::Float64 where {F,G}
    #hydrostatic pressure decreases monotonically with height, so grow the
    #upper end of an altitude bracket by doubling until the pressure there
    #has dropped below the target pressure P
    zlo = 0.0
    zhi = 1e2
    Phi = hydrostatic(zhi, Psurf, g, fT, fmu)
    while Phi > P
        zlo = zhi
        zhi *= 2
        Phi = hydrostatic(zhi, Psurf, g, fT, fmu)
    end
    #locate the altitude where the log-pressure difference vanishes
    falseposition((z,p) -> log(hydrostatic(z, Psurf, g, fT, fmu)) - log(P), zlo, zhi)
end
"""
[Function-like type](https://docs.julialang.org/en/v1/manual/methods/#Function-like-objects) for initializing and evaluating a hydrostatic pressure profile with arbitrary temperature and mean molar mass profiles. A `Hydrostatic` object maps altitude to pressure.
# Constructor
Hydrostatic(Pβ, Pβ, g, fT, fΞΌ, N=1000)
* `Pβ`: surface pressure [Pa]
* `Pβ`: top of profile pressure [Pa]
* `g`: gravitational acceleration [m/s``^2``]
* `fT`: temperature [K] as a function of presssure, `fT(P)`
* `fΞΌ`: mean molar mass [kg/mole] as a function of temperature and pressure, `fΞΌ(T,P)`
* `N`: optional, number of interpolation nodes
For a constant molar mass or temperature, you can use [anonymous functions](https://docs.julialang.org/en/v1/manual/functions/#man-anonymous-functions) directly. For example, to construct a hydrostatic pressure profile for a crude Earth-like atmosphere:
```@example
#moist adiabatic temperature profile
M = MoistAdiabat(288, 1e5, 1040, 1996, 0.029, 0.018, 2.3e6, psatH2O, Ptropo=1e4);
#hydrostatic pressure profile with constant mean molar mass
H = Hydrostatic(1e5, 1, 9.8, M, (T,P)->0.029);
#evaluate pressures at a few different altitudes
H.([0, 1e3, 1e4])
```
"""
struct Hydrostatic
#interpolator of ln(pressure) as a function of altitude [m]
Ο::LinearInterpolator{Float64,WeakBoundaries}
#altitude at the top of the profile [m]
zβ::Float64
end
#build the profile by integrating the hydrostatic ODE once, then sampling
#it at N log-spaced altitudes for fast interpolation
function Hydrostatic(Pβ, Pβ, g, fT::T, fΞΌ::U, N::Int=1000) where {T,U}
#find the altitude corresponding to Pβ
zβ = altitude(Pβ, Pβ, g, fT, fΞΌ)
#interpolation knots and output array
z = logrange(0, zβ, N)
lnP = zeros(Float64, N)
#integrate to get a full pressure profile
radau!(lnP, z, dlnPdz, log(Pβ), 0, zβ, (Pβ, g, fT, fΞΌ))
#construct and return
Hydrostatic(LinearInterpolator(z, lnP, WeakBoundaries()), zβ)
end
#evaluate pressure [Pa] at altitude z [m]
(H::Hydrostatic)(z)::Float64 = exp(H.Ο(z))
"""
altitude(H::Hydrostatic, P)
Compute the altitude at which a specific pressure occurs in a [`Hydrostatic`](@ref) pressure profile.
"""
function altitude(H::Hydrostatic, P::Real)::Float64
#root of the log-pressure difference within the profile's altitude range
falseposition((z,p) -> log(H(z)) - log(P), 0.0, H.zβ)
end
#-------------------------------------------------------------------------------
#general function for adiabat with one condensable in bulk non-condensable
#lapse rate dT/d(lnP) for a pseudoadiabat; reduces to the dry adiabat when
#the condensible mixing ratio is zero
function dTdΟ(Ο, T, cββ, cβα΅₯, Rβ, Rα΅₯, L, psat::F)::Float64 where {F}
#molar mixing ratio of condensible
Ξ± = psat(T)/Ο2P(Ο)
#whole expression at once
-T*(Rβ/cββ)*(1 + Ξ±*L/(Rβ*T))/(1 + Ξ±*(cβα΅₯/cββ + (L/(T*Rα΅₯) - 1)*L/(cββ*T)))
end
#convenience method unpacking a parameter tuple (for ODE integration)
dTdΟ(Ο, T, param::Tuple)::Float64 = dTdΟ(Ο, T, param...)
#------------------------------------
abstract type AbstractAdiabat end
export MoistAdiabat, DryAdiabat
export tropopause
#validate adiabat construction parameters
#  Ts: surface temperature [K], Ps: surface pressure [Pa],
#  Pt: top-of-profile pressure [Pa], Tstrat: stratosphere temperature floor [K],
#  Ptropo: tropopause pressure [Pa]
#throws ArgumentError (instead of @assert, which may be compiled out, or a
#bare String, which is not an Exception) on invalid combinations
function checkadiabat(Ts, Ps, Pt, Tstrat, Ptropo)
    Ps > Pt || throw(ArgumentError("surface pressure must be greater than top-of-profile pressure"))
    Pt > 0 || throw(ArgumentError("top-of-profile pressure must be greater than 0"))
    if Tstrat > 0
        #a stratosphere floor above the surface temperature is unphysical
        Tstrat < Ts || throw(ArgumentError("Tstrat cannot be greater than the surface temperature"))
    end
    if (Tstrat != 0) & (Ptropo != 0)
        #the two stratosphere definitions are mutually exclusive
        throw(ArgumentError("cannot have nonzero Tstrat and Ptropo, must use one or the other"))
    end
    nothing
end
#------------------------------------
"""
[Function-like type](https://docs.julialang.org/en/v1/manual/methods/#Function-like-objects) for initializing and evaluating a dry adiabatic temperature profile. Optional uniform upper atmospheric temperature below a specified temperature or pressure.
# Constructor
DryAdiabat(Tβ, Pβ, cβ, ΞΌ, Pβ=$PMIN; Tstrat=0.0, Ptropo=0.0)
* `Tβ`: surface temperature [K]
* `Pβ`: surface pressure [K]
* `Pβ`: highest allowable pressure (can be small but not zero) [Pa]
* `cβ`: specific heat of the atmosphere [J/kg/K]
* `ΞΌ`: molar mass of the atmosphere [kg/mole]
* `Pβ`: highest pressure in the temperature profile (should generally be small to avoid evaluating out of range) [Pa]
If `Tstrat` is greater than zero, the temperature profile will not drop below that temperature. If `Ptropo` is greater than zero, the temperature profile at pressures lower than `Ptropo` will be equal to the temperature at exactly `Ptropo`. `Tstrat` and `Ptropo` cannot be greater than zero simultaneously.
# Example
Once constructed, use a `DryAdiabat` like a function to compute temperature at a given pressure.
```@example
Tβ = 288; #surface temperature [K]
Pβ = 1e5; #surface pressure [Pa]
cβ = 1040; #specific heat of air [J/kg/K]
ΞΌ = 0.029; #molar mass of air [kg/mole]
#construct the dry adiabat with an upper atmosphere temperature of 190 K
D = DryAdiabat(Tβ, Pβ, cβ, ΞΌ, Tstrat=190);
#temperatures at 40-10 kPa
D.([4e4, 3e4, 2e4, 1e4])
```
"""
struct DryAdiabat <: AbstractAdiabat
#surface temperature [K]
Tβ::Float64
#surface pressure [Pa]
Pβ::Float64
#top-of-profile pressure [Pa]
Pβ::Float64
#specific heat of the atmosphere [J/kg/K]
cβ::Float64
#mean molar mass [kg/mole]
ΞΌ::Float64
#stratosphere temperature floor [K], 0 disables
Tstrat::Float64
#tropopause pressure [Pa], 0 disables
Ptropo::Float64
end
#outer constructor: validates parameters before building the struct
function DryAdiabat(Tβ, Pβ, cβ, ΞΌ, Pβ=PMIN; Tstrat=0, Ptropo=0)
checkadiabat(Tβ, Pβ, Pβ, Tstrat, Ptropo)
DryAdiabat(Tβ, Pβ, Pβ, cβ, ΞΌ, Tstrat, Ptropo)
end
#------------------------------------
"""
[Function-like type](https://docs.julialang.org/en/v1/manual/methods/#Function-like-objects) for initializing and evaluating a moist adiabatic temperature profile. Optional uniform upper atmospheric temperature below a specified temperature or pressure.
# Constructor
MoistAdiabat(Tβ, Pβ, cββ, cβα΅₯, ΞΌβ, ΞΌα΅₯, L, psat, Pβ=$PMIN; Tstrat=0, Ptropo=0, N=1000)
* `Tβ`: surface temperature [K]
* `Pβ`: surface pressure [K]
* `cββ`: specific heat of the non-condensible atmospheric component (air) [J/kg/K]
* `cβα΅₯`: specific heat of the condensible atmospheric component [J/kg/K]
* `ΞΌβ`: molar mass of the non-condensible atmospheric component (air) [kg/mole]
* `ΞΌα΅₯`: molar mass of the condensible atmospheric component [kg/mole]
* `L`: condsible component's latent heat of vaporization [J/kg]
* `psat`: function defining the saturation vapor pressure for a given temperature, `psat(T)`
* `Pβ`: highest pressure in the temperature profile (should generally be small to avoid evaluating pressures out of range) [Pa]
If `Tstrat` is greater than zero, the temperature profile will not drop below that temperature. If `Ptropo` is greater than zero, the temperature profile at pressures lower than `Ptropo` will be equal to the temperature at exactly `Ptropo`. `Tstrat` and `Ptropo` cannot be greater than zero simultaneously.
The profile is evaluated along a number of pressure values in the atmosphere set by `N`. Those points are then used to construct a cubic spline interpolator for efficient and accurate temperature calculation. Experience indicates that 1000 points is very accurate and also fast.
# Example
Once constructed, use a `MoistAdiabat` like a function to compute temperature at a given pressure.
```@example
Tβ = 288; #surface temperature [K]
Pβ = 1e5; #surface pressure [Pa]
cββ = 1040; #specific heat of air [J/kg/K]
cβα΅₯ = 1996; #specific heat of H2O [J/kg/K]
ΞΌβ = 0.029; #molar mass of air [kg/mole]
ΞΌα΅₯ = 0.018; #molar mass of H2O [kg/mole]
L = 2.3e6; #H2O latent heat of vaporization [J/kg]
#a saturation vapor pressure function for H2O is built in
psat = psatH2O;
#construct the moist adiabat with a tropopause pressure of 1e4 Pa
M = MoistAdiabat(Tβ, Pβ, cββ, cβα΅₯, ΞΌβ, ΞΌα΅₯, L, psat, Ptropo=1e4);
#temperatures at 30-5 kPa
M.([3e4, 2e4, 1e4, 5e3])
```
"""
struct MoistAdiabat <: AbstractAdiabat
#interpolator of temperature [K] over log-pressure coordinates
Ο::LinearInterpolator{Float64,WeakBoundaries}
#surface pressure [Pa]
Pβ::Float64
#top-of-profile pressure [Pa]
Pβ::Float64
#stratosphere temperature floor [K], 0 disables
Tstrat::Float64
#tropopause pressure [Pa], 0 disables
Ptropo::Float64
end
#outer constructor: integrates the pseudoadiabatic lapse rate once and
#stores the result in an interpolator for fast evaluation
function MoistAdiabat(Tβ, Pβ, cββ, cβα΅₯, ΞΌβ, ΞΌα΅₯, L, psat::F,
Pβ=PMIN;
Tstrat=0.0,
Ptropo=0.0,
N::Int=1000) where {F<:Function}
checkadiabat(Tβ, Pβ, Pβ, Tstrat, Ptropo)
#interpolation knots and output vector
Οβ, Οβ = P2Ο(Pβ, Pβ)
Ο = logrange(Οβ, Οβ, N)
T = zeros(Float64, N)
#pack the parameters
param = (cββ, cβα΅₯, π/ΞΌβ, π/ΞΌα΅₯, L, psat)
#integrate with in-place dense output
radau!(T, Ο, dTdΟ, Tβ, Ο[1], Ο[end], param)
#natural spline in log pressure coordinates
itp = LinearInterpolator(Ο, T, WeakBoundaries())
MoistAdiabat(itp, Pβ, Pβ, Tstrat, Ptropo)
end
#------------------------------------
#general operations with an adiabat
#direct calculation
temperature(Ξ::DryAdiabat, P)::Float64 = Ξ.Tβ*(P/Ξ.Pβ)^(π/(Ξ.ΞΌ*Ξ.cβ))
#coordinate conversion and interpolation
temperature(Ξ::MoistAdiabat, P)::Float64 = Ξ.Ο(P2Ο(P))
#find the pressure corresponding to a temperature (ignores Tstrat, Ptropo)
function pressure(Ξ::AbstractAdiabat, T)::Float64
Tβ = temperature(Ξ, Ξ.Pβ)
Tβ = temperature(Ξ, Ξ.Pβ)
@assert Tβ >= T >= Tβ "temperature $T K out of adiabat range [$(Tβ),$(Tβ)] K"
falseposition((P,p) -> temperature(Ξ, P) - T, Ξ.Pβ, Ξ.Pβ)
end
function (Ξ::AbstractAdiabat)(P)::Float64
#check bounds
if (P < Ξ.Pβ) && !(P β Ξ.Pβ)
throw("Adiabat defined within $(Ξ.Pβ) and $(Ξ.Pβ) Pa, $P Pa is too low.")
end
if (P > Ξ.Pβ) && !(P β Ξ.Pβ)
throw("Adiabat defined within $(Ξ.Pβ) and $(Ξ.Pβ) Pa, $P Pa is too high.")
end
#check if pressure is below tropopause
if P < Ξ.Ptropo
return temperature(Ξ, Ξ.Ptropo)
end
#what the temperature would be without any floor
T = temperature(Ξ, P)
#apply the floor, if desired
if T < Ξ.Tstrat
return Ξ.Tstrat
end
#ensure positive
@assert T > 0 "non-positive temperature ($T K) encountered in adiabat at $P Pa"
return T
end
"""
tropopause(Ξ::AbstractAdiabat)
Compute the temperature [K] and pressure [Pa] at which the tropopause occurs in an adiabatic temperature profile. This function can be called on a `DryAdiabat` or a `MoistAdiabat` if it was constructed with nonzero `Tstrat` or `Ptropo`. Returns a tuple, `(T,P)`.
"""
function tropopause(Ξ::AbstractAdiabat)::Tuple{Float64,Float64}
#a fixed tropopause pressure takes precedence; otherwise invert the
#profile to find where the stratosphere temperature floor is reached
if Ξ.Ptropo != 0
return temperature(Ξ, Ξ.Ptropo), Ξ.Ptropo
end
if Ξ.Tstrat != 0
return Ξ.Tstrat, pressure(Ξ, Ξ.Tstrat)
end
error("no stratosphere temperature or pressure has been defined (Tstrat/Ptropo)")
end
#-------------------------------------------------------------------------------
export psatH2O, tsatCO2, ozonelayer
"""
psatH2O(T)
Compute the saturation partial pressure of water vapor at a certain temperature using expressions from
* [Murphy, D. M. & Koop, T. Review of the vapour pressures of ice and supercooled water for atmospheric applications. Q. J. R. Meteorol. Soc. 131, 1539β1565 (2005).](https://rmets.onlinelibrary.wiley.com/doi/10.1256/qj.04.94)
"""
function psatH2O(T)::Float64
    #saturation vapor pressure [Pa] of water from Murphy & Koop (2005):
    #equation 7 over ice below 273.15 K, equation 10 over (possibly
    #supercooled) liquid water at and above 273.15 K
    lnT = log(T)
    invT = 1/T
    if T < 273.15
        #equation 7: vapor pressure over ice
        return exp(9.550426 - 5723.265*invT + 3.53068*lnT - 0.00728332*T)
    end
    #equation 10: vapor pressure over liquid water
    poly = 53.878 - 1331.22*invT - 9.44523*lnT + 0.014025*T
    corr = poly*tanh(0.0415*(T - 218.8))
    exp(54.842763 - 6763.22*invT - 4.21*lnT + 3.67e-4*T + corr)
end
"""
tsatCO2(P)
Compute the saturation pressure of carbon dioxide at a certain pressure using an expression from Fanale et al. (1982)
"""
function tsatCO2(P)::Float64
    #saturation temperature [K] of CO2 at pressure P [Pa], Fanale et al. (1982);
    #validate with a thrown ArgumentError rather than @assert, which can be
    #disabled at higher optimization levels
    P <= 518000.0 || throw(ArgumentError("pressure cannot be above 518000 Pa for CO2 saturation temperature, got $P Pa"))
    -3167.8/(log(0.01*P) - 23.23)
end
"""
ozonelayer(P, Cmax=8e-6)
Approximate the molar concentration of ozone in Earth's ozone layer using an 8 ppm peak at 1600 Pa which falls to zero at 100 Pa and 25500 Pa. Peak concentration is defined by `Cmax`.
"""
function ozonelayer(P, Cmax::Float64=8e-6)::Float64
    #triangular (in log-pressure) ozone concentration profile: peak of Cmax
    #at 1600 Pa, dropping linearly to zero at 25500 Pa and 100 Pa
    lnP = log(P)
    lnbottom = 10.146433731146518 #ln(25500)
    lnpeak = 7.3777589082278725 #ln(1600)
    lntop = 4.605170185988092 #ln(100)
    if lnpeak <= lnP <= lnbottom
        #below the peak: decreasing toward the 25500 Pa base
        return Cmax*(lnbottom - lnP)/(lnbottom - lnpeak)
    elseif lntop <= lnP <= lnpeak
        #above the peak: decreasing toward the 100 Pa top
        return Cmax*(lnP - lntop)/(lnpeak - lntop)
    end
    #outside the layer entirely
    return 0.0
end
#-------------------------------------------------------------------------------
# function for uniform condensible concentration in stratosphere
export adiabatconcentration
#returns a function (T,P) -> molar concentration of the condensible: saturated
#below the tropopause, frozen at the tropopause value above it
function adiabatconcentration(Ξ::AbstractAdiabat, psat::F)::Function where {F<:Function}
#insist on an isothermal stratosphere
@assert ((Ξ.Ptropo != 0) | (Ξ.Tstrat != 0)) "adiabat must have isothermal stratosphere"
#compute tropopause pressure and temperature
Tβ, Pβ = tropopause(Ξ)
#compute saturation partial pressure at tropopause
Psatβ = psat(Tβ)
#create concentration function
#let-block captures avoid boxing of the closed-over variables
let (Pβ, Psatβ) = (Pβ, Psatβ)
function(T, P)
if P >= Pβ
Pβ = psat(T)
C = Pβ/(Pβ + P)
else
C = Psatβ/(Pβ + Psatβ)
end
return C
end
end
end
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 17150 | #-------------------------------------------------------------------------------
# reading and constructing CIA
export readcia, CIATables
function nextspace(s::String, i::Int, L::Int)::Int
    #scan forward from index i, stopping at the first space character
    #or at L+1 if no space is found before the end of the scan range
    j = i
    while j <= L && s[j] != ' '
        j += 1
    end
    return j
end
function nextnonspace(s::String, i::Int, L::Int)::Int
    #scan forward from index i, stopping at the first non-space character
    #or at L+1 if only spaces remain in the scan range
    j = i
    while j <= L && s[j] == ' '
        j += 1
    end
    return j
end
"""
readcia(filename)
Read a collision induced absorption data file. These files are available from [HITRAN](https://hitran.org/cia/) and desribed by [this readme](https://hitran.org/data/CIA/CIA_Readme.pdf). A vector of dictionaries is returned. Each dictionary represents absorption coefficients for a single temperature over a range of wavenumbers. Each dictionary contains:
| Key | Type | Description |
| --- | :--- | :---------- |
| `symbol` | `String` | chemical symbol |
| `Ξ½min` | `Float64` | minimum wavenumber of absorption range [cm``^{-1}``] |
| `Ξ½max` | `Float64` | maximum wavenumber of absorption range [cm``^{-1}``] |
| `npts` | `Int64` | number of points
| `T` | `Float64` | temperature for absorption data [K] |
| `Ξ½` | `Vector{Float64}` | wavenumber samples [cm``^{-1}``]
| `k` | `Vector{Float64}` | absorption coefficients [cm``^5``/molecule``^2``]
| `maxcia` | `Float64` | maximum absorption coefficient [cm``^5``/molecule``^2``]
| `res` | `Float64` | ? |
| `comments` | `String` | miscelleneous comments |
| `reference` | `Int64` | indices of data references |
"""
function readcia(filename::String)
@assert filename[end-3:end] == ".cia" "expected file with .cia extension downloaded from https://hitran.org/cia/"
lines = readlines(filename)
#lengths of all lines
L = length.(lines)
#header rows in the HITRAN CIA format are exactly 100 characters wide
@assert maximum(L) == 100 "unexpected maximum line length in cia file, expected 100 but got $(maximum(L))"
#find locations of header lines
hidx = findall(len->len == 100, L)
#allocate a big array of dictionaries for each range of data
cia = Vector{Dict{String,Any}}(undef, length(hidx))
#parse the tables
#sentinel index so the loop below can treat the last table uniformly
push!(hidx, length(lines) + 1)
for i = 1:length(hidx) - 1
#start the dictionary for this table
cia[i] = Dict{String,Any}()
#line indices for the ith table
ia = hidx[i]
ib = hidx[i+1]
#parse the header values (fixed-width columns per the HITRAN CIA readme)
line = lines[ia]
cia[i]["symbol"] = strip(line[1:20])
cia[i]["Ξ½min"] = parse(Float64, line[21:30])
cia[i]["Ξ½max"] = parse(Float64, line[31:40])
cia[i]["npts"] = parse(Int64, line[41:47])
cia[i]["T"] = parse(Float64, line[48:54])
cia[i]["maxcia"] = parse(Float64, line[55:64])
cia[i]["res"] = parse(Float64, line[65:70])
cia[i]["comments"] = strip(line[71:97])
cia[i]["reference"] = parse(Int64, line[98:100])
#read the data columns
table = lines[ia+1:ib-1]
L = length(table)
Ξ½ = zeros(Float64, L)
k = zeros(Float64, L)
for j = 1:L
#string representing row of table
line = table[j]
N = length(line)
#index of first non-space character
na = nextnonspace(line, 1, N)
#next space
nb = nextspace(line, na, N)
#and the next non-space
nc = nextnonspace(line, nb, N)
#finally, the next space or line end
nd = nextspace(line, nc, N)
#parse the values into arrays
Ξ½[j] = parse(Float64, line[na:nb-1])
k[j] = parse(Float64, line[nc:nd-1])
end
#add to the dictionary
cia[i]["Ξ½"] = Ξ½
cia[i]["k"] = k
end
return cia
end
"""
Organizing type for collision induced absorption data, with data tables loaded into [interpolators](https://wordsworthgroup.github.io/BasicInterpolators.jl/stable/).
| Field | Type | Description |
| ----- | :--- | :---------- |
| `name` | `String` | molecular symbol, i.e. `"CO2-H2"` |
| `formulae` | `Tuple{String,String}` | split molecular formulae, i.e `("CO2", "H2")` |
| `Ξ¦` | `Vector{BilinearInterpolator}` | interpolators for each grid of absorption coefficients |
| `Ο` | `Vector{LinearInterpolator}` | interpolators for isolated ranges of absorption coefficients |
| `T` | `Vector{Float64}` | temperatures [K] for single ranges in `Ο` |
| `extrapolate` | `Bool` | whether to extrapolate using flat boundaries from the coefficient grids in `Ξ¦` |
| `singles` | `Bool` | whether to use the single ranges in `Ο` at all |
The interpolator objects are described in the [`BasicInterpolators.jl`](https://wordsworthgroup.github.io/BasicInterpolators.jl/stable/) documentation.
# Constructors
CIATables(cia::Vector{Dict}; extrapolate=false, singles=false, verbose=true)
Construct a `CIATables` object from a dictionary of coefficient data, which can be read from files using [`readcia`](@ref). Keywords `extrapolate` and `singles` are used to set those fields of the returned object.
CIATables(filename; extrapolate=false, singles=false, verbose=true)
Construct a `CIATables` object directly from file, using [`readcia`](@ref) along the way.
# Examples
A `CIATables` object is [function-like](https://docs.julialang.org/en/v1/manual/methods/#Function-like-objects). To retrieve the absorption coefficient at a given wavenumber [cm``^{-1}``] and temperature [K], use the object like a function.
```julia
co2co2 = CIATables("data/cia/CO2-CO2_2018.cia"); #read data
Ξ½ = 100; #wavenumber [cm^-1]
T = 288; #temperature [K]
k = co2co2(Ξ½, T) #absorption coefficient [cm^5/molecule^2]
```
The object interpolates and sums all data tables that contain `Ξ½` and `T`. If `extrapolate` is `true`, boundary values are included whenever the temperature is out of range. If `singles` is `true`, data ranges for a single temperature are included whenever they contain `Ξ½`.
A `CIATables` can be passed to the [`cia`](@ref) function to compute an absorption cross-section with different temperatures and pressures.
```julia
co2ch4 = CIATables("data/cia/CO2-CH4_2018.cia"); #read data
Ξ½ = 250; #wavenumber [cm^-1]
T = 310 #temperature [K]
Pair = 1e5; #air pressure [Pa]
Pco2 = 40; #CO2 partial pressure [Pa]
Pch4 = 0.1; #CH4 partial pressure [Pa]
Ο = cia(Ξ½, co2ch4, T, Pair, Pco2, Pch4) #absorption cross-section [cm^2/molecule]
```
"""
struct CIATables
#molecular symbol (CO2-CO2, H2-H2, etc.)
name::String
#a Set object of the two gases involved
formulae::Tuple{String,String}
#interpolation structs
#2D interpolators over (wavenumber, temperature) grids of log(k)
Ξ¦::Vector{BilinearInterpolator{NoBoundaries}}
#1D interpolators over wavenumber for single-temperature ranges of log(k)
Ο::Vector{LinearInterpolator{NoBoundaries}}
#temperatures for singles in Ο
T::Vector{Float64}
#whether to extrapolate (!)
extrapolate::Bool
#whether to use single-temperature CIATables ranges
singles::Bool
end
function CIATables(cia::Vector{Dict{String,Any}};
                   extrapolate::Bool=false,
                   singles::Bool=false,
                   verbose::Bool=true)
    if verbose
        println("creating CIATables")
    end
    #pull out wavenumber ranges and temperatures for each grid
    Ξ½min = map(x->x["Ξ½min"], cia)
    Ξ½max = map(x->x["Ξ½max"], cia)
    T = map(x->x["T"], cia)
    #select unique wavenumber ranges and sort 'em
    Ξ½ranges = sort(unique(zip(Ξ½min, Ξ½max)), by=x->x[1])
    n = length(Ξ½ranges)
    #now get the interpolation tables and temperature ranges for each Ξ½range
    #(2D tables, single-temperature 1D tables, and those tables' temperatures)
    Ξ¦ = Vector{BilinearInterpolator}()
    Ο = Vector{LinearInterpolator}()
    Ο = Vector{Float64}()
    for i = 1:n
        #NOTE(review): Ξ½min/Ξ½max/T from above are reused as loop scratch here
        Ξ½min[i], Ξ½max[i] = Ξ½ranges[i]
        #pull out the data for this wavenumber range
        idx = findall(x->(x["Ξ½min"] β Ξ½min[i]) & (x["Ξ½max"] β Ξ½max[i]), cia)
        T = map(x->x["T"], cia[idx])
        Ξ½ = map(x->x["Ξ½"], cia[idx])
        k = map(x->x["k"], cia[idx])
        #if there is only one range, make a linear interpolation
        if length(T) == 1
            Ξ½, k = Ξ½[1], k[1]
            #NOTE(review): zeroing nonpositive values here feeds log(0) = -Inf
            #into the interpolator — compare the floatmin treatment below; confirm intended
            k[k .<= 0.0] .= 0.0
            push!(Ο, LinearInterpolator(Ξ½, log.(k), NoBoundaries()))
            push!(Ο, T[1])
            if verbose
                println(" ? single temperature CIA range found at $(T[1]) K, $(minimum(Ξ½)) - $(maximum(Ξ½)) cm^-1")
            end
        else
            #assert that all Ξ½ vectors are identical, then take one of them
            for j = 2:length(Ξ½)
                @assert sum(Ξ½[1] .- Ξ½[j]) β 0.0 "wavenumber sample within a wavenumber range appear to be different"
            end
            Ξ½ = Ξ½[1]
            #ensure sorted by temperature
            idx = sortperm(T)
            T, k = T[idx], k[idx]
            #put the k values into a 2d array
            k = convert.(Float64, hcat(k...))
            #replace bizarre negative values with tiny values
            k[k .<= 0.0] .= floatmin(Float64)
            #construct an interpolator WITH THE LOG OF K for accuracy
            push!(Ξ¦, BilinearInterpolator(Ξ½, T, log.(k), NoBoundaries()))
        end
    end
    #make sure symbols are all the same and get the individual gas strings
    symbols = unique(map(x->x["symbol"], cia))
    @assert length(symbols) == 1
    symbol = symbols[1]
    formulae = Tuple(map(String, split(symbol, '-')))
    #construct (note: `cia` is rebound from the input vector to the new object)
    cia = CIATables(symbol, formulae, Ξ¦, Ο, Ο, extrapolate, singles)
    #print some info if desired
    if verbose
        println(" formulae: $(cia.formulae[1]) & $(cia.formulae[2])")
        Ξ¦ = cia.Ξ¦
        n = length(Ξ¦)
        Ο = cia.Ο
        m = length(Ο)
        println(" $(n+m) absorption region(s)")
        for i = 1:n
            println(" $i) Ξ½ = $(Ξ¦[i].G.xa) - $(Ξ¦[i].G.xb) cm^-1")
            println(" T = $(Ξ¦[i].G.ya) - $(Ξ¦[i].G.yb) K")
        end
        for i = 1:m
            println(" $(i+n)) Ξ½ = $(Ο[i].r.xa) - $(Ο[i].r.xb) cm^-1")
            println(" T = $(cia.T[i]) (single)")
        end
    end
    return cia
end
#read a collision-induced absorption data file and build a CIATables directly,
#forwarding all keywords to the main constructor unchanged
function CIATables(fn::String;
                   extrapolate::Bool=false,
                   singles::Bool=false,
                   verbose::Bool=true)
    data = readcia(fn)
    return CIATables(data; extrapolate=extrapolate, singles=singles, verbose=verbose)
end
#-------------------------------------------------------------------------------
# interpolating k, the raw CIA values
#functor: sum the absorption coefficient k [cm^5/molecule^2] at wavenumber Ξ½
#[cm^-1] and temperature T [K] over every data range that contains Ξ½
function (cia::CIATables)(Ξ½, T)::Float64
    k = 0.0
    #look at each grid
    for Ξ¦ β cia.Ξ¦
        if Ξ¦.G.xa <= Ξ½ <= Ξ¦.G.xb
            #inside wavenumber range
            if Ξ¦.G.ya <= T <= Ξ¦.G.yb
                #interpolate inside the grid of data (tables store log(k), so un-log)
                k += exp(Ξ¦(Ξ½, T))
            elseif cia.extrapolate
                #otherwise extrapolate using flat boundary values, if desired
                k += exp(Ξ¦(Ξ½, T > Ξ¦.G.yb ? Ξ¦.G.yb : Ξ¦.G.ya))
            end
        end
    end
    #optionally include the weird ranges at a single temperature
    if cia.singles
        for Ο β cia.Ο
            #wavenumber range check only — these tables ignore T entirely
            if Ο.r.xa <= Ξ½ <= Ο.r.xb
                k += exp(Ο(Ξ½))
            end
        end
    end
    return k
end
#-------------------------------------------------------------------------------
# computing collision induced absorption cross-sections
export cia, cia!
#the Loschmidt number, but in molecules/cm^3 then squared [molecules^2/cm^6]
const Locmsq = 7.21879268e38
"""
cia(k, T, Pβ, Pβ, Pβ)
Compute a collision induced absorption cross-section
# Arguments
* `k`: absorption coefficient [cm``^5``/molecule``^2``]
* `T`: temperature [K]
* `Pβ`: total air pressure [Pa]
* `Pβ`: partial pressure of first gas [Pa]
* `Pβ`: partial pressure of second gas [Pa]
"""
function cia(k, T, Pβ, Pβ, Pβ)::Float64
#number densities of gases, in amagats
Οβ = (Pβ/π)*(πβ/T)
Οβ = (Pβ/π)*(πβ/T)
#number density of air, in molecules/cm^3
Οβ = 1e-6*Pβ/(π€*T)
#Ο in cm^2/molecule, converting k from cm^5/molecule^2 to cm^-1/amagat^2
(k*Locmsq)*Οβ*Οβ/Οβ
end
"""
cia(Ξ½, x::CIATables, T, Pβ, Pβ, Pβ)
Compute a collision induced absorption cross-section after retrieving the total absorption coefficient from a [`CIATables`](@ref) object
# Arguments
* `Ξ½`: wavenumber [cm``^{-1}``]
* `x`: [`CIATables`](@ref) object
* `T`: temperature [K]
* `Pβ`: total air pressure [Pa]
* `Pβ`: partial pressure of first gas [Pa]
* `Pβ`: partial pressure of second gas [Pa]
"""
function cia(Ξ½::Real, x::CIATables, T, Pβ, Pβ, Pβ)::Float64
#first retrieve the absorption coefficient from the interpolator
k = x(Ξ½, T) #cm^5/molecule^2
#then compute the cross-section
cia(k, T, Pβ, Pβ, Pβ) #cm^2/molecule
end
"""
cia!(Ο::AbstractVector, Ξ½::AbstractVector, x::CIATables, T, Pβ, Pβ, Pβ)
Compute a vector of collision induced absorption cross-sections in-place, retrieving the total absorption coefficient from a [`CIATables`](@ref) object.
# Arguments
* `Ο`: vector to store computed cross-sections
* `Ξ½`: vector of wavenumbers [cm``^{-1}``]
* `x`: [`CIATables`](@ref) object
* `T`: temperature [K]
* `Pβ`: total air pressure [Pa]
* `Pβ`: partial pressure of first gas [Pa]
* `Pβ`: partial pressure of second gas [Pa]
"""
function cia!(Ο::AbstractVector, Ξ½::AbstractVector, x::CIATables, T, Pβ, Pβ, Pβ)
@assert length(Ο) == length(Ξ½)
for i = 1:length(Ο)
Ο[i] += cia(Ξ½[i], x, T, Pβ, Pβ, Pβ)
end
end
"""
cia(Ξ½::AbstractVector, x::CIATables, T, Pβ, Pβ, Pβ)
Compute a vector of collision induced absorption cross-sections, retrieving the total absorption coefficient from a [`CIATables`](@ref) object.
# Arguments
* `Ξ½`: vector of wavenumbers [cm``^{-1}``]
* `x`: [`CIATables`](@ref) object
* `T`: temperature [K]
* `Pβ`: total air pressure [Pa]
* `Pβ`: partial pressure of first gas [Pa]
* `Pβ`: partial pressure of second gas [Pa]
"""
function cia(Ξ½::AbstractVector, x::CIATables, T, Pβ, Pβ, Pβ)::Vector{Float64}
Ο = zeros(Float64, length(Ξ½))
cia!(Ο, Ξ½, x, T, Pβ, Pβ, Pβ)
return Ο
end
"""
cia(Ξ½, x::CIATables, T, Pβ, gβ::AbstractGas, gβ::AbstractGas)
Compute a collision induced absorption cross-section, retrieving the total absorption coefficient from a [`CIATables`](@ref) object and computing partial pressures from gas objects.
# Arguments
* `Ξ½`: vector of wavenumbers [cm``^{-1}``]
* `x`: [`CIATables`](@ref) object
* `T`: temperature [K]
* `Pβ`: total air pressure [Pa]
* `gβ`: gas object representing the first component of the CIA pair
* `gβ`: gas object representing the second component of the CIA pair
"""
function cia(Ξ½::Real, x::CIATables, T, Pβ, gβ::AbstractGas, gβ::AbstractGas)::Float64
Pβ = Pβ*concentration(gβ, T, Pβ)
Pβ = Pβ*concentration(gβ, T, Pβ)
cia(Ξ½, x, T, Pβ, Pβ, Pβ)
end
#-------------------------------------------------------------------------------
# computing cross-sections efficiently with known gas objects
export CIA
"""
Container for a [`CIATables`](@ref) object and the two gasses representing the CIA components. Specializes with the type of each gas for fast retreival of absorption cross-sections from CIA data and partial pressures.
| Field | Type | Description |
| ----- | :--- | :---------- |
| `name` | `String` | molecular symbol, i.e. `"CO2-H2"` |
| `formulae` | `Tuple{String,String}` | split molecular formulae, i.e `("CO2", "H2")` |
| `x` | `CIATables` | collision induced absorption tables |
| `gβ` | `<:AbstractGas` | first component of CIA pair |
| `gβ` | `<:AbstractGas` | second component of CIA pair |
# Constructors
CIA(x::CIATables, gβ::AbstractGas, gβ::AbstractGas)
The name and formulae are taken from `x`.
CIA(x::CIATables, gases::AbstractGas...)
Using the formulae in `x`, the correct pair of gases is automatically selected from a VarArg collection of gases.
# Example
A `CIA` object is [function-like](https://docs.julialang.org/en/v1/manual/methods/#Function-like-objects). Use it like a function, passing it wavenumber, temperature, and pressure arguments to compute an absorption cross-section. Underneath, the [`CIATables`](@ref) object is interpolated and partial pressures are computed using the concentrations stored with the gases.
```julia
#load gases
Ξ½ = LinRange(1, 2500, 2500);
Ξ© = AtmosphericDomain();
co2 = BulkGas("data/par/CO2.par", 0.96, Ξ½, Ξ©);
ch4 = MinorGas("data/par/CH4.par", 1e-6, Ξ½, Ξ©);
#create CIA object
co2ch4 = CIA(CIATables("data/cia/CO2-CH4_2018.cia"), co2, ch4);
#compute a cross-section
Ξ½ = 667;
T = 250;
P = 1e5;
Ο = co2ch4(Ξ½, T, P)
```
"""
struct CIA{T,U}
name::String
formulae::Tuple{String,String}
x::CIATables
gβ::T
gβ::U
end
#construct a CIA from a table and the two component gases, copying name/formulae from the table
function CIA(x::CIATables, gβ::AbstractGas, gβ::AbstractGas)
    CIA(x.name, x.formulae, x, gβ, gβ)
end
#select the unique gas in `gases` whose `formula` field matches `f`;
#`cianame` labels the CIA pair in error messages
#uses explicit throws instead of @assert, which may be disabled at higher
#optimization levels and should not guard input validation
function findgas(f::String, cianame::String, gases::AbstractGas...)
    idx = findall(g -> g.formula == f, gases)
    isempty(idx) && throw(ArgumentError("pairing failed for $cianame CIA, gas $f is missing"))
    length(idx) > 1 && throw(ArgumentError("pairing failed for $cianame CIA, duplicate $f gases found"))
    return gases[idx[1]]
end
#construct a CIA by automatically selecting the matching pair of gases from a collection
function CIA(x::CIATables, gases::AbstractGas...)
    #gas formulae stored in the table
    fβ, fβ = x.formulae
    #find matching gases (errors if either is missing or duplicated)
    gβ, gβ = findgas(fβ, x.name, gases...), findgas(fβ, x.name, gases...)
    #make a CIA object
    CIA(x, gβ, gβ)
end
#evaluate a CIA cross-section [cm^2/molecule] at wavenumber Ξ½ [cm^-1], temperature T [K],
#and total pressure P [Pa], computing partial pressures from the stored gas objects
(X::CIA)(Ξ½, T, P)::Float64 = cia(Ξ½, X.x, T, P, X.gβ, X.gβ)
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 665 | #speed of light [m/s]
#speed of light in vacuum [m/s]
const π = 299792458.0
#Planck constant [J*s]
const π‘ = 6.62607015e-34
#Boltzmann constant [J/K]
#NOTE(review): the 2019 SI exact value is 1.380649e-23 — confirm before updating
const π€ = 1.38064852e-23
#Stefan-Boltzmann constant [W/(m^2*K^4)]
const π = 5.67037442e-8
#ideal gas constant [J/(K*mole)], equivalent to kB*Av
const π = 8.31446262
#Pascals in 1 standard atmosphere [Pa/atm]
const π = 101325.0
#Avogadro's number [molecules/mole]
const ππ = 6.02214076e23
#Dalton / unified atomic mass unit [kg] (also β‘ 1/ππ/1000)
const ππ = 1.66053907e-27
#Newtonian gravitational constant [m^3/(kg*s^2)]
const π = 6.6743e-11
#reference temperature of HITRAN database [K]
const πα΅£ = 296.0
#reference temperature [K] equivalent to 0 degrees Celsius
const πβ = 273.15
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 4194 | """
This module implements an efficient approximation for the Faddeyeva function
(sometimes called Faddeeva). Two methods are provided, one that is refined for
evaluation of the real part only and one for the whole complex result. The
approximation is due to Mofreh R. Zaghloul, as described in the paper:
* Mofreh R. Zaghloul. 2017. Algorithm 985: Simple, Efficient, and Relatively Accurate Approximation for the Evaluation of the Faddeyeva Function. ACM Trans. Math. Softw. 44, 2, Article 22 (August 2017), 9 pages. https://doi.org/10.1145/3119904
"""
module Faddeyeva
export faddeyeva
#1/βΟ, prefactor of the Laplace continued-fraction forms (regions 1-4)
const ΞΈ = 1/βΟ
#numerator coefficients of Hui's p-6 rational approximation (used in regionVI)
const Ξ±0 = 122.60793
const Ξ±1 = 214.38239
const Ξ±2 = 181.92853
const Ξ±3 = 93.15558
const Ξ±4 = 30.180142
const Ξ±5 = 5.9126262
const Ξ±6 = 1/βΟ
#denominator coefficients of Hui's p-6 rational approximation (used in regionVI)
const Ξ²0 = 122.60793
const Ξ²1 = 352.73063
const Ξ²2 = 457.33448
const Ξ²3 = 348.70392
const Ξ²4 = 170.35400
const Ξ²5 = 53.992907
const Ξ²6 = 10.479857
#numerator coefficients of Humlicek's w4 rational form (used in regionVa/regionVb)
const Ξ³0 = 36183.31
const Ξ³1 = 3321.99
const Ξ³2 = 1540.787
const Ξ³3 = 219.031
const Ξ³4 = 35.7668
const Ξ³5 = 1.320522
const Ξ³6 = 1/βΟ
#denominator coefficients of Humlicek's w4 rational form (used in regionVa/regionVb)
const Ξ»0 = 32066.6
const Ξ»1 = 24322.84
const Ξ»2 = 9022.228
const Ξ»3 = 2186.181
const Ξ»4 = 364.2191
const Ξ»5 = 61.57037
const Ξ»6 = 1.841439
#region boundaries on s = xΒ² + yΒ² (see the faddeyeva methods below)
const s0 = 38000.0
const s1 = 256.0
const s2 = 62.0
const s3 = 30.0
#boundary on yΒ² used with s3 (region 4 vs 5/6 selection)
const t3 = 1.0e-13
#region boundary on s used with the yΒ² thresholds below
const s4 = 2.5
#yΒ² thresholds selecting between the two region-5 variants
const t4 = 5.0e-9
const t5 = 0.072
#region 4: Laplace continued fractions, 4 convergents
function regionIV(z::Complex, x::Real, y::Real)::Complex
    #rational function of z² arising from the four-convergent continued fraction
    w = z^2
    return (θ*(-y + im*x))*(w - 2.5)/(w*(w - 3.0) + 0.75)
end
#region 5: Humlicek's w4 (Region IV), part a — uses exp(-x²) because y² is negligible
function regionVa(z::Complex, xsq::Real)::Complex
    w = z^2
    #numerator and denominator polynomials in z², Horner evaluation
    num = γ0 + w*(γ1 + w*(γ2 + w*(γ3 + w*(γ4 + w*(γ5 + w*γ6)))))
    den = λ0 + w*(λ1 + w*(λ2 + w*(λ3 + w*(λ4 + w*(λ5 + w*(λ6 + w))))))
    return exp(-xsq) + (im*z*num/den)
end
#region 5: Humlicek's w4 (Region IV), part b — uses the full exp(-z²)
function regionVb(z::Complex)::Complex
    w = z^2
    #numerator and denominator polynomials in z², Horner evaluation
    num = γ0 + w*(γ1 + w*(γ2 + w*(γ3 + w*(γ4 + w*(γ5 + w*γ6)))))
    den = λ0 + w*(λ1 + w*(λ2 + w*(λ3 + w*(λ4 + w*(λ5 + w*(λ6 + w))))))
    return exp(-w) + (im*z*num/den)
end
#region 6: Hui's p-6 rational approximation in the variable q = y - ix
function regionVI(x::Real, y::Real)::Complex
    q = y - im*x
    num = α0 + q*(α1 + q*(α2 + q*(α3 + q*(α4 + q*(α5 + q*α6)))))
    den = β0 + q*(β1 + q*(β2 + q*(β3 + q*(β4 + q*(β5 + q*(β6 + q))))))
    return num/den
end
"""
    faddeyeva(z::Complex)

Evaluate the Faddeyeva function at complex `z` using Zaghloul's Algorithm 985,
selecting one of six approximation regions from s = xΒ² + yΒ² and yΒ².
"""
function faddeyeva(z::Complex)::Complex
    x = real(z)
    y = imag(z)
    xΒ² = x*x
    yΒ² = y*y
    #squared magnitude of z, the main region selector
    s = xΒ² + yΒ²
    #region 1: Laplace continued fractions, 1 convergent
    if s >= s0
        return (y + im*x)*ΞΈ/s
    end
    #region 2: Laplace continued fractions, 2 convergents
    if s >= s1
        a = y*(0.5 + s)
        b = x*(s - 0.5)
        d = s^2 + (yΒ² - xΒ²) + 0.25
        return (a + im*b)*(ΞΈ/d)
    end
    #region 3: Laplace continued fractions, 3 convergents
    if s >= s2
        q = yΒ² - xΒ² + 1.5
        r = 4.0*xΒ²*yΒ²
        a = y*((q - 0.5)*q + r + xΒ²)
        b = x*((q - 0.5)*q + r - yΒ²)
        d = s*(q*q + r)
        return ΞΈ*(a + im*b)/d
    end
    #region 4: Laplace continued fractions, 4 convergents
    if s >= s3 && yΒ² >= t3
        return regionIV(z, x, y)
    end
    #region 5: Humlicek's w4 (Region IV) — variant chosen by the size of yΒ²
    if s > s4 && yΒ² < t4
        return regionVa(z, xΒ²)
    elseif s > s4 && yΒ² < t5
        return regionVb(z)
    end
    #region 6: Hui's p-6 Approximation (fallback for small |z|)
    return regionVI(x, y)
end
#real arguments representing z = x + im*y, returning only the real part
"""
    faddeyeva(x::Real, y::Real)

Evaluate only the real part of the Faddeyeva function at z = x + iy,
with region selection identical to the complex method but with the
intermediate complex arithmetic stripped out of regions 1-3.
"""
function faddeyeva(x::Real, y::Real)::Float64
    xΒ² = x*x
    yΒ² = y*y
    s = xΒ² + yΒ²
    #region 1: Laplace continued fractions, 1 convergent
    if s >= s0
        return y*ΞΈ/s
    end
    #region 2: Laplace continued fractions, 2 convergents
    if s >= s1
        return y*(0.5 + s)*(ΞΈ/((s^2 + (yΒ² - xΒ²)) + 0.25))
    end
    #region 3: Laplace continued fractions, 3 convergents
    if s >= s2
        q = yΒ² - xΒ² + 1.5
        r = 4.0*xΒ²*yΒ²
        return ΞΈ*(y*((q - 0.5)*q + r + xΒ²))/(s*(q*q + r))
    end
    #region 4: take the real part of the complex helper
    if s >= s3 && yΒ² >= t3
        return real(regionIV(x + im*y, x, y))
    end
    #region 5: Humlicek's w4, variant chosen by the size of yΒ²
    if s > s4 && yΒ² < t4
        return real(regionVa(x + im*y, xΒ²))
    elseif s > s4 && yΒ² < t5
        return real(regionVb(x + im*y))
    end
    #region 6: Hui's p-6 approximation (fallback for small |z|)
    return real(regionVI(x, y))
end
end
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 12922 | #-------------------------------------------------------------------------------
# defining the radiative domain and associated functions
export AtmosphericDomain
"""
Structure defining the temperature and pressure ranges over which absorption cross-sections are generated when constructing gas objects. `AtmosphericDomain` objects store the temperature and pressure coordinates of cross-section interpolation grids. More points lead to higher accuracy interpolation. Generally, about 12 temperature points and 24 pressure points results in maximum error of ~1 % and much smaller average error.
| Field | Type | Description |
| ----- | :--- | :---------- |
| `T` | `Vector{Float64}` | temperature coordinates of grid [K] |
| `Tmin` | `Float64` | lowest temperature value |
| `Tmax` | `Float64` | highest temperature value |
| `nT` | `Int64` | number of temperature coordinates |
| `P` | `Vector{Float64}` | pressure coordinates of grid [Pa] |
| `Pmin` | `Float64` | lowest pressure value |
| `Pmax` | `Float64` | highest pressure value |
| `nP` | `Int64` | number of pressure coordinates |
# Constructors
AtmosphericDomain(Trange, nT, Prange, nP)
Creates a domain with the given temperature/pressure ranges and numbers of points. `Trange` and `Prange` should be tuples of two values. `nT` and `nP` indicate the number of points to use.
AtmosphericDomain()
For convenience, creates a domain with 12 temperature points in `[25, 550]` K and 24 pressure points in `[1,1e6]` Pa.
"""
struct AtmosphericDomain
    #temperature samples for interpolator grids [K]
    T::Vector{Float64}
    Tmin::Float64
    Tmax::Float64
    nT::Int64
    #pressure samples for interpolator grids [Pa]
    #NOTE(review): original comment said [atm], but the docstring and the default
    #constructor range (1 to 1e6) indicate Pa — confirm
    P::Vector{Float64}
    Pmin::Float64
    Pmax::Float64
    nP::Int64
end
function AtmosphericDomain(Trange::Tuple{Real,Real}, nT::Int,
                           Prange::Tuple{Real,Real}, nP::Int)
    #check for negatives
    @assert all(Trange .> 0) "temperature range must be positive"
    @assert all(Prange .> 0) "pressure range must be positive"
    #check the Qref/Q range (partition function fits are only accurate on [TMIN,TMAX])
    @assert all(Trange .>= TMIN) "minimum temperature with Qref/Q accuracy is $TMIN K"
    @assert all(Trange .<= TMAX) "maximum temperature with Qref/Q accuracy is $TMAX K"
    #order of the range endpoints
    @assert Trange[1] < Trange[2] "Trange[1] ($(Trange[1])) can't be greater than Trange[2] ($(Trange[2]))"
    @assert Prange[1] < Prange[2] "Prange[1] ($(Prange[1])) can't be greater than Prange[2] ($(Prange[2]))"
    #generate grid points (Chebyshev-spaced; pressure spaced on a log axis)
    T = chebygrid(Trange[1], Trange[2], nT)
    P = exp.(chebygrid(log(Prange[1]), log(Prange[2]), nP))
    #assemble!
    AtmosphericDomain(T, Trange[1], Trange[2], nT, P, Prange[1], Prange[2], nP)
end
#convenience constructor with the documented defaults:
#12 temperature points on [25,550] K and 24 pressure points on [1,1e6] Pa
function AtmosphericDomain()
    AtmosphericDomain((25, 550), 12, (1, 1e6), 24)
end
#-------------------------------------------------------------------------------
#wrapper type for the BichebyshevInterpolators used for cross-sections
export OpacityTable
"""
An `OpacityTable` is a simple object wrapping a [BichebyshevInterpolator](https://wordsworthgroup.github.io/BasicInterpolators.jl/stable/chebyshev/). Inside, the interpolator stores a grid of `log` cross-section values along `log` pressure coordinates and temperature coordinates. An `OpacityTable` behaves like a function, recieving a temperature and pressure. When called, it retrieves a cross-section from the interpolator, undoes the `log`, and returns it. When constructing a gas object, each wavenumber is allocated a unique `OpacityTable` for fast and accurate cross-section evaluation at any temperature and pressure inside the `AtmosphericDomain`. Generally, `OpacityTable` objects should be used indirectly through gas objects.
"""
struct OpacityTable
Ξ¦::BichebyshevInterpolator
empty::Bool
end
#build an OpacityTable from temperature samples T [K], pressure samples P [Pa],
#and a grid of cross-sections σ [cm^2/molecule]
function OpacityTable(T::AbstractVector{<:Real},
                      P::AbstractVector{<:Real},
                      σ::AbstractArray{<:Real,2})
    #log(0) = -Inf cannot be passed to the interpolator constructor, so an
    #all-zero table is stored as zeros and flagged empty instead
    allzero = all(iszero, σ)
    vals = allzero ? fill(0.0, size(σ)) : log.(σ)
    return OpacityTable(BichebyshevInterpolator(T, log.(P), vals), allzero)
end
#gets cross-sections out of the interpolator, un-logged [cm^2/molecule],
#explicitly handling empty tables (which always yield zero absorption)
function (table::OpacityTable)(T, P)::Float64
    if table.empty
        return 0.0
    end
    #the interpolator's second axis is log-pressure and its values are log(σ)
    return exp(table.Φ(T, log(P)))
end
#-------------------------------------------------------------------------------
#function for building gas opacity tables
#build one OpacityTable per wavenumber by evaluating line shapes over the full
#temperature/pressure grid of the AtmosphericDomain
#  sl     - spectral line data
#  Cfun   - molar concentration as a function (T,P) -> C
#  shape! - in-place line shape function
#  ΞΞ½cut  - profile truncation distance [cm^-1]
#  Ξ½      - wavenumber samples [cm^-1], must be sorted and unique
#  Ξ©      - temperature/pressure domain
function bake(sl::SpectralLines,
              Cfun::F,
              shape!::G,
              ΞΞ½cut::Real,
              Ξ½::Vector{Float64},
              Ξ©::AtmosphericDomain
              )::Vector{OpacityTable} where {F, G<:Function}
    #check wavenumbers for problems
    @assert all(diff(Ξ½) .> 0) "wavenumbers must be unique and in ascending order"
    @assert all(Ξ½ .>= 0) "wavenumbers must be positive"
    #number of wavenumbers
    nΞ½ = length(Ξ½)
    #create a single block of cross-sections
    Ο = zeros(nΞ½, Ξ©.nT, Ξ©.nP)
    #fill it by evaluating in batches of wavenumbers (slow part)
    @threads for i = 1:Ξ©.nT # @distributed??
        for j = 1:Ξ©.nP
            #get a view into the big Ο array (each thread writes a disjoint slice)
            Οα΅’β±Ό = view(Ο,:,i,j)
            #get temperature, pressure, concentration
            T = Ξ©.T[i]
            P = Ξ©.P[j]
            C = Cfun(T, P)
            #make sure concentration isn't wacky
            @assert 0 <= C <= 1 "gas molar concentrations must be in [0,1], not $C (encountered @ $T K, $P Pa)"
            #evaluate line shapes (slow part); C*P is the partial pressure
            shape!(Οα΅’β±Ό, Ξ½, sl, T, P, C*P, ΞΞ½cut)
        end
    end
    #check for weirdness: wavenumbers where zero values mix with nonzero values
    #(likely underflow), which would produce non-smooth interpolation tables
    z = zeros(Bool, nΞ½)
    for i = 1:nΞ½
        Οα΅₯ = view(Ο,i,:,:)
        if (minimum(Οα΅₯) == 0) & (maximum(Οα΅₯) > 0)
            z[i] = true
        end
    end
    if any(z)
        @info "Zero cross-section values are mixed with non-zero values for the following wavenumbers for $(sl.name):\n\n$(Ξ½[z])\n\n Likely, absorption is extremely weak in these regions, causing underflow. Absorption is being set to zero for all temperatures and pressures at those wavenumbers to avoid non-smooth and inaccurate interpolation tables."
        Ο[z,:,:] .= 0.0
    end
    #split the block and create interpolators for each Ξ½
    Ξ = Vector{OpacityTable}(undef, nΞ½)
    @threads for i = 1:nΞ½
        Ξ [i] = OpacityTable(Ξ©.T, Ξ©.P, Ο[i,:,:])
    end
    #fresh out the oven
    return Ξ
end
#-------------------------------------------------------------------------------
#for testing opacity table errors
export opacityerror
#compare an OpacityTable's interpolated cross-sections against direct line shape
#evaluation on an NΓN (T,P) grid spanning the domain
#returns (T samples, P samples, absolute error grid, relative error grid)
function opacityerror(Ξ ::OpacityTable,
                      Ξ©::AtmosphericDomain,
                      sl::SpectralLines,
                      Ξ½::Real,
                      C::F, #C(T,P)
                      shape::G=voigt,
                      N::Int=50) where {F,G}
    #create T and P grids from the domain (P is log-spaced)
    T = LinRange(Ξ©.Tmin, Ξ©.Tmax, N)
    P = 10 .^ LinRange(log10(Ξ©.Pmin), log10(Ξ©.Pmax), N)
    #compute exact and approximate cross-sections
    Οop = zeros(N,N)
    Οex = zeros(N,N)
    @threads for i = 1:N
        for j = 1:N
            Οop[i,j] = Ξ (T[i], P[j])
            Οex[i,j] = shape(Ξ½, sl, T[i], P[j], C(T[i],P[j])*P[j])
        end
    end
    #compute error and relative error
    aerr = Οop .- Οex
    rerr = aerr./Οex
    return T, P, aerr, rerr
end
#-------------------------------------------------------------------------------
#defining absorbers and access to cross-sections
abstract type AbstractGas end
export AbstractGas
export WellMixedGas, VariableGas
export concentration, reconcentrate
#abundance-weighted average molar mass [kg/mole] over all lines in a SpectralLines object
meanmolarmass(sl::SpectralLines) = sum(sl.A .* sl.ΞΌ)/sum(sl.A)
#-------------------------------
"""
Gas type for well mixed atmospheric constituents. Must be constructed from a `.par` file or a [`SpectralLines`](@ref) object.
# Constructors
WellMixedGas(sl::SpectralLines, C, Ξ½, Ξ©, shape!=voigt!)
* `sl`: a [`SpectralLines`](@ref) object
* `C`: molar concentration of the constituent [mole/mole]
* `Ξ½`: vector of wavenumber samples [cm``^{-1}``]
* `Ξ©`: [`AtmosphericDomain`](@ref)
* `shape!`: line shape to use, must be the in-place version ([`voigt!`](@ref), [`lorentz!`](@ref), etc.)
* `ΞΞ½cut`: profile truncation distance [cm``^{-1}``]
WellMixedGas(par::String, C, Ξ½, Ξ©, shape!=voigt!, ΞΞ½cut=25; kwargs...)
Same arguments as the first constructor, but reads a `par` file directly into the gas object. Keyword arguments are passed through to [`readpar`](@ref).
"""
struct WellMixedGas <: AbstractGas
name::String
formula::String
ΞΌ::Float64 #mean molar mass [kg/mole]
C::Float64 #constant fraction [mole/mole] of dry gas constituents
Ξ½::Vector{Float64}
Ξ©::AtmosphericDomain
Ξ ::Vector{OpacityTable} #cross-section interpolators
end
function WellMixedGas(sl::SpectralLines,
                      C::Real,
                      Ξ½::AbstractVector{<:Real},
                      Ξ©::AtmosphericDomain,
                      shape!::Function=voigt!,
                      ΞΞ½cut::Real=25)
    ΞΌ = meanmolarmass(sl)
    Ξ½ = collect(Float64, Ξ½)
    ΞΞ½cut = convert(Float64, ΞΞ½cut)
    #bake the opacity tables with a constant-concentration closure
    Ξ = bake(sl, (T,P)->C, shape!, ΞΞ½cut, Ξ½, Ξ©)
    WellMixedGas(sl.name, sl.formula, ΞΌ, C, Ξ½, Ξ©, Ξ )
end

#read a par file and construct directly, forwarding kwargs to readpar
function WellMixedGas(par, C, Ξ½, Ξ©, shape!::Function=voigt!, ΞΞ½cut=25; kwargs...)
    sl = SpectralLines(par; kwargs...)
    WellMixedGas(sl, C, Ξ½, Ξ©, shape!, ΞΞ½cut)
end
#-------------------------------
"""
Gas type for variable concentration atmospheric constituents. Must be constructed from a `.par` file or a [`SpectralLines`](@ref) object.
# Constructors
VariableGas(sl::SpectralLines, C, Ξ½, Ξ©, shape!=voigt!)
* `sl`: a [`SpectralLines`](@ref) object
* `C`: molar concentration of the constituent [mole/mole] as a function of temperature and pressure `C(T,P)`
* `Ξ½`: vector of wavenumber samples [cm``^{-1}``]
* `Ξ©`: [`AtmosphericDomain`](@ref)
* `shape!`: line shape to use, must be the in-place version ([`voigt!`](@ref), [`lorentz!`](@ref), etc.)
* `ΞΞ½cut`: profile truncation distance [cm``^{-1}``]
VariableGas(par::String, C, Ξ½, Ξ©, shape!=voigt!, ΞΞ½cut=25; kwargs...)
Same arguments as the first constructor, but reads a `par` file directly into the gas object. Keyword arguments are passed through to [`readpar`](@ref).
"""
struct VariableGas{F} <: AbstractGas
name::String
formula::String
ΞΌ::Float64 #mean molar mass
C::F #concentration [mole/mole] from temperature and pressure, C(T,P)
Ξ½::Vector{Float64}
Ξ©::AtmosphericDomain
Ξ ::Vector{OpacityTable} #cross-section interpolators
end
function VariableGas(sl::SpectralLines,
                     C::Q,
                     Ξ½::AbstractVector{<:Real},
                     Ξ©::AtmosphericDomain,
                     shape!::Function=voigt!,
                     ΞΞ½cut::Real=25) where {Q}
    ΞΌ = meanmolarmass(sl)
    Ξ½ = collect(Float64, Ξ½)
    #bake the opacity tables, evaluating C(T,P) at every grid point
    Ξ = bake(sl, C, shape!, ΞΞ½cut, Ξ½, Ξ©)
    VariableGas(sl.name, sl.formula, ΞΌ, C, Ξ½, Ξ©, Ξ )
end

#read a par file and construct directly, forwarding kwargs to readpar
function VariableGas(par, C, Ξ½, Ξ©, shape!::Function=voigt!, ΞΞ½cut=25; kwargs...)
    sl = SpectralLines(par; kwargs...)
    VariableGas(sl, C, Ξ½, Ξ©, shape!, ΞΞ½cut)
end
#-------------------------------
"""
concentration(g::WellMixedGas)
Furnishes the molar concentration [mole/mole] of a [`WellMixedGas`](@ref) object. Identical to `g.C`.
"""
concentration(g::WellMixedGas) = g.C
concentration(g::WellMixedGas, X...)::Float64 = g.C
"""
concentration(g::VariableGas, T, P)
Furnishes the molar concentration [mole/mole] of a [`VariableGas`](@ref) object at a particular temperature and pressure. Identical to `g.C(T,P)`.
"""
concentration(g::VariableGas, T, P)::Float64 = g.C(T,P)
"""
reconcentrate(g::WellMixedGas, C)
Create a copy of a [`WellMixedGas`](@ref) object with a different molar concentration, `C`, in mole/mole.
!!! warning
Only reconcentrate gas objects with very low concentrations. The self-broadening component of the line shape is not recomputed when using the `reconcentrate` function. This component is very small when partial pressure is very low, but may be appreciable for bulk components.
"""
function reconcentrate(g::WellMixedGas, C::Real)::WellMixedGas
@assert 0 <= C <= 1 "gas molar concentrations must be in [0,1], not $C"
Ξ© = deepcopy(g.Ξ©)
Ξ = deepcopy(g.Ξ )
WellMixedGas(g.name[:], g.formula[:], g.ΞΌ, C, g.Ξ½, Ξ©, Ξ )
end
#-------------------------------------------------------------------------------
#single concentration-weighted cross-section [cm^2/molecule] at wavenumber index i
(g::AbstractGas)(i::Int, T, P)::Float64 = concentration(g, T, P)*g.Ξ [i](T,P)
#full vector of cross-sections at temperature T [K] and pressure P [Pa], for any gas type
function (g::AbstractGas)(T::Real, P::Real)::Vector{Float64}
    [g(i, T, P) for i β eachindex(g.Ξ½)]
end
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 3638 | export substellarlatitude, hourangle
export diurnalfluxfactor, diurnalfluxfactors
export annualfluxfactor, annualfluxfactors
"""
substellarlatitude(f, Ξ³)
Compute the latitude of the substellar point for a given solar longitude `f` (true anomaly) and obliquity `Ξ³`
"""
substellarlatitude(f, Ξ³) = asin(cos(f)*sin(Ξ³))
"""
hourangle(ΞΈ, ΞΈβ)
Compute the [hour angle](https://en.wikipedia.org/wiki/Hour_angle)
"""
function hourangle(ΞΈ, ΞΈβ)
x = -sin(ΞΈ)*sin(ΞΈβ)/(cos(ΞΈ)*cos(ΞΈβ))
if x <= -1
return Ο
elseif x >= 1
return 0.0
end
return acos(x)
end
#θ - latitude
#θₛ - substellar latitude
"""
    diurnalfluxfactor(θ, θₛ)

Compute the diurnally averaged fraction of incoming stellar flux received by a point at latitude `θ` when the substellar latitude is `θₛ`
"""
function diurnalfluxfactor(θ, θₛ)
    #hour angle of sunrise/sunset for this latitude pair
    h = hourangle(θ, θₛ)
    #daytime average of the cosine of the stellar zenith angle
    daylit = sin(h)*cos(θ)*cos(θₛ)
    tilted = h*sin(θ)*sin(θₛ)
    return (daylit + tilted)/pi
end
#θ - latitude
#f - solar longitude
#γ - obliquity
"""
    diurnalfluxfactor(θ, f, γ)

Compute the diurnally averaged fraction of incoming stellar flux received by a point at latitude `θ` when the planet is at solar longitude (true anomaly) `f`, with obliquity `γ`
"""
function diurnalfluxfactor(θ, f, γ)
    #reduce to the substellar-latitude form
    return diurnalfluxfactor(θ, substellarlatitude(f, γ))
end
"""
diurnalfluxfactor(t, a, m, e, ΞΈ, Ξ³, p)
Compute the diurnally averaged fraction of incoming stellar flux received by a point at latitude `ΞΈ` for a general elliptical orbit
"""
function diurnalfluxfactor(t, a, m, e, ΞΈ, Ξ³, p)
f = trueanomaly(t, a, m, e)
r = orbitaldistance(a, f, e)
return diurnalfluxfactor(ΞΈ, f - p, Ξ³)*(a/r)^2
end
"""
diurnalfluxfactors(Ξ³; nf=251, nΞΈ=181)
Compute a grid of diurnally averaged fractions of incoming stellar flux received by a point at latitude `ΞΈ` for a planet with obliquity `Ξ³` in a circular orbit. Returns a solar longitude vector (column values), latitude vector (row values), and the grid of flux factors. `nf` indicates the number of points around the orbit and `nΞΈ` indicates the number of latitudes.
"""
function diurnalfluxfactors(Ξ³; nf::Int=251, nΞΈ::Int=181)
ΞΈ = LinRange(-Ο/2, Ο/2, nΞΈ)
f = LinRange(0, 2Ο, nf)
F, Ξ = meshgrid(f, ΞΈ)
return (f, ΞΈ, diurnalfluxfactor.(Ξ, F, Ξ³))
end
"""
diurnalfluxfactors(a, m, e, Ξ³, p; nt=251, nΞΈ=181)
Compute a grid of diurnally averaged fractions of incoming stellar flux for a planet in a general elliptical orbit. Returns a time vector (column values) over one orbital period, latitude vector (row values), and the grid of flux factors. `nt` indicates the number of time samples around the orbit and `nΞΈ` indicates the number of latitudes.
"""
function diurnalfluxfactors(a, m, e, Ξ³, p; nt::Int=251, nΞΈ::Int=181)
t = LinRange(0, orbitalperiod(a, m), nt)
ΞΈ = LinRange(-Ο/2, Ο/2, nΞΈ)
T, Ξ = meshgrid(t, ΞΈ)
return (t, ΞΈ, diurnalfluxfactor.(T, a, m, e, Ξ, Ξ³, p))
end
"""
annualfluxfactor(e, ΞΈ, Ξ³, p)
Compute the annually averaged flux factor for a latitude `ΞΈ` on a planet in a general elliptical orbit.
"""
function annualfluxfactor(e, ΞΈ, Ξ³, p; tol::Float64=1e-4)
T = orbitalperiod(1.0, 1.0)
f(t) = diurnalfluxfactor(t, 1.0, 1.0, e, ΞΈ, Ξ³, p)
F, _ = hquadrature(f, 0, T, reltol=tol, abstol=tol)
return F/T
end
"""
annualfluxfactors(e, Ξ³, p; nΞΈ=181)
Compute a range of annually averaged flux factors for a planet in a general elliptical orbit. Returns a latitude vector (row values) and a vector of flux factors. `nΞΈ` indicates the number of latitude samples.
"""
function annualfluxfactors(e, Ξ³, p; nΞΈ::Int=181)
ΞΈ = LinRange(-Ο/2, Ο/2, nΞΈ)
F = annualfluxfactor.(e, ΞΈ, Ξ³, p)
return ΞΈ, F
end
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 17641 | #precompute a few numbers
#βΟ
const sqΟ = βΟ
#1/β(Ο/ln2), appears in the normalized doppler (gaussian) profile
const osqΟln2 = 1/sqrt(Ο/log(2.0))
#β(ln2)
const sqln2 = sqrt(log(2.0))
#second radiation constant hc/kB in cm*K (the factor of 100 converts m to cm)
const c2 = 100.0*π‘*π/π€
#-------------------------------------------------------------------------------
# wavenumber truncation of line shapes
#whether line center νl is farther than Δνcut from wavenumber ν, i.e. the line is truncated
#the original's `cond ? true : false` was redundant — the comparison is already a Bool
cutline(ν, νl, Δνcut)::Bool = abs(ν - νl) > Δνcut
#indices of line centers νl within Δνcut of a single wavenumber ν
#(the cutline helper is inlined here: a line survives when it is NOT cut off)
function includedlines(ν::Real,
                       νl::AbstractVector{<:Real},
                       Δνcut::Real)::Vector{Int64}
    return findall(c -> !(abs(ν - c) > Δνcut), νl)
end
#indices of line centers νl inside the sampled wavenumber span, padded by the cutoff
function includedlines(ν::AbstractVector{<:Real},
                       νl::AbstractVector{<:Real},
                       Δνcut::Real)::Vector{Int64}
    lo = minimum(ν) - Δνcut
    hi = maximum(ν) + Δνcut
    return findall(c -> (c > lo) & (c < hi), νl)
end
#-------------------------------------------------------------------------------
# Chebyshev polynomial fit for Qref/Q
#evaluate a Chebyshev expansion fit of Q(T)/Qref on [TMIN,TMAX] and return its
#inverse, Qref/Q, used for temperature scaling of line intensities
#  T - temperature [K]
#  n - number of expansion coefficients
#  a - expansion coefficients
function chebyQrefQ(T::Real, n::Int64, a::Vector{Float64})::Float64
    #check the temperature range
    @assert TMIN <= T <= TMAX "temperature outside of Qref/Q interpolation range"
    #map T to [-1,1]
    Ο = 2*(T - TMIN)/(TMAX - TMIN) - 1
    #values of first two chebys at Ο
    cβ = 1.0
    cβ = Ο
    #value of expansion after first two terms
    y = a[1] + a[2]*cβ
    for k = 3:n
        #next cheby value via the three-term recurrence
        cβ = 2*Ο*cβ - cβ
        #contribute to expansion
        y += a[k]*cβ
        #swap values so the recurrence can advance
        cβ = cβ
        cβ = cβ
    end
    #return the inverse, Qref/Q
    return 1.0/y
end
#-------------------------------------------------------------------------------
# special strategy for line profiles from sorted vectors of wavenumbers
#accumulate line profile function f over sorted wavenumbers Ξ½ and sorted line
#centers Ξ½l, skipping truncated lines efficiently with a moving start index
#  Ο     - output vector, incremented in-place
#  f     - profile function f(Ξ½, Ξ½l, args...)
#  ΞΞ½cut - truncation distance [cm^-1]
#  A     - per-line argument vectors, indexed alongside Ξ½l
function surf!(Ο::AbstractVector,
               f::F,
               Ξ½::AbstractVector,
               Ξ½l::AbstractVector,
               ΞΞ½cut::Real,
               A::Vararg{Vector{Float64},N}) where {F<:Function, N}
    @assert all(diff(Ξ½) .> 0) "wavenumber vectors must be sorted in ascending order"
    L = length(Ξ½l)
    jstart = 1 #tracking index to avoid searching from beginning every time
    for i = eachindex(Ξ½)
        #find the first line that isn't cut off
        j = jstart
        while (j <= L) && cutline(Ξ½[i], Ξ½l[j], ΞΞ½cut)
            j += 1
        end
        #only proceed if there is a line to include
        if j <= L
            #update the starting index for the search (valid because Ξ½ ascends)
            jstart = j
            #evaluate line profiles until one gets cut off, then move on
            while (j <= L) && !cutline(Ξ½[i], Ξ½l[j], ΞΞ½cut)
                #let block is required for good performance (avoids boxing of the captured index)
                args = let k = j
                    ntuple(n->A[n][k], N)
                end
                Ο[i] += f(Ξ½[i], Ξ½l[j], args...)
                j += 1
            end
        end
    end
end
#-------------------------------------------------------------------------------
# temperature scaling of line intensity
export scaleintensity
"""
scaleintensity(S, Ξ½l, Epp, M, I, T)
Compute the [temperature scaling for line intensity](https://hitran.org/docs/definitions-and-units/#mjx-eqn-eqn-intensity-temperature-dependence).
# Arguments
* `S`: spectal line intensity at 296 K [cm``^{-1}``/(molecule``\\cdot``cm``^{-2}``)]
* `Ξ½l`: wavenumber of line [cm``^{-1}``]
* `Epp`: lower-state energy of transition [cm``^{-1}``]
* `M`: [HITRAN molecular identification number](https://hitran.org/docs/molec-meta)
* `I`: [HITRAN local isotopologue number](https://hitran.org/docs/iso-meta/)
* `T`: temperature [K]
"""
function scaleintensity(S, Ξ½l, Epp, M::Int16, I::Int16, T)::Float64
    #arguments to exp (c2 is the second radiation constant factor)
    a = -c2*Epp
    b = -c2*Ξ½l
    #numerator and denominator of the Boltzmann/stimulated-emission ratio
    n = exp(a/T)*(1 - exp(b/T))
    d = exp(a/πα΅£)*(1 - exp(b/πα΅£))
    #check if there is an approximating polynomial for the partition ratio
    if MOLPARAM[M][10][I]
        QrefQ = chebyQrefQ(T, MOLPARAM[M][11][I], MOLPARAM[M][13][I])
    else
        #raise a proper ErrorException instead of throwing a raw String
        error("no interpolating polynomial available to compute Qref/Q for isotopologue $I of $(MOLPARAM[M][3]) ($(MOLPARAM[M][2]))")
        #QrefQ = (πα΅£/T)^1.5
    end
    #shifted line intensity
    S*QrefQ*(n/d)
end
#broadcast the scalar temperature scaling over a selection of lines
function scaleintensity(sl::SpectralLines, i::Vector{Int64}, T)::Vector{Float64}
    scaleintensity.(
        view(sl.S, i),
        view(sl.Ξ½, i),
        view(sl.Epp, i),
        sl.M,
        view(sl.I, i),
        T
    )
end
#-------------------------------------------------------------------------------
# doppler broadening
export Ξ±doppler, fdoppler, doppler, doppler!
"""
Ξ±doppler(Ξ½l, ΞΌ, T)
Compute doppler (gaussian) broadening coefficient from line wavenumber `Ξ½l` [cm``^{-1}``], gas molar mass `ΞΌ` [kg/mole], and temperature `T` [K].
"""
Ξ±doppler(Ξ½l, ΞΌ, T)::Float64 = (Ξ½l/π)*sqrt(2.0*π*T/ΞΌ)
function Ξ±doppler(sl::SpectralLines, i::Vector{Int64}, T)::Vector{Float64}
Ξ±doppler.(view(sl.Ξ½,i), view(sl.ΞΌ,i), T)
end
"""
fdoppler(Ξ½, Ξ½l, Ξ±)
Evaluate doppler (gaussian) profile
# Arguments
* `Ξ½`: profile evaluation wavenumber [cm``^{-1}``]
* `Ξ½l`: wavenumber of absorption line [cm``^{-1}``]
* `Ξ±`: doppler (gaussian) broadening coefficient
"""
fdoppler(Ξ½, Ξ½l, Ξ±)::Float64 = exp(-(Ξ½ - Ξ½l)^2/Ξ±^2)/(Ξ±*sqΟ)
"""
doppler(Ξ½, Ξ½l, S, Ξ±)
Evaluate doppler (gaussian) absoption cross-section [cm``^2``/molecule]
# Arguments
* `Ξ½`: profile evaluation wavenumber [cm``^{-1}``]
* `Ξ½l`: wavenumber of absorption line [cm``^{-1}``]
* `S`: line absoption intensity [cm``^{-1}``/(molecule``\\cdot``cm``^{-2}``)]
* `Ξ±`: doppler (gaussian) broadening coefficient
"""
doppler(Ξ½, Ξ½l, S, Ξ±)::Float64 = S*fdoppler(Ξ½, Ξ½l, Ξ±)
"""
doppler(Ξ½, sl, T, P, Pβ, ΞΞ½cut=25)
Evaluate a single doppler (gaussian) absoption cross-section [cm``^2``/molecule]. Temperature scaling and doppler profiles are evaluated along the way.
# Arguments
* `Ξ½`: wavenumber indicating where to evaluate [cm``^{-1}``]
* `sl`: [`SpectralLines`](@ref)
* `T`: temperature [K]
* `P`: air pressure [Pa]
* `Pβ`: partial pressure [Pa]
* `ΞΞ½cut`: profile truncation distance [cm``^{-1}``]
"""
function doppler(Ξ½, sl::SpectralLines, T, P, Pβ, ΞΞ½cut=25.0)::Float64
i = includedlines(Ξ½, sl.Ξ½, ΞΞ½cut)
S = scaleintensity(sl, i, T)
Ξ± = Ξ±doppler(sl, i, T)
sum(doppler.(Ξ½, view(sl.Ξ½,i), S, Ξ±))
end
"""
doppler!(Ο, Ξ½, sl, T, P, Pβ, ΞΞ½cut=25)
Identical to [`doppler`](@ref), but fills the vector of cross-sections (`Ο`) in-place.
"""
function doppler!(Ο::AbstractVector,
Ξ½::AbstractVector,
sl::SpectralLines,
T,
P,
Pβ,
ΞΞ½cut=25.0)
i = includedlines(Ξ½, sl.Ξ½, ΞΞ½cut)
S = scaleintensity(sl, i, T)
Ξ± = Ξ±doppler(sl, i, T)
surf!(Ο, doppler, Ξ½, view(sl.Ξ½,i), ΞΞ½cut, S, Ξ±)
end
"""
doppler(Ξ½, sl, T, P, Pβ, ΞΞ½cut=25)
Compute a vector of doppler (gaussian) absorption cross-sections [cm``^2``/molecule] from a [`SpectralLines`](@ref) object. Temperature scaling and doppler profiles are evaluated along the way.
# Arguments
* `Ξ½`: vector of wavenumbers indicating where to evaluate [cm``^{-1}``]
* `sl`: [`SpectralLines`](@ref)
* `T`: temperature [K]
* `P`: air pressure [Pa]
* `Pβ`: partial pressure [Pa]
* `ΞΞ½cut`: profile truncation distance [cm``^{-1}``]
"""
function doppler(Ξ½::AbstractVector,
sl::SpectralLines,
T,
P,
Pβ,
ΞΞ½cut=25.0)::Vector{Float64}
Ο = zeros(Float64, length(Ξ½))
doppler!(Ο, Ξ½, sl, T, P, Pβ, ΞΞ½cut)
return Ο
end
#-------------------------------------------------------------------------------
# pressure broadening
export Ξ³lorentz, florentz, lorentz, lorentz!
"""
Ξ³lorentz(Ξ³a, Ξ³s, na, T, P, Pβ)
Compute lorentzian broadening coefficient
# Arguments
* `Ξ³a`: air-broadened half width at half maximum (HWHM) [cm``^{-1}``/atm] at 296 K and 1 atm
* `Ξ³s`: self-broadened half width at half maximum (HWHM) [cm``^{-1}``/atm] at 296 K and 1 atm
* `na`: coefficient of temperature dependence of air-broadened half width
* `T`: temperature [K]
* `P`: air pressure [Pa]
* `Pβ`: partial pressure [Pa]
"""
function Ξ³lorentz(Ξ³a, Ξ³s, na, T, P, Pβ)::Float64
((πα΅£/T)^na)*(Ξ³a*(P - Pβ) + Ξ³s*Pβ)/π
end
function Ξ³lorentz(sl::SpectralLines, i::Vector{Int64}, T, P, Pβ)::Vector{Float64}
Ξ³lorentz.(view(sl.Ξ³a,i), view(sl.Ξ³s,i), view(sl.na,i), T, P, Pβ)
end
"""
florentz(Ξ½, Ξ½l, Ξ³)
Evaluate lorentz profile
# Arguments
* `Ξ½`: profile evaluation wavenumber [cm``^{-1}``]
* `Ξ½l`: wavenumber of absorption line [cm``^{-1}``]
* `Ξ³`: lorentzian broadening coefficient
"""
florentz(Ξ½, Ξ½l, Ξ³)::Float64 = Ξ³/(Ο*((Ξ½ - Ξ½l)*(Ξ½ - Ξ½l) + Ξ³*Ξ³))
"""
lorentz(Ξ½, Ξ½l, S, Ξ³)
Evaluate lorentzian absoption cross-section [cm``^2``/molecule]
# Arguments
* `Ξ½`: profile evaluation wavenumber [cm``^{-1}``]
* `Ξ½l`: wavenumber of absorption line [cm``^{-1}``]
* `S`: line absoption intensity [cm``^{-1}``/(molecule``\\cdot``cm``^{-2}``)]
* `Ξ³`: lorentzian broadening coefficient
"""
lorentz(Ξ½, Ξ½l, S, Ξ³)::Float64 = S*florentz(Ξ½, Ξ½l, Ξ³)
"""
lorentz(Ξ½, sl, T, P, Pβ, ΞΞ½cut=25)
Compute a single lorentzian absorption cross-sections [cm``^2``/molecule] from a [`SpectralLines`](@ref) object. Temperature scaling and lorentzian profiles are evaluated along the way.
# Arguments
* `Ξ½`: single wavenumber indicating where to evaluate [cm``^{-1}``]
* `sl`: [`SpectralLines`](@ref)
* `T`: temperature [K]
* `P`: air pressure [Pa]
* `Pβ`: partial pressure [Pa]
* `ΞΞ½cut`: profile truncation distance [cm``^{-1}``]
"""
function lorentz(Ξ½, sl::SpectralLines, T, P, Pβ, ΞΞ½cut=25.0)::Float64
i = includedlines(Ξ½, sl.Ξ½, ΞΞ½cut)
S = scaleintensity(sl, i, T)
Ξ³ = Ξ³lorentz(sl, i, T, P, Pβ)
sum(lorentz.(Ξ½, view(sl.Ξ½,i), S, Ξ³))
end
"""
lorentz!(Ο, Ξ½, sl, T, P, Pβ, ΞΞ½cut=25)
Identical to [`lorentz`](@ref), fills the vector of cross-sections (`Ο`) in-place.
"""
function lorentz!(Ο::AbstractVector,
Ξ½::AbstractVector,
sl::SpectralLines,
T,
P,
Pβ,
ΞΞ½cut=25.0)
i = includedlines(Ξ½, sl.Ξ½, ΞΞ½cut)
S = scaleintensity(sl, i, T)
Ξ³ = Ξ³lorentz(sl, i, T, P, Pβ)
surf!(Ο, lorentz, Ξ½, view(sl.Ξ½,i), ΞΞ½cut, S, Ξ³)
end
"""
lorentz(Ξ½, sl, T, P, Pβ, ΞΞ½cut=25)
Compute a vector of lorentzian absorption cross-sections [cm``^2``/molecule] from a [`SpectralLines`](@ref) object. Temperature scaling and lorentzian profiles are evaluated along the way.
# Arguments
* `Ξ½`: vector of wavenumbers indicating where to evaluate [cm``^{-1}``]
* `sl`: [`SpectralLines`](@ref)
* `T`: temperature [K]
* `P`: air pressure [Pa]
* `Pβ`: partial pressure [Pa]
* `ΞΞ½cut`: profile truncation distance [cm``^{-1}``]
"""
function lorentz(Ξ½::AbstractVector,
sl::SpectralLines,
T,
P,
Pβ,
ΞΞ½cut=25.0)::Vector{Float64}
Ο = zeros(Float64, length(Ξ½))
lorentz!(Ο, Ξ½, sl, T, P, Pβ, ΞΞ½cut)
return Ο
end
#-------------------------------------------------------------------------------
# voigt profile
export fvoigt, voigt, voigt!
"""
fvoigt(Ξ½, Ξ½l, Ξ±, Ξ³)
Evaluate Voigt profile
# Arguments
* `Ξ½`: profile evaluation wavenumber [cm``^{-1}``]
* `Ξ½l`: wavenumber of absorption line [cm``^{-1}``]
* `Ξ±`: doppler (gaussian) broadening coefficient
* `Ξ³`: lorentzian broadening coefficient
"""
function fvoigt(Ξ½, Ξ½l, Ξ±, Ξ³)::Float64
    #inverse of the doppler parameter
    Ξ² = 1/Ξ±
    #factor for real and complex parts of Faddeeva args, avoiding Ξ² division
    #(d = sqrt(ln2)/Ξ± maps wavenumber offsets to dimensionless coordinates)
    d = sqln2*Ξ²
    #arguments to Faddeeva function
    x = (Ξ½ - Ξ½l)*d
    y = Ξ³*d
    #evaluate real part of Faddeeva function
    f = faddeyeva(x,y)
    #final calculation, avoiding Ξ± division by using Ξ² again
    #osqΟln2 is presumably the constant sqrt(ln2/Ο) — confirm its definition
    osqΟln2*Ξ²*f
end
"""
voigt(Ξ½, Ξ½l, S, Ξ±, Ξ³)
Evaluate Voigt absoption cross-section [cm``^2``/molecule]
# Arguments
* `Ξ½`: profile evaluation wavenumber [cm``^{-1}``]
* `Ξ½l`: wavenumber of absorption line [cm``^{-1}``]
* `S`: line absoption intensity [cm``^{-1}``/(molecule``\\cdot``cm``^{-2}``)]
* `Ξ±`: doppler (gaussian) broadening coefficient
* `Ξ³`: lorentzian broadening coefficient
"""
voigt(Ξ½, Ξ½l, S, Ξ±, Ξ³)::Float64 = S*fvoigt(Ξ½, Ξ½l, Ξ±, Ξ³)
"""
voigt(Ξ½, sl::SpectralLines, T, P, Pβ, ΞΞ½cut=25)
Evaluate Voigt absorption cross-section at a single wavenumber.
"""
function voigt(Ξ½, sl::SpectralLines, T, P, Pβ, ΞΞ½cut=25.0)::Float64
i = includedlines(Ξ½, sl.Ξ½, ΞΞ½cut)
S = scaleintensity(sl, i, T)
Ξ± = Ξ±doppler(sl, i, T)
Ξ³ = Ξ³lorentz(sl, i, T, P, Pβ)
sum(voigt.(Ξ½, view(sl.Ξ½,i), S, Ξ±, Ξ³))
end
"""
voigt!(Ο, Ξ½, sl, T, P, Pβ, ΞΞ½cut=25)
Identical to [`voigt`](@ref), but fills the vector of cross-sections (`Ο`) in-place.
"""
function voigt!(Ο::AbstractVector,
Ξ½::AbstractVector,
sl::SpectralLines,
T,
P,
Pβ,
ΞΞ½cut=25.0)
i = includedlines(Ξ½, sl.Ξ½, ΞΞ½cut)
S = scaleintensity(sl, i, T)
Ξ± = Ξ±doppler(sl, i, T)
Ξ³ = Ξ³lorentz(sl, i, T, P, Pβ)
surf!(Ο, voigt, Ξ½, view(sl.Ξ½,i), ΞΞ½cut, S, Ξ±, Ξ³)
end
"""
voigt(Ξ½, sl, T, P, Pβ, ΞΞ½cut=25)
Compute a vector of Voigt absorption cross-sections [cm``^2``/molecule] from a [`SpectralLines`](@ref) object. Temperature scaling and Voigt profiles are evaluated along the way.
# Arguments
* `Ξ½`: vector of wavenumbers indicating where to evaluate [cm``^{-1}``]
* `sl`: [`SpectralLines`](@ref)
* `T`: temperature [K]
* `P`: air pressure [Pa]
* `Pβ`: partial pressure [Pa]
* `ΞΞ½cut`: profile truncation distance [cm``^{-1}``]
"""
function voigt(Ξ½::AbstractVector,
sl::SpectralLines,
T,
P,
Pβ,
ΞΞ½cut=25.0)::Vector{Float64}
Ο = zeros(Float64, length(Ξ½))
voigt!(Ο, Ξ½, sl, T, P, Pβ, ΞΞ½cut)
return Ο
end
#-------------------------------------------------------------------------------
# sublorentzian profile for CO2
# Perrin, M. Y., and J. M. Hartman. βTemperature-Dependent Measurements and Modeling of Absorption by CO2-N2 Mixtures in the Far Line-Wings of the 4.3 Ξm CO2 Band.β Journal of Quantitative Spectroscopy and Radiative Transfer, vol. 42, no. 4, 1989, pp. 311β17.
export Ξ§PHCO2, PHCO2, PHCO2!
"""
Ξ§PHCO2(Ξ½, Ξ½l, T)
Compute the `Ξ§` (Chi) factor for sub-lorentzian CO2 line profiles, as in
* [Perrin, M. Y., and J. M. Hartman. βTemperature-Dependent Measurements and Modeling of Absorption by CO2-N2 Mixtures in the Far Line-Wings of the 4.3 Ξm CO2 Band.β Journal of Quantitative Spectroscopy and Radiative Transfer, vol. 42, no. 4, 1989, pp. 311β17.](https://www.sciencedirect.com/science/article/abs/pii/0022407389900770)
# Arguments
* `Ξ½`: profile evaluation wavenumber [cm``^{-1}``]
* `Ξ½l`: wavenumber of absorption line [cm``^{-1}``]
* `T`: temperature [K]
"""
function Ξ§PHCO2(Ξ½, Ξ½l, T)::Float64
    #distance from line center
    ΞΞ½ = abs(Ξ½ - Ξ½l)
    #within 3 cm^-1 of the center the lorentzian is unmodified
    ΞΞ½ < 3.0 && return 1.0
    #temperature-dependent decay coefficients from Perrin & Hartman (1989)
    Bβ = 0.0888 - 0.16*exp(-0.0041*T)
    if ΞΞ½ < 30.0
        Ξ§ = exp(-Bβ*(ΞΞ½ - 3.0))
    else
        Bβ = 0.0526*exp(-0.00152*T)
        if ΞΞ½ < 120.0
            Ξ§ = exp(-Bβ*27.0 - Bβ*(ΞΞ½ - 30.0))
        else
            Ξ§ = exp(-Bβ*27.0 - Bβ*90.0 - 0.0232*(ΞΞ½ - 120.0))
        end
    end
    return Ξ§
end
"""
PHCO2(Ξ½, Ξ½l, S, Ξ±)
Evaluate Perrin & Hartman sub-lorentzian absoption cross-section [cm``^2``/molecule] for CO2
# Arguments
* `Ξ½`: profile evaluation wavenumber [cm``^{-1}``]
* `Ξ½l`: wavenumber of absorption line [cm``^{-1}``]
* `T`: temperature [K]
* `S`: line absoption intensity [cm``^{-1}``/(molecule``\\cdot``cm``^{-2}``)]
* `Ξ±`: doppler (gaussian) broadening coefficient
* `Ξ³`: lorentzian broadening coefficient
"""
function PHCO2(Ξ½, Ξ½l, T, S, Ξ±, Ξ³)::Float64
    #voigt profile with the lorentzian width scaled by the sub-lorentzian Ξ§ factor
    voigt(Ξ½, Ξ½l, S, Ξ±, Ξ§PHCO2(Ξ½, Ξ½l, T)*Ξ³)
end
"""
PHCO2(Ξ½, sl, T, P, Pβ, ΞΞ½cut=500)
Compute a single Perrin & Hartman sub-lorentzian CO2 absorption cross-sections [cm``^2``/molecule] from a [`SpectralLines`](@ref) object. Temperature scaling and profiles are evaluated along the way.
# Arguments
* `Ξ½`: single wavenumber indicating where to evaluate [cm``^{-1}``]
* `sl`: [`SpectralLines`](@ref)
* `T`: temperature [K]
* `P`: air pressure [Pa]
* `Pβ`: partial pressure [Pa]
* `ΞΞ½cut`: profile truncation distance [cm``^{-1}``]
"""
function PHCO2(Ξ½, sl::SpectralLines, T, P, Pβ, ΞΞ½cut=500.0)::Float64
i = includedlines(Ξ½, sl.Ξ½, ΞΞ½cut)
S = scaleintensity(sl, i, T)
Ξ± = Ξ±doppler(sl, i, T)
Ξ³ = Ξ³lorentz(sl, i, T, P, Pβ)
sum(PHCO2.(Ξ½, view(sl.Ξ½,i), T, S, Ξ±, Ξ³))
end
"""
PHCO2!(Ο, Ξ½, sl, T, P, Pβ, ΞΞ½cut=500)
Identical to [`PHCO2`](@ref), but fills the vector of cross-sections (`Ο`) in-place.
"""
function PHCO2!(Ο::AbstractVector,
Ξ½::AbstractVector,
sl::SpectralLines,
T,
P,
Pβ,
ΞΞ½cut=500.0)
i = includedlines(Ξ½, sl.Ξ½, ΞΞ½cut)
S = scaleintensity(sl, i, T)
Ξ± = Ξ±doppler(sl, i, T)
Ξ³ = Ξ³lorentz(sl, i, T, P, Pβ)
f(Ξ½, Ξ½l, S, Ξ±, Ξ³) = PHCO2(Ξ½, Ξ½l, T, S, Ξ±, Ξ³) #shove T into the function
surf!(Ο, f, Ξ½, view(sl.Ξ½,i), ΞΞ½cut, S, Ξ±, Ξ³)
end
"""
PHCO2(Ξ½, sl, T, P, Pβ, ΞΞ½cut=500)
Compute a vector of Perrin & Hartman sub-lorentzian CO2 absorption cross-sections [cm``^2``/molecule] from a [`SpectralLines`](@ref) object. Temperature scaling and profiles are evaluated along the way.
# Arguments
* `Ξ½`: vector of wavenumbers indicating where to evaluate [cm``^{-1}``]
* `sl`: [`SpectralLines`](@ref)
* `T`: temperature [K]
* `P`: air pressure [Pa]
* `Pβ`: partial pressure [Pa]
* `ΞΞ½cut`: profile truncation distance [cm``^{-1}``]
"""
function PHCO2(Ξ½::AbstractVector,
sl::SpectralLines,
T,
P,
Pβ,
ΞΞ½cut=500.0)::Vector{Float64}
Ο = zeros(Float64, length(Ξ½))
PHCO2!(Ο, Ξ½, sl, T, P, Pβ, ΞΞ½cut)
return Ο
end
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 7311 | #-------------------------------------------------------------------------------
export opticaldepth
"""
opticaldepth(Pβ, Pβ, g, fT, fΞΌ, ΞΈ, absorbers...; tol=1e-5)
Compute monochromatic [optical depths](https://en.wikipedia.org/wiki/Optical_depth#Atmospheric_sciences) (Ο) between two pressure levels
# Arguments
* `Pβ`: first pressure level [Pa]
* `Pβ`: second pressure level [Pa]
* `g`: gravitational acceleration [m/s``^2``]
* `fT`: temperature [K] as a function of pressure [Pa], `fT(P)`. This may be any callable object, like [`MoistAdiabat`](@ref), for example.
* `fΞΌ`: mean molar mass as a function of temperature [K] and pressure [Pa], `fΞΌ(T,P)`
* `ΞΈ`: angle [radians] of path, must be β [0,Ο/2), where 0 is straight up/down
* `absorbers`: at least one gas object and any number of [`CIATables`](@ref) and functions in the form Ο(Ξ½, T, P)
Returns a vector of optical depths across all wavenumbers stored in gas objects. The `tol` keyword argument adjusts integrator error tolerance.
"""
function opticaldepth(Pβ::Real,
                      Pβ::Real,
                      g::Real,
                      fT::Q,
                      fΞΌ::R,
                      ΞΈ::Real,
                      absorbers...;
                      tol::Float64=1e-5
                      )::Vector{Float64} where {Q,R}
    #initialization
    #order the pressures so Pβ is the larger (deeper) one
    Pβ, Pβ = max(Pβ, Pβ), min(Pβ, Pβ)
    #consolidate absorbers and check pressures against the gas domains
    A = setupintegration(Pβ, Pβ, absorbers)
    checkazimuth(ΞΈ)
    #integrate wavenumbers in parallel
    Ο = zeros(Float64, A.nΞ½)
    #path-length factor 1/cos(ΞΈ) for a slanted path (ΞΈ = 0 is straight up/down)
    m = 1/cos(ΞΈ)
    @threads for i β eachindex(A.Ξ½)
        #integrate dΟ/dΟ in the transformed pressure coordinate (P2Ο)
        Ο[i] = depth(dΟdΟ, P2Ο(Pβ), P2Ο(Pβ), A, i, g, m, fT, fΞΌ, tol)
    end
    return Ο
end
#-------------------------------------------------------------------------------
#the basic transmittance method exp(-Ο) is already exported
"""
transmittance(Pβ, Pβ, g, fT, fΞΌ, ΞΈ, absorbers...; tol=1e-5)
Compute monochromatic [transmittances](https://en.wikipedia.org/wiki/Transmittance.) between two pressure levels
Accepts the same arguments as [`opticaldepth`](@ref) and returns a vector of transmittances across all wavenumbers stored in gas objects.
"""
transmittance(X...; kwargs...) = transmittance.(opticaldepth(X...; kwargs...))
#-------------------------------------------------------------------------------
export outgoing
"""
outgoing(Pβ, Pβ, g, fT, fΞΌ, absorbers; nstream=5, tol=1e-5)
Compute outgoing monochromatic radiative fluxes [W/m``^2``/cm``^{-1}``], line-by-line. Integrates the [`schwarzschild`](@ref) equation from `Pβ` to `Pβ` at each wavenumber in the provided gas object(s) using any number of streams/angles. Total flux [W/m``^2``] can be evaluated with the [`trapz`](@ref) function.
# Arguments
* `Pβ`: surface pressure [Pa]
* `Pβ`: top of atmopshere pressure [Pa]
* `g`: gravitational acceleration [m/s``^2``]
* `fT`: temperature [K] as a function of pressure [Pa], `fT(P)`. This may be any callable object, like [`MoistAdiabat`](@ref), for example.
* `fΞΌ`: mean molar mass as a function of temperature [K] and pressure [Pa], `fΞΌ(T,P)`
* `absorbers`: at least one [gas object](gas_objects.md) and any number of [`CIATables`](@ref) and functions in the form Ο(Ξ½, T, P)
The keyword argument `nstream` specifies how many independent streams, or beam angles through the atmosphere, to integrate. The keyword argument `tol` is a numerical error tolerance passed to the [`radau`](https://github.com/wordsworthgroup/ScalarRadau.jl) integrator.
"""
function outgoing(Pβ::Real,
                  Pβ::Real,
                  g::Real,
                  fT::Q,
                  fΞΌ::R,
                  absorbers...;
                  nstream::Int64=5, #number of streams to use
                  tol::Float64=1e-5 #integrator tolerance
                  )::Vector{Float64} where {Q,R}
    #initialization
    #consolidate absorbers and check pressures against the gas domains
    ca = setupintegration(Pβ, Pβ, absorbers)
    #endpoints in the transformed pressure coordinate
    Οβ, Οβ = P2Ο(Pβ, Pβ)
    #surface temperature
    Tβ = fT(Pβ)
    #integrate wavenumbers in parallel
    F = zeros(Float64, ca.nΞ½)
    @threads for i β eachindex(ca.Ξ½)
        #surface thermal emission is the initial intensity for each stream
        Iβ = planck(ca.Ξ½[i], Tβ)
        F[i] = streams(dIdΟ, Iβ, Οβ, Οβ, ca, i, g, nstream, fT, fΞΌ, tol)
    end
    return F
end
#-------------------------------------------------------------------------------
export topfluxes, topimbalance
"""
    topfluxes(Pβ, Pβ, g, fT, fΞΌ, fstellar, albedo, absorber::UnifiedAbsorber; nstream=5, ΞΈ=0.952, tol=1e-5)
Compute the upward and downward monochromatic fluxes at the top of the atmosphere.
# Arguments
* `Pβ`: surface pressure [Pa]
* `Pβ`: top of atmosphere pressure [Pa]
* `g`: gravitational acceleration [m/s``^2``]
* `fT`: temperature [K] as a function of pressure [Pa], `fT(P)`. This may be any callable object, like [`MoistAdiabat`](@ref), for example.
* `fΞΌ`: mean molar mass as a function of temperature [K] and pressure [Pa], `fΞΌ(T,P)`
* `fstellar`: incoming stellar flux as a function of wavenumber, `fstellar(Ξ½)` [W/m``^2``]
* `albedo`: surface albedo, the fraction of downward stellar flux reflected upward
* `absorber`: a `UnifiedAbsorber`, which is either a [`GroupedAbsorber`](@ref) or an [`AcceleratedAbsorber`](@ref)
"""
function topfluxes(Pβ::Real,
                   Pβ::Real,
                   g::Real,
                   fT::Q, # fT(P)
                   fΞΌ::R, # fΞΌ(T,P)
                   fstellar::S, # fstellar(Ξ½) [W/m^2]
                   albedo::Real,
                   absorber::UnifiedAbsorber;
                   nstream::Int64=5, #number of streams to use
                   ΞΈ::Float64=0.952, #corresponds to cos(ΞΈ) = 0.66
                   tol::Float64=1e-5)::NTuple{2,Vector{Float64}} where {Q,R,S}
    #setup
    checkpressures(absorber, Pβ, Pβ)
    Ξ½, nΞ½ = absorber.Ξ½, absorber.nΞ½
    Οβ, Οβ = P2Ο(Pβ, Pβ)
    ΞΉβ, ΞΉβ = P2ΞΉ(Pβ, Pβ)
    #incoming stellar flux at TOA
    Fββ» = fstellar.(Ξ½)*cos(ΞΈ)
    #downward stellar flux at the ground/surface
    Fββ» = zeros(nΞ½)
    @threads for i β eachindex(Ξ½)
        #attenuate the incoming beam along the slanted path
        Ο = depth(dΟdΞΉ, ΞΉβ, ΞΉβ, absorber, i, g, 1/cos(ΞΈ), fT, fΞΌ, tol)
        Fββ»[i] = Fββ»[i]*exp(-Ο)
    end
    #some of it gets reflected back upward at the surface
    IββΊ = Fββ»*albedo/Ο #Lambertian reflection
    #surface temperature
    Tβ = fT(Pβ)
    #radiation streams up out of the atmosphere
    FββΊ = zeros(nΞ½)
    @threads for i β eachindex(Ξ½)
        #reflected stellar intensity plus surface thermal emission
        Iβ = IββΊ[i] + planck(Ξ½[i], Tβ)
        FββΊ[i] = streams(dIdΟ, Iβ, Οβ, Οβ, absorber, i, g, nstream, fT, fΞΌ, tol)
    end
    return Fββ», FββΊ
end
"""
topimbalance(Pβ, Pβ, g, fT, fΞΌ, absorber::UnifiedAbsorber; nstream=5, ΞΈ=0.952, tol=1e-5)
Compute the difference between the total upward and downward radiative flux at the top of the atmopshere.
# Arguments
* `Pβ`: surface pressure [Pa]
* `Pβ`: top of atmopshere pressure [Pa]
* `g`: gravitational acceleration [m/s``^2``]
* `fT`: temperature [K] as a function of pressure [Pa], `fT(P)`. This may be any callable object, like [`MoistAdiabat`](@ref), for example.
* `fΞΌ`: mean molar mass as a function of temperature [K] and pressure [Pa], `fΞΌ(T,P)`
* `absorber`: a `UnifiedAbsorber`, which is either a [`GroupedAbsorber`](@ref) or an [`AcceleratedAbsorber`](@ref)
"""
function topimbalance(X...; kwargs...)::Float64
    #monochromatic fluxes at the top of the atmosphere
    Fdown, Fup = topfluxes(X...; kwargs...)
    #wavenumber vector, knowing the trailing argument is a UnifiedAbsorber
    Ξ½ = X[end].Ξ½
    #difference of the wavenumber-integrated downward and upward fluxes
    trapz(Ξ½, Fdown) - trapz(Ξ½, Fup)
end
#-------------------------------------------------------------------------------
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 15385 | #-------------------------------------------------------------------------------
# general gets and checks
#pressure range over which every gas's atmospheric domain is defined:
#the largest domain minimum and the smallest domain maximum
function pressurelimits(gases)::NTuple{2,Float64}
    Pmin = maximum(gas -> gas.Ξ©.Pmin, gases)
    Pmax = minimum(gas -> gas.Ξ©.Pmax, gases)
    return Pmin, Pmax
end
#demand that every pressure lies inside the shared gas domain bounds
function checkpressures(gases, pressures...)::Nothing
    #pressure bounds
    Pmin, Pmax = pressurelimits(gases)
    #demand all pressures within the range
    for P β pressures
        @assert P >= Pmin "Pressure $P Pa too low, domain minimum is $Pmin"
        #the upper-bound message previously said "too low, domain minimum"
        @assert P <= Pmax "Pressure $P Pa too high, domain maximum is $Pmax"
    end
    nothing
end
#validate that the path angle lies in [0, Ο/2), where 0 is straight up/down
function checkazimuth(ΞΈ)::Nothing
    @assert (ΞΈ >= 0) && (ΞΈ < Ο/2) "azimuth angle ΞΈ must be β [0,Ο/2)"
    return nothing
end
function getwavenumbers(absorbers::Tuple)::Vector{Float64}
G = absorbers[findall(a -> typeof(a) <: AbstractGas, absorbers)]
@assert length(G) > 0 "no gas objects found"
getwavenumbers(G...)
end
#checks for identical wavenumber sampling across different gases
function getwavenumbers(G::AbstractGas...)::Vector{Float64}
Ξ½β = G[1].Ξ½
for g β G
@assert Ξ½β == g.Ξ½ "gases must have identical wavenumber vectors"
end
return Ξ½β
end
#-------------------------------------------------------------------------------
# super for both consolidated absorber types
abstract type UnifiedAbsorber end
export UnifiedAbsorber, noabsorption, getΟ
#-------------------------------------------------------------------------------
# specialized container for absorbing objects and functions
export GroupedAbsorber
"""
GroupedAbsorber(absorbers...)
A struct for consolidating absorbers. Construct with any number of [gas objects](gas_objects.md), functions in the form `Ο(Ξ½, T, P)` and [`CIATables`](@ref).
"""
struct GroupedAbsorber{T,U,V} <: UnifiedAbsorber
#tuple of gas objects
gas::T
#tuple of CIA objects
cia::U
#tuple of functions in the for f(Ξ½, T, P)
fun::V
#wavenumber vector [cm^-1], must be identical for all gases
Ξ½::Vector{Float64}
#length of wavenumber vector
nΞ½::Int64
#flag indicating whether cia and fun are both empty
gasonly::Bool
#flags indicating where all gas objects are empty
gasempty::Vector{Bool}
end
GroupedAbsorber(absorbers...) = GroupedAbsorber(absorbers)
#splits a group of gas, cia, & functions objects into their own tuples
function GroupedAbsorber(absorbers::Tuple)
#can't be empty
@assert length(absorbers) > 0 "no absorbers... no need for modeling?"
#check for dups
@assert length(absorbers) == length(unique(absorbers)) "duplicate absorbers"
#types of absorbers
T = map(typeof, absorbers)
#check for unexpected types
for t β T
if !((t <: AbstractGas) | (t == CIATables) | (t <: Function))
throw("absorbers must only be gasses (<: AbstractGas), CIA objects, or functions in the form Ο(Ξ½, T, P)")
end
end
#all gases
G = absorbers[findall(t->t<:AbstractGas, T)]
#cia tables, pairing with the correct gases in the process
C = tuple([CIA(x, G...) for x β absorbers[findall(t->t==CIATables, T)]]...)
#functions in the form Ο(Ξ½, T, P)
F = absorbers[findall(t->!(t<:AbstractGas) & !(t==CIATables), T)]
#wavenumber vector, must be identical for all gases
Ξ½ = getwavenumbers(G...)
nΞ½ = length(Ξ½)
#flag indicating whether there are only gases present, no cias orfunctions
gasonly = isempty(C) & isempty(F)
#flags indicating whether all gases are empty at each wavenumber
gasempty = zeros(Bool, nΞ½)
for i β eachindex(Ξ½)
gasempty[i] = all(ntuple(j->G[j].Ξ [i].empty, length(G)))
end
return GroupedAbsorber(G, C, F, Ξ½, nΞ½, gasonly, gasempty)
end
#https://discourse.julialang.org/t/tuple-indexing-taking-time/58309/18?u=markmbaum
#recursively sum cross-sections over a (possibly heterogeneous) tuple of
#callables; the recursion keeps each call specialized, unlike runtime indexing
function Οrecur(A::Q, x, T, P)::Float64 where {Q}
    isempty(A) && return 0.0
    first(A)(x, T, P) + Οrecur(tail(A), x, T, P)
end
#total cross-section at wavenumber index i: gases are called with the index i,
#while CIA tables and plain functions are called with the wavenumber value Ξ½
function getΟ(ga::GroupedAbsorber, i::Int, T, P)::Float64
    Ξ½ = ga.Ξ½[i]
    Ο = Οrecur(ga.gas, i, T, P) + Οrecur(ga.cia, Ξ½, T, P) + Οrecur(ga.fun, Ξ½, T, P)
    return Ο
end
#functor form for a single wavenumber index
(ga::GroupedAbsorber)(i::Int, T, P)::Float64 = getΟ(ga, i, T, P)
#functor form evaluating all wavenumbers at one temperature and pressure
function (ga::GroupedAbsorber)(T::Real, P::Real)::Vector{Float64}
    [ga(i, T, P) for i β eachindex(ga.Ξ½)]
end
#check whether integration is pointless because there's no absorption
noabsorption(ga::GroupedAbsorber, i::Int)::Bool = ga.gasonly && ga.gasempty[i]
function noabsorption(ga::GroupedAbsorber)::Vector{Bool}
[noabsorption(ga, i) for i β eachindex(ga.Ξ½)]
end
function checkpressures(ga::GroupedAbsorber, pressures...)
checkpressures(ga.gas, pressures...)
end
#-------------------------------------------------------------------------------
# accelerated interpolation of cross-sections
export AcceleratedAbsorber, update!
"""
AcceleratedAbsorber(ga, P, T)
An accelerated struct for getting cross-sections from groups of absorbers. Pressure and temperature coordinates must be provided.
"""
struct AcceleratedAbsorber <: UnifiedAbsorber
#cross-section interpolators
Ο::Vector{LinearInterpolator{Float64,WeakBoundaries}}
#wavenumber vector [cm^-1], must be identical for all gases
Ξ½::Vector{Float64}
#length of wavenumber vector
nΞ½::Int64
#flag indicating whether there is no absorption
empty::Vector{Bool}
#original pressures
P::Vector{Float64}
#reference to GroupedAbsorber
ga::GroupedAbsorber
end
function AcceleratedAbsorber(ga::GroupedAbsorber, P::Vector{Float64}, T::Vector{Float64})
    Ξ½, nΞ½ = ga.Ξ½, ga.nΞ½
    logP = log.(P)
    #interpolators of log(Ο) against log(P), one per wavenumber
    Ο = Vector{LinearInterpolator{Float64,WeakBoundaries}}(undef, nΞ½)
    empty = zeros(Bool, nΞ½)
    @threads for i β eachindex(Ξ½)
        empty[i] = noabsorption(ga, i)
        if !empty[i]
            #cross-sections along the T,P profile; use a distinct local name so
            #the interpolator vector Ο above is not shadowed and overwritten
            Οi = ga.(i, T, P)
            #floor tiny values so the logarithm below stays finite
            Οi[Οi .< 1e-200] .= 1e-200
            Ο[i] = LinearInterpolator(logP, log.(Οi), WeakBoundaries())
        end
    end
    AcceleratedAbsorber(Ο, Ξ½, nΞ½, empty, P, ga)
end
#also a method specifically for interpolators, P vs log(Ο)
getΟ(aa::AcceleratedAbsorber, i, _, P)::Float64 = exp(aa.Ο[i](log(P)))
(aa::AcceleratedAbsorber)(i::Int, P)::Float64 = getΟ(aa, i, nothing, P)
function (aa::AcceleratedAbsorber)(P::Real)::Vector{Float64}
[aa(i, P) for i β eachindex(aa.Ξ½)]
end
noabsorption(aa::AcceleratedAbsorber, i::Int)::Bool = aa.empty[i]
#vectorized check over all wavenumbers of an AcceleratedAbsorber
#(previously annotated ::GroupedAbsorber, silently redefining that method while
#the body referenced an undefined `aa`)
function noabsorption(aa::AcceleratedAbsorber)::Vector{Bool}
    [noabsorption(aa, i) for i β eachindex(aa.Ξ½)]
end
function checkpressures(aa::AcceleratedAbsorber, pressures...)
checkpressures(aa.ga.gas, pressures...)
end
"""
    update!(aa::AcceleratedAbsorber, T::Vector{Float64})

Update the cross-section interpolators of an [`AcceleratedAbsorber`](@ref)
in-place for a new temperature profile at the stored pressure coordinates.
"""
function update!(aa::AcceleratedAbsorber, T::Vector{Float64})
    @assert length(T) == length(aa.P)
    @threads for i β eachindex(aa.Ο)
        if !aa.empty[i]
            for j β eachindex(aa.P)
                #floor Ο before taking the log, matching the construction of the
                #interpolators (clamping the log itself at 1e-200 would force
                #every stored cross-section toward Ο β₯ 1)
                aa.Ο[i].r.y[j] = log(max(aa.ga(i, T[j], aa.P[j]), 1e-200))
            end
        end
    end
end
#-------------------------------------------------------------------------------
#function and cache for gaussian quadrature of multiple streams over the azimuth
const NODECACHE = Dict{Int64,NTuple{2,Vector{Float64}}}()
function streamnodes(n::Int)::NTuple{2,Vector{Float64}}
#pedantic with the key type
n = convert(Int64, n)
#getting the gauss nodes is fast but not trivial
if !haskey(NODECACHE, n)
if n < 4
@warn "careful! using nstream < 4 is likely to be inaccurate!" maxlog=1
end
#gauss-legendre quadrature points and weights in [-1,1]
x, w = gauss(n)
#map angles and weights to ΞΈ β [0,Ο/2]
ΞΈ = @. (Ο/2)*(x + 1)/2
w .*= (Ο/2)/2
#sines and cosines of ΞΈ
c = cos.(ΞΈ)
s = sin.(ΞΈ)
#precompute 2Ο*cos(ΞΈ)*sin(ΞΈ)*wzxc
W = @. 2Ο*w*c*s
#precompute 1/cos(ΞΈ) using "m" because ΞΌ is for gas molar masses
m = 1 ./ c
#store these values
NODECACHE[n] = (m, W)
end
return NODECACHE[n]
end
#-------------------------------------------------------------------------------
# functions for setting up absorbers, coordinates, etc. for integration
#operations common to setting up high-level radiative functions
function setupintegration(Pβ, Pβ, absorbers)
#split gas objects from cia objects
ga = GroupedAbsorber(absorbers)
#check pressures in order
@assert Pβ > Pβ "Pβ must be greater than Pβ"
#check pressures against AtmosphericDomains
checkpressures(ga.gas, Pβ, Pβ)
return ga
end
#-------------------------------------------------------------------------------
# core differential equations with Tuples of parameters
function dΟdP(P::Float64, Ο::Float64, param::Tuple)::Float64
#unpack parameters
A, i, g, m, fT, fΞΌ = param
#temperature from given profile
T = fT(P)
#mean molar mass
ΞΌ = fΞΌ(T, P)
#sum of all cross-sections
Ο = getΟ(A, i, T, P)
#compute dΟ/dlnP, scaled by the angle m = 1/cos(ΞΈ)
m*dΟdP(Ο, g, ΞΌ) #no Planck emission
end
function dIdP(P::Float64, I::Float64, param::Tuple)::Float64
#unpack parameters
A, i, g, m, fT, fΞΌ = param
#compute temperature from given profile
T = fT(P)
#compute mean molar mass
ΞΌ = fΞΌ(T, P)
#sum of all cross-sections
Ο = getΟ(A, i, T, P)
#compute dI/dlnP, scaled by the angle m = 1/cos(ΞΈ)
m*schwarzschild(I, A.Ξ½[i], Ο, g, ΞΌ, T)
end
#-------------------------------------------------------------------------------
# wrappers for log pressure coordinates
function dΟdΞΉ(ΞΉ::Float64, Ο::Float64, param::Tuple)::Float64
P = ΞΉ2P(ΞΉ)
P*dΟdP(P, Ο, param)
end
function dΟdΟ(Ο::Float64, Ο::Float64, param::Tuple)::Float64
P = Ο2P(Ο)
P*dΟdP(P, Ο, param)
end
function dIdΟ(Ο::Float64, I::Float64, param::Tuple)::Float64
P = Ο2P(Ο)
P*dIdP(P, I, param)
end
function dIdΞΉ(ΞΉ::Float64, I::Float64, param::Tuple)::Float64
P = ΞΉ2P(ΞΉ)
P*dIdP(P, I, param)
end
#-------------------------------------------------------------------------------
# functions for optical depth paths
#integrate dΟ/dx between coordinates xβ and xβ for wavenumber index idx,
#returning the optical depth along a path slanted by m = 1/cos(ΞΈ)
function depth(dΟdx::Q,
               xβ::Real,
               xβ::Real,
               A::UnifiedAbsorber,
               idx::Int,
               g::Real,
               m::Real, # 1/cos(ΞΈ)
               fT::R,
               fΞΌ::S,
               tol::Float64
               )::Float64 where {Q,R,S}
    #if zero absorption, don't integrate
    noabsorption(A, idx) && return 0.0
    #pack parameters
    param = (A, idx, g, m, fT, fΞΌ)
    #integrate with the ODE solver (appears to be faster than quadrature)
    radau(dΟdx, 0.0, xβ, xβ, param, atol=tol, rtol=tol)
end
#-------------------------------------------------------------------------------
# functions for streams and fluxes up/down the atmosphere, no storage
function stream(dIdx::Q, #version of schwarzschild equation
Iβ::Real, #initial irradiance
xβ::Real, #initial pressure coordinate
xβ::Real, #final pressure coordinate
A::UnifiedAbsorber,
idx::Int, #index for wavenumber and opacity table
g::Real, #gravity [m/s^2]
m::Real, #1/cos(ΞΈ), where ΞΈ is the stream angle
fT::R, #temperature profile fT(P)
fΞΌ::S, #mean molar mass ΞΌ(T,P)
tol::Float64 #integrator error tolerance
)::Float64 where {Q,R,S}
#if zero absorption, don't integrate
noabsorption(A, idx) && return Iβ
#pack parameters
param = (A, idx, g, m, fT, fΞΌ)
#integrate the Schwarzschild equation in log pressure coords and return
radau(dIdx, Iβ, xβ, xβ, param, atol=tol, rtol=tol)
end
"""
Compute a hemispherically integrated flux by solving the Schwarzschild
equation over `nstream` Gaussian-quadrature angles (nodes and weights from
`streamnodes`) and accumulating the weighted stream irradiances. The weights
`W` already include the 2π·cos(θ)·sin(θ) factor of the hemispheric integral,
so the loop is a plain weighted sum over `stream` solutions.

NOTE(review): the two coordinate parameters both appear as the identifier
`xβ` in this copy — presumably mojibake for distinct symbols; confirm
against the repository source.
"""
function streams(dIdx::Q, #version of schwarzschild equation
                 Iβ::Real, #initial irradiance
                 xβ::Real, #initial pressure coordinate
                 xβ::Real, #final pressure coordinate
                 A::UnifiedAbsorber,
                 idx::Int, #index for wavenumber and opacity table
                 g::Real, #gravity [m/s^2]
                 nstream::Int, #number of quadrature angles over the hemisphere
                 fT::R, #temperature profile fT(P)
                 fμ::S, #mean molar mass μ(T,P)
                 tol::Float64 #integrator error tolerance
                 )::Float64 where {Q,R,S}
    #setup gaussian quadrature nodes (m = 1/cos(θ) values and weights)
    m, W = streamnodes(nstream)
    #solve schwarzschild w multiple streams, integrating over hemisphere
    F = 0.0
    for i ∈ 1:nstream
        I = stream(dIdx, Iβ, xβ, xβ, A, idx, g, m[i], fT, fμ, tol)
        # integral over hemisphere: ∫∫ I cos(θ) sin(θ) dθ dϕ, where θ∈[0,π/2], ϕ∈[0,2π]
        F += W[i]*I #W = 2π*w*cos(θ)*sin(θ), precomputed
    end
    return F
end
#-------------------------------------------------------------------------------
# functions for streams and fluxes up/down the atmosphere with in-place storage
"""
In-place variant of `stream`: solve the Schwarzschild equation for one
stream angle and write the irradiance profile into `I` at the coordinates in
`x`, using the in-place `radau!` solver. When the absorber has no absorption
at index `idx`, the entire output vector is filled with `Iβ` and no
integration is performed. Always returns `nothing`.

NOTE(review): the two coordinate parameters both appear as the identifier
`xβ` in this copy — presumably mojibake for distinct symbols; confirm
against the repository source.
"""
function stream!(dIdx::Q, #version of schwarzschild equation
                 Iβ::Real, #initial irradiance
                 I::AbstractVector{Float64}, #output/solution vector
                 x::Vector{Float64}, #output/solution coordinates
                 xβ::Real, #initial pressure coordinate
                 xβ::Real, #final pressure coordinate
                 A::UnifiedAbsorber,
                 idx::Int, #index for wavenumber and interpolator
                 g::Real, #gravity [m/s^2]
                 m::Real, #1/cos(θ), where θ is the stream angle
                 fT::R, #temperature profile fT(P)
                 fμ::S, #mean molar mass μ(T,P)
                 tol::Float64 #integrator error tolerance
                 )::Nothing where {Q,R,S}
    #if zero absorption, don't integrate — fill with the boundary value
    if noabsorption(A, idx)
        I .= Iβ
        return nothing
    end
    #pack parameters
    param = (A, idx, g, m, fT, fμ)
    #integrate the Schwarzschild equation in log pressure coords, in-place
    radau!(I, x, dIdx, Iβ, xβ, xβ, param, atol=tol, rtol=tol)
    return nothing
end
"""
In-place variant of `streams`: accumulate the hemispherically integrated
flux profile into `F` by solving the Schwarzschild equation for `nstream`
quadrature angles. `I` is scratch storage for each stream solution and `x`
receives the solution coordinates; all three vectors must have equal length.
Always returns `nothing`.

NOTE(review): the two coordinate parameters both appear as the identifier
`xβ` in this copy — presumably mojibake for distinct symbols; confirm
against the repository source.
"""
function streams!(dIdx::Q, #version of schwarzschild equation
                  Iβ::Real, #initial irradiance
                  I::AbstractVector{Float64}, #temporary irradiance vector
                  F::AbstractVector{Float64}, #output/solution vector
                  x::Vector{Float64}, #output/solution coordinates
                  xβ::Real, #initial pressure coordinate
                  xβ::Real, #final pressure coordinate
                  A::UnifiedAbsorber,
                  idx::Int, #index for wavenumber and opacity table
                  g::Real, #gravity [m/s^2]
                  nstream::Int, #number of quadrature angles over the hemisphere
                  fT::R, #temperature profile fT(P)
                  fμ::S, #mean molar mass μ(T,P)
                  tol::Float64 #integrator error tolerance
                  )::Nothing where {Q,R,S}
    #explicit input validation (an @assert here could be compiled out)
    length(I) == length(F) == length(x) || throw(DimensionMismatch(
        "I, F, and x must have equal lengths"))
    #setup gaussian quadrature nodes (m = 1/cos(θ) values and weights)
    m, W = streamnodes(nstream)
    #wipe any pre-existing values
    F .= 0.0
    #solve schwarzschild w multiple streams, integrating over hemisphere
    for i ∈ 1:nstream
        stream!(dIdx, Iβ, I, x, xβ, xβ, A, idx, g, m[i], fT, fμ, tol)
        # integral over hemisphere: ∫∫ I cos(θ) sin(θ) dθ dϕ, where θ∈[0,π/2], ϕ∈[0,2π]
        for j ∈ eachindex(F)
            F[j] += W[i]*I[j] #W = 2π*w*cos(θ)*sin(θ), precomputed
        end
    end
    return nothing
end
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 61505 | const TMIN = 25.000000
const TMAX = 1000.000000 #upper temperature bound — presumably the valid range [K] of the partition-function fits below; TODO confirm
const MOLPARAM = [
#1, molecule number
[1,
#2, molecule formula
"H2O",
#3, molecule name
"Water",
#4, global isotopologue numbers
Int64[1, 2, 3, 4, 5, 6, 129],
#5, isotopologue formulae
String["H216O", "H218O", "H217O", "HD16O", "HD18O", "HD17O", "D216O"],
#6, AFGL code
Int64[161, 181, 171, 162, 182, 172, 262],
#7, abundance fractions
Float64[0.997317, 0.002, 0.000371884, 0.000310693, 6.23003e-07, 1.15853e-07, 2.4197e-08],
#8, molecular masses (kg/mole)
Float64[0.018010565, 0.020014811, 0.01901478, 0.019016739999999997, 0.021020985, 0.020020956000000003, 0.020022915000000002],
#9, Qref
Float64[174.58, 176.05, 1052.14, 864.74, 875.57, 5226.79, 1027.8],
#10, has interpolating chebyshev expansion?
Bool[true, true, true, true, true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[7, 7, 7, 8, 8, 8, 8],
#12, maximum relative errors of interpolation
Float64[0.0028, 0.0028, 0.0029, 0.0094, 0.0094, 0.0096, 0.0091],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[2.8854493216841015, 3.4632621262928915, 0.6001640690631624, 0.016178811904860996, 0.015211044164822699, -0.004364096635145624, 0.0012395758566148274],
Float64[2.8871644108577477, 3.465944358793127, 0.6012775643496776, 0.01626203386609267, 0.015073634800670662, -0.004421502003156756, 0.0012358242435368538],
Float64[2.8843598005839284, 3.4613544216657774, 0.5988802292683817, 0.015592997489868013, 0.015071636079156958, -0.0043773144167319105, 0.0012344781178038982],
Float64[2.988927843477164, 3.630916093351383, 0.6832681181788238, 0.03771653748442871, 0.01698698700299332, -0.0046881118088947715, 0.0017425144860104983, -0.00048799452244198605],
Float64[2.995264646453848, 3.641366946322687, 0.6888505901076387, 0.03935511770905783, 0.01702953291306411, -0.004802882155796924, 0.0017375357666057514, -0.0004781891512090551],
Float64[3.001991615535067, 3.651228173736914, 0.6917712772263419, 0.03877308927826554, 0.016728580596165613, -0.004750977994398704, 0.0017126280124818902, -0.00047877803465772625],
Float64[3.1511970042488793, 3.8905643121864615, 0.8073563192325093, 0.06660577751477327, 0.01805683243327309, -0.004508003223199252, 0.0020143284208121565, -0.0006707506150274156]
]
],
#1, molecule number
[2,
#2, molecule formula
"CO2",
#3, molecule name
"Carbon Dioxide",
#4, global isotopologue numbers
Int64[7, 8, 9, 10, 11, 12, 13, 14, 121, 15, 120, 122],
#5, isotopologue formulae
String["12C16O2", "13C16O2", "16O12C18O", "16O12C17O", "16O13C18O", "16O13C17O", "12C18O2", "17O12C18O", "12C17O2", "13C18O2", "18O13C17O", "13C17O2"],
#6, AFGL code
Int64[626, 636, 628, 627, 638, 637, 828, 827, 727, 838, 837, 737],
#7, abundance fractions
Float64[0.984204, 0.011057, 0.003947, 0.000733989, 4.43446e-05, 8.24623e-06, 3.95734e-06, 1.4717999999999998e-06, 1.36847e-07, 4.4459999999999996e-08, 1.65354e-08, 1.5375000000000001e-09],
#8, molecular masses (kg/mole)
Float64[0.04398983, 0.044993185, 0.045994076, 0.044994044999999996, 0.046997431, 0.0459974, 0.047998322, 0.046998291000000005, 0.045998262, 0.049001675, 0.048001646, 0.047001618237800004],
#9, Qref
Float64[286.09, 576.64, 607.81, 3542.61, 1225.46, 7141.32, 323.42, 3766.58, 10971.57, 652.24, 7595.04, 22120.47],
#10, has interpolating chebyshev expansion?
Bool[true, true, true, true, true, true, true, true, true, true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[7, 6, 7, 7, 6, 6, 7, 7, 7, 6, 6, 6],
#12, maximum relative errors of interpolation
Float64[0.0083, 0.0097, 0.0084, 0.0084, 0.0097, 0.0097, 0.0085, 0.0085, 0.0084, 0.0097, 0.0097, 0.0097],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[3.629868764336615, 4.6480808871061505, 1.3583273380857648, 0.2691914939179858, 0.012300344743387518, 0.004326678712311664, -0.0004876212718173771],
Float64[3.7179211688304883, 4.786969447800756, 1.420400066599882, 0.2814003436457875, 0.012916486545213601, 0.005068159441358943],
Float64[3.679005578420316, 4.726613084391979, 1.3953922253638875, 0.27781440229728993, 0.01314456082627761, 0.004419817566995832, -0.0004970214933885941],
Float64[3.655363377445139, 4.688828648648713, 1.3775475553307868, 0.2736558449760584, 0.012739727465199616, 0.0043752016281623325, -0.0004930857409695122],
Float64[3.770167807416816, 4.870520321377293, 1.45990358750975, 0.2906789686290149, 0.013857386918618885, 0.005159901644174525],
Float64[3.7449516621478636, 4.830204291937045, 1.4408492290337949, 0.28620850769525374, 0.013406177679559405, 0.005114771601030554],
Float64[3.7311033311460196, 4.809810916666548, 1.4345733694792095, 0.28689967884368855, 0.014046000533340042, 0.004524547647267359, -0.0005104693856422907],
Float64[3.705985669094004, 4.769708282307593, 1.4156946951706022, 0.2825250989881978, 0.013613061747109967, 0.0044736845305030455, -0.000503988378637151],
Float64[3.6816588053606285, 4.730840055680833, 1.3973606497267912, 0.2782602087069206, 0.013191466220107065, 0.004426317951875092, -0.000498417536415848],
Float64[3.8253535330410573, 4.958741134066428, 1.5015645373059177, 0.30044270685108937, 0.014856805465608858, 0.005263210781243188],
Float64[3.798742219093048, 4.916205675937364, 1.4814840206725617, 0.29574048323158914, 0.014376502287099413, 0.0052127748195452735],
Float64[3.772887856184446, 4.874864915118713, 1.461940449659132, 0.29114887205345086, 0.013907974624242314, 0.005165919678550068]
]
],
#1, molecule number
[3,
#2, molecule formula
"O3",
#3, molecule name
"Ozone",
#4, global isotopologue numbers
Int64[16, 17, 18, 19, 20],
#5, isotopologue formulae
String["16O3", "16O16O18O", "16O18O16O", "16O16O17O", "16O17O16O"],
#6, AFGL code
Int64[666, 668, 686, 667, 676],
#7, abundance fractions
Float64[0.992901, 0.003982, 0.001991, 0.000740475, 0.00037023700000000004],
#8, molecular masses (kg/mole)
Float64[0.047984744999999995, 0.049988990999999997, 0.049988990999999997, 0.04898896, 0.04898896],
#9, Qref
Float64[3483.71, 7465.68, 3647.08, 43330.85, 21404.96],
#10, has interpolating chebyshev expansion?
Bool[true, true, true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[8, 8, 8, 8, 8],
#12, maximum relative errors of interpolation
Float64[0.0081, 0.0085, 0.0082, 0.0083, 0.0081],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[5.198813326073512, 7.183627282058914, 2.4304058215844266, 0.47946442974793274, 0.04864953304330084, -0.003877687472476156, 0.0035095252917969333, -0.001342048768093613],
Float64[5.289447393725946, 7.328403504739264, 2.499786028390479, 0.4961019241043351, 0.05020868215804548, -0.0036478605934218778, 0.003467093548097568, -0.0013615638674566405],
Float64[5.32163909580045, 7.380784814348417, 2.527176975966621, 0.5040656085549876, 0.05090642322592535, -0.0036226978332624276, 0.0035401534819499147, -0.0013961748984979547],
Float64[5.24568623397049, 7.258502442396174, 2.46630032008053, 0.488079931032065, 0.049452989319160166, -0.0037593580768103658, 0.003494383612559509, -0.0013477264983363974],
Float64[5.262065893189635, 7.285163964911438, 2.4802630772444894, 0.4921466852101934, 0.0498022530511696, -0.0037485193119807087, 0.003527212690246679, -0.0013719459179242222]
]
],
#1, molecule number
[4,
#2, molecule formula
"N2O",
#3, molecule name
"Nitrogen oxide",
#4, global isotopologue numbers
Int64[21, 22, 23, 24, 25],
#5, isotopologue formulae
String["14N216O", "14N15N16O", "15N14N16O", "14N218O", "14N217O"],
#6, AFGL code
Int64[446, 456, 546, 448, 447],
#7, abundance fractions
Float64[0.990333, 0.003641, 0.003641, 0.001986, 0.00036928000000000004],
#8, molecular masses (kg/mole)
Float64[0.044001062, 0.044998095999999994, 0.044998095999999994, 0.046005308, 0.045005277999999996],
#9, Qref
Float64[4984.9, 3362.01, 3458.58, 5314.74, 30971.79],
#10, has interpolating chebyshev expansion?
Bool[true, true, true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[6, 6, 6, 6, 6],
#12, maximum relative errors of interpolation
Float64[0.007, 0.006, 0.0067, 0.0069, 0.0069],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[4.063103273197024, 5.333914221331409, 1.6639844097596002, 0.33067143798925497, 0.018935088814124867, 0.005959907290501931],
Float64[4.157765791743768, 5.4838751764969, 1.7297047842704245, 0.34323622510507956, 0.020258240973265628, 0.005979204910197211],
Float64[4.108724068330206, 5.406344295333827, 1.6961335382408997, 0.33687644580232573, 0.019294311268007645, 0.005866268561468502],
Float64[4.132106894555242, 5.44362298720071, 1.7136070490521895, 0.34075869422226673, 0.019555787367178824, 0.005909433476340098],
Float64[4.103738130527359, 5.3982340580606385, 1.6921417741556322, 0.3357097168910115, 0.01903526376545912, 0.005863689357000013]
]
],
#1, molecule number
[5,
#2, molecule formula
"CO",
#3, molecule name
"Carbon Monoxide",
#4, global isotopologue numbers
Int64[26, 27, 28, 29, 30, 31],
#5, isotopologue formulae
String["12C16O", "13C16O", "12C18O", "12C17O", "13C18O", "13C17O"],
#6, AFGL code
Int64[26, 36, 28, 27, 38, 37],
#7, abundance fractions
Float64[0.986544, 0.011084, 0.001978, 0.000367867, 2.2225000000000005e-05, 4.13292e-06],
#8, molecular masses (kg/mole)
Float64[0.027994915, 0.028998270000000003, 0.029999161, 0.02899913, 0.031002516, 0.030002485],
#9, Qref
Float64[107.42, 224.69, 112.77, 661.17, 236.44, 1384.66],
#10, has interpolating chebyshev expansion?
Bool[true, true, true, true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[4, 4, 4, 4, 4, 4],
#12, maximum relative errors of interpolation
Float64[0.0066, 0.0066, 0.0066, 0.0066, 0.0067, 0.0067],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[1.7730339354052334, 1.7139901664342414, 0.04076391415723165, 0.012492844178307502],
Float64[1.7765934677858093, 1.7196928711145105, 0.04337971749168412, 0.013094903152256995],
Float64[1.7769442249020646, 1.7202283022733258, 0.043607118540340384, 0.013146398444950247],
Float64[1.7750240019453913, 1.7171872823767103, 0.04223776885485675, 0.012834504758217532],
Float64[1.7808256851317825, 1.7264850221557417, 0.04648621217831724, 0.013784369656134091],
Float64[1.7787537718136903, 1.7231790283878674, 0.04498907480569617, 0.013455416168919024]
]
],
#1, molecule number
[6,
#2, molecule formula
"CH4",
#3, molecule name
"Methane",
#4, global isotopologue numbers
Int64[32, 33, 34, 35],
#5, isotopologue formulae
String["12CH4", "13CH4", "12CH3D", "13CH3D"],
#6, AFGL code
Int64[211, 311, 212, 312],
#7, abundance fractions
Float64[0.988274, 0.011103, 0.0006157510000000001, 6.917849999999999e-06],
#8, molecular masses (kg/mole)
Float64[0.016031300000000002, 0.017034655, 0.017037475, 0.01804083],
#9, Qref
Float64[590.48, 1180.82, 4794.73, 9599.16],
#10, has interpolating chebyshev expansion?
Bool[true, true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[11, 11, 8, 8],
#12, maximum relative errors of interpolation
Float64[0.0073, 0.0073, 0.0069, 0.0069],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[4.559320057536849, 6.246154790519078, 2.15851344284554, 0.563745455962757, 0.12015959690385385, 0.0061053536914517535, 0.004893088743789864, -0.0004958586065243687, 6.363692773234675e-05, -0.0001658264468598958, 0.0001301035427090369],
Float64[4.5276900460578515, 6.192333563178976, 2.125650980876807, 0.549549228804292, 0.11577307616779402, 0.005066519270059899, 0.004703210444850381, -0.0005103723347237743, 6.382180973449892e-05, -0.00016759358231226428, 0.0001297215051426548],
Float64[5.03072658646721, 7.023825144119229, 2.5828817683596705, 0.7081888710343947, 0.1496415451034128, 0.01136677561558308, 0.0055690189660779765, -0.0005441941308146982],
Float64[5.038521129225728, 7.036277974255681, 2.5888898889443, 0.7096428906298792, 0.14975799107681567, 0.011399201669279182, 0.005580246938073178, -0.0005477396012294784]
]
],
#1, molecule number
[7,
#2, molecule formula
"O2",
#3, molecule name
"Oxygen",
#4, global isotopologue numbers
Int64[36, 37, 38],
#5, isotopologue formulae
String["16O2", "16O18O", "16O17O"],
#6, AFGL code
Int64[66, 68, 67],
#7, abundance fractions
Float64[0.995262, 0.003991, 0.000742235],
#8, molecular masses (kg/mole)
Float64[0.031989830000000004, 0.033994076, 0.032994045],
#9, Qref
Float64[215.73, 455.23, 2658.12],
#10, has interpolating chebyshev expansion?
Bool[true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[4, 4, 4],
#12, maximum relative errors of interpolation
Float64[0.0041, 0.0046, 0.0049],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[1.8169911476436258, 1.7787349638684251, 0.06038173512650354, 0.010527289874679694],
Float64[1.8579285870104456, 1.8479587879352657, 0.09662133281030434, 0.021907776219117842],
Float64[1.8534855385966251, 1.841048066429084, 0.09392781106403698, 0.021584982116504392]
]
],
#1, molecule number
[8,
#2, molecule formula
"NO",
#3, molecule name
"Nitric Oxide",
#4, global isotopologue numbers
Int64[39, 40, 41],
#5, isotopologue formulae
String["14N16O", "15N16O", "14N18O"],
#6, AFGL code
Int64[46, 56, 48],
#7, abundance fractions
Float64[0.993974, 0.003654, 0.001993],
#8, molecular masses (kg/mole)
Float64[0.029997989, 0.030995023, 0.032002234000000004],
#9, Qref
Float64[1142.13, 789.26, 1204.44],
#10, has interpolating chebyshev expansion?
Bool[true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[10, 10, 10],
#12, maximum relative errors of interpolation
Float64[0.0083, 0.0084, 0.0085],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[2.022879002635259, 2.1237886286875303, 0.12399689023022563, -0.009259037224789404, 0.01487593099804976, -0.007768913072230708, 0.002867728523709386, -0.0007316837794678621, -7.443930904828101e-05, 0.00013029548728265498],
Float64[2.0273058689602084, 2.1309113128029358, 0.12723729874870546, -0.008638605216508043, 0.014726863586520296, -0.0077965256468788685, 0.0028961073794491693, -0.000737512056292195, -6.570282357958806e-05, 0.00013225984839040607],
Float64[2.0294899939509414, 2.1344200568742244, 0.12882581216417885, -0.008331484385140185, 0.014653941163746952, -0.007814956390896736, 0.002906477403806661, -0.0007396933931758479, -6.357731128186433e-05, 0.00012902023942729102]
]
],
#1, molecule number
[9,
#2, molecule formula
"SO2",
#3, molecule name
"Sulfur Dioxide",
#4, global isotopologue numbers
Int64[42, 43],
#5, isotopologue formulae
String["32S16O2", "34S16O2"],
#6, AFGL code
Int64[626, 646],
#7, abundance fractions
Float64[0.945678, 0.04195],
#8, molecular masses (kg/mole)
Float64[0.063961901, 0.065957695],
#9, Qref
Float64[6340.3, 6368.98],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[9, 9],
#12, maximum relative errors of interpolation
Float64[0.008, 0.008],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[5.322899596644206, 7.361793665479842, 2.4685172475289034, 0.4593724914539494, 0.04693984072073576, -0.0022777800327913322, 0.0016853901083817568, -0.0009605272724346747, 0.0003893627846269787],
Float64[5.322098471801372, 7.360509652089675, 2.4679029817288893, 0.4592255365312494, 0.04692738529504803, -0.0022790239425536374, 0.0016852856097759883, -0.0009602665910768415, 0.00038778403377648374]
]
],
#1, molecule number
[10,
#2, molecule formula
"NO2",
#3, molecule name
"Nitrogen Dioxide",
#4, global isotopologue numbers
Int64[44, 130],
#5, isotopologue formulae
String["14N16O2", "15N16O2"],
#6, AFGL code
Int64[646, 656],
#7, abundance fractions
Float64[0.991616, 0.003646],
#8, molecular masses (kg/mole)
Float64[0.045992904, 0.046989938],
#9, Qref
Float64[13577.48, 9324.7],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[8, 8],
#12, maximum relative errors of interpolation
Float64[0.0098, 0.0098],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[4.28158486704412, 5.705703174793279, 1.6937108567035881, 0.28623161362735894, 0.03210370018398624, -0.004833093686365356, 0.002727672554545535, -0.0009386770019688129],
Float64[4.28158486704412, 5.705703174793279, 1.6937108567035881, 0.28623161362735894, 0.03210370018398624, -0.004833093686365356, 0.002727672554545535, -0.0009386770019688129]
]
],
#1, molecule number
[11,
#2, molecule formula
"NH3",
#3, molecule name
"Ammonia",
#4, global isotopologue numbers
Int64[45, 46],
#5, isotopologue formulae
String["14NH3", "15NH3"],
#6, AFGL code
Int64[4111, 5111],
#7, abundance fractions
Float64[0.995872, 0.003661],
#8, molecular masses (kg/mole)
Float64[0.017026549, 0.018023583],
#9, Qref
Float64[1725.22, 1153.3],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[9, 9],
#12, maximum relative errors of interpolation
Float64[0.0075, 0.0075],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[3.8555960403301373, 5.040288257810079, 1.4109697012367264, 0.24722432325106913, 0.04050656557366161, -0.0038690829501497603, 0.002775394410359233, -0.0008768219769410557, 0.00020945927498106087],
Float64[3.7706796997985936, 4.891536114323181, 1.3123307039988636, 0.20002602773432243, 0.02604944595386982, -0.005835809118104995, 0.002916794286104585, -0.0008693703104207806, 0.00018585921410796402]
]
],
#1, molecule number
[12,
#2, molecule formula
"HNO3",
#3, molecule name
"Nitric Acid",
#4, global isotopologue numbers
Int64[47, 117],
#5, isotopologue formulae
String["H14N16O3", "H15N16O3"],
#6, AFGL code
Int64[146, 156],
#7, abundance fractions
Float64[0.98911, 0.003636],
#8, molecular masses (kg/mole)
Float64[0.062995644, 0.06399268],
#9, Qref
Float64[214000.0, 143000.0],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[9, 9],
#12, maximum relative errors of interpolation
Float64[0.0072, 0.0073],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[23.230966874867196, 37.713046763492684, 20.728891814718033, 8.121744804012192, 2.305601327065478, 0.48245116851004877, 0.07614122727266803, 0.0068653086813306174, 0.0014066597448483265],
Float64[23.63526946297278, 38.40170483557111, 21.15217371744953, 8.308610842642612, 2.3655429409129027, 0.49660656393444746, 0.07850397352590655, 0.0071504774322548315, 0.0014405894745461723]
]
],
#1, molecule number
[13,
#2, molecule formula
"OH",
#3, molecule name
"Hydroxyl",
#4, global isotopologue numbers
Int64[48, 49, 50],
#5, isotopologue formulae
String["16OH", "18OH", "16OD"],
#6, AFGL code
Int64[61, 81, 62],
#7, abundance fractions
Float64[0.997473, 0.002, 0.000155371],
#8, molecular masses (kg/mole)
Float64[0.01700274, 0.019006986, 0.018008914999999997],
#9, Qref
Float64[80.35, 80.88, 209.32],
#10, has interpolating chebyshev expansion?
Bool[true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[9, 9, 8],
#12, maximum relative errors of interpolation
Float64[0.0068, 0.0069, 0.0064],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[1.8348152488068417, 1.7541974761207322, 0.05921231834937474, -0.025587632394101223, 0.016697790226026132, -0.008941690431595317, 0.0052568726754337915, -0.003495533506610027, 0.0014773841302516688],
Float64[1.8505560852233072, 1.7811349343619796, 0.07407620302602635, -0.020226248256200174, 0.017225839806468524, -0.009253742271296772, 0.005191375517307202, -0.003471462133196712, 0.0014838792162081837],
Float64[1.8980931808868446, 1.8878476706620586, 0.06346999293280998, -0.025382216475793522, 0.01515875114784657, -0.007217914788709562, 0.003960092995426656, -0.0015407268617251596]
]
],
#1, molecule number
[14,
#2, molecule formula
"HF",
#3, molecule name
"Hydrogen Fluoride",
#4, global isotopologue numbers
Int64[51, 110],
#5, isotopologue formulae
String["H19F", "D19F"],
#6, AFGL code
Int64[19, 29],
#7, abundance fractions
Float64[0.999844, 0.000155741],
#8, molecular masses (kg/mole)
Float64[0.020006229, 0.021012404],
#9, Qref
Float64[41.47, 115.91],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[8, 3],
#12, maximum relative errors of interpolation
Float64[0.0099, 0.0072],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[1.7159194314292705, 1.603109095729781, 0.007421783834501869, 7.083322701672393e-05, 0.00164524541490359, -0.0009050350115482955, 0.0008683741416758904, -0.0004347685534003633],
Float64[1.7382395497979517, 1.6521889828315073, 0.016506244352682442]
]
],
#1, molecule number
[15,
#2, molecule formula
"HCl",
#3, molecule name
"Hydrogen Chloride",
#4, global isotopologue numbers
Int64[52, 53, 107, 108],
#5, isotopologue formulae
String["H35Cl", "H37Cl", "D35Cl", "D37Cl"],
#6, AFGL code
Int64[15, 17, 25, 27],
#7, abundance fractions
Float64[0.757587, 0.242257, 0.000118005, 3.7735000000000004e-05],
#8, molecular masses (kg/mole)
Float64[0.035976678, 0.037973729, 0.036982852999999996, 0.038979903999999996],
#9, Qref
Float64[160.65, 160.89, 462.78, 464.13],
#10, has interpolating chebyshev expansion?
Bool[true, true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[3, 3, 4, 4],
#12, maximum relative errors of interpolation
Float64[0.0076, 0.0077, 0.0075, 0.0075],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[1.7389932647539617, 1.6540553376906317, 0.016823417474485014],
Float64[1.7390283919416012, 1.6541608552427125, 0.016862589474273326],
Float64[1.7769014767992009, 1.7170608001937298, 0.046270829696325975, 0.013326424837602602],
Float64[1.7771378593252287, 1.7174573475033619, 0.04644272154650967, 0.013365234531843573]
]
],
#1, molecule number
[16,
#2, molecule formula
"HBr",
#3, molecule name
"Hydrogen Bromide",
#4, global isotopologue numbers
Int64[54, 55, 111, 112],
#5, isotopologue formulae
String["H79Br", "H81Br", "D79Br", "D81Br"],
#6, AFGL code
Int64[19, 11, 29, 21],
#7, abundance fractions
Float64[0.506781, 0.493063, 7.893840000000001e-05, 7.68016e-05],
#8, molecular masses (kg/mole)
Float64[0.07992616, 0.081924115, 0.08093233600000001, 0.082930289],
#9, Qref
Float64[200.17, 200.23, 586.4, 586.76],
#10, has interpolating chebyshev expansion?
Bool[true, true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[4, 4, 4, 4],
#12, maximum relative errors of interpolation
Float64[0.0075, 0.0075, 0.0072, 0.0072],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[1.7488750133890794, 1.669328183117771, 0.0263672556822101, 0.008005907904859702],
Float64[1.748900710055193, 1.669366126576109, 0.026379118142379514, 0.008009591348277537],
Float64[1.8028059428968322, 1.7589216134192316, 0.0639923517825675, 0.016990238558940145],
Float64[1.8028645166164645, 1.7590220344851282, 0.06403776029402719, 0.01699860427688697]
]
],
#1, molecule number
[17,
#2, molecule formula
"HI",
#3, molecule name
"Hydrogen Iodide",
#4, global isotopologue numbers
Int64[56, 113],
#5, isotopologue formulae
String["H127I", "D127I"],
#6, AFGL code
Int64[17, 27],
#7, abundance fractions
Float64[0.999844, 0.000155741],
#8, molecular masses (kg/mole)
Float64[0.127912297, 0.128918472],
#9, Qref
Float64[388.99, 1147.06],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[4, 4],
#12, maximum relative errors of interpolation
Float64[0.0077, 0.0054],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[1.766635713209643, 1.699786979483175, 0.03930603336481905, 0.011626090261548741],
Float64[1.8405398208009391, 1.8185964784146365, 0.08822013072731567, 0.020907340042985556]
]
],
#1, molecule number
[18,
#2, molecule formula
"ClO",
#3, molecule name
"Chlorine Monoxide",
#4, global isotopologue numbers
Int64[57, 58],
#5, isotopologue formulae
String["35Cl16O", "37Cl16O"],
#6, AFGL code
Int64[56, 76],
#7, abundance fractions
Float64[0.755908, 0.24172],
#8, molecular masses (kg/mole)
Float64[0.050963768, 0.052960819],
#9, Qref
Float64[3274.61, 3332.29],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[7, 7],
#12, maximum relative errors of interpolation
Float64[0.0096, 0.0095],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[2.726090979162171, 3.1747524473309494, 0.53340481837599, 0.00862471634302627, -0.0033188447116862716, 0.002818989455709797, -0.0014107531874335184],
Float64[2.730927657002032, 3.182125195917262, 0.536151715425191, 0.008740020580801774, -0.0034008147472519936, 0.0028494736999678714, -0.001422681090598843]
]
],
#1, molecule number
[19,
#2, molecule formula
"OCS",
#3, molecule name
"Carbonyl Sulfide",
#4, global isotopologue numbers
Int64[59, 60, 61, 62, 63, 135],
#5, isotopologue formulae
String["16O12C32S", "16O12C34S", "16O13C32S", "16O12C33S", "18O12C32S", "16O13C34S"],
#6, AFGL code
Int64[622, 624, 632, 623, 822, 634],
#7, abundance fractions
Float64[0.937395, 0.041583, 0.010531, 0.007399, 0.00188, 0.00046750800000000005],
#8, molecular masses (kg/mole)
Float64[0.059966986, 0.06196278, 0.060970341, 0.060966371000000005, 0.061971231, 0.06296613599999999],
#9, Qref
Float64[1221.01, 1253.48, 2484.15, 4950.11, 1313.78, 2546.53],
#10, has interpolating chebyshev expansion?
Bool[true, true, true, true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[6, 6, 6, 6, 6, 6],
#12, maximum relative errors of interpolation
Float64[0.0057, 0.0058, 0.0054, 0.0057, 0.0051, 0.0057],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[5.126342127660176, 7.034492494351397, 2.46587731887598, 0.5193274408013411, 0.03999760328325479, 0.007938846511621734],
Float64[5.156948279839978, 7.083476707015535, 2.4892336653688045, 0.5249638067698701, 0.040620760059749725, 0.008015867648682118],
Float64[5.272543748922432, 7.269032398550013, 2.575970190347018, 0.5458344409651564, 0.043964679171464384, 0.008125632759047364],
Float64[5.142002462968687, 7.059554966519485, 2.4778239245689733, 0.5222067488374783, 0.04031309017091687, 0.007976767222211124],
Float64[5.2433970110477635, 7.222282031530504, 2.554774663298425, 0.5409890003043852, 0.043020671404242705, 0.008132056962323376],
Float64[5.126342127660176, 7.034492494351397, 2.46587731887598, 0.5193274408013411, 0.03999760328325479, 0.007938846511621734]
]
],
#1, molecule number
[20,
#2, molecule formula
"H2CO",
#3, molecule name
"Formaldehyde",
#4, global isotopologue numbers
Int64[64, 65, 66],
#5, isotopologue formulae
String["H212C16O", "H213C16O", "H212C18O"],
#6, AFGL code
Int64[126, 136, 128],
#7, abundance fractions
Float64[0.986237, 0.01108, 0.001978],
#8, molecular masses (kg/mole)
Float64[0.030010565, 0.03101392, 0.032014811000000004],
#9, Qref
Float64[2844.53, 5837.69, 2986.44],
#10, has interpolating chebyshev expansion?
Bool[true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[7, 8, 8],
#12, maximum relative errors of interpolation
Float64[0.009, 0.0066, 0.0066],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[4.099296685534555, 5.46254008647207, 1.6762575149353685, 0.3607112319829267, 0.0658749047199505, -0.0032135863867808943, 0.0031701326455504386],
Float64[4.096562472963226, 5.45838322129773, 1.6745262086094586, 0.36054806548097645, 0.06622828358352943, -0.0027924541403665515, 0.0030753561215511077, -0.0005451817250504222],
Float64[4.0966583506132235, 5.458540573571016, 1.6745828181967304, 0.3605577126652277, 0.06623155701519713, -0.0027929745447514065, 0.0030755155592398103, -0.0005453868311245154]
]
],
#1, molecule number
[21,
#2, molecule formula
"HOCl",
#3, molecule name
"Hypochlorous Acid",
#4, global isotopologue numbers
Int64[67, 68],
#5, isotopologue formulae
String["H16O35Cl", "H16O37Cl"],
#6, AFGL code
Int64[165, 167],
#7, abundance fractions
Float64[0.75579, 0.241683],
#8, molecular masses (kg/mole)
Float64[0.051971592999999996, 0.053968643999999996],
#9, Qref
Float64[19274.79, 19616.2],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[9, 9],
#12, maximum relative errors of interpolation
Float64[0.0041, 0.0041],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[4.143619456416505, 5.464534302985132, 1.535937117235669, 0.21748079463269954, 0.01850070265914905, -0.003603611521264982, 0.0025436888861178897, -0.0012872390879090645, 0.0004250820383528975],
Float64[4.143652887452801, 5.46458889118496, 1.5359558687538701, 0.21748297167936315, 0.018501348337276458, -0.0036037772770924903, 0.0025437650379011023, -0.0012874828201296928, 0.0004249718859172802]
]
],
#1, molecule number
[22,
#2, molecule formula
"N2",
#3, molecule name
"Nitrogen",
#4, global isotopologue numbers
Int64[69, 118],
#5, isotopologue formulae
String["14N2", "14N15N"],
#6, AFGL code
Int64[44, 45],
#7, abundance fractions
Float64[0.992687, 0.007478],
#8, molecular masses (kg/mole)
Float64[0.028006147999999998, 0.029003182],
#9, Qref
Float64[467.1, 644.1],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[4, 4],
#12, maximum relative errors of interpolation
Float64[0.006, 0.0061],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[1.7615954213555896, 1.6956422885022302, 0.03176548637294833, 0.01029121610063847],
Float64[1.763674187848655, 1.6990430426037548, 0.03341123366974245, 0.010717926189910779]
]
],
#1, molecule number
[23,
#2, molecule formula
"HCN",
#3, molecule name
"Hydrogen Cyanide",
#4, global isotopologue numbers
Int64[70, 71, 72],
#5, isotopologue formulae
String["H12C14N", "H13C14N", "H12C15N"],
#6, AFGL code
Int64[124, 134, 125],
#7, abundance fractions
Float64[0.985114, 0.011068, 0.003622],
#8, molecular masses (kg/mole)
Float64[0.027010898999999998, 0.028014254000000002, 0.028007933000000002],
#9, Qref
Float64[892.2, 1830.97, 615.28],
#10, has interpolating chebyshev expansion?
Bool[true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[7, 5, 5],
#12, maximum relative errors of interpolation
Float64[0.0081, 0.01, 0.0079],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[3.1500573196671033, 3.871952656895569, 0.9740837519171931, 0.1661471760780874, -0.0004121953054499657, 0.0038889475688232977, -0.00040715469175870805],
Float64[3.168146071291571, 3.8993492460929553, 0.9876728830526877, 0.17458143818914662, -0.0006551859591950038],
Float64[3.3184960101262417, 4.140109825563163, 1.1038407893004953, 0.19960512535349317, -0.0016887611352265353]
]
],
#1, molecule number
[24,
#2, molecule formula
"CH3Cl",
#3, molecule name
"Methyl Chloride",
#4, global isotopologue numbers
Int64[73, 74],
#5, isotopologue formulae
String["12CH335Cl", "12CH337Cl"],
#6, AFGL code
Int64[215, 217],
#7, abundance fractions
Float64[0.748937, 0.239491],
#8, molecular masses (kg/mole)
Float64[0.049992328, 0.051989379],
#9, Qref
Float64[57916.12, 58833.9],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[8, 8],
#12, maximum relative errors of interpolation
Float64[0.0083, 0.0083],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[7.29969230201383, 10.76299105271942, 4.6188215208386625, 1.4061580874699755, 0.3052046132618746, 0.04120328062335891, 0.009648711581299096, -0.0008605824437495357],
Float64[7.299832398848403, 10.763224928473894, 4.61894630404787, 1.4062027724380906, 0.3052168648070337, 0.041205502783890936, 0.00964906200847285, -0.0008605280785930956]
]
],
#1, molecule number
[25,
#2, molecule formula
"H2O2",
#3, molecule name
"Hydrogen Peroxide",
#4, global isotopologue numbers
Int64[75],
#5, isotopologue formulae
String["H216O2"],
#6, AFGL code
Int64[1661],
#7, abundance fractions
Float64[0.994952],
#8, molecular masses (kg/mole)
Float64[0.03400548],
#9, Qref
Float64[9847.99],
#10, has interpolating chebyshev expansion?
Bool[true],
#11, lengths of interpolating chebyshev expansion
Int64[11],
#12, maximum relative errors of interpolation
Float64[0.0067],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[6.659148751736031, 9.483437660502407, 3.4240505334530247, 0.6560605220688904, 0.07013734708443575, 0.0001766124815127057, 0.000513028749733202, -0.00029800025544997056, 7.57280500629065e-05, -0.00016363556385172727, 0.00010136968662237678]
]
],
#1, molecule number
[26,
#2, molecule formula
"C2H2",
#3, molecule name
"Acetylene",
#4, global isotopologue numbers
Int64[76, 77, 105],
#5, isotopologue formulae
String["12C2H2", "H12C13CH", "H12C12CD"],
#6, AFGL code
Int64[1221, 1231, 1222],
#7, abundance fractions
Float64[0.977599, 0.021966, 0.00030455],
#8, molecular masses (kg/mole)
Float64[0.02601565, 0.027019005, 0.027021825],
#9, Qref
Float64[412.45, 1656.18, 1581.84],
#10, has interpolating chebyshev expansion?
Bool[true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[8, 8, 9],
#12, maximum relative errors of interpolation
Float64[0.0064, 0.0067, 0.005],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[6.606246082570006, 9.5021885391759, 3.8798354279414227, 1.0421227361830696, 0.15435826120434, 0.024812414831032723, 0.0013956143916372201, -0.0005373524520955186],
Float64[6.691310653700157, 9.642511905249071, 3.9554948181251057, 1.0672351744197028, 0.1592037312638897, 0.02524828278728946, 0.0014209977344309874, -0.0005472408750750089],
Float64[8.312818087515264, 12.296135611243283, 5.334539240450019, 1.5041555942106484, 0.25556636740202876, 0.040809216146493243, -5.996404029051661e-05, -0.0008823053560504945, 0.001325829564092551]
]
],
#1, molecule number
[27,
#2, molecule formula
"C2H6",
#3, molecule name
"Ethane",
#4, global isotopologue numbers
Int64[78, 106],
#5, isotopologue formulae
String["12C2H6", "12CH313CH3"],
#6, AFGL code
Int64[1221, 1231],
#7, abundance fractions
Float64[0.97699, 0.021953],
#8, molecular masses (kg/mole)
Float64[0.03004695, 0.031050305],
#9, Qref
Float64[70882.52, 36191.8],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[9, 9],
#12, maximum relative errors of interpolation
Float64[0.0084, 0.0084],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[29.306413760745293, 48.87646045145203, 29.277666381786332, 13.421126486275961, 4.868450706061395, 1.4265486625599113, 0.350571677937209, 0.07303068189280282, 0.012110399026012075],
Float64[29.31971200400752, 48.899765252850294, 29.29331784478448, 13.429280048978747, 4.87184061373862, 1.4276942043686383, 0.3508915525695748, 0.07310824356884638, 0.01212522620861023]
]
],
#1, molecule number
[28,
#2, molecule formula
"PH3",
#3, molecule name
"Phosphine",
#4, global isotopologue numbers
Int64[79],
#5, isotopologue formulae
String["31PH3"],
#6, AFGL code
Int64[1111],
#7, abundance fractions
Float64[0.999533],
#8, molecular masses (kg/mole)
Float64[0.033997238000000006],
#9, Qref
Float64[3249.44],
#10, has interpolating chebyshev expansion?
Bool[true],
#11, lengths of interpolating chebyshev expansion
Int64[8],
#12, maximum relative errors of interpolation
Float64[0.0061],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[4.77754526457992, 6.561487284314362, 2.241225465013582, 0.5263044273306788, 0.09227919777852593, 0.002846217167035497, 0.004031049657860259, -0.0011050831525177987]
]
],
#1, molecule number
[29,
#2, molecule formula
"COF2",
#3, molecule name
"Carbonyl Fluoride",
#4, global isotopologue numbers
Int64[80, 119],
#5, isotopologue formulae
String["12C16O19F2", "13C16O19F2"],
#6, AFGL code
Int64[269, 369],
#7, abundance fractions
Float64[0.986544, 0.011083],
#8, molecular masses (kg/mole)
Float64[0.065991722, 0.066995083],
#9, Qref
Float64[70028.43, 140000.0],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[9, 9],
#12, maximum relative errors of interpolation
Float64[0.0037, 0.0037],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[11.629931174695397, 17.881640555690623, 8.45377717908021, 2.6794696985587407, 0.5645965476906625, 0.07777414142631045, 0.00947097051224155, -0.001480514566241098, 0.0007799374263299796],
Float64[11.635249499599023, 17.889802343475637, 8.457586722083246, 2.6806408754837325, 0.5648398483725394, 0.07780705463459148, 0.009474014430588262, -0.0014807057725274575, 0.0007800005503160179]
]
],
#1, molecule number
[30,
#2, molecule formula
"SF6",
#3, molecule name
"Sulfur Hexafluoride",
#4, global isotopologue numbers
Int64[126],
#5, isotopologue formulae
String["32S19F6"],
#6, AFGL code
Int64[29],
#7, abundance fractions
Float64[0.95018],
#8, molecular masses (kg/mole)
Float64[0.145962492],
#9, Qref
Float64[1620000.0],
#10, has interpolating chebyshev expansion?
Bool[true],
#11, lengths of interpolating chebyshev expansion
Int64[14],
#12, maximum relative errors of interpolation
Float64[0.00024],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[1870.7117266249759, 3396.573083307492, 2545.0085260475485, 1579.432604203472, 815.1045425222118, 351.0061028885553, 126.3795862864786, 38.05037645536903, 9.559459105261725, 1.9936675698906514, 0.3424948763082424, 0.04771635428971002, 0.005364060141101408, 0.00045015125453154236]
]
],
#1, molecule number
[31,
#2, molecule formula
"H2S",
#3, molecule name
"Hydrogen Sulfide",
#4, global isotopologue numbers
Int64[81, 82, 83],
#5, isotopologue formulae
String["H232S", "H234S", "H233S"],
#6, AFGL code
Int64[121, 141, 131],
#7, abundance fractions
Float64[0.949884, 0.042137, 0.007498],
#8, molecular masses (kg/mole)
Float64[0.033987721, 0.035983514999999994, 0.034987105],
#9, Qref
Float64[505.79, 504.35, 2014.94],
#10, has interpolating chebyshev expansion?
Bool[true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[8, 8, 8],
#12, maximum relative errors of interpolation
Float64[0.0056, 0.0083, 0.0083],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[3.1610034569623626, 3.9079200993763954, 0.8188618250430909, 0.07182197256747361, 0.018994732241263388, -0.004588514291410765, 0.00193425807974279, -0.0006258386386391075],
Float64[3.136231167171715, 3.868597973183339, 0.801384127880046, 0.06811779813907153, 0.018316506534915036, -0.00478598833219644, 0.0020113628824416046, -0.0006519501360460518],
Float64[3.136195916650622, 3.868549974556976, 0.8013728165558528, 0.06811697766351898, 0.0183160074539575, -0.004786003572910781, 0.002011194206114096, -0.0006519716060844973]
]
],
#1, molecule number
[32,
#2, molecule formula
"HCOOH",
#3, molecule name
"Formic Acid",
#4, global isotopologue numbers
Int64[84],
#5, isotopologue formulae
String["H12C16O16OH"],
#6, AFGL code
Int64[126],
#7, abundance fractions
Float64[0.983898],
#8, molecular masses (kg/mole)
Float64[0.04600548],
#9, Qref
Float64[39132.76],
#10, has interpolating chebyshev expansion?
Bool[true],
#11, lengths of interpolating chebyshev expansion
Int64[9],
#12, maximum relative errors of interpolation
Float64[0.0038],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[11.020149393914501, 16.9755902316461, 8.167574402236214, 2.7594738274122346, 0.6634809411963838, 0.11285962933776061, 0.01800084865083562, -5.6527863588229366e-05, 0.000655575724366031]
]
],
#1, molecule number
[33,
#2, molecule formula
"HO2",
#3, molecule name
"Hydroperoxyl",
#4, global isotopologue numbers
Int64[85],
#5, isotopologue formulae
String["H16O2"],
#6, AFGL code
Int64[166],
#7, abundance fractions
Float64[0.995107],
#8, molecular masses (kg/mole)
Float64[0.032997655],
#9, Qref
Float64[4300.39],
#10, has interpolating chebyshev expansion?
Bool[true],
#11, lengths of interpolating chebyshev expansion
Int64[8],
#12, maximum relative errors of interpolation
Float64[0.0074],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[3.4589368258406075, 4.387817669014019, 1.053710491228722, 0.12771732192601018, 0.018001614852028273, -0.006237091281306625, 0.002782788224954099, -0.000725510875379801]
]
],
#1, molecule number
[34,
#2, molecule formula
"O",
#3, molecule name
"Oxygen Atom",
#4, global isotopologue numbers
Int64[86],
#5, isotopologue formulae
String["16O"],
#6, AFGL code
Int64[6],
#7, abundance fractions
Float64[0.997628],
#8, molecular masses (kg/mole)
Float64[0.015994915000000002],
#9, Qref
Float64[6.72],
#10, has interpolating chebyshev expansion?
Bool[false],
#11, lengths of interpolating chebyshev expansion
Int64[0],
#12, maximum relative errors of interpolation
Float64[0],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[]
]
],
#1, molecule number
[35,
#2, molecule formula
"ClONO2",
#3, molecule name
"Chlorine Nitrate",
#4, global isotopologue numbers
Int64[127, 128],
#5, isotopologue formulae
String["35Cl16O14N16O2", "37Cl16O14N16O2"],
#6, AFGL code
Int64[5646, 7646],
#7, abundance fractions
Float64[0.74957, 0.239694],
#8, molecular masses (kg/mole)
Float64[0.096956672, 0.098953723],
#9, Qref
Float64[4790000.0, 4910000.0],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[10, 10],
#12, maximum relative errors of interpolation
Float64[0.0016, 0.0015],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[112.41039753461149, 192.5969283401371, 121.68354064743774, 57.30328926867914, 20.284701556330944, 5.410076429365737, 1.0808835468204576, 0.15853736954761644, 0.016535623193792363, 0.001150132727673281],
Float64[112.45459127185921, 192.67265280226152, 121.7313921905935, 57.32582857162725, 20.292682333281764, 5.412206013024527, 1.081309309033366, 0.15859843363539816, 0.016538546940485805, 0.0011482640850822969]
]
],
#1, molecule number
[36,
#2, molecule formula
"NO+",
#3, molecule name
"Nitric Oxide Cation",
#4, global isotopologue numbers
Int64[87],
#5, isotopologue formulae
String["14N16O+"],
#6, AFGL code
Int64[46],
#7, abundance fractions
Float64[0.993974],
#8, molecular masses (kg/mole)
Float64[0.029997989],
#9, Qref
Float64[311.69],
#10, has interpolating chebyshev expansion?
Bool[true],
#11, lengths of interpolating chebyshev expansion
Int64[4],
#12, maximum relative errors of interpolation
Float64[0.0059],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[1.7605530293617953, 1.6939645379456973, 0.030855228843472766, 0.010020286719835495]
]
],
#1, molecule number
[37,
#2, molecule formula
"HOBr",
#3, molecule name
"Hypobromous Acid",
#4, global isotopologue numbers
Int64[88, 89],
#5, isotopologue formulae
String["H16O79Br", "H16O81Br"],
#6, AFGL code
Int64[169, 161],
#7, abundance fractions
Float64[0.505579, 0.491894],
#8, molecular masses (kg/mole)
Float64[0.095921076, 0.097919027],
#9, Qref
Float64[28339.38, 28237.98],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[9, 9],
#12, maximum relative errors of interpolation
Float64[0.0051, 0.0052],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[4.483055511505265, 5.995051967678824, 1.7647444347848933, 0.2533586767055279, 0.016936791901325243, -0.002838029482718052, 0.0023154774596215733, -0.0013133740502127011, 0.00048033005031822285],
Float64[4.486274449888522, 6.000089813847239, 1.7669127031543173, 0.25370966952649165, 0.01695092315480551, -0.0028181141360982265, 0.0023235097509621827, -0.0013007580536887886, 0.00048488773683352804]
]
],
#1, molecule number
[38,
#2, molecule formula
"C2H4",
#3, molecule name
"Ethylene",
#4, global isotopologue numbers
Int64[90, 91],
#5, isotopologue formulae
String["12C2H4", "12CH213CH2"],
#6, AFGL code
Int64[221, 231],
#7, abundance fractions
Float64[0.977294, 0.021959],
#8, molecular masses (kg/mole)
Float64[0.028031300000000002, 0.029034655],
#9, Qref
Float64[11041.54, 45196.89],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[7, 7],
#12, maximum relative errors of interpolation
Float64[0.0091, 0.0091],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[10.306665304688702, 15.910097139939962, 7.822086138506613, 2.834536775621054, 0.7613961848840939, 0.1521884153819452, 0.030223301156906263],
Float64[10.306853180349535, 15.910413449245775, 7.822252700108585, 2.83459834274206, 0.7614149949091343, 0.15219218454683414, 0.030224516959518628]
]
],
#1, molecule number
[39,
#2, molecule formula
"CH3OH",
#3, molecule name
"Methanol",
#4, global isotopologue numbers
Int64[92],
#5, isotopologue formulae
String["12CH316OH"],
#6, AFGL code
Int64[2161],
#7, abundance fractions
Float64[0.98593],
#8, molecular masses (kg/mole)
Float64[0.032026215000000004],
#9, Qref
Float64[70569.92],
#10, has interpolating chebyshev expansion?
Bool[true],
#11, lengths of interpolating chebyshev expansion
Int64[11],
#12, maximum relative errors of interpolation
Float64[0.0063],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[13.749331968563945, 21.61345904772461, 10.955577051534103, 3.977402542310325, 1.0933160614782178, 0.2226208170647183, 0.038152444149720924, 0.00540115477077876, 0.0001595952470339057, 1.367409072514647e-05, 8.94526436184151e-05]
]
],
#1, molecule number
[40,
#2, molecule formula
"CH3Br",
#3, molecule name
"Methyl Bromide",
#4, global isotopologue numbers
Int64[93, 94],
#5, isotopologue formulae
String["12CH379Br", "12CH381Br"],
#6, AFGL code
Int64[219, 211],
#7, abundance fractions
Float64[0.500995, 0.487433],
#8, molecular masses (kg/mole)
Float64[0.093941811, 0.095939764],
#9, Qref
Float64[83051.98, 83395.21],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[9, 9],
#12, maximum relative errors of interpolation
Float64[0.0054, 0.0054],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[8.322478463681351, 12.445792571437991, 5.528514219140895, 1.7163686770516318, 0.37806637006999333, 0.05547356269277692, 0.010458543694655376, -0.0007773050759798394, 0.0004671678943388713],
Float64[8.329461595021453, 12.457238194953582, 5.534590915534836, 1.7183801520233106, 0.3785478856826856, 0.05556730686168132, 0.010462922226282423, -0.0007745337034998911, 0.0004672506165874779]
]
],
#1, molecule number
[41,
#2, molecule formula
"CH3CN",
#3, molecule name
"Acetonitrile",
#4, global isotopologue numbers
Int64[95],
#5, isotopologue formulae
String["12CH312C14N"],
#6, AFGL code
Int64[2124],
#7, abundance fractions
Float64[0.973866],
#8, molecular masses (kg/mole)
Float64[0.041026549],
#9, Qref
Float64[88672.19],
#10, has interpolating chebyshev expansion?
Bool[true],
#11, lengths of interpolating chebyshev expansion
Int64[11],
#12, maximum relative errors of interpolation
Float64[0.0054],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[23.087630326330327, 37.50816558997893, 20.677521610415493, 8.221774398903325, 2.4554758737437163, 0.5635995654848657, 0.10208943244482249, 0.014556918325425272, 0.001771181894064, -0.00017207723102501403, 0.0001938628200505832]
]
],
#1, molecule number
[42,
#2, molecule formula
"CF4",
#3, molecule name
"PFC-14",
#4, global isotopologue numbers
Int64[96],
#5, isotopologue formulae
String["12C19F4"],
#6, AFGL code
Int64[29],
#7, abundance fractions
Float64[0.98889],
#8, molecular masses (kg/mole)
Float64[0.087993616],
#9, Qref
Float64[121000.0],
#10, has interpolating chebyshev expansion?
Bool[false],
#11, lengths of interpolating chebyshev expansion
Int64[0],
#12, maximum relative errors of interpolation
Float64[0],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[]
]
],
#1, molecule number
[43,
#2, molecule formula
"C4H2",
#3, molecule name
"Diacetylene",
#4, global isotopologue numbers
Int64[116],
#5, isotopologue formulae
String["12C4H2"],
#6, AFGL code
Int64[2211],
#7, abundance fractions
Float64[0.955998],
#8, molecular masses (kg/mole)
Float64[0.05001565],
#9, Qref
Float64[9818.97],
#10, has interpolating chebyshev expansion?
Bool[true],
#11, lengths of interpolating chebyshev expansion
Int64[10],
#12, maximum relative errors of interpolation
Float64[0.0044],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[139.66835957434776, 241.85305800353672, 157.95302972883783, 78.74096208726291, 30.283942023964958, 9.074668301483646, 2.130903025224642, 0.3944094168400372, 0.05767338336487329, 0.006565583269182045]
]
],
#1, molecule number
[44,
#2, molecule formula
"HC3N",
#3, molecule name
"Cyanoacetylene",
#4, global isotopologue numbers
Int64[109],
#5, isotopologue formulae
String["H12C314N"],
#6, AFGL code
Int64[1224],
#7, abundance fractions
Float64[0.963346],
#8, molecular masses (kg/mole)
Float64[0.051010899000000005],
#9, Qref
Float64[24786.84],
#10, has interpolating chebyshev expansion?
Bool[true],
#11, lengths of interpolating chebyshev expansion
Int64[9],
#12, maximum relative errors of interpolation
Float64[0.0062],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[54.67987143122882, 91.66794189149792, 54.56630682138059, 23.48211282466563, 7.394320212397229, 1.7249800510739384, 0.2979283599543656, 0.03885721387024432, 0.0032754500365292927]
]
],
#1, molecule number
[45,
#2, molecule formula
"H2",
#3, molecule name
"Hydrogen",
#4, global isotopologue numbers
Int64[103, 115],
#5, isotopologue formulae
String["H2", "HD"],
#6, AFGL code
Int64[11, 12],
#7, abundance fractions
Float64[0.999688, 0.00031143200000000005],
#8, molecular masses (kg/mole)
Float64[0.00201565, 0.003021825],
#9, Qref
Float64[7.67, 29.87],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[11, 11],
#12, maximum relative errors of interpolation
Float64[0.0072, 0.0085],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[1.664734139290654, 1.541274894552553, -0.009725177039028837, 0.010711847479083847, -0.0017437301893213019, -0.0036796434870607795, 0.006507341594540717, -0.006895927202643417, 0.006015970973214247, -0.004996438617030119, 0.002286683531610123],
Float64[1.702133003802882, 1.5472084650368458, 0.02093893129594614, -0.005247622987785912, 0.0065559250200964096, -0.004561928075496269, 0.0036220249507184165, -0.002945429432234235, 0.002444808920724935, -0.002149668002996563, 0.0010267991465589433]
]
],
#1, molecule number
[46,
#2, molecule formula
"CS",
#3, molecule name
"Carbon Monosulfide",
#4, global isotopologue numbers
Int64[97, 98, 99, 100],
#5, isotopologue formulae
String["12C32S", "12C34S", "13C32S", "12C33S"],
#6, AFGL code
Int64[22, 24, 32, 23],
#7, abundance fractions
Float64[0.939624, 0.041682, 0.010556, 0.007417],
#8, molecular masses (kg/mole)
Float64[0.043971036, 0.045966786999999995, 0.044974368, 0.044970399],
#9, Qref
Float64[253.62, 257.77, 537.5, 1022.97],
#10, has interpolating chebyshev expansion?
Bool[true, true, true, true],
#11, lengths of interpolating chebyshev expansion
Int64[4, 4, 4, 4],
#12, maximum relative errors of interpolation
Float64[0.0053, 0.0055, 0.0058, 0.0054],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[1.928548290410206, 1.9559277563753463, 0.13775785658135606, 0.025167997114126095],
Float64[1.9320348490293633, 1.9612931183817734, 0.13973653631415997, 0.025297795999263812],
Float64[1.941312753398263, 1.975502364252016, 0.1449170140435975, 0.025619198538681925],
Float64[1.9303466528392785, 1.9586867922618854, 0.13877109254914913, 0.02523509205534813]
]
],
#1, molecule number
[47,
#2, molecule formula
"SO3",
#3, molecule name
"Sulfur trioxide",
#4, global isotopologue numbers
Int64[114],
#5, isotopologue formulae
String["32S16O3"],
#6, AFGL code
Int64[26],
#7, abundance fractions
Float64[0.9434],
#8, molecular masses (kg/mole)
Float64[0.07995682],
#9, Qref
Float64[7783.3],
#10, has interpolating chebyshev expansion?
Bool[true],
#11, lengths of interpolating chebyshev expansion
Int64[9],
#12, maximum relative errors of interpolation
Float64[0.0083],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[15.308429278661556, 24.00276207581289, 11.897538539905902, 3.9481327675633446, 0.8795595441612623, 0.12893108012856036, 0.011262184567776501, -0.0008464360123436876, 0.0011600801108988534]
]
],
#1, molecule number
[48,
#2, molecule formula
"C2N2",
#3, molecule name
"Cyanogen",
#4, global isotopologue numbers
Int64[123],
#5, isotopologue formulae
String["12C214N2"],
#6, AFGL code
Int64[4224],
#7, abundance fractions
Float64[0.970752],
#8, molecular masses (kg/mole)
Float64[0.052006148],
#9, Qref
Float64[15582.44],
#10, has interpolating chebyshev expansion?
Bool[true],
#11, lengths of interpolating chebyshev expansion
Int64[8],
#12, maximum relative errors of interpolation
Float64[0.0078],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[23.898246264067748, 38.30762066370932, 20.017340299203973, 6.9592760018001645, 1.616441488972797, 0.2564194297139437, 0.025346935562266384, 0.0020212521367994896]
]
],
#1, molecule number
[49,
#2, molecule formula
"COCl2",
#3, molecule name
"Phosgene",
#4, global isotopologue numbers
Int64[124, 125],
#5, isotopologue formulae
String["12C16O35Cl2", "12C16O35Cl37Cl"],
#6, AFGL code
Int64[2655, 2657],
#7, abundance fractions
Float64[0.566392, 0.362235],
#8, molecular masses (kg/mole)
Float64[0.09793261997960001, 0.0999296698896],
#9, Qref
Float64[1480000.0, 3040000.0],
#10, has interpolating chebyshev expansion?
Bool[true, true],
#11, lengths of interpolating chebyshev expansion
Int64[10, 10],
#12, maximum relative errors of interpolation
Float64[0.0067, 0.0067],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[28.334677111812944, 45.91333430423453, 24.71182133380345, 9.013419514341322, 2.223400830715287, 0.3666455762516466, 0.03834885535248765, 0.0015768709148049867, 0.0005194428496066747, -0.00021849483690535484],
Float64[28.38289115870957, 45.99285456416629, 24.75674368149981, 9.030958287676107, 2.2280852109188753, 0.367484622231376, 0.03844429301198539, 0.0015817522400456913, 0.0005200295439730477, -0.00021895935986688327]
]
],
[50], #no molecule has been assigned this number
[51], #no molecule has been assigned this number
[52], #no molecule has been assigned this number
#1, molecule number
[53,
#2, molecule formula
"CS2",
#3, molecule name
"Carbon disulfide",
#4, global isotopologue numbers
Int64[131, 132, 133, 134],
#5, isotopologue formulae
String["12C32S2", "32S12C34S", "32S12C33S", "13C32S2"],
#6, AFGL code
Int64[222, 224, 223, 232],
#7, abundance fractions
Float64[0.892811, 0.07926, 0.014094, 0.01031],
#8, molecular masses (kg/mole)
Float64[0.07594414000000001, 0.07793994000000001, 0.076943256, 0.076947495],
#9, Qref
Float64[1352.6, 2798.0, 1107.0, 2739.7],
#10, has interpolating chebyshev expansion?
Bool[false, false, false, false],
#11, lengths of interpolating chebyshev expansion
Int64[0, 0, 0, 0],
#12, maximum relative errors of interpolation
Float64[0, 0, 0, 0],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[],
Float64[],
Float64[],
Float64[]
]
],
[54], #no molecule has been assigned this number
#1, molecule number
[55,
#2, molecule formula
"NF3",
#3, molecule name
"Nitrogen trifluoride",
#4, global isotopologue numbers
Int64[136],
#5, isotopologue formulae
String["14N19F3"],
#6, AFGL code
Int64[4999],
#7, abundance fractions
Float64[0.996337],
#8, molecular masses (kg/mole)
Float64[0.070998284],
#9, Qref
Float64[346000.0],
#10, has interpolating chebyshev expansion?
Bool[false],
#11, lengths of interpolating chebyshev expansion
Int64[0],
#12, maximum relative errors of interpolation
Float64[0],
#13, chebyshev expansion coefficients
Vector{Float64}[
Float64[]
]
]
]
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 2785 | export periapsis, apoapsis, semimajoraxis, eccentricity
export meananomaly, trueanomaly, eccentricanomaly
export orbitalperiod, orbitaldistance, orbit
"""
    periapsis(a, e)

Compute the periapsis (closest approach) distance using semi-major axis `a` and eccentricity `e`
"""
function periapsis(a, e)
    #an ellipse's closest point to its focus lies at a distance a(1 - e)
    return a*(1 - e)
end
"""
    apoapsis(a, e)

Compute the apoapsis (farthest distance) distance using semi-major axis `a` and eccentricity `e`
"""
function apoapsis(a, e)
    #an ellipse's farthest point from its focus lies at a distance a(1 + e)
    return a*(1 + e)
end
"""
    semimajoraxis(T, m)

Compute the [semi-major axis](https://en.wikipedia.org/wiki/Semi-major_and_semi-minor_axes) of an orbit from the orbital period `T` and host mass `m` (inverse of Kepler's third law)
"""
#NOTE(review): the symbols in this expression appear encoding-damaged — "π" is
#presumably a gravitational-constant identifier and "Ο" presumably π; confirm
#against the original source before editing
semimajoraxis(T, m) = (π*m*T^2/(4Ο^2))^(1/3)
"""
    eccentricity(rβ, rβ)

Compute [eccentricity](https://en.wikipedia.org/wiki/Orbital_eccentricity) from two orbital distances
"""
#NOTE(review): the two parameter names render identically here ("rβ"), which
#looks like encoding damage of subscripted names (e.g. r₁, r₂ for periapsis and
#apoapsis distances) — confirm against the original source before editing
eccentricity(rβ, rβ) = (rβ - rβ)/(rβ + rβ)
"""
    meananomaly(E, e)

Compute the [mean anomaly](https://en.wikipedia.org/wiki/Mean_anomaly) from the eccentric anomaly `E` and eccentricity `e`
"""
function meananomaly(E, e)
    #Kepler's equation evaluated in the forward (E -> M) direction
    return E - e*sin(E)
end
"""
    trueanomaly(E, e)

Compute the [true anomaly](https://en.wikipedia.org/wiki/True_anomaly) from the eccentric anomaly `E` and eccentricity `e`
"""
function trueanomaly(E, e)
    #half-angle relation between eccentric anomaly and true anomaly
    #NOTE(review): "Ο" below appears to be encoding damage, presumably π — confirm
    f = 2*atan(sqrt((1 + e)/(1 - e))*tan(E/2))
    #use [0,2Ο] instead of [-Ο,Ο]
    f < 0 ? f + 2Ο : f
end
"""
    trueanomaly(t, a, m, e)

Compute the [true anomaly](https://en.wikipedia.org/wiki/True_anomaly) at time `t` for an orbit with semi-major axis `a`, host mass `m`, and eccentricity `e`
"""
function trueanomaly(t, a, m, e)
    #solve Kepler's equation for the eccentric anomaly, then convert
    E = eccentricanomaly(t, a, m, e)
    return trueanomaly(E, e)
end
"""
    eccentricanomaly(t, a, m, e)

Numerically compute the [eccentric anomaly](https://en.wikipedia.org/wiki/Eccentric_anomaly) using [Kepler's equation](https://en.wikipedia.org/wiki/Kepler%27s_equation)

# Arguments
* `t`: time since periapsis passage (the companion `orbitaldistance` docs state periapsis occurs at t=0)
* `a`: semi-major axis
* `m`: host mass
* `e`: eccentricity
"""
function eccentricanomaly(t, a, m, e)
    #NOTE(review): message says "positive" but the check permits t == 0 — consider "nonnegative"
    @assert t >= 0 "time must be positive"
    #Kepler's Third Law
    T = orbitalperiod(a, m)
    #definition of mean anomaly
    #NOTE(review): "Ο" here and below appears to be encoding damage, presumably π — confirm
    M = 2Ο*rem(t, T)/T
    #eccentric anomaly must be found numerically
    #falseposition is presumably a project-local bracketing root finder over [0,2Ο] — confirm
    E = falseposition((E,p)->meananomaly(E, e) - M, 0, 2Ο)
    return E
end
"""
    orbitalperiod(a, m)

[Kepler's Third Law](https://en.wikipedia.org/wiki/Kepler%27s_laws_of_planetary_motion#Third_law) describing the orbital period of an elliptical orbit with semi-major axis `a` around a host of mass `m`
"""
#NOTE(review): "Ο", "β", and "π" below appear to be encoding damage (presumably
#π, √, and a gravitational-constant identifier) — confirm against the original
#source before editing
orbitalperiod(a, m) = 2Ο*β(a^3/(π*m))
"""
    orbitaldistance(a, f, e)

Compute the distance of a planet from its host using semi-major axis `a`, true anomaly `f`, and eccentricity `e`
"""
function orbitaldistance(a, f, e)
    #polar equation of an ellipse with the origin at a focus
    ℓ = a*(1 - e^2) #semi-latus rectum
    return ℓ/(1 + e*cos(f))
end
"""
    orbitaldistance(t, a, m, e)

Compute the distance of a planet from its host, assuming the planet is at periapsis at t=0
"""
function orbitaldistance(t, a, m, e)
    #find the true anomaly at time t, then evaluate the polar ellipse equation
    f = trueanomaly(t, a, m, e)
    return orbitaldistance(a, f, e)
end
"""
    orbit(a, m, e, N=1000)

Create a distance time-series of `N` points for an elliptical orbit, returning vectors for time, distance, and true anomaly
"""
function orbit(a, m, e, N::Int=1000)
    #sample one full period, dropping the duplicate endpoint at t == T
    T = orbitalperiod(a, m)
    t = LinRange(0, T, N+1)[1:end-1]
    #true anomaly and distance at each sample time
    f = [trueanomaly(tᵢ, a, m, e) for tᵢ in t]
    r = [orbitaldistance(a, fᵢ, e) for fᵢ in f]
    return t, r, f
end
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 9798 | export readpar, SpectralLines
"""
Mapping between single character isotopologue codes and isotopologue numbers. This is only relevant for molecules with many isotopologues, like CO2. The isotopologue code is only alloted 1 column in the fixed width .par files, so capital letters are used after 0-9 are exhausted. For example, the 11th isotopologue of CO2 in the HITRAN database is coded 'A' and the 12th is coded 'B'.
"""
const ISOINDEX = Dict{Char,Int}(
    #codes run '1'-'9', then '0' for 10, then 'A'-'Z' for 11-36
    c => i for (i, c) in enumerate("1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ")
)
"""
readpar(filename; Ξ½min=0, Ξ½max=Inf, Scut=0, I=[], maxlines=-1)
Read an absoption line file from the HITRAN database, which should have the ".par" extension. These files are available at [`https://hitran.org/lbl`](https://hitran.org/lbl) after registering for a free account.
# Keyword Arguments
* `Ξ½min`: smallest line wavenumber to include
* `Ξ½max`: largest line wavenumber to include
* `Scut`: smallest spectral line intensity
* `I`: array of isotopologue numbers to include (excludes all others)
* `maxlines`: maximum number of lines to include (includes only the most intense `maxlines` lines)
A dictionary of vectors is returned, reflecting the definitions from
1. [HITRAN website](https://hitran.org/docs/definitions-and-units`)
2. [Rothman, Laurence S., et al. "The HITRAN 2004 molecular spectroscopic database." Journal of quantitative spectroscopy and radiative transfer 96.2 (2005): 139-204.](https://www.sciencedirect.com/science/article/abs/pii/S0022407313002859)
| Key | Vector Type | Description |
| --- | :---------- | :---------- |
| `M` | `Int16` | [HITRAN molecular identification number](https://hitran.org/docs/molec-meta) |
| `I` | `Char` | HITRAN isotopologue identification symbol |
| `Ξ½` | `Float64` | spectral line wavenumber [cm``^{-1}``] in a vacuum |
| `S` | `Float64` | spectral line intensity [cm``^{-1}``/(molecule``\\cdot``cm``^{-2}``)] at 296 K |
| `A` | `Float64` | Einstein-A coefficient (s``^{-1}``) of a transition |
| `Ξ³a` | `Float64` | air-broadened half width at half maximum (HWHM) [cm``^{-1}``/atm] at 296 K and 1 atm |
| `Ξ³s` | `Float64` | self-broadened half width at half maximum (HWHM) [cm``^{-1}``/atm] at 296 K and 1 atm |
| `Epp` | `Float64` | lower-state energy of the transition [cm``^{-1}``] |
| `na` | `Float64` | coefficient of temperature dependence of air-broadened half width |
| `Ξ΄a` | `Float64` | pressure shift [cm``^{-1}``/atm] at 296 K and 1 atm of the line position with respect to vacuum transition wavenumber |
| `Vp` | `String` | upper-state "global" quanta |
| `Vpp` | `String` | lower-state "global" quanta |
| `Qp` | `String` | upper-state "local" quanta |
| `Qpp` | `String` | lower-state "local" quanta |
| `Ierr` | `String` | uncertainty indices |
| `Iref` | `String` | reference indices |
| `*` | `Char` | flag (?) |
| `gp` | `String` | statistical weight of upper state |
| `gpp` | `String` | statistical weight of lower state |
"""
function readpar(filename::String;
                 Ξ½min::Real=0,
                 Ξ½max::Real=Inf,
                 Scut::Real=0,
                 I::Vector=[],
                 maxlines::Int=-1)
    #robust extension check (indexing filename[end-3:end] fails on short names)
    endswith(filename, ".par") || throw(ArgumentError("expected file with .par extension, downloaded from https://hitran.org/lbl/"))
    lines = readlines(filename)
    N = length(lines)
    #hard code the 160 character fixed-width format to make sure nothing slows it down
    par = Dict{String,Vector}(
        "M"   =>Vector{Int16}(undef, N),
        "I"   =>Vector{Char}(undef, N),
        "Ξ½"   =>Vector{Float64}(undef, N),
        "S"   =>Vector{Float64}(undef, N),
        "A"   =>Vector{Float64}(undef, N),
        "Ξ³a"  =>Vector{Float64}(undef, N),
        "Ξ³s"  =>Vector{Float64}(undef, N),
        "Epp" =>Vector{Float64}(undef, N),
        "na"  =>Vector{Float64}(undef, N),
        "Ξ΄a"  =>Vector{Float64}(undef, N),
        "Vp"  =>Vector{String}(undef, N),
        "Vpp" =>Vector{String}(undef, N),
        "Qp"  =>Vector{String}(undef, N),
        "Qpp" =>Vector{String}(undef, N),
        "Ierr"=>Vector{String}(undef, N),
        "Iref"=>Vector{String}(undef, N),
        "*"   =>Vector{Char}(undef, N),
        "gp"  =>Vector{String}(undef, N),
        "gpp" =>Vector{String}(undef, N)
    )
    #parse each record by its fixed column ranges
    for (i,line) in enumerate(lines)
        par["M"][i]    = parse(Int16, SubString(line,1,2))
        par["I"][i]    = line[3]
        par["Ξ½"][i]    = parse(Float64, SubString(line,4,15))
        par["S"][i]    = parse(Float64, SubString(line,16,25))
        par["A"][i]    = parse(Float64, SubString(line,26,35))
        par["Ξ³a"][i]   = parse(Float64, SubString(line,36,40))
        par["Ξ³s"][i]   = parse(Float64, SubString(line,41,45))
        par["Epp"][i]  = parse(Float64, SubString(line,46,55))
        par["na"][i]   = parse(Float64, SubString(line,56,59))
        par["Ξ΄a"][i]   = parse(Float64, SubString(line,60,67))
        par["Vp"][i]   = line[68:82]
        par["Vpp"][i]  = line[83:97]
        par["Qp"][i]   = line[98:112]
        par["Qpp"][i]  = line[113:127]
        par["Ierr"][i] = line[128:133]
        par["Iref"][i] = line[134:145]
        par["*"][i]    = line[146]
        par["gp"][i]   = line[147:153]
        par["gpp"][i]  = line[154:160]
    end
    #filter by wavenumber window and minimum line intensity
    mask = ones(Bool, N)
    mask .&= par["Ξ½"] .>= Ξ½min
    mask .&= par["Ξ½"] .<= Ξ½max
    mask .&= par["S"] .>= Scut
    #optionally filter by isotopologue, given as chars and/or their integer indices
    if length(I) > 0
        for j = 1:N
            #isotopologue character
            c = par["I"][j]
            #isotopologue integer
            i = ISOINDEX[c]
            #drop the line if neither representation was requested
            if !(c in I) & !(i in I)
                mask[j] = false
            end
        end
    end
    #check that there will be information left
    any(mask) || error("par information has been filtered to nothing!")
    #slice all the par arrays
    for (k, v) in par
        par[k] = v[mask]
    end
    #take the strongest lines if a cap was requested
    if maxlines > 0
        #compare against the *filtered* line count, not the original file length,
        #otherwise heavy filtering caused an out-of-bounds slice below
        n = length(par["S"])
        if n > maxlines
            #indices of the maxlines most intense remaining lines
            idx = sortperm(par["S"], rev=true)[1:maxlines]
            for (k, v) in par
                par[k] = v[idx]
            end
        end
    end
    #ensure sorted by wavenumber
    idx = sortperm(par["Ξ½"])
    for (k, v) in par
        par[k] = v[idx]
    end
    return par
end
"""
    SpectralLines

Organizing type for spectral line data of a single gas

| Field | Type | Description |
| ----- | :--- | :---------- |
| `name` | `String` | gas name |
| `formula` | `String` | gas formula |
| `N` | `Int64` | number of lines |
| `M` | `Int16` | HITRAN molecule number, see [`readpar`](@ref) |
| `I` | `Vector{Int16}` | isotopologue indices, converted from the characters read by [`readpar`](@ref) |
| `ΞΌ` | `Vector{Float64}` | molar mass of isotopologues [kg/mole] |
| `A` | `Vector{Float64}` | isotopologue abundance (Earth) |
| `Ξ½` | `Vector{Float64}` | see [`readpar`](@ref) |
| `S` | `Vector{Float64}` | see [`readpar`](@ref) |
| `Ξ³a` | `Vector{Float64}` | see [`readpar`](@ref) |
| `Ξ³s` | `Vector{Float64}` | see [`readpar`](@ref) |
| `Epp` | `Vector{Float64}` | see [`readpar`](@ref) |
| `na` | `Vector{Float64}` | see [`readpar`](@ref) |

# Constructors

    SpectralLines(par::Dict)

Construct a `SpectralLines` object from a dictionary of line data. That dictionary can be created with [`readpar`](@ref).

    SpectralLines(filename, Ξ½min=0, Ξ½max=Inf, Scut=0, I=[], maxlines=-1)

Read a `.par` file directly into a `SpectralLines` object. Keyword arguments are passed through to [`readpar`](@ref).
"""
struct SpectralLines
    #gas name
    name::String
    #gas formula
    formula::String
    #number of lines
    N::Int64
    #molecule number (https://hitran.org/docs/molec-meta/)
    M::Int16
    #ordered isotopologue integer (https://hitran.org/docs/iso-meta/)
    I::Vector{Int16}
    #molar mass of isotopologues [kg/mole]
    ΞΌ::Vector{Float64}
    #isotopologue abundance in HITRAN
    A::Vector{Float64}
    #wavenumbers of absorption lines [cm^-1], kept in ascending order
    Ξ½::Vector{Float64}
    #spectral line intensity [cm^-1/(molecule*cm^-2)]
    S::Vector{Float64}
    #air-broadened half-width [cm^-1/atm]
    Ξ³a::Vector{Float64}
    #self-broadened half-width [cm^-1/atm]
    Ξ³s::Vector{Float64}
    #lower state energy, E'' [cm^-1]
    Epp::Vector{Float64}
    #temperature-dependence exponent for Ξ³air [unitless]
    na::Vector{Float64}
end
#build a SpectralLines object from a readpar dictionary, sorting by wavenumber
function SpectralLines(par::Dict)
    #number of spectral lines in the dictionary
    N = length(par["Ξ½"])
    #a SpectralLines object represents exactly one molecule
    @assert length(unique(par["M"])) == 1 "SpectralLines objects must contain only one molecule's lines"
    #look up the molecule's name and formula from its HITRAN number
    M = par["M"][1]
    name = MOLPARAM[M][3]
    form = MOLPARAM[M][2]
    #translate isotopologue characters into integer indices,
    #then pull the matching abundances and molar masses
    I = [ISOINDEX[c] for c in par["I"]]
    A = [MOLPARAM[M][7][i] for i in I]
    ΞΌ = [MOLPARAM[M][8][i] for i in I]
    #permutation putting the wavenumber vector into ascending order
    idx = sortperm(par["Ξ½"])
    #assemble the structure with every per-line vector reordered consistently
    SpectralLines(
        name,
        form,
        N,
        M,
        I[idx],
        ΞΌ[idx],
        A[idx],
        par["Ξ½"][idx],
        par["S"][idx],
        par["Ξ³a"][idx],
        par["Ξ³s"][idx],
        par["Epp"][idx],
        par["na"][idx]
    )
end
#read a .par file and construct the SpectralLines object in one step;
#keyword arguments are forwarded to readpar
SpectralLines(filename::String; kwargs...) = SpectralLines(readpar(filename; kwargs...))
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 4628 | #-------------------------------------------------------------------------------
#conversions between spectral units
export Ξ½2f, f2Ξ½, Ξ½2Ξ», Ξ»2Ξ½, Ξ»2f, f2Ξ»

#NOTE(review): the single-character constant appearing in the Ξ½/f conversions
#below looks like the package's speed-of-light constant with garbled encoding —
#confirm against the package's constants definitions before editing.

"""
    Ξ½2f(Ξ½)

Convert wavenumber [cm``^{-1}``] to frequency [1/s]
"""
Ξ½2f(Ξ½)::Float64 = 100.0*π*Ξ½

"""
    f2Ξ½(f)

Convert frequency [1/s] to wavenumber [cm``^{-1}``]
"""
f2Ξ½(f)::Float64 = f/(100.0*π)

"""
    Ξ½2Ξ»(Ξ½)

Convert wavenumber [cm``^{-1}``] to wavelength [m]
"""
Ξ½2Ξ»(Ξ½)::Float64 = 0.01/Ξ½

"""
    Ξ»2Ξ½(Ξ»)

Convert wavelength [m] to wavenumber [cm``^{-1}``]
"""
Ξ»2Ξ½(Ξ»)::Float64 = 0.01/Ξ»

"""
    Ξ»2f(Ξ»)

Convert wavelength [m] to frequency [1/s]
"""
Ξ»2f(Ξ»)::Float64 = π/Ξ»

"""
    f2Ξ»(f)

Convert frequency [1/s] to wavelength [m]
"""
f2Ξ»(f)::Float64 = f/π
#-------------------------------------------------------------------------------
export planck, normplanck, stefanboltzmann, equilibriumtemperature
"""
    planck(Ξ½, T)

Compute black body intensity [W/m``^2``/cm``^{-1}``/sr] using [Planck's law](https://en.wikipedia.org/wiki/Planck%27s_law)

# Arguments
* `Ξ½`: wavenumber [cm``^{-1}``]
* `T`: temperature [Kelvin]
"""
planck(Ξ½, T)::Float64 = 100*2*π‘*π^2*(100*Ξ½)^3/(exp(π‘*π*(100*Ξ½)/(π€*T)) - 1)
"""
    normplanck(Ξ½, T)

Compute black body intensity [W/m``^2``/cm``^{-1}``/sr] using [Planck's law](https://en.wikipedia.org/wiki/Planck%27s_law), normalized by the power emitted per unit area at the given temperature ([`stefanboltzmann`](@ref)),

```
B(Ξ½,T)/ΟT^4
```

yielding units of 1/cm``^{-1}``/sr.

# Arguments
* `Ξ½`: wavenumber [cm``^{-1}``]
* `T`: temperature [Kelvin]
"""
normplanck(Ξ½, T)::Float64 = planck(Ξ½, T)/stefanboltzmann(T)
"""
    stefanboltzmann(T)

Compute black body radiation power using the [Stefan-Boltzmann](https://en.wikipedia.org/wiki/Stefan%E2%80%93Boltzmann_law) law, ``ΟT^4`` [W/m``^2``].
"""
stefanboltzmann(T)::Float64 = π*T^4
"""
    equilibriumtemperature(F, A)

Compute the [planetary equilibrium temperature](https://en.wikipedia.org/wiki/Planetary_equilibrium_temperature), or equivalent blackbody temperature of a planet.

``(\\frac{(1 - A)F}{4\\sigma})^{1/4}``

# Arguments
* `F`: stellar flux [W/m``^2``]
* `A`: albedo
"""
equilibriumtemperature(F, A)::Float64 = ((1 - A)*F/(4*π))^(1/4)

"""
    equilibriumtemperature(L, A, R)

Compute the [planetary equilibrium temperature](https://en.wikipedia.org/wiki/Planetary_equilibrium_temperature), or equivalent blackbody temperature of a planet.

``(\\frac{(1 - A)L}{16 \\sigma \\pi R^2})^{1/4}``

# Arguments
* `L`: stellar luminosity [W]
* `A`: albedo
* `R`: orbital distance [m]
"""
equilibriumtemperature(L, A, R)::Float64 = (L*(1 - A)/(16*π*Ο*R^2))^(1/4)
#-------------------------------------------------------------------------------
export dΟdP, transmittance, schwarzschild
"""
    dΟdP(Ο, g, ΞΌ)

Evaluate the differential increase in optical depth in pressure coordinates, equivalent to the [`schwarzschild`](@ref) equation without Planck emission.

``\\frac{dΟ}{dP} = Ο\\frac{\\textrm{N}_A}{g ΞΌ}``

where ``N_A`` is Avogadro's number.

# Arguments
* `Ο`: absorption cross-section [cm``^2``/molecule]
* `g`: gravitational acceleration [m/s``^2``]
* `ΞΌ`: mean molar mass [kg/mole]
"""
#the 1e-4 factor is consistent with converting the cross-section from cm^2 to m^2
dΟdP(Ο, g, ΞΌ)::Float64 = 1e-4*Ο*(ππ/(ΞΌ*g))
"""
    transmittance(Ο)

Evaluate transmittance from optical depth, ``t = e^{-Ο}``
"""
transmittance(Ο)::Float64 = exp(-Ο)
"""
    schwarzschild(I, Ξ½, Ο, T, P)

Evaluate the [Schwarzschild differential equation](https://en.wikipedia.org/wiki/Schwarzschild%27s_equation_for_radiative_transfer) for radiative transfer with units of length/height [m] and assuming the ideal gas law.

``\\frac{dI}{dz} = Ο\\frac{P}{k_B T}[B_Ξ½(T) - I]``

where ``B_Ξ½`` is [`planck`](@ref)'s law.

# Arguments
* `I`: radiative intensity [W/m``^2``/cm``^{-1}``/sr]
* `Ξ½`: radiation wavenumber [cm``^{-1}``]
* `Ο`: absorption cross-section [cm``^2``/molecule]
* `T`: temperature [K]
* `P`: pressure [Pa]
"""
#P/(kT) is the ideal gas number density; 1e-4 is consistent with cm^2 -> m^2
schwarzschild(I, Ξ½, Ο, T, P)::Float64 = 1e-4*Ο*(P/(π€*T))*(planck(Ξ½,T) - I)

"""
    schwarzschild(I, Ξ½, Ο, g, ΞΌ, T)

Evaluate the [Schwarzschild differential equation](https://en.wikipedia.org/wiki/Schwarzschild%27s_equation_for_radiative_transfer) for radiative transfer with pressure units [Pa] and assuming the ideal gas law.

``\\frac{dI}{dP} = Ο\\frac{\\textrm{N}_A}{g ΞΌ}[B_Ξ½(T) - I]``

where ``B_Ξ½`` is [`planck`](@ref)'s law and ``N_A`` is Avogadro's number.

# Arguments
* `I`: radiative intensity [W/m``^2``/cm``^{-1}``/sr]
* `Ξ½`: radiation wavenumber [cm``^{-1}``]
* `Ο`: absorption cross-section [cm``^2``/molecule]
* `g`: gravitational acceleration [m/s``^2``]
* `ΞΌ`: mean molar mass [kg/mole]
* `T`: temperature [K]
"""
schwarzschild(I, Ξ½, Ο, g, ΞΌ, T)::Float64 = 1e-4*Ο*(ππ/(ΞΌ*g))*(planck(Ξ½,T) - I)
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 3111 | export trapz, logrange, meshgrid, shellintegral
"""
    trapz(x, y)

Integrate a sorted group of coordinates using the composite [trapezoidal rule](https://en.wikipedia.org/wiki/Trapezoidal_rule).

# Arguments
* `x`: integration coordinates, assumed sorted
* `y`: integrand values at each coordinate

Returns `0.0` for vectors with fewer than two elements. Throws a
`DimensionMismatch` if `x` and `y` do not share the same axes.
"""
function trapz(x::AbstractVector, y::AbstractVector)::Float64
    #eachindex(x, y) throws DimensionMismatch if the axes differ, and works
    #for generically-indexed vectors (not just 1-based ones)
    idx = eachindex(x, y)
    s = 0.0
    #accumulate the area of each trapezoid between adjacent coordinates
    for i in first(idx):last(idx)-1
        @inbounds s += (x[i+1] - x[i])*(y[i] + y[i+1])/2
    end
    return s
end
"""
    meshgrid(x, y)

Construct a pair of coordinate matrices from coordinate vectors: in the first
matrix `x` varies along columns, in the second `y` varies along rows.
"""
function meshgrid(x::AbstractVector, y::AbstractVector)
    #broadcasting a column of ones against the row vector x' replicates x down the rows
    X = ones(length(y)) .* x'
    #broadcasting y against a row of ones replicates y across the columns
    Y = y .* ones(length(x))'
    return X, Y
end
"""
    logrange(a, b, N=101, Ξ³=1)

Return `N` points spanning `[a,b]`, spaced so that they cluster
logarithmically toward `a`; larger `Ξ³` clusters more strongly.
"""
function logrange(a, b, N::Int=101, Ξ³::Real=1)::Vector{Float64}
    #normalized coordinates in [0,1] with logarithmic spacing
    t = (10 .^ LinRange(0, Ξ³, N) .- 1)/(10^Ξ³ - 1)
    #affine map of the normalized coordinates onto [a,b]
    return t*(b - a) .+ a
end
#linear interpolation of the curve defined by x and y, evaluated at q
function interp(q::Float64,
                x::AbstractVector{Float64},
                y::AbstractVector{Float64})::Float64
    #locate the cell [x[c], x[c+1]] containing q
    c = findcell(q, x)
    #linearly interpolate between the cell's endpoints
    rise = (q - x[c])*(y[c+1] - y[c])/(x[c+1] - x[c])
    return rise + y[c]
end
#root finding by the false position (regula falsi) method on a bracketing
#interval, converging when successive function evaluations agree within tol
#NOTE(review): the subscripted variable names in this function all render as
#the same garbled character ("β"), so the distinct subscripts cannot be
#recovered from this copy — consult version control before editing this body.
function falseposition(F::T,
                       xβ::Real,
                       xβ::Real,
                       param=nothing;
                       tol::Float64=1e-6)::Float64 where {T}
    #evaluate at the bracket edges, returning immediately on an exact root
    yβ = F(xβ, param)
    if yβ == 0
        return xβ
    end
    yβ = F(xβ, param)
    if yβ == 0
        return xβ
    end
    #the bracket must contain a sign change
    @assert sign(yβ) != sign(yβ) "false position non-bracketing"
    yβ = Inf
    yβ = NaN
    n = 0
    #iterate at least twice, then until successive evaluations converge
    while abs(yβ - yβ) > (tol + tol*abs(yβ)) || (n < 2)
        #store previous evaluation
        yβ = yβ
        #approximate zero
        xβ = xβ - yβ*(xβ - xβ)/(yβ - yβ)
        yβ = F(xβ, param)
        #reduce the interval
        if yβ*yβ > 0
            xβ = xβ
        else
            xβ = xβ
        end
        #count
        n += 1
    end
    #return the midpoint of the final bracket
    return (xβ + xβ)/2
end
"""
    shellintegral(f, param=nothing; nΞΈ=360, nΟ=720)

Approximate ``∫∫ f \\cos(ΞΈ)\\, dΞΈ\\, dΟ`` over a spherical shell with the
midpoint rule, where the first argument of `f` is latitude in [-π/2, π/2] and
the second is longitude in [0, 2π]; `f` is called as `f(lat, lon, param)`.

# Arguments
* `f`: integrand, called as `f(lat, lon, param)`
* `param`: arbitrary extra parameter forwarded to `f`
* `nΞΈ`: number of latitude cells
* `nΟ`: number of longitude cells
"""
function shellintegral(f::T,
                       param=nothing;
                       nΞΈ::Int=360,
                       nΟ::Int=720)::Float64 where {T}
    out = 0.0
    #midpoint-rule cell widths in latitude and longitude
    dlat = pi/nΞΈ
    dlon = 2*pi/nΟ
    #integrate over cell centers (the previous version assigned the first
    #latitude to a variable that was immediately shadowed by the loop — removed)
    for lat in LinRange(-pi/2 + dlat/2, pi/2 - dlat/2, nΞΈ)
        #the cos(lat) area weight is constant along a latitude ring
        w = cos(lat)*dlat*dlon
        for lon in LinRange(dlon/2, 2*pi - dlon/2, nΟ)
            out += f(lat, lon, param)*w
        end
    end
    return out
end
#in-place finite-difference estimate of dy/dx: one-sided differences at the
#endpoints, average of the adjacent one-sided slopes in the interior
function derivative!(dydx::AbstractVector{T},
                     x::AbstractVector{T},
                     y::AbstractVector{T})::Nothing where {T}
    @assert length(dydx) == length(x) == length(y)
    n = length(x)
    #slope of the secant over the interval [i, j]
    slope(i, j) = (y[j] - y[i])/(x[j] - x[i])
    dydx[1] = slope(1, 2)
    for k in 2:n-1
        dydx[k] = (slope(k, k+1) + slope(k-1, k))/2
    end
    dydx[n] = slope(n-1, n)
    return nothing
end
#-------------------------------------------------------------------------------
#log coordinates are handy
#NOTE(review): the two-argument methods below show identical parameter names
#("Pβ") — this looks like mojibake of distinct subscripted names; confirm
#against version control before editing.

#upward calculations: the coordinate increases as pressure decreases
P2Ο(P)::Float64 = -log(P)
Ο2P(Ο)::Float64 = exp(-Ο)
#convert a pair of pressure bounds at once
P2Ο(Pβ, Pβ)::NTuple{2,Float64} = P2Ο(Pβ), P2Ο(Pβ)

#downward calculations: the coordinate increases with pressure
P2ΞΉ(P)::Float64 = log(P)
ΞΉ2P(ΞΉ)::Float64 = exp(ΞΉ)
P2ΞΉ(Pβ, Pβ)::NTuple{2,Float64} = P2ΞΉ(Pβ), P2ΞΉ(Pβ)
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 205 | using ClearSky
using Test
using SpecialFunctions
using ClearSky: faddeyeva, MOLPARAM
@testset "Faddeyeva" begin include("test_faddeyeva.jl") end
#run each test file inside its own labeled set so failures are grouped by topic
@testset "Faddeyeva" begin include("test_faddeyeva.jl") end
@testset "Molparam" begin include("test_molparam.jl") end
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 944 | #computes "exact" real part of faddeyeva
wofz(z::Complex)::Complex = erfcx(-im*z)
#grid characteristics for tests
xa = [-500, -200, -10, -500]
xb = [500, 200, 10, 500]
nx = [40001, 40001, 40001, 1000]
ya = [1e-5, 1e-20, 1e-5, 1e-20]
yb = [1e5, 1e4, 1e5, 1e5]
ny = [71, 71, 71, 25000]
for i = 1:4
#make a grid
x = collect(LinRange(xa[i], xb[i], nx[i]))
y = 10 .^ collect(LinRange(log10(ya[i]), log10(yb[i]), ny[i]))
X = ones(ny[i])*x'
Y = y*ones(nx[i])'
#complex argument to faddeyeva function
Z = X .+ im*Y
#whole complex result
W = wofz.(Z)
F = faddeyeva.(Z)
relerr = @. abs(real(W) - real(F))/abs(real(W))
@test maximum(relerr) < 1e-4
relerr = @. abs(imag(W) - imag(F))/abs(imag(W))
relerr = relerr[@. !isnan(relerr)]
@test maximum(relerr) < 1e-4
#real part only
W = real.(W)
F = faddeyeva.(X, Y)
relerr = @. abs(W - F)/abs(W)
@test maximum(relerr) < 1e-4
end
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | code | 513 | for i in 1:length(MOLPARAM)
if length(MOLPARAM[i]) > 1
bcheb = MOLPARAM[i][10]
ncheb = MOLPARAM[i][11]
rlerr = MOLPARAM[i][12]
coefs = MOLPARAM[i][13]
@test all(rlerr .<= 0.01)
for j = 1:length(ncheb)
@test ncheb[j] == length(coefs[j])
if bcheb[j]
@test !any(isnan.(coefs[j]))
else
@test length(coefs[j]) == 0
end
end
@test sum(MOLPARAM[i][7]) <= 1.001
end
end
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | docs | 1053 | # ClearSky
<!--- [](https://markmbaum.github.io/ClearSky.jl/stable) --->
[](https://markmbaum.github.io/ClearSky.jl/dev)
[](https://github.com/markmbaum/ClearSky.jl/actions)
[](https://codecov.io/gh/markmbaum/ClearSky.jl)
*under development*
To use the code, download or clone the repository, remove `.jl` from the folder name, then start Julia with all your threads. For example, if your computer has 8 threads,
```shell
julia --threads 8
```
Tell Julia where you put the code
```julia
push!(LOAD_PATH, "path/to/repo")
```
replacing `path/to/repo` with the path to the folder above the `ClearSky` folder. Then load the code with
```julia
using ClearSky
```
and you should see something like
```
[ Info: Precompiling ClearSky [5964c129-204c-4c32-bd6e-c8dff7ca179b]
```
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | docs | 2390 | # Absorption Data
## Spectral Lines
The model is designed to use the [HITRAN](https://hitran.org/) database of spectral line data for various atmospheric gases. To download line data, you must register for a free account then [search](https://hitran.org/lbl/) for lines. It is generally best to download all lines for a single gas into a single file, which will have a `.par` extension. These are text files in fixed-width format and they can be read by functions in `ClearSky`.
To work with spectral line data directly, use the [`readpar`](@ref) function to load the data. This function simply parses the fixed-width file into a dictionary of vectors with the appropriate data types. For more information, see the [`readpar`](@ref) documentation.
If you plan to compute line shapes directly, read par files into [`SpectralLines`](@ref) objects. The constructor reads files using [`readpar`](@ref) then rearranges it for line shape calculations. Unnecessary information is dropped and the molecule name, formula, and molar masses are assigned. To compute line shapes, see [Computing Line Shapes](computing_line_shapes.md).
For only high-level calculations, `par` files can also be loaded directly into [gas objects](gas_objects.md).
```@docs
readpar
SpectralLines
```
## Collision Induced Absorption (CIA)
The model also makes it easy to include CIA data from HITRAN. These files can be [downloaded directly](https://hitran.org/cia/) or all at once using the [`download_cia.py`](https://github.com/wordsworthgroup/ClearSky.jl/blob/main/scripts/download_cia.py) script. Each file contains potentially many tables of absorption data at different wavenumbers and temperatures.
Like the line data, there is a function for reading these CIA files without doing anything else. The [`readcia`](@ref) function reads a `cia` file into a vector of dictionaries. Each dictionary represents a table of absorption data. This is the raw data, but it is relatively hard to work with.
A [`CIATables`](@ref) object arranges each table of absorption data into an interpolator and makes it easy to compute the CIA absorption coefficient at any wavenumber and temperature. Also, in combination with the [`cia`](@ref) function, a [`CIATables`](@ref) can be used to compute absorption cross-sections from provided wavenumber, temperature, and partial pressures.
-----
```@docs
readcia
CIATables
```
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | docs | 1037 | # Atmospheric Profiles
`ClearSky` contains functions for common atmospheric profiles and quantities.
## Temperature Profiles
Many radiative transfer calculations require an atmospheric temperature profile. `ClearSky` is designed to work with any arbitrary temperature profile if it can be defined as a function of pressure, `T(P)`.
For convenience, dry and moist adiabatic profiles are available through the [`DryAdiabat`](@ref) and [`MoistAdiabat`](@ref) types, which are [function-like types](https://docs.julialang.org/en/v1/manual/methods/#Function-like-objects). There is also a [`tropopause`](@ref) function.
```@docs
DryAdiabat
MoistAdiabat
tropopause
```
## Pressure Profiles
In case a pressure profile with constant scale height isn't sufficient, hydrostatic profiles with arbitrary temperature and mean molar mass functions are available through the [`Hydrostatic`](@ref) type and related functions.
```@docs
Hydrostatic
hydrostatic
scaleheight
altitude
```
## Other Functions
```@docs
psatH2O
tsatCO2
ozonelayer
```
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | docs | 3443 | # Gas Objects
Gas objects are high-level representations of greenhouse gases that allow fast and continuous retrieval of absorption cross-sections over a range of temperatures and pressures.
## Creating Gases
Before creating a gas object, you should start your Julia session with all available threads on your system. For example, if your computer has 8 threads available, use
```shell
julia --threads 8
```
then you can define the
1. vector of wavenumbers
2. temperature and pressure ranges ([`AtmosphericDomain`](@ref))
over which its absorption cross-sections will be defined. For example,
```julia
using ClearSky
Ξ½ = LinRange(1, 2500, 2500);
Ξ© = AtmosphericDomain((100,350), 12, (1,1e5), 24);
```
defines 2500 evenly spaced wavenumber samples over the longwave window and an atmospheric domain between 100-350 K and 1-1e5 Pa. The numbers 12 and 24 define the number of interpolation nodes along the temperature and pressure axes, respectively.
Now you can create a gas object directly from a `par` file containing the spectral line data from HITRAN. For example, to load carbon dioxide from the file `"CO2.par"` and assign a well-mixed concentration of 400 ppm,
```julia
co2 = WellMixedGas("CO2.par", 400e-6, Ξ½, Ξ©)
```
In the background, `ClearSky` does the following
1. reads line data
2. computes absorption cross-sections for each wavenumber, temperature, and pressure point defined by `Ξ½` and `Ξ©` (using the [`voigt!`](@ref) profile by default)
3. generates high-accuracy interpolation functions for the temperature-pressure grid at each wavenumber
4. stores concentration information
Consequently, loading gases will take some time. It will be faster with more threads and with fewer wavenumber, temperature, and pressure points.
## Retrieving Cross-Sections
Gases are [function-like objects](https://docs.julialang.org/en/v1/manual/methods/#Function-like-objects). They can be used like functions to retrieve **concentration-scaled** cross-sections at any temperature and pressure within the atmospheric domain. For example, computing cross-sections at a specific temperature and pressure, 250 K and 10000 Pa for example, is as simple as
```julia
co2(250, 1e4)
```
This returns a vector of cross-section values [cm``^2``/molecule] at each wavenumber point the gas was created with. The cross sections are scaled by the gas molar concentration that was used when constructing the gas.
If you only need a cross-section for one of the specific wavenumber points in the gas, you must pass the index of that wavenumber before the temperature and pressure. For example, to get the cross-section corresponding to `Ξ½[600]`,
```julia
co2(600, 250, 1e4)
```
## Storing Gases
Creating gas objects may take some time if you have few threads, a huge number of wavenumbers, and/or a dense temperature-pressure grid in your [`AtmosphericDomain`](@ref). To avoid loading the same gas twice, you can use Julia's built-in [serialization functions](https://docs.julialang.org/en/v1/stdlib/Serialization/) to save gases to files and quickly reload them. For example, assuming you have a gas object named `co2`, the following code will write the gas to file, then reload it.
```julia
using Serialization
#write the gas to a file called "co2"
serialize("co2", co2);
#reload the same gas from that file
co2 = deserialize("co2");
```
-----
```@docs
AtmosphericDomain
OpacityTable
WellMixedGas
VariableGas
concentration
reconcentrate
```
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | docs | 469 | # ClearSky
Welcome to the documentation for `ClearSky`, a line-by-line radiative transfer model written entirely in Julia. The code is designed to make one-dimensional, clear-sky calculations easy, interactive, and fast.
* [Reading Absorption Data](absorption_data.md)
* [Computing Line Shapes](line_shapes.md)
* [Gas Objects](gas_objects.md)
* [Atmospheric Profiles](atmospheric_profiles.md)
* [Modeling](modeling.md)
* [Orbits and Insolation](orbits_insolation.md)
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | docs | 2943 | # Line Shapes
Line shapes are computed following the definitions and equations in [HITRAN](https://hitran.org/docs/definitions-and-units/) (and elsewhere).
In `ClearSky`, line shapes can be computed from [`SpectralLines`](@ref) objects and built in shape functions. The following shape functions are implemented with the necessary supporting functions:
* [`voigt`](@ref)
* [`lorentz`](@ref), pressure-broadening
* [`doppler`](@ref)
* [`PHCO2`](@ref), the Perrin & Hartman sublorentzian shape for carbon dioxide
Each of these functions has three methods for computing cross-sections:
1. at a single wavenumber, temperature, and pressure
2. over a sorted vector of wavenumbers, a single temperature, and a single pressure
3. the same as item 2, but computing cross-sections in-place
Using multiple dispatch, the arguments supplied to the functions determine the behavior. For example, calling the [`voigt`](@ref) function with a single number for the `Ξ½` argument executes the function for a single cross-section. Calling the same function with a vector of wavenumbers in the `Ξ½` argument executes the version of the function optimized for that scenario.
Another way to get cross-sections is through [gas objects](gas_objects.md), which are used for higher-level modeling.
##### A Note on Handling TIPS
Evaluating line shapes requires evaluating the [temperature dependence of line intensities](https://hitran.org/docs/definitions-and-units/#mjx-eqn-eqn-intensity-temperature-dependence). To compute this scaling, the ratio of total internal partition functions (TIPS),
``Q(T_{ref})/Q(T)``
must be evaluated. The necessary information is provided by HITRAN [for every isotopologue](https://hitran.org/docs/iso-meta/) and computing the ratio requires interpolating a range of ``Q(T)`` values for the appropriate temperature.
`ClearSky` evaluates the ratio accurately and automatically inside the [`scaleintensity`](@ref) function.
To facilitate this, the [`molparam.py`](https://github.com/wordsworthgroup/ClearSky.jl/blob/main/scripts/molparam.py) script was used to download Q data for each isotopologue, generate high-accuracy interpolating Chebyshev polynomials for each one, and write the information to a Julia source file called [`molparam.jl`](https://github.com/wordsworthgroup/ClearSky.jl/blob/main/src/molparam.jl). The pre-computed interpolating coefficients are defined directly in source code, allowing rapid and accurate evaluation of the TIPS ratio. The interpolating functions are guaranteed to reproduce the provided data with less than 1 % error between 25 and 1000 K.
-----
## Voigt Profile
```@docs
fvoigt
voigt
voigt!
```
-----
## Lorentz Profile
```@docs
Ξ³lorentz
florentz
lorentz
lorentz!
```
-----
## Doppler Profile
```@docs
Ξ±doppler
fdoppler
doppler
doppler!
```
-----
## Perrin & Hartman Sublorentzian CO2 Profile
```@docs
Ξ§PHCO2
PHCO2
PHCO2!
```
-----
## Other
```@docs
scaleintensity
```
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | docs | 120 | # Modeling
```@docs
opticaldepth
transmittance
outgoing
topfluxes
topimbalance
GroupedAbsorber
AcceleratedAbsorber
```
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"MIT"
] | 0.1.0 | 861e207072e8717757e97af5c3a4af747b0c76ea | docs | 1682 | # Orbits and Insolation
A collection of functions are available for working with general elliptical orbits and insolation patterns for planets orbiting stars.
Function arguments are defined below
| Argument | Definition | Units |
| -------: | :--------- | :---- |
| `a` | [semi-major axis](https://en.wikipedia.org/wiki/Semi-major_and_semi-minor_axes) | m |
| `e` | [eccentricity](https://en.wikipedia.org/wiki/Orbital_eccentricity) | - |
| `E` | [eccentric anomaly](https://en.wikipedia.org/wiki/Eccentric_anomaly) | rad |
| `f` | [true anomaly](https://en.wikipedia.org/wiki/True_anomaly) or stellar longitude | rad |
| `Ξ³` | [obliquity](https://en.wikipedia.org/wiki/Axial_tilt) | rad |
| `m` | star mass | kg |
| `p` | [precession angle](https://en.wikipedia.org/wiki/Axial_precession) | rad |
| `ΞΈ` | latitude | rad |
| `ΞΈβ` | substellar latitude | rad |
| `rβ` | apoapsis distance | m |
| `rβ` | periapsis distance | m |
| `t` | time | sec |
| `T` | orbital period | sec |
The mass `m`, most precisely, should be the sum of the star mass and planet mass, ``m_s + m_p``. For most cases the planet mass is negligible, however, and ``m_s + m_p \approx m_s``.
The precession angle `p` is defined so that when ``p=0``, the northern hemisphere is tilted directly toward the star at periapsis. This means that northern summer occurs when planet is closet to the star. Different values of ``p β [0,2Ο]`` control when in the orbital path the equinoxes and solstices occur. For example, if ``p = Ο/2``, the vernal equinox occurs at periapsis and the northern hemisphere is moving into summer.
-----
```@autodocs
Modules = [ClearSky]
Pages = ["orbital.jl", "insolation.jl"]
```
| ClearSky | https://github.com/markmbaum/ClearSky.jl.git |
|
[
"Apache-2.0"
] | 0.1.2 | 77d4f28d2f25763bb740a6646f36c4727e0a6605 | code | 416 | push!(LOAD_PATH,"../src/")
using Documenter
using DataIO
makedocs(
sitename = "DataIO.jl",
authors = "Tobias Frilling",
format = Documenter.HTML(),
modules = [DataIO],
pages = [
"Home" => "index.md",
"File Formats" => [
"Data `(*.lrn)`" => "lrn.md",
"Matrix `(*.umx)`" => "umx.md"
]
]
)
deploydocs(
repo = "github.com/ckafi/DataIO.jl"
)
| DataIO | https://github.com/ckafi/DataIO.jl.git |
|
[
"Apache-2.0"
] | 0.1.2 | 77d4f28d2f25763bb740a6646f36c4727e0a6605 | code | 1095 | # Copyright 2019 Tobias Frilling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
readCLS(filename::String, directory=pwd())
Read the contents of a `*.cls` and return a `Dict` from classid to index.
"""
function readCLS(filename::String, directory = pwd())
filename = prepare_path(filename, "cls", directory)
result = Dict{Int, Vector{Int}}()
open(filename, "r") do f
skipStarting(f, ['#', '%'])
for row in eachrow(readdlm(f, '\t', Float64, skipblanks = true))
push!(get!(result, row[2], []), row[1])
end
end
return result
end
| DataIO | https://github.com/ckafi/DataIO.jl.git |
|
[
"Apache-2.0"
] | 0.1.2 | 77d4f28d2f25763bb740a6646f36c4727e0a6605 | code | 776 | # Copyright 2019 Tobias Frilling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#DataIO bundles readers and writers for the *.cls, *.lrn, and *.umx file formats
module DataIO

using DelimitedFiles

#shared helpers (path preparation, comment skipping)
include("utils.jl")

#class assignment files (*.cls)
include("CLS.jl")
export readCLS

#learning data files (*.lrn)
include("LRN.jl")
export LRNData, writeLRN, readLRN

#matrix files (*.umx)
include("UMX.jl")
export readUMX

end # module
| DataIO | https://github.com/ckafi/DataIO.jl.git |
|
[
"Apache-2.0"
] | 0.1.2 | 77d4f28d2f25763bb740a6646f36c4727e0a6605 | code | 5201 | # Copyright 2019 Tobias Frilling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
    LRNCType

Enum representing the column types for LRNData:
`ignore = 0, data = 1, class = 3, key = 9`
"""
@enum LRNCType begin
    ignore = 0
    data = 1
    class = 3
    key = 9
end
"""
    LRNData

`LRNData` represents the contents of a `*.lrn` file with the following fields:
- `data::Matrix{Float64}`
    Matrix of data, cases in rows, variables in columns
- `column_types::Array{LRNCType, 1}`
    Column types, see `LRNCType`
- `key::Array{Int64, 1}`
    Unique key for each line
- `names::Array{String, 1}`
    Column names
- `key_name::String`
    Name for key column
- `comment::String`
    Comments about the data

Throws `DimensionMismatch` or `ArgumentError` when the fields are inconsistent.
"""
struct LRNData
    data::Matrix{Float64}
    column_types::Array{LRNCType,1}
    key::Array{Int64,1}
    names::Array{String,1}
    key_name::String
    comment::String
    function LRNData(data::Matrix{Float64}, column_types, key, names, key_name, comment)
        (nrow, ncol) = size(data)
        # Enforce invariants with real exceptions instead of `@assert`:
        # assertions may be disabled at higher optimization levels and must
        # not be used for validating caller-supplied input.
        length(names) == ncol ||
            throw(DimensionMismatch("got $(length(names)) names for $ncol columns"))
        length(column_types) == ncol ||
            throw(DimensionMismatch("got $(length(column_types)) column types for $ncol columns"))
        length(key) == nrow ||
            throw(DimensionMismatch("got $(length(key)) keys for $nrow rows"))
        allunique(key) || throw(ArgumentError("keys must be unique"))
        all(i -> 1 <= i <= nrow, key) ||
            throw(ArgumentError("keys must lie in 1:$nrow"))
        new(data, column_types, key, names, key_name, comment)
    end
end
"""
    LRNData(data::AbstractMatrix{Float64})

Convenience constructor for `LRNData`. Uses sensible defaults:
```
column_types=[1,1,1...]   (every column is a data column; the key is stored separately)
key=[1,2,3...]
names=["C1","C2","C3"...]
key_name="Key"
comment=""
```
"""
function LRNData(
    data::AbstractMatrix{Float64};
    # one LRNCType `data` entry per column
    column_types = LRNCType.(fill(1, size(data, 2))),
    # sequential row keys
    key = collect(1:size(data, 1)),
    # "C1", "C2", ... column names
    names = fill("C", size(data, 2)) .* string.(1:size(data, 2)),
    key_name = "Key",
    comment = "",
)
    # NOTE(review): the inner constructor requires `Matrix{Float64}`; a
    # non-Matrix AbstractMatrix argument will raise a MethodError here.
    LRNData(data, column_types, key, names, key_name, comment)
end
# Indexing an `LRNData` addresses rows of `data` through the `key` column:
# `D[i]` returns the data row whose key is `key[i]`.
Base.firstindex(D::LRNData) = 1
# BUG FIX: was `size(D, 2)`, which calls `size` on LRNData itself — no such
# method exists, so `D[end]` raised a MethodError. The valid index range is
# one entry per data row, i.e. `1:length(D.key)`.
Base.lastindex(D::LRNData) = length(D.key)
Base.getindex(D::LRNData, i::Int) = D.data[D.key[i], :]
Base.getindex(D::LRNData, I) = [D[i] for i in I]
"""
    writeLRN(lrn::LRNData, filename::String, directory=pwd())

Write the contents of a `LRNData` struct into a file.
"""
function writeLRN(lrn::LRNData, filename::String, directory = pwd())
    (nrow, ncol) = size(lrn.data)
    filename = prepare_path(filename, "lrn", directory)
    open(filename, "w") do f
        # write comment if it exists
        if !isempty(lrn.comment)
            write(f, "# $(lrn.comment)\n#\n")
        end
        # write number of cases
        write(f, "% $(nrow)\n")
        # write number of variables (+1 for key)
        write(f, "% $(ncol+1)\n")
        # write column types; `key` here is the LRNCType enum member, so
        # `Int(key)` is 9 — the key column always comes first
        write(f, "% $(Int(key))\t$(join(map(Int,lrn.column_types), '\t'))\n")
        # write names
        write(f, "% $(lrn.key_name)\t$(join(lrn.names, '\t'))\n")
        # write data
        for (index, row) in enumerate(eachrow(lrn.data))
            # NOTE(review): only +Inf is mapped to NaN; -Inf passes through —
            # confirm whether the file format expects both to be replaced
            new_row = replace(row, Inf => NaN)
            write(f, "$(lrn.key[index])\t$(join(new_row,'\t'))\n")
        end
    end
end
"""
    readLRN(filename::String, directory=pwd())

Read the contents of a `*.lrn` and return a `LRNData` struct.
"""
function readLRN(filename::String, directory = pwd())
    # Placeholders; all are reassigned inside the `open` block below.
    data = []
    column_types = []
    key = []
    names = []
    key_name = ""
    comment = ""
    key_index = 0
    # There is currently no way to have a native file chooser dialogue
    # without building a whole Gtk/Tk GUI
    filename = prepare_path(filename, "lrn", directory)
    open(filename, "r") do f
        line = readline(f)
        # Leading '#' lines are concatenated into the comment string.
        while startswith(line, '#')
            comment *= strip(line, [' ', '\t', '#'])
            line = readline(f)
        end
        strip_header = l -> strip(l, [' ', '\t', '%'])
        # Number of datasets (parsed for format conformance; not used further)
        nrow = parse(Int, strip_header(line))
        line = readline(f)
        # Number of columns (including the key column)
        ncol = parse(Int, strip_header(line))
        line = readline(f)
        # Column types; `DataIO.key` is the LRNCType enum member (qualified to
        # avoid clashing with the local variable `key`).
        column_types = split(strip_header(line), '\t')
        column_types = map(x -> LRNCType(parse(Int, x)), column_types)
        # NOTE(review): assumes the file declares a key column — `findfirst`
        # returns `nothing` otherwise and `deleteat!` would then error.
        key_index = findfirst(x -> x == DataIO.key, column_types)
        deleteat!(column_types, key_index)
        line = readline(f)
        # Column names; the key column's name is split off into `key_name`.
        names = split(strip_header(line), '\t')
        key_name = names[key_index]
        deleteat!(names, key_index)
        # Remaining lines: the tab-separated data matrix.
        data = readdlm(f, '\t', Float64, skipblanks = true)
        key = map(Int, data[:, key_index])
        data = data[:, deleteat!(collect(1:ncol), key_index)] # remove key column
    end
    LRNData(data, column_types, key, names, key_name, comment)
end
| DataIO | https://github.com/ckafi/DataIO.jl.git |
|
[
"Apache-2.0"
] | 0.1.2 | 77d4f28d2f25763bb740a6646f36c4727e0a6605 | code | 1013 | # Copyright 2019 Tobias Frilling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
    readUMX(filename::String, directory=pwd())

Read the contents of a `*.umx` and return a `Matrix{Float64}`
"""
function readUMX(filename::String, directory = pwd())
    path = prepare_path(filename, "umx", directory)
    heights = Matrix{Float64}(undef, (0, 0))
    open(path, "r") do io
        # Skip the '#'/'%' header lines, then parse the tab-separated matrix.
        skipStarting(io, ['#', '%'])
        heights = readdlm(io, '\t', Float64, skipblanks = true)
    end
    return heights
end
| DataIO | https://github.com/ckafi/DataIO.jl.git |
|
[
"Apache-2.0"
] | 0.1.2 | 77d4f28d2f25763bb740a6646f36c4727e0a6605 | code | 2142 | # Copyright 2019 Tobias Frilling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
    addext(filename, extension)

Return `filename` with `extension` appended (dot-separated) unless it already
ends with it. Trailing dots on `filename` are stripped first.
"""
function addext(filename::String, extension::String)::String
    filename = rstrip(filename, '.')
    # `startswith` instead of `extension[1]`: the latter throws a
    # BoundsError when `extension` is the empty string.
    extension = startswith(extension, '.') ? extension : '.' * extension
    return endswith(filename, extension) ? filename : filename * extension
end
"""
    prepare_path(filename, extension, directory=pwd())

Resolve `filename` against `directory`, ensuring it carries `extension`,
and return the normalised path.
"""
function prepare_path(filename::String, extension::String, directory = pwd())::String
    return normpath(joinpath(directory, addext(filename, extension)))
end

"""
    skipStarting(stream, chars)

Advance `stream` past every leading line that begins with one of `chars`,
leaving it positioned at the first line that does not.
"""
function skipStarting(stream::IOStream, chars::AbstractVector{Char})
    while true
        lookahead = read(stream, Char)
        if lookahead in chars
            readline(stream)  # discard the rest of the marked line
        else
            # Push the look-ahead character back and stop.
            return skip(stream, -ncodeunits(lookahead))
        end
    end
end
# Convert ESOM weights from the 2-D layout (one neuron per row, as used by
# the R DataIO package) into a 3-D array indexed as (weight, row, column).
function r2j_weights(rweights::AbstractMatrix{Float64}, rows::Int, columns::Int)
    nweights = size(rweights, 2)
    result = Array{Float64,3}(undef, nweights, rows, columns)
    for r in 1:rows, c in 1:columns
        result[:, r, c] = rweights[j2r_ind(r, c, columns), :]
    end
    return result
end

# Inverse of `r2j_weights`: flatten a (weight, row, column) array back into
# the neuron-per-row matrix layout.
function j2r_weights(jweights::AbstractArray{Float64,3})
    (nweights, rows, columns) = size(jweights)
    result = Array{Float64,2}(undef, rows * columns, nweights)
    for r in 1:rows, c in 1:columns
        result[j2r_ind(r, c, columns), :] = jweights[:, r, c]
    end
    return result
end
# Map a (row, column) position in a grid with `columns` columns onto the
# linear, row-major index used by the R data layout.
j2r_ind(r, c, columns) = (r - 1) * columns + c
# Variant that ignores a leading index (e.g. the weight dimension).
@inline j2r_ind(_, r, c, columns) = j2r_ind(r, c, columns)
# Accept CartesianIndex positions by splatting their components.
j2r_ind(i::CartesianIndex, columns) = j2r_ind(i.I..., columns)
# Inverse of `j2r_ind`: convert a linear row-major index back to (row, column).
function r2j_ind(i, col)
    q, rem = divrem(i - 1, col)
    return (q + 1, rem + 1)
end
| DataIO | https://github.com/ckafi/DataIO.jl.git |
|
[
"Apache-2.0"
] | 0.1.2 | 77d4f28d2f25763bb740a6646f36c4727e0a6605 | docs | 397 | # DataIO.jl
[](https://ckafi.github.io/DataIO.jl/stable/)
[](https://ckafi.github.io/DataIO.jl/dev/)
[](https://travis-ci.com/ckafi/DataIO.jl)
A Julia port of the [DataIO R package](https://github.com/aultsch/DataIO).
| DataIO | https://github.com/ckafi/DataIO.jl.git |
|
[
"Apache-2.0"
] | 0.1.2 | 77d4f28d2f25763bb740a6646f36c4727e0a6605 | docs | 405 | # DataIO.jl
A Julia port of the [DataIO R package](https://github.com/aultsch/DataIO).
## Introduction
This package provides functions for reading and writing of data files consisting
of ``n Γ k`` matrices.
## Installation
`DataIO` is not yet registered. To install the development version from a Julia
REPL type `]` to enter Pkg REPL mode and run
```
pkg> add https://github.com/ckafi/DataIO.jl
```
| DataIO | https://github.com/ckafi/DataIO.jl.git |
|
[
"Apache-2.0"
] | 0.1.2 | 77d4f28d2f25763bb740a6646f36c4727e0a6605 | docs | 1324 | # LRN
## File structure
The `*.lrn` file contains the input data as tab separated values. Features in
columns, datasets in rows. The first column should be a unique integer key. The
optional header contains descriptions of the columns.
```
# comment
#
% n
% m
% s1 s2 .. sm
% var_name1 var_name2 .. var_namem
x11 x12 .. x1m
x21 x22 .. x2m
. . . .
. . . .
xn1 xn2 .. xnm
```
|Element |Description |
|:------ |:---------- |
|``n`` | Number of datasets |
|``m`` | Number of columns (including index) |
|``s_i`` | Type of column: 9 for unique key, 1 for data, 0 to ignore column |
|``var\_name_i`` | Name for the i-th feature |
|``x_{ij}`` | Elements of the data matrix. Decimal numbers denoted by '.', not by ','|
## Writing and Reading
```@docs
writeLRN
readLRN
```
## Types and Constructors
```@docs
LRNData
LRNData(::AbstractMatrix{Float64})
LRNCType
```
| DataIO | https://github.com/ckafi/DataIO.jl.git |
|
[
"Apache-2.0"
] | 0.1.2 | 77d4f28d2f25763bb740a6646f36c4727e0a6605 | docs | 666 | # UMX
## File structure
The `*.umx` file contains a height value for each neuron of an ESOM, e.g. the elements of the U-Matrix.
```
% k l
h11 h21 h31 .. hk1
h12 h22 h32 .. hk2
. . . .. .
. . . .. .
h1l h2l h3l .. hkl
```
|Element | Description |
|:------ | :---------- |
|``k`` | Number of rows of the ESOM. |
|``l`` | Number of columns of the ESOM. |
|``h_{ij}`` | Height of the neuron at position i in x-direction and j in y-direction.|
| DataIO | https://github.com/ckafi/DataIO.jl.git |
|
[
"MIT"
] | 0.1.0 | 74840bce8734609e4b921aefdceed22e18460b52 | code | 312 | using SurfaceTopology
using Documenter
using Literate
# Render the Literate.jl example script as the docs landing page (src/index.md).
Literate.markdown(joinpath(@__DIR__, "../examples/features.jl"), joinpath(@__DIR__,"src/"); credit = false, name = "index")
# Build the site from the generated page.
makedocs(sitename="SurfaceTopology.jl",pages = ["index.md"])
# Deploy the built documentation via Documenter's deploydocs.
deploydocs(
    repo = "github.com/akels/SurfaceTopology.jl.git",
)
| SurfaceTopology | https://github.com/akels/SurfaceTopology.jl.git |
|
[
"MIT"
] | 0.1.0 | 74840bce8734609e4b921aefdceed22e18460b52 | code | 2534 | using GeometryTypes
using SurfaceTopology
using LinearAlgebra
# Fit the quadratic form z ~ C*x^2 + D*x*y + E*y^2 to the neighbour offsets
# `vects` after rotating them so that `vnormal` points along the z axis.
# Returns the coefficients (C, D, E).
function quadraticform(vects,vnormal)
    # Generators of so(3): infinitesimal rotations about x, y and z.
    Lx = [0 0 0; 0 0 -1; 0 1 0]
    Ly = [0 0 1; 0 0 0; -1 0 0]
    Lz = [0 -1 0; 1 0 0; 0 0 0]
    # Axis bisecting the z axis and the vertex normal: a rotation by pi
    # about it swaps the two, so R maps `vnormal` onto z.
    d = [0,0,1] + vnormal
    d /= norm(d)
    Ln = d[1]*Lx + d[2]*Ly + d[3]*Lz
    R = exp(pi*Ln) # matrix exponential of pi * (axis . L) -> rotation matrix
    # Rotate a copy of the offsets into the tangent frame (normal along z).
    vects = copy(vects)
    for vj in 1:length(vects)
        vects[vj] = R*vects[vj]
    end
    ### Construction of the least-squares system (normal equations), with
    ### each sample weighted by 1/|v|^2.
    A = Array{Float64}(undef,3,3)
    B = Array{Float64}(undef,3)
    vects_norm2 = Array{Float64}(undef,length(vects))
    for vj in 1:length(vects)
        vects_norm2[vj] = norm(vects[vj])^2
    end
    A[1,1] = sum((v[1]^4 for v in vects) ./ vects_norm2)
    A[1,2] = sum((v[1]^3*v[2] for v in vects) ./ vects_norm2)
    A[1,3] = sum((v[1]^2*v[2]^2 for v in vects) ./ vects_norm2)
    # A is symmetric; fill the remaining entries from the ones above.
    A[2,1] = A[1,2]
    A[2,2] = A[1,3]
    A[2,3] = sum( (v[2]^3*v[1] for v in vects) ./vects_norm2)
    A[3,1] = A[1,3]
    A[3,2] = A[2,3]
    A[3,3] = sum((v[2]^4 for v in vects) ./vects_norm2)
    B[1] = sum((v[3]*v[1]^2 for v in vects) ./vects_norm2)
    B[2] = sum((v[1]*v[2]*v[3] for v in vects) ./vects_norm2)
    B[3] = sum((v[2]^2*v[3] for v in vects) ./vects_norm2)
    C,D,E = A\B
    return C,D,E
end
# Estimate the mean curvature at every vertex of a closed triangular mesh
# given vertex `points` and a `topology` supporting EdgeRing/VertexRing.
function meancurvature(points,topology)
    curvatures = Array{Float64}(undef,length(points))
    for v in 1:length(points)
        # Unnormalised vertex normal: sum of cross products around the ring.
        s = Point(0,0,0)
        for (v1,v2) in EdgeRing(v,topology)
            s += cross(points[v2],points[v1])
        end
        normal = s ./ norm(s)
        # Offsets to the one-ring neighbours of v.
        vring = collect(VertexRing(v,topology))
        vects = [points[vi] - points[v] for vi in vring]
        # Local quadratic fit in the tangent frame; its Hessian/2 gives the
        # second-fundamental-form matrix A.
        C,D,E = quadraticform(vects,normal)
        A = [C D/2;D/2 E]
        # Principal curvatures are the eigenvalues of -A; H is their mean.
        k1,k2 = eigvals(-A)
        H = (k1 + k2)/2
        curvatures[v] = H
    end
    return curvatures
end
### Testing
# Unit icosahedron: t is the golden ratio; vertices are scaled to radius 1.
t = ( 1 + sqrt( 5 ) ) / 2;
vertices = Point{3,Float64}[
    [ -1, t, 0 ], [ 1, t, 0 ], [ -1, -t, 0 ], [ 1, -t, 0 ],
    [ 0, -1, t ], [ 0, 1, t ], [ 0, -1, -t ], [ 0, 1, -t ],
    [ t, 0, -1 ], [ t, 0, 1 ], [ -t, 0, -1 ], [ -t, 0, 1 ]
] ./ sqrt(1 + t^2)
# The 20 triangular faces of the icosahedron (consistently oriented).
faces = Face{3,Int64}[
    [1, 12, 6], [1, 6, 2], [1, 2, 8], [1, 8, 11], [1, 11, 12], [2, 6, 10], [6, 12, 5],
    [12, 11, 3], [11, 8, 7], [8, 2, 9], [4, 10, 5], [4, 5, 3], [4, 3, 7], [4, 7, 9],
    [4, 9, 10], [5, 10, 6], [3, 5, 12], [7, 3, 11], [9, 7, 8], [10, 9, 2]
]
### As paraboloid grows faster than sphere the estimated curvature for a coarse mesh would be lower
# (for a unit sphere the exact mean curvature is 1 everywhere)
curvatures = meancurvature(vertices,faces)
| SurfaceTopology | https://github.com/akels/SurfaceTopology.jl.git |
|
[
"MIT"
] | 0.1.0 | 74840bce8734609e4b921aefdceed22e18460b52 | code | 4924 | # # Introduction
# As we know, triangular meshes can be stored in a computer in multiple different ways, each having strengths and weaknesses in a particular case at hand. But it is not always clear which data structure would be most suitable for a specific task. Thus it is wise to write data-structure-generic code, which is the precise purpose of this package for oriented closed surfaces.
# The most straightforward representation of triangular mesh topology is in array `Array{Faces{3,Int},1}` containing a list of triangular faces which are defined by their vertices. That as name stands allows quick iteration over faces and also edges. However, often in a numerical code one wants not only to iterate over faces or vertices but also in case of singularity subtraction, integration and local property estimation like in normal vector and curvature calculations to know what are neighbouring vertices surrounding a given vertex while keeping track of the orientation of the normals. Also, one wishes to modify the topology itself by collapsing, flipping and splitting edges. And that is why different data structures are needed for different problems.
# Fortunately, it is possible to abstract mesh topology queries through iterators:
# ```@docs
# Faces
# Edges
# ```
# and circulators:
# ```@docs
# VertexRing
# EdgeRing
# ```
# ## API
# The package implements multiple kinds of data structures. The simplest one is `PlainDS` one which stores a list of faces and is just an alias to `Array{Faces{3,Int},1}`. As an example of how that works, let's define the data structure.
using GeometryTypes
using SurfaceTopology
faces = Face{3,Int64}[
[1, 12, 6], [1, 6, 2], [1, 2, 8], [1, 8, 11], [1, 11, 12], [2, 6, 10], [6, 12, 5],
[12, 11, 3], [11, 8, 7], [8, 2, 9], [4, 10, 5], [4, 5, 3], [4, 3, 7], [4, 7, 9],
[4, 9, 10], [5, 10, 6], [3, 5, 12], [7, 3, 11], [9, 7, 8], [10, 9, 2]
]
# We can use the data structure `PlainDS` for the queries. The iterators, for example.
collect(Faces(faces))
# and
collect(Edges(faces))
# giving us desirable output.
# We can also ask what neighbouring vertices and edges for a particular vertex by using circulators. For this simple data structure that requires us to do a full lookup on the face list, which is nicely abstracted away:
collect(VertexRing(3,faces))
# and
collect(EdgeRing(3,faces))
# In practice, one should use `EdgeRing` over `VertexRing` since, in the latter one, vertices are not ordered and thus can not be used for example to deduce the direction of the normal vector.
# ## Data structures
# The same API works for all other data structures. There is a data structure `CachedDS`, built on top of `PlainDS`, which stores the closest vertices (vertex ring). Then there is a data structure `FaceDS` which, in addition to `PlainDS`, also stores the neighbouring faces that share a common edge. And then there is the data structure most commonly used in numerics, `HalfEdgeDS` (implemented as `EdgeDS`).
# The most straightforward extension of `PlainDS` is just a plain caching of the neighbouring vertices for each vertex, which are stored in `CachedDS` together with the list of faces.
# ```@docs
# CachedDS
# CachedDS(::SurfaceTopology.PlainDS)
# ```
# which can be initialised from `PlainDS`
cachedtopology = CachedDS(faces)
# And the same API can be used for querries:
collect(VertexRing(3,cachedtopology))
# A more advanced data structure is a face based data structure `FaceDS` which additionally for each face stores three neighbouring face indices.
# ```@docs
# FaceDS
# FaceDS(::SurfaceTopology.PlainDS)
# ```
# which again can be initialised from `PlainDS`
facedstopology = FaceDS(faces)
# and what would one expect
collect(VertexRing(3,facedstopology))
# works.
# All previous ones were some forms of face-based data structures. More common (by my own impression) the numerical world uses edge-based data structures. This package implements half-edge data structure `EdgeDS` which stores a list of edges by three numbers - base vertex index, next edge index and twin edge index.
# ```@docs
# EdgeDS
# ```
# To initialise this datastructure one executes:
edgedstopology = EdgeDS(faces)
#
collect(VertexRing(3,edgedstopology))
# ## Wishlist
# At the moment the package is able only to answer queries, but it would be desirable also to be able to do topological surgery operations. For completeness, those would include.
# + `edgeflip(topology,edge)`
# + `edgesplit(topology,edge)`
# + `edgecollapse(topology,edge)`
# And with them even a method for `defragmenting` the topology (actually trivial if we generalize constructors as in `CachedDS`). Unfortunately, at the moment, I am not working with anything geometry related thus the development of that on my own will be slow. I hope that the clarity and simplicity of this package could serve someone as a first step, and so eventually, topological operations would be implemented out of necessity.
| SurfaceTopology | https://github.com/akels/SurfaceTopology.jl.git |
|
[
"MIT"
] | 0.1.0 | 74840bce8734609e4b921aefdceed22e18460b52 | code | 264 | module SurfaceTopology
using GeometryTypes
# One source file per topology representation; primitives.jl holds the shared
# iterator/circulator definitions used by all of them.
include("primitives.jl")
include("plainds.jl")
include("faceds.jl")
include("cachedds.jl")
include("edgeds.jl")
# Data structures
export FaceDS, CachedDS, EdgeDS
# Circulators and iterators
export FaceRing, VertexRing, EdgeRing
export Edges,Faces
end # module
| SurfaceTopology | https://github.com/akels/SurfaceTopology.jl.git |
|
[
"MIT"
] | 0.1.0 | 74840bce8734609e4b921aefdceed22e18460b52 | code | 1141 | """
A datastructure which in addition to a list of faces stores connectivity information for each vertex.
"""
struct CachedDS
    faces         # list of triangular faces, as in PlainDS
    connectivity  # connectivity[v]: neighbour vertices of v, in ring order
end

Faces(t::CachedDS) = t.faces
# Each undirected edge appears twice in the decomposition; keep v1 < v2 only.
Edges(t::CachedDS) = filter(x->x[1]<x[2],decompose(Face{2,Int},t.faces))

"""
    CachedDS(t)

Constructs cached face based datastructure `CachedDS` from arbitrary topology `t` which provides `EdgeRing` iterator.
"""
function CachedDS(faces)
    vmax = maximum(maximum(faces))
    connectivity = Array{Int,1}[]
    for i in 1:vmax
        # Collect the directed edges (v1 -> v2) of the ring around vertex i.
        v1 = Array{Int}(undef,0)
        v2 = Array{Int}(undef,0)
        for (v1i,v2i) in EdgeRing(i,faces)
            push!(v1,v1i)
            push!(v2,v2i)
        end
        # Chain the edges end-to-end to get the neighbours in ring order.
        vj = v2[1]
        coni = [vj]
        for j in 2:length(v1)
            vj, = v2[v1.==vj]
            push!(coni,vj)
        end
        push!(connectivity,coni)
    end
    return CachedDS(faces,connectivity)
end

# Ring queries read straight from the cache.
VertexRing(vi::Int64,t::CachedDS) = t.connectivity[vi]
EdgeRing(vi::Int64,t::CachedDS) = PairIterator(t.connectivity[vi])
FaceRing(vi::Int64,t::CachedDS) = error("Face ring is not suitable for this datastructure.")
| SurfaceTopology | https://github.com/akels/SurfaceTopology.jl.git |
|
[
"MIT"
] | 0.1.0 | 74840bce8734609e4b921aefdceed22e18460b52 | code | 3444 | function findtwin(edges,e)
    # Return the index of the half-edge oriented opposite to `e`
    # (i.e. running e[2] -> e[1]); falls through to `nothing` when absent.
    #edges = decompose(Face{2,Int},faces)
    for j in 1:length(edges)
        if edges[j][2]==e[1] && edges[j][1]==e[2]
            return j
        end
    end
end
"""
    EdgeDS(faces::PlainDS)

Constructs and returns edge based datastructure `EdgeDS` from plain face based datastructure PlainDS. Half edge based datastructure `EdgeDS` stores list of edges consisting of base vertex, next edge index and twin edge index.
"""
struct EdgeDS
    # Each entry encodes one half-edge as Face(base vertex, next edge, twin edge).
    # The element type happens to equal PlainDS's, hence the dedicated wrapper type.
    edges::Array{Face{3,Int},1}
    # Possible future extensions (kept simple for now):
    #neighs::Array{Face{3,Int},1}
    #vfaces::Array{Int,1}
    function EdgeDS(faces::PlainDS)
        # All directed half-edges of the mesh (two per undirected edge).
        edges = decompose(Face{2,Int},faces)
        rows = Face{3,Int}[]
        for ei in edges
            basev = ei[1]
            # Twin: the half-edge running in the opposite direction.
            twin = -1
            for j in 1:length(edges)
                if edges[j][2]==ei[1] && edges[j][1]==ei[2]
                    twin = j
                    break
                end
            end
            ### Find the face containing this directed edge; the orientation
            ### matters so the "next" edge follows the face's winding order.
            ti = findtriangle(faces,ei)
            face = faces[ti]
            nedge = getedge(face,ei)
            next = -1
            for j in 1:length(edges)
                if edges[j]==nedge
                    next = j
                    break
                end
            end
            push!(rows,Face(basev,next,twin))
        end
        return new(rows)
    end
end
"""
    findtriangle(faces, edge)

Return the index of the face that contains the directed edge
`edge[1] -> edge[2]` in its winding order, or `nothing` when absent.
"""
function findtriangle(faces::PlainDS,edge)
    for idx in 1:length(faces)
        face = faces[idx]
        # Quick membership test before checking the orientation.
        (in(edge[1],face) && in(edge[2],face)) || continue
        a,b,c = face
        if (a==edge[1] && b==edge[2]) || (b==edge[1] && c==edge[2]) || (c==edge[1] && a==edge[2])
            return idx
        end
    end
end
# Given triangle `t` and one of its directed edges, return the next directed
# edge in the triangle's winding order (wrapping (v3,v1) back to (v1,v2)).
function getedge(t,edge)
    a,b,c = t
    if edge==Face(a,b)
        return Face(b,c)
    elseif edge==Face(b,c)
        return Face(c,a)
    else
        return Face(a,b)
    end
end
### Circulation around a vertex in the half-edge structure: find any
### half-edge based at `v` and start the ring from its successor edge.
function VertexRing(v::Int,t::EdgeDS)
    edges = t.edges
    for ei in edges
        if v==ei[1]
            next = ei[2]
            return VertexRing(v,next,t)
        end
    end
end
# First iteration yields the base vertex of the starting edge.
function Base.iterate(iter::VertexRing{EdgeDS})
    e = iter.start
    v = iter.t.edges[e][1]
    return (v,e)
end
# Subsequent iterations hop next -> twin -> next to reach the following
# neighbour; the ring is done once we return to the starting edge.
function Base.iterate(iter::VertexRing{EdgeDS},e)
    edges = iter.t.edges
    ea = edges[e][2]
    eb = edges[ea][3] # the twin
    ec = edges[eb][2]
    if ec==iter.start
        return nothing
    else
        return (edges[ec][1],ec)
    end
end
# Edge ring: consecutive vertex-ring entries paired up.
function EdgeRing(v::Int,t::EdgeDS)
    iter = VertexRing(v,t)
    return PairIterator(iter)
end
### Reconstructing faces would require marking used rows while looping over
### them; straightforward to implement and needed for exporting.
Faces(t::EdgeDS) = error("Not implemented")
### Would select only edges whose twin index is larger to avoid duplicates.
Edges(t::EdgeDS) = error("Not implemented")
| SurfaceTopology | https://github.com/akels/SurfaceTopology.jl.git |
|
[
"MIT"
] | 0.1.0 | 74840bce8734609e4b921aefdceed22e18460b52 | code | 3058 | """
A face based datastructure storing faces, neighbour face indices and vertex to face map arrays.
"""
struct FaceDS
    faces::Array{Face{3,Int},1}   # triangular faces
    neighs::Array{Face{3,Int},1}  # neighs[ti]: faces sharing an edge with face ti
    vfaces::Array{Int,1}          # vfaces[v]: index of one face touching vertex v
end

Faces(t::FaceDS) = t.faces
# Undirected edges appear twice in the decomposition; keep only v1 < v2 copies.
Edges(t::FaceDS) = filter(x->x[1]<x[2],decompose(Face{2,Int},t.faces))

"""
    FaceDS(faces::PlainDS)

Constructs a face based datastructure from PlainDS. Returns a struct FaceDS with original faces, computed neighbour faces and vertex to face map (one face for each vertex).
"""
function FaceDS(faces::PlainDS)
    vfaces = Array{Int}(undef,maximum(maximum(faces)))
    neighs = Array{Face{3,Int},1}(undef,length(faces))
    # find_triangle_vertex / find_other_triangle_edge are defined elsewhere
    # in the package (presumably primitives.jl) — they do linear searches.
    for vi in 1:maximum(maximum(faces))
        vfaces[vi] = find_triangle_vertex(vi,faces)
    end
    for ti in 1:length(faces)
        v1, v2, v3 = faces[ti]
        # For each edge of face ti, the other face sharing that edge.
        t1 = find_other_triangle_edge(v2,v3,ti,faces)
        t2 = find_other_triangle_edge(v1,v3,ti,faces)
        t3 = find_other_triangle_edge(v1,v2,ti,faces)
        neighs[ti] = Face(t1,t2,t3)
    end
    return FaceDS(faces,neighs,vfaces)
end
# Old-style start/done/next helpers for FaceRing over FaceDS; Base.iterate
# below adapts them to the current iteration protocol. The state is a
# (counter, face index) tuple, starting at the face cached in vfaces.
function start(iter::FaceRing{FaceDS})
    vface = iter.t.vfaces[iter.v]
    i0 = 1
    return (i0,vface)
end
# Done once we walked all the way around back to the starting face.
function done(iter::FaceRing{FaceDS},state::Tuple{Int,Int})
    i, face = state
    i0, vface = start(iter)
    if !(i==i0) & (face==vface)
        return true
    else
        return false
    end
end
function next(iter::FaceRing{FaceDS},state::Tuple{Int,Int})
    i, tri = state
    v = iter.v
    face = iter.t.faces[tri]
    neighbours = iter.t.neighs[tri]
    index = face.==v
    w = index[[1,2,3]]
    # Rotate the membership mask to select the neighbour across the edge
    # clockwise from v; this walks the ring in a consistent direction.
    cw = index[[3,1,2]]
    nexttri, = neighbours[cw]
    # A -1 neighbour means a boundary edge, which closed surfaces cannot have.
    if nexttri==-1
        error("The surface is not closed")
    end
    return tri, (i+1,nexttri)
end
Base.iterate(iter::FaceRing{FaceDS}) = next(iter,start(iter))
function Base.iterate(iter::FaceRing{FaceDS},ti)
    if done(iter,ti)
        return nothing
    else
        return next(iter,ti)
    end
end

# Vertex ring starts from the face cached for v; state is the face index.
VertexRing(v::Int,t::FaceDS) = VertexRing(v,t.vfaces[v],t)
# NOTE(review): this `done` appears unused — the Base.iterate method below
# terminates by comparing the state with iter.start instead.
function done(iter::VertexRing{FaceDS},state::Tuple{Int,Int})
    i, face = state
    if !(i==1) & (face==iter.t.vfaces[iter.v])
        return true
    else
        return false
    end
end
function next(iter::VertexRing{FaceDS},tri::Int)
    v = iter.v
    face = iter.t.faces[tri]
    neighbours = iter.t.neighs[tri]
    index = face .== v
    w = index[[1,2,3]]
    cw = index[[3,1,2]]
    nexttri, = neighbours[cw]
    if nexttri==-1
        error("The surface is not closed")
    end
    ### Extract the ring vertex from face tri (the vertex clockwise from v).
    face = iter.t.faces[tri]
    cw = (face.==v)[[3,1,2]]
    vi, = face[cw]
    return vi, nexttri
end
function Base.iterate(iter::VertexRing{FaceDS})
    face = iter.start
    return next(iter,face)
end
function Base.iterate(iter::VertexRing{FaceDS},ti)
    if ti==iter.start
        return nothing
    else
        return next(iter,ti)
    end
end
# Edge ring: consecutive vertex-ring entries paired up.
function EdgeRing(v::Int,t::FaceDS)
    iter = VertexRing(v,t)
    return PairIterator(iter)
end
| SurfaceTopology | https://github.com/akels/SurfaceTopology.jl.git |
|
[
"MIT"
] | 0.1.0 | 74840bce8734609e4b921aefdceed22e18460b52 | code | 2100 | @doc "`Faces(t)` returns an iterator for faces from representation of topology `t`" Faces
@doc "`Edges(t)` returns an iterator for edges from representation of topology `t`" Edges
# The plain face list is its own face iterator.
Faces(t::PlainDS) = t
# Keep only one orientation (v1 < v2) of each undirected edge.
Edges(t::PlainDS) = filter(x->x[1]<x[2],decompose(Face{2,Int},t))

# Old-style start/done/next helpers (adapted below via Base.iterate).
# The state is the index of the current face containing vertex v; every step
# linearly scans the remaining faces for the next occurrence of v.
start(iter::FaceRing{PlainDS}) = find_triangle_vertex(iter.v,iter.t)
done(iter::FaceRing{PlainDS},ti::Int) = ti<=length(iter.t) ? false : true
function next(iter::FaceRing{PlainDS},i::Int)
    v = iter.v
    nexti = find_triangle_vertex(iter.v,iter.t[i+1:end]) + i # possible botleneck here
    return i, nexti
end
Base.iterate(iter::FaceRing{PlainDS}) = next(iter,start(iter))
function Base.iterate(iter::FaceRing{PlainDS},ti::Int)
    if done(iter,ti)
        return nothing
    else
        return next(iter,ti)
    end
end

start(iter::EdgeRing{PlainDS}) = find_triangle_vertex(iter.v,iter.t)
done(iter::EdgeRing{PlainDS},ti::Int) = ti<=length(iter.t) ? false : true
function next(iter::EdgeRing{PlainDS},i::Int)
    v = iter.v
    nexti = find_triangle_vertex(iter.v,iter.t[i+1:end]) + i # possible botleneck here
    face = iter.t[i]
    w = face.==v
    # Rotated masks pick the vertices clockwise and counter-clockwise from v,
    # yielding the opposite edge of the current face as a (cw, ccw) pair.
    cw = w[[3,1,2]]
    ccw = w[[2,3,1]]
    return (face[cw]...,face[ccw]...), nexti
end
Base.iterate(iter::EdgeRing{PlainDS}) = next(iter,start(iter))
function Base.iterate(iter::EdgeRing{PlainDS},ti::Int)
    if done(iter,ti)
        return nothing
    else
        return next(iter,ti)
    end
end

start(iter::VertexRing{PlainDS}) = find_triangle_vertex(iter.v,iter.t)
done(iter::VertexRing{PlainDS},ti::Int) = ti<=length(iter.t) ? false : true
function next(iter::VertexRing{PlainDS},ti::Int)
    v = iter.v
    faces = iter.t
    face = faces[ti]
    # The ring vertex is the one clockwise from v in the current face.
    # NOTE: vertices produced this way follow face-list order, not ring order.
    cw = (face.==v)[[3,1,2]]
    vi, = face[cw]
    nexti = find_triangle_vertex(iter.v,iter.t[ti+1:end]) + ti # possible botleneck
    return vi, nexti
end
Base.iterate(iter::VertexRing{PlainDS}) = next(iter,start(iter))
function Base.iterate(iter::VertexRing{PlainDS},ti::Int)
    if done(iter,ti)
        return nothing
    else
        return next(iter,ti)
    end
end
| SurfaceTopology | https://github.com/akels/SurfaceTopology.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.