licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | code | 2313 | using Test
using Scruff
using Scruff.Operators
using Scruff.SFuncs
using Scruff.Utils
@testset "Score" begin
@testset "Computing score" begin
@testset "Hard score" begin
    # A HardScore concentrates all probability mass on one target value.
    hard = HardScore(2)
    @test get_score(hard, 2) == 1.0
    @test get_score(hard, 1) == 0.0
    @test get_log_score(hard, 2) == 0.0
    @test get_log_score(hard, 1) == -Inf
end
@testset "Soft score" begin
    # A SoftScore maps listed values to scores; unlisted values score zero.
    soft = SoftScore([:a, :b], [0.1, 0.2])
    for (val, expected) in [(:a, 0.1), (:b, 0.2)]
        @test isapprox(get_score(soft, val), expected)
        @test isapprox(get_log_score(soft, val), log(expected))
    end
    @test get_score(soft, :c) == 0.0
    @test get_log_score(soft, :c) == -Inf
end
@testset "Log score" begin
    # A LogScore stores its scores directly in log space.
    lscore = LogScore([:a, :b], [-1.0, -2.0])
    for (val, lg) in [(:a, -1.0), (:b, -2.0)]
        @test isapprox(get_score(lscore, val), exp(lg))
        @test get_log_score(lscore, val) == lg
    end
end
@testset "Multiple score" begin
# MultipleScore combines its component scores; log scores of components add.
s1 = SoftScore([:a, :b], [0.1, 0.2])
s2 = LogScore([:b, :c], [-1.0, -2.0])
s = MultipleScore([s1, s2])
# :a and :c each get -Inf because one of the two components gives them no support.
@test isapprox(get_log_score(s, :a), -Inf64)
@test isapprox(get_log_score(s, :b), log(0.2) - 1.0)
@test isapprox(get_log_score(s, :c), -Inf64)
end
@testset "Functional score" begin
    # A FunctionalScore delegates scoring to an arbitrary function of the value.
    reciprocal(x) = 1.0 / x
    fscore = FunctionalScore{Float64}(reciprocal)
    @test isapprox(get_score(fscore, 2.0), 0.5)
    @test isapprox(get_log_score(fscore, 2.0), log(0.5))
end
@testset "Normal score" begin
# NormalScore(mean, sd) scores a value by the normal density at that value.
s = NormalScore(1.0, 2.0)
r = normal_density(3.0, 1.0, 2.0)
@test isapprox(get_score(s, 3.0), r)
@test isapprox(get_log_score(s, 3.0), log(r))
end
@testset "Parzen score" begin
# Parzen(means, sd) behaves as an equally weighted mixture of normal kernels,
# so the score at a point is the average of the kernel densities there.
s = Parzen([-1.0, 1.0], 2.0)
r1 = normal_density(0.5, -1.0, 2.0)
r2 = normal_density(0.5, 1.0, 2.0)
ans = 0.5 * (r1 + r2)
@test isapprox(get_score(s, 0.5), ans)
@test isapprox(get_log_score(s, 0.5), log(ans))
end
end
end | Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | code | 37278 | using Test
using Scruff
using Scruff.Operators
using Scruff.MultiInterface
using Scruff.SFuncs
using Scruff.Utils
import Distributions
"""
    test_support(sf, parranges, target, quality; size = 100, curr = O[])

Check that `support(sf, parranges, size, curr)` returns exactly the values in
`target` (compared as sets) and that `support_quality` reports `quality`.
"""
function test_support(sf::SFunc{I,O}, parranges, target, quality;
        size = 100, curr = O[]) where {I,O}
    computed = support(sf, parranges, size, curr)
    @test Set(computed) == Set(target)
    @test support_quality(sf, parranges) == quality
end
"""
    test_sample(sf, parvals, range, probs; num_samples = 1000, tolerance = 0.1)

Draw `num_samples` samples from `sf` given parent values `parvals` and check
that the empirical frequency of each value in `range` is within `tolerance`
of the corresponding entry of `probs`.
"""
function test_sample(sf::SFunc{I,O}, parvals, range, probs;
        num_samples = 1000, tolerance = 0.1) where {I,O}
    counts = Dict{O, Int}()
    for _ in 1:num_samples
        v = sample(sf, parvals)
        counts[v] = get(counts, v, 0) + 1
    end
    for (v, p) in zip(range, probs)
        @test isapprox(counts[v] / num_samples, p; atol = tolerance)
    end
end
# Check that the pi message `pi` assigns (approximately) the probability in
# `probs` to each corresponding value in `range`.
function test_pi(pi, range, probs)
    for (val, prob) in zip(range, probs)
        @test isapprox(cpdf(pi, (), val), prob)
    end
end
@testset "SFuncs" begin
@testset "Constant" begin
# A Constant sfunc always produces its fixed value with probability 1.
c = Constant(2)
test_support(c, (), [2], :CompleteSupport)
test_sample(c, (), [2], 1.0)
@test logcpdf(c, (), 2) == 0.0
@test logcpdf(c, (), 1) == -Inf
# Lower and upper factor bounds coincide for a deterministic sfunc.
(lfs, ufs) = make_factors(c, [2], (), 1, ())
@test length(lfs) == 1
lf = lfs[1]
@test lf.keys == (1,)
@test lf.dims == (1,)
@test lf.entries == [1.0]
@test ufs == lfs
# Stats-API checks below are disabled upstream; kept for reference.
#=
s1 = initial_stats(c)
@test s1 === nothing
s2 = expected_stats(c, [2], (), (), SoftScore([2], [1.0]))
@test s2 === nothing
s3 = accumulate_stats(c, s1, s2)
@test s3 === nothing
ps = maximize_stats(c, s3)
@test ps === nothing
=#
# The pi message puts all mass on the constant's value.
pi = compute_pi(c, [1,2], (), ())
test_pi(pi, [1,2], [0.0, 1.0])
end
@testset "Cat" begin
# Cat is a categorical distribution; both (values, probs) and pair-list
# constructors are exercised, plus a range containing duplicate values.
c = Cat([:a,:b,:c], [0.2, 0.3, 0.5])
c2 = Cat([:a => 0.2, :b => 0.3, :c => 0.5])
c3 = Cat([1,1,2], [0.1, 0.3, 0.6]) # must handle duplicates in range correctly
@test c2.original_range == [:a,:b,:c]
@test c2.params == [0.2, 0.3, 0.5]
test_support(c, (), [:a,:b,:c], :CompleteSupport)
test_support(c3, (), [1,2], :CompleteSupport)
test_sample(c, (), [:a,:b,:c], [0.2, 0.3, 0.5])
@test isapprox(logcpdf(c, (), :a), log(0.2))
@test isapprox(logcpdf(c, (), :b), log(0.3))
@test isapprox(logcpdf(c, (), :c), log(0.5))
@test logcpdf(c,(),:d) == -Inf
# Probabilities of duplicated range values are summed.
@test isapprox(logcpdf(c3, (), 1), log(0.1 + 0.3))
@test isapprox(logcpdf(c3, (), 2), log(0.6))
(lfs, ufs) = make_factors(c, [:a, :b, :c], (), 1, ())
@test length(lfs) == 1
lf = lfs[1]
@test lf.keys == (1,)
@test lf.dims == (3,)
@test lf.entries == [0.2, 0.3, 0.5]
@test ufs == lfs
# Stats-API checks below are disabled upstream; kept for reference.
#=
@test initial_stats(c) == [0, 0, 0]
chlam1 = SoftScore(c.range, [0.3, 0.1, 0.2])
chlam2 = SoftScore(c.range, [0.2, 0.3, 0.1])
s1 = expected_stats(c, [:a,:b,:c], (), (), chlam1)
@test isapprox(s1, [0.06, 0.03, 0.1])
s2 = expected_stats(c, [:a,:b,:c], (), (), chlam2)
@test isapprox(s2, [0.04, 0.09, 0.05])
s3 = accumulate_stats(c, s1, s2)
@test isapprox(s3[1], 0.06 + 0.04)
@test isapprox(s3[2], 0.03 + 0.09)
@test isapprox(s3[3], 0.1 + 0.05)
ps2 = maximize_stats(c, s3)
z = sum(s3)
@test isapprox(ps2[1], s3[1] / z)
@test isapprox(ps2[2], s3[2] / z)
@test isapprox(ps2[3], s3[3] / z)
=#
ps3 = compute_pi(c, [:a, :b, :c], (), ())
test_pi(ps3, [:a, :b, :c], [0.2, 0.3, 0.5])
# f_expectation: expected value of `length` over strings weighted by the Cat.
cf = Cat(["abc", "defg"], [0.1, 0.9])
e = 3 * 0.1 + 4 * 0.9
@test isapprox(f_expectation(cf, (), length), e)
end
@testset "Cat with duplicates" begin
    # Probabilities for a repeated range value must be merged: :a gets 0.2 + 0.5.
    dup = Cat([:a, :b, :a], [0.2, 0.3, 0.5])
    test_support(dup, (), [:a, :b], :CompleteSupport)
    test_sample(dup, (), [:a, :b], [0.7, 0.3])
end
@testset "Flip" begin
# Flip(p) is a Bernoulli sfunc: true with probability p, false with 1 - p.
f = Flip(0.7)
test_support(f, (), [false, true], :CompleteSupport)
test_sample(f, (), [false, true], [0.3, 0.7])
(lfs, ufs) = make_factors(f, [false, true], (), 7, ())
@test length(lfs) == 1
lf = lfs[1]
@test lf.keys == (7,)
@test lf.dims == (2,)
@test isapprox(lf.entries, [0.3, 0.7])
@test ufs == lfs
# Stats-API checks below are disabled upstream; kept for reference.
#=
@test initial_stats(f) == [0.0, 0.0]
chlam1 = SoftScore(f.range, [0.2, 0.3])
chlam2 = SoftScore(f.range, [0.4, 0.5])
s1 = expected_stats(f, [false, true], (), (), chlam1)
@test isapprox(s1[1], 0.06)
@test isapprox(s1[2], 0.21)
s2 = expected_stats(f, [false, true], (), (), chlam2)
@test isapprox(s2[1], 0.12)
@test isapprox(s2[2], 0.35)
s3 = accumulate_stats(f, s1, s2)
@test isapprox(s3[1], 0.18)
@test isapprox(s3[2], 0.56)
ps = maximize_stats(f, s3)
@test isapprox(ps[2], 0.56 / 0.74)
set_params!(f, ps)
=#
@test isapprox(compute_pi(f, [false, true], (), ()).params, [0.3, 0.7])
end
@testset "Uniform" begin
# Continuous uniform on [-1, 3]; density is 0.25 everywhere inside.
u = SFuncs.Uniform(-1.0, 3.0)
# Support is incremental: requesting more points refines the grid while
# keeping previously returned points.
test_support(u, (), [-1.0, 0.0, 1.0, 2.0, 3.0], :IncrementalSupport; size = 5)
test_support(u, (), [-1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0], :IncrementalSupport;
size = 9, curr = [-0.5, 0.5, 1.5, 2.5])
test_support(u, (), [-1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0], :IncrementalSupport;
size = 10, curr = [-0.5, 0.5, 1.0, 1.5, 2.5])
# Histogram samples into the four unit-width bins; each should get ~25%.
cs = [0.0, 0.0, 0.0, 0.0]
tot = 1000
for i in 1:tot
x = sample(u, ())
cs[Int(floor(x)) + 2] += 1
end
for j in 1:4
@test isapprox(cs[j] / tot, 0.25; atol = 0.1)
end
@test isapprox(logcpdf(u, (), 0.0), log(0.25))
@test isapprox(logcpdf(u, (), 5.0), -Inf64)
# bounded_probs returns (lower, upper) bounds; end intervals get half mass here.
@test isapprox(bounded_probs(u, [-1.0, 0.0, 1.0, 2.0, 3.0], ())[1],
[0.125, 0.25, 0.25, 0.25, 0.125])
@test isapprox(bounded_probs(u, [-1.0, 0.0, 1.0, 2.0, 3.0], ())[2],
[0.125, 0.25, 0.25, 0.25, 0.125])
@test isapprox(bounded_probs(u, [-1.0, -9.0, -8.0], ())[1], [0.0, 0.0, 1.0])
end
@testset "Normal" begin
# Scruff Normal sfunc checked against the Distributions.jl normal.
n = SFuncs.Normal(-1.0,1.0)
dist = Distributions.Normal(-1.0, 1.0)
empty = Vector{Float64}()
test_support(n, (), [-1.0], :IncrementalSupport; size = 1)
test_support(n, (), [-2.0, -1.0, 0.0], :IncrementalSupport; size = 3)
# Existing support points must be preserved when regenerating support.
test_support(n, (), [1.0, 2.0, 3.0, 4.0], :IncrementalSupport;
size = 3, curr = [1.0, 2.0, 3.0, 4.0])
test_support(n, (), [-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
:IncrementalSupport; size = 5, curr = [1.0, 2.0, 3.0, 4.0])
range = [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5,
4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5]
test_support(n, (), range,
:IncrementalSupport; size = 15, curr = [1.0, 2.0, 3.0, 4.0])
# Empirical CDF at 0.5 should match the analytic CDF.
c = 0
tot = 1000
for i = 1:tot
if sample(n, ()) < 0.5
c += 1
end
end
@test isapprox(c / tot, Distributions.cdf(dist, 0.5); atol = 0.05)
@test isapprox(logcpdf(n, (), 0.5), Distributions.logpdf(dist, 0.5))
(lfs, ufs) = make_factors(n, range, (), 2, ())
@test length(lfs) == 1
@test length(ufs) == 1
lf = lfs[1]
uf = ufs[1]
@test lf.keys == (2,)
@test uf.keys == lf.keys
@test lf.dims == (length(range),)
@test uf.dims == lf.dims
ls = lf.entries
us = uf.entries
@test length(ls) == length(range)
@test length(us) == length(ls)
# Sanity constraints on the bound entries: each lower bound is nonnegative,
# each upper bound at most 1, lower <= upper, and the bounds must be jointly
# consistent (no value's upper bound can exceed 1 minus the others' lowers).
for i = 1:length(range)
@test ls[i] >= 0
@test us[i] <= 1
@test ls[i] <= us[i]
otherl = 0
otheru = 0
for j = 1:length(range)
if j != i
otherl += ls[j]
otheru += ls[j]
end
end
@test us[i] <= 1 - otherl
@test otheru <= 1 - ls[i]
end
pi = compute_pi(n, range, (), ())
probs = [Distributions.pdf(dist, x) for x in range]
test_pi(pi, range, probs)
end
@testset "Det" begin
# Det wraps a deterministic function of its parents; here f(i, j) = floor(i + j).
f(i :: Float64, j :: Float64) = Int(floor(i+j))
d = Det(Tuple{Float64, Float64}, Int, f)
parranges = ([1.1, 2.2], [3.3, 4.4, 5.5])
pis = (Cat([1.1, 2.2], [0.4, 0.6]), Cat([3.3, 4.4, 5.5], [0.2,0.3,0.5]))
test_support(d, parranges, [4,5,6,7], :CompleteSupport)
c1 = Constant(1)
c2 = Constant(4)
test_sample(d, (1.1, 4.4), [5], 1.0)
@test logcpdf(d, (1.1, 4.4), 5) == 0.0
@test logcpdf(d, (1.1, 4.4), 4) == -Inf
# One indicator row per parent-value combination (2 x 3 combos, 4 outputs).
a = [1.0,0.0,0.0,0.0, 0.0,1.0,0.0,0.0, 0.0,0.0,1.0,0.0, 0.0,1.0,0.0,0.0, 0.0,0.0,1.0,0.0, 0.0,0.0,0.0,1.0]
(lfs, ufs) = make_factors(d, [4,5,6,7], parranges, 1, (2, 3))
@test length(lfs) == 1
lf = lfs[1]
@test lf.keys == (2,3,1)
@test lf.dims == (2,3,4)
@test lf.entries == a
@test ufs == lfs
# Stats-API checks below are disabled upstream; kept for reference.
#=
s1 = initial_stats(d)
@test s1 === nothing
s2 = expected_stats(d, [4,5,6,7], parranges, (), SoftScore(Vector{Int}(), Vector{Float64}()))
@test s2 === nothing
s3 = accumulate_stats(d, s1, s2)
@test s3 === nothing
ps = maximize_stats(d, s3)
@test ps === nothing
set_params!(d, ps)
=#
# Hand-computed output probabilities from the two parent pi messages.
p4 = 0.4 * 0.2
p5 = 0.4 * 0.3 + 0.6 * 0.2
p6 = 0.4 * 0.5 + 0.6 * 0.3
p7 = 0.6 * 0.5
pi1 = compute_pi(d, [4,5,6,7], parranges, pis)
test_pi(pi1, [4,5,6,7], [p4, p5, p6, p7])
chlam1 = SoftScore([4,5,6,7], [0.1, 0.2, 0.3, 0.4])
lam11 = send_lambda(d, chlam1, [4,5,6,7], parranges, pis, 1)
lam12 = send_lambda(d, chlam1, [4,5,6,7], parranges, pis, 2)
l11 = 0.2 * 0.1 + 0.3 * 0.2 + 0.5 * 0.3 # pi(v2) * chlam(f(1.1, v2))
l12 = 0.2 * 0.2 + 0.3 * 0.3 + 0.5 * 0.4 # pi(v2) * chlam(f(2.2, v2))
l23 = 0.4 * 0.1 + 0.6 * 0.2 # pi(v1) * chlam(f(v1, 3.3))
l24 = 0.4 * 0.2 + 0.6 * 0.3 # pi(v1) * chlam(f(v1, 4.4))
l25 = 0.4 * 0.3 + 0.6 * 0.4 # pi(v1) * chlam(f(v1, 5.5))
@test isapprox(get_score(lam11, 1.1), l11)
@test isapprox(get_score(lam11, 2.2), l12)
@test isapprox(get_score(lam12, 3.3), l23)
@test isapprox(get_score(lam12, 4.4), l24)
@test isapprox(get_score(lam12, 5.5), l25)
# test incremental support
@test issubset([4], support(d, parranges, 3, [4])) == true
@test issubset([4,5], support(d, parranges, 3, [4,5])) == true
@test issubset([4,5,6], support(d, parranges, 3, [4,5,6])) == true
@test issubset([4,5,6,7], support(d, parranges, 3, [4,5,6,7])) == true
# test size in support
@test length(support(d, parranges, 3, [4])) == 3
@test length(support(d, parranges, 3, [4,5])) == 3
# NOTE(review): the line below duplicates the previous assertion — possibly
# intended to cover [4,5,6]; confirm against the upstream test suite.
@test length(support(d, parranges, 3, [4,5])) == 3
@test length(support(d, parranges, 3, [4,5,5,5])) == 3
@test length(support(d, parranges, 3, [4,5,6,7])) == 4
@test length(support(d, parranges, 50, collect(1:60))) == 4
@test length(support(d, parranges, 50, collect(1:30))) == 4
end
@testset "DiscreteCPT" begin
# DiscreteCPT: a conditional probability table over two parents
# (Symbol in {:x,:y} and Int in {1,2,3}) producing a Char output.
d = Dict((:x,1) => [0.1,0.2,0.7], (:x,2) => [0.2,0.3,0.5], (:x,3) => [0.3,0.4,0.3],
(:y,1) => [0.4,0.5,0.1], (:y,2) => [0.5,0.1,0.4], (:y,3) => [0.6,0.2,0.2])
# A bug was uncaught because this range was originally in alphabetical order!
range = ['c', 'b', 'a'] # note reverse order
c = DiscreteCPT(range, d)
pis = ([0.4,0.6], [0.2,0.3,0.5])
picat1 = Cat([:x, :y], pis[1])
picat2 = Cat([1,2,3], pis[2])
picats = (picat1, picat2)
# Cat range is in arbitrary order so we need to get it directly from the Cat
parranges = (picat1.__compiled_range, picat2.__compiled_range)
ks = collect(keys(d))
ks1 = unique(first.(ks))
ks2 = unique(last.(ks))
test_support(c, parranges, range, :CompleteSupport)
test_sample(c, (:x,2), ['a', 'b', 'c'], [0.5, 0.3, 0.2]) # note reverse order
l3 = logcpdf(c, (:x,2), 'b')
l4 = logcpdf(c, (:x,2), 'd')
@test isapprox(l3, log(0.3))
@test isapprox(l4, -Inf)
k1 = nextkey()
k2 = nextkey()
k3 = nextkey()
(lfs, ufs) = make_factors(c, range, parranges, k1, (k2, k3))
@test length(ufs) == length(lfs)
for i = 1:length(ufs)
@test ufs[i].entries == lfs[i].entries
end
# The CPT factorizes into one factor per parent-value case plus switch factors
# that select the active case from the parents.
@test length(lfs) == 8 # six for the cases and two for the switches
switchfact1 = lfs[7]
switchkeys1 = switchfact1.keys
@test length(switchkeys1) == 2
@test switchkeys1[1] == k2
switchkey = switchkeys1[2]
@test switchfact1.dims == (2,6)
@test switchfact1.entries ==
[
1.0, 1.0, 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 1.0, 1.0,
]
switchfact2 = lfs[8]
@test switchfact2.keys == (k3, switchkey)
@test switchfact2.dims == (3,6)
@test switchfact2.entries ==
[
1.0, 0.0, 0.0, 1.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 1.0, 0.0, 0.0, 1.0
]
# Case factors: entries equal the CPT row when the switch selects that case,
# and 1.0 (neutral) otherwise.
for i = 1:2
for j = 1:3
switchval = (i-1)*3 + j
infact = lfs[switchval]
@test infact.keys == (k1, switchkey)
@test infact.dims == (3,6)
es = infact.entries
x = parranges[1][i]
y = parranges[2][j]
ps = d[(x,y)]
for k = 1:3
for l = 1:6
n = (k-1)*6 + l
if l == switchval
@test es[n] == ps[k]
else
@test es[n] == 1.0
end
end
end
end
end
chlam1 = SoftScore(range, [0.9,0.8,0.7])
chlam2 = SoftScore(range, [0.6,0.5,0.4])
# Stats-API checks below are disabled upstream; kept for reference.
#=
@test isempty(initial_stats(c))
s1 = expected_stats(c, range, parranges, picats, chlam1)
ks1 = collect(keys(s1))
@test length(ks1) == 6
@test (:x,1) in ks1
@test (:x,2) in ks1
@test (:x,3) in ks1
@test (:y,1) in ks1
@test (:y,2) in ks1
@test (:y,3) in ks1
pix2 = cpdf(picats[1], (), :x) * cpdf(picats[2], (), 2)
nm1 = [cpdf(c, (:x,2), range[i]) * get_score(chlam1, range[i]) for i in 1:3]
@test isapprox(s1[(:x,2)], pix2 .* nm1)
s2 = expected_stats(c, range, parranges, picats, chlam2)
nm2 = [cpdf(c, (:x,2), range[i]) * get_score(chlam2, range[i]) for i in 1:3]
@test isapprox(s2[(:x,2)], pix2 .* nm2)
s3 = accumulate_stats(c, s1, s2)
ks3 = collect(keys(s3))
@test length(ks1) == 6
@test (:x,1) in ks1
@test (:x,2) in ks1
@test (:x,3) in ks1
@test (:y,1) in ks1
@test (:y,2) in ks1
@test (:y,3) in ks1
@test isapprox(s3[(:x,2)], s1[(:x,2)] .+ s2[(:x,2)])
ps = maximize_stats(c, s3)
for i in 1:2
for j in 1:3
x = parranges[1][i]
y = parranges[2][j]
m = c.inversemaps[1][x]
n = c.inversemaps[2][y]
@test ps[(m-1)*3+n] == normalize(s3[(x,y)])
end
end
=#
# Pi message: marginalize the CPT rows over both parents' pi messages.
p = compute_pi(c, range, parranges, picats)
q1 = pis[1][1] .* (pis[2][1] .* d[(:x, 1)] .+ pis[2][2] .* d[(:x, 2)] .+ pis[2][3] .* d[(:x, 3)])
q2 = pis[1][2] .* (pis[2][1] .* d[(:y, 1)] .+ pis[2][2] .* d[(:y, 2)] .+ pis[2][3] .* d[(:y, 3)])
q = q1 .+ q2
test_pi(p, range, q)
# FIXME cannot test send_lambda
# Lambda to each parent: sum out the other parent (weighted by its pi)
# and the child (weighted by the child lambda).
l1 = send_lambda(c, chlam1, range, parranges, picats, 1)
l2 = send_lambda(c, chlam1, range, parranges, picats, 2)
b1x = 0.0
b1y = 0.0
chl1 = [get_score(chlam1, i) for i in range]
for j = 1:3
p = cpdf(picats[2], (), parranges[2][j])
qx = [cpdf(c, (:x, parranges[2][j]), r) for r in range]
qy = [cpdf(c, (:y, parranges[2][j]), r) for r in range]
b1x += p * sum(qx .* chl1)
b1y += p * sum(qy .* chl1)
end
@test isapprox(get_score(l1, :x), b1x)
@test isapprox(get_score(l1, :y), b1y)
b21 = 0.0
b22 = 0.0
b23 = 0.0
for i = 1:2
p = cpdf(picats[1], (), parranges[1][i])
q1 = [cpdf(c, (parranges[1][i], 1), r) for r in range]
q2 = [cpdf(c, (parranges[1][i], 2), r) for r in range]
q3 = [cpdf(c, (parranges[1][i], 3), r) for r in range]
b21 += p * sum(q1 .* chl1)
b22 += p * sum(q2 .* chl1)
b23 += p * sum(q3 .* chl1)
end
@test isapprox(get_score(l2, 1), b21)
@test isapprox(get_score(l2, 2), b22)
@test isapprox(get_score(l2, 3), b23)
end
@testset "LinearGaussian" begin
# LinearGaussian(weights, bias, variance) over three continuous parents.
lg = LinearGaussian((-1.0, 1.0, 2.0), 3.0, 1.0)
pars = ([0.0, 1.0], [2.0], [3.0, 4.0, 5.0])
# Continuous support grows incrementally and must retain earlier points.
v1 = support(lg, pars, 10, Vector{Float64}())
v2 = support(lg, pars, 100, v1)
@test support_quality(lg, pars) == :IncrementalSupport
@test length(v1) >= 10
@test length(v2) >= 100
@test all(v -> v in v2, v1)
end
@testset "CLG" begin
# CLG: conditional linear Gaussian — a LinearGaussian per combination of
# discrete parent values (here (:x|:y) x (1|2|3)) over continuous parents.
d = Dict((:x,1) => ((-1.0, 1.0, 2.0), 3.0, 1.0), (:x,2) => ((-2.0, 4.0, 2.0), 3.0, 1.0),
(:x,3) => ((-3.0, 2.0, 2.0), 3.0, 1.0), (:y,1) => ((-4.0, 5.0, 2.0), 3.0, 1.0),
(:y,2) => ((-5.0, 3.0, 2.0), 3.0, 1.0), (:y,3) => ((-6.0, 6.0, 2.0), 3.0, 1.0))
clg = CLG(d)
pars = ([:x, :y], [1, 2, 3], [0.0, 1.0], [2.0], [3.0, 4.0, 5.0])
# Incremental support: each refinement keeps the previous points.
v1 = support(clg, pars, 10, Vector{Float64}())
v2 = support(clg, pars, 100, v1)
v3 = support(clg, pars, 1000, v2)
@test support_quality(clg, pars) == :IncrementalSupport
@test length(v1) >= 10
@test length(v2) >= 100
@test length(v3) >= 1000
@test all(v -> v in v2, v1)
@test all(v -> v in v3, v2)
# CLG with 1 discrete and 0 continuous parents
d2 = Dict((:x,) => ((), 0.0, 0.1), (:y,) => ((), 0.5, 0.2), (:z,) => ((), 1.5, 0.2))
clg2 = CLG(d2)
pars = ([:x, :y, :z],)
# Smoke test only: just ensure support can be computed in this edge case.
v2 = support(clg2, pars, 100, Float64[])
end
@testset "Mixture" begin
# Mixture(components, weights): mixes sfuncs with fixed weights.
s1 = Flip(0.9)
s2 = Cat([true, false], [0.2, 0.8]) # order of values reversed
mx1 = Mixture([s1, s2], [0.4, 0.6])
d1 = DiscreteCPT([1,2], Dict(tuple(false) => [0.1, 0.9], tuple(true) => [0.2, 0.8]))
d2 = DiscreteCPT([1,2], Dict(tuple(false) => [0.3, 0.7], tuple(true) => [0.4, 0.6]))
mx2 = Mixture([d1, d2], [0.6, 0.4])
d3 = DiscreteCPT([:a, :b], Dict((1,1) => [0.1, 0.9], (1,2) => [0.2, 0.8], (2,1) => [0.3, 0.7], (2,2) => [0.4, 0.6]))
d4 = DiscreteCPT([:a, :b], Dict((1,1) => [0.9, 0.1], (1,2) => [0.8, 0.2], (2,1) => [0.7, 0.3], (2,2) => [0.6, 0.4]))
mx3 = Mixture([d3, d4], [0.6, 0.4])
vr = support(mx1, tuple(), 100, Vector{Bool}())
@test support_quality(mx1, tuple()) == :CompleteSupport
@test length(vr) == 2
@test false in vr
@test true in vr
# Sampling frequency of `true` should match the weighted mixture probability.
total = 1000
n = 0
for i in 1:total
if sample(mx1, ())
n += 1
end
end
pt = 0.3 * 0.2 + 0.7 * 0.6
@test isapprox(n / total, pt, atol = 0.05)
# Conditional mixture densities are the weight-blended component densities.
p1givenf = 0.6 * 0.1 + 0.4 * 0.3
p1givent = 0.6 * 0.2 + 0.4 * 0.4
@test isapprox(logcpdf(mx1, tuple(), true), log(pt))
@test isapprox(logcpdf(mx2, tuple(false), 1), log(p1givenf))
@test isapprox(logcpdf(mx2, tuple(true), 1), log(p1givent))
pi = compute_pi(mx2, [1,2], ([false, true],), (Cat([false, true], [0.7, 0.3]),))
p1 = cpdf(pi, (), 1)
p2 = cpdf(pi, (), 2)
@test isapprox(p1, 0.7 * p1givenf + 0.3 * p1givent)
@test isapprox(p2, 1 - p1)
p1given11 = 0.6 * 0.1 + 0.4 * 0.9
p1given12 = 0.6 * 0.2 + 0.4 * 0.8
p1given21 = 0.6 * 0.3 + 0.4 * 0.7
p1given22 = 0.6 * 0.4 + 0.4 * 0.6
q1 = 0.3 * p1given11 + 0.7 * p1given12 # takes into account pi message from parent 2
q2 = 0.3 * p1given21 + 0.7 * p1given22 # takes into account pi message from parent 2
lam1 = send_lambda(mx3,
SoftScore([:a, :b], [0.4, 0.3]), [:a, :b], ([1,2], [1,2]),
(Cat([1,2], [0.1, 0.9]), Cat([1,2], [0.3, 0.7])), 1)
@test isapprox(get_score(lam1, 1), 0.4 * q1 + 0.3 * (1 - q1))
@test isapprox(get_score(lam1, 2), 0.4 * q2 + 0.3 * (1 - q2))
end
@testset "Separable" begin
# Separable: an additive (weighted) decomposition of a CPT — one component
# CPT per parent, mixed with the given probabilities.
cpt1 = Dict((:x,) => [0.1, 0.9], (:y,) => [0.2, 0.8], (:z,) => [0.3, 0.7])
cpt2 = Dict((1,) => [0.4, 0.6], (2,) => [0.5, 0.5])
cpt3 = Dict(('a',) => [0.6, 0.4], ('b',) => [0.7, 0.3])
s = Separable([false, true], [0.2, 0.3, 0.5], [cpt1, cpt2, cpt3])
parranges = ([:x, :y, :z], [1, 2], ['a', 'b'])
myrange = [false, true]
@test support(s, parranges, 100, Bool[]) == myrange
@test support_quality(s, parranges) == :CompleteSupport
parvals = (:y, 1, 'b')
c1 = Cat([:x, :y, :z], [0.2, 0.3, 0.5])
c2 = Cat([1, 2], [0.1, 0.9])
c3 = Cat(['a', 'b'], [0.2, 0.8])
# Sampling frequency of `true` given (:y, 1, 'b') should match the mixture.
n = 0
tot = 1000
for i = 1:tot
if sample(s, (:y, 1, 'b'))
n += 1
end
end
p = 0.2 * 0.8 + 0.3 * 0.6 + 0.5 * 0.3
@test isapprox(n / tot, p; atol = 0.05)
@test isapprox(logcpdf(s, parvals, true), log(p))
k1 = nextkey()
k2 = nextkey()
k3 = nextkey()
k4 = nextkey()
(lfs,ufs) = make_factors(s, myrange, parranges, k1, (k2, k3, k4))
@test length(ufs) == length(lfs)
for i = 1:length(ufs)
@test ufs[i].entries == lfs[i].entries
end
@test length(lfs) == 11 # each component has (num parent values + 1) factors, and one for the mixture
# The final factor encodes the mixture weights over the three components.
mixfact = lfs[11]
@test length(mixfact.keys) == 1
mixkey = mixfact.keys[1]
@test mixfact.dims == (3,)
@test mixfact.entries == [0.2, 0.3, 0.5]
comp1facts = lfs[1:4]
comp2facts = lfs[5:7]
comp3facts = lfs[8:10]
# Each component's last factor is its switch, keyed on (parent, switch, mixture).
comp1sw = last(comp1facts)
comp2sw = last(comp2facts)
comp3sw = last(comp3facts)
@test length(comp1sw.keys) == 3
@test length(comp2sw.keys) == 3
@test length(comp3sw.keys) == 3
@test comp1sw.keys[1] == k2
@test comp2sw.keys[1] == k3
@test comp3sw.keys[1] == k4
sw1key = comp1sw.keys[2]
sw2key = comp2sw.keys[2]
sw3key = comp3sw.keys[2]
@test comp1sw.keys[3] == mixkey
@test comp2sw.keys[3] == mixkey
@test comp3sw.keys[3] == mixkey
@test comp1sw.dims == (3,3,3)
@test comp2sw.dims == (2,2,3)
@test comp3sw.dims == (2,2,3)
@test comp1sw.entries ==
[
1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0,
0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0,
0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0
]
@test comp2sw.entries ==
[
1.0, 1.0, 1.0, 1.0, 0.0, 1.0,
1.0, 0.0, 1.0, 1.0, 1.0, 1.0
]
@test comp3sw.entries ==
[
1.0, 1.0, 1.0, 1.0, 1.0, 0.0,
1.0, 1.0, 0.0, 1.0, 1.0, 1.0
]
end
@testset "Switch" begin
# LinearSwitch(n, T): first parent (an Int in 1:n) picks which of the
# remaining n parents is passed through. If{T} is the two-way Bool variant.
s1 = LinearSwitch(2, Symbol)
s2 = If{Symbol}()
prs1 = ([2,1], [:a, :b], [:c, :b]) # different order
prs2 = ([false, true], [:a, :b], [:c, :b])
range = [:c, :b, :a] # different order
v1 = support(s1, prs1, 100, Symbol[])
v2 = support(s2, prs2, 100, Symbol[])
@test length(v1) == 3
@test :a in v1
@test :b in v1
@test :c in v1
@test length(v2) == 3
@test :a in v2
@test :b in v2
@test :c in v2
# With selector fixed, sampling always returns the chosen parent's value.
ns1 = Dict(:a => 0, :b => 0, :c => 0)
ns2 = Dict(:a => 0, :b => 0, :c => 0)
tot = 1000
for j in 1:tot
ns1[sample(s1, (2,:a,:b))] += 1
ns2[sample(s2, (false,:a,:b))] += 1
end
@test ns1[:a] == 0
@test ns1[:b] == tot
@test ns1[:c] == 0
@test ns2[:a] == 0
@test ns2[:b] == tot
@test ns2[:c] == 0
@test isapprox(logcpdf(s1, (2,:a,:b), :a), -Inf)
@test isapprox(logcpdf(s1, (2,:a,:b), :b), 0.0)
@test support_quality(s1, prs1) == :CompleteSupport
@test support_quality(s2, prs2) == :CompleteSupport
# With only part of the selector range, support is only best-effort.
@test support_quality(s1, ([2], [:a, :b], [:b, :c])) == :BestEffortSupport
@test support_quality(s1, ([false], [:a, :b], [:b, :c])) == :BestEffortSupport
incoming_pis1 = (Cat([2,1], [0.4, 0.6]), Cat([:a, :b], [0.1, 0.9]), Cat([:c, :b], [0.8, 0.2]))
incoming_pis2 = (Cat([false, true], [0.4, 0.6]), Cat([:a, :b], [0.1, 0.9]), Cat([:c, :b], [0.8, 0.2]))
pi1 = compute_pi(s1, range, prs1, incoming_pis1)
pi2 = compute_pi(s2, range, prs2, incoming_pis2)
# pSab = P(selector = S) * P(parent2 = a-value) * P(parent3 = b-value).
p2ac = 0.4 * 0.1 * 0.8
p2ab = 0.4 * 0.1 * 0.2
p2bc = 0.4 * 0.9 * 0.8
p2bb = 0.4 * 0.9 * 0.2
p1ac = 0.6 * 0.1 * 0.8
p1ab = 0.6 * 0.1 * 0.2
p1bc = 0.6 * 0.9 * 0.8
p1bb = 0.6 * 0.9 * 0.2
pc = p2ac + p2bc
pb = p2ab + p2bb + p1bc + p1bb
pa = p1ac + p1ab
test_pi(pi1, range, [pc, pb, pa])
test_pi(pi2, range, [pc, pb, pa])
chlam = SoftScore(range, [0.1, 0.2, 0.7])
lam1 = send_lambda(s1, chlam, range, prs1, incoming_pis1, 1)
lam2 = send_lambda(s1, chlam, range, prs1, incoming_pis1, 2)
lam3 = send_lambda(s1, chlam, range, prs1, incoming_pis1, 3)
@test Set(keys(lam1.logscores)) == Set(prs1[1])
@test Set(keys(lam2.logscores)) == Set(prs1[2])
@test Set(keys(lam3.logscores)) == Set(prs1[3])
pac = 0.1 * 0.8 # ignoring first parent for lambda message to first parent
pab = 0.1 * 0.2
pbc = 0.9 * 0.8
pbb = 0.9 * 0.2
l2ac = pac * 0.1
l2ab = pab * 0.2
l2bc = pbc * 0.1
l2bb = pbb * 0.2
l1ac = pac * 0.7
l1ab = pab * 0.7
l1bc = pbc * 0.2
l1bb = pbb * 0.2
l2 = [l2ac + l2bc, l2ab + l2bb]
l1 = [l1ac + l1ab, l1bc + l1bb]
@test isapprox(get_score(lam1, 2), sum(l2))
@test isapprox(get_score(lam1, 1), sum(l1))
# Since this is a LinearSwitch, parent 2 is the choice for input 1, which is second in the first parent's range
con2 = 0.4 * sum(l2)
@test isapprox(get_score(lam2, :a), 0.6 * 0.7 + con2)
@test isapprox(get_score(lam2, :b), 0.6 * 0.2 + con2)
# And parent 3 is the choice for input 2, which is first in the first parent's range
con3 = 0.6 * sum(l1)
@test isapprox(get_score(lam3, :c), 0.4 * 0.1 + con3)
@test isapprox(get_score(lam3, :b), 0.4 * 0.2 + con3)
end
@testset "Generate" begin
# Generate{T}: takes an sfunc as its parent value and samples from it.
# NOTE(review): frng1 appears unused in this testset.
frng1 = [Flip(0.1), Flip(0.2)]
f1 = Flip(0.1)
f2 = Flip(0.2)
g = Generate{Bool}()
vs = support(g, ([f1, f2],), 0, Bool[])
@test support_quality(g, ([f1, f2],)) == :CompleteSupport
@test length(vs) == 2
@test Set(vs) == Set([false, true])
# The conditional density of Generate is the parent sfunc's own density.
@test isapprox(cpdf(g, (f1,), true), 0.1)
total = 1000
n = 0
for i = 1:total
if sample(g, (f1,))
n += 1
end
end
@test isapprox(n / total, 0.1, atol = 0.05)
end
@testset "Apply" begin
# Apply{J,O}: first parent is an sfunc, second parent is that sfunc's input
# tuple; Apply samples the sfunc applied to the arguments.
frng1 = [Flip(0.1), Flip(0.2)]
l1 = LinearGaussian((1.0,), 0.0, 1.0)
l2 = LinearGaussian((1.0,), 1.0, 1.0)
@test typeof(l2) <: SFunc{<:Tuple{Vararg{Float64}}, Float64}
frng2 = [l1, l2]
jrng2 = [(1.0,), (2.0,), (3.0,)]
a1 = Apply{Tuple{}, Bool}()
a2 = Apply{Tuple{Float64}, Float64}()
vs = support(a1, (frng1, Vector{Tuple}[]), 0, Bool[])
@test support_quality(a1, (frng1, Tuple[])) == :CompleteSupport
@test support_quality(a2, (frng2, jrng2)) == :IncrementalSupport
@test length(vs) == 2
@test Set(vs) == Set([false, true])
# l1 applied to (1.0,) is Normal(1.0, 1.0); its density at the mean is 1/sqrt(2*pi).
@test isapprox(logcpdf(a2, (l1, (1.0,)), 1.0), log(1 / sqrt(2 * pi)))
# Half the samples should fall below the mean.
total = 1000
n = 0
for i = 1:total
if sample(a2, (l1, (1.0,))) < 1.0
n += 1
end
end
@test isapprox(n / total, 0.5, atol = 0.05)
end
@testset "Chain" begin
# Chain builds an sfunc whose parent value I selects (via a function) the
# inner sfunc to sample from, optionally with extra inputs J.
@testset "With simple I and no J" begin
sf = Chain(Int, Int, i -> Constant(i+1))
@test sample(sf, (1,)) == 2
end
@testset "With tuple I and no J" begin
sf = Chain(Tuple{Int}, Int, i -> Constant(i[1]+1))
@test sample(sf, (1,)) == 2
end
@testset "With simple I and tuple J" begin
# The inner Det receives the J tuple; i is captured from the chain input.
sf = Chain(Int, Tuple{Int}, Int,
i -> Det(Tuple{Int}, Int, j -> j[1] + i))
@test sample(sf, (1,2)) == 3
end
end
@testset "Invertible" begin
# Invertible wraps a bijection (forward, inverse) as a deterministic sfunc.
i = Invertible{Int,Int}(i -> i + 1, o -> o - 1)
@test support(i, ([1,2],), 100, Int[]) == [2,3]
@test support_quality(i, ([1,2],)) == :CompleteSupport
@test sample(i, (1,)) == 2
@test cpdf(i, (1,), 2) == 1.0
@test cpdf(i, (1,), 3) == 0.0
# Indicator factor: parent 1 -> output 2, parent 2 -> output 3, never 4.
ps = [1.0,0.0,0.0,0.0,1.0,0.0]
(bps1, bps2) = Scruff.Operators.bounded_probs(i, [2,3,4], ([1,2],))
@test bps1 == ps
@test bps2 == bps1
(facts1, facts2) = make_factors(i, [2,3,4], ([1,2],), 5, (7,))
@test facts1 == facts2
@test length(facts1) == 1
fact1 = facts1[1]
@test fact1.keys == (7,5)
@test fact1.dims == (2,3)
@test fact1.entries == ps
parpis = (Cat([1,2], [0.1,0.9]),)
chlam = SoftScore([2,3,4], [0.2,0.3,0.5])
pi = compute_pi(i, [2,3,4], ([1,2],), parpis)
@test cpdf(pi, (), 2) == 0.1
@test cpdf(pi, (), 3) == 0.9
@test cpdf(pi, (), 4) == 0.0
lam = send_lambda(i, chlam, [2,3,4], ([1,2],), parpis, 1)
@test get_score(lam, 1) == 0.2
@test get_score(lam, 2) == 0.3
@test get_score(lam, 3) == 0.0 # even though it maps to 4, it's not in the parent range
# Stats-API checks below are disabled upstream; kept for reference.
#=
@test initial_stats(i) == nothing
@test accumulate_stats(i, nothing, nothing) == nothing
@test expected_stats(i, [2,3,4], ([1,2],), parpis, chlam) == nothing
@test maximize_stats(i, nothing) == nothing
=#
end
@testset "Serial" begin
# Serial composes sfuncs in sequence: (Int,Int) -> Symbol -> Bool -> Int.
sf1 = DiscreteCPT([:a, :b],
Dict((1,1) => [0.1, 0.9], (1,2) => [0.2, 0.8],
(2,1) => [0.3, 0.7], (2,2) => [0.4, 0.6]))
sf2 = DiscreteCPT([false, true], Dict((:a,) => [0.6, 0.4], (:b,) => [0.8, 0.2]))
sf3 = Invertible{Bool, Int}(b -> b ? 5 : 6, i -> i == 5)
ser = Serial(Tuple{Int,Int}, Int, (sf1,sf2,sf3))
# P(6 | 1, 2) = P(:a) * P(false | :a) + P(:b) * P(false | :b) = 0.2*0.6 + 0.8*0.8.
total = 1000
n = 0
for i in 1:total
if sample(ser, (1,2)) == 6
n += 1
end
end
@test isapprox(n / total, 0.2 * 0.6 + 0.8 * 0.8; atol = 0.05)
prs = ([1,2], [2,1])
sup = support(ser, prs, 10, Int[])
@test Set(sup) == Set([5,6])
@test support_quality(ser, prs) == :CompleteSupport
@test isapprox(cpdf(ser, (1,2), 6), 0.2 * 0.6 + 0.8 * 0.8)
# Joint probabilities for each parent combination, chained through sf1 and sf2.
bps = [
0.2 * 0.6 + 0.8 * 0.8, 0.2 * 0.4 + 0.8 * 0.2,
0.1 * 0.6 + 0.9 * 0.8, 0.1 * 0.4 + 0.9 * 0.2,
0.4 * 0.6 + 0.6 * 0.8, 0.4 * 0.4 + 0.6 * 0.2,
0.3 * 0.6 + 0.7 * 0.8, 0.3 * 0.4 + 0.7 * 0.2
]
@test isapprox(bounded_probs(ser, [6,5], prs)[1], bps)
# Serial's factors are the concatenation of each stage's factors.
facts = make_factors(ser, [6,5], prs, 3, (1,2))[1]
fs1 = make_factors(sf1, [:a,:b], prs, 4, (1,2))[1]
fs2 = make_factors(sf2, [false, true], ([:a,:b],), 5, (4,))[1]
fs3 = make_factors(sf3, [6,5], ([false, true],), 3, (5,))[1]
@test length(facts) == length(fs1) + length(fs2) + length(fs3)
fs = copy(fs1)
append!(fs, fs2)
append!(fs, fs3)
for (computed, actual) in zip(facts, fs)
@test computed.entries == actual.entries
end
parpis = (Cat([1,2], [0.9, 0.1]), Cat([2,1], [0.8, 0.2]))
pi = compute_pi(ser, [6,5], prs, parpis)
pi6 = 0.9 * 0.8 * bps[1] + 0.9 * 0.2 * bps[3] +
0.1 * 0.8 * bps[5] + 0.1 * 0.2 * bps[7]
pi5 = 0.9 * 0.8 * bps[2] + 0.9 * 0.2 * bps[4] +
0.1 * 0.8 * bps[6] + 0.1 * 0.2 * bps[8]
@test isapprox(cpdf(pi, (), 6), pi6)
@test isapprox(cpdf(pi, (), 5), pi5)
# Lambda messages propagate back through each stage of the chain.
chlam = SoftScore([6,5], [0.7, 0.3])
lam1 = send_lambda(ser, chlam, [6,5], prs, parpis, 1)
lam2 = send_lambda(ser, chlam, [6,5], prs, parpis, 2)
lf = 0.7
lt = 0.3
la = 0.6 * lf + 0.4 * lt
lb = 0.8 * lf + 0.2 * lt
l11 = 0.8 * (0.2 * la + 0.8 * lb) + 0.2 * (0.1 * la + 0.9 * lb)
l12 = 0.8 * (0.4 * la + 0.6 * lb) + 0.2 * (0.3 * la + 0.7 * lb)
@test isapprox(get_score(lam1, 1), l11)
@test isapprox(get_score(lam1, 2), l12)
l22 = 0.9 * (0.2 * la + 0.8 * lb) + 0.1 * (0.4 * la + 0.6 * lb)
l21 = 0.9 * (0.1 * la + 0.9 * lb) + 0.1 * (0.3 * la + 0.7 * lb)
@test isapprox(get_score(lam2, 1), l21)
@test isapprox(get_score(lam2, 2), l22)
# Stats-API checks below are disabled upstream; kept for reference.
#=
is1 = initial_stats(sf1)
is2 = initial_stats(sf2)
is3 = initial_stats(sf3)
istats = initial_stats(ser)
@test istats == (is1,is2,is3)
es1 = expected_stats(sf1, [:a,:b], prs, parpis, SoftScore([:a,:b], [la,lb]))
pi1 = compute_pi(sf1, [:a,:b], prs, parpis)
es2 = expected_stats(sf2, [false, true], ([:a,:b],), (pi1,),
SoftScore([false, true], [lf,lt]))
pi2 = compute_pi(sf2, [false, true], ([:a,:b],), (pi1,))
es3 = expected_stats(sf3, [6,5], ([false, true],), (pi2,), chlam)
estats = expected_stats(ser, [6,5], prs, parpis, chlam)
@test length(estats) == 3
@test keys(estats[1]) == keys(es1)
for k in keys(estats[1])
@test isapprox(estats[1][k], es1[k])
end
@test keys(estats[2]) == keys(es2)
for k in keys(estats[2])
@test isapprox(estats[2][k], es2[k])
end
@test estats[3] == es3
as1 = accumulate_stats(sf1, is1, es1)
as2 = accumulate_stats(sf2, is2, es2)
as3 = accumulate_stats(sf3, is3, es3)
astats = accumulate_stats(ser, istats, estats)
@test length(astats) == 3
@test keys(astats[1]) == keys(as1)
for k in keys(astats[1])
@test isapprox(astats[1][k], as1[k])
end
@test keys(astats[2]) == keys(as2)
for k in keys(astats[2])
@test isapprox(astats[2][k], as2[k])
end
@test astats[3] == as3
mp1 = maximize_stats(sf1, as1)
mp2 = maximize_stats(sf2, as2)
mp3 = maximize_stats(sf3, as3)
mparams = maximize_stats(ser, astats)
@test length(mparams) == 3
@test isapprox(mparams[1], mp1)
@test isapprox(mparams[2], mp2)
@test mparams[3] == mp3
=#
end
@testset "Discrete Distributions.jl" begin
# DistributionsSF wraps a Distributions.jl distribution as a Scruff sfunc.
d = Distributions.Categorical([0.4, 0.3, 0.3])
sf = DistributionsSF(d)
N = 1000
samples = [sample(sf, ()) for _ in 1:N]
# The analytic expectation should match the sample mean.
sf_mean = expectation(sf, ())
@test isapprox(sf_mean, sum(samples) / N; atol=0.15)
# must handle duplicates in range correctly
c3 = Discrete([1, 1, 2],
[0.1, 0.3, 0.6])
test_support(c3, (), [1, 2], :CompleteSupport)
@test isapprox(logcpdf(c3, (), 1), log(0.1 + 0.3))
@test isapprox(logcpdf(c3, (), 2), log(0.6))
end
@testset "Continuous Distributions.jl" begin
# Wrapped standard normal: check moments, sums of sfuncs, and MLE fitting.
d = Distributions.Normal()
sf = DistributionsSF(d)
num_samples = 1024
samples = [sample(sf, ()) for _ in 1:num_samples]
@test isapprox(expectation(sf, ()), 0.0)
@test isapprox(variance(sf, ()), 1.0)
# Sum of two independent standard normals has variance 2.
sf2 = sumsfs((sf, sf))
@test isapprox(variance(sf2, ()), 2.0)
# Fitting a normal to an empirical sample should recover the moments.
cat = Discrete(samples, [1.0/num_samples for _ in 1:num_samples])
fit_normal = fit_mle(Normal{Float64}, cat)
@test isapprox(expectation(sf, ()), expectation(fit_normal, ()), atol=0.1)
@test isapprox(variance(sf, ()), variance(fit_normal, ()), atol=0.1)
end
end
| Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | code | 6194 | using Test
using Scruff
using Scruff.Utils
using Scruff.RTUtils
using AbstractTrees
using DataStructures
@testset "Util" begin
@testset "Cartesian product" begin
    @testset "With no arrays" begin
        # The empty product contains exactly one element: the empty list.
        @test cartesian_product(Array{Int,1}[]) == [[]]
    end
    @testset "With single array" begin
        expected = [[1], [2]]
        @test cartesian_product([[1,2]]) == expected
    end
    @testset "With two arrays" begin
        # Expected order: first factor varies slowest.
        expected = [[x, y] for x in [1, 2] for y in [3, 4, 5]]
        @test cartesian_product([[1,2], [3,4,5]]) == expected
    end
    @testset "With empty array" begin
        # Any empty factor makes the whole product empty.
        @test cartesian_product([[1,2], [], [3,4,5]]) == []
    end
end
@testset "Topsort" begin
    # Map each node to its position in the returned ordering.
    positions(ord) = Dict(n => indexin(n, ord)[1] for n in 1:5)

    @testset "Acyclic" begin
        graph = Dict(2 => [1], 5 => [2, 1], 3 => [2], 4 => [1])
        ord = topsort(graph)
        @test length(ord) == 5
        for n in 1:5
            @test n in ord
        end
        pos = positions(ord)
        # Every node must come after all of its dependencies.
        @test pos[1] < pos[2]
        @test pos[2] < pos[5]
        @test pos[2] < pos[3]
        @test pos[1] < pos[4]
    end
    @testset "Cyclic" begin
        graph = Dict(2 => [1, 3], 5 => [2, 1], 3 => [2], 4 => [1])
        ord = topsort(graph)
        @test length(ord) == 5
        for n in 1:5
            @test n in ord
        end
        pos = positions(ord)
        # 2 and 3 form a cycle so they could come in either order, but both
        # must follow 1 and precede 5.
        @test pos[1] < pos[2]
        @test pos[1] < pos[3]
        @test pos[2] < pos[5]
        @test pos[3] < pos[5]
        @test pos[1] < pos[4]
    end
end
@testset "Continuous utilities" begin
    @testset "Intervals" begin
        # Interval boundaries are midpoints between consecutive points, with
        # the outer intervals extending to +/- infinity.
        @test make_intervals([0, 1, 3]) == [(-Inf,0.5), (0.5,2), (2,Inf)]
        @test make_intervals(([1])) == [(-Inf, Inf)]
    end
    @testset "Linear value" begin
        # 2*4 + (-3)*(-2) + (-1) == 13
        @test linear_value([2,-3], -1, [4,-2]) == 13
        # With no coefficients, the value is just the bias term.
        @test linear_value([], -1, []) == -1
    end
    @testset "Bounded linear value" begin
        # A linear function over a box attains its extrema at the corners, so
        # the bounds must equal the min/max over all corner evaluations.
        is1 = [(3,5), (-3,-1)]
        cs1 = [[3,-1], [3,-3], [5,-1], [5,-3]]
        vs1 = map(x -> linear_value([2,-3], -1, x), cs1)
        @test bounded_linear_value([2,-3], -1, is1) ==
            (minimum(vs1), maximum(vs1))
    end
    @testset "Normal density" begin
        # NOTE(review): the last case only works out if the third argument is
        # the variance (sd = 2 halves the peak density) — consistent with the
        # expected value 0.2420 / 2 below, but confirm against normal_density.
        @test isapprox(normal_density(0, 0, 1), 0.3989, atol = 0.0001)
        @test isapprox(normal_density(1, 1, 1), 0.3989, atol = 0.0001)
        @test isapprox(normal_density(-1, 0, 1), 0.2420, atol = 0.0001)
        @test isapprox(normal_density(-2, 0, 4), 0.2420 / 2, atol = 0.0001)
    end
end
@testset "Memo" begin
    # Count how many times the underlying function actually runs.
    calls = 0
    function counting_identity(x)
        calls += 1
        return x
    end
    cached = memo(counting_identity)
    # A new argument invokes the underlying function...
    @test cached(1) == 1
    @test calls == 1
    # ...but a repeated argument is served from the cache.
    @test cached(1) == 1
    @test calls == 1
    @test cached(2) == 2
    @test calls == 2
end
@testset "factor" begin
    # Unary factors: fact1 over variable 1 (size 2), fact2 over variable 2
    # (size 3). Entries are laid out in row-major order over the key tuple.
    fact1 = Factor((2,), (1,), [0.1, 0.9])
    fact2 = Factor((3,), (2,), [0.2, 0.3, 0.5])
    # Hand-computed expected products; prod12 and prod21 differ only in the
    # order of the keys (and hence the layout of the entries).
    prod12 = Factor((3, 2), (2, 1), [0.02, 0.18, 0.03, 0.27, 0.05, 0.45])
    prod21 = Factor((2, 3), (1, 2), [0.02, 0.03, 0.05, 0.18, 0.27, 0.45])
    # fact23 conditions variable 3 on variable 2.
    fact23 = Factor((3, 2), (2, 3), [0.2, 0.8, 0.3, 0.7, 0.4, 0.6])
    prod1223 = Factor((3, 2, 2), (2, 3, 1),
        [0.004, 0.036, 0.016, 0.144,
         0.009, 0.081, 0.021, 0.189,
         0.02, 0.18, 0.03, 0.27])
    prod2123 = prod1223 # Even though the order of variables in one of the
                        # arguments is different, the result is the same
    prod122321 = Factor((2, 3, 2), (1, 2, 3),
        [0.00008, 0.00032, 0.00027, 0.00063,
         0.001, 0.0015, 0.00648, 0.02592,
         0.02187, 0.05103, 0.081, 0.1215])
    # Expected results of summing variable 3, 1, and 2 (respectively) out of
    # prod122321; each entry is the sum over the eliminated variable's values.
    sum1 = Factor((3, 2), (2, 3),
        [0.00008 + 0.00648, 0.00032 + 0.02592, 0.00027 + 0.02187,
         0.00063 + 0.05103, 0.001 + 0.081, 0.0015 + 0.1215])
    sum2 = Factor((2, 2), (1, 3),
        [0.00008 + 0.00027 + 0.001, 0.00032 + 0.00063 + 0.0015,
         0.00648 + 0.02187 + 0.081, 0.02592 + 0.05103 + 0.1215])
    sum3 = Factor((2, 3), (1, 2),
        [0.00008 + 0.00032, 0.00027 + 0.00063, 0.001 + 0.0015,
         0.00648 + 0.02592, 0.02187 + 0.05103, 0.081 + 0.1215])
    @testset "Product" begin
        @testset "Multiplying two independent factors" begin
            @test isapprox(product(fact1, fact2), prod12)
            @test isapprox(product(fact2, fact1), prod21)
        end
        @testset "Multiplying two factors with a shared variable" begin
            @test isapprox(product(prod12, fact23), prod1223)
            @test isapprox(product(prod21, fact23), prod2123)
        end
        @testset "Multiplying factors with two shared variables" begin
            @test isapprox(product(prod1223, prod21), prod122321)
        end
    end
    @testset "Summing out a variable" begin
        @testset "With variables" begin
            @test isapprox(sum_over(prod122321, 1), sum1)
            @test isapprox(sum_over(prod122321, 2), sum2)
            @test isapprox(sum_over(prod122321, 3), sum3)
        end
        @testset "When last variable" begin
            # Summing out the only variable leaves a scalar factor whose
            # single entry is the total mass.
            f = sum_over(fact1, 1)
            @test length(f.dims) == 0
            @test length(f.keys) == 0
            @test f.entries == [1.0]
        end
    end
end
end
| Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | code | 17797 | using Test
using Scruff
using Scruff.Utils
using Scruff.RTUtils
using Scruff.Models
using Scruff.SFuncs
using Scruff.Operators
import Scruff.Algorithms: VE, ve, infer, probability, greedy_order, unconnected_neighbors, cost, copy_graph, eliminate
@testset "VE" begin
@testset "vegraph" begin
    # Interaction graph used by variable elimination: nodes carry their range
    # sizes; edges are undirected.
    g = Graph()
    add_node!(g, 1, 2)
    add_node!(g, 2, 3)
    add_node!(g, 3, 2)
    add_node!(g, 4, 2)
    add_node!(g, 5, 5)
    add_node!(g, 6, 1)
    add_undirected!(g, 1, 3)
    add_undirected!(g, 2, 3)
    add_undirected!(g, 3, 4)
    add_undirected!(g, 3, 5)
    add_undirected!(g, 1, 4)
    @testset "Construction" begin
        @test g.nodes == [1, 2, 3, 4, 5, 6]
        @test g.sizes == Dict(1 => 2, 2 => 3, 3 => 2, 4 => 2, 5 => 5, 6 => 1)
        @test g.edges == Dict(1 => [3, 4], 2 => [3], 3 => [1, 2, 4, 5],
            4 => [3, 1], 5 => [3], 6 => [])
    end
    @testset "Unconnected neighbors" begin
        # Eliminating 3 requires fill-in edges between each pair of its
        # neighbors that are not already connected.
        @test unconnected_neighbors(g, 3) == [(1,2), (1,5), (2,4), (2,5), (4,5)]
        @test unconnected_neighbors(g, 6) == []
        @test cost(g, 3) == 5
        @test cost(g, 6) == 0
    end
    @testset "Elimination" begin
        h = copy_graph(g)
        eliminate(h, 3)
        # 3 is removed and its former neighbors become a clique.
        @test h.nodes == [1, 2, 4, 5, 6]
        @test h.sizes == Dict(1 => 2, 2 => 3, 4 => 2, 5 => 5, 6 => 1)
        @test h.edges == Dict(1 => [4, 2, 5], 2 => [1, 4, 5],
            4 => [1, 2, 5], 5 => [1, 2, 4], 6 => [])
    end
    @testset "Greedy elimination order" begin
        @testset "With all variables eliminated" begin
            ord = greedy_order(g)
            @test Set(ord) == Set([1,2,3,4,5,6])
            @test length(ord) == 6
            # Only 3 has unconnected neighbors.
            # It cannot be eliminated before at least 3 of its neighbors
            # have been eliminated.
            # inds[1:4] are the positions of 3's neighbors (1, 2, 4, 5);
            # inds[5] is the position of 3 itself.
            inds = indexin([1,2,4,5,3], ord)
            # BUG FIX: the original compared each neighbor's position against
            # inds[4] (the position of node 5) — including the vacuous
            # inds[4] < inds[4] — rather than against inds[5], the position
            # of node 3, which is what the stated property requires.
            eliminated_before = 0
            for k in 1:4
                if inds[k] < inds[5]
                    eliminated_before += 1
                end
            end
            @test eliminated_before >= 3
        end
        @testset "With uneliminated variables" begin
            ord = greedy_order(g, [5, 6])
            @test Set(ord) == Set([1,2,3,4])
            @test length(ord) == 4
            # 3 must be eliminated last because all the others
            # are a disconnected neighbor from 5
            @test ord[4] == 3
        end
    end
end
@testset "range" begin
    # Mixed network: discrete dv3 depends on dv1 and dv2; continuous cv2 is a
    # conditional linear Gaussian over dv3 and cv1.
    dn1 = Cat([1,2], [0.1, 0.9])
    dv1 = dn1()(:dv1)
    dn2 = Cat([1,2,3], [0.2, 0.3, 0.5])
    dv2 = dn2()(:dv2)
    dn3 = DiscreteCPT([1,2],
        Dict((1,1) => [0.3, 0.7], (2,1) => [0.6, 0.4],
            (1,2) => [0.4, 0.6], (2,2) => [0.7, 0.3],
            (1,3) => [0.5, 0.5], (2,3) => [0.8, 0.2]))
    dv3 = dn3()(:dv3)
    cn1 = Normal(-0.1, 1.0)
    cv1 = cn1()(:cv1)
    cn2 = CLG(Dict((1,) => ((1.5,), -0.3, 0.5), (2,) => ((0.7,), 0.4, 0.5)))
    cv2 = cn2()(:cv2)
    network = InstantNetwork(Variable[dv1,dv2,dv3,cv1,cv2], VariableGraph(dv3=>[dv1,dv2],cv2=>[dv3,cv1]))
    runtime = Runtime(network)
    ensure_all!(runtime, 0)
    order = topsort(get_initial_graph(network))
    iv = Vector{Int}()
    fv = Vector{Float64}()
    # Reference ranges computed directly from the sfuncs at target size 10,
    # threading each parent's range into its children...
    dr1 = Operators.support(dn1, (), 10, iv)
    dr2 = Operators.support(dn2, (), 10, iv)
    dr3 = Operators.support(dn3, (dr1, dr2), 10, iv)
    cr1 = Operators.support(cn1, (), 10, fv)
    cr2 = Operators.support(cn2, (dr3, cr1), 10, fv)
    # ...and expanded ranges at target size 20, seeded by the size-10 ranges.
    dx1 = Operators.support(dn1, (), 20, dr1)
    dx2 = Operators.support(dn2, (), 20, dr2)
    dx3 = Operators.support(dn3, (dx1, dx2), 20, dr3)
    cx1 = Operators.support(cn1, (), 20, cr1)
    cx2 = Operators.support(cn2, (dx3, cx1), 20, cr2)
    @testset "Setting initial ranges" begin
        # set_ranges! should reproduce the directly computed ranges.
        set_ranges!(runtime, Dict{Symbol, Score}(), 10)
        @test get_range(runtime, dv1) == dr1
        @test get_range(runtime, dv2) == dr2
        @test get_range(runtime, dv3) == dr3
        @test get_range(runtime, cv1) == cr1
        @test get_range(runtime, cv2) == cr2
    end
    @testset "Setting expanded ranges" begin
        set_ranges!(runtime, Dict{Symbol, Score}(), 20)
        @test get_range(runtime, dv1) == dx1
        @test get_range(runtime, dv2) == dx2
        @test get_range(runtime, dv3) == dx3
        @test get_range(runtime, cv1) == cx1
        @test get_range(runtime, cv2) == cx2
    end
    @testset "Ranges from previous instance" begin
        # A new instance at time 2 should inherit the latest (expanded) range.
        ensure_all!(runtime, 2)
        @test get_range(runtime, dv1) == dx1
    end
end
@testset "ve" begin
dn1 = Cat([1,2], [0.1, 0.9])
i11 = indexin(1, dn1.__compiled_range)[1]
i12 = indexin(2, dn1.__compiled_range)[1]
dv1 = dn1()(:dv1)
dn2 = Cat([1,2,3], [0.2, 0.3, 0.5])
i21 = indexin(1, dn2.__compiled_range)[1]
i22 = indexin(2, dn2.__compiled_range)[1]
i23 = indexin(3, dn2.__compiled_range)[1]
dv2 = dn2()(:dv2)
dn3 = DiscreteCPT([1,2], Dict((1,1) => [0.3, 0.7], (2,1) => [0.4, 0.6],
(1,2) => [0.5, 0.5], (2,2) => [0.6, 0.4],
(1,3) => [0.7, 0.3], (2,3) => [0.8, 0.2]))
i31 = indexin(1, dn3.__sfs[1,1].__compiled_range)[1]
i32 = indexin(2, dn3.__sfs[1,1].__compiled_range)[1]
dv3 = dn3()(:dv3)
cn1 = Normal(-0.1, 1.0)
cv1 = cn1()(:cv1)
cn2 = CLG(Dict((1,) => ((1.5,), -0.3, 0.5), (2,) => ((0.7,), 0.4, 0.5)))
cv2 = cn2()(:cv2)
net1 = InstantNetwork(Variable[dv1,dv2,dv3], VariableGraph(dv3=>[dv1,dv2]))
ord1 = topsort(get_initial_graph(net1))
@testset "A discrete network" begin
@testset "With one query variable and bounds" begin
    runtime = Runtime(net1)
    ensure_all!(runtime, 0)
    set_ranges!(runtime, Dict{Symbol, Score}(), 10)
    # With bounds = true, ve returns a (lower, upper) pair of factors.
    ((l,u), ids) = ve(runtime, ord1, [dv3]; bounds = true)
    # Hand-computed marginal of dv3 from the priors on dv1/dv2 and the CPT.
    pa = 0.1 * 0.2 * 0.3 + 0.1 * 0.3 * 0.5 + 0.1 * 0.5 * 0.7 +
        0.9 * 0.2 * 0.4 + 0.9 * 0.3 * 0.6 + 0.9 * 0.5 * 0.8
    pb = 1 - pa
    @test length(l.keys) == 1
    @test l.keys[1] == ids[dv3]
    @test l.dims == (2,)
    @test length(l.entries) == 2
    @test isapprox(l.entries[i32], pa, atol = 0.0000001)
    @test isapprox(l.entries[i31], pb, atol = 0.0000001)
    # The network is fully discrete, so the bounds coincide.
    @test length(u.keys) == 1
    @test u.keys[1] == ids[dv3]
    @test u.dims == (2,)
    @test length(u.entries) == 2
    @test isapprox(u.entries[i32], pa, atol = 0.0000001)
    @test isapprox(u.entries[i31], pb, atol = 0.0000001)
end
@testset "With one query variable and no bounds" begin
    runtime = Runtime(net1)
    ensure_all!(runtime, 0)
    set_ranges!(runtime, Dict{Symbol, Score}(), 10)
    # With bounds = false, ve returns a single factor.
    (l,ids) = ve(runtime, ord1, [dv3]; bounds = false)
    # Same hand-computed marginal as the bounds case above.
    pa = 0.1 * 0.2 * 0.3 + 0.1 * 0.3 * 0.5 + 0.1 * 0.5 * 0.7 +
        0.9 * 0.2 * 0.4 + 0.9 * 0.3 * 0.6 + 0.9 * 0.5 * 0.8
    pb = 1 - pa
    @test length(l.keys) == 1
    @test l.keys[1] == ids[dv3]
    @test l.dims == (2,)
    @test length(l.entries) == 2
    @test isapprox(l.entries[i32], pa, atol = 0.0000001)
    @test isapprox(l.entries[i31], pb, atol = 0.0000001)
end
@testset "With disconnected variable" begin
    # y is independent of the query x; eliminating it must not perturb
    # x's marginal.
    x = Cat([1,2], [0.5, 0.5])()(:x)
    y = Cat([1,2], [0.2, 0.8])()(:y)
    net = InstantNetwork(Variable[x,y], VariableGraph())
    run = Runtime(net)
    ensure_all!(run)
    ord = topsort(get_initial_graph(net))
    set_ranges!(run, Dict{Symbol, Score}(), 2)
    (l,ids) = ve(run, ord, [x]; bounds = false)
    @test l.entries == [0.5, 0.5]
end
@testset "With two query variables" begin
    runtime = Runtime(net1)
    ensure_all!(runtime, 0)
    set_ranges!(runtime, Dict{Symbol, Score}(), 10)
    # Query the joint over dv3 and dv1.
    (l,ids) = ve(runtime, ord1, [dv3, dv1]; bounds = false)
    # Hand-computed joint entries: pp* are for dv1 = 1, pq* for dv1 = 2;
    # *a is dv3's first CPT column, *b the second.
    ppa = 0.1 * 0.2 * 0.3 + 0.1 * 0.3 * 0.5 + 0.1 * 0.5 * 0.7
    ppb = 0.1 * 0.2 * 0.7 + 0.1 * 0.3 * 0.5 + 0.1 * 0.5 * 0.3
    pqa = 0.9 * 0.2 * 0.4 + 0.9 * 0.3 * 0.6 + 0.9 * 0.5 * 0.8
    pqb = 0.9 * 0.2 * 0.6 + 0.9 * 0.3 * 0.4 + 0.9 * 0.5 * 0.2
    @test l.dims == (2,2)
    @test length(l.keys) == 2
    @test length(l.entries) == 4
    # The key order of the result factor is not specified, so accept either
    # order and index the entries accordingly.
    k1 = l.keys[1]
    k2 = l.keys[2]
    @test k1 == ids[dv1] && k2 == ids[dv3] || k1 == ids[dv3] && k2 == ids[dv1]
    if k1 == ids[dv1]
        @test isapprox(l.entries[(i12-1)*2 + i31], ppa, atol = 0.000001)
        @test isapprox(l.entries[(i12-1)*2 + i32], ppb, atol = 0.000001)
        @test isapprox(l.entries[(i11-1)*2 + i31], pqa, atol = 0.000001)
        @test isapprox(l.entries[(i11-1)*2 + i32], pqb, atol = 0.000001)
    else
        @test isapprox(l.entries[(i32-1)*2 + i12], ppa, atol = 0.000001)
        @test isapprox(l.entries[(i32-1)*2 + i11], pqa, atol = 0.000001)
        @test isapprox(l.entries[(i31-1)*2 + i12], ppb, atol = 0.000001)
        @test isapprox(l.entries[(i31-1)*2 + i11], pqb, atol = 0.000001)
    end
end
@testset "with hard evidence" begin
    runtime = Runtime(net1)
    ensure_all!(runtime)
    set_ranges!(runtime, Dict{Symbol, Score}(), 10)
    inst1 = current_instance(runtime, dv1)
    # Observe dv1 == 1; the answer is the unnormalized joint with evidence.
    post_evidence!(runtime, inst1, HardScore(1))
    (l, ids) = ve(runtime, ord1, [dv3]; bounds = false)
    p31 = 0.1 * (0.2 * 0.7 + 0.3 * 0.5 + 0.5 * 0.3)
    p32 = 0.1 * (0.2 * 0.3 + 0.3 * 0.5 + 0.5 * 0.7)
    @test isapprox(l.entries[i31], p31, atol = 0.000001)
    @test isapprox(l.entries[i32], p32, atol = 0.000001)
end
@testset "with soft evidence" begin
    runtime = Runtime(net1)
    ensure_all!(runtime)
    set_ranges!(runtime, Dict{Symbol, Score}(), 10)
    inst1 = current_instance(runtime, dv1)
    # Soft evidence multiplies each value of dv1 by its score (3.0 or 5.0)
    # rather than ruling values out.
    post_evidence!(runtime, inst1, SoftScore([1,2], [3.0, 5.0]))
    (l, ids) = ve(runtime, ord1, [dv3]; bounds = false)
    p31 = 0.1 * 3.0 * (0.2 * 0.7 + 0.3 * 0.5 + 0.5 * 0.3) +
        0.9 * 5.0 * (0.2 * 0.6 + 0.3 * 0.4 + 0.5 * 0.2)
    p32 = 0.1 * 3.0 * (0.2 * 0.3 + 0.3 * 0.5 + 0.5 * 0.7) +
        0.9 * 5.0 * (0.2 * 0.4 + 0.3 * 0.6 + 0.5 * 0.8)
    @test isapprox(l.entries[i31], p31, atol = 0.000001)
    @test isapprox(l.entries[i32], p32, atol = 0.000001)
end
@testset "with separable models" begin
    # Chain z1 -> {z2, z3} -> z4, where z4 is a Separable mixture of two
    # component CPTs with mixture weights [0.75, 0.25].
    sf1 = Cat([1,2], [0.1, 0.9])
    z1 = sf1()(:z1)
    sf2 = DiscreteCPT([1,2], Dict((1,) => [0.2, 0.8], (2,) => [0.3, 0.7]))
    z2 = sf2()(:z2)
    sf3 = DiscreteCPT([1,2], Dict((1,) => [0.4, 0.6], (2,) => [0.5, 0.5]))
    z3 = sf3()(:z3)
    cpt1 = Dict((1,) => [0.6, 0.4], (2,) => [0.7, 0.3])
    cpt2 = Dict((1,) => [0.8, 0.2], (2,) => [0.9, 0.1])
    cpts :: SepCPTs = [cpt1, cpt2]
    sf4 = Separable([1,2], [0.75, 0.25], cpts)
    z4 = sf4()(:z4)
    net = InstantNetwork(Variable[z1,z2,z3,z4], VariableGraph(z2=>[z1],z3=>[z1],z4=>[z2,z3]))
    ord = topsort(get_initial_graph(net))
    # Full joint, spelled out per assignment. Letter code: a/b = z1 value
    # 1/2, c/d = z2 value 1/2, e/f = z3 value 1/2, g/h = z4 value 1/2; the
    # bracketed term is the Separable mixture for z4.
    aceg = 0.1 * 0.2 * 0.4 * (0.75 * 0.6 + 0.25 * 0.8)
    aceh = 0.1 * 0.2 * 0.4 * (0.75 * 0.4 + 0.25 * 0.2)
    acfg = 0.1 * 0.2 * 0.6 * (0.75 * 0.6 + 0.25 * 0.9)
    acfh = 0.1 * 0.2 * 0.6 * (0.75 * 0.4 + 0.25 * 0.1)
    adeg = 0.1 * 0.8 * 0.4 * (0.75 * 0.7 + 0.25 * 0.8)
    adeh = 0.1 * 0.8 * 0.4 * (0.75 * 0.3 + 0.25 * 0.2)
    adfg = 0.1 * 0.8 * 0.6 * (0.75 * 0.7 + 0.25 * 0.9)
    adfh = 0.1 * 0.8 * 0.6 * (0.75 * 0.3 + 0.25 * 0.1)
    bceg = 0.9 * 0.3 * 0.5 * (0.75 * 0.6 + 0.25 * 0.8)
    bceh = 0.9 * 0.3 * 0.5 * (0.75 * 0.4 + 0.25 * 0.2)
    bcfg = 0.9 * 0.3 * 0.5 * (0.75 * 0.6 + 0.25 * 0.9)
    bcfh = 0.9 * 0.3 * 0.5 * (0.75 * 0.4 + 0.25 * 0.1)
    bdeg = 0.9 * 0.7 * 0.5 * (0.75 * 0.7 + 0.25 * 0.8)
    bdeh = 0.9 * 0.7 * 0.5 * (0.75 * 0.3 + 0.25 * 0.2)
    bdfg = 0.9 * 0.7 * 0.5 * (0.75 * 0.7 + 0.25 * 0.9)
    bdfh = 0.9 * 0.7 * 0.5 * (0.75 * 0.3 + 0.25 * 0.1)
    @testset "without evidence" begin
        run = Runtime(net)
        ensure_all!(run)
        set_ranges!(run, Dict{Symbol, Score}(), 10)
        (l, ids) = ve(run, ord, [z4]; bounds = false)
        # Marginal of z4: sum the joint over the other three variables.
        g = aceg + acfg + adeg + adfg + bceg + bcfg + bdeg + bdfg
        h = aceh + acfh + adeh + adfh + bceh + bcfh + bdeh + bdfh
        es = normalize(l.entries)
        r4 = get_range(run, current_instance(run, z4))
        i1 = indexin(1, r4)[1]
        i2 = indexin(2, r4)[1]
        @test isapprox(es[i1], g)
        @test isapprox(es[i2], h)
    end
    @testset "with evidence" begin
        run = Runtime(net)
        ensure_all!(run)
        set_ranges!(run, Dict{Symbol, Score}(), 10)
        inst4 = current_instance(run, z4)
        # Observe z4 == 2, then query the posterior of z1.
        post_evidence!(run, inst4, HardScore(2))
        (l, ids) = ve(run, ord, [z1]; bounds = false)
        ah = aceh + acfh + adeh + adfh
        bh = bceh + bcfh + bdeh + bdfh
        es = normalize(l.entries)
        r1 = get_range(run, current_instance(run, z1))
        i1 = indexin(1, r1)[1]
        i2 = indexin(2, r1)[1]
        @test isapprox(es[i1], ah / (ah + bh))
        @test isapprox(es[i2], bh / (ah + bh))
    end
end
end
@testset "Using the VE instant algorithm" begin
@testset "Basic" begin
    # Two-node chain v1 -> v2 run through the VE algorithm wrapper.
    v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
    v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
    net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
    runtime = Runtime(net)
    default_initializer(runtime)
    alg = VE([v1,v2])
    infer(alg, runtime)
    i1 = current_instance(runtime, v1)
    i2 = current_instance(runtime, v2)
    # v1's marginal is its prior; v2's is the prior pushed through the CPT.
    @test isapprox(probability(alg, runtime, i1, :a), 0.1)
    @test isapprox(probability(alg, runtime, i1, :b), 0.9)
    @test isapprox(probability(alg, runtime, i2, 1), 0.1 * 0.2 + 0.9 * 0.3)
    @test isapprox(probability(alg, runtime, i2, 2), 0.1 * 0.8 + 0.9 * 0.7)
end
@testset "With placeholder" begin
    # v2's parent is a placeholder whose distribution is supplied to the
    # initializer rather than being part of the network.
    p1 = Placeholder{Symbol}(:p1)
    v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
    net = InstantNetwork(Variable[v2], VariableGraph(v2 => [p1]), Placeholder[p1])
    runtime = Runtime(net)
    default_initializer(runtime, 10, Dict(p1.name => Cat([:a,:b], [0.1, 0.9])))
    alg = VE([v2])
    infer(alg, runtime)
    i2 = current_instance(runtime, v2)
    # Same marginal as the Basic case, since the placeholder carries the
    # same prior.
    @test isapprox(probability(alg, runtime, i2, 1), 0.1 * 0.2 + 0.9 * 0.3)
    @test isapprox(probability(alg, runtime, i2, 2), 0.1 * 0.8 + 0.9 * 0.7)
end
@testset "With evidence" begin
    v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
    v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
    net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
    runtime = Runtime(net)
    default_initializer(runtime)
    alg = VE([v1,v2])
    # Observe v2 == 2; the algorithm should return normalized posteriors.
    infer(alg, runtime, Dict{Symbol, Score}(:v2 => HardScore(2)))
    i1 = current_instance(runtime, v1)
    i2 = current_instance(runtime, v2)
    # Posterior of v1 by Bayes' rule with likelihoods P(v2 = 2 | v1).
    p1 = 0.1 * 0.8
    p2 = 0.9 * 0.7
    z = p1 + p2
    @test isapprox(probability(alg, runtime, i1, :a), p1 / z)
    @test isapprox(probability(alg, runtime, i1, :b), p2 / z)
    @test isapprox(probability(alg, runtime, i2, 1), 0.0)
    @test isapprox(probability(alg, runtime, i2, 2), 1.0)
end
end
end
end
| Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | docs | 3131 |
[![][docs-main-img]][docs-main-url][![][docs-dev-img]][docs-dev-url] [![][CI-img]][CI-url] [![][codecov-img]][codecov-url]
# Scruff.jl
Scruff is an AI framework to build agents that sense, reason, and learn in the world using a variety of models. It aims to integrate many different kinds of models in a coherent framework, provide flexibility in spatiotemporal modeling, and provide tools to compose, share, and reuse models and model components.
Scruff is provided as a [Julia](https://julialang.org/) package and is licensed under the BSD-3-Clause License.
> *Warning*: Scruff is rapidly evolving beta research software. Although the software already has a lot of functionality, we intend to expand on this in the future and cannot promise stability of the code or the APIs at the moment.
## Download and Installation
To download the package, from the Julia package manager, run
```julia-repl
(v1.7) pkg> add Scruff
```
## Scruff Tutorial and Examples
The Scruff tutorial can be found in the [tutorial](https://github.com/charles-river-analytics/Scruff.jl/tree/develop/docs/src/tutorial) section of the documentation.
Scruff examples can be found in the [examples/](docs/examples/) directory.
## Building the documentation
Scruff uses [Documenter.jl](https://juliadocs.github.io/Documenter.jl/stable/) to generate its documentation. To build, navigate to the `docs` folder and run
```julia
Scruff.jl\docs> julia --project=.. --color=yes make.jl
```
This will create a `docs/build` directory with an `index.html` file, which will contain the documentation.
## Running tests
To run the tests, activate the project as above and just run `test` from the `pkg` prompt. From the `julia` prompt, `include("test/runtests.jl")` can be used to run the tests.
## Development
Development against the Scruff codebase should _only_ be done by branching the `develop` branch.
### Scruff module layout
The Scruff packages are split into four (4) main modules: `Models`, `Algorithms`, `SFuncs`, and `Operators`.
- To add to the `Models` module, add a `.jl` file to the `src/models/` directory and `include` it in the `src/models.jl` file
- To add to the `Algorithms` module, add a `.jl` file to the `src/algorithms/` directory and `include` it in the `src/algorithms.jl` file
- To add to the `SFuncs` module, add a `.jl` file to the `src/sfuncs/` directory and `include` it in the `src/sfuncs.jl` file
- To add to the `Operators` module, add a `.jl` file to the `src/operators` directory and `include` it in the `src/operators.jl` file
[docs-main-img]: https://img.shields.io/badge/docs-main-blue.svg
[docs-main-url]: https://charles-river-analytics.github.io/Scruff.jl/stable
[docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[docs-dev-url]: https://charles-river-analytics.github.io/Scruff.jl/dev
[CI-img]: https://github.com/p2t2/Scruff.jl/actions/workflows/ci.yml/badge.svg
[CI-url]: https://github.com/p2t2/Scruff.jl/actions/workflows/ci.yml
[codecov-img]: https://codecov.io/gh/p2t2/Scruff.jl/branch/main/graph/badge.svg
[codecov-url]: https://codecov.io/gh/p2t2/Scruff.jl
| Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | docs | 1119 | # MultiInterface.jl
Defining and selecting alternative parameterized implementations of an interface in Julia
```
@interface a(x::Int)::Int
@impl function a(x::Int)
struct A1 end
return 1
end
@impl function a(x::Int)
struct A2
time::AbstractFloat = 0.01
end
sleep(time)
return 2
end
struct Policy1 <: Policy end
get_imp(policy::Policy1, ::Type{A}, args...) = A1()
struct Policy2 <: Policy end
get_imp(policy::Policy2, ::Type{A}, args...) = A2()
with_policy(Policy1()) do
@test a(0)==1
end
with_policy(Policy2()) do
@test a(0)==2
end
```
See tests for more examples. See test/demo.jl for an in-line example of the macro expanded representations. This may not be exactly up-to-date.
## Debugging
Debugging implementations may be a bit tricky right now. Currently we don't have a `NotImplemented` fallthrough call, for reasons similar to those outlined here: https://www.oxinabox.net/2020/04/19/Julia-Antipatterns.html. Such a fallthrough would also preclude certain sophisticated usage of `hasmethod` by complex Policies. `methods(f)` can help demonstrate issues with calling implementations.
| Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | docs | 1405 | # Scruff
Scruff is an AI framework to build agents that sense, reason, and learn in the world using a variety of models.
It aims to integrate many different kinds of models in a coherent framework, provide flexibility in spatiotemporal modeling, and provide tools to compose, share, and reuse models and model components.
Warning: Scruff is rapidly evolving beta research software. Although the software already has a lot of functionality, we intend to expand on this in the future and cannot promise stability of the code or the APIs at the moment.
## Installation
First, [download Julia 1.6.0 or later](https://julialang.org/downloads/).
Then, install the Scruff package with the Julia package manager. From the Julia REPL, type `]` to enter the Pkg REPL mode and then run:
```julia-repl
pkg> add Scruff
```
## Developing Scruff
To develop Scruff, first pull down the code
```bash
$ git clone https://github.com/charles-river-analytics/Scruff.jl.git
```
## Learning about Scruff
Please read the [The Scruff Tutorial](@ref), which describes most of the language features through examples.
The library documentation contains detailed information about most of the data structures
and functions used in the code.
## Contributing to Scruff
We welcome contributions from the community. Please see the issues in Github for some of the improvements
we would like to make, and feel free to add your own suggestions. | Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | docs | 97 | # [Scruff.Algorithms](@id scruff_algorithms)
```@autodocs
Modules = [Scruff.Algorithms]
```
| Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | docs | 106 | # [Scruff](@id scruff_core)
```@meta
CurrentModule = Scruff
```
```@autodocs
Modules = [Scruff]
```
| Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | docs | 85 | # [Scruff.Models](@id scruff_models)
```@autodocs
Modules = [Scruff.Models]
```
| Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | docs | 94 | # [Scruff.Operators](@id scruff_operators)
```@autodocs
Modules = [Scruff.Operators]
```
| Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | docs | 88 | # [Scruff.RTUtils](@id scruff_rtutils)
```@autodocs
Modules = [Scruff.RTUtils]
```
| Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | docs | 85 | # [Scruff.SFuncs](@id scruff_sfuncs)
```@autodocs
Modules = [Scruff.SFuncs]
```
| Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | docs | 82 | # [Scruff.Utils](@id scruff_utils)
```@autodocs
Modules = [Scruff.Utils]
```
| Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | docs | 536 | # Examples
* [rembrandt_example.jl](https://github.com/p2t2/Scruff.jl/tree/main/docs/examples/rembrandt_example.jl)
* [novelty_example.jl](https://github.com/p2t2/Scruff.jl/tree/main/docs/examples/novelty_example.jl)
* [novelty_lazy.jl](https://github.com/p2t2/Scruff.jl/tree/main/docs/examples/novelty_lazy.jl)
* [novelty_filtering.jl](https://github.com/p2t2/Scruff.jl/tree/main/docs/examples/novelty_filtering.jl)
* [soccer_example.jl](https://github.com/charles-river-analytics/Scruff.jl/tree/main/docs/examples/soccer_example.jl)
| Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | docs | 50351 | # The Scruff Tutorial
## What Scruff is all about
Scruff is a flexible framework for building AI systems. Although its roots are in probabilistic programming, it is not strictly speaking a probabilistic programming language. Instead, it is a framework for combining models of different kinds and reasoning with them. Scruff provides three main features:
1. The ability to combine different kinds of models and reason with them using an algorithm in an integrated way. Scruff decomposes the representation of models from algorithms that work with them using operators. Any representation (the scruff word is sfunc (stochastic function, pronounced "essfunk")) that implements the operators can appear in algorithms. Using this approach enables us to generalize algorithms like belief propagation and importance sampling that have traditionally been applied to probabilistic models. A given sfunc does not have to support all operators and algorithms can use sfuncs in the appropriate way. For example, it is legal to have an sfunc that you can't sample from, which would not be possible in a typical probabilistic programming language.
2. A flexible framework for inference using these representations. Scruff distinguishes between the notion of a variable, which represents a value that can vary over time, and an instance of that variable, which represents its value at a particular time. In Scruff, variables are associated with models, which determine which sfunc to use for specific instances. There is no requirement that instances follow a regular time pattern; if the model supports it, instances can appear at any time interval. It is also possible to combine instances appearing at different time intervals, for example slowly changing and rapidly changing variables. Scruff also provides the ability to perform iterative inference, where beliefs about instances are refined through repeated computation.
3. Composition, reuse, and experimentation with different models, sfuncs, and algorithms. Scruff comes with an extensible and structured library of models, sfuncs, operators, and algorithms, making it easy to mix and match or extend with your own. For example, it is possible to implement alternative versions of an operators for an sfunc side by side and choose between them manually, or even automatically based on the characteristics of the specific instance. Another example is to compare accuracy and runtime between different time granularities on a variable by variable basis. Finally, as sfunc composition is highly structured, it is possible to experiment with specific sfunc choices in a systematic way.
The name Scruff derives from the old debates in AI between the neats and the scruffies. Neats believed that unless systems were developed in a coherent framework, it would be impossible to scale development of AI systems to complex real-world problems. Scruffies believed that real-world problems require a variety of techniques that must be combined as best as possible, and forcing everything into a neat framework would hinder progress.
We believe that both camps have an element of the truth, and Scruff is an attempt to provide the best of both worlds.
Scruff's philosophy is to allow a variety of representations and implementations to coexist side by side, and not every algorithm can be applied to every representation. However, they all coexist in a clean, well-defined and organized framework that enables scalable development of models and systems.
## Some opening examples
We start this tutorial with three examples illustrating idiomatic use of Scruff and some of its capabilities. These examples can be found in the `docs/examples` folder (they are also linked by the [Examples](@ref) page).
### Instant reasoning
Our first example, found in [`novelty_example.jl`](https://github.com/p2t2/Scruff.jl/tree/main/docs/examples/novelty_example.jl) is about detecting and characterizing novel behaviors. In this example, a behavior is simply something that generates a real number, but the example extends to more interesting kinds of behavior. The example shows how to create sfuncs, models, variables, and networks, and how to reason with them. We call this an instant reasoning example because there is no temporal element.
We begin with some imports:
```julia
using Scruff
using Scruff.Models
using Scruff.SFuncs
using Scruff.Algorithms
```
Since we're going to run experiments with different setups, we define a NoveltySetup data structure.
```julia
struct NoveltySetup
known_sfs::Vector{Dist{Float64}}
known_probs::Vector{Float64}
novelty_prob::Float64
novelty_prior_mean::Float64
novelty_prior_sd::Float64
novelty_sd::Float64
end
```
Here, `known_sfs` is a vector of known behaviors, each one represented by a sfunc. In particular, each behavior is a `Dist{Float64}`, meaning it is an unconditional distribution over `Float64`. `known_probs` is the probabilities of the known behaviors, assuming the behavior is not novel, while `novelty_prob` is the probability that the behavior is novel. A novel behavior has a mean and standard deviation. The mean is drawn from a normal distribution with mean `novelty_prior_mean` and standard deviation `novelty_prior_sd`. The novel behavior's own standard deviation is given by `novelty_sd`.
We now define a function that takes a setup and returns a network. Since observations are also part of the network, this function also takes the number of observations as an argument.
```julia
function novelty_network(setup::NoveltySetup, numobs::Int)::InstantNetwork
```
This function begins by defining some variables. For the first variable, we'll go through the steps in detail. For the remaining variables, we'll use some syntactic sugar. The first variable represents the known behavior. Defining it takes three steps: creating the sfunc, defining the model, and associating it with a variable. Much of Scruff's power comes from separating these three concepts. However, for the common case where we want to do all three of these things together, we provide syntactic sugar.
First we create the sfunc:
```julia
known_sf = Cat(setup.known_sfs, setup.known_probs)
```
This defines `known_sf` to be a categorical distribution, where the choices are provided by `setup.known_sfs` and the probabilities are specified by `setup.known_probs`. The important idea is that this distribution is an entity of its own, irrespective of specific variables that are related using it. An sfunc is similar to the mathematical concept of a function. It describes a relationship between variables that is not necessarily deterministic. In mathematics, we can define concepts like function composition, which operate on the functions directly and don't require the notion of variables. Similarly in Scruff, there are a variety of ways to compose and combine sfuncs. Furthermore, we can have sfuncs be values in models as well, which enables higher-order probabilistic programming. In fact, in this example, `known_sf` represents a categorical distribution over sfuncs.
After creating the sfunc, we create a model using the sfunc:
```julia
known_model = SimpleModel(known_sf)
```
A model describes which sfunc to generate for a variable in different situations. In general, the sfunc representing a variable's distribution can change depending on the situation, such as the time of instantiation of the variable and times of related instances. Here, we just have a `SimpleModel` that always returns the same sfunc, but later we will have more interesting models.
The third step is to create a variable and associate it with the model. This is achieved by calling the model with the variable name (a symbol) as argument:
```julia
known = known_model(:known)
```
We now have the Julia variable `known` whose value is a Scruff variable with the name `:known` associated with `known_model`. If you just want to create a variable with a `SimpleModel` for a specific sfunc, you can use syntactic sugar as follows:
```julia
known = Cat(setup.known_sfs, setup.known_probs)()(:known)
```
When we call the sfunc with zero arguments, we create a `SimpleModel` with the sfunc; then, when we apply that model to the variable name, we create the variable. In the rest of this example, we will use this form. Let's create some more variables:
```julia
is_novel = Flip(setup.novelty_prob)()(:is_novel)
novelty_mean = Normal(setup.novelty_prior_mean, setup.novelty_prior_sd)()(:novelty_mean)
novelty = Det(Tuple{Float64}, Dist{Float64}, m -> Normal(m[1], setup.novelty_sd))()(:novelty)
behavior = If{Dist{Float64}}()()(:behavior)
```
`is_novel` represents whether the behavior is novel or known. This variable will be true with probability `setup.novelty_prob`.
`novelty_mean` represents the mean of the novel behavior using a normal distribution whose mean and standard deviation are given by the setup.
`novelty` uses an sfunc called `Det`, which stands for "deterministic". It describes a deterministic relationship between one or more arguments and a result.
When you define a `Det`, you have to give the Julia compiler hints about the input and output types of the function. The input type of an sfunc in Scruff is always a tuple of arguments, so in this case it is a tuple of a single `Float64` argument. Our intent is for this input to represent the mean of the novel behavior; however, as we have discussed, sfuncs exist independently of the variables to which they will be applied. The connection to the novelty mean will be made later.
The output of the `Det` is an unconditional distribution of type `Dist{Float64}`. This is another example of an sfunc outputting an sfunc representing a behavior.
We now have two such sfuncs: `known` and `novelty`. We are ready to choose the actual behavior, using the `behavior` variable.
The sfunc for `behavior` is defined by `If{Dist{Float64}}()`. Unlike most probabilistic programming languages, which almost always provide an `if` control flow concept that chooses between specific alternatives based on a test, Scruff's `If` describes the general process of choosing between two alternatives using a Boolean test. In this example, the intent is to choose between `novelty` and `known` based on `is_novel`. These connections will be made later.
Note that the type of value produced by the `If` is a type parameter, which in this case is again a `Dist{Float64}`, representing the actual behavior that gets chosen.
Now that we have these variables, we are ready to start building the connections described in the previous paragraph. We will build the ingredients to an `InstantNetwork`, which are a list of variables, and a `VariableGraph`, representing a dictionary from variables to their parents.
```julia
variables = [known, is_novel, novelty_mean, novelty, behavior]
graph = VariableGraph(novelty => [novelty_mean], behavior => [is_novel, novelty, known])
```
Finally, we need to add observations, which is done in a flexible way depending on the number of observations.
```julia
for i in 1:numobs
obs = Generate{Float64}()()(obsname(i))
push!(variables, obs)
graph[obs] = [behavior]
end
```
For each observation, we create a variable whose name is given by the utility function
`obsname(i)`. The sfunc is `Generate{Float64}`. `Generate{Float64}` is a second-order sfunc
that takes as input a `Dist{Float64}` and generates a `Float64` from it.
Thus, each observation is an independent sample from the behavior.
We add the observation to the `variables` vector and make its parents the `behavior` variable.
Finally, we create the instant network and return it.
```julia
return InstantNetwork(variables, graph)
```
Now that we've built the network, we're ready to run some experiments.
Here's the code to run an experiment. It takes as arguments the setup, the vector of
observations, and the `InstantAlgorithm` to use (an `InstantAlgorithm` is an algorithm
run on an `InstantNetwork`; it does not handle dynamics).
```julia
function do_experiment(setup::NoveltySetup, obs::Vector{Float64}, alg::InstantAlgorithm)
net = novelty_network(setup, length(obs))
evidence = Dict{Symbol, Score}()
for (i,x) in enumerate(obs)
evidence[obsname(i)] = HardScore(x)
end
runtime = Runtime(net)
infer(alg, runtime, evidence)
is_novel = get_node(net, :is_novel)
novelty_mean = get_node(net, :novelty_mean)
println("Probability of novel = ", probability(alg, runtime, is_novel, true))
println("Posterior mean of novel behavior = ", mean(alg, runtime, novelty_mean))
end
```
`do_experiment` first creates the network and then builds up the `evidence` data structure,
which is a dictionary from variable names to scores. In Scruff, a `Score` is an sfunc
with no outputs that specifies a number for each value of its input. A `HardScore` is a
score that assigns the value 1 to its argument and 0 to everything else.
The next step is to create a runtime using the network.
The runtime holds all the information needed by the inference algorithm to perform
its computations and answer queries.
We then call `infer`, which does the actual work.
Once `infer` completes, we can answer some queries.
To answer a query, we need handles to the variables we want to use,
which is done using the `get_node` method.
Finally, the `probability` and `mean` methods give us the answers we want.
The example next defines some setups and an observation list.
```julia
function setup(generation_sd::Float64, prob_novel::Float64)::NoveltySetup
known = [Normal(0.0, generation_sd), Normal(generation_sd, generation_sd)]
return NoveltySetup(known, [0.75, 0.25], prob_novel, 0.0, 10.0, generation_sd)
end
setup1 = setup(1.0, 0.1)
setup2 = setup(4.0, 0.1)
obs = [5.0, 6.0, 7.0, 8.0, 9.0]
```
In `setup1`, behaviors have a smaller standard deviation, while in `setup2`,
the standard deviation is larger.
We would expect the posterior probability of `is_novel` to be higher for `setup1`
than `setup2` because it is harder to explain the observations with known behaviors
when they have a small standard deviation.
Finally, we run some experiments.
```julia
println("Importance sampling")
println("Narrow generation standard deviation")
do_experiment(setup1, obs, LW(1000))
println("Broad generation evidence")
do_experiment(setup2, obs, LW(1000))
println("\nBelief propagation")
println("Narrow generation standard deviation")
do_experiment(setup1, obs, ThreePassBP())
println("Broad generation evidence")
do_experiment(setup2, obs, ThreePassBP())
println("\nBelief propagation with larger ranges")
println("Narrow generation standard deviation")
do_experiment(setup1, obs, ThreePassBP(25))
println("Broad generation evidence")
do_experiment(setup2, obs, ThreePassBP(25))
```
`LW(1000)` creates a likelihood weighting algorithm
that uses 1000 particles, while `ThreePassBP()` creates a non-loopy belief propagation
algorithm. In this example, the network has no loops so using a non-loopy BP algorithm is
good. However, BP needs to discretize the continuous variables, which most of the
variables in this example are. With no arguments, it uses the default number of bins
(currently 10). `ThreePassBP(25)` creates a BP algorithm that uses 25 bins.
The first time you run this example, it might take a
while. Julia uses just in time (JIT) compilation, so the first run can involve a lot of
compilation overhead. But subsequent runs are very fast. When you run this example, it produces output like this:
julia> include("docs/examples/novelty_example.jl")
Importance sampling
Narrow generation standard deviation
Probability of novel = 1.0
Posterior mean of novel behavior = 7.334211013744095
Broad generation evidence
Probability of novel = 0.1988404327033635
Posterior mean of novel behavior = 0.631562661691411
Belief propagation
Narrow generation standard deviation
Probability of novel = 1.0
Posterior mean of novel behavior = 7.71606183538526
Broad generation evidence
Probability of novel = 0.2534439250343668
Posterior mean of novel behavior = 1.7131189737655137
Belief propagation with larger ranges
Narrow generation standard deviation
Probability of novel = 1.0
Posterior mean of novel behavior = 6.979068103646596
Broad generation evidence
Probability of novel = 0.2591460898562207
Posterior mean of novel behavior = 1.7363865329521413
We see that as expected, the probability of novel is much higher with narrow generation
standard deviation than with broad. All three algorithms have similar qualitative results.
Running the experiment a few times shows that the importance sampling method has relatively
high variance. We also see that the estimate of the posterior mean changes significantly
as we add more values to the ranges of variables for the BP method.
### Incremental reasoning
Building on the last point, our next example, found in
[`novelty_lazy.jl`](https://github.com/p2t2/Scruff.jl/tree/main/docs/examples/novelty_lazy.jl), uses Scruff's
incremental inference capabilities to gradually increase the range sizes of the
variables to improve the estimates. We're going to use an algorithm called Lazy Structured
Factored Inference (LSFI). LSFI repeatedly calls an `InstantAlgorithm` (in this case
variable elimination) on more and more refined versions of the network.
Refinement generally takes two forms: Expanding recursive networks to a greater depth,
and enlarging the ranges of continuous variables.
Our example only has the latter refinement.
When expanding recursive networks, LSFI can produce lower and upper bounds to query
answers at each iteration. This capability is less useful for range refinement,
but our code needs to handle the bounds.
The network and setups are just as in `novelty_example.jl`. The code for running an
experiment is similar in structure but has some new features.
```julia
function do_experiment(setup, obs)
net = novelty_network(setup, length(obs))
is_novel = get_node(net, :is_novel)
novelty_mean = get_node(net, :novelty_mean)
evidence = Dict{Symbol, Score}()
for (i,x) in enumerate(obs)
evidence[obsname(i)] = HardScore(x)
end
alg = LSFI([is_novel, novelty_mean]; start_size = 5, increment = 5)
runtime = Runtime(net)
prepare(alg, runtime, evidence)
for i = 1:10
println("Range size: ", alg.state.next_size)
refine(alg, runtime)
is_novel_lb = probability_bounds(alg, runtime, is_novel, [false, true])[1]
println("Probability of novel = ", is_novel_lb[2])
println("Posterior mean of novel behavior = ", mean(alg, runtime, novelty_mean))
end
end
```
As before, the code creates the network, gets handles of some variables, and fills
the evidence data structure.
In this case, we use `LSFI`. When creating an `LSFI` algorithm, we need to tell it which
variables we want to query, which are `is_novel` and `novelty_mean`. `LSFI` also has
some optional arguments. In this example, we configure it to have a starting range size
of 5 and increment the range size by 5 on each refinement.
Before running inference, we need to call `prepare(alg, runtime, evidence)`. Then we go
through ten steps of refinement. We can get the range size of the next refinement
using `alg.state.next_size` (we only use this for printing).
Refinement is done through a call to `refine(alg, runtime)`.
We then need to do a little more work than before to get the answers to queries because
of the probabilities bounds.
`probability_bounds(alg, runtime, is_novel, [false, true])` returns lower and upper bounds
as 2-element vectors of probabilities of `false` and `true`. As discussed earlier,
these bounds are not true bounds in the case of range refinement, so we just pick
the first one, and then pick the second value, corresponding to `true`, out of that vector.
The `mean` method already arbitrarily uses the lower bounds so we don't have to do any work there.
Running this example produces a result like:
Lazy Inference
Narrow generation standard deviation
Range size: 5
Probability of novel = 1.0
Posterior mean of novel behavior = 5.0
Range size: 10
Probability of novel = 1.0
Posterior mean of novel behavior = 5.000012747722526
Range size: 15
Probability of novel = 1.0
Posterior mean of novel behavior = 5.000012747722526
Range size: 20
Probability of novel = 1.0
Posterior mean of novel behavior = 5.000012747722526
Range size: 25
Probability of novel = 1.0
Posterior mean of novel behavior = 5.000012747722526
Range size: 30
Probability of novel = 1.0
Posterior mean of novel behavior = 5.000012747722526
Range size: 35
Probability of novel = 1.0
Posterior mean of novel behavior = 5.000012747722526
Range size: 40
Probability of novel = 1.0
Posterior mean of novel behavior = 5.000012747722526
Range size: 45
Probability of novel = 1.0
Posterior mean of novel behavior = 5.000012747722526
Range size: 50
Probability of novel = 1.0
Posterior mean of novel behavior = 5.000012747722526
Broad generation evidence
Range size: 5
Probability of novel = 0.23525574698998955
Posterior mean of novel behavior = 1.1750941076530532
Range size: 10
Probability of novel = 0.19825748797545847
Posterior mean of novel behavior = -0.11214142944113853
Range size: 15
Probability of novel = 0.19745646974840933
Posterior mean of novel behavior = -0.11168834527313051
Range size: 20
Probability of novel = 0.19283490006948978
Posterior mean of novel behavior = 0.3602757973718763
Range size: 25
Probability of novel = 0.1926826680899825
Posterior mean of novel behavior = 0.35995765581210176
Range size: 30
Probability of novel = 0.1825284089501074
Posterior mean of novel behavior = 1.1318032244818
Range size: 35
Probability of novel = 0.18251757269528399
Posterior mean of novel behavior = 1.1294239567980586
Range size: 40
Probability of novel = 0.18251757269528404
Posterior mean of novel behavior = 1.1294239567980597
Range size: 45
Probability of novel = 0.18251757269528404
Posterior mean of novel behavior = 1.1294239567980597
Range size: 50
Probability of novel = 0.18251757269528404
Posterior mean of novel behavior = 1.1294239567980597
Looking at this output, we see that the narrow generation standard deviation case is easy and the algorithm
quickly converges. However, in the broad generation standard deviation case, we see that there is a big
change in the posterior mean of novel behavior between range size 25 and 30. This is to
do with the way values in the range are generated. As the range size is increased,
values further and further away from the prior mean are created.
At range size 30, a value is introduced that has low prior but fits the data well,
which increases the posterior mean.
### Dynamic reasoning
Our final example [`novelty_filtering.jl`](https://github.com/p2t2/Scruff.jl/tree/main/docs/examples/novelty_filtering.jl) riffs on the novelty theme to use dynamic reasoning.
Now, observations are received over time at irregular intervals.
A behavior now represents the velocity of an object moving in one dimension,
starting at point 0.0.
This example moves away from the higher-order sfuncs but introduces some new kinds of
models.
The setup is similar but slightly different:
```julia
struct NoveltySetup
known_velocities::Vector{Float64}
known_probs::Vector{Float64}
novelty_prob::Float64
novelty_prior_mean::Float64
novelty_prior_sd::Float64
transition_sd::Float64
observation_sd::Float64
end
```
We have known velocities and their probabilities, the probability of novelty, and the mean and standard deviation of the novel velocity.
We also have the standard deviation of the transition and observation models.
Because the observations appear irregularly and not at fixed time steps, we are going to
use a `VariableTimeModel` to represent the position of the object.
To create a `VariableTimeModel`, we need to create a new type that inherits from
`VariableTimeModel` and implement the methods `make_initial`, which creates the sfunc
for the initial time step, and `make_transition`, which creates the sfunc at each time
step at which we instantiate the variable.
```julia
struct PositionModel <: VariableTimeModel{Tuple{}, Tuple{Float64, Float64}, Float64}
setup::NoveltySetup
end
function make_initial(::PositionModel, ::Float64)::Dist{Float64}
return Constant(0.0)
end
function make_transition(posmod::PositionModel, parenttimes::Tuple{Float64, Float64}, time::Float64)::SFunc{Tuple{Float64, Float64}, Float64}
function f(pair)
(prevval, velocity) = pair
Normal(prevval + t * velocity, t * posmod.setup.transition_sd)
end
t = time - parenttimes[1]
return Chain(Tuple{Float64, Float64}, Float64, f)
end
```
`make_initial` simply returns `Constant(0.0)`, meaning that the object always starts at
position 0.0 with no uncertainty.
Because the amount of time between instantiations
is variable, `make_transition` takes as argument a tuple of times of the previous
instantiation of its parents, as well as the current time.
It uses these times to determine exactly what the transition model should be.
Here, it computes the time `t` between the current time and the previous instantiation
of the first parent, which we will later connect to the position variable.
So `t` represents the time since the last instantiation of the position variable.
`make_transition` uses the `Chain` sfunc, which takes parent values and applies
a Julia function to produce the sfunc used to generate the value of the `Chain`.
In this case, once we make the connections, the `Chain` will take the previous value of
the position and the velocity and create a `Normal` sfunc whose mean and standard deviation
depend on `t`, as well as the standard deviation of the transition model in the setup.
This Normal is then used to generate the current position.
This code is a little sophisticated, but the ability to create variable time models and perform
asynchronous dynamic reasoning is a powerful feature of Scruff.
The rest of the example is simpler and we won't go over it in full detail.
We do introduce the `StaticModel`, which represents a variable whose value is generated
at the beginning of a run and never changes.
`StaticModel` is implemented as a `VariableTimeModel` where the transition function is
the identity function.
Also, the `observation` variable uses a `SimpleModel`, because it is generated afresh
instantaneously every time it is instantiated. It is defined to be a normal whose mean
is the position and whose standard deviation is given by the setup. This is implemented
using the `LinearGaussian` sfunc.
A `DynamicNetwork` uses two variable graphs for the initial and transition steps. In this
example, all the logic of choosing the behavior happens in the initial graph, while the
position logic and its dependence on previous position and velocity is in the transition
graph. The transition graph also contains copy edges for the static variables.
```julia
variables = [known_velocity, is_novel, novel_velocity, velocity, position, observation]
initial_graph = VariableGraph(velocity => [is_novel, novel_velocity, known_velocity], observation => [position])
transition_graph = VariableGraph(known_velocity => [known_velocity], is_novel => [is_novel], novel_velocity => [novel_velocity], velocity => [velocity], position => [position, velocity], observation => [position])
```
We'll show the `do_experiment` implementation in detail because it illustrates how
asynchronous inference is performed.
```julia
function do_experiment(setup::NoveltySetup, obs::Vector{Tuple{Float64, Float64}}, alg::Filter)
net = novelty_network(setup, length(obs))
runtime = Runtime(net, 0.0) # Set the time type to Float64 and initial time to 0
init_filter(alg, runtime)
is_novel = get_node(net, :is_novel)
velocity = get_node(net, :velocity)
observation = get_node(net, :observation)
for (time, x) in obs
evidence = Dict{Symbol, Score}(:observation => HardScore(x))
println("Observing ", x, " at time ", time)
# At a minimum, we need to include query and evidence variables in the filter step
filter_step(alg, runtime, Variable[is_novel, velocity, observation], time, evidence)
println("Probability of novel = ", probability(alg, runtime, is_novel, true))
println("Posterior mean of velocity = ", mean(alg, runtime, velocity))
end
end
```
After creating the network, we create a runtime. The call to `Runtime` takes a second
argument that not only sets the initial time but also establishes the type used to
represent time, which is `Float64`. We first need to initialize the filter with `init_filter` which runs the
initial time step, and get handles
to the variables we care about. Our observation sequence is a vector (sorted by increasing
time) of (time, value) pairs.
For each such pair, we create the evidence at that time point.
Then we run a `filter_step`.
Besides the algorithm and runtime, the filter step takes a vector of variables to instantiate,
the current time, and the evidence.
There is no need to instantiate all the variables at every filter step.
At a minimum, we need to instantiate evidence variables as well as any variables
we want to query.
Since we're going to query `is_novel` and `velocity`, we'll have to instantiate those
using their copy transition model.
However, we never need to instantiate the `known_velocity` and `novel_velocity` variables
after the initial time step.
Finally, we can answer queries about the current state in a similar way to the other
examples.
For the experiments, we create a setup and two sequences of observations, the second of
which is harder to explain with known behaviors.
```julia
# Known velocities are 0 and 1, novelty has mean 0 and standard deviation 10
setup = NoveltySetup([0.0, 1.0], [0.7, 0.3], 0.1, 0.0, 10.0, 1.0, 1.0)
obs1 = [(1.0, 2.1), (3.0, 5.8), (3.5, 7.5)] # consistent with velocity 2
obs2 = [(1.0, 4.9), (3.0, 17.8), (3.5, 20.5)] # consistent with velocity 6
```
We then use `CoherentPF(1000)` as the filtering algorithm. Current filtering algorithms in
Scruff combine an instantiation method that creates a window with an underlying
`InstantAlgorithm` to infer with the window. Available window creation methods include
synchronous, asynchronous, and coherent. Coherent is similar to asynchronous except that
it adds variables to the instantiation to maintain coherence of parent-child relationships.
In this example, it ensures that the position variable is also instantiated, not just the query and
evidence variables. `CoherentPF(1000)` describes a particle filter that uses a coherent window
creator and an importance sampling algorithm with 1000 particles. The example also shows
how you can similarly create a coherent BP algorithm. However, BP does not work well in
models with static variables because dependencies between the static variables are lost
between filtering steps.
Running this example produces output like the following for the particle filter:
Particle filter
Smaller velocity
Observing 2.1 at time 1.0
Probability of novel = 0.0351642575352557
Posterior mean of velocity = 0.5411884423148781
Observing 5.8 at time 3.0
Probability of novel = 0.057222570825582145
Posterior mean of velocity = 0.8705507592898075
Observing 7.5 at time 3.5
Probability of novel = 0.08166159149240186
Posterior mean of velocity = 1.007810909419299
Larger velocity
Observing 4.9 at time 1.0
Probability of novel = 0.6741688102988623
Posterior mean of velocity = 3.6150131656907174
Observing 17.8 at time 3.0
Probability of novel = 1.0
Posterior mean of velocity = 5.898986723263269
Observing 20.5 at time 3.5
Probability of novel = 1.0
Posterior mean of velocity = 5.86994402484129
## Scruff concepts
The central concepts of Scruff are:
- Sfuncs, or stochastic functions, which represent mathematical relationships between variables
- Operators, which define and implement computations on sfuncs
- Models, which specify how to create sfuncs in different situations
- Variables, which represent domain entities that may take on different values at different times
- Networks, which consist of variables and the dependencies between them
- Instances, which represent a specific instantiation of a variable at a point in time
- Algorithms, which use operations to perform computations on networks
- Runtimes, which manage instances as well as information used by algorithms
## Sfuncs
An `SFunc` has an input type, which is a tuple, and an output type.
Although the name implies probabilistic relationships, in principle sfuncs
can be used to represent any kind of information.
The representation of an sfunc is often quite minimal, with most of the detail contained
in operators. The general type is `SFunc{I <: Tuple, O}`.
### Dists
A `Dist{O}` is an `SFunc{Tuple{}, O}`.
In other words, a `Dist` represents an unconditional distribution with no parents.
Examples of `Dist` include `Constant`, `Cat`, `Flip`, and `Normal`.
### Scores
A `Score{I}` is an `SFunc{Tuple{I}, Nothing}`. In other words, it takes a single value of
type `I`, and rather than produce an output, it just associates information (typically a likelihood)
with its input. A `Score` is often used to represent evidence.
Examples of `Score` include `HardScore` (only a single value allowed),
`SoftScore` (allows multiple values), `LogScore` (similar to `SoftScore` but represented
in log form), `FunctionalScore` (score is computed by applying a function to the input),
`NormalScore` (representing a normal distribution around a value),
and `Parzen` (mixture of normal scores).
### Conditional Sfuncs
Scruff provides a range of ways to construct sfuncs representing conditional distributions.
These are organized in a type hierarchy:
— `Invertible`: deterministic functions with a deterministic inverse, enabling efficient operator implementations\
— `Det`: deterministic functions without an inverse\
└ `Switch`: chooses between multiple incoming choices based on first argument\
└ `LinearSwitch`: first argument is an integer and switch chooses corresponding result\
└ `If`: first argument is a Boolean and switch chooses appropriate other argument \
— `Conditional`: abstract representation of sfuncs that use first arguments to create sfunc to apply to other arguments\
└ `LinearGaussian`: sfunc representing normal distribution whose mean is a linear function of the parents\
└ `Table`: abstract representation of sfuncs that use first arguments to choose sfunc to apply from a table\
└ `DiscreteCPT`: discrete conditional probability table\
└ `CLG`: conditional linear Gaussian model: table of linear Gaussians depending on discrete parents\
— `Separable`: Mixture of `DiscreteCPT` to decompose dependency on many parents, enabling efficient operator implementations\
### Compound Sfuncs
Compound sfuncs can be thought of as a construction kit to compose more complex sfuncs out of
simpler ones. These also include some higher-order sfuncs.
- `Generate`: generate a value from its sfunc argument
- `Apply`: similar to generate, but the sfunc argument is applied to another argument
- `Chain`: apply a function to the arguments to produce an sfunc, then generate a value from the sfunc
- `Mixture`: choose which sfunc to use to generate values according to a probability distribution
- `Serial`: connect any number of sfuncs in series
- `NetworkSFunc`: connect any number of sfuncs according to a graph
- `Expander`: apply a function to the arguments to produce a network that can be used recursively
## Operators
An operator represents a computation that can be performed on an sfunc.
An operator is not just a function or a method.
It is an object that can contain information (such as configuration instructions)
and can be reasoned about, for example to specify policies to choose between alternative
implementations.
Operators consist of definitions, created using `@op_def`, which specify type information,
and implementation, created using `@impl`.
Here are some of the most commonly used operators:
- `cpdf(sf, parent_values, x)` returns the conditional probability of `x` given `parent_values`
- `logcpdf(sf, parent_values, x)`
- `sample(sf, parent_values)`
- `get_score(sf, x)` returns the score associated with `x`
- `get_log_score(sf, x)`
- `support(sf, parent_ranges, target_size, current)` computes a range of values for the sfunc given that the parents have values in `parent_ranges`. `target_size` is guidance as to the size of support to produce, which does not need to be obeyed precisely. `current` is a list of values that should be included in the support, which is useful for iterative refinement.
The above operators will be implemented specifically for a given sfunc. In general, an sfunc does not need to support all operators. For example, typically only a `Score` will support `get_score` and `get_log_score`.
Some sfuncs will not be able to support sampling or density computation, and that's okay.
For example, if an sfunc doesn't support `sample`, but it does support `cpdf`, and that sfunc is always observed,
it can be used in likelihood weighting.
If it is not always observed, it won't be usable in importance sampling but it might be usable in BP.
The goal is to enable representations to be used as much as possible, rather than require everything to work uniformly.
This is where the scruffiness of Scruff comes in.
There are a variety of operators useful in BP and related algorithms.
Most of these have default implementations that work for sfuncs in general and you don't
need to worry about implementing them for a new sfunc.
The two that need to be implemented specifically are:
- `compute_pi(sf, range, parent_ranges, parent_pi_messages)`, which integrates over the parents to produce a distribution over the value of the instance associated with the sfunc. The `parent_pi_messages`, as well as the computed distribution, are represented as `Dist`s, rather than vectors or anything specific, which enables great flexibility in implementation.
- `send_lambda(sf, lambda, range, parent_ranges, parent_pi_messages, parent_index)` computes the lambda message to be sent to the parent specified by `parent_index`.
Once these two operators are implemented for an sfunc, the sfunc can participate in any BP
algorithm. Furthermore, sfuncs at the leaves of a network do not need to implement `compute_pi`.
For example, `send_lambda` can be implemented for a feedforward neural network, enabling it
to be included in a general BP inference process.
## Models
One of Scruff's key features is the ability to reason flexibly about variables that vary over time, and, in future, space. This is accomplished using models, which specify how to make the sfunc to use for a particular instance of a variable. Currently, Scruff's `models` library is relatively small. We plan to expand it in future, for example with learning models that improve their sfuncs based on experience.
Here is the current type hierarchy of models
—`InstantModel`: for a variable with no dependencies on previous time points\
└ `TimelessInstantModel`: an `InstantModel` where the sfunc also does not depend on the current time\
└ `SimpleModel`: a `TimelessInstantModel` in which the sfunc to use is passed in the definition of the model\
— `FixedTimeModel`: a model for a variable that depends on its own state at the previous time point and other variables at the current time point. The delta between the current and previous time point must be a fixed `dt`.\
└ `TimelessFixedTimeModel`: a `FixedTimeModel` in which the sfunc does not depend on the current time\
└ `HomogenousModel`: a `TimelessFixedTimeModel` in which the initial and transition sfuncs are passed in the definition of the model\
— `VariableTimeModel`: a model for a variable whose transition sfunc depends on the time intervals since the instantiations of its parents (which may be at different times)\
└ `StaticModel`: a model for a variable whose value is set in the initial time point and never changes afterward\
## Networks
Networks contain nodes, which are either variables or placeholders. Unlike variables, placeholders are not associated with models. Rather, they are intended to indicate values that should be received from outside the network. They are particularly useful for recursive reasoning, as well as dynamic inference.
An `InstantNetwork` is created with two to four arguments:
- A vector of variables
- A variable graph, associating variables with their parents. If a variable has no parents, it can be omitted from the graph.
- (Optional) A vector of placeholders, which defaults to empty
- (Optional) A vector of outputs, which should be a subset of the variables, again defaults to empty. This is intended to support providing an interface to networks that enables internal nodes and embedded networks to be eliminated, but this feature is not used yet.
A `DynamicNetwork` is created with three to six arguments
- A vector of variables
- An initial variable graph
- A transition variable graph
- (Optional) A vector of initial placeholders, defaults to empty
- (Optional) A vector of transition placeholders, defaults to empty
- (Optional) A vector of outputs, defaults to empty
## Algorithms
Scruff's algorithms library is structured so that more complex algorithms can be built out of simpler algorithms. The basic algorithms are instances of `InstantAlgorithm` and run on an `InstantNetwork`.
Scruff currently provides the following hierarchy of instant algorithms. We intend to expand this list over time:
— `InstantAlgorithm`: abstract type for which implementations must implement the `infer` method\
└ `Importance`: general importance sampling framework\
└ `Rejection`: rejection sampling\
└ `LW`: likelihood weighting\
└ Custom proposal: An importance sampling algorithm can be made from a proposal scheme using `make_custom_proposal`. A proposal scheme specifies alternative sfuncs to use as alternatives to the prior distribution for specific sfuncs\
└ `BP`: general belief propagation framework\
└ `ThreePassBP`: non-loopy belief propagation\
└ `LoopyBP`\
└ `VE`: variable elimination
Scruff provides iterative algorithms that gradually improve their answers over time. These follow the following hierarchy:
— `IterativeAlgorithm`: abstract type for which implementations must implement the `prepare` and `refine` methods\
└ `IterativeSampler`: iterative algorithm that uses a sampler to increase the number of samples each refinement. For example, you can use `IterativeSampler(LW(1000))` to use a likelihood weighting algorithm that adds 1,000 more samples on each call to `refine`.\
└ `LazyInference`: an algorithm that expands the recursion depth and ranges of variables on each call to `refine` and then invokes an `InstantAlgorithm`\
└ `LSFI`: a `LazyInference` algorithm that uses variable elimination as its instant algorithm
For filtering, Scruff provides the general `Filter` class, for which implementations must implement the `init_filter` and `filter_step` methods. All current filter implementations in Scruff derive from `WindowFilter`, where, on each call to `filter_step`, the algorithm first creates an `InstantNetwork` representing a window and then invokes an `InstantAlgorithm`. To create a `WindowFilter`, you choose a windowing method from `SyncWindow`, `AsyncWindow`, and `CoherentWindow`, and specify the instant algorithm to use. For example, Scruff provides the following constructor for asynchronous BP:
AsyncBP(range_size = 10, T = Float64) = WindowFilter(AsyncWindow{T}(), ThreePassBP(range_size))
Once algorithms have been run, queries can be answered using a uniform interface. This includes methods like
`marginal`, `joint`, `probability` (which could be the probability of a specific value or the probability of a predicate), `expectation`, `mean`, and `variance`. As usual, not all algorithms need implement all queries.
When you implement a new algorithm, you can specify how to answer queries using a standard `answer` method. Take a look at algorithm implementations to see how this works.
## The runtime
Unless you are implementing a new algorithm, you can largely ignore details of the runtime after you have created it, as everything happens under the hood.
In general, the responsibilities of the runtime are to:
- Instantiate variables and associate them with the correct instance parents and sfunc
- Identify the appropriate instances of variables at different points in time
- Store and retrieve values associated with instances
- Store and retrieve algorithm state across multiple invocations (e.g., using `refine`)
- Manage passing of messages between variables
## Future work
Future work in Scruff will follow five main lines:
developing more extensive libraries, including integration of other frameworks;
developing a larger suite of algorithms using compositional methods;
developing a more flexible framework of networks and recursive models;
creating spatial and spatiotemporal models with the same flexibility as current temporal models;
and developing operators for performance characterization and optimization.
We welcome contributions from the user community.
If any of these items catches your interest, let us know and we will be happy to help
with design and development.
### Larger libraries and integration of other frameworks
Scruff's current library, particularly of SFuncs, is fairly minimal, and needs to be extended to provide a fully functional probabilistic programming framework.
Our intent is not to write sfuncs ourselves, but rather to wrap existing implementations wherever possible.
An immediate goal is to wrap `Distributions.jl`, which will provide a wide range of `Dist` sfuncs.
We also want to integrate with other probabilistic programming frameworks in Julia, such as Gen.
In addition, the ability to use data-driven models that don't support sampling but do support inference is central to Scruff.
We want to develop a library of such models, again by integrating with existing frameworks and wrapping with appropriate observations.
Algorithms also need to be modified to take advantage of such models.
### More algorithms
It is important that algorithms in Scruff are well-structured and compositional.
The algorithms developed so far are a starter set that have been carefully designed with this philosophy.
Noticeable by its absence is MCMC, which is common in many probabilistic programming frameworks.
Gibbs sampling can be implemented as a message passing algorithm and fits well with the current framework.
Metropolis-Hastings and reversible jump algorithms will take more thought, but experience with other
probabilistic programming languages should show how to implement them in a consistent, compositional way.
A very natural next step is to generalize our algorithms to use other semirings besides sum-product. Again, this should happen in a compositional way.
It should be possible to say something like `with_semiring(semiring, algorithm)` and have all computations in operators invoked by the algorithm
drawn from the appropriate semiring. If we do this, it will be natural to write learning algorithms like EM and decision-making algorithms using maximum
expected utility using our instant algorithms. This will lead to powerful combinations. Would anyone like asynchronous online EM using BP?
Similarly, BP is just one example of a variational method. We want to expand BP into a more general compositional
variational inference library.
Finally, we want to generalize our elimination methods to employ conditioning as well as elimination.
### More flexible networks and recursion
The ability for networks to contain other networks is critical to structured, modular, representations
as well as efficient inference through encapsulation and conditional compilation.
In addition, the ability to generate contained networks stochastically supports open universe modeling.
Scruff currently supports these capabilities through Expanders.
However, Expanders were an early addition to Scruff and are not integrated all that well in the most recent Scruff development.
NetworkSFuncs are better integrated, but do not currently support containment and recursion.
We want to align Expanders and NetworkSFuncs to provide more general structured and recursive networks.
### Spatially flexible models
Scruff currently has a flexible representation of variables that vary over time, but not of variables that vary over space, or space and time together.
We want to provide spatiotemporal networks with the same flexibility as current DynamicNetworks.
Moving beyond spatial models, we also want to create a framework for reasoning about variables that vary across graphs, such as social networks.
### Performance Characterization and Optimization
Scruff's design is intended to enable reasoning about performance characteristics of operators and to support algorithms making decisions
about which operators to use. Multiple operator implementations can exist side by side for given sfuncs and algorithms can use policies
to decide which ones to use. This capability is currently only exercised in very rudimentary ways.
We want to take advantage of this capability to provide a wide set of performance characteristics and intelligent algorithms that use them. | Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"MIT"
] | 1.0.0 | e2b9dbaac6b8f44bc90afdc2c38eee5d2204d98f | code | 3013 | module VaxData
# Public abstract type hierarchy: every VAX numeric type is a Real.
export AbstractVax, VaxInt, VaxFloat
abstract type AbstractVax <: Real end
abstract type VaxInt <: AbstractVax end
abstract type VaxFloat <: AbstractVax end
# Base functions extended with VAX-specific methods in the included files.
import Base: IEEEFloat, convert, read, exponent, significand_bits, significand_mask,
    exponent_bits, exponent_mask, exponent_bias, floatmin, floatmax, typemin, typemax,
    nextfloat, prevfloat, zero, one, uinttype
export VaxInt16, VaxInt32, VaxFloatF, VaxFloatD, VaxFloatG, @vaxf_str, @vaxd_str, @vaxg_str
include("constants.jl")
include("vaxints.jl")
include("vaxfloatf.jl")
include("vaxfloatd.jl")
include("vaxfloatg.jl")
include("promote.jl")
include("math.jl")
# Union of all concrete VAX types; used for the generic I/O methods below.
const VaxTypes = Union{VaxInt16,VaxInt32,VaxFloatF,VaxFloatD,VaxFloatG}
"""
    convert(::Type{T}, b::BigFloat) where {T<:VaxFloat}

Round a `BigFloat` to the nearest representable value of the VAX type `T`.

Builds the mantissa bit-by-bit from `significand(b)`, rounds on the
remaining fraction, biases the exponent, and assembles the word-swapped VAX
bit pattern. Exponent overflow throws `InexactError`; underflow silently
returns `zero(T)`.
"""
function convert(::Type{T}, b::BigFloat) where {T<:VaxFloat}
    sig = abs(significand(b))
    U = uinttype(T)
    m = zero(uinttype(T))
    mbits = 0
    # Extract up to significand_bits(T)+1 mantissa bits by repeated doubling.
    while !iszero(sig) && mbits <= significand_bits(T)
        setbit = Bool(sig >= 1)
        m = U(m | setbit) << 1
        sig -= setbit
        sig *= 2
        mbits += 1
    end
    # Biased exponent, positioned within the first stored 16-bit word.
    e = ((exponent(b) + exponent_bias(T) + 1) % uinttype(T)) << (15 - exponent_bits(T))
    if e > exponent_mask(T)
        # overflow
        throw(InexactError(:convert, T, b))
    end
    # Round up when the leftover fraction is at least one half.
    0.5 ≤ sig < 1 && (m += one(m))
    # Align the mantissa to its field; shift direction depends on how many
    # bits the loop actually produced (>>> by a negative count shifts left).
    m >>>= -(significand_bits(T) - mbits) % Int
    if iszero(e)
        # underflow
        return zero(T)
    end
    # Store in the VAX (PDP-11, 16-bit word-swapped) layout.
    m = swap16bword(m)
    m &= significand_mask(T)
    return T(e | (U(signbit(b)) << 15) | m)
end
# Simple string-based round trip; not performance-critical, but exact.
"""
    convert(::Type{BigFloat}, v::T; precision=significand_bits(T)+1) where {T<:VaxFloat}

Convert a VAX float to a `BigFloat`. The default precision is one bit more
than the stored mantissa, accounting for the hidden leading bit.
"""
function convert(::Type{BigFloat}, v::T; precision=significand_bits(T)+1) where {T<:VaxFloat}
    iszero(v) && return BigFloat(0; precision)
    # Undo the PDP-11 16-bit word swap to obtain contiguous mantissa bits.
    m = swap16bword(v.x)
    bstr = bitstring(m)
    s = signbit(v) ? "-" : ""
    local sig
    setprecision(precision) do
        # "0.1…" prepends the hidden leading mantissa bit before the stored bits.
        sig = parse(BigFloat,
                    string(s, "0.1", @view(bstr[end-significand_bits(T)+1:end]));
                    base=2)
        sig *= big"2."^exponent(v)
    end
    return sig
end
"""
    read(s::IO, ::Type{T}) where {T<:VaxTypes}

Read one value of the VAX type `T` from stream `s` as raw bytes.
"""
function read(s::IO, ::Type{T}) where {T<:VaxTypes}
    ref = Ref{T}(0)
    read!(s, ref)
    return ref[]::T
end
export swap16bword
"""
    swap16bword(x)

Swap adjacent 16-bit words: for a 32-bit value the two halves are
exchanged, and for a 64-bit value the four 16-bit words are fully reversed.
This maps between the VAX/PDP-11 word-swapped storage order and a linear
bit layout.
"""
@inline swap16bword(x::Union{UInt32,Int32}) =
    ((x & typemax(UInt16)) << 16) | ((x >>> 16) & typemax(UInt16))

@inline function swap16bword(x::Union{UInt64,Int64})
    u = x % UInt64
    w1 = (u >>> 48) % UInt16  # highest 16-bit word of the input
    w2 = (u >>> 32) % UInt16
    w3 = (u >>> 16) % UInt16
    w4 = u % UInt16           # lowest 16-bit word of the input
    # Result is always UInt64, matching the historical behavior for Int64 input.
    return (UInt64(w4) << 48) | (UInt64(w3) << 32) | (UInt64(w2) << 16) | UInt64(w1)
end
# Render a VAX float as its string-macro literal form, e.g. `vaxf"1.5e0"`.
# Compact contexts print via the nearest IEEE type; otherwise via BigFloat.
function Base.show(io::IO, x::VaxFloat)
    T = typeof(x)
    if T === VaxFloatF
        letter = 'f'
    elseif T === VaxFloatD
        letter = 'd'
    else
        letter = 'g'
    end
    print(io, "vax", letter)
    if get(io, :compact, false)
        repr_str = if T === VaxFloatF
            # Float32 literals print with an 'f' exponent marker, but the
            # string-macro form expects 'e'.
            replace(repr(convert(Float32, x); context=IOContext(io)), "f" => "e")
        else
            repr(convert(Float64, x); context=IOContext(io))
        end
    else
        repr_str = repr(convert(BigFloat, x))
    end
    show(io, repr_str)
    return nothing
end
end # module
| VaxData | https://github.com/halleysfifthinc/VaxData.jl.git |
|
[
"MIT"
] | 1.0.0 | e2b9dbaac6b8f44bc90afdc2c38eee5d2204d98f | code | 1885 | # Floating point data format invariants
# Bit masks and field sizes shared by all conversion routines.
const SIGN_BIT = 0x80000000
const SIGN_BIT_64 = UInt64(SIGN_BIT)
const bmask16 = 0xFFFF
const bmask32 = 0xFFFFFFFF
const UNO = one(UInt32)
const UNO64 = one(UInt64)
# VAX floating point data formats (see VAX Architecture Reference Manual)
# F_floating: 32 bits, 8-bit exponent (bias 128), 23 mantissa bits + hidden bit.
const VAX_F_SIGN_BIT = SIGN_BIT
const VAX_F_EXPONENT_MASK = Int32(0x7F800000)
const VAX_F_EXPONENT_SIZE = UInt32(8)
const VAX_F_EXPONENT_BIAS = Int32(128)
const VAX_F_MANTISSA_MASK = UInt32(0x007FFFFF)
const VAX_F_MANTISSA_SIZE = UInt32(23)
const VAX_F_HIDDEN_BIT = UInt32( UNO << VAX_F_MANTISSA_SIZE )
# D_floating: 64 bits with the same exponent layout as F_floating; the
# *_MANTISSA_SIZE constant counts only the high-word mantissa bits (the
# remaining 32 bits are handled separately by the conversion code).
const VAX_D_EXPONENT_MASK = Int64(VAX_F_EXPONENT_MASK)
const VAX_D_EXPONENT_SIZE = UInt64(VAX_F_EXPONENT_SIZE)
const VAX_D_EXPONENT_BIAS = Int64(VAX_F_EXPONENT_BIAS)
const VAX_D_MANTISSA_MASK = UInt64(VAX_F_MANTISSA_MASK)
const VAX_D_MANTISSA_SIZE = UInt64(VAX_F_MANTISSA_SIZE)
const VAX_D_HIDDEN_BIT = UInt64(VAX_F_HIDDEN_BIT)
# G_floating: 64 bits, 11-bit exponent (bias 1024), 20 high-word mantissa bits.
const VAX_G_EXPONENT_MASK = Int64(0x7FF00000)
const VAX_G_EXPONENT_SIZE = UInt64(11)
const VAX_G_EXPONENT_BIAS = Int64(1024)
const VAX_G_MANTISSA_MASK = UInt64(0x000FFFFF)
const VAX_G_MANTISSA_SIZE = UInt64(20)
const VAX_G_HIDDEN_BIT = UInt64( UNO << VAX_G_MANTISSA_SIZE )
# IEEE floating point data formats (see Alpha Architecture Reference Manual)
# S_floating = IEEE single precision (Float32).
const IEEE_S_SIGN_BIT = SIGN_BIT
const IEEE_S_EXPONENT_MASK = Int32(0x7F800000)
const IEEE_S_EXPONENT_SIZE = UInt32(8)
const IEEE_S_EXPONENT_BIAS = Int32(127)
const IEEE_S_MANTISSA_MASK = UInt32(0x007FFFFF)
const IEEE_S_MANTISSA_SIZE = UInt32(23)
const IEEE_S_HIDDEN_BIT = UInt32( UNO << IEEE_S_MANTISSA_SIZE )
# T_floating = IEEE double precision (Float64); again only the 20 high-word
# mantissa bits are counted here.
const IEEE_T_EXPONENT_MASK = Int64(0x7FF00000)
const IEEE_T_EXPONENT_SIZE = UInt64(11)
const IEEE_T_EXPONENT_BIAS = Int64(1023)
const IEEE_T_MANTISSA_MASK = UInt64(0x000FFFFF)
const IEEE_T_MANTISSA_SIZE = UInt64(20)
const IEEE_T_HIDDEN_BIT = UInt64( 1 << IEEE_T_MANTISSA_SIZE )
|
[
"MIT"
] | 1.0.0 | e2b9dbaac6b8f44bc90afdc2c38eee5d2204d98f | code | 2826 | # Define common arithmetic operations (default for two of the same unknown number type is to no-op error)
# Fallback definitions of the common binary operators for two operands of
# the same AbstractVax type: promote both operands (promotion always yields
# a valid IEEE number type, even for two identical AbstractVax types) and
# retry the operation on the promoted values.
for op in (:+, :-, :*, :/, :^, :<, :<=)
    @eval Base.$op(x::T, y::T) where {T<:AbstractVax} = ($op)(promote(x, y)...)
end
# The sign occupies bit 15 of the stored pattern (the high bit of the first
# 16-bit word in VAX storage order).
function Base.signbit(x::VaxFloat)
    return (x.x & 0x8000) != 0
end

# Negation toggles the stored sign bit; the magnitude bits are untouched.
function Base.:-(x::T) where {T<:VaxFloat}
    return T(xor(x.x, 0x8000))
end
"""
    <(x::T, y::T) where {T<:VaxFloat}

Order two VAX floats of the same type by comparing their word-swapped bit
patterns. After the word swap, the sign-masked pattern is monotone in the
magnitude, so two values of equal sign compare by magnitude — reversed when
both are negative (larger magnitude means smaller value).

Fixes the previous behavior, which compared magnitudes in the same
direction for both signs and therefore inverted the ordering of two
negative values (e.g. reported `-1 < -2`).
"""
function Base.:<(x::T, y::T) where {T<:VaxFloat}
    if signbit(x) == signbit(y)
        magmask = typemax(uinttype(T)) >> 1
        mx = swap16bword(x.x) & magmask
        my = swap16bword(y.x) & magmask
        return signbit(x) ? my < mx : mx < my
    else
        # Differing signs: x < y exactly when x is the negative one.
        return signbit(x) && !signbit(y)
    end
end
"""
    <=(x::T, y::T) where {T<:VaxFloat}

Non-strict counterpart of `<` for same-type VAX floats; see that method for
the bit-pattern comparison scheme. As there, the magnitude comparison is
reversed when both operands are negative (previously it was not, inverting
the ordering of two negative values).
"""
function Base.:<=(x::T, y::T) where {T<:VaxFloat}
    if signbit(x) == signbit(y)
        magmask = typemax(uinttype(T)) >> 1
        mx = swap16bword(x.x) & magmask
        my = swap16bword(y.x) & magmask
        return signbit(x) ? my <= mx : mx <= my
    else
        # Differing signs: x <= y exactly when x is the negative one.
        return signbit(x) && !signbit(y)
    end
end
"""
    exponent(v::VaxFloat)

Return the unbiased exponent of `v`, extracted from the exponent field of
the first stored 16-bit word.
"""
function exponent(v::VaxFloat)
    T = typeof(v)
    biased = (v.x & exponent_mask(T)) >> (15 - exponent_bits(T))
    return Int(biased) - exponent_bias(T)
end
# copied and slightly modified from Base
"""
    nextfloat(f::VaxFloat, d::Integer)

Return the `d`-th representable value after `f` in the direction of
increasing value (decreasing for negative `d`), saturating at
`typemax`/`typemin`. Bit patterns that are not valid finite VAX values
(the reserved operand and "dirty zero" ranges) are skipped over.
"""
function nextfloat(f::VaxFloat, d::Integer)
    d == 0 && return f
    # Saturate at the extremes.
    f == typemax(f) && (d > 0) && return typemax(f)
    f == typemin(f) && (d < 0) && return typemin(f)
    F = typeof(f)
    # Work on the word-swapped (linearly ordered) bit pattern.
    fumax = swap16bword(typemax(f).x)
    U = typeof(fumax)
    fi = signed(swap16bword(f.x))
    fneg = fi < 0
    fu = unsigned(fi & typemax(fi))  # magnitude bits only (sign stripped)
    dneg = d < 0
    da = Base.uabs(d)
    if da > typemax(U)
        # A step this large always lands on an extreme.
        fneg = dneg
        fu = fumax
    else
        du = da % U
        if fneg ⊻ dneg
            # Stepping toward (and possibly across) zero.
            if du > fu
                fu = min(fumax, du - fu)
                fneg = !fneg
            else
                fu = fu - du
            end
        else
            # Stepping away from zero; clamp at the maximum magnitude.
            if fumax - fu < du
                fu = fumax
            else
                fu = fu + du
            end
        end
    end
    if fneg
        fu |= one(U) << (sizeof(fu)*8 - 1)
    end
    # Jump past the VAX FP reserved operand (sign = 1, exp = 0, mant ≠ 0)
    dz_hi = ~(swap16bword(exponent_mask(F)) % U)
    dz_lo = dz_hi - swap16bword(significand_mask(F))
    if dz_lo ≤ fu ≤ dz_hi
        @debug "reserved op", dneg
        return dneg ? nextfloat(F(U(0x00008000) | (0x1 << (15 - exponent_bits(F)))), d + 1) :
            nextfloat(zero(F), d - 1)
    elseif fu ≤ swap16bword(significand_mask(F))
        # "Dirty zero": zero exponent, zero sign, nonzero mantissa.
        @debug "dirty zero", dneg
        return dneg ? nextfloat(zero(F), d + 1) :
            nextfloat(floatmin(F), d - 1)
    end
    return F(swap16bword(fu))
end
# Single-step conveniences defined in terms of the general stepping method.
nextfloat(f::VaxFloat) = nextfloat(f,1)
prevfloat(f::VaxFloat) = nextfloat(f,-1)
prevfloat(f::VaxFloat, d::Integer) = nextfloat(f, -d)
| VaxData | https://github.com/halleysfifthinc/VaxData.jl.git |
|
[
"MIT"
] | 1.0.0 | e2b9dbaac6b8f44bc90afdc2c38eee5d2204d98f | code | 2666 | function Base.promote(x::T, y::T) where T <: AbstractVax
Base.@_inline_meta
px, py = Base._promote(x, y)
Base.not_sametype((x,y), (px,py))
px, py
end
# Base treats promotion of identical types as a no-op; this overload forces
# three same-type AbstractVax arguments through `_promote` so they still
# convert to a native IEEE type (otherwise the arithmetic fallbacks in
# math.jl would recurse forever).
function Base.promote(x::T, y::T, z::T) where T <: AbstractVax
    Base.@_inline_meta
    px, py, pz = Base._promote(x, y, z)
    Base.not_sametype((x,y,z), (px,py,pz))
    px, py, pz
end
# Vararg analogue of the same-type promotion overload above four or more
# identical AbstractVax arguments.
function Base.promote(x::T, y::T, z::T, a::T...) where T <: AbstractVax
    p = Base._promote(x, y, z, a...)
    Base.not_sametype((x, y, z, a...), p)
    p
end
# Mixed-type promotion rules. Vax types never survive promotion: any
# pairing with a native type (or another Vax type) promotes to a native
# Julia type of at least the same width.

# VaxInt16 promotes like an Int16.
Base.promote_rule(::Type{VaxInt16}, ::Type{T}) where T <: Union{Int8,Int16} = Int16
Base.promote_rule(::Type{VaxInt16}, ::Type{T}) where T <: Union{Int32,Int64,Int128} = T
Base.promote_rule(::Type{VaxInt16}, ::Type{T}) where T <: IEEEFloat = T

# VaxInt32 promotes like an Int32. (A redundant IEEEFloat-only method that
# duplicated the Union rule below was removed; both returned T.)
Base.promote_rule(::Type{VaxInt32}, ::Type{T}) where T <: Union{Int8,Int16,Int32,VaxInt16} = Int32
Base.promote_rule(::Type{VaxInt32}, ::Type{T}) where T <: Union{Int64,Int128,IEEEFloat} = T

# VaxFloatF is a 32-bit float format; only Float64 widens it.
Base.promote_rule(::Type{VaxFloatF}, ::Type{T}) where T <: Union{Int8,Int16,Int32,Int64,Int128} = Float32
Base.promote_rule(::Type{VaxFloatF}, ::Type{T}) where T <: Union{Float16,Float32} = Float32
Base.promote_rule(::Type{VaxFloatF}, ::Type{T}) where T <: VaxInt = Float32
Base.promote_rule(::Type{VaxFloatF}, ::Type{Float64}) = Float64

# VaxFloatD and VaxFloatG are 64-bit float formats.
Base.promote_rule(::Type{VaxFloatD}, ::Type{T}) where T <: Union{Int8,Int16,Int32,Int64,Int128} = Float64
Base.promote_rule(::Type{VaxFloatD}, ::Type{T}) where T <: Union{VaxFloatF,VaxFloatG} = Float64
Base.promote_rule(::Type{VaxFloatD}, ::Type{T}) where T <: VaxInt = Float64
Base.promote_rule(::Type{VaxFloatD}, ::Type{T}) where T <: IEEEFloat = Float64
Base.promote_rule(::Type{VaxFloatG}, ::Type{T}) where T <: Union{Int8,Int16,Int32,Int64,Int128} = Float64
Base.promote_rule(::Type{VaxFloatG}, ::Type{T}) where T <: Union{VaxFloatF,VaxFloatG} = Float64
Base.promote_rule(::Type{VaxFloatG}, ::Type{T}) where T <: VaxInt = Float64
Base.promote_rule(::Type{VaxFloatG}, ::Type{T}) where T <: IEEEFloat = Float64

# Big types always win.
Base.promote_rule(::Type{BigFloat}, ::Type{<:VaxFloat}) = BigFloat
Base.promote_rule(::Type{BigFloat}, ::Type{<:VaxInt}) = BigFloat
Base.promote_rule(::Type{BigInt}, ::Type{<:VaxFloat}) = BigFloat
Base.promote_rule(::Type{BigInt}, ::Type{<:VaxInt}) = BigFloat
# `promote_type(T, T)` short-circuits to `T` without consulting
# `promote_rule`, so identical Vax pairings need these direct overloads in
# order to land on a native Julia type.
Base.promote_type(::Type{VaxInt16}, ::Type{VaxInt16}) = Int16
Base.promote_type(::Type{VaxInt32}, ::Type{VaxInt32}) = Int32
Base.promote_type(::Type{VaxFloatF}, ::Type{VaxFloatF}) = Float32
Base.promote_type(::Type{VaxFloatD}, ::Type{VaxFloatD}) = Float64
Base.promote_type(::Type{VaxFloatG}, ::Type{VaxFloatG}) = Float64
| VaxData | https://github.com/halleysfifthinc/VaxData.jl.git |
|
[
"MIT"
] | 1.0.0 | e2b9dbaac6b8f44bc90afdc2c38eee5d2204d98f | code | 4346 | struct VaxFloatD <: VaxFloat
x::UInt64
VaxFloatD(x::Union{UInt32,UInt64}) = new(UInt64(ltoh(x)))
end
"""
    VaxFloatD(x::Real)

Convert `x` to VAX D_floating format (via `Float64`).

`±0.0` maps to VAX true zero; `±Inf`/`NaN` throw `InexactError`; IEEE
subnormals are renormalized into the VAX exponent range; exponent underflow
silently returns zero and overflow throws `InexactError`.
"""
function VaxFloatD(x::T) where {T<:Real}
    y = reinterpret(UInt64, convert(Float64, x))
    # Split the IEEE double into 32-bit halves, ordered by host endianness.
    part1 = y & bmask32
    part2 = (y >>> 32) & bmask32
    if ENDIAN_BOM === 0x04030201
        vaxpart2 = part1
        ieeepart1 = part2
    else
        vaxpart2 = part2
        ieeepart1 = part1
    end
    e = reinterpret(Int64, ieeepart1 & IEEE_T_EXPONENT_MASK)
    if ieeepart1 & ~SIGN_BIT_64 === zero(UInt64)
        # ±0.0 becomes 0.0
        return zero(VaxFloatD)
    elseif e === IEEE_T_EXPONENT_MASK
        # Vax types don't support ±Inf or NaN
        throw(InexactError(:VaxFloatD, VaxFloatD, x))
    else
        e >>>= IEEE_T_MANTISSA_SIZE
        m = ieeepart1 & IEEE_T_MANTISSA_MASK
        if e === zero(Int64)
            # IEEE subnormal: shift the mantissa up (pulling bits from the
            # low half) until the hidden bit appears, adjusting the exponent.
            m = (m << 1) | (vaxpart2 >>> 31)
            vaxpart2 <<= 1
            while m & IEEE_T_HIDDEN_BIT === zero(UInt64)
                m = (m << 1) | (vaxpart2 >>> 31)
                vaxpart2 <<= 1
                e -= one(Int64)
            end
            m &= IEEE_T_MANTISSA_MASK
        end
        # Re-bias the exponent for the VAX convention.
        e += one(Int64) + VAX_D_EXPONENT_BIAS - IEEE_T_EXPONENT_BIAS
        if e <= zero(Int64)
            # Silent underflow
            return zero(VaxFloatD)
        elseif e > (2 * VAX_D_EXPONENT_BIAS - 1)
            # Overflow
            throw(InexactError(:VaxFloatD, VaxFloatD, x))
        else
            # Assemble sign, exponent, and the high mantissa bits; the low
            # mantissa bits remain in vaxpart2.
            vaxpart = (ieeepart1 & SIGN_BIT_64) |
                (e << VAX_D_MANTISSA_SIZE) |
                (m << (VAX_D_MANTISSA_SIZE - IEEE_T_MANTISSA_SIZE)) |
                (vaxpart2 >>> (32 - (VAX_D_MANTISSA_SIZE - IEEE_T_MANTISSA_SIZE)))
            vaxpart2 <<= (VAX_D_MANTISSA_SIZE - IEEE_T_MANTISSA_SIZE)
        end
    end
    # Emit the four 16-bit words in VAX (PDP-11) storage order.
    vaxpart_1 = vaxpart & bmask16
    vaxpart_2 = (vaxpart >>> 16) & bmask16
    vaxpart_3 = vaxpart2 & bmask16
    vaxpart_4 = (vaxpart2 >>> 16) & bmask16
    res = htol((vaxpart_3 << 48) |
        (vaxpart_4 << 32) |
        (vaxpart_1 << 16) |
        vaxpart_2)
    return VaxFloatD(res)
end
"""
    convert(::Type{Float64}, x::VaxFloatD)

Convert a VAX D_floating value to an IEEE `Float64`.

The four stored 16-bit words are re-ordered first. A "dirty zero" (zero
exponent, sign clear) converts to `0.0`; the VAX reserved operand (zero
exponent, sign set) throws `InexactError`. D-format exponents always fit
inside the Float64 exponent range, so no overflow checks are needed.
"""
function convert(::Type{Float64}, x::VaxFloatD)
    y = ltoh(x.x)
    # Reassemble the two logical 32-bit halves from the word-swapped storage.
    vaxpart_1 = y & bmask16
    vaxpart_2 = (y >>> 16) & bmask16
    vaxpart1 = (vaxpart_1 << 16) | vaxpart_2
    vaxpart_3 = (y >>> 32) & bmask16
    vaxpart_4 = (y >>> 48) & bmask16
    vaxpart2 = (vaxpart_3 << 16) | vaxpart_4
    if vaxpart1 & VAX_D_EXPONENT_MASK === zero(UInt64)
        if vaxpart1 & SIGN_BIT_64 === SIGN_BIT_64
            # Reserved floating-point reserved operand
            throw(InexactError(:convert, Float64, x))
        end
        # Dirty zero
        return zero(Float64)
    else
        # Shift the mantissa down and re-bias the exponent in one subtraction.
        ieeepart1 = ((vaxpart1 & SIGN_BIT_64) |
            ((vaxpart1 & ~SIGN_BIT_64) >>>
            (VAX_D_MANTISSA_SIZE - IEEE_T_MANTISSA_SIZE))) -
            ((UNO64 + VAX_D_EXPONENT_BIAS - IEEE_T_EXPONENT_BIAS) << IEEE_T_MANTISSA_SIZE)
        ieeepart2 = (vaxpart1 << (32 - (VAX_D_MANTISSA_SIZE - IEEE_T_MANTISSA_SIZE))) |
            (vaxpart2 >>> (VAX_D_MANTISSA_SIZE - IEEE_T_MANTISSA_SIZE))
        if ENDIAN_BOM === 0x04030201
            out1 = ieeepart2
            out2 = ieeepart1
        else
            out1 = ieeepart1
            out2 = ieeepart2
        end
    end
    res = (out2 << 32) | (out1 & bmask32)
    return reinterpret(Float64, res)
end
# Conversions to the smaller IEEE floats and to integers are routed through
# Float64.
convert(::Type{T}, x::VaxFloatD) where {T<:Union{Float16,Float32,Integer}} =
    convert(T, convert(Float64, x))
"""
    vaxd"1.5"

Construct a `VaxFloatD` literal by parsing the string at full D-format
precision (one bit more than the stored mantissa, for the hidden bit).
"""
macro vaxd_str(str)
    return convert(VaxFloatD, BigFloat(str; precision=significand_bits(VaxFloatD) + 1))
end
# D_floating traits. Bit-pattern constants are written in the stored
# (word-swapped) layout, which is why the masks appear split across words.
floatmax(::Type{VaxFloatD}) = VaxFloatD(0xffffffffffff7fff)
floatmin(::Type{VaxFloatD}) = VaxFloatD(0x0000000000000080)
typemax(::Type{VaxFloatD}) = VaxFloatD(0xffffffffffff7fff)
typemin(::Type{VaxFloatD}) = VaxFloatD(typemax(UInt64))
zero(::Type{VaxFloatD}) = VaxFloatD(0x0000000000000000)
one(::Type{VaxFloatD}) = VaxFloatD(0x0000000000004080)
uinttype(::Type{VaxFloatD}) = UInt64
exponent_bits(::Type{VaxFloatD}) = VAX_D_EXPONENT_SIZE
exponent_mask(::Type{VaxFloatD}) = UInt64(0x00007f80)
exponent_bias(::Type{VaxFloatD}) = VAX_D_EXPONENT_BIAS
# D format stores 55 mantissa bits (64 - sign - 8-bit exponent).
significand_bits(::Type{VaxFloatD}) = 64 - 1 - VAX_D_EXPONENT_SIZE
significand_mask(::Type{VaxFloatD}) = 0xffffffffffff007f
| VaxData | https://github.com/halleysfifthinc/VaxData.jl.git |
|
[
"MIT"
] | 1.0.0 | e2b9dbaac6b8f44bc90afdc2c38eee5d2204d98f | code | 3240 | struct VaxFloatF <: VaxFloat
x::UInt32
VaxFloatF(x::UInt32) = new(ltoh(x))
end
"""
    VaxFloatF(x::Real)

Convert `x` to VAX F_floating format (via `Float32`).

`±0.0` maps to VAX true zero; `±Inf`/`NaN` throw `InexactError`; IEEE
subnormals are renormalized into the VAX exponent range; exponent underflow
silently returns zero and overflow throws `InexactError`.
"""
function VaxFloatF(x::T) where {T<:Real}
    ieeepart1 = reinterpret(UInt32, convert(Float32, x))
    e = reinterpret(Int32, ieeepart1 & IEEE_S_EXPONENT_MASK)
    if ieeepart1 & ~SIGN_BIT === zero(UInt32)
        # ±0.0 becomes 0.0
        return zero(VaxFloatF)
    elseif e === IEEE_S_EXPONENT_MASK
        # Vax types don't support ±Inf or NaN
        throw(InexactError(:VaxFloatF, VaxFloatF, x))
    else
        e >>>= VAX_F_MANTISSA_SIZE
        m = ieeepart1 & VAX_F_MANTISSA_MASK
        if e === zero(Int32)
            # IEEE subnormal: shift the mantissa up until the hidden bit
            # appears, decrementing the exponent as we go.
            m <<= 1
            while m & VAX_F_HIDDEN_BIT === zero(UInt32)
                m <<= UNO
                e -= one(Int32)
            end
            m &= VAX_F_MANTISSA_MASK
        end
        # Re-bias the exponent for the VAX convention.
        e += one(Int32) + VAX_F_EXPONENT_BIAS - IEEE_S_EXPONENT_BIAS
        if e <= 0
            # Silent underflow
            return zero(VaxFloatF)
        elseif e > (2 * VAX_F_EXPONENT_BIAS - 1)
            # Overflow
            throw(InexactError(:VaxFloatF, VaxFloatF, x))
        else
            vaxpart = (ieeepart1 & SIGN_BIT) | (e << VAX_F_MANTISSA_SIZE) | m
        end
    end
    # Swap the 16-bit words into VAX (PDP-11) storage order.
    vaxpart = htol(vaxpart)
    vaxpart1 = vaxpart & bmask16
    vaxpart2 = (vaxpart >>> 16) & bmask16
    vaxpart1 = (vaxpart1 << 16) | vaxpart2
    return VaxFloatF(vaxpart1)
end
"""
    convert(::Type{Float32}, x::VaxFloatF)

Convert a VAX F_floating value to an IEEE `Float32`.

A "dirty zero" (zero exponent, sign clear) converts to `0.0f0`; the VAX
reserved operand (zero exponent, sign set) throws `InexactError`. The two
smallest VAX exponents land in the IEEE subnormal range.
"""
function convert(::Type{Float32}, x::VaxFloatF)
    y = x.x
    # Undo the 16-bit word swap of the stored form.
    vaxpart1 = y & bmask16
    vaxpart2 = (y >>> 16) & bmask16
    vaxpart1 = (vaxpart1 << 16) | vaxpart2
    e = reinterpret(Int32, vaxpart1 & VAX_F_EXPONENT_MASK)
    if e === zero(Int32)
        if (vaxpart1 & SIGN_BIT) === SIGN_BIT
            # Reserved floating-point reserved operand
            throw(InexactError(:convert, Float32, x))
        end
        # Dirty zero
        return zero(Float32)
    else
        e >>>= VAX_F_MANTISSA_SIZE
        e -= one(Int32) + VAX_F_EXPONENT_BIAS - IEEE_S_EXPONENT_BIAS
        if e > zero(Int32)
            # Normal range: re-bias the exponent in place with one subtraction.
            out = vaxpart1 -
                ((UNO + VAX_F_EXPONENT_BIAS - IEEE_S_EXPONENT_BIAS) <<
                IEEE_S_MANTISSA_SIZE)
        else
            # out will be a subnormal
            out = (vaxpart1 & SIGN_BIT) |
                ((VAX_F_HIDDEN_BIT | (vaxpart1 & VAX_F_MANTISSA_MASK)) >>> (UNO - e))
        end
    end
    return reinterpret(Float32, out)
end
# Every other numeric conversion goes through the exact `Float32` value.
convert(::Type{T}, x::VaxFloatF) where {T<:Union{Float16,Float64,Integer}} =
    convert(T, convert(Float32, x))
"""
    vaxf"..."

Construct a `VaxFloatF` literal by parsing the string at exactly the
F-format significand precision via `BigFloat`.
"""
macro vaxf_str(str)
    return convert(VaxFloatF, BigFloat(str; precision=significand_bits(VaxFloatF) + 1))
end
# Extremes and identities for the F format; the literals are raw
# word-swapped little-endian bit patterns.
floatmax(::Type{VaxFloatF}) = VaxFloatF(0xffff7fff)
floatmin(::Type{VaxFloatF}) = VaxFloatF(0x00000080)
typemax(::Type{VaxFloatF}) = VaxFloatF(0xffff7fff)
# `typemin` is the all-ones bit pattern.
typemin(::Type{VaxFloatF}) = VaxFloatF(typemax(UInt32))

zero(::Type{VaxFloatF}) = VaxFloatF(0x00000000)
one(::Type{VaxFloatF}) = VaxFloatF(0x00004080)

# Float-trait introspection, mirroring Base's IEEE float traits.
uinttype(::Type{VaxFloatF}) = UInt32
exponent_bits(::Type{VaxFloatF}) = VAX_F_EXPONENT_SIZE
exponent_mask(::Type{VaxFloatF}) = 0x00007f80
exponent_bias(::Type{VaxFloatF}) = VAX_F_EXPONENT_BIAS
significand_bits(::Type{VaxFloatF}) = VAX_F_MANTISSA_SIZE
significand_mask(::Type{VaxFloatF}) = 0xffff007f
| VaxData | https://github.com/halleysfifthinc/VaxData.jl.git |
|
[
"MIT"
] | 1.0.0 | e2b9dbaac6b8f44bc90afdc2c38eee5d2204d98f | code | 4284 | struct VaxFloatG <: VaxFloat
    # Raw 64-bit storage of the VAX G_floating bit pattern.
    x::UInt64
    # Accepts UInt32 too and widens; `ltoh` reconciles little-endian storage
    # with host byte order (identity on little-endian hosts).
    VaxFloatG(x::Union{UInt32,UInt64}) = new(UInt64(ltoh(x)))
end
# Construct a `VaxFloatG` (64-bit VAX G_floating) from any `Real` by routing
# through IEEE `Float64` and repacking the fields, then swapping the four
# 16-bit words into VAX storage order.
function VaxFloatG(x::T) where {T<:Real}
    y = reinterpret(UInt64, convert(Float64, x))
    # Split into two 32-bit halves; which half carries the sign/exponent
    # depends on the host's endianness.
    part1 = y & bmask32
    part2 = (y >>> 32) & bmask32
    if ENDIAN_BOM === 0x04030201
        vaxpart2 = part1
        ieeepart1 = part2
    else
        vaxpart2 = part2
        ieeepart1 = part1
    end
    e = reinterpret(Int64, ieeepart1 & IEEE_T_EXPONENT_MASK)
    if ieeepart1 & ~SIGN_BIT_64 === zero(UInt64)
        # ±0.0 becomes 0.0
        return zero(VaxFloatG)
    elseif e === IEEE_T_EXPONENT_MASK
        # Vax types don't support ±Inf or NaN
        throw(InexactError(:VaxFloatG, VaxFloatG, x))
    else
        e >>>= VAX_G_MANTISSA_SIZE
        m = ieeepart1 & VAX_G_MANTISSA_MASK
        if e === zero(Int64)
            # IEEE subnormal input: shift the 52-bit mantissa (spread across
            # both halves) left until the hidden bit appears, pulling bits up
            # from the low half and decrementing the exponent.
            m = (m << 1) | (vaxpart2 >>> 31)
            vaxpart2 <<= 1
            while m & VAX_G_HIDDEN_BIT === zero(UInt64)
                m = (m << 1) | (vaxpart2 >>> 31)
                vaxpart2 <<= 1
                e -= one(Int64)
            end
            m &= VAX_G_MANTISSA_MASK
        end
        # Re-bias the exponent from IEEE double (T format) to VAX G.
        e += one(Int64) + VAX_G_EXPONENT_BIAS - IEEE_T_EXPONENT_BIAS
        if e <= zero(Int64)
            # Silent underflow
            return zero(VaxFloatG)
        elseif e > (2 * VAX_G_EXPONENT_BIAS - 1)
            # Overflow
            throw(InexactError(:VaxFloatG, VaxFloatG, x))
        else
            vaxpart = (ieeepart1 & SIGN_BIT_64) | (e << VAX_G_MANTISSA_SIZE) | m
        end
    end
    # Rearrange the four 16-bit words into VAX (PDP-11-style) storage order.
    vaxpart_1 = vaxpart & bmask16
    vaxpart_2 = (vaxpart >>> 16) & bmask16
    vaxpart_3 = vaxpart2 & bmask16
    vaxpart_4 = (vaxpart2 >>> 16) & bmask16
    res = htol((vaxpart_3 << 48) |
               (vaxpart_4 << 32) |
               (vaxpart_1 << 16) |
               vaxpart_2)
    return VaxFloatG(res)
end
# Convert a `VaxFloatG` to IEEE `Float64` exactly.
#
# NOTE(review): the inner constructor already applies `ltoh`; applying
# `ltoh` again here differs from the `VaxFloatF` convert path (which reads
# `x.x` directly) — verify correctness on big-endian hosts.
function convert(::Type{Float64}, x::VaxFloatG)
    y = ltoh(x.x)
    # Undo the VAX word swap: rebuild the high and low 32-bit halves from
    # the four stored 16-bit words.
    vaxpart_1 = y & bmask16
    vaxpart_2 = (y >>> 16) & bmask16
    vaxpart1 = (vaxpart_1 << 16) | vaxpart_2
    vaxpart_3 = (y >>> 32) & bmask16
    vaxpart_4 = (y >>> 48) & bmask16
    vaxpart2 = (vaxpart_3 << 16) | vaxpart_4
    e = reinterpret(Int64, vaxpart1 & VAX_G_EXPONENT_MASK)
    if e === zero(Int64)
        if vaxpart1 & SIGN_BIT_64 === SIGN_BIT_64
            # Reserved floating-point reserved operand
            throw(InexactError(:convert, Float64, x))
        end
        # Dirty zero
        return zero(Float64)
    else
        e >>>= VAX_G_MANTISSA_SIZE
        # Re-bias the exponent from VAX G back to IEEE double.
        e -= one(Int64) + VAX_G_EXPONENT_BIAS - IEEE_T_EXPONENT_BIAS
        if e > zero(Int64)
            # Normal result: subtract the bias delta directly in place.
            ieeepart1 = vaxpart1 - ((UNO64 + VAX_G_EXPONENT_BIAS - IEEE_T_EXPONENT_BIAS) << IEEE_T_MANTISSA_SIZE)
            ieeepart2 = vaxpart2
        else
            # Subnormal result
            vaxpart1 = (vaxpart1 & (SIGN_BIT_64 | VAX_G_MANTISSA_MASK)) | VAX_G_HIDDEN_BIT
            ieeepart1 = (vaxpart1 & SIGN_BIT_64) | ((vaxpart1 & (VAX_G_HIDDEN_BIT | VAX_G_MANTISSA_MASK)) >>> (1 - e))
            ieeepart2 = (vaxpart1 << (31 + e)) | (vaxpart2 >>> (1 - e))
        end
        # Reassemble in host order.
        if ENDIAN_BOM === 0x04030201
            out1 = ieeepart2
            out2 = ieeepart1
        else
            out1 = ieeepart1
            out2 = ieeepart2
        end
    end
    res = (out2 << 32) | out1
    return reinterpret(Float64, res)
end
# Every other numeric conversion goes through the exact `Float64` value.
convert(::Type{T}, x::VaxFloatG) where {T<:Union{Float16,Float32,Integer}} =
    convert(T, convert(Float64, x))
"""
    vaxg"..."

Construct a `VaxFloatG` literal by parsing the string at exactly the
G-format significand precision via `BigFloat`.
"""
macro vaxg_str(str)
    return convert(VaxFloatG, BigFloat(str; precision=significand_bits(VaxFloatG) + 1))
end
# Extremes and identities for the G format; the literals are raw
# word-swapped little-endian bit patterns.
floatmax(::Type{VaxFloatG}) = VaxFloatG(0xffffffffffff7fff)
floatmin(::Type{VaxFloatG}) = VaxFloatG(0x0000000000000010)
typemax(::Type{VaxFloatG}) = VaxFloatG(0xffffffffffff7fff)
# `typemin` is the all-ones bit pattern.
typemin(::Type{VaxFloatG}) = VaxFloatG(typemax(UInt64))

zero(::Type{VaxFloatG}) = VaxFloatG(0x0000000000000000)
one(::Type{VaxFloatG}) = VaxFloatG(0x0000000000004010)

# Float-trait introspection, mirroring Base's IEEE float traits.
uinttype(::Type{VaxFloatG}) = UInt64
exponent_bits(::Type{VaxFloatG}) = VAX_G_EXPONENT_SIZE
exponent_mask(::Type{VaxFloatG}) = UInt64(0x00007ff0)
exponent_bias(::Type{VaxFloatG}) = VAX_G_EXPONENT_BIAS
significand_bits(::Type{VaxFloatG}) = 64 - 1 - VAX_G_EXPONENT_SIZE
significand_mask(::Type{VaxFloatG}) = 0xffffffffffff000f
| VaxData | https://github.com/halleysfifthinc/VaxData.jl.git |
|
[
"MIT"
] | 1.0.0 | e2b9dbaac6b8f44bc90afdc2c38eee5d2204d98f | code | 697 | struct VaxInt16 <: VaxInt
    # Raw 16-bit storage, kept in little-endian (VAX) byte order; `htol` is
    # the identity on little-endian hosts.
    x::UInt16
    VaxInt16(x::UInt16) = new(htol(x))
end

# Construct from any signed integer; `trunc` throws InexactError when the
# value does not fit in 16 bits, otherwise the bit pattern is reinterpreted.
VaxInt16(x::Signed) = VaxInt16(trunc(Int16,x) % UInt16)

# Recover the host-order signed value.
Base.convert(::Type{Int16}, x::VaxInt16) = ltoh(x.x) % Int16

# All wider numeric conversions route through `Int16`.
function Base.convert(::Type{T}, x::VaxInt16) where T <: Union{Int32,Int64,Int128,BigInt,AbstractFloat}
    return convert(T, convert(Int16, x))
end
struct VaxInt32 <: VaxInt
    # Raw 32-bit storage, kept in little-endian (VAX) byte order; `htol` is
    # the identity on little-endian hosts.
    x::UInt32
    VaxInt32(x::UInt32) = new(htol(x))
end

# Construct from any signed integer; `trunc` throws InexactError when the
# value does not fit in 32 bits, otherwise the bit pattern is reinterpreted.
VaxInt32(x::Signed) = VaxInt32(trunc(Int32,x) % UInt32)

# Recover the host-order signed value.
Base.convert(::Type{Int32}, x::VaxInt32) = ltoh(x.x) % Int32

# All other numeric conversions route through `Int32`.
# Fix: include `BigInt` in the target union for consistency with the
# corresponding `VaxInt16` method, which already supports it.
function Base.convert(::Type{T}, x::VaxInt32) where T <: Union{Int16,Int64,Int128,BigInt,AbstractFloat}
    return convert(T,convert(Int32,x))
end
| VaxData | https://github.com/halleysfifthinc/VaxData.jl.git |
|
[
"MIT"
] | 1.0.0 | e2b9dbaac6b8f44bc90afdc2c38eee5d2204d98f | code | 577 | using VaxData, Test, InteractiveUtils
# Top-level test driver: generic smoke tests, then one include per type.
@testset "General" begin
    # Overflowing conversion
    @test_throws InexactError convert(VaxFloatF, big"1.7e39")
    # `show` prints the string-macro literal form.
    @test sprint(show, vaxf"1.0") == "vaxf\"1.0\""
    @test -one(VaxFloatF) < one(VaxFloatF)
    @test one(VaxFloatF) < nextfloat(one(VaxFloatF))
    @test -one(VaxFloatF) <= one(VaxFloatF)
    @test one(VaxFloatF) <= nextfloat(one(VaxFloatF))
    # Stepping backwards by a negative count equals stepping forwards.
    @test prevfloat(one(VaxFloatF), -5) === nextfloat(one(VaxFloatF), 5)
end
include("vaxints.jl")
include("vaxfloatf.jl")
include("vaxfloatd.jl")
include("vaxfloatg.jl")
| VaxData | https://github.com/halleysfifthinc/VaxData.jl.git |
|
[
"MIT"
] | 1.0.0 | e2b9dbaac6b8f44bc90afdc2c38eee5d2204d98f | code | 4827 | @testset "Vax Float D" begin
    # Reference D_floating bit patterns paired element-wise with the IEEE
    # Float64 values they encode (positive/negative pairs).
    d8_vax = [ 0x0000000000004080,
               0x000000000000c080,
               0x0000000000004160,
               0x000000000000c160,
               0x68c0a2210fda4149,
               0x68c0a2210fdac149,
               0x48d81abbbdc27df0,
               0x48d81abbbdc2fdf0,
               0x5c7814541cea0308,
               0x5c7814541cea8308,
               0xcee814620652409e,
               0xcee814620652c09e]

    d8_ieee = Array{Float64}([ one(Float64),
                               -one(Float64),
                               3.5,
                               -3.5,
                               Float64(pi),
                               -Float64(pi),
                               1.0e37,
                               -1.0e37,
                               9.9999999999999999999999999e-38,
                               -9.9999999999999999999999999e-38,
                               1.2345678901234500000000000000,
                               -1.2345678901234500000000000000 ])

    @testset "Basic operators" begin
        @test signbit(zero(VaxFloatD)) == false
        @test signbit(one(VaxFloatD)) == false
        @test signbit(-one(VaxFloatD)) == true
        @test signbit(-(-one(VaxFloatD))) == false

        @test zero(VaxFloatD) < one(VaxFloatD)
        @test !(one(VaxFloatD) < one(VaxFloatD))
        @test !(one(VaxFloatD) < zero(VaxFloatD))
        @test one(VaxFloatD) <= one(VaxFloatD)

        # Stepping saturates at the extremes of the representable range.
        @test nextfloat(typemax(VaxFloatD)) == typemax(VaxFloatD)
        @test prevfloat(typemin(VaxFloatD)) == typemin(VaxFloatD)
        @test -prevfloat(-one(VaxFloatD)) == nextfloat(one(VaxFloatD))
        @test nextfloat(zero(VaxFloatD)) == floatmin(VaxFloatD)
        @test prevfloat(floatmin(VaxFloatD)) == zero(VaxFloatD)
        @test prevfloat(zero(VaxFloatD)) == -floatmin(VaxFloatD)
        @test nextfloat(-floatmin(VaxFloatD)) == zero(VaxFloatD)
    end

    @testset "Conversion..." begin
        for (vax, ieee) in zip(d8_vax, d8_ieee)
            @test VaxFloatD(vax) == VaxFloatD(ieee)
            @test convert(Float64, VaxFloatD(vax)) == ieee
        end
        @test convert(VaxFloatD, big"1.0") == one(VaxFloatD)
        @test convert(VaxFloatD, big"-1.0") == -one(VaxFloatD)
        # Round-trip through BigFloat carrying exactly D-format precision.
        bigpi = BigFloat(π; precision=Base.significand_bits(VaxFloatD)+1)
        bige = BigFloat(ℯ; precision=Base.significand_bits(VaxFloatD)+1)
        @test convert(BigFloat, convert(VaxFloatD, bigpi)) == bigpi
        @test convert(BigFloat, convert(VaxFloatD, bige)) == bige
    end

    @testset "Promotion..." begin
        # Arithmetic with any integer/float type should promote to Float64.
        for t in [subtypes(VaxInt); subtypes(VaxFloat); Int8; Int16; Int32; Int64; Int128; Float16; Float32; Float64]
            @test isa(one(t)*VaxFloatD(1), Float64)
        end
        @test isa(one(BigInt)*VaxFloatD(1), BigFloat)
        @test isa(one(BigFloat)*VaxFloatD(1), BigFloat)

        un = one(VaxFloatD)
        @test promote(un, un, un) == (1.0, 1.0, 1.0)
        @test promote(un, un, un, un) == (1.0, 1.0, 1.0, 1.0)
    end

    @testset "Number definitions" begin
        @test floatmax(VaxFloatD) == typemax(VaxFloatD)
        @test -typemax(VaxFloatD) == typemin(VaxFloatD)
        @test zero(VaxFloatD) == 0
        @test one(VaxFloatD) == 1
    end

    @testset "Edge cases" begin
        # Reserved Vax floating point operand
        @test_throws InexactError convert(Float64, VaxFloatD(UInt64(0x8000)))

        # Inf and NaN should error too
        @test_throws InexactError VaxFloatD(Inf64)
        @test_throws InexactError VaxFloatD(-Inf64)
        @test_throws InexactError VaxFloatD(NaN64)

        # Both IEEE zeros should be converted to Vax true zero
        @test VaxFloatD(-0.0) === VaxFloatD(0.0) === zero(VaxFloatD)

        # Dirty zero
        @test convert(Float64, VaxFloatD(UInt64(0x08))) === zero(Float64)

        # Numbers smaller than floatmin(VaxFloatD) should underflow
        @test VaxFloatD(prevfloat(convert(Float64, floatmin(VaxFloatD)))) === zero(VaxFloatD)
        @test VaxFloatD(convert(Float64, floatmin(VaxFloatD))) === floatmin(VaxFloatD)

        # Subnormals become zero
        @test VaxFloatD(prevfloat(floatmin(Float64))) == zero(VaxFloatD)

        # Numbers larger than floatmax(VaxFloatD) should error
        @test_throws InexactError VaxFloatD(nextfloat(convert(Float64, floatmax(VaxFloatD))))
        # Because the D Float has more precision, the conversion to Float64 and back to D Float will not be circular
        # @test VaxFloatD(convert(Float64, floatmax(VaxFloatD))) === floatmax(VaxFloatD)
    end

    @testset "IO" begin
        io = IOBuffer(reinterpret(UInt8, ones(VaxFloatD, 4)))
        @test read(io, VaxFloatD) === one(VaxFloatD)
        @test read!(io, Vector{VaxFloatD}(undef, 3)) == ones(VaxFloatD, 3)
    end
end
| VaxData | https://github.com/halleysfifthinc/VaxData.jl.git |
|
[
"MIT"
] | 1.0.0 | e2b9dbaac6b8f44bc90afdc2c38eee5d2204d98f | code | 4474 | @testset "Vax Float F" begin
    # Reference F_floating bit patterns paired element-wise with the IEEE
    # Float32 values they encode (positive/negative pairs).
    f4_vax = [ 0x00004080,
               0x0000C080,
               0x00004160,
               0x0000C160,
               0x0FD04149,
               0x0FD0C149,
               0xBDC27DF0,
               0xBDC2FDF0,
               0x1CEA0308,
               0x1CEA8308,
               0x0652409E,
               0x0652C09E ]

    f4_ieee = Array{Float32}([ 1.000000,
                               -1.000000,
                               3.500000,
                               -3.500000,
                               3.141590,
                               -3.141590,
                               9.9999999E+36,
                               -9.9999999E+36,
                               9.9999999E-38,
                               -9.9999999E-38,
                               1.23456789,
                               -1.23456789 ])

    @testset "Basic operators" begin
        @test signbit(zero(VaxFloatF)) == false
        @test signbit(one(VaxFloatF)) == false
        @test signbit(-one(VaxFloatF)) == true
        @test signbit(-(-one(VaxFloatF))) == false

        @test zero(VaxFloatF) < one(VaxFloatF)
        @test !(one(VaxFloatF) < one(VaxFloatF))
        @test !(one(VaxFloatF) < zero(VaxFloatF))
        @test one(VaxFloatF) <= one(VaxFloatF)

        # Stepping saturates at the extremes of the representable range.
        @test nextfloat(typemax(VaxFloatF)) == typemax(VaxFloatF)
        @test prevfloat(typemin(VaxFloatF)) == typemin(VaxFloatF)
        @test -prevfloat(-one(VaxFloatF)) == nextfloat(one(VaxFloatF))
        @test nextfloat(zero(VaxFloatF)) == floatmin(VaxFloatF)
        @test prevfloat(floatmin(VaxFloatF)) == zero(VaxFloatF)
        @test prevfloat(zero(VaxFloatF)) == -floatmin(VaxFloatF)
        @test nextfloat(-floatmin(VaxFloatF)) == zero(VaxFloatF)
    end

    @testset "Conversion..." begin
        for (vax, ieee) in zip(f4_vax, f4_ieee)
            @test VaxFloatF(vax) == VaxFloatF(ieee)
            @test convert(Float32, VaxFloatF(vax)) == ieee
        end
        @test convert(VaxFloatF, big"1.0") == one(VaxFloatF)
        @test convert(VaxFloatF, big"-1.0") == -one(VaxFloatF)
        # Round-trip through BigFloat carrying exactly F-format precision.
        bigpi = BigFloat(π; precision=Base.significand_bits(VaxFloatF)+1)
        bige = BigFloat(ℯ; precision=Base.significand_bits(VaxFloatF)+1)
        @test convert(BigFloat, convert(VaxFloatF, bigpi)) == bigpi
        @test convert(BigFloat, convert(VaxFloatF, bige)) == bige
    end

    @testset "Promotion..." begin
        # Narrower types promote to Float32; wider ones keep their width.
        for t in [subtypes(VaxInt); Int8; Int16; Int32; Int64; Int128; Float16; Float32; VaxFloatF]
            @test isa(one(t)*VaxFloatF(1), Float32)
        end
        @test isa(one(Float64)*VaxFloatF(1), Float64)
        @test isa(one(BigInt)*VaxFloatF(1), BigFloat)
        @test isa(one(BigFloat)*VaxFloatF(1), BigFloat)

        un = one(VaxFloatF)
        @test promote(un, un, un) == (1.0, 1.0, 1.0)
        @test promote(un, un, un, un) == (1.0, 1.0, 1.0, 1.0)
    end

    @testset "Number definitions" begin
        @test floatmax(VaxFloatF) == typemax(VaxFloatF)
        @test -typemax(VaxFloatF) == typemin(VaxFloatF)
        @test zero(VaxFloatF) == 0
        @test one(VaxFloatF) == 1
    end

    @testset "Edge cases" begin
        # Reserved Vax floating point operand
        @test_throws InexactError convert(Float32, VaxFloatF(UInt32(0x8000)))

        # Inf and NaN should error too
        @test_throws InexactError VaxFloatF(Inf32)
        @test_throws InexactError VaxFloatF(-Inf32)
        @test_throws InexactError VaxFloatF(NaN32)

        # Both IEEE zeros should be converted to Vax true zero
        @test VaxFloatF(-0.0f0) === VaxFloatF(0.0f0) === zero(VaxFloatF)

        # Dirty zero
        @test convert(Float32, VaxFloatF(UInt32(0x40))) === zero(Float32)

        # Numbers smaller than floatmin(VaxFloatF) should underflow
        @test VaxFloatF(prevfloat(convert(Float32, floatmin(VaxFloatF)))) === zero(VaxFloatF)
        @test VaxFloatF(convert(Float32, floatmin(VaxFloatF))) === floatmin(VaxFloatF)

        # Numbers larger than floatmax(VaxFloatF) should error
        @test_throws InexactError VaxFloatF(nextfloat(convert(Float32, floatmax(VaxFloatF))))
        @test VaxFloatF(convert(Float32, floatmax(VaxFloatF))) === floatmax(VaxFloatF)
    end

    @testset "IO" begin
        io = IOBuffer(reinterpret(UInt8, ones(VaxFloatF, 4)))
        @test read(io, VaxFloatF) === one(VaxFloatF)
        @test read!(io, Vector{VaxFloatF}(undef, 3)) == ones(VaxFloatF, 3)
    end
end
| VaxData | https://github.com/halleysfifthinc/VaxData.jl.git |
|
[
"MIT"
] | 1.0.0 | e2b9dbaac6b8f44bc90afdc2c38eee5d2204d98f | code | 512 | function isvalid_bitpattern(::Type{T}, x::UInt32) where {T<:VaxFloat}
    # True iff `x` is a bit pattern the exhaustive round-trip test should
    # exercise.
    # NOTE(review): as written this is equivalent to
    # `x & ~Base.exponent_mask(T) == 0`, i.e. it accepts only patterns whose
    # sign and mantissa bits are all zero, which skips almost every pattern.
    # That looks narrower than "valid VAX float" (likely intended: exponent
    # field nonzero, or `x == 0`) — confirm the intended predicate.
    (~Base.exponent_mask(T) ⊻ (x | Base.exponent_mask(T))) === typemax(x)
end
# Exhaustive round-trip check: every valid VaxFloatF bit pattern must survive
# conversion to BigFloat and back unchanged.
#
# Fix: the original collected failures into per-thread buckets indexed by
# `Threads.threadid()`. Since Julia 1.7 tasks started by `@threads` may
# migrate between threads, so `threadid()` is not a stable bucket key and
# concurrent pushes into the same bucket could race. Guard a single shared
# vector with a lock instead (failures are expected to be rare, so the lock
# is almost never contended).
inexacts = UInt32[]
inexacts_lock = ReentrantLock()
Threads.@threads for i in typemin(UInt32):typemax(UInt32)
    !isvalid_bitpattern(VaxFloatF, i) && continue
    if convert(VaxFloatF, convert(BigFloat, VaxFloatF(i))) !== VaxFloatF(i)
        lock(inexacts_lock) do
            push!(inexacts, i)
        end
    end
end
@test isempty(inexacts)
| VaxData | https://github.com/halleysfifthinc/VaxData.jl.git |
|
[
"MIT"
] | 1.0.0 | e2b9dbaac6b8f44bc90afdc2c38eee5d2204d98f | code | 4609 | @testset "Vax Float G" begin
    # Reference G_floating bit patterns paired element-wise with the IEEE
    # Float64 values they encode (positive/negative pairs).
    g8_vax = [ 0x0000000000004010,
               0x000000000000C010,
               0x000000000000402C,
               0x000000000000C02C,
               0x2D18544421FB4029,
               0x2D18544421FBC029,
               0x691B435717B847BE,
               0x691B435717B8C7BE,
               0x8B8F428A039D3861,
               0x8B8F428A039DB861,
               0x59DD428CC0CA4013,
               0x59DD428CC0CAC013 ]

    g8_ieee = Array{Float64}([ one(Float64),
                               -one(Float64),
                               3.5,
                               -3.5,
                               Float64(pi),
                               -Float64(pi),
                               1.0e37,
                               -1.0e37,
                               9.9999999999999999999999999e-38,
                               -9.9999999999999999999999999e-38,
                               1.2345678901234500000000000000,
                               -1.2345678901234500000000000000 ])

    @testset "Basic operators" begin
        @test signbit(zero(VaxFloatG)) == false
        @test signbit(one(VaxFloatG)) == false
        @test signbit(-one(VaxFloatG)) == true
        @test signbit(-(-one(VaxFloatG))) == false

        @test zero(VaxFloatG) < one(VaxFloatG)
        @test !(one(VaxFloatG) < one(VaxFloatG))
        @test !(one(VaxFloatG) < zero(VaxFloatG))
        @test one(VaxFloatG) <= one(VaxFloatG)

        # Stepping saturates at the extremes of the representable range.
        @test nextfloat(typemax(VaxFloatG)) == typemax(VaxFloatG)
        @test prevfloat(typemin(VaxFloatG)) == typemin(VaxFloatG)
        @test -prevfloat(-one(VaxFloatG)) == nextfloat(one(VaxFloatG))
        @test nextfloat(zero(VaxFloatG)) == floatmin(VaxFloatG)
        @test prevfloat(floatmin(VaxFloatG)) == zero(VaxFloatG)
        @test prevfloat(zero(VaxFloatG)) == -floatmin(VaxFloatG)
        @test nextfloat(-floatmin(VaxFloatG)) == zero(VaxFloatG)
    end

    @testset "Conversion..." begin
        for (vax, ieee) in zip(g8_vax, g8_ieee)
            @test VaxFloatG(vax) == VaxFloatG(ieee)
            @test convert(Float64, VaxFloatG(vax)) == ieee
        end
        @test convert(VaxFloatG, big"1.0") == one(VaxFloatG)
        @test convert(VaxFloatG, big"-1.0") == -one(VaxFloatG)
        # Round-trip through BigFloat carrying exactly G-format precision.
        bigpi = BigFloat(π; precision=Base.significand_bits(VaxFloatG)+1)
        bige = BigFloat(ℯ; precision=Base.significand_bits(VaxFloatG)+1)
        @test convert(BigFloat, convert(VaxFloatG, bigpi)) == bigpi
        @test convert(BigFloat, convert(VaxFloatG, bige)) == bige
    end

    @testset "Promotion..." begin
        # Arithmetic with any integer/float type should promote to Float64.
        for t in [subtypes(VaxInt); subtypes(VaxFloat); Int8; Int16; Int32; Int64; Int128; Float16; Float32; Float64]
            @test isa(one(t)*VaxFloatG(1), Float64)
        end
        @test isa(one(BigInt)*VaxFloatG(1), BigFloat)
        @test isa(one(BigFloat)*VaxFloatG(1), BigFloat)

        un = one(VaxFloatG)
        @test promote(un, un, un) == (1.0, 1.0, 1.0)
        @test promote(un, un, un, un) == (1.0, 1.0, 1.0, 1.0)
    end

    @testset "Number definitions" begin
        @test floatmax(VaxFloatG) == typemax(VaxFloatG)
        @test -typemax(VaxFloatG) == typemin(VaxFloatG)
        @test zero(VaxFloatG) == 0
        @test one(VaxFloatG) == 1
    end

    @testset "Edge cases" begin
        # Reserved Vax floating point operand
        @test_throws InexactError convert(Float64, VaxFloatG(UInt64(0x8000)))

        # Inf and NaN should error too
        @test_throws InexactError VaxFloatG(Inf64)
        @test_throws InexactError VaxFloatG(-Inf64)
        @test_throws InexactError VaxFloatG(NaN64)

        # Both IEEE zeros should be converted to Vax true zero
        @test VaxFloatG(-0.0) === VaxFloatG(0.0) === VaxFloatG(zero(UInt64))

        # Dirty zero
        @test convert(Float64, VaxFloatG(UInt64(0x08))) === zero(Float64)

        # Numbers smaller than floatmin(VaxFloatG) should underflow
        @test VaxFloatG(prevfloat(convert(Float64, floatmin(VaxFloatG)))) === zero(VaxFloatG)
        @test VaxFloatG(convert(Float64, floatmin(VaxFloatG))) === floatmin(VaxFloatG)

        # Numbers larger than floatmax(VaxFloatG) should error
        @test_throws InexactError VaxFloatG(nextfloat(convert(Float64, floatmax(VaxFloatG))))
        @test VaxFloatG(convert(Float64, floatmax(VaxFloatG))) === floatmax(VaxFloatG)
    end

    @testset "IO" begin
        io = IOBuffer(reinterpret(UInt8, ones(VaxFloatG, 4)))
        @test read(io, VaxFloatG) === one(VaxFloatG)
        @test read!(io, Vector{VaxFloatG}(undef, 3)) == ones(VaxFloatG, 3)
    end
end
| VaxData | https://github.com/halleysfifthinc/VaxData.jl.git |
|
[
"MIT"
] | 1.0.0 | e2b9dbaac6b8f44bc90afdc2c38eee5d2204d98f | code | 2008 | @testset "Vax Ints" begin
    # Little-endian VAX integer bit patterns paired with host values.
    i2_vax = [ 0x0001, 0xFFFF, 0x0100, 0xFF00, 0x3039, 0xCFC7 ]
    i2_ieee = Array{Int16}([ 1, -1, 256, -256, 12345, -12345 ])

    i4_vax = [ 0x00000001,
               0xFFFFFFFF,
               0x00000100,
               0xFFFFFF00,
               0x00010000,
               0xFFFF0000,
               0x01000000,
               0xFF000000,
               0x075BCD15,
               0xF8A432EB ]
    i4_ieee = Array{Int32}([ 1,
                             -1,
                             256,
                             -256,
                             65536,
                             -65536,
                             16777216,
                             -16777216,
                             123456789,
                             -123456789 ])

    @testset "VaxInt16" begin
        @testset "Conversion..." begin
            for (vax, ieee) in zip(i2_vax, i2_ieee)
                @test VaxInt16(vax) == VaxInt16(ieee)
                @test convert(Int16, VaxInt16(vax)) == ieee
            end
        end
        @testset "Promotion..." begin
            # Narrower operands promote up to Int16; wider types keep width.
            @test isa(one(Int8)*VaxInt16(1), Int16)
            @test isa(VaxInt16(1)*VaxInt16(1), Int16)
            for t in [Float16, Float32, Float64, Int16, Int32, Int64, Int128]
                @test isa(one(t)*VaxInt16(1), t)
            end
        end
    end

    @testset "VaxInt32" begin
        @testset "Conversion..." begin
            for (vax, ieee) in zip(i4_vax, i4_ieee)
                @test VaxInt32(vax) == VaxInt32(ieee)
                @test convert(Int32, VaxInt32(vax)) == ieee
            end
        end
        @testset "Promotion..." begin
            # Narrower operands promote up to Int32; wider types keep width.
            for t in [subtypes(VaxInt); Int8; Int16]
                @test isa(one(t)*VaxInt32(1), Int32)
            end
            for t in [Float16, Float32, Float64, Int32, Int64, Int128]
                @test isa(one(t)*VaxInt32(1), t)
            end
        end
    end
end
| VaxData | https://github.com/halleysfifthinc/VaxData.jl.git |
|
[
"MIT"
] | 1.0.0 | e2b9dbaac6b8f44bc90afdc2c38eee5d2204d98f | docs | 1835 | # VaxData
[](https://juliahub.com/ui/Packages/VaxData/T8cvD)
[](https://juliahub.com/ui/Packages/VaxData/T8cvD)
[](https://github.com/halleysfifthinc/VaxData.jl/actions/workflows/CI.yml)
[](http://codecov.io/github/halleysfifthinc/VaxData.jl?branch=master)
[](https://www.repostatus.org/#active)
VaxData.jl is a direct port to Julia from [libvaxdata](https://pubs.usgs.gov/of/2005/1424/) [^1]. See [this report](https://pubs.usgs.gov/of/2005/1424/of2005-1424_v1.2.pdf) for an in-depth review of the underlying structure and differences between VAX data types and IEEE types.
There are 5 Vax datatypes implemented by this package: `VaxInt16`, `VaxInt32`, `VaxFloatF`,
`VaxFloatG`, and `VaxFloatD`.
# Examples
```julia
julia> one(VaxFloatF)
vaxf"1.0"
julia> -one(VaxFloatF)
vaxf"-1.0"
julia> vaxg"3.14159265358979323846264338327950"
vaxg"3.1415926535897931"
julia> vaxd"3.14159265358979323846264338327950"
vaxd"3.14159265358979323"
```
Conversion to and from each type is defined; Vax types are promoted to the next appropriately sized type supporting math operations:
```julia
promote_type(VaxFloatF, Float32)
Float32
promote_type(VaxFloatF, VaxFloatF)
Float32
promote_type(VaxFloatF, Float64)
Float64
```
[^1]: Baker, L.M., 2005, libvaxdata: VAX Data Format Conversion Routines: U.S. Geological Survey Open-File Report 2005-1424 (http://pubs.usgs.gov/of/2005/1424/).
| VaxData | https://github.com/halleysfifthinc/VaxData.jl.git |
|
[
"MIT"
] | 0.1.0 | d47f3b5aab46977da02368aab00a2ec2b27e2dbe | code | 1919 | import Documenter
import Literate
import Pasteee

# Enable debug logging for all three packages during the docs build.
ENV["JULIA_DEBUG"] = "Documenter,Literate,Pasteee"

#=
We place Literate.jl source .jl files and the generated .md files inside docs/src/literate.
=#
const literate_dir = joinpath(@__DIR__, "src/literate")

#=
Helper function to remove all "*.md" files from a directory.
=#
"""
    clear_md_files(dir::String)

Recursively delete every `*.md` file beneath `dir`, leaving all other files
untouched.
"""
function clear_md_files(dir::String)
    for (root, _, files) in walkdir(dir)
        for md in filter(f -> endswith(f, ".md"), files)
            rm(joinpath(root, md))
        end
    end
end
#=
Remove previously Literate.jl generated files. This removes all "*.md" files inside
`literate_dir`. This is a precaution: if we build docs locally and something fails,
and then change the name of a source file (".jl"), we will be left with a lingering
".md" file which will be included in the current docs build. The following line makes
sure this doesn't happen.
=#
clear_md_files(literate_dir)

#=
Run Literate.jl on the .jl source files within docs/src/literate (recursively).
For each .jl file, this creates a markdown .md file at the same location as
and with the same name as the corresponding .jl file, but with the extension
changed (.jl -> .md).
=#
for (root, dirs, files) in walkdir(literate_dir)
    for file in files
        if endswith(file, ".jl")
            # `documenter=true` emits Documenter-compatible @example blocks.
            Literate.markdown(joinpath(root, file), root; documenter=true)
        end
    end
end

#=
Build docs.
=#
Documenter.makedocs(
    modules = [Pasteee],
    sitename = "Pasteee.jl",
    pages = [
        "Home" => "index.md",
        "Examples" => "literate/examples.md",
        "Reference" => "reference.md"
    ],
    strict = true
)

#=
After the docs have been compiled, we can remove the *.md files generated by Literate.
=#
clear_md_files(literate_dir)

#=
Deploy docs to Github pages.
=#
Documenter.deploydocs(
    repo = "github.com/cossio/Pasteee.jl.git",
    devbranch = "master"
)
| Pasteee | https://github.com/cossio/Pasteee.jl.git |
|
[
"MIT"
] | 0.1.0 | d47f3b5aab46977da02368aab00a2ec2b27e2dbe | code | 929 | #=
# Examples
=#

# Literate.jl source: `#=`-comments become docs prose, code runs at build
# time (so these examples hit the live API during the docs build).
import Pasteee

#=
To access the Paste.ee API, you need an Application Key.
You can create yours at the following link:

<https://paste.ee/account/api>

(after logging in to [Paste.ee](https://paste.ee/))

In that page, first create an Application,
then click ACTIONS -> AUTHORIZATION PAGE, and then SUBMIT.
You will see the Application Key.
=#

const appkey = ENV["PASTEEE_APPKEY"]; nothing #hide

#=
In the following examples I assume that you have assigned your
Application Key to the `appkey` variable.
=#

# Create a paste that expires in one hour.
id = Pasteee.paste(appkey, "Hola mundo"; expiration="3600")

# The paste can be retrieved using the returned `id`.
paste = Pasteee.get(appkey, id)

# Paste.ee pastes are organized in sections.
# Here we retrieve the contents of section number 1 of the paste we just created.
paste["sections"][1]["contents"]

# Delete the paste.
Pasteee.delete(appkey, id)
|
[
"MIT"
] | 0.1.0 | d47f3b5aab46977da02368aab00a2ec2b27e2dbe | code | 3017 | module Pasteee

import HTTP
import JSON

"""
    Section(contents; name = "", syntax = "")

Creates a paste `Section` with `contents`.
The `syntax` argument determines syntax highlight.
"""
struct Section
    name::String
    syntax::String
    contents::String
    # Keyword-only construction; `contents` is the sole positional argument.
    Section(contents::String; name::String = "", syntax::String = "") = new(name, syntax, contents)
end

# Serialize a `Section` into the JSON object shape the Paste.ee API expects.
dict(sec::Section) = Dict("name" => sec.name, "syntax" => sec.syntax, "contents" => sec.contents)
"""
    paste(appkey, sections; description = "", expiration = "never")

Submit a paste to Paste.ee and return its `id`.
The `sections` argument is either a `Vector` of [`Section`](@ref) objects,
or a single `Section` object.
The `expiration` setting can be set to `"never"` (default), or to a number of seconds given
as a `String` (e.g., `"3600"` for one hour).
See <https://pastee.github.io/docs/>.
"""
function paste(
    appkey::AbstractString,
    sections::AbstractVector{Section};
    description::AbstractString = "",
    #encrypted::Bool = false, # Not sure how this works
    expiration::AbstractString = "never"
)
    # The application key authenticates the request.
    headers = ["X-Auth-Token" => appkey, "Content-Type" => "application/json"]
    data = Dict{String,Any}(
        "sections" => dict.(sections),
        "expiration" => expiration,
        "description" => description
    )
    # if encrypted
    #     data["encrypted"] = true
    # end
    response = HTTP.post("https://api.paste.ee/v1/pastes", headers, JSON.json(data))
    # The API responds with a JSON body carrying the new paste's id.
    return JSON.parse(String(response.body))["id"]
end
# A single `Section` is treated as a one-section paste.
paste(appkey::AbstractString, section::Section; kw...) = paste(appkey, [section]; kw...)

"""
    paste(appkey, contents::String; name = "", syntax = "", ...)

Paste a single section with `contents`, `name` and `syntax` given.
"""
paste(appkey::AbstractString, contents::String;
      name::String = "", syntax::String = "", kwargs...) =
    paste(appkey, Section(contents; name, syntax); kwargs...)
"""
    delete(appkey, id)

Deletes paste `id` from Paste.ee.
"""
function delete(appkey::AbstractString, id::AbstractString)
    headers = ["X-Auth-Token" => appkey]
    # The response body is ignored; HTTP.jl raises a StatusError on failure
    # (see the package tests), so returning means the delete succeeded.
    HTTP.request("DELETE", "https://api.paste.ee/v1/pastes/$id", headers)
    return nothing
end
"""
    get(appkey, id)

Fetch paste `id` from Paste.ee.
"""
function get(appkey::AbstractString, id::AbstractString)
    # NOTE: this deliberately does not extend `Base.get`; call it qualified
    # as `Pasteee.get` (as the package tests do).
    headers = ["X-Auth-Token" => appkey]
    response = HTTP.get("https://api.paste.ee/v1/pastes/$id", headers)
    # Unwrap the "paste" envelope from the JSON response.
    return JSON.parse(String(response.body))["paste"]
end
"""
    pastes(appkey; perpage = 25, page = 1)

Retrieve all pastes, organized in pages containing `perpage` entries.
Returns entries in page number `page`.
"""
function pastes(appkey::AbstractString; perpage::Int = 25, page::Int = 1)
    headers = ["X-Auth-Token" => appkey, "Content-Type" => "application/json"]
    data = Dict("perpage" => perpage, "page" => page)
    # NOTE(review): pagination parameters are sent as a JSON body on a GET
    # request — confirm the API honors this rather than query parameters.
    response = HTTP.get("https://api.paste.ee/v1/pastes", headers, JSON.json(data))
    return JSON.parse(String(response.body))
end

end # module
| Pasteee | https://github.com/cossio/Pasteee.jl.git |
|
[
"MIT"
] | 0.1.0 | d47f3b5aab46977da02368aab00a2ec2b27e2dbe | code | 543 | using Test: @test, @testset, @test_throws
import Pasteee
import HTTP

# Live-API tests: a valid application key must be present in the environment.
const appkey = ENV["PASTEEE_APPKEY"]

@testset "Pasteee" begin
    # Create, fetch, and verify a short-lived (1 hour) paste.
    id = Pasteee.paste(appkey, "Batido de mamey"; expiration="3600")
    paste = Pasteee.get(appkey, id)
    @test paste["sections"][1]["contents"] == "Batido de mamey"
    Pasteee.delete(appkey, id)
    # Fetching a deleted paste must fail with an HTTP status error.
    @test_throws HTTP.ExceptionRequest.StatusError Pasteee.get(appkey, id)
    # Pagination parameters are echoed back by the API.
    pastes = Pasteee.pastes(appkey; perpage=12, page=2)
    @test pastes["current_page"] == 2
    @test pastes["per_page"] == 12
end
| Pasteee | https://github.com/cossio/Pasteee.jl.git |
|
[
"MIT"
] | 0.1.0 | d47f3b5aab46977da02368aab00a2ec2b27e2dbe | docs | 1166 | # Pasteee.jl - Julia API for Paste.ee
[](https://github.com/cossio/Pasteee.jl/blob/master/LICENSE.md)
[](https://cossio.github.io/Pasteee.jl/stable)
[](https://cossio.github.io/Pasteee.jl/dev)

[](https://codecov.io/gh/cossio/Pasteee.jl)
[Paste.ee](https://paste.ee/) is a free version of [Pastebin](https://pastebin.com/) with SSL, IPv6, and an easy to use API.
This package provides a Julia wrapper around the Paste.ee API (see <https://pastee.github.io/docs/>).
## Installation
This package is registered.
Install with:
```Julia
using Pkg
Pkg.add("Pasteee")
```
## Related
[Pastebin](https://pastebin.com/) Julia wrapper: <https://github.com/cossio/Pastebin.jl>.
Note that Pastebin has some limitations, such as 10 pastes / day for guest accounts (see <https://pastebin.com/faq#11a>).
Paste.ee does not have these limitations.
| Pasteee | https://github.com/cossio/Pasteee.jl.git |
|
[
"MIT"
] | 0.1.0 | d47f3b5aab46977da02368aab00a2ec2b27e2dbe | docs | 586 | # Pasteee.jl Documentation
A Julia wrapper around the [Paste.ee](https://paste.ee/) API.
See <https://pastee.github.io/docs/>.
## Installation
This package is registered.
Install with:
```julia
import Pkg
Pkg.add("Pasteee")
```
The source code is hosted on Github:
<https://github.com/cossio/Pasteee.jl>
## Usage
This package doesn't export any symbols.
There are three main functions:
* `Pasteee.paste` to create a paste
* `Pasteee.get` to retrieve a paste
* `Pasteee.delete` to delete a paste
See the [Examples](@ref) for usage help.
See also the [Reference](@ref) section.
| Pasteee | https://github.com/cossio/Pasteee.jl.git |
|
[
"MIT"
] | 0.1.0 | d47f3b5aab46977da02368aab00a2ec2b27e2dbe | docs | 49 | # Reference
```@autodocs
Modules = [Pasteee]
``` | Pasteee | https://github.com/cossio/Pasteee.jl.git |
|
[
"MIT"
] | 1.9.0 | 273bd1cd30768a2fddfa3fd63bbc746ed7249e5f | code | 354 | using Distributions, Documenter, GLM, StatsBase

# Documenter build script for the GLM.jl documentation site.
makedocs(
    format = Documenter.HTML(),
    sitename = "GLM",
    modules = [GLM],
    pages = [
        "Home" => "index.md",
        "examples.md",
        "api.md",
    ],
    debug = false,
    doctest = true,
    # Fail the build only on doctest problems.
    strict = :doctest,
)

deploydocs(
    repo = "github.com/JuliaStats/GLM.jl.git",
)
| GLM | https://github.com/JuliaStats/GLM.jl.git |
|
[
"MIT"
] | 1.9.0 | 273bd1cd30768a2fddfa3fd63bbc746ed7249e5f | code | 884 | using GLM, Random, StatsModels

# Benchmark script: simulate a large logistic-regression problem and time
# the fit. Seeded RNG keeps the run reproducible.

# create a column table with dummy response
n = 2_500_000
rng = MersenneTwister(1234321)
tbl = (
    x1 = randn(rng, n),
    x2 = Random.randexp(rng, n),
    # 50-level categorical predictor drawn from the strings "50".."99".
    ss = rand(rng, string.(50:99), n),
    y = zeros(n),
)

# apply a formula to create a model matrix
f = @formula(y ~ 1 + x1 + x2 + ss)
f = apply_schema(f, schema(f, tbl))
resp, pred = modelcols(f, tbl)

# simulate β and the response
β = randn(rng, size(pred, 2))
β[1] = 0.5 # to avoid edge cases
# Standard logistic (sigmoid) function, mapping the linear predictor to (0, 1).
logistic(x::Real) = 1 / (1 + exp(-x))
# Draw Bernoulli responses with success probability logistic(Xβ).
resp .= rand(rng, n) .< logistic.(pred * β)

# fit a subset of the data (also serves as a compile warm-up before timing)
gm6 = glm(pred[1:1000, :], resp[1:1000], Bernoulli())

# time the fit on the whole data set
@time glm(pred, resp, Bernoulli());
| GLM | https://github.com/JuliaStats/GLM.jl.git |
|
[
"MIT"
] | 1.9.0 | 273bd1cd30768a2fddfa3fd63bbc746ed7249e5f | code | 3526 | module GLM
using Distributions, LinearAlgebra, Printf, Reexport, SparseArrays, Statistics, StatsBase, StatsFuns
using LinearAlgebra: copytri!, QRCompactWY, Cholesky, CholeskyPivoted, BlasReal
using Printf: @sprintf
using StatsBase: CoefTable, StatisticalModel, RegressionModel
using StatsFuns: logit, logistic
@reexport using StatsModels
using Distributions: sqrt2, sqrt2π
import Base: (\), convert, show, size
import LinearAlgebra: cholesky, cholesky!
import Statistics: cor
using StatsAPI
import StatsBase: coef, coeftable, coefnames, confint, deviance, nulldeviance, dof, dof_residual,
loglikelihood, nullloglikelihood, nobs, stderror, vcov,
residuals, predict, predict!,
fitted, fit, model_response, response, modelmatrix, r2, r², adjr2, adjr², PValue
import StatsFuns: xlogy
import SpecialFunctions: erfc, erfcinv, digamma, trigamma
import StatsModels: hasintercept
export coef, coeftable, confint, deviance, nulldeviance, dof, dof_residual,
loglikelihood, nullloglikelihood, nobs, stderror, vcov, residuals, predict,
fitted, fit, fit!, model_response, response, modelmatrix, r2, r², adjr2, adjr²,
cooksdistance, hasintercept, dispersion, vif, gvif, termnames
export
# types
## Distributions
Bernoulli,
Binomial,
Gamma,
Geometric,
InverseGaussian,
NegativeBinomial,
Normal,
Poisson,
## Link types
Link,
CauchitLink,
CloglogLink,
IdentityLink,
InverseLink,
InverseSquareLink,
LogitLink,
LogLink,
NegativeBinomialLink,
PowerLink,
ProbitLink,
SqrtLink,
# Model types
GeneralizedLinearModel,
LinearModel,
# functions
canonicallink, # canonical link function for a distribution
deviance, # deviance of fitted and observed responses
devresid, # vector of squared deviance residuals
formula, # extract the formula from a model
glm, # general interface
linpred, # linear predictor
lm, # linear model
negbin, # interface to fitting negative binomial regression
nobs, # total number of observations
predict, # make predictions
ftest # compare models with an F test
# Shorthand aliases for the floating-point element types used throughout GLM.
const FP = AbstractFloat
const FPVector{T<:FP} = AbstractArray{T,1}

"""
    ModResp

Abstract type representing a model response vector
"""
abstract type ModResp end # model response

"""
    LinPred

Abstract type representing a linear predictor
"""
abstract type LinPred end # linear predictor in statistical models

abstract type DensePred <: LinPred end # linear predictor with dense X
abstract type LinPredModel <: RegressionModel end # model based on a linear predictor

# Pivoted Cholesky: `cholesky!(A, Val(true))` was replaced by
# `cholesky!(A, RowMaximum())` in Julia 1.8 (1.8.0-DEV.1139), so dispatch on
# the running Julia version to support both spellings.
@static if VERSION < v"1.8.0-DEV.1139"
    pivoted_cholesky!(A; kwargs...) = cholesky!(A, Val(true); kwargs...)
else
    pivoted_cholesky!(A; kwargs...) = cholesky!(A, RowMaximum(); kwargs...)
end

include("linpred.jl")
include("lm.jl")
include("glmtools.jl")
include("glmfit.jl")
include("ftest.jl")
include("negbinfit.jl")
include("deprecated.jl")

end # module
| GLM | https://github.com/JuliaStats/GLM.jl.git |
|
[
"MIT"
# Deprecated positional-argument forms, rewritten to the keyword-argument API.
@deprecate predict(mm::LinearModel, newx::AbstractMatrix, interval::Symbol, level::Real = 0.95) predict(mm, newx; interval=interval, level=level)
@deprecate confint(obj::LinearModel, level::Real) confint(obj, level=level)
@deprecate confint(obj::AbstractGLM, level::Real) confint(obj, level=level)
| GLM | https://github.com/JuliaStats/GLM.jl.git |
|
[
"MIT"
# Result of an F-test of one linear model against the intercept-only null model.
struct SingleFTestResult
    nobs::Int       # number of observations
    dof::Int        # numerator degrees of freedom
    fstat::Float64  # F statistic
    pval::Float64   # p-value of the test
end

# Result of sequential F-tests over `N` nested linear models.  The first entry
# of `fstat`/`pval` is NaN, since the first model has no predecessor to compare to.
mutable struct FTestResult{N}
    nobs::Int                  # common number of observations
    ssr::NTuple{N, Float64}    # sum of squared residuals for each model
    dof::NTuple{N, Int}        # degrees of freedom consumed by each model
    r2::NTuple{N, Float64}     # R² of each model
    fstat::NTuple{N, Float64}  # F statistic of each sequential comparison
    pval::NTuple{N, Float64}   # p-value of each sequential comparison
end
@deprecate issubmodel(mod1::LinPredModel, mod2::LinPredModel; atol::Real=0.0) StatsModels.isnested(mod1, mod2; atol=atol)

# Return `true` when `mod1` is nested within `mod2`, i.e. fitted to the same
# response with a predictor column space contained in that of `mod2`.
function StatsModels.isnested(mod1::LinPredModel, mod2::LinPredModel; atol::Real=0.0)
    mod1.rr.y != mod2.rr.y && return false # Response variables must be equal

    # Test that models are nested
    pred1 = mod1.pp.X
    npreds1 = size(pred1, 2)
    pred2 = mod2.pp.X
    npreds2 = size(pred2, 2)

    # If model 1 has more predictors, it can't possibly be a submodel
    npreds1 > npreds2 && return false

    # Test min norm pred2*B - pred1 ≈ 0: the residual of projecting pred1 onto
    # pred2's column space lives in rows npreds2+1:nresp of Q'pred1.
    rtol = Base.rtoldefault(typeof(pred1[1,1]))
    nresp = size(pred2, 1)
    return norm(view(qr(pred2).Q'pred1, npreds2 + 1:nresp, :)) <= max(atol, rtol*norm(pred1))
end
# Consecutive differences of a tuple's elements:
# `_diff(t)[i] == t[i+1] - t[i]` and `_diffn(t)[i] == t[i] - t[i+1]`.
function _diffn(t::NTuple{N, T}) where {N, T}
    return ntuple(N - 1) do k
        t[k] - t[k + 1]
    end
end

function _diff(t::NTuple{N, T}) where {N, T}
    return ntuple(N - 1) do k
        t[k + 1] - t[k]
    end
end
"""
ftest(mod::LinearModel)
Perform an F-test to determine whether model `mod` fits significantly better
than the null model (i.e. which includes only the intercept).
```jldoctest; setup = :(using DataFrames, GLM)
julia> dat = DataFrame(Result=[1.1, 1.2, 1, 2.2, 1.9, 2, 0.9, 1, 1, 2.2, 2, 2],
Treatment=[1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2]);
julia> model = lm(@formula(Result ~ 1 + Treatment), dat);
julia> ftest(model.model)
F-test against the null model:
F-statistic: 241.62 on 12 observations and 1 degrees of freedom, p-value: <1e-07
```
"""
function ftest(mod::LinearModel)
hasintercept(mod) || throw(ArgumentError("ftest only works for models with an intercept"))
rss = deviance(mod)
tss = nulldeviance(mod)
n = Int(nobs(mod))
p = dof(mod) - 2 # -2 for intercept and dispersion parameter
fstat = ((tss - rss) / rss) * ((n - p - 1) / p)
fdist = FDist(p, dof_residual(mod))
SingleFTestResult(n, p, promote(fstat, ccdf(fdist, abs(fstat)))...)
end
"""
ftest(mod::LinearModel...; atol::Real=0.0)
For each sequential pair of linear models in `mod...`, perform an F-test to determine if
the one model fits significantly better than the other. Models must have been fitted
on the same data, and be nested either in forward or backward direction.
A table is returned containing consumed degrees of freedom (DOF),
difference in DOF from the preceding model, sum of squared residuals (SSR), difference in
SSR from the preceding model, R², difference in R² from the preceding model, and F-statistic
and p-value for the comparison between the two models.
!!! note
This function can be used to perform an ANOVA by testing the relative fit of two models
to the data
Optional keyword argument `atol` controls the numerical tolerance when testing whether
the models are nested.
# Examples
Suppose we want to compare the effects of two or more treatments on some result. Because
this is an ANOVA, our null hypothesis is that `Result ~ 1` fits the data as well as
`Result ~ 1 + Treatment`.
```jldoctest ; setup = :(using CategoricalArrays, DataFrames, GLM)
julia> dat = DataFrame(Result=[1.1, 1.2, 1, 2.2, 1.9, 2, 0.9, 1, 1, 2.2, 2, 2],
Treatment=[1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2],
Other=categorical([1, 1, 2, 1, 2, 1, 3, 1, 1, 2, 2, 1]));
julia> nullmodel = lm(@formula(Result ~ 1), dat);
julia> model = lm(@formula(Result ~ 1 + Treatment), dat);
julia> bigmodel = lm(@formula(Result ~ 1 + Treatment + Other), dat);
julia> ftest(nullmodel.model, model.model)
F-test: 2 models fitted on 12 observations
─────────────────────────────────────────────────────────────────
DOF ΔDOF SSR ΔSSR R² ΔR² F* p(>F)
─────────────────────────────────────────────────────────────────
[1] 2 3.2292 0.0000
[2] 3 1 0.1283 -3.1008 0.9603 0.9603 241.6234 <1e-07
─────────────────────────────────────────────────────────────────
julia> ftest(nullmodel.model, model.model, bigmodel.model)
F-test: 3 models fitted on 12 observations
─────────────────────────────────────────────────────────────────
DOF ΔDOF SSR ΔSSR R² ΔR² F* p(>F)
─────────────────────────────────────────────────────────────────
[1] 2 3.2292 0.0000
[2] 3 1 0.1283 -3.1008 0.9603 0.9603 241.6234 <1e-07
[3] 5 2 0.1017 -0.0266 0.9685 0.0082 1.0456 0.3950
─────────────────────────────────────────────────────────────────
```
"""
function ftest(mods::LinearModel...; atol::Real=0.0)
if !all(==(nobs(mods[1])), nobs.(mods))
throw(ArgumentError("F test is only valid for models fitted on the same data, " *
"but number of observations differ"))
end
forward = length(mods) == 1 || dof(mods[1]) <= dof(mods[2])
if forward
for i in 2:length(mods)
if dof(mods[i-1]) >= dof(mods[i]) || !StatsModels.isnested(mods[i-1], mods[i], atol=atol)
throw(ArgumentError("F test is only valid for nested models"))
end
end
else
for i in 2:length(mods)
if dof(mods[i]) >= dof(mods[i-1]) || !StatsModels.isnested(mods[i], mods[i-1], atol=atol)
throw(ArgumentError("F test is only valid for nested models"))
end
end
end
SSR = deviance.(mods)
df = dof.(mods)
Δdf = _diff(df)
dfr = Int.(dof_residual.(mods))
MSR1 = _diffn(SSR) ./ Δdf
MSR2 = (SSR ./ dfr)
if forward
MSR2 = MSR2[2:end]
dfr_big = dfr[2:end]
else
MSR2 = MSR2[1:end-1]
dfr_big = dfr[1:end-1]
end
fstat = (NaN, (MSR1 ./ MSR2)...)
pval = (NaN, ccdf.(FDist.(abs.(Δdf), dfr_big), abs.(fstat[2:end]))...)
return FTestResult(Int(nobs(mods[1])), SSR, df, r2.(mods), fstat, pval)
end
# Render a single-model F-test as one line of text.
function show(io::IO, ftr::SingleFTestResult)
    print(io,
          "F-test against the null model:\nF-statistic: ", StatsBase.TestStat(ftr.fstat),
          " on ", ftr.nobs, " observations and ", ftr.dof, " degrees of freedom, ",
          "p-value: ", PValue(ftr.pval))
end
# Render the sequential F-test results as a right-aligned text table.
function show(io::IO, ftr::FTestResult{N}) where N
    Δdof = _diff(ftr.dof)
    Δssr = _diff(ftr.ssr)
    ΔR² = _diff(ftr.r2)

    nc = 9   # number of columns
    nr = N   # one data row per model
    outrows = Matrix{String}(undef, nr+1, nc)

    outrows[1, :] = ["", "DOF", "ΔDOF", "SSR", "ΔSSR",
                     "R²", "ΔR²", "F*", "p(>F)"]

    # get rid of negative zero -- doesn't matter mathematically,
    # but messes up doctests and various other things
    # cf. Issue #461
    r2vals = [replace(@sprintf("%.4f", val), "-0.0000" => "0.0000") for val in ftr.r2]

    # First row: no Δ columns and no test statistics (nothing to compare to)
    outrows[2, :] = ["[1]", @sprintf("%.0d", ftr.dof[1]), " ",
                     @sprintf("%.4f", ftr.ssr[1]), " ",
                     r2vals[1], " ", " ", " "]

    for i in 2:nr
        outrows[i+1, :] = ["[$i]",
                           @sprintf("%.0d", ftr.dof[i]), @sprintf("%.0d", Δdof[i-1]),
                           @sprintf("%.4f", ftr.ssr[i]), @sprintf("%.4f", Δssr[i-1]),
                           r2vals[i], @sprintf("%.4f", ΔR²[i-1]),
                           @sprintf("%.4f", ftr.fstat[i]), string(PValue(ftr.pval[i])) ]
    end
    colwidths = length.(outrows)
    max_colwidths = [maximum(view(colwidths, :, i)) for i in 1:nc]
    # Total width: column widths plus inter-column gaps (8 gaps)
    totwidth = sum(max_colwidths) + 2*8

    println(io, "F-test: $N models fitted on $(ftr.nobs) observations")
    println(io, '─'^totwidth)

    for r in 1:nr+1
        for c in 1:nc
            cur_cell = outrows[r, c]
            cur_cell_len = length(cur_cell)

            # Right-align each cell; columns after the first get a leading gap
            padding = " "^(max_colwidths[c]-cur_cell_len)
            if c > 1
                padding = " "*padding
            end

            print(io, padding)
            print(io, cur_cell)
        end
        print(io, "\n")
        r == 1 && println(io, '─'^totwidth)  # rule under the header row
    end
    print(io, '─'^totwidth)
end
| GLM | https://github.com/JuliaStats/GLM.jl.git |
|
[
"MIT"
"""
    GlmResp

The response vector and various derived vectors in a generalized linear model.
"""
struct GlmResp{V<:FPVector,D<:UnivariateDistribution,L<:Link} <: ModResp
    "`y`: response vector"
    y::V
    "`d`: the response distribution"
    d::D
    "`link`: link function with relevant parameters"
    link::L
    "`devresid`: the squared deviance residuals"
    devresid::V
    "`eta`: the linear predictor"
    eta::V
    "`mu`: mean response"
    mu::V
    "`offset:` offset added to `Xβ` to form `eta`. Can be of length 0"
    offset::V
    "`wts:` prior case weights. Can be of length 0."
    wts::V
    "`wrkwt`: working case weights for the Iteratively Reweighted Least Squares (IRLS) algorithm"
    wrkwt::V
    "`wrkresid`: working residuals for IRLS"
    wrkresid::V
end
# Checked constructor: validates response values and vector lengths, then
# assembles a `GlmResp`, allocating the derived work vectors.
function GlmResp(y::V, d::D, l::L, η::V, μ::V, off::V, wts::V) where {V<:FPVector, D, L}
    n = length(y)
    nη = length(η)
    nμ = length(μ)
    lw = length(wts)
    lo = length(off)

    # Check y values
    checky(y, d)

    # Lengths of y, η, and μ all need to be n
    if !(nη == nμ == n)
        throw(DimensionMismatch("lengths of η, μ, and y ($nη, $nμ, $n) are not equal"))
    end

    # Lengths of wts and off can be either n or 0
    if lw != 0 && lw != n
        throw(DimensionMismatch("wts must have length $n or length 0 but was $lw"))
    end
    if lo != 0 && lo != n
        throw(DimensionMismatch("offset must have length $n or length 0 but was $lo"))
    end

    return GlmResp{V,D,L}(y, d, l, similar(y), η, μ, off, wts, similar(y), similar(y))
end
# Convenience constructor: converts inputs to concrete float vectors, then
# initialises the linear predictor η and mean μ from the data.
function GlmResp(y::FPVector, d::Distribution, l::Link, off::FPVector, wts::FPVector)
    # Instead of convert(Vector{Float64}, y) to be more ForwardDiff friendly
    _y = convert(Vector{float(eltype(y))}, y)
    _off = convert(Vector{float(eltype(off))}, off)
    _wts = convert(Vector{float(eltype(wts))}, wts)
    η = similar(_y)
    μ = similar(_y)
    r = GlmResp(_y, d, l, η, μ, _off, _wts)
    initialeta!(r.eta, d, l, _y, _wts, _off)
    updateμ!(r, r.eta)
    return r
end
# Promote integer/rational inputs to floating point before building the response.
GlmResp(y::AbstractVector{<:Real}, d::D, l::L, off::AbstractVector{<:Real},
        wts::AbstractVector{<:Real}) where {D, L} =
    GlmResp(float(y), d, l, float(off), float(wts))
# Total deviance is the sum of the per-observation squared deviance residuals.
deviance(r::GlmResp) = sum(r.devresid)

"""
    cancancel(r::GlmResp{V,D,L})

Returns `true` if dμ/dη for link `L` is the variance function for distribution `D`

When `L` is the canonical link for `D` the derivative of the inverse link is a multiple
of the variance function for `D`. If they are the same a numerator and denominator term in
the expression for the working weights will cancel.
"""
cancancel(::GlmResp) = false
cancancel(::GlmResp{V,D,LogitLink}) where {V,D<:Union{Bernoulli,Binomial}} = true
cancancel(::GlmResp{V,D,NegativeBinomialLink}) where {V,D<:NegativeBinomial} = true
cancancel(::GlmResp{V,D,IdentityLink}) where {V,D<:Normal} = true
cancancel(::GlmResp{V,D,LogLink}) where {V,D<:Poisson} = true
"""
updateμ!{T<:FPVector}(r::GlmResp{T}, linPr::T)
Update the mean, working weights and working residuals, in `r` given a value of
the linear predictor, `linPr`.
"""
function updateμ! end
function updateμ!(r::GlmResp{T}, linPr::T) where T<:FPVector
isempty(r.offset) ? copyto!(r.eta, linPr) : broadcast!(+, r.eta, linPr, r.offset)
updateμ!(r)
if !isempty(r.wts)
map!(*, r.devresid, r.devresid, r.wts)
map!(*, r.wrkwt, r.wrkwt, r.wts)
end
r
end
# Generic per-observation update of μ, the working residuals/weights and the
# deviance residuals from the current linear predictor η.
function updateμ!(r::GlmResp{V,D,L}) where {V<:FPVector,D,L}
    y, η, μ, wrkres, wrkwt, dres = r.y, r.eta, r.mu, r.wrkresid, r.wrkwt, r.devresid

    @inbounds for i in eachindex(y, η, μ, wrkres, wrkwt, dres)
        μi, dμdη = inverselink(r.link, η[i])
        μ[i] = μi
        yi = y[i]
        wrkres[i] = (yi - μi) / dμdη
        # For canonical links (see `cancancel`) the ratio (dμ/dη)²/Var simplifies to dμ/dη
        wrkwt[i] = cancancel(r) ? dμdη : abs2(dμdη) / glmvar(r.d, μi)
        dres[i] = devresid(r.d, yi, μi)
    end
end
function _weights_residuals(yᵢ, ηᵢ, μᵢ, omμᵢ, dμdηᵢ, l::LogitLink)
    # LogitLink is the canonical link function for Binomial so only wrkresᵢ can
    # possibly fail when dμdη==0 in which case it evaluates to ±1.
    if iszero(dμdηᵢ)
        wrkresᵢ = ifelse(yᵢ == 1, one(μᵢ), -one(μᵢ))
    else
        wrkresᵢ = ifelse(yᵢ == 1, omμᵢ, yᵢ - μᵢ) / dμdηᵢ
    end
    # Canonical link: the working weight is the variance μ(1 - μ)
    wrkwtᵢ = μᵢ*omμᵢ

    return wrkresᵢ, wrkwtᵢ
end
function _weights_residuals(yᵢ, ηᵢ, μᵢ, omμᵢ, dμdηᵢ, l::ProbitLink)
    # Since μomμ will underflow before dμdη for Probit, we can just check the
    # former to decide when to evaluate with the tail approximation.
    μomμᵢ = μᵢ*omμᵢ
    if iszero(μomμᵢ)
        # Tail regime: use the stable asymptotic forms instead of 0/0 ratios
        wrkresᵢ = 1/abs(ηᵢ)
        wrkwtᵢ = dμdηᵢ
    else
        wrkresᵢ = ifelse(yᵢ == 1, omμᵢ, yᵢ - μᵢ) / dμdηᵢ
        wrkwtᵢ = abs2(dμdηᵢ)/μomμᵢ
    end

    return wrkresᵢ, wrkwtᵢ
end
# Working residual and working weight for a 0/1 response with the
# complementary log-log link, written to remain finite where `exp`
# under- or overflows.
function _weights_residuals(yᵢ, ηᵢ, μᵢ, omμᵢ, dμdηᵢ, l::CloglogLink)
    if yᵢ == 1
        # For a success the working residual simplifies to exp(-η)
        wrkresᵢ = exp(-ηᵢ)
    else
        emη = exp(-ηᵢ)
        if iszero(emη)
            # η → ∞: the working residual diverges to -∞.
            # Fix: the original referenced the undefined name `emηᵢ` here,
            # which raised UndefVarError whenever this branch was taken.
            wrkresᵢ = oftype(emη, -Inf)
        elseif isinf(emη)
            # converges to -1
            wrkresᵢ = -one(emη)
        else
            wrkresᵢ = (yᵢ - μᵢ)/omμᵢ*emη
        end
    end

    wrkwtᵢ = exp(2*ηᵢ)/expm1(exp(ηᵢ))
    # We know that both tail limits are zero so we'll convert NaNs
    wrkwtᵢ = ifelse(isnan(wrkwtᵢ), zero(wrkwtᵢ), wrkwtᵢ)

    return wrkresᵢ, wrkwtᵢ
end
# Fallback for remaining link functions: evaluate the working residual
# (y - μ)/(dμ/dη) and the working weight (dμ/dη)²/(μ(1 - μ)) directly.
function _weights_residuals(yᵢ, ηᵢ, μᵢ, omμᵢ, dμdηᵢ, l::Link01)
    resid = ifelse(yᵢ == 1, omμᵢ, yᵢ - μᵢ)/dμdηᵢ
    weight = abs2(dμdηᵢ)/(μᵢ*omμᵢ)
    return resid, weight
end
# Specialised update for Bernoulli/Binomial responses with a (0, 1) link:
# `inverselink` returns the (μ, 1-μ, dμ/dη) triple used by the robust kernels.
function updateμ!(r::GlmResp{V,D,L}) where {V<:FPVector,D<:Union{Bernoulli,Binomial},L<:Link01}
    y, η, μ, wrkres, wrkwt, dres = r.y, r.eta, r.mu, r.wrkresid, r.wrkwt, r.devresid

    @inbounds for i in eachindex(y, η, μ, wrkres, wrkwt, dres)
        yᵢ, ηᵢ = y[i], η[i]
        μᵢ, omμᵢ, dμdηᵢ = inverselink(L(), ηᵢ)
        μ[i] = μᵢ

        # For large values of ηᵢ the quantities dμdη and μomμ will underflow.
        # The ratios defining (yᵢ - μᵢ)/dμdη and dμdη^2/μomμ have fairly stable
        # tail behavior so we can switch algorithm to avoid 0/0. The behavior
        # is specific to the link function so _weights_residuals dispatches to
        # robust versions for LogitLink and ProbitLink
        wrkres[i], wrkwt[i] = _weights_residuals(yᵢ, ηᵢ, μᵢ, omμᵢ, dμdηᵢ, L())

        dres[i] = devresid(r.d, yᵢ, μᵢ)
    end
end
# Specialised update for a negative binomial response with its canonical link.
# The shape parameter θ is constant over the fit, so it (and the link object
# built from it) is hoisted out of the loop; previously `r.d.r` was re-read and
# a new link constructed on every iteration.
function updateμ!(r::GlmResp{V,D,L}) where {V<:FPVector,D<:NegativeBinomial,L<:NegativeBinomialLink}
    y, η, μ, wrkres, wrkwt, dres = r.y, r.eta, r.mu, r.wrkresid, r.wrkwt, r.devresid
    θ = r.d.r # the shape parameter of the negative binomial distribution
    link = L(θ)

    @inbounds for i in eachindex(y, η, μ, wrkres, wrkwt, dres)
        μi, dμdη, μomμ = inverselink(link, η[i])
        μ[i] = μi
        yi = y[i]
        wrkres[i] = (yi - μi) / dμdη
        # Canonical link: the working weight reduces to dμ/dη (see `cancancel`)
        wrkwt[i] = dμdη
        dres[i] = devresid(r.d, yi, μi)
    end
end
"""
wrkresp(r::GlmResp)
The working response, `r.eta + r.wrkresid - r.offset`.
"""
wrkresp(r::GlmResp) = wrkresp!(similar(r.eta), r)
"""
wrkresp!{T<:FPVector}(v::T, r::GlmResp{T})
Overwrite `v` with the working response of `r`
"""
function wrkresp!(v::T, r::GlmResp{T}) where T<:FPVector
broadcast!(+, v, r.eta, r.wrkresid)
isempty(r.offset) ? v : broadcast!(-, v, v, r.offset)
end
# Common supertype of generalized linear models built on a linear predictor.
abstract type AbstractGLM <: LinPredModel end

# A (possibly not yet fitted) GLM: response `rr`, predictor `pp`, a flag
# recording whether the IRLS fit has run, and the convergence settings used.
mutable struct GeneralizedLinearModel{G<:GlmResp,L<:LinPred} <: AbstractGLM
    rr::G
    pp::L
    fit::Bool
    maxiter::Int
    minstepfac::Float64
    atol::Float64
    rtol::Float64
end

# Convenience constructor: convergence settings are filled in later by `fit!`.
GeneralizedLinearModel(rr::GlmResp, pp::LinPred, fit::Bool) =
    GeneralizedLinearModel(rr, pp, fit, 0, NaN, NaN, NaN)
# Coefficient table with Wald z statistics, p-values and confidence bounds
# at coverage `level`.
function coeftable(mm::AbstractGLM; level::Real=0.95)
    cc = coef(mm)
    se = stderror(mm)
    zz = cc ./ se
    p = 2 * ccdf.(Ref(Normal()), abs.(zz))
    # quantile at (1-level)/2 is negative, so cc+ci is the lower bound
    ci = se*quantile(Normal(), (1-level)/2)
    levstr = isinteger(level*100) ? string(Integer(level*100)) : string(level*100)
    CoefTable(hcat(cc,se,zz,p,cc+ci,cc-ci),
              ["Coef.","Std. Error","z","Pr(>|z|)","Lower $levstr%","Upper $levstr%"],
              ["x$i" for i = 1:size(mm.pp.X, 2)], 4, 3)
end
# Wald confidence intervals for the coefficients: estimate ± z·se, returned as
# a matrix with lower bounds in column 1 and upper bounds in column 2.
function confint(obj::AbstractGLM; level::Real=0.95)
    hcat(coef(obj),coef(obj)) + stderror(obj)*quantile(Normal(),(1. -level)/2.)*[1. -1.]
end

deviance(m::AbstractGLM) = deviance(m.rr)
# Deviance of the null model (intercept only, or the empty model when the fit
# has no intercept).  Uses a closed-form mean when there is no offset;
# otherwise an explicit null model is fitted with the same settings.
function nulldeviance(m::GeneralizedLinearModel)
    r = m.rr
    wts = weights(r.wts)
    y = r.y
    d = r.d
    offset = r.offset
    hasint = hasintercept(m)
    dev = zero(eltype(y))
    if isempty(offset) # Faster method
        if !isempty(wts)
            mu = hasint ?
                mean(y, wts) :
                linkinv(r.link, zero(eltype(y))*zero(eltype(wts))/1)
            @inbounds for i in eachindex(y, wts)
                dev += wts[i] * devresid(d, y[i], mu)
            end
        else
            mu = hasint ? mean(y) : linkinv(r.link, zero(eltype(y))/1)
            @inbounds for i in eachindex(y)
                dev += devresid(d, y[i], mu)
            end
        end
    else
        # Refit a null model with the same collinearity handling and
        # convergence settings as the original fit
        X = fill(1.0, length(y), hasint ? 1 : 0)
        nullm = fit(GeneralizedLinearModel,
                    X, y, d, r.link; wts=wts, offset=offset,
                    dropcollinear=isa(m.pp.chol, CholeskyPivoted),
                    maxiter=m.maxiter, minstepfac=m.minstepfac,
                    atol=m.atol, rtol=m.rtol)
        dev = deviance(nullm)
    end
    return dev
end
# Log-likelihood of the fitted model; the dispersion ϕ is estimated as the
# deviance divided by the (weighted) number of observations.
function loglikelihood(m::AbstractGLM)
    r = m.rr
    wts = r.wts
    y = r.y
    mu = r.mu
    d = r.d
    ll = zero(eltype(mu))
    if !isempty(wts)
        ϕ = deviance(m)/sum(wts)
        @inbounds for i in eachindex(y, mu, wts)
            ll += loglik_obs(d, y[i], mu[i], wts[i], ϕ)
        end
    else
        ϕ = deviance(m)/length(y)
        @inbounds for i in eachindex(y, mu)
            ll += loglik_obs(d, y[i], mu[i], 1, ϕ)
        end
    end
    ll
end
# Log-likelihood of the null (intercept-only) model, mirroring `nulldeviance`:
# closed form when there is no offset, explicit null fit otherwise.
function nullloglikelihood(m::GeneralizedLinearModel)
    r = m.rr
    wts = r.wts
    y = r.y
    d = r.d
    offset = r.offset
    hasint = hasintercept(m)
    ll = zero(eltype(y))
    if isempty(r.offset) # Faster method
        if !isempty(wts)
            mu = hasint ? mean(y, weights(wts)) : linkinv(r.link, zero(ll)/1)
            ϕ = nulldeviance(m)/sum(wts)
            @inbounds for i in eachindex(y, wts)
                ll += loglik_obs(d, y[i], mu, wts[i], ϕ)
            end
        else
            mu = hasint ? mean(y) : linkinv(r.link, zero(ll)/1)
            ϕ = nulldeviance(m)/length(y)
            @inbounds for i in eachindex(y)
                ll += loglik_obs(d, y[i], mu, 1, ϕ)
            end
        end
    else
        # Refit the null model with the same settings as the original fit
        X = fill(1.0, length(y), hasint ? 1 : 0)
        nullm = fit(GeneralizedLinearModel,
                    X, y, d, r.link; wts=wts, offset=offset,
                    dropcollinear=isa(m.pp.chol, CholeskyPivoted),
                    maxiter=m.maxiter, minstepfac=m.minstepfac,
                    atol=m.atol, rtol=m.rtol)
        ll = loglikelihood(nullm)
    end
    return ll
end
# Degrees of freedom: rank of the linear predictor, plus one when the response
# distribution carries a dispersion parameter.
function dof(x::GeneralizedLinearModel)
    extra = dispersion_parameter(x.rr.d) ? 1 : 0
    return linpred_rank(x.pp) + extra
end
# Internal IRLS driver: iterate reweighted least squares with step-halving
# until the deviance decrease falls below `max(rtol*devold, atol)`.
function _fit!(m::AbstractGLM, verbose::Bool, maxiter::Integer, minstepfac::Real,
               atol::Real, rtol::Real, start)
    # Return early if model has the fit flag set
    m.fit && return m

    # Check arguments
    maxiter >= 1 || throw(ArgumentError("maxiter must be positive"))
    0 < minstepfac < 1 || throw(ArgumentError("minstepfac must be in (0, 1)"))

    # Extract fields and set convergence flag
    cvg, p, r = false, m.pp, m.rr
    lp = r.mu

    # Initialize β, μ, and compute deviance
    if start == nothing || isempty(start)
        # Compute beta update based on default response value
        # if no starting values have been passed
        delbeta!(p, wrkresp(r), r.wrkwt)
        linpred!(lp, p)
        updateμ!(r, lp)
        installbeta!(p)
    else
        # otherwise copy starting values for β
        copy!(p.beta0, start)
        fill!(p.delbeta, 0)
        linpred!(lp, p, 0)
        updateμ!(r, lp)
    end
    devold = deviance(m)

    for i = 1:maxiter
        f = 1.0 # line search factor
        local dev

        # Compute the change to β, update μ and compute deviance
        try
            delbeta!(p, r.wrkresid, r.wrkwt)
            linpred!(lp, p)
            updateμ!(r, lp)
            dev = deviance(m)
        catch e
            # A DomainError signals an infeasible step; force step-halving
            isa(e, DomainError) ? (dev = Inf) : rethrow(e)
        end

        # Line search
        ## If the deviance isn't declining then halve the step size
        ## The rtol*dev term is to avoid failure when deviance
        ## is unchanged except for rounding errors.
        while dev > devold + rtol*dev
            f /= 2
            f > minstepfac || error("step-halving failed at beta0 = $(p.beta0)")
            try
                updateμ!(r, linpred(p, f))
                dev = deviance(m)
            catch e
                isa(e, DomainError) ? (dev = Inf) : rethrow(e)
            end
        end
        installbeta!(p, f)

        # Test for convergence
        verbose && println("Iteration: $i, deviance: $dev, diff.dev.:$(devold - dev)")
        if devold - dev < max(rtol*devold, atol)
            cvg = true
            break
        end
        @assert isfinite(dev)
        devold = dev
    end
    cvg || throw(ConvergenceException(maxiter))
    m.fit = true
    m
end
# Fit (or re-run) the IRLS algorithm on an existing model `m`, recording the
# convergence settings on the model object.
function StatsBase.fit!(m::AbstractGLM;
                        verbose::Bool=false,
                        maxiter::Integer=30,
                        minstepfac::Real=0.001,
                        atol::Real=1e-6,
                        rtol::Real=1e-6,
                        start=nothing,
                        kwargs...)
    # Translate deprecated keyword spellings before validating, so their
    # deprecation warnings are actually reachable.
    if haskey(kwargs, :maxIter)
        Base.depwarn("'maxIter' argument is deprecated, use 'maxiter' instead", :fit!)
        maxiter = kwargs[:maxIter]
    end
    if haskey(kwargs, :minStepFac)
        Base.depwarn("'minStepFac' argument is deprecated, use 'minstepfac' instead", :fit!)
        minstepfac = kwargs[:minStepFac]
    end
    if haskey(kwargs, :convTol)
        Base.depwarn("'convTol' argument is deprecated, use `atol` and `rtol` instead", :fit!)
        rtol = kwargs[:convTol]
    end
    if haskey(kwargs, :tol)
        Base.depwarn("`tol` argument is deprecated, use `atol` and `rtol` instead", :fit!)
        rtol = kwargs[:tol]
    end
    # Fix: `:tol` is now part of the accepted set.  Previously this check ran
    # before the `:tol` deprecation branch and rejected it, making that branch
    # unreachable dead code.
    if !issubset(keys(kwargs), (:maxIter, :minStepFac, :convTol, :tol))
        throw(ArgumentError("unsupported keyword argument"))
    end

    m.maxiter = maxiter
    m.minstepfac = minstepfac
    m.atol = atol
    m.rtol = rtol

    _fit!(m, verbose, maxiter, minstepfac, atol, rtol, start)
end
# Re-fit an existing GLM `m` against a new response vector `y` (and optionally
# new prior weights and offset), re-initialising the linear predictor first.
function StatsBase.fit!(m::AbstractGLM,
                        y;
                        wts=nothing,
                        offset=nothing,
                        dofit::Bool=true,
                        verbose::Bool=false,
                        maxiter::Integer=30,
                        minstepfac::Real=0.001,
                        atol::Real=1e-6,
                        rtol::Real=1e-6,
                        start=nothing,
                        kwargs...)
    # Translate deprecated keyword spellings before validating, so their
    # deprecation warnings are reachable (previously `:tol` was rejected by
    # the validity check before its branch could run).
    if haskey(kwargs, :maxIter)
        Base.depwarn("'maxIter' argument is deprecated, use 'maxiter' instead", :fit!)
        maxiter = kwargs[:maxIter]
    end
    if haskey(kwargs, :minStepFac)
        Base.depwarn("'minStepFac' argument is deprecated, use 'minstepfac' instead", :fit!)
        minstepfac = kwargs[:minStepFac]
    end
    if haskey(kwargs, :convTol)
        Base.depwarn("'convTol' argument is deprecated, use `atol` and `rtol` instead", :fit!)
        rtol = kwargs[:convTol]
    end
    if haskey(kwargs, :tol)
        Base.depwarn("`tol` argument is deprecated, use `atol` and `rtol` instead", :fit!)
        rtol = kwargs[:tol]
    end
    if !issubset(keys(kwargs), (:maxIter, :minStepFac, :convTol, :tol))
        throw(ArgumentError("unsupported keyword argument"))
    end

    r = m.rr
    # `copy!` mutates the stored vectors in place.  `GlmResp` is an immutable
    # struct, so its fields must not be re-assigned: the original code did
    # `r.y = copy!(r.y, y)`, which throws for an immutable struct (and also
    # bound an unused local `V = typeof(r.y)`).
    copy!(r.y, y)
    isa(wts, Nothing) || copy!(r.wts, wts)
    isa(offset, Nothing) || copy!(r.offset, offset)
    # Fix: the link is stored in the field `link`, not `l` (the original
    # accessed `r.l`, which does not exist on `GlmResp`).
    initialeta!(r.eta, r.d, r.link, r.y, r.wts, r.offset)
    updateμ!(r, r.eta)
    fill!(m.pp.beta0, 0)
    m.fit = false

    m.maxiter = maxiter
    m.minstepfac = minstepfac
    m.atol = atol
    m.rtol = rtol

    if dofit
        _fit!(m, verbose, maxiter, minstepfac, atol, rtol, start)
    else
        m
    end
end
# Shared keyword-argument documentation, interpolated into the docstrings of
# `fit(::Type{<:AbstractGLM}, ...)` and `glm` below.
const FIT_GLM_DOC = """
In the first method, `formula` must be a
[StatsModels.jl `Formula` object](https://juliastats.org/StatsModels.jl/stable/formula/)
and `data` a table (in the [Tables.jl](https://tables.juliadata.org/stable/) definition, e.g. a data frame).
In the second method, `X` must be a matrix holding values of the independent variable(s)
in columns (including if appropriate the intercept), and `y` must be a vector holding
values of the dependent variable.
In both cases, `distr` must specify the distribution, and `link` may specify the link
function (if omitted, it is taken to be the canonical link for `distr`; see [`Link`](@ref)
for a list of built-in links).
# Keyword Arguments
- `dropcollinear::Bool=true`: Controls whether or not `lm` accepts a model matrix which
is less-than-full rank.
If `true` (the default) the coefficient for redundant linearly dependent columns is
`0.0` and all associated statistics are set to `NaN`.
Typically from a set of linearly-dependent columns the last ones are identified as redundant
(however, the exact selection of columns identified as redundant is not guaranteed).
- `dofit::Bool=true`: Determines whether model will be fit
- `wts::Vector=similar(y,0)`: Prior frequency (a.k.a. case) weights of observations.
Such weights are equivalent to repeating each observation a number of times equal
to its weight. Do note that this interpretation gives equal point estimates but
different standard errors from analytical (a.k.a. inverse variance) weights and
from probability (a.k.a. sampling) weights which are the default in some other
software.
Can be length 0 to indicate no weighting (default).
- `offset::Vector=similar(y,0)`: offset added to `Xβ` to form `eta`. Can be of
length 0
- `verbose::Bool=false`: Display convergence information for each iteration
- `maxiter::Integer=30`: Maximum number of iterations allowed to achieve convergence
- `atol::Real=1e-6`: Convergence is achieved when the relative change in
deviance is less than `max(rtol*dev, atol)`.
- `rtol::Real=1e-6`: Convergence is achieved when the relative change in
deviance is less than `max(rtol*dev, atol)`.
- `minstepfac::Real=0.001`: Minimum line step fraction. Must be between 0 and 1.
- `start::AbstractVector=nothing`: Starting values for beta. Should have the
same length as the number of columns in the model matrix.
"""
"""
fit(GeneralizedLinearModel, formula, data,
distr::UnivariateDistribution, link::Link = canonicallink(d); <keyword arguments>)
fit(GeneralizedLinearModel, X::AbstractMatrix, y::AbstractVector,
distr::UnivariateDistribution, link::Link = canonicallink(d); <keyword arguments>)
Fit a generalized linear model to data.
$FIT_GLM_DOC
"""
function fit(::Type{M},
X::AbstractMatrix{<:FP},
y::AbstractVector{<:Real},
d::UnivariateDistribution,
l::Link = canonicallink(d);
dropcollinear::Bool = true,
dofit::Bool = true,
wts::AbstractVector{<:Real} = similar(y, 0),
offset::AbstractVector{<:Real} = similar(y, 0),
fitargs...) where {M<:AbstractGLM}
# Check that X and y have the same number of observations
if size(X, 1) != size(y, 1)
throw(DimensionMismatch("number of rows in X and y must match"))
end
rr = GlmResp(y, d, l, offset, wts)
res = M(rr, cholpred(X, dropcollinear), false)
return dofit ? fit!(res; fitargs...) : res
end
# Entry point for non-float data: promote `X` and `y` to floating point and
# forward to the floating-point method above.
function fit(::Type{M},
             X::AbstractMatrix,
             y::AbstractVector,
             d::UnivariateDistribution,
             l::Link=canonicallink(d); kwargs...) where {M<:AbstractGLM}
    return fit(M, float(X), float(y), d, l; kwargs...)
end
"""
glm(formula, data,
distr::UnivariateDistribution, link::Link = canonicallink(distr); <keyword arguments>)
glm(X::AbstractMatrix, y::AbstractVector,
distr::UnivariateDistribution, link::Link = canonicallink(distr); <keyword arguments>)
Fit a generalized linear model to data. Alias for `fit(GeneralizedLinearModel, ...)`.
$FIT_GLM_DOC
"""
glm(X, y, args...; kwargs...) = fit(GeneralizedLinearModel, X, y, args...; kwargs...)
# Accessors for the link function of a response/model.
GLM.Link(r::GlmResp) = r.link
GLM.Link(m::GeneralizedLinearModel) = Link(m.rr)

# Note: these return the distribution *type* parameter `D`, not an instance.
Distributions.Distribution(r::GlmResp{T,D,L}) where {T,D,L} = D
Distributions.Distribution(m::GeneralizedLinearModel) = Distribution(m.rr)
"""
dispersion(m::AbstractGLM, sqr::Bool=false)
Return the estimated dispersion (or scale) parameter for a model's distribution,
generally written σ for linear models and ϕ for generalized linear models.
It is, by definition, equal to 1 for the Bernoulli, Binomial, and Poisson families.
If `sqr` is `true`, the squared dispersion parameter is returned.
"""
function dispersion(m::AbstractGLM, sqr::Bool=false)
r = m.rr
if dispersion_parameter(r.d)
wrkwt, wrkresid = r.wrkwt, r.wrkresid
dofr = dof_residual(m)
s = sum(i -> wrkwt[i] * abs2(wrkresid[i]), eachindex(wrkwt, wrkresid)) / dofr
dofr > 0 || return oftype(s, Inf)
sqr ? s : sqrt(s)
else
one(eltype(r.mu))
end
end
"""
predict(mm::AbstractGLM, newX::AbstractMatrix; offset::FPVector=eltype(newX)[],
interval::Union{Symbol,Nothing}=nothing, level::Real = 0.95,
interval_method::Symbol = :transformation)
Return the predicted response of model `mm` from covariate values `newX` and,
optionally, an `offset`.
If `interval=:confidence`, also return upper and lower bounds for a given coverage `level`.
By default (`interval_method = :transformation`) the intervals are constructed by applying
the inverse link to intervals for the linear predictor. If `interval_method = :delta`,
the intervals are constructed by the delta method, i.e., by linearization of the predicted
response around the linear predictor. The `:delta` method intervals are symmetric around
the point estimates, but do not respect natural parameter constraints
(e.g., the lower bound for a probability could be negative).
"""
function predict(mm::AbstractGLM, newX::AbstractMatrix;
offset::FPVector=eltype(newX)[],
interval::Union{Symbol,Nothing}=nothing,
level::Real=0.95,
interval_method=:transformation)
eta = newX * coef(mm)
if !isempty(mm.rr.offset)
length(offset) == size(newX, 1) ||
throw(ArgumentError("fit with offset, so `offset` kw arg must be an offset of length `size(newX, 1)`"))
broadcast!(+, eta, eta, offset)
else
length(offset) > 0 && throw(ArgumentError("fit without offset, so value of `offset` kw arg does not make sense"))
end
mu = linkinv.(Link(mm), eta)
if interval === nothing
return mu
elseif interval == :confidence
normalquantile = quantile(Normal(), (1 + level)/2)
# Compute confidence intervals in two steps
# (2nd step varies depending on `interval_method`)
# 1. Estimate variance for eta based on variance for coefficients
# through the diagonal of newX*vcov(mm)*newX'
vcovXnewT = vcov(mm)*newX'
stdeta = [sqrt(dot(view(newX, i, :), view(vcovXnewT, :, i))) for i in axes(newX,1)]
if interval_method == :delta
# 2. Now compute the variance for mu based on variance of eta and
# construct intervals based on that (Delta method)
stdmu = stdeta .* abs.(mueta.(Link(mm), eta))
lower = mu .- normalquantile .* stdmu
upper = mu .+ normalquantile .* stdmu
elseif interval_method == :transformation
# 2. Construct intervals for eta, then apply inverse link
lower = linkinv.(Link(mm), eta .- normalquantile .* stdeta)
upper = linkinv.(Link(mm), eta .+ normalquantile .* stdeta)
else
throw(ArgumentError("interval_method can be only :transformation or :delta"))
end
else
throw(ArgumentError("only :confidence intervals are defined"))
end
(prediction = mu, lower = lower, upper = upper)
end
# A helper function to choose default values for eta: maps each response value
# through `mustart` and the link function, then removes the offset contribution.
function initialeta!(eta::AbstractVector,
                     dist::UnivariateDistribution,
                     link::Link,
                     y::AbstractVector,
                     wts::AbstractVector,
                     off::AbstractVector)
    n = length(y)
    lw = length(wts)
    lo = length(off)

    if lw == n
        @inbounds @simd for i = eachindex(y, eta, wts)
            μ = mustart(dist, y[i], wts[i])
            eta[i] = linkfun(link, μ)
        end
    elseif lw == 0
        # Unweighted: every observation gets unit weight
        @inbounds @simd for i = eachindex(y, eta)
            μ = mustart(dist, y[i], 1)
            eta[i] = linkfun(link, μ)
        end
    else
        throw(ArgumentError("length of wts must be either $n or 0 but was $lw"))
    end

    # Remove the offset contribution from the initial η
    if lo == n
        @inbounds @simd for i = eachindex(eta, off)
            eta[i] -= off[i]
        end
    elseif lo != 0
        throw(ArgumentError("length of off must be either $n or 0 but was $lo"))
    end

    return eta
end
# Validate that every response value lies in the support of distribution `d`.
function checky(y, d::Distribution)
    all(x -> insupport(d, x), y) ||
        throw(ArgumentError("y must be in the support of D"))
    return nothing
end
# Binomial responses are proportions of successes: each must lie in [0, 1].
function checky(y, d::Binomial)
    for yy in y
        if !(0 ≤ yy ≤ 1)
            throw(ArgumentError("$yy in y is not in [0,1]"))
        end
    end
    return nothing
end
| GLM | https://github.com/JuliaStats/GLM.jl.git |
|
[
"MIT"
"""
    Link

An abstract type whose subtypes refer to link functions.

GLM currently supports the following links:
[`CauchitLink`](@ref), [`CloglogLink`](@ref), [`IdentityLink`](@ref),
[`InverseLink`](@ref), [`InverseSquareLink`](@ref), [`LogitLink`](@ref),
[`LogLink`](@ref), [`NegativeBinomialLink`](@ref), [`PowerLink`](@ref), [`ProbitLink`](@ref),
[`SqrtLink`](@ref).

Subtypes of `Link` are required to implement methods for
[`GLM.linkfun`](@ref), [`GLM.linkinv`](@ref), [`GLM.mueta`](@ref),
and [`GLM.inverselink`](@ref).
"""
abstract type Link end

# Make links broadcast like a scalar
Base.Broadcast.broadcastable(l::Link) = Ref(l)

"""
    Link01

An abstract subtype of [`Link`](@ref) which are links defined on (0, 1)
"""
abstract type Link01 <: Link end

"""
    CauchitLink

A [`Link01`](@ref) corresponding to the standard Cauchy distribution,
[`Distributions.Cauchy`](https://juliastats.org/Distributions.jl/stable/univariate/#Distributions.Cauchy).
"""
struct CauchitLink <: Link01 end

"""
    CloglogLink

A [`Link01`](@ref) corresponding to the extreme value (or log-Weibull) distribution. The
link is the complementary log-log transformation, `log(-log(1 - μ))`.
"""
struct CloglogLink <: Link01 end

"""
    IdentityLink

The canonical [`Link`](@ref) for the `Normal` distribution, defined as `η = μ`.
"""
struct IdentityLink <: Link end

"""
    InverseLink

The canonical [`Link`](@ref) for [`Distributions.Gamma`](https://juliastats.org/Distributions.jl/stable/univariate/#Distributions.Gamma) distribution, defined as `η = inv(μ)`.
"""
struct InverseLink <: Link end

"""
    InverseSquareLink

The canonical [`Link`](@ref) for [`Distributions.InverseGaussian`](https://juliastats.org/Distributions.jl/stable/univariate/#Distributions.InverseGaussian) distribution, defined as `η = inv(abs2(μ))`.
"""
struct InverseSquareLink <: Link end

"""
    LogitLink

The canonical [`Link01`](@ref) for [`Distributions.Bernoulli`](https://juliastats.org/Distributions.jl/stable/univariate/#Distributions.Bernoulli) and [`Distributions.Binomial`](https://juliastats.org/Distributions.jl/stable/univariate/#Distributions.Binomial).
The inverse link, [`linkinv`](@ref), is the c.d.f. of the standard logistic distribution,
[`Distributions.Logistic`](https://juliastats.org/Distributions.jl/stable/univariate/#Distributions.Logistic).
"""
struct LogitLink <: Link01 end

"""
    LogLink

The canonical [`Link`](@ref) for [`Distributions.Poisson`](https://juliastats.org/Distributions.jl/stable/univariate/#Distributions.Poisson), defined as `η = log(μ)`.
"""
struct LogLink <: Link end

"""
    NegativeBinomialLink

The canonical [`Link`](@ref) for [`Distributions.NegativeBinomial`](https://juliastats.org/Distributions.jl/stable/univariate/#Distributions.NegativeBinomial) distribution, defined as `η = log(μ/(μ+θ))`.
The shape parameter θ has to be fixed for the distribution to belong to the exponential family.
"""
struct NegativeBinomialLink <: Link
    θ::Float64  # fixed shape parameter of the negative binomial distribution
end
"""
PowerLink
A [`Link`](@ref) defined as `η = μ^λ` when `λ ≠ 0`, and to `η = log(μ)` when `λ = 0`,
i.e. the class of transforms that use a power function or logarithmic function.
Many other links are special cases of `PowerLink`:
- [`IdentityLink`](@ref) when λ = 1.
- [`SqrtLink`](@ref) when λ = 0.5.
- [`LogLink`](@ref) when λ = 0.
- [`InverseLink`](@ref) when λ = -1.
- [`InverseSquareLink`](@ref) when λ = -2.
"""
struct PowerLink <: Link
λ::Float64
end
"""
ProbitLink
A [`Link01`](@ref) whose [`linkinv`](@ref) is the c.d.f. of the standard normal
distribution, [`Distributions.Normal()`](https://juliastats.org/Distributions.jl/stable/univariate/#Distributions.Normal).
"""
struct ProbitLink <: Link01 end
"""
SqrtLink
A [`Link`](@ref) defined as `η = √μ`
"""
struct SqrtLink <: Link end
"""
GLM.linkfun(L::Link, μ::Real)
Return `η`, the value of the linear predictor for link `L` at mean `μ`.
# Examples
```jldoctest; setup = :(using GLM: linkfun, LogitLink)
julia> μ = inv(10):inv(5):1
0.1:0.2:0.9
julia> show(linkfun.(LogitLink(), μ))
[-2.197224577336219, -0.8472978603872036, 0.0, 0.8472978603872034, 2.1972245773362196]
```
"""
function linkfun end
"""
GLM.linkinv(L::Link, η::Real)
Return `μ`, the mean value, for link `L` at linear predictor value `η`.
# Examples
```jldoctest; setup = :(using GLM: logit, linkinv, LogitLink)
julia> μ = 0.1:0.2:1
0.1:0.2:0.9
julia> η = logit.(μ);
julia> linkinv.(LogitLink(), η) ≈ μ
true
```
"""
function linkinv end
"""
GLM.mueta(L::Link, η::Real)
Return the derivative of [`linkinv`](@ref), `dμ/dη`, for link `L` at linear predictor value `η`.
# Examples
```jldoctest; setup = :(using GLM: mueta, LogitLink, CloglogLink, LogLink)
julia> mueta(LogitLink(), 0.0)
0.25
julia> mueta(CloglogLink(), 0.0) ≈ 0.36787944117144233
true
julia> mueta(LogLink(), 2.0) ≈ 7.38905609893065
true
```
"""
function mueta end
"""
GLM.inverselink(L::Link, η::Real)
Return a 3-tuple of the inverse link, the derivative of the inverse link, and when appropriate, the variance function `μ*(1 - μ)`.
The variance function is returned as NaN unless the range of μ is (0, 1)
# Examples
```jldoctest; setup = :(using GLM)
julia> GLM.inverselink(LogitLink(), 0.0)
(0.5, 0.5, 0.25)
julia> μ, oneminusμ, variance = GLM.inverselink(CloglogLink(), 0.0);
julia> μ + oneminusμ ≈ 1
true
julia> μ*(1 - μ) ≈ variance
false
julia> isnan(last(GLM.inverselink(LogLink(), 2.0)))
true
```
"""
function inverselink end
"""
canonicallink(D::Distribution)
Return the canonical link for distribution `D`, which must be in the exponential family.
# Examples
```jldoctest; setup = :(using GLM)
julia> canonicallink(Bernoulli())
LogitLink()
```
"""
function canonicallink end
linkfun(::CauchitLink, μ::Real) = tan(pi * (μ - oftype(μ, 1/2)))
linkinv(::CauchitLink, η::Real) = oftype(η, 1/2) + atan(η) / pi
mueta(::CauchitLink, η::Real) = one(η) / (pi * (one(η) + abs2(η)))
function inverselink(::CauchitLink, η::Real)
# atan decays so slowly that we don't need to be careful when evaluating μ
μ = atan(η) / π
μ += one(μ)/2
return μ, 1 - μ, inv(π * (1 + abs2(η)))
end
linkfun(::CloglogLink, μ::Real) = log(-log1p(-μ))
linkinv(::CloglogLink, η::Real) = -expm1(-exp(η))
mueta(::CloglogLink, η::Real) = exp(η) * exp(-exp(η))
function inverselink(::CloglogLink, η::Real)
expη = exp(η)
μ = -expm1(-expη)
omμ = exp(-expη) # the complement, 1 - μ
return μ, omμ, expη * omμ
end
linkfun(::IdentityLink, μ::Real) = μ
linkinv(::IdentityLink, η::Real) = η
mueta(::IdentityLink, η::Real) = one(η)
inverselink(::IdentityLink, η::Real) = η, one(η), convert(float(typeof(η)), NaN)
linkfun(::InverseLink, μ::Real) = inv(μ)
linkinv(::InverseLink, η::Real) = inv(η)
mueta(::InverseLink, η::Real) = -inv(abs2(η))
function inverselink(::InverseLink, η::Real)
μ = inv(η)
return μ, -abs2(μ), convert(float(typeof(μ)), NaN)
end
linkfun(::InverseSquareLink, μ::Real) = inv(abs2(μ))
linkinv(::InverseSquareLink, η::Real) = inv(sqrt(η))
mueta(::InverseSquareLink, η::Real) = -inv(2η*sqrt(η))
function inverselink(::InverseSquareLink, η::Real)
μ = inv(sqrt(η))
return μ, -μ / (2η), convert(float(typeof(μ)), NaN)
end
linkfun(::LogitLink, μ::Real) = logit(μ)
linkinv(::LogitLink, η::Real) = logistic(η)
function mueta(::LogitLink, η::Real)
    # Work with exp(-|η|) ∈ (0, 1] so the intermediate never overflows for
    # large |η|; the derivative μ(1-μ) is symmetric in the sign of η.
    expabs = exp(-abs(η))
    denom = 1 + expabs
    return (expabs / denom) / denom
end
function inverselink(::LogitLink, η::Real)
    # Same overflow-safe trick as `mueta`: compute with exp(-|η|) and then
    # assign μ and its complement 1-μ according to the sign of η. Returning
    # the complement explicitly preserves precision when μ is close to 1.
    expabs = exp(-abs(η))
    opexpabs = 1 + expabs
    deriv = (expabs / opexpabs) / opexpabs
    if η < 0
        μ, omμ = expabs / opexpabs, 1 / opexpabs
    else
        μ, omμ = 1 / opexpabs, expabs / opexpabs
    end
    return μ, omμ, deriv
end
linkfun(::LogLink, μ::Real) = log(μ)
linkinv(::LogLink, η::Real) = exp(η)
mueta(::LogLink, η::Real) = exp(η)
function inverselink(::LogLink, η::Real)
μ = exp(η)
return μ, μ, convert(float(typeof(μ)), NaN)
end
# For NegativeBinomialLink, η = log(μ / (μ + θ)), so exp(η) = μ / (μ + θ) and
# inverting gives μ = θ·exp(η) / (1 - exp(η)) = -exp(η)·θ / expm1(η).
linkfun(nbl::NegativeBinomialLink, μ::Real) = log(μ / (μ + nbl.θ))
linkinv(nbl::NegativeBinomialLink, η::Real) = -exp(η) * nbl.θ / expm1(η)
# dμ/dη = θ·exp(η) / (1 - exp(η))² = θ·exp(η) / expm1(η)², which equals
# μ·(1 + μ/θ) — matching the derivative returned by `inverselink` below.
# (The previous definition duplicated `linkinv`, returning μ itself instead
# of the derivative.)
mueta(nbl::NegativeBinomialLink, η::Real) = exp(η) * nbl.θ / abs2(expm1(η))
function inverselink(nbl::NegativeBinomialLink, η::Real)
    μ = -exp(η) * nbl.θ / expm1(η)
    # dμ/dη expressed through μ; the third slot is NaN because the range of
    # μ is (0, ∞), not (0, 1), so the μ(1-μ) variance factor does not apply.
    deriv = μ * (1 + μ / nbl.θ)
    return μ, deriv, convert(float(typeof(μ)), NaN)
end
linkfun(pl::PowerLink, μ::Real) = pl.λ == 0 ? log(μ) : μ^pl.λ
linkinv(pl::PowerLink, η::Real) = pl.λ == 0 ? exp(η) : η^(1 / pl.λ)
function mueta(pl::PowerLink, η::Real)
if pl.λ == 0
return exp(η)
else
invλ = inv(pl.λ)
return invλ * η^(invλ - 1)
end
end
function inverselink(pl::PowerLink, η::Real)
if pl.λ == 0
μ = exp(η)
return μ, μ, convert(float(typeof(η)), NaN)
else
invλ = inv(pl.λ)
return η^invλ, invλ * η^(invλ - 1), convert(float(typeof(η)), NaN)
end
end
linkfun(::ProbitLink, μ::Real) = -sqrt2 * erfcinv(2μ)
linkinv(::ProbitLink, η::Real) = erfc(-η / sqrt2) / 2
mueta(::ProbitLink, η::Real) = exp(-abs2(η) / 2) / sqrt2π
function inverselink(::ProbitLink, η::Real)
μ = cdf(Normal(), η)
omμ = ccdf(Normal(), η)
return μ, omμ, pdf(Normal(), η)
end
linkfun(::SqrtLink, μ::Real) = sqrt(μ)
linkinv(::SqrtLink, η::Real) = abs2(η)
mueta(::SqrtLink, η::Real) = 2η
inverselink(::SqrtLink, η::Real) = abs2(η), 2η, convert(float(typeof(η)), NaN)
canonicallink(::Bernoulli) = LogitLink()
canonicallink(::Binomial) = LogitLink()
canonicallink(::Gamma) = InverseLink()
canonicallink(::Geometric) = LogLink()
canonicallink(::InverseGaussian) = InverseSquareLink()
canonicallink(d::NegativeBinomial) = NegativeBinomialLink(d.r)
canonicallink(::Normal) = IdentityLink()
canonicallink(::Poisson) = LogLink()
"""
GLM.glmvar(D::Distribution, μ::Real)
Return the value of the variance function for `D` at `μ`
The variance of `D` at `μ` is the product of the dispersion parameter, ϕ, which does not
depend on `μ` and the value of `glmvar`. In other words `glmvar` returns the factor of the
variance that depends on `μ`.
# Examples
```jldoctest; setup = :(using GLM: glmvar, Normal, Bernoulli, Poisson, Geometric)
julia> μ = 1/6:1/3:1;
julia> glmvar.(Normal(), μ) # constant for Normal()
3-element Vector{Float64}:
1.0
1.0
1.0
julia> glmvar.(Bernoulli(), μ) ≈ μ .* (1 .- μ)
true
julia> glmvar.(Poisson(), μ) == μ
true
julia> glmvar.(Geometric(), μ) ≈ μ .* (1 .+ μ)
true
```
"""
function glmvar end
glmvar(::Union{Bernoulli,Binomial}, μ::Real) = μ * (1 - μ)
glmvar(::Gamma, μ::Real) = abs2(μ)
glmvar(::Geometric, μ::Real) = μ * (1 + μ)
glmvar(::InverseGaussian, μ::Real) = μ^3
glmvar(d::NegativeBinomial, μ::Real) = μ * (1 + μ/d.r)
glmvar(::Normal, μ::Real) = one(μ)
glmvar(::Poisson, μ::Real) = μ
"""
GLM.mustart(D::Distribution, y, wt)
Return a starting value for μ.
For some distributions it is appropriate to set `μ = y` to initialize the IRLS algorithm but
for others, notably the Bernoulli, the values of `y` are not allowed as values of `μ` and
must be modified.
# Examples
```jldoctest; setup = :(using GLM)
julia> GLM.mustart(Bernoulli(), 0.0, 1) ≈ 1/4
true
julia> GLM.mustart(Bernoulli(), 1.0, 1) ≈ 3/4
true
julia> GLM.mustart(Binomial(), 0.0, 10) ≈ 1/22
true
julia> GLM.mustart(Normal(), 0.0, 1) ≈ 0
true
julia> GLM.mustart(Geometric(), 4, 1) ≈ 4
true
```
"""
function mustart end

# Shrink 0/1 responses toward 1/2 so the starting μ lies strictly inside
# (0, 1), where the logit/probit style links are finite.
mustart(::Bernoulli, y, wt) = (y + oftype(y, 1/2)) / 2
mustart(::Binomial, y, wt) = (wt * y + oftype(y, 1/2)) / (wt + one(y))
function mustart(::Union{Gamma, InverseGaussian}, y, wt)
    fy = float(y)
    # Use `oftype(fy, ...)`, not `oftype(y, ...)`: with an integer `y == 0`
    # the latter attempts `convert(Int, 1/10)` and throws an InexactError,
    # and it makes the branch type-unstable. This also matches the
    # Geometric/NegativeBinomial/Poisson methods below.
    iszero(y) ? oftype(fy, 1/10) : fy
end
function mustart(::Geometric, y, wt)
    fy = float(y)
    # A zero count is in the support, but μ must be positive for the log link.
    iszero(y) ? fy + oftype(fy, 1 / 6) : fy
end
function mustart(::NegativeBinomial, y, wt)
    fy = float(y)
    iszero(y) ? fy + oftype(fy, 1/6) : fy
end
mustart(::Normal, y, wt) = y
function mustart(::Poisson, y, wt)
    fy = float(y)
    # Shift all counts by 1/10 so μ is strictly positive for the log link.
    fy + oftype(fy, 1/10)
end
"""
devresid(D, y, μ::Real)
Return the squared deviance residual of `μ` from `y` for distribution `D`
The deviance of a GLM can be evaluated as the sum of the squared deviance residuals. This
is the principal use for these values. The actual deviance residual, say for plotting, is
the signed square root of this value
```julia
sign(y - μ) * sqrt(devresid(D, y, μ))
```
# Examples
```jldoctest; setup = :(using GLM: Bernoulli, Normal, devresid)
julia> devresid(Normal(), 0, 0.25) ≈ abs2(0.25)
true
julia> devresid(Bernoulli(), 1, 0.75) ≈ -2*log(0.75)
true
julia> devresid(Bernoulli(), 0, 0.25) ≈ -2*log1p(-0.25)
true
```
"""
function devresid end
function devresid(::Bernoulli, y, μ::Real)
    # Only exact 0/1 responses are valid for a Bernoulli; the general
    # Binomial formula below handles fractional proportions.
    if y == 1
        return -2 * log(μ)
    elseif y == 0
        return -2 * log1p(-μ)
    end
    throw(ArgumentError("y should be 0 or 1 (got $y)"))
end
function devresid(::Binomial, y, μ::Real)
    # Handle the boundary cases separately so that 0*log(0) terms never arise.
    if y == 1
        return -2 * log(μ)
    elseif y == 0
        return -2 * log1p(-μ)
    else
        return 2 * (y * (log(y) - log(μ)) + (1 - y)*(log1p(-y) - log1p(-μ)))
    end
end
devresid(::Gamma, y, μ::Real) = -2 * (log(y / μ) - (y - μ) / μ)
function devresid(::Geometric, y, μ::Real)
    # μ == 0 would divide by zero inside xlogy; return NaN in a type-stable way.
    μ == 0 && return convert(float(promote_type(typeof(μ), typeof(y))), NaN)
    return 2 * (xlogy(y, y / μ) - xlogy(y + 1, (y + 1) / (μ + 1)))
end
devresid(::InverseGaussian, y, μ::Real) = abs2(y - μ) / (y * abs2(μ))
function devresid(d::NegativeBinomial, y, μ::Real)
    μ == 0 && return convert(float(promote_type(typeof(μ), typeof(y))), NaN)
    # θ = d.r is the fixed shape parameter of the NB parameterization.
    θ = d.r
    return 2 * (xlogy(y, y / μ) + xlogy(y + θ, (μ + θ)/(y + θ)))
end
devresid(::Normal, y, μ::Real) = abs2(y - μ)
devresid(::Poisson, y, μ::Real) = 2 * (xlogy(y, y / μ) - (y - μ))
"""
GLM.dispersion_parameter(D)
Does distribution `D` have a separate dispersion parameter, ϕ?
Returns `false` for the `Bernoulli`, `Binomial` and `Poisson` distributions, `true` otherwise.
# Examples
```jldoctest; setup = :(using GLM)
julia> show(GLM.dispersion_parameter(Normal()))
true
julia> show(GLM.dispersion_parameter(Bernoulli()))
false
```
"""
dispersion_parameter(D) = true
dispersion_parameter(::Union{Bernoulli, Binomial, Poisson}) = false
"""
_safe_int(x::T)
Convert to Int, when `x` is within 1 eps of an integer.
"""
"""
    _safe_int(x::T)

Convert to Int, when `x` is within 1 eps of an integer.
"""
function _safe_int(x::T) where {T<:AbstractFloat}
    nearest = round(Int, x)
    # Accept only values within one ulp of an integer; anything farther away
    # is a genuine fractional value and must not be silently truncated.
    if abs(x - nearest) <= eps(x)
        return nearest
    end
    throw(InexactError(nameof(T), T, x))
end
_safe_int(x) = Int(x)
"""
GLM.loglik_obs(D, y, μ, wt, ϕ)
Returns `wt * logpdf(D(μ, ϕ), y)` where the parameters of `D` are derived from `μ` and `ϕ`.
The `wt` argument is a multiplier of the result except in the case of the `Binomial` where
`wt` is the number of trials and `μ` is the proportion of successes.
The loglikelihood of a fitted model is the sum of these values over all the observations.
"""
function loglik_obs end
loglik_obs(::Bernoulli, y, μ, wt, ϕ) = wt*logpdf(Bernoulli(μ), y)
loglik_obs(::Binomial, y, μ, wt, ϕ) = logpdf(Binomial(Int(wt), μ), _safe_int(y*wt))
loglik_obs(::Gamma, y, μ, wt, ϕ) = wt*logpdf(Gamma(inv(ϕ), μ*ϕ), y)
# In Distributions.jl, a Geometric distribution characterizes the number of failures before
# the first success in a sequence of independent Bernoulli trials with success rate p.
# The mean of Geometric distribution is (1 - p) / p.
# Hence, p = 1 / (1 + μ).
loglik_obs(::Geometric, y, μ, wt, ϕ) = wt * logpdf(Geometric(1 / (μ + 1)), y)
loglik_obs(::InverseGaussian, y, μ, wt, ϕ) = wt*logpdf(InverseGaussian(μ, inv(ϕ)), y)
loglik_obs(::Normal, y, μ, wt, ϕ) = wt*logpdf(Normal(μ, sqrt(ϕ)), y)
loglik_obs(::Poisson, y, μ, wt, ϕ) = wt*logpdf(Poisson(μ), y)
# We use the following parameterization for the Negative Binomial distribution:
# (Γ(θ+y) / (Γ(θ) * y!)) * μ^y * θ^θ / (μ+θ)^{θ+y}
# The parameterization of NegativeBinomial(r=θ, p) in Distributions.jl is
# Γ(θ+y) / (y! * Γ(θ)) * p^θ(1-p)^y
# Hence, p = θ/(μ+θ)
loglik_obs(d::NegativeBinomial, y, μ, wt, ϕ) = wt*logpdf(NegativeBinomial(d.r, d.r/(μ+d.r)), y)
| GLM | https://github.com/JuliaStats/GLM.jl.git |
|
[
"MIT"
] | 1.9.0 | 273bd1cd30768a2fddfa3fd63bbc746ed7249e5f | code | 9702 | """
linpred!(out, p::LinPred, f::Real=1.0)
Overwrite `out` with the linear predictor from `p` with factor `f`
The effective coefficient vector, `p.scratchbeta`, is evaluated as `p.beta0 .+ f * p.delbeta`,
and `out` is updated to `p.X * p.scratchbeta`
"""
function linpred!(out, p::LinPred, f::Real=1.)
    # When f == 0 the effective coefficients are just beta0; otherwise form
    # beta0 + f*delbeta in the preallocated scratch buffer to avoid allocating.
    mul!(out, p.X, iszero(f) ? p.beta0 : broadcast!(muladd, p.scratchbeta, f, p.delbeta, p.beta0))
end
"""
    linpred(p::LinPred, f::Real=1.0)
Return the linear predictor `p.X * (p.beta0 .+ f * p.delbeta)`
"""
linpred(p::LinPred, f::Real=1.) = linpred!(Vector{eltype(p.X)}(undef, size(p.X, 1)), p, f)
"""
    installbeta!(p::LinPred, f::Real=1.0)
Install `p.beta0 .+= f * p.delbeta` and zero out `p.delbeta`. Return the updated `p.beta0`.
"""
function installbeta!(p::LinPred, f::Real=1.)
    beta0 = p.beta0
    delbeta = p.delbeta
    # Fold the accepted step into the base coefficients and reset the
    # increment in a single pass over both vectors.
    @inbounds for i = eachindex(beta0,delbeta)
        beta0[i] += delbeta[i]*f
        delbeta[i] = 0
    end
    beta0
end
"""
DensePredQR
A `LinPred` type with a dense, unpivoted QR decomposition of `X`
# Members
- `X`: Model matrix of size `n` × `p` with `n ≥ p`. Should be full column rank.
- `beta0`: base coefficient vector of length `p`
- `delbeta`: increment to coefficient vector, also of length `p`
- `scratchbeta`: scratch vector of length `p`, used in `linpred!` method
- `qr`: a `QRCompactWY` object created from `X`, with optional row weights.
"""
mutable struct DensePredQR{T<:BlasReal} <: DensePred
X::Matrix{T} # model matrix
beta0::Vector{T} # base coefficient vector
delbeta::Vector{T} # coefficient increment
scratchbeta::Vector{T}
qr::QRCompactWY{T}
function DensePredQR{T}(X::Matrix{T}, beta0::Vector{T}) where T
n, p = size(X)
length(beta0) == p || throw(DimensionMismatch("length(β0) ≠ size(X,2)"))
new{T}(X, beta0, zeros(T,p), zeros(T,p), qr(X))
end
function DensePredQR{T}(X::Matrix{T}) where T
n, p = size(X)
new{T}(X, zeros(T, p), zeros(T,p), zeros(T,p), qr(X))
end
end
DensePredQR(X::Matrix, beta0::Vector) = DensePredQR{eltype(X)}(X, beta0)
DensePredQR(X::Matrix{T}) where T = DensePredQR{T}(X, zeros(T, size(X,2)))
convert(::Type{DensePredQR{T}}, X::Matrix{T}) where {T} = DensePredQR{T}(X, zeros(T, size(X, 2)))
"""
delbeta!(p::LinPred, r::Vector)
Evaluate and return `p.delbeta` the increment to the coefficient vector from residual `r`
"""
function delbeta! end
function delbeta!(p::DensePredQR{T}, r::Vector{T}) where T<:BlasReal
    # Least-squares solve via the stored QR factorization: delbeta = X \ r.
    p.delbeta = p.qr\r
    return p
end
"""
DensePredChol{T}
A `LinPred` type with a dense Cholesky factorization of `X'X`
# Members
- `X`: model matrix of size `n` × `p` with `n ≥ p`. Should be full column rank.
- `beta0`: base coefficient vector of length `p`
- `delbeta`: increment to coefficient vector, also of length `p`
- `scratchbeta`: scratch vector of length `p`, used in `linpred!` method
- `chol`: a `Cholesky` object created from `X'X`, possibly using row weights.
- `scratchm1`: scratch Matrix{T} of the same size as `X`
- `scratchm2`: scratch Matrix{T} os the same size as `X'X`
"""
mutable struct DensePredChol{T<:BlasReal,C} <: DensePred
X::Matrix{T} # model matrix
beta0::Vector{T} # base vector for coefficients
delbeta::Vector{T} # coefficient increment
scratchbeta::Vector{T}
chol::C
scratchm1::Matrix{T}
scratchm2::Matrix{T}
end
function DensePredChol(X::AbstractMatrix, pivot::Bool)
F = Hermitian(float(X'X))
T = eltype(F)
F = pivot ? pivoted_cholesky!(F, tol = -one(T), check = false) : cholesky!(F)
DensePredChol(Matrix{T}(X),
zeros(T, size(X, 2)),
zeros(T, size(X, 2)),
zeros(T, size(X, 2)),
F,
similar(X, T),
similar(cholfactors(F)))
end
cholpred(X::AbstractMatrix, pivot::Bool=false) = DensePredChol(X, pivot)
cholfactors(c::Union{Cholesky,CholeskyPivoted}) = c.factors
cholesky!(p::DensePredChol{T}) where {T<:FP} = p.chol
cholesky(p::DensePredQR{T}) where {T<:FP} = Cholesky{T,typeof(p.X)}(copy(p.qr.R), 'U', 0)
function cholesky(p::DensePredChol{T}) where T<:FP
c = p.chol
Cholesky(copy(cholfactors(c)), c.uplo, c.info)
end
cholesky!(p::DensePredQR{T}) where {T<:FP} = Cholesky{T,typeof(p.X)}(p.qr.R, 'U', 0)
# Unweighted, full-rank case: solve (X'X) delbeta = X'r via the cached
# Cholesky factorization.
function delbeta!(p::DensePredChol{T,<:Cholesky}, r::Vector{T}) where T<:BlasReal
    ldiv!(p.chol, mul!(p.delbeta, transpose(p.X), r))
    p
end
# Unweighted, pivoted (possibly rank-deficient) case. When the factorization
# is rank-deficient, only the leading rank(ch) pivoted coordinates are solved
# and the remainder of delbeta is zeroed.
function delbeta!(p::DensePredChol{T,<:CholeskyPivoted}, r::Vector{T}) where T<:BlasReal
    ch = p.chol
    delbeta = mul!(p.delbeta, adjoint(p.X), r)
    rnk = rank(ch)
    if rnk == length(delbeta)
        ldiv!(ch, delbeta)
    else
        # Move delbeta into the pivoted ordering, zero the deficient tail,
        # solve the leading rnk×rnk system, then undo the permutation.
        permute!(delbeta, ch.p)
        for k=(rnk+1):length(delbeta)
            delbeta[k] = -zero(T)
        end
        LAPACK.potrs!(ch.uplo, view(ch.factors, 1:rnk, 1:rnk), view(delbeta, 1:rnk))
        invpermute!(delbeta, ch.p)
    end
    p
end
# Weighted, full-rank case: refactorize X'WX in place (the weights change on
# every IRLS iteration) and solve for delbeta = (X'WX) \ X'Wr.
function delbeta!(p::DensePredChol{T,<:Cholesky}, r::Vector{T}, wt::Vector{T}) where T<:BlasReal
    scr = mul!(p.scratchm1, Diagonal(wt), p.X)
    cholesky!(Hermitian(mul!(cholfactors(p.chol), transpose(scr), p.X), :U))
    mul!(p.delbeta, transpose(scr), r)
    ldiv!(p.chol, p.delbeta)
    p
end
# Weighted, pivoted case: same refactorize-and-solve, but the pivoted ordering
# established at construction is reused rather than re-pivoting each iteration.
function delbeta!(p::DensePredChol{T,<:CholeskyPivoted}, r::Vector{T}, wt::Vector{T}) where T<:BlasReal
    piv = p.chol.p # inverse vector
    delbeta = p.delbeta
    # p.scratchm1 = WX
    mul!(p.scratchm1, Diagonal(wt), p.X)
    # p.scratchm2 = X'WX
    mul!(p.scratchm2, adjoint(p.scratchm1), p.X)
    # delbeta = X'Wr
    mul!(delbeta, transpose(p.scratchm1), r)
    # calculate delbeta = (X'WX)\X'Wr
    rnk = rank(p.chol)
    if rnk == length(delbeta)
        cf = cholfactors(p.chol)
        cf .= p.scratchm2[piv, piv]
        cholesky!(Hermitian(cf, Symbol(p.chol.uplo)))
        ldiv!(p.chol, delbeta)
    else
        # Rank-deficient: permute, zero the tail, factor and solve only the
        # leading full-rank subsystem, then restore the original ordering.
        permute!(delbeta, piv)
        for k=(rnk+1):length(delbeta)
            delbeta[k] = -zero(T)
        end
        # shift full rank column to 1:rank
        cf = cholfactors(p.chol)
        cf .= p.scratchm2[piv, piv]
        cholesky!(Hermitian(view(cf, 1:rnk, 1:rnk), Symbol(p.chol.uplo)))
        ldiv!(Cholesky(view(cf, 1:rnk, 1:rnk), Symbol(p.chol.uplo), p.chol.info),
              view(delbeta, 1:rnk))
        invpermute!(delbeta, piv)
    end
    p
end
mutable struct SparsePredChol{T,M<:SparseMatrixCSC,C} <: GLM.LinPred
X::M # model matrix
Xt::M # X'
beta0::Vector{T} # base vector for coefficients
delbeta::Vector{T} # coefficient increment
scratchbeta::Vector{T}
chol::C
scratch::M
end
function SparsePredChol(X::SparseMatrixCSC{T}) where T
chol = cholesky(sparse(I, size(X, 2), size(X,2)))
return SparsePredChol{eltype(X),typeof(X),typeof(chol)}(X,
X',
zeros(T, size(X, 2)),
zeros(T, size(X, 2)),
zeros(T, size(X, 2)),
chol,
similar(X))
end
cholpred(X::SparseMatrixCSC, pivot::Bool=false) = SparsePredChol(X)
function delbeta!(p::SparsePredChol{T}, r::Vector{T}, wt::Vector{T}) where T
scr = mul!(p.scratch, Diagonal(wt), p.X)
XtWX = p.Xt*scr
c = p.chol = cholesky(Symmetric{eltype(XtWX),typeof(XtWX)}(XtWX, 'L'))
p.delbeta = c \ mul!(p.delbeta, adjoint(scr), r)
end
function delbeta!(p::SparsePredChol{T}, r::Vector{T}) where T
scr = p.scratch = p.X
XtWX = p.Xt*scr
c = p.chol = cholesky(Symmetric{eltype(XtWX),typeof(XtWX)}(XtWX, 'L'))
p.delbeta = c \ mul!(p.delbeta, adjoint(scr), r)
end
LinearAlgebra.cholesky(p::SparsePredChol{T}) where {T} = copy(p.chol)
LinearAlgebra.cholesky!(p::SparsePredChol{T}) where {T} = p.chol
# Inverse of X'X (or X'WX) from the stored factorization; used by `vcov`.
invchol(x::DensePred) = inv(cholesky!(x))
function invchol(x::DensePredChol{T,<: CholeskyPivoted}) where T
    ch = x.chol
    rnk = rank(ch)
    p = length(x.delbeta)
    rnk == p && return inv(ch)
    # Rank-deficient: invert only the leading rnk×rnk pivoted block and leave
    # entries for the dropped (collinear) columns as NaN.
    fac = ch.factors
    res = fill(convert(T, NaN), size(fac))
    for j in 1:rnk, i in 1:rnk
        res[i, j] = fac[i, j]
    end
    # potri! inverts from the triangular factor; copytri! mirrors the result
    # into the other triangle so `res` is a full symmetric matrix.
    copytri!(LAPACK.potri!(ch.uplo, view(res, 1:rnk, 1:rnk)), ch.uplo, true)
    ipiv = invperm(ch.p)
    res[ipiv, ipiv]
end
invchol(x::SparsePredChol) = cholesky!(x) \ Matrix{Float64}(I, size(x.X, 2), size(x.X, 2))
vcov(x::LinPredModel) = rmul!(invchol(x.pp), dispersion(x, true))
function cor(x::LinPredModel)
Σ = vcov(x)
invstd = inv.(sqrt.(diag(Σ)))
lmul!(Diagonal(invstd), rmul!(Σ, Diagonal(invstd)))
end
stderror(x::LinPredModel) = sqrt.(diag(vcov(x)))
function show(io::IO, obj::LinPredModel)
println(io, "$(typeof(obj)):\n\nCoefficients:\n", coeftable(obj))
end
modelframe(obj::LinPredModel) = obj.fr
modelmatrix(obj::LinPredModel) = obj.pp.X
response(obj::LinPredModel) = obj.rr.y
fitted(m::LinPredModel) = m.rr.mu
predict(mm::LinPredModel) = fitted(mm)
StatsModels.formula(::LinPredModel) = throw(ArgumentError("model was fitted without a formula"))
residuals(obj::LinPredModel) = residuals(obj.rr)
"""
nobs(obj::LinearModel)
nobs(obj::GLM)
For linear and generalized linear models, returns the number of rows, or,
when prior weights are specified, the sum of weights.
"""
function nobs(obj::LinPredModel)
    # With prior weights the effective sample size is their sum; otherwise it
    # is the number of rows, converted to the weight element type so that the
    # return type is the same in both branches.
    wts = obj.rr.wts
    isempty(wts) && return oftype(sum(one(eltype(wts))), length(obj.rr.y))
    return sum(wts)
end
coef(x::LinPred) = x.beta0
coef(obj::LinPredModel) = coef(obj.pp)
dof_residual(obj::LinPredModel) = nobs(obj) - dof(obj) + 1
hasintercept(m::LinPredModel) = any(i -> all(==(1), view(m.pp.X , :, i)), 1:size(m.pp.X, 2))
linpred_rank(x::LinPred) = length(x.beta0)
linpred_rank(x::DensePredChol{<:Any, <:CholeskyPivoted}) = x.chol.rank
| GLM | https://github.com/JuliaStats/GLM.jl.git |
|
[
"MIT"
] | 1.9.0 | 273bd1cd30768a2fddfa3fd63bbc746ed7249e5f | code | 11108 | """
LmResp
Encapsulates the response for a linear model
# Members
- `mu`: current value of the mean response vector or fitted value
- `offset`: optional offset added to the linear predictor to form `mu`
- `wts`: optional vector of prior frequency (a.k.a. case) weights for observations
- `y`: observed response vector
Either or both `offset` and `wts` may be of length 0
"""
mutable struct LmResp{V<:FPVector} <: ModResp # response in a linear model
mu::V # mean response
offset::V # offset added to linear predictor (may have length 0)
wts::V # prior weights (may have length 0)
y::V # response
function LmResp{V}(mu::V, off::V, wts::V, y::V) where V
n = length(y)
length(mu) == n || error("mismatched lengths of mu and y")
ll = length(off)
ll == 0 || ll == n || error("length of offset is $ll, must be $n or 0")
ll = length(wts)
ll == 0 || ll == n || error("length of wts is $ll, must be $n or 0")
new{V}(mu, off, wts, y)
end
end
function LmResp(y::AbstractVector{<:Real}, wts::Union{Nothing,AbstractVector{<:Real}}=nothing)
# Instead of convert(Vector{Float64}, y) to be more ForwardDiff friendly
_y = convert(Vector{float(eltype(y))}, y)
_wts = if wts === nothing
similar(_y, 0)
else
convert(Vector{float(eltype(wts))}, wts)
end
return LmResp{typeof(_y)}(zero(_y), zero(_y), _wts, _y)
end
# Update the mean response from a linear-predictor vector, adding the offset
# when one is present, and return the resulting deviance.
function updateμ!(r::LmResp{V}, linPr::V) where V<:FPVector
    n = length(linPr)
    length(r.y) == n || error("length(linPr) is $n, should be $(length(r.y))")
    length(r.offset) == 0 ? copyto!(r.mu, linPr) : broadcast!(+, r.mu, linPr, r.offset)
    deviance(r)
end
updateμ!(r::LmResp{V}, linPr) where {V<:FPVector} = updateμ!(r, convert(V, vec(linPr)))
# Residual sum of squares, optionally weighted by the prior weights.
function deviance(r::LmResp)
    y = r.y
    mu = r.mu
    wts = r.wts
    # Seed the accumulator with the promoted zero so the loop is type-stable
    # even when y and wts have different element types.
    v = zero(eltype(y)) + zero(eltype(y)) * zero(eltype(wts))
    if isempty(wts)
        @inbounds @simd for i = eachindex(y,mu)
            v += abs2(y[i] - mu[i])
        end
    else
        @inbounds @simd for i = eachindex(y,mu,wts)
            v += abs2(y[i] - mu[i])*wts[i]
        end
    end
    v
end
# Gaussian log-likelihood at the MLE of the variance, deviance(r)/n.
function loglikelihood(r::LmResp)
    n = isempty(r.wts) ? length(r.y) : sum(r.wts)
    -n/2 * (log(2π * deviance(r)/n) + 1)
end
residuals(r::LmResp) = r.y - r.mu
"""
LinearModel
A combination of a [`LmResp`](@ref) and a [`LinPred`](@ref)
# Members
- `rr`: a `LmResp` object
- `pp`: a `LinPred` object
"""
struct LinearModel{L<:LmResp,T<:LinPred} <: LinPredModel
rr::L
pp::T
end
LinearAlgebra.cholesky(x::LinearModel) = cholesky(x.pp)
# Solve the (possibly weighted) least-squares problem once, install the
# solution into the predictor's coefficients, and refresh the fitted values.
function StatsBase.fit!(obj::LinearModel)
    if isempty(obj.rr.wts)
        delbeta!(obj.pp, obj.rr.y)
    else
        delbeta!(obj.pp, obj.rr.y, obj.rr.wts)
    end
    installbeta!(obj.pp)
    # After installbeta! the increment is zero, so a factor of zero evaluates
    # the linear predictor at beta0 itself.
    updateμ!(obj.rr, linpred(obj.pp, zero(eltype(obj.rr.y))))
    return obj
end
const FIT_LM_DOC = """
In the first method, `formula` must be a
[StatsModels.jl `Formula` object](https://juliastats.org/StatsModels.jl/stable/formula/)
and `data` a table (in the [Tables.jl](https://tables.juliadata.org/stable/) definition, e.g. a data frame).
In the second method, `X` must be a matrix holding values of the independent variable(s)
in columns (including if appropriate the intercept), and `y` must be a vector holding
values of the dependent variable.
The keyword argument `wts` can be a `Vector` specifying frequency weights for observations.
Such weights are equivalent to repeating each observation a number of times equal
to its weight. Do note that this interpretation gives equal point estimates but
different standard errors from analytical (a.k.a. inverse variance) weights and
from probability (a.k.a. sampling) weights which are the default in some other
software.
`dropcollinear` controls whether or not `lm` accepts a model matrix which
is less-than-full rank. If `true` (the default), only the first of each set of
linearly-dependent columns is used. The coefficient for redundant linearly dependent columns is
`0.0` and all associated statistics are set to `NaN`.
"""
"""
fit(LinearModel, formula, data, allowrankdeficient=false;
[wts::AbstractVector], dropcollinear::Bool=true)
fit(LinearModel, X::AbstractMatrix, y::AbstractVector;
wts::AbstractVector=similar(y, 0), dropcollinear::Bool=true)
Fit a linear model to data.
$FIT_LM_DOC
"""
function fit(::Type{LinearModel}, X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real},
allowrankdeficient_dep::Union{Bool,Nothing}=nothing;
wts::AbstractVector{<:Real}=similar(y, 0),
dropcollinear::Bool=true)
if allowrankdeficient_dep !== nothing
@warn "Positional argument `allowrankdeficient` is deprecated, use keyword " *
"argument `dropcollinear` instead. Proceeding with positional argument value: $allowrankdeficient_dep"
dropcollinear = allowrankdeficient_dep
end
fit!(LinearModel(LmResp(y, wts), cholpred(X, dropcollinear)))
end
"""
lm(formula, data, allowrankdeficient=false;
[wts::AbstractVector], dropcollinear::Bool=true)
lm(X::AbstractMatrix, y::AbstractVector;
wts::AbstractVector=similar(y, 0), dropcollinear::Bool=true)
Fit a linear model to data.
An alias for `fit(LinearModel, X, y; wts=wts, dropcollinear=dropcollinear)`
$FIT_LM_DOC
"""
lm(X, y, allowrankdeficient_dep::Union{Bool,Nothing}=nothing; kwargs...) =
fit(LinearModel, X, y, allowrankdeficient_dep; kwargs...)
dof(x::LinearModel) = linpred_rank(x.pp) + 1
"""
deviance(obj::LinearModel)
For linear models, the deviance is equal to the residual sum of squares (RSS).
"""
deviance(obj::LinearModel) = deviance(obj.rr)
"""
nulldeviance(obj::LinearModel)
For linear models, the deviance of the null model is equal to the total sum of squares (TSS).
"""
function nulldeviance(obj::LinearModel)
y = obj.rr.y
wts = obj.rr.wts
if hasintercept(obj)
if isempty(wts)
m = mean(y)
else
m = mean(y, weights(wts))
end
else
@warn("Starting from GLM.jl 1.8, null model is defined as having no predictor at all " *
"when a model without an intercept is passed.")
m = zero(eltype(y))
end
v = zero(eltype(y))*zero(eltype(wts))
if isempty(wts)
@inbounds @simd for yi in y
v += abs2(yi - m)
end
else
@inbounds @simd for i = eachindex(y,wts)
v += abs2(y[i] - m)*wts[i]
end
end
v
end
loglikelihood(obj::LinearModel) = loglikelihood(obj.rr)
function nullloglikelihood(obj::LinearModel)
r = obj.rr
n = isempty(r.wts) ? length(r.y) : sum(r.wts)
-n/2 * (log(2π * nulldeviance(obj)/n) + 1)
end
r2(obj::LinearModel) = 1 - deviance(obj)/nulldeviance(obj)
adjr2(obj::LinearModel) = 1 - (1 - r²(obj))*(nobs(obj)-hasintercept(obj))/dof_residual(obj)
function dispersion(x::LinearModel, sqr::Bool=false)
dofr = dof_residual(x)
ssqr = deviance(x.rr)/dofr
dofr > 0 || return oftype(ssqr, Inf)
return sqr ? ssqr : sqrt(ssqr)
end
function coeftable(mm::LinearModel; level::Real=0.95)
cc = coef(mm)
dofr = dof_residual(mm)
se = stderror(mm)
tt = cc ./ se
if dofr > 0
p = ccdf.(Ref(FDist(1, dofr)), abs2.(tt))
ci = se*quantile(TDist(dofr), (1-level)/2)
else
p = [isnan(t) ? NaN : 1.0 for t in tt]
ci = [isnan(t) ? NaN : -Inf for t in tt]
end
levstr = isinteger(level*100) ? string(Integer(level*100)) : string(level*100)
CoefTable(hcat(cc,se,tt,p,cc+ci,cc-ci),
["Coef.","Std. Error","t","Pr(>|t|)","Lower $levstr%","Upper $levstr%"],
["x$i" for i = 1:size(mm.pp.X, 2)], 4, 3)
end
"""
predict(mm::LinearModel, newx::AbstractMatrix;
interval::Union{Symbol,Nothing} = nothing, level::Real = 0.95)
If `interval` is `nothing` (the default), return a vector with the predicted values
for model `mm` and new data `newx`.
Otherwise, return a vector with the predicted values, as well as vectors with
the lower and upper confidence bounds for a given `level` (0.95 equates alpha = 0.05).
Valid values of `interval` are `:confidence` delimiting the uncertainty of the
predicted relationship, and `:prediction` delimiting estimated bounds for new data points.
"""
function predict(mm::LinearModel, newx::AbstractMatrix;
                 interval::Union{Symbol,Nothing}=nothing, level::Real = 0.95)
    retmean = newx * coef(mm)
    # :confint was the pre-1.x spelling; translate it with a deprecation.
    if interval === :confint
        Base.depwarn("interval=:confint is deprecated in favor of interval=:confidence", :predict)
        interval = :confidence
    end
    if interval === nothing
        return retmean
    elseif mm.pp.chol isa CholeskyPivoted &&
        mm.pp.chol.rank < size(mm.pp.chol, 2)
        throw(ArgumentError("prediction intervals are currently not implemented " *
                            "when some independent variables have been dropped " *
                            "from the model due to collinearity"))
    end
    length(mm.rr.wts) == 0 || error("prediction with confidence intervals not yet implemented for weighted regression")
    chol = cholesky!(mm.pp)
    # get the R matrix from the QR factorization
    if chol isa CholeskyPivoted
        ip = invperm(chol.p)
        R = chol.U[ip, ip]
    else
        R = chol.U
    end
    # Per-row variance of the fitted mean: rowwise ||x'R⁻¹||² times s².
    residvar = ones(size(newx,2)) * deviance(mm)/dof_residual(mm)
    if interval == :confidence
        retvariance = (newx/R).^2 * residvar
    elseif interval == :prediction
        # Prediction intervals add the residual variance s² for a new point.
        retvariance = (newx/R).^2 * residvar .+ deviance(mm)/dof_residual(mm)
    else
        error("only :confidence and :prediction intervals are defined")
    end
    # The t-quantile at (1-level)/2 is negative, so `lower = mean + interval`
    # and `upper = mean - interval` give the conventional ordering.
    retinterval = quantile(TDist(dof_residual(mm)), (1. - level)/2) * sqrt.(retvariance)
    (prediction = retmean, lower = retmean .+ retinterval, upper = retmean .- retinterval)
end
"""
    confint(obj::LinearModel; level::Real=0.95)

Return a two-column matrix whose rows hold the lower and upper confidence
bounds for each coefficient of `obj` at coverage `level`.
"""
function confint(obj::LinearModel; level::Real=0.95)
    estimates = coef(obj)
    # Lower-tail t quantile: negative, so the first column is the lower bound.
    halfwidth = stderror(obj) * quantile(TDist(dof_residual(obj)), (1. - level)/2.)
    return hcat(estimates, estimates) + halfwidth * [1. -1.]
end
"""
    cooksdistance(obj::LinearModel)

Compute [Cook's distance](https://en.wikipedia.org/wiki/Cook%27s_distance)
for each observation in linear model `obj`, giving an estimate of the influence
of each data point.
Currently only implemented for linear models without weights.
"""
function StatsBase.cooksdistance(obj::LinearModel)
    u = residuals(obj)           # raw residuals yᵢ - ŷᵢ
    mse = dispersion(obj,true)   # mean squared error s²
    k = dof(obj)-1               # number of coefficients (dof counts the dispersion too)
    X = modelmatrix(obj)
    XtX = crossmodelmatrix(obj)
    # A rank-deficient (collinear) fit drops coefficients, so dof would no
    # longer match the model matrix width; reject that case explicitly.
    k == size(X,2) || throw(ArgumentError("Models with collinear terms are not currently supported."))
    wts = obj.rr.wts
    if isempty(wts)
        # Leverages hᵢᵢ: diagonal of the hat matrix X (X'X)⁻¹ X'.
        hii = diag(X * inv(XtX) * X')
    else
        throw(ArgumentError("Weighted models are not currently supported."))
    end
    # Dᵢ = uᵢ² hᵢᵢ / ((1 - hᵢᵢ)² · k · s²)
    D = @. u^2 * (hii / (1 - hii)^2) / (k*mse)
    return D
end
| GLM | https://github.com/JuliaStats/GLM.jl.git |
|
[
"MIT"
"""
    mle_for_θ(y, μ, wts; maxiter=30, tol=1.e-6)

Maximum-likelihood estimate of the negative binomial shape parameter θ for
responses `y`, fitted means `μ` and case weights `wts` (empty for unit weights),
using Newton's method on the profile log-likelihood. Throws a
`ConvergenceException` after `maxiter` iterations without convergence, which
may indicate Poisson-distributed data (θ → ∞).
"""
function mle_for_θ(y::AbstractVector, μ::AbstractVector, wts::AbstractVector;
                   maxiter=30, tol=1.e-6)
    # First derivative of the profile log-likelihood in θ (the score).
    function first_derivative(θ::Real)
        tmp(yi, μi) = (yi+θ)/(μi+θ) + log(μi+θ) - 1 - log(θ) - digamma(θ+yi) + digamma(θ)
        unit_weights ? sum(tmp(yi, μi) for (yi, μi) in zip(y, μ)) :
            sum(wti * tmp(yi, μi) for (wti, yi, μi) in zip(wts, y, μ))
    end
    # Second derivative of the profile log-likelihood in θ.
    function second_derivative(θ::Real)
        tmp(yi, μi) = -(yi+θ)/(μi+θ)^2 + 2/(μi+θ) - 1/θ - trigamma(θ+yi) + trigamma(θ)
        unit_weights ? sum(tmp(yi, μi) for (yi, μi) in zip(y, μ)) :
            sum(wti * tmp(yi, μi) for (wti, yi, μi) in zip(wts, y, μ))
    end

    unit_weights = length(wts) == 0
    # Method-of-moments starting value for θ.
    if unit_weights
        n = length(y)
        θ = n / sum((yi/μi - 1)^2 for (yi, μi) in zip(y, μ))
    else
        n = sum(wts)
        θ = n / sum(wti * (yi/μi - 1)^2 for (wti, yi, μi) in zip(wts, y, μ))
    end
    δ, converged = one(θ), false

    # Newton iterations; θ is reflected back onto the positive axis each step
    # since the likelihood is only defined for θ > 0.
    for t = 1:maxiter
        θ = abs(θ)
        δ = first_derivative(θ) / second_derivative(θ)
        if abs(δ) <= tol
            converged = true
            break
        end
        θ = θ - δ
    end
    if !converged
        info_msg = "Estimating dispersion parameter failed, which may " *
                   "indicate Poisson distributed data."
        throw(ConvergenceException(maxiter, NaN, NaN, info_msg))
    end
    θ
end
"""
    negbin(formula, data, [link::Link];
           <keyword arguments>)
    negbin(X::AbstractMatrix, y::AbstractVector, [link::Link];
           <keyword arguments>)

Fit a negative binomial generalized linear model to data, while simultaneously
estimating the shape parameter θ. Extra arguments and keyword arguments will be
passed to [`glm`](@ref).

In the first method, `formula` must be a
[StatsModels.jl `Formula` object](https://juliastats.org/StatsModels.jl/stable/formula/)
and `data` a table (in the [Tables.jl](https://tables.juliadata.org/stable/) definition, e.g. a data frame).
In the second method, `X` must be a matrix holding values of the independent variable(s)
in columns (including if appropriate the intercept), and `y` must be a vector holding
values of the dependent variable.
In both cases, `link` may specify the link function
(if omitted, it is taken to be `NegativeBinomial(θ)`).

# Keyword Arguments
- `initialθ::Real=Inf`: Starting value for shape parameter θ. If it is `Inf`
  then the initial value will be estimated by fitting a Poisson distribution.
- `maxiter::Integer=30`: See `maxiter` for [`glm`](@ref)
- `atol::Real=1.0e-6`: See `atol` for [`glm`](@ref)
- `rtol::Real=1.0e-6`: See `rtol` for [`glm`](@ref)
- `verbose::Bool=false`: See `verbose` for [`glm`](@ref)
"""
function negbin(F,
                D,
                args...;
                initialθ::Real=Inf,
                maxiter::Integer=30,
                minstepfac::Real=0.001,
                atol::Real=1e-6,
                rtol::Real=1.e-6,
                verbose::Bool=false,
                kwargs...)
    # Translate all deprecated keyword spellings *before* the validity check
    # below, so each one warns instead of being rejected as unsupported.
    if haskey(kwargs, :maxIter)
        Base.depwarn("'maxIter' argument is deprecated, use 'maxiter' instead", :negbin)
        maxiter = kwargs[:maxIter]
    end
    if haskey(kwargs, :minStepFac)
        Base.depwarn("'minStepFac' argument is deprecated, use 'minstepfac' instead", :negbin)
        minstepfac = kwargs[:minStepFac]
    end
    if haskey(kwargs, :convTol)
        Base.depwarn("`convTol` argument is deprecated, use `atol` and `rtol` instead", :negbin)
        rtol = kwargs[:convTol]
    end
    if haskey(kwargs, :tol)
        Base.depwarn("`tol` argument is deprecated, use `atol` and `rtol` instead", :negbin)
        rtol = kwargs[:tol]
    end
    # Any other keyword is unsupported. (Previously `:tol` was rejected here
    # before its deprecation branch could run, making that branch unreachable.)
    if !issubset(keys(kwargs), (:maxIter, :minStepFac, :convTol, :tol))
        throw(ArgumentError("unsupported keyword argument"))
    end

    maxiter >= 1 || throw(ArgumentError("maxiter must be positive"))
    atol > 0 || throw(ArgumentError("atol must be positive"))
    rtol > 0 || throw(ArgumentError("rtol must be positive"))
    initialθ > 0 || throw(ArgumentError("initialθ must be positive"))

    # Initial fit: a Poisson regression when the user gives no starting θ,
    # otherwise a negative binomial regression with the supplied θ.
    if isinf(initialθ)
        regmodel = glm(F, D, Poisson(), args...;
                       maxiter=maxiter, atol=atol, rtol=rtol, verbose=verbose, kwargs...)
    else
        regmodel = glm(F, D, NegativeBinomial(initialθ), args...;
                       maxiter=maxiter, atol=atol, rtol=rtol, verbose=verbose, kwargs...)
    end

    μ = regmodel.model.rr.mu
    y = regmodel.model.rr.y
    wts = regmodel.model.rr.wts
    lw, ly = length(wts), length(y)
    if lw != ly && lw != 0
        throw(ArgumentError("length of wts must be either $ly or 0 but was $lw"))
    end

    # Alternate between profiling θ at the current fit and refitting at the
    # current θ until both the log-likelihood and θ stabilize.
    θ = mle_for_θ(y, μ, wts; maxiter=maxiter, tol=rtol)
    d = sqrt(2 * max(1, deviance(regmodel)))
    δ = one(θ)
    ll = loglikelihood(regmodel)
    ll0 = ll + 2 * d
    converged = false
    for i = 1:maxiter
        if abs(ll0 - ll)/d + abs(δ) <= rtol
            converged = true
            break
        end
        verbose && println("[ Alternating iteration ", i, ", θ = ", θ, " ]")
        regmodel = glm(F, D, NegativeBinomial(θ), args...;
                       maxiter=maxiter, atol=atol, rtol=rtol, verbose=verbose, kwargs...)
        μ = regmodel.model.rr.mu
        prevθ = θ
        θ = mle_for_θ(y, μ, wts; maxiter=maxiter, tol=rtol)
        δ = prevθ - θ
        ll0 = ll
        ll = loglikelihood(regmodel)
    end
    converged || throw(ConvergenceException(maxiter))
    regmodel
end
| GLM | https://github.com/JuliaStats/GLM.jl.git |
|
[
"MIT"
] | 1.9.0 | 273bd1cd30768a2fddfa3fd63bbc746ed7249e5f | code | 75615 | using CategoricalArrays, CSV, DataFrames, LinearAlgebra, SparseArrays, StableRNGs,
Statistics, StatsBase, Test, RDatasets
using GLM
using StatsFuns: logistic
using Distributions: TDist
# Render `x` into a throwaway buffer, exercising its `show` method without
# producing any visible output.
function test_show(x)
    return show(IOBuffer(), x)
end
# Directory holding the CSV fixtures loaded by the tests below.
const glm_datadir = joinpath(dirname(@__FILE__), "..", "data")

## Formaldehyde data from the R Datasets package
# Columns: carbohydrate concentration (Carb) and optical density (OptDen).
form = DataFrame([[0.1,0.3,0.5,0.6,0.7,0.9],[0.086,0.269,0.446,0.538,0.626,0.782]],
                 [:Carb, :OptDen])
# Build a simple model matrix: a leading column of ones (intercept) followed
# by the column(s) of `x`.
function simplemm(x::AbstractVecOrMat)
    n = size(x, 2)
    # One row per observation: `size(x, 1)`, not `length(x)` — the latter is
    # the total element count and gave an (m·n)-row matrix for matrix input.
    mat = fill(one(float(eltype(x))), size(x, 1), n + 1)
    copyto!(view(mat, :, 2:(n + 1)), x)
    mat
end
# Ordinary least squares reference fit: QR factorize the intercept-augmented
# model matrix and solve for the coefficients.
function linreg(x::AbstractVecOrMat, y::AbstractVector)
    return qr!(simplemm(x)) \ y
end
@testset "lm" begin
    # Fit the Formaldehyde data and compare fit statistics against stored
    # reference values; the slope/intercept must match the QR-based `linreg`.
    lm1 = fit(LinearModel, @formula(OptDen ~ Carb), form)
    test_show(lm1)
    @test isapprox(coef(lm1), linreg(form.Carb, form.OptDen))
    Σ = [6.136653061224592e-05 -9.464489795918525e-05
         -9.464489795918525e-05 1.831836734693908e-04]
    @test isapprox(vcov(lm1), Σ)
    @test isapprox(cor(lm1.model), Diagonal(diag(Σ))^(-1/2)*Σ*Diagonal(diag(Σ))^(-1/2))
    @test dof(lm1) == 3
    @test isapprox(deviance(lm1), 0.0002992000000000012)
    @test isapprox(loglikelihood(lm1), 21.204842144047973)
    @test isapprox(nulldeviance(lm1), 0.3138488333333334)
    @test isapprox(nullloglikelihood(lm1), 0.33817870295676444)
    @test r²(lm1) == r2(lm1)
    @test isapprox(r²(lm1), 0.9990466748057584)
    @test adjr²(lm1) == adjr2(lm1)
    @test isapprox(adjr²(lm1), 0.998808343507198)
    @test isapprox(aic(lm1), -36.409684288095946)
    @test isapprox(aicc(lm1), -24.409684288095946)
    @test isapprox(bic(lm1), -37.03440588041178)
    # Scaling a column by 10 should produce a pivoted Cholesky and scale
    # that coefficient by 1/10.
    lm2 = fit(LinearModel, hcat(ones(6), 10form.Carb), form.OptDen, true)
    @test isa(lm2.pp.chol, CholeskyPivoted)
    @test lm2.pp.chol.piv == [2, 1]
    @test isapprox(coef(lm1), coef(lm2) .* [1., 10.])
    # DummyCoding on an integer predictor must match an explicitly categorical one.
    lm3 = lm(@formula(y~x), (y=1:25, x=repeat(1:5, 5)), contrasts=Dict(:x=>DummyCoding()))
    lm4 = lm(@formula(y~x), (y=1:25, x=categorical(repeat(1:5, 5))))
    @test coef(lm3) == coef(lm4) ≈ [11, 1, 2, 3, 4]
end
@testset "Linear Model Cook's Distance" begin
    # Cook's distance for with/without-intercept and collinear fits.
    st_df = DataFrame(
        Y=[6.4, 7.4, 10.4, 15.1, 12.3 , 11.4],
        XA=[1.5, 6.5, 11.5, 19.9, 17.0, 15.5],
        XB=[1.8, 7.8, 11.8, 20.5, 17.3, 15.8],
        XC=[3., 13., 23., 39.8, 34., 31.],
        # values from SAS proc reg
        CooksD_base=[1.4068501943, 0.176809102, 0.0026655177, 1.0704009915, 0.0875726457, 0.1331183932],
        CooksD_noint=[0.0076891801, 0.0302993877, 0.0410262965, 0.0294348488, 0.0691589296, 0.0273045538],
        CooksD_multi=[1.7122291956, 18.983407026, 0.000118078, 0.8470797843, 0.0715921999, 0.1105843157],
    )

    # linear regression
    t_lm_base = lm(@formula(Y ~ XA), st_df)
    @test isapprox(st_df.CooksD_base, cooksdistance(t_lm_base))

    # linear regression, no intercept
    t_lm_noint = lm(@formula(Y ~ XA +0), st_df)
    @test isapprox(st_df.CooksD_noint, cooksdistance(t_lm_noint))

    # linear regression, two collinear variables (Variance inflation factor ≊ 250)
    t_lm_multi = lm(@formula(Y ~ XA + XB), st_df)
    @test isapprox(st_df.CooksD_multi, cooksdistance(t_lm_multi))

    # linear regression, two full collinear variables (XC = 2 XA) hence should get the same results as the original
    # after pivoting
    t_lm_colli = lm(@formula(Y ~ XA + XC), st_df, dropcollinear=true)
    # Currently fails as the collinear variable is not dropped from `modelmatrix(obj)`
    @test_throws ArgumentError isapprox(st_df.CooksD_base, cooksdistance(t_lm_colli))
end
@testset "linear model with weights" begin
    # Weighted least squares; lm and an equivalent Normal-family glm must agree.
    df = dataset("quantreg", "engel")
    N = nrow(df)
    df.weights = repeat(1:5, Int(N/5))
    f = @formula(FoodExp ~ Income)
    lm_model = lm(f, df, wts = df.weights)
    glm_model = glm(f, df, Normal(), wts = df.weights)
    @test isapprox(coef(lm_model), [154.35104595140706, 0.4836896390157505])
    @test isapprox(coef(glm_model), [154.35104595140706, 0.4836896390157505])
    @test isapprox(stderror(lm_model), [9.382302620120193, 0.00816741377772968])
    @test isapprox(r2(lm_model), 0.8330258148644486)
    @test isapprox(adjr2(lm_model), 0.832788298242634)
    @test isapprox(vcov(lm_model), [88.02760245551447 -0.06772589439264813;
                                    -0.06772589439264813 6.670664781664879e-5])
    @test isapprox(first(predict(lm_model)), 357.57694841780994)
    @test isapprox(loglikelihood(lm_model), -4353.946729075838)
    @test isapprox(loglikelihood(glm_model), -4353.946729075838)
    @test isapprox(nullloglikelihood(lm_model), -4984.892139711452)
    @test isapprox(mean(residuals(lm_model)), -5.412966629787718)
end
@testset "rankdeficient" begin
    rng = StableRNG(1234321)
    # an example of rank deficiency caused by a missing cell in a table
    dfrm = DataFrame([categorical(repeat(string.('A':'D'), inner = 6)),
                      categorical(repeat(string.('a':'c'), inner = 2, outer = 4))],
                     [:G, :H])
    f = @formula(0 ~ 1 + G*H)
    X = ModelMatrix(ModelFrame(f, dfrm)).m
    y = X * (1:size(X, 2)) + 0.1 * randn(rng, size(X, 1))
    inds = deleteat!(collect(1:length(y)), 7:8)
    m1 = fit(LinearModel, X, y)
    @test isapprox(deviance(m1), 0.12160301538297297)
    Xmissingcell = X[inds, :]
    ymissingcell = y[inds]
    # Without pivoting the normal equations are not positive definite.
    @test_throws PosDefException m2 = fit(LinearModel, Xmissingcell, ymissingcell; dropcollinear=false)
    # With pivoting (the default), exactly one coefficient is dropped (rank 11).
    m2p = fit(LinearModel, Xmissingcell, ymissingcell)
    @test isa(m2p.pp.chol, CholeskyPivoted)
    @test rank(m2p.pp.chol) == 11
    @test isapprox(deviance(m2p), 0.1215758392280204)
    @test isapprox(coef(m2p), [0.9772643585228885, 8.903341608496437, 3.027347397503281,
        3.9661379199401257, 5.079410103608552, 6.1944618141188625, 0.0, 7.930328728005131,
        8.879994918604757, 2.986388408421915, 10.84972230524356, 11.844809275711485])
    # The dropped (7th) coefficient gets NaN stderr/t/p/CI entries.
    @test all(isnan, hcat(coeftable(m2p).cols[2:end]...)[7,:])

    # Deprecated positional `allowrankdeficient` still works but warns.
    m2p_dep_pos = fit(LinearModel, Xmissingcell, ymissingcell, true)
    @test_logs (:warn, "Positional argument `allowrankdeficient` is deprecated, use keyword " *
        "argument `dropcollinear` instead. Proceeding with positional argument value: true") fit(LinearModel, Xmissingcell, ymissingcell, true)
    @test isa(m2p_dep_pos.pp.chol, CholeskyPivoted)
    @test rank(m2p_dep_pos.pp.chol) == rank(m2p.pp.chol)
    @test isapprox(deviance(m2p_dep_pos), deviance(m2p))
    @test isapprox(coef(m2p_dep_pos), coef(m2p))

    # With both the positional argument and the keyword given, the results
    # below match the pivoted fit (i.e. the positional value takes effect).
    m2p_dep_pos_kw = fit(LinearModel, Xmissingcell, ymissingcell, true; dropcollinear = false)
    @test isa(m2p_dep_pos_kw.pp.chol, CholeskyPivoted)
    @test rank(m2p_dep_pos_kw.pp.chol) == rank(m2p.pp.chol)
    @test isapprox(deviance(m2p_dep_pos_kw), deviance(m2p))
    @test isapprox(coef(m2p_dep_pos_kw), coef(m2p))
end
@testset "saturated linear model" begin
    # A saturated model has zero residual dof: dispersion is infinite and all
    # inference columns (stderr, t, p, CI) degenerate to Inf/NaN.
    df = DataFrame(x=["a", "b", "c"], y=[1, 2, 3])
    model = lm(@formula(y ~ x), df)
    ct = coeftable(model)
    @test dof_residual(model) == 0
    @test dof(model) == 4
    @test isinf(GLM.dispersion(model.model))
    @test coef(model) ≈ [1, 1, 2]
    @test isequal(hcat(ct.cols[2:end]...),
                  [Inf 0.0 1.0 -Inf Inf
                   Inf 0.0 1.0 -Inf Inf
                   Inf 0.0 1.0 -Inf Inf])

    model = lm(@formula(y ~ 0 + x), df)
    ct = coeftable(model)
    @test dof_residual(model) == 0
    @test dof(model) == 4
    @test isinf(GLM.dispersion(model.model))
    @test coef(model) ≈ [1, 2, 3]
    @test isequal(hcat(ct.cols[2:end]...),
                  [Inf 0.0 1.0 -Inf Inf
                   Inf 0.0 1.0 -Inf Inf
                   Inf 0.0 1.0 -Inf Inf])

    # The equivalent Normal/identity GLM behaves the same way.
    model = glm(@formula(y ~ x), df, Normal(), IdentityLink())
    ct = coeftable(model)
    @test dof_residual(model) == 0
    @test dof(model) == 4
    @test isinf(GLM.dispersion(model.model))
    @test coef(model) ≈ [1, 1, 2]
    @test isequal(hcat(ct.cols[2:end]...),
                  [Inf 0.0 1.0 -Inf Inf
                   Inf 0.0 1.0 -Inf Inf
                   Inf 0.0 1.0 -Inf Inf])

    model = glm(@formula(y ~ 0 + x), df, Normal(), IdentityLink())
    ct = coeftable(model)
    @test dof_residual(model) == 0
    @test dof(model) == 4
    @test isinf(GLM.dispersion(model.model))
    @test coef(model) ≈ [1, 2, 3]
    @test isequal(hcat(ct.cols[2:end]...),
                  [Inf 0.0 1.0 -Inf Inf
                   Inf 0.0 1.0 -Inf Inf
                   Inf 0.0 1.0 -Inf Inf])

    # Saturated and rank-deficient model
    df = DataFrame(x1=["a", "b", "c"], x2=["a", "b", "c"], y=[1, 2, 3])
    for model in (lm(@formula(y ~ x1 + x2), df),
                  glm(@formula(y ~ x1 + x2), df, Normal(), IdentityLink()))
        ct = coeftable(model)
        @test dof_residual(model) == 0
        @test dof(model) == 4
        @test isinf(GLM.dispersion(model.model))
        @test coef(model) ≈ [1, 1, 2, 0, 0]
        # Dropped coefficients report NaN in every inference column.
        @test isequal(hcat(ct.cols[2:end]...),
                      [Inf 0.0 1.0 -Inf Inf
                       Inf 0.0 1.0 -Inf Inf
                       Inf 0.0 1.0 -Inf Inf
                       NaN NaN NaN NaN NaN
                       NaN NaN NaN NaN NaN])
    end
end
@testset "Linear model with no intercept" begin
    @testset "Test with NoInt1 Dataset" begin
        # test case to test r2 for no intercept model
        # https://www.itl.nist.gov/div898/strd/lls/data/LINKS/DATA/NoInt1.dat
        data = DataFrame(x = 60:70, y = 130:140)
        mdl = lm(@formula(y ~ 0 + x), data)
        @test coef(mdl) ≈ [2.07438016528926]
        @test stderror(mdl) ≈ [0.165289256198347E-01]
        @test GLM.dispersion(mdl.model) ≈ 3.56753034006338
        @test dof(mdl) == 2
        @test dof_residual(mdl) == 10
        @test r2(mdl) ≈ 0.999365492298663
        @test adjr2(mdl) ≈ 0.9993020415285
        @test nulldeviance(mdl) ≈ 200585.00000000000
        @test deviance(mdl) ≈ 127.2727272727272
        @test aic(mdl) ≈ 62.149454400575
        @test loglikelihood(mdl) ≈ -29.07472720028775
        @test nullloglikelihood(mdl) ≈ -69.56936343308669
        @test predict(mdl) ≈ [124.4628099173554, 126.5371900826446, 128.6115702479339,
                              130.6859504132231, 132.7603305785124, 134.8347107438017,
                              136.9090909090909, 138.9834710743802, 141.0578512396694,
                              143.1322314049587, 145.2066115702479]
    end
    @testset "Test with NoInt2 Dataset" begin
        # test case to test r2 for no intercept model
        # https://www.itl.nist.gov/div898/strd/lls/data/LINKS/DATA/NoInt2.dat
        data = DataFrame(x = [4, 5, 6], y = [3, 4, 4])
        mdl = lm(@formula(y ~ 0 + x), data)
        @test coef(mdl) ≈ [0.727272727272727]
        @test stderror(mdl) ≈ [0.420827318078432E-01]
        @test GLM.dispersion(mdl.model) ≈ 0.369274472937998
        @test dof(mdl) == 2
        @test dof_residual(mdl) == 2
        @test r2(mdl) ≈ 0.993348115299335
        @test adjr2(mdl) ≈ 0.990022172949
        @test nulldeviance(mdl) ≈ 41.00000000000000
        @test deviance(mdl) ≈ 0.27272727272727
        @test aic(mdl) ≈ 5.3199453808329
        @test loglikelihood(mdl) ≈ -0.6599726904164597
        @test nullloglikelihood(mdl) ≈ -8.179255266668315
        @test predict(mdl) ≈ [2.909090909090908, 3.636363636363635, 4.363636363636362]
    end
    @testset "Test with without formula" begin
        # The matrix/vector method must give the same fit as the formula method.
        X = [4 5 6]'
        y = [3, 4, 4]
        data = DataFrame(x = [4, 5, 6], y = [3, 4, 4])
        mdl1 = lm(@formula(y ~ 0 + x), data)
        mdl2 = lm(X, y)
        @test coef(mdl1) ≈ coef(mdl2)
        @test stderror(mdl1) ≈ stderror(mdl2)
        @test GLM.dispersion(mdl1.model) ≈ GLM.dispersion(mdl2)
        @test dof(mdl1) ≈ dof(mdl2)
        @test dof_residual(mdl1) ≈ dof_residual(mdl2)
        @test r2(mdl1) ≈ r2(mdl2)
        @test adjr2(mdl1) ≈ adjr2(mdl2)
        @test nulldeviance(mdl1) ≈ nulldeviance(mdl2)
        @test deviance(mdl1) ≈ deviance(mdl2)
        @test aic(mdl1) ≈ aic(mdl2)
        @test loglikelihood(mdl1) ≈ loglikelihood(mdl2)
        @test nullloglikelihood(mdl1) ≈ nullloglikelihood(mdl2)
        @test predict(mdl1) ≈ predict(mdl2)
    end
end
# 3×3 count-data table used for the Poisson fits below.
dobson = DataFrame(Counts = [18.,17,15,20,10,20,25,13,12],
                   Outcome = categorical(repeat(string.('A':'C'), outer = 3)),
                   Treatment = categorical(repeat(string.('a':'c'), inner = 3)))

@testset "Poisson GLM" begin
    # Poisson regression with the default (canonical) link on the dobson counts.
    gm1 = fit(GeneralizedLinearModel, @formula(Counts ~ 1 + Outcome + Treatment),
              dobson, Poisson())
    @test GLM.cancancel(gm1.model.rr)
    test_show(gm1)
    @test dof(gm1) == 5
    @test isapprox(deviance(gm1), 5.12914107700115, rtol = 1e-7)
    @test isapprox(nulldeviance(gm1), 10.581445863750867, rtol = 1e-7)
    @test isapprox(loglikelihood(gm1), -23.380659200978837, rtol = 1e-7)
    @test isapprox(nullloglikelihood(gm1), -26.10681159435372, rtol = 1e-7)
    @test isapprox(aic(gm1), 56.76131840195767)
    @test isapprox(aicc(gm1), 76.76131840195768)
    @test isapprox(bic(gm1), 57.74744128863877)
    @test isapprox(coef(gm1)[1:3],
                   [3.044522437723423,-0.45425527227759555,-0.29298712468147375])
end
## Example from http://www.ats.ucla.edu/stat/r/dae/logit.htm
admit = CSV.read(joinpath(glm_datadir, "admit.csv"), DataFrame)
admit.rank = categorical(admit.rank)

@testset "$distr with LogitLink" for distr in (Binomial, Bernoulli)
    # For 0/1 responses, Binomial and Bernoulli must give identical fits.
    gm2 = fit(GeneralizedLinearModel, @formula(admit ~ 1 + gre + gpa + rank), admit, distr())
    @test GLM.cancancel(gm2.model.rr)
    test_show(gm2)
    @test dof(gm2) == 6
    @test deviance(gm2) ≈ 458.5174924758994
    @test nulldeviance(gm2) ≈ 499.9765175549154
    @test loglikelihood(gm2) ≈ -229.25874623794968
    @test nullloglikelihood(gm2) ≈ -249.9882587774585
    @test isapprox(aic(gm2), 470.51749247589936)
    @test isapprox(aicc(gm2), 470.7312329339146)
    @test isapprox(bic(gm2), 494.4662797585473)
    @test isapprox(coef(gm2),
                   [-3.9899786606380756, 0.0022644256521549004, 0.804037453515578,
                    -0.6754428594116578, -1.340203811748108, -1.5514636444657495])
end
@testset "Bernoulli ProbitLink" begin
    # Same admissions data with a non-canonical probit link.
    gm3 = fit(GeneralizedLinearModel, @formula(admit ~ 1 + gre + gpa + rank), admit,
              Binomial(), ProbitLink())
    test_show(gm3)
    @test !GLM.cancancel(gm3.model.rr)
    @test dof(gm3) == 6
    @test isapprox(deviance(gm3), 458.4131713833386)
    @test isapprox(nulldeviance(gm3), 499.9765175549236)
    @test isapprox(loglikelihood(gm3), -229.20658569166932)
    @test isapprox(nullloglikelihood(gm3), -249.9882587774585)
    @test isapprox(aic(gm3), 470.41317138333864)
    @test isapprox(aicc(gm3), 470.6269118413539)
    @test isapprox(bic(gm3), 494.36195866598655)
    @test isapprox(coef(gm3),
                   [-2.3867922998680777, 0.0013755394922972401, 0.47772908362646926,
                    -0.4154125854823675, -0.8121458010130354, -0.9359047862425297])
end

@testset "Bernoulli CauchitLink" begin
    gm4 = fit(GeneralizedLinearModel, @formula(admit ~ gre + gpa + rank), admit,
              Binomial(), CauchitLink())
    @test !GLM.cancancel(gm4.model.rr)
    test_show(gm4)
    @test dof(gm4) == 6
    @test isapprox(deviance(gm4), 459.3401112751141)
    @test isapprox(nulldeviance(gm4), 499.9765175549311)
    @test isapprox(loglikelihood(gm4), -229.6700556375571)
    @test isapprox(nullloglikelihood(gm4), -249.9882587774585)
    @test isapprox(aic(gm4), 471.3401112751142)
    @test isapprox(aicc(gm4), 471.5538517331295)
    @test isapprox(bic(gm4), 495.28889855776214)
end

@testset "Bernoulli CloglogLink" begin
    gm5 = fit(GeneralizedLinearModel, @formula(admit ~ gre + gpa + rank), admit,
              Binomial(), CloglogLink())
    @test !GLM.cancancel(gm5.model.rr)
    test_show(gm5)
    @test dof(gm5) == 6
    @test isapprox(deviance(gm5), 458.89439629612616)
    @test isapprox(nulldeviance(gm5), 499.97651755491677)
    @test isapprox(loglikelihood(gm5), -229.44719814806314)
    @test isapprox(nullloglikelihood(gm5), -249.9882587774585)
    @test isapprox(aic(gm5), 470.8943962961263)
    @test isapprox(aicc(gm5), 471.1081367541415)
    @test isapprox(bic(gm5), 494.8431835787742)

    # When data are almost separated, the calculations are prone to underflow which can cause
    # NaN in wrkwt and/or wrkres. The example here used to fail but works with the "clamping"
    # introduced in #187
    @testset "separated data" begin
        n = 100
        rng = StableRNG(127)
        X = [ones(n) randn(rng, n)]
        y = logistic.(X*ones(2) + 1/10*randn(rng, n)) .> 1/2
        @test coeftable(glm(X, y, Binomial(), CloglogLink())).cols[4][2] < 0.05
    end
end
## Example with offsets from Venables & Ripley (2002, p.189)
anorexia = CSV.read(joinpath(glm_datadir, "anorexia.csv"), DataFrame)

@testset "Normal offset" begin
    # Identity-link Normal model with Prewt supplied as an offset.
    gm6 = fit(GeneralizedLinearModel, @formula(Postwt ~ 1 + Prewt + Treat), anorexia,
              Normal(), IdentityLink(), offset=Array{Float64}(anorexia.Prewt))
    @test GLM.cancancel(gm6.model.rr)
    test_show(gm6)
    @test dof(gm6) == 5
    @test isapprox(deviance(gm6), 3311.262619919613)
    @test isapprox(nulldeviance(gm6), 4525.386111111112)
    @test isapprox(loglikelihood(gm6), -239.9866487711122)
    @test isapprox(nullloglikelihood(gm6), -251.2320886191385)
    @test isapprox(aic(gm6), 489.9732975422244)
    @test isapprox(aicc(gm6), 490.8823884513153)
    @test isapprox(bic(gm6), 501.35662813730465)
    @test isapprox(coef(gm6),
                   [49.7711090, -0.5655388, -4.0970655, 4.5630627])
    @test isapprox(GLM.dispersion(gm6.model, true), 48.6950385282296)
    @test isapprox(stderror(gm6),
                   [13.3909581, 0.1611824, 1.8934926, 2.1333359])
end

@testset "Normal LogLink offset" begin
    gm7 = fit(GeneralizedLinearModel, @formula(Postwt ~ 1 + Prewt + Treat), anorexia,
              Normal(), LogLink(), offset=anorexia.Prewt, rtol=1e-8)
    @test !GLM.cancancel(gm7.model.rr)
    test_show(gm7)
    @test isapprox(deviance(gm7), 3265.207242977156)
    @test isapprox(nulldeviance(gm7), 507625.1718547432)
    @test isapprox(loglikelihood(gm7), -239.48242060326643)
    @test isapprox(nullloglikelihood(gm7), -421.1535438334255)
    @test isapprox(coef(gm7),
                   [3.99232679, -0.99445269, -0.05069826, 0.05149403])
    @test isapprox(GLM.dispersion(gm7.model, true), 48.017753573192266)
    @test isapprox(stderror(gm7),
                   [0.157167944, 0.001886286, 0.022584069, 0.023882826],
                   atol=1e-6)
end

@testset "Poisson LogLink offset" begin
    # Poisson with a log-scale offset; the response is rounded to counts.
    gm7p = fit(GeneralizedLinearModel, @formula(round(Postwt) ~ 1 + Prewt + Treat), anorexia,
               Poisson(), LogLink(), offset=log.(anorexia.Prewt), rtol=1e-8)
    @test GLM.cancancel(gm7p.model.rr)
    test_show(gm7p)
    @test deviance(gm7p) ≈ 39.686114742427705
    @test nulldeviance(gm7p) ≈ 54.749010639715294
    @test loglikelihood(gm7p) ≈ -245.92639857546905
    @test nullloglikelihood(gm7p) ≈ -253.4578465241127
    @test coef(gm7p) ≈
        [0.61587278, -0.00700535, -0.048518903, 0.05331228]
    @test stderror(gm7p) ≈
        [0.2091138392, 0.0025136984, 0.0297381842, 0.0324618795]
end

@testset "Poisson LogLink offset with weights" begin
    gm7pw = fit(GeneralizedLinearModel, @formula(round(Postwt) ~ 1 + Prewt + Treat), anorexia,
                Poisson(), LogLink(), offset=log.(anorexia.Prewt),
                wts=repeat(1:4, outer=18), rtol=1e-8)
    @test GLM.cancancel(gm7pw.model.rr)
    test_show(gm7pw)
    @test deviance(gm7pw) ≈ 90.17048668870225
    @test nulldeviance(gm7pw) ≈ 139.63782826574652
    @test loglikelihood(gm7pw) ≈ -610.3058020030296
    @test nullloglikelihood(gm7pw) ≈ -635.0394727915523
    @test coef(gm7pw) ≈
        [0.6038154675, -0.0070083965, -0.038390455, 0.0893445315]
    @test stderror(gm7pw) ≈
        [0.1318509718, 0.0015910084, 0.0190289059, 0.0202335849]
end
## Gamma example from McCullagh & Nelder (1989, pp. 300-2)
clotting = DataFrame(u = log.([5,10,15,20,30,40,60,80,100]),
                     lot1 = [118,58,42,35,27,25,21,19,18])

@testset "Gamma" begin
    # Gamma with its default (inverse) link.
    gm8 = fit(GeneralizedLinearModel, @formula(lot1 ~ 1 + u), clotting, Gamma())
    @test !GLM.cancancel(gm8.model.rr)
    @test isa(GLM.Link(gm8.model), InverseLink)
    test_show(gm8)
    @test dof(gm8) == 3
    @test isapprox(deviance(gm8), 0.016729715178484157)
    @test isapprox(nulldeviance(gm8), 3.5128262638285594)
    @test isapprox(loglikelihood(gm8), -15.994961974777247)
    @test isapprox(nullloglikelihood(gm8), -40.34632899455258)
    @test isapprox(aic(gm8), 37.989923949554495)
    @test isapprox(aicc(gm8), 42.78992394955449)
    @test isapprox(bic(gm8), 38.58159768156315)
    @test isapprox(coef(gm8), [-0.01655438172784895,0.01534311491072141])
    @test isapprox(GLM.dispersion(gm8.model, true), 0.002446059333495581, atol=1e-6)
    @test isapprox(stderror(gm8), [0.00092754223, 0.000414957683], atol=1e-6)
end

@testset "InverseGaussian" begin
    # InverseGaussian with its default (inverse-square) link.
    gm8a = fit(GeneralizedLinearModel, @formula(lot1 ~ 1 + u), clotting, InverseGaussian())
    @test !GLM.cancancel(gm8a.model.rr)
    @test isa(GLM.Link(gm8a.model), InverseSquareLink)
    test_show(gm8a)
    @test dof(gm8a) == 3
    @test isapprox(deviance(gm8a), 0.006931128347234519)
    @test isapprox(nulldeviance(gm8a), 0.08779963125372384)
    @test isapprox(loglikelihood(gm8a), -27.787426008849867)
    @test isapprox(nullloglikelihood(gm8a), -39.213082069623105)
    @test isapprox(aic(gm8a), 61.57485201769973)
    @test isapprox(aicc(gm8a), 66.37485201769974)
    @test isapprox(bic(gm8a), 62.16652574970839)
    @test isapprox(coef(gm8a), [-0.0011079770504295668,0.0007219138982289362])
    @test isapprox(GLM.dispersion(gm8a.model, true), 0.0011008719709455776, atol=1e-6)
    @test isapprox(stderror(gm8a), [0.0001675339726910311,9.468485015919463e-5], atol=1e-6)
end

@testset "Gamma LogLink" begin
    gm9 = fit(GeneralizedLinearModel, @formula(lot1 ~ 1 + u), clotting, Gamma(), LogLink(),
              rtol=1e-8, atol=0.0)
    @test !GLM.cancancel(gm9.model.rr)
    test_show(gm9)
    @test dof(gm9) == 3
    @test deviance(gm9) ≈ 0.16260829451739
    @test nulldeviance(gm9) ≈ 3.512826263828517
    @test loglikelihood(gm9) ≈ -26.24082810384911
    @test nullloglikelihood(gm9) ≈ -40.34632899455252
    @test aic(gm9) ≈ 58.48165620769822
    @test aicc(gm9) ≈ 63.28165620769822
    @test bic(gm9) ≈ 59.07332993970688
    @test coef(gm9) ≈ [5.50322528458221, -0.60191617825971]
    @test GLM.dispersion(gm9.model, true) ≈ 0.02435442293561081
    @test stderror(gm9) ≈ [0.19030107482720, 0.05530784660144]
end

@testset "Gamma IdentityLink" begin
    gm10 = fit(GeneralizedLinearModel, @formula(lot1 ~ 1 + u), clotting, Gamma(), IdentityLink(),
               rtol=1e-8, atol=0.0)
    @test !GLM.cancancel(gm10.model.rr)
    test_show(gm10)
    @test dof(gm10) == 3
    @test isapprox(deviance(gm10), 0.60845414895344)
    @test isapprox(nulldeviance(gm10), 3.512826263828517)
    @test isapprox(loglikelihood(gm10), -32.216072437284176)
    @test isapprox(nullloglikelihood(gm10), -40.346328994552515)
    @test isapprox(aic(gm10), 70.43214487456835)
    @test isapprox(aicc(gm10), 75.23214487456835)
    @test isapprox(bic(gm10), 71.02381860657701)
    @test isapprox(coef(gm10), [99.250446880986, -18.374324929002])
    @test isapprox(GLM.dispersion(gm10.model, true), 0.10417373, atol=1e-6)
    @test isapprox(stderror(gm10), [17.864084, 4.297895], atol=1e-4)
end
# Logistic regression using aggregated data and weights
admit_agr = DataFrame(count = [28., 97, 93, 55, 33, 54, 28, 12],
                      admit = repeat([false, true], inner=[4]),
                      rank = categorical(repeat(1:4, outer=2)))

@testset "Aggregated Binomial LogitLink" begin
    # Frequency weights on aggregated rows must reproduce the unit-record fit.
    for distr in (Binomial, Bernoulli)
        gm14 = fit(GeneralizedLinearModel, @formula(admit ~ 1 + rank), admit_agr, distr(),
                   wts=Array(admit_agr.count))
        @test dof(gm14) == 4
        @test nobs(gm14) == 400
        @test isapprox(deviance(gm14), 474.9667184280627)
        @test isapprox(nulldeviance(gm14), 499.97651755491546)
        @test isapprox(loglikelihood(gm14), -237.48335921403134)
        @test isapprox(nullloglikelihood(gm14), -249.98825877745773)
        @test isapprox(aic(gm14), 482.96671842822883)
        @test isapprox(aicc(gm14), 483.0679842510136)
        @test isapprox(bic(gm14), 498.9325766164946)
        @test isapprox(coef(gm14),
                       [0.164303051291, -0.7500299832, -1.36469792994, -1.68672866457], atol=1e-5)
    end
end

# Logistic regression using aggregated data with proportions of successes and weights
admit_agr2 = DataFrame(Any[[61., 151, 121, 67], [33., 54, 28, 12], categorical(1:4)],
                       [:count, :admit, :rank])
admit_agr2.p = admit_agr2.admit ./ admit_agr2.count

## The model matrix here is singular so tests like the deviance are just round off error
@testset "Binomial LogitLink aggregated" begin
    gm15 = fit(GeneralizedLinearModel, @formula(p ~ rank), admit_agr2, Binomial(),
               wts=admit_agr2.count)
    test_show(gm15)
    @test dof(gm15) == 4
    @test nobs(gm15) == 400
    @test deviance(gm15) ≈ -2.4424906541753456e-15 atol = 1e-13
    @test nulldeviance(gm15) ≈ 25.009799126861324
    @test loglikelihood(gm15) ≈ -9.50254433604239
    @test nullloglikelihood(gm15) ≈ -22.007443899473067
    @test aic(gm15) ≈ 27.00508867208478
    @test aicc(gm15) ≈ 27.106354494869592
    @test bic(gm15) ≈ 42.970946860516705
    @test coef(gm15) ≈ [0.1643030512912767, -0.7500299832303851, -1.3646980342693287, -1.6867295867357475]
end

# Weighted Gamma example (weights are totally made up)
@testset "Gamma InverseLink Weights" begin
    gm16 = fit(GeneralizedLinearModel, @formula(lot1 ~ 1 + u), clotting, Gamma(),
               wts=[1.5,2.0,1.1,4.5,2.4,3.5,5.6,5.4,6.7])
    test_show(gm16)
    @test dof(gm16) == 3
    @test nobs(gm16) == 32.7
    @test isapprox(deviance(gm16), 0.03933389380881689)
    @test isapprox(nulldeviance(gm16), 9.26580653637595)
    @test isapprox(loglikelihood(gm16), -43.35907878769152)
    @test isapprox(nullloglikelihood(gm16), -133.42962325047895)
    @test isapprox(aic(gm16), 92.71815757538305)
    @test isapprox(aicc(gm16), 93.55439450918095)
    @test isapprox(bic(gm16), 97.18028280909267)
    @test isapprox(coef(gm16), [-0.017217012615523237, 0.015649040411276433])
end

# Weighted Poisson example (weights are totally made up)
@testset "Poisson LogLink Weights" begin
    gm17 = fit(GeneralizedLinearModel, @formula(Counts ~ Outcome + Treatment), dobson, Poisson(),
               wts = [1.5,2.0,1.1,4.5,2.4,3.5,5.6,5.4,6.7])
    test_show(gm17)
    @test dof(gm17) == 5
    @test isapprox(deviance(gm17), 17.699857821414266)
    @test isapprox(nulldeviance(gm17), 47.37955120289139)
    @test isapprox(loglikelihood(gm17), -84.57429468506352)
    @test isapprox(nullloglikelihood(gm17), -99.41414137580216)
    @test isapprox(aic(gm17), 179.14858937012704)
    @test isapprox(aicc(gm17), 181.39578038136298)
    @test isapprox(bic(gm17), 186.5854647596431)
    @test isapprox(coef(gm17), [3.1218557035404793, -0.5270435906931427,-0.40300384148562746,
                                -0.017850203824417415,-0.03507851122782909])
end
# "quine" dataset discussed in Section 7.4 of "Modern Applied Statistics with S"
quine = dataset("MASS", "quine")

@testset "NegativeBinomial LogLink Fixed θ" begin
    # Negative binomial with a fixed shape parameter θ = 2 and log link.
    gm18 = fit(GeneralizedLinearModel, @formula(Days ~ Eth+Sex+Age+Lrn), quine, NegativeBinomial(2.0), LogLink())
    @test !GLM.cancancel(gm18.model.rr)
    test_show(gm18)
    @test dof(gm18) == 8
    @test isapprox(deviance(gm18), 239.11105911824325, rtol = 1e-7)
    @test isapprox(nulldeviance(gm18), 280.1806722491237, rtol = 1e-7)
    @test isapprox(loglikelihood(gm18), -553.2596040803376, rtol = 1e-7)
    @test isapprox(nullloglikelihood(gm18), -573.7944106457778, rtol = 1e-7)
    @test isapprox(aic(gm18), 1122.5192081606751)
    @test isapprox(aicc(gm18), 1123.570303051186)
    @test isapprox(bic(gm18), 1146.3880611343418)
    @test isapprox(coef(gm18)[1:7],
                   [2.886448718885344, -0.5675149923412003, 0.08707706381784373,
                    -0.44507646428307207, 0.09279987988262384, 0.35948527963485755, 0.29676767190444386])
end

@testset "NegativeBinomial NegativeBinomialLink Fixed θ" begin
    # the default/canonical link is NegativeBinomialLink
    gm19 = fit(GeneralizedLinearModel, @formula(Days ~ Eth+Sex+Age+Lrn), quine, NegativeBinomial(2.0))
    @test GLM.cancancel(gm19.model.rr)
    test_show(gm19)
    @test dof(gm19) == 8
    @test isapprox(deviance(gm19), 239.68562048977307, rtol = 1e-7)
    @test isapprox(nulldeviance(gm19), 280.18067224912204, rtol = 1e-7)
    @test isapprox(loglikelihood(gm19), -553.5468847661017, rtol = 1e-7)
    @test isapprox(nullloglikelihood(gm19), -573.7944106457775, rtol = 1e-7)
    @test isapprox(aic(gm19), 1123.0937695322034)
    @test isapprox(aicc(gm19), 1124.1448644227144)
    @test isapprox(bic(gm19), 1146.96262250587)
    @test isapprox(coef(gm19)[1:7],
                   [-0.12737182842213654, -0.055871700989224705, 0.01561618806384601,
                    -0.041113722732799125, 0.024042387142113462, 0.04400234618798099, 0.035765875508382027,
                   ])
end

@testset "NegativeBinomial LogLink, θ to be estimated" begin
    # `negbin` jointly estimates the shape parameter θ.
    gm20 = negbin(@formula(Days ~ Eth+Sex+Age+Lrn), quine, LogLink())
    test_show(gm20)
    @test dof(gm20) == 8
    @test isapprox(deviance(gm20), 167.9518430624193, rtol = 1e-7)
    @test isapprox(nulldeviance(gm20), 195.28668602703388, rtol = 1e-7)
    @test isapprox(loglikelihood(gm20), -546.57550938017, rtol = 1e-7)
    @test isapprox(nullloglikelihood(gm20), -560.2429308624774, rtol = 1e-7)
    @test isapprox(aic(gm20), 1109.15101876034)
    @test isapprox(aicc(gm20), 1110.202113650851)
    @test isapprox(bic(gm20), 1133.0198717340068)
    @test isapprox(coef(gm20)[1:7],
                   [2.894527697811509, -0.5693411448715979, 0.08238813087070128, -0.4484636623590206,
                    0.08805060372902418, 0.3569553124412582, 0.2921383118842893])

    @testset "NegativeBinomial Parameter estimation" begin
        # Issue #302
        df = DataFrame(y = [1, 1, 0, 2, 3, 0, 0, 1, 1, 0, 2, 1, 3, 1, 1, 1, 4])
        for maxiter in [30, 50]
            try
                negbin(@formula(y ~ 1), df, maxiter = maxiter,
                       # set minstepfac to a very small value to avoid an ErrorException
                       # instead of a ConvergenceException
                       minstepfac=1e-20)
            catch err
                if err isa ConvergenceException
                    @test err.iters == maxiter
                else
                    rethrow(err)
                end
            end
        end
    end
end

@testset "NegativeBinomial NegativeBinomialLink, θ to be estimated" begin
    # the default/canonical link is NegativeBinomialLink
    gm21 = negbin(@formula(Days ~ Eth+Sex+Age+Lrn), quine)
    test_show(gm21)
    @test dof(gm21) == 8
    @test isapprox(deviance(gm21), 168.0465485656672, rtol = 1e-7)
    @test isapprox(nulldeviance(gm21), 194.85525025005109, rtol = 1e-7)
    @test isapprox(loglikelihood(gm21), -546.8048603957335, rtol = 1e-7)
    @test isapprox(nullloglikelihood(gm21), -560.2092112379252, rtol = 1e-7)
    @test isapprox(aic(gm21), 1109.609720791467)
    @test isapprox(aicc(gm21), 1110.660815681978)
    @test isapprox(bic(gm21), 1133.4785737651337)
    @test isapprox(coef(gm21)[1:7],
                   [-0.08288628676491684, -0.03697387258037785, 0.010284124099280421, -0.027411445371127288,
                    0.01582155341041012, 0.029074956147127032, 0.023628812427424876])
end
@testset "Geometric LogLink" begin
# the default/canonical link is LogLink
gm22 = fit(GeneralizedLinearModel, @formula(Days ~ Eth + Sex + Age + Lrn), quine, Geometric())
test_show(gm22)
@test dof(gm22) == 8
@test deviance(gm22) ≈ 137.8781581814965
@test loglikelihood(gm22) ≈ -548.3711276642073
@test aic(gm22) ≈ 1112.7422553284146
@test aicc(gm22) ≈ 1113.7933502189255
@test bic(gm22) ≈ 1136.6111083020812
@test coef(gm22)[1:7] ≈ [2.8978546663153897, -0.5701067649409168, 0.08040181505082235,
-0.4497584898742737, 0.08622664933901254, 0.3558996662512287,
0.29016080736927813]
@test stderror(gm22) ≈ [0.22754287093719366, 0.15274755092180423, 0.15928431669166637,
0.23853372776980591, 0.2354231414867577, 0.24750780320597515,
0.18553339017028742]
end
@testset "Geometric is a special case of NegativeBinomial with θ = 1" begin
gm23 = glm(@formula(Days ~ Eth + Sex + Age + Lrn), quine, Geometric(), InverseLink())
gm24 = glm(@formula(Days ~ Eth + Sex + Age + Lrn), quine, NegativeBinomial(1), InverseLink())
@test coef(gm23) ≈ coef(gm24)
@test stderror(gm23) ≈ stderror(gm24)
@test confint(gm23) ≈ confint(gm24)
@test dof(gm23) ≈ dof(gm24)
@test deviance(gm23) ≈ deviance(gm24)
@test loglikelihood(gm23) ≈ loglikelihood(gm24)
@test aic(gm23) ≈ aic(gm24)
@test aicc(gm23) ≈ aicc(gm24)
@test bic(gm23) ≈ bic(gm24)
@test predict(gm23) ≈ predict(gm24)
end
@testset "GLM with no intercept" begin
# Gamma with single numeric predictor
nointglm1 = fit(GeneralizedLinearModel, @formula(lot1 ~ 0 + u), clotting, Gamma())
@test !hasintercept(nointglm1.model)
@test !GLM.cancancel(nointglm1.model.rr)
@test isa(GLM.Link(nointglm1.model), InverseLink)
test_show(nointglm1)
@test dof(nointglm1) == 2
@test deviance(nointglm1) ≈ 0.6629903395245351
@test isnan(nulldeviance(nointglm1))
@test loglikelihood(nointglm1) ≈ -32.60688972888763
@test_throws DomainError nullloglikelihood(nointglm1)
@test aic(nointglm1) ≈ 69.21377945777526
@test aicc(nointglm1) ≈ 71.21377945777526
@test bic(nointglm1) ≈ 69.6082286124477
@test coef(nointglm1) ≈ [0.009200201253724151]
@test GLM.dispersion(nointglm1.model, true) ≈ 0.10198331431820506
@test stderror(nointglm1) ≈ [0.000979309363228589]
# Bernoulli with numeric predictors
nointglm2 = fit(GeneralizedLinearModel, @formula(admit ~ 0 + gre + gpa), admit, Bernoulli())
@test !hasintercept(nointglm2.model)
@test GLM.cancancel(nointglm2.model.rr)
test_show(nointglm2)
@test dof(nointglm2) == 2
@test deviance(nointglm2) ≈ 503.5584368354113
@test nulldeviance(nointglm2) ≈ 554.5177444479574
@test loglikelihood(nointglm2) ≈ -251.77921841770578
@test nullloglikelihood(nointglm2) ≈ -277.2588722239787
@test aic(nointglm2) ≈ 507.55843683541156
@test aicc(nointglm2) ≈ 507.58866353566344
@test bic(nointglm2) ≈ 515.5413659296275
@test coef(nointglm2) ≈ [0.0015622695743609228, -0.4822556276412118]
@test stderror(nointglm2) ≈ [0.000987218133602179, 0.17522675354523715]
# Poisson with categorical predictors, weights and offset
nointglm3 = fit(GeneralizedLinearModel, @formula(round(Postwt) ~ 0 + Prewt + Treat), anorexia,
Poisson(), LogLink(); offset=log.(anorexia.Prewt),
wts=repeat(1:4, outer=18), rtol=1e-8, dropcollinear=false)
@test !hasintercept(nointglm3.model)
@test GLM.cancancel(nointglm3.model.rr)
test_show(nointglm3)
@test deviance(nointglm3) ≈ 90.17048668870225
@test nulldeviance(nointglm3) ≈ 159.32999067102548
@test loglikelihood(nointglm3) ≈ -610.3058020030296
@test nullloglikelihood(nointglm3) ≈ -644.885553994191
@test aic(nointglm3) ≈ 1228.6116040060592
@test aicc(nointglm3) ≈ 1228.8401754346307
@test bic(nointglm3) ≈ 1241.38343140962
@test coef(nointglm3) ≈
[-0.007008396492196935, 0.6038154674863438, 0.5654250124481003, 0.6931599989992452]
@test stderror(nointglm3) ≈
[0.0015910084415445974, 0.13185097176418983, 0.13016395889443858, 0.1336778089431681]
end
@testset "Sparse GLM" begin
rng = StableRNG(1)
X = sprand(rng, 1000, 10, 0.01)
β = randn(rng, 10)
y = Bool[rand(rng) < logistic(x) for x in X * β]
gmsparse = fit(GeneralizedLinearModel, X, y, Binomial())
gmdense = fit(GeneralizedLinearModel, Matrix(X), y, Binomial())
@test isapprox(deviance(gmsparse), deviance(gmdense))
@test isapprox(coef(gmsparse), coef(gmdense))
@test isapprox(vcov(gmsparse), vcov(gmdense))
end
@testset "Sparse LM" begin
rng = StableRNG(1)
X = sprand(rng, 1000, 10, 0.01)
β = randn(rng, 10)
y = Bool[rand(rng) < logistic(x) for x in X * β]
gmsparsev = [fit(LinearModel, X, y),
fit(LinearModel, X, sparse(y)),
fit(LinearModel, Matrix(X), sparse(y))]
gmdense = fit(LinearModel, Matrix(X), y)
for gmsparse in gmsparsev
@test isapprox(deviance(gmsparse), deviance(gmdense))
@test isapprox(coef(gmsparse), coef(gmdense))
@test isapprox(vcov(gmsparse), vcov(gmdense))
end
end
@testset "Predict" begin
rng = StableRNG(123)
X = rand(rng, 10, 2)
Y = logistic.(X * [3; -3])
gm11 = fit(GeneralizedLinearModel, X, Y, Binomial())
@test isapprox(predict(gm11), Y)
@test predict(gm11) == fitted(gm11)
newX = rand(rng, 5, 2)
newY = logistic.(newX * coef(gm11))
gm11_pred1 = predict(gm11, newX)
gm11_pred2 = predict(gm11, newX; interval=:confidence, interval_method=:delta)
gm11_pred3 = predict(gm11, newX; interval=:confidence, interval_method=:transformation)
@test gm11_pred1 == gm11_pred2.prediction == gm11_pred3.prediction≈ newY
J = newX.*last.(GLM.inverselink.(LogitLink(), newX*coef(gm11)))
se_pred = sqrt.(diag(J*vcov(gm11)*J'))
@test gm11_pred2.lower ≈ gm11_pred2.prediction .- quantile(Normal(), 0.975).*se_pred ≈
[0.20478201781547786, 0.2894172253195125, 0.17487705636545708, 0.024943206131575357, 0.41670326978944977]
@test gm11_pred2.upper ≈ gm11_pred2.prediction .+ quantile(Normal(), 0.975).*se_pred ≈
[0.6813754418027714, 0.9516561735593941, 1.0370309285468602, 0.5950732511233356, 1.192883895763427]
@test ndims(gm11_pred1) == 1
@test ndims(gm11_pred2.prediction) == 1
@test ndims(gm11_pred2.upper) == 1
@test ndims(gm11_pred2.lower) == 1
@test ndims(gm11_pred3.prediction) == 1
@test ndims(gm11_pred3.upper) == 1
@test ndims(gm11_pred3.lower) == 1
off = rand(rng, 10)
newoff = rand(rng, 5)
@test_throws ArgumentError predict(gm11, newX, offset=newoff)
gm12 = fit(GeneralizedLinearModel, X, Y, Binomial(), offset=off)
@test_throws ArgumentError predict(gm12, newX)
@test isapprox(predict(gm12, newX, offset=newoff),
logistic.(newX * coef(gm12) .+ newoff))
# Prediction from DataFrames
d = DataFrame(X, :auto)
d.y = Y
gm13 = fit(GeneralizedLinearModel, @formula(y ~ 0 + x1 + x2), d, Binomial())
@test predict(gm13) ≈ predict(gm13, d[:,[:x1, :x2]])
@test predict(gm13) ≈ predict(gm13, d)
newd = DataFrame(newX, :auto)
predict(gm13, newd)
Ylm = X * [0.8, 1.6] + 0.8randn(rng, 10)
mm = fit(LinearModel, X, Ylm)
pred1 = predict(mm, newX)
pred2 = predict(mm, newX, interval=:confidence)
se_pred = sqrt.(diag(newX*vcov(mm)*newX'))
@test pred1 == pred2.prediction ≈
[1.1382137814295972, 1.2097057044789292, 1.7983095679661645, 1.0139576473310072, 0.9738243263215998]
@test pred2.lower ≈ pred2.prediction - quantile(TDist(dof_residual(mm)), 0.975)*se_pred ≈
[0.5483482828723035, 0.3252331944785751, 0.6367574076909834, 0.34715818536935505, -0.41478974520958345]
@test pred2.upper ≈ pred2.prediction + quantile(TDist(dof_residual(mm)), 0.975)*se_pred ≈
[1.7280792799868907, 2.0941782144792835, 2.9598617282413455, 1.6807571092926594, 2.362438397852783]
@test ndims(pred1) == 1
@test ndims(pred2.prediction) == 1
@test ndims(pred2.lower) == 1
@test ndims(pred2.upper) == 1
pred3 = predict(mm, newX, interval=:prediction)
@test pred1 == pred3.prediction ≈
[1.1382137814295972, 1.2097057044789292, 1.7983095679661645, 1.0139576473310072, 0.9738243263215998]
@test pred3.lower ≈ pred3.prediction - quantile(TDist(dof_residual(mm)), 0.975)*sqrt.(diag(newX*vcov(mm)*newX') .+ deviance(mm)/dof_residual(mm)) ≈
[-1.6524055967145255, -1.6576810549645142, -1.1662846080257512, -1.7939306570282658, -2.0868723667435027]
@test pred3.upper ≈ pred3.prediction + quantile(TDist(dof_residual(mm)), 0.975)*sqrt.(diag(newX*vcov(mm)*newX') .+ deviance(mm)/dof_residual(mm)) ≈
[3.9288331595737196, 4.077092463922373, 4.762903743958081, 3.82184595169028, 4.034521019386702]
# Prediction with dropcollinear (#409)
x = [1.0 1.0
1.0 2.0
1.0 -1.0]
y = [1.0, 3.0, -2.0]
m1 = lm(x, y, dropcollinear=true)
m2 = lm(x, y, dropcollinear=false)
p1 = predict(m1, x, interval=:confidence)
p2 = predict(m2, x, interval=:confidence)
@test p1.prediction ≈ p2.prediction
@test p1.upper ≈ p2.upper
@test p1.lower ≈ p2.lower
# Prediction with dropcollinear and complex column permutations (#431)
x = [1.0 100.0 1.2
1.0 20000.0 2.3
1.0 -1000.0 4.6
1.0 5000 2.4]
y = [1.0, 3.0, -2.0, 4.5]
m1 = lm(x, y, dropcollinear=true)
m2 = lm(x, y, dropcollinear=false)
p1 = predict(m1, x, interval=:confidence)
p2 = predict(m2, x, interval=:confidence)
@test p1.prediction ≈ p2.prediction
@test p1.upper ≈ p2.upper
@test p1.lower ≈ p2.lower
# Deprecated argument value
@test predict(m1, x, interval=:confint) == p1
# Prediction intervals would give incorrect results when some variables
# have been dropped due to collinearity (#410)
x = [1.0 1.0 2.0
1.0 2.0 3.0
1.0 -1.0 0.0]
y = [1.0, 3.0, -2.0]
m1 = lm(x, y)
m2 = lm(x[:, 1:2], y)
@test predict(m1) ≈ predict(m2)
@test_broken predict(m1, interval=:confidence) ≈
predict(m2, interval=:confidence)
@test_broken predict(m1, interval=:prediction) ≈
predict(m2, interval=:prediction)
@test_throws ArgumentError predict(m1, x, interval=:confidence)
@test_throws ArgumentError predict(m1, x, interval=:prediction)
end
@testset "GLM confidence intervals" begin
X = [fill(1,50) range(0,1, length=50)]
Y = vec([0 0 0 1 0 1 1 0 0 0 0 0 0 0 1 0 1 1 0 1 1 0 1 0 0 1 1 1 0 1 1 1 1 1 0 1 0 1 1 1 0 1 1 1 1 1 1 1 1 1])
gm = fit(GeneralizedLinearModel, X, Y, Binomial())
newX = [fill(1,5) [0.0000000, 0.2405063, 0.4936709, 0.7468354, 1.0000000]]
ggplot_prediction = [0.1804678, 0.3717731, 0.6262062, 0.8258605, 0.9306787]
ggplot_lower = [0.05704968, 0.20624382, 0.46235427, 0.63065189, 0.73579237]
ggplot_upper = [0.4449066, 0.5740713, 0.7654544, 0.9294403, 0.9847846]
R_glm_se = [0.09748766, 0.09808412, 0.07963897, 0.07495792, 0.05177654]
preds_transformation = predict(gm, newX, interval=:confidence, interval_method=:transformation)
preds_delta = predict(gm, newX, interval=:confidence, interval_method=:delta)
@test preds_transformation.prediction == preds_delta.prediction
@test preds_transformation.prediction ≈ ggplot_prediction atol=1e-3
@test preds_transformation.lower ≈ ggplot_lower atol=1e-3
@test preds_transformation.upper ≈ ggplot_upper atol=1e-3
@test preds_delta.upper .- preds_delta.lower ≈ 2 .* 1.96 .* R_glm_se atol=1e-3
@test_throws ArgumentError predict(gm, newX, interval=:confidence, interval_method=:undefined_method)
@test_throws ArgumentError predict(gm, newX, interval=:undefined)
end
@testset "F test comparing to null model" begin
d = DataFrame(Treatment=[1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2.],
Result=[1.1, 1.2, 1, 2.2, 1.9, 2, .9, 1, 1, 2.2, 2, 2],
Other=categorical([1, 1, 2, 1, 2, 1, 3, 1, 1, 2, 2, 1]))
mod = lm(@formula(Result~Treatment), d).model
othermod = lm(@formula(Result~Other), d).model
nullmod = lm(@formula(Result~1), d).model
bothmod = lm(@formula(Result~Other+Treatment), d).model
nointerceptmod = lm(reshape(d.Treatment, :, 1), d.Result)
ft1 = ftest(mod)
ft1base = ftest(nullmod, mod)
@test ft1.nobs == ft1base.nobs
@test ft1.dof ≈ dof(mod) - dof(nullmod)
@test ft1.fstat ≈ ft1base.fstat[2]
@test ft1.pval ≈ ft1base.pval[2]
if VERSION >= v"1.6.0"
@test sprint(show, ft1) == """
F-test against the null model:
F-statistic: 241.62 on 12 observations and 1 degrees of freedom, p-value: <1e-07"""
else
@test sprint(show, ft1) == """
F-test against the null model:
F-statistic: 241.62 on 12 observations and 1 degrees of freedom, p-value: <1e-7"""
end
ft2 = ftest(othermod)
ft2base = ftest(nullmod, othermod)
@test ft2.nobs == ft2base.nobs
@test ft2.dof ≈ dof(othermod) - dof(nullmod)
@test ft2.fstat ≈ ft2base.fstat[2]
@test ft2.pval ≈ ft2base.pval[2]
@test sprint(show, ft2) == """
F-test against the null model:
F-statistic: 1.12 on 12 observations and 2 degrees of freedom, p-value: 0.3690"""
ft3 = ftest(bothmod)
ft3base = ftest(nullmod, bothmod)
@test ft3.nobs == ft3base.nobs
@test ft3.dof ≈ dof(bothmod) - dof(nullmod)
@test ft3.fstat ≈ ft3base.fstat[2]
@test ft3.pval ≈ ft3base.pval[2]
if VERSION >= v"1.6.0"
@test sprint(show, ft3) == """
F-test against the null model:
F-statistic: 81.97 on 12 observations and 3 degrees of freedom, p-value: <1e-05"""
else
@test sprint(show, ft3) == """
F-test against the null model:
F-statistic: 81.97 on 12 observations and 3 degrees of freedom, p-value: <1e-5"""
end
@test_throws ArgumentError ftest(nointerceptmod)
end
@testset "F test for model comparison" begin
d = DataFrame(Treatment=[1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2.],
Result=[1.1, 1.2, 1, 2.2, 1.9, 2, .9, 1, 1, 2.2, 2, 2],
Other=categorical([1, 1, 2, 1, 2, 1, 3, 1, 1, 2, 2, 1]))
mod = lm(@formula(Result~Treatment), d).model
othermod = lm(@formula(Result~Other), d).model
nullmod = lm(@formula(Result~1), d).model
bothmod = lm(@formula(Result~Other+Treatment), d).model
@test StatsModels.isnested(nullmod, mod)
@test !StatsModels.isnested(othermod, mod)
@test StatsModels.isnested(mod, bothmod)
@test !StatsModels.isnested(bothmod, mod)
@test StatsModels.isnested(othermod, bothmod)
d.Sum = d.Treatment + (d.Other .== 1)
summod = lm(@formula(Result~Sum), d).model
@test StatsModels.isnested(summod, bothmod)
ft1a = ftest(mod, nullmod)
@test isnan(ft1a.pval[1])
@test ft1a.pval[2] ≈ 2.481215056713184e-8
if VERSION >= v"1.6.0"
@test sprint(show, ft1a) == """
F-test: 2 models fitted on 12 observations
─────────────────────────────────────────────────────────────────
DOF ΔDOF SSR ΔSSR R² ΔR² F* p(>F)
─────────────────────────────────────────────────────────────────
[1] 3 0.1283 0.9603
[2] 2 -1 3.2292 3.1008 0.0000 -0.9603 241.6234 <1e-07
─────────────────────────────────────────────────────────────────"""
else
@test sprint(show, ft1a) == """
F-test: 2 models fitted on 12 observations
────────────────────────────────────────────────────────────────
DOF ΔDOF SSR ΔSSR R² ΔR² F* p(>F)
────────────────────────────────────────────────────────────────
[1] 3 0.1283 0.9603
[2] 2 -1 3.2292 3.1008 0.0000 -0.9603 241.6234 <1e-7
────────────────────────────────────────────────────────────────"""
end
ft1b = ftest(nullmod, mod)
@test isnan(ft1b.pval[1])
@test ft1b.pval[2] ≈ 2.481215056713184e-8
if VERSION >= v"1.6.0"
@test sprint(show, ft1b) == """
F-test: 2 models fitted on 12 observations
─────────────────────────────────────────────────────────────────
DOF ΔDOF SSR ΔSSR R² ΔR² F* p(>F)
─────────────────────────────────────────────────────────────────
[1] 2 3.2292 0.0000
[2] 3 1 0.1283 -3.1008 0.9603 0.9603 241.6234 <1e-07
─────────────────────────────────────────────────────────────────"""
else
@test sprint(show, ft1b) == """
F-test: 2 models fitted on 12 observations
────────────────────────────────────────────────────────────────
DOF ΔDOF SSR ΔSSR R² ΔR² F* p(>F)
────────────────────────────────────────────────────────────────
[1] 2 3.2292 0.0000
[2] 3 1 0.1283 -3.1008 0.9603 0.9603 241.6234 <1e-7
────────────────────────────────────────────────────────────────"""
end
bigmod = lm(@formula(Result~Treatment+Other), d).model
ft2a = ftest(nullmod, mod, bigmod)
@test isnan(ft2a.pval[1])
@test ft2a.pval[2] ≈ 2.481215056713184e-8
@test ft2a.pval[3] ≈ 0.3949973540194818
if VERSION >= v"1.6.0"
@test sprint(show, ft2a) == """
F-test: 3 models fitted on 12 observations
─────────────────────────────────────────────────────────────────
DOF ΔDOF SSR ΔSSR R² ΔR² F* p(>F)
─────────────────────────────────────────────────────────────────
[1] 2 3.2292 0.0000
[2] 3 1 0.1283 -3.1008 0.9603 0.9603 241.6234 <1e-07
[3] 5 2 0.1017 -0.0266 0.9685 0.0082 1.0456 0.3950
─────────────────────────────────────────────────────────────────"""
else
@test sprint(show, ft2a) == """
F-test: 3 models fitted on 12 observations
─────────────────────────────────────────────────────────────────
DOF ΔDOF SSR ΔSSR R² ΔR² F* p(>F)
─────────────────────────────────────────────────────────────────
[1] 2 3.2292 0.0000
[2] 3 1 0.1283 -3.1008 0.9603 0.9603 241.6234 <1e-7
[3] 5 2 0.1017 -0.0266 0.9685 0.0082 1.0456 0.3950
─────────────────────────────────────────────────────────────────"""
end
ft2b = ftest(bigmod, mod, nullmod)
@test isnan(ft2b.pval[1])
@test ft2b.pval[2] ≈ 0.3949973540194818
@test ft2b.pval[3] ≈ 2.481215056713184e-8
if VERSION >= v"1.6.0"
@test sprint(show, ft2b) == """
F-test: 3 models fitted on 12 observations
─────────────────────────────────────────────────────────────────
DOF ΔDOF SSR ΔSSR R² ΔR² F* p(>F)
─────────────────────────────────────────────────────────────────
[1] 5 0.1017 0.9685
[2] 3 -2 0.1283 0.0266 0.9603 -0.0082 1.0456 0.3950
[3] 2 -1 3.2292 3.1008 0.0000 -0.9603 241.6234 <1e-07
─────────────────────────────────────────────────────────────────"""
else
@test sprint(show, ft2b) == """
F-test: 3 models fitted on 12 observations
─────────────────────────────────────────────────────────────────
DOF ΔDOF SSR ΔSSR R² ΔR² F* p(>F)
─────────────────────────────────────────────────────────────────
[1] 5 0.1017 0.9685
[2] 3 -2 0.1283 0.0266 0.9603 -0.0082 1.0456 0.3950
[3] 2 -1 3.2292 3.1008 0.0000 -0.9603 241.6234 <1e-7
─────────────────────────────────────────────────────────────────"""
end
@test_throws ArgumentError ftest(mod, bigmod, nullmod)
@test_throws ArgumentError ftest(nullmod, bigmod, mod)
@test_throws ArgumentError ftest(bigmod, nullmod, mod)
mod2 = lm(@formula(Result~Treatment), d[2:end, :]).model
@test_throws ArgumentError ftest(mod, mod2)
end
@testset "F test rounding error" begin
# Data and Regressors
Y = [8.95554, 10.7601, 11.6401, 6.53665, 9.49828, 10.5173, 9.34927, 5.95772, 6.87394, 9.56881, 13.0369, 10.1762]
X = [1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0;
0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0 1.0 0.0 1.0 0.0]'
# Correlation matrix
V = [7.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 7.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 3.0
0.0 0.0 7.0 1.056 2.0 1.0 1.0 1.056 1.056 2.0 2.0 0.0
0.0 0.0 1.056 6.68282 1.112 2.888 1.944 4.68282 5.68282 1.112 1.112 0.0
0.0 0.0 2.0 1.112 7.0 1.0 1.0 1.112 1.112 5.0 4.004 0.0
0.0 0.0 1.0 2.888 1.0 7.0 2.0 2.888 2.888 1.0 1.0 0.0
0.0 0.0 1.0 1.944 1.0 2.0 7.0 1.944 1.944 1.0 1.0 0.0
0.0 0.0 1.056 4.68282 1.112 2.888 1.944 6.68282 4.68282 1.112 1.112 0.0
0.0 0.0 1.056 5.68282 1.112 2.888 1.944 4.68282 6.68282 1.112 1.112 0.0
0.0 0.0 2.0 1.112 5.0 1.0 1.0 1.112 1.112 7.0 4.008 0.0
0.0 0.0 2.0 1.112 4.004 1.0 1.0 1.112 1.112 4.008 6.99206 0.0
0.0 3.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 7.0]
# Cholesky
RL = cholesky(V).L
Yc = RL\Y
# Fit 1 (intercept)
Xc1 = RL\X[:,[1]]
mod1 = lm(Xc1, Yc)
# Fit 2 (both)
Xc2 = RL\X
mod2 = lm(Xc2, Yc)
@test StatsModels.isnested(mod1, mod2)
end
@testset "coeftable" begin
lm1 = fit(LinearModel, @formula(OptDen ~ Carb), form)
t = coeftable(lm1)
@test t.cols[1:3] ==
[coef(lm1), stderror(lm1), coef(lm1)./stderror(lm1)]
@test t.cols[4] ≈ [0.5515952883836446, 3.409192065429258e-7]
@test hcat(t.cols[5:6]...) == confint(lm1)
# TODO: call coeftable(gm1, ...) directly once DataFrameRegressionModel
# supports keyword arguments
t = coeftable(lm1.model, level=0.99)
@test hcat(t.cols[5:6]...) == confint(lm1, level=0.99)
gm1 = fit(GeneralizedLinearModel, @formula(Counts ~ 1 + Outcome + Treatment),
dobson, Poisson())
t = coeftable(gm1)
@test t.cols[1:3] ==
[coef(gm1), stderror(gm1), coef(gm1)./stderror(gm1)]
@test t.cols[4] ≈ [5.4267674619082684e-71, 0.024647114627808674, 0.12848651178787643,
0.9999999999999981, 0.9999999999999999]
@test hcat(t.cols[5:6]...) == confint(gm1)
# TODO: call coeftable(gm1, ...) directly once DataFrameRegressionModel
# supports keyword arguments
t = coeftable(gm1.model, level=0.99)
@test hcat(t.cols[5:6]...) == confint(gm1, level=0.99)
end
@testset "Issue 84" begin
X = [1 1; 2 4; 3 9]
Xf = [1 1; 2 4; 3 9.]
y = [2, 6, 12]
yf = [2, 6, 12.]
@test isapprox(lm(X, y).pp.beta0, ones(2))
@test isapprox(lm(Xf, y).pp.beta0, ones(2))
@test isapprox(lm(X, yf).pp.beta0, ones(2))
end
@testset "Issue 117" begin
data = DataFrame(x = [1,2,3,4], y = [24,34,44,54])
@test isapprox(coef(glm(@formula(y ~ x), data, Normal(), IdentityLink())), [14., 10])
end
@testset "Issue 118" begin
@inferred nobs(lm(randn(10, 2), randn(10)))
end
@testset "Issue 153" begin
X = [ones(10) randn(10)]
Test.@inferred cholesky(GLM.DensePredQR{Float64}(X))
end
@testset "Issue 224" begin
rng = StableRNG(1009)
# Make X slightly ill conditioned to amplify rounding errors
X = Matrix(qr(randn(rng, 100, 5)).Q)*Diagonal(10 .^ (-2.0:1.0:2.0))*Matrix(qr(randn(rng, 5, 5)).Q)'
y = randn(rng, 100)
@test coef(glm(X, y, Normal(), IdentityLink())) ≈ coef(lm(X, y))
end
@testset "Issue #228" begin
@test_throws ArgumentError glm(randn(10, 2), rand(1:10, 10), Binomial(10))
end
@testset "Issue #263" begin
data = dataset("datasets", "iris")
data.SepalWidth2 = data.SepalWidth
model1 = lm(@formula(SepalLength ~ SepalWidth), data)
model2 = lm(@formula(SepalLength ~ SepalWidth + SepalWidth2), data, true)
model3 = lm(@formula(SepalLength ~ 0 + SepalWidth), data)
model4 = lm(@formula(SepalLength ~ 0 + SepalWidth + SepalWidth2), data, true)
@test dof(model1) == dof(model2)
@test dof(model3) == dof(model4)
@test dof_residual(model1) == dof_residual(model2)
@test dof_residual(model3) == dof_residual(model4)
end
@testset "Issue #286 (separable data)" begin
x = rand(1000)
df = DataFrame(y = x .> 0.5, x₁ = x, x₂ = rand(1000))
@testset "Binomial with $l" for l in (LogitLink(), ProbitLink(), CauchitLink(), CloglogLink())
@test deviance(glm(@formula(y ~ x₁ + x₂), df, Binomial(), l, maxiter=40)) < 1e-6
end
end
@testset "Issue #376 (== and isequal for links)" begin
@test GLM.LogitLink() == GLM.LogitLink()
@test NegativeBinomialLink(0.3) == NegativeBinomialLink(0.3)
@test NegativeBinomialLink(0.31) != NegativeBinomialLink(0.3)
@test isequal(GLM.LogitLink(), GLM.LogitLink())
@test isequal(NegativeBinomialLink(0.3), NegativeBinomialLink(0.3))
@test !isequal(NegativeBinomialLink(0.31), NegativeBinomialLink(0.3))
@test hash(GLM.LogitLink()) == hash(GLM.LogitLink())
@test hash(NegativeBinomialLink(0.3)) == hash(NegativeBinomialLink(0.3))
@test hash(NegativeBinomialLink(0.31)) != hash(NegativeBinomialLink(0.3))
end
@testset "hasintercept" begin
d = DataFrame(Treatment=[1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2.],
Result=[1.1, 1.2, 1, 2.2, 1.9, 2, .9, 1, 1, 2.2, 2, 2],
Other=categorical([1, 1, 2, 1, 2, 1, 3, 1, 1, 2, 2, 1]))
mod = lm(@formula(Result~Treatment), d).model
@test hasintercept(mod)
nullmod = lm(@formula(Result~1), d).model
@test hasintercept(nullmod)
nointerceptmod = lm(reshape(d.Treatment, :, 1), d.Result)
@test !hasintercept(nointerceptmod)
nointerceptmod2 = glm(reshape(d.Treatment, :, 1), d.Result, Normal(), IdentityLink())
@test !hasintercept(nointerceptmod2)
rng = StableRNG(1234321)
secondcolinterceptmod = glm([randn(rng, 5) ones(5)], ones(5), Binomial(), LogitLink())
@test hasintercept(secondcolinterceptmod)
end
@testset "Views" begin
@testset "#444" begin
X = randn(10, 2)
y = X*ones(2) + randn(10)
@test coef(glm(X, y, Normal(), IdentityLink())) ==
coef(glm(view(X, 1:10, :), view(y, 1:10), Normal(), IdentityLink()))
x, y, w = rand(100, 2), rand(100), rand(100)
lm1 = lm(x, y)
lm2 = lm(x, view(y, :))
lm3 = lm(view(x, :, :), y)
lm4 = lm(view(x, :, :), view(y, :))
@test coef(lm1) == coef(lm2) == coef(lm3) == coef(lm4)
lm5 = lm(x, y, wts=w)
lm6 = lm(x, view(y, :), wts=w)
lm7 = lm(view(x, :, :), y, wts=w)
lm8 = lm(view(x, :, :), view(y, :), wts=w)
lm9 = lm(x, y, wts=view(w, :))
lm10 = lm(x, view(y, :), wts=view(w, :))
lm11 = lm(view(x, :, :), y, wts=view(w, :))
lm12 = lm(view(x, :, :), view(y, :), wts=view(w, :))
@test coef(lm5) == coef(lm6) == coef(lm7) == coef(lm8) == coef(lm9) == coef(lm10) ==
coef(lm11) == coef(lm12)
x, y, w = rand(100, 2), rand(Bool, 100), rand(100)
glm1 = glm(x, y, Binomial())
glm2 = glm(x, view(y, :), Binomial())
glm3 = glm(view(x, :, :), y, Binomial())
glm4 = glm(view(x, :, :), view(y, :), Binomial())
@test coef(glm1) == coef(glm2) == coef(glm3) == coef(glm4)
glm5 = glm(x, y, Binomial(), wts=w)
glm6 = glm(x, view(y, :), Binomial(), wts=w)
glm7 = glm(view(x, :, :), y, Binomial(), wts=w)
glm8 = glm(view(x, :, :), view(y, :), Binomial(), wts=w)
glm9 = glm(x, y, Binomial(), wts=view(w, :))
glm10 = glm(x, view(y, :), Binomial(), wts=view(w, :))
glm11 = glm(view(x, :, :), y, Binomial(), wts=view(w, :))
glm12 = glm(view(x, :, :), view(y, :), Binomial(), wts=view(w, :))
@test coef(glm5) == coef(glm6) == coef(glm7) == coef(glm8) == coef(glm9) == coef(glm10) ==
coef(glm11) == coef(glm12)
end
@testset "Views: #213, #470" begin
xs = randn(46, 3)
ys = randn(46)
glm_dense = lm(xs, ys)
glm_views = lm(@view(xs[1:end, 1:end]), ys)
@test coef(glm_dense) == coef(glm_views)
rows = 1:2:size(xs,1)
cols = 1:2:size(xs,2)
xs_altcopy = xs[rows, cols]
xs_altview = @view xs[rows, cols]
ys_altcopy = ys[rows]
ys_altview = @view ys[rows]
glm_dense_alt = lm(xs_altcopy, ys_altcopy)
glm_views_alt = lm(xs_altview, ys_altview)
# exact equality fails in the final decimal digit for Julia 1.9
@test coef(glm_dense_alt) ≈ coef(glm_views_alt)
end
end
@testset "PowerLink" begin
@testset "Functions related to PowerLink" begin
@test GLM.linkfun(IdentityLink(), 10) ≈ GLM.linkfun(PowerLink(1), 10)
@test GLM.linkfun(SqrtLink(), 10) ≈ GLM.linkfun(PowerLink(0.5), 10)
@test GLM.linkfun(LogLink(), 10) ≈ GLM.linkfun(PowerLink(0), 10)
@test GLM.linkfun(InverseLink(), 10) ≈ GLM.linkfun(PowerLink(-1), 10)
@test GLM.linkfun(InverseSquareLink(), 10) ≈ GLM.linkfun(PowerLink(-2), 10)
@test GLM.linkfun(PowerLink(1 / 3), 10) ≈ 2.154434690031884
@test GLM.linkinv(IdentityLink(), 10) ≈ GLM.linkinv(PowerLink(1), 10)
@test GLM.linkinv(SqrtLink(), 10) ≈ GLM.linkinv(PowerLink(0.5), 10)
@test GLM.linkinv(LogLink(), 10) ≈ GLM.linkinv(PowerLink(0), 10)
@test GLM.linkinv(InverseLink(), 10) ≈ GLM.linkinv(PowerLink(-1), 10)
@test GLM.linkinv(InverseSquareLink(), 10) ≈ GLM.linkinv(PowerLink(-2), 10)
@test GLM.linkinv(PowerLink(1 / 3), 10) ≈ 1000.0
@test GLM.mueta(IdentityLink(), 10) ≈ GLM.mueta(PowerLink(1), 10)
@test GLM.mueta(SqrtLink(), 10) ≈ GLM.mueta(PowerLink(0.5), 10)
@test GLM.mueta(LogLink(), 10) ≈ GLM.mueta(PowerLink(0), 10)
@test GLM.mueta(InverseLink(), 10) ≈ GLM.mueta(PowerLink(-1), 10)
@test GLM.mueta(InverseSquareLink(), 10) == GLM.mueta(PowerLink(-2), 10)
@test GLM.mueta(PowerLink(1 / 3), 10) ≈ 300.0
@test PowerLink(1 / 3) == PowerLink(1 / 3)
@test isequal(PowerLink(1 / 3), PowerLink(1 / 3))
@test !isequal(PowerLink(1 / 3), PowerLink(0.33))
@test hash(PowerLink(1 / 3)) == hash(PowerLink(1 / 3))
end
trees = dataset("datasets", "trees")
@testset "GLM with PowerLink" begin
mdl = glm(@formula(Volume ~ Height + Girth), trees, Normal(), PowerLink(1 / 3); rtol=1.0e-12, atol=1.0e-12)
@test coef(mdl) ≈ [-0.05132238692134761, 0.01428684676273272, 0.15033126098228242]
@test stderror(mdl) ≈ [0.224095414423756, 0.003342439119757, 0.005838227761632] atol=1.0e-8
@test dof(mdl) == 4
@test GLM.dispersion(mdl.model, true) ≈ 6.577062388609384
@test loglikelihood(mdl) ≈ -71.60507986987612
@test deviance(mdl) ≈ 184.15774688106
@test aic(mdl) ≈ 151.21015973975
@test predict(mdl)[1] ≈ 10.59735275421753
end
@testset "Compare PowerLink(0) and LogLink" begin
mdl1 = glm(@formula(Volume ~ Height + Girth), trees, Normal(), PowerLink(0))
mdl2 = glm(@formula(Volume ~ Height + Girth), trees, Normal(), LogLink())
@test coef(mdl1) ≈ coef(mdl2)
@test stderror(mdl1) ≈ stderror(mdl2)
@test dof(mdl1) == dof(mdl2)
@test dof_residual(mdl1) == dof_residual(mdl2)
@test GLM.dispersion(mdl1.model, true) ≈ GLM.dispersion(mdl2.model,true)
@test deviance(mdl1) ≈ deviance(mdl2)
@test loglikelihood(mdl1) ≈ loglikelihood(mdl2)
@test confint(mdl1) ≈ confint(mdl2)
@test aic(mdl1) ≈ aic(mdl2)
@test predict(mdl1) ≈ predict(mdl2)
end
@testset "Compare PowerLink(0.5) and SqrtLink" begin
mdl1 = glm(@formula(Volume ~ Height + Girth), trees, Normal(), PowerLink(0.5))
mdl2 = glm(@formula(Volume ~ Height + Girth), trees, Normal(), SqrtLink())
@test coef(mdl1) ≈ coef(mdl2)
@test stderror(mdl1) ≈ stderror(mdl2)
@test dof(mdl1) == dof(mdl2)
@test dof_residual(mdl1) == dof_residual(mdl2)
@test GLM.dispersion(mdl1.model, true) ≈ GLM.dispersion(mdl2.model,true)
@test deviance(mdl1) ≈ deviance(mdl2)
@test loglikelihood(mdl1) ≈ loglikelihood(mdl2)
@test confint(mdl1) ≈ confint(mdl2)
@test aic(mdl1) ≈ aic(mdl2)
@test predict(mdl1) ≈ predict(mdl2)
end
@testset "Compare PowerLink(1) and IdentityLink" begin
mdl1 = glm(@formula(Volume ~ Height + Girth), trees, Normal(), PowerLink(1))
mdl2 = glm(@formula(Volume ~ Height + Girth), trees, Normal(), IdentityLink())
@test coef(mdl1) ≈ coef(mdl2)
@test stderror(mdl1) ≈ stderror(mdl2)
@test dof(mdl1) == dof(mdl2)
@test dof_residual(mdl1) == dof_residual(mdl2)
@test deviance(mdl1) ≈ deviance(mdl2)
@test loglikelihood(mdl1) ≈ loglikelihood(mdl2)
@test GLM.dispersion(mdl1.model, true) ≈ GLM.dispersion(mdl2.model,true)
@test confint(mdl1) ≈ confint(mdl2)
@test aic(mdl1) ≈ aic(mdl2)
@test predict(mdl1) ≈ predict(mdl2)
end
end
# Tests for the `dropcollinear` keyword in `glm`: with a rank-deficient design
# matrix, collinear columns should get a 0.0 coefficient and NaN standard
# error (when dropcollinear=true), or the fit should fail (when false).
@testset "dropcollinear with GLMs" begin
# Note: x3 == 0.8*x1 + 0.2*x2 exactly, so the design matrix with all three
# predictors is rank-deficient by construction.
data = DataFrame(x1=[4, 5, 9, 6, 5], x2=[5, 3, 6, 7, 1],
x3=[4.2, 4.6, 8.4, 6.2, 4.2], y=[14, 14, 24, 20, 11])
# A Normal/identity GLM is mathematically the same model as OLS, so lm and
# glm must agree on every statistic when both drop the collinear column.
@testset "Check normal with identity link against equivalent linear model" begin
mdl1 = lm(@formula(y ~ x1 + x2 + x3), data; dropcollinear=true)
mdl2 = glm(@formula(y ~ x1 + x2 + x3), data, Normal(), IdentityLink();
dropcollinear=true)
@test coef(mdl1) ≈ coef(mdl2)
@test stderror(mdl1)[1:3] ≈ stderror(mdl2)[1:3]
# The dropped (4th) coefficient reports a NaN standard error.
@test isnan(stderror(mdl1)[4])
@test dof(mdl1) == dof(mdl2)
@test dof_residual(mdl1) == dof_residual(mdl2)
@test GLM.dispersion(mdl1.model, true) ≈ GLM.dispersion(mdl2.model,true)
@test deviance(mdl1) ≈ deviance(mdl2)
@test loglikelihood(mdl1) ≈ loglikelihood(mdl2)
@test aic(mdl1) ≈ aic(mdl2)
@test predict(mdl1) ≈ predict(mdl2)
end
# Full-rank formula (x3 omitted), so dropcollinear=false is safe and the
# lm/glm equivalence must still hold.
@testset "Check against equivalent linear model when dropcollinear = false" begin
mdl1 = lm(@formula(y ~ x1 + x2), data; dropcollinear=false)
mdl2 = glm(@formula(y ~ x1 + x2), data, Normal(), IdentityLink();
dropcollinear=false)
@test coef(mdl1) ≈ coef(mdl2)
@test stderror(mdl1) ≈ stderror(mdl2)
@test dof(mdl1) == dof(mdl2)
@test dof_residual(mdl1) == dof_residual(mdl2)
@test GLM.dispersion(mdl1.model, true) ≈ GLM.dispersion(mdl2.model,true)
@test deviance(mdl1) ≈ deviance(mdl2)
@test loglikelihood(mdl1) ≈ loglikelihood(mdl2)
@test aic(mdl1) ≈ aic(mdl2)
@test predict(mdl1) ≈ predict(mdl2)
end
# Cross-check the rank-deficient fit against reference values computed in R.
@testset "Check normal with identity link against outputs from R" begin
mdl = glm(@formula(y ~ x1 + x2 + x3), data, Normal(), IdentityLink();
dropcollinear=true)
# The dropped column contributes a pinned 0.0 coefficient.
@test coef(mdl) ≈ [1.350439882697950, 1.740469208211143, 1.171554252199414, 0.0]
@test stderror(mdl)[1:3] ≈ [0.58371400875263, 0.10681694901238, 0.08531532203251]
@test dof(mdl) == 4
@test dof_residual(mdl) == 2
@test GLM.dispersion(mdl.model, true) ≈ 0.1341642228738996
@test deviance(mdl) ≈ 0.2683284457477991
@test loglikelihood(mdl) ≈ 0.2177608775670037
@test aic(mdl) ≈ 7.564478244866
@test predict(mdl) ≈ [14.17008797653959, 13.56744868035191, 24.04398826979472,
19.99413489736071, 11.22434017595308]
end
# Synthetic logistic-regression data with an exact linear dependency:
# x3 = 2*x1 + 3*x2. Fixed StableRNG seeds keep the data reproducible.
num_rows = 100
dfrm = DataFrame()
dfrm.x1 = randn(StableRNG(123), num_rows)
dfrm.x2 = randn(StableRNG(1234), num_rows)
dfrm.x3 = 2*dfrm.x1 + 3*dfrm.x2
dfrm.y = Int.(randn(StableRNG(12345), num_rows) .> 0)
# Same dropcollinear behavior for a non-Gaussian family (Binomial/logit),
# checked against reference values computed in R.
@testset "Test Logistic Regression Outputs from R" begin
mdl = glm(@formula(y ~ x1 + x2 + x3), dfrm, Binomial(), LogitLink();
dropcollinear=true)
# Here the 3rd coefficient (x2) is the one dropped to 0 with NaN stderr.
@test coef(mdl) ≈ [-0.1402582892604246, 0.1362176272953289, 0, -0.1134751362230204] atol = 1.0E-6
stderr = stderror(mdl)
@test isnan(stderr[3]) == true
@test vcat(stderr[1:2], stderr[4]) ≈ [0.20652049856206, 0.25292632684716, 0.07496476901643] atol = 1.0E-4
@test deviance(mdl) ≈ 135.68506068159
@test loglikelihood(mdl) ≈ -67.8425303407948
@test dof(mdl) == 3
@test dof_residual(mdl) == 98
@test aic(mdl) ≈ 141.68506068159
@test GLM.dispersion(mdl.model, true) ≈ 1
@test predict(mdl)[1:3] ≈ [0.4241893070433117, 0.3754516361306202, 0.6327877688720133] atol = 1.0E-6
@test confint(mdl)[1:2,1:2] ≈ [-0.5493329715011036 0.26350316142056085;
-0.3582545657827583 0.64313795309765587] atol = 1.0E-1
end
@testset "`rankdeficient` test case of lm in glm" begin
rng = StableRNG(1234321)
# an example of rank deficiency caused by a missing cell in a table
dfrm = DataFrame([categorical(repeat(string.('A':'D'), inner = 6)),
categorical(repeat(string.('a':'c'), inner = 2, outer = 4))],
[:G, :H])
f = @formula(0 ~ 1 + G*H)
X = ModelMatrix(ModelFrame(f, dfrm)).m
y = X * (1:size(X, 2)) + 0.1 * randn(rng, size(X, 1))
# Dropping rows 7:8 removes one G*H cell entirely, making X rank-deficient.
inds = deleteat!(collect(1:length(y)), 7:8)
m1 = fit(GeneralizedLinearModel, X, y, Normal())
@test isapprox(deviance(m1), 0.12160301538297297)
Xmissingcell = X[inds, :]
ymissingcell = y[inds]
# Without dropcollinear the Cholesky factorization of the singular X'X fails.
@test_throws PosDefException m2 = glm(Xmissingcell, ymissingcell, Normal();
dropcollinear=false)
# With dropcollinear=true a pivoted Cholesky is used and one column is dropped
# (rank 11 out of 12 columns).
m2p = glm(Xmissingcell, ymissingcell, Normal(); dropcollinear=true)
@test isa(m2p.pp.chol, CholeskyPivoted)
@test rank(m2p.pp.chol) == 11
@test isapprox(deviance(m2p), 0.1215758392280204)
@test isapprox(coef(m2p), [0.9772643585228885, 8.903341608496437, 3.027347397503281,
3.9661379199401257, 5.079410103608552, 6.1944618141188625, 0.0, 7.930328728005131,
8.879994918604757, 2.986388408421915, 10.84972230524356, 11.844809275711485])
# All inferential columns (stderr, t, p, CI) for the dropped row 7 are NaN.
@test all(isnan, hcat(coeftable(m2p).cols[2:end]...)[7,:])
# The deprecated positional `allowrankdeficient` argument must still work,
# with a deprecation warning, and give the same fit.
m2p_dep_pos = glm(Xmissingcell, ymissingcell, Normal())
@test_logs (:warn, "Positional argument `allowrankdeficient` is deprecated, use keyword " *
"argument `dropcollinear` instead. Proceeding with positional argument value: true") fit(LinearModel, Xmissingcell, ymissingcell, true)
@test isa(m2p_dep_pos.pp.chol, CholeskyPivoted)
@test rank(m2p_dep_pos.pp.chol) == rank(m2p.pp.chol)
@test isapprox(deviance(m2p_dep_pos), deviance(m2p))
@test isapprox(coef(m2p_dep_pos), coef(m2p))
end
# Same missing-cell scenario as above, but with a non-Gaussian family (Gamma),
# to confirm the rank-deficiency handling is not Normal-specific.
@testset "`rankdeficient` test in GLM with Gamma distribution" begin
rng = StableRNG(1234321)
# an example of rank deficiency caused by a missing cell in a table
dfrm = DataFrame([categorical(repeat(string.('A':'D'), inner = 6)),
categorical(repeat(string.('a':'c'), inner = 2, outer = 4))],
[:G, :H])
f = @formula(0 ~ 1 + G*H)
X = ModelMatrix(ModelFrame(f, dfrm)).m
y = X * (1:size(X, 2)) + 0.1 * randn(rng, size(X, 1))
inds = deleteat!(collect(1:length(y)), 7:8)
m1 = fit(GeneralizedLinearModel, X, y, Gamma())
@test isapprox(deviance(m1), 0.0407069934950098)
Xmissingcell = X[inds, :]
ymissingcell = y[inds]
@test_throws PosDefException glm(Xmissingcell, ymissingcell, Gamma(); dropcollinear=false)
m2p = glm(Xmissingcell, ymissingcell, Gamma(); dropcollinear=true)
@test isa(m2p.pp.chol, CholeskyPivoted)
@test rank(m2p.pp.chol) == 11
@test isapprox(deviance(m2p), 0.04070377141288433)
@test isapprox(coef(m2p), [ 1.0232644374837732, -0.0982622592717195, -0.7735523403010212,
-0.820974608805111, -0.8581573302333557, -0.8838279927663583, 0.0, 0.667219148331652,
0.7087696966674913, 0.011287703617517712, 0.6816245514668273, 0.7250492032072612])
@test all(isnan, hcat(coeftable(m2p).cols[2:end]...)[7,:])
m2p_dep_pos = fit(GeneralizedLinearModel, Xmissingcell, ymissingcell, Gamma())
@test_logs (:warn, "Positional argument `allowrankdeficient` is deprecated, use keyword " *
"argument `dropcollinear` instead. Proceeding with positional argument value: true") fit(LinearModel, Xmissingcell, ymissingcell, true)
@test isa(m2p_dep_pos.pp.chol, CholeskyPivoted)
@test rank(m2p_dep_pos.pp.chol) == rank(m2p.pp.chol)
@test isapprox(deviance(m2p_dep_pos), deviance(m2p))
@test isapprox(coef(m2p_dep_pos), coef(m2p))
end
end
# Regression test for issue 503: the Binomial log-likelihood derives the
# success count from y * wt, which may land just off an integer due to
# floating point. `_safe_int` must map such near-integer values to an Int
# while raising InexactError for clearly non-integral input.
@testset "Floating point error in Binomial loglik" begin
@test_throws InexactError GLM._safe_int(1.3)
@test GLM._safe_int(1) === 1
# see issue 503
y, μ, wt, ϕ = 0.6376811594202898, 0.8492925285671102, 69.0, NaN
# due to floating point:
# 1. y * wt == 43.99999999999999
# 2. 44 / y == wt
# 3. 44 / wt == y
# The log-likelihood must therefore match logpdf at exactly 44 successes.
@test GLM.loglik_obs(Binomial(), y, μ, wt, ϕ) ≈ GLM.logpdf(Binomial(Int(wt), μ), 44)
end
# Tests for variance inflation factors (vif) and generalized VIF (gvif),
# checked against car::vif in R on the Duncan occupational-prestige data.
@testset "[G]VIF" begin
# Reference values from car::vif in R:
# > library(car)
# > data(Duncan)
# > lm1 = lm(prestige ~ 1 + income + education, Duncan)
# > vif(lm1)
# income education
# 2.1049 2.1049
# > lm2 = lm(prestige ~ 1 + income + education + type, Duncan)
# > vif(lm2)
# GVIF Df GVIF^(1/(2*Df))
# income 2.209178 1 1.486330
# education 5.297584 1 2.301648
# type 5.098592 2 1.502666
duncan = RDatasets.dataset("car", "Duncan")
lm1 = lm(@formula(Prestige ~ 1 + Income + Education), duncan)
# With only continuous 1-df terms, term names equal coefficient names and
# vif and gvif coincide.
@test termnames(lm1)[2] == coefnames(lm1)
@test vif(lm1) ≈ gvif(lm1)
# vif works without a formula (matrix interface), but gvif needs term
# groupings from a formula and must throw without one.
lm1_noform = lm(modelmatrix(lm1), response(lm1))
@test vif(lm1) ≈ vif(lm1_noform)
@test_throws ArgumentError("model was fitted without a formula") gvif(lm1_noform)
# Function terms that are numerically identity transforms must not change VIF.
lm1log = lm(@formula(Prestige ~ 1 + exp(log(Income)) + exp(log(Education))), duncan)
@test termnames(lm1log)[2] == coefnames(lm1log) == ["(Intercept)", "exp(log(Income))", "exp(log(Education))"]
@test vif(lm1) ≈ vif(lm1log)
# A Normal GLM on the same design matrix gives the same VIFs as the lm fit.
gm1 = glm(modelmatrix(lm1), response(lm1), Normal())
@test vif(lm1) ≈ vif(gm1)
# With the categorical Type term (2 df), term names and coefficient names
# diverge; compare scaled GVIF (GVIF^(1/(2*Df))) against the R output above.
lm2 = lm(@formula(Prestige ~ 1 + Income + Education + Type), duncan)
@test termnames(lm2)[2] != coefnames(lm2)
@test gvif(lm2; scale=true) ≈ [1.486330, 2.301648, 1.502666] atol=1e-4
gm2 = glm(@formula(Prestige ~ 1 + Income + Education + Type), duncan, Normal())
@test termnames(gm2)[2] != coefnames(gm2)
@test gvif(gm2; scale=true) ≈ [1.486330, 2.301648, 1.502666] atol=1e-4
# the VIF definition depends on modelmatrix, vcov and stderror returning valid
# values. It doesn't care about links, offsets, etc. as long as the model matrix,
# vcov matrix and stderrors are well defined.
end
| GLM | https://github.com/JuliaStats/GLM.jl.git |
|
[
"MIT"
] | 1.9.0 | 273bd1cd30768a2fddfa3fd63bbc746ed7249e5f | docs | 1051 | # Linear and generalized linear models in Julia
| Documentation | CI Status | Coverage | DOI
|:-----------------:|:------------------:|:-----------------:|:----------:|
| [![][docs-stable-img]][docs-stable-url] [![][docs-latest-img]][docs-latest-url] | [![][ci-img]][ci-url] | [![][codecov-img]][codecov-url] | [![][DOI-img]][DOI-url] |
[docs-latest-img]: https://img.shields.io/badge/docs-latest-blue.svg
[docs-latest-url]: https://JuliaStats.github.io/GLM.jl/dev
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://JuliaStats.github.io/GLM.jl/stable
[ci-img]: https://github.com/JuliaStats/GLM.jl/workflows/CI-stable/badge.svg
[ci-url]: https://github.com/JuliaStats/GLM.jl/actions?query=workflow%3ACI-stable+branch%3Amaster
[codecov-img]: https://codecov.io/gh/JuliaStats/GLM.jl/branch/master/graph/badge.svg?token=cVkd4c3M8H
[codecov-url]: https://codecov.io/gh/JuliaStats/GLM.jl
[DOI-img]: https://zenodo.org/badge/DOI/10.5281/zenodo.3376013.svg
[DOI-url]: https://doi.org/10.5281/zenodo.3376013
| GLM | https://github.com/JuliaStats/GLM.jl.git |
|
[
"MIT"
] | 1.9.0 | 273bd1cd30768a2fddfa3fd63bbc746ed7249e5f | docs | 2265 | # API
```@meta
DocTestSetup = quote
using CategoricalArrays, DataFrames, Distributions, GLM, RDatasets
end
```
## Types defined in the package
```@docs
LinearModel
GLM.DensePredChol
GLM.DensePredQR
GLM.LmResp
GLM.GlmResp
GLM.LinPred
GLM.ModResp
```
## Constructors for models
The most general approach to fitting a model is with the `fit` function, as in
```jldoctest
julia> using Random
julia> fit(LinearModel, hcat(ones(10), 1:10), randn(MersenneTwister(12321), 10))
LinearModel{GLM.LmResp{Vector{Float64}}, GLM.DensePredChol{Float64, LinearAlgebra.CholeskyPivoted{Float64, Matrix{Float64}, Vector{Int64}}}}:
Coefficients:
────────────────────────────────────────────────────────────────
Coef. Std. Error t Pr(>|t|) Lower 95% Upper 95%
────────────────────────────────────────────────────────────────
x1 0.717436 0.775175 0.93 0.3818 -1.07012 2.50499
x2 -0.152062 0.124931 -1.22 0.2582 -0.440153 0.136029
────────────────────────────────────────────────────────────────
```
This model can also be fit as
```jldoctest
julia> using Random
julia> lm(hcat(ones(10), 1:10), randn(MersenneTwister(12321), 10))
LinearModel{GLM.LmResp{Vector{Float64}}, GLM.DensePredChol{Float64, LinearAlgebra.CholeskyPivoted{Float64, Matrix{Float64}, Vector{Int64}}}}:
Coefficients:
────────────────────────────────────────────────────────────────
Coef. Std. Error t Pr(>|t|) Lower 95% Upper 95%
────────────────────────────────────────────────────────────────
x1 0.717436 0.775175 0.93 0.3818 -1.07012 2.50499
x2 -0.152062 0.124931 -1.22 0.2582 -0.440153 0.136029
────────────────────────────────────────────────────────────────
```
```@docs
lm
glm
negbin
fit
```
## Model methods
```@docs
StatsBase.deviance
GLM.dispersion
GLM.ftest
GLM.installbeta!
StatsBase.nobs
StatsBase.nulldeviance
StatsBase.predict
StatsModels.isnested
```
## Links and methods applied to them
```@docs
Link
GLM.Link01
CauchitLink
CloglogLink
IdentityLink
InverseLink
InverseSquareLink
LogitLink
LogLink
NegativeBinomialLink
PowerLink
ProbitLink
SqrtLink
GLM.linkfun
GLM.linkinv
GLM.mueta
GLM.inverselink
canonicallink
GLM.glmvar
GLM.mustart
devresid
GLM.dispersion_parameter
GLM.loglik_obs
GLM.cancancel
```
| GLM | https://github.com/JuliaStats/GLM.jl.git |
|
[
"MIT"
] | 1.9.0 | 273bd1cd30768a2fddfa3fd63bbc746ed7249e5f | docs | 16722 | # Examples
```@meta
DocTestSetup = quote
using CategoricalArrays, DataFrames, Distributions, GLM, RDatasets, Optim
end
```
## Linear regression
```jldoctest
julia> using DataFrames, GLM, StatsBase
julia> data = DataFrame(X=[1,2,3], Y=[2,4,7])
3×2 DataFrame
Row │ X Y
│ Int64 Int64
─────┼──────────────
1 │ 1 2
2 │ 2 4
3 │ 3 7
julia> ols = lm(@formula(Y ~ X), data)
StatsModels.TableRegressionModel{LinearModel{GLM.LmResp{Vector{Float64}}, GLM.DensePredChol{Float64, LinearAlgebra.CholeskyPivoted{Float64, Matrix{Float64}, Vector{Int64}}}}, Matrix{Float64}}
Y ~ 1 + X
Coefficients:
─────────────────────────────────────────────────────────────────────────
Coef. Std. Error t Pr(>|t|) Lower 95% Upper 95%
─────────────────────────────────────────────────────────────────────────
(Intercept) -0.666667 0.62361 -1.07 0.4788 -8.59038 7.25704
X 2.5 0.288675 8.66 0.0732 -1.16797 6.16797
─────────────────────────────────────────────────────────────────────────
julia> round.(stderror(ols), digits=5)
2-element Vector{Float64}:
0.62361
0.28868
julia> round.(predict(ols), digits=5)
3-element Vector{Float64}:
1.83333
4.33333
6.83333
julia> round.(confint(ols); digits=5)
2×2 Matrix{Float64}:
-8.59038 7.25704
-1.16797 6.16797
julia> round(r2(ols); digits=5)
0.98684
julia> round(adjr2(ols); digits=5)
0.97368
julia> round(deviance(ols); digits=5)
0.16667
julia> dof(ols)
3
julia> dof_residual(ols)
1.0
julia> round(aic(ols); digits=5)
5.84252
julia> round(aicc(ols); digits=5)
-18.15748
julia> round(bic(ols); digits=5)
3.13835
julia> round(dispersion(ols.model); digits=5)
0.40825
julia> round(loglikelihood(ols); digits=5)
0.07874
julia> round(nullloglikelihood(ols); digits=5)
-6.41736
julia> round.(vcov(ols); digits=5)
2×2 Matrix{Float64}:
0.38889 -0.16667
-0.16667 0.08333
```
## Probit regression
```jldoctest
julia> data = DataFrame(X=[1,2,2], Y=[1,0,1])
3×2 DataFrame
Row │ X Y
│ Int64 Int64
─────┼──────────────
1 │ 1 1
2 │ 2 0
3 │ 2 1
julia> probit = glm(@formula(Y ~ X), data, Binomial(), ProbitLink())
StatsModels.TableRegressionModel{GeneralizedLinearModel{GLM.GlmResp{Vector{Float64}, Binomial{Float64}, ProbitLink}, GLM.DensePredChol{Float64, LinearAlgebra.CholeskyPivoted{Float64, Matrix{Float64}, Vector{Int64}}}}, Matrix{Float64}}
Y ~ 1 + X
Coefficients:
────────────────────────────────────────────────────────────────────────
Coef. Std. Error z Pr(>|z|) Lower 95% Upper 95%
────────────────────────────────────────────────────────────────────────
(Intercept) 9.63839 293.909 0.03 0.9738 -566.414 585.69
X -4.81919 146.957 -0.03 0.9738 -292.849 283.211
────────────────────────────────────────────────────────────────────────
```
## Negative binomial regression
```jldoctest
julia> using GLM, RDatasets
julia> quine = dataset("MASS", "quine")
146×5 DataFrame
Row │ Eth Sex Age Lrn Days
│ Cat… Cat… Cat… Cat… Int32
─────┼───────────────────────────────
1 │ A M F0 SL 2
2 │ A M F0 SL 11
3 │ A M F0 SL 14
4 │ A M F0 AL 5
5 │ A M F0 AL 5
6 │ A M F0 AL 13
7 │ A M F0 AL 20
8 │ A M F0 AL 22
⋮ │ ⋮ ⋮ ⋮ ⋮ ⋮
140 │ N F F3 AL 3
141 │ N F F3 AL 3
142 │ N F F3 AL 5
143 │ N F F3 AL 15
144 │ N F F3 AL 18
145 │ N F F3 AL 22
146 │ N F F3 AL 37
131 rows omitted
julia> nbrmodel = glm(@formula(Days ~ Eth+Sex+Age+Lrn), quine, NegativeBinomial(2.0), LogLink())
StatsModels.TableRegressionModel{GeneralizedLinearModel{GLM.GlmResp{Vector{Float64}, NegativeBinomial{Float64}, LogLink}, GLM.DensePredChol{Float64, LinearAlgebra.CholeskyPivoted{Float64, Matrix{Float64}, Vector{Int64}}}}, Matrix{Float64}}
Days ~ 1 + Eth + Sex + Age + Lrn
Coefficients:
────────────────────────────────────────────────────────────────────────────
Coef. Std. Error z Pr(>|z|) Lower 95% Upper 95%
────────────────────────────────────────────────────────────────────────────
(Intercept) 2.88645 0.227144 12.71 <1e-36 2.44125 3.33164
Eth: N -0.567515 0.152449 -3.72 0.0002 -0.86631 -0.26872
Sex: M 0.0870771 0.159025 0.55 0.5840 -0.224606 0.398761
Age: F1 -0.445076 0.239087 -1.86 0.0627 -0.913678 0.0235251
Age: F2 0.0927999 0.234502 0.40 0.6923 -0.366816 0.552416
Age: F3 0.359485 0.246586 1.46 0.1449 -0.123814 0.842784
Lrn: SL 0.296768 0.185934 1.60 0.1105 -0.0676559 0.661191
────────────────────────────────────────────────────────────────────────────
julia> nbrmodel = negbin(@formula(Days ~ Eth+Sex+Age+Lrn), quine, LogLink())
StatsModels.TableRegressionModel{GeneralizedLinearModel{GLM.GlmResp{Vector{Float64}, NegativeBinomial{Float64}, LogLink}, GLM.DensePredChol{Float64, LinearAlgebra.CholeskyPivoted{Float64, Matrix{Float64}, Vector{Int64}}}}, Matrix{Float64}}
Days ~ 1 + Eth + Sex + Age + Lrn
Coefficients:
────────────────────────────────────────────────────────────────────────────
Coef. Std. Error z Pr(>|z|) Lower 95% Upper 95%
────────────────────────────────────────────────────────────────────────────
(Intercept) 2.89453 0.227415 12.73 <1e-36 2.4488 3.34025
Eth: N -0.569341 0.152656 -3.73 0.0002 -0.868541 -0.270141
Sex: M 0.0823881 0.159209 0.52 0.6048 -0.229655 0.394431
Age: F1 -0.448464 0.238687 -1.88 0.0603 -0.916281 0.0193536
Age: F2 0.0880506 0.235149 0.37 0.7081 -0.372834 0.548935
Age: F3 0.356955 0.247228 1.44 0.1488 -0.127602 0.841513
Lrn: SL 0.292138 0.18565 1.57 0.1156 -0.0717297 0.656006
────────────────────────────────────────────────────────────────────────────
julia> println("Estimated theta = ", round(nbrmodel.model.rr.d.r, digits=5))
Estimated theta = 1.27489
```
## Julia and R comparisons
An example of a simple linear model in R is
```r
> coef(summary(lm(optden ~ carb, Formaldehyde)))
Estimate Std. Error t value Pr(>|t|)
(Intercept) 0.005085714 0.007833679 0.6492115 5.515953e-01
carb 0.876285714 0.013534536 64.7444207 3.409192e-07
```
The corresponding model with the `GLM` package is
```jldoctest
julia> using GLM, RDatasets
julia> form = dataset("datasets", "Formaldehyde")
6×2 DataFrame
Row │ Carb OptDen
│ Float64 Float64
─────┼──────────────────
1 │ 0.1 0.086
2 │ 0.3 0.269
3 │ 0.5 0.446
4 │ 0.6 0.538
5 │ 0.7 0.626
6 │ 0.9 0.782
julia> lm1 = fit(LinearModel, @formula(OptDen ~ Carb), form)
StatsModels.TableRegressionModel{LinearModel{GLM.LmResp{Vector{Float64}}, GLM.DensePredChol{Float64, LinearAlgebra.CholeskyPivoted{Float64, Matrix{Float64}, Vector{Int64}}}}, Matrix{Float64}}
OptDen ~ 1 + Carb
Coefficients:
───────────────────────────────────────────────────────────────────────────
Coef. Std. Error t Pr(>|t|) Lower 95% Upper 95%
───────────────────────────────────────────────────────────────────────────
(Intercept) 0.00508571 0.00783368 0.65 0.5516 -0.0166641 0.0268355
Carb 0.876286 0.0135345 64.74 <1e-06 0.838708 0.913864
───────────────────────────────────────────────────────────────────────────
```
A more complex example in R is
```r
> coef(summary(lm(sr ~ pop15 + pop75 + dpi + ddpi, LifeCycleSavings)))
Estimate Std. Error t value Pr(>|t|)
(Intercept) 28.5660865407 7.3545161062 3.8841558 0.0003338249
pop15 -0.4611931471 0.1446422248 -3.1885098 0.0026030189
pop75 -1.6914976767 1.0835989307 -1.5609998 0.1255297940
dpi -0.0003369019 0.0009311072 -0.3618293 0.7191731554
ddpi 0.4096949279 0.1961971276 2.0881801 0.0424711387
```
with the corresponding Julia code
```jldoctest
julia> LifeCycleSavings = dataset("datasets", "LifeCycleSavings")
50×6 DataFrame
Row │ Country SR Pop15 Pop75 DPI DDPI
│ String15 Float64 Float64 Float64 Float64 Float64
─────┼─────────────────────────────────────────────────────────────
1 │ Australia 11.43 29.35 2.87 2329.68 2.87
2 │ Austria 12.07 23.32 4.41 1507.99 3.93
3 │ Belgium 13.17 23.8 4.43 2108.47 3.82
4 │ Bolivia 5.75 41.89 1.67 189.13 0.22
5 │ Brazil 12.88 42.19 0.83 728.47 4.56
6 │ Canada 8.79 31.72 2.85 2982.88 2.43
7 │ Chile 0.6 39.74 1.34 662.86 2.67
8 │ China 11.9 44.75 0.67 289.52 6.51
⋮ │ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮
44 │ United States 7.56 29.81 3.43 4001.89 2.45
45 │ Venezuela 9.22 46.4 0.9 813.39 0.53
46 │ Zambia 18.56 45.25 0.56 138.33 5.14
47 │ Jamaica 7.72 41.12 1.73 380.47 10.23
48 │ Uruguay 9.24 28.13 2.72 766.54 1.88
49 │ Libya 8.89 43.69 2.07 123.58 16.71
50 │ Malaysia 4.71 47.2 0.66 242.69 5.08
35 rows omitted
julia> fm2 = fit(LinearModel, @formula(SR ~ Pop15 + Pop75 + DPI + DDPI), LifeCycleSavings)
StatsModels.TableRegressionModel{LinearModel{GLM.LmResp{Vector{Float64}}, GLM.DensePredChol{Float64, LinearAlgebra.CholeskyPivoted{Float64, Matrix{Float64}, Vector{Int64}}}}, Matrix{Float64}}
SR ~ 1 + Pop15 + Pop75 + DPI + DDPI
Coefficients:
─────────────────────────────────────────────────────────────────────────────────
Coef. Std. Error t Pr(>|t|) Lower 95% Upper 95%
─────────────────────────────────────────────────────────────────────────────────
(Intercept) 28.5661 7.35452 3.88 0.0003 13.7533 43.3788
Pop15 -0.461193 0.144642 -3.19 0.0026 -0.752518 -0.169869
Pop75 -1.6915 1.0836 -1.56 0.1255 -3.87398 0.490983
DPI -0.000336902 0.000931107 -0.36 0.7192 -0.00221225 0.00153844
DDPI 0.409695 0.196197 2.09 0.0425 0.0145336 0.804856
─────────────────────────────────────────────────────────────────────────────────
```
The `glm` function (or equivalently, `fit(GeneralizedLinearModel, ...)`)
works similarly to the R `glm` function except that the `family`
argument is replaced by a `Distribution` type and, optionally, a `Link` type.
The first example from `?glm` in R is
```r
glm> ## Dobson (1990) Page 93: Randomized Controlled Trial : (slightly modified)
glm> counts <- c(18,17,15,20,10,21,25,13,13)
glm> outcome <- gl(3,1,9)
glm> treatment <- gl(3,3)
glm> print(d.AD <- data.frame(treatment, outcome, counts))
treatment outcome counts
1 1 1 18
2 1 2 17
3 1 3 15
4 2 1 20
5 2 2 10
6 2 3 21
7 3 1 25
8 3 2 13
9 3 3 13
glm> glm.D93 <- glm(counts ~ outcome + treatment, family=poisson())
glm> anova(glm.D93)
Analysis of Deviance Table
Model: poisson, link: log
Response: counts
Terms added sequentially (first to last)
Df Deviance Resid. Df Resid. Dev
NULL 8 10.3928
outcome 2 5.2622 6 5.1307
treatment 2 0.0132 4 5.1175
glm> ## No test:
glm> summary(glm.D93)
Call:
glm(formula = counts ~ outcome + treatment, family = poisson())
Deviance Residuals:
1 2 3 4 5 6 7 8 9
-0.6122 1.0131 -0.2819 -0.2498 -0.9784 1.0777 0.8162 -0.1155 -0.8811
Coefficients:
Estimate Std. Error z value Pr(>|z|)
(Intercept) 3.0313 0.1712 17.711 <2e-16 ***
outcome2 -0.4543 0.2022 -2.247 0.0246 *
outcome3 -0.2513 0.1905 -1.319 0.1870
treatment2 0.0198 0.1990 0.100 0.9207
treatment3 0.0198 0.1990 0.100 0.9207
---
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
(Dispersion parameter for poisson family taken to be 1)
Null deviance: 10.3928 on 8 degrees of freedom
Residual deviance: 5.1175 on 4 degrees of freedom
AIC: 56.877
Number of Fisher Scoring iterations: 4
```
In Julia this becomes
```jldoctest
julia> using DataFrames, CategoricalArrays, GLM
julia> dobson = DataFrame(Counts = [18.,17,15,20,10,21,25,13,13],
Outcome = categorical([1,2,3,1,2,3,1,2,3]),
Treatment = categorical([1,1,1,2,2,2,3,3,3]))
9×3 DataFrame
Row │ Counts Outcome Treatment
│ Float64 Cat… Cat…
─────┼─────────────────────────────
1 │ 18.0 1 1
2 │ 17.0 2 1
3 │ 15.0 3 1
4 │ 20.0 1 2
5 │ 10.0 2 2
6 │ 21.0 3 2
7 │ 25.0 1 3
8 │ 13.0 2 3
9 │ 13.0 3 3
julia> gm1 = fit(GeneralizedLinearModel, @formula(Counts ~ Outcome + Treatment), dobson, Poisson())
StatsModels.TableRegressionModel{GeneralizedLinearModel{GLM.GlmResp{Vector{Float64}, Poisson{Float64}, LogLink}, GLM.DensePredChol{Float64, LinearAlgebra.CholeskyPivoted{Float64, Matrix{Float64}, Vector{Int64}}}}, Matrix{Float64}}
Counts ~ 1 + Outcome + Treatment
Coefficients:
────────────────────────────────────────────────────────────────────────────
Coef. Std. Error z Pr(>|z|) Lower 95% Upper 95%
────────────────────────────────────────────────────────────────────────────
(Intercept) 3.03128 0.171155 17.71 <1e-69 2.69582 3.36674
Outcome: 2 -0.454255 0.202171 -2.25 0.0246 -0.850503 -0.0580079
Outcome: 3 -0.251314 0.190476 -1.32 0.1870 -0.624641 0.122012
Treatment: 2 0.0198026 0.199017 0.10 0.9207 -0.370264 0.409869
Treatment: 3 0.0198026 0.199017 0.10 0.9207 -0.370264 0.409869
────────────────────────────────────────────────────────────────────────────
julia> round(deviance(gm1), digits=5)
5.11746
```
## Linear regression with PowerLink
In this example, we choose the best model from a set of λs, based on minimum BIC.
```jldoctest
julia> using GLM, RDatasets, StatsBase, DataFrames, Optim
julia> trees = DataFrame(dataset("datasets", "trees"))
31×3 DataFrame
Row │ Girth Height Volume
│ Float64 Int64 Float64
─────┼──────────────────────────
1 │ 8.3 70 10.3
2 │ 8.6 65 10.3
3 │ 8.8 63 10.2
4 │ 10.5 72 16.4
5 │ 10.7 81 18.8
6 │ 10.8 83 19.7
7 │ 11.0 66 15.6
8 │ 11.0 75 18.2
⋮ │ ⋮ ⋮ ⋮
25 │ 16.3 77 42.6
26 │ 17.3 81 55.4
27 │ 17.5 82 55.7
28 │ 17.9 80 58.3
29 │ 18.0 80 51.5
30 │ 18.0 80 51.0
31 │ 20.6 87 77.0
16 rows omitted
julia> bic_glm(λ) = bic(glm(@formula(Volume ~ Height + Girth), trees, Normal(), PowerLink(λ)));
julia> optimal_bic = optimize(bic_glm, -1.0, 1.0);
julia> round(optimal_bic.minimizer, digits = 5) # Optimal λ
0.40935
julia> glm(@formula(Volume ~ Height + Girth), trees, Normal(), PowerLink(optimal_bic.minimizer)) # Best model
StatsModels.TableRegressionModel{GeneralizedLinearModel{GLM.GlmResp{Vector{Float64}, Normal{Float64}, PowerLink}, GLM.DensePredChol{Float64, LinearAlgebra.CholeskyPivoted{Float64, Matrix{Float64}, Vector{Int64}}}}, Matrix{Float64}}
Volume ~ 1 + Height + Girth
Coefficients:
────────────────────────────────────────────────────────────────────────────
Coef. Std. Error z Pr(>|z|) Lower 95% Upper 95%
────────────────────────────────────────────────────────────────────────────
(Intercept) -1.07586 0.352543 -3.05 0.0023 -1.76684 -0.384892
Height 0.0232172 0.00523331 4.44 <1e-05 0.0129601 0.0334743
Girth 0.242837 0.00922555 26.32 <1e-99 0.224756 0.260919
────────────────────────────────────────────────────────────────────────────
julia> round(optimal_bic.minimum, digits=5)
156.37638
``` | GLM | https://github.com/JuliaStats/GLM.jl.git |
|
[
"MIT"
] | 1.9.0 | 273bd1cd30768a2fddfa3fd63bbc746ed7249e5f | docs | 11280 | # GLM.jl Manual
Linear and generalized linear models in Julia
## Installation
```julia
Pkg.add("GLM")
```
will install this package and its dependencies, which includes the [Distributions package](https://github.com/JuliaStats/Distributions.jl).
The [RDatasets package](https://github.com/johnmyleswhite/RDatasets.jl) is useful for fitting models on standard R datasets to compare the results with those from R.
## Fitting GLM models
Two methods can be used to fit a Generalized Linear Model (GLM):
`glm(formula, data, family, link)` and `glm(X, y, family, link)`.
Their arguments must be:
- `formula`: a [StatsModels.jl `Formula` object](https://juliastats.org/StatsModels.jl/stable/formula/)
referring to columns in `data`; for example, if column names are `:Y`, `:X1`, and `:X2`,
then a valid formula is `@formula(Y ~ X1 + X2)`
- `data`: a table in the Tables.jl definition, e.g. a data frame;
rows with `missing` values are ignored
- `X` a matrix holding values of the independent variable(s) in columns
  (including if appropriate the intercept)
- `y` a vector holding values of the dependent variable
- `family`: chosen from `Bernoulli()`, `Binomial()`, `Gamma()`, `Geometric()`, `Normal()`, `Poisson()`, or `NegativeBinomial(θ)`
- `link`: chosen from the list below, for example, `LogitLink()` is a valid link for the `Binomial()` family
Typical distributions for use with `glm` and their canonical link
functions are
Bernoulli (LogitLink)
Binomial (LogitLink)
Gamma (InverseLink)
Geometric (LogLink)
InverseGaussian (InverseSquareLink)
NegativeBinomial (NegativeBinomialLink, often used with LogLink)
Normal (IdentityLink)
Poisson (LogLink)
Currently the available Link types are
CauchitLink
CloglogLink
IdentityLink
InverseLink
InverseSquareLink
LogitLink
LogLink
NegativeBinomialLink
PowerLink
ProbitLink
SqrtLink
Note that the canonical link for negative binomial regression is `NegativeBinomialLink`, but
in practice one typically uses `LogLink`.
The `NegativeBinomial` distribution belongs to the exponential family only if θ (the shape
parameter) is fixed, thus θ has to be provided if we use `glm` with `NegativeBinomial` family.
If one would like to also estimate θ, then `negbin(formula, data, link)` should be
used instead.
An intercept is included in any GLM by default.
## Categorical variables
Categorical variables will be dummy coded by default if they are non-numeric or if they are
[`CategoricalVector`s](https://juliadata.github.io/CategoricalArrays.jl/stable/) within a
[Tables.jl](https://juliadata.github.io/Tables.jl/stable/) table (`DataFrame`, JuliaDB table,
named tuple of vectors, etc). Alternatively, you can pass an explicit
[contrasts](https://juliastats.github.io/StatsModels.jl/stable/contrasts/) argument if you
would like a different contrast coding system or if you are not using DataFrames.
The response (dependent) variable may not be categorical.
Using a `CategoricalVector` constructed with `categorical` or `categorical!`:
```jldoctest categorical
julia> using CategoricalArrays, DataFrames, GLM, StableRNGs
julia> rng = StableRNG(1); # Ensure example can be reproduced
julia> data = DataFrame(y = rand(rng, 100), x = categorical(repeat([1, 2, 3, 4], 25)));
julia> lm(@formula(y ~ x), data)
StatsModels.TableRegressionModel{LinearModel{GLM.LmResp{Vector{Float64}}, GLM.DensePredChol{Float64, LinearAlgebra.CholeskyPivoted{Float64, Matrix{Float64}, Vector{Int64}}}}, Matrix{Float64}}
y ~ 1 + x
Coefficients:
───────────────────────────────────────────────────────────────────────────
Coef. Std. Error t Pr(>|t|) Lower 95% Upper 95%
───────────────────────────────────────────────────────────────────────────
(Intercept) 0.490985 0.0564176 8.70 <1e-13 0.378997 0.602973
x: 2 0.0527655 0.0797865 0.66 0.5100 -0.105609 0.21114
x: 3 0.0955446 0.0797865 1.20 0.2341 -0.0628303 0.25392
x: 4 -0.032673 0.0797865 -0.41 0.6831 -0.191048 0.125702
───────────────────────────────────────────────────────────────────────────
```
Using [`contrasts`](https://juliastats.github.io/StatsModels.jl/stable/contrasts/):
```jldoctest categorical
julia> using StableRNGs
julia> data = DataFrame(y = rand(StableRNG(1), 100), x = repeat([1, 2, 3, 4], 25));
julia> lm(@formula(y ~ x), data, contrasts = Dict(:x => DummyCoding()))
StatsModels.TableRegressionModel{LinearModel{GLM.LmResp{Vector{Float64}}, GLM.DensePredChol{Float64, LinearAlgebra.CholeskyPivoted{Float64, Matrix{Float64}, Vector{Int64}}}}, Matrix{Float64}}
y ~ 1 + x
Coefficients:
───────────────────────────────────────────────────────────────────────────
Coef. Std. Error t Pr(>|t|) Lower 95% Upper 95%
───────────────────────────────────────────────────────────────────────────
(Intercept) 0.490985 0.0564176 8.70 <1e-13 0.378997 0.602973
x: 2 0.0527655 0.0797865 0.66 0.5100 -0.105609 0.21114
x: 3 0.0955446 0.0797865 1.20 0.2341 -0.0628303 0.25392
x: 4 -0.032673 0.0797865 -0.41 0.6831 -0.191048 0.125702
───────────────────────────────────────────────────────────────────────────
```
## Comparing models with F-test
Comparisons between two or more linear models can be performed using the `ftest` function,
which computes an F-test between each pair of subsequent models and reports fit statistics:
```jldoctest
julia> using DataFrames, GLM, StableRNGs
julia> data = DataFrame(y = (1:50).^2 .+ randn(StableRNG(1), 50), x = 1:50);
julia> ols_lin = lm(@formula(y ~ x), data);
julia> ols_sq = lm(@formula(y ~ x + x^2), data);
julia> ftest(ols_lin.model, ols_sq.model)
F-test: 2 models fitted on 50 observations
─────────────────────────────────────────────────────────────────────────────────
DOF ΔDOF SSR ΔSSR R² ΔR² F* p(>F)
─────────────────────────────────────────────────────────────────────────────────
[1] 3 1731979.2266 0.9399
[2] 4 1 40.7581 -1731938.4685 1.0000 0.0601 1997177.0357 <1e-99
─────────────────────────────────────────────────────────────────────────────────
```
## Methods applied to fitted models
Many of the methods provided by this package have names similar to those in [R](http://www.r-project.org).
- `adjr2`: adjusted R² for a linear model (an alias for `adjr²`)
- `aic`: Akaike's Information Criterion
- `aicc`: corrected Akaike's Information Criterion for small sample sizes
- `bic`: Bayesian Information Criterion
- `coef`: estimates of the coefficients in the model
- `confint`: confidence intervals for coefficients
- `cooksdistance`: [Cook's distance](https://en.wikipedia.org/wiki/Cook%27s_distance) for each observation
- `deviance`: measure of the model fit, weighted residual sum of squares for lm's
- `dispersion`: dispersion (or scale) parameter for a model's distribution
- `dof`: number of degrees of freedom consumed in the model
- `dof_residual`: degrees of freedom for residuals, when meaningful
- `fitted`: fitted values of the model
- `glm`: fit a generalized linear model (an alias for `fit(GeneralizedLinearModel, ...)`)
- `lm`: fit a linear model (an alias for `fit(LinearModel, ...)`)
- `loglikelihood`: log-likelihood of the model
- `modelmatrix`: design matrix
- `nobs`: number of rows, or sum of the weights when prior weights are specified
- `nulldeviance`: deviance of the model with all predictors removed
- `nullloglikelihood`: log-likelihood of the model with all predictors removed
- `predict`: predicted values of the dependent variable from the fitted model
- `r2`: R² of a linear model (an alias for `r²`)
- `residuals`: vector of residuals from the fitted model
- `response`: model response (a.k.a the dependent variable)
- `stderror`: standard errors of the coefficients
- `vcov`: variance-covariance matrix of the coefficient estimates
For example, applying some of these methods to a fitted linear model:
```jldoctest methods
julia> using GLM, DataFrames, StatsBase
julia> data = DataFrame(X=[1,2,3], y=[2,4,7]);
julia> mdl = lm(@formula(y ~ X), data);
julia> round.(coef(mdl); digits=8)
2-element Vector{Float64}:
-0.66666667
2.5
julia> round(r2(mdl); digits=8)
0.98684211
julia> round(aic(mdl); digits=8)
5.84251593
```
The [`predict`](@ref) method returns predicted values of response variable from covariate values in an input `newX`.
If `newX` is omitted then the fitted response values from the model are returned.
```jldoctest methods
julia> test_data = DataFrame(X=[4]);
julia> round.(predict(mdl, test_data); digits=8)
1-element Vector{Float64}:
9.33333333
```
The [`cooksdistance`](@ref) method computes [Cook's distance](https://en.wikipedia.org/wiki/Cook%27s_distance) for each observation used to fit a linear model, giving an estimate of the influence of each data point.
Note that it's currently only implemented for linear models without weights.
```jldoctest methods
julia> round.(cooksdistance(mdl); digits=8)
3-element Vector{Float64}:
2.5
0.25
2.5
```
## Separation of response object and predictor object
The general approach in this code is to separate functionality related
to the response from that related to the linear predictor. This
allows for greater generality by mixing and matching different
subtypes of the abstract type ```LinPred``` and the abstract type ```ModResp```.
A ```LinPred``` type incorporates the parameter vector and the model
matrix. The parameter vector is a dense numeric vector but the model
matrix can be dense or sparse. A ```LinPred``` type must incorporate
some form of a decomposition of the weighted model matrix that allows
for the solution of a system ```X'W * X * delta=X'wres``` where ```W``` is a
diagonal matrix of "X weights", provided as a vector of the square
roots of the diagonal elements, and ```wres``` is a weighted residual vector.
Currently there are two dense predictor types, ```DensePredQR``` and
```DensePredChol```, and the usual caveats apply. The Cholesky
version is faster but somewhat less accurate than the QR version.
The skeleton of a distributed predictor type is in the code
but not yet fully fleshed out. Because Julia by default uses
OpenBLAS, which is already multi-threaded on multicore machines, there
may not be much advantage in using distributed predictor types.
A ```ModResp``` type must provide methods for the ```wtres``` and
```sqrtxwts``` generics. Their values are the arguments to the
```updatebeta``` methods of the ```LinPred``` types. The
```Float64``` value returned by ```updatedelta``` is the value of the
convergence criterion.
Similarly, ```LinPred``` types must provide a method for the
```linpred``` generic. In general ```linpred``` takes an instance of
a ```LinPred``` type and a step factor. Methods that take only an instance
of a ```LinPred``` type use a default step factor of 1. The value of
```linpred``` is the argument to the ```updatemu``` method for
```ModResp``` types. The ```updatemu``` method returns the updated
deviance.
| GLM | https://github.com/JuliaStats/GLM.jl.git |
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | code | 4729 | using LightXML
macro logmsg(s)
end
include("../src/types.jl")
name2sym(name) = join(map(uppercasefirst, split(name, '-')))
# Boilerplate emitted at the top of the generated spec file.
const GEN_TOP = """
# This file is automatically generated.
# Do not edit this file by hand.
# Make changes to gen.jl or the source specification instead.
"""
const GEN_BOTTOM = "# end generated code"
# Text emitted before the generated CLASS_MAP entries.
const CLS_TOP = """# Classes
const CLASS_MAP = Dict{TAMQPClassId,ClassSpec}("""
# NOTE: everything up to the closing triple quote below is emitted verbatim
# into the generated file; the `function make_classmethod_map` that follows is
# generated code *text*, not a definition in this file.
const CLS_BOTTOM = """) # CLASS_MAP")
function make_classmethod_map()
    cmmap = Dict{Tuple{Symbol,Symbol},MethodSpec}()
    for v in values(CLASS_MAP)
        for m in values(v.method_map)
            cmmap[(v.name,m.name)] = m
        end
    end
    cmmap
end
const CLASSNAME_MAP = Dict{Symbol,ClassSpec}(v.name => v for v in values(CLASS_MAP))
const CLASSMETHODNAME_MAP = make_classmethod_map()
# end Classes
"""
const DOMAIN_TOP = "# Domains"
const DOMAIN_BOTTOM = "# end Domains\n"
# Map of primitive AMQP domain/type names to the Julia types declared in types.jl.
const domainmap = Dict{String,Type}(
    "bit" => TAMQPBit,
    "octet" => TAMQPOctet,
    "short" => TAMQPShortInt,
    "long" => TAMQPLongInt,
    "longlong" => TAMQPLongLongInt,
    "shortstr" => TAMQPShortStr,
    "longstr" => TAMQPLongStr,
    "timestamp" => TAMQPTimeStamp,
    "table" => TAMQPFieldTable,
    "class-id" => TAMQPClassId,
    "method-id" => TAMQPMethodId
)
# Constants assumed to be pre-declared elsewhere (e.g. types.jl); skipped
# while generating the constants section.
const precreated_consts = ["FrameEnd"]
# Indentation used while pretty-printing the generated nested Dict literals.
const clsindent = " "^4
const methindent = " "^8
const argsindent = " "^12
"""
    gen_spec(specfile)

Parse the AMQP XML protocol specification at `specfile` and print Julia
source (protocol constants, error codes, domain type aliases and the
class/method metadata map) to stdout. The output is meant to be captured
into spec.jl.
"""
function gen_spec(specfile)
    xdoc = parse_file(specfile)
    amqp = root(xdoc)

    println("# Source: ", specfile)
    println(GEN_TOP)
    # protocol version and default port come from the root element attributes
    println("const AMQP_VERSION = v", '"', attribute(amqp, "major"), '.', attribute(amqp, "minor"), '.', attribute(amqp, "revision"), '"')
    println("const AMQP_DEFAULT_PORT = ", attribute(amqp, "port"))
    println("")

    println("# Constants")
    # constants without a "class" attribute are plain protocol constants
    for constant in get_elements_by_tagname(amqp, "constant")
        has_attribute(constant, "class") && continue
        constantname = name2sym(attribute(constant, "name"))
        (constantname in precreated_consts) && continue # already declared elsewhere
        constantvalue = attribute(constant, "value")
        println("const ", constantname, " = ", constantvalue)
    end
    println("")

    println("# Error Codes")
    # constants carrying a "class" attribute are error codes, named <class>-<name>
    for constant in get_elements_by_tagname(amqp, "constant")
        !has_attribute(constant, "class") && continue
        cls = attribute(constant, "class")
        name = attribute(constant, "name")
        constantname = name2sym("$cls-$name")
        constantvalue = attribute(constant, "value")
        println("const ", constantname, " = ", constantvalue)
    end
    println("")

    # domains
    println(DOMAIN_TOP)
    for domain in get_elements_by_tagname(amqp, "domain")
        name = attribute(domain, "name")
        (name in keys(domainmap)) && continue # primitive domains already have Julia types
        name = "TAMQP" * name2sym(name)
        typ = domainmap[attribute(domain, "type")].name
        println("const $name = $typ")
    end
    println(DOMAIN_BOTTOM)

    # classes and methods
    println(CLS_TOP)
    clssep = "" # separator between entries; empty only before the first one
    for cls in get_elements_by_tagname(amqp, "class")
        clsname = Symbol(name2sym(attribute(cls, "name")))
        clsidx = parse(Int, attribute(cls, "index"))
        println(clsindent, clssep, "$clsidx => ClassSpec($clsidx, :$clsname, Dict{TAMQPMethodId, MethodSpec}(")
        isempty(clssep) && (clssep = ", ")
        methsep = ""
        for meth in get_elements_by_tagname(cls, "method")
            methname = Symbol(name2sym(attribute(meth, "name")))
            methidx = parse(Int, attribute(meth, "index"))
            methrespelem = find_element(meth, "response")
            methresp = (methrespelem === nothing) ? :Nothing : Symbol(name2sym(attribute(methrespelem, "name")))
            println(methindent, methsep, "$methidx => MethodSpec($methidx, :$methname, :$methresp, Pair{Symbol,DataType}[")
            isempty(methsep) && (methsep = ", ")
            argssep = ""
            for arg in get_elements_by_tagname(meth, "field")
                fieldname = Symbol(name2sym(attribute(arg, "name")))
                # a field refers either to a named domain or directly to a type
                fielddomain = attribute(arg, "domain")
                if fielddomain === nothing
                    fielddomain = attribute(arg, "type")
                end
                # renamed from `fieldtype` to avoid shadowing Base.fieldtype
                fieldtypename = (fielddomain in keys(domainmap)) ? domainmap[fielddomain].name : ("TAMQP" * name2sym(fielddomain))
                println(argsindent, argssep, ":$fieldname => $fieldtypename")
                isempty(argssep) && (argssep = ", ")
            end
            println(methindent, "]) # method $methname")
        end
        println(clsindent, ")) # class $clsname")
    end
    println(CLS_BOTTOM)
    println(GEN_BOTTOM)
end
# Generate spec.jl content from the bundled AMQP 0-9-1 (extended) specification.
gen_spec("amqp0-9-1.extended.xml")
| AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | code | 1175 | module AMQPClient
import Base: write, read, read!, close, convert, show, isopen, flush
using Sockets
using MbedTLS
# Client property info that gets sent to the server on connection startup
const CLIENT_IDENTIFICATION = Dict{String,Any}(
"product" => "Julia AMQPClient",
"product_version" => string(VERSION),
"capabilities" => Dict{String,Any}()
)
include("types.jl")
include("spec.jl")
include("message.jl")
include("auth.jl")
include("buffered_socket.jl")
include("amqps.jl")
include("protocol.jl")
include("convert.jl")
include("show.jl")
export connection, channel, CloseReason, amqps_configure
export exchange_declare, exchange_delete, exchange_bind, exchange_unbind, default_exchange_name
export queue_declare, queue_bind, queue_unbind, queue_purge, queue_delete
export tx_select, tx_commit, tx_rollback
export basic_qos, basic_consume, basic_cancel, basic_publish, basic_get, basic_ack, basic_reject, basic_recover
export confirm_select
export EXCHANGE_TYPE_DIRECT, EXCHANGE_TYPE_FANOUT, EXCHANGE_TYPE_TOPIC, EXCHANGE_TYPE_HEADERS
export read, read!, close, convert, show, flush
export Message, set_properties, PERSISTENT, NON_PERSISTENT
end # module
| AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | code | 2530 | function default_tls_debug(level, filename, number, msg)
@debug(level, filename, number, msg)
end
"""
Construct the default random number generator for TLS: a CTR-DRBG generator
seeded from an MbedTLS entropy source.
"""
function default_tls_rng()
    entropy_src = MbedTLS.Entropy()
    ctr_drbg = MbedTLS.CtrDrbg()
    MbedTLS.seed!(ctr_drbg, entropy_src)
    return ctr_drbg
end
"""
    amqps_configure(;
        rng = default_tls_rng(),
        cacerts = nothing,
        verify = MbedTLS.MBEDTLS_SSL_VERIFY_NONE,
        client_cert = nothing,
        client_key = nothing,
        debug = nothing
    )

Creates and returns a configuration for making AMQPS connections.
- rng: The random number generator to use for TLS (default: a freshly seeded CTR-DRBG, see `default_tls_rng`).
- cacerts: A CA certificate file (or its contents) to use for certificate verification.
- verify: Whether to verify server certificate. Default is false if cacerts is not provided and true if it is.
- client_cert and client_key: The client certificate and corresponding private key to use. Default is nothing (no client certificate). Values can either be the file name or certificate/key contents.
- debug: Optional MbedTLS debug callback (see `default_tls_debug`).
"""
function amqps_configure(;
        rng = default_tls_rng(),
        cacerts::Union{String,Nothing} = nothing,
        verify::Int64 = (cacerts === nothing) ? MbedTLS.MBEDTLS_SSL_VERIFY_NONE : MbedTLS.MBEDTLS_SSL_VERIFY_REQUIRED,
        client_cert::Union{String,Nothing} = nothing,
        client_key::Union{String,Nothing} = nothing,
        debug::Union{Function,Nothing} = nothing)
    conf = MbedTLS.SSLConfig()
    MbedTLS.config_defaults!(conf)
    MbedTLS.rng!(conf, rng)
    (debug === nothing) || MbedTLS.dbg!(conf, debug)
    if cacerts !== nothing
        if isfile(cacerts)
            # if it is a file name instead of certificate contents, read the contents
            cacerts = read(cacerts, String)
        end
        MbedTLS.ca_chain!(conf, MbedTLS.crt_parse(cacerts))
    end
    MbedTLS.authmode!(conf, verify)
    if (client_cert !== nothing) && (client_key !== nothing)
        if isfile(client_cert)
            # if it is a file name instead of certificate contents, read the contents
            client_cert = read(client_cert, String)
        end
        if isfile(client_key)
            client_key = read(client_key, String)
        end
        key = MbedTLS.PKContext()
        MbedTLS.parse_key!(key, client_key)
        MbedTLS.own_cert!(conf, MbedTLS.crt_parse(client_cert), key)
    end
    conf
end
"""
Perform a TLS handshake over the connected TCP socket `sock` for `hostname`,
using `ssl_options` (see `amqps_configure`), and wrap the resulting TLS
context in a `BufferedTLSSocket`.
"""
function setup_tls(sock::TCPSocket, hostname::String, ssl_options::MbedTLS.SSLConfig)
    @debug("setting up TLS")
    ctx = MbedTLS.SSLContext()
    MbedTLS.setup!(ctx, ssl_options)
    MbedTLS.set_bio!(ctx, sock)
    MbedTLS.hostname!(ctx, hostname)  # expected server hostname for the TLS session
    MbedTLS.handshake(ctx)
    @debug("TLS setup done")
    BufferedTLSSocket(ctx)
end
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | code | 447 | function auth_resp_amqplain(auth_params::Dict{String,Any})
params = Dict{String,Any}("LOGIN" => auth_params["LOGIN"], "PASSWORD" => auth_params["PASSWORD"])
iob = IOBuffer()
write(iob, TAMQPFieldTable(params))
bytes = take!(iob)
skipbytes = sizeof(fieldtype(TAMQPFieldTable, :len))
bytes = bytes[(skipbytes+1):end]
TAMQPLongStr(bytes)
end
const AUTH_PROVIDERS = Dict{String,Function}("AMQPLAIN" => auth_resp_amqplain)
| AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | code | 2644 | const TLS_BUSY_READ_SECS = 1
const TLS_BUSY_READ_YIELD_SECS = 0.001
const TLS_READBUFF_SIZE = MbedTLS.MBEDTLS_SSL_MAX_CONTENT_LEN * 5
const TLS_MIN_WRITEBUFF_SIZE = MbedTLS.MBEDTLS_SSL_MAX_CONTENT_LEN
const TCP_MAX_WRITEBUFF_SIZE = 1024*512
const TCP_MIN_WRITEBUFF_SIZE = 1024*64
"""
An `IO` wrapper over an MbedTLS `SSLContext` that buffers reads and writes so
the TLS layer sees fewer, larger records.
"""
struct BufferedTLSSocket <: IO
    in::IOBuffer # no read lock, single task reads socket and distributes messages to channels
    out::IOBuffer
    sock::MbedTLS.SSLContext
    readbuff::Vector{UInt8}  # scratch buffer for reads from the TLS socket
    out_lck::ReentrantLock # protect out::IOBuffer when there are multiple channels on the connection

    function BufferedTLSSocket(sock::MbedTLS.SSLContext; readbuff_size::Int=TLS_READBUFF_SIZE)
        new(PipeBuffer(), PipeBuffer(), sock, Vector{UInt8}(undef, readbuff_size), ReentrantLock())
    end
end
# Openness / closing delegate to the underlying TLS socket.
isopen(bio::BufferedTLSSocket) = isopen(bio.sock)
close(bio::BufferedTLSSocket) = close(bio.sock)

# Read a single byte, filling the input buffer from the TLS socket as needed.
function read(bio::BufferedTLSSocket, ::Type{UInt8})
    fill_in(bio, 1)
    read(bio.in, UInt8)
end
# Read one primitive numeric value (byte-order conversion is the caller's job).
function read(bio::BufferedTLSSocket, T::Union{Type{Int16},Type{UInt16},Type{Int32},Type{UInt32},Type{Int64},Type{UInt64},Type{Int128},Type{UInt128},Type{Float16},Type{Float32},Type{Float64}})
    fill_in(bio, sizeof(T))
    read(bio.in, T)
end
# Fill `buff` completely from buffered input.
function read!(bio::BufferedTLSSocket, buff::Vector{UInt8})
    fill_in(bio, length(buff))
    read!(bio.in, buff)
end
# Peek at one primitive numeric value without consuming it.
function peek(bio::BufferedTLSSocket, T::Union{Type{Int16},Type{UInt16},Type{Int32},Type{UInt32},Type{Int64},Type{UInt64},Type{Int128},Type{UInt128},Type{Float16},Type{Float32},Type{Float64}})
    fill_in(bio, sizeof(T))
    peek(bio.in, T)
end
# Ensure at least `atleast` bytes are buffered in `bio.in`, reading from the
# TLS socket as needed. May block until enough bytes arrive or the socket closes.
function fill_in(bio::BufferedTLSSocket, atleast::Int)
    avail = bytesavailable(bio.in)
    if atleast > avail
        while (atleast > avail) && isopen(bio.sock)
            bytes_read = isreadable(bio.sock) ? readbytes!(bio.sock, bio.readbuff; all=false) : 0
            if bytes_read > 0
                avail += Base.write(bio.in, first(bio.readbuff, bytes_read))
            else
                # nothing readable yet; eof blocks until data arrives or the stream ends
                eof(bio.sock)
            end
        end
    end
end
# Buffered writes: data accumulates in `bio.out` (under lock, since multiple
# channels may write concurrently) until `flush` pushes it to the TLS socket.
function write(bio::BufferedTLSSocket, data::UInt8)
    lock(bio.out_lck) do
        write(bio.out, data)
    end
end
function write(bio::BufferedTLSSocket, data::Union{Int16,UInt16,Int32,UInt32,Int64,UInt64,Int128,UInt128,Float16,Float32,Float64})
    lock(bio.out_lck) do
        write(bio.out, data)
    end
end
function write(bio::BufferedTLSSocket, data::Array)
    lock(bio.out_lck) do
        write(bio.out, data)
    end
end
# Send all accumulated output to the TLS socket in a single write.
function flush(bio::BufferedTLSSocket)
    lock(bio.out_lck) do
        write(bio.sock, take!(bio.out))
    end
    nothing
end
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | code | 518 | convert(::Type{String}, s::T) where {T<:Union{TAMQPShortStr,TAMQPLongStr,TAMQPByteArray}} = String(copy(s.data))
convert(::Type{Bool}, b::TAMQPBit) = Bool(b.val & 0x1)
simplify(val::T) where {T <: Union{TAMQPShortStr,TAMQPLongStr,TAMQPByteArray}} = String(copy(val.data))
simplify(val::TAMQPFieldArray) = [simplify(elem) for elem in val.data]
simplify(table::TAMQPFieldTable) = Dict{String,Any}(simplify(f.name)=>simplify(f.val) for f in table.data)
simplify(val::TAMQPFieldValue) = simplify(val.fld)
simplify(x) = x
| AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | code | 3478 | struct PropertySpec
name::Symbol
typ::Type
mask::UInt16
end
# Delivery modes for the `delivery_mode` property.
const NON_PERSISTENT = TAMQPOctet(1)
const PERSISTENT = TAMQPOctet(2)

# Standard message properties, keyed by name; mask bits descend from the most
# significant bit in wire order.
const PROPERTIES = Dict{Symbol, PropertySpec}(
    :content_type => PropertySpec(:content_type, TAMQPShortStr, 0x0001 << 15), # MIME content type (MIME typing)
    :content_encoding => PropertySpec(:content_encoding, TAMQPShortStr, 0x0001 << 14), # MIME content encoding (MIME typing)
    :headers => PropertySpec(:headers, TAMQPFieldTable, 0x0001 << 13), # message header field table (For applications, and for header exchange routing)
    :delivery_mode => PropertySpec(:delivery_mode, TAMQPOctet, 0x0001 << 12), # non-persistent (1) or persistent (2) (For queues that implement persistence)
    :priority => PropertySpec(:priority, TAMQPOctet, 0x0001 << 11), # message priority, 0 to 9 (For queues that implement priorities)
    :correlation_id => PropertySpec(:correlation_id, TAMQPShortStr, 0x0001 << 10), # application correlation identifier (For application use, no formal behaviour)
    :reply_to => PropertySpec(:reply_to, TAMQPShortStr, 0x0001 << 9), # address to reply to (For application use, no formal behaviour)
    :expiration => PropertySpec(:expiration, TAMQPShortStr, 0x0001 << 8), # message expiration specification (For application use, no formal behaviour)
    :message_id => PropertySpec(:message_id, TAMQPShortStr, 0x0001 << 7), # application message identifier (For application use, no formal behaviour)
    :timestamp => PropertySpec(:timestamp, TAMQPTimeStamp, 0x0001 << 6), # message timestamp (For application use, no formal behaviour)
    :message_type => PropertySpec(:message_type, TAMQPShortStr, 0x0001 << 5), # message type name (For application use, no formal behaviour)
    :user_id => PropertySpec(:user_id, TAMQPShortStr, 0x0001 << 4), # creating user id (For application use, no formal behaviour)
    :app_id => PropertySpec(:app_id, TAMQPShortStr, 0x0001 << 3), # creating application id (For application use, no formal behaviour)
    :cluster_id => PropertySpec(:cluster_id, TAMQPShortStr, 0x0001 << 2) # reserved, must be empty (Deprecated, was old cluster-id property)
)

# Property names and specs in wire order (descending mask bit).
const SORTED_PROPERTY_NAMES = [:content_type, :content_encoding, :headers, :delivery_mode, :priority, :correlation_id, :reply_to, :expiration, :message_id, :timestamp, :message_type, :user_id, :app_id, :cluster_id]
const SORTED_PROPERTIES = [PROPERTIES[k] for k in SORTED_PROPERTY_NAMES]
"""
An AMQP message: body bytes, standard properties, and delivery metadata that
is filled in for messages received from the server.
"""
mutable struct Message
    data::Vector{UInt8}                  # message body
    properties::Dict{Symbol,TAMQPField}  # standard properties (see PROPERTIES)
    filled::Int                          # body bytes filled in so far (full length for outgoing messages)
    consumer_tag::String
    delivery_tag::TAMQPDeliveryTag
    redelivered::Bool
    exchange::String
    routing_key::String
    remaining::TAMQPMessageCount
end
"""
    Message(data::Vector{UInt8}; properties...)

Construct an outgoing message with the given body bytes. Keyword arguments
are applied as message properties via `set_properties`.
"""
function Message(data::Vector{UInt8}; kwargs...)
    msg = Message(
        data,
        Dict{Symbol,TAMQPField}(),
        length(data),           # whole body present
        "",                     # consumer_tag
        TAMQPDeliveryTag(0),
        false,                  # redelivered
        "",                     # exchange
        "",                     # routing_key
        TAMQPMessageCount(0),
    )
    set_properties(msg; kwargs...)
    return msg
end
"""
    set_properties(msg::Message; properties...)

Set or clear message properties by keyword. A value of `nothing` removes the
property; any other value is converted to the property's wire type.
"""
function set_properties(msg::Message; kwargs...)
    for (name, value) in kwargs
        if isnothing(value)
            delete!(msg.properties, name)
        else
            # all possible property types have constructors that can be used to create them
            msg.properties[name] = (PROPERTIES[name].typ)(value)
        end
    end
    return nothing
end
| AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | code | 59203 | # default client timeout to use with blocking methods after which they throw an error
# Julia Timer converts seconds to milliseconds and adds 1 to it before passing it to libuv, hence the magic numbers to prevent overflow
const DEFAULT_TIMEOUT = round(Int, typemax(Int)/1000) - 1
const DEFAULT_CONNECT_TIMEOUT = round(Int, typemax(Int)/1000) - 1
# ----------------------------------------
# IO for types begin
# ----------------------------------------
# A bit on the wire occupies a whole octet.
function read(io::IO, ::Type{TAMQPBit})
    TAMQPBit(ntoh(read(io, UInt8)))
end
function write(io::IO, b::TAMQPBit)
    write(io, hton(b.val))
end

# Frame properties: channel id and payload size, both in network byte order.
function read(io::IO, ::Type{TAMQPFrameProperties})
    TAMQPFrameProperties(
        ntoh(read(io, fieldtype(TAMQPFrameProperties, :channel))),
        ntoh(read(io, fieldtype(TAMQPFrameProperties, :payloadsize))),
    )
end
write(io::IO, p::TAMQPFrameProperties) = write(io, hton(p.channel), hton(p.payloadsize))

# Body payload: raw octets; `b.data` must be pre-sized by the caller.
function read!(io::IO, b::TAMQPBodyPayload)
    read!(io, b.data)
    b
end
write(io::IO, b::TAMQPBodyPayload) = write(io, b.data)
# Strings and byte arrays are length-prefixed: ShortStr with an octet,
# LongStr/ByteArray with a long uint (network byte order).
function read(io::IO, ::Type{TAMQPShortStr})
    len = ntoh(read(io, TAMQPOctet))
    TAMQPShortStr(len, read!(io, Vector{UInt8}(undef, len)))
end
function read(io::IO, ::Type{TAMQPLongStr})
    len = ntoh(read(io, TAMQPLongUInt))
    TAMQPLongStr(len, read!(io, Vector{UInt8}(undef, len)))
end
function read(io::IO, ::Type{TAMQPByteArray})
    len = ntoh(read(io, TAMQPLongUInt))
    TAMQPByteArray(len, read!(io, Vector{UInt8}(undef, len)))
end
write(io::IO, s::T) where {T<:Union{TAMQPShortStr,TAMQPLongStr,TAMQPByteArray}} = write(io, hton(s.len), s.data)
# A field value is a type-indicator character followed by the value itself.
function read(io::IO, ::Type{TAMQPFieldValue})
    c = read(io, Char)
    v = read(io, FieldValueIndicatorMap[c])
    T = FieldValueIndicatorMap[c]
    if T <: Integer
        v = ntoh(v)  # integers travel in network byte order
    end
    TAMQPFieldValue{T}(c, v)
end
function write(io::IO, fv::TAMQPFieldValue)
    v = isa(fv.fld, Integer) ? hton(fv.fld) : fv.fld
    write(io, fv.typ, v)
end
# A field table entry: name followed by value.
read(io::IO, ::Type{TAMQPFieldValuePair}) = TAMQPFieldValuePair(read(io, TAMQPFieldName), read(io, TAMQPFieldValue))
write(io::IO, fv::TAMQPFieldValuePair) = write(io, fv.name, fv.val)
# A field table is a length prefix followed by that many bytes of packed
# name/value pairs.
function read(io::IO, ::Type{TAMQPFieldTable})
    len = ntoh(read(io, fieldtype(TAMQPFieldTable, :len)))
    @debug("read fieldtable", len)
    buff = read!(io, Vector{UInt8}(undef, len))
    data = TAMQPFieldValuePair[]
    iob = IOBuffer(buff)
    while !eof(iob)
        push!(data, read(iob, TAMQPFieldValuePair))
    end
    TAMQPFieldTable(len, data)
end
function write(io::IO, ft::TAMQPFieldTable)
    @debug("write fieldtable", nfields=length(ft.data))
    # serialize the pairs first to learn the length prefix
    iob = IOBuffer()
    for fv in ft.data
        write(iob, fv)
    end
    buff = take!(iob)
    len = TAMQPLongUInt(length(buff))
    @debug("write fieldtable", len)
    l = write(io, hton(len))
    if len > 0
        l += write(io, buff)
    end
    l  # total bytes written
end
"""
Read a generic frame. All frames have the following wire format:

    0      1         3         7                  size+7     size+8
    +------+---------+---------+   +-------------+   +-----------+
    | type | channel |  size   |   |   payload   |   | frame-end |
    +------+---------+---------+   +-------------+   +-----------+
    octet    short      long         'size' octets       octet

Throws `AMQPProtocolException` on an invalid frame type or frame-end octet.
The input is untrusted network data, so explicit checks are used instead of
`@assert` (which may be elided depending on optimization settings).
"""
function read(io::IO, ::Type{TAMQPGenericFrame})
    hdr = ntoh(read(io, fieldtype(TAMQPGenericFrame, :hdr)))
    # valid frame types: method (1), header (2), body (3), heartbeat (8)
    (hdr in (1,2,3,8)) || throw(AMQPProtocolException("unexpected frame type $hdr"))
    props = read(io, fieldtype(TAMQPGenericFrame, :props))
    @debug("reading generic frame", type=hdr, channel=props.channel, payloadsize=props.payloadsize)
    payload = read!(io, TAMQPBodyPayload(Vector{TAMQPOctet}(undef, props.payloadsize)))
    fend = ntoh(read(io, fieldtype(TAMQPGenericFrame, :fend)))
    (fend == FrameEnd) || throw(AMQPProtocolException("invalid frame-end octet $fend"))
    TAMQPGenericFrame(hdr, props, payload, fend)
end
write(io::IO, f::TAMQPGenericFrame) = write(io, hton(f.hdr), f.props, f.payload, f.fend)
# """
# Given a generic frame, convert it to appropriate exact frame type.
# """
#function narrow_frame(f::TAMQPGenericFrame)
# if f.hdr == FrameMethod
# return TAMQPMethodFrame(f)
# end
# throw(AMQPProtocolException("Unknown frame type $(f.hdr)"))
#end
# Human-readable "Class.Method" name for a method payload, looked up from the
# generated spec metadata.
function method_name(payload::TAMQPMethodPayload)
    class_spec = CLASS_MAP[payload.class]
    method_spec = class_spec.method_map[payload.method]
    return string(class_spec.name, '.', method_spec.name)
end
"""
Validate if the method frame is for the given class and method.
"""
function is_method(m::TAMQPMethodFrame, class::Symbol, method::Symbol)
    class_spec = CLASS_MAP[m.payload.class]
    # bail out early on class mismatch (also avoids shadowing the frame `m`)
    class_spec.name === class || return false
    return class_spec.method_map[m.payload.method].name === method
end
# Callback-dictionary key for a class/method pair: (frame type, class id, method id).
function method_key(classname::Symbol, methodname::Symbol)
    cls = CLASSNAME_MAP[classname]
    meth = CLASSMETHODNAME_MAP[classname, methodname]
    return (FrameMethod, cls.id, meth.id)
end
# Callback-dictionary key for a whole frame type.
frame_key(frame_type) = (UInt8(frame_type),)
# ----------------------------------------
# IO for types end
# ----------------------------------------
# ----------------------------------------
# Connection and Channel begin
# ----------------------------------------
# Sentinel channel id meaning "allocate an unused id" (see `channel`).
const UNUSED_CHANNEL = -1
# Channel 0 is the connection-level channel (no Channel.Open handshake for it).
const DEFAULT_CHANNEL = 0
const DEFAULT_CHANNELMAX = 256
const DEFAULT_AUTH_PARAMS = Dict{String,Any}("MECHANISM"=>"AMQPLAIN", "LOGIN"=>"guest", "PASSWORD"=>"guest")

# Connection / channel lifecycle states.
const CONN_STATE_CLOSED = 0
const CONN_STATE_OPENING = 1
const CONN_STATE_OPEN = 2
const CONN_STATE_CLOSING = 3
# Capacity of the outbound frame queue.
const CONN_MAX_QUEUED = 1024 #typemax(Int)
const DEFAULT_KEEPALIVE_SECS = 60

abstract type AbstractChannel end
"""
Enable or disable TCP keepalive on `sock` via libuv; `interval` is the
keepalive delay in seconds. Returns `sock`; throws `AMQPProtocolException`
if the libuv call fails.
"""
function keepalive!(sock, enable::Bool; interval::Integer=DEFAULT_KEEPALIVE_SECS)
    @debug("setting tcp keepalive on tcp socket", enable, interval)
    err = ccall(:uv_tcp_keepalive, Cint, (Ptr{Nothing}, Cint, Cuint), sock.handle, enable, interval)
    if err != 0
        throw(AMQPProtocolException("error setting keepalive on socket to $enable with interval $interval"))
    end
    return sock
end
"""
An AMQP connection: the TCP/TLS socket, negotiated protocol parameters, the
channels multiplexed over it, and its background sender/receiver/heartbeat
tasks.
"""
mutable struct Connection
    virtualhost::String
    host::String
    port::Int
    sock::Union{TCPSocket, BufferedTLSSocket, Nothing}  # nothing until connected

    # connection properties (queried via `get_property`) and capabilities
    properties::Dict{Symbol,Any}
    capabilities::Dict{String,Any}

    # tuning parameters (channelmax/framemax/heartbeat are 0 until negotiated)
    channelmax::TAMQPShortInt
    framemax::TAMQPLongInt
    heartbeat::TAMQPShortInt
    enable_heartbeat::Bool
    keepalive::Integer
    enable_keepalive::Bool

    state::UInt8  # one of the CONN_STATE_* values

    sendq::Channel{TAMQPGenericFrame}  # outbound frames, drained by the sender task
    sendlck::Channel{UInt8}            # one-token lock serializing multi-frame sends
    channels::Dict{TAMQPChannel, AbstractChannel}

    # background tasks; nothing until started
    sender::Union{Task, Nothing}
    receiver::Union{Task, Nothing}
    heartbeater::Union{Task, Nothing}

    # timestamps of last server / client activity, for heartbeat bookkeeping
    heartbeat_time_server::Float64
    heartbeat_time_client::Float64

    function Connection(;
            virtualhost::String="/",
            host::String="localhost",
            port::Int=AMQP_DEFAULT_PORT,
            send_queue_size::Int=CONN_MAX_QUEUED,
            heartbeat::Integer=0,
            enable_heartbeat::Bool=true,
            keepalive::Integer=DEFAULT_KEEPALIVE_SECS,
            enable_keepalive::Bool=true,
        )
        sendq = Channel{TAMQPGenericFrame}(send_queue_size)
        sendlck = Channel{UInt8}(1)
        put!(sendlck, 1)  # the single send-lock token
        new(virtualhost, host, port, nothing,
            Dict{Symbol,Any}(), Dict{String,Any}(), 0, 0,
            heartbeat, enable_heartbeat, keepalive, enable_keepalive,
            CONN_STATE_CLOSED, sendq, sendlck, Dict{TAMQPChannel, AbstractChannel}(),
            nothing, nothing, nothing,
            0.0, 0.0)
    end
end
"""
A registered consumer on a channel: messages queued to `recvq` are handed to
`callback` by a dedicated receiver task.
"""
mutable struct MessageConsumer
    chan_id::TAMQPChannel
    consumer_tag::String
    recvq::Channel{Message}
    callback::Function
    receiver::Task  # runs channel_message_consumer until recvq is closed

    function MessageConsumer(chan_id::TAMQPChannel, consumer_tag::String, callback::Function;
            buffer_size::Int=typemax(Int),
            buffer::Channel{Message}=Channel{Message}(buffer_size))
        c = new(chan_id, consumer_tag, buffer, callback)
        c.receiver = @async connection_processor(c, "Consumer $consumer_tag", channel_message_consumer)
        c
    end
end

# Closing the queue terminates the consumer's receiver task.
close(consumer::MessageConsumer) = close(consumer.recvq)
"""
A single AMQP channel multiplexed over a `Connection`: its receive queue and
receiver task, registered frame/method callbacks, consumers, and messages in
flight.
"""
mutable struct MessageChannel <: AbstractChannel
    id::TAMQPChannel
    conn::Connection
    state::UInt8  # one of the CONN_STATE_* values
    flow::Bool

    recvq::Channel{TAMQPGenericFrame}  # frames routed here by the connection receiver
    receiver::Union{Task, Nothing}
    callbacks::Dict{Tuple,Tuple{Function,Any}}  # frame/method key => (callback, context)

    partial_msgs::Vector{Message} # holds partial messages while they are getting read (message bodies arrive in sequence)
    chan_get::Channel{Union{Message, Nothing}} # channel used for received messages, in sync get call (TODO: maybe type more strongly?)
    consumers::Dict{String,MessageConsumer}
    pending_msgs::Dict{String,Channel{Message}} # holds messages received that do not have a consumer registered
    lck::ReentrantLock
    closereason::Union{CloseReason, Nothing}

    function MessageChannel(id, conn)
        new(id, conn, CONN_STATE_CLOSED, true,
            Channel{TAMQPGenericFrame}(CONN_MAX_QUEUED), nothing, Dict{Tuple,Tuple{Function,Any}}(),
            Message[], Channel{Union{Message, Nothing}}(1), Dict{String,MessageConsumer}(),
            Dict{String,Channel{Message}}(), ReentrantLock(), nothing)
    end
end
flush(c::MessageChannel) = flush(c.conn)
# Cooperatively wait until the send queue is drained by the sender task.
function flush(c::Connection)
    while isready(c.sendq) && (c.sender !== nothing) && !istaskdone(c.sender)
        yield()
    end
end
# Accessors that delegate from a channel to its connection.
sock(c::MessageChannel) = sock(c.conn)
sock(c::Connection) = c.sock
isopen(c::Connection) = c.sock !== nothing && isopen(c.sock)
# A channel is open while its connection is open and it is still registered.
isopen(c::MessageChannel) = isopen(c.conn) && (c.id in keys(c.conn.channels))
get_property(c::MessageChannel, s::Symbol, default) = get_property(c.conn, s, default)
get_property(c::Connection, s::Symbol, default) = get(c.properties, s, default)
with_sendlock(f, c::MessageChannel) = with_sendlock(f, c.conn)
with_sendlock(f, c::Connection) = with_sendlock(f, c.sendlck)
# Run `f()` while holding the single-token send lock, so the frames of a
# multi-frame send are not interleaved with frames from other tasks.
function with_sendlock(f, sendlck::Channel{UInt8})
    token = take!(sendlck)
    try
        return f()
    finally
        put!(sendlck, token)  # always release, even when f throws
    end
end
send(c::MessageChannel, f) = send(c.conn, f)
# Queue a frame for the sender task.
send(c::Connection, f) = put!(c.sendq, TAMQPGenericFrame(f))

# Send a method frame that carries no content.
function send(c::MessageChannel, payload::TAMQPMethodPayload)
    @debug("sending without content", methodname=method_name(payload))
    frameprop = TAMQPFrameProperties(c.id,0)
    send(c, TAMQPMethodFrame(frameprop, payload))
end
# Send a method frame followed by a content header and one or more body
# frames, under the send lock so the frames of this message do not interleave
# with frames sent by other tasks.
function send(c::MessageChannel, payload::TAMQPMethodPayload, msg::Message)
    @debug("sending with content", methodname=method_name(payload))
    frameprop = TAMQPFrameProperties(c.id,0)
    framemax = c.conn.framemax
    if framemax <= 0
        # framemax <= 0 indicates a closed or not-yet-negotiated connection
        errormsg = (c.conn.state == CONN_STATE_OPEN) ? "Unexpected framemax ($framemax) value for connection" : "Connection closed"
        throw(AMQPClientException(errormsg))
    end
    with_sendlock(c) do
        send(c, TAMQPMethodFrame(frameprop, payload))
        hdrpayload = TAMQPHeaderPayload(payload.class, msg)
        send(c, TAMQPContentHeaderFrame(frameprop, hdrpayload))
        # send one or more message body frames
        offset = 1
        msglen = length(msg.data)
        @debug("sending message with content body", msglen)
        while offset <= msglen
            # split the body into frames of at most framemax octets
            msgend = min(msglen, offset + framemax - 1)
            bodypayload = TAMQPBodyPayload(msg.data[offset:msgend])
            offset = msgend + 1
            @debug("sending content body frame", msglen, offset)
            send(c, TAMQPContentBodyFrame(frameprop, bodypayload))
        end
    end
end
# ----------------------------------------
# Async message handler framework begin
# ----------------------------------------
# Poll until `c.state` is one of `states` or `timeout` seconds elapse.
# Bails out early if the connection closes while waiting for an open state.
# Returns whether the desired state was reached.
function wait_for_state(c, states; interval=1, timeout=typemax(Int))
    timedwait(Float64(timeout); pollint=Float64(interval)) do
        # if we are looking for open states, and connection gets closed in the meantime, it's an error, break out
        conn_error = !(CONN_STATE_CLOSED in states) && (c.state == CONN_STATE_CLOSED)
        state_found = (c.state in states)
        conn_error || state_found
    end
    c.state in states
end
"""
Run `fn(c)` in a loop until it throws; used as the body of the sender,
receiver, heartbeater and consumer tasks. On exit, classifies the failure
(peer close, local close, or unexpected error), logs it, and closes `c`.
"""
function connection_processor(c, name, fn)
    @debug("Starting task", name)
    try
        while true
            fn(c)
        end
    catch err
        reason = "$name task exiting."
        if isa(c, MessageConsumer)
            # consumer callbacks run user code; just log and stop the consumer
            reason = reason * " Unhandled exception: $err"
            @warn(reason, exception=(err,catch_backtrace()))
            close(c)
        else
            isconnclosed = !isopen(c)
            ischanclosed = isa(c, MessageChannel) && isa(err, InvalidStateException) && err.state == :closed
            if ischanclosed || isconnclosed
                reason = reason * " Connection closed"
                # if we were not closing ourselves, the peer closed on us
                if c.state !== CONN_STATE_CLOSING
                    reason = reason * " by peer"
                    close(c, false, true)
                end
                @debug(reason, exception=(err,catch_backtrace()))
            else
                # unexpected error on a live connection; log unless already closing
                if !(c.state in (CONN_STATE_CLOSING, CONN_STATE_CLOSED))
                    reason = reason * " Unhandled exception: $err"
                    @warn(reason, exception=(err,catch_backtrace()))
                end
                close(c, false, true)
            end
        end
    end
end
# Sender task body: drain queued frames to the socket and record client
# activity for heartbeat bookkeeping.
function connection_sender(c::Connection)
    @debug("==> sending on conn", host=c.host, port=c.port, virtualhost=c.virtualhost)
    nbytes = sendq_to_stream(sock(c), c.sendq)
    @debug("==> sent", nbytes)
    c.heartbeat_time_client = time() # update heartbeat time for client
    nothing
end
# Write queued frames to a plain TCP socket. Large frames are written
# directly; short frames are coalesced into one buffer to reduce writes.
function sendq_to_stream(conn::TCPSocket, sendq::Channel{TAMQPGenericFrame})
    msg = take!(sendq)
    if length(msg.payload.data) > TCP_MIN_WRITEBUFF_SIZE # write large messages directly
        nbytes = write(conn, msg)
    else # coalesce short messages and do single write
        buff = IOBuffer()
        nbytes = write(buff, msg)
        while isready(sendq) && (nbytes < TCP_MAX_WRITEBUFF_SIZE)
            nbytes += write(buff, take!(sendq))
        end
        write(conn, take!(buff))
    end
    nbytes
end
# Write queued frames to a TLS socket, coalescing up to one TLS record's
# worth of frames before flushing in a single write.
function sendq_to_stream(conn::BufferedTLSSocket, sendq::Channel{TAMQPGenericFrame})
    # avoid multiple small writes to TLS layer
    nbytes = write(conn, take!(sendq))
    while isready(sendq) && (nbytes < MbedTLS.MBEDTLS_SSL_MAX_CONTENT_LEN)
        nbytes += write(conn, take!(sendq))
    end
    # flush does a single write of accumulated buffer
    flush(conn)
    nbytes
end
"""
Receiver task body: read one frame from the socket and route it to the owning
channel's receive queue. Frames addressed to a channel id not registered on
the connection are dropped with a warning.
"""
function connection_receiver(c::Connection)
    f = read(sock(c), TAMQPGenericFrame)
    # note server activity for heartbeat bookkeeping
    c.heartbeat_time_server = time()
    channelid = f.props.channel
    @debug("<== read message on conn", host=c.virtualhost, channelid)
    if !(channelid in keys(c.channels))
        # Bug fix: previously this only warned and then indexed the missing
        # channel anyway, raising a KeyError that killed the receiver task.
        @warn("Discarding message for unknown channel", channelid)
        return nothing
    end
    chan = channel(c, channelid)
    put!(chan.recvq, f)
    nothing
end
# Heartbeat task body: once per heartbeat interval, send a heartbeat if the
# client side has been idle, and close the connection if the server has been
# silent for more than two intervals.
function connection_heartbeater(c::Connection)
    sleep(c.heartbeat)
    isopen(c) || throw(AMQPClientException("Connection closed"))
    now = time()
    if (now - c.heartbeat_time_client) > c.heartbeat
        send_connection_heartbeat(c)
    end
    if (now - c.heartbeat_time_server) > (2 * c.heartbeat)
        @warn("server heartbeat missed", secs=(now - c.heartbeat_time_server))
        close(c, false, false)
    end
    nothing
end
# Channel receiver task body: take the next frame from the channel's queue,
# narrow it to its concrete frame type, and dispatch to the registered
# callback (or `on_unexpected_message` when none is registered).
function channel_receiver(c::MessageChannel)
    f = take!(c.recvq)
    if f.hdr == FrameMethod
        m = TAMQPMethodFrame(f)
        @debug("<== received", channel=f.props.channel, class=m.payload.class, method=m.payload.method)
        cbkey = (f.hdr, m.payload.class, m.payload.method)
    elseif f.hdr == FrameHeartbeat
        m = TAMQPHeartBeatFrame(f)
        @debug("<== received heartbeat", channel=f.props.channel)
        cbkey = (f.hdr,)
    elseif f.hdr == FrameHeader
        m = TAMQPContentHeaderFrame(f)
        @debug("<== received contentheader", channel=f.props.channel)
        cbkey = (f.hdr,)
    elseif f.hdr == FrameBody
        m = TAMQPContentBodyFrame(f)
        @debug("<== received contentbody", channel=f.props.channel)
        cbkey = (f.hdr,)
    else
        m = f
        @warn("<== received unhandled frame type", channel=f.props.channel, type=f.hdr)
        cbkey = (f.hdr,)
    end
    (cb,ctx) = get(c.callbacks, cbkey, (on_unexpected_message, nothing))
    @assert f.props.channel == c.id
    cb(c, m, ctx)
    nothing
end
# Consumer task body: hand the next queued message to the consumer callback.
function channel_message_consumer(c::MessageConsumer)
    msg = take!(c.recvq)
    c.callback(msg)
    return nothing
end
# Remove all registered callbacks on the channel.
clear_handlers(c::MessageChannel) = (empty!(c.callbacks); nothing)

# Register (or, with cb === nothing, deregister) the callback for a specific
# class/method pair; `ctx` is passed through to the callback.
function handle(c::MessageChannel, classname::Symbol, methodname::Symbol, cb=nothing, ctx=nothing)
    cbkey = method_key(classname, methodname)
    if cb === nothing
        delete!(c.callbacks, cbkey)
    else
        c.callbacks[cbkey] = (cb, ctx)
    end
    nothing
end
# Register (or deregister) the callback for a whole frame type.
function handle(c::MessageChannel, frame_type::Integer, cb=nothing, ctx=nothing)
    cbkey = frame_key(frame_type)
    if cb === nothing
        delete!(c.callbacks, cbkey)
    else
        c.callbacks[cbkey] = (cb, ctx)
    end
    nothing
end
# ----------------------------------------
# Async message handler framework end
# ----------------------------------------
# ----------------------------------------
# Open channel / connection begin
# ----------------------------------------
# Return the lowest channel id (0..channelmax) not currently in use on the
# connection; throws AMQPClientException when all ids are taken.
function find_unused_channel(c::Connection)
    used = keys(c.channels)
    maxid = c.channelmax
    for candidate in 0:maxid
        (candidate in used) || return candidate
    end
    throw(AMQPClientException("No free channel available (max: $maxid)"))
end
"""
    channel(conn, id, create)
    channel(f, args...)

Create a new channel or return an existing channel object.
Multiple channels can be multiplexed over a single connection.
Can be used with the Julia do block syntax to create a channel and close it afterwards.

- `conn`: The connection over which to create the channel.
- `id`: Channels are identified by their numeric id. Specifying `AMQPClient.UNUSED_CHANNEL` as channel
   id during creation will automatically assign an unused id.
- `create`: If true, a new channel will be created. Else an existing channel with the specified id
   will be returned.
"""
channel(c::MessageChannel, id::Integer) = channel(c.conn, id)
channel(c::Connection, id::Integer) = c.channels[id]
channel(c::MessageChannel, id::Integer, create::Bool) = channel(c.conn, id, create)

function channel(c::Connection, id::Integer, create::Bool; connect_timeout=DEFAULT_CONNECT_TIMEOUT)
    if create
        if id == UNUSED_CHANNEL
            id = find_unused_channel(c)
        elseif id in keys(c.channels)
            throw(AMQPClientException("Channel Id $id is already in use"))
        end
        chan = MessageChannel(id, c)
        chan.state = CONN_STATE_OPENING
        c.channels[chan.id] = chan
        if id != DEFAULT_CHANNEL
            # open the channel (channel 0 needs no Channel.Open handshake)
            chan.receiver = @async connection_processor(chan, "ChannelReceiver($(chan.id))", channel_receiver)
            handle(chan, :Channel, :OpenOk, on_channel_open_ok)
            send_channel_open(chan)
            if !wait_for_state(chan, CONN_STATE_OPEN; timeout=connect_timeout)
                error_message = "Channel handshake failed"
                if nothing !== chan.closereason
                    # include the server-provided close code and message
                    error_message = string(error_message, " - ", string(chan.closereason.code), " (", convert(String, chan.closereason.msg), ")")
                end
                throw(AMQPClientException(error_message))
            end
        end
    else
        chan = channel(c, id)
    end
    chan
end
"""
    channel(f, args...; kwargs...)

Open a channel, invoke `f` on it, and close the channel afterwards,
even when `f` throws. Returns whatever `f` returns.
"""
function channel(f, args...; kwargs...)
    opened = channel(args...; kwargs...)
    try
        return f(opened)
    finally
        close(opened)
    end
end
"""
    connection(f; kwargs...)
    connection(;
        virtualhost = "/",
        host = "localhost",
        port = AMQPClient.AMQP_DEFAULT_PORT,
        framemax = 0,
        heartbeat = true,
        keepalive = DEFAULT_KEEPALIVE_SECS,
        send_queue_size = CONN_MAX_QUEUED,
        auth_params = AMQPClient.DEFAULT_AUTH_PARAMS,
        channelmax = AMQPClient.DEFAULT_CHANNELMAX,
        connect_timeout = AMQPClient.DEFAULT_CONNECT_TIMEOUT,
        amqps = nothing
    )

Creates a fresh connection to the AMQP server.
Returns a connection that can be used to open channels subsequently.
Can be used with the Julia do block syntax to create a connection and close it afterwards.
Keyword arguments:
- `host`: The message server host to connect to. Defaults to "localhost".
- `port`: The message server port to connect to. Defaults to the default AMQP port.
- `virtualhost`: The virtual host to connect to. Defaults to "/".
- `amqps`: If connection is to be done over AMQPS, the TLS options to use. See `amqps_configure`.
- `connect_timeout`: TCP connect timeout to impose. Default `AMQPClient.DEFAULT_CONNECT_TIMEOUT`,
- `framemax`: The maximum frame size to use. Defaults to 0, which means no limit.
- `heartbeat`: `true` to enable heartbeat, `false` to disable. Can also be set to a positive integer,
   in which case it is the heartbeat interval in seconds. Defaults to `true`. If `false`, ensure
   `keepalive` is enabled to detect dead connections. This parameter is negotiated with the server.
- `keepalive`: `true` to enable TCP keepalives, `false` to disable. Can also be set to a positive integer,
   in which case it is the keepalive interval in seconds. Defaults to `DEFAULT_KEEPALIVE_SECS`.
- `send_queue_size`: Maximum number of items to buffer in memory before blocking the send API until
   messages are drained. Defaults to CONN_MAX_QUEUED.
- `auth_params`: Parameters to use to authenticate the connection. Defaults to AMQPClient.DEFAULT_AUTH_PARAMS.
- `channelmax`: Maximum channel number to impose/negotiate with the server. Defaults to AMQPClient.DEFAULT_CHANNELMAX.
"""
function connection(; virtualhost="/", host="localhost", port=AMQPClient.AMQP_DEFAULT_PORT,
        framemax=0,
        heartbeat::Union{Int,Bool}=true,
        keepalive::Union{Int,Bool}=DEFAULT_KEEPALIVE_SECS,
        send_queue_size::Integer=CONN_MAX_QUEUED,
        auth_params=AMQPClient.DEFAULT_AUTH_PARAMS,
        channelmax::Integer=AMQPClient.DEFAULT_CHANNELMAX,
        connect_timeout=AMQPClient.DEFAULT_CONNECT_TIMEOUT,
        amqps::Union{MbedTLS.SSLConfig,Nothing}=nothing)
    @debug("connecting", host, port, virtualhost)
    # normalize the Bool-or-Int keepalive/heartbeat options into (enabled, interval) pairs;
    # a heartbeat interval of 0 lets the server propose one during tuning
    keepalive_interval = isa(keepalive, Bool) ? DEFAULT_KEEPALIVE_SECS : keepalive
    enable_keepalive = isa(keepalive, Bool) ? keepalive : (keepalive_interval > 0)
    heartbeat_interval = isa(heartbeat, Bool) ? 0 : heartbeat
    enable_heartbeat = isa(heartbeat, Bool) ? heartbeat : (heartbeat > 0)
    conn = Connection(;
        virtualhost=virtualhost,
        host=host,
        port=port,
        send_queue_size=send_queue_size,
        heartbeat=heartbeat_interval,
        enable_heartbeat=enable_heartbeat,
        keepalive=keepalive_interval,
        enable_keepalive=enable_keepalive,)
    # channel 0 is the control channel that carries the connection-level handshake
    chan = channel(conn, AMQPClient.DEFAULT_CHANNEL, true)
    # setup handler for Connection.Start
    ctx = Dict(:auth_params=>auth_params, :channelmax=>channelmax, :framemax=>framemax, :heartbeat=>heartbeat_interval)
    AMQPClient.handle(chan, :Connection, :Start, AMQPClient.on_connection_start, ctx)
    # open socket and start processor tasks
    sock = connect(conn.host, conn.port)
    # disable Nagle and enable quickack (when this Julia build exposes them) to reduce latency
    isdefined(Sockets, :nagle) && Sockets.nagle(sock, false)
    isdefined(Sockets, :quickack) && Sockets.quickack(sock, true)
    keepalive!(sock, enable_keepalive; interval=keepalive_interval)
    # wrap in TLS when AMQPS was requested
    conn.sock = (amqps !== nothing) ? setup_tls(sock, host, amqps) : sock
    conn.sender = @async AMQPClient.connection_processor(conn, "ConnectionSender", AMQPClient.connection_sender)
    conn.receiver = @async AMQPClient.connection_processor(conn, "ConnectionReceiver", AMQPClient.connection_receiver)
    chan.receiver = @async AMQPClient.connection_processor(chan, "ChannelReceiver($(chan.id))", AMQPClient.channel_receiver)
    # initiate handshake by sending the AMQP protocol header
    conn.state = chan.state = AMQPClient.CONN_STATE_OPENING
    write(AMQPClient.sock(chan), AMQPClient.ProtocolHeader)
    flush(AMQPClient.sock(chan))
    # wait for both the connection and the control channel to reach OPEN
    if !AMQPClient.wait_for_state(conn, AMQPClient.CONN_STATE_OPEN; timeout=connect_timeout) || !AMQPClient.wait_for_state(chan, AMQPClient.CONN_STATE_OPEN; timeout=connect_timeout)
        error_message = "Connection handshake failed"
        if nothing !== chan.closereason
            error_message = string(error_message, " - ", string(chan.closereason.code), " (", convert(String, chan.closereason.msg), ")")
        end
        throw(AMQPClientException(error_message))
    end
    conn
end
"""
    connection(f; kwargs...)

Establish a connection, invoke `f` on it, and close the connection afterwards,
even when `f` throws. Returns whatever `f` returns.
"""
function connection(f; kwargs...)
    established = connection(; kwargs...)
    try
        return f(established)
    finally
        close(established)
    end
end
# ----------------------------------------
# Open channel / connection end
# ----------------------------------------
# ----------------------------------------
# Close channel / connection begin
# ----------------------------------------
"""
    close(chan::MessageChannel, handshake::Bool=true, by_peer::Bool=false, reply_code=ReplySuccess, reply_text="", class_id=0, method_id=0)

Close a channel. With `handshake=true` (and not `by_peer`), a Channel.Close is sent
and resources are released later when the peer's CloseOk arrives; otherwise resources
are released immediately. Closing the default channel closes the whole connection.
"""
function close(chan::MessageChannel, handshake::Bool=true, by_peer::Bool=false, reply_code=ReplySuccess, reply_text="", class_id=0, method_id=0)
    # already closed: nothing to do
    (chan.state == CONN_STATE_CLOSED) && (return nothing)
    conn = chan.conn
    if chan.id == DEFAULT_CHANNEL
        # default channel represents the connection
        close(conn, handshake, by_peer, reply_code, reply_text, class_id, method_id)
    elseif chan.state != CONN_STATE_CLOSING
        # send handshake if needed and when called the first time
        chan.state = CONN_STATE_CLOSING
        if handshake && !by_peer
            send_channel_close(chan, reply_code, reply_text, class_id, method_id)
        end
    end
    # release resources when closed by peer or when closing abruptly
    if !handshake || by_peer
        close(chan.recvq)
        close(chan.chan_get)
        map(close, values(chan.consumers))
        empty!(chan.consumers)
        chan.receiver = nothing
        chan.callbacks = Dict{Tuple,Tuple{Function,Any}}()  # drop all registered method callbacks
        delete!(chan.conn.channels, chan.id)                # deregister from the owning connection
        chan.state = CONN_STATE_CLOSED
    end
    nothing
end
"""
    close(conn::Connection, handshake::Bool=true, by_peer::Bool=false, reply_code=ReplySuccess, reply_text="", class_id=0, method_id=0)

Close a connection. Open channels are closed first (without their own handshake),
then a Connection.Close handshake is optionally performed. Socket and internal
state are released immediately when closing abruptly or when closed by the peer.
"""
function close(conn::Connection, handshake::Bool=true, by_peer::Bool=false, reply_code=ReplySuccess, reply_text="", class_id=0, method_id=0)
    (conn.state == CONN_STATE_CLOSED) && (return nothing)
    # send handshake if needed and when called the first time
    if conn.state != CONN_STATE_CLOSING
        conn.state = CONN_STATE_CLOSING
        # close all other open channels (collect first: close mutates conn.channels)
        for open_channel in collect(values(conn.channels))
            if open_channel.id != DEFAULT_CHANNEL
                close(open_channel, false, by_peer)
            end
        end
        # send handshake if needed
        if handshake && !by_peer
            send_connection_close(conn, reply_code, reply_text, class_id, method_id)
        end
    end
    if !handshake || by_peer
        # close socket
        close(conn.sock)
        conn.sock = nothing
        # reset all members
        conn.properties = Dict{Symbol,Any}()
        conn.capabilities = Dict{String,Any}()
        conn.channelmax = 0
        conn.framemax = 0
        conn.heartbeat = 0
        # close and reset the sendq channel
        close(conn.sendq)
        conn.sendq = Channel{TAMQPGenericFrame}(CONN_MAX_QUEUED)
        # reset the tasks
        conn.sender = nothing
        conn.receiver = nothing
        conn.heartbeater = nothing
        conn.state = CONN_STATE_CLOSED
    end
    nothing
end
# ----------------------------------------
# Close channel / connection end
# ----------------------------------------
# ----------------------------------------
# Connection and Channel end
# ----------------------------------------
# ----------------------------------------
# Exchange begin
# ----------------------------------------
# Standard AMQP exchange types
const EXCHANGE_TYPE_DIRECT = "direct"    # must be implemented by servers
const EXCHANGE_TYPE_FANOUT = "fanout"    # must be implemented by servers
const EXCHANGE_TYPE_TOPIC = "topic"      # optional, must test before trying to open
const EXCHANGE_TYPE_HEADERS = "headers"  # optional, must test before trying to open
# The server MUST, in each virtual host, predeclare an exchange instance for each standard
# exchange type that it implements, where the name of the exchange instance, if defined, is "amq."
# followed by the exchange type name.
# The server MUST predeclare a direct exchange with no public name to act as the default
# exchange for content Publish methods and for default queue bindings.
"""
    default_exchange_name(excg_type) -> String
    default_exchange_name() -> String

Name of the server-predeclared exchange for the given exchange type
(`"amq." * excg_type`), or the nameless default direct exchange when
called without arguments.
"""
default_exchange_name(excg_type) = string("amq.", excg_type)
default_exchange_name() = ""
# Send an AMQP method (via `sendmethod`) and optionally wait for its reply.
#
# - `default_result`: returned when `nowait` is set (no reply is expected)
# - `resp_handler`/`resp_class`/`resp_meth`: one-shot callback registered to capture the reply
# - `timeout_result`: returned when no reply arrives within `timeout` seconds
#
# Throws AMQPClientException when the channel gets closed while waiting.
# NOTE(review): on timeout the callback registered via `handle` is not explicitly
# deregistered here — presumably the handler deregisters itself when it eventually
# fires; verify against the `handle`/handler implementations.
function _wait_resp(sendmethod, chan::MessageChannel, default_result::T,
        nowait::Bool=true, resp_handler=nothing, resp_class=nothing, resp_meth=nothing,
        timeout_result::T=default_result, timeout::Int=DEFAULT_TIMEOUT) where {T}
    result = default_result
    if !nowait
        reply = Channel{T}(1)
        # register a callback to capture the response
        handle(chan, resp_class, resp_meth, resp_handler, reply)
    end
    sendmethod()
    if !nowait
        # wait for response (or channel closure), polling every 10ms
        result = timeout_result
        if :ok === timedwait(()->(isready(reply) || !isopen(chan)), Float64(timeout); pollint=0.01)
            if isready(reply)
                result = take!(reply)
            else
                # channel got closed while waiting; surface the close reason if known
                error_message = "Connection closed"
                if nothing !== chan.closereason
                    error_message = string(error_message, " - ", string(chan.closereason.code), " (", convert(String, chan.closereason.msg), ")")
                end
                throw(AMQPClientException(error_message))
            end
        end
        close(reply)
    end
    result
end
"""
    exchange_declare(chan, name, typ; kwargs...) -> Bool

Declare an exchange of type `typ` (or, with `passive=true`, verify that it exists).
Returns `true` on success, or immediately when `nowait` is set; `false` on timeout.
Broker-reserved names (empty or "amq."-prefixed) may only be referenced passively.
"""
function exchange_declare(chan::MessageChannel, name::String, typ::String;
        passive::Bool=false, durable::Bool=false, auto_delete::Bool=false,
        nowait::Bool=false, timeout::Int=DEFAULT_TIMEOUT,
        arguments::Dict{String,Any}=Dict{String,Any}())
    (isempty(name) || startswith(name, "amq.")) && !passive && throw(AMQPClientException("Exchange name '$name' is reserved. Use a different name."))
    if auto_delete
        # a deprecation notice should be visible to users, not hidden at debug level
        @warn("auto_delete exchange types are deprecated")
    end
    _wait_resp(chan, true, nowait, on_exchange_declare_ok, :Exchange, :DeclareOk, false, timeout) do
        send_exchange_declare(chan, name, typ, passive, durable, auto_delete, nowait, arguments)
    end
end
"""
    exchange_delete(chan, name; if_unused=false, nowait=false, timeout=DEFAULT_TIMEOUT) -> Bool

Delete the named exchange. Broker-reserved names (empty or "amq."-prefixed) are rejected.
Returns `true` on success (or immediately when `nowait` is set); `false` on timeout.
"""
function exchange_delete(chan::MessageChannel, name::String; if_unused::Bool=false, nowait::Bool=false, timeout::Int=DEFAULT_TIMEOUT)
    (isempty(name) || startswith(name, "amq.")) && throw(AMQPClientException("Exchange name '$name' is reserved. Use a different name."))
    sender = () -> send_exchange_delete(chan, name, if_unused, nowait)
    _wait_resp(sender, chan, true, nowait, on_exchange_delete_ok, :Exchange, :DeleteOk, false, timeout)
end
"""
    exchange_bind(chan, dest, src, routing_key; nowait=false, timeout=DEFAULT_TIMEOUT, arguments=Dict()) -> Bool

Bind exchange `dest` to exchange `src` with the given routing key.
Returns `true` on success (or immediately when `nowait` is set); `false` on timeout.
"""
function exchange_bind(chan::MessageChannel, dest::String, src::String, routing_key::String;
        nowait::Bool=false, timeout::Int=DEFAULT_TIMEOUT,
        arguments::Dict{String,Any}=Dict{String,Any}())
    sender = () -> send_exchange_bind(chan, dest, src, routing_key, nowait, arguments)
    _wait_resp(sender, chan, true, nowait, on_exchange_bind_ok, :Exchange, :BindOk, false, timeout)
end
"""
    exchange_unbind(chan, dest, src, routing_key; nowait=false, timeout=DEFAULT_TIMEOUT, arguments=Dict()) -> Bool

Unbind exchange `dest` from exchange `src` for the given routing key.
Returns `true` on success (or immediately when `nowait` is set); `false` on timeout.
"""
function exchange_unbind(chan::MessageChannel, dest::String, src::String, routing_key::String;
        nowait::Bool=false, timeout::Int=DEFAULT_TIMEOUT,
        arguments::Dict{String,Any}=Dict{String,Any}())
    sender = () -> send_exchange_unbind(chan, dest, src, routing_key, nowait, arguments)
    _wait_resp(sender, chan, true, nowait, on_exchange_unbind_ok, :Exchange, :UnbindOk, false, timeout)
end
# ----------------------------------------
# Exchange end
# ----------------------------------------
# ----------------------------------------
# Queue begin
# ----------------------------------------
"""Declare a queue (or query an existing queue).
Returns a tuple: (boolean success/failure, queue name, message count, consumer count)
"""
function queue_declare(chan::MessageChannel, name::String;
        passive::Bool=false, durable::Bool=false, exclusive::Bool=false, auto_delete::Bool=false,
        nowait::Bool=false, timeout::Int=DEFAULT_TIMEOUT,
        arguments::Dict{String,Any}=Dict{String,Any}())
    # value returned immediately when nowait is set, and the variant used on timeout
    ok_default = (true, "", TAMQPMessageCount(0), Int32(0))
    timed_out = (false, "", TAMQPMessageCount(0), Int32(0))
    sender = () -> send_queue_declare(chan, name, passive, durable, exclusive, auto_delete, nowait, arguments)
    _wait_resp(sender, chan, ok_default, nowait, on_queue_declare_ok, :Queue, :DeclareOk, timed_out, timeout)
end
"""
    queue_bind(chan, queue_name, excg_name, routing_key; nowait=false, timeout=DEFAULT_TIMEOUT, arguments=Dict()) -> Bool

Bind a queue to an exchange with the given routing key.
Returns `true` on success (or immediately when `nowait` is set); `false` on timeout.
"""
function queue_bind(chan::MessageChannel, queue_name::String, excg_name::String, routing_key::String; nowait::Bool=false, timeout::Int=DEFAULT_TIMEOUT, arguments::Dict{String,Any}=Dict{String,Any}())
    sender = () -> send_queue_bind(chan, queue_name, excg_name, routing_key, nowait, arguments)
    _wait_resp(sender, chan, true, nowait, on_queue_bind_ok, :Queue, :BindOk, false, timeout)
end
"""
    queue_unbind(chan, queue_name, excg_name, routing_key; arguments=Dict(), timeout=DEFAULT_TIMEOUT) -> Bool

Unbind a queue from an exchange for the given routing key.
Queue.Unbind has no nowait flag in the protocol, so the reply is always awaited.
"""
function queue_unbind(chan::MessageChannel, queue_name::String, excg_name::String, routing_key::String; arguments::Dict{String,Any}=Dict{String,Any}(), timeout::Int=DEFAULT_TIMEOUT)
    sender = () -> send_queue_unbind(chan, queue_name, excg_name, routing_key, arguments)
    _wait_resp(sender, chan, true, false, on_queue_unbind_ok, :Queue, :UnbindOk, false, timeout)
end
"""Purge messages from a queue.
Returns a tuple: (boolean success/failure, message count)
"""
function queue_purge(chan::MessageChannel, name::String; nowait::Bool=false, timeout::Int=DEFAULT_TIMEOUT)
    ok_default = (true, TAMQPMessageCount(0))
    timed_out = (false, TAMQPMessageCount(0))
    sender = () -> send_queue_purge(chan, name, nowait)
    _wait_resp(sender, chan, ok_default, nowait, on_queue_purge_ok, :Queue, :PurgeOk, timed_out, timeout)
end
"""Delete a queue.
Returns a tuple: (boolean success/failure, message count)
"""
function queue_delete(chan::MessageChannel, name::String; if_unused::Bool=false, if_empty::Bool=false, nowait::Bool=false, timeout::Int=DEFAULT_TIMEOUT)
    ok_default = (true, TAMQPMessageCount(0))
    timed_out = (false, TAMQPMessageCount(0))
    sender = () -> send_queue_delete(chan, name, if_unused, if_empty, nowait)
    _wait_resp(sender, chan, ok_default, nowait, on_queue_delete_ok, :Queue, :DeleteOk, timed_out, timeout)
end
# ----------------------------------------
# Queue end
# ----------------------------------------
# ----------------------------------------
# Tx begin
# ----------------------------------------
# Common driver for Tx class methods: send the method via `sendmethod` and
# always wait for the corresponding *Ok reply (Tx methods have no nowait flag).
function _tx(sendmethod, chan::MessageChannel, respmethod::Symbol, on_resp, timeout::Int)
    sender = () -> sendmethod(chan)
    _wait_resp(sender, chan, true, false, on_resp, :Tx, respmethod, false, timeout)
end
# Put the channel into transaction mode / commit / roll back the current transaction.
# Each returns true when the server acknowledged, false on timeout.
tx_select(chan::MessageChannel; timeout::Int=DEFAULT_TIMEOUT) = _tx(send_tx_select, chan, :SelectOk, on_tx_select_ok, timeout)
tx_commit(chan::MessageChannel; timeout::Int=DEFAULT_TIMEOUT) = _tx(send_tx_commit, chan, :CommitOk, on_tx_commit_ok, timeout)
tx_rollback(chan::MessageChannel; timeout::Int=DEFAULT_TIMEOUT) = _tx(send_tx_rollback, chan, :RollbackOk, on_tx_rollback_ok, timeout)
# ----------------------------------------
# Tx end
# ----------------------------------------
# ----------------------------------------
# Basic begin
# ----------------------------------------
"""
    basic_qos(chan, prefetch_size, prefetch_count, apply_global; timeout=DEFAULT_TIMEOUT) -> Bool

Request a quality-of-service (prefetch) setting for the channel.
Basic.Qos has no nowait flag, so the QosOk reply is always awaited.
"""
function basic_qos(chan::MessageChannel, prefetch_size, prefetch_count, apply_global::Bool; timeout::Int=DEFAULT_TIMEOUT)
    sender = () -> send_basic_qos(chan, prefetch_size, prefetch_count, apply_global)
    _wait_resp(sender, chan, true, false, on_basic_qos_ok, :Basic, :QosOk, false, timeout)
end
"""Start a queue consumer.

- `queue`: queue name
- `consumer_tag`: id of the consumer, server generates a unique tag if this is empty
- `no_local`: do not deliver own messages
- `no_ack`: no acknowledgment needed, server automatically and silently acknowledges delivery (speed at the cost of reliability)
- `exclusive`: request exclusive access (only this consumer can access the queue)
- `nowait`: do not send a reply method

Returns a tuple `(success, consumer_tag)`.
"""
function basic_consume(chan::MessageChannel, queue::String, consumer_fn::Function; consumer_tag::String="", no_local::Bool=false, no_ack::Bool=false,
        exclusive::Bool=false, nowait::Bool=false, arguments::Dict{String,Any}=Dict{String,Any}(), timeout::Int=DEFAULT_TIMEOUT, buffer_sz::Int=typemax(Int))
    # register the consumer and get the consumer_tag
    result = _wait_resp(chan, (true, ""), nowait, on_basic_consume_ok, :Basic, :ConsumeOk, (false, ""), timeout) do
        send_basic_consume(chan, queue, consumer_tag, no_local, no_ack, exclusive, nowait, arguments)
    end
    # start the message consumer
    if result[1]
        consumer_tag = result[2]
        # set up message buffer beforehand to store messages that the consumer may receive while we are still setting things up,
        # or get the buffer that was set up already because we received messages
        lock(chan.lck) do
            consumer_buffer = get!(chan.pending_msgs, consumer_tag) do
                Channel{Message}(buffer_sz)
            end
            # apply the caller's buffer limit even when the buffer pre-existed
            consumer_buffer.sz_max = buffer_sz
            chan.consumers[consumer_tag] = MessageConsumer(chan.id, consumer_tag, consumer_fn; buffer=consumer_buffer)
            # the consumer now owns the buffer; it is no longer "pending"
            delete!(chan.pending_msgs, consumer_tag)
        end
    end
    result
end
"""Cancels a consumer.

This does not affect already delivered messages, but it does mean the server will not send any more messages for that consumer. The client may receive an arbitrary number of
messages in between sending the cancel method and receiving the cancelok reply.

Returns `true` when the cancel succeeded (or immediately when `nowait` is set).
"""
function basic_cancel(chan::MessageChannel, consumer_tag::String; nowait::Bool=false, timeout::Int=DEFAULT_TIMEOUT)
    result = _wait_resp(chan, (true, ""), nowait, on_basic_cancel_ok, :Basic, :CancelOk, (false, ""), timeout) do
        send_basic_cancel(chan, consumer_tag, nowait)
    end
    # clear the local message consumer; hold the channel lock for consistency with
    # basic_consume and message delivery, which mutate/read chan.consumers under chan.lck
    if result[1]
        lock(chan.lck) do
            if haskey(chan.consumers, consumer_tag)
                close(chan.consumers[consumer_tag])
                delete!(chan.consumers, consumer_tag)
            end
        end
    end
    result[1]
end
"""Publish a message.

This method publishes a message to a specific exchange. The message will be routed to queues as defined by the exchange
configuration and distributed to any active consumers when the transaction, if any, is committed.
Basic.Publish has no acknowledgment in the base protocol, so nothing is awaited here.
"""
function basic_publish(chan::MessageChannel, msg::Message; exchange::String="", routing_key::String="", mandatory::Bool=false, immediate::Bool=false)
    send_basic_publish(chan, msg, exchange, routing_key, mandatory, immediate)
end
# Sentinel placed on chan.chan_get when the broker replies Basic.GetEmpty
const GET_EMPTY_RESP = nothing
# Synchronously fetch a single message from `queue`.
# Blocks on chan.chan_get until either a complete message or GET_EMPTY_RESP
# (i.e. `nothing` when the queue was empty) is delivered by the frame handlers.
function basic_get(chan::MessageChannel, queue::String, no_ack::Bool)
    send_basic_get(chan, queue, no_ack)
    take!(chan.chan_get)
end
# Acknowledge a delivery; with all_upto=true, acknowledges all deliveries up to and including delivery_tag.
basic_ack(chan::MessageChannel, delivery_tag::TAMQPDeliveryTag; all_upto::Bool=false) = send_basic_ack(chan, delivery_tag, all_upto)
# Reject a delivery; with requeue=true the server attempts to requeue the message.
basic_reject(chan::MessageChannel, delivery_tag::TAMQPDeliveryTag; requeue::Bool=false) = send_basic_reject(chan, delivery_tag, requeue)
"""
    basic_recover(chan, requeue=false; async=false, timeout=DEFAULT_TIMEOUT) -> Bool

Ask the server to redeliver all unacknowledged messages on this channel.
When `async` is set the RecoverAsync variant is sent and no reply is awaited.
"""
function basic_recover(chan::MessageChannel, requeue::Bool=false; async::Bool=false, timeout::Int=DEFAULT_TIMEOUT)
    sender = () -> send_basic_recover(chan, requeue, async)
    _wait_resp(sender, chan, true, async, on_basic_recover_ok, :Basic, :RecoverOk, false, timeout)
end
# ----------------------------------------
# Basic end
# ----------------------------------------
# ----------------------------------------
# Confirm begin
# ----------------------------------------
"""
    confirm_select(chan; nowait=false, timeout=DEFAULT_TIMEOUT) -> Bool

Put the channel into publisher-confirm mode.
Returns `true` on success (or immediately when `nowait` is set); `false` on timeout.
"""
function confirm_select(chan::MessageChannel; nowait::Bool=false, timeout::Int=DEFAULT_TIMEOUT)
    sender = () -> send_confirm_select(chan)
    _wait_resp(sender, chan, true, nowait, on_confirm_select_ok, :Confirm, :SelectOk, false, timeout)
end
# Confirm.Select carries no fields
send_confirm_select(chan::MessageChannel) = send(chan, TAMQPMethodPayload(:Confirm, :Select, ()))
# ----------------------------------------
# Confirm end
# ----------------------------------------
# ----------------------------------------
# send and recv for methods begin
# ----------------------------------------
# Fallback handler for method frames with no registered callback: log and ignore.
function on_unexpected_message(c::MessageChannel, m::TAMQPMethodFrame, ctx)
    @debug("Unexpected message", channel=c.id, class=m.payload.class, method=m.payload.method)
    nothing
end
# Fallback handler for any other (non-method) frame type: log and ignore.
function on_unexpected_message(c::MessageChannel, f, ctx)
    @debug("Unexpected message", channel=c.id, frametype=f.hdr)
    nothing
end
# Generic acknowledgment handler for simple *Ok replies: validate the frame,
# notify any waiter through `ctx` (a reply Channel, or nothing), and deregister
# the one-shot callback for (class, method).
function _on_ack(chan::MessageChannel, m::TAMQPMethodFrame, class::Symbol, method::Symbol, ctx)
    @assert is_method(m, class, method)
    ctx === nothing || put!(ctx, true)
    handle(chan, class, method)
    nothing
end
# Send a CloseOk for the given class (:Connection or :Channel).
_send_close_ok(context_class::Symbol, chan::MessageChannel) = send(chan, TAMQPMethodPayload(context_class, :CloseOk, ()))
# Peer acknowledged our Close: finish closing locally (no handshake, treated as closed by peer).
function _on_close_ok(context_class::Symbol, chan::MessageChannel, m::TAMQPMethodFrame, ctx)
    @assert is_method(m, context_class, :CloseOk)
    close(chan, false, true)
    nothing
end
# Record the close reason on the channel and send a Close method for the given
# class. Closing channel 0 is escalated to a connection close.
function _send_close(context_class::Symbol, chan::MessageChannel, reply_code=ReplySuccess, reply_text="", class_id=0, method_id=0)
    chan.closereason = CloseReason(TAMQPReplyCode(reply_code), TAMQPReplyText(reply_text), TAMQPClassId(class_id), TAMQPMethodId(method_id))
    if context_class === :Channel && chan.id == DEFAULT_CHANNEL
        @debug("closing channel 0 is equivalent to closing the connection!")
        context_class = :Connection
    end
    # connection-level Close always goes out on channel 0
    context_chan_id = context_class === :Connection ? 0 : chan.id
    _send_close(context_class, context_chan_id, chan.conn, reply_code, reply_text, class_id, method_id, chan.id)
end
# Low-level variant: frame and send the Close method on the connection.
# NOTE(review): the trailing `chan_id` parameter is not used in this body — verify
# whether it is kept only for call-site symmetry.
_send_close(context_class::Symbol, context_chan_id, conn::Connection, reply_code=ReplySuccess, reply_text="", class_id=0, method_id=0, chan_id=0) =
    send(conn, TAMQPMethodFrame(TAMQPFrameProperties(context_chan_id,0), TAMQPMethodPayload(context_class, :Close, (TAMQPReplyCode(reply_code), TAMQPReplyText(reply_text), TAMQPClassId(class_id), TAMQPMethodId(method_id)))))
# Connection.CloseOk: acknowledge / react to a connection close handshake.
send_connection_close_ok(chan::MessageChannel) = _send_close_ok(:Connection, chan)
on_connection_close_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_close_ok(:Connection, chan, m, ctx)
# Peer initiated a connection close: record the reason, reply with CloseOk,
# give the sender task a moment to flush it, then release resources.
function on_connection_close(chan::MessageChannel, m::TAMQPMethodFrame, ctx)
    @assert is_method(m, :Connection, :Close)
    @assert chan.id == DEFAULT_CHANNEL
    chan.closereason = CloseReason(m.payload.fields[1].second, m.payload.fields[2].second, m.payload.fields[3].second, m.payload.fields[4].second)
    send_connection_close_ok(chan)
    t1 = time()
    while isready(chan.conn.sendq) && ((time() - t1) < 5)
        yield() # wait up to 5 seconds (arbitrary) for the CloseOk to get sent
    end
    close(chan, false, true)
end
# Peer initiated a channel close: record the reason, reply with CloseOk,
# then release the channel's resources.
function on_channel_close(chan::MessageChannel, m::TAMQPMethodFrame, ctx)
    @assert is_method(m, :Channel, :Close)
    @assert chan.id != DEFAULT_CHANNEL
    chan.closereason = CloseReason(m.payload.fields[1].second, m.payload.fields[2].second, m.payload.fields[3].second, m.payload.fields[4].second)
    send_channel_close_ok(chan)
    close(chan, false, true)
end
# Initiate a Connection.Close from a channel or directly from a connection.
send_connection_close(chan::MessageChannel, reply_code=ReplySuccess, reply_text="", class_id=0, method_id=0) = _send_close(:Connection, chan, reply_code, reply_text, class_id, method_id)
send_connection_close(conn::Connection, reply_code=ReplySuccess, reply_text="", class_id=0, method_id=0) = _send_close(:Connection, 0, conn, reply_code, reply_text, class_id, method_id)
# Channel.Close handshake helpers.
send_channel_close_ok(chan::MessageChannel) = _send_close_ok(:Channel, chan)
on_channel_close_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_close_ok(:Channel, chan, m, ctx)
send_channel_close(chan::MessageChannel, reply_code=ReplySuccess, reply_text="", class_id=0, method_id=0) = _send_close(:Channel, chan, reply_code, reply_text, class_id, method_id)
# Connection.Start received: capture server properties/capabilities, then
# continue the handshake by registering the Tune handler and sending StartOk.
function on_connection_start(chan::MessageChannel, m::TAMQPMethodFrame, ctx)
    @assert is_method(m, :Connection, :Start)
    @assert chan.id == DEFAULT_CHANNEL
    conn = chan.conn
    # setup server properties and capabilities
    merge!(conn.properties, Dict{Symbol,Any}(Symbol(n)=>simplify(v) for (n,v) in m.payload.fields))
    server_props = simplify(get_property(chan, :ServerProperties, TAMQPFieldTable(Dict{String,Any}())))
    if "capabilities" in keys(server_props)
        merge!(conn.capabilities, server_props["capabilities"])
    end
    # deregister this one-shot Start handler
    handle(chan, :Connection, :Start)
    auth_params = ctx[:auth_params]
    delete!(ctx, :auth_params) # we don't need auth params any more
    handle(chan, :Connection, :Tune, on_connection_tune, ctx)
    send_connection_start_ok(chan, auth_params)
    nothing
end
# Reply to Connection.Start with Connection.StartOk: advertise client
# capabilities, pick an auth mechanism and locale, and send the auth response.
function send_connection_start_ok(chan::MessageChannel, auth_params::Dict{String,Any})
    conn = chan.conn
    # set up client_props, mirroring optional capabilities the server advertised
    client_props = copy(CLIENT_IDENTIFICATION)
    client_cap = client_props["capabilities"]
    server_cap = conn.capabilities
    @debug("server capabilities", server_cap)
    if "consumer_cancel_notify" in keys(server_cap)
        client_cap["consumer_cancel_notify"] = server_cap["consumer_cancel_notify"]
    end
    if "connection.blocked" in keys(server_cap)
        client_cap["connection.blocked"] = server_cap["connection.blocked"]
    end
    @debug("client_props", client_props)
    # assert that the requested auth mechanism is among those the server supports
    mechanism = auth_params["MECHANISM"]
    mechanisms = split(get_property(chan, :Mechanisms, ""), ' ')
    @debug("checking auth mechanism", mechanism, supported=mechanisms)
    @assert mechanism in mechanisms
    # set up locale: pick the first of the server-advertised locales
    locales = split(get_property(chan, :Locales, ""), ' ')
    @debug("supported locales", locales)
    client_locale = locales[1]
    @debug("client_locale", client_locale)
    # respond to login with the mechanism-specific auth payload
    auth_resp = AUTH_PROVIDERS[mechanism](auth_params)
    @debug("auth_resp", auth_resp)
    send(chan, TAMQPMethodPayload(:Connection, :StartOk, (client_props, mechanism, auth_resp, client_locale)))
    nothing
end
# Connection.Tune received: record the server's proposed limits, reply with
# TuneOk (negotiated values), and proceed to Connection.Open.
function on_connection_tune(chan::MessageChannel, m::TAMQPMethodFrame, ctx)
    @assert is_method(m, :Connection, :Tune)
    @assert chan.id == DEFAULT_CHANNEL
    conn = chan.conn
    conn.channelmax = m.payload.fields[1].second
    conn.framemax = m.payload.fields[2].second
    conn.heartbeat = m.payload.fields[3].second
    @debug("got_connection_tune", channelmax=conn.channelmax, framemax=conn.framemax, heartbeat=conn.heartbeat)
    # start accepting heartbeat frames from the server
    handle(chan, FrameHeartbeat, on_connection_heartbeat)
    send_connection_tune_ok(chan, ctx[:channelmax], ctx[:framemax], ctx[:heartbeat])
    # deregister this one-shot Tune handler and move on to the Open step
    handle(chan, :Connection, :Tune)
    handle(chan, :Connection, :OpenOk, on_connection_open_ok, ctx)
    send_connection_open(chan)
    nothing
end
# Negotiate connection limits with the server and send Connection.TuneOk.
# Starts the heartbeat task when heartbeats are enabled.
function send_connection_tune_ok(chan::MessageChannel, channelmax=0, framemax=0, heartbeat=0)
    conn = chan.conn
    # negotiate: min of what both parties expect when both specify a limit,
    # otherwise whichever side specified one (0 means "no preference")
    function opt(desired_param, limited_param)
        if desired_param > 0 && limited_param > 0
            min(desired_param, limited_param)
        else
            max(desired_param, limited_param)
        end
    end
    conn.channelmax = opt(channelmax, conn.channelmax)
    conn.framemax = opt(framemax, conn.framemax)
    conn.heartbeat = conn.enable_heartbeat ? opt(heartbeat, conn.heartbeat) : 0
    @debug("send_connection_tune_ok", channelmax=conn.channelmax, framemax=conn.framemax, heartbeat=conn.heartbeat)
    send(chan, TAMQPMethodPayload(:Connection, :TuneOk, (conn.channelmax, conn.framemax, conn.heartbeat)))
    if conn.enable_heartbeat
        # start heartbeat timer
        conn.heartbeater = @async connection_processor(conn, "HeartBeater", connection_heartbeater)
    end
    nothing
end
# Send Connection.Open for the configured virtual host.
send_connection_open(chan::MessageChannel) = send(chan, TAMQPMethodPayload(:Connection, :Open, (chan.conn.virtualhost, "", false)))
# Connection.OpenOk received: handshake complete — mark connection and control
# channel open and install the standing Close/CloseOk handlers.
function on_connection_open_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx)
    @assert is_method(m, :Connection, :OpenOk)
    @assert chan.id == DEFAULT_CHANNEL
    conn = chan.conn
    conn.state = CONN_STATE_OPEN
    chan.state = CONN_STATE_OPEN
    handle(chan, :Connection, :Close, on_connection_close, ctx)
    handle(chan, :Connection, :CloseOk, on_connection_close_ok, ctx)
    # deregister this one-shot OpenOk handler
    handle(chan, :Connection, :OpenOk)
    nothing
end
# Emit a heartbeat frame; incoming heartbeats need no action.
send_connection_heartbeat(conn::Connection) = send(conn, TAMQPHeartBeatFrame())
on_connection_heartbeat(chan::MessageChannel, h::TAMQPHeartBeatFrame, ctx) = nothing
# Channel.Open (empty reserved field) and Channel.Flow (pause/resume deliveries).
send_channel_open(chan::MessageChannel) = send(chan, TAMQPMethodPayload(:Channel, :Open, ("",)))
send_channel_flow(chan::MessageChannel, flow::Bool) = send(chan, TAMQPMethodPayload(:Channel, :Flow, (flow,)))
# Channel.OpenOk received: mark the channel open and install the standing
# handlers for flow control, close handshake, message delivery methods and
# content (header/body) frames.
function on_channel_open_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx)
    chan.state = CONN_STATE_OPEN
    handle(chan, :Channel, :Flow, on_channel_flow, :Flow)
    handle(chan, :Channel, :FlowOk, on_channel_flow, :FlowOk)
    handle(chan, :Channel, :Close, on_channel_close)
    handle(chan, :Channel, :CloseOk, on_channel_close_ok)
    handle(chan, :Basic, :GetOk, on_basic_get_empty_or_ok)
    handle(chan, :Basic, :GetEmpty, on_basic_get_empty_or_ok)
    handle(chan, :Basic, :Deliver, on_basic_get_empty_or_ok)
    handle(chan, FrameHeader, on_channel_message_in)
    handle(chan, FrameBody, on_channel_message_in)
    nothing
end
# Channel.Flow / Channel.FlowOk received (ctx carries which one): record the
# new flow state on the channel.
function on_channel_flow(chan::MessageChannel, m::TAMQPMethodFrame, ctx)
    @assert is_method(m, :Channel, ctx)
    chan.flow = m.payload.fields[1].second
    @debug("on_channel_flow", channel=chan.id, flow=chan.flow)
    nothing
end
# Exchange class senders (leading 0 is the reserved "ticket" field).
send_exchange_declare(chan::MessageChannel, name::String, typ::String, passive::Bool, durable::Bool, auto_delete::Bool, nowait::Bool, arguments::Dict{String,Any}) =
    send(chan, TAMQPMethodPayload(:Exchange, :Declare, (0, name, typ, passive, durable, auto_delete, false, nowait, arguments)))
send_exchange_delete(chan::MessageChannel, name::String, if_unused::Bool, nowait::Bool) = send(chan, TAMQPMethodPayload(:Exchange, :Delete, (0, name, if_unused, nowait)))
# Bind and Unbind share the same payload layout; `meth` selects which is sent.
_send_exchange_bind_unbind(chan::MessageChannel, meth::Symbol, dest::String, src::String, routing_key::String, nowait::Bool, arguments::Dict{String,Any}) =
    send(chan, TAMQPMethodPayload(:Exchange, meth, (0, dest, src, routing_key, nowait, arguments)))
send_exchange_bind(chan::MessageChannel, dest::String, src::String, routing_key::String, nowait::Bool, arguments::Dict{String,Any}) = _send_exchange_bind_unbind(chan, :Bind, dest, src, routing_key, nowait, arguments)
send_exchange_unbind(chan::MessageChannel, dest::String, src::String, routing_key::String, nowait::Bool, arguments::Dict{String,Any}) = _send_exchange_bind_unbind(chan, :Unbind, dest, src, routing_key, nowait, arguments)
# Exchange *Ok handlers simply acknowledge via the generic _on_ack.
on_exchange_declare_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_ack(chan, m, :Exchange, :DeclareOk, ctx)
on_exchange_delete_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_ack(chan, m, :Exchange, :DeleteOk, ctx)
on_exchange_bind_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_ack(chan, m, :Exchange, :BindOk, ctx)
on_exchange_unbind_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_ack(chan, m, :Exchange, :UnbindOk, ctx)
# Queue class senders.
send_queue_declare(chan::MessageChannel, name::String, passive::Bool, durable::Bool, exclusive::Bool, auto_delete::Bool, nowait::Bool, arguments::Dict{String,Any}) =
    send(chan, TAMQPMethodPayload(:Queue, :Declare, (0, name, passive, durable, exclusive, auto_delete, nowait, arguments)))
send_queue_bind(chan::MessageChannel, queue_name::String, excg_name::String, routing_key::String, nowait::Bool, arguments::Dict{String,Any}) =
    send(chan, TAMQPMethodPayload(:Queue, :Bind, (0, queue_name, excg_name, routing_key, nowait, arguments)))
send_queue_unbind(chan::MessageChannel, queue_name::String, excg_name::String, routing_key::String, arguments::Dict{String,Any}) = send(chan, TAMQPMethodPayload(:Queue, :Unbind, (0, queue_name, excg_name, routing_key, arguments)))
send_queue_purge(chan::MessageChannel, name::String, nowait::Bool) = send(chan, TAMQPMethodPayload(:Queue, :Purge, (0, name, nowait)))
send_queue_delete(chan::MessageChannel, name::String, if_unused::Bool, if_empty::Bool, nowait::Bool) = send(chan, TAMQPMethodPayload(:Queue, :Delete, (0, name, if_unused, if_empty, nowait)))
# Queue.DeclareOk received: report (true, queue_name, message_count, consumer_count)
# to any waiter and deregister the one-shot callback.
function on_queue_declare_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx)
    @assert is_method(m, :Queue, :DeclareOk)
    if ctx !== nothing
        fields = m.payload.fields
        qname = convert(String, fields[1].second)
        put!(ctx, (true, qname, fields[2].second, fields[3].second))
    end
    handle(chan, :Queue, :DeclareOk)
    nothing
end
# Shared handler for Queue.PurgeOk / Queue.DeleteOk: report (true, message_count)
# to any waiter and deregister the one-shot callback.
function _on_queue_purge_delete_ok(method::Symbol, chan::MessageChannel, m::TAMQPMethodFrame, ctx)
    @assert is_method(m, :Queue, method)
    if ctx !== nothing
        put!(ctx, (true, m.payload.fields[1].second))
    end
    handle(chan, :Queue, method)
    nothing
end
# Queue *Ok handlers: purge/delete report a message count, bind/unbind are simple acks.
on_queue_purge_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_queue_purge_delete_ok(:PurgeOk, chan, m, ctx)
on_queue_delete_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_queue_purge_delete_ok(:DeleteOk, chan, m, ctx)
on_queue_bind_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_ack(chan, m, :Queue, :BindOk, ctx)
on_queue_unbind_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_ack(chan, m, :Queue, :UnbindOk, ctx)
# Tx class senders (all Tx methods carry no fields) and their *Ok handlers.
_send_tx(chan::MessageChannel, method::Symbol) = send(chan, TAMQPMethodPayload(:Tx, method, ()))
send_tx_select(chan::MessageChannel) = _send_tx(chan, :Select)
send_tx_commit(chan::MessageChannel) = _send_tx(chan, :Commit)
send_tx_rollback(chan::MessageChannel) = _send_tx(chan, :Rollback)
on_tx_select_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_ack(chan, m, :Tx, :SelectOk, ctx)
on_tx_commit_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_ack(chan, m, :Tx, :CommitOk, ctx)
on_tx_rollback_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_ack(chan, m, :Tx, :RollbackOk, ctx)
# Basic class senders (leading 0 in Consume/Publish/Get is the reserved "ticket" field).
send_basic_qos(chan::MessageChannel, prefetch_size, prefetch_count, apply_global::Bool) = send(chan, TAMQPMethodPayload(:Basic, :Qos, (prefetch_size, prefetch_count, apply_global)))
send_basic_consume(chan::MessageChannel, queue::String, consumer_tag::String, no_local::Bool, no_ack::Bool, exclusive::Bool, nowait::Bool, arguments::Dict{String,Any}) =
    send(chan, TAMQPMethodPayload(:Basic, :Consume, (0, queue, consumer_tag, no_local, no_ack, exclusive, nowait, arguments)))
send_basic_cancel(chan::MessageChannel, consumer_tag::String, nowait::Bool) = send(chan, TAMQPMethodPayload(:Basic, :Cancel, (consumer_tag, nowait)))
# Publish carries the message content alongside the method payload.
send_basic_publish(chan::MessageChannel, msg::Message, exchange::String, routing_key::String, mandatory::Bool=false, immediate::Bool=false) =
    send(chan, TAMQPMethodPayload(:Basic, :Publish, (0, exchange, routing_key, mandatory, immediate)), msg)
send_basic_get(chan::MessageChannel, queue::String, no_ack::Bool) = send(chan, TAMQPMethodPayload(:Basic, :Get, (0, queue, no_ack)))
send_basic_ack(chan::MessageChannel, delivery_tag::TAMQPDeliveryTag, all_upto::Bool) = send(chan, TAMQPMethodPayload(:Basic, :Ack, (delivery_tag, all_upto)))
send_basic_reject(chan::MessageChannel, delivery_tag::TAMQPDeliveryTag, requeue::Bool) = send(chan, TAMQPMethodPayload(:Basic, :Reject, (delivery_tag, requeue)))
# The async flag selects the RecoverAsync variant (fire-and-forget).
send_basic_recover(chan::MessageChannel, requeue::Bool, async::Bool) = send(chan, TAMQPMethodPayload(:Basic, async ? :RecoverAsync : :Recover, (requeue,)))
on_basic_qos_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_ack(chan, m, :Basic, :QosOk, ctx)
# Shared handler for Basic.ConsumeOk and Basic.CancelOk.
# Reports (success, consumer_tag) to the waiting context (if any) and
# dispatches the method to the channel's registered handler.
function _on_basic_consume_cancel_ok(method::Symbol, chan::MessageChannel, m::TAMQPMethodFrame, ctx)
    @assert is_method(m, :Basic, method)
    # the first field of both ConsumeOk and CancelOk is the consumer tag
    ctx === nothing || put!(ctx, (true, convert(String, m.payload.fields[1].second)))
    handle(chan, :Basic, method)
    nothing
end
# Basic.ConsumeOk / Basic.CancelOk both report the consumer tag back to the waiter.
on_basic_consume_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_basic_consume_cancel_ok(:ConsumeOk, chan, m, ctx)
on_basic_cancel_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_basic_consume_cancel_ok(:CancelOk, chan, m, ctx)
on_basic_recover_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_ack(chan, m, :Basic, :RecoverOk, ctx)
# Handle Basic.GetEmpty, Basic.GetOk and Basic.Deliver.
# GetEmpty pushes the empty-response marker onto the channel's get queue.
# GetOk/Deliver begin a new partial message; its content header and body
# frames follow and complete it via `on_channel_message_in`.
# (Fix: the GetOk branch previously allocated a second `Message(UInt8[])`,
# discarding the one created just above — redundant dead store removed.)
function on_basic_get_empty_or_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx)
    if is_method(m, :Basic, :GetEmpty)
        put!(chan.chan_get, GET_EMPTY_RESP)
    else
        msg = Message(UInt8[])
        if is_method(m, :Basic, :Deliver)
            # Deliver fields: consumer-tag, delivery-tag, redelivered, exchange, routing-key
            msg.consumer_tag = m.payload.fields[1].second
            msg.delivery_tag = m.payload.fields[2].second
            msg.redelivered = convert(Bool, m.payload.fields[3].second)
            msg.exchange = convert(String, m.payload.fields[4].second)
            msg.routing_key = convert(String, m.payload.fields[5].second)
        else
            # GetOk fields: delivery-tag, redelivered, exchange, routing-key, message-count
            msg.delivery_tag = m.payload.fields[1].second
            msg.redelivered = convert(Bool, m.payload.fields[2].second)
            msg.exchange = convert(String, m.payload.fields[3].second)
            msg.routing_key = convert(String, m.payload.fields[4].second)
            msg.remaining = m.payload.fields[5].second
        end
        # wait for message header and body frames
        push!(chan.partial_msgs, msg)
    end
    nothing
end
# Route a fully-received message: to the `basic_get` waiter when there is no
# consumer tag, to the registered consumer's receive queue, or to a holding
# queue when no consumer with that tag has registered yet.
# Callers pass `msg === last(chan.partial_msgs)` (see `on_channel_message_in`).
# Fix: the "no consumer yet" branch previously did NOT remove the completed
# message from `chan.partial_msgs` (unlike the other two branches), so held
# messages leaked in the partials list; it is now popped in all branches.
function on_channel_message_completed(chan::MessageChannel, msg::Message)
    # got all data for msg
    if isempty(msg.consumer_tag)
        put!(chan.chan_get, pop!(chan.partial_msgs))
    else
        lock(chan.lck) do
            completed = pop!(chan.partial_msgs)
            if haskey(chan.consumers, msg.consumer_tag)
                put!(chan.consumers[msg.consumer_tag].recvq, completed)
            else
                # hold the message until a consumer with this tag registers
                put!(get!(()->Channel{Message}(typemax(Int)), chan.pending_msgs, msg.consumer_tag), completed)
                @debug("holding message, no consumer yet with tag", tag=msg.consumer_tag)
            end
        end
    end
    nothing
end
# A content header frame arrived for the in-progress (last partial) message:
# attach its properties and allocate the body buffer announced by bodysize.
function on_channel_message_in(chan::MessageChannel, m::TAMQPContentHeaderFrame, ctx)
    partial = last(chan.partial_msgs)
    partial.properties = m.hdrpayload.proplist
    nbytes = m.hdrpayload.bodysize
    partial.data = Vector{UInt8}(undef, nbytes)
    partial.filled = 0
    # zero-length bodies have no body frames, so the message is complete already
    (nbytes == 0) && on_channel_message_completed(chan, partial)
    nothing
end
# Accumulate one content body frame into the in-progress (last partial)
# message; a message body may span multiple body frames. Completes the
# message once `filled` reaches the announced body size.
function on_channel_message_in(chan::MessageChannel, m::TAMQPContentBodyFrame, ctx)
    msg = last(chan.partial_msgs)
    data = m.payload.data
    startpos = msg.filled + 1
    # NOTE(review): endpos is clamped to the buffer size, but the ranged
    # assignment below requires length(data) == endpos - startpos + 1; a peer
    # sending more body bytes than the announced bodysize would raise a
    # DimensionMismatch here — confirm that is the intended failure mode.
    endpos = min(length(msg.data), msg.filled + length(data))
    msg.data[startpos:endpos] = data
    msg.filled = endpos
    if msg.filled >= length(msg.data)
        # got all data for msg
        on_channel_message_completed(chan, msg)
    end
    nothing
end
# Confirm.SelectOk: broker acknowledged enabling publisher confirms on this channel.
on_confirm_select_ok(chan::MessageChannel, m::TAMQPMethodFrame, ctx) = _on_ack(chan, m, :Confirm, :SelectOk, ctx)
# ----------------------------------------
# send and recv for methods end
# ----------------------------------------
| AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
# One-line summary of frame properties: channel number and payload size.
function show(io::IO, p::TAMQPFrameProperties)
    print(io, "Channel ", p.channel, ", Size ", p.payloadsize, " bytes")
end
# Display a method payload as its "Class.Method" name.
show(io::IO, p::TAMQPMethodPayload) = print(io, displayname(p.class, p.method))
# Summary line for a method frame; expands the full field list only for
# IOContexts that did not request a limited (:limit => true) display.
function show(io::IO, m::TAMQPMethodFrame)
    nflds = length(m.payload.fields)
    print(io, "MethodFrame ", m.payload, "(", nflds, " fields...)")
    if isa(io, IOContext) && !((:limit => true) in io)
        print(io, '\n')
        show(io, m.payload.fields)
    end
end
# A field value displays as its wrapped payload.
show(io::IO, f::TAMQPFieldValue) = show(io, f.fld)
# Display "name => value", honoring any :indent pushed by an enclosing table.
function show(io::IO, f::TAMQPFieldValuePair)
    pad = get(io, :indent, "")
    print(io, pad)
    show(io, f.name)
    print(io, " => ")
    show(io, f.val)
end
# Display a field table as "FieldTable" followed by one line per entry,
# each indented one level deeper than the current :indent.
function show(io::IO, f::TAMQPFieldTable)
    pad = get(io, :indent, "")
    println(io, "FieldTable")
    inner = IOContext(io, :indent => (pad * " "))
    firstpair = true
    for fpair in f.data
        firstpair || print(inner, '\n')
        show(inner, fpair)
        firstpair = false
    end
end
# Short and long AMQP strings display as plain Julia strings.
show(io::IO, s::Union{TAMQPShortStr,TAMQPLongStr}) = print(io, convert(String, s))
# Display a method's decoded field list, one "name => value" per line,
# indented one level deeper than the current :indent.
function show(io::IO, fields::Vector{Pair{Symbol,TAMQPField}})
    pad = get(io, :indent, "")
    println(io, pad, "Fields:")
    pad = pad * " "
    inner = IOContext(io, :indent => pad)
    firstfld = true
    for fld in fields
        firstfld || print(inner, '\n')
        print(inner, pad)
        show(inner, fld.first)
        print(inner, " => ")
        show(inner, fld.second)
        firstfld = false
    end
end
| AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | code | 14314 | # Source: amqp0-9-1.extended.xml
# This file is automatically generated.
# Do not edit this file by hand.
# Make changes to gen.jl or the source specification instead.
const AMQP_VERSION = v"0.9.1"
const AMQP_DEFAULT_PORT = 5672
const AMQPS_DEFAULT_PORT = 5671
# Constants
const FrameMethod = 1
const FrameHeader = 2
const FrameBody = 3
const FrameHeartbeat = 8
const FrameMinSize = 4096
const ReplySuccess = 200
# Error Codes
const SoftErrorContentTooLarge = 311
const SoftErrorNoConsumers = 313
const HardErrorConnectionForced = 320
const HardErrorInvalidPath = 402
const SoftErrorAccessRefused = 403
const SoftErrorNotFound = 404
const SoftErrorResourceLocked = 405
const SoftErrorPreconditionFailed = 406
const HardErrorFrameError = 501
const HardErrorSyntaxError = 502
const HardErrorCommandInvalid = 503
const HardErrorChannelError = 504
const HardErrorUnexpectedFrame = 505
const HardErrorResourceError = 506
const HardErrorNotAllowed = 530
const HardErrorNotImplemented = 540
const HardErrorInternalError = 541
# Domains
const TAMQPConsumerTag = TAMQPShortStr
const TAMQPDeliveryTag = Int64
const TAMQPExchangeName = TAMQPShortStr
const TAMQPNoAck = TAMQPBit
const TAMQPNoLocal = TAMQPBit
const TAMQPNoWait = TAMQPBit
const TAMQPPath = TAMQPShortStr
const TAMQPPeerProperties = TAMQPFieldTable
const TAMQPQueueName = TAMQPShortStr
const TAMQPRedelivered = TAMQPBit
const TAMQPMessageCount = Int32
const TAMQPReplyCode = Int16
const TAMQPReplyText = TAMQPShortStr
# end Domains
# Classes
const CLASS_MAP = Dict{TAMQPClassId,ClassSpec}(
10 => ClassSpec(10, :Connection, Dict{TAMQPMethodId, MethodSpec}(
10 => MethodSpec(10, :Start, :StartOk, Pair{Symbol,DataType}[
:VersionMajor => UInt8
, :VersionMinor => UInt8
, :ServerProperties => TAMQPPeerProperties
, :Mechanisms => TAMQPLongStr
, :Locales => TAMQPLongStr
]) # method Start
, 11 => MethodSpec(11, :StartOk, :Nothing, Pair{Symbol,DataType}[
:ClientProperties => TAMQPPeerProperties
, :Mechanism => TAMQPShortStr
, :Response => TAMQPLongStr
, :Locale => TAMQPShortStr
]) # method StartOk
, 20 => MethodSpec(20, :Secure, :SecureOk, Pair{Symbol,DataType}[
:Challenge => TAMQPLongStr
]) # method Secure
, 21 => MethodSpec(21, :SecureOk, :Nothing, Pair{Symbol,DataType}[
:Response => TAMQPLongStr
]) # method SecureOk
, 30 => MethodSpec(30, :Tune, :TuneOk, Pair{Symbol,DataType}[
:ChannelMax => Int16
, :FrameMax => Int32
, :Heartbeat => Int16
]) # method Tune
, 31 => MethodSpec(31, :TuneOk, :Nothing, Pair{Symbol,DataType}[
:ChannelMax => Int16
, :FrameMax => Int32
, :Heartbeat => Int16
]) # method TuneOk
, 40 => MethodSpec(40, :Open, :OpenOk, Pair{Symbol,DataType}[
:VirtualHost => TAMQPPath
, :Reserved1 => TAMQPShortStr
, :Reserved2 => TAMQPBit
]) # method Open
, 41 => MethodSpec(41, :OpenOk, :Nothing, Pair{Symbol,DataType}[
:Reserved1 => TAMQPShortStr
]) # method OpenOk
, 50 => MethodSpec(50, :Close, :CloseOk, Pair{Symbol,DataType}[
:ReplyCode => TAMQPReplyCode
, :ReplyText => TAMQPReplyText
, :ClassId => UInt16
, :MethodId => UInt16
]) # method Close
, 51 => MethodSpec(51, :CloseOk, :Nothing, Pair{Symbol,DataType}[
]) # method CloseOk
, 60 => MethodSpec(60, :Blocked, :Nothing, Pair{Symbol,DataType}[
:Reason => TAMQPShortStr
]) # method Blocked
, 61 => MethodSpec(61, :Unblocked, :Nothing, Pair{Symbol,DataType}[
]) # method Unblocked
)) # class Connection
, 20 => ClassSpec(20, :Channel, Dict{TAMQPMethodId, MethodSpec}(
10 => MethodSpec(10, :Open, :OpenOk, Pair{Symbol,DataType}[
:Reserved1 => TAMQPShortStr
]) # method Open
, 11 => MethodSpec(11, :OpenOk, :Nothing, Pair{Symbol,DataType}[
:Reserved1 => TAMQPLongStr
]) # method OpenOk
, 20 => MethodSpec(20, :Flow, :FlowOk, Pair{Symbol,DataType}[
:Active => TAMQPBit
]) # method Flow
, 21 => MethodSpec(21, :FlowOk, :Nothing, Pair{Symbol,DataType}[
:Active => TAMQPBit
]) # method FlowOk
, 40 => MethodSpec(40, :Close, :CloseOk, Pair{Symbol,DataType}[
:ReplyCode => TAMQPReplyCode
, :ReplyText => TAMQPReplyText
, :ClassId => UInt16
, :MethodId => UInt16
]) # method Close
, 41 => MethodSpec(41, :CloseOk, :Nothing, Pair{Symbol,DataType}[
]) # method CloseOk
)) # class Channel
, 40 => ClassSpec(40, :Exchange, Dict{TAMQPMethodId, MethodSpec}(
10 => MethodSpec(10, :Declare, :DeclareOk, Pair{Symbol,DataType}[
:Reserved1 => Int16
, :Exchange => TAMQPExchangeName
, :Type => TAMQPShortStr
, :Passive => TAMQPBit
, :Durable => TAMQPBit
, :AutoDelete => TAMQPBit
, :Internal => TAMQPBit
, :NoWait => TAMQPNoWait
, :Arguments => TAMQPFieldTable
]) # method Declare
, 11 => MethodSpec(11, :DeclareOk, :Nothing, Pair{Symbol,DataType}[
]) # method DeclareOk
, 20 => MethodSpec(20, :Delete, :DeleteOk, Pair{Symbol,DataType}[
:Reserved1 => Int16
, :Exchange => TAMQPExchangeName
, :IfUnused => TAMQPBit
, :NoWait => TAMQPNoWait
]) # method Delete
, 21 => MethodSpec(21, :DeleteOk, :Nothing, Pair{Symbol,DataType}[
]) # method DeleteOk
, 30 => MethodSpec(30, :Bind, :BindOk, Pair{Symbol,DataType}[
:Reserved1 => Int16
, :Destination => TAMQPExchangeName
, :Source => TAMQPExchangeName
, :RoutingKey => TAMQPShortStr
, :NoWait => TAMQPNoWait
, :Arguments => TAMQPFieldTable
]) # method Bind
, 31 => MethodSpec(31, :BindOk, :Nothing, Pair{Symbol,DataType}[
]) # method BindOk
, 40 => MethodSpec(40, :Unbind, :UnbindOk, Pair{Symbol,DataType}[
:Reserved1 => Int16
, :Destination => TAMQPExchangeName
, :Source => TAMQPExchangeName
, :RoutingKey => TAMQPShortStr
, :NoWait => TAMQPNoWait
, :Arguments => TAMQPFieldTable
]) # method Unbind
, 51 => MethodSpec(51, :UnbindOk, :Nothing, Pair{Symbol,DataType}[
]) # method UnbindOk
)) # class Exchange
, 50 => ClassSpec(50, :Queue, Dict{TAMQPMethodId, MethodSpec}(
10 => MethodSpec(10, :Declare, :DeclareOk, Pair{Symbol,DataType}[
:Reserved1 => Int16
, :Queue => TAMQPQueueName
, :Passive => TAMQPBit
, :Durable => TAMQPBit
, :Exclusive => TAMQPBit
, :AutoDelete => TAMQPBit
, :NoWait => TAMQPNoWait
, :Arguments => TAMQPFieldTable
]) # method Declare
, 11 => MethodSpec(11, :DeclareOk, :Nothing, Pair{Symbol,DataType}[
:Queue => TAMQPQueueName
, :MessageCount => TAMQPMessageCount
, :ConsumerCount => Int32
]) # method DeclareOk
, 20 => MethodSpec(20, :Bind, :BindOk, Pair{Symbol,DataType}[
:Reserved1 => Int16
, :Queue => TAMQPQueueName
, :Exchange => TAMQPExchangeName
, :RoutingKey => TAMQPShortStr
, :NoWait => TAMQPNoWait
, :Arguments => TAMQPFieldTable
]) # method Bind
, 21 => MethodSpec(21, :BindOk, :Nothing, Pair{Symbol,DataType}[
]) # method BindOk
, 50 => MethodSpec(50, :Unbind, :UnbindOk, Pair{Symbol,DataType}[
:Reserved1 => Int16
, :Queue => TAMQPQueueName
, :Exchange => TAMQPExchangeName
, :RoutingKey => TAMQPShortStr
, :Arguments => TAMQPFieldTable
]) # method Unbind
, 51 => MethodSpec(51, :UnbindOk, :Nothing, Pair{Symbol,DataType}[
]) # method UnbindOk
, 30 => MethodSpec(30, :Purge, :PurgeOk, Pair{Symbol,DataType}[
:Reserved1 => Int16
, :Queue => TAMQPQueueName
, :NoWait => TAMQPNoWait
]) # method Purge
, 31 => MethodSpec(31, :PurgeOk, :Nothing, Pair{Symbol,DataType}[
:MessageCount => TAMQPMessageCount
]) # method PurgeOk
, 40 => MethodSpec(40, :Delete, :DeleteOk, Pair{Symbol,DataType}[
:Reserved1 => Int16
, :Queue => TAMQPQueueName
, :IfUnused => TAMQPBit
, :IfEmpty => TAMQPBit
, :NoWait => TAMQPNoWait
]) # method Delete
, 41 => MethodSpec(41, :DeleteOk, :Nothing, Pair{Symbol,DataType}[
:MessageCount => TAMQPMessageCount
]) # method DeleteOk
)) # class Queue
, 60 => ClassSpec(60, :Basic, Dict{TAMQPMethodId, MethodSpec}(
10 => MethodSpec(10, :Qos, :QosOk, Pair{Symbol,DataType}[
:PrefetchSize => Int32
, :PrefetchCount => Int16
, :Global => TAMQPBit
]) # method Qos
, 11 => MethodSpec(11, :QosOk, :Nothing, Pair{Symbol,DataType}[
]) # method QosOk
, 20 => MethodSpec(20, :Consume, :ConsumeOk, Pair{Symbol,DataType}[
:Reserved1 => Int16
, :Queue => TAMQPQueueName
, :ConsumerTag => TAMQPConsumerTag
, :NoLocal => TAMQPNoLocal
, :NoAck => TAMQPNoAck
, :Exclusive => TAMQPBit
, :NoWait => TAMQPNoWait
, :Arguments => TAMQPFieldTable
]) # method Consume
, 21 => MethodSpec(21, :ConsumeOk, :Nothing, Pair{Symbol,DataType}[
:ConsumerTag => TAMQPConsumerTag
]) # method ConsumeOk
, 30 => MethodSpec(30, :Cancel, :CancelOk, Pair{Symbol,DataType}[
:ConsumerTag => TAMQPConsumerTag
, :NoWait => TAMQPNoWait
]) # method Cancel
, 31 => MethodSpec(31, :CancelOk, :Nothing, Pair{Symbol,DataType}[
:ConsumerTag => TAMQPConsumerTag
]) # method CancelOk
, 40 => MethodSpec(40, :Publish, :Nothing, Pair{Symbol,DataType}[
:Reserved1 => Int16
, :Exchange => TAMQPExchangeName
, :RoutingKey => TAMQPShortStr
, :Mandatory => TAMQPBit
, :Immediate => TAMQPBit
]) # method Publish
, 50 => MethodSpec(50, :Return, :Nothing, Pair{Symbol,DataType}[
:ReplyCode => TAMQPReplyCode
, :ReplyText => TAMQPReplyText
, :Exchange => TAMQPExchangeName
, :RoutingKey => TAMQPShortStr
]) # method Return
, 60 => MethodSpec(60, :Deliver, :Nothing, Pair{Symbol,DataType}[
:ConsumerTag => TAMQPConsumerTag
, :DeliveryTag => TAMQPDeliveryTag
, :Redelivered => TAMQPRedelivered
, :Exchange => TAMQPExchangeName
, :RoutingKey => TAMQPShortStr
]) # method Deliver
, 70 => MethodSpec(70, :Get, :GetOk, Pair{Symbol,DataType}[
:Reserved1 => Int16
, :Queue => TAMQPQueueName
, :NoAck => TAMQPNoAck
]) # method Get
, 71 => MethodSpec(71, :GetOk, :Nothing, Pair{Symbol,DataType}[
:DeliveryTag => TAMQPDeliveryTag
, :Redelivered => TAMQPRedelivered
, :Exchange => TAMQPExchangeName
, :RoutingKey => TAMQPShortStr
, :MessageCount => TAMQPMessageCount
]) # method GetOk
, 72 => MethodSpec(72, :GetEmpty, :Nothing, Pair{Symbol,DataType}[
:Reserved1 => TAMQPShortStr
]) # method GetEmpty
, 80 => MethodSpec(80, :Ack, :Nothing, Pair{Symbol,DataType}[
:DeliveryTag => TAMQPDeliveryTag
, :Multiple => TAMQPBit
]) # method Ack
, 90 => MethodSpec(90, :Reject, :Nothing, Pair{Symbol,DataType}[
:DeliveryTag => TAMQPDeliveryTag
, :Requeue => TAMQPBit
]) # method Reject
, 100 => MethodSpec(100, :RecoverAsync, :Nothing, Pair{Symbol,DataType}[
:Requeue => TAMQPBit
]) # method RecoverAsync
, 110 => MethodSpec(110, :Recover, :Nothing, Pair{Symbol,DataType}[
:Requeue => TAMQPBit
]) # method Recover
, 111 => MethodSpec(111, :RecoverOk, :Nothing, Pair{Symbol,DataType}[
]) # method RecoverOk
, 120 => MethodSpec(120, :Nack, :Nothing, Pair{Symbol,DataType}[
:DeliveryTag => TAMQPDeliveryTag
, :Multiple => TAMQPBit
, :Requeue => TAMQPBit
]) # method Nack
)) # class Basic
, 90 => ClassSpec(90, :Tx, Dict{TAMQPMethodId, MethodSpec}(
10 => MethodSpec(10, :Select, :SelectOk, Pair{Symbol,DataType}[
]) # method Select
, 11 => MethodSpec(11, :SelectOk, :Nothing, Pair{Symbol,DataType}[
]) # method SelectOk
, 20 => MethodSpec(20, :Commit, :CommitOk, Pair{Symbol,DataType}[
]) # method Commit
, 21 => MethodSpec(21, :CommitOk, :Nothing, Pair{Symbol,DataType}[
]) # method CommitOk
, 30 => MethodSpec(30, :Rollback, :RollbackOk, Pair{Symbol,DataType}[
]) # method Rollback
, 31 => MethodSpec(31, :RollbackOk, :Nothing, Pair{Symbol,DataType}[
]) # method RollbackOk
)) # class Tx
, 85 => ClassSpec(85, :Confirm, Dict{TAMQPMethodId, MethodSpec}(
10 => MethodSpec(10, :Select, :SelectOk, Pair{Symbol,DataType}[
:Nowait => TAMQPBit
]) # method Select
, 11 => MethodSpec(11, :SelectOk, :Nothing, Pair{Symbol,DataType}[
]) # method SelectOk
)) # class Confirm
) # CLASS_MAP")
function make_classmethod_map()
cmmap = Dict{Tuple{Symbol,Symbol},MethodSpec}()
for v in values(CLASS_MAP)
for m in values(v.method_map)
cmmap[(v.name,m.name)] = m
end
end
cmmap
end
const CLASSNAME_MAP = Dict{Symbol,ClassSpec}(v.name => v for v in values(CLASS_MAP))
const CLASSMETHODNAME_MAP = make_classmethod_map()
# end Classes
# end generated code
| AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | code | 12405 | const LiteralAMQP = UInt8[65, 77, 81, 80] # "AMQP"
const ProtocolId = UInt8(0)
const ProtocolVersion = UInt8[0, 9, 1]
# Bytes sent at connection start: "AMQP" + protocol id + version 0.9.1
const ProtocolHeader = vcat(LiteralAMQP, ProtocolId, ProtocolVersion)
const ContentWeight = 0x0000
const FrameEnd = 0xCE
# NOTE(review): only 4 bytes here (frame type 8, channel?, frame-end); a full
# heartbeat frame on the wire also carries a 4-byte size field — confirm how
# and where this constant is used.
const HeartBeat = UInt8[8, 0, 0, FrameEnd]
# Parent type for wire values that are serialized as a length followed by data.
abstract type TAMQPLengthPrefixed end
#const TAMQPBit = UInt8
const TAMQPBool = UInt8 # 0 = FALSE, else TRUE
const TAMQPScale = UInt8 # number of decimal digits
const TAMQPOctet = UInt8
const TAMQPShortShortInt = UInt8
const TAMQPShortShortUInt = UInt8
const TAMQPShortInt = Int16
const TAMQPShortUInt = UInt16
const TAMQPLongInt = Int32
const TAMQPLongUInt = UInt32
const TAMQPLongLongInt = Int64
const TAMQPLongLongUInt = UInt64
const TAMQPFloat = Float32
const TAMQPDouble = Float64
const TAMQPTimeStamp = TAMQPLongLongUInt
# A single AMQP bit field; consecutive bits are packed into one octet on the wire.
struct TAMQPBit
    val::UInt8
end
# Extract the bit at 1-based position `pos` from a packed octet.
function TAMQPBit(b::TAMQPBit, pos::Int)
    TAMQPBit((b.val >> (pos-1)) & 0x1)
end
# Set `setbit` at 1-based position `pos` into the packed octet `b`.
function TAMQPBit(b::TAMQPBit, setbit::TAMQPBit, pos::Int)
    TAMQPBit(b.val | (setbit.val << (pos-1)))
end
TAMQPBit(b::Bool) = TAMQPBit(UInt8(b))
TAMQPBit(b::T) where {T<:Integer} = TAMQPBit(Bool(b))
# Decimal value: an unsigned integer scaled by 10^-scale.
struct TAMQPDecimalValue
    scale::TAMQPScale
    val::TAMQPLongUInt
end
# Short string: 1-byte length prefix, up to 255 bytes of data.
struct TAMQPShortStr <: TAMQPLengthPrefixed
    len::TAMQPOctet
    data::Vector{UInt8}
end
TAMQPShortStr(d::Vector{UInt8}) = TAMQPShortStr(length(d), d)
TAMQPShortStr(s::AbstractString) = TAMQPShortStr(Vector{UInt8}(codeunits(String(s))))
# Long string: 4-byte length prefix.
struct TAMQPLongStr <: TAMQPLengthPrefixed
    len::TAMQPLongUInt
    data::Vector{UInt8}
end
TAMQPLongStr(d::Vector{UInt8}) = TAMQPLongStr(length(d), d)
TAMQPLongStr(s::AbstractString) = TAMQPLongStr(Vector{UInt8}(codeunits(String(s))))
# Byte array: 4-byte length prefix, raw bytes.
struct TAMQPByteArray <: TAMQPLengthPrefixed
    len::TAMQPLongUInt
    data::Vector{UInt8}
end
TAMQPByteArray(d::Vector{UInt8}) = TAMQPByteArray(length(d), d)
TAMQPByteArray(s::AbstractString) = TAMQPByteArray(Vector{UInt8}(codeunits(String(s))))
const TAMQPFieldName = TAMQPShortStr
# Any value that may appear in a field table / field array.
const TAMQPFV = Union{Real, TAMQPDecimalValue, TAMQPLengthPrefixed, Nothing}
# A tagged field value: `typ` is the single-character wire type indicator.
struct TAMQPFieldValue{T <: TAMQPFV}
    typ::Char # as in FieldValueIndicatorMap
    fld::T
end
TAMQPFieldValue(v::T) where {T} = TAMQPFieldValue{T}(FieldIndicatorMap[T], v)
TAMQPFieldValue(v::Dict) = TAMQPFieldValue(TAMQPFieldTable(v))
TAMQPFieldValue(v::String) = TAMQPFieldValue(TAMQPLongStr(v))
# NOTE(review): Bool is encoded with indicator 'b' (short-short-int in the
# indicator map) rather than 't' (boolean) — confirm this is intentional for
# the broker being targeted.
TAMQPFieldValue(v::Bool) = TAMQPFieldValue('b', TAMQPBool(v))
struct TAMQPFieldValuePair{T <: TAMQPFV}
    name::TAMQPFieldName
    val::TAMQPFieldValue{T}
end
struct TAMQPFieldArray <: TAMQPLengthPrefixed
    len::TAMQPLongInt
    data::Vector{TAMQPFieldValue}
end
TAMQPFieldArray(data::Vector{TAMQPFieldValue}) = TAMQPFieldArray(length(data), data)
struct TAMQPFieldTable <: TAMQPLengthPrefixed
    len::TAMQPLongUInt
    data::Vector{TAMQPFieldValuePair}
end
# NOTE(review): `len` is set to the entry count here, while on the wire it is
# a byte length — presumably recomputed during serialization; confirm.
TAMQPFieldTable(data::Vector{TAMQPFieldValuePair}) = TAMQPFieldTable(length(data), data)
TAMQPFieldTable(dict::Dict) = TAMQPFieldTable(TAMQPFieldValuePair[TAMQPFieldValuePair(TAMQPShortStr(String(n)), TAMQPFieldValue(v)) for (n,v) in dict])
# Types that may appear as decoded method frame fields.
const TAMQPField = Union{TAMQPBit, Integer, TAMQPShortStr, TAMQPLongStr, TAMQPFieldTable}
# Wire type-indicator character => Julia type used to read/write that value.
const FieldValueIndicatorMap = Dict{Char,DataType}(
    't' => TAMQPBool,
    'b' => TAMQPShortShortInt,
    'B' => TAMQPShortShortUInt,
    'U' => TAMQPShortInt,
    'u' => TAMQPShortUInt,
    'I' => TAMQPLongInt,
    'i' => TAMQPLongUInt,
    'L' => TAMQPLongLongInt,
    'l' => TAMQPLongLongUInt,
    'f' => TAMQPFloat,
    'd' => TAMQPDouble,
    'D' => TAMQPDecimalValue,
    's' => TAMQPShortStr,
    'S' => TAMQPLongStr,
    'x' => TAMQPByteArray,
    'A' => TAMQPFieldArray,
    'T' => TAMQPTimeStamp,
    'F' => TAMQPFieldTable,
    'V' => Nothing
)
# NOTE(review): several indicators map to the same Julia type (e.g. 't' and
# 'B' are both UInt8, 'l' and 'T' are both UInt64), so this reverse map keeps
# whichever pair iterates last — the chosen indicator for such types is not
# well-defined; confirm callers only look up unambiguous types.
const FieldIndicatorMap = Dict{DataType,Char}(v=>n for (n,v) in FieldValueIndicatorMap)
const TAMQPChannel = TAMQPShortUInt
const TAMQPPayloadSize = TAMQPLongUInt
const TAMQPContentBodySize = TAMQPLongLongUInt
const TAMQPClassId = UInt16
const TAMQPMethodId = UInt16
const TAMQPContentClass = TAMQPClassId
# Per-frame properties common to all frame types.
struct TAMQPFrameProperties
    channel::TAMQPChannel
    payloadsize::TAMQPPayloadSize
end
# Content header property flags; `nextval` chains additional 16-bit flag words.
struct TAMQPPropertyFlags
    flags::UInt16
    nextval::Union{TAMQPPropertyFlags, Nothing}
end
TAMQPPropertyFlags(flags::UInt16) = TAMQPPropertyFlags(flags, nothing)
struct TAMQPBodyPayload
    # TODO: may be better to allow sub arrays, for efficient writing of large messages
    data::Vector{TAMQPOctet}
end
# Decoded method frame payload: class id, method id and the named field values.
# The io-reading constructor decodes the wire form; the (class, method, values)
# constructor builds a payload for sending, coercing values to the spec types.
struct TAMQPMethodPayload
    class::TAMQPClassId
    method::TAMQPMethodId
    fields::Vector{Pair{Symbol,TAMQPField}}

    TAMQPMethodPayload(p::TAMQPBodyPayload) = TAMQPMethodPayload(p.data)
    TAMQPMethodPayload(b::Vector{TAMQPOctet}) = TAMQPMethodPayload(IOBuffer(b))
    function TAMQPMethodPayload(io)
        class = ntoh(read(io, TAMQPClassId))
        method = ntoh(read(io, TAMQPMethodId))
        args = methodargs(class, method)
        fields = Vector{Pair{Symbol,TAMQPField}}(undef, length(args))
        @debug("reading method payload", class, method, nargs=length(args))
        # Consecutive bit fields are packed into a single octet on the wire;
        # track the bit position within the current octet while decoding.
        bitpos = 0
        bitval = TAMQPBit(0)
        for idx in 1:length(fields)
            fld = args[idx]
            @debug("reading", field=fld.first, type=fld.second)
            if fld.second === TAMQPBit
                bitpos += 1
                (bitpos == 1) && (bitval = read(io, fld.second))
                v = TAMQPBit(bitval, bitpos)
                # Fix: reset (assignment) after consuming all 8 bits so the
                # next bit field reads a fresh octet. This was previously the
                # no-op comparison `bitpos == 0`, which mis-decoded runs of
                # more than 8 consecutive bit fields (the serializing code in
                # TAMQPGenericFrame(::TAMQPMethodFrame) does reset correctly).
                (bitpos == 8) && (bitpos = 0)
            else
                bitpos = 0
                v = read(io, fld.second)
            end
            (fld.second <: Integer) && (v = ntoh(v))
            fields[idx] = Pair{Symbol,TAMQPField}(fld.first, v)
        end
        new(class, method, fields)
    end
    function TAMQPMethodPayload(class_name::Symbol, method_name::Symbol, fldvals)
        class = CLASSNAME_MAP[class_name]
        method = CLASSMETHODNAME_MAP[(class_name,method_name)]
        fields = Pair{Symbol,TAMQPField}[]
        for idx in 1:length(method.args)
            (argname,argtype) = method.args[idx]
            argval = fldvals[idx]
            # coerce to the spec-declared type unless already of that type
            push!(fields, Pair{Symbol,TAMQPField}(argname, isa(argval, argtype) ? argval : argtype(argval)))
        end
        new(class.id, method.id, fields)
    end
end
# Content header payload: content class, (reserved) weight, total body size,
# property flags and the decoded property list.
struct TAMQPHeaderPayload
    class::TAMQPContentClass
    weight::UInt16 # must be ContentWeight
    bodysize::TAMQPContentBodySize
    propflags::TAMQPPropertyFlags
    proplist::Dict{Symbol,TAMQPField}

    TAMQPHeaderPayload(p::TAMQPBodyPayload) = TAMQPHeaderPayload(p.data)
    TAMQPHeaderPayload(b::Vector{TAMQPOctet}) = TAMQPHeaderPayload(IOBuffer(b))
    function TAMQPHeaderPayload(io)
        class = ntoh(read(io, TAMQPClassId))
        wt = ntoh(read(io, UInt16))
        # Validate wire input with an explicit throw: @assert may be disabled
        # at higher optimization levels and must not guard protocol data.
        (wt == ContentWeight) || throw(AMQPProtocolException("unexpected content weight $wt"))
        bodysize = ntoh(read(io, TAMQPContentBodySize))
        propflags = TAMQPPropertyFlags(ntoh(read(io, UInt16)))
        proplist = Dict{Symbol,TAMQPField}()
        flags = propflags.flags
        # only properties whose flag bit is set are present on the wire
        for prop in SORTED_PROPERTIES
            if (flags & prop.mask) > 0x0000
                proplist[prop.name] = read(io, prop.typ)
            end
        end
        new(class, ContentWeight, bodysize, propflags, proplist)
    end
    function TAMQPHeaderPayload(class::TAMQPContentClass, message)
        bodysize = length(message.data)
        flags = 0x0000
        for name in keys(message.properties)
            flags = flags | PROPERTIES[name].mask
        end
        new(class, ContentWeight, bodysize, TAMQPPropertyFlags(flags), message.properties)
    end
end
# Generic frame, used to read any frame
struct TAMQPGenericFrame
hdr::UInt8
props::TAMQPFrameProperties
payload::TAMQPBodyPayload
fend::UInt8 # must be FrameEnd
end
# Type = 1, "METHOD": method frame
struct TAMQPMethodFrame
props::TAMQPFrameProperties
payload::TAMQPMethodPayload
end
function TAMQPMethodFrame(f::TAMQPGenericFrame)
@debug("Frame Conversion: generic => method")
@assert f.hdr == FrameMethod
TAMQPMethodFrame(f.props, TAMQPMethodPayload(f.payload))
end
function TAMQPGenericFrame(f::TAMQPMethodFrame)
@debug("Frame Conversion method => generic")
iob = IOBuffer()
methpayload = f.payload
write(iob, hton(methpayload.class))
write(iob, hton(methpayload.method))
bitpos = 0
bitval = TAMQPBit(0)
for (n,v) in methpayload.fields
if isa(v, TAMQPBit)
bitpos += 1
bitval = TAMQPBit(bitval, v, bitpos)
if bitpos == 8
write(iob, bitval)
bitpos = 0
bitval = TAMQPBit(0)
end
else
if bitpos > 0
write(iob, bitval)
bitpos = 0
bitval = TAMQPBit(0)
end
(typeof(v) <: Integer) && (v = hton(v))
write(iob, v)
end
end
if bitpos > 0
write(iob, bitval)
end
bodypayload = TAMQPBodyPayload(take!(iob))
TAMQPGenericFrame(FrameMethod, TAMQPFrameProperties(f.props.channel, length(bodypayload.data)), bodypayload, FrameEnd)
end
# Type = 2, "HEADER": content header frame.
struct TAMQPContentHeaderFrame
props::TAMQPFrameProperties
hdrpayload::TAMQPHeaderPayload
end
function TAMQPContentHeaderFrame(f::TAMQPGenericFrame)
@debug("Frame Conversion: generic => contentheader")
@assert f.hdr == FrameHeader
TAMQPContentHeaderFrame(f.props, TAMQPHeaderPayload(f.payload))
end
function TAMQPGenericFrame(f::TAMQPContentHeaderFrame)
@debug("Frame Conversion contentheader => generic")
iob = IOBuffer()
hdrpayload = f.hdrpayload
propflags = hdrpayload.propflags
proplist = hdrpayload.proplist
write(iob, hton(hdrpayload.class))
write(iob, hton(hdrpayload.weight))
write(iob, hton(hdrpayload.bodysize))
write(iob, hton(propflags.flags))
flags = propflags.flags
for prop in SORTED_PROPERTIES
if (flags & prop.mask) > 0x0000
write(iob, proplist[prop.name])
end
end
bodypayload = TAMQPBodyPayload(take!(iob))
TAMQPGenericFrame(FrameHeader, TAMQPFrameProperties(f.props.channel, length(bodypayload.data)), bodypayload, FrameEnd)
end
# Type = 3, "BODY": content body frame.
struct TAMQPContentBodyFrame
props::TAMQPFrameProperties
payload::TAMQPBodyPayload
end
function TAMQPContentBodyFrame(f::TAMQPGenericFrame)
@debug("Frame Conversion: generic => contentbody")
@assert f.hdr == FrameBody
TAMQPContentBodyFrame(f.props, f.payload)
end
function TAMQPGenericFrame(f::TAMQPContentBodyFrame)
@debug("Frame Conversion contentbody => generic")
TAMQPGenericFrame(FrameBody, TAMQPFrameProperties(f.props.channel, length(f.payload.data)), f.payload, FrameEnd)
end
# Type = 4, "HEARTBEAT": heartbeat frame.
struct TAMQPHeartBeatFrame
end
function TAMQPHeartBeatFrame(f::TAMQPGenericFrame)
@assert f.hdr == FrameHeartbeat
TAMQPHeartBeatFrame()
end
function TAMQPGenericFrame(f::TAMQPHeartBeatFrame)
@debug("Frame Conversion heartbeat => generic")
TAMQPGenericFrame(FrameHeartbeat, TAMQPFrameProperties(DEFAULT_CHANNEL, 0), TAMQPBodyPayload(TAMQPOctet[]), FrameEnd)
end
# A complete piece of content: header frame plus one or more body frames.
struct TAMQPContent
    hdr::TAMQPContentHeaderFrame
    body::Vector{TAMQPContentBodyFrame}
end
# A method frame with optional accompanying content (e.g. Basic.Publish/Deliver).
struct TAMQPMethod
    frame::TAMQPMethodFrame
    content::Union{TAMQPContent, Nothing}
end
# Exceptions
# NOTE(review): these are `mutable struct`s; nothing visible here mutates
# `msg` after construction — an immutable `struct` would likely suffice,
# but confirm no caller reassigns the field before changing.
mutable struct AMQPProtocolException <: Exception
    msg::String
end
mutable struct AMQPClientException <: Exception
    msg::String
end
# Spec code gen types
# One AMQP method: id, name, the name of its synchronous response method
# (:Nothing if none), and its argument name => type spec.
struct MethodSpec
    id::Int
    name::Symbol
    respname::Symbol
    args::Vector{Pair{Symbol,DataType}}
end
# One AMQP class: id, name and its methods keyed by method id.
struct ClassSpec
    id::Int
    name::Symbol
    method_map::Dict{Int,MethodSpec}
end
# Reason a connection/channel was closed: reply code/text plus the class and
# method ids that triggered the close (0 when not method-specific).
struct CloseReason
    code::Int16
    msg::TAMQPShortStr
    classid::TAMQPClassId
    methodid::TAMQPMethodId
end
# Utility Methods for Types
# Utility Methods for Types
# Look up the MethodSpec for a (class id, method id) pair.
method(classid::TAMQPClassId, methodid::TAMQPMethodId) = CLASS_MAP[classid].method_map[methodid]
# Argument spec (name => type pairs) for a (class id, method id) pair.
methodargs(classid::TAMQPClassId, methodid::TAMQPMethodId) = method(classid, methodid).args
# Human-readable "Class.Method" name, e.g. "Basic.Publish".
function displayname(classid::TAMQPClassId, methodid::TAMQPMethodId)
    cls = CLASS_MAP[classid]
    string(cls.name, '.', cls.method_map[methodid].name)
end
| AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | code | 2211 | using AMQPClient
using Test
include("test_coverage.jl")
include("test_throughput.jl")
include("test_rpc.jl")
# Plain AMQP tests always run; the AMQPS (TLS) tests run only when a host is
# passed on the command line.
@testset "AMQPClient" begin
    @testset "AMQP" begin
        @testset "Functionality" begin
            # exercise all combinations of socket keepalive and AMQP heartbeat
            for keepalive in [true, false]
                for heartbeat in (true, false)
                    @testset "keepalive=$keepalive,heartbeat=$heartbeat" begin
                        AMQPTestCoverage.runtests(; keepalive=keepalive, heartbeat=heartbeat)
                    end
                end
            end
        end
        @testset "Throughput" begin
            AMQPTestThroughput.runtests()
        end
        @testset "RPC" begin
            AMQPTestRPC.runtests()
        end
    end
    if length(ARGS) > 0
        # NOTE(review): assumes two positional args (host, virtualhost) when
        # any are given; a single arg raises BoundsError on ARGS[2] — confirm.
        @testset "AMQPS" begin
            amqps_host = ARGS[1]
            virtualhost = ARGS[2]
            port = AMQPClient.AMQPS_DEFAULT_PORT
            # credentials are taken from the environment, not the command line
            login = ENV["AMQPPLAIN_LOGIN"]
            password = ENV["AMQPPLAIN_PASSWORD"]
            auth_params = Dict{String,Any}("MECHANISM"=>"AMQPLAIN", "LOGIN"=>login, "PASSWORD"=>password)
            @testset "Functionality" begin
                for keepalive in [true, false]
                    for heartbeat in (true, false)
                        @testset "keepalive=$keepalive,heartbeat=$heartbeat" begin
                            AMQPTestCoverage.runtests(;
                                host=amqps_host,
                                port=AMQPClient.AMQPS_DEFAULT_PORT,
                                virtualhost=virtualhost,
                                amqps=amqps_configure(),
                                auth_params=auth_params,
                                keepalive=keepalive,
                                heartbeat=heartbeat)
                        end
                    end
                end
            end
            @testset "Throughput" begin
                AMQPTestThroughput.runtests(; host=amqps_host, port=AMQPClient.AMQPS_DEFAULT_PORT, tls=true)
            end
            @testset "RPC" begin
                AMQPTestRPC.runtests(; host=amqps_host, port=AMQPClient.AMQPS_DEFAULT_PORT, amqps=amqps_configure())
            end
        end
    end
end
# NOTE(review): reached only on success (a failing top-level @testset throws);
# terminates the process immediately — confirm nothing after this matters.
exit(0)
| AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | code | 13350 | module AMQPTestCoverage
using AMQPClient, Test, Random
const JULIA_HOME = Sys.BINDIR
# fixture names used throughout the coverage tests
const EXCG_DIRECT = "ExcgDirect"
const EXCG_FANOUT = "ExcgFanout"
const QUEUE1 = "queue1"
const ROUTE1 = "key1"
# random credentials, used to verify that authentication failure is reported
const invalid_auth_params = Dict{String,Any}("MECHANISM"=>"AMQPLAIN", "LOGIN"=>randstring(10), "PASSWORD"=>randstring(10))
function runtests(;virtualhost="/", host="localhost", port=AMQPClient.AMQP_DEFAULT_PORT, auth_params=AMQPClient.DEFAULT_AUTH_PARAMS, amqps=nothing, keepalive=true, heartbeat=true)
verify_spec()
test_types()
test_queue_expire(;
virtualhost=virtualhost,
host=host,
port=port,
auth_params=auth_params,
amqps=amqps,
keepalive=keepalive,
heartbeat=heartbeat)
@test default_exchange_name("direct") == "amq.direct"
@test default_exchange_name() == ""
@test AMQPClient.method_name(AMQPClient.TAMQPMethodPayload(:Basic, :Ack, (1, false))) == "Basic.Ack"
# test failure on invalid auth_params
@test_throws AMQPClient.AMQPClientException connection(;virtualhost=virtualhost, host=host, port=port, amqps=amqps, auth_params=invalid_auth_params)
conn_ref = nothing
# open a connection
@info("opening connection")
connection(;virtualhost=virtualhost, host=host, port=port, amqps=amqps, auth_params=auth_params, send_queue_size=512, keepalive=keepalive, heartbeat=heartbeat) do conn
# Issue #51
@test isa(conn, AMQPClient.Connection)
@test conn.sendq.sz_max == 512
# open a channel
@info("opening channel")
channel(conn, AMQPClient.UNUSED_CHANNEL, true) do chan1
@test chan1.id == 1
@test conn.sendq.sz_max == 512
# test default exchange names
@test default_exchange_name() == ""
@test default_exchange_name(EXCHANGE_TYPE_DIRECT) == "amq.direct"
# create exchanges
@info("creating exchanges")
@test exchange_declare(chan1, EXCG_DIRECT, EXCHANGE_TYPE_DIRECT; arguments=Dict{String,Any}("Hello"=>"World", "Foo"=>"bar"))
@test exchange_declare(chan1, EXCG_FANOUT, EXCHANGE_TYPE_FANOUT)
# redeclaring the exchange with same attributes should be fine
@test exchange_declare(chan1, EXCG_FANOUT, EXCHANGE_TYPE_FANOUT)
# redeclaring an existing exchange with different attributes should fail
@test_throws AMQPClient.AMQPClientException exchange_declare(chan1, EXCG_FANOUT, EXCHANGE_TYPE_DIRECT)
end
chan_ref = nothing
# must reconnect as channel gets closed after a channel exception
channel(conn, AMQPClient.UNUSED_CHANNEL, true) do chan1
@test chan1.id == 1
# create and bind queues
@info("creating queues")
success, queue_name, message_count, consumer_count = queue_declare(chan1, QUEUE1)
@test success
@test message_count == 0
@test consumer_count == 0
@test queue_bind(chan1, QUEUE1, EXCG_DIRECT, ROUTE1)
# rabbitmq 3.6.5 does not support qos
# basic_qos(chan1, 1024*10, 10, false)
M = Message(Vector{UInt8}("hello world"), content_type="text/plain", delivery_mode=PERSISTENT)
@info("testing basic publish and get")
# publish 10 messages
for idx in 1:10
basic_publish(chan1, M; exchange=EXCG_DIRECT, routing_key=ROUTE1)
flush(chan1)
@test !isready(chan1.conn.sendq)
end
# basic get 10 messages
for idx in 1:10
result = basic_get(chan1, QUEUE1, false)
@test result !== nothing
rcvd_msg = result
basic_ack(chan1, rcvd_msg.delivery_tag)
@test rcvd_msg.remaining == (10-idx)
@test rcvd_msg.exchange == EXCG_DIRECT
@test rcvd_msg.redelivered == false
@test rcvd_msg.routing_key == ROUTE1
@test rcvd_msg.data == M.data
@test :content_type in keys(rcvd_msg.properties)
@test convert(String, rcvd_msg.properties[:content_type]) == "text/plain"
end
# basic get returns null if no more messages
@test basic_get(chan1, QUEUE1, false) === nothing
## test reject and requeue
basic_publish(chan1, M; exchange=EXCG_DIRECT, routing_key=ROUTE1)
result = basic_get(chan1, QUEUE1, false)
@test result !== nothing
rcvd_msg = result
@test rcvd_msg.redelivered == false
basic_reject(chan1, rcvd_msg.delivery_tag; requeue=true)
result = basic_get(chan1, QUEUE1, false)
@test result !== nothing
rcvd_msg = result
@test rcvd_msg.redelivered == true
basic_ack(chan1, rcvd_msg.delivery_tag)
@info("testing basic consumer")
# start a consumer task
global msg_count = 0
consumer_fn = (rcvd_msg) -> begin
@test rcvd_msg.exchange == EXCG_DIRECT
@test rcvd_msg.redelivered == false
@test rcvd_msg.routing_key == ROUTE1
global msg_count
msg_count += 1
if msg_count <= 10
@test rcvd_msg.data == M.data
else
@test rcvd_msg.data == UInt8[]
end
println("received msg $(msg_count): $(String(rcvd_msg.data))")
basic_ack(chan1, rcvd_msg.delivery_tag)
end
success, consumer_tag = basic_consume(chan1, QUEUE1, consumer_fn)
@test success
# publish 10 messages
for idx in 1:10
basic_publish(chan1, M; exchange=EXCG_DIRECT, routing_key=ROUTE1)
end
# wait for a reasonable time to receive all messages
for idx in 1:10
(msg_count == 10) && break
sleep(1)
end
@test msg_count == 10
@info("testing empty messages")
# Test sending and receiving empty message
M_empty = Message(Vector{UInt8}(), content_type="text/plain", delivery_mode=PERSISTENT)
basic_publish(chan1, M_empty; exchange=EXCG_DIRECT, routing_key=ROUTE1)
M_no_ct = Message(Vector{UInt8}(), delivery_mode=PERSISTENT)
basic_publish(chan1, M_no_ct; exchange=EXCG_DIRECT, routing_key=ROUTE1)
println("Waiting")
# wait for a reasonable time to receive last two messages
for idx in 1:5
(msg_count == 12) && break
sleep(1)
end
println("Waited")
@test msg_count == 12
# cancel the consumer task
@test basic_cancel(chan1, consumer_tag)
# test transactions
@info("testing tx")
@test tx_select(chan1)
@test tx_commit(chan1)
@test tx_rollback(chan1)
# test heartbeats
if 120 >= conn.heartbeat > 0
c = conn
@info("testing heartbeats (waiting $(3*c.heartbeat) secs)...")
ts1 = c.heartbeat_time_server
tc1 = c.heartbeat_time_client
sleeptime = c.heartbeat/2
for idx in 1:6
(c.heartbeat_time_server > ts1) && (c.heartbeat_time_client > tc1) && break
sleep(sleeptime)
end
@test c.heartbeat_time_server > ts1
@test c.heartbeat_time_client > tc1
elseif conn.heartbeat == 0
@info("heartbeat disabled")
else
@info("not testing heartbeats (wait too long at $(3*conn.heartbeat) secs)")
end
@info("closing down")
success, message_count = queue_purge(chan1, QUEUE1)
@test success
@test message_count == 0
@test queue_unbind(chan1, QUEUE1, EXCG_DIRECT, ROUTE1)
success, message_count = queue_delete(chan1, QUEUE1)
@test success
@test message_count == 0
# delete exchanges
@test exchange_delete(chan1, EXCG_DIRECT; nowait=true)
@test exchange_delete(chan1, EXCG_FANOUT)
chan_ref = chan1 # to do additional tests on a closed channel
end
close(chan_ref) # closing a closed channel should not be an issue
AMQPClient.wait_for_state(chan_ref, AMQPClient.CONN_STATE_CLOSED)
@test !isopen(chan_ref)
conn_ref = conn # to do additional tests on a closed connection
end
# closing a closed connection should not be an issue
close(conn_ref)
AMQPClient.wait_for_state(conn_ref, AMQPClient.CONN_STATE_CLOSED)
@test !isopen(conn_ref)
@info("done")
nothing
end
# Sanity-check the generated protocol spec maps against the known AMQP classes.
function verify_spec()
    # protocol classes the client implements
    ALLCLASSES = (:Connection, :Basic, :Channel, :Confirm, :Exchange, :Queue, :Tx)
    # every class must have an entry in the class-name map
    for classname in ALLCLASSES
        @test classname in keys(AMQPClient.CLASSNAME_MAP)
    end
    # every (class, method) key in the method map must refer to a known class
    for (classname, methodname) in keys(AMQPClient.CLASSMETHODNAME_MAP)
        @test classname in ALLCLASSES
    end
end
# Exercise construction, display, simplification and wire (de)serialization of
# the low-level AMQP field and frame types.
function test_types()
# a field table holding a mix of Julia-native and AMQP-typed values
d = Dict{String,Any}(
"bool" => 0x1,
"int" => 10,
"uint" => 0x1,
"float" => rand(),
"shortstr" => AMQPClient.TAMQPShortStr(randstring(10)),
"longstr" => AMQPClient.TAMQPLongStr(randstring(1024)))
ft = AMQPClient.TAMQPFieldTable(d)
# show() on each type must produce some (non-empty) output
iob = IOBuffer()
show(iob, ft)
@test length(take!(iob)) > 0
# a vector of name => field pairs covering bit, string and table fields
fields = [Pair{Symbol,AMQPClient.TAMQPField}(:bit, AMQPClient.TAMQPBit(0x1)),
Pair{Symbol,AMQPClient.TAMQPField}(:shortstr, AMQPClient.TAMQPShortStr(randstring(10))),
Pair{Symbol,AMQPClient.TAMQPField}(:longstr, AMQPClient.TAMQPLongStr(randstring(1024))),
Pair{Symbol,AMQPClient.TAMQPField}(:fieldtable, ft)]
show(iob, fields)
@test length(take!(iob)) > 0
# build a Channel.Open method payload, wrap it in frame properties and a method frame
mpayload = AMQPClient.TAMQPMethodPayload(:Channel, :Open, ("",))
show(iob, mpayload)
@test length(take!(iob)) > 0
mfprop = AMQPClient.TAMQPFrameProperties(AMQPClient.TAMQPChannel(0), AMQPClient.TAMQPPayloadSize(100))
show(iob, mfprop)
@test length(take!(iob)) > 0
mframe = AMQPClient.TAMQPMethodFrame(mfprop, mpayload)
show(iob, mframe)
@test length(take!(iob)) > 0
# simplify() must map wrapped AMQP field values back to plain Julia values
fields = AMQPClient.TAMQPFieldValue[
AMQPClient.TAMQPFieldValue(true),
AMQPClient.TAMQPFieldValue(1.1),
AMQPClient.TAMQPFieldValue(1),
AMQPClient.TAMQPFieldValue("hello world"),
AMQPClient.TAMQPFieldValue(Dict{String,Int}("one"=>1, "two"=>2)),
]
fieldarray = AMQPClient.TAMQPFieldArray(fields)
simplified_fields = AMQPClient.simplify(fieldarray)
@test simplified_fields == Any[
0x01,
1.1,
1,
"hello world",
Dict{String, Any}("two" => 2, "one" => 1)
]
# round-trip a byte array: write a length prefix (network byte order) plus the
# bytes, then read it back as a typed TAMQPByteArray
iob = PipeBuffer()
write(iob, hton(AMQPClient.TAMQPLongUInt(10)))
write(iob, UInt8[1,2,3,4,5,6,7,8,9,0])
barr = read(iob, AMQPClient.TAMQPByteArray)
@test barr.len == 10
@test barr.data == UInt8[1,2,3,4,5,6,7,8,9,0]
end
# Declare a queue with an "x-expires" TTL and verify that binding works while
# the queue is alive and raises an AMQPClientException after the queue expires.
function test_queue_expire(;virtualhost="/", host="localhost", port=AMQPClient.AMQP_DEFAULT_PORT, auth_params=AMQPClient.DEFAULT_AUTH_PARAMS, amqps=nothing, keepalive=true, heartbeat=true)
@info("testing create queue and queue expire with TTL")
# open a connection
@info("opening connection")
conn_ref = nothing
chan_ref = nothing
connection(;virtualhost=virtualhost, host=host, port=port, amqps=amqps, auth_params=auth_params, send_queue_size=512, keepalive=keepalive, heartbeat=heartbeat) do conn
# open a channel
@info("opening channel")
channel(conn, AMQPClient.UNUSED_CHANNEL, true) do chan1
@test chan1.id == 1
# test queue create and expire
expires_ms = 10 * 1000 # 10 seconds
success, queue_name, message_count, consumer_count = queue_declare(chan1, QUEUE1, arguments=Dict{String,Any}("x-expires"=>expires_ms))
@test success
@test message_count == 0
@test consumer_count == 0
exchange_name = default_exchange_name("direct")
# queue bind should be successful when queue not expired
@test queue_bind(chan1, QUEUE1, exchange_name, ROUTE1)
# wait for queue to expire, and a subsequent bind should fail
sleep(2 + expires_ms/1000)
@test_throws AMQPClient.AMQPClientException queue_bind(chan1, QUEUE1, exchange_name, ROUTE1)
chan_ref = chan1 # to do additional tests on a closed channel
end
# the channel() do-block closes the channel on exit, so the wait below succeeds
# close(chan_ref) # closing a closed channel should not be an issue
AMQPClient.wait_for_state(chan_ref, AMQPClient.CONN_STATE_CLOSED)
@test !isopen(chan_ref)
conn_ref = conn # to do additional tests on a closed connection
end
# closing a closed connection should not be an issue
# close(conn_ref)
AMQPClient.wait_for_state(conn_ref, AMQPClient.CONN_STATE_CLOSED)
@test !isopen(conn_ref)
@info("done")
nothing
end
end # module AMQPTestCoverage
| AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | code | 6847 | module AMQPTestRPC
using AMQPClient, Test, Random
const JULIA_HOME = Sys.BINDIR  # julia binary directory (NOTE(review): appears unused in this module)
const QUEUE_RPC = "queue_rpc"  # base name of the shared RPC request queue
const NRPC_MSGS = 100   # requests published by each client
const NRPC_CLNTS = 4    # number of concurrent clients
const NRPC_SRVRS = 4    # number of concurrent servers
const server_lck = Ref(ReentrantLock())  # guards server_rpc_count, queue declaration and cleanup
const servers_done = Channel{Int}(NRPC_SRVRS)  # one token per server; the server that takes the last token cleans up
const server_rpc_count = Ref(0)  # total requests handled across all servers
"""
    test_rpc_client(reply_queue_id; virtualhost, host, port, auth_params, amqps)

Act as one RPC client: declare an exclusive reply queue, publish `NRPC_MSGS`
requests to the shared RPC queue (each carrying `reply_to` and a correlation
id), wait until a reply has been consumed for every request, then clean up.

Fixes: the `amqps` keyword previously defaulted to `amqps` (a nonexistent
global, raising UndefVarError when omitted); a `@debug` referenced the
misspelled `reply_quque_id`.
"""
function test_rpc_client(reply_queue_id; virtualhost="/", host="localhost", port=AMQPClient.AMQP_DEFAULT_PORT, auth_params=AMQPClient.DEFAULT_AUTH_PARAMS, amqps=nothing)
    rpc_queue_name = QUEUE_RPC * ((amqps === nothing) ? "amqp" : "amqps")
    # open a connection
    @info("client opening connection", reply_queue_id)
    conn = connection(;virtualhost=virtualhost, host=host, port=port, auth_params=auth_params, amqps=amqps)
    # open a channel
    @debug("client opening channel")
    chan1 = channel(conn, AMQPClient.UNUSED_CHANNEL, true)
    # create an exclusive reply queue; queue id and pid make the name unique
    queue_name = rpc_queue_name * "_" * string(reply_queue_id) * "_" * string(getpid())
    @debug("client creating queue", queue_name)
    success, queue_name, message_count, consumer_count = queue_declare(chan1, queue_name; exclusive=true)
    @test success
    @debug("client testing rpc")
    rpc_reply_count = 0
    rpc_fn = (rcvd_msg) -> begin
        rpc_reply_count += 1
        msg_str = String(rcvd_msg.data)
        @debug("client", reply_queue_id, msg_str)  # was the misspelled reply_quque_id
        basic_ack(chan1, rcvd_msg.delivery_tag)
    end
    # start a consumer task on the reply queue
    success, consumer_tag = basic_consume(chan1, queue_name, rpc_fn)
    @test success
    correlation_id = 0
    # publish NRPC_MSGS requests to the shared RPC queue
    while correlation_id < NRPC_MSGS
        correlation_id += 1
        M = Message(Vector{UInt8}("hello from " * queue_name), content_type="text/plain", delivery_mode=PERSISTENT, reply_to=queue_name, correlation_id=string(correlation_id))
        basic_publish(chan1, M; exchange=default_exchange_name(), routing_key=rpc_queue_name)
        # pause up to one second between requests
        sleep(rand())
    end
    # wait for a reply to every request
    while (rpc_reply_count < NRPC_MSGS)
        sleep(1)
    end
    @debug("client closing down", reply_queue_id)
    success, message_count = queue_purge(chan1, queue_name)
    @test success
    @test message_count == 0
    @test basic_cancel(chan1, consumer_tag)
    success, message_count = queue_delete(chan1, queue_name)
    @test success
    @test message_count == 0
    # close channel and connection, waiting for the close handshake to finish
    close(chan1)
    AMQPClient.wait_for_state(chan1, AMQPClient.CONN_STATE_CLOSED)
    @test !isopen(chan1)
    close(conn)
    AMQPClient.wait_for_state(conn, AMQPClient.CONN_STATE_CLOSED)
    @test !isopen(conn)
    @info("client done", reply_queue_id, rpc_reply_count)
end
"""
    test_rpc_server(my_server_id; virtualhost, host, port, auth_params, amqps)

Act as one RPC server: consume requests from the shared RPC queue, reply to
each request's `reply_to` queue preserving its correlation id, and shut down
once `NRPC_MSGS * NRPC_CLNTS` requests have been handled (counted across all
servers). The last server to finish purges and deletes the shared queue.

Fixes: `queue_declare` returns four values `(success, queue_name,
message_count, consumer_count)` — the original destructured into three,
silently binding the queue name to `message_count`; the `amqps` keyword
previously defaulted to a nonexistent global.
"""
function test_rpc_server(my_server_id; virtualhost="/", host="localhost", port=AMQPClient.AMQP_DEFAULT_PORT, auth_params=AMQPClient.DEFAULT_AUTH_PARAMS, amqps=nothing)
    rpc_queue_name = QUEUE_RPC * ((amqps === nothing) ? "amqp" : "amqps")
    # open a connection
    @info("server opening connection", my_server_id)
    conn = connection(;virtualhost=virtualhost, host=host, port=port, auth_params=auth_params, amqps=amqps)
    # open a channel
    @debug("server opening channel", my_server_id)
    chan1 = channel(conn, AMQPClient.UNUSED_CHANNEL, true)
    # declare the shared request queue (no binding needed on the default exchange);
    # serialized across servers so concurrent declarations do not race
    lock(server_lck[]) do
        @debug("server creating queues", my_server_id)
        # this is the callback queue
        success, queue_name, message_count, consumer_count = queue_declare(chan1, rpc_queue_name)
        @test success
    end
    # test RPC
    @debug("server testing rpc", my_server_id)
    rpc_fn = (rcvd_msg) -> begin
        # bump the shared request counter under the lock; `lock` returns the new count
        rpc_count = lock(server_lck[]) do
            server_rpc_count[] = server_rpc_count[] + 1
        end
        @test :reply_to in keys(rcvd_msg.properties)
        reply_to = convert(String, rcvd_msg.properties[:reply_to])
        correlation_id = convert(String, rcvd_msg.properties[:correlation_id])
        resp_str = "$(my_server_id) received msg $(rpc_count) - $(reply_to): $(String(rcvd_msg.data))"
        @debug("server response", resp_str)
        # echo back on the client's reply queue, preserving the correlation id
        M = Message(Vector{UInt8}(resp_str), content_type="text/plain", delivery_mode=PERSISTENT, correlation_id=correlation_id)
        basic_publish(chan1, M; exchange=default_exchange_name(), routing_key=reply_to)
        basic_ack(chan1, rcvd_msg.delivery_tag)
    end
    # start a consumer task
    success, consumer_tag = basic_consume(chan1, rpc_queue_name, rpc_fn)
    @test success
    # poll until every expected request has been processed (by any server)
    server_done = false
    while !server_done
        sleep(5)
        lock(server_lck[]) do
            server_done = (server_rpc_count[] >= NRPC_MSGS*NRPC_CLNTS)
            @debug("rpc_count", server_rpc_count[], my_server_id)
        end
    end
    @debug("server closing down", my_server_id)
    @test basic_cancel(chan1, consumer_tag)
    @debug("server cancelled consumer", my_server_id)
    lock(server_lck[]) do
        take!(servers_done)
        # the last server to finish will purge and delete the queue
        if length(servers_done.data) == 0
            success, message_count = queue_purge(chan1, rpc_queue_name)
            @test success
            @test message_count == 0
            @debug("server purged queue", my_server_id)
            success, message_count = queue_delete(chan1, rpc_queue_name)
            @test success
            @test message_count == 0
            @debug("server deleted rpc queue", my_server_id)
        end
    end
    # close channel and connection, waiting for the close handshake to finish
    close(chan1)
    AMQPClient.wait_for_state(chan1, AMQPClient.CONN_STATE_CLOSED)
    @test !isopen(chan1)
    close(conn)
    AMQPClient.wait_for_state(conn, AMQPClient.CONN_STATE_CLOSED)
    @test !isopen(conn)
    @info("server done", my_server_id)
    nothing
end
"""
    runtests(; host, port, amqps)

Run the multi-client/multi-server RPC test: seed the `servers_done` channel
with one token per server, then start `NRPC_SRVRS` server tasks and
`NRPC_CLNTS` client tasks and wait for all of them to finish.
"""
function runtests(; host="localhost", port=AMQPClient.AMQP_DEFAULT_PORT, amqps=nothing)
    @info("testing multiple client server rpc")
    server_rpc_count[] = 0
    # one token per server; the server taking the last token cleans up the queue
    for server_id in 1:NRPC_SRVRS
        put!(servers_done, server_id)
    end
    @sync begin
        for server_id in 1:NRPC_SRVRS
            @async try
                test_rpc_server(server_id; host=host, port=port, amqps=amqps)
            catch ex
                @error("server exception", exception=(ex,catch_backtrace()))
                rethrow()
            end
        end
        for client_id in 1:NRPC_CLNTS
            @async try
                test_rpc_client(client_id; host=host, port=port, amqps=amqps)
            catch ex
                @error("client exception", exception=(ex,catch_backtrace()))
                rethrow()
            end
        end
    end
    @info("testing multiple client server rpc done")
end
end # module AMQPTestRPC | AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | code | 5018 | module AMQPTestThroughput
using AMQPClient, Test, Random
const JULIA_HOME = Sys.BINDIR  # used by spawn_test to locate the julia executable
const EXCG_DIRECT = "amq.direct"  # predeclared direct exchange
const QUEUE1 = "queue1"
const ROUTE1 = "key1"
const MSG_SIZE = 1024  # NOTE(review): unused; the message below hard-codes 1024
const NMSGS = 10^5  # messages published/consumed per run
const no_ack = true  # consume in no-ack mode (no per-message acknowledgement)
const M = Message(rand(UInt8, 1024), content_type="application/octet-stream", delivery_mode=PERSISTENT)  # the payload published repeatedly
"""
    setup(; virtualhost, host, port, auth_params, tls)

Open a connection (AMQPS when `tls` is true) and a channel, declare `QUEUE1`
and bind it to the direct exchange. Returns `(connection, channel)`.
"""
function setup(;virtualhost="/", host="localhost", port=AMQPClient.AMQP_DEFAULT_PORT, auth_params=AMQPClient.DEFAULT_AUTH_PARAMS, tls=false)
    # open a connection, negotiating TLS if requested
    @debug("opening connection")
    amqps = tls ? amqps_configure() : nothing
    conn = connection(; virtualhost=virtualhost, host=host, port=port, auth_params=auth_params, amqps=amqps)

    # open a channel on the new connection
    @debug("opening channel")
    chan = channel(conn, AMQPClient.UNUSED_CHANNEL, true)
    @test chan.id == 1

    # declare the test queue and bind it to the direct exchange
    @debug("creating queues")
    success, name, message_count, consumer_count = queue_declare(chan, QUEUE1)
    @test success
    @test message_count == 0
    @test queue_bind(chan, QUEUE1, EXCG_DIRECT, ROUTE1)

    return conn, chan
end
"""
    teardown(conn, chan1, delete=false)

Close the channel and connection, waiting for each close handshake to
complete. When `delete` is true, first purge, unbind and delete `QUEUE1`.
"""
function teardown(conn, chan1, delete=false)
    @info("closing down")
    if delete
        # purge, unbind and delete the test queue
        purged, remaining = queue_purge(chan1, QUEUE1)
        @test purged
        @test remaining == 0
        @test queue_unbind(chan1, QUEUE1, EXCG_DIRECT, ROUTE1)
        deleted, remaining = queue_delete(chan1, QUEUE1)
        @test deleted
        @test remaining == 0
    end

    # close the channel first and wait until it reports closed
    close(chan1)
    AMQPClient.wait_for_state(chan1, AMQPClient.CONN_STATE_CLOSED)
    @test !isopen(chan1)

    # then close the connection itself
    close(conn)
    AMQPClient.wait_for_state(conn, AMQPClient.CONN_STATE_CLOSED)
    @test !isopen(conn)
end
# Publish NMSGS copies of the preallocated message M to the direct exchange,
# logging and pausing for a second every 10000 messages.
function publish(conn, chan1)
@info("starting basic publisher")
# publish N messages
for idx in 1:NMSGS
basic_publish(chan1, M; exchange=EXCG_DIRECT, routing_key=ROUTE1)
# periodic progress log plus a pause to let the broker/consumer keep up
if (idx % 10000) == 0
@info("publishing", idx)
sleep(1)
end
end
end
# Consume NMSGS messages from QUEUE1 via a callback consumer, then report the
# send+receive throughput (messages per second).
function consume(conn, chan1)
@info("starting basic consumer")
# start a consumer task
msg_count = 0
start_time = time()
end_time = 0
consumer_fn = (rcvd_msg) -> begin
msg_count += 1
if ((msg_count % 10000) == 0) || (msg_count == NMSGS)
#basic_ack(chan1, 0; all_upto=true)
@info("ack sent", msg_count)
end
# per-message ack only when not running in no-ack mode
no_ack || basic_ack(chan1, rcvd_msg.delivery_tag)
# record when the last expected message arrives
if msg_count == NMSGS
end_time = time()
end
end
success, consumer_tag = basic_consume(chan1, QUEUE1, consumer_fn; no_ack=no_ack)
@test success
# wait to receive all messages
while msg_count < NMSGS
@info("$msg_count of $NMSGS messages processed")
sleep(2)
end
# cancel the consumer task
@test basic_cancel(chan1, consumer_tag)
# time to send and receive (floored at 1s to avoid division blow-ups)
total_time = max(end_time - start_time, 1)
@info("time to send and receive", message_count=NMSGS, total_time, rate=NMSGS/total_time)
end
"""
    run_publisher()

Script entry point for the publisher role. Reads host, port and tls from
`ARGS[2:4]`, publishes the test messages, and tears down without deleting
the queue (the consumer side cleans up).
"""
function run_publisher()
    # ARGS layout: [1]=role flag, [2]=host, [3]=port, [4]=tls
    host = ARGS[2]
    port = parse(Int, ARGS[3])
    tls = parse(Bool, ARGS[4])
    conn, chan = AMQPTestThroughput.setup(; host=host, port=port, tls=tls)
    AMQPTestThroughput.publish(conn, chan)
    # exit without destroying the queue
    AMQPTestThroughput.teardown(conn, chan, false)
    return nothing
end
"""
    run_consumer()

Script entry point for the consumer role. Reads host, port and tls from
`ARGS[2:4]`, consumes the test messages, then tears down and deletes the
queue after giving the publisher time to exit.
"""
function run_consumer()
    # ARGS layout: [1]=role flag, [2]=host, [3]=port, [4]=tls
    host = ARGS[2]
    port = parse(Int, ARGS[3])
    tls = parse(Bool, ARGS[4])
    conn, chan = AMQPTestThroughput.setup(; host=host, port=port, tls=tls)
    AMQPTestThroughput.consume(conn, chan)
    @debug("waiting for publisher to exit gracefully...")
    sleep(10) # wait for publisher to exit gracefully
    AMQPTestThroughput.teardown(conn, chan, true)
    return nothing
end
"""
    spawn_test(script, flags, host, port, tls)

Run `script` (located next to this file) in a fresh Julia process, forwarding
this process's inline and code-coverage settings plus the role flag and
connection parameters. Throws if the child process exits with failure.

Fix: removed the unused local `ret` that captured `run`'s result.
"""
function spawn_test(script, flags, host, port, tls)
    opts = Base.JLOptions()
    # mirror the parent's --inline and --code-coverage settings in the child
    inline_flag = opts.can_inline == 1 ? `` : `--inline=no`
    cov_flag = (opts.code_coverage == 1) ? `--code-coverage=user` :
               (opts.code_coverage == 2) ? `--code-coverage=all` :
               ``
    srvrscript = joinpath(dirname(@__FILE__), script)
    srvrcmd = `$(joinpath(JULIA_HOME, "julia")) $cov_flag $inline_flag $srvrscript $flags $host $port $tls`
    @debug("Running tests from ", script, flags, host, port, tls)
    run(srvrcmd)  # run() throws on a non-zero exit status
    @debug("Finished ", script, flags, host, port, tls)
    nothing
end
"""
    runtests(; host, port, tls)

Run the throughput test end-to-end: launch the consumer in its own Julia
process, give it a 10 second head start to set up the queue, then launch the
publisher, and wait (via `@sync`) for both processes to finish.

Fix: removed the unused local bindings `consumer` and `publisher`; `@sync`
tracks the spawned tasks regardless.
"""
function runtests(; host="localhost", port=AMQPClient.AMQP_DEFAULT_PORT, tls=false)
    @sync begin
        @info("starting consumer")
        @async spawn_test("test_throughput.jl", "--runconsumer", host, port, tls)
        sleep(10)  # let the consumer declare and bind the queue first
        @info("starting publisher")
        @async spawn_test("test_throughput.jl", "--runpublisher", host, port, tls)
    end
    nothing
end
end # module AMQPTestThroughput
# When this file is invoked as a script, the first argument selects the role;
# spawn_test launches each role in its own Julia process using these flags.
!isempty(ARGS) && (ARGS[1] == "--runpublisher") && AMQPTestThroughput.run_publisher()
!isempty(ARGS) && (ARGS[1] == "--runconsumer") && AMQPTestThroughput.run_consumer()
| AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | docs | 3658 | ## Connections and Channels
More than one connection can be made to a single server, though one is sufficient for most cases.
The IANA assigned port number for AMQP is 5672. It is available as the constant `AMQPClient.AMQP_DEFAULT_PORT`.
The IANA assigned port number for AMQPS is 5671. It is available as the constant `AMQPClient.AMQPS_DEFAULT_PORT`.
The `AMQPPLAIN` authentication mechanism is supported as of now.
```julia
using AMQPClient
port = AMQPClient.AMQP_DEFAULT_PORT
login = get_userid() # default is usually "guest"
password = get_password() # default is usually "guest"
auth_params = Dict{String,Any}("MECHANISM"=>"AMQPLAIN", "LOGIN"=>login, "PASSWORD"=>password)
conn = connection(; virtualhost="/", host="localhost", port=port, auth_params=auth_params)
```
An example of making an AMQPS connection:
```julia
using AMQPClient
port = AMQPClient.AMQPS_DEFAULT_PORT
login = get_userid() # default is usually "guest"
password = get_password() # default is usually "guest"
auth_params = Dict{String,Any}("MECHANISM"=>"AMQPLAIN", "LOGIN"=>login, "PASSWORD"=>password)
amqps = amqps_configure()
conn = connection(; virtualhost="/", host="amqps.example.com", port=port, auth_params=auth_params, amqps=amqps)
```
The `amqps_configure` method can be provided additional parameters for TLS connections:
- cacerts: A CA certificate file (or its contents) to use for certificate verification.
- verify: Whether to verify server certificate. Default is false if cacerts is not provided and true if it is.
- client_cert and client_key: The client certificate and corresponding private key to use. Default is nothing (no client certificate). Values can either be the file name or certificate/key contents.
```julia
amqps_configure(;
cacerts = nothing,
verify = MbedTLS.MBEDTLS_SSL_VERIFY_NONE,
client_cert = nothing,
client_key = nothing
)
```
Multiple channels can be multiplexed over a single connection. Channels are identified by their numeric id.
An existing channel can be attached to, or a new one created if it does not exist.
Specifying `AMQPClient.UNUSED_CHANNEL` as channel id during creation will automatically assign an unused id.
```julia
chan1 = channel(conn, AMQPClient.UNUSED_CHANNEL, true)
# to attach to a channel only if it already exists:
chanid = 2
chan2 = channel(conn, chanid)
# to specify a channel id and create if it does not exists yet:
chanid = 3
chan3 = channel(conn, chanid, true)
```
Channels and connections remain open until they are closed or they run into an error. The server can also initiate a close in some cases.
Channels represent logical multiplexing over a single connection, so closing a connection implicitly closes all its channels.
```julia
if isopen(conn)
close(conn)
# close is an asynchronous operation. To wait for the negotiation to complete:
AMQPClient.wait_for_state(conn, AMQPClient.CONN_STATE_CLOSED)
end
# an individual channel can be closed similarly too
```
The `connection` and `channel` methods can also be used with Julia's do-block syntax, which ensures they are closed when the block exits.
```julia
connection(; virtualhost="/", host="localhost", port=port, auth_params=auth_params) do conn
channel(conn, AMQPClient.UNUSED_CHANNEL, true) do chan
# use channel
end
end
```
If a channel or connection is closed due to an error or by the server, the `closereason` attribute (type `CloseReason`) of the channel or connection object
may contain the error code and diagnostic message.
```julia
if conn.closereason !== nothing
@error("connection has errors", code=conn.closereason.code, message=conn.closereason.msg)
end
```
| AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | docs | 1897 | ## Exchanges and Queues
Constants representing the standard exchange types are available as: `EXCHANGE_TYPE_DIRECT`, `EXCHANGE_TYPE_FANOUT`, `EXCHANGE_TYPE_TOPIC`, and `EXCHANGE_TYPE_HEADERS`.
Exchanges can be declared and deleted using the `exchange_declare` and `exchange_delete` APIs. They return a boolean to indicate success (`true`) or failure (`false`).
Declaring an already existing exchange simply attaches to it, a new exchange is created otherwise.
```julia
# declare (create if they do not exist) new exchange
EXCG_DIRECT = "MyDirectExcg"
EXCG_FANOUT = "MyFanoutExcg"
@assert exchange_declare(chan1, EXCG_DIRECT, EXCHANGE_TYPE_DIRECT)
@assert exchange_declare(chan1, EXCG_FANOUT, EXCHANGE_TYPE_FANOUT)
# operate with the exchanges...
# delete exchanges
@assert exchange_delete(chan1, EXCG_DIRECT)
@assert exchange_delete(chan1, EXCG_FANOUT)
```
Queues can similarly be declared and deleted.
Attaching to an existing queue also returns the number of pending messages and the number of consumers attached to the queue.
```julia
QUEUE1 = "MyQueue"
success, queue_name, message_count, consumer_count = queue_declare(chan1, QUEUE1)
@assert success
# operate with the queue
# delete the queue
success, message_count = queue_delete(chan1, QUEUE1)
@assert success
```
Messages are routed by binding queues and exchanges to other exchanges. The type of exchange and the routing key configured determine the path.
```julia
ROUTE1 = "routingkey1"
# bind QUEUE1 to EXCG_DIRECT,
# specifying that only messages with routing key ROUTE1 should be delivered to QUEUE1
@assert queue_bind(chan1, QUEUE1, EXCG_DIRECT, ROUTE1)
# operate with the queue
# remove the binding
@assert queue_unbind(chan1, QUEUE1, EXCG_DIRECT, ROUTE1)
```
Messages on a queue can be purged:
```julia
success, message_count = queue_purge(chan1, QUEUE1)
@assert success
@info("messages purged", message_count)
```
| AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | docs | 1335 | # AMQPClient
[](https://github.com/JuliaComputing/AMQPClient.jl/actions?query=workflow%3ACI+branch%3Amaster)
[](http://codecov.io/github/JuliaComputing/AMQPClient.jl?branch=master)
A Julia [AMQP (Advanced Message Queuing Protocol)](http://www.amqp.org/) Client.
Supports protocol version 0.9.1 and [RabbitMQ](https://www.rabbitmq.com/) extensions.
This library has been tested with RabbitMQ, though it should also work with other AMQP 0.9.1 compliant systems.
# Using AMQPClient:
- [Connections and Channels](CONNECTIONS.md)
- [Exchanges and Queues](QUEUES.md)
- [Sending and Receiving Messages](SENDRECV.md)
Note: These documents may not mention all implemented APIs yet. Please look at the protocol references or exported methods of the package to get the complete list.
### Protocol reference:
- [AMQP v0.9.1](http://www.amqp.org/resources/download)
- [RabbitMQ Extensions](https://www.rabbitmq.com/extensions.html)
### Examples
Julia code examples from [RabbitMQ tutorials](https://www.rabbitmq.com/getstarted.html) can be found in [rabbitmq/rabbitmq-tutorials](https://github.com/rabbitmq/rabbitmq-tutorials/tree/main/julia) repository.
| AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
] | 0.5.1 | 508457ed7a2afb432590247dc363fffc51f242fc | docs | 4516 | ## Sending and Receiving Messages
An AMQP message is represented by the `Message` type. Receiving a message from a queue returns an instance of this type. To send a `Message` must be created first.
Messages can also have one or more of these properties:
| property name | description |
| ---------------- | ---------------------------------------------------------------------------------- |
| content_type | MIME content type (MIME typing) |
| content_encoding | MIME content encoding (MIME typing) |
| headers | message header field table (For applications, and for header exchange routing) |
| delivery_mode | `NONPERSISTENT` or `PERSISTENT` (For queues that implement persistence) |
| priority | message priority, 0 to 9 (For queues that implement priorities) |
| correlation_id | application correlation identifier (For application use, no formal behaviour) |
| reply_to | address to reply to (For application use, no formal behaviour) |
| expiration | message expiration specification (For application use, no formal behaviour) |
| message_id | application message identifier (For application use, no formal behaviour) |
| timestamp | message timestamp (For application use, no formal behaviour) |
| message_type | message type name (For application use, no formal behaviour) |
| user_id | creating user id (For application use, no formal behaviour) |
| app_id | creating application id (For application use, no formal behaviour) |
| cluster_id | reserved, must be empty (Deprecated, was old cluster-id property) |
A message received from a queue can also have the following attributes:
| attribute name | type | description |
| ---------------- | ----------- | ----------------------------------------------------------------------------------------------------------------- |
| consumer_tag | String | Identifier for the queue consumer, valid within the current channel. |
| delivery_tag | Int64 | A tag to refer to a delivery attempt. This can be used to acknowledge/reject the message. |
| redelivered      | Bool        | Whether this message was delivered earlier, but was rejected or not acknowledged.                                 |
| exchange | String | Name of the exchange that the message was originally published to. May be empty, indicating the default exchange. |
| routing_key | String | The routing key name specified when the message was published. |
| remaining | Int32 | Number of messages remaining in the queue. |
```julia
# create a message with 10 bytes of random value as data
msg = Message(rand(UInt8, 10))
# create a persistent plain text message
data = convert(Vector{UInt8}, codeunits("hello world"))
msg = Message(data, content_type="text/plain", delivery_mode=PERSISTENT)
```
Messages are published to an exchange, optionally specifying a routing key.
```julia
EXCG_DIRECT = "MyDirectExcg"
ROUTE1 = "routingkey1"
basic_publish(chan1, msg; exchange=EXCG_DIRECT, routing_key=ROUTE1)
```
To poll a queue for messages:
```julia
msg = basic_get(chan1, QUEUE1, false)
# check if we got a message
if msg !== nothing
# process msg...
# acknowledge receipt
basic_ack(chan1, msg.delivery_tag)
end
```
To subscribe for messages (register an asynchronous callback):
```julia
# define a callback function to process messages
function consumer(msg)
# process msg...
# acknowledge receipt
basic_ack(chan1, msg.delivery_tag)
end
# subscribe and register the callback function
success, consumer_tag = basic_consume(chan1, QUEUE1, consumer)
@assert success
println("consumer registered with tag $consumer_tag")
# go ahead with other stuff...
# or wait for an indicator for shutdown
# unsubscribe the consumer from the queue
basic_cancel(chan1, consumer_tag)
```
| AMQPClient | https://github.com/JuliaComputing/AMQPClient.jl.git |
|
[
"MIT"
] | 0.0.2 | bcb1b982169b75aa4017c72d47e8341f2598b50e | code | 4826 | # FastGeoProjections to Proj speed comparison
using FastGeoProjections
using BenchmarkTools
using DataFrames
using GLMakie
outfile = abspath("./benchmark/benchmark");
ns = [100, 1000, 10000, 100000, 1000000]
epsg_target_source = [
(EPSG(4326), EPSG(3413)),
(EPSG(3031), EPSG(4326)),
(EPSG(4326), EPSG(32636)),
(EPSG(32735), EPSG(4326))
]
system = "Apple M2 Max"
threads = Threads.nthreads()
solutions = ["Proj: single-thread", "Proj: multi-thread", "FGP: single-thread", "FGP: multi-thread"]
# Float32 and GPU yielded little benefit
df = DataFrame();
for solution in solutions
df[!, solution*"_time"] = zeros(length(epsg_target_source) * length(ns))
df[!, solution*"_err"] = zeros(length(epsg_target_source) * length(ns))
end
df[!, :npoints] = zeros(length(epsg_target_source) * length(ns));
df[!, :epsg_target_source] .= [(EPSG(4326), EPSG(3413))];
# Benchmark one transformation configuration: time construction + application of
# a Transformation over (X, Y), record the minimum run time and the maximum
# absolute error versus the reference coordinates (X0, Y0) into row `r` of `df`.
function trans_bench(X, Y, X0, Y0, df, r, solution, source_epsg, target_epsg, threaded, proj_only, always_xy)
# benchmark includes Transformation construction, not just application
b = @benchmark begin
trans = FastGeoProjections.Transformation($source_epsg, $target_epsg; threaded=$threaded, proj_only=$proj_only, always_xy=$always_xy)
X1, Y1 = trans($X, $Y)
end
trans = FastGeoProjections.Transformation(source_epsg, target_epsg; threaded=threaded, proj_only=proj_only, always_xy=always_xy)
display(minimum(b))
X1, Y1 = trans(X, Y)
# worst-case deviation from the reference solution across both coordinates
err = maximum(abs.([X1 - X0; Y1 - Y0]))
# NOTE(review): :lightgrey is not a standard printstyled color name; output falls back to default
printstyled("\n MAXIMUM ERROR:\t\t$err\n\n", color=:lightgrey)
df[r, solution*"_time"] = minimum(b).time
df[r, solution*"_err"] = err
return df
end
# Main benchmark loop: for every point count and every projection pair, build
# random coordinates in the source CRS, compute a reference result, then time
# the four solution configurations and record results into `df`.
for (i, n) in enumerate(ns)
rando = rand(n);
for k = eachindex(epsg_target_source)
source_epsg = epsg_target_source[k][1]
target_epsg = epsg_target_source[k][2]
# build random inputs valid for each pair; for projected source CRSs (k==2, k==4)
# geographic coordinates are first converted into the projected system
if k == 1
Y = rando * 30 .+ 60;
X = rando * 360 .- 180;
elseif k == 2
Y = -(rando * 30 .+ 60);
X = rando * 360 .- 180;
X, Y = FastGeoProjections.polarstereo_fwd(X, Y; lat_ts=-71.0, lon_0=0.0);
elseif k == 3
Y = rando * 80.
X = rando * 9 .+ 28.5
elseif k == 4
Y = rando * -80.0
X = rando * 9 .+ 22.5
X, Y = FastGeoProjections.utm_fwd(X, Y; epsg=EPSG(source_epsg))
end
# row index: pairs are laid out in blocks of length(ns) rows
r = i+(k-1)*length(ns);
df[r, :npoints] = n;
df[r, :epsg_target_source] = epsg_target_source[k];
printstyled("**EPSG:$(source_epsg.val) to EPSG:$(target_epsg.val) [n = $n]**\n", color=:blue)
# reference result: single-threaded Proj-backed transformation
trans = FastGeoProjections.Transformation(source_epsg, target_epsg; threaded=false, proj_only=true, always_xy=true)
X0, Y0 = trans(X, Y)
printstyled("*Proj: single-thread*\n", color=:lightgrey)
threaded = false; proj_only = true; always_xy = true
df = trans_bench(X, Y, X0, Y0, df, r, solutions[1], source_epsg, target_epsg, threaded, proj_only, always_xy)
printstyled("*Proj: multi-thread*\n", color=:lightgrey)
threaded = true; proj_only = true; always_xy = true
df = trans_bench(X, Y, X0, Y0, df, r, solutions[2], source_epsg, target_epsg, threaded, proj_only, always_xy)
printstyled("*FastGeoProjections: single-thread*\n", color=:lightgrey)
threaded = false; proj_only = false; always_xy = true
df = trans_bench(X, Y, X0, Y0, df, r, solutions[3], source_epsg, target_epsg, threaded, proj_only, always_xy)
printstyled("*FastGeoProjections: multi-thread - Float64*\n", color=:lightgrey)
threaded = true; proj_only = false; always_xy = true
df = trans_bench(X, Y, X0, Y0, df, r, solutions[4], source_epsg, target_epsg, threaded, proj_only, always_xy)
end
end
# Plot the benchmark results: one log-log subplot per projection pair (two per
# row), one line per solution, then save the figure as a JPEG.
Makie.inline!(false)
f = Figure(resolution=(1500, 750 * ceil(length(epsg_target_source)/2)), fontsize = 25)
col = Makie.wong_colors();
for (i, epsg) in enumerate(epsg_target_source)
# subplot grid position: two columns per row
r = ceil(Int64, i / 2)
c = i - 2*(r-1)
ax = Axis(
f[r, c],
yscale=log10,
xscale=log10,
title="EPSG:$(epsg[1].val) => EPSG:$(epsg[2].val)",
yminorticksvisible=true,
yminorgridvisible=true,
xlabel="points converted",
ylabel="compute time [µs]",
yminorticks=IntervalsBetween(5),
)
# rows of df belonging to this projection pair
rs = df.epsg_target_source .== [epsg]
# NOTE(review): `re` is computed but never used
re = (df.epsg_target_source .== [epsg]) .& (df[:, :npoints] .== maximum(ns))
# times are divided by 1000 to match the µs axis label (presumably stored in ns — TODO confirm)
lins = [lines!(df[rs, :npoints], df[rs, solution*"_time"] ./ 1000, label="$solution", linewidth=6, color=col[i]) for (i, solution) in enumerate(solutions)]
# only the first subplot carries the legend
if i == 1
legends = axislegend(ax, lins, solutions, position=:lt)
end
end
supertitle = Label(f[0, :], "FastGeoProjections.jl benchmarks, $system using $threads threads", fontsize=30)
save(abspath("$outfile.jpg"), f)
|
[
"MIT"
module FastGeoProjections

# Proj dependency is kept until this package is more mature; the other packages
# supply the EPSG type, SIMD loop macros (@turbo), and the Transformation API.
using Proj
using GeoFormatTypes
using LoopVectorization
using CoordinateTransformations

# Include order matters: later files use definitions from earlier ones.
include("ellipsoids.jl")
include("polarstereo.jl")
include("tranmerc.jl")
include("utm_ups.jl")
include("epsg2epsg.jl")
include("coord.jl")

export Transformation
export inv
export EPSG

# Precompile the hot projection kernels for both float widths.
for kernel in (tranmerc_fwd, tranmerc_inv, polarstereo_fwd, polarstereo_inv)
    precompile(kernel, (Vector{Float64}, Vector{Float64}))
    precompile(kernel, (Vector{Float32}, Vector{Float32}))
end

end
|
[
"MIT"
] | 0.0.2 | bcb1b982169b75aa4017c72d47e8341f2598b50e | code | 3521 | """
Transformation(source_epsg, target_epsg; threaded=true, always_xy=false, proj_only=false)
Create a Transformation that is a pipeline between two known coordinate reference systems.
Transformation implements the
[CoordinateTransformations.jl](https://github.com/JuliaGeometry/CoordinateTransformations.jl)
API.
To do the transformation on coordinates, call an instance of this struct like a function.
See below for an example. These functions accept either 2 numbers or two vectors of numbers.
`source_epsg` and `target_epsg` must be an EPSG authority code (see https://epsg.io/), like
"EPSG:3413" or EPSG(3413). The created pipeline will expect that the coordinates respect
the axis order and axis unit of the official definition (so for example,
for EPSG:4326, with latitude first and longitude next, in degrees). Similarly, when using
that syntax for a target CRS, output values will be emitted according to the official
definition of this CRS. This behavior can be overruled by passing `always_xy=true`.
`threaded` turns on an off multi-threading.
`always_xy` can optionally fix the axis orderding to x,y or lon,lat order. By default it is
`false`, meaning the order is defined by the authority in charge of a given coordinate
reference system, as explained in [this PROJ FAQ
entry](https://proj.org/faq.html#why-is-the-axis-ordering-in-proj-not-consistent).
`proj_only` can optionally force the use of Proj.jl for transformations even when a native
FastGeoProjections implementation is available. By default, Proj.jl is only used when a
native FastGeoProjections implementation is not available.
# Examples
```julia
julia> trans = Transformation("EPSG:4326", "EPSG:28992", always_xy=true)
Transformation
source: WGS 84 (with axis order normalized for visualization)
target: Amersfoort / RD New
julia> trans(5.39, 52.16) # this is in lon,lat order, since we set always_xy to true
(155191.3538124342, 463537.1362732911)
```
"""
# A callable mapping between two coordinate reference systems. Instances are
# built by `Transformation(source_epsg, target_epsg; ...)` and applied as
# functions: `trans(x, y)`.
mutable struct Transformation <: CoordinateTransformations.Transformation
    pj::Function        # projection closure from `epsg2epsg`; maps (x, y) -> (x', y')
    threaded::Bool      # whether the closure was built to use multiple threads
    proj_only::Bool     # whether Proj.jl was forced even when a native path exists
end
# Construct a Transformation from two EPSG codes. The projection closure is
# built once here and captures everything needed at call time.
function Transformation(
    source_epsg::EPSG,
    target_epsg::EPSG;
    threaded::Bool=true,
    always_xy::Bool=false,
    proj_only::Bool=false
)
    projector = epsg2epsg(source_epsg, target_epsg; threaded, always_xy, proj_only)
    return Transformation(projector, threaded, proj_only)
end
# Construct a Transformation from two "EPSG:XXXX" code strings by delegating
# to the EPSG-typed constructor.
function Transformation(
    source_epsg::String,
    target_epsg::String;
    threaded::Bool=true,
    always_xy::Bool=false,
    proj_only::Bool=false
)
    return Transformation(
        EPSG(source_epsg),
        EPSG(target_epsg);
        threaded=threaded,
        always_xy=always_xy,
        proj_only=proj_only,
    )
end
# Pretty-print a Transformation with its endpoints and options.
# NOTE(review): this reads `trans.pj.source_epsg` etc., i.e. it relies on the
# `pj` closure exposing its captured variables as fields. That holds for the
# native fast path built in `epsg2epsg`, but the Proj.jl-backed closures do not
# capture these names — confirm this does not error for Proj-only transforms.
function Base.show(io::IO, trans::Transformation)
    print(
        io,
        """Transformation
            source_epsg: $(trans.pj.source_epsg)
            target_epsg: $(trans.pj.target_epsg)
            threaded: $(trans.threaded)
            always_xy: $(trans.pj.always_xy)
            proj_only: $(trans.proj_only)
        """,
    )
end
# The inverse transformation simply exchanges the source and target CRS codes;
# threading/backend options are carried over unchanged.
# NOTE(review): assumes the `pj` closure exposes `source_epsg`, `target_epsg`
# and `always_xy` as captured fields — confirm for the Proj.jl-backed path.
function Base.inv(
    trans::Transformation;
)
    return Transformation(
        trans.pj.target_epsg,
        trans.pj.source_epsg;
        threaded=trans.threaded,
        always_xy=trans.pj.always_xy,
        proj_only=trans.proj_only
    )
end
# Apply the transformation to a single coordinate pair, promoting to Float64
# before handing off to the projection closure.
function (trans::Transformation)(x::Real, y::Real)
    return trans.pj(Float64(x), Float64(y))
end
# Apply the transformation to coordinate vectors; elements are promoted to
# Float64 and projected in a single vectorized call.
function (trans::Transformation)(x::AbstractVector, y::AbstractVector)
    return trans.pj(Float64.(x), Float64.(y))
end
|
[
"MIT"
] | 0.0.2 | bcb1b982169b75aa4017c72d47e8341f2598b50e | code | 2209 |
"""
An ellipsoidal representation of the Earth [modified from Geodesy.jl]
"""
struct Ellipsoid
    a::Float64 # Semi-major (equatorial) axis [m]
    b::Float64 # Semi-minor (polar) axis [m]
    f::Float64 # Flattening, f = 1 - b/a
    e::Float64 # First eccentricity, e = sqrt(f * (2 - f))
    # Conventional name - for clarity, should match the name of the const
    # instance in the package!
    name::Union{Nothing,Symbol}
    epsg::EPSG # EPSG code of the ellipsoid (e.g. EPSG(7030) for WGS 84)
end
# Keyword constructor: `a` is mandatory together with exactly one of `b`
# (semi-minor axis) or `f_inv` (inverse flattening).
function Ellipsoid(;
    a::Union{Nothing,Float64}=nothing,
    b::Union{Nothing,Float64}=nothing,
    f_inv::Union{Nothing,Float64}=nothing,
    name::Union{Nothing,Symbol}=nothing,
    epsg::Union{Nothing,EPSG}=nothing,
)
    valid = !isnothing(a) && (isnothing(b) != isnothing(f_inv))
    valid || throw(ArgumentError("Specify parameter 'a' and either 'b' or 'f_inv'"))
    return isnothing(b) ? _ellipsoid_af(a, f_inv, name, epsg) :
           _ellipsoid_ab(a, b, name, epsg)
end
# Build an Ellipsoid from its two semi-axes; flattening and first eccentricity
# are derived quantities.
function _ellipsoid_ab(a::Float64, b::Float64, name, epsg)
    flattening = 1 - b / a
    eccentricity = sqrt(flattening * (2 - flattening))
    return Ellipsoid(a, b, flattening, eccentricity, name, epsg)
end
# Build an Ellipsoid from the semi-major axis and inverse flattening:
# b = a * (1 - f) with f = 1 / f_inv.
function _ellipsoid_af(a::Float64, f_inv::Float64, name, epsg)
    semi_minor = a * (1 - 1 / f_inv)
    return _ellipsoid_ab(a, semi_minor, name, epsg)
end
# A named ellipsoid is displayed via its conventional name and EPSG code (the
# name should resolve to the matching const instance); an anonymous one falls
# back to its defining semi-axes.
function Base.show(io::IO, el::Ellipsoid)
    if isnothing(el.name)
        print(io, "Ellipsoid(a=$(el.a), b=$(el.b))")
    else
        print(io, "Ellipsoid(name = $(el.name), epsg = $(el.epsg))")
    end
end
"""
    ellipsoid(epsg::EPSG)

Return the reference `Ellipsoid` for an EPSG ellipsoid code, e.g. `EPSG(7030)`
for WGS 84. Throws an error for codes that have not been added yet.

TODO: need to adapt this for new EPSG multiple value convention, likely second epsg not first
"""
function ellipsoid(epsg::EPSG)
    code = first(epsg.val)
    if code == 7030
        return Ellipsoid(; a = 6378137., f_inv = 298.257223563, name = :WGS_84, epsg = EPSG(7030))
    elseif code == 7019
        return Ellipsoid(; a = 6378137., f_inv = 298.257222101, name = :GRS_1980, epsg = EPSG(7019))
    else
        # fixed typo in the user-facing message ("ellisoid" -> "ellipsoid")
        error("$code ellipsoid is not defined, you may need to add it to ellipsoids.jl")
    end
end
| FastGeoProjections | https://github.com/alex-s-gardner/FastGeoProjections.jl.git |
|
[
"MIT"
] | 0.0.2 | bcb1b982169b75aa4017c72d47e8341f2598b50e | code | 3776 | """
epsg2epsg(source_epsg::EPSG, target_epsg::EPSG; threaded=true, proj_only=false)
Returns the Transform for points defined by `x` and `y`from one coordinate reference systems defined by `source_epsg` to another define by `target_epsg`. Coodinates `x` and `y` can be either a scalar or a vector. Multithreading can be turned on and off with `threaded`. Optimized Julia native code used when available. To force use of Proj set proj_only = true
"""
function epsg2epsg(source_epsg::EPSG, target_epsg::EPSG; threaded=true, proj_only=false, always_xy=true)
    if isfastepsg(source_epsg, target_epsg) && !proj_only
        # Both CRSs have native FastGeoProjections implementations: pipe the
        # points through geodetic coordinates (EPSG:4326).
        f = function (x::Union{Real,Vector{<:Real}}, y::Union{Real,Vector{<:Real}})
            x, y = project_from(source_epsg; threaded=threaded, always_xy=always_xy)(x, y)
            xx, yy = project_to(target_epsg; threaded=threaded, always_xy=always_xy)(x, y)
            return xx, yy
        end
    elseif threaded
        # One cloned Proj context (and one Transformation) per thread, as in the
        # Proj.jl multithreading docs. `:static` scheduling pins each chunk of
        # iterations to a fixed thread, making the `trans[Threads.threadid()]`
        # lookup safe — with the default scheduler, tasks may migrate between
        # threads, so `threadid()` is not stable within an iteration.
        ctxs = [Proj.proj_context_clone() for _ in 1:Threads.nthreads()]
        trans = [Proj.Transformation("EPSG:$(first(source_epsg.val))", "EPSG:$(first(target_epsg.val))"; ctx, always_xy=always_xy) for ctx in ctxs]
        f = function (x::Union{Real,Vector{<:Real}}, y::Union{Real,Vector{<:Real}})
            # NOTE(review): for scalar input `zeros(size(x))` produces 0-dim
            # arrays, so this path returns 0-dim arrays while the native path
            # returns scalars — confirm callers tolerate this.
            xx = zeros(size(x))
            yy = zeros(size(x))
            Threads.@threads :static for i in eachindex(x)
                xx[i], yy[i] = trans[Threads.threadid()](x[i], y[i])
            end
            return xx, yy
        end
    else
        # Single-threaded Proj.jl fallback. Build the Transformation once and
        # reuse it on every call (it was previously reconstructed per call).
        trans = Proj.Transformation("EPSG:$(first(source_epsg.val))", "EPSG:$(first(target_epsg.val))", always_xy=always_xy)
        f = function (x::Union{Real,Vector{<:Real}}, y::Union{Real,Vector{<:Real}})
            xx = zeros(size(x))
            yy = zeros(size(x))
            for i in eachindex(x)
                xx[i], yy[i] = trans(x[i], y[i])
            end
            return xx, yy
        end
    end
    return f
end
## ⬇ ADD FAST PROJECTIONS HERE ⬇ ##

# project from an EPSG => EPSG(4326)
# Returns a closure (x, y) -> (lon, lat) for the natively supported projections.
function project_from(epsg::EPSG; threaded=true, always_xy=true)
    if epsg == EPSG(4326)
        f = (x,y) -> identity((x,y))
    elseif epsg == EPSG(3031)
        f = (x,y) -> polarstereo_inv(x, y; lat_ts=-71.0, lon_0=0.0, ellips=ellipsoid(EPSG(7030)), threaded=threaded, always_xy=always_xy)
    elseif epsg == EPSG(3413)
        f = (x,y) -> polarstereo_inv(x, y; lat_ts=70.0, lon_0=-45.0, ellips=ellipsoid(EPSG(7030)), threaded=threaded, always_xy=always_xy)
    elseif isutm(epsg)
        f = (x,y) -> utm_inv(x, y, threaded=threaded, epsg=epsg, always_xy=always_xy)
    else
        # Previously fell through leaving `f` undefined (UndefVarError);
        # fail with an explicit message instead.
        error("projection from EPSG:$(first(epsg.val)) is not implemented natively; use proj_only=true or add it here")
    end
    return f
end
# project from EPSG(4326) => EPSG
# Returns a closure (lon, lat) -> (x, y) for the natively supported projections.
function project_to(epsg::EPSG; threaded=true, always_xy=true)
    if epsg == EPSG(4326)
        f = (x,y) -> identity((x,y))
    elseif epsg == EPSG(3031)
        f = (x,y) -> polarstereo_fwd(x, y; lat_ts=-71.0, lon_0=0.0, ellips=ellipsoid(EPSG(7030)), threaded=threaded, always_xy=always_xy)
    elseif epsg == EPSG(3413)
        f = (x,y) -> polarstereo_fwd(x, y; lat_ts=70.0, lon_0=-45.0, ellips=ellipsoid(EPSG(7030)), threaded=threaded, always_xy=always_xy)
    elseif isutm(epsg)
        f = (x,y) -> utm_fwd(x, y; threaded=threaded, epsg=epsg, always_xy=always_xy)
    else
        # Previously fell through leaving `f` undefined (UndefVarError);
        # fail with an explicit message instead.
        error("projection to EPSG:$(first(epsg.val)) is not implemented natively; use proj_only=true or add it here")
    end
    return f
end
# List of FastGeoProjections native projections.
# `const` so the global is type-stable when referenced from `isfastepsg`.
const fast_epsgs = [EPSG(3031), EPSG(3413), EPSG(4326)]

# True when BOTH endpoints can be handled natively: either listed above or a
# UTM zone (checked via `isutm`).
function isfastepsg(source_epsg, target_epsg)
    isfast(epsg) = epsg in fast_epsgs || isutm(epsg)
    return isfast(source_epsg) && isfast(target_epsg)
end
| FastGeoProjections | https://github.com/alex-s-gardner/FastGeoProjections.jl.git |
|
[
"MIT"
] | 0.0.2 | bcb1b982169b75aa4017c72d47e8341f2598b50e | code | 6583 | """
polarstereo_fwd(lon, lat; lon_0, lat_ts, ellips, threaded, always_xy)
Returns x and y coordinates [meteres] in Polar Stereographic (PS) coordinates given geodetic
coordinates (EPSG:4326) of longitude and latitude [decimal degrees]. The PS projection is
defined kwargs of: lon_0: the meridian along positive Y axis, lat_ts: standard parallel,
which is the latitude of true scale and an ellipsoid that is define by an equatorial radius
in meters (a) and its eccentricity (e). (The scale-factor computation is currently
disabled in the implementation, so only x and y are returned.)
"""
function polarstereo_fwd(
    lon::Union{AbstractVector{<:AbstractFloat},AbstractFloat},
    lat::Union{AbstractVector{<:AbstractFloat},AbstractFloat};
    lon_0::Real,
    lat_ts::Real,
    ellips::Ellipsoid=ellipsoid(EPSG(7030)),
    threaded=true,
    always_xy=true
)
    # This is a Julia implementation, written by Alex Gardner - JPL/NASA, 2023 of a Matlab
    # version written by Andy Bliss, 9/12/2011
    # With always_xy=false the arguments arrive as (lat, lon); swap to (lon, lat).
    if !always_xy
        (lat, lon) = (lon, lat)
    end
    # All constants below are converted to the element type of the input so the
    # kernel stays type-stable for Float32 and Float64 alike.
    T = eltype(lon);
    # convert projection parameters from degrees to radians
    lat_ts = lat_ts * pi / 180
    lon_0 = lon_0 * pi / 180
    # If the standard parallel is in Southern Hemisphere, switch signs so the
    # same northern-hemisphere formulas apply.
    if lat_ts < 0
        pm = -1 # plus or minus, north lat. or south
        lat_ts = -lat_ts
        lon_0 = -lon_0
    else
        pm = 1
    end
    # Projection constants at the latitude of true scale (see Snyder's
    # "Map Projections - A Working Manual" for the t/m definitions).
    t_c = convert(T, tan(pi / 4 - lat_ts / 2) / ((1 - ellips.e * sin(lat_ts)) / (1 + ellips.e * sin(lat_ts)))^(ellips.e / 2))
    m_c = convert(T, cos(lat_ts) / sqrt(1 - ellips.e^2 * (sin(lat_ts))^2))
    p = convert(T, pi)
    e = convert(T, ellips.e)
    a = convert(T, ellips.a)
    d2r = convert(T, pi / 180)
    lon_0 = convert(T, lon_0)
    # preallocate outputs (scalars are unwrapped again at the end)
    x = Vector{T}(undef, length(lat))
    y = Vector{T}(undef, length(lat))
    #k = Vector{T}(undef, length(lat))   # scale factor — currently disabled
    if !isa(lat, Array)
        lat = [lat]
        lon = [lon]
    end
    # Same kernel twice: the `thread` flag of @turbo must be a literal, so the
    # threaded and serial variants are spelled out separately.
    if threaded && Threads.nthreads() > 1
        @turbo thread = true for i = eachindex(lat)
            t = tan((p / 4) - (lat[i] * d2r * pm / 2)) / ((1 - e * sin(lat[i] * d2r * pm)) / (1 + e * sin(lat[i] * d2r * pm)))^(e / 2)
            #m = cos(lat[i]) / sqrt(1 - e^2 * (sin(lat[i]))^2)
            rho = a * m_c * t / t_c # True Scale at Lat lat_ts
            x[i] = pm * rho * sin(lon[i] * d2r * pm - lon_0)
            y[i] = -pm * rho * cos(lon[i] * d2r * pm - lon_0)
            #k[i] = rho / (a * m)
        end
    else
        @turbo thread = false for i = eachindex(lat)
            t = tan((p / 4) - (lat[i] * d2r * pm / 2)) / ((1 - e * sin(lat[i] * d2r * pm)) / (1 + e * sin(lat[i] * d2r * pm)))^(e / 2)
            #m = cos(lat[i]) / sqrt(1 - e^2 * (sin(lat[i]))^2)
            rho = a * m_c * t / t_c # True Scale at Lat lat_ts
            x[i] = pm * rho * sin(lon[i] * d2r * pm - lon_0)
            y[i] = -pm * rho * cos(lon[i] * d2r * pm - lon_0)
            #k[i] = rho / (a * m)
        end
    end
    # scalar input -> scalar output
    if length(x) == 1
        x = x[1]
        y = y[1]
        #k = k[1]
    end
    return x, y
end
"""
polarstereo_inv(x, y; lon_0, lat_ts, ellips, threaded, always_xy)
Returns geodetic coordinates (EPSG:4326) of longitude and latitude [decimal degrees] given
x and y coordinates [meteres] in Polar Stereographic (PS) coordinates. The PS projection is
defined kwargs of: lon_0: the meridian along positive Y axis, lat_ts: standard parallel,
which is the latitude of true scale and an ellipsoid that is define by an equatorial radius
in meters (a) and its eccentricity (e).
"""
function polarstereo_inv(
    x::Union{AbstractVector{<:AbstractFloat},AbstractFloat},
    y::Union{AbstractVector{<:AbstractFloat},AbstractFloat};
    lon_0::Real,
    lat_ts::Real,
    ellips::Ellipsoid=ellipsoid(EPSG(7030)),
    threaded=false,
    always_xy=true)
    # This is a Julia implementation, written by Alex Gardner - JPL/NASA, 2023 of a Matlab
    # version written by Andy Bliss, 9/12/2011
    # set types: constants are converted to the input element type below so the
    # kernel stays type-stable for Float32 and Float64
    T = eltype(x)
    # convert to radians
    lat_ts = lat_ts * pi / 180
    lon_0 = lon_0 * pi / 180
    # if the standard parallel is in S.Hemi., switch signs (note: -x / -y
    # allocate new arrays, so caller input is not mutated).
    if lat_ts < 0
        pm = -1
        lat_ts = -lat_ts
        lon_0 = -lon_0
        x = -x
        y = -y
    else
        pm = 1
    end
    # Projection constants at the latitude of true scale. See Snyder for details.
    t_c = convert(T, tan(pi / 4 - lat_ts / 2) / ((1 - ellips.e * sin(lat_ts)) / (1 + ellips.e * sin(lat_ts)))^(ellips.e / 2))
    m_c = convert(T, cos(lat_ts) / sqrt(1 - ellips.e^2 * (sin(lat_ts))^2))
    a = convert(T, ellips.a)
    e = convert(T, ellips.e)
    lon_0 = convert(T,lon_0)
    r2d = convert(T, 180 / pi)
    p = convert(T, pi)
    e = convert(T, ellips.e)
    lat = Vector{T}(undef, length(x))
    lon = Vector{T}(undef, length(x))
    if !isa(x, Array)
        x = [x]
        y = [y]
    end
    # Same kernel twice: @turbo's `thread` flag must be a literal, so the
    # threaded variant (array input, >1 thread) and the serial variant are
    # spelled out separately.
    if threaded && isa(x, Array) && Threads.nthreads() > 1
        @turbo thread = true for i = eachindex(x)
            rho = sqrt((pm * x[i]) ^ 2 + (pm * y[i]) ^ 2)
            t = rho * t_c / (a * m_c)
            chi = p / 2 - 2 * atan(t)
            # find lat with a series instead of iterating
            lat[i] = chi + (e^2 / 2 + 5 * e^4 / 24 + e^6 / 12 + 13 * e^8 / 360) * sin(2 * chi) +
                     (7 * e^4 / 48 + 29 * e^6 / 240 + 811 * e^8 / 11520) * sin(4 * chi) +
                     (7 * e^6 / 120 + 81 * e^8 / 1120) * sin(6 * chi) +
                     (4279 * e^8 / 161280) * sin(8 * chi)
            lon[i] = lon_0 + atan(pm * x[i], -y[i])
            # correct the signs and phasing
            lat[i] = lat[i] * pm * r2d
            lon[i] = pm * (mod(pm * lon[i] + p, 2 * p) - p) * r2d
        end
    else
        @turbo thread = false for i = eachindex(x)
            rho = sqrt((pm * x[i])^2 + (pm * y[i])^2)
            t = rho * t_c / (a * m_c)
            chi = p / 2 - 2 * atan(t)
            # find lat with a series instead of iterating
            lat[i] = chi + (e^2 / 2 + 5 * e^4 / 24 + e^6 / 12 + 13 * e^8 / 360) * sin(2 * chi) +
                     (7 * e^4 / 48 + 29 * e^6 / 240 + 811 * e^8 / 11520) * sin(4 * chi) +
                     (7 * e^6 / 120 + 81 * e^8 / 1120) * sin(6 * chi) +
                     (4279 * e^8 / 161280) * sin(8 * chi)
            lon[i] = lon_0 + atan(pm * x[i], -y[i])
            # correct the signs and phasing
            lat[i] = lat[i] * pm * r2d
            lon[i] = pm * (mod(pm * lon[i] + p, 2 * p) - p) * r2d
        end
    end
    # scalar input -> scalar output
    if length(lon) == 1
        lon = lon[1]
        lat = lat[1]
    end
    if always_xy
        return lon, lat
    else
        return lat, lon
    end
end
|
[
"MIT"
] | 0.0.2 | bcb1b982169b75aa4017c72d47e8341f2598b50e | code | 31634 | """
tranmerc_fwd(lon, lat; lon0, lat0, ellips, threaded, always_xy)
Returns x and y coordinates [meteres] in transverse Mercator (TM) projection given geodetic
coordinates (EPSG:4326) of longitude and latitude [decimal degrees]. The TM projection is
defined by kwargs of longitude (lon0) and latitude (lat0), which specify the center of the
projeciton, and an ellipsoid that is define by an equatorial radius in meters (a) and its
eccentricity (e). Also returnes meridian convergence (gam) and scale factor (k).
"""
function tranmerc_fwd(
    lon::Union{AbstractVector{<:AbstractFloat},AbstractFloat},
    lat::Union{AbstractVector{<:AbstractFloat},AbstractFloat};
    lon0::Real=0,
    lat0::Real=0,
    ellips::Ellipsoid=ellipsoid(EPSG(7030)),
    threaded = true,
    always_xy = true
)
    # This code has been heavily modified for maximum performance in Julia from the MATLAB
    # implementation of geographiclib_toolbox-2.0 by Alex Gardner JPL/NASA.
    #
    # This implementation of the projection is based on the series method
    # described in
    #
    #   C. F. F. Karney, Transverse Mercator with an accuracy of a few
    #   nanometers, J. Geodesy 85(8), 475-485 (Aug. 2011);
    #   Addenda: https://geographiclib.sourceforge.io/tm-addenda.html
    #
    # This extends the series given by Krueger (1912) to sixth order in the
    # flattening. In particular the errors in the projection
    # are less than 5 nanometers within 3900 km of the central meridian (and
    # less than 1 mm within 7600 km of the central meridian). The mapping
    # can be continued accurately over the poles to the opposite meridian.
    #
    # Copyright (c) Charles Karney (2012-2022) <[email protected]>.
    if !always_xy
        (lat, lon) = (lon, lat)
    end
    if isa(lon, AbstractFloat)
        lon = [lon]
        lat = [lat]
    end
    T = eltype(lon)
    oneT = one(T)
    zeroT = zero(T)
    # copy so the wrap-around logic below does not mutate caller input
    lat = copy(lat)
    lon = copy(lon)
    if isa(lat, AbstractArray)
        lat = vec(lat)
        lon = vec(lon)
    end
    # parameters are wrapped in dispatch function for type stability
    # NOTE(review): `e2` appears twice in this destructuring (second wins) —
    # likely a port slip; confirm the intended parameter names.
    d2r, p, lon0, lat0, e2, f, e2m, e2, cc, n, b1, a1 =
        _tranmerc_parameters(lon[1], lat0, lon0, ellips.e, ellips.a)
    e = convert(T, ellips.e)
    if lat0 == 0
        y0 = zero(T)
    else
        # FIXED: `cose(lat0 * d2r)` was an undefined name (UndefVarError for any
        # lat0 != 0); the parallel code in `tranmerc_inv` uses `cos` here.
        sbet0, cbet0 = norm2((one(T) - f) * sin(lat0 * d2r), cos(lat0 * d2r))
        y0 = a1 * (atan(sbet0, cbet0) +
                   SinCosSeries(true, sbet0, cbet0, convert.(T, C1f(n))))
    end
    # preallocate outputs and per-point scratch vectors
    k = Vector{T}(undef, length(lon))
    gam = Vector{T}(undef, length(lon))
    x = Vector{T}(undef, length(lon))
    y = Vector{T}(undef, length(lon))
    latsign = Vector{T}(undef, length(lon))
    lonsign = Vector{T}(undef, length(lon))
    backside = Vector{Bool}(undef, length(lon))
    xip = Vector{T}(undef, length(lon))
    etap = Vector{T}(undef, length(lon))
    xi = Vector{T}(undef, length(lon))
    eta = Vector{T}(undef, length(lon))
    fmin = sqrt(floatmin(lon0))
    twoT = convert(T, 2)
    alp = convert.(T, alpf(n))
    T90 = convert(T, 90)
    T180 = convert(T, 180)
    T360 = convert(T, 360)
    # normalize -lon0 into (-180, 180]
    lon00 = rem(-lon0, T360)
    ind = lon00 < (-T360 / twoT)
    lon00 = (lon00 + T360) * ind + lon00 * !ind
    ind = lon00 > (T360 / twoT)
    lon00 = (lon00 - T360) * ind + lon00 * !ind
    # the specific implementation of this code is very deliberate to maximize the
    # performance provided by, and to work within the limits of,
    # LoopVectorization.jl v0.12.159 (hence branch-free arithmetic selects and
    # the duplicated threaded/serial kernels — @turbo's `thread` must be a literal).
    if threaded && Threads.nthreads()>1
        @turbo thread = true for i = eachindex(lon)
            b_1 = rem(lon[i], T360)
            ind_1 = b_1 < (-T360 / twoT)
            b_2 = (b_1 + T360) * ind_1 + b_1 * !ind_1
            ind_2 = b_2 > (T360 / twoT)
            b_3 = (b_2 - T360) * ind_2 + b_2 * !ind_2
            s_1 = lon00 + b_3
            up_1 = s_1 - b_3
            vpp_1 = s_1 - up_1
            up_1 -= lon00
            t_1 = b_3 - vpp_1 - up_1
            u_1 = rem(s_1, T360)
            ind_3 = u_1 < (-T360 / twoT)
            u_2 = (u_1 + T360) * ind_3 + u_1 * !ind_3
            ind_4 = u_2 > (T360 / twoT)
            u_3 = (u_2 - T360) * ind_4 + u_2 * !ind_4
            s_2 = u_3 + t_1
            up_2 = s_2 - t_1
            vpp_2 = s_2 - up_2
            up_2 -= u_3
            t_1 -= (vpp_2 + up_2)
            l = ((s_2 == 0) | (abs(s_2) == T180))
            z_1 = (lon[i] - lon0) * l
            ll = (t_1 * l) != 0
            z_2 = -t_1 * ll + z_1 * !ll
            lon[i] = (copysign(s_2, z_2)*l + (s_2 * !l))
            latsign[i] = sign(lat[i])
            lonsign[i] = sign(lon[i])
            lon[i] = lon[i] * lonsign[i]
            lat[i] = lat[i] * latsign[i]
            backside[i] = lon[i] > T90
            bs = backside[i]>0.5
            b_4 = (bs & (lat[i] == 0))
            latsign[i] = (-oneT * b_4) + (latsign[i] * !b_4)
            lon[i] = (T180 - lon[i]) * bs + lon[i] * !bs
            slam = sin(lon[i] * d2r)
            clam = cos(lon[i] * d2r)
            tau = sin(lat[i] * d2r) / max(fmin, cos(lat[i] * d2r))
            tau1 = sqrt(tau^2 + oneT)
            sig = sinh(e * atanh(e * tau / tau1))
            taup = sqrt(sig^2 + oneT) * tau - sig * tau1
            h1t = sqrt(oneT + tau^2)
            htc = sqrt(taup^2 + clam^2)
            xip[i] = atan(taup, clam)
            etap[i] = asinh(slam / htc)
            gam[i] = atan(slam * taup, clam * h1t) / d2r
            k[i] = sqrt(e2m + e2 * cos(lat[i] * d2r)^2) * h1t / htc
            c_3 = lat[i] == T90
            xip[i] = p / twoT * c_3 + xip[i] * !c_3
            etap[i] = zeroT * c_3 + etap[i] * !c_3
            gam[i] = lon[i] * c_3 + gam[i] * !c_3
            k[i] = cc * c_3 + k[i] * !c_3
        end
        # splitting function in 2 greatly improves compile time
        @turbo thread = true for i = eachindex(lon)
            c0 = cos(twoT * xip[i]) # turbo
            ch0 = cosh(twoT * etap[i]) # turbo
            s0 = sin(twoT * xip[i]) # turbo
            sh0 = sinh(twoT * etap[i]) # turbo
            ar = twoT * c0 * ch0
            ai = twoT * -s0 * sh0
            # --- j = 6 ------
            y1r_6 = alp[6]
            y1i_6 = zeroT
            z1r_6 = twoT * 6 * alp[6]
            z1i_6 = zeroT
            y0r_6 = ar * y1r_6 - ai * y1i_6 + alp[5]
            y0i_6 = ar * y1i_6 + ai * y1r_6
            z0r_6 = ar * z1r_6 - ai * z1i_6 + twoT * 5 * alp[5]
            z0i_6 = ar * z1i_6 + ai * z1r_6
            # --- j = 4 ------
            y1r_4 = ar * y0r_6 - ai * y0i_6 - y1r_6 + alp[4]
            y1i_4 = ar * y0i_6 + ai * y0r_6 - y1i_6
            z1r_4 = ar * z0r_6 - ai * z0i_6 - z1r_6 + twoT * 4 * alp[4]
            z1i_4 = ar * z0i_6 + ai * z0r_6 - z1i_6
            y0r_4 = ar * y1r_4 - ai * y1i_4 - y0r_6 + alp[3]
            y0i_4 = ar * y1i_4 + ai * y1r_4 - y0i_6
            z0r_4 = ar * z1r_4 - ai * z1i_4 - z0r_6 + twoT * 3 * alp[3]
            z0i_4 = ar * z1i_4 + ai * z1r_4 - z0i_6
            # --- j = 2 ------
            y1r_2 = ar * y0r_4 - ai * y0i_4 - y1r_4 + alp[2]
            y1i_2 = ar * y0i_4 + ai * y0r_4 - y1i_4
            z1r_2 = ar * z0r_4 - ai * z0i_4 - z1r_4 + twoT * 2 * alp[2]
            z1i_2 = ar * z0i_4 + ai * z0r_4 - z1i_4
            y0r = ar * y1r_2 - ai * y1i_2 - y0r_4 + alp[1]
            y0i = ar * y1i_2 + ai * y1r_2 - y0i_4
            z0r_2 = ar * z1r_2 - ai * z1i_2 - z0r_4 + twoT * alp[1]
            z0i_2 = ar * z1i_2 + ai * z1r_2 - z0i_4
            z1r = oneT - z1r_2 + z0r_2 * ar / twoT - z0i_2 * ai / twoT
            z1i = -z1i_2 + z0r_2 * ai / twoT + z0i_2 * ar / twoT
            xi[i] = xip[i] + y0r * s0 * ch0 - y0i * c0 * sh0
            eta[i] = etap[i] + y0r * c0 * sh0 + y0i * s0 * ch0
            gam[i] -= -atan(z1i, z1r) / d2r
            k[i] *= (b1 * sqrt(z1r^2 + z1i^2))
            bs = backside[i] > 0.5
            xi[i] = (p - xi[i]) * bs + (xi[i] * !bs)
            y[i] = a1 * xi[i] * latsign[i]
            x[i] = a1 * eta[i] * lonsign[i]
            gam[i] = ((T180 - gam[i]) * bs) + (gam[i] * !bs)
            yx_1 = rem(gam[i] * latsign[i] * lonsign[i], T360)
            ind_lt = yx_1 < (-T360 / twoT)
            yx_2 = (yx_1 + T360) * ind_lt + yx_1 * !ind_lt
            ind_gt = yx_2 > (T360 / twoT)
            yx = (yx_2 - T360) * ind_gt + (yx_2 * !ind_gt)
            ind_5 = yx == -T180
            gam[i] = (-yx * ind_5) + (yx * !ind_5)
            y[i] -= y0
        end
    else
        @turbo thread = false for i = eachindex(lon)
            b_1 = rem(lon[i], T360)
            ind_1 = b_1 < (-T360 / twoT)
            b_2 = (b_1 + T360) * ind_1 + b_1 * !ind_1
            ind_2 = b_2 > (T360 / twoT)
            b_3 = (b_2 - T360) * ind_2 + b_2 * !ind_2
            s_1 = lon00 + b_3
            up_1 = s_1 - b_3
            vpp_1 = s_1 - up_1
            up_1 -= lon00
            t_1 = b_3 - vpp_1 - up_1
            u_1 = rem(s_1, T360)
            ind_3 = u_1 < (-T360 / twoT)
            u_2 = (u_1 + T360) * ind_3 + u_1 * !ind_3
            ind_4 = u_2 > (T360 / twoT)
            u_3 = (u_2 - T360) * ind_4 + u_2 * !ind_4
            s_2 = u_3 + t_1
            up_2 = s_2 - t_1
            vpp_2 = s_2 - up_2
            up_2 -= u_3
            t_1 -= (vpp_2 + up_2)
            l = ((s_2 == 0) | (abs(s_2) == T180))
            z_1 = (lon[i] - lon0) * l
            ll = (t_1 * l) != 0
            z_2 = -t_1 * ll + z_1 * !ll
            lon[i] = (copysign(s_2, z_2)*l + (s_2 * !l))
            latsign[i] = sign(lat[i])
            lonsign[i] = sign(lon[i])
            lon[i] = lon[i] * lonsign[i]
            lat[i] = lat[i] * latsign[i]
            backside[i] = lon[i] > T90
            bs = backside[i]>0.5
            b_4 = (bs & (lat[i] == 0))
            latsign[i] = (-oneT * b_4) + (latsign[i] * !b_4)
            lon[i] = (T180 - lon[i]) * bs + lon[i] * !bs
            slam = sin(lon[i] * d2r)
            clam = cos(lon[i] * d2r)
            tau = sin(lat[i] * d2r) / max(fmin, cos(lat[i] * d2r))
            tau1 = sqrt(tau^2 + oneT)
            sig = sinh(e * atanh(e * tau / tau1))
            taup = sqrt(sig^2 + oneT) * tau - sig * tau1
            h1t = sqrt(oneT + tau^2)
            htc = sqrt(taup^2 + clam^2)
            xip[i] = atan(taup, clam)
            etap[i] = asinh(slam / htc)
            gam[i] = atan(slam * taup, clam * h1t) / d2r
            k[i] = sqrt(e2m + e2 * cos(lat[i] * d2r)^2) * h1t / htc
            c_3 = lat[i] == T90
            xip[i] = p / twoT * c_3 + xip[i] * !c_3
            etap[i] = zeroT * c_3 + etap[i] * !c_3
            gam[i] = lon[i] * c_3 + gam[i] * !c_3
            k[i] = cc * c_3 + k[i] * !c_3
        end
        # splitting function in 2 greatly improves compile time
        @turbo thread = false for i = eachindex(lon)
            c0 = cos(twoT * xip[i]) # turbo
            ch0 = cosh(twoT * etap[i]) # turbo
            s0 = sin(twoT * xip[i]) # turbo
            sh0 = sinh(twoT * etap[i]) # turbo
            ar = twoT * c0 * ch0
            ai = twoT * -s0 * sh0
            # --- j = 6 ------
            y1r_6 = alp[6]
            y1i_6 = zeroT
            z1r_6 = twoT * 6 * alp[6]
            z1i_6 = zeroT
            y0r_6 = ar * y1r_6 - ai * y1i_6 + alp[5]
            y0i_6 = ar * y1i_6 + ai * y1r_6
            z0r_6 = ar * z1r_6 - ai * z1i_6 + twoT * 5 * alp[5]
            z0i_6 = ar * z1i_6 + ai * z1r_6
            # --- j = 4 ------
            y1r_4 = ar * y0r_6 - ai * y0i_6 - y1r_6 + alp[4]
            y1i_4 = ar * y0i_6 + ai * y0r_6 - y1i_6
            z1r_4 = ar * z0r_6 - ai * z0i_6 - z1r_6 + twoT * 4 * alp[4]
            z1i_4 = ar * z0i_6 + ai * z0r_6 - z1i_6
            y0r_4 = ar * y1r_4 - ai * y1i_4 - y0r_6 + alp[3]
            y0i_4 = ar * y1i_4 + ai * y1r_4 - y0i_6
            z0r_4 = ar * z1r_4 - ai * z1i_4 - z0r_6 + twoT * 3 * alp[3]
            z0i_4 = ar * z1i_4 + ai * z1r_4 - z0i_6
            # --- j = 2 ------
            y1r_2 = ar * y0r_4 - ai * y0i_4 - y1r_4 + alp[2]
            y1i_2 = ar * y0i_4 + ai * y0r_4 - y1i_4
            z1r_2 = ar * z0r_4 - ai * z0i_4 - z1r_4 + twoT * 2 * alp[2]
            z1i_2 = ar * z0i_4 + ai * z0r_4 - z1i_4
            y0r = ar * y1r_2 - ai * y1i_2 - y0r_4 + alp[1]
            y0i = ar * y1i_2 + ai * y1r_2 - y0i_4
            z0r_2 = ar * z1r_2 - ai * z1i_2 - z0r_4 + twoT * alp[1]
            z0i_2 = ar * z1i_2 + ai * z1r_2 - z0i_4
            z1r = oneT - z1r_2 + z0r_2 * ar / twoT - z0i_2 * ai / twoT
            z1i = -z1i_2 + z0r_2 * ai / twoT + z0i_2 * ar / twoT
            xi[i] = xip[i] + y0r * s0 * ch0 - y0i * c0 * sh0
            eta[i] = etap[i] + y0r * c0 * sh0 + y0i * s0 * ch0
            gam[i] -= -atan(z1i, z1r) / d2r
            k[i] *= (b1 * sqrt(z1r^2 + z1i^2))
            bs = backside[i] > 0.5
            xi[i] = (p - xi[i]) * bs + (xi[i] * !bs)
            y[i] = a1 * xi[i] * latsign[i]
            x[i] = a1 * eta[i] * lonsign[i]
            gam[i] = ((T180 - gam[i]) * bs) + (gam[i] * !bs)
            yx_1 = rem(gam[i] * latsign[i] * lonsign[i], T360)
            ind_lt = yx_1 < (-T360 / twoT)
            yx_2 = (yx_1 + T360) * ind_lt + yx_1 * !ind_lt
            ind_gt = yx_2 > (T360 / twoT)
            yx = (yx_2 - T360) * ind_gt + (yx_2 * !ind_gt)
            ind_5 = yx == -T180
            gam[i] = (-yx * ind_5) + (yx * !ind_5)
            y[i] -= y0
        end
    end
    # scalar input -> scalar output
    if length(x) == 1
        x = x[1]
        y = y[1]
        gam = gam[1]
        k = k[1]
    end
    return x, y, gam, k
end
"""
tranmerc_inv(x, y; lon0, lat0, ellips, threaded, always_xy)
Returns of longitude and latitude in geodetic coordinates (EPSG:4326) coordinates [decimal
degrees] given x and y coodinates [meteres] in transverse Mercator (TM) projection. The
TM projection is defined by kwargs of longitude (lon0) and latitude (lat0), which specify
the center of the projeciton, and an ellipsoid that is define by an equatorial radius
[meters] (a) and its eccentricity (e). Also returnes meridian convergence (gam) and scale
factor (k).
"""
function tranmerc_inv(
x::Union{AbstractVector{<:AbstractFloat},AbstractFloat},
y::Union{AbstractVector{<:AbstractFloat},AbstractFloat};
lon0::Real=0,
lat0::Real=0,
ellips::Ellipsoid=ellipsoid(EPSG(7030)),
threaded=true,
always_xy=true
)
# This code has been heavily modified for maximum preformance in Julia from the MATLAB
# implimentation of geographiclib_toolbox-2.0 by Alex Gardner JPL/NASA.
#
# This implementation of the projection is based on the series method
# described in
#
# C. F. F. Karney, Transverse Mercator with an accuracy of a few
# nanometers, J. Geodesy 85(8), 475-485 (Aug. 2011);
# Addenda: https://geographiclib.sourceforge.io/tm-addenda.html
#
# This extends the series given by Krueger (1912) to sixth order in the
# flattening. In particular the errors in the projection
# are less than 5 nanometers within 3900 km of the central meridian (and
# less than 1 mm within 7600 km of the central meridian). The mapping
# can be continued accurately over the poles to the opposite meridian.
#
# Copyright (c) Charles Karney (2012-2022) <[email protected]>.
T = eltype(x)
if isa(x, AbstractFloat)
x = [x]
y = [y]
end
# parameters are wrapped in dispatch funciton for type stability
d2r, p, lon0, lat0, e2, f, e2m, e2, cc, n, b1, a1 =
_tranmerc_parameters(x[1], lat0, lon0, ellips.e, ellips.a)
e = convert(T, ellips.e)
if lat0 == 0
y0 = 0
else
sbet0, cbet0 = norm2((one(T) .- f) .* sin(lat0 * d2r), cos(lat0 * d2r))
y0 = a1 * (atan(sbet0, cbet0) +
SinCosSeries(true, sbet0, cbet0, convert(T,C1f(n))))
end
p2 = p / 2
bet = convert.(T, betf(n))
lon00 = AngNormalize(lon0)
gam = Vector{T}(undef, length(x))
k = Vector{T}(undef, length(x))
lat = Vector{T}(undef, length(x))
lon = Vector{T}(undef, length(x))
xip = Vector{T}(undef, length(x))
etap = Vector{T}(undef, length(x))
gam = Vector{T}(undef, length(x))
tau = Vector{T}(undef, length(x))
k = Vector{T}(undef, length(x))
xisign = Vector{T}(undef, length(x))
etasign = Vector{T}(undef, length(x))
backside = Vector{Bool}(undef, length(x))
twoT = convert(T, 2)
oneT = one(T)
zeroT = zero(T)
T90 = convert(T, 90)
T180 = convert(T, 180)
T360 = convert(T, 360)
# the specific implementation of this code is very deliberate to maximize the
# performance provided by and work within the limits of LoopVectorization.jl v0.12.159.
if threaded && Threads.nthreads()>1
@tturbo thread = true for i = eachindex(y)
yA = y[i] + y0
xiA = yA / a1
etaA = x[i] / a1
xisign[i] = sign(xiA)
etasign[i] = sign(etaA)
xiB = xiA * xisign[i]
eta = etaA * etasign[i]
backside[i] = xiB > p2
bs = backside[i]>0.5
xi = ((p - xiB) * bs) + (xiB * !bs)
c0 = cos(twoT * xi)
ch0 = cosh(twoT * eta)
s0 = sin(twoT * xi)
sh0 = sinh(twoT * eta)
ar = twoT * c0 * ch0
ai = twoT * -s0 * sh0
# --- j = 6 ------
y1r_6 = - bet[6]
y1i_6 = zeroT
z1r_6 = -twoT * 6 * bet[6]
z1i_6 = zeroT
y0r_6 = ar * y1r_6 - ai * y1i_6 - bet[6-1]
y0i_6 = ar * y1i_6 + ai * y1r_6
z0r_6 = ar * z1r_6 - ai * z1i_6 - twoT * (6 - 1) * bet[6-1]
z0i_6 = ar * z1i_6 + ai * z1r_6
# --- j = 4 ------
y1r_4 = ar * y0r_6 - ai * y0i_6 - y1r_6 - bet[4]
y1i_4 = ar * y0i_6 + ai * y0r_6 - y1i_6
z1r_4 = ar * z0r_6 - ai * z0i_6 - z1r_6 - twoT * 4 * bet[4]
z1i_4 = ar * z0i_6 + ai * z0r_6 - z1i_6
y0r_4 = ar * y1r_4 - ai * y1i_4 - y0r_6 - bet[4-1]
y0i_4 = ar * y1i_4 + ai * y1r_4 - y0i_6
z0r_4 = ar * z1r_4 - ai * z1i_4 - z0r_6 - twoT * (4 - 1) * bet[4-1]
z0i_4 = ar * z1i_4 + ai * z1r_4 - z0i_6
# --- j = 2 ------
y1r_2 = ar * y0r_4 - ai * y0i_4 - y1r_4 - bet[2]
y1i_2 = ar * y0i_4 + ai * y0r_4 - y1i_4
z1r_2 = ar * z0r_4 - ai * z0i_4 - z1r_4 - twoT * 2 * bet[2]
z1i_2 = ar * z0i_4 + ai * z0r_4 - z1i_4
y0r_2 = ar * y1r_2 - ai * y1i_2 - y0r_4 - bet[2-1]
y0i_2 = ar * y1i_2 + ai * y1r_2 - y0i_4
z0r_2 = ar * z1r_2 - ai * z1i_2 - z0r_4 - twoT * (2 - 1) * bet[2-1]
z0i_2 = ar * z1i_2 + ai * z1r_2 - z0i_4
z1r = oneT - z1r_2 + z0r_2 * ar / twoT - z0i_2 * ai / twoT
z1i = -z1i_2 + z0r_2 * ai / twoT + z0i_2 * ar / twoT
xip[i] = xi + y0r_2 * s0 * ch0 - y0i_2 * c0 * sh0
etap[i] = eta + y0r_2 * c0 * sh0 + y0i_2 * s0 * ch0
gam[i] = atan(z1i, z1r) / d2r
k[i] = b1 / sqrt(z1r^2 + z1i^2)
s = sinh(etap[i])
c = max(zeroT, cos(xip[i]))
r = sqrt(s^2 + c^2)
lon[i] = atan(s, c) / d2r
sxip = sin(xip[i])
sr = sxip / r
gam[i] += atan(sxip * tanh(etap[i]), c) / d2r
tau[i] = sr / e2m
#numit = 5, iter = 1
tau1_1 = sqrt(tau[i]^2 + oneT)
sig_1 = sinh(e * atanh(e * tau[i]/ tau1_1))
taupa_1 = sqrt(sig_1^2 + oneT) * tau[i] - sig_1 * tau1_1
tau[i] +=
(sr - taupa_1) * (oneT + e2m * tau[i]^2) /
(e2m * tau1_1 * sqrt(taupa_1^2 + oneT))
#numit = 5, iter = 2
tau1_2 = sqrt(tau[i]^2 + oneT)
sig_2 = sinh(e * atanh(e * tau[i] / tau1_2))
taupa_2 = sqrt(sig_2^2 + oneT) * tau[i] - sig_2 * tau1_2
tau[i] += (sr - taupa_2) * (oneT + e2m * tau[i]^2) /
(e2m * tau1_2 * sqrt(taupa_2^2 + oneT))
#numit = 5, iter = 3
tau1_3 = sqrt(tau[i]^2 + oneT)
sig_3 = sinh(e * atanh(e * tau[i] / tau1_3))
taupa_3 = sqrt(sig_3^2 + oneT) * tau[i] - sig_3 * tau1_3
tau[i] += (sr - taupa_3) * (oneT + e2m * tau[i]^2) /
(e2m * tau1_3 * sqrt(taupa_3^2 + oneT))
#numit = 5, iter = 4
tau1_4 = sqrt(tau[i]^2 + oneT)
sig_4 = sinh(e * atanh(e * tau[i] / tau1_4))
taupa_4 = sqrt(sig_4^2 + oneT) * tau[i] - sig_4 * tau1_4
tau[i] +=
(sr - taupa_4) * (oneT + e2m * tau[i]^2) /
(e2m * tau1_4 * sqrt(taupa_4^2 + oneT))
#numit = 5, iter = 5
tau1_5 = sqrt(tau[i]^2 + oneT)
sig_5 = sinh(e * atanh(e * tau[i] / tau1_5))
taupa_5 = sqrt(sig_5^2 + oneT) * tau[i] - sig_5 * tau1_5
tau[i] +=
(sr - taupa_5) * (oneT + e2m * tau[i]^2) /
(e2m * tau1_5 * sqrt(taupa_5^2 + oneT))
lat[i] = atan(tau[i], oneT) / d2r
ca = r != zeroT
k[i] *= ((e2m + e2 / sqrt(oneT + tau[i]^2)) * sqrt(oneT + tau[i]^2) * r * ca) + !ca
cb = !ca
lat[i] = T90 * cb + lat[i] * !cb
lon[i] = zeroT * cb + lon[i] * !cb
k[i] = k[i] * cc * cb + k[i] * !cb
lat[i] *= xisign[i]
bs = backside[i] > 0.5;
lon[i] = ((T180 - lon[i]) * bs) + (lon[i] * !bs)
yx_1 = rem(lon[i] * etasign[i] + lon00, T360)
ind_lt = yx_1 < (-T360 / twoT)
yx_2 = ((yx_1 + T360) * ind_lt) + (yx_1 * !ind_lt)
ind_gt = yx_2 > (T360 / twoT)
yx_3 = ((yx_2 - T360) * ind_gt) + (yx_2 * !ind_gt)
ind_1 = yx_3 == -T180
lon[i] = (-yx_3 * ind_1) + (yx_3 * !ind_1)
bs2 = backside[i] > 0.5
gam[i] = ((T180 - gam[i]) * bs2) + (gam[i] * !bs2)
yx_4 = rem(gam[i] * xisign[i] * etasign[i], T360)
ind_lt2 = yx_4 < (-T360 / twoT)
yx_5 = ((yx_4 + T360) * ind_lt2) + (yx_4 * !ind_lt2)
ind_gt2 = yx_5 > (T360 / twoT)
yx_6 = ((yx_5 - T360) * ind_gt2) + (yx_5 * !ind_gt2)
ind_2 = yx_6 == -T180
gam[i] = -yx_6 * ind_2 + yx_6 * !ind_2
end
else
@tturbo thread = false for i = eachindex(y)
yA = y[i] + y0
xiA = yA / a1
etaA = x[i] / a1
xisign[i] = sign(xiA)
etasign[i] = sign(etaA)
xiB = xiA * xisign[i]
eta = etaA * etasign[i]
backside[i] = xiB > p2
bs = backside[i]>0.5
xi = ((p - xiB) * bs) + (xiB * !bs)
c0 = cos(twoT * xi)
ch0 = cosh(twoT * eta)
s0 = sin(twoT * xi)
sh0 = sinh(twoT * eta)
ar = twoT * c0 * ch0
ai = twoT * -s0 * sh0
# --- j = 6 ------
y1r_6 = - bet[6]
y1i_6 = zeroT
z1r_6 = -twoT * 6 * bet[6]
z1i_6 = zeroT
y0r_6 = ar * y1r_6 - ai * y1i_6 - bet[6-1]
y0i_6 = ar * y1i_6 + ai * y1r_6
z0r_6 = ar * z1r_6 - ai * z1i_6 - twoT * (6 - 1) * bet[6-1]
z0i_6 = ar * z1i_6 + ai * z1r_6
# --- j = 4 ------
y1r_4 = ar * y0r_6 - ai * y0i_6 - y1r_6 - bet[4]
y1i_4 = ar * y0i_6 + ai * y0r_6 - y1i_6
z1r_4 = ar * z0r_6 - ai * z0i_6 - z1r_6 - twoT * 4 * bet[4]
z1i_4 = ar * z0i_6 + ai * z0r_6 - z1i_6
y0r_4 = ar * y1r_4 - ai * y1i_4 - y0r_6 - bet[4-1]
y0i_4 = ar * y1i_4 + ai * y1r_4 - y0i_6
z0r_4 = ar * z1r_4 - ai * z1i_4 - z0r_6 - twoT * (4 - 1) * bet[4-1]
z0i_4 = ar * z1i_4 + ai * z1r_4 - z0i_6
# --- j = 2 ------
y1r_2 = ar * y0r_4 - ai * y0i_4 - y1r_4 - bet[2]
y1i_2 = ar * y0i_4 + ai * y0r_4 - y1i_4
z1r_2 = ar * z0r_4 - ai * z0i_4 - z1r_4 - twoT * 2 * bet[2]
z1i_2 = ar * z0i_4 + ai * z0r_4 - z1i_4
y0r_2 = ar * y1r_2 - ai * y1i_2 - y0r_4 - bet[2-1]
y0i_2 = ar * y1i_2 + ai * y1r_2 - y0i_4
z0r_2 = ar * z1r_2 - ai * z1i_2 - z0r_4 - twoT * (2 - 1) * bet[2-1]
z0i_2 = ar * z1i_2 + ai * z1r_2 - z0i_4
z1r = oneT - z1r_2 + z0r_2 * ar / twoT - z0i_2 * ai / twoT
z1i = -z1i_2 + z0r_2 * ai / twoT + z0i_2 * ar / twoT
xip[i] = xi + y0r_2 * s0 * ch0 - y0i_2 * c0 * sh0
etap[i] = eta + y0r_2 * c0 * sh0 + y0i_2 * s0 * ch0
gam[i] = atan(z1i, z1r) / d2r
k[i] = b1 / sqrt(z1r^2 + z1i^2)
s = sinh(etap[i])
c = max(zeroT, cos(xip[i]))
r = sqrt(s^2 + c^2)
lon[i] = atan(s, c) / d2r
sxip = sin(xip[i])
sr = sxip / r
gam[i] += atan(sxip * tanh(etap[i]), c) / d2r
tau[i] = sr / e2m
#numit = 5, iter = 1
tau1_1 = sqrt(tau[i]^2 + oneT)
sig_1 = sinh(e * atanh(e * tau[i]/ tau1_1))
taupa_1 = sqrt(sig_1^2 + oneT) * tau[i] - sig_1 * tau1_1
tau[i] +=
(sr - taupa_1) * (oneT + e2m * tau[i]^2) /
(e2m * tau1_1 * sqrt(taupa_1^2 + oneT))
#numit = 5, iter = 2
tau1_2 = sqrt(tau[i]^2 + oneT)
sig_2 = sinh(e * atanh(e * tau[i] / tau1_2))
taupa_2 = sqrt(sig_2^2 + oneT) * tau[i] - sig_2 * tau1_2
tau[i] += (sr - taupa_2) * (oneT + e2m * tau[i]^2) /
(e2m * tau1_2 * sqrt(taupa_2^2 + oneT))
#numit = 5, iter = 3
tau1_3 = sqrt(tau[i]^2 + oneT)
sig_3 = sinh(e * atanh(e * tau[i] / tau1_3))
taupa_3 = sqrt(sig_3^2 + oneT) * tau[i] - sig_3 * tau1_3
tau[i] += (sr - taupa_3) * (oneT + e2m * tau[i]^2) /
(e2m * tau1_3 * sqrt(taupa_3^2 + oneT))
#numit = 5, iter = 4
tau1_4 = sqrt(tau[i]^2 + oneT)
sig_4 = sinh(e * atanh(e * tau[i] / tau1_4))
taupa_4 = sqrt(sig_4^2 + oneT) * tau[i] - sig_4 * tau1_4
tau[i] +=
(sr - taupa_4) * (oneT + e2m * tau[i]^2) /
(e2m * tau1_4 * sqrt(taupa_4^2 + oneT))
#numit = 5, iter = 5
tau1_5 = sqrt(tau[i]^2 + oneT)
sig_5 = sinh(e * atanh(e * tau[i] / tau1_5))
taupa_5 = sqrt(sig_5^2 + oneT) * tau[i] - sig_5 * tau1_5
tau[i] +=
(sr - taupa_5) * (oneT + e2m * tau[i]^2) /
(e2m * tau1_5 * sqrt(taupa_5^2 + oneT))
lat[i] = atan(tau[i], oneT) / d2r
ca = r != zeroT
k[i] *= ((e2m + e2 / sqrt(oneT + tau[i]^2)) * sqrt(oneT + tau[i]^2) * r * ca) + !ca
cb = !ca
lat[i] = T90 * cb + lat[i] * !cb
lon[i] = zeroT * cb + lon[i] * !cb
k[i] = k[i] * cc * cb + k[i] * !cb
lat[i] *= xisign[i]
bs = backside[i] > 0.5;
lon[i] = ((T180 - lon[i]) * bs) + (lon[i] * !bs)
yx_1 = rem(lon[i] * etasign[i] + lon00, T360)
ind_lt = yx_1 < (-T360 / twoT)
yx_2 = ((yx_1 + T360) * ind_lt) + (yx_1 * !ind_lt)
ind_gt = yx_2 > (T360 / twoT)
yx_3 = ((yx_2 - T360) * ind_gt) + (yx_2 * !ind_gt)
ind_1 = yx_3 == -T180
lon[i] = (-yx_3 * ind_1) + (yx_3 * !ind_1)
bs2 = backside[i] > 0.5
gam[i] = ((T180 - gam[i]) * bs2) + (gam[i] * !bs2)
yx_4 = rem(gam[i] * xisign[i] * etasign[i], T360)
ind_lt2 = yx_4 < (-T360 / twoT)
yx_5 = ((yx_4 + T360) * ind_lt2) + (yx_4 * !ind_lt2)
ind_gt2 = yx_5 > (T360 / twoT)
yx_6 = ((yx_5 - T360) * ind_gt2) + (yx_5 * !ind_gt2)
ind_2 = yx_6 == -T180
gam[i] = -yx_6 * ind_2 + yx_6 * !ind_2
end
end
if length(lon) == 1
lon = lon[1]
lat = lat[1]
gam = gam[1]
k = k[1]
end
if always_xy
return lon, lat, gam, k
else
return lat, lon, gam, k
end
end
function _tranmerc_parameters(x::Float32, lat0, lon0, e, a)
    # Float32 specialization: derive the transverse-Mercator constants from
    # eccentricity `e` and semi-major axis `a`.  The first argument is used
    # for dispatch only.  Intermediate quantities are computed at the
    # precision of `e` and narrowed to Float32 at the end.
    deg2rad = Float32(pi / 180)
    halfturn = Float32(pi)
    lat0c = Float32(lat0)
    lon0c = Float32(lon0)
    ecc2 = e^2
    flat = ecc2 / (1 + sqrt(1 - ecc2))
    ecc2m = 1 - ecc2
    ecc2c = Float32(ecc2)
    cc = Float32(sqrt(ecc2m) * exp(e * atanh(e * 1)))
    ecc2mc = Float32(ecc2m)
    n = flat / (2 - flat)
    b1 = Float32((1 - flat) * (A1m1f(n) + 1))
    a1 = Float32(b1 * a)
    # NOTE: e2 appears twice in the tuple, matching the caller's destructuring.
    return deg2rad, halfturn, lon0c, lat0c, ecc2c, flat, ecc2mc, ecc2c, cc, n, b1, a1
end
function _tranmerc_parameters(x::Float64, lat0, lon0, e, a)
    # Float64 specialization: derive the transverse-Mercator constants from
    # eccentricity `e` and semi-major axis `a`.  The first argument is used
    # for dispatch only.
    deg2rad = Float64(pi / 180)
    halfturn = Float64(pi)
    lat0c = Float64(lat0)
    lon0c = Float64(lon0)
    ecc2 = e^2
    flat = ecc2 / (1 + sqrt(1 - ecc2))
    ecc2m = 1 - ecc2
    ecc2c = Float64(ecc2)
    cc = Float64(sqrt(ecc2m) * exp(e * atanh(e * 1)))
    ecc2mc = Float64(ecc2m)
    n = flat / (2 - flat)
    b1 = Float64((1 - flat) * (A1m1f(n) + 1))
    a1 = Float64(b1 * a)
    # NOTE: e2 appears twice in the tuple, matching the caller's destructuring.
    return deg2rad, halfturn, lon0c, lat0c, ecc2c, flat, ecc2mc, ecc2c, cc, n, b1, a1
end
function alpf(n)
    # Series coefficients alpha_j for the forward transverse-Mercator
    # expansion in the third flattening `n`.  The flat table stores, for each
    # of the six output terms, (m+1) polynomial numerators followed by their
    # common integer denominator.
    alpcoeff = [
        31564, -66675, 34440, 47250, -100800, 75600, 151200,
        -1983433, 863232, 748608, -1161216, 524160, 1935360,
        670412, 406647, -533952, 184464, 725760,
        6601661, -7732800, 2230245, 7257600,
        -13675556, 3438171, 7983360,
        212378941, 319334400,
    ]
    maxpow = 6
    alp = zeros(maxpow)
    o = 1      # cursor into the flattened coefficient table
    d = n      # running power n^l
    for l in 1:maxpow
        m = maxpow - l   # polynomial degree of this term
        # evaluate sum_{i=0}^{m} alpcoeff[o+m-i] * n^i, lowest order first
        poly = alpcoeff[o+m] * one(n)
        for i in 1:m
            poly += alpcoeff[o+m-i] * n^i
        end
        alp[l] = d * poly / alpcoeff[o+m+1]
        o += m + 2
        d *= n
    end
    return alp
end
function betf(n)
    # Series coefficients beta_j for the inverse transverse-Mercator
    # expansion in the third flattening `n`.  Table layout as in `alpf`:
    # per term, (m+1) numerators followed by the common denominator.
    betcoeff = [
        384796, -382725, -6720, 932400, -1612800, 1209600, 2419200,
        -1118711, 1695744, -1174656, 258048, 80640, 3870720,
        22276, -16929, -15984, 12852, 362880,
        -830251, -158400, 197865, 7257600,
        -435388, 453717, 15966720,
        20648693, 638668800
    ]
    maxpow = 6
    bet = zeros(maxpow)
    o = 1      # cursor into the flattened coefficient table
    d = n      # running power n^l
    for l in 1:maxpow
        m = maxpow - l   # polynomial degree of this term
        # evaluate sum_{i=0}^{m} betcoeff[o+m-i] * n^i, lowest order first
        poly = betcoeff[o+m] * one(n)
        for i in 1:m
            poly += betcoeff[o+m-i] * n^i
        end
        bet[l] = d * poly / betcoeff[o+m+1]
        o += m + 2
        d *= n
    end
    return bet
end
function A1m1f(epsi)
    # A1M1F Evaluate A_1 - 1 using Eq. (17) of the Krüger/Karney transverse
    # Mercator series, where `epsi` is the series parameter.
    e2 = epsi^2
    # polynomial (0 + 64*e2 + 4*e2^2 + e2^3) / 256, evaluated left to right
    t = (64 * e2 + 4 * e2^2 + e2^3) / 256
    return (t + epsi) / (1 - epsi)
end
function AngNormalize(x)
    #ANGNORMALIZE Reduce angle to range (-180, 180]
    #
    # x = ANGNORMALIZE(x) reduces angles to the range (-180, 180]. x can be
    # a scalar or an array of any shape.
    y = remx.(x, convert(eltype(x), 360))
    if y isa Number
        # Scalar path: the array code below would call setindex! on a
        # Number and throw when y == -180 (e.g. AngNormalize(-180.0)).
        return y == -180 ? -y : y
    end
    # map -180 to +180 so the result lies in the half-open range (-180, 180]
    ind = y .== -180
    if any(ind)
        y[ind] .*= -one(eltype(x))
    end
    return y
end
function remx(x, y)
    #REMX The remainder function
    #
    # REMX(x, y) is the remainder of x on division by y, reduced to the
    # range [-y/2, y/2].  y should be a positive scalar.
    r = rem(x, y)
    if r < -y / 2
        r += y
    elseif r > y / 2
        r -= y
    end
    return r
end
|
[
"MIT"
] | 0.0.2 | bcb1b982169b75aa4017c72d47e8341f2598b50e | code | 6552 | """
utm_inv(lon, lat; epsg::EPSG=EPSG(0), zone, isnorth, threaded, always_xy)
Returns geodetic coordinates (EPSG:4326) of longitude and latitude [decimal degrees] given x
and y coordinates [meters] in UTM projection. The UTM projection is defined by kwargs of
EPSG *or* zone and isnorth.
"""
function utm_inv(
    x::Union{AbstractVector{<:AbstractFloat},AbstractFloat},
    y::Union{AbstractVector{<:AbstractFloat},AbstractFloat};
    epsg::EPSG=EPSG(0),
    zone::Integer=0,
    isnorth::Bool=true,
    threaded = true,
    always_xy = true
)
    #UTM_INV Inverse UTM projection.
    # The projection may be given either by `epsg` or directly by
    # `zone`/`isnorth` (as the docstring promises).  Previously the
    # `zone`/`isnorth` keywords were missing, so `epsg == EPSG(0)` threw
    # an UndefVarError.
    if epsg !== EPSG(0)
        zone, isnorth = epsg2utmzone(epsg)
    end
    T = eltype(x)
    lon0 = convert(T, -183 + 6 * zone)   # central meridian of the zone
    lat0 = zero(T)
    fe = convert(T, 5e5)                 # false easting [m]
    fn = convert(T, 100e5 * !isnorth)    # false northing [m] (10^7 in the south)
    k0 = convert(T, 0.9996)              # central scale factor
    # strip false origin and scale; scalar path mirrors utm_fwd
    if !(x isa AbstractArray)
        x = (x - fe) / k0
        y = (y - fn) / k0
    else
        x = copy(x)
        y = copy(y)
        if threaded && Threads.nthreads() > 1
            @turbo thread = true for i = eachindex(x)
                x[i] = (x[i] - fe) / k0
                y[i] = (y[i] - fn) / k0
            end
        else
            @turbo thread = false for i = eachindex(x)
                x[i] = (x[i] - fe) / k0
                y[i] = (y[i] - fn) / k0
            end
        end
    end
    lon, lat = tranmerc_inv(x, y; lon0=lon0, lat0=lat0, ellips=ellipsoid(EPSG(7030)), threaded=threaded, always_xy=always_xy)
    return lon, lat
end
function ups_inv(
    x::Union{AbstractVector{<:AbstractFloat},AbstractFloat},
    y::Union{AbstractVector{<:AbstractFloat},AbstractFloat};
    isnorth::Bool=true)
    #UPS_INV Inverse UPS projection: strip the false origin and central
    # scale, then invert the underlying polar stereographic projection on
    # the WGS 84 ellipsoid.
    fe = 20e5        # false easting [m]
    fn = 20e5        # false northing [m]
    k0 = 0.994       # central scale factor
    xs = (x .- fe) / k0
    ys = (y .- fn) / k0
    lat_ts = isnorth ? 90 : -90
    lon_0 = 0
    lat, lon = polarstereo_inv(xs, ys; lat_ts=lat_ts, lon_0=lon_0, ellips=ellipsoid(EPSG(7030)))
    #k = k * k0;
    return (lon, lat)
end
"""
utm_fwd(lon, lat; epsg::EPSG=EPSG(0), zone, isnorth, threaded, always_xy)
Returns x and y coordinates [meters] in UTM projection given geodetic
coordinates (EPSG:4326) of longitude and latitude [decimal degrees]. The UTM projection is
defined by kwargs of EPSG *or* zone and isnorth.
"""
function utm_fwd(
    lon::Union{AbstractVector{<:AbstractFloat},AbstractFloat},
    lat::Union{AbstractVector{<:AbstractFloat},AbstractFloat};
    epsg::EPSG=EPSG(0),
    zone::Integer=0,
    isnorth::Bool=true,
    threaded=true,
    always_xy=true
)
    #UTM_FWD Forward UTM projection.
    # The projection may be given either by `epsg` or directly by
    # `zone`/`isnorth` (as the docstring promises).  Previously the
    # `zone`/`isnorth` keywords were missing, so `epsg == EPSG(0)` threw
    # an UndefVarError.
    if epsg !== EPSG(0)
        zone, isnorth = epsg2utmzone(epsg)
    end
    T = eltype(lon)
    lon0 = convert(T, -183 + 6 * zone)   # central meridian of the zone
    lat0 = zero(T)
    fe = convert(T, 5e5)                 # false easting [m]
    fn = convert(T, 100e5 * !isnorth)    # false northing [m] (10^7 in the south)
    k0 = convert(T, 0.9996)              # central scale factor
    x, y = tranmerc_fwd(lon, lat; lon0=lon0, lat0=lat0, ellips=ellipsoid(EPSG(7030)), threaded=threaded, always_xy=always_xy)
    # apply central scale and false origin
    if !isa(x, Array)
        x = x * k0 + fe
        y = y * k0 + fn
        #k = k * k0
    elseif threaded && Threads.nthreads() > 1
        @turbo thread = true for i = eachindex(x)
            x[i] = x[i] * k0 + fe
            y[i] = y[i] * k0 + fn
            #k[i] = k[i] * k0;
        end
    else
        @turbo thread = false for i = eachindex(x)
            x[i] = x[i] * k0 + fe
            y[i] = y[i] * k0 + fn
            #k[i] = k[i] * k0;
        end
    end
    return x, y
end
function ups_fwd(lon::Union{AbstractVector{<:AbstractFloat},AbstractFloat}, lat::Union{AbstractVector{<:AbstractFloat},AbstractFloat}; isnorth::Bool=true)
    #UPS_FWD Forward UPS projection: polar stereographic on WGS 84, then
    # apply the UPS central scale and false origin.
    lat_ts = isnorth ? 90 : -90
    lon_0 = 0
    fe = 20e5        # false easting [m]
    fn = 20e5        # false northing [m]
    k0 = 0.994       # central scale factor
    # FIX: the keyword was previously passed as `lon_0=lon0` with `lon0`
    # never defined, which raised an UndefVarError on every call.
    x, y, gam, k = polarstereo_fwd(lon, lat; lon_0=lon_0, lat_ts=lat_ts, ellips=ellipsoid(EPSG(7030)))
    # broadcast so both scalar and vector inputs work (Array + scalar is
    # not defined in Julia)
    x = x .* k0 .+ fe
    y = y .* k0 .+ fn
    return x, y
end
"""
utm_epsg(lon::Real, lat::Real, always_xy=true)
returns the EPSG code for the intersecting universal transverse Mercator (UTM) zone -OR-
the relevant polar stereographic projection if outside of UTM limits.
modified from: https://github.com/JuliaGeo/Geodesy.jl/blob/master/src/utm.jl
"""
function utm_epsg(lon::Real, lat::Real, always_xy=true)
    if !always_xy
        lat, lon = (lon, lat)
    end
    # FIX: the polar branches previously returned a raw Int while the UTM
    # path returned an EPSG; return EPSG uniformly (consistent with
    # utmzone2epsg).
    if lat > 84
        # NSIDC Sea Ice Polar Stereographic North
        return EPSG(3995)
    elseif lat < -80
        # Antarctic Polar Stereographic
        return EPSG(19992)
    end
    # make sure lon is from -180 to 180
    lon = lon - floor((lon + 180) / (360)) * 360
    # int versions
    ilat = floor(Int64, lat)
    ilon = floor(Int64, lon)
    # get the latitude band (only needed for the exceptions below)
    band = max(-10, min(9, fld((ilat + 80), 8) - 10))
    # and check for weird ones
    zone = fld((ilon + 186), 6)
    if ((band == 7) && (zone == 31) && (ilon >= 3)) # Norway
        zone = 32
    elseif ((band == 9) && (ilon >= 0) && (ilon < 42)) # Svalbard
        zone = 2 * fld((ilon + 183), 12) + 1
    end
    # 326xx for the northern hemisphere, 327xx for the southern
    code = lat >= 0 ? 32600 + zone : 32700 + zone
    return EPSG(code)
end
function utmzone2epsg(zone::Int = 0, isnorth::Bool = true)
    # Build the WGS 84 EPSG code for a UTM zone/hemisphere.  A zone of 0
    # selects the polar stereographic fallback used by this package.
    if zone == 0
        # NSIDC Sea Ice Polar Stereographic North / Antarctic Polar Stereographic
        return isnorth ? EPSG(3995) : EPSG(19992)
    end
    # 326xx for the northern hemisphere, 327xx for the southern
    return EPSG((isnorth ? 32600 : 32700) + zone)
end
function epsg2utmzone(epsg::EPSG)
    # Recover the (zone, isnorth) pair encoded by a WGS 84 UTM EPSG code.
    # The two polar stereographic fallback codes map to zone 0.
    code = first(epsg.val)
    if code == 3995
        return (zone = 0, isnorth = true)
    elseif code == 19992
        return (zone = 0, isnorth = false)
    elseif Int32(floor(code, digits = -2)) == 32600
        return (zone = code - 32600, isnorth = true)
    elseif Int32(floor(code, digits = -2)) == 32700
        return (zone = code - 32700, isnorth = false)
    else
        error("supplied epsg is not a UTM epsg")
    end
end
function isutm(epsg::EPSG)
    # True for WGS 84 UTM CRS codes: 326xx (north) or 327xx (south).
    rounded = Int32(floor(first(epsg.val), digits = -2))
    return rounded == 32600 || rounded == 32700
end
|
[
"MIT"
] | 0.0.2 | bcb1b982169b75aa4017c72d47e8341f2598b50e | code | 2782 | using FastGeoProjections
using Test
using Proj
# End-to-end checks of FastGeoProjections.Transformation against Proj for
# polar stereographic and UTM CRSs, in both directions, at reduced digit
# agreement (5 digits in meters, 8 digits in degrees).
@testset "FastGeoProjections.jl" begin
    ## [1] Test WGS 84 / NSIDC Sea Ice Polar Stereographic North
    lat0 = 84.0;
    lon0 = 50.0;
    trans = Proj.Transformation("EPSG:4326", "EPSG:3413")
    x0, y0 = trans(lat0,lon0)
    trans = FastGeoProjections.Transformation("EPSG:4326", "EPSG:3413")
    x1, y1 = trans(lat0, lon0)
    # check accuracy
    @test round(x0, digits=5) == round(x1, digits=5)
    @test round(y0, digits=5) == round(y1, digits=5)
    # now inverse
    lat1, lon1 = inv(trans)(x1, y1)
    @test round(lat0, digits=8) == round(lat1, digits=8)
    @test round(lon0, digits=8) == round(lon1, digits=8)
    ## [2] Test WGS 84 / Antarctic Polar Stereographic
    lat0 = -84.0
    lon0 = 50.0
    trans = Proj.Transformation("EPSG:4326", "EPSG:3031")
    x0, y0 = trans(lat0, lon0)
    trans = FastGeoProjections.Transformation("EPSG:4326", "EPSG:3031")
    x1, y1 = trans(lat0, lon0)
    # check accuracy
    @test round(x0, digits=5) == round(x1, digits=5)
    @test round(y0, digits=5) == round(y1, digits=5)
    # now inverse
    lat1, lon1 = inv(trans)(x1, y1)
    @test round(lat0, digits=8) == round(lat1, digits=8)
    @test round(lon0, digits=8) == round(lon1, digits=8)
    ## [3] Test Transverse Mercator projection [UTM] and vector input for North
    x0 = [10000., 20000.]
    y0 = [10000., 20000.]
    trans = Proj.Transformation("EPSG:32619", "EPSG:4326")
    ll0 = trans.(x0, y0)
    lat0 = [i[1] for i in ll0]
    lon0 = [i[2] for i in ll0]
    trans = FastGeoProjections.Transformation("EPSG:32619", "EPSG:4326")
    lat1, lon1 = trans(x0, y0)
    # check accuracy
    @test round.(lat0, digits=8) == round.(lat1, digits=8)
    @test round.(lon0, digits=8) == round.(lon1, digits=8)
    # now inverse
    x1, y1 = inv(trans)(lat1, lon1)
    # check accuracy
    @test round.(x0, digits=5) == round.(x1, digits=5)
    @test round.(y0, digits=5) == round.(y1, digits=5)
    ## [4] Test Transverse Mercator projection [UTM] and vector input for South
    lat0 = -[80., 40., 1.]
    lon0 = [30., 31., 34.]
    trans = Proj.Transformation("EPSG:4326", "EPSG:32736")
    xy0 = trans.(lat0, lon0)
    x0 = [i[1] for i in xy0]
    y0 = [i[2] for i in xy0]
    # EPSG-typed constructor (rather than "EPSG:" strings) is exercised here
    trans = FastGeoProjections.Transformation(EPSG(4326), EPSG(32736))
    x1, y1 = trans(lat0, lon0)
    # check accuracy
    @test round.(x0, digits=5) == round.(x1, digits=5)
    @test round.(y0, digits=5) == round.(y1, digits=5)
    # now inverse
    lat1, lon1 = inv(trans)(x1, y1)
    # check accuracy
    @test round.(lat0, digits=8) == round.(lat1, digits=8)
    @test round.(lon0, digits=8) == round.(lon1, digits=8)
    ## [5] make sure EPSG Type is working
    @test typeof(EPSG(3031)) <: EPSG
end | FastGeoProjections | https://github.com/alex-s-gardner/FastGeoProjections.jl.git |
|
[
"MIT"
] | 0.0.2 | bcb1b982169b75aa4017c72d47e8341f2598b50e | docs | 1574 | [](https://github.com/alex-s-gardner/FastGeoProjections.jl/actions/workflows/CI.yml?query=branch%3Amain)
**FastGeoProjections** is intended to provide highly optimized native Julia geospatial coordinate transformations from one coordinate reference system (CRS) to another as defined by EPSG codes. It is not intended to replace, nor to be as comprehensive as, [Proj](https://github.com/JuliaGeo/Proj.jl). The package will natively support only the most common geospatial transformations and relies on **Proj.jl** for all others.
*Supported Projection EPSGs*
- 3031: WGS 84 / Antarctic Polar Stereographic
- 3413: WGS 84 / NSIDC Sea Ice Polar Stereographic North
- 4326: WGS84 - World Geodetic System 1984
- 326XX: WGS 84 / UTM zone XXN
- 327XX: WGS 84 / UTM zone XXS
*Example*
```julia
julia> using Pkg; Pkg.add("FastGeoProjections")
julia> using FastGeoProjections
julia> lat = [84.0, 83.0]; lon = [50.0, 51.0];
julia> trans = FastGeoProjections.Transformation(EPSG(4326), EPSG(3413))
Transformation
source_epsg: EPSG(4326)
target_epsg: EPSG(3413)
threaded: true
always_xy: false
proj_only: false
julia> x, y = trans(lat, lon)
([648059.0510298966, 755038.7580833685], [56697.82026048427, 79357.77126429843])
```
*Benchmark*
ME = Maximum Error

**Note**
If you have recommendations for additional projections to support, feel free to submit an issue.
|
[
"MIT"
] | 0.2.1 | 3e2b8bad6ab6b479cba858dd9ac974ad272f5d41 | code | 10329 | module IterativeRefinement
# this file is part of IterativeRefinement.jl, released under the MIT Expat license.
using LinearAlgebra
export rfldiv, equilibrators, condInfest, rfeigen
include("infra.jl")
# Algorithm 3 from
# J.Demmel et al., "Error bounds from extra precise iterative refinement",
# LAPACK Working Note Nr. 165 (2005), also published as
# ACM TOMS, 32, 325 (2006) (henceforth "the paper").
"""
rfldiv(A,b,f=lu; kwargs...) -> x,bnorm,bcomp,flags
Compute an accurate solution to a linear system ``A x = b`` using
extra-precise iterative refinement, with error bounds.
Returns solution `x`, a normwise relative forward error estimate `bnorm`,
and maximum componentwise relative error estimate `bcomp`.
Specifically, `bnorm` is an estimate of ``‖xtrue - x‖ / ‖x‖`` (max norms).
If the problem is so ill-conditioned that a good solution is unrealizable,
`bnorm` and `bcomp` are set to unity (unless `expert`).
`flags` contains convergence diagnostics potentially interesting to
specialists.
# Arguments
- `A`: a matrix,
- `b`: a vector with the same `eltype`,
- `f`: a factorization function such as `lu`.
## Keywords
- `DT`: higher-precision type for refinement; defaults to `widen(eltype(A))`
- `verbosity`: 0(default): quiet, 1: report on iterations, 2: details.
- `equilibrate::Bool`: whether the function should equilibrate `A`
(default `true`).
- `maxiter`: default 20.
- `tol`: relative tolerance for convergence, in units of `eps(T)`.
- `expert::Bool`: whether to return questionable bounds in extreme cases.
- `κ`: the (max-norm) condition of `A` (see below).
- `F`: a factorization of `A` (see below).
If `A` has already been equilibrated, and a `Factorization` object `F`
and condition estimate `κ` have already been computed, they may be
provided as keyword arguments; no check for consistency is done here.
Uses the algorithm of Demmel et al. ACM TOMS, 32, 325 (2006).
"""
function rfldiv(A::AbstractMatrix{T},
                b::AbstractVecOrMat{T},
                factor = lu;
                DT = widen(T),
                maxiter=20, tol=max(10.0,sqrt(size(A,1))),
                equilibrate = true,
                verbosity = 0,
                ρthresh = 0.5, # "what Wilkinson used"
                expert = false,
                κ = -one(real(T)),
                F::Union{Nothing, Factorization} = nothing
                ) where {T}
    RT = real(T)
    # FIX: forward the user-supplied factorization function; previously the
    # literal `lu` was passed here, silently ignoring `factor`.
    rfldiv_(A,b,factor,DT,RT,maxiter,tol,equilibrate,verbosity,ρthresh,expert,κ,F)
end
# Internal worker for `rfldiv` (Algorithm 3 of Demmel et al.; see header
# comment).  `DT` is the wide type used for residual computation; `RT` is
# the real type of the returned error bounds.  Multiple right-hand sides
# are processed one at a time via the label/goto loop near the bottom.
function rfldiv_(A::AbstractMatrix{T},
                 b::AbstractVecOrMat{T},
                 factor,
                 ::Type{DT},
                 ::Type{RT},
                 maxiter, tol, equilibrate, verbosity, ρthresh, expert, κ, F
                 ) where {T, DT, RT}
    # maxiter is ithresh in paper
    # tol is γ in paper
    m,n = size(A,1), size(A,2)
    if size(b,1) != m
        throw(DimensionMismatch("first dimension of A, $n, does not match that of b, $(size(b,1))"))
    end
    nrhs = size(b,2)
    cvtok = true
    # the use of this variable in closures subverts inference, as of v1.1
    ϵw::RT = RT(2)^(-precision(RT)) # typically eps(T) / 2
    tol1 = 1 / (tol * ϵw) # $1/γϵ_w$ in the paper
    # optional two-sided equilibration As = R*A*C; only applied when the
    # column scaling is significant (‖C‖ > 10)
    if equilibrate
        Rv,Cv = equilibrators(A)
        cnorm = maximum(abs.(Cv))
        equil = cnorm > 10
    else
        equil = false
    end
    if equil
        (verbosity > 1) && println("equilibrating, ‖C‖=$cnorm")
        C = Diagonal(Cv)
        R = Diagonal(Rv)
        As = R * A * C
    else
        C = I
        As = A
    end
    local Asd
    try
        Asd = DT.(As)
    catch
        cvtok = false
    end
    cvtok || throw(ArgumentError("unable to convert to "
                                 * "designated wide type $DT"))
    # factor (in working precision) unless the caller supplied one
    if F === nothing
        Fs = factor(As)
    else
        Fs = F
    end
    # ∞-norm condition estimate, used to decide when to widen precision
    if κ < 0
        anorm = opnorm(As, Inf)
        κs = condInfest(As,Fs,anorm)
        if verbosity > 1
            equil && print("equilibrated ")
            println("norm: $anorm condition: $κs; compare to $tol1")
        end
    else
        κs = κ
    end
    dzthresh = 1/4 # "works well for binary arithmetic"
    # per-RHS iteration state: x tracks normwise error, z componentwise
    # error.  States: :working, :converged, :noprogress, :unstable (z only).
    nsingle = 1
    ndouble = 0
    relnormx = relnormz = RT(Inf)
    dxnorm = dznorm = RT(Inf)
    ρmax_x = ρmax_z = zero(RT)
    xstate = :working
    zstate = :unstable
    yscheme = :single
    incrprec = false
    if nrhs > 1
        X = zeros(T,n,nrhs)
        normwisebounds = zeros(RT,nrhs)
        termwisebounds = zeros(RT,nrhs)
        flagss = zeros(Int,3,nrhs)
    end
    # state-transition logic for the normwise (x) error estimate
    function newxstate(state, xnorm, dxnorm, dxprev)
        curnorm = relnormx
        dxratio = dxnorm / dxprev
        dxrel = dxnorm / xnorm
        if (state == :noprogress) && (dxratio <= ρthresh)
            state = :working
        end
        if state == :working
            if dxrel <= ϵw
                # tiny dx, criterion (18) in paper
                state = :converged
                (verbosity > 1) && println("convergence (in norm)")
            elseif dxratio > ρthresh
                if yscheme == :single
                    (verbosity > 1) && println("increasing precision(x)")
                    incrprec = true
                elseif ndouble > 1
                    # lack of progress, criterion (17) in paper
                    state = :noprogress
                    (verbosity > 1) && println("x stalled")
                end
            else
                ρmax_x = max( ρmax_x, dxratio)
            end
            (state != :working) && (curnorm = dxrel)
        end
        state, ρmax_x, curnorm
    end
    # state-transition logic for the componentwise (z) error estimate
    function newzstate(state, dznorm, dzprev)
        curnorm = relnormz
        dzratio = dznorm / dzprev
        if (state == :unstable) && (dznorm <= dzthresh)
            state = :working
        end
        if (state == :noprogress) && (dzratio <= ρthresh)
            state = :working
        end
        if state == :working
            if dznorm <= ϵw
                # tiny dz
                state = :converged
                (verbosity > 1) && println("convergence (component-wise)")
            elseif dznorm > dzthresh
                state = :unstable
                relnormz = RT(Inf)
                ρmax_z = zero(RT)
            elseif dzratio > ρthresh
                if yscheme == :single
                    (verbosity > 1) && println("increasing precision(z)")
                    incrprec = true
                elseif ndouble > 1
                    state = :noprogress
                    (verbosity > 1) && println("z stalled")
                end
            else
                ρmax_z = max(ρmax_z, dzratio)
            end
            (state != :working) && (curnorm = dznorm)
        end
        state, ρmax_z, curnorm
    end
    # simple "for" loop w/o scoping
    irhs = 1
    @label NEXTRHS
    if equil
        bs = R * b[:,irhs]
    else
        bs = b[:,irhs]
    end
    bd = DT.(bs)
    # initial solve in working precision
    y = Fs \ bs
    local yd, xnorm
    for iter=1:maxiter
        # compute residual in appropriate precision
        if yscheme == :single
            r = As * y - bs
            nsingle += 1
        else
            r = T.(Asd * yd - bd)
            ndouble += 1
        end
        # compute correction to y
        dy = Fs \ r
        # check error-related stopping criteria
        xnorm = norm(C*y,Inf)
        dxprev = dxnorm
        dxnorm = norm(C*dy,Inf)
        dzprev = dznorm
        dznorm = maximum( abs.(dy) ./ abs.(y))
        (verbosity > 0) && println("iter $iter |dx|=$dxnorm |dz|=$dznorm")
        # widely-scaled solution components force the wide scheme
        ay0,ay1 = extrema(abs.(y))
        if (yscheme == :single) && (κs * ay1 / ay0 >= tol1)
            (verbosity > 1) && println("increasing precision")
            incrprec = true
        end
        xstate, ρmax_x, relnormx = newxstate(xstate, xnorm, dxnorm, dxprev)
        zstate, ρmax_z, relnormz = newzstate(zstate, dznorm, dzprev)
        # the unstable z case is not in the paper but seems
        # necessary to prevent early stalling
        if ((xstate != :working) && !(zstate ∈ [:working,:unstable]))
            break
        end
        if incrprec
            # with modified logic above:
            # if yscheme == :double
            #     @warn "secondary widening is indicated but not implemented"
            # end
            yscheme = :double
            incrprec = false
            yd = DT.(y)
        end
        # update solution
        if yscheme == :single
            y .-= dy
        else
            yd .-= DT.(dy)
            y = T.(yd)
        end
    end
    if xstate == :working
        relnormx = dxnorm / xnorm
    end
    if zstate == :working
        relnormz = dznorm
    end
    # undo column equilibration to recover the solution of the original system
    x::Vector{T} = C * y
    min1 = max(10,sqrt(n)) * ϵw # value from paper
    min2 = ϵw
    normwisebound = RT(max( relnormx/(1-ρmax_x), min2))
    termwisebound = RT(max( relnormz/(1-ρmax_z), min1))
    if !expert
        # in non-expert mode, replace meaningless bounds by 1 and warn
        flag = false
        if normwisebound > sqrt(ϵw)
            flag = true
            normwisebound = one(RT)
        end
        if termwisebound > sqrt(ϵw)
            flag = true
            termwisebound = one(RT)
        end
        if flag && (verbosity >= 0)
            @warn "no convergence: result is not meaningful"
        end
    end
    # encode the final (x,z) states into the first flags entry
    fval = Dict(:converged => 0, :working => 1,
                :noprogress => 2, :unstable => 3)
    flags = [10*fval[xstate]+fval[zstate],nsingle,ndouble]
    # let's see if we can make this type-stable.
    if !(b isa AbstractVector)
        X[:,irhs] .= x
        normwisebounds[irhs] = normwisebound
        termwisebounds[irhs] = termwisebound
        flagss[:,irhs] .= flags
        if irhs == nrhs
            return (X, normwisebounds, termwisebounds, flagss)
        end
    else
        # return (X, normwisebounds, termwisebounds, flagss)
        # take that, you poor confused compiler!
        nb::RT = convert(RT,normwisebound)
        tb::RT = convert(RT,termwisebound)
        ff::Vector{Int} = convert.(Int,flags)
        return x, nb, tb, ff
    end
    # re-initialize state
    nsingle = 1
    ndouble = 0
    relnormx = relnormz = RT(Inf)
    dxnorm = dznorm = RT(Inf)
    ρmax_x = ρmax_z = zero(RT)
    xstate = :working
    zstate = :unstable
    yscheme = :single
    incrprec = false
    irhs += 1
    @goto NEXTRHS
    # no path to this location
end
include("eigen.jl")
end # module
| IterativeRefinement | https://github.com/RalphAS/IterativeRefinement.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e2b8bad6ab6b479cba858dd9ac974ad272f5d41 | code | 9388 |
# Implementation of Dongarra et al., "Improving the accuracy...," SINUM 1983
# find peak index
function _normalizeInf!(x)
    # Scale `x` in place by its largest absolute entry (the ∞-norm) and
    # return the index of that entry.
    idx = 1
    best = abs(x[1])
    for k in eachindex(x)
        a = abs(x[k])
        if a > best
            best = a
            idx = k
        end
    end
    x ./= best
    return idx
end
# simple version for an isolated eigenvalue
"""
rfeigen(A,x,λ,DT) => λnew, xnew, status
Improve the precision of a computed eigenpair `(x,λ)` for matrix `A`
via multi-precision iterative refinement, using more-precise real type `DT`.
The higher precision `DT` is only used for residual computation
(i.e. matrix-vector products), so this can be much faster than a full
eigensystem solution with precise eltype. This method works on a
single eigenpair, and can fail spectacularly if there is another
eigenvalue nearby.
"""
function rfeigen(A::AbstractMatrix{T},
                 x::AbstractVector{Tx},
                 λ::Tλ,
                 DT::Type{<:AbstractFloat} = widen(real(T));
                 maxiter=5,
                 tol=1,
                 factor = lu,
                 scale = true,
                 verbose = false
                 ) where {T,Tλ,Tx}
    # Refine in a type wide enough to hold the vector eltype, the
    # eigenvalue type, and the requested refinement precision.
    Twide = promote_type(Tx, DT, Tλ)
    return _rfeigen(A, x, λ, Twide, factor, maxiter, tol, scale, verbose)
end
"""
rfeigen(A,λ,DT) => λnew, xnew, status
Like `rfeigen(A,x,λ,DT)`, but initialize `x` via one step of inverse
iteration.
"""
function rfeigen(A::AbstractMatrix{T},
                 λ::Tλ,
                 DT::Type{<:AbstractFloat} = widen(real(T));
                 maxiter=5,
                 tol=1,
                 factor = lu,
                 scale = true,
                 verbose = false
                 ) where {T,Tλ}
    # Real symmetric A with a real eigenvalue estimate admits a real
    # eigenvector; otherwise work in complex arithmetic.
    # CHECKME: is this condition adequate?
    Tx = (issymmetric(A) && (Tλ <: Real)) ? Tλ : complex(Tλ)
    # Initialize x via one step of inverse iteration from a random start.
    # Random numbers are drawn in a standard type and converted, since some
    # wide types (e.g. Quadmath) lack samplers.
    x = normalize!((A - λ * I) \ Tx.(rand(size(A, 1))))
    Twide = promote_type(Tx, DT)
    return _rfeigen(A, x, λ, Twide, factor, maxiter, tol, scale, verbose)
end
# Core single-eigenpair refinement (scheme of Dongarra et al. 1983; see
# the header comment).  The normalization x[s] == 1 is imposed by
# replacing column `s` of the shifted matrix with -x, so the eigenvalue
# correction appears as component `s` of each update vector.
function _rfeigen(A::AbstractMatrix{T},
                  x::AbstractVector{Tx},
                  λ::Tλ,
                  ::Type{DT},
                  factor,
                  maxiter,
                  tol,
                  scale,
                  verbose
                  ) where {T,Tx,Tλ,DT}
    status = :unfinished
    λd = convert(DT,λ)
    n = LinearAlgebra.checksquare(A)
    tol1 = tol * eps(real(DT)) # CHECKME: factor of n?
    # wide-precision copies, used only for residual updates
    Ad = DT.(A)
    B = Ad - λd * I
    # normalize so the peak entry of x is 1; s is its index
    s = _normalizeInf!(x)
    xd = DT.(x)
    B[:,s] .= -xd
    Btmp = A - λ * I # do it over to get the type right
    Btmp[:,s] .= -x
    # the modified system is factored once, in working precision
    FB = factor(Btmp)
    # initial residual
    r::Vector{DT} = λd * xd - Ad * xd
    y = zeros(Tx,n)
    ys = zero(Tx)
    δ = similar(y)
    δp = similar(y)
    yp = similar(y)
    rt = similar(y)  # NOTE(review): appears unused
    prevnorm = convert(real(DT),Inf)
    for p = 1:maxiter
        verbose && println("iter $p resnorm: ",norm(r))
        # correction solve in working precision
        δ = FB \ Tx.(r) # ldiv!(δ,FB,Tx.(r))
        y .= y .+ δ
        δnorm = norm(δ)
        ynorm = norm(y)
        # split the accumulated update: ys = eigenvalue part (component s),
        # yp = eigenvector part (everything else)
        yp .= y
        ys = y[s]
        yp[s] = zero(T)
        # corrections should shrink; bail out early otherwise
        if δnorm > prevnorm
            if δnorm > 10.0 * prevnorm
                status = :diverging
            else
                status = :stalled
            end
            verbose && println("$status at iter $p; early exit")
            break
        end
        prevnorm = δnorm
        if δnorm / ynorm < tol1
            status = :converged
            verbose && println("converged")
            # println("iter $p ratio ",δnorm / ynorm)
            break
        end
        # residual update in wide precision, including the bilinear terms
        # arising from the eigenvalue/eigenvector split
        δp .= δ
        δs = δ[s]
        r .= r .- B * DT.(δ)
        δp[s] = zero(T)
        r .= r .+ ys * δp .+ δs * y
    end
    xnew = x + yp
    λnew = λ + ys
    return λnew, xnew, status
end
"""
rfeigen(A, S::Schur, idxλ, DT, maxiter=5) -> vals, vecs, status
Improves the precision of a cluster of eigenvalues of matrix `A`
via multi-precision iterative refinement, using more-precise real type `DT`.
Returns improved estimates of eigenvalues and vectors generating
the corresponding invariant subspace.
This method works on the set of eigenvalues in `S.values` indexed by `idxλ`.
It is designed to handle (nearly) defective cases, but will fail if
the matrix is extremely non-normal or the initial estimates are poor.
Note that `S` must be a true Schur decomposition, not a "real Schur".
"""
function rfeigen(A::AbstractMatrix{T}, S::Schur{TS}, idxλ,
DT = widen(real(T)), maxiter=5;
tol = 1, verbose = false) where {T, TS <: Complex}
n = size(A,1)
m = length(idxλ)
λ = [S.values[idxλ[i]] for i in 1:m]
Tw = promote_type(T,eltype(λ))
DTw = promote_type(DT,Tw)
tol1 = tol * eps(real(DT)) # CHECKME: factor of n?
status = :unfinished
# compute X, M
# X is an adequately-conditioned set spanning the invariant subspace
# M is an upper-triangular matrix of mixing coefficients
# Most of the work is in the space of Schur vectors
Z = zeros(Tw, n, m)
z = zeros(Tw, n)
idxz = Vector{Int}(undef, m)
k = idxλ[1]
if k==1
z[1] = one(Tw)
else
x0 = (S.T[1:k-1,1:k-1] - λ[1] * I) \ S.T[1:k-1,k]
z[1:k-1] .= -x0
z[k] = one(Tw)
end
zm,zi = findmax(abs.(z))
z ./= zm
idxz[1] = zi
Z[:,1] .= z
M = zeros(Tw, m, m)
M[1,1] = λ[1]
for l=2:m
kp = k
k = idxλ[l]
@assert k > kp
x0 = (S.T[1:k-1,1:k-1] - λ[l]*I) \ S.T[1:k-1,k]
X1 = (S.T[1:k-1,1:k-1] - λ[l]*I) \ Z[1:k-1,1:l-1]
z[1:k-1] .= -x0
z[k] = one(Tw)
# pick mixing coeffts so that each vector has a good dominant index
rhs = [-z[idxz[i]] for i=1:l-1]
mtmp = [X1[idxz[i],j] for i=1:l-1,j=1:l-1]
mv = mtmp \ rhs
M[l,l] = λ[l]
M[1:l-1,l] .= mv
z[1:k-1] .= z[1:k-1] .+ X1 * mv
zm, zi = findmax(abs.(z))
idxz[l] = zi
Z[:,l] .= z
end
X = S.Z * Z
s = Int[]
for j=1:m
xm = zero(real(Tw))
xi = 0
for i=1:n
xt = abs(X[i,j])
if xt > xm && i ∉ s
xm = xt
xi = i
end
end
push!(s, xi)
end
λd = DTw.(λ)
Ad = DTw.(A)
Xd = DTw.(X)
Md = DTw.(M)
# TODO: if cluster is tight enough, only need a singleton B
# How tight is tight enough?
B = Vector{Matrix{DTw}}(undef, m)
for j=1:m
B[j] = Ad - λd[j] * I
end
for j=1:m
for i=1:m
B[j][:,s[i]] .= -Xd[:,i]
end
end
r = zeros(DTw, n, m)
for j=1:m
r[:,j] = λd[j] * Xd[:,j] - Ad * Xd[:,j]
for i=1:j-1
r[:,j] .= r[:,j] .+ M[i,j] * Xd[:,i]
end
end
verbose && println("at iter 0 res norm = ", norm(r))
FB0 = lu(Tw.(B[1]))
FB = Vector{typeof(FB0)}(undef, m)
FB[1] = FB0
for j=1:m
FB[j] = lu(Tw.(B[j]))
end
y = zeros(Tw, n, m)
yp = zeros(Tw, n, m)
ys = zeros(Tw, m, m)
δp = zeros(Tw, n, m)
δs = zeros(Tw, m, m)
prevnorm = convert(real(DT),Inf)
for p=1:maxiter
δnorm = zero(real(DT))
for j=1:m
rhs = Tw.(r[:,j])
# for jj=1:j
# rhs -= M[jj,j] * yp[:,jj]
# end
δ = FB[j] \ rhs
δnorm += norm(δ)
y[:,j] .+= δ
yp[:,j] .= y[:,j]
δp[:,j] .= δ
for i=1:m
δs[i,j] = δ[s[i]]
δp[s[i],j] = zero(Tw)
yp[s[i],j] = zero(Tw)
end
r[:,j] .= (r[:,j] - B[j] * DTw.(δ))
end
# there are occasional strange transients
if (p > 3) && (δnorm > prevnorm)
if δnorm > 10.0 * prevnorm
status = :diverging
else
status = :stalled
end
verbose && println("$status at iter $p; early exit")
break
end
prevnorm = δnorm
for j=1:m
for jj=1:m
r[:,j] .= r[:,j] + (DTw(ys[jj,j]) * DTw.(δp[:,jj])
+ DTw(δs[jj,j]) * DTw.(yp[:,jj]))
end
for jj=1:j-1
r[:,j] .= r[:,j] + Md[jj,j] * DTw.(δp[:,jj])
end
end
# this update is done late to avoid doubly adding δ δ terms
for j=1:m
for i=1:m
ys[i,j] = y[s[i],j]
end
end
verbose && println("at iter $p res norm = ", norm(r))
# println("ew: ",eigvals(Md + DTw.(ys)))
verbose && println("DP subspace error: ",
norm((Xd + DTw.(yp))*(Md+DTw.(ys)) - Ad * (Xd + DTw.(yp))))
ynorm = norm(y)
if δnorm / ynorm < tol1
status = :converged
verbose && println("converged")
# println("iter $p ratio ",δnorm / ynorm)
break
end
end
Xnew = X + yp
verbose && println("final subspace error norm: ", norm(Xnew*(M+ys) - A * Xnew))
λbar = (1/m)*sum(λ)
Mnew = Tw.(Md + DTw.(ys) - DTw(λbar) * I)
dλ = eigvals(Mnew)
λnew = λbar .+ dλ
λnew, Xnew, status
end
| IterativeRefinement | https://github.com/RalphAS/IterativeRefinement.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e2b8bad6ab6b479cba858dd9ac974ad272f5d41 | code | 4603 | # some basic linear algebra stuff missing from stdlib
using LinearAlgebra.LAPACK: BlasInt, chklapackerror, @blasfunc, liblapack
using LinearAlgebra.LAPACK: checksquare
"""
condInfest(A,F,anorm)
computes an approximation to the condition of matrix `A` in
the infinity-norm, using factorization `F` and the precomputed
infinity norm `anorm` of `A`.
"""
function condInfest(A::StridedMatrix{T},F::Factorization{T},
anorm=opnorm(A,Inf)) where {T}
γ = normInfest(F) * anorm
end
"""
norm1est!(applyA!,applyAH!,y::Vector) => γ
Estimate the 1-norm of a linear operator `A` expressed as functions which
apply `A` and `adjoint(A)` to a vector such as `y`.
cf. N.J. Higham, SIAM J. Sci. Stat. Comp. 11, 804 (1990)
"""
function norm1est!(applyA!,applyAH!,x::AbstractVector{T}) where {T}
n = length(x)
RT = real(T)
x = fill(one(T)/n,n)
y = copy(x)
z = similar(y)
za = Vector{RT}(undef,n)
asign(a::Real) = a >= zero(T) ? one(T) : -one(T)
asign(a::Complex) = a == zero(T) ? one(T) : a / abs(a)
γ = zero(RT)
jprev=0
for iter=1:5
applyA!(y)
z = asign.(y)
applyAH!(z)
za .= abs2.(z)
zam = maximum(za)
j = findfirst(za .== zam)
if (iter > 1) && (zam <= za[jprev])
γ = norm(y,1)
break
end
fill!(x,zero(T))
x[j] = one(T)
jprev = j
end
v,w = x,z
v = T.((n-1:2n-2)/(n-1))
for j=2:2:n
v[j] = -v[j]
end
vnorm = norm(v,1)
applyA!(v)
max(γ, norm(v,1) / vnorm)
end
# Estimate ‖inv(A)‖₁ from a factorization F of A, by feeding triangular
# solves (and adjoint solves) into the norm estimator.
function norm1est(F::Factorization{T}) where {T}
    scratch = Vector{T}(undef, size(F,1))
    return norm1est!(v -> ldiv!(F, v), v -> ldiv!(F', v), scratch)
end
# Estimate ‖inv(A)‖_∞ from a factorization F of A, using the identity
# ‖B‖_∞ = ‖Bᴴ‖₁ (so the adjoint solve plays the role of A here).
function normInfest(F::Factorization{T}) where {T}
    scratch = Vector{T}(undef, size(F,1))
    return norm1est!(v -> ldiv!(F', v), v -> ldiv!(F, v), scratch)
end
"""
equilibrators(A) -> R,C
compute row- and column-wise scaling vectors `R,C` for a matrix `A`
such that the absolute value of the largest element in any row or
column of `Diagonal(R)*A*Diagonal(C)` is close to unity. Designed to
reduce the condition number of the working matrix.
"""
function equilibrators(A::AbstractMatrix{T}) where {T}
abs1(x::Real) = abs(x)
abs1(x::Complex) = abs(real(x)) + abs(imag(x))
m,n = size(A)
R = zeros(T,m)
C = zeros(T,n)
@inbounds for j=1:n
for i=1:m
R[i] = max(R[i],abs(A[i,j]))
end
end
@inbounds for i=1:m
if R[i] > 0
R[i] = T(2)^floor(Int,log2(R[i]))
end
end
R .= 1 ./ R
@inbounds for i=1:m
for j=1:n
C[j] = max(C[j],R[i] * abs(A[i,j]))
end
end
@inbounds for j=1:n
if C[j] > 0
C[j] = T(2)^floor(Int,log2(C[j]))
end
end
C .= 1 ./ C
R,C
end
# element types with fast LAPACK/BLAS support
const BlasTypes = Union{Float32,Float64,ComplexF32,ComplexF64}
# can use LAPACK.gecon for BLAS types
# gecon! returns the reciprocal condition estimate, so invert it;
# `F.factors` is the packed LU storage that gecon expects
function condInfest(A::StridedMatrix{T},F::Factorization{T},
                    anorm=opnorm(A,Inf)) where {T<:BlasTypes}
    1/LAPACK.gecon!('I',F.factors,anorm)
end
# can use LAPACK.geequb for BLAS types
# (the row/column condition and amax outputs of geequb are discarded here)
function equilibrators(A::AbstractMatrix{T}) where {T<:BlasTypes}
    Rv, Cv, rowcond, colcond, amax = geequb(A)
    Rv,Cv
end
# but first we need to wrap it...
# Generate a geequb(A) method for each LAPACK element type; xGEEQUB computes
# power-of-two row/column equilibration factors for a general matrix.
for (geequb, elty, relty) in
    ((:dgeequb_, :Float64, :Float64),
     (:zgeequb_, :ComplexF64, :Float64),
     (:cgeequb_, :ComplexF32, :Float32),
     (:sgeequb_, :Float32, :Float32))
    @eval begin
        #=
        *       SUBROUTINE DGEEQUB( M, N, A, LDA, R, C, ROWCND, COLCND, AMAX,
        *                           INFO )
        *
        *       .. Scalar Arguments ..
        *       INTEGER            INFO, LDA, M, N
        *       DOUBLE PRECISION   AMAX, COLCND, ROWCND
        *       ..
        *       .. Array Arguments ..
        *       DOUBLE PRECISION   A( LDA, * ), C( * ), R( * )
        =#
        function geequb(A::AbstractMatrix{$elty})
            m,n = size(A)
            lda = max(1, stride(A,2))
            C = Vector{$relty}(undef, n)
            R = Vector{$relty}(undef, m)
            info = Ref{BlasInt}()
            rowcond = Ref{$relty}()
            colcond = Ref{$relty}()
            amax = Ref{$relty}()
            ccall((@blasfunc($geequb), liblapack), Cvoid,
                  (Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt},
                   Ptr{$relty}, Ptr{$relty},
                   Ptr{$relty}, Ptr{$relty}, Ptr{$relty},
                   Ptr{BlasInt}),
                  m, n, A, lda, R, C, rowcond, colcond, amax, info)
            chklapackerror(info[])
            # NOTE(review): the scalar outputs are returned as Refs, not
            # dereferenced values; callers currently discard them
            R, C, rowcond, colcond, amax
        end
    end
end
| IterativeRefinement | https://github.com/RalphAS/IterativeRefinement.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e2b8bad6ab6b479cba858dd9ac974ad272f5d41 | code | 3317 | # TODO: possibly add GenericSchur to deps and test T == Float64 etc.
@testset "simple eigenvalues $T" for T in (Float32, ComplexF32)
DT = widen(T)
maxit = 5
tol = 20.0
e = eps(real(T))
dmin = 1e3 * e
for n in [8,32]
A = mkmat_simple(n,dmin,T)
Ad = convert.(DT,A)
ewd = eigvals(Ad)
ew, ev = eigen(A)
if verbose
ewerrs = [minimum(abs.(ew[j] .- ewd)) for j in 1:n]
println("initial errors ", ewerrs / (e * n))
end
newvecs = similar(ev)
newews = similar(ew)
for j=1:n
λ, x = rfeigen(A, ev[:,j], ew[j], real(DT), maxiter=maxit)
newvecs[:,j] .= x
newews[j] = λ
end
# allow for different orderings because of roundoff
ewerrs = [minimum(abs.(newews[j] .- ewd)) for j in 1:n]
if verbose
println("final errors ", ewerrs / (e * n))
end
@test maximum(abs.(ewerrs)) < tol * e * n * norm(A)
# TODO: check newvecs against DP version
end
end
@testset "(nearly) defective eigenvalues $T" for T in (Float32, ComplexF32)
DT = widen(T)
etarget = T(2)
maxit = 5
tol = 20.0
for n in [5,10,32]
for k in [2,3]
@label retry
A = mkmat_defective(n,k,etarget,T)
# we need a true Schur here
S = schur(A .+ 0im)
Ad = convert.(DT,A)
ew = eigvals(Ad)
idx = findall(abs.(S.values .- etarget) .< 0.2)
# don't try if A is so nonnormal that initial estimates are bad
if length(idx) != k
@goto retry
end
e = eps(real(T))
if verbose
ewerrs = [minimum(abs.(S.values[j] .- ew)) for j in idx]
println("initial errors ", ewerrs / (e * n))
end
newew, newvecs = rfeigen(A, S, idx, DT, maxit)
ewerrs = [minimum(abs.(newew[j] .- ew)) for j in 1:k]
if verbose
println("final errors ", ewerrs / (e * n))
end
@test maximum(ewerrs) / abs(etarget) < tol * e * n
end
end
end
@testset "multiple eigenvalues $T" for T in (Float32, ComplexF32)
DT = widen(T)
etarget = T(2)
dmin = 1e3 * eps(real(T))
maxit = 5
tol = 20.0
for n in [5,10,32]
for k in [2,3]
@label retry
A = mkmat_multiple(n,k,etarget,dmin,T)
# we need a true Schur here
S = schur(A .+ 0im)
Ad = convert.(DT,A)
ew = eigvals(Ad)
idx = findall(abs.(S.values .- etarget) .< 0.2)
# don't try if A is so nonnormal that initial estimates are bad
if length(idx) != k
@goto retry
end
e = eps(real(T))
if verbose
ewerrs = [minimum(abs.(S.values[j] .- ew)) for j in idx]
println("initial errors ", ewerrs / (e * n))
end
newew, newvecs = rfeigen(A, S, idx, DT, maxit)
ewerrs = [minimum(abs.(newew[j] .- ew)) for j in 1:k]
if verbose
println("final errors ", ewerrs / (e * n))
end
@test maximum(ewerrs) / abs(etarget) < tol * e * n
end
end
end
| IterativeRefinement | https://github.com/RalphAS/IterativeRefinement.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e2b8bad6ab6b479cba858dd9ac974ad272f5d41 | code | 5050 | using LinearAlgebra, Random
using Test
using Quadmath
using IterativeRefinement
const verbose = (get(ENV,"VERBOSITY","0") == "1")
Random.seed!(1101)
include("utils.jl")
# Run one refined-solve case: build b = A*x0 in wide precision, solve with
# rfldiv, and check the returned error bounds against the true errors.
# Returns false (so callers can retry with a fresh matrix) if an LU
# factorization came out exactly singular, true otherwise.
function runone(A::Matrix{T},x0::AbstractVector) where {T}
    n = size(A,1)
    DT = widen(T)
    # println("wide type is $DT")
    Ad = DT.(A)
    xd = DT.(x0)
    b = T.(Ad * xd)
    # checkme: Demmel et al. use refined solver here
    xtrue = Ad \ DT.(b)
    xt = T.(xtrue)
    # row-equilibrate only when the column scales indicate bad scaling
    Rv, Cv = equilibrators(A)
    if maximum(abs.(Cv)) > 10
        RA = Diagonal(Rv)*A
    else
        RA = A
    end
    a = opnorm(RA,Inf)
    F = lu(RA,check=false)
    # Some version of OpenBLAS gave exact singularity for one of our "random" cases.
    # Handle gracefully so we can just try again.
    if F.info != 0
        return false
    end
    κnorm = condInfest(RA,F,a)
    # componentwise condition: condition of A scaled by the true solution
    RAx = RA*Diagonal(xt)
    a = opnorm(RAx,Inf)
    F = lu(RAx, check=false)
    if F.info != 0
        return false
    end
    κcomp = condInfest(RAx,F,a)
    # threshold separating "should converge" from "too ill-conditioned"
    crit = 1 / (max(sqrt(n),10) * eps(real(T)))
    if verbose
        println("problem difficulty (rel. to convergence criterion):")
        println("normwise: ", κnorm/crit, " componentwise: ", κcomp/crit)
    end
    xhat,Bnorm,Bcomp = @inferred(rfldiv(A,b))
    # xhat,Bnorm,Bcomp = rfldiv(A,b)
    Enorm = norm(xhat-xtrue,Inf)/norm(xtrue,Inf)
    Ecomp = maximum(abs.(xhat-xtrue) ./ abs.(xtrue))
    if verbose
        println("Bounds: $Bnorm $Bcomp")
        println("Errors: $Enorm $Ecomp")
    end
    if Bnorm > 0.1
        # the solver flagged failure; that should only happen well past crit
        @test κcomp > 100 * crit
    else
        γ = max(10,sqrt(n))
        # true errors must respect the reported bounds, and for tractable
        # problems the bounds themselves must be near roundoff level
        @test Enorm < 1.1*Bnorm
        if κnorm < crit
            @test Bnorm < γ * eps(real(T))
        end
        @test Ecomp < 1.1*Bcomp
        if κcomp < crit
            @test Bcomp < γ * eps(real(T))
        end
    end
    return true
end
# Pick log10(condition-number) for a test-difficulty class and element type.
# Throws ArgumentError for any (class, type) combination without an entry.
function lkval(class,T)
    rt = real(T)
    if class == :easy
        rt <: Float32 && return 5.0
        rt <: Float64 && return 13.0
        rt <: Float128 && return 29.0
    elseif class == :moderate
        rt <: Float32 && return 7.5
        rt <: Float64 && return 16.0
    elseif class == :painful
        rt <: Float32 && return 9.0
        rt <: Float64 && return 18.0
    end
    throw(ArgumentError("undefined lkval"))
end
@testset "matrix rhs $T" for T in (Float32, Float64, ComplexF32, ComplexF64)
for n in [10]
A = mkmat(n,lkval(:easy,T),T)
nrhs = 3
X = rand(T,n,nrhs)
B = copy(X)
X1 = copy(X)
bn1 = zeros(T,nrhs)
bc1 = zeros(T,nrhs)
# check validity w/ view arg (someday maybe more tricky AbstractArrays)
runone(A,view(X,:,2))
for j=1:nrhs
x,bnorm,bcomp = @inferred(rfldiv(A,view(X,:,j)))
X1[:,j] .= x
bn1[j] = bnorm
bc1[j] = bcomp
end
X2, bn2, bc2 = @inferred(rfldiv(A,B))
@test X1 ≈ X2
@test bn1 ≈ bn2
@test bc1 ≈ bc2
end
end
@testset "preprocessed args $T" for T in (Float32, Float128)
n = 16
A = mkmat(n,lkval(:easy,T),T)
# make it badly scaled
s = 1 / sqrt(floatmax(T))
A = s * A
x = rand(T,n)
b = A * x
# basic usage for comparison
x1, bn1, bc1 = rfldiv(A,b)
# example of use with precomputed factor
Rv, Cv = equilibrators(A)
R = Diagonal(Rv)
As = R * A * Diagonal(Cv)
bs = R * b
F = lu(As)
a = opnorm(As,Inf)
κnorm = condInfest(As,F,a)
x2, bn2, bc2 = rfldiv(As,bs; F=F, κ = κnorm, equilibrate = false)
cx2 = Diagonal(Cv) * x2
@test cx2 ≈ x1
@test bn2 ≈ bn1
@test bc2 ≈ bc1
# make sure this was not an empty test
x3, bn3, bc3 = @test_logs (:warn, r"no convergence.*") rfldiv(A,b; F=F, κ = κnorm, equilibrate = false)
@test ! (x3 ≈ x1)
end
@testset "well-conditioned $T" for T in (Float32, Float64, ComplexF32, ComplexF64)
for n in [10,30,100]
A = mkmat(n,lkval(:easy,T),T)
x = rand(n)
runone(A,x)
end
end
@testset "marginally-conditioned $T" for T in (Float32, Float64, ComplexF32, ComplexF64)
for n in [10,30,100]
A = mkmat(n,lkval(:moderate,T),T)
x = rand(n)
runone(A,x)
end
end
@info "The next block of tests is expected to produce warnings"
@testset "badly-conditioned $T" for T in (Float32, Float64, ComplexF32, ComplexF64)
# We don't test for convergence failure here because
# the method occasionally works in this regime.
for n in [10,30,100]
LU_ok = false
for j in 1:10
A = mkmat(n,lkval(:painful,T),T)
x = rand(n)
LU_ok = runone(A,x)
if LU_ok
break
end
end
if !LU_ok
@info "failed to find nonsingular example for n=$n"
@test_broken LU_ok
end
end
end
include("eigen.jl")
| IterativeRefinement | https://github.com/RalphAS/IterativeRefinement.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e2b8bad6ab6b479cba858dd9ac974ad272f5d41 | code | 3476 | """
mkmat(n,log10κ=5,T=Float32)
construct a matrix of size `(n,n)` and eltype `T` with log-spaced
singular values from `10^(-log10κ)` to 1.
"""
function mkmat(n, log10κ, ::Type{T}) where {T}
    # draw left/right singular-vector bases via QR of Gaussian matrices
    ET = T <: Real ? Float64 : ComplexF64
    q1,_ = qr(randn(ET,n,n))
    q2,_ = qr(randn(ET,n,n))
    # shuffled log-spaced singular values in [10^-log10κ, 1];
    # n == 1 would give 0/0 in the spacing formula, so special-case σ = [1]
    s = n == 1 ? [1.0] : 10.0 .^(-shuffle(0:(n-1))*log10κ/(n-1))
    return T.(Matrix(q1)*Diagonal(s)*Matrix(q2)')
end
# Build an n×n matrix of eltype T whose eigenvalues are simple and pairwise
# separated by at least `dmin`, via a random similarity transform.
function mkmat_simple(n, dmin, ::Type{T}) where {T}
    dmin <= 1 / (2*n) ||
        throw(ArgumentError("unrealistic value of dmin, I give up."))
    # resample uniform eigenvalues until the smallest pairwise gap suffices
    gap = 0.0
    local ews
    while gap < dmin
        ews = rand(n)
        gap = minimum(abs.((ews .- ews') + I))  # +I masks the zero diagonal
    end
    basis = rand(n,n)
    return T.(basis * diagm(0 => ews) * inv(basis))
end
# Real version: n-k random simple eigenvalues (pairwise gaps >= dmin) plus a
# k-fold semisimple eigenvalue at `target`, conjugated by a random basis.
function mkmat_multiple(n, k, target, dmin, ::Type{T}) where {T <: Real}
    # uniform samples can't reliably achieve gaps beyond this heuristic bound
    if dmin > 1 / (2*(n-k))
        throw(ArgumentError("unrealistic value of dmin, I give up."))
    end
    dmin1 = 0.0
    local ews
    # NOTE(review): for dmin <= 0 this loop never runs and `ews` stays
    # undefined; callers always pass dmin > 0
    while dmin1 < dmin
        ews = rand(n-k)
        dmin1 = minimum(abs.((ews .- ews') + I))
    end
    append!(ews,fill(Float64(target),k))
    X = rand(n,n)
    # println("cond X: ",cond(X))
    A1 = X * diagm(0 => ews) * inv(X)
    A = T.(A1)
end
# Complex version: n-k random simple eigenvalues plus a k-fold semisimple
# eigenvalue at `target`, conjugated by a random complex basis.
function mkmat_multiple(n, k, target, dmin, ::Type{T}) where {T <: Complex}
    # Guard against separations that uniform sampling is unlikely to achieve,
    # mirroring the real-valued method (otherwise the loop below may not end).
    if dmin > 1 / (2*(n-k))
        throw(ArgumentError("unrealistic value of dmin, I give up."))
    end
    # sample the background eigenvalues until pairwise gaps are >= dmin;
    # always sample at least once so `ews` is defined even for dmin <= 0
    local ews
    while true
        ews = rand(ComplexF64,n-k)
        dmin1 = minimum(abs.((ews .- ews') + I))
        dmin1 >= dmin && break
    end
    # append the k-fold eigenvalue, then conjugate by a random basis
    append!(ews,fill(ComplexF64(target),k))
    X = rand(ComplexF64,n,n)
    A1 = X * diagm(0 => ews) * inv(X)
    return T.(A1)
end
"""
construct a matrix similar to one with one Jordan block of size `k`,
eigenvalue `w1` and other eigenvalues random, likely simple, in [0,1).
"""
function mkmat_defective(n, k, w1, ::Type{T}) where {T <: Real}
# putting defective ones at end seems to make final location more random
ews = vcat(rand(n-k), w1 * ones(k))
Ts = diagm(0=>ews) + diagm(1 => vcat(zeros(n-k), ones(k-1)))
X = rand(n,n)
# println("cond X: ",cond(X))
A1 = X * Ts * inv(X)
A = T.(A1)
end
# Complex version: similarity-transform an upper-triangular matrix carrying a
# k×k Jordan block for eigenvalue `w1` after n-k random simple eigenvalues.
function mkmat_defective(n, k, w1, ::Type{T}) where {T <: Complex}
    eigs = vcat(rand(ComplexF64, n-k), w1 * ones(ComplexF64, k))
    superdiag = vcat(zeros(n-k), ones(k-1))
    Tri = diagm(0 => eigs, 1 => superdiag)
    basis = rand(ComplexF64,n,n)
    return T.(basis * Tri * inv(basis))
end
"""
construct a matrix with a cluster of eigenvalues with specified condition.
`lbdiag` specifies whether lower block is normal. (Otherwise it is likely
to have worse condition than the cluster of interest, which may be
undesirable.)
"""
function mkmat_cond(n, targets, cond, ::Type{T}; lbdiag=false) where T
if (cond < 1)
throw(ArgumentError("condition cannot be < 1"))
end
k = length(targets)
Tw = (T <: Real) ? Float64 : ComplexF64
A11 = diagm(0=>Tw.(targets)) + triu(rand(Tw,k,k),1)
ews = rand(n-k)
if lbdiag
A22 = diagm(0=>rand(Tw,n-k))
else
A22 = triu(rand(Tw,n-k,n-k))
end
R = rand(Tw,k,n-k)
condr = sqrt(cond^2 - 1.0)
lmul!(condr/opnorm(R,2),R)
A12 = -A11 * R + R * A22
U,_ = qr(randn(Tw,n,n))
At = [A11 A12; zeros(Tw,n-k,k) A22]
# norm(A12) / norm(R) might be a good estimate for sep(A11,A22)
A = T.(U' * At * U)
end
| IterativeRefinement | https://github.com/RalphAS/IterativeRefinement.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e2b8bad6ab6b479cba858dd9ac974ad272f5d41 | code | 266 | # only push coverage from one bot
# submit only from a single CI configuration to avoid duplicate reports
get(ENV, "TRAVIS_OS_NAME", nothing) == "linux" || exit(0)
get(ENV, "TRAVIS_JULIA_VERSION", nothing) == "1.0" || exit(0)
using Coverage
# process coverage from the repository root (two levels up from this script)
cd(joinpath(@__DIR__, "..", "..")) do
    Codecov.submit(Codecov.process_folder())
end
| IterativeRefinement | https://github.com/RalphAS/IterativeRefinement.jl.git |
|
[
"MIT"
] | 0.2.1 | 3e2b8bad6ab6b479cba858dd9ac974ad272f5d41 | docs | 3809 | # IterativeRefinement
<!--
 -->
<!--



 -->
[](https://github.com/RalphAS/IterativeRefinement.jl/actions)
[](http://codecov.io/github/RalphAS/IterativeRefinement.jl?branch=master)
This package is an implementation of multi-precision iterative refinement for
certain dense-matrix linear algebra problems.
# Background
The purpose of iterative refinement (IR) is to improve the accuracy of a
solution. If `x` is the exact solution of `A*x=b`, a simple solve of
the form `y = A \ b` will have a relative forward error
(`norm(y-x)/norm(x)`) of approximately `ϵ * O(n) * cond(A)` where `ϵ`
is the unit roundoff error in the standard precision. Iterative
refinement with higher precision residuals can reduce this to
`ϵ * O(n)`, as long as the matrix `A` is not very badly conditioned
relative to `ϵ`.
Why not do everything in high precision? The factorization step is
typically *very* expensive (`O(n^3)`) in high precision, whereas the
residual computation is relatively cheap (`O(n^2)`). Furthermore, IR
schemes often provide useful error bounds.
For typical use, one would have a basic working precision of `Float64`
(`ϵ = 2.2e-16`), so that fast LAPACK/BLAS routines dominate the runtime.
`rfldiv` will then (by default) use `BigFloat` for residuals.
One might alternatively use `Double64` from
[DoubleFloats.jl](https://github.com/JuliaMath/DoubleFloats.jl)
or `Float128` from
[Quadmath.jl](https://github.com/JuliaMath/Quadmath.jl)
# Linear systems
The most mature part of the package provides a function `rfldiv`, which
handles linear matrix-vector problems of the form
`A x = b`.
## Basic Usage
```julia
julia> using LinearAlgebra, IterativeRefinement
julia> x, bnorm, bcomp = rfldiv(A,b)
```
This provides an accurate solution vector `x` and estimated bounds
on norm-wise and component-wise relative error. By default `LU` decomposition
is used.
## Advanced Usage
See the function docstring for details.
If one has several right-hand-sides, one can equilibrate and factor
`A` in advance; see the tests for an example.
## Reference
J.Demmel et al., "Error bounds from extra precise iterative refinement,"
LAPACK Working Note Nr. 165 (2005), also published as
ACM TOMS, 32, 325 (2006). The work
described therein eventually turned into a collection of subroutines
included in some versions of LAPACK. This implementation is based on
the paper; minor modifications were introduced based on experimentation.
To be precise, this package implements Algorithm 3.
# Eigensystems
Additional methods (`rfeigen`) are provided for improving estimates of
eigenvalue/subspace pairs of the form
`A X = X λ`.
For a simple eigenvalue, `X` is the corresponding eigenvector, and
the user provides coarse estimates of both. In the case of
multiple or defective eigenvalues, columns of `X` are generators for the
corresponding invariant subspace, and the user provides a Schur decomposition
with a list of indices for the cluster of interest.
Problem-specific error bound estimates are not yet provided for eigensystems.
## Reference
J.J.Dongarra, C.B.Moler, and J.H.Wilkinson, "Improving the accuracy of computed
eigenvalues and eigenvectors," SIAM J. Numer. Anal. 20, 23-45 (1983).
| IterativeRefinement | https://github.com/RalphAS/IterativeRefinement.jl.git |
|
[
"MIT"
] | 1.1.0 | db6713d1db975f325d4d609fc7d3e92d32635104 | code | 286 | using TimeSpans
using Documenter
makedocs(modules=[TimeSpans],
sitename="TimeSpans",
authors="Beacon Biosignals, Inc.",
pages=["API Documentation" => "index.md"])
deploydocs(repo="github.com/beacon-biosignals/TimeSpans.jl.git",
devbranch="main")
| TimeSpans | https://github.com/beacon-biosignals/TimeSpans.jl.git |
|
[
"MIT"
] | 1.1.0 | db6713d1db975f325d4d609fc7d3e92d32635104 | code | 252 | module TimeSpansArrowTypesExt
using ArrowTypes
using TimeSpans
# stable name under which TimeSpan is (de)serialized as an Arrow extension type
const TIME_SPAN_ARROW_NAME = Symbol("JuliaLang.TimeSpan")
ArrowTypes.arrowname(::Type{TimeSpan}) = TIME_SPAN_ARROW_NAME
ArrowTypes.JuliaType(::Val{TIME_SPAN_ARROW_NAME}) = TimeSpan
end
| TimeSpans | https://github.com/beacon-biosignals/TimeSpans.jl.git |
|
[
"MIT"
] | 1.1.0 | db6713d1db975f325d4d609fc7d3e92d32635104 | code | 12092 | module TimeSpans
using Base.Iterators
using Compat
using Dates
using Statistics
export TimeSpan, start, stop, istimespan, translate, overlaps,
shortest_timespan_containing, duration, index_from_time,
time_from_index, merge_spans!, merge_spans, invert_spans
const NS_IN_SEC = Dates.value(Nanosecond(Second(1))) # Number of nanoseconds in one second
#####
##### `TimeSpan`
#####
"""
TimeSpan(start, stop)
Return `TimeSpan(Nanosecond(start), Nanosecond(stop))` representing the interval `[start, stop)`.
If `start == stop`, a single `Nanosecond` is added to `stop` since `stop` is an exclusive
upper bound and TimeSpan operations only generally support up to nanosecond precision anyway.
The benefit of this type over e.g. `Nanosecond(start):Nanosecond(1):Nanosecond(stop)` is
that instances of this type are guaranteed to obey `TimeSpans.start(x) < TimeSpans.stop(x)`
by construction.
"""
struct TimeSpan
start::Nanosecond
stop::Nanosecond
function TimeSpan(start::Nanosecond, stop::Nanosecond)
stop += Nanosecond(start == stop)
start < stop || throw(ArgumentError("start(span) < stop(span) must be true, got $start and $stop"))
return new(start, stop)
end
end
_to_ns(t::Dates.CompoundPeriod) = convert(Nanosecond, t)
_to_ns(t::Any) = Nanosecond(t)
TimeSpan(start, stop) = TimeSpan(_to_ns(start), _to_ns(stop))
"""
TimeSpan(x)
Return `TimeSpan(start(x), stop(x))`.
"""
TimeSpan(x) = TimeSpan(start(x), stop(x))
Base.in(x::TimePeriod, y::TimeSpan) = start(y) <= x < stop(y)
# work around <https://github.com/JuliaLang/julia/issues/40311>:
# written as two methods and not with obj::Union{AbstractArray,Tuple} to avoid
# a method ambiguity in Julia 1.7
Base.findall(pred::Base.Fix2{typeof(in), TimeSpan}, obj::AbstractArray) = invoke(findall, Tuple{Function, typeof(obj)}, pred, obj)
Base.findall(pred::Base.Fix2{typeof(in), TimeSpan}, obj::Tuple) = invoke(findall, Tuple{Function, typeof(obj)}, pred, obj)
# allow TimeSpans to be broadcasted
Base.broadcastable(t::TimeSpan) = Ref(t)
#####
##### pretty printing
#####
# Decompose a nanosecond count into (hours, minutes, seconds, milliseconds,
# microseconds, nanoseconds) via successive divrem steps.
function nanosecond_to_periods(ns::Integer)
    microseconds, nanoseconds = divrem(ns, 1000)
    milliseconds, microseconds = divrem(microseconds, 1000)
    seconds, milliseconds = divrem(milliseconds, 1000)
    minutes, seconds = divrem(seconds, 60)
    hours, minutes = divrem(minutes, 60)
    return (hours, minutes, seconds, milliseconds, microseconds, nanoseconds)
end

# Render any `Period` by first converting it to a nanosecond count.
format_duration(t::Period) = format_duration(convert(Nanosecond, t).value)

# Render a (possibly negative) nanosecond count as "±HH:MM:SS.mmmuuunnn".
function format_duration(ns::Integer)
    hours, minutes, seconds, ms, μs, nanos = nanosecond_to_periods(abs(ns))
    hhmmss = join((lpad(hours, 2, '0'), lpad(minutes, 2, '0'), lpad(seconds, 2, '0')), ':')
    subsecond = string(lpad(ms, 3, '0'), lpad(μs, 3, '0'), lpad(nanos, 3, '0'))
    prefix = signbit(ns) ? "-" : ""
    return string(prefix, hhmmss, '.', subsecond)
end
# render as `TimeSpan(HH:MM:SS.nnnnnnnnn, HH:MM:SS.nnnnnnnnn)`
function Base.show(io::IO, w::TimeSpan)
    start_string = format_duration(start(w))
    stop_string = format_duration(stop(w))
    return print(io, "TimeSpan(", start_string, ", ", stop_string, ')')
end
#####
##### generic TimeSpans.jl interface
#####
"""
istimespan(x)
Return `true` if `x` has been declared to support `TimeSpans.start(x)` and `TimeSpans.stop(x)`,
return `false` otherwise.
Types that overload `TimeSpans.start`/`TimeSpans.stop` should also overload `istimespan`.
"""
istimespan(::Any) = false
istimespan(::TimeSpan) = true
istimespan(::Period) = true
"""
start(span)
Return the inclusive lower bound of `span` as a `Nanosecond` value.
"""
start(span::TimeSpan) = span.start
start(t::Period) = convert(Nanosecond, t)
"""
stop(span)
Return the exclusive upper bound of `span` as a `Nanosecond` value.
"""
stop(span::TimeSpan) = span.stop
stop(t::Period) = convert(Nanosecond, t) + Nanosecond(1)
#####
##### generic utilities
#####
"""
translate(span, by::Period)
Return `TimeSpan(start(span) + by, stop(span) + by)`.
"""
function translate(span, by::Period)
by = convert(Nanosecond, by)
return TimeSpan(start(span) + by, stop(span) + by)
end
"""
TimeSpans.contains(a, b)
Return `true` if the timespan `b` lies entirely within the timespan `a`, return `false` otherwise.
"""
contains(a, b) = start(a) <= start(b) && stop(a) >= stop(b)
"""
overlaps(a, b)
Return `true` if the timespan `a` and the timespan `b` overlap, return `false` otherwise.
"""
function overlaps(a, b)
starts_earlier, starts_later = ifelse(start(b) > start(a), (a, b), (b, a))
return stop(starts_earlier) > start(starts_later)
end
"""
shortest_timespan_containing(spans)
Return the shortest possible `TimeSpan` containing all timespans in `spans`.
`spans` is assumed to be an iterable of timespans.
"""
function shortest_timespan_containing(spans)
isempty(spans) && throw(ArgumentError("input iterator must be nonempty"))
lo, hi = Nanosecond(typemax(Int64)), Nanosecond(0)
for span in spans
lo = min(start(span), lo)
hi = max(stop(span), hi)
end
return TimeSpan(lo, hi)
end
"""
shortest_timespan_containing(a, b)
Return the shortest possible `TimeSpan` containing the timespans `a` and `b`.
"""
shortest_timespan_containing(a, b) = TimeSpan(min(start(a), start(b)), max(stop(a), stop(b)))
"""
duration(span)
Return `stop(span) - start(span)`.
"""
duration(span) = stop(span) - start(span)
"""
TimeSpans.nanoseconds_per_sample(sample_rate)
Given `sample_rate` in Hz, return the number of nanoseconds corresponding to one sample.
Note that this function performs the relevant calculation using `Float64(sample_rate)`
in order to improve the accuracy of the result.
"""
nanoseconds_per_sample(sample_rate) = NS_IN_SEC / Float64(sample_rate)
"""
index_from_time(sample_rate, sample_time::Period)
Given `sample_rate` in Hz, return the integer index of the most recent sample
taken at `sample_time`. Note that `sample_time` must be non-negative and support
`convert(Nanosecond, sample_time)`.
Examples:
```jldoctest
julia> index_from_time(1, Second(0))
1
julia> index_from_time(1, Second(1))
2
julia> index_from_time(100, Millisecond(999))
100
julia> index_from_time(100, Millisecond(1000))
101
```
"""
function index_from_time(sample_rate, sample_time::Period)
time_in_nanoseconds = convert(Nanosecond, sample_time).value
time_in_nanoseconds >= 0 || throw(ArgumentError("`sample_time` must be >= 0 nanoseconds"))
time_in_seconds = time_in_nanoseconds / NS_IN_SEC
return floor(Int, time_in_seconds * sample_rate) + 1 # the `+ 1` here converts from 0-based to 1-based indexing
end
"""
index_from_time(sample_rate, span)
Return the `UnitRange` of indices corresponding to `span` given `sample_rate` in Hz:
```jldoctest
julia> index_from_time(100, TimeSpan(Second(0), Second(1)))
1:100
julia> index_from_time(100, TimeSpan(Second(1)))
101:101
julia> index_from_time(100, TimeSpan(Second(3), Second(6)))
301:600
```
"""
function index_from_time(sample_rate, span)
i = index_from_time(sample_rate, start(span))
# Recall that `stop(span)` returns `span`'s *exclusive* upper bound, but for this
# calculation, we want to use `span`'s *inclusive* upper bound. Otherwise, we might
# potentially "include" an additional sample point that doesn't actually fall within
# `span`, but falls right after it. Thus, our `j` calculation uses `stop(span) - Nanosecond(1)`,
# which is the final nanosecond actually included in the `span`.
j = index_from_time(sample_rate, stop(span) - Nanosecond(1))
return i:j
end
"""
time_from_index(sample_rate, sample_index)
Given `sample_rate` in Hz and assuming `sample_index > 0`, return the earliest
`Nanosecond` containing `sample_index`.
Examples:
```jldoctest
julia> time_from_index(1, 1)
0 nanoseconds
julia> time_from_index(1, 2)
1000000000 nanoseconds
julia> time_from_index(100, 100)
990000000 nanoseconds
julia> time_from_index(100, 101)
1000000000 nanoseconds
```
"""
function time_from_index(sample_rate, sample_index)
sample_index > 0 || throw(ArgumentError("`sample_index` must be > 0"))
return Nanosecond(ceil(Int, (sample_index - 1) * nanoseconds_per_sample(sample_rate)))
end
"""
time_from_index(sample_rate, sample_range::AbstractUnitRange)
Return the `TimeSpan` corresponding to `sample_range` given `sample_rate` in Hz.
Note that the returned span includes the time period between the final sample and
its successor, excluding the successor itself.
Examples:
```jldoctest
julia> time_from_index(100, 1:100)
TimeSpan(0 nanoseconds, 1000000000 nanoseconds)
julia> time_from_index(100, 101:101)
TimeSpan(1000000000 nanoseconds, 1000000001 nanoseconds)
julia> time_from_index(100, 301:600)
TimeSpan(3000000000 nanoseconds, 6000000000 nanoseconds)
```
"""
function time_from_index(sample_rate, sample_range::AbstractUnitRange)
i, j = first(sample_range), last(sample_range)
return TimeSpan(time_from_index(sample_rate, i),
time_from_index(sample_rate, j + 1))
end
"""
merge_spans!(predicate, spans)
Given a mutable, indexable iterator of timespans and a function to compare two
time-sequential timespans, return the iterator in sorted order with all pairs for
which `predicate` returns `true` merged via [`shortest_timespan_containing`](@ref).
```jldoctest
julia> spans = [TimeSpan(0, 10), TimeSpan(6, 12), TimeSpan(15, 20),
TimeSpan(21, 30), TimeSpan(29, 31)]
5-element Vector{TimeSpan}:
TimeSpan(00:00:00.000000000, 00:00:00.000000010)
TimeSpan(00:00:00.000000006, 00:00:00.000000012)
TimeSpan(00:00:00.000000015, 00:00:00.000000020)
TimeSpan(00:00:00.000000021, 00:00:00.000000030)
TimeSpan(00:00:00.000000029, 00:00:00.000000031)
julia> merge_spans!(overlaps, spans)
3-element Vector{TimeSpan}:
TimeSpan(00:00:00.000000000, 00:00:00.000000012)
TimeSpan(00:00:00.000000015, 00:00:00.000000020)
TimeSpan(00:00:00.000000021, 00:00:00.000000031)
julia> merge_spans!((a, b) -> start(b) - stop(a) < Nanosecond(5), spans)
1-element Vector{TimeSpan}:
TimeSpan(00:00:00.000000000, 00:00:00.000000031)
```
"""
function merge_spans!(predicate, spans)
length(spans) <= 1 && return spans
sort!(spans; by=start)
merged_indices = Int[]
merge_target_index = firstindex(spans)
for i in Iterators.drop(eachindex(spans), 1)
target = spans[merge_target_index]
current = spans[i]
if predicate(target, current)
spans[merge_target_index] = shortest_timespan_containing(target, current)
push!(merged_indices, i)
else
merge_target_index = i
end
end
deleteat!(spans, merged_indices)
return spans
end
"""
merge_spans(predicate, spans)
Return `merge_spans!(predicate, collect(spans))`.
See also [`merge_spans!`](@ref).
"""
merge_spans(predicate, spans) = merge_spans!(predicate, collect(spans))
"""
Statistics.middle(t::TimeSpan, r::RoundingMode=RoundToZero)
Return the midpoint of a TimeSpan in `Nanosecond`s.
"""
Statistics.middle(t::TimeSpan, r::RoundingMode=RoundToZero) = div(start(t) + stop(t), 2, r)
"""
invert_spans(spans, parent_span)
Return a vector of `TimeSpan`s representing the gaps between the spans in the
iterable `spans` that are contained within `parent_span`.
"""
function invert_spans(spans, parent_span)
spans = collect(spans)
filter!(x -> overlaps(x, parent_span), spans)
isempty(spans) && return [TimeSpan(parent_span)]
merge_spans!((a, b) -> start(b) <= stop(a), spans)
gaps = TimeSpan[]
previous_span = first(spans)
if start(previous_span) > start(parent_span)
push!(gaps, TimeSpan(start(parent_span), start(previous_span)))
end
for span in drop(spans, 1)
if start(span) > stop(previous_span)
push!(gaps, TimeSpan(stop(previous_span), start(span)))
end
previous_span = span
end
if stop(parent_span) > stop(previous_span)
push!(gaps, TimeSpan(stop(previous_span), stop(parent_span)))
end
return gaps
end
#####
##### Package extensions (TODO: remove this section once we require Julia 1.9+)
#####

# On Julia versions without package-extension support (no `Base.get_extension`),
# load the ArrowTypes glue code unconditionally from ext/.
if !isdefined(Base, :get_extension)
    include(joinpath(dirname(@__DIR__), "ext", "TimeSpansArrowTypesExt.jl"))
end
end # module
| TimeSpans | https://github.com/beacon-biosignals/TimeSpans.jl.git |
|
[
"MIT"
] | 1.1.0 | db6713d1db975f325d4d609fc7d3e92d32635104 | code | 11641 | using Test
using TimeSpans
using TimeSpans: contains, nanoseconds_per_sample
using Compat
using Dates
using Statistics
# Reference implementation for index_from_time: step forward in exact rational
# arithmetic, one sample at a time, until the requested time is passed.
function naive_index_from_time(sample_rate, sample_time)
    # This stepping computation is prone to roundoff error, so we'll work in high precision
    target = big(Dates.value(Nanosecond(sample_time))) // big(TimeSpans.NS_IN_SEC)
    elapsed = Rational{BigInt}(0//1)
    idx = 1
    # At time 0, we are at index 1; advance while the next sample time is
    # still at or before the time of interest.
    while elapsed + 1 // sample_rate <= target
        elapsed += 1 // sample_rate
        idx += 1
    end
    return idx
end
# Exercise construction, equality, containment, duration, translation and
# display for TimeSpans built from a random instant.
@testset "basic TimeSpan code paths" begin
    t = TimeSpan(Nanosecond(rand(UInt32)))
    @test t == TimeSpan(t)
    @test t == TimeSpan(start(t), stop(t))
    # instantaneous spans normalize to a 1ns-wide span
    @test t == TimeSpan(start(t), start(t))
    @test t == TimeSpan(start(t), start(t) + Nanosecond(1))
    @test contains(t, t)
    @test overlaps(t, t)
    # membership is inclusive of start, exclusive of stop
    @test start(t) ∈ t
    @test !(stop(t) ∈ t)
    @test stop(t) + Nanosecond(1) ∉ t
    @test shortest_timespan_containing([t]) == t
    @test shortest_timespan_containing((t,t,t)) == t
    @test shortest_timespan_containing(t, t) == t
    @test duration(TimeSpan(start(t), stop(t) + Nanosecond(100))) == Nanosecond(101)
    @test duration(start(t)) == Nanosecond(1)
    @test_throws ArgumentError TimeSpan(4, 2)
    @test istimespan(t)
    @test istimespan(start(t))
    @test !istimespan(1)
    @test !istimespan(1:10)
    by = Second(rand(1:10))
    @test translate(t, by) === TimeSpan(start(t) + Nanosecond(by), stop(t) + Nanosecond(by))
    @test translate(t, -by) === TimeSpan(start(t) - Nanosecond(by), stop(t) - Nanosecond(by))
    @test repr(TimeSpan(6149872364198, 123412345678910)) == "TimeSpan(01:42:29.872364198, 34:16:52.345678910)"
    # Periods and compound periods are supported
    for start in [Nanosecond(3), Minute(1), Minute(3) + Nanosecond(1)]
        stop = start + Nanosecond(8)
        start_ns = convert(Nanosecond, start)
        stop_ns = convert(Nanosecond, stop)
        @test TimeSpan(start, stop) == TimeSpan(start_ns, stop_ns) == TimeSpan(Dates.value(start_ns), Dates.value(stop_ns))
    end
    # absolute DateTimes are not accepted
    @test_throws MethodError TimeSpan(now(), now() + Nanosecond(1))
    # Different types for start and stop are supported
    for (start, stop) in [(3, Nanosecond(8)), (Nanosecond(3), 8), (3, Minute(8))]
        start_ns = Nanosecond(start)
        stop_ns = Nanosecond(stop)
        @test TimeSpan(start, stop) == TimeSpan(start_ns, stop_ns) == TimeSpan(Dates.value(start_ns), Dates.value(stop_ns))
    end
end
# format_duration renders a signed nanosecond count as (-)HH:MM:SS.nnnnnnnnn
@testset "format_duration" begin
    @test TimeSpans.format_duration(3723004005006) == "01:02:03.004005006"
    @test TimeSpans.format_duration(-3723004005006) == "-01:02:03.004005006"
end

# contains(a, b): b lies entirely within a (shared endpoints count as contained)
@testset "contains(::TimeSpan...)" begin
    @test contains(TimeSpan(10, 20), TimeSpan(10, 20))
    @test contains(TimeSpan(10, 20), TimeSpan(11, 19))
    @test contains(TimeSpan(11, 20), TimeSpan(11, 19))
    @test contains(TimeSpan(10, 19), TimeSpan(11, 19))
    @test !contains(TimeSpan(10, 20), TimeSpan(11, 21))
    @test !contains(TimeSpan(11, 20), TimeSpan(10, 19))
    @test !contains(TimeSpan(10, 19), TimeSpan(10, 21))
    @test !contains(TimeSpan(11, 19), TimeSpan(10, 20))
    # a span can also contain a single instant
    @test contains(TimeSpan(1, 10), Nanosecond(4))
end
# overlaps is symmetric; spans that merely touch at an endpoint do not overlap
@testset "overlaps(::TimeSpan...)" begin
    @test overlaps(TimeSpan(10, 20), TimeSpan(10, 20))
    @test overlaps(TimeSpan(10, 20), TimeSpan(11, 19))
    @test overlaps(TimeSpan(11, 20), TimeSpan(11, 19))
    @test overlaps(TimeSpan(10, 19), TimeSpan(11, 19))
    @test overlaps(TimeSpan(10, 20), TimeSpan(11, 21))
    @test overlaps(TimeSpan(11, 20), TimeSpan(10, 19))
    @test overlaps(TimeSpan(10, 19), TimeSpan(10, 21))
    @test overlaps(TimeSpan(11, 19), TimeSpan(10, 20))
    @test !overlaps(TimeSpan(20, 30), TimeSpan(10, 20))
    @test !overlaps(TimeSpan(10, 20), TimeSpan(20, 30))
    @test !overlaps(TimeSpan(10, 20), TimeSpan(21, 30))
    @test !overlaps(TimeSpan(21, 30), TimeSpan(10, 20))
end

# the result spans from the earliest start to the latest stop of the inputs
@testset "shortest_timespan_containing(spans)" begin
    @test shortest_timespan_containing([TimeSpan(1, 2),
                                        TimeSpan(5, 10),
                                        TimeSpan(2, 3)]) == TimeSpan(1, 10)
    @test shortest_timespan_containing([TimeSpan(3, 7),
                                        TimeSpan(1, 10),
                                        TimeSpan(2, 5)]) == TimeSpan(1, 10)
    @test shortest_timespan_containing(TimeSpan(1, 10),
                                       TimeSpan(4, 20)) == TimeSpan(1, 20)
end
# Round-trip conversions between sample indices and times, including
# non-integer and floating-point sample rates.
@testset "time <--> index conversion" begin
    @test_throws ArgumentError time_from_index(200, 0)
    @test time_from_index(100, 1) == Nanosecond(0)
    @test time_from_index(100, 301:600) == TimeSpan(Second(3), Second(6))
    @test time_from_index(100, 101:101) == TimeSpan(Second(1), Nanosecond(1010000000))
    @test_throws ArgumentError index_from_time(200, Nanosecond(-1))
    @test index_from_time(100, Nanosecond(0)) == 1
    @test index_from_time(100, TimeSpan(Second(3), Second(6))) == 301:600
    @test index_from_time(100, TimeSpan(Second(1))) == 101:101
    # https://github.com/beacon-biosignals/TimeSpans.jl/issues/28
    @test index_from_time(1, Millisecond(1500)) == 2
    @test index_from_time(1, Millisecond(2500)) == 3
    @test index_from_time(1, TimeSpan(Millisecond(1500), Millisecond(2500))) == 2:3
    # test non-integer sample rates
    rate = 100.66
    ns_per_sample = nanoseconds_per_sample(rate)
    for i in 1:1000
        t = Nanosecond(ceil(Int, (i - 1) * ns_per_sample))
        @test index_from_time(rate, t) == i
        @test time_from_index(rate, i) == t
    end
    # cross-check against the exact-rational reference implementation above
    for rate in (101//2, 1001//10, 200, 256, 1, 10)
        for sample_time in (Nanosecond(12345), Minute(5), Nanosecond(Minute(5)) + Nanosecond(1),
                            Nanosecond(1), Nanosecond(10^6), Nanosecond(6970297031))
            # compute with a very simple algorithm
            index = naive_index_from_time(rate, sample_time)
            # Check against our `TimeSpans.index_from_time`:
            @test index == index_from_time(rate, sample_time)
            # Works even if `rate` is in Float64 precision:
            @test index == index_from_time(Float64(rate), sample_time)
        end
    end
    @testset "docstring" begin
        @test index_from_time(1, Second(0)) == 1
        @test index_from_time(1, Second(1)) == 2
        @test index_from_time(100, Millisecond(999)) == 100
        @test index_from_time(100, Millisecond(1000)) == 101
    end
    @testset "floating-point precision" begin
        ns = Nanosecond((2 * 60 + 30) * 1e9)
        @test index_from_time(200, ns) == 30001
        @test index_from_time(200e0, ns) == 30001
        @test index_from_time(200f0, ns) == 30001
        @test time_from_index(143.5, 8611) == Nanosecond(60000000000)
        @test time_from_index(Float32(143.5), 8611) == Nanosecond(60000000000)
    end
    # index ranges round-trip through time_from_index/index_from_time
    for i in 1:10
        @test index_from_time(1.5, time_from_index(1.5, 1:i)) == 1:i
    end
end
# `in(span)` is usable as a predicate with findall etc.
@testset "`in` and `findall`" begin
    @test findall(in(TimeSpan(1, 10)), Nanosecond.(5:15)) == 1:5
    @test findall(in(TimeSpan(1, 10)), map(Nanosecond, (9,10,11))) == 1:1
    @test in(TimeSpan(1,2))(Nanosecond(1))
    @test !in(TimeSpan(1,2))(Nanosecond(2))
end

@testset "merge_spans!" begin
    spans = [TimeSpan(0, 10), TimeSpan(6, 12), TimeSpan(15, 20),
             TimeSpan(21, 30), TimeSpan(29, 31)]
    merge_spans!(overlaps, spans)
    @test spans == [TimeSpan(0, 12), TimeSpan(15, 20), TimeSpan(21, 31)]
    # No-op when the predicate is never `true`
    merge_spans!(overlaps, spans)
    @test spans == [TimeSpan(0, 12), TimeSpan(15, 20), TimeSpan(21, 31)]
    merge_spans!((a, b) -> true, spans)
    @test spans == [TimeSpan(0, 31)]
    # empty and single-element inputs are returned untouched
    # (predicate is never called, so a random one is safe here)
    @test merge_spans!((a, b) -> rand(Bool), TimeSpan[]) == TimeSpan[]
    @test merge_spans!((a, b) -> rand(Bool), [TimeSpan(0, 1)]) == [TimeSpan(0, 1)]
end

# non-mutating variant accepts arbitrary iterables (tuple here)
@testset "merge_spans" begin
    @test merge_spans((a, b) -> start(b) - stop(a) < Nanosecond(5),
                      (TimeSpan(0, 1), TimeSpan(4, 10))) == [TimeSpan(0, 10)]
    x = [TimeSpan(0, 10), TimeSpan(100, 200), TimeSpan(400, 1000)]
    @test merge_spans((a, b) -> true, x) == [shortest_timespan_containing(x)]
end

@testset "Statistics.middle" begin
    @test middle(TimeSpan(Nanosecond(0), Nanosecond(2))) == Nanosecond(1)
    @test middle(TimeSpan(Nanosecond(-1), Nanosecond(1))) == Nanosecond(0)
    # rounding
    @test middle(TimeSpan(Nanosecond(0), Nanosecond(1))) == Nanosecond(0)
    @test middle(TimeSpan(Nanosecond(0), Nanosecond(1)), RoundUp) == Nanosecond(1)
    @test middle(TimeSpan(Nanosecond(-1), Nanosecond(0))) == Nanosecond(0)
    @test middle(TimeSpan(Nanosecond(-1), Nanosecond(0)), RoundDown) == Nanosecond(-1)
end
# invert_spans returns the gaps inside parent_span not covered by the inputs
@testset "invert_spans" begin
    parent_span = TimeSpan(Second(0), Second(60))
    # non-overlapping spans that extend to limits of parent_span
    spans = [TimeSpan(Second(x), Second(x + 1)) for x in 0:10:59]
    i_spans = invert_spans(spans, parent_span)
    @test length(i_spans) == 6
    @test all(duration.(i_spans) .== Second(9))
    spans = [TimeSpan(Second(x + 8), Second(x + 10)) for x in 0:10:50]
    i_spans = invert_spans(spans, parent_span)
    @test length(i_spans) == 6
    @test all(duration.(i_spans) .== Second(8))
    # non-overlapping spans that do not extend to limits of parent_span
    spans = [TimeSpan(Second(x + 1), Second(x + 2)) for x in 0:10:59]
    i_spans = invert_spans(spans, parent_span)
    @test length(i_spans) == 7
    @test i_spans[1] == TimeSpan(Second(0), Second(1))
    @test all(duration.(i_spans[2:6]) .== Second(9))
    @test i_spans[end] == TimeSpan(Second(52), stop(parent_span))
    # some spans lie outside of parent_span
    i_spans = invert_spans(spans, TimeSpan(Second(0), Second(30)))
    @test length(i_spans) == 4
    @test maximum(stop, i_spans) <= Second(30)
    # all spans lie outside of parent_span
    i_spans = invert_spans(spans, TimeSpan(Minute(10), Minute(30)))
    @test only(i_spans) == TimeSpan(Minute(10), Minute(30))
    # adjacent but not overlapping spans, unsorted
    spans = vcat([TimeSpan(Second(x), Second(x + 1)) for x in 0:10:59],
                 [TimeSpan(Second(x + 1), Second(x + 3)) for x in 0:10:59])
    i_spans = invert_spans(spans, parent_span)
    @test length(i_spans) == 6
    @test all(duration.(i_spans) .== Second(7))
    # overlapping, unsorted
    spans = vcat([TimeSpan(Second(x), Second(x + 1)) for x in 0:10:59],
                 [TimeSpan(Millisecond(x * 1000) + Millisecond(500), Second(x + 2))
                  for x in 0:10:59])
    i_spans = invert_spans(spans, parent_span)
    @test length(i_spans) == 6
    @test all(duration.(i_spans) .== Second(8))
    # empty
    @test invert_spans(TimeSpan[], parent_span) == [parent_span]
    # some spans cross the parent span's boundary
    i_spans = invert_spans([TimeSpan(-5, 3), TimeSpan(6, 8)], TimeSpan(0, 10))
    @test i_spans == [TimeSpan(3, 6), TimeSpan(8, 10)]
end

# broadcasting a scalar TimeSpan into an existing vector works (isbits struct)
@testset "broadcast_spans" begin
    test_vec = [TimeSpan(0, 100), TimeSpan(0, 200)]
    test_vec .= TimeSpan(0, 300)
    @test test_vec == [TimeSpan(0, 300), TimeSpan(0, 300)]
    test_vec = []
    test_vec .= TimeSpan(0, 300)
    @test test_vec == []
end

@testset "extensions" begin
    # TimeSpan must round-trip through its registered Arrow type name
    @testset "ArrowTypes" begin
        using ArrowTypes
        @test ArrowTypes.JuliaType(Val(ArrowTypes.arrowname(TimeSpan))) === TimeSpan
    end
end
| TimeSpans | https://github.com/beacon-biosignals/TimeSpans.jl.git |
|
[
"MIT"
] | 1.1.0 | db6713d1db975f325d4d609fc7d3e92d32635104 | docs | 3202 | # TimeSpans.jl
[](https://github.com/beacon-biosignals/TimeSpans.jl/actions/workflows/CI.yml)
[](https://codecov.io/gh/beacon-biosignals/TimeSpans.jl)
[](https://beacon-biosignals.github.io/TimeSpans.jl/stable)
[](https://beacon-biosignals.github.io/TimeSpans.jl/dev)
TimeSpans.jl provides a simple `TimeSpan` type for representing a continuous span between two points in time, along with generic utility functions for common operations on `TimeSpan`-like types. Importantly, the package exposes a minimal interface (`TimeSpans.start` and `TimeSpans.stop`) that any type can implement to enable support for the TimeSpans API.
## Example usage
```julia
julia> span = TimeSpan(Nanosecond(100), Nanosecond(1000))
TimeSpan(00:00:00.000000100, 00:00:00.000001000)
julia> start(span)
100 nanoseconds
julia> stop(span)
1000 nanoseconds
julia> duration(span)
900 nanoseconds
```
TimeSpans.jl supports common functions for comparing timespans, such as `contains` and `overlaps`:
```julia
julia> overlaps(TimeSpan(Minute(1), Minute(5)), TimeSpan(Minute(2), Minute(10)))
true
julia> TimeSpans.contains(TimeSpan(Minute(1), Minute(5)), TimeSpan(Minute(2), Minute(10)))
false
```
Operations on collections of timespans include `merge_spans` and `invert_spans`:
```julia
julia> spans = [TimeSpan(Minute(1), Minute(5)), TimeSpan(Minute(2), Minute(6)), TimeSpan(Minute(10), Minute(15))]
3-element Vector{TimeSpan}:
TimeSpan(00:01:00.000000000, 00:05:00.000000000)
TimeSpan(00:02:00.000000000, 00:06:00.000000000)
TimeSpan(00:10:00.000000000, 00:15:00.000000000)
# 2 out of 3 spans overlap, returning 2 merged timespans
julia> merge_spans(overlaps, spans)
2-element Vector{TimeSpan}:
TimeSpan(00:01:00.000000000, 00:06:00.000000000)
TimeSpan(00:10:00.000000000, 00:15:00.000000000)
# no timespans contain one another
julia> merge_spans(TimeSpans.contains, spans)
3-element Vector{TimeSpan}:
TimeSpan(00:01:00.000000000, 00:05:00.000000000)
TimeSpan(00:02:00.000000000, 00:06:00.000000000)
TimeSpan(00:10:00.000000000, 00:15:00.000000000)
julia> parent_span = TimeSpan(Minute(0), Minute(15))
TimeSpan(00:00:00.000000000, 00:15:00.000000000)
# return spans within `parent_span` when provided `spans` are removed
julia> invert_spans(spans, parent_span)
2-element Vector{TimeSpan}:
TimeSpan(00:00:00.000000000, 00:01:00.000000000)
TimeSpan(00:06:00.000000000, 00:10:00.000000000)
```
Timespans can be converted to sample index ranges for a signal of a given sample rate, and vice versa.
```julia
julia> index_from_time(100, TimeSpan(Second(0), Second(1)))
1:100
julia> index_from_time(100, TimeSpan(Second(1)))
101:101
julia> index_from_time(100, TimeSpan(Second(3), Second(6)))
301:600
julia> time_from_index(1, 1)
0 nanoseconds
julia> time_from_index(1, 2)
1000000000 nanoseconds
julia> time_from_index(100, 100)
990000000 nanoseconds
julia> time_from_index(100, 101)
1000000000 nanoseconds
```
| TimeSpans | https://github.com/beacon-biosignals/TimeSpans.jl.git |
|
[
"MIT"
] | 1.1.0 | db6713d1db975f325d4d609fc7d3e92d32635104 | docs | 307 | # API Documentation
```@meta
CurrentModule = TimeSpans
```
```@docs
TimeSpan
start
stop
TimeSpans.contains
TimeSpans.overlaps
TimeSpans.shortest_timespan_containing
TimeSpans.duration
TimeSpans.translate
TimeSpans.time_from_index
TimeSpans.index_from_time
TimeSpans.merge_spans
TimeSpans.merge_spans!
```
| TimeSpans | https://github.com/beacon-biosignals/TimeSpans.jl.git |
|
[
"MIT"
] | 0.21.29 | 52cfdf2df400205dd8912e997224331d6d185f6a | code | 1063 | using Documenter
import PALEOboxes
using DocumenterCitations
# Bibliography for literature citations, rendered in author-year style.
bib = CitationBibliography(
    joinpath(@__DIR__, "src/paleo_references.bib"),
    style=:authoryear,
)

# Build the HTML documentation with the page tree below.
makedocs(;
    sitename="PALEOboxes Documentation",
    pages = [
        "Home" => "index.md",
        "Design" => [
            "DesignOverview.md",
            "CreateInitializeLoop.md"
        ],
        "Reference" => [
            "DomainsVariablesFields.md",
            "Solver API.md",
            "Reaction API.md",
            "ReactionCatalog.md",
        ],
        "References.md",
        "indexpage.md",
    ],
    format = Documenter.HTML(
        # pretty URLs only on CI so a local build remains browsable from disk
        prettyurls = get(ENV, "CI", nothing) == "true"
    ),
    plugins = [bib],
    # repo = "https://github.com/PALEOtoolkit/PALEOboxes.jl/blob/master/{path}#{line}"
)

@info "Local html documentation is available at $(joinpath(@__DIR__, "build/index.html"))"

# Push built docs to gh-pages (no-op when run outside CI).
deploydocs(
    repo = "github.com/PALEOtoolkit/PALEOboxes.jl.git",
)
| PALEOboxes | https://github.com/PALEOtoolkit/PALEOboxes.jl.git |
|
[
"MIT"
] | 0.21.29 | 52cfdf2df400205dd8912e997224331d6d185f6a | code | 1898 |
"""
AbstractCellRange
Defines a range of cells within a [`Domain`](@ref).
# Fields
All implementations should define:
- `domain::Domain`: the [`Domain`](@ref) covered by this cellrange.
- `operatorID::Int`: If `operatorID==0`, call all `Reaction`s, otherwise
only call those with matching `operatorID` (this enables operator splitting).
- `indices`: an iterable list of cell indices.
And then may provide subtype-specific fields defining additional ranges of cells.
"""
AbstractCellRange
"""
CellRange <: AbstractCellRange
Defines a range of cells in a specified [`Domain`](@ref) as a linear list.
# Fields
$(FIELDS)
"""
Base.@kwdef mutable struct CellRange{T} <: AbstractCellRange
domain::Domain
operatorID::Int = 0
"may be any valid Julia indexing range thing eg 1:100, [1 2 3 4], etc"
indices::T
end
"Add an array of indices to a CellRange instance"
function add_indices!(cellrange::CellRange{Vector{Int64}}, indicestoadd::Vector{Int64})
append!(cellrange.indices, indicestoadd)
if length(unique(cellrange.indices)) != length(cellrange.indices)
error("add_indices! duplicate indices")
end
end
"""
CellRangeColumns <: AbstractCellRange
Defines a range of cells in a specified [`Domain`](@ref), organised by `columns`.
# Fields
$(FIELDS)
"""
Base.@kwdef mutable struct CellRangeColumns{T1, T2} <: AbstractCellRange
domain::Domain
operatorID::Int = 0
"iterator through all cells in arbitrary order"
indices::T1
"iterator through columns: columns[n] returns a Pair icol=>cells where cells are ordered top to bottom"
columns::T2
end
"replace a contiguous range of indices (as a Vector of indices) with a Range"
function replace_contiguous_range(indices)
if indices == first(indices):last(indices)
return first(indices):last(indices)
else
return indices
end
end
| PALEOboxes | https://github.com/PALEOtoolkit/PALEOboxes.jl.git |
|
[
"MIT"
] | 0.21.29 | 52cfdf2df400205dd8912e997224331d6d185f6a | code | 6208 |
################################
# Coordinates
#################################
"""
FixedCoord
A fixed (state independent) coordinate
"""
mutable struct FixedCoord
name::String
values::Vector{Float64}
attributes::Dict{Symbol, Any}
end
"""
append_units(name::AbstractString, attributes) -> "name (units)"
Utility function to append variable units string to a variable name for display.
"""
function append_units(name::AbstractString, attributes::Dict{Symbol, Any})
units = get(attributes, :units, "")
if isempty(units)
return name
else
return name*" ($units)"
end
end
append_units(name::AbstractString, attributes::Nothing) = name
"""
build_coords_edges(coords_vec::Vector{FixedCoord}) -> Vector{Float64}
Build a vector of coordinate edges (length `n+1``) from `coords_vec`, assuming the PALEO
convention that `coords_vec` contains three elements with
cell midpoints, lower edges, upper edges each of length `n`, in that order.
Falls back to just returning the first entry in `coords_vec` for other cases.
"""
function build_coords_edges(coords_vec::Vector{FixedCoord})
if length(coords_vec) == 1 || length(coords_vec) > 3
# 1 coordinate or something we don't understand - take first
co = first(coords_vec)
co_values = co.values
co_label = append_units(co.name, co.attributes)
elseif length(coords_vec) in (2, 3)
# 2 coordinates assume lower, upper edges
# 3 coordinates assume mid, lower, upper
co_lower = coords_vec[end-1]
co_upper = coords_vec[end]
co_label = append_units(co_lower.name*", "*co_upper.name, co_lower.attributes)
first(co_lower.values) < first(co_upper.values) ||
@warn "build_coords_edges: $co_label co_lower is > co_upper - check model grid"
if co_lower.values[end] > co_lower.values[1] # ascending order
co_lower.values[2:end] == co_upper.values[1:end-1] ||
@warn "build_coords_edges: $co_label lower and upper edges don't match"
co_values = [co_lower.values; co_upper.values[end]]
else # descending order
co_lower.values[1:end-1] == co_upper.values[2:end] ||
@warn "build_coords_edges: $co_label lower and upper edges don't match"
co_values = [co_upper.values[1]; co_lower.values]
end
end
return co_values, co_label
end
"guess coordinate edges from midpoints, assuming uniform spacing"
function guess_coords_edges(x_midpoints)
first_x = x_midpoints[1] - 0.5*(x_midpoints[2] - x_midpoints[1])
last_x = x_midpoints[end] + 0.5*(x_midpoints[end] - x_midpoints[end-1])
return [first_x; 0.5.*(x_midpoints[1:end-1] .+ x_midpoints[2:end]); last_x]
end
# Restrict a FixedCoord (or a vector of them) to a subset of cells given by `indices`.
function get_region(fc::FixedCoord, indices::AbstractVector)
    return FixedCoord(fc.name, fc.values[indices], fc.attributes)
end

function get_region(fcv::Vector{FixedCoord}, indices::AbstractVector)
    return [FixedCoord(fc.name, fc.values[indices], fc.attributes) for fc in fcv]
end
"find indices of coord from first before range[1] to first after range[2]"
function find_indices(coord::AbstractVector, range)
length(range) == 2 ||
throw(ArgumentError("find_indices: length(range) != 2 $range"))
idxstart = findlast(t -> t<=range[1], coord)
isnothing(idxstart) && (idxstart = 1)
idxend = findfirst(t -> t>=range[2], coord)
isnothing(idxend) && (idxend = length(coord))
return idxstart:idxend, (coord[idxstart], coord[idxend])
end
"find indices of coord nearest val"
function find_indices(coord::AbstractVector, val::Real)
idx = 1
for i in 1:length(coord)
if abs(coord[i] - val) < abs(coord[idx] - val)
idx = i
end
end
return [idx], coord[idx]
end
#################################################
# Dimensions
#####################################################
"""
NamedDimension
A named dimension, with optional attached fixed coordinates `coords`
PALEO convention is that where possible `coords` contains three elements, for cell
midpoints, lower edges, upper edges, in that order.
"""
mutable struct NamedDimension
name::String
size::Int64
coords::Vector{FixedCoord} # may be empty
end
"create from size only (no coords)"
function NamedDimension(name, size::Integer)
return NamedDimension(
name,
size,
FixedCoord[],
)
end
"create from coord mid-points"
function NamedDimension(name, coord::AbstractVector)
return NamedDimension(
name,
length(coord),
[
FixedCoord(name, coord, Dict{Symbol, Any}()),
]
)
end
"create from coord mid-points and edges"
function NamedDimension(name, coord::AbstractVector, coord_edges::AbstractVector)
if coord[end] > coord[1]
# ascending order
coord_lower = coord_edges[1:end-1]
coord_upper = coord_edges[2:end]
else
# descending order
coord_lower = coord_edges[2:end]
coord_upper = coord_edges[1:end-1]
end
return NamedDimension(
name,
length(coord),
[
FixedCoord(name, coord, Dict{Symbol, Any}()),
FixedCoord(name*"_lower", coord_lower, Dict{Symbol, Any}()),
FixedCoord(name*"_upper", coord_upper, Dict{Symbol, Any}()),
]
)
end
# Restrict a NamedDimension (and its coords) to a subset of cells given by `indices`.
function get_region(nd::NamedDimension, indices::AbstractVector)
    return NamedDimension(nd.name, length(indices), get_region(nd.coords, indices))
end
"""
build_coords_edges(nd::NamedDimension) -> Vector{Float64}
Call [`build_coords_edges`](@ref)(nd.coords), or fallback to just returning indices
if no coords present.
"""
function build_coords_edges(nd::NamedDimension)
if !isempty(nd.coords)
return build_coords_edges(nd.coords)
else
@warn "no coords for NamedDimension $(nd.name), returning indices"
return collect(1:nd.size), nd.name*" (indices)"
end
end
# Compact single-line display, listing coordinate names only.
function Base.show(io::IO, nd::NamedDimension)
    coordnames = join((c.name for c in nd.coords), ", ")
    print(io, "NamedDimension(name=", nd.name, ", size=", nd.size, ", coords=(", coordnames, "))")
    return nothing
end
| PALEOboxes | https://github.com/PALEOtoolkit/PALEOboxes.jl.git |
|
[
"MIT"
] | 0.21.29 | 52cfdf2df400205dd8912e997224331d6d185f6a | code | 23701 | import Infiltrator
"""
Domain
A model region containing Variables and Reactions that act on them.
Domain spatial size is defined by `grid`, which may be `nothing` to define a scalar Domain,
or an [`AbstractMesh`](@ref) to define a spatially-resolved Domain with multiple cells.
Named `data_dims` may be set by [`set_data_dimension!`](@ref) to allow Variables with additional non-spatial dimensions, eg
to represent quantities on a wavelength grid.
"""
Base.@kwdef mutable struct Domain <: AbstractDomain
name::String
ID::Int
data_dims::Vector{NamedDimension} = Vector{NamedDimension}()
parameters::Dict{String, Any}
grid::Union{Nothing, AbstractMesh} = nothing
reactions::Vector{AbstractReaction} = Vector{AbstractReaction}()
variables::Dict{String, VariableDomain} = Dict{String, VariableDomain}()
end
"""
set_data_dimension!(domain::Domain, dim::NamedDimension; allow_exists=false)
Define a Domain data dimension as a [`NamedDimension`](@ref)
Variables may then specify data dimensions as a list of names using the `:data_dims` Variable Attribute.
"""
function set_data_dimension!(domain::Domain, dim::NamedDimension; allow_exists=false)
@info "set_data_dimension!: setting Domain '$(domain.name)' data dimension '$dim'"
idx = findfirst(d -> d.name==dim.name, domain.data_dims)
allow_exists || isnothing(idx) ||
error("set_data_dimensions! Domain '$(domain.name)' already has dimension "*
" name $(dim.name)")
if isnothing(idx)
push!(domain.data_dims, dim)
else
domain.data_dims[idx] = dim
end
return nothing
end
# true if the Domain defines a data dimension named `dimname`
has_data_dimension(domain::Domain, dimname::AbstractString) =
    any(d -> d.name==dimname, domain.data_dims)

# Look up a data dimension by name, erroring if not present.
function get_data_dimension(domain::Domain, dimname::AbstractString)
    idx = findfirst(d -> d.name==dimname, domain.data_dims)
    # NB: previous error message had an unbalanced parenthesis
    !isnothing(idx) ||
        error("Domain $(domain.name) has no dimension='$dimname' (available dimensions: $(domain.data_dims))")
    return domain.data_dims[idx]
end
# Number of cells in the Domain (1 for a scalar Domain with no grid).
function get_length(domain::Domain)
    isnothing(domain.grid) && return 1 # scalar Domain
    return domain.grid.ncells::Int
end
"Get number of Domain variables"
function get_num_variables(domain::Domain)
return length(domain.variables)
end
"""
get_variables(domain; hostdep=nothing, vfunction=VF_Undefined) -> Vector{VariableDomain}
Get domain variables, optionally filtering for subsets based on `hostdep` and `:vfunction` attribute
"""
function get_variables(
domain::Domain;
hostdep::Union{Bool,Nothing}=nothing,
vfunction::VariableFunction=VF_Undefined,
)
# define function to filter variables
filter(var) = (
(isnothing(hostdep) || (host_dependent(var) == hostdep))
&& (vfunction == VF_Undefined || get_attribute(var, :vfunction, VF_Undefined) == vfunction)
)
return get_variables(domain, filter)
end
"""
get_variables(domain, filter) -> Vector{VariableDomain}
Get subset of domain variables where `filter(var) == true`.
"""
function get_variables(domain::Domain, filter)
return VariableDomain[var for (name, var) in domain.variables if filter(var)]
end
"Get variable by name"
function get_variable(domain::Domain, name::AbstractString; allow_not_found=true)
var = get(domain.variables, name, nothing)
!isnothing(var) || allow_not_found || error("get_variable: Domain $(domain.name) Variable name $name not found")
return var
end
"""
get_host_variables(domain, vfunction; [match_deriv_suffix=""] [, operatorID=0] [, exclude_var_nameroots=[]] [, verbose=false])
-> (host_vars, host_deriv_vars)
Get state Variables with [`VariableFunction`](@ref) `vfunction`, and optionally
corresponding time derivative with [`VariableFunction`](@ref) `VF_Deriv` and name matching
hostvarname*<`match_deriv_suffix``>.
Optionally filter by `operatorID`, omit Variables with name matching `exclude_var_nameroots`.
"""
function get_host_variables(
domain::Domain, vfunction::VariableFunction;
match_deriv_suffix="",
operatorID=0,
exclude_var_nameroots=[],
)
# return a function that filters Variables that match requested VariableFunction,
# are host dependent, have matching operatorID, and optionally have name match_name
function filter_func(vf::VariableFunction, match_name)
function filt_func(var)
var_opID = get_attribute(var, :operatorID, missing)
!ismissing(var_opID) ||
error("Variable $(fullname(var)) has no operatorID attribute")
return (
host_dependent(var)
&& (get_attribute(var, :vfunction, VF_Undefined)==vf)
&& (operatorID == 0 || operatorID in var_opID)
&& !(var.name in exclude_var_nameroots)
&& (isempty(match_name) || match_name == var.name)
)
end
return filt_func
end
host_vars = get_variables(domain, filter_func(vfunction, ""))
if !isempty(match_deriv_suffix)
host_deriv_vars = VariableDomain[get_variables(domain, filter_func(VF_Deriv, var.name*match_deriv_suffix))[] for var in host_vars]
else
host_deriv_vars = nothing
end
return (host_vars, host_deriv_vars)
end
"""
get_reactions(domain, filter) -> Vector
Get Reactions where `filter(react) == true`.
"""
function get_reactions(domain::Domain, filter)
return AbstractReaction[react for react in domain.reactions if filter(react)]
end
"""
get_reaction(domain, reactname; allow_not_found) -> Reaction or nothing
Get a reaction by name.
"""
function get_reaction(domain::Domain, reactname::AbstractString; allow_not_found=true)
reactions = get_reactions(domain, r -> r.name == reactname)
if isempty(reactions)
allow_not_found || error("get_reaction: Domain $(domain.name) reactname $reactname not found")
return nothing
else
return first(reactions)
end
end
"""
allocate_variables!(domain, modeldata, arrays_idx; [hostdep=false] [, kwargs...])
Allocate memory for Domain Variables.
If `hostdep=false`, only internal Variables are allocated, allowing host-dependent Variables
(usually state Variables and derivatives + any external dependencies) to be set to views on host-managed arrays.
See [`allocate_variables!(vars, modeldata::AbstractModelData, arrays_idx::Int)`](@ref).
"""
function allocate_variables!(
domain::Domain, modeldata::AbstractModelData, arrays_idx::Int;
hostdep::Union{Bool,Nothing}=nothing,
kwargs...
)
vars = get_variables(domain, hostdep=hostdep)
@info "Domain $(rpad(domain.name,20)) data dimensions $(rpad(domain.data_dims,20)) "*
"allocating $(rpad(length(vars),4)) variables (hostdep=$(hostdep))"
allocate_variables!(
vars, modeldata, arrays_idx;
kwargs...
)
return nothing
end
"""
get_unallocated_variables(domain, modeldata, arrays_idx::Int) -> Vector{VariableDomain}
Return any unallocated variables (host-dependent variables which have no data pointer set)
"""
function get_unallocated_variables(
domain::Domain, modeldata::AbstractModelData, arrays_idx::Int
)
allvars = get_variables(domain)
unallocated_variables = [v for v in allvars if !is_allocated(v, modeldata, arrays_idx)]
return unallocated_variables
end
"Check all variable pointers set"
function check_ready(
domain::Domain, modeldata::AbstractModelData, arrays_idx::Int=1;
throw_on_error=true
)
vars_unallocated = get_unallocated_variables(domain, modeldata, arrays_idx)
num_unallocated = length(vars_unallocated)
if num_unallocated == 0
return true
else
@error "Domain \"$(domain.name)\" unallocated variables:"
for var in vars_unallocated
linknames = [fullname(vl) for vl in get_all_links(var)]
@error " \"$(var.name)\" linked by: $linknames"
end
if throw_on_error
error("Domain $(domain.name) check_ready failed num_unallocated=", num_unallocated)
end
return false
end
end
"Check configuration"
function check_configuration(domain::Domain, model::Model)
configok = true
for react in domain.reactions
if !check_configuration(react, model)
configok = false
end
end
return configok
end
###################################
# creation from _cfg.yaml
##################################
# Construct a Domain named `name` from the parsed .yaml configuration `conf_domain`.
# Recognised keys: "data_dims" (dimension name => length) and "reactions"
# (reaction name => reaction configuration). `rdict` maps reaction class name
# strings to types, used by `create_reaction_from_config`.
function create_domain_from_config(
    name::AbstractString, domainID::Integer, conf_domain::Dict{Any,Any}, external_parameters::Dict{String, Any}, rdict::Dict{String, Type}
)
    # reject unrecognised top-level keys early
    for k in keys(conf_domain)
        if !(k in ("data_dims", "reactions"))
            error("Domain $(name) configuration error invalid key '$k'")
        end
    end

    domain = Domain(name=name, ID=domainID, parameters=external_parameters)

    # optional data_dims key
    conf_dimensions = get(conf_domain, "data_dims", Dict{Any,Any}())
    for (name, len) in conf_dimensions
        set_data_dimension!(domain, NamedDimension(name, len, []))
    end

    # reactions
    conf_reactions = get(conf_domain, "reactions", Dict{Any,Any}())

    # pop a Bool key from a reaction configuration, resolving any external
    # parameter reference, and erroring on non-Bool values
    function pop_bool_key!(reactname, conf, keyname, defaultval)
        keyval = pop!(conf, keyname, defaultval)
        keyval = externalvalue(keyval, external_parameters)
        keyval isa Bool ||
            error("config error: reaction $(name).$(reactname) "*
                "invalid '$keyname' key $keyval (must be a Bool)")
        return keyval
    end

    if !isnothing(conf_reactions)
        for (reactname, conf_reactionraw) in conf_reactions
            !isnothing(conf_reactionraw) ||
                error("config error: reaction $(domain.name).$(reactname) has no configuration")
            # copy so the enabled/disabled/class keys can be popped without
            # mutating the caller's parsed configuration
            conf_reaction = copy(conf_reactionraw)
            reactenabled = pop_bool_key!(reactname, conf_reaction, "enabled", true)
            reactdisabled = pop_bool_key!(reactname, conf_reaction, "disabled", false)
            if reactenabled && !reactdisabled
                classname = pop!(conf_reaction, "class", missing)
                !ismissing(classname) ||
                    error("config error: reaction $(domain.name).$(reactname) missing 'class' key")
                # create the reaction instance and add it to our list
                push!(
                    domain.reactions,
                    create_reaction_from_config(
                        classname, rdict, domain, reactname, conf_reaction, domain.parameters
                    )
                )
            else
                @info "not creating reaction $(domain.name).$(reactname) (enabled=$reactenabled, disabled=$reactdisabled)"
            end
        end
    else
        @warn "create_domain_from_config Domain '$(domain.name)' empty 'reactions:' key in .yaml file"
    end

    return domain
end
"Return the ID for the next Variable to be created in `domain` (current count + 1)"
function _next_variable_ID(domain::Domain)
    # fix: drop redundant call-site typeassert `domain::Domain` - the argument
    # is already constrained to Domain by the method signature
    return get_num_variables(domain) + 1
end
#################################
# Variable linking
#################################
# Log (at debug level) the link that `variable` from `reaction` is requesting.
# NOTE(review): `dolog` is accepted for signature compatibility with the other
# _link_* operations passed to _link_variables!, but is not consulted here -
# the @debug message is emitted unconditionally (subject to logger level).
function _link_print(domain::Domain, @nospecialize(reaction::AbstractReaction), variable::VariableReaction,
    linkvar_domain::Domain, linkvar_name::AbstractString, dolog)
    @debug "Link requested $(domain.name).reactions.$(reaction.name) $(variable.localname) --> $(combine_link_name(linkvar_domain.name, variable.linkreq_subdomain, linkvar_name))"
    return nothing
end
# Report a VariableReaction that failed to link: optional links are printed to
# `io`, required (non-optional) links produce a @warn.
function _link_print_not_linked(domain::Domain, @nospecialize(reaction::AbstractReaction), variable::VariableReaction,
    linkvar_domain::Domain, linkvar_name::AbstractString, io::IOBuffer)
    # successfully linked - nothing to report
    isnothing(variable.linkvar) || return nothing
    full_linkname = combine_link_name(variable.linkreq_domain, variable.linkreq_subdomain, linkvar_name)
    reactfullname = domain.name*"."*reaction.name
    if variable.link_optional
        println(io, "    optional $(rpad(reactfullname, 40)) $(rpad(variable.localname,20)) -| $full_linkname")
    else
        @warn "    required $(rpad(reactfullname, 40)) $(rpad(variable.localname,20)) -| $full_linkname"
    end
    return nothing
end
"Create Domain variables for VariableReaction Property and Target, and create property/target link"
function _link_create(domain::Domain, @nospecialize(reaction::AbstractReaction), variable::VariableReaction,
    linkvar_domain::Domain, linkvar_name::AbstractString, dolog)
    # generic method does nothing for VT_ReactDependency, VT_ReactContributor
    # (those are handled later by _link_create_dep / _link_create_contrib / _link_link)
    return nothing
end
# Create (or re-use) a VariableDomPropDep for a Reaction Property and attach the
# property link. A host Variable can carry both a 'property' link (from a main
# method) and a 'property_setup' link (from a setup method), so an existing
# Variable is only a duplicate-name error if the slot we need is already filled.
function _link_create(domain::Domain, @nospecialize(reaction::AbstractReaction),
    variable::VariableReaction{VT_ReactProperty},
    linkvar_domain::Domain, linkvar_name::AbstractString, dolog)
    dolog && @debug "Creating Property $(reaction.base.domain.name).reactions.$(reaction.name).$(variable.localname) "*
        "--> $(linkvar_domain.name).$(linkvar_name)"
    if haskey(linkvar_domain.variables, linkvar_name)
        # Variable of this name already exists - check the needed slot is free
        newvar = linkvar_domain.variables[linkvar_name]
        if ((is_method_setup(variable.method) && !isnothing(newvar.var_property_setup)) ||
            (!is_method_setup(variable.method) && !isnothing(newvar.var_property)))
            errstr = is_method_setup(variable.method) ? "property_setup" : "property"
            io = IOBuffer()
            show_links(io, linkvar_domain.variables[linkvar_name])
            error("Duplicate variable name: Linking VariableReactProperty $(fullname(variable)) --> $(linkvar_domain.name).$(linkvar_name)\n",
                "    Variable $(linkvar_domain.name).$(linkvar_name) already exists and has a $errstr Variable, links:\n",
                String(take!(io)))
        end
    else
        newvar = create_VariableDomPropDep(linkvar_domain, linkvar_name, variable)
    end
    # fill the appropriate slot and record the link back on the VariableReaction
    if is_method_setup(variable.method)
        newvar.var_property_setup = variable
    else
        newvar.var_property = variable
    end
    variable.linkvar = newvar
    return nothing
end
# Create a VariableDomContribTarget for a Reaction Target and attach the target
# link. A Target must be the first Variable of this name in the link Domain:
# any pre-existing Variable is a duplicate-name error.
function _link_create(domain::Domain, @nospecialize(reaction::AbstractReaction),
    variable::VariableReaction{VT_ReactTarget},
    linkvar_domain::Domain, linkvar_name::AbstractString, dolog)
    if dolog
        @debug "Creating Target $(reaction.base.domain.name).reactions.$(reaction.name).$(variable.localname) "*
            "--> $(linkvar_domain.name).$(linkvar_name)"
    end
    if haskey(linkvar_domain.variables, linkvar_name)
        linkbuf = IOBuffer()
        show_links(linkbuf, linkvar_domain.variables[linkvar_name])
        error("Duplicate variable name: Linking VariableReactTarget $(fullname(variable)) --> $(linkvar_domain.name).$(linkvar_name)\n",
            "    Variable $(linkvar_domain.name).$(linkvar_name) already exists, with links:\n",
            String(take!(linkbuf)))
    end
    targetvar = create_VariableDomContribTarget(linkvar_domain, linkvar_name, variable)
    targetvar.var_target = variable
    variable.linkvar = targetvar
    return nothing
end
"Create any additional (host-dependent) Domain variables for any non-optional VariableReaction Contrib"
function _link_create_contrib(domain::Domain, @nospecialize(reaction::AbstractReaction), variable::VariableReaction,
    linkvar_domain::Domain, linkvar_name::AbstractString, dolog)
    # generic method does nothing for VT_ReactProperty, VT_ReactTarget, VT_ReactDependency
    # (only the VT_ReactContributor specialization below takes action)
    return nothing
end
function _link_create_contrib(domain::Domain, @nospecialize(reaction::AbstractReaction),
    variable::VariableReaction{VT_ReactContributor},
    linkvar_domain::Domain, linkvar_name::AbstractString, dolog)
    # only create a host Target when none exists yet and this Contributor is required
    if haskey(linkvar_domain.variables, linkvar_name) || variable.link_optional
        return nothing
    end
    dolog && @debug "Creating host Target for Contributor $(reaction.base.domain.name).reactions.$(reaction.name).$(variable.localname) "*
        "--> $(linkvar_domain.name).$(linkvar_name)"
    create_VariableDomContribTarget(linkvar_domain, linkvar_name, variable)
    # don't create link - that happens later in _link_link
    return nothing
end
"Create any additional (host-dependent) Domain variables for any non-optional VariableReaction Dependency"
function _link_create_dep(domain::Domain, @nospecialize(reaction::AbstractReaction), variable::VariableReaction,
    linkvar_domain::Domain, linkvar_name::AbstractString, dolog)
    # generic method does nothing for VT_ReactProperty, VT_ReactTarget, VT_ReactContributor
    # (only the VT_ReactDependency specialization below takes action)
    return nothing
end
function _link_create_dep(domain::Domain, @nospecialize(reaction::AbstractReaction),
    variable::VariableReaction{VT_ReactDependency},
    linkvar_domain::Domain, linkvar_name::AbstractString, dolog)
    # only create a host Property when none exists yet and this Dependency is required
    if haskey(linkvar_domain.variables, linkvar_name) || variable.link_optional
        return nothing
    end
    dolog && @debug "Creating host Property for Dependency $(reaction.base.domain.name).reactions.$(reaction.name).$(variable.localname) "*
        "--> $(linkvar_domain.name).$(linkvar_name)"
    create_VariableDomPropDep(linkvar_domain, linkvar_name, variable)
    # don't create link - that happens later in _link_link
    return nothing
end
"Link VariableReaction Dependency and Contrib to Domain variables"
function _link_link(domain::Domain, @nospecialize(reaction::AbstractReaction), variable::VariableReaction,
    linkvar_domain::Domain, linkvar_name::AbstractString, dolog)
    # generic method does nothing for VT_ReactProperty, VT_ReactTarget
    # (those were linked at creation time by _link_create)
    return nothing
end
function _link_link(domain::Domain, @nospecialize(reaction::AbstractReaction),
    variable::VariableReaction{VT_ReactDependency},
    linkvar_domain::Domain, linkvar_name::AbstractString, dolog)
    hostvar = get(linkvar_domain.variables, linkvar_name, nothing)
    if isnothing(hostvar)
        # no host Property: acceptable only if this Dependency is optional
        if variable.link_optional
            dolog && @debug "No Property for optional Dependency $(fullname(variable))"
        else
            @warn "Programming error - no property for dependency $(fullname(variable)) with link_optional=false"
        end
    else
        dolog && @debug "Linking Dependency $(fullname(variable)) --> $(linkvar_domain.name).$(linkvar_name)"
        add_dependency(hostvar, variable)
    end
    # record the host Variable (or nothing) on the VariableReaction
    variable.linkvar = hostvar
    return nothing
end
function _link_link(domain::Domain, @nospecialize(reaction::AbstractReaction),
    variable::VariableReaction{VT_ReactContributor},
    linkvar_domain::Domain, linkvar_name::AbstractString, dolog)
    hostvar = get(linkvar_domain.variables, linkvar_name, nothing)
    if isnothing(hostvar)
        # no host Target: acceptable only if this Contributor is optional
        if variable.link_optional
            dolog && @debug "No target for optional Contributor $(fullname(variable))"
        else
            @warn "Programming error - no target for contributor $(fullname(variable)) with link_optional=false"
        end
    else
        dolog && @debug "Linking Contributor $(fullname(variable)) --> $(linkvar_domain.name).$(linkvar_name)"
        add_contributor(hostvar, variable)
    end
    # record the host Variable (or nothing) on the VariableReaction
    variable.linkvar = hostvar
    return nothing
end
"Visit all Reaction Variables and call supplied function oper (one of _link_print, _link_create, etc)"
function _link_variables!(domain::Domain, model::Model, oper, dolog)
    # visit every VariableReaction of every Reaction in this Domain
    for react in domain.reactions
        for var in get_variables(react)
            # resolve the Domain to link into: the Reaction's own Domain unless
            # the Variable explicitly requested another Domain by name
            if isempty(var.linkreq_domain)
                linkvar_domain = var.method.domain
            else
                linkvar_domain = get_domain(model, var.linkreq_domain)
                !isnothing(linkvar_domain) ||
                    error("linking VariableReaction $(fullname(var)): linkreq_domain='$(var.linkreq_domain)' not found")
            end
            # substitute any per-reaction placeholder in the requested link name
            linkvar_name = sub_variablereaction_linkreq_name(var.linkreq_name, react.name*"/")
            oper(domain, react, var, linkvar_domain, linkvar_name, dolog)
        end
    end
    return nothing
end
# Remove all Domain Variables (empties `domain.variables`), eg prior to relinking.
function _link_clear!(domain::Domain)
    empty!(domain.variables)
    return nothing
end
#############################
# Pretty printing
############################
"compact form"
function Base.show(io::IO, domain::Domain)
    print(io, "Domain(name='$(domain.name)')")
end
"multiline form"
function Base.show(io::IO, ::MIME"text/plain", domain::Domain)
    # print a heading then the matching Variables, sorted by name, one per line
    printvars(heading, vartype) = begin
        println(io, heading)
        for v in sort(get_variables(domain, x -> x isa vartype), by = x -> x.name)
            println(io, "        ", v)
        end
    end
    println(io, "Domain")
    println(io, "    name='$(domain.name)'")
    println(io, "    ID=$(domain.ID)")
    println(io, "    data_dims=", domain.data_dims)
    println(io, "    grid=", isnothing(domain.grid) ? "<nothing>" : domain.grid)
    println(io, "    reactions:")
    for r in domain.reactions
        println(io, "        ", r)
    end
    printvars("    variables (VariableDomPropDep):", VariableDomPropDep)
    printvars("    variables (VariableDomContribTarget):", VariableDomContribTarget)
end
"""
    show_variables(domain::Domain; [attributes], [filter], showlinks=false, modeldata=nothing) -> DataFrame

Show table of Domain Variables. Optionally get variable links, data.

# Keywords:
- `attributes=[:units, :vfunction, :space, :field_data, :description]`: Variable attributes to show
- `showlinks=false`: true to show [`VariableReaction`](@ref)s that link to this Domain Variable.
- `modeldata=nothing`: set to also show Variable values.
- `filter=attrb->true`: function to filter by Variable attributes.
  Example: `filter=attrb->attrb[:vfunction]!=PB.VF_Undefined` to show state Variables and derivatives.
"""
function show_variables(
    domain::Domain;
    attributes=[:units, :vfunction, :space, :field_data, :description],
    filter = attrb->true,
    showlinks=false,
    modeldata=nothing
)
    vars = get_variables(domain, var->filter(var.attributes))
    # base columns: Variable name and concrete type
    df = DataFrames.DataFrame(
        name = [v.name for v in vars],
        type = [typeof(v) for v in vars],
    )
    # one column per requested attribute
    for attrname in attributes
        DataFrames.insertcols!(df, attrname=>[get_attribute(v, attrname) for v in vars])
    end
    # link-column accessors, dispatching on the concrete Variable type
    propertylinks(v::VariableDomPropDep) =
        (pvars = get_properties(v); isempty(pvars) ? missing : [fullname(pv) for pv in pvars])
    propertylinks(v::VariableDomContribTarget) = missing
    targetlink(v::VariableDomPropDep) = missing
    targetlink(v::VariableDomContribTarget) =
        isnothing(v.var_target) ? missing : fullname(v.var_target)
    contributorlinks(v::VariableDomPropDep) = missing
    contributorlinks(v::VariableDomContribTarget) =
        isempty(v.var_contributors) ? missing : [fullname(vc) for vc in v.var_contributors]
    dependencylinks(v) =
        isempty(v.var_dependencies) ? missing : [fullname(vd) for vd in v.var_dependencies]
    if showlinks
        df.property = [propertylinks(v) for v in vars]
        df.dependencies = [dependencylinks(v) for v in vars]
        df.target = [targetlink(v) for v in vars]
        df.contributors = [contributorlinks(v) for v in vars]
    end
    if !isnothing(modeldata)
        df.data = [get_data(v, modeldata) for v in vars]
    end
    DataFrames.sort!(df, [:name])
    return df
end
| PALEOboxes | https://github.com/PALEOtoolkit/PALEOboxes.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.