licenses (sequence, lengths 1-3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, lengths 2-8) | text (string, lengths 25-67.1M) | package_name (string, lengths 2-41) | repo (string, lengths 33-86) |
---|---|---|---|---|---|---|---|---|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | code | 2497 | using BayesFlux
using Flux, Distributions, Random
using Test
function test_GGMC_regression(steps, k=5, n=10_000)
x = randn(Float32, k, n)
β = randn(Float32, k)
y = x' * β + 1.0f0 * randn(Float32, n)
net = Chain(Dense(k, 1))
nc = destruct(net)
sigma_prior = Gamma(2.0f0, 0.5f0)
like = FeedforwardNormal(nc, sigma_prior)
prior = GaussianPrior(nc, 10.0f0)
init = InitialiseAllSame(Normal(0.0f0, 0.1f0), like, prior)
bnn = BNN(x, y, like, prior, init)
# first run optimisation
opt = FluxModeFinder(bnn, Flux.RMSProp(); windowlength=50)
θmode = find_mode(bnn, 1000, 100, opt; showprogress=false)
l = 1f-2
# l = 1.0f-15
sadapter = DualAveragingStepSize(l; adapt_steps=1000, target_accept=0.25f0)
madapter = DiagCovMassAdapter(1000, 100, kappa=0.1f0, epsilon=1.0f-8)
sampler = GGMC(;
l=l,
β=0.1f0,
steps=steps,
sadapter=sadapter,
madapter=madapter
)
ch = mcmc(bnn, 1_000, 20_000, sampler; showprogress=true, θstart=copy(θmode))
ch_short = ch[:, end-9999:end]
θmean = mean(ch_short; dims=2)
βhat = θmean[1:length(β)]
# coefficients
test1 = maximum(abs, β - βhat) < 0.05
# intercept
test2 = abs(θmean[end-1]) < 0.05
# variance
test3 = 0.9f0 <= mean(exp.(ch_short[end, :])) <= 1.1f0
ch_longer = mcmc(bnn, 1000, 25_000, sampler; continue_sampling=true, showprogress=false)
test4 = all(ch_longer[:, 1:20_000] .== ch)
return [test1, test2, test3, test4]
end
Random.seed!(6150533)
@testset "GGMC" begin
# Only testing up to 3 steps. Everything higher becomes numerically
# unstable for any reasonable stepsize
@testset "Linear Regression" for steps in [1, 2, 3]
@testset "Steps = $steps" begin
# Because GitHub Actions seem very slow and occasionally run out of
# memory, we will decrease the number of tests if the tests are run on
# GitHub actions. Hostnames on GH actions seem to always start with fv
ntests = gethostname()[1:2] == "fv" ? 1 : 10
results = fill(false, ntests, 4)
for i = 1:ntests
results[i, :] = test_GGMC_regression(steps)
end
pct_pass = mean(results; dims=1)
@test pct_pass[1] > 0.9
@test pct_pass[2] > 0.9
@test pct_pass[3] > 0.8 # variances are difficult to estimate
@test pct_pass[4] == 1
end
end
end
| BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | code | 3058 | # Testing HMC
using BayesFlux
using Flux, Distributions, Random
using LinearAlgebra
function test_HMC_regression(madapter; k=5, n=10_000)
x = randn(Float32, k, n)
β = randn(Float32, k)
y = x' * β + 1.0f0 * randn(Float32, n)
net = Chain(Dense(k, 1))
nc = destruct(net)
sigma_prior = Gamma(2.0f0, 0.5f0)
like = FeedforwardNormal(nc, sigma_prior)
prior = GaussianPrior(nc, 10.0f0)
init = InitialiseAllSame(Normal(0.0f0, 1.0f0), like, prior)
bnn = BNN(x, y, like, prior, init)
l = 1.0f-4
sadapter = DualAveragingStepSize(l; adapt_steps=1000, target_accept=0.5f0)
# Below was tested together with DiagCovMassAdapter and worked but results
# are very sensitive to the stepsize set and thus proper testing must be
# deferred for now.
# TODO: properly test ConstantStepsize
# l = 1f-3
# sadapter = ConstantStepsize(l)
leapfrog_steps = 5
sampler = HMC(l, leapfrog_steps; sadapter=sadapter, madapter=madapter)
opt = FluxModeFinder(bnn, Flux.ADAM())
θmap = find_mode(bnn, 1000, 100, opt; showprogress=false)
# Commented below was also tested and always worked. But good practice
# for BNNs is to have a warm start.
# ch = mcmc(bnn, 1000, 20_000, sampler; showprogress = true)
ch = mcmc(bnn, 1000, 20_000, sampler; showprogress=true, θstart=θmap)
ch_short = ch[:, end-9999:end]
θmean = mean(ch_short; dims=2)
βhat = θmean[1:length(β)]
# coefficient estimate
test1 = maximum(abs, β - βhat) < 0.05
# Intercept
test2 = abs(θmean[end-1]) < 0.05
# Variance
test3 = 0.9f0 <= mean(exp.(ch_short[end, :])) <= 1.1f0
# Continue sampling
test4 = BayesFlux.calculate_epochs(sampler, 100, 25_000; continue_sampling=true) == 50 * leapfrog_steps
ch_longer = mcmc(bnn, 1000, 25_000, sampler; continue_sampling=true)
test5 = all(ch_longer[:, 1:20_000] .== ch)
return [test1, test2, test3, test4, test5]
end
Random.seed!(6150533)
@testset "HMC" begin
madapter1 = DiagCovMassAdapter(1000, 100; kappa=0.1f0)
madapter2 = FullCovMassAdapter(1000, 100; kappa=0.1f0)
madapter3 = RMSPropMassAdapter(1000)
mas = [madapter1, madapter2, madapter3]
@testset "Linear Regression" for madapter in mas
@testset "Linear Regression madapter = $madapter" begin
# Because GitHub Actions seem very slow and occasionally run out of
# memory, we will decrease the number of tests if the tests are run on
# GitHub actions. Hostnames on GH actions seem to always start with fv
ntests = gethostname()[1:2] == "fv" ? 1 : 10
results = fill(false, ntests, 5)
for i = 1:ntests
results[i, :] = test_HMC_regression(madapter)
end
pct_pass = mean(results; dims=1)
@test pct_pass[1] > 0.9
@test pct_pass[2] > 0.9
@test pct_pass[3] > 0.8 # variances are difficult to estimate
@test pct_pass[4] == 1
@test pct_pass[5] == 1
end
end
end | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | code | 1250 | using Flux
using Distributions, Random
using BayesFlux
Random.seed!(6150533)
@testset "Initialise" begin
@testset "Basic Initialiser" for dist in [Normal(), Normal(0.0f0, 10.0f0), Uniform(-0.5f0, 0.5f0)]
net = Chain(LSTM(1, 10), Dense(10, 1))
nc = destruct(net)
like = FeedforwardNormal(nc, Gamma(2.0, 2.0))
prior = GaussianPrior(nc, 10.0f0)
init = InitialiseAllSame(dist, like, prior)
θnet, θhyper, θlike = init()
@test length(θnet) == nc.num_params_network
@test length(θhyper) == prior.num_params_hyper
@test length(θlike) == like.num_params_like
draws = [vcat(init()...) for _ in 1:100_000]
draws = reduce(hcat, draws)
mindraw = minimum(draws; dims=2)
maxdraw = maximum(draws; dims=2)
meandraw = mean(draws; dims=2)
vardraw = var(draws; dims=2)
supdist = support(dist)
mindist = supdist.lb
maxdist = supdist.ub
meandist = mean(dist)
vardist = var(dist)
@test all(mindist .<= mindraw .<= maxdist)
@test all(mindist .<= maxdraw .<= maxdist)
@test maximum(abs, meandraw .- meandist) < 0.15
@test maximum(abs, vardraw ./ vardist) < 1.1
end
end | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | code | 488 | using Flux
using BayesFlux, Test
@testset "Dense Layer" begin
@testset "Destructuring" begin
net = Dense(10, 10)
x = randn(10, 10)
θ, re = BayesFlux.destruct(net)
net_re = re(θ)
yoriginal = net(x)
yre = net_re(x)
@test all(yoriginal .== yre)
θnew = randn(length(θ))
netnew = re(θnew)
ynew = netnew(x)
# The chance that they are equal if everything is correct is 0
@test all(yoriginal .!= ynew)
end
end | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | code | 4563 | # Each likelihood must be a subtype of BNNLikelihood and must have implemented
# at least the fields `num_params_like::Int` and `nc<:NetConstructor`.
using Distributions
using Flux
using BayesFlux
using Bijectors
using Random, Test
Random.seed!(6150533)
@testset "Likelihood Feedforward" begin
@testset "Gaussian" begin
net = Chain(Dense(10, 10, sigmoid), Dense(10, 1))
nc = destruct(net)
gl = FeedforwardNormal(nc, Gamma(2.0, 2.0))
# A Gaussian likelihood has one additional parameter: σ
@test gl.num_params_like == 1
T = eltype(nc.θ)
# If we set all network parameters to zero then the prediction will
# always be zero. If additionally we set σ = 1, then we can compare the
# network likelihood to a standard normal
# We only need to add the contribution of the prior for σ
y = T.(quantile.(Normal(), 0.1:0.1:0.9))
x = randn(T, 10, length(y))
θ = zeros(eltype(nc.θ), nc.num_params_network)
tdist = transformed(gl.prior_σ)
tσ = link(gl.prior_σ, 1.0f0)
@test gl(x, y, θ, [tσ]) ≈ sum(logpdf.(Normal(), y)) + logpdf(tdist, tσ)
# Similarly, using any x should result in predictions that are
# distributed according to a standard normal
x = randn(T, 10, 100_000)
ypp = posterior_predict(gl, x, θ, [tσ])
q = T.(quantile.([ypp], 0.1:0.1:0.9))
@test maximum(abs, q - y) < 0.05
end
@testset "TDist" begin
net = Chain(Dense(10, 10, sigmoid), Dense(10, 1))
nc = destruct(net)
tl = FeedforwardTDist(nc, Gamma(2.0, 2.0), 10.0f0)
@test tl.num_params_like == 1
T = eltype(nc.θ)
# We can do the same as for the Gaussian case.
y = T.(quantile.(TDist(tl.ν), 0.1:0.1:0.9))
x = randn(T, 10, length(y))
θ = zeros(T, nc.num_params_network)
tdist = transformed(tl.prior_σ)
tσ = link(tl.prior_σ, 1.0f0)
@test tl(x, y, θ, [tσ]) ≈ sum(logpdf.(TDist(tl.ν), y)) + logpdf(tdist, tσ)
# And doing the same for prediction
x = randn(T, 10, 100_000)
ypp = posterior_predict(tl, x, θ, [tσ])
q = T.(quantile.([ypp], 0.1:0.1:0.9))
@test maximum(abs, q - y) < 0.05
end
end
Random.seed!(6150533)
@testset "Likelihood Seq-to-One" begin
@testset "Gaussian" for rnn in [RNN, LSTM]
net = Chain(rnn(10, 10), Dense(10, 1))
nc = destruct(net)
gl = SeqToOneNormal(nc, Gamma(2.0, 2.0))
# A Gaussian likelihood has one additional parameter: σ
@test gl.num_params_like == 1
T = eltype(nc.θ)
# If we set all network parameters to zero then the prediction will
# always be zero. If additionally we set σ = 1, then we can compare the
# network likelihood to a standard normal
# We only need to add the contribution of the prior for σ
y = T.(quantile.(Normal(), 0.1:0.1:0.9))
# x = [randn(T, 10, length(y)) for _ in 1:10]
x = randn(T, 10, 10, length(y))
θ = zeros(eltype(nc.θ), nc.num_params_network)
tdist = transformed(gl.prior_σ)
tσ = link(gl.prior_σ, 1.0f0)
@test gl(x, y, θ, [tσ]) ≈ sum(logpdf.(Normal(), y)) + logpdf(tdist, tσ)
# Similarly, using any x should result in predictions that are
# distributed according to a standard normal
# x = [randn(T, 10, 100_000) for _ in 1:10]
x = randn(T, 10, 10, 100_000)
ypp = posterior_predict(gl, x, θ, [tσ])
q = T.(quantile.([ypp], 0.1:0.1:0.9))
@test maximum(abs, q - y) < 0.05
end
@testset "TDist" for rnn in [RNN, LSTM]
net = Chain(rnn(10, 10), Dense(10, 1))
nc = destruct(net)
tl = SeqToOneTDist(nc, Gamma(2.0, 2.0), 10.0f0)
@test tl.num_params_like == 1
T = eltype(nc.θ)
# We can do the same as for the Gaussian case.
y = T.(quantile.(TDist(tl.ν), 0.1:0.1:0.9))
# x = [randn(T, 10, length(y)) for _ in 1:10]
x = randn(T, 10, 10, length(y))
θ = zeros(T, nc.num_params_network)
tdist = transformed(tl.prior_σ)
tσ = link(tl.prior_σ, 1.0f0)
@test tl(x, y, θ, [tσ]) ≈ sum(logpdf.(TDist(tl.ν), y)) + logpdf(tdist, tσ)
# And doing the same for prediction
# x = [randn(T, 10, 100_000) for _ in 1:10]
x = randn(T, 10, 10, 100_000)
ypp = posterior_predict(tl, x, θ, [tσ])
q = T.(quantile.([ypp], 0.1:0.1:0.9))
@test maximum(abs, q - y) < 0.05
end
end | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | code | 1286 | using Flux
using BayesFlux, Test
using Distributions, Random, LinearAlgebra
using Bijectors
@testset "Mode Finding" begin
@testset "Mode Linear Regression" begin
k = 5
n = 100_000
x = randn(Float32, k, n)
β = randn(Float32, k)
y = x' * β + 0.1f0 * randn(Float32, n)
net = Chain(Dense(k, 1))
nc = destruct(net)
like = FeedforwardNormal(nc, Gamma(2.0, 2.0))
prior = GaussianPrior(nc, 10.0f0)
init = InitialiseAllSame(Normal(0.0f0, 1.0f0), like, prior)
bnn = BNN(x, y, like, prior, init)
opt = FluxModeFinder(bnn, Flux.ADAM(); windowlength=50)
θmode = find_mode(bnn, 10000, 1000, opt; showprogress=false)
# We do not have a constant in the original model so discard bias
βhat = θmode[1:bnn.like.nc.num_params_network-1]
@test maximum(abs, β .- βhat) < 0.01
end
end
# @testset "Mode Finding" begin
# @testset "Full Gradient $i" for i=2:100
# μ = randn(i)
# lpdf(θ) = logpdf(MvNormal(μ, I), θ)
# mode = find_mode(lpdf, randn(i), 10_000, 1e-10;
# showprogress = false, verbose = false)
# @test isapprox(mode[1], μ, atol = 0.01)
# end
# # TODO: implement SGD test; How to design a good test?
# end
| BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | code | 2208 | # Every prior of network parameters must be a subtype of NetworkPrior. It must
# be callable and return the logprior density and it must implement a sample
# method, sampling a vector of network parameters from the prior
using BayesFlux, Flux
using Distributions, Random, Test
Random.seed!(6150533)
@testset "Network Prior" begin
@testset "Gaussian" for σ0 in [0.5, 1.0, 3.0, 10.0]
@testset "Gaussian σ0 = $σ0" begin
net = Chain(Dense(10, 10, sigmoid), Dense(10, 1))
nc = destruct(net)
T = eltype(nc.θ)
gp = GaussianPrior(nc, T(σ0))
@test gp.num_params_hyper == 0
n = nc.num_params_network
θ = T.(collect(0.1:0.1:0.9))
# our prior is a zero-mean Gaussian with standard deviation σ0
@test gp(θ, Float32[]) ≈ T(sum(logpdf.(Normal(T(0), T(σ0)), 0.1:0.1:0.9)))
θdraws = reduce(hcat, [sample_prior(gp) for _ in 1:1_000_000])
𝔼θdraws = vec(mean(θdraws; dims=2))
@test maximum(abs, 𝔼θdraws) < 0.1
𝕍θdraws = vec(var(θdraws; dims=2))
@test maximum(𝕍θdraws ./ (σ0^2)) < 1.01
end
end
@testset "Mixture Gaussian" for (μ1, σ1, σ2) in zip([0.01f0, 0.1f0, 0.5f0, 0.9f0], [0.001f0, 0.1f0, 1.0f0], [1.0f0, 5.0f0, 10.0f0])
@testset "Mixture Gaussian μ1=$μ1, σ1=$σ1, σ2=$σ2" begin
net = Chain(Dense(10, 10, sigmoid), Dense(10, 1))
nc = destruct(net)
T = eltype(nc.θ)
prior = MixtureScalePrior(nc, σ1, σ2, μ1)
@test prior.num_params_hyper == 0
# Both components have zero mean, so the mixture has zero mean
θdraws = reduce(hcat, [sample_prior(prior) for _ in 1:1_000_000])
𝔼θdraws = vec(mean(θdraws; dims=2))
@test maximum(abs, 𝔼θdraws) < 0.1
# θ = z₁θ₁ + (1-z₁)θ₂ where z₁ ~ Bernoulli(π1) and thus 1-z₁ ~ Bernoulli(π2)
# Since both components have zero mean, this gives a theoretical variance of
# V(θ) = π1*σ1^2 + π2*σ2^2
𝕍θdraws = vec(var(θdraws; dims=2))
var_theoretic = prior.π1 * prior.σ1^2 + prior.π2 * prior.σ2^2
@test maximum(𝕍θdraws ./ var_theoretic) < 1.01
end
end
end | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | code | 767 | using BayesFlux
using Flux, Distributions, Random
using Test
@testset "Posterior Predictive Draws" begin
n = 1_000
k = 5
x = randn(Float32, k, n)
β = randn(Float32, k)
y = x' * β + 1.0f0 * randn(Float32, n)
net = Chain(Dense(k, 1))
nc = destruct(net)
sigma_prior = Gamma(2.0f0, 0.5f0)
like = FeedforwardNormal(nc, sigma_prior)
prior = GaussianPrior(nc, 10.0f0)
init = InitialiseAllSame(Normal(0.0f0, 1.0f0), like, prior)
bnn = BNN(x, y, like, prior, init)
sampler = SGLD(; stepsize_a=1.0f0)
ch = mcmc(bnn, 1000, 20_000, sampler; showprogress=true)
ch_short = ch[:, end-9999:end]
pp = sample_posterior_predict(bnn, ch_short)
@test(size(pp, 1) == n)
@test(size(pp, 2) == size(ch_short, 2))
end | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | code | 1140 | using BayesFlux
using Flux
using Distributions, Random, Bijectors
using Test
using LinearAlgebra
println("Hostname: $(gethostname())")
@testset "BayesFlux" begin
Random.seed!(6150533)
# destructing Networks and layers
include("./deconstruct.jl")
# likelihoods
include("./likelihoods.jl")
# network priors
include("./networkpriors.jl")
# initialisers
include("./initialisers.jl")
# BNN basic operations
include("./bnn.jl")
# Mode Finding
include("./modes.jl")
# Posterior Predictive Draws
include("./posterior_predict.jl")
# Checking whether all derivatives work. See issue #6
include("./derivatives.jl")
# Tests after this line are reduced in the number of samples when run
# on GitHub actions.
if gethostname()[1:2] == "fv"
@info "Tests run on GitHub actions are reduced. For the full test suite, please run tests on another machine."
end
# MCMC
include("./sgld.jl")
include("./sgnht.jl")
include("./sgnht-s.jl")
include("./ggmc.jl")
include("./amh.jl")
include("./hmc.jl")
# vi
include("./bbb.jl")
end | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | code | 1940 | # Testing SGLD
using BayesFlux
using Flux, Distributions, Random
using Test
function test_SGLD_regression(; k=5, n=10_000)
x = randn(Float32, k, n)
β = randn(Float32, k)
y = x' * β + 1.0f0 * randn(Float32, n)
net = Chain(Dense(k, 1))
nc = destruct(net)
sigma_prior = Gamma(2.0f0, 0.5f0)
like = FeedforwardNormal(nc, sigma_prior)
prior = GaussianPrior(nc, 10.0f0)
init = InitialiseAllSame(Normal(0.0f0, 1.0f0), like, prior)
bnn = BNN(x, y, like, prior, init)
sampler = SGLD(; stepsize_a=1.0f0)
ch = mcmc(bnn, 1000, 20_000, sampler; showprogress=true)
ch_short = ch[:, end-9999:end]
θmean = mean(ch_short; dims=2)
βhat = θmean[1:length(β)]
# coefficient estimate
test1 = maximum(abs, β - βhat) < 0.05
# Intercept
test2 = abs(θmean[end-1]) < 0.05
# Variance
test3 = 0.9f0 <= mean(exp.(ch_short[end, :])) <= 1.1f0
# Continue sampling
test4 = BayesFlux.calculate_epochs(sampler, 100, 25_000; continue_sampling=true) == 50
ch_longer = mcmc(bnn, 1000, 25_000, sampler; continue_sampling=true)
test5 = all(ch_longer[:, 1:20_000] .== ch)
return [test1, test2, test3, test4, test5]
end
Random.seed!(6150533)
@testset "SGLD" begin
@testset "Linear Regression" begin
# Because GitHub Actions seem very slow and occasionally run out of
# memory, we will decrease the number of tests if the tests are run on
# GitHub actions. Hostnames on GH actions seem to always start with fv
ntests = gethostname()[1:2] == "fv" ? 1 : 10
results = fill(false, ntests, 5)
for i = 1:ntests
results[i, :] = test_SGLD_regression()
end
pct_pass = mean(results; dims=1)
@test pct_pass[1] > 0.9
@test pct_pass[2] > 0.9
@test pct_pass[3] > 0.8 # variances are difficult to estimate
@test pct_pass[4] == 1
@test pct_pass[5] == 1
end
end | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | code | 2115 | # Testing SGNHT-S
using BayesFlux
using Flux, Distributions, Random
function test_SGNHTS_regression(; k=5, n=10_000)
x = randn(Float32, k, n)
β = randn(Float32, k)
y = x' * β + 1.0f0 * randn(Float32, n)
net = Chain(Dense(k, 1))
nc = destruct(net)
sigma_prior = Gamma(2.0f0, 0.5f0)
like = FeedforwardNormal(nc, sigma_prior)
prior = GaussianPrior(nc, 10.0f0)
init = InitialiseAllSame(Normal(0.0f0, 1.0f0), like, prior)
bnn = BNN(x, y, like, prior, init)
opt = FluxModeFinder(bnn, Flux.ADAM())
θmap = find_mode(bnn, 100, 300, opt; showprogress=false)
l = 1.0f-3
madapter = RMSPropMassAdapter(1000)
sampl = SGNHTS(l, 6.0f0; madapter=madapter)
ch = mcmc(bnn, 1000, 20_000, sampl; showprogress=true, θstart=θmap)
ch_short = ch[:, end-9999:end]
θmean = mean(ch_short; dims=2)
βhat = θmean[1:length(β)]
# coefficient estimate
test1 = maximum(abs, β - βhat) < 0.05
# Intercept
test2 = abs(θmean[end-1]) < 0.05
# Variance
test3 = 0.9f0 <= mean(exp.(ch_short[end, :])) <= 1.1f0
# Continue sampling
test4 = BayesFlux.calculate_epochs(sampl, 100, 25_000; continue_sampling=true) == 50
ch_longer = mcmc(bnn, 1000, 25_000, sampl; continue_sampling=true)
test5 = all(ch_longer[:, 1:20_000] .== ch)
return [test1, test2, test3, test4, test5]
end
Random.seed!(6150533)
@testset "SGNHT-S" begin
@testset "Linear Regression" begin
# Because GitHub Actions seem very slow and occasionally run out of
# memory, we will decrease the number of tests if the tests are run on
# GitHub actions. Hostnames on GH actions seem to always start with fv
ntests = gethostname()[1:2] == "fv" ? 1 : 10
results = fill(false, ntests, 5)
for i = 1:ntests
results[i, :] = test_SGNHTS_regression()
end
pct_pass = mean(results; dims=1)
@test pct_pass[1] > 0.9
@test pct_pass[2] > 0.9
@test pct_pass[3] > 0.8 # variances are difficult to estimate
@test pct_pass[4] == 1
@test pct_pass[5] == 1
end
end | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | code | 2166 | # Testing SGNHT
# SGNHT seems to be in the ballpark but is not very precise. It also seems to have
# much too high posterior variance.
using BayesFlux
using Flux, Distributions, Random
function test_SGNHT_regression(; k=5, n=10_000)
x = randn(Float32, k, n)
β = randn(Float32, k)
y = x' * β + 1.0f0 * randn(Float32, n)
net = Chain(Dense(k, 1))
nc = destruct(net)
sigma_prior = Gamma(2.0f0, 0.5f0)
like = FeedforwardNormal(nc, sigma_prior)
prior = GaussianPrior(nc, 10.0f0)
init = InitialiseAllSame(Normal(0.0f0, 0.1f0), like, prior)
bnn = BNN(x, y, like, prior, init)
# l = 1f-3
l = 2.0f-4
sampl = SGNHT(l, 0.1f0 * 36.0f0; xi=0.1f0 * 36.0f0)
ch = mcmc(bnn, 1000, 20_000, sampl; showprogress=true)
# ch = mcmc(bnn, 1000, 20_000, sampl; showprogress = true, θstart = θmap)
ch_short = ch[:, end-9999:end]
θmean = mean(ch_short; dims=2)
βhat = θmean[1:length(β)]
# coefficient estimate
test1 = maximum(abs, β - βhat) < 0.05
# Intercept
test2 = abs(θmean[end-1]) < 0.05
# Variance
test3 = 0.9f0 <= mean(exp.(ch_short[end, :])) <= 1.1f0
# Continue sampling
test4 = BayesFlux.calculate_epochs(sampl, 100, 25_000; continue_sampling=true) == 50
ch_longer = mcmc(bnn, 1000, 25_000, sampl; continue_sampling=true)
test5 = all(ch_longer[:, 1:20_000] .== ch)
return [test1, test2, test3, test4, test5]
end
Random.seed!(6150533)
@testset "SGNHT" begin
@testset "Linear Regression" begin
# Because GitHub Actions seem very slow and occasionally run out of
# memory, we will decrease the number of tests if the tests are run on
# GitHub actions. Hostnames on GH actions seem to always start with fv
ntests = gethostname()[1:2] == "fv" ? 1 : 10
results = fill(false, ntests, 5)
for i = 1:ntests
results[i, :] = test_SGNHT_regression()
end
pct_pass = mean(results; dims=1)
@test pct_pass[1] > 0.9
@test pct_pass[2] > 0.9
@test pct_pass[3] > 0.8 # variances are difficult to estimate
@test pct_pass[4] == 1
@test pct_pass[5] == 1
end
end | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 26988 | <!-- Create the .md file by running
Literate.markdown("./README.jl", flavor = Literate.CommonMarkFlavor()) -->
[](https://github.com/enweg/BayesFlux.jl/actions/workflows/tests.yml)
[](https://enweg.github.io/BayesFlux.jl/stable)
[](https://enweg.github.io/BayesFlux.jl/dev)
````julia
using BayesFlux, Flux
using Random, Distributions
using StatsPlots
Random.seed!(6150533)
````
## BayesFlux (Bayesian extension for Flux)
BayesFlux is meant to be an extension to Flux.jl, a machine learning
library written entirely in Julia. BayesFlux is not and will not be the
fastest production-ready library, but rather is meant to make research and
experimentation easy.
BayesFlux is part of my Master Thesis in Economic and Financial Research -
specialisation Econometrics - and will therefore likely still go through some
revisions in the coming months.
## Structure
Every Bayesian model can in general be broken down into the probabilistic
model, which gives the likelihood function and the prior on all parameters of
the probabilistic model. BayesFlux somewhat follows this and splits every Bayesian
Network into the following parts:
1. **Network**: Every BNN must have some general network structure. This is
defined using Flux and currently supports Dense, RNN, and LSTM layers. More
on this later
2. **Network Constructor**: Since BayesFlux works with vectors of parameters, we
need to be able to go from a vector to the network and back. This works by
using the NetworkConstructor.
3. **Likelihood**: The likelihood function. In traditional estimation of NNs,
this would correspond to the negative loss function. BayesFlux has a twist on
this though and nomenclature might change because of this twist: The
likelihood also contains all additional parameters and priors. For example,
for a Gaussian likelihood, the likelihood object also defines the standard
deviation and the prior for the standard deviation. This design choice was
made to keep the likelihood and everything belonging to it separate from
the network; again, due to the potential confusion, the nomenclature might
change in later revisions.
4. **Prior on network parameters**: A prior on all network parameters.
Currently the RNN layers do not define priors on the initial state and thus
the initial state is also not sampled. Priors can have hyper-priors.
5. **Initialiser**: Unless some special initialisation values are given, BayesFlux
will draw initial values as defined by the initialiser. An initialiser
initialises all network and likelihood parameters to reasonable values.
All of the above are then used to create a BNN which can then be estimated
using the MAP, can be sampled from using any of the MCMC methods implemented,
or can be estimated using Variational Inference.
The examples and the sections below hopefully clarify everything. If any
questions remain, please open an issue.
## Linear Regression using BayesFlux
Although not meant for Simple Linear Regression, BayesFlux can be used for it, and
we will do so in this section. This will hopefully demonstrate the basics.
Later sections will show better examples.
Let's say we have the idea that the data can be modelled via a linear model of
the form
$$y_i = x_i'\beta + e_i$$
with $e_i \sim N(0, 1)$
````julia
k = 5
n = 500
x = randn(Float32, k, n);
β = randn(Float32, k);
y = x'*β + randn(Float32, n);
````
This is a standard linear model and we would likely be better off using STAN
or Turing for this, but due to the availability of a Dense layer with linear
activation function, we can also implement it in BayesFlux.
The first step is to define the network. As mentioned above, the network
consists of a single Dense layer with a linear activation function (the
default activation in Flux and hence not explicitly shown).
````julia
net = Chain(Dense(k, 1)) # k inputs and one output
````
Since BayesFlux works with vectors, we need to be able to transform a vector to
the above network and back. We thus need a NetworkConstructor, which we obtain
as the return value of `destruct`
````julia
nc = destruct(net)
````
We can check whether everything works by just creating a random vector of the
right dimension and calling the NetworkConstructor using this vector.
````julia
θ = randn(Float32, nc.num_params_network)
nc(θ)
````
We indeed obtain a network of the right size and structure.
Next, we will define a prior for all parameters of the network. Since weight
decay is a popular regularisation method in standard ML estimation, we will be
using a Gaussian prior, which is the Bayesian weight decay:
````julia
prior = GaussianPrior(nc, 0.5f0) # the last value is the standard deviation
````
We also need a likelihood and a prior on all parameters the likelihood
introduces to the model. We will go for a Gaussian likelihood, which
introduces the standard deviation of the model. BayesFlux currently implements
Gaussian and Student-t likelihoods for Feedforward and Seq-to-one cases but
more can easily be implemented. See **TODO HAR link** for an example.
````julia
like = FeedforwardNormal(nc, Gamma(2.0, 0.5)) # Second argument is prior for standard deviation.
````
Lastly, when no explicit initial value is given, BayesFlux will draw it from an
initialiser. Currently only one type of initialiser is implemented in BayesFlux,
but this can easily be extended by the user.
````julia
init = InitialiseAllSame(Normal(0.0f0, 0.5f0), like, prior) # First argument is dist we draw parameters from.
````
Given all the above, we can now define the BNN:
````julia
bnn = BNN(x, y, like, prior, init)
````
### MAP estimate.
It is always a good idea to first find the MAP estimate. This can serve two
purposes:
1. It is faster than fully estimating the model using MCMC or VI and can thus
serve as a quick check; If the MAP estimate results in bad point
predictions, so will likely the full estimation results.
2. It can serve as a starting value for the MCMC samplers.
To find a MAP estimate, we must first specify how we want to find it: We need
to define an optimiser. BayesFlux currently only implements optimisers derived
from Flux itself, but this can be extended by the user.
````julia
opt = FluxModeFinder(bnn, Flux.ADAM()) # We will use ADAM
θmap = find_mode(bnn, 10, 500, opt) # batchsize 10 with 500 epochs
````
We can already use the MAP estimate to make some predictions and calculate the
RMSE.
````julia
nethat = nc(θmap)
yhat = vec(nethat(x))
sqrt(mean(abs2, y .- yhat))
````
### MCMC - SGLD
If the MAP estimate does not show any problems, it can be used as the starting
point for SGLD or any of the other MCMC methods (see later section).
Simulations have shown that using a relatively large initial stepsize with a
slow decaying stepsize schedule often results in the best mixing. *Note: We
would usually use samplers such as NUTS for linear regressions, which are much
more efficient than SGLD*
````julia
sampler = SGLD(Float32; stepsize_a = 10f-0, stepsize_b = 0.0f0, stepsize_γ = 0.55f0)
ch = mcmc(bnn, 10, 50_000, sampler)
ch = ch[:, end-20_000+1:end]
````
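To warm-start the chain at the MAP estimate instead, the `θstart` keyword can
be passed to `mcmc`, as is done in the test suite:

````julia
ch = mcmc(bnn, 10, 50_000, sampler; θstart = copy(θmap))
````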
We can obtain summary statistics and trace and density plots of network
parameters and likelihood parameters by transforming the BayesFlux chain into a
MCMCChain.
````julia
using MCMCChains
chain = Chains(ch')
plot(chain)
````
In more complicated networks, it is usually a hopeless goal to obtain good
mixing in parameter space and thus we rather focus on the output space of the
network. *Mixing in parameter space is hopeless due to the very complicated
topology of the posterior; see ...*
We will use a little helper function to get the output values of the network:
````julia
function naive_prediction(bnn, draws::Array{T, 2}; x = bnn.x, y = bnn.y) where {T}
yhats = Array{T, 2}(undef, length(y), size(draws, 2))
Threads.@threads for i=1:size(draws, 2)
net = bnn.like.nc(draws[:, i])
yh = vec(net(x))
yhats[:,i] = yh
end
return yhats
end
yhats = naive_prediction(bnn, ch)
chain_yhat = Chains(yhats')
maximum(summarystats(chain_yhat)[:, :rhat])
````
Similarly, we can obtain posterior predictive values and check what percentage
of the actual data falls below each quantile of these draws. What we would
like is that 5% of the data fall below the 5%
quantile of the posterior predictive draws.
````julia
function get_observed_quantiles(y, posterior_yhat, target_q = 0.05:0.05:0.95)
qs = [quantile(yr, target_q) for yr in eachrow(posterior_yhat)]
qs = reduce(hcat, qs)
observed_q = mean(reshape(y, 1, :) .< qs; dims = 2)
return observed_q
end
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
````
### MCMC - SGNHTS
Just like SGLD, SGNHTS also does not apply a Metropolis-Hastings correction
step. Contrary to SGLD though, SGNHTS implements a thermostat, whose task it
is to keep the temperature in the dynamic system close to one, and thus the
sampling more accurate. Although the thermostat's goal is often not achieved,
samples obtained using SGNHTS often outperform those obtained using SGLD.
````julia
sampler = SGNHTS(1f-2, 2f0; xi = 2f0^2, μ = 50f0)
ch = mcmc(bnn, 10, 50_000, sampler)
ch = ch[:, end-20_000+1:end]
chain = Chains(ch')
````
----
````julia
yhats = naive_prediction(bnn, ch)
chain_yhat = Chains(yhats')
maximum(summarystats(chain_yhat)[:, :rhat])
````
----
````julia
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
````
### MCMC - GGMC
As pointed out above, neither SGLD nor SGNHTS apply a Metropolis-Hastings
acceptance step and are thus difficult to monitor. Indeed, draws from SGLD or
SGNHTS should perhaps rather be considered as giving an ensemble of models
rather than draws from the posterior, since without any MH step, it is unclear
whether the chain will actually converge to the posterior.
BayesFlux also implements three methods that do apply a MH step and are thus
easier to monitor. These are GGMC, AdaptiveMH, and HMC. Both GGMC and HMC do
allow for taking stochastic gradients. GGMC also allows using delayed
acceptance, in which the MH step is only applied after a couple of steps
rather than after each step (see ... for details).
Because both GGMC and HMC use a MH step, they provide a measure of the mean
acceptance rate, which can be used to tune the stepsize using Dual Averaging
(see .../STAN for details). Similarly, both also make use of mass matrices,
which can also be tuned.
BayesFlux implements both stepsize adapters and mass adapters but to this point
does not implement a smart way of combining them (this will come in the
future). In my experience, naively combining them often only helps in more
complex models and thus we will only use a stepsize adapter here.
````julia
sadapter = DualAveragingStepSize(1f-9; target_accept = 0.55f0, adapt_steps = 10000)
sampler = GGMC(Float32; β = 0.1f0, l = 1f-9, sadapter = sadapter)
ch = mcmc(bnn, 10, 50_000, sampler)
ch = ch[:, end-20_000+1:end]
chain = Chains(ch')
````
----
````julia
yhats = naive_prediction(bnn, ch)
chain_yhat = Chains(yhats')
maximum(summarystats(chain_yhat)[:, :rhat])
````
----
````julia
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
````
The above uses an MH correction after each step. This can be costly in big-data
environments or when the evaluation of the likelihood is costly. If either of
the above applies, delayed acceptance can speed up the process.
````julia
sadapter = DualAveragingStepSize(1f-9; target_accept = 0.25f0, adapt_steps = 10000)
sampler = GGMC(Float32; β = 0.1f0, l = 1f-9, sadapter = sadapter, steps = 3)
ch = mcmc(bnn, 10, 50_000, sampler)
ch = ch[:, end-20_000+1:end]
chain = Chains(ch')
````
----
````julia
yhats = naive_prediction(bnn, ch)
chain_yhat = Chains(yhats')
maximum(summarystats(chain_yhat)[:, :rhat])
````
----
````julia
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
````
### MCMC - HMC
Since HMC showed some mixing problems for some variables during the testing of
this README, we decided to use a mass matrix adaptation. This turned out to
work better even in this simple case.
````julia
sadapter = DualAveragingStepSize(1f-9; target_accept = 0.55f0, adapt_steps = 10000)
madapter = DiagCovMassAdapter(5000, 1000)
sampler = HMC(1f-9, 5; sadapter = sadapter, madapter = madapter)
ch = mcmc(bnn, 10, 50_000, sampler)
ch = ch[:, end-20_000+1:end]
chain = Chains(ch')
````
----
````julia
yhats = naive_prediction(bnn, ch)
chain_yhat = Chains(yhats')
maximum(summarystats(chain_yhat)[:, :rhat])
````
----
````julia
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
````
### MCMC - Adaptive Metropolis-Hastings
As a derivative free alternative, BayesFlux also implements Adaptive MH as
introduced in (...). This is currently quite a costly method for complex
models since it needs to evaluate the MH ratio at each step. Plans exist to
parallelise the calculation of the likelihood which should speed up Adaptive
MH.
````julia
sampler = AdaptiveMH(diagm(ones(Float32, bnn.num_total_params)), 1000, 0.5f0, 1f-4)
ch = mcmc(bnn, 10, 50_000, sampler)
ch = ch[:, end-20_000+1:end]
chain = Chains(ch')
````
----
````julia
yhats = naive_prediction(bnn, ch)
chain_yhat = Chains(yhats')
maximum(summarystats(chain_yhat)[:, :rhat])
````
----
````julia
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
````
### Variational Inference
In some cases MCMC methods either do not work well or take too long. For these
cases BayesFlux currently implements Bayes-By-Backprop (...); one shortcoming of
the current implementation is that the variational family is constrained to a
diagonal multivariate Gaussian and thus any correlations between network
parameters are set to zero. This can cause problems in some situations and
plans exist to allow for more flexible covariance specifications.
````julia
q, params, losses = bbb(bnn, 10, 2_000; mc_samples = 1, opt = Flux.ADAM(), n_samples_convergence = 10)
ch = rand(q, 20_000)
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
````
## More complicated FNN
What changes if I want to implement BNNs using more complicated Feedforward
structures than above? Nothing! Well, almost nothing. The only thing that
truly changes is the network you specify. All the rest could in theory stay
the same. As the network becomes more complicated, it might be worth it to
specify better priors or likelihoods though. Say, for example, we use the same
data as above (in reality we would not know that it is coming from a linear
model although it is always good practice to try simple models first), but
instead of using the above network structure corresponding to a linear model,
use the following:
````julia
net = Chain(Dense(k, k, relu), Dense(k, k, relu), Dense(k, 1))
````
We can then still use the same prior, likelihood, and initialiser. But we do
need to change the NetworkConstructor.
````julia
nc = destruct(net)
like = FeedforwardNormal(nc, Gamma(2.0, 0.5))
prior = GaussianPrior(nc, 0.5f0)
init = InitialiseAllSame(Normal(0.0f0, 0.5f0), like, prior)
bnn = BNN(x, y, like, prior, init)
````
The rest is the same as above. We can, for example, first find the MAP:
````julia
opt = FluxModeFinder(bnn, Flux.ADAM()) # We will use ADAM
θmap = find_mode(bnn, 10, 500, opt) # batchsize 10 with 500 epochs
````
----
````julia
nethat = nc(θmap)
yhat = vec(nethat(x))
sqrt(mean(abs2, y .- yhat))
````
Or we can use any of the MCMC or VI method - SGNHTS is just one option:
````julia
sampler = SGNHTS(1f-2, 1f0; xi = 1f0^2, μ = 10f0)
ch = mcmc(bnn, 10, 50_000, sampler)
ch = ch[:, end-20_000+1:end]
chain = Chains(ch')
````
----
````julia
yhats = naive_prediction(bnn, ch)
chain_yhat = Chains(yhats')
maximum(summarystats(chain_yhat)[:, :rhat])
````
----
````julia
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
````
## Recurrent Structures
Next to Dense layers, BayesFlux also implements RNN and LSTM layers. These two do
require some additional care though, since the layout of the data must be
adjusted. In general, the last dimension of `x` and `y` is always the
dimension along which BayesFlux batches. Thus, if we are in a seq-to-one setting
(seq-to-seq is not implemented itself, but users can implement custom
likelihoods for a seq-to-seq setting), then the sequences must be along
the last dimension (here the third). To demonstrate this, let us simulate
some AR(1) data
````julia
Random.seed!(6150533)
gamma = 0.8
N = 500
burnin = 1000
y = zeros(N + burnin + 1)
for t=2:(N+burnin+1)
y[t] = gamma*y[t-1] + randn()
end
y = Float32.(y[end-N+1:end])
````
Just like in the FNN case, we need a network structure and its constructor, a
prior on the network parameters, a likelihood with a prior on the additional
parameters introduced by the likelihood, and an initialiser
````julia
net = Chain(RNN(1, 1), Dense(1, 1)) # last layer is linear output layer
nc = destruct(net)
like = SeqToOneNormal(nc, Gamma(2.0, 0.5))
prior = GaussianPrior(nc, 0.5f0)
init = InitialiseAllSame(Normal(0.0f0, 0.5f0), like, prior)
````
We are given a single sequence (time series). To exploit batching and to not
always have to feed through the whole sequence, we will split the single
sequence into overlapping subsequences of length 5 and store these in a
tensor. Note that we add 1 to the subsequence length, because the last
observation of each subsequence will be our training observation to predict
using the first five items in the subsequence.
````julia
x = make_rnn_tensor(reshape(y, :, 1), 5 + 1)
y = vec(x[end, :, :])
x = x[1:end-1, :, :]
````
We are now ready to create the BNN and find the MAP estimate. The MAP will be
used to check whether the overall network structure makes sense (whether it
provides at least good point estimates).
````julia
bnn = BNN(x, y, like, prior, init)
opt = FluxModeFinder(bnn, Flux.RMSProp())
θmap = find_mode(bnn, 10, 1000, opt)
````
When checking the performance we need to make sure to feed the sequences
through the network observation by observation:
````julia
nethat = nc(θmap)
yhat = vec([nethat(xx) for xx in eachslice(x; dims = 1)][end])
sqrt(mean(abs2, y .- yhat))
````
The rest works just like before with some minor adjustments to the helper
functions.
````julia
sampler = SGNHTS(1f-2, 1f0; xi = 1f0^2, μ = 10f0)
ch = mcmc(bnn, 10, 50_000, sampler)
ch = ch[:, end-20_000+1:end]
chain = Chains(ch')
function naive_prediction_recurrent(bnn, draws::Array{T, 2}; x = bnn.x, y = bnn.y) where {T}
yhats = Array{T, 2}(undef, length(y), size(draws, 2))
Threads.@threads for i=1:size(draws, 2)
net = bnn.like.nc(draws[:, i])
yh = vec([net(xx) for xx in eachslice(x; dims = 1)][end])
yhats[:,i] = yh
end
return yhats
end
````
----
````julia
yhats = naive_prediction_recurrent(bnn, ch)
chain_yhat = Chains(yhats')
maximum(summarystats(chain_yhat)[:, :rhat])
````
----
````julia
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
````
# Customising BayesFlux
BayesFlux is coded in such a way that the user can easily extend many of the
functionalities. The following are the easiest to extend and we will cover
them below:
- Initialisers
- Layers
- Priors
- Likelihoods
## Customising Initialisers
Every Initialiser must be implemented as a callable type extending the
abstract type `BNNInitialiser`. As already mentioned, it must be callable,
with the only (optional) argument being a random number generator. It must
return a tuple of vectors: `(θnet, θhyper, θlike)` where any of the latter two
vectors is allowed to be of length zero. *For more information read the
documentation of `BNNInitialiser`.*
**Example**: See the code for `InitialiseAllSame`
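For concreteness, below is a minimal sketch of what such a type could look
like. The struct and its field names are illustrative, not part of BayesFlux;
the only fields it assumes (`num_params_network`, `num_params_hyper`,
`num_params_like`) are the ones used throughout the test suite.

````julia
using Random

# Hypothetical initialiser that sets every parameter to zero.
struct InitialiseAllZero{L,P} <: BNNInitialiser
    like::L   # likelihood; assumed to expose nc and num_params_like
    prior::P  # network prior; assumed to expose num_params_hyper
end

# Callable with an optional RNG; returns (θnet, θhyper, θlike).
function (init::InitialiseAllZero)(rng::Random.AbstractRNG=Random.default_rng())
    θnet = zeros(Float32, init.like.nc.num_params_network)
    θhyper = zeros(Float32, init.prior.num_params_hyper)
    θlike = zeros(Float32, init.like.num_params_like)
    return θnet, θhyper, θlike
end
````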
## Customising Layers
BayesFlux relies on the layers currently implemented in `Flux`. Thus, the first step
in implementing a new layer for BayesFlux is to implement a new layer for `Flux`.
Once that is done, one must also implement a destruct method. For example, for
the Dense layer this has the following form
````julia
function destruct(cell::Flux.Dense)
@unpack weight, bias, σ = cell
θ = vcat(vec(weight), vec(bias))
function re(θ::AbstractVector)
s = 1
pweight = length(weight)
new_weight = reshape(θ[s:s+pweight-1], size(weight))
s += pweight
pbias = length(bias)
new_bias = reshape(θ[s:s+pbias-1], size(bias))
return Flux.Dense(new_weight, new_bias, σ)
end
return θ, re
end
````
The destruct method takes as input a cell with the type of the cell being the
newly implemented layer. It must return a vector containing all network
parameters that should be trained/inferred and a function that, given a vector
of the right length, can reconstruct the layer. **Note: Flux also implements a
general destructure and restructure method. In my experience, this often
caused problems in AD and thus until this is more stable, BayesFlux will stick
with this manual setup**.
Care must be taken when cells are recurrent. The actual layer is then not an
`RNNCell`, but rather the full recurrent version: `Flux.Recur{RNNCell}`. Thus,
the destruct method for `RNN` cells takes the following form:
````julia
function destruct(cell::Flux.Recur{R}) where {R<:Flux.RNNCell}
@unpack σ, Wi, Wh, b, state0 = cell.cell
# θ = vcat(vec(Wi), vec(Wh), vec(b), vec(state0))
θ = vcat(vec(Wi), vec(Wh), vec(b))
function re(θ::Vector{T}) where {T}
s = 1
pWi = length(Wi)
new_Wi = reshape(θ[s:s+pWi-1], size(Wi))
s += pWi
pWh = length(Wh)
new_Wh = reshape(θ[s:s+pWh-1], size(Wh))
s += pWh
pb = length(b)
new_b = reshape(θ[s:s+pb-1], size(b))
s += pb
# pstate0 = length(state0)
# new_state0 = reshape(θ[s:s+pstate0-1], size(state0))
new_state0 = zeros(T, size(state0))
return Flux.Recur(Flux.RNNCell(σ, new_Wi, new_Wh, new_b, new_state0))
end
return θ, re
end
````
As can be seen from the commented-out lines, we are currently not inferring
the initial state. While this would be great and could theoretically be done
in a Bayesian setting, it also often seems to cause bad mixing and other
difficulties in the inferential process.
## Customising Priors
BayesFlux implements priors as subtypes of the abstract type `NetworkPrior`.
Generally, what happens when one calls `loglikeprior` is that BayesFlux splits the
vector into `θnet, θhyper, θlike` and calls the prior with `θnet` and
`θhyper`. The number of hyper-parameters is given in the prior type. As such,
BayesFlux in theory allows for simple to highly complex multi-level priors. The
hope is that this provides enough flexibility to encourage researchers to try
out different priors. *For more documentation, please see the docs for
`NetworkPrior` and for an example of a mixture scale prior check out the code
for `MixtureScalePrior`*.
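As a rough illustration, a fixed-scale Laplace prior could be sketched as
follows. This is an assumption-laden sketch, not BayesFlux code: the field
names mirror those exercised in the test suite, and we assume `sample_prior`
is the method to extend, since it is the one called in the tests.

````julia
using Distributions, Random

struct LaplacePrior{T,NC} <: NetworkPrior
    nc::NC                 # network constructor (assumed field)
    num_params_hyper::Int  # zero: this sketch has no hyper-priors
    b::T                   # fixed Laplace scale
end
LaplacePrior(nc, b::T) where {T} = LaplacePrior(nc, 0, b)

# Callable: returns the log-prior density given network and hyper parameters.
function (lp::LaplacePrior{T})(θnet::AbstractVector{T}, θhyper::AbstractVector{T}) where {T}
    return sum(logpdf.(Laplace(zero(T), lp.b), θnet))
end

# Draws a vector of network parameters from the prior.
function BayesFlux.sample_prior(lp::LaplacePrior{T}, rng::Random.AbstractRNG=Random.default_rng()) where {T}
    return T.(rand(rng, Laplace(zero(T), lp.b), lp.nc.num_params_network))
end
````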
> :bangbang: Note that the prior defined here is only for the network. All
> additional priors for parameters needed by the likelihood are handled in the
> likelihood. This might at first sound odd, but nicely splits network
> specific things from likelihood specific things and thus should make BayesFlux
> more flexible.
## Customising Likelihoods
Likelihoods are implemented as types extending the abstract type
`BNNLikelihood` and thus can be extended by implementing a new subtype.
Traditionally, likelihoods truly only refer to the likelihood. We took a
somewhat unconventional route and designed BayesFlux so that likelihood types
also include the priors for all parameters they introduce. This was done so
that the network specification, priors and all, is separate from the
likelihood specification. As such, these two parts can be freely changed
without changing the other.
**Example**: Say we would like to implement a simple Gaussian likelihood for a
Feedforward structure (this is already implemented). Unless we predefine the
standard deviation of the Gaussian, we will also estimate it, and thus we need
a prior for it. While in the traditional setting this would be covered in the
prior type, here it is covered in the likelihood type. Thus, the version
implemented in BayesFlux also takes, upon construction, a prior distribution to
be used for the standard deviation. This prior does not have to have as its domain the real
line. It can also be constrained, such as a Gamma distribution, as long as the
code of the likelihood type makes sure to appropriately transform the
distribution to the real line. In the already implemented version this is done
using `Bijectors.jl`.
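For intuition, the transformation pattern as it appears in the test files
looks roughly like this (a sketch of the transformation logic only, not a
full likelihood implementation):

````julia
using Distributions, Bijectors

prior_σ = Gamma(2.0f0, 0.5f0)   # prior on σ, supported on (0, ∞)
tdist = transformed(prior_σ)    # pushforward of the prior onto the real line
tσ = link(prior_σ, 1.0f0)       # unconstrained value corresponding to σ = 1
σ = invlink(prior_σ, tσ)        # back-transform; recovers σ = 1
lp = logpdf(tdist, tσ)          # prior contribution in the unconstrained space
````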
The documentation for `BNNLikelihood` gives details about what exactly needs to
be implemented.
---
*This page was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*
| BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 2799 | # BayesFlux.jl a Bayesian extension to [Flux.jl](https://fluxml.ai)
````julia
using BayesFlux, Flux
using Random, Distributions
using StatsPlots
using LinearAlgebra
Random.seed!(6150533)
````
```@meta
CurrentModule = BayesFlux
DocTestSetup = quote
using BayesFlux
end
```
BayesFlux is meant to be an extension to Flux.jl, a machine learning library written
entirely in Julia. BayesFlux is not and will not be the fastest production-ready
library, but rather is meant to make research and experimentation easy.
BayesFlux is part of my Master Thesis in Economic and Financial Research -
specialisation Econometrics at Maastricht University and will therefore likely
still go through some revisions in the coming months.
## Structure
Every Bayesian model can in general be broken down into the probabilistic
model, which gives the likelihood function and the prior on all parameters of
the probabilistic model. BayesFlux somewhat follows this and splits every Bayesian
Network into the following parts:
1. **Network**: Every BNN must have some general network structure. This is
defined using Flux and currently supports Dense, RNN, and LSTM layers. More
on this later
2. **Network Constructor**: Since BayesFlux works with vectors of parameters, we
need to be able to go from a vector to the network and back. This works by
using the NetworkConstructor.
3. **Likelihood**: The likelihood function. In traditional estimation of NNs,
this would correspond to the negative loss function. BayesFlux has a twist on
this though and nomenclature might change because of this twist: The
likelihood also contains all additional parameters and priors. For example,
for a Gaussian likelihood, the likelihood object also defines the standard
deviation and the prior for the standard deviation. This design choice was
made to keep the likelihood and everything belonging to it separate from
the network; Again, due to the potential confusion, the nomenclature might
change in later revisions.
4. **Prior on network parameters**: A prior on all network parameters.
Currently the RNN layers do not define priors on the initial state and thus
the initial state is also not sampled. Priors can have hyper-priors.
5. **Initialiser**: Unless some special initialisation values are given, BayesFlux
will draw initial values as defined by the initialiser. An initialiser
initialises all network and likelihood parameters to reasonable values.
All the above are then used to create a BNN which can then be estimated
using the MAP, can be sampled from using any of the MCMC methods implemented,
or can be estimated using Variational Inference.
The examples and the sections below hopefully clarify everything. If any
questions remain, please open an issue.
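As a quick orientation, the pieces fit together roughly as in the following
condensed sketch of the linear-regression example from the README (the data
and hyper-parameters are illustrative):

````julia
using BayesFlux, Flux, Distributions

x = randn(Float32, 5, 500)                        # 5 features, 500 observations
y = x' * randn(Float32, 5) + randn(Float32, 500)  # linear data with Gaussian noise

net = Chain(Dense(5, 1))                          # 1. network
nc = destruct(net)                                # 2. network constructor
like = FeedforwardNormal(nc, Gamma(2.0, 0.5))     # 3. likelihood with prior on σ
prior = GaussianPrior(nc, 0.5f0)                  # 4. prior on network parameters
init = InitialiseAllSame(Normal(0.0f0, 0.5f0), like, prior)  # 5. initialiser

bnn = BNN(x, y, like, prior, init)                # the full Bayesian network
````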
| BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 70 |
# MAP Estimation
```@docs
BNNModeFinder
find_mode
FluxModeFinder
``` | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 311 | # MCMC Estimation
## Sampler
```@docs
MCMCState
mcmc
SGLD
SGNHTS
SGNHT
GGMC
HMC
AdaptiveMH
```
## Mass Adaptation
```@docs
MassAdapter
DiagCovMassAdapter
FullCovMassAdapter
FixedMassAdapter
RMSPropMassAdapter
```
## Stepsize Adaptation
```@docs
StepsizeAdapter
ConstantStepsize
DualAveragingStepSize
```
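A typical configuration combining a sampler with stepsize and mass adaptation,
adapted from the examples (the values are illustrative):

```julia
sadapter = DualAveragingStepSize(1f-9; target_accept = 0.55f0, adapt_steps = 10000)
madapter = DiagCovMassAdapter(5000, 1000)
sampler = GGMC(Float32; β = 0.1f0, l = 1f-9, sadapter = sadapter, madapter = madapter)
ch = mcmc(bnn, 10, 50_000, sampler)  # assumes a BNN `bnn` has been constructed
```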
| BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 41 | # Variational Inference
```@docs
bbb
``` | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 63 | # Initialisation
```@docs
BNNInitialiser
InitialiseAllSame
``` | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 2004 | # Example: Feedforward NN Regression
Let's say we have the same setting as in [Example: Linear Regression](@ref) but
are not aware that it is a linear model and thus decide to use a Feedforward
Neural Network.
````julia
k = 5
n = 500
x = randn(Float32, k, n);
β = randn(Float32, k);
y = x'*β + randn(Float32, n);
````
While some might think this will change a lot, given that the
model we are estimating is a lot more complicated than a linear regression
model, BayesFlux abstracts away all of this and all that changes is the network
definition.
````julia
net = Chain(Dense(k, k, relu), Dense(k, k, relu), Dense(k, 1))
````
We can then still use the same prior, likelihood, and initialiser. But we do
need to change the NetworkConstructor, which we still obtain in the same way by
calling `destruct`
````julia
nc = destruct(net)
like = FeedforwardNormal(nc, Gamma(2.0, 0.5))
prior = GaussianPrior(nc, 0.5f0)
init = InitialiseAllSame(Normal(0.0f0, 0.5f0), like, prior)
bnn = BNN(x, y, like, prior, init)
````
The rest is the same as for the linear regression case. We can, for example,
first find the MAP:
````julia
opt = FluxModeFinder(bnn, Flux.ADAM()) # We will use ADAM
θmap = find_mode(bnn, 10, 500, opt) # batchsize 10 with 500 epochs
````
----
````julia
nethat = nc(θmap)
yhat = vec(nethat(x))
sqrt(mean(abs2, y .- yhat))
````
Or we can use any of the MCMC or VI method - SGNHTS is just one option:
````julia
sampler = SGNHTS(1f-2, 1f0; xi = 1f0^2, μ = 10f0)
ch = mcmc(bnn, 10, 50_000, sampler)
ch = ch[:, end-20_000+1:end]
chain = Chains(ch')
````
----
````julia
yhats = naive_prediction(bnn, ch)
chain_yhat = Chains(yhats')
maximum(summarystats(chain_yhat)[:, :rhat])
````
----
````julia
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
```` | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
["MIT"] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 12960 | # Example: Linear Regression
Although not meant for Simple Linear Regression, BayesFlux can be used for it, and
we will do so in this section. This will hopefully demonstrate the basics.
Later sections will show better examples.
Let's say we have the idea that the data can be modelled via a linear model of
the form
$$y_i = x_i'\beta + e_i$$
with $e_i \sim N(0, 1)$
````julia
k = 5
n = 500
x = randn(Float32, k, n);
β = randn(Float32, k);
y = x'*β + randn(Float32, n);
````
This is a standard linear model and we would likely be better off using STAN
or Turing for this, but due to the availability of a Dense layer with linear
activation function, we can also implement it in BayesFlux.
The first step is to define the network. As mentioned above, the network
consists of a single Dense layer with a linear activation function (the
default activation in Flux and hence not explicitly shown).
````julia
net = Chain(Dense(k, 1)) # k inputs and one output
````
Since BayesFlux works with vectors, we need to be able to transform a vector to
the above network and back. We thus need a NetworkConstructor, which we obtain
as the return value of `destruct`
````julia
nc = destruct(net)
````
We can check whether everything works by just creating a random vector of the
right dimension and calling the NetworkConstructor using this vector.
````julia
θ = randn(Float32, nc.num_params_network)
nc(θ)
````
We indeed obtain a network of the right size and structure.
Next, we will define a prior for all parameters of the network. Since weight
decay is a popular regularisation method in standard ML estimation, we will be
using a Gaussian prior, which is the Bayesian weight decay:
````julia
prior = GaussianPrior(nc, 0.5f0) # the last value is the standard deviation
````
We also need a likelihood and a prior on all parameters the likelihood
introduces to the model. We will go for a Gaussian likelihood, which
introduces the standard deviation of the model. BayesFlux currently implements
Gaussian and Student-t likelihoods for Feedforward and Seq-to-one cases but
more can easily be implemented.
````julia
like = FeedforwardNormal(nc, Gamma(2.0, 0.5)) # Second argument is prior for standard deviation.
````
Lastly, when no explicit initial value is given, BayesFlux will draw it from an
initialiser. Currently only one type of initialiser is implemented,
but this can easily be extended by the user.
````julia
init = InitialiseAllSame(Normal(0.0f0, 0.5f0), like, prior) # First argument is dist we draw parameters from.
````
Given all the above, we can now define the BNN:
````julia
bnn = BNN(x, y, like, prior, init)
````
### MAP estimate.
It is always a good idea to first find the MAP estimate. This can serve two
purposes:
1. It is faster than fully estimating the model using MCMC or VI and can thus
   serve as a quick check; if the MAP estimate results in bad point
   predictions, the full estimation will likely produce bad results too.
2. It can serve as a starting value for the MCMC samplers.
To find a MAP estimate, we must first specify how we want to find it: We need
to define an optimiser. BayesFlux currently only implements optimisers derived
from Flux itself, but this can be extended by the user.
````julia
opt = FluxModeFinder(bnn, Flux.ADAM()) # We will use ADAM
θmap = find_mode(bnn, 10, 500, opt) # batchsize 10 with 500 epochs
````
We can already use the MAP estimate to make some predictions and calculate the
RMSE.
````julia
nethat = nc(θmap)
yhat = vec(nethat(x))
sqrt(mean(abs2, y .- yhat))
````
### MCMC - SGLD
If the MAP estimate does not show any problems, it can be used as the starting
point for SGLD or any of the other MCMC methods (see later section).
Simulations have shown that using a relatively large initial stepsize with a
slowly decaying stepsize schedule often results in the best mixing. *Note: We
would usually use samplers such as NUTS for linear regressions, which are much
more efficient than SGLD*
````julia
sampler = SGLD(Float32; stepsize_a = 10f-0, stepsize_b = 0.0f0, stepsize_γ = 0.55f0)
ch = mcmc(bnn, 10, 50_000, sampler)
ch = ch[:, end-20_000+1:end]
````
We can obtain summary statistics and trace and density plots of network
parameters and likelihood parameters by transforming the BayesFlux chain into a
MCMCChain.
````julia
using MCMCChains
chain = Chains(ch')
plot(chain)
````
In more complicated networks, obtaining good mixing in parameter space is
usually a hopeless goal, so we focus on the output space of the network
instead. *Mixing in parameter space is hopeless due to the very complicated
topology of the posterior; simulations have also found that perfect mixing in
the output space is not always needed to obtain good point and interval
predictions.*
We will use a little helper function to get the output values of the network:
````julia
function naive_prediction(bnn, draws::Array{T, 2}; x = bnn.x, y = bnn.y) where {T}
yhats = Array{T, 2}(undef, length(y), size(draws, 2))
Threads.@threads for i=1:size(draws, 2)
net = bnn.like.nc(draws[:, i])
yh = vec(net(x))
yhats[:,i] = yh
end
return yhats
end
yhats = naive_prediction(bnn, ch)
chain_yhat = Chains(yhats')
maximum(summarystats(chain_yhat)[:, :rhat])
````
Similarly, we can obtain posterior predictive values and evaluate quantiles
obtained using these to how many percent of the actual data fall below the
quantiles. What we would like is that 5% of the data fall below the 5%
quantile of the posterior predictive draws.
````julia
function get_observed_quantiles(y, posterior_yhat, target_q = 0.05:0.05:0.95)
qs = [quantile(yr, target_q) for yr in eachrow(posterior_yhat)]
qs = reduce(hcat, qs)
observed_q = mean(reshape(y, 1, :) .< qs; dims = 2)
return observed_q
end
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
````
### MCMC - SGNHTS
Just like SGLD, SGNHTS does not apply a Metropolis-Hastings correction
step. Contrary to SGLD though, SGNHTS implements a thermostat whose task is
to keep the temperature of the dynamic system close to one, and thus the
sampling more accurate. Although the thermostat's goal is often not achieved,
samples obtained using SGNHTS often outperform those obtained using SGLD.
````julia
sampler = SGNHTS(1f-2, 2f0; xi = 2f0^2, μ = 50f0)
ch = mcmc(bnn, 10, 50_000, sampler)
ch = ch[:, end-20_000+1:end]
chain = Chains(ch')
````
----
````julia
yhats = naive_prediction(bnn, ch)
chain_yhat = Chains(yhats')
maximum(summarystats(chain_yhat)[:, :rhat])
````
----
````julia
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
````
### MCMC - GGMC
As pointed out above, neither SGLD nor SGNHTS applies a Metropolis-Hastings
acceptance step and both are thus difficult to monitor. Indeed, draws from SGLD
or SGNHTS should perhaps be considered as an ensemble of models rather than as
draws from the posterior, since without any MH step it is unclear whether the
chain will actually converge to the posterior.
BayesFlux also implements three methods that do apply a MH step and are thus
easier to monitor. These are GGMC, AdaptiveMH, and HMC. Both GGMC and HMC
allow for taking stochastic gradients. GGMC also allows the use of delayed
acceptance, in which the MH step is only applied after a couple of steps,
rather than after each step (see [`GGMC`](@ref) for details).
Because both GGMC and HMC use a MH step, they provide a measure of the mean
acceptance rate, which can be used to tune the stepsize using Dual Averaging
(see [`DualAveragingStepSize`](@ref) for details). Similarly, both also make
use of mass matrices, which can likewise be tuned (see [`MassAdapter`](@ref)).
BayesFlux implements both stepsize adapters and mass adapters but to this point
does not implement a smart way of combining them (this will come in the
future). In my experience, naively combining them often only helps in more
complex models and thus we will only use a stepsize adapter here.
````julia
sadapter = DualAveragingStepSize(1f-9; target_accept = 0.55f0, adapt_steps = 10000)
sampler = GGMC(Float32; β = 0.1f0, l = 1f-9, sadapter = sadapter)
ch = mcmc(bnn, 10, 50_000, sampler)
ch = ch[:, end-20_000+1:end]
chain = Chains(ch')
````
----
````julia
yhats = naive_prediction(bnn, ch)
chain_yhat = Chains(yhats')
maximum(summarystats(chain_yhat)[:, :rhat])
````
----
````julia
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
````
The above uses a MH correction after each step. This can be costly in big-data
environments or when the evaluation of the likelihood is expensive. If either
of the above applies, delayed acceptance can speed up the process.
````julia
sadapter = DualAveragingStepSize(1f-9; target_accept = 0.25f0, adapt_steps = 10000)
sampler = GGMC(Float32; β = 0.1f0, l = 1f-9, sadapter = sadapter, steps = 3)
ch = mcmc(bnn, 10, 50_000, sampler)
ch = ch[:, end-20_000+1:end]
chain = Chains(ch')
````
----
````julia
yhats = naive_prediction(bnn, ch)
chain_yhat = Chains(yhats')
maximum(summarystats(chain_yhat)[:, :rhat])
````
----
````julia
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
````
### MCMC - HMC
Since HMC showed some mixing problems for some variables during the testing of
this README, we decided to use a mass matrix adaptation. This turned out to
work better even in this simple case. Note that the use of stochastic
gradients introduces problems into HMC, so HMC with stochastic gradients is
not fully theoretically justified.
````julia
sadapter = DualAveragingStepSize(1f-9; target_accept = 0.55f0, adapt_steps = 10000)
madapter = DiagCovMassAdapter(5000, 1000)
sampler = HMC(1f-9, 5; sadapter = sadapter, madapter = madapter)
ch = mcmc(bnn, 10, 50_000, sampler)
ch = ch[:, end-20_000+1:end]
chain = Chains(ch')
````
----
````julia
yhats = naive_prediction(bnn, ch)
chain_yhat = Chains(yhats')
maximum(summarystats(chain_yhat)[:, :rhat])
````
----
````julia
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
````
### MCMC - Adaptive Metropolis-Hastings
As a derivative-free alternative, BayesFlux also implements Adaptive MH
(see [`AdaptiveMH`](@ref)). This is currently quite a costly method
for complex models since it needs to evaluate the MH ratio at each step. Plans
exist to parallelise the calculation of the likelihood, which should speed up
Adaptive MH.
````julia
sampler = AdaptiveMH(diagm(ones(Float32, bnn.num_total_params)), 1000, 0.5f0, 1f-4)
ch = mcmc(bnn, 10, 50_000, sampler)
ch = ch[:, end-20_000+1:end]
chain = Chains(ch')
````
----
````julia
yhats = naive_prediction(bnn, ch)
chain_yhat = Chains(yhats')
maximum(summarystats(chain_yhat)[:, :rhat])
````
----
````julia
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
````
### Variational Inference
In some cases MCMC methods either do not work well or simply take too long.
For these cases BayesFlux currently implements Bayes-By-Backprop (see
[`bbb`](@ref)). One shortcoming of the current implementation is that the
variational family is constrained to a diagonal multivariate Gaussian and thus
any correlations between network parameters are set to zero. This can cause
problems in some situations and plans exist to allow for more flexible
covariance specifications.
````julia
q, params, losses = bbb(bnn, 10, 2_000; mc_samples = 1, opt = Flux.ADAM(), n_samples_convergence = 10)
ch = rand(q, 20_000)
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
```` | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
[
"MIT"
] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 3656 | # Example: Recurrent Neural Networks
Next to Dense layers, BayesFlux also implements RNN and LSTM layers. These two do
require some additional care though, since the layout of the data must be
adjusted. In general, the last dimension of `x` and `y` is always the dimension
along which BayesFlux batches, which is also what Flux does. Thus, if we are in a
seq-to-one setting then the sequences must be along the last dimension (here the
third). To demonstrate this, let us simulate some AR1 data
!!! note "Note"
    BayesFlux currently only implements univariate regression problems (a single
    dependent variable) and, for recurrent structures, only seq-to-one
    settings. This can be extended by the user; for this see
    [`BNNLikelihood`](@ref).
````julia
Random.seed!(6150533)
gamma = 0.8
N = 500
burnin = 1000
y = zeros(N + burnin + 1)
for t=2:(N+burnin+1)
y[t] = gamma*y[t-1] + randn()
end
y = Float32.(y[end-N+1:end])
````
Just like in the FNN case, we need a network structure and its constructor, a
prior on the network parameters, a likelihood with a prior on the additional
parameters introduced by the likelihood, and an initialiser. Note how most
things are the same as for the FNN case, with the differences being the actual
network defined and the likelihood.
````julia
net = Chain(RNN(1, 1), Dense(1, 1)) # last layer is linear output layer
nc = destruct(net)
like = SeqToOneNormal(nc, Gamma(2.0, 0.5))
prior = GaussianPrior(nc, 0.5f0)
init = InitialiseAllSame(Normal(0.0f0, 0.5f0), like, prior)
````
We are given a single sequence (time series). To exploit batching and to not
always have to feed through the whole sequence, we will split the single
sequence into overlapping subsequences of length 5 and store these in a
tensor. Note that we add 1 to the subsequence length, because the last
observation of each subsequence will be our training observation to predict
using the first five items in the subsequence.
````julia
x = make_rnn_tensor(reshape(y, :, 1), 5 + 1)
y = vec(x[end, :, :])
x = x[1:end-1, :, :]
````
We are now ready to create the BNN and find the MAP estimate. The MAP will be
used to check whether the overall network structure makes sense (i.e., whether
it provides at least good point estimates).
````julia
bnn = BNN(x, y, like, prior, init)
opt = FluxModeFinder(bnn, Flux.RMSProp())
θmap = find_mode(bnn, 10, 1000, opt)
````
When checking the performance we need to make sure to feed the sequences
through the network observation by observation:
````julia
nethat = nc(θmap)
yhat = vec([nethat(xx) for xx in eachslice(x; dims = 1)][end])
sqrt(mean(abs2, y .- yhat))
````
The rest works just like before with some minor adjustments to the helper
functions.
````julia
sampler = SGNHTS(1f-2, 1f0; xi = 1f0^2, μ = 10f0)
ch = mcmc(bnn, 10, 50_000, sampler)
ch = ch[:, end-20_000+1:end]
chain = Chains(ch')
function naive_prediction_recurrent(bnn, draws::Array{T, 2}; x = bnn.x, y = bnn.y) where {T}
yhats = Array{T, 2}(undef, length(y), size(draws, 2))
Threads.@threads for i=1:size(draws, 2)
net = bnn.like.nc(draws[:, i])
yh = vec([net(xx) for xx in eachslice(x; dims = 1)][end])
yhats[:,i] = yh
end
return yhats
end
````
----
````julia
yhats = naive_prediction_recurrent(bnn, ch)
chain_yhat = Chains(yhats')
maximum(summarystats(chain_yhat)[:, :rhat])
````
----
````julia
posterior_yhat = sample_posterior_predict(bnn, ch)
t_q = 0.05:0.05:0.95
o_q = get_observed_quantiles(y, posterior_yhat, t_q)
plot(t_q, o_q, label = "Posterior Predictive", legend=:topleft,
xlab = "Target Quantile", ylab = "Observed Quantile")
plot!(x->x, t_q, label = "Target")
````
| BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
[
"MIT"
] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 74 | # Feedforward Likelihoods
```@docs
FeedforwardNormal
FeedforwardTDist
``` | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
[
"MIT"
] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 39 | # Interface
```@docs
BNNLikelihood
``` | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
[
"MIT"
] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 67 | # Seq-to-One Likelihoods
```@docs
SeqToOneNormal
SeqToOneTDist
``` | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
[
"MIT"
] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 84 | # Model Basics
```@docs
BNN
loglikeprior
∇loglikeprior
NetConstructor
destruct
``` | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
[
"MIT"
] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 118 | # Prior and Posterior Predictive
```@docs
sample_prior_predictive
get_posterior_networks
sample_posterior_predict
``` | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
[
"MIT"
] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 44 | # Gaussian Prior
```@docs
GaussianPrior
``` | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
[
"MIT"
] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 38 | # Interface
```@docs
NetworkPrior
``` | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
[
"MIT"
] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 53 | # Mixture Scale Prior
```@docs
MixtureScalePrior
``` | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
[
"MIT"
] | 0.2.3 | b248c6aefe448c9ad4efa106f31cd767d50c931d | docs | 55 | # Recurrent Architectures
```@docs
make_rnn_tensor
``` | BayesFlux | https://github.com/enweg/BayesFlux.jl.git |
|
[
"MIT"
] | 1.0.1 | 01fa2a3303a93ad9fd037582d99c8f8429e9ec62 | code | 22096 | module OpenLocationCode
export is_valid, is_short, is_full, encode, decode, recover_nearest, shorten
export CodeArea, latitude_low, longitude_low, latitude_high, longitude_high
export latitude_center, longitude_center, latitude_precision, longitude_precision
export latlong
"A separator used to break the code into two parts to aid memorability."
const SEPARATOR = '+'
"The number of characters to place before the separator."
const SEPARATOR_POSITION = 8
# The character used to pad codes.
const PADDING_CHARACTER = '0'
# The character set used to encode the values.
const CODE_DIGITS = [UInt8(c) for c in "23456789CFGHJMPQRVWX"]
# The base to use to convert numbers to/from.
const ENCODING_BASE = length(CODE_DIGITS) # 20
# The maximum value for latitude in degrees.
const LATITUDE_MAX = 90
# The maximum value for longitude in degrees.
const LONGITUDE_MAX = 180
# The max number of digits to process in a plus code.
const MAX_DIGIT_COUNT = 15
"""
Maximum code length using lat/lng pair encoding. The area of such a
code is approximately 13.9x13.9 meters (at the equator), and should be suitable
for identifying buildings. This excludes prefix and separator characters.
"""
const PAIR_CODE_LENGTH = 10
# First place value of the pairs (if the last pair value is 1).
const PAIR_FIRST_PLACE_VALUE = ENCODING_BASE^(PAIR_CODE_LENGTH ÷ 2 - 1)
# Inverse of the precision of the pair section of the code.
const PAIR_PRECISION = ENCODING_BASE^3
# The resolution values in degrees for each position in the lat/lng pair
# encoding. These give the place value of each position, and therefore the
# dimensions of the resulting area.
const PAIR_RESOLUTIONS = [20.0, 1.0, 0.05, 0.0025, 0.000125]
# Number of digits in the grid precision part of the code.
const GRID_CODE_LENGTH = MAX_DIGIT_COUNT - PAIR_CODE_LENGTH
# Number of columns in the grid refinement method.
const GRID_COLUMNS = 4
# Number of rows in the grid refinement method.
const GRID_ROWS = 5
# First place value of the latitude grid (if the last place is 1).
const GRID_LAT_FIRST_PLACE_VALUE = GRID_ROWS^(GRID_CODE_LENGTH - 1)
# First place value of the longitude grid (if the last place is 1).
const GRID_LNG_FIRST_PLACE_VALUE = GRID_COLUMNS^(GRID_CODE_LENGTH - 1)
# inverse of best precision of grid part
const GRID_LATPRECISION = GRID_LAT_FIRST_PLACE_VALUE * GRID_ROWS
const GRID_LNGPRECISION = GRID_LNG_FIRST_PLACE_VALUE * GRID_COLUMNS
# Multiply latitude by this much to make it a multiple of the finest
# precision.
const FINAL_LAT_PRECISION = PAIR_PRECISION * GRID_LATPRECISION
# Multiply longitude by this much to make it a multiple of the finest
# precision.
const FINAL_LNG_PRECISION = PAIR_PRECISION * GRID_LNGPRECISION
# Minimum length of a code that can be shortened.
const MIN_TRIMMABLE_CODE_LEN = 6
const GRID_SIZE_DEGREES = 0.000125
"""
    CodeArea(latlo, longlo, codelength=10)
    CodeArea(code::AbstractString)
Coordinates of a decoded Open Location Code.
The coordinates include the latitude and longitude of the lower left
and the code length, which defines the extension of the area.
Attributes:
latitude_low: The latitude of the SW corner in degrees.
longitude_low: The longitude of the SW corner in degrees.
latitude_high: The latitude of the NE corner in degrees.
longitude_high: The longitude of the NE corner in degrees.
latitude_center: The latitude of the center in degrees.
longitude_center: The longitude of the center in degrees.
code_length: The number of significant characters that were in the code.
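# Examples:
```julia
julia> CodeArea(50.0, 20.0, 2)
CodeArea("9G000000+", 50.0+20.0, 20.0+20.0, 2)
```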
"""
struct CodeArea{T<:Real}
code::String
latlo::T
longlo::T
codelength::Int
end
function CodeArea(latlo, longlo, codelength=PAIR_CODE_LENGTH)
latlo, longlo = promote(latlo, longlo)
CodeArea{typeof(latlo)}(encode(latlo, longlo, codelength), latlo, longlo, codelength)
end
function CodeArea(code::AbstractString)
lat, lon, len = _decode(code)
CodeArea{typeof(lat)}(normcode(code), lat, lon, len)
end
latitude_low(ca::CodeArea) = ca.latlo
longitude_low(ca::CodeArea) = ca.longlo
latitude_high(ca::CodeArea) = min_lat(ca, latitude_precision(ca))
longitude_high(ca::CodeArea) = min_lon(ca, longitude_precision(ca))
latitude_center(ca::CodeArea) = min_lat(ca, latitude_precision(ca) / 2)
longitude_center(ca::CodeArea) = min_lon(ca, longitude_precision(ca) / 2)
latitude_precision(ca::CodeArea) = latitude_precision(ca.codelength)
longitude_precision(ca::CodeArea) = longitude_precision(ca.codelength)
plus_code(ca::CodeArea) = ca.code
latlong(ca::CodeArea) = latitude_center(ca), longitude_center(ca)
normcode(code) = length(code) > MAX_DIGIT_COUNT + 1 ? code[1:MAX_DIGIT_COUNT+1] : code
min_lat(ca, delta) = min(latitude_low(ca) + delta, LATITUDE_MAX)
min_lon(ca, delta) = min(longitude_low(ca) + delta, LONGITUDE_MAX)
import Base: ==
function ==(ca::CodeArea, cb::CodeArea)
ca.code == cb.code &&
ca.codelength == cb.codelength &&
ca.latlo == cb.latlo &&
ca.longlo == cb.longlo
end
function Base.isapprox(ca::CodeArea, cb::CodeArea)
ca.code == cb.code &&
ca.codelength == cb.codelength &&
ca.latlo ≈ cb.latlo &&
ca.longlo ≈ cb.longlo
end
function Base.show(io::IO, ca::CodeArea)
print(io, "CodeArea(\"", plus_code(ca),"\", ")
print(io, latitude_low(ca), "+", latitude_precision(ca), ", ")
print(io, longitude_low(ca), "+", longitude_precision(ca), ", ")
print(io, ca.codelength, ")")
end
"""
is_valid(code::AbstractString)
Determine if a code is valid.
To be valid, all characters must be from the Open Location Code character
set with at most one separator. The separator can be in any even-numbered
position up to the eighth digit.
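# Examples:
```julia
julia> is_valid("8FWC2345+G6")
true

julia> is_valid("8FWC2345+G") # only a single digit after the separator
false
```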
"""
function is_valid(code::AbstractString)
# The separator is required exactly once.
sep = findfirst(SEPARATOR, code)
sep !== nothing && findlast(SEPARATOR, code) == sep || return false
# Not more than 8 characters before separator.
sep > SEPARATOR_POSITION + 1 && return false
# Is it the only character?
length(code) <= 1 && return false
# Is it in an illegal position?
sep % 2 == 0 && return false
# We can have an even number of padding characters before the separator,
# but then it must be the final character.
pad = findfirst(PADDING_CHARACTER, code)
if pad !== nothing
# Short codes cannot have padding
sep < SEPARATOR_POSITION && return false
        # Padding is not allowed to start the code or to begin at an even position
        (pad == 1 || iseven(pad)) && return false
# There can only be one group and it must have even length.
pad2 = findlast(PADDING_CHARACTER, code)
(pad2 - pad) % 2 == 0 && return false
all(isequal(PADDING_CHARACTER), view(code, pad:pad2)) || return false
# If the code is long enough to end with a separator, make sure it does.
return code[end] == SEPARATOR
end
# If there are characters after the separator, make sure there isn't just
# one of them (not legal).
length(code) - sep == 1 && return false
# Check the code contains only valid characters.
return all(ch -> UInt8(uppercase(ch)) in CODE_DIGITS || ch == SEPARATOR || ch == PADDING_CHARACTER, code)
end
"""
is_short(code)
Determine if a code is a valid short code.
A short Open Location Code is a sequence created by removing four or more
digits from an Open Location Code. It must include a separator
character.
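# Examples:
```julia
julia> is_short("+G6")
true

julia> is_short("8FWC2345+G6") # valid, but a full code
false
```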
"""
function is_short(code::AbstractString)
# Check it's valid.
is_valid(code) || return false
# If there are less characters than expected before the SEPARATOR.
sep = findfirst(SEPARATOR, code)
return sep <= SEPARATOR_POSITION
end
"""
is_full(code)
Determine if a code is a valid full Open Location Code.
Not all possible combinations of Open Location Code characters decode to
valid latitude and longitude values. This checks that a code is valid
and also that the latitude and longitude values are legal. If the prefix
character is present, it must be the first character. If the separator
character is present, it must be after eight characters.
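# Examples:
```julia
julia> is_full("8fwc2345+")
true

julia> is_full("45+G6") # valid, but a short code
false
```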
"""
function is_full(code::AbstractString)
is_valid(code) || return false
# If it's short, it's not full
is_short(code) && return false
# Work out what the first latitude character indicates for latitude.
clat = UInt8(uppercase(code[1]))
firstLatValue = (findfirst(==(clat), CODE_DIGITS) - 1) * ENCODING_BASE
if firstLatValue >= LATITUDE_MAX * 2
# The code would decode to a latitude of >= 90 degrees.
return false
end
# Work out what the first longitude character indicates for longitude.
clng = UInt8(uppercase(code[2]))
firstLngValue = (findfirst(==(clng), CODE_DIGITS) - 1) * ENCODING_BASE
if firstLngValue >= LONGITUDE_MAX * 2
# The code would decode to a longitude of >= 180 degrees.
return false
end
return true
end
"""
encode(latitude, longitude[, codelength])
Encode a location into an Open Location Code.
Produces a code of the specified length, or the default length if no length
is provided.
The length determines the accuracy of the code. The default length is
10 characters, representing an area of approximately 13.9x13.9 meters. Longer
codes represent smaller areas, but lengths > 15 are sub-centimetre and so
11 or 12 are probably the limit of useful codes.
# Arguments
- `latitude`: A latitude in signed degrees. Will be clipped to the range -90 to 90.
- `longitude`: A longitude in signed degrees. Will be normalised to
the range -180 to 180.
- `codelength`: The number of significant digits in the output code, not
including any separator characters.
# Examples:
```julia
julia> encode(50.173168, 8.338086, 11)
"9F2C58FQ+768"
```
"""
function encode(latitude::Real, longitude::Real, codelength=PAIR_CODE_LENGTH)
encode(float.(promote(latitude, longitude))..., codelength)
end
function encode(latitude::T, longitude::T, codelength::Int=PAIR_CODE_LENGTH) where {T<:AbstractFloat}
if codelength < 2 || (codelength < PAIR_CODE_LENGTH && codelength % 2 == 1)
throw(ArgumentError("Invalid Open Location Code length - $codelength"))
end
codelength = min(codelength, MAX_DIGIT_COUNT)
# Ensure that latitude and longitude are valid.
latitude = clipLatitude(latitude)
longitude = normalizeLongitude(longitude)
# Latitude 90 needs to be adjusted to be just less, so the returned code
# can also be decoded.
if latitude == LATITUDE_MAX
latitude = latitude - latitude_precision(codelength)
end
bcode = UInt8[]
# Compute the code.
# This approach converts each value to an integer after multiplying it by
# the final precision. This allows us to use only integer operations, so
# avoiding any accumulation of floating point representation errors.
# Multiply values by their precision and convert to positive.
# Force to integers so the division operations will have integer results.
latVal = valpairgrid(latitude, FINAL_LAT_PRECISION, LATITUDE_MAX)
lngVal = valpairgrid(longitude, FINAL_LNG_PRECISION, LONGITUDE_MAX)
# Compute the grid part of the code if necessary.
if codelength > PAIR_CODE_LENGTH
for _ in 1:GRID_CODE_LENGTH
latDigit = latVal % GRID_ROWS
lngDigit = lngVal % GRID_COLUMNS
ndx = latDigit * GRID_COLUMNS + lngDigit
push!(bcode, CODE_DIGITS[ndx+1])
latVal ÷= GRID_ROWS
lngVal ÷= GRID_COLUMNS
end
else
latVal ÷= ^(GRID_ROWS, GRID_CODE_LENGTH)
lngVal ÷= ^(GRID_COLUMNS, GRID_CODE_LENGTH)
end
# Compute the pair section of the code.
for _ in 1:PAIR_CODE_LENGTH÷2
push!(bcode, CODE_DIGITS[lngVal%ENCODING_BASE+1])
push!(bcode, CODE_DIGITS[latVal%ENCODING_BASE+1])
latVal ÷= ENCODING_BASE
lngVal ÷= ENCODING_BASE
end
reverse!(bcode)
# If we don't need to pad the code, return the requested section.
if codelength >= SEPARATOR_POSITION
resize!(bcode, codelength)
insert!(bcode, SEPARATOR_POSITION + 1, SEPARATOR)
return String(bcode)
end
# Pad and return the code.
resize!(bcode, codelength)
for i = codelength+1:SEPARATOR_POSITION
push!(bcode, PADDING_CHARACTER)
end
push!(bcode, SEPARATOR)
return String(bcode)
end
"""
    valpairgrid(latlo, prec, max)
Convert a coordinate in degrees to a non-negative integer count of `1/prec`
units, shifted by `max` degrees so that the result is positive.
"""
function valpairgrid(latlo::AbstractFloat, prec::Integer, max::Integer)
unsafe_trunc(Int64, floor(nextfloat(latlo) * prec)) + Int64(prec) * max
end
"""
decode(code)
Decode an Open Location Code into the location coordinates.
# Arguments:
- `code`: The Open Location Code to decode.
# Returns:
A `CodeArea` object that provides the latitude and longitude of two of the
corners of the area, the center, and the length of the original code.
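A `CodeArea` prints as
`CodeArea(code, latitude_low+latitude_precision, longitude_low+longitude_precision, codelength)`:
# Examples:
```julia
julia> decode("7FG49Q00+")
CodeArea("7FG49Q00+", 20.35+0.05, 2.75+0.05, 6)
```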
"""
decode(code::AbstractString) = CodeArea(normcode(code), _decode(code)...)
function _decode(code::AbstractString)
if !is_full(code)
throw(ArgumentError("Passed Open Location Code is not a valid full code - $code"))
end
# Strip out separator character (we've already established the code is
# valid so the maximum is one), and padding characters. Convert to upper
# case and constrain to the maximum number of digits.
bcode = UInt8[]
for c in code
c == PADDING_CHARACTER && break
c == SEPARATOR && continue
push!(bcode, uppercase(c))
end
if length(bcode) > MAX_DIGIT_COUNT
resize!(bcode, MAX_DIGIT_COUNT)
end
codelength = length(bcode)
# Initialise the values for each section. We work them out as integers and
# convert them to floats at the end.
normalLat = -LATITUDE_MAX * PAIR_PRECISION
normalLng = -LONGITUDE_MAX * PAIR_PRECISION
gridLat = 0
gridLng = 0
# How many digits do we have to process?
digits = min(codelength, PAIR_CODE_LENGTH)
# Define the place value for the most significant pair.
pv = PAIR_FIRST_PLACE_VALUE
# Decode the paired digits.
for i in 1:2:digits
normalLat += (findfirst(==(bcode[i]), CODE_DIGITS) - 1) * pv
normalLng += (findfirst(==(bcode[i+1]), CODE_DIGITS) - 1) * pv
if i < digits - 2
pv ÷= ENCODING_BASE
end
end
# Convert the place value to a float in degrees.
latPrecision = pv / PAIR_PRECISION
lngPrecision = pv / PAIR_PRECISION
# Process any extra precision digits.
if codelength > PAIR_CODE_LENGTH
# Initialise the place values for the grid.
rowpv = GRID_LAT_FIRST_PLACE_VALUE
colpv = GRID_LNG_FIRST_PLACE_VALUE
# How many digits do we have to process?
digits = min(codelength, MAX_DIGIT_COUNT)
for i in PAIR_CODE_LENGTH:digits-1
digitVal = findfirst(==(bcode[i+1]), CODE_DIGITS) - 1
row = digitVal ÷ GRID_COLUMNS
col = digitVal % GRID_COLUMNS
gridLat += row * rowpv
gridLng += col * colpv
if i < digits - 1
rowpv ÷= GRID_ROWS
colpv ÷= GRID_COLUMNS
end
end
# Adjust the precisions from the integer values to degrees.
latPrecision = rowpv / FINAL_LAT_PRECISION
lngPrecision = colpv / FINAL_LNG_PRECISION
end
# Merge the values from the normal and extra precision parts of the code.
lat = normalLat / PAIR_PRECISION + gridLat / FINAL_LAT_PRECISION
lng = normalLng / PAIR_PRECISION + gridLng / FINAL_LNG_PRECISION
lat, lng, codelength
end
"""
recover_nearest(code, latitude, longitude)
Recover the nearest matching code to a specified location.
Given a short code of between four and seven characters, this recovers
the nearest matching full code to the specified location.
# Arguments:
- `code`: A valid OLC character sequence.
- `latitude`: The latitude (in signed degrees) to use to
find the nearest matching full code.
- `longitude``: The longitude (in signed degrees) to use
to find the nearest matching full code.
# Returns:
The nearest full Open Location Code to the reference location that matches
the short code. If the passed code was not a valid short code, but was a
valid full code, it is returned with proper capitalization but otherwise
unchanged.
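# Examples:
```julia
julia> recover_nearest("9G8F+6W", 47.4, 8.6)
"8FVC9G8F+6W"
```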
"""
function recover_nearest(code::AbstractString, latitude::Real, longitude::Real)
# Clean up the passed code.
code = uppercase(code)
# if code is a valid full code, return it properly capitalized
is_full(code) && return code
if !is_short(code)
throw(ArgumentError("Passed short code is not valid - $code"))
end
# Ensure that latitude and longitude are valid.
referenceLatitude = clipLatitude(latitude)
referenceLongitude = normalizeLongitude(longitude)
# Compute the number of digits we need to recover.
paddingLength = SEPARATOR_POSITION - findfirst(SEPARATOR, code) + 1
# The resolution (height and width) of the padded area in degrees.
resolution = compute_precision(paddingLength, 1)
# Distance from the center to an edge (in degrees).
halfResolution = resolution / 2
# Use the reference location to pad the supplied short code and decode it.
refcode = encode(referenceLatitude, referenceLongitude, paddingLength)
codeArea = decode(view(refcode, 1:paddingLength) * code)
# How many degrees latitude is the code from the reference? If it is more
# than half the resolution, we need to move it north or south but keep it
# within -90 to 90 degrees.
latcenter = latitude_center(codeArea)
if referenceLatitude + halfResolution < latcenter &&
latcenter - resolution >= -LATITUDE_MAX
# If the proposed code is more than half a cell north of the reference location,
# it's too far, and the best match will be one cell south.
latcenter -= resolution
elseif referenceLatitude - halfResolution > latcenter &&
latcenter + resolution <= LATITUDE_MAX
# If the proposed code is more than half a cell south of the reference location,
# it's too far, and the best match will be one cell north.
latcenter += resolution
end
# Adjust longitude if necessary.
longcenter = longitude_center(codeArea)
if referenceLongitude + halfResolution < longcenter
longcenter -= resolution
elseif referenceLongitude - halfResolution > longcenter
longcenter += resolution
end
return encode(latcenter, longcenter, codeArea.codelength)
end
"""
shorten(code, latitude, longitude)
Remove characters from the start of an OLC code.
This uses a reference location to determine how many initial characters
can be removed from the OLC code. The number of characters that can be
removed depends on the distance between the code center and the reference
location.
The minimum number of characters that will be removed is four; at most eight
characters will be removed. The removed characters are simply trimmed, they
are not replaced by padding characters.
The reference location must be within 50% of the maximum range. This ensures
that the shortened code will be able to be recovered using slightly different
locations.
# Arguments
- `code`: A full, valid code to shorten.
- `latitude`: A latitude, in signed degrees, to use as the reference point.
- `longitude`: A longitude, in signed degrees, to use as the reference point.
# Returns:
Either the original code, if the reference location was not close enough,
or the shortest code which can be used to recover from the reference location.
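# Examples:
```julia
julia> shorten("8FVC9G8F+6W", 47.373313, 8.537562)
"8F+6W"
```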
"""
function shorten(code, latitude, longitude)
if !is_full(code)
throw(ArgumentError("Passed code is not valid and full: $code"))
end
if findfirst(PADDING_CHARACTER, code) !== nothing
throw(ArgumentError("Cannot shorten padded codes: $code"))
end
code = uppercase(code)
codeArea = decode(code)
# Ensure that latitude and longitude are valid.
latitude = clipLatitude(latitude)
longitude = normalizeLongitude(longitude)
# How close are the latitude and longitude to the code center.
coderange = max(abs(latitude_center(codeArea) - latitude),
abs(longitude_center(codeArea) - longitude))
    for i in length(PAIR_RESOLUTIONS)-2:-1:1
# Check if we're close enough to shorten. The range must be less than 1/2
# the resolution to shorten at all, and we want to allow some safety, so
# use 0.3 instead of 0.5 as a multiplier.
if coderange < (PAIR_RESOLUTIONS[i+1] * 0.3)
# Trim it.
return code[(i+1)*2+1:end]
end
end
return code
end
shorten(ca::CodeArea, lat, lon) = shorten(plus_code(ca), lat, lon)
"""
    latitude_precision(codelength)
Compute the latitude precision value for a given code length. Lengths <=
10 have the same precision for latitude and longitude, but lengths > 10
have different precisions due to the grid method having fewer columns than
rows.
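For example, at 10 digits both precisions agree, while at 11 digits they
differ:
# Examples:
```julia
julia> latitude_precision(10), longitude_precision(10)
(0.000125, 0.000125)

julia> latitude_precision(11), longitude_precision(11)
(2.5e-5, 3.125e-5)
```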
"""
latitude_precision(codelength) = compute_precision(codelength, GRID_ROWS)
longitude_precision(codelength) = compute_precision(codelength, GRID_COLUMNS)
function compute_precision(codelength, grid)
if codelength <= PAIR_CODE_LENGTH
ENCODING_BASE / (ENCODING_BASE^(codelength ÷ 2 - 1))
else
1 / (ENCODING_BASE^3 * grid^(codelength - PAIR_CODE_LENGTH))
end
end
"""
clipLatitude(latitude)
Clip a latitude into the range -90 to 90.
Args:
latitude: A latitude in signed degrees.
"""
function clipLatitude(latitude::Real)
return min(LATITUDE_MAX, max(-LATITUDE_MAX, latitude))
end
"""
normalizeLongitude(longitude)
Normalize a longitude into the range -180 to 180, not including 180.
Args:
longitude: A longitude in signed degrees.
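# Examples:
```julia
julia> OpenLocationCode.normalizeLongitude(181)
-179
```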
"""
function normalizeLongitude(longitude::Real)
while longitude < -LONGITUDE_MAX
longitude = longitude + LONGITUDE_MAX * 2
end
while longitude >= LONGITUDE_MAX
longitude = longitude - LONGITUDE_MAX * 2
end
return longitude
end
include("geometric.jl")
end # module
| OpenLocationCode | https://github.com/KlausC/OpenLocationCode.jl.git |
|
[
"MIT"
] | 1.0.1 | 01fa2a3303a93ad9fd037582d99c8f8429e9ec62 | code | 1203 |
export distance, area
const GEO_RADIUS = 6378.388e3 # meters
"""
    distance(lat1, lon1, lat2, lon2[, r])
Distance between two points on a sphere with radius `r` in meters
(default: `GEO_RADIUS`). The points are given in degrees.
Uses the haversine formula.
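# Examples:
```julia
julia> distance(-90.0, 0.0, 0.0, 0.0) ≈ π / 2 * 6378.388e3 # pole to equator
true
```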
"""
function distance(lat1::Real, lon1::Real, lat2::Real, lon2::Real, r::Real=GEO_RADIUS)
distance(promote(lat1, lon1, lat2, lon2, r)...)
end
function distance(lat1::T, lon1::T, lat2::T, lon2::T, r::T=6378.388e3) where T<:Real
dlat = lat1 - lat2
dlon = lon1 - lon2
a = sind(dlat / 2) ^ 2 + sind(dlon / 2) ^ 2 * cosd(lat1) * cosd(lat2)
dist = asin(sqrt(a)) * r * 2
dist
end
function distance(ca::CodeArea, cb::CodeArea)
lata, lona = latitude_center(ca), longitude_center(ca)
latb, lonb = latitude_center(cb), longitude_center(cb)
distance(lata, lona, latb, lonb)
end
function distance(a::AbstractString, b::AbstractString)
distance(decode(a), decode(b))
end
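"""
    area(ca::CodeArea)
Surface area in square meters of the code area `ca`, computed on a sphere
with radius `GEO_RADIUS`. Also accepts a plus code string.
"""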
function area(ca::CodeArea)
lat1 = latitude_high(ca)
lat0 = latitude_low(ca)
lod = longitude_high(ca) - longitude_low(ca)
r = GEO_RADIUS
area = (sind(lat1) - sind(lat0)) * lod / 180 * π * r^2
area
end
area(ca::AbstractString) = area(decode(ca))
| OpenLocationCode | https://github.com/KlausC/OpenLocationCode.jl.git |
|
[
"MIT"
] | 1.0.1 | 01fa2a3303a93ad9fd037582d99c8f8429e9ec62 | code | 2164 | # Test decoding Open Location Codes.
#
# Provides test cases for decoding valid codes.
#
# Format:
# code,length,latLo,lngLo,latHi,lngHi
DATA = [
("7FG49Q00+",6,20.35,2.75,20.4,2.8)
("7FG49QCJ+2V",10,20.37,2.782125,20.370125,2.78225)
("7FG49QCJ+2VX",11,20.3701,2.78221875,20.370125,2.78225)
("7FG49QCJ+2VXGJ",13,20.370113,2.782234375,20.370114,2.78223632813)
("8FVC2222+22",10,47.0,8.0,47.000125,8.000125)
("4VCPPQGP+Q9",10,-41.273125,174.785875,-41.273,174.786)
("62G20000+",4,0.0,-180.0,1,-179)
("22220000+",4,-90,-180,-89,-179)
("7FG40000+",4,20.0,2.0,21.0,3.0)
("22222222+22",10,-90.0,-180.0,-89.999875,-179.999875)
("6VGX0000+",4,0,179,1,180)
("6FH32222+222",11,1,1,1.000025,1.00003125)
("CFX30000+",4,89,1,90,2)
("62H20000+",4,1,-180,2,-179)
("62H30000+",4,1,-179,2,-178)
("CFX3X2X2+X2",10,89.9998750,1,90,1.0001250)
# Test non-precise latitude/longitude value
("6FH56C22+22",10,1.2000000000000028,3.4000000000000057,1.2001249999999999,3.4001250000000027)
# Validate that digits after the first 15 are ignored when decoding
("849VGJQF+VX7QR3J",15,37.5396691200,-122.3750698242,37.5396691600,-122.3750697021)
("849VGJQF+VX7QR3J7QR3J",15,37.5396691200,-122.3750698242,37.5396691600,-122.3750697021)
]
@testset "decoding $(d[1])" for d in DATA
code, codelength, latlo, longlo, lathi, longhi = d
ca = decode(code)
@test ca ≈ CodeArea(latlo, longlo, codelength)
@test latitude_high(ca) ≈ lathi
@test longitude_high(ca) ≈ longhi
end
@testset "validity" begin
@test is_full("C2XXXXXX+")
@test !is_full("F2XXXXXX+")
@test is_full("2VXXXX00+")
@test !is_full("2WXXXXXX+")
@test !is_valid("+0022")
@test !is_valid("00+")
@test_throws ArgumentError decode("XX+22")
@test_throws ArgumentError recover_nearest("X+", 0, 0)
@test_throws ArgumentError shorten("XX+22", 0, 0)
@test_throws ArgumentError shorten("22XX0000+", 0, 0)
end
@testset "show and convert CodeArea" begin
code = "9G000000+"
ca = decode(code)
@test latlong(ca) == (60.0, 30.0)
@test sprint(show, ca) == "CodeArea(\"$code\", 50.0+20.0, 20.0+20.0, 2)"
@test CodeArea(code) == ca
@test CodeArea(50, 20, 2) == ca
end
| OpenLocationCode | https://github.com/KlausC/OpenLocationCode.jl.git |
|
[
"MIT"
] | 1.0.1 | 01fa2a3303a93ad9fd037582d99c8f8429e9ec62 | code | 5736 | # Test encoding Open Location Codes.
#
# Provides test cases for encoding latitude and longitude to codes.
#
# Format:
# latitude,longitude,length,"expected code (empty if the input should cause an error)"
DATA = [
(20.375,2.775,6,"7FG49Q00+")
(20.3700625,2.7821875,10,"7FG49QCJ+2V")
(20.3701125,2.782234375,11,"7FG49QCJ+2VX")
(20.3701135,2.78223535156,13,"7FG49QCJ+2VXGJ")
(47.0000625,8.0000625,10,"8FVC2222+22")
(-41.2730625,174.7859375,10,"4VCPPQGP+Q9")
(0.5,-179.5,4,"62G20000+")
(-89.5,-179.5,4,"22220000+")
(20.5,2.5,4,"7FG40000+")
(-89.9999375,-179.9999375,10,"22222222+22")
(0.5,179.5,4,"6VGX0000+")
(1,1,11,"6FH32222+222")
################################################################################
#
# Special cases over 90 latitude
#
################################################################################
(90,1,2,"CF000000+")
(90,1,4,"CFX30000+")
(92,1,4,"CFX30000+")
(90,1,10,"CFX3X2X2+X2")
################################################################################
#
# Special cases with longitude needing normalization (<< -180 or >> +180)
#
################################################################################
(1,180,4,"62H20000+")
(1,181,4,"62H30000+")
(20.3701135,362.78223535156,13,"7FG49QCJ+2VXGJ")
(47.0000625,728.0000625,10,"8FVC2222+22")
(-41.2730625,1254.7859375,10,"4VCPPQGP+Q9")
(20.3701135,-357.217764648,13,"7FG49QCJ+2VXGJ")
(47.0000625,-711.9999375,10,"8FVC2222+22")
(-41.2730625,-905.2140625,10,"4VCPPQGP+Q9")
################################################################################
#
# Test non-precise latitude/longitude value
#
################################################################################
(1.2,3.4,10,"6FH56C22+22")
################################################################################
#
# Validate that codes generated with a length exceeding 15 significant digits
# return a 15-digit code
#
################################################################################
(37.539669125,-122.375069724,15,"849VGJQF+VX7QR3J")
(37.539669125,-122.375069724,16,"849VGJQF+VX7QR3J")
(37.539669125,-122.375069724,100,"849VGJQF+VX7QR3J")
################################################################################
#
# Test floating point representation/rounding errors.
#
################################################################################
(35.6,3.033,10,"8F75J22M+26")
(-48.71,142.78,8,"4R347QRJ+")
(-70,163.7,8,"3V252P22+")
(-2.804,7.003,13,"6F9952W3+C6222")
(13.9,164.88,12,"7V56WV2J+2222")
(-13.23,172.77,8,"5VRJQQCC+")
(40.6,129.7,8,"8QGFJP22+")
(-52.166,13.694,14,"3FVMRMMV+JJ2222")
(-14,106.9,6,"5PR82W00+")
(70.3,-87.64,13,"C62J8926+22222")
(66.89,-106,10,"95RPV2R2+22")
(2.5,-64.23,11,"67JQGQ2C+222")
(-56.7,-47.2,14,"38MJ8R22+222222")
(-34.45,-93.719,6,"46Q8H700+")
(-35.849,-93.75,12,"46P85722+C222")
(65.748,24.316,12,"9GQ6P8X8+6C22")
(-57.32,130.43,12,"3QJGMCJJ+2222")
(17.6,-44.4,6,"789QJJ00+")
(-27.6,-104.8,6,"554QC600+")
(41.87,-145.59,13,"83HPVCC6+22222")
(-4.542,148.638,13,"6R7CFJ5Q+66222")
(-37.014,-159.936,10,"43J2X3P7+CJ")
(-57.25,125.49,15,"3QJ7QF2R+2222222")
(48.89,-80.52,13,"86WXVFRJ+22222")
(53.66,170.97,14,"9V5GMX6C+222222")
(0.49,-76.97,15,"67G5F2RJ+2222222")
(40.44,-36.7,12,"89G5C8R2+2222")
(58.73,69.95,8,"9JCFPXJ2+")
(16.179,150.075,12,"7R8G53HG+J222")
(-55.574,-70.061,12,"37PFCWGQ+CJ22")
(76.1,-82.5,15,"C68V4G22+2222222")
(58.66,149.17,10,"9RCFM56C+22")
(-67.2,48.6,6,"3H4CRJ00+")
(-5.6,-54.5,14,"6867CG22+222222")
(-34,145.5,14,"4RR72G22+222222")
(-34.2,66.4,12,"4JQ8RC22+2222")
(17.8,-108.5,6,"759HRG00+")
(10.734,-168.294,10,"722HPPM4+JC")
(-28.732,54.32,8,"5H3P789C+")
(64.1,107.9,12,"9PP94W22+2222")
(79.7525,6.9623,8,"CFF8QX36+")
(-63.6449,-25.1475,8,"398P9V43+")
(35.019,148.827,11,"8R7C2R9G+JR2")
(71.132,-98.584,15,"C6334CJ8+RC22222")
(53.38,-51.34,12,"985C9MJ6+2222")
(-1.2,170.2,12,"6VCGR622+2222")
(50.2,-162.8,11,"922V6622+222")
(-25.798,-59.812,10,"5862652Q+R6")
(81.654,-162.422,14,"C2HVMH3H+J62222")
(-75.7,-35.4,8,"29P68J22+")
(67.2,115.1,11,"9PVQ6422+222")
(-78.137,-42.995,12,"28HVV274+6222")
(-56.3,114.5,11,"3PMPPG22+222")
(10.767,-62.787,13,"772VQ687+R6222")
(-19.212,107.423,10,"5PG9QCQF+66")
(21.192,-45.145,15,"78HP5VR4+R222222")
(16.701,148.648,14,"7R8CPJ2X+C62222")
(52.25,-77.45,15,"97447H22+2222222")
(-68.54504,-62.81725,11,"373VF53M+X4J")
(76.7,-86.172,12,"C68MPR2H+2622")
(-6.2,96.6,13,"6M5RRJ22+22222")
(59.32,-157.21,12,"93F48QCR+2222")
(29.7,39.6,12,"7GXXPJ22+2222")
(-18.32,96.397,10,"5MHRM9JW+2R")
(-30.3,76.5,11,"4JXRPG22+222")
(50.342,-112.534,15,"95298FR8+RC22222")
#
# There is no exact IEEE754 representation of 80.01 (or the negative), so test
# on either side.
#
(80.0100000001,58.57,15,"CHGW2H6C+2222222")
(80.0099999999,58.57,15,"CHGW2H5C+X2RRRRR")
(-80.0099999999,58.57,15,"2HFWXHRC+2222222")
(-80.0100000001,58.57,15,"2HFWXHQC+X2RRRRR")
#
# Add a few other examples.
#
(47.000000080000000,8.00022229,15,"8FVC2222+235235C")
(68.3500147997595,113.625636875353,15,"9PWM9J2G+272FWJV")
(38.1176000887231,165.441989844555,15,"8VC74C9R+2QX445C")
(-28.1217794010122,-154.066811473758,15,"5337VWHM+77PR2GR")
(59.99999999999998,-40.00000000000003,15, "98FXXXXX+XXXXXXX")
# from the specification document
(47.365562,8.524813,10,"8FVC9G8F+6W")
# close to upper boundaries
(prevfloat(50.0, 1),prevfloat(20.0, 1),4,"9G220000+")
(prevfloat(50.0, 2),prevfloat(20.0, 2),4,"8FXX0000+")
]
@testset "encoding $(d[3])-$(d[4])" for d in DATA
lat, long, codelength, code = d
@test encode(lat, long, codelength) == code
end
@testset "encode args" begin
@test_throws ArgumentError encode(0, 0, 1)
@test_throws ArgumentError encode(0, 0, 9)
@test encode(50, 8) == encode(50, 8, 10)
@test CodeArea(50, 8) == CodeArea(50, 8, 10)
end
| OpenLocationCode | https://github.com/KlausC/OpenLocationCode.jl.git |
|
[
"MIT"
] | 1.0.1 | 01fa2a3303a93ad9fd037582d99c8f8429e9ec62 | code | 573 |
const R = OpenLocationCode.GEO_RADIUS
@testset "distances" begin
lat1, lon1 = 50.1732, 8.3381
lat2, lon2 = 50.0279, 8.0334
c1 = encode(lat1, lon1, 15)
c2 = encode(lat2, lon2, 15)
@test distance(c1, c2) ≈ 27111.7984
@test distance(-90.0, 350, 0, 0) ≈ π / 2 * R
end
@testset "areas" begin
a00 = area(encode(-10, 0, 2))
a10 = area(encode(10, 0, 2))
a30 = area(encode(30, 0, 2))
a50 = area(encode(50, 0, 2))
a70 = area(encode(70, 0, 2))
s20 = a00 + 2*(a10 + a30 + a50 + a70)
s00 = s20 * 18
@test s00 ≈ 4π * R ^ 2
end
| OpenLocationCode | https://github.com/KlausC/OpenLocationCode.jl.git |
|
[
"MIT"
] | 1.0.1 | 01fa2a3303a93ad9fd037582d99c8f8429e9ec62 | code | 366 | using OpenLocationCode
using Test
@testset "OpenLocationCode" begin
@testset "validity" begin include("validityTests.jl") end
@testset "decode" begin include("decoding.jl") end
@testset "encode" begin include("encoding.jl") end
@testset "shorten" begin include("shortCodeTests.jl") end
@testset "geometric" begin include("geometric.jl") end
end
| OpenLocationCode | https://github.com/KlausC/OpenLocationCode.jl.git |
|
[
"MIT"
] | 1.0.1 | 01fa2a3303a93ad9fd037582d99c8f8429e9ec62 | code | 2398 | # Test shortening and extending codes.
#
# Format:
# full code,lat,lng,shortcode,test_type
# test_type is R for recovery only, :S for shorten only, or :B for Both.
DATA = [
("9C3W9QCJ+2VX",51.3701125,-1.217765625,"+2VX",:B)
# Adjust so we can't trim by 8 (+/- .000755)
("9C3W9QCJ+2VX",51.3708675,-1.217765625,"CJ+2VX",:B)
("9C3W9QCJ+2VX",51.3693575,-1.217765625,"CJ+2VX",:B)
("9C3W9QCJ+2VX",51.3701125,-1.218520625,"CJ+2VX",:B)
("9C3W9QCJ+2VX",51.3701125,-1.217010625,"CJ+2VX",:B)
# Adjust so we can't trim by 6 (+/- .0151)
("9C3W9QCJ+2VX",51.3852125,-1.217765625,"9QCJ+2VX",:B)
("9C3W9QCJ+2VX",51.3550125,-1.217765625,"9QCJ+2VX",:B)
("9C3W9QCJ+2VX",51.3701125,-1.232865625,"9QCJ+2VX",:B)
("9C3W9QCJ+2VX",51.3701125,-1.202665625,"9QCJ+2VX",:B)
# Added to detect error in recoverNearest functionality
("8FJFW222+",42.899,9.012,"22+",:B)
("796RXG22+",14.95125,-23.5001,"22+",:B)
# Reference location is in the 4 digit cell to the south.
("8FVC2GGG+GG",46.976,8.526,"2GGG+GG",:B)
# Reference location is in the 4 digit cell to the north.
("8FRCXGGG+GG",47.026,8.526,"XGGG+GG",:B)
# Reference location is in the 4 digit cell to the east.
("8FR9GXGG+GG",46.526,8.026,"GXGG+GG",:B)
# Reference location is in the 4 digit cell to the west.
("8FRCG2GG+GG",46.526,7.976,"G2GG+GG",:B)
# From the specification document
("8FVC9G8F+6W",47.373313,8.537562,"8F+6W",:B)
("8FVC9G8F+6W",47.339563,8.556687,"9G8F+6W",:B)
("8FVC9G8F+6W",47.985187,8.440688,"VC9G8F+6W",:B)
("8FVC9G8F+6W",38.800562,-9.064937,"8FVC9G8F+6W",:B)
# Added to detect errors recovering codes near the poles.
# This tests recovery function, but these codes won't shorten.
("CFX22222+22",89.6,0.0,"2222+22",:R)
("2CXXXXXX+XX",-81.0,0.0,"XXXXXX+XX",:R)
# Recovered full codes should be the full code
("8FRCG2GG+GG",46.526,7.976,"8FRCG2GG+GG",:R)
# Recovered full codes should be the uppercased full code
("8FRCG2GG+GG",46.526,7.976,"8frCG2GG+gG",:R)
]
@testset "shorten $(d[1])" for d in DATA
code, lat, long, short, opt = d
if opt in (:B, :S)
@test shorten(code, lat, long) == short
end
end
@testset "recover $(d[4])" for d in DATA
code, lat, long, short, opt = d
if opt in (:B, :R)
@test recover_nearest(short, lat, long) == code
end
end
@testset "shorten from CodeArea" begin
code = "23XXXXXX+"
lat, lon = -70, -140
@test shorten(code, lat, lon) == shorten(CodeArea(code), lat, lon)
end
| OpenLocationCode | https://github.com/KlausC/OpenLocationCode.jl.git |
|
[
"MIT"
] | 1.0.1 | 01fa2a3303a93ad9fd037582d99c8f8429e9ec62 | code | 1295 | # Test data for validity tests.
# Format of each line is:
# code,isValid,isShort,isFull
# Valid full codes:
DATA = [
("8FWC2345+G6",true,false,true)
("8FWC2345+G6G",true,false,true)
("8fwc2345+",true,false,true)
("8FWCX400+",true,false,true)
# Valid short codes:
("WC2345+G6g",true,true,false)
("2345+G6",true,true,false)
("45+G6",true,true,false)
("+G6",true,true,false)
# Invalid codes
("G+",false,false,false)
("+",false,false,false)
("8FWC2345+G",false,false,false)
("8FWC2_45+G6",false,false,false)
("8FWC2η45+G6",false,false,false)
("8FWC2345+G6+",false,false,false)
("8FWC2345G6+",false,false,false)
("8FWC2300+G6",false,false,false)
("WC2300+G6g",false,false,false)
("WC2345+G",false,false,false)
("WC2300+",false,false,false)
# Validate that codes at and exceeding 15 digits are still valid when all their
# digits are valid, and invalid when not.
("849VGJQF+VX7QR3J",true,false,true)
("849VGJQF+VX7QR3U",false,false,false)
("849VGJQF+VX7QR3JW",true,false,true)
("849VGJQF+VX7QR3JU",false,false,false)
("62H00002+",false,false,false)
("620AA000+",false,false,false)
("+",false,false,false)
]
@testset "validity $(d[1])" for d in DATA
code, isValid, isShort, isFull = d
@test is_valid(code) == isValid
@test is_short(code) == isShort
@test is_full(code) == isFull
end
| OpenLocationCode | https://github.com/KlausC/OpenLocationCode.jl.git |
|
[
"MIT"
] | 1.0.1 | 01fa2a3303a93ad9fd037582d99c8f8429e9ec62 | docs | 6964 | # OpenLocationCode
A package for Open Location Codes aka Plus Codes to represent geographical coordinates
[![Build Status][gha-img]][gha-url] [![Coverage Status][codecov-img]][codecov-url]
Encode and decode, shorten and recover relative to a position.
This `Julia` implementation originates from the `Python` sources and test data
in [google/open-location-code](https://github.com/google/open-location-code) v1.0.4
with this [specification](https://github.com/google/open-location-code/blob/main/docs/specification.md).
A concise description is found here [OLC](https://en.wikipedia.org/wiki/Open_Location_Code).
## Description
Open Location Codes or "Plus Codes" are short, 10-15 character codes that can be used instead
of street addresses. The codes can be generated and decoded offline, and use
a reduced character set that minimises the chance of codes including words.
Codes are able to be shortened relative to a nearby location. This means that
in many cases, only four to seven characters of the code are needed.
To recover the original code, the same location is not required, as long as
a nearby location is provided.
Codes represent rectangular areas rather than points, and the longer the
code, the smaller the area. A 10 character code represents a 13.9x13.9
meter area (at the equator). An 11 character code represents approximately
a 2.8x3.5 meter area, while a 15 character code is 4.5x13.6 mm.
Two encoding algorithms are used. The first 10 characters are pairs of
characters, one for latitude and one for longitude, using base 20. Each pair
reduces the area of the code by a factor of 400. Only even code lengths are
sensible, since an odd-numbered length would have sides in a ratio of 20:1.
At positions 11-15, the algorithm changes so that each character selects one
position from a 4x5 grid. This allows refinements by one to five characters.
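For illustration, here is the same location as in the examples below encoded
at a few lengths; shorter codes are prefixes of longer ones (padded up to the
separator), and each added pair or grid digit shrinks the code area:

```julia
julia> encode(47.365590, 8.524997, 4)  # pair section only, padded
"8FVC0000+"

julia> encode(47.365590, 8.524997, 10) # full pair encoding
"8FVC9G8F+6X"

julia> encode(47.365590, 8.524997, 12) # two extra 4x5 grid digits
"8FVC9G8F+6XQQ"
```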
Input degrees may be integer or floating point numbers; output degrees are always `Float64`.
The latitude and longitude should be [WGS84](https://en.wikipedia.org/wiki/World_Geodetic_System#WGS84) values.
## API
```doc
encode(latitude, longitude[, codelength])
Encode a location into an Open Location Code. Produces a code of the specified length, or the default length
if no length is provided.
The length determines the accuracy of the code. The default length is 10 characters, returning a code of
approximately 13.9x13.9 meters. Longer codes represent smaller areas, but lengths > 15 are sub-centimetre and
so 11 or 12 are probably the limit of useful codes.
Arguments:
• latitude: A latitude in signed degrees. Will be clipped to the range -90 to 90.
• longitude: A longitude in signed degrees. Will be normalised to the range -180 to 180.
• codelength: The number of significant digits in the output code, not including any separator
characters.
Examples:
julia> encode(50.173168, 8.338086, 11)
"9F2C58FQ+768"
```
```doc
decode(code)
Decode an Open Location Code into the location coordinates.
Arguments:
• code: The Open Location Code to decode.
Returns:
A CodeArea object that provides the latitude and longitude of two of the corners of the area, the center, and the length of the original code.
```
```doc
shorten(code, latitude, longitude)
Remove characters from the start of an OLC code. This uses a reference location to determine how many initial characters can be removed from the OLC code. The
number of characters that can be removed depends on the distance between the code center and the reference location.
The minimum number of characters that will be removed is four; at most eight characters will be removed. The removed characters are simply trimmed, they are
not replaced by padding characters. The reference location must be within 50% of the maximum range. This ensures that the shortened code
will be able to be recovered using slightly different locations.
Arguments
• code: A full, valid code to shorten.
• latitude: A latitude, in signed degrees, to use as the reference point.
• longitude: A longitude, in signed degrees, to use as the reference point.
Returns:
Either the original code, if the reference location was not close enough, or the shortest code which can be used to recover from the reference location.
```
```doc
recover_nearest(code, latitude, longitude)
Recover the nearest matching code to a specified location. Given a short code of between four and seven characters, this recovers the nearest matching full code
to the specified location.
Arguments:
• code: A valid OLC character sequence.
• latitude: The latitude (in signed degrees) to use to find the nearest matching full code.
• longitude`: The longitude (in signed degrees) to use to find the nearest matching full code.
Returns:
The nearest full Open Location Code to the reference location that matches the short code. If the passed code was not a valid short code, but was a valid full code, it is returned with proper capitalization but otherwise unchanged.
```
## Examples
```jldoctest
julia> using OpenLocationCode
julia> # Encode a location, default accuracy:
encode(47.365590, 8.524997)
"8FVC9G8F+6X"
julia> ca = CodeArea(47.365590, 8.524997, 6)
CodeArea("8FVC9G00+", 47.36559+0.05, 8.524997+0.05, 6)
julia> # Encode a location using five digits of additional refinement:
encode(47.365590, 8.524997, 15)
"8FVC9G8F+6XQQ435"
julia> # Decode a full code:
code = "8FVCCJ8F+6X"
"8FVCCJ8F+6X"
julia> ca = CodeArea(code)
CodeArea("8FVCCJ8F+6X", 47.4155+0.000125, 8.624875+0.000125, 10)
julia> ca = decode(code)
CodeArea("8FVCCJ8F+6X", 47.4155+0.000125, 8.624875+0.000125, 10)
julia> println("# Center is lat=$(latitude_center(ca)), lon=$(longitude_center(ca))")
# Center is lat=47.4155625, lon=8.6249375
julia> println("# extension of area is $(latitude_precision(ca) * 111321)x$(longitude_precision(ca) * 111321 * cosd(latitude_low(ca))) m")
# extension of area is 13.915125x9.416042416499675 m
julia> # Attempt to trim the first characters from a code:
shorten("8FVC9G8F+6X", 47.5, 8.5)
"9G8F+6X"
julia> # Recover the full code from a short code:
recover_nearest("9G8F+6X", 47.4, 8.6)
"8FVC9G8F+6X"
julia> recover_nearest("8F+6X", 47.4, 8.6)
"8FVCCJ8F+6X"
julia> area(ca) # area in m^2
131.03201149353194
julia> distance(ca, CodeArea("8FVC0000+")) # distance in meters
13296.381156694346
julia> latlong(CodeArea("9F000000+")) # center coordinates
(60.0, 10.0)
```
© 2023 Klaus Crusius
[gha-img]: https://github.com/KlausC/OpenLocationCode.jl/actions/workflows/CI.yml/badge.svg?branch=main
[gha-url]: https://github.com/KlausC/OpenLocationCode.jl/actions/workflows/CI.yml?query=branch%3Amain
[codecov-img]: https://codecov.io/gh/KlausC/OpenLocationCode.jl/branch/main/graph/badge.svg
[codecov-url]: https://codecov.io/gh/KlausC/OpenLocationCode.jl
| OpenLocationCode | https://github.com/KlausC/OpenLocationCode.jl.git |
|
[
"MIT"
] | 0.1.9 | 5f86727335b9896bc6f9fa81964a03174be91dbe | code | 5298 | module ChemicalIdentifiers
const DATA_DB = Dict{Symbol,Any}()
const DATA_INFO = Dict{Symbol,Any}()
import UUIDs, Unicode, Downloads
import Arrow
import Scratch, Preferences
export search_chemical
download_cache = ""
const PKG_UUID = parse(UUIDs.UUID,"fa4ea961-1416-484e-bda2-883ee1634ba5")
"""
search_chemical(query, cache=ChemicalIdentifiers.SEARCH_CACHE)
Given a query, performs a search on a database of over 70000 common compounds from CalebBell/Chemicals, returning a Named Tuple with the identifiers of the substance in question.
## Examples
```
julia> using ChemicalIdentifiers
julia> res = search_chemical("water")
(pubchemid = 962, CAS = (7732, 18, 5), formula = "H2O", MW = 18.01528, smiles = "O", InChI = "H2O/h1H2", InChI_key = "XLYOFNOQVPJJNP-UHFFFAOYSA-N", iupac_name = "oxidane", common_name = "water")
#worst case scenario, not found on the present databases
julia> @btime search_chemical("dimethylpyruvic acid22",nothing)
273.700 μs (264 allocations: 15.05 KiB)
missing
#common compound found in the short database
julia> @btime search_chemical("methane",nothing)
7.075 μs (57 allocations: 2.97 KiB)
(pubchemid = 297, CAS = (74, 82, 8), formula = "CH4", MW = 16.04246, smiles = "C", InChI = "CH4/h1H4", InChI_key = "VNWKTOKETHGBQD-UHFFFAOYSA-N", iupac_name = "methane", common_name = "methane")
```
A query is usually a string and its type is detected automatically when possible. The supported query types are:
### PubChemID:
By using any `<:Integer` (or a string containing an integer)
```julia
julia> search_chemical(8003)
(pubchemid = 8003, CAS = (109, 66, 0), formula = "C5H12", MW = 72.14878, smiles =
"CCCCC", InChI = "C5H12/c1-3-5-4-2/h3-5H2,1-2H3", InChI_key = "OFBQJSOFQDEBGM-UHFFFAOYSA-N", iupac_name = "pentane", common_name = "pentane")
```
### CAS registry number:
By using a Tuple of integers or a string with the digits separated by `-` :
```
julia> search_chemical((67,56,1))
(pubchemid = 887, CAS = (67, 56, 1), formula = "CH4O", MW = 32.04186, smiles = "CO", InChI = "CH4O/c1-2/h2H,1H3", InChI_key = "OKKJLVBELUTLKV-UHFFFAOYSA-N", iupac_name = "methanol", common_name = "methanol")
search_chemical((67,56,1),nothing) == search_chemical("67-56-1",nothing) #true
```
### SMILES:
By using a string starting with `SMILES=` :
```
julia> search_chemical("SMILES=N")
(pubchemid = 222, CAS = (7664, 41, 7), formula = "H3N", MW = 17.03052, smiles = "N", InChI = "H3N/h1H3", InChI_key = "QGZKDVFQNNGYKY-UHFFFAOYSA-N", iupac_name = "azane", common_name = "ammonia")
```
### InChI:
By using a string starting with `InChI=1/` or `InChI=1S/` :
```
julia> search_chemical("InChI=1/C2H4/c1-2/h1-2H2")
(pubchemid = 6325, CAS = (74, 85, 1), formula = "C2H4", MW = 28.05316, smiles = "C=C", InChI = "C2H4/c1-2/h1-2H2", InChI_key = "VGGSQFUCUMXWEO-UHFFFAOYSA-N", iupac_name = "ethene", common_name = "ethene")
```
### InChI key:
By using a string with the pattern `XXXXXXXXXXXXXX-YYYYYYYYFV-P`:
```
julia> search_chemical("IMROMDMJAWUWLK-UHFFFAOYSA-N")
(pubchemid = 11199, CAS = (9002, 89, 5),
formula = "C2H4O", MW = 44.05256, smiles
= "C=CO", InChI = "C2H4O/c1-2-3/h2-3H,1H2", InChI_key = "IMROMDMJAWUWLK-UHFFFAOYSA-N", iupac_name = "ethenol", common_name
= "ethenol")
```
Searches by CAS and PubChemID are a little bit faster thanks to being encoded as native numeric types; other properties are stored as strings.
The package stores each query in `ChemicalIdentifiers.SEARCH_CACHE` as a `Dict{String,Any}`, so subsequent queries on the same (or similar) strings don't pay the cost of searching in the database.
If you don't want to store the query, you could use `search_chemical(query,nothing)`, or, if you want your own cache to be used, pass your own cache via `search_chemical(query,mycache)`.
"""
function search_chemical end
include("data_script.jl")
include("search_types.jl")
include("search.jl")
function _precompile_()
ccall(:jl_generating_output, Cint, ()) == 1 || return nothing
Base.precompile(search_id_impl,(Int,Symbol))
Base.precompile(search_id_impl,(String,Symbol))
Base.precompile(search_id_impl,(Tuple{Int32,Int16,Int16},Symbol))
Base.precompile(search_chemical_id,(AnyQuery,))
end
function clear_download_cache!()
Preferences.@set_preferences!("CLEAR_CACHE" => "true")
@info("ChemicalIdentifiers.jl download cache has been marked for deletion; restart your Julia session for this change to take effect.")
end
function __init__()
clear_cache = Preferences.@load_preference("CLEAR_CACHE","false")
if clear_cache == "true"
@info "deleting download cache..."
Scratch.delete_scratch!(PKG_UUID,"databases")
Preferences.@set_preferences!("CLEAR_CACHE" => "false")
end
global download_cache = Scratch.@get_scratch!("databases")
url_short = "https://github.com/CalebBell/chemicals/raw/master/chemicals/Identifiers/chemical%20identifiers%20pubchem%20small.tsv"
url_long = "https://github.com/CalebBell/chemicals/raw/master/chemicals/Identifiers/chemical%20identifiers%20pubchem%20large.tsv"
load_data!(:short,url= url_short)
load_data!(:long,url = url_long)
load_db!(:short)
load_db!(:long)
return nothing
end
end
| ChemicalIdentifiers | https://github.com/longemen3000/ChemicalIdentifiers.jl.git |
|
[
"MIT"
] | 0.1.9 | 5f86727335b9896bc6f9fa81964a03174be91dbe | code | 5768 |
"""
cas_parse(str)
given a CAS number string, returns a tuple of 3 Int32 containing the CAS numbers, it performs no validation on the data
"""
function cas_parse(str)
a1,a2,a3 = split(str,'-')
n1 = parse(Int32,a1)
n2 = parse(Int16,a2)
n3 = parse(Int16,a3)
return (n1,n2,n3)
end
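# A hedged usage sketch: assuming the canonical "NNNNNNN-NN-N" CAS layout, e.g. water:
#   cas_parse("7732-18-5") == (7732, 18, 5)  # true; element types are Int32, Int16, Int16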
function unique_idxs_sorted(vec)
n = length(vec)
res = zeros(Int,n)
k = 1
res[1] = k
@inbounds for i = 2:n
if !isequal(vec[i],vec[i-1])
k+=1
res[k] = i
end
end
resize!(res,k)
return res
end
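# A hedged usage sketch: on an already-sorted vector, the returned indices mark the first
# occurrence of each distinct value, e.g.
#   unique_idxs_sorted([1, 1, 2, 3, 3]) == [1, 3, 4]  # true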
"""
load_db!(dbtype::Symbol)
downloads, processes and stores a database corresponding to the one with the same key stored in DATA_INFO
"""
function load_db!(dbtype::Symbol)
data = DATA_INFO[dbtype]
if !isfile(data.textdb)
@info ":" * string(dbtype) * " database file not found, downloading from " * data.url
url = data.url
fname = data.textdb
path = Downloads.download(url,fname)
@info ":" * string(dbtype) * " database file downloaded."
end
path = data.textdb
if !isfile(data.db)
@info ":" * string(dbtype) * " arrow file not generated, processing..."
arrow_db,arrow_synonym_db,arrow_sort_db =parse_and_write_db!(dbtype)
else
arrow_db = Arrow.Table(data.db)
arrow_synonym_db = Arrow.Table(data.symsdb)
arrow_sort_db = Arrow.Table(data.sorteddb)
end
DATA_DB[dbtype] = (arrow_db,arrow_synonym_db,arrow_sort_db)
return arrow_db,arrow_synonym_db,arrow_sort_db
end
function parse_and_write_db!(dbtype::Symbol)
data = DATA_INFO[dbtype]
path = data.textdb
i = 0
for _ in eachline(path)
i +=1
end
pubchemid = zeros(Int64,i)
CAS = Vector{Tuple{Int32,Int16,Int16}}(undef,i)
formula = Vector{String}(undef,i)
MW = Vector{Float64}(undef,i)
smiles = Vector{String}(undef,i)
InChI = Vector{String}(undef,i)
InChI_key = Vector{String}(undef,i)
iupac_name = Vector{String}(undef,i)
common_name = Vector{String}(undef,i)
_synonyms = Vector{Vector{String}}(undef,i)
i = 0
for line in eachline(path)
i += 1
strs = line |> z->rstrip(z) |> z->split(z,'\t',limit=10)
pubchemid[i] = parse(Int64,strs[1])
CAS[i] = cas_parse(strs[2])
formula[i] = strs[3]
MW[i] = parse(Float64,strs[4])
smiles[i] = strs[5]
InChI[i] = strs[6]
InChI_key[i] = strs[7]
iupac_name[i] = strs[8]
common_name[i] = strs[9]
if length(strs) >= 10 #synonyms are present in the tenth column
sym_i = split(strs[10],('\t',';'))
push!(sym_i,strs[8])
_synonyms[i] = sym_i
else
_synonyms[i] = String[strs[8]]
end
end
syms_i = mapreduce(length,+,_synonyms)
synonyms_list = Vector{String}(undef,syms_i)
synonyms_index = Vector{Int}(undef,syms_i)
#for some reason, some empty strings are generated as synonyms.
#those are eliminated here.
k = 0
for (ii,sym_vec) in pairs(_synonyms)
for (jj,sym) in pairs(sym_vec)
if !isempty(sym)
k+=1
synonyms_list[k] = sym
synonyms_index[k] = ii
end
end
end
resize!(synonyms_list,k)
resize!(synonyms_index,k)
pubchemid_sort =sortperm(pubchemid)
CAS_sort = sortperm(CAS)
formula_sort =sortperm(formula)
MW_sort =sortperm(MW)
smiles_sort =sortperm(smiles)
InChI_sort =sortperm(InChI)
InChI_key_sort =sortperm(InChI_key)
iupac_name_sort =sortperm(iupac_name)
common_name_sort =sortperm(common_name)
synonyms_sort = sortperm(synonyms_list)
list = synonyms_list[synonyms_sort]
index = synonyms_index[synonyms_sort]
#there is the possibility of repeated elements.
#this is not present in CalebBell/chemicals because it uses a dict, so equal keys
#store the same value
list_unique_idx = unique_idxs_sorted(list)
list = list[list_unique_idx]
index = index[list_unique_idx]
db = (;pubchemid, CAS, formula, MW, smiles, InChI, InChI_key, iupac_name, common_name)
synonym_db = (;list,index)
sort_db = (;pubchemid_sort, CAS_sort, formula_sort, MW_sort, smiles_sort, InChI_sort, InChI_key_sort, iupac_name_sort, common_name_sort)
Arrow.write(data.db,db)
Arrow.write(data.symsdb,synonym_db)
Arrow.write(data.sorteddb,sort_db)
arrow_db = Arrow.Table(data.db)
arrow_synonym_db = Arrow.Table(data.symsdb)
arrow_sort_db = Arrow.Table(data.sorteddb)
return arrow_db,arrow_synonym_db,arrow_sort_db
end
"""
load_data!(key::Symbol;url=nothing,file=nothing)
registers a new database entry in the global DATA_INFO dict. Download and process this database with `load_db!(key)`.
"""
function load_data!(key::Symbol;url=nothing,file=nothing)
if url == file == nothing
throw(ArgumentError("a file or a url must be provided."))
elseif (url !== nothing) & (file !== nothing)
throw(ArgumentError("a file or a url must be provided."))
elseif (url !== nothing)
fname = joinpath(download_cache, "pubchem_" * string(key))
farrow = fname * ".arrow"
fsyms = fname * "_synonyms.arrow"
sorted = fname * "_sorted.arrow"
data = (url=url,textdb=fname,db=farrow,symsdb=fsyms,sorteddb=sorted)
else #file
fname = file
url = fname
farrow = fname * ".arrow"
fsyms = fname * "_synonyms.arrow"
sorted = fname * "_sorted.arrow"
data = (url=url,textdb=fname,db=farrow,symsdb=fsyms,sorteddb=sorted)
end
DATA_INFO[key] = data
end
|
[
"MIT"
] | 0.1.9 | 5f86727335b9896bc6f9fa81964a03174be91dbe | code | 9091 | const SEARCH_CACHE = Dict{String,Any}()
"""
synonyms(query)
Given a chemical search query, return the synonyms associated with that query.
This function doesn't have any cache.
# Examples:
```julia
synonyms("water")
```
"""
function synonyms(query)
compound_id,key = search_chemical_id(detect_query(query))
return __synonyms(compound_id,key)
end
function __synonyms(idx,key)
db,sdb = DATA_DB[key]
return sdb.list[findall(isequal(idx),sdb.index)]
end
function build_result(idx,key)
if idx == -1
return missing
end
db,sdb = DATA_DB[key]
pubchemid= db.pubchemid[idx]
CAS= db.CAS[idx]
formula= db.formula[idx]
MW= db.MW[idx]
smiles= db.smiles[idx]
InChI= db.InChI[idx]
InChI_key= db.InChI_key[idx]
iupac_name= db.iupac_name[idx]
common_name= db.common_name[idx]
return (;pubchemid, CAS, formula, MW, smiles, InChI, InChI_key, iupac_name, common_name)
end
function detect_query(id::String)
if is_element(id)
return ElementQuery(id)
elseif is_cas(id)
return CASQuery(id)
elseif is_pubchemid(id)
return PubChemIDQuery(id)
elseif is_inchi(id)
return InChIQuery(id)
elseif is_inchikey(id)
return InChIKeyQuery(id)
elseif is_smiles(id)
return SMILESQuery(id)
else
return AnyQuery(id)
end
end
function detect_query(id::Int)
return PubChemIDQuery(id)
end
detect_query(::Missing) = MissingQuery()
function detect_query(id::Tuple{Integer,Integer,Integer})
return CASQuery(id)
end
function db_iteration_order(DB)
#database iteration order: short database, long database, then any user-added databases
dbnames = [:short,:long]
for k in keys(DATA_DB)
if !(k in (:short,:long))
push!(dbnames,k)
end
end
return dbnames
end
#string modifications to look for
function modified_ids!(id_vector,id)
#normalizes and removes any extra spaces
normalized = replace(Unicode.normalize(id,casefold = true,stripmark=true),r"\s+" => " ")
#if the normalized id is different than the input value
if normalized != id
push!(id_vector,normalized)
normalized_no_spaces = replace(normalized," "=>"")
push!(id_vector,normalized_no_spaces)
normalized_no_dashes = replace(normalized,"-"=>"")
push!(id_vector,normalized_no_dashes)
normalized_no_spaces_no_dash = replace(normalized_no_spaces,"-"=>"")
push!(id_vector,normalized_no_spaces_no_dash)
end
no_spaces = replace(id," "=>"")
push!(id_vector,no_spaces)
no_dashes = replace(id,"-"=>"")
push!(id_vector,no_dashes)
no_spaces_no_dash = replace(no_spaces,"-"=>"")
push!(id_vector,no_spaces_no_dash)
return unique!(id_vector)
end
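# A hedged usage sketch: the casefolded/stripped variants are pushed first, followed by the
# space- and dash-less forms of the original id, e.g.
#   modified_ids!(String[], "Sodium Chloride")
#   # -> ["sodium chloride", "sodiumchloride", "SodiumChloride", "Sodium Chloride"]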
function search_chemical(query,cache=SEARCH_CACHE)
if cache !== nothing
#the premise is that two queries that differ only in casing should resolve to the same chemical.
if query isa NTuple{3,Integer}
ID = tuple_to_casstr(query)
else
ID = string(query)
end
if haskey(cache,ID)
return cache[ID]
end
normalized_id = Unicode.normalize(ID,casefold = true,stripmark=true)
if haskey(cache,normalized_id)
return cache[normalized_id]
end
compound_id,key = search_chemical_id(detect_query(query))
res = build_result(compound_id,key) #return db.common_name[compound_id]
cache[ID] = res
cache[normalized_id] = res
return res
else
compound_id,key = search_chemical_id(detect_query(query))
return build_result(compound_id,key) #return db.common_name[compound_id]
end
end
function search_chemical_id(ID::AnyQuery;skip_common_name = false,try_strategies = true, original_query = "")::Tuple{Int,Symbol}
#skip_common_name skips search on the common_name col
compound_id = -1
id = value(ID)
fail = (-1,:not_found)
if original_query == id
#this query has already been looked for.
return fail
end
#set original_query.
if original_query == ""
original_query = id
end
search_done = false
_keys = db_iteration_order(DATA_DB)
for key in _keys
db,sdb,sortdb = DATA_DB[key]
if !skip_common_name
searchvec = view(db.common_name,sortdb.common_name_sort)
idx_sort = searchsorted(searchvec,id)
if length(idx_sort) == 1 #found an element
idx = only(sortdb.common_name_sort[idx_sort])
compound_id =idx
search_done = true
end
end
if !search_done
idx = searchsorted(sdb.list,id)
if length(idx) == 1 #found an element
compound_id = only(sdb.index[idx])
search_done = true
end
end
#found in db,returning
search_done && return compound_id,key
end
#bail out here if requested
!try_strategies && return fail
#==
result not found, trying same strategies as present in CalebBell/Chemicals
#strategy 1: trying without spaces and dashs.
==#
_ids = Vector{String}(undef,0)
#adds unique modified variants, writes those variants in _ids
modified_ids!(_ids,id)
#those matches find chemicals of the form n-name
#or 1-name
if occursin(r"1-[A-Za-z]+",_ids[1]) | occursin(r"n-[A-Za-z]+",_ids[1])
modified_ids!(_ids,chop(id,head=2,tail=0))
end
#e.g. 'propylbutyl ether' -> 'propyl butyl ether'
modified_ids!(_ids,replace(replace(id,"yl" => "yl "),r"\s+" => " "))
#remove initial lookup value
filter!(!isequal(id),_ids)
#remove original query value
filter!(!isequal(original_query),_ids)
for _id in _ids
#we don't try strategies here, because adding characters leads to a stack overflow in search
compound_id,key = search_chemical_id(AnyQuery(_id),original_query = original_query,try_strategies = false)
if compound_id !== -1
search_done = true
break
end
end
search_done && return compound_id,key
#strategy 2: trying to match in the form 'water (H2O)'
re = r"\w+\s+\([\s\w]+\)"
if occursin(re,id)
_id = id |> z->replace(z,")"=>"") |> z->split(z,"(") .|> strip
id1,id2 = first(_id),last(_id)
compound_id1,key1 = search_chemical_id(AnyQuery(id1),original_query = original_query)
compound_id2,key2 = search_chemical_id(AnyQuery(id2),original_query = original_query)
if (compound_id1 == compound_id2) & (key1==key2)
search_done = true
compound_id = compound_id2
key = key2
end
end
search_done && return compound_id,key
#nothing has been found
return fail
end
function search_chemical_id(ID::CASQuery)::Tuple{Int,Symbol}
id = cas(ID)
compound_id,key = search_id_impl(id,:CAS)
if compound_id != -1
return compound_id,key
end
return search_chemical_id(AnyQuery(value(ID)),skip_common_name=true,try_strategies=false)
end
function search_chemical_id(ID::InChIKeyQuery)::Tuple{Int,Symbol}
id = value(ID)
return search_id_impl(id,:InChI_key)
end
function search_chemical_id(ID::PubChemIDQuery)::Tuple{Int,Symbol}
id = id_num(ID)
return search_id_impl(id,:pubchemid)
end
function search_chemical_id(ID::InChIQuery)::Tuple{Int,Symbol}
id = value(ID)
search_id_impl(id,:InChI)
end
function search_chemical_id(ID::SMILESQuery)::Tuple{Int,Symbol}
id = value(ID)
search_id_impl(id,:smiles)
end
function search_chemical_id(::MissingQuery)
return -1,missing
end
arrowtype(::Type{Int}) = Arrow.Primitive{Int,Vector{Int}}
arrowtype(::Type{String}) = Arrow.List{String, Int32, Vector{UInt8}}
arrowtype(::Type{Tuple{Int32,Int16,Int16}}) = Arrow.Struct{Tuple{Int32, Int16, Int16}, Tuple{Arrow.Primitive{Int32, Vector{Int32}}, Arrow.Primitive{Int16, Vector{Int16}}, Arrow.Primitive{Int16, Vector{Int16}}}}
function search_id_impl(id::T,k::Symbol)::Tuple{Int,Symbol} where {T}
arrowT = arrowtype(T)
return search_id_impl(id,k,arrowT)
end
function search_id_impl(id::T,sym::Symbol,::Type{A})::Tuple{Int,Symbol} where {T,A}
compound_id::Int = -1
search_done = false
dbnames = db_iteration_order(DATA_DB)::Vector{Symbol}
for dbname in dbnames
db,_,sortdb = DATA_DB[dbname]
dbcol = getproperty(db,sym)::A
sort_sym = Symbol(sym,:_sort)
dbidx = getproperty(sortdb,sort_sym)::Arrow.Primitive{Int,Vector{Int}}
searchvec = view(dbcol,dbidx)
idxs = searchsorted(searchvec,id)
if length(idxs) == 1 #found an element
compound_id = only(dbidx[idxs])::Int
search_done = true
elseif length(idxs) > 1
throw("Search is not unique, multiple matches found for $id in database $dbname, on the $sym column")
end
if search_done
return compound_id,dbname
end
end
return compound_id,sym #to identify where it fails
end
| ChemicalIdentifiers | https://github.com/longemen3000/ChemicalIdentifiers.jl.git |
|
[
"MIT"
] | 0.1.9 | 5f86727335b9896bc6f9fa81964a03174be91dbe | code | 3713 | abstract type AbstractSearchQuery end
struct CASQuery <: AbstractSearchQuery
ID::String
cas::Tuple{Int32,Int16,Int16}
end
function CASQuery(str::String)
cas = cas_parse(str)
return CASQuery(str,cas)
end
function CASQuery(val::NTuple{3,Integer})
n1 = val[1]
n2 = val[2]
n3 = val[3]
@assert n1 >=0
@assert n2 >=0
@assert n3 >=0
v1 = convert(Int32,n1)
v2 = convert(Int16,n2)
v3 = convert(Int16,n3)
cas = (v1,v2,v3)
return CASQuery("",cas)
end
function tuple_to_casstr(n1,n2,n3)
return string(n1) * "-" * string(n2) * "-" * string(n3)
end
tuple_to_casstr(val::NTuple{3,Integer}) = tuple_to_casstr(val...)
cas(id::CASQuery) = id.cas
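# A hedged usage sketch: both constructors accept either representation of a CAS number,
# and tuple_to_casstr maps a tuple back to the dashed string, e.g.
#   cas(CASQuery("7732-18-5")) == cas(CASQuery((7732, 18, 5))) == (7732, 18, 5)  # true
#   tuple_to_casstr((7732, 18, 5)) == "7732-18-5"                                # true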
struct InChIQuery <: AbstractSearchQuery
ID::String
end
struct InChIKeyQuery <: AbstractSearchQuery
ID::String
end
struct PubChemIDQuery <: AbstractSearchQuery
ID::String
val::Int
end
function PubChemIDQuery(str::String)
val = parse(Int,str)
@assert val >0
return PubChemIDQuery(str,val)
end
function PubChemIDQuery(val::Integer)
@assert val >0
return PubChemIDQuery("",val)
end
id_num(ID::PubChemIDQuery) = ID.val
struct SMILESQuery <: AbstractSearchQuery
ID::String
end
struct FormulaQuery <: AbstractSearchQuery
ID::String
end
struct ElementQuery <: AbstractSearchQuery
ID::String
end
struct AnyQuery <: AbstractSearchQuery
ID::String
end
struct MissingQuery <: AbstractSearchQuery end
value(id::AbstractSearchQuery)::String = strip(id.ID)
function value(id::SMILESQuery)::String
id = strip(id.ID)
return replace(id,"SMILES=" =>"")
end
function value(id::InChIQuery)::String
id_raw = strip(id.ID)
id_lower = lowercase(id_raw)
re1 = r"^inchi=1s/"
re2 = r"^inchi=1/"
t1 = occursin(re1,id_lower)
t2 = occursin(re2,id_lower)
if t1
return chop(id_raw,head=9,tail=0)
elseif t2
return chop(id_raw,head=8,tail=0)
else
throw("incorrect InChI passed")
end
end
"""
is_cas(str)::Bool
check if a given string is a cas number and returns true or false accordingly.
"""
function is_cas(str)
str = strip(str)
#regex from https://gist.github.com/KhepryQuixote/00946f2f7dd5f89324d8#file-pypubchemxtractor-py-L22
cas_regex = r"^[1-9][0-9]{1,6}-[0-9]{2}-[0-9]$"
return occursin(cas_regex,str)
end
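# A hedged sketch of the detection rule above:
#   is_cas("7732-18-5")  # true: 2-7 leading digits, two digits, one check digit
#   is_cas("7732185")    # false: the dashes are required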
"""
is_inchi(str)::Bool
check if a given string is an InChI name and returns true or false accordingly.
"""
function is_inchi(str)
str = strip(str)
#regex from https://chemistry.stackexchange.com/a/86892
inchi_regex = r"^InChI\=1S?\/[^\s]+(\s|$)"
return occursin(inchi_regex,str)
end
"""
is_inchikey(str)::Bool
check if a given string is an InChI key and returns true or false accordingly.
"""
function is_inchikey(str)
str = strip(str)
#14(A-Z)-10(A-Z)-1(A-Z)
inchikey_regex =r"[A-Z]{14}-[A-Z]{10}-[A-Z]{1}"
return occursin(inchikey_regex,str)
end
"""
is_element(str)::Bool
check if a given string is an element symbol and returns true or false accordingly. Element lookup is not yet implemented, so this currently always returns false.
"""
function is_element(str)
return false #element lookup is not yet implemented
end
"""
is_pubchemid(str)::Bool
check if a given string is a PubChem ID and returns true or false accordingly.
A PubChem ID is a positive integer.
"""
function is_pubchemid(str)
str = strip(str)
res = tryparse(Int,str)
if res === nothing
return false
elseif res <= 0
return false
else
return true
end
end
"""
is_smiles(str)::Bool
check if a given string is a SMILES query (prefixed with `SMILES=`) and returns true or false accordingly.
"""
function is_smiles(str)
str = strip(str)
str = uppercase(str)
smiles_regex =r"^SMILES="
return occursin(smiles_regex,str)
end
| ChemicalIdentifiers | https://github.com/longemen3000/ChemicalIdentifiers.jl.git |
|
[
"MIT"
] | 0.1.9 | 5f86727335b9896bc6f9fa81964a03174be91dbe | code | 563 | using ChemicalIdentifiers
using Test
@testset "ChemicalIdentifiers.jl" begin
res1 = search_chemical("water",nothing)
res2 = search_chemical("SMILES=O",nothing)
res3 = search_chemical("water (H2O)", nothing)
@test res1.formula == res2.formula
@test res1.formula == res3.formula
@test ismissing(search_chemical("[3-(2,3-EPOXYPROXY)PROPYL]TRIMETHOXYSILANE",nothing))
@test ismissing(search_chemical(missing))
end
@testset "issue #10" begin
@test !ismissing(search_chemical("propyl ethylether ",nothing)) #spurious spaces
end
| ChemicalIdentifiers | https://github.com/longemen3000/ChemicalIdentifiers.jl.git |
|
[
"MIT"
] | 0.1.9 | 5f86727335b9896bc6f9fa81964a03174be91dbe | docs | 5414 | # ChemicalIdentifiers.jl
[](https://github.com/longemen3000/ChemicalIdentifiers.jl/actions)
A chemical identifiers search package, using the databases present at CalebBell/chemicals.
## Installation
```
using Pkg
Pkg.add("ChemicalIdentifiers")
```
The databases are downloaded, parsed, processed and stored as Apache Arrow files at the first package usage, so the first usage may take some time.
## Usage
This package exports `search_chemical`, that, given a search string, performs a search on a database of over 70000 compounds, returning a Named Tuple with the identifiers of the substance in question.
```
julia> using ChemicalIdentifiers
julia> res = search_chemical("water")
(pubchemid = 962, CAS = (7732, 18, 5), formula = "H2O", MW = 18.01528, smiles = "O", InChI = "H2O/h1H2", InChI_key = "XLYOFNOQVPJJNP-UHFFFAOYSA-N", iupac_name = "oxidane", common_name = "water")
#worst case scenario, not found on the present databases
julia> @btime search_chemical("dimethylpyruvic acid22",nothing)
273.700 μs (264 allocations: 15.05 KiB)
missing
#common compound found in the short database
julia> @btime search_chemical("methane",nothing)
7.075 μs (57 allocations: 2.97 KiB)
(pubchemid = 297, CAS = (74, 82, 8), formula = "CH4", MW = 16.04246, smiles = "C", InChI = "CH4/h1H4", InChI_key = "VNWKTOKETHGBQD-UHFFFAOYSA-N", iupac_name = "methane", common_name = "methane")
```
A query is usually a string and its type is detected automatically when possible. The supported query types are:
- **PubChemID**: by using any `<:Integer` (or a string containing an integer)
```
julia> search_chemical(8003)
(pubchemid = 8003, CAS = (109, 66, 0), formula = "C5H12", MW = 72.14878, smiles =
"CCCCC", InChI = "C5H12/c1-3-5-4-2/h3-5H2,1-2H3", InChI_key = "OFBQJSOFQDEBGM-UHFFFAOYSA-N", iupac_name = "pentane", common_name = "pentane")
```
- **CAS registry number**:by using a Tuple of integers or a string with the digits separated by `-` :
```
julia> search_chemical((67,56,1))
(pubchemid = 887, CAS = (67, 56, 1), formula = "CH4O", MW = 32.04186, smiles = "CO", InChI = "CH4O/c1-2/h2H,1H3", InChI_key = "OKKJLVBELUTLKV-UHFFFAOYSA-N", iupac_name = "methanol", common_name = "methanol")
search_chemical((67,56,1),nothing) == search_chemical("67-56-1",nothing) #true
```
- **SMILES**: by using a string starting with `SMILES=` :
```
julia> search_chemical("SMILES=N")
(pubchemid = 222, CAS = (7664, 41, 7), formula = "H3N", MW = 17.03052, smiles = "N", InChI = "H3N/h1H3", InChI_key = "QGZKDVFQNNGYKY-UHFFFAOYSA-N", iupac_name = "azane", common_name = "ammonia")
```
- **InChI** : by using a string starting with `InChI=1/` or `InChI=1S/` :
```
julia> search_chemical("InChI=1/C2H4/c1-2/h1-2H2")
(pubchemid = 6325, CAS = (74, 85, 1), formula = "C2H4", MW = 28.05316, smiles = "C=C", InChI = "C2H4/c1-2/h1-2H2", InChI_key = "VGGSQFUCUMXWEO-UHFFFAOYSA-N", iupac_name = "ethene", common_name = "ethene")
```
- **InChI key** : by using a string with the pattern `XXXXXXXXXXXXXX-YYYYYYYYFV-P`:
```
julia> search_chemical("IMROMDMJAWUWLK-UHFFFAOYSA-N")
(pubchemid = 11199, CAS = (9002, 89, 5),
formula = "C2H4O", MW = 44.05256, smiles
= "C=CO", InChI = "C2H4O/c1-2-3/h2-3H,1H2", InChI_key = "IMROMDMJAWUWLK-UHFFFAOYSA-N", iupac_name = "ethenol", common_name
= "ethenol")
```
Searches by CAS and PubChemID are a little bit faster thanks to being encoded as native numeric types; other properties are stored as strings.
The package stores each query in `ChemicalIdentifiers.SEARCH_CACHE` as a `Dict{String,Any}`, so subsequent queries on the same (or similar) strings, dont pay the cost of searching in the database.
If you don't want to store the query, you could use `search_chemical(query,nothing)`, or, if you want your own cache to be used, pass your own cache via `search_chemical(query,mycache)`.
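For example, a minimal sketch of supplying your own cache (the `my_cache` name is just illustrative):
```
julia> using ChemicalIdentifiers

julia> my_cache = Dict{String,Any}();

julia> search_chemical("water", my_cache);

julia> haskey(my_cache, "water")
true
```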
## Custom Databases
If you want to add your own databases, you could use the (unexported) data utilities to do so. lets say we also want to add the inorganic database located at https://github.com/CalebBell/chemicals/blob/master/chemicals/Identifiers/Inorganic%20db.tsv. we could do:
```
using ChemicalIdentifiers
inorganic_url = "https://github.com/CalebBell/chemicals/raw/master/chemicals/Identifiers/Inorganic%20db.tsv"
ChemicalIdentifiers.load_data!(:inorganic,url = inorganic_url)
ChemicalIdentifiers.load_db!(:inorganic)
```
or if you already have a local database:
```
using ChemicalIdentifiers
filepath = "path/to/my/db.tsv"
ChemicalIdentifiers.load_data!(:custom,file = filepath)
ChemicalIdentifiers.load_db!(:custom)
```
`ChemicalIdentifiers.load_data!` will generate a named tuple of file paths (stored in `ChemicalIdentifiers.DATA_INFO`), and `ChemicalIdentifiers.load_db!` will use that data to generate the corresponding Apache Arrow files and store those in a [scratch](https://github.com/JuliaPackaging/Scratch.jl) space (`ChemicalIdentifiers.download_cache`). This download cache can be cleaned (in case a download goes wrong) with `ChemicalIdentifiers.clear_download_cache!()`
The raw databases are then stored in `ChemicalIdentifiers.DATA_DB`. if the data was already processed, then the arrow files are read directly, saving significant loading time.
In case of adding user databases, those are searched first, so there is a possibility of collision.
| ChemicalIdentifiers | https://github.com/longemen3000/ChemicalIdentifiers.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | code | 757 | using Documenter, PowerModelsAnalytics
makedocs(
modules = [PowerModelsAnalytics],
format = Documenter.HTML(analytics = "", mathengine = Documenter.MathJax()),
sitename = "PowerModelsAnalytics",
authors = "David M Fobes, Carleton Coffrin, and contributors.",
pages = [
"Home" => "index.md",
"Manual" => [
"Getting Started" => "quickguide.md"
],
"Library" => [
"Functions" => "library.md",
# "Graphs" => "graphs.md",
# "Plots" => "plots.md",
# "Layouts" => "layouts.md"
],
"Developer" => [
"Developer" => "developer.md"
],
]
)
deploydocs(
repo = "github.com/lanl-ansi/PowerModelsAnalytics.jl.git",
)
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | code | 1061 | module PowerModelsAnalytics
import LightGraphs
import Vega
import Setfield: @set!
import Colors
import Colors: @colorant_str
import ColorVectorSpace
import LinearAlgebra: norm
import Random: rand
import Statistics: mean, std
import Compat: isnothing
import PyCall
import Memento

"Memento logger used by the data checks in `core/parameters.jl`"
const LOGGER = Memento.getlogger(@__MODULE__)
const nx = PyCall.PyNULL()
const scipy = PyCall.PyNULL()
function __init__()
copy!(nx, PyCall.pyimport_conda("networkx", "networkx"))
copy!(scipy, PyCall.pyimport_conda("scipy", "scipy"))
end
include("core/types.jl") # must be first to properly define new types
include("core/options.jl")
include("core/data.jl")
include("core/parameters.jl")
include("vega/default_specs.jl")
include("graph/common.jl")
include("graph/metrics.jl")
include("layouts/common.jl")
include("layouts/networkx.jl")
include("plots/graph.jl")
include("plots/analytics.jl")
include("plots/networks.jl")
include("core/export.jl") # must be last to properly export all functions
end
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | code | 7440 | "converts nan values to 0.0"
_convert_nan(x) = isnan(x) ? 0.0 : x
"replaces nan values in a collection with zeros"
_replace_nan(v) = map(x -> isnan(x) ? zero(x) : x, v)
"Returns true if InfrastructureGraph `graph` has a `property` on an edge or a node `obj`"
function hasprop(graph::InfrastructureGraph{T}, obj::Union{Int,LightGraphs.AbstractEdge}, property::Symbol) where T <: LightGraphs.AbstractGraph
if haskey(graph.metadata, obj)
return haskey(graph.metadata[obj], property)
else
return false
end
end
"Sets a `property` in the metadata at `key` of `graph` on `obj`"
function set_property!(graph::InfrastructureGraph{T}, obj::Union{Int,LightGraphs.AbstractEdge}, key::Symbol, property::Any) where T <: LightGraphs.AbstractGraph
if !haskey(graph.metadata, obj)
graph.metadata[obj] = Dict{Symbol,Any}()
end
graph.metadata[obj][key] = property
end
"Sets multiple `properties` in the metadata of `graph` on `obj` at `key`"
function set_properties!(graph::InfrastructureGraph{T}, obj::Union{Int,LightGraphs.AbstractEdge}, properties::Dict{Symbol,<:Any}) where T <: LightGraphs.AbstractGraph
if !haskey(graph.metadata, obj)
graph.metadata[obj] = Dict{Symbol,Any}()
end
merge!(graph.metadata[obj], properties)
end
"Gets the property in the metadata of `graph` on `obj` at `key`. If property doesn't exist, returns `default`"
function get_property(graph::InfrastructureGraph{T}, obj::Union{Int,LightGraphs.AbstractEdge}, key::Symbol, default::Any) where T <: LightGraphs.AbstractGraph
return get(get(graph.metadata, obj, Dict{Symbol,Any}()), key, default)
end
"Adds an edge defined by `i` & `j` to `graph`"
function add_edge!(graph::InfrastructureGraph{T}, i::Int, j::Int) where T <: LightGraphs.AbstractGraph
LightGraphs.add_edge!(graph.graph, i, j)
end
"Returns an iterator of all of the nodes/vertices in `graph`"
function vertices(graph::InfrastructureGraph{T}) where T <: LightGraphs.AbstractGraph
return LightGraphs.vertices(graph.graph)
end
"Returns an iterator of all the edges in `graph`"
function edges(graph::InfrastructureGraph{T}) where T <: LightGraphs.AbstractGraph
return LightGraphs.edges(graph.graph)
end
"Returns all of the metadata for `obj` in `graph`"
function properties(graph::InfrastructureGraph{T}, obj::Union{Int,LightGraphs.AbstractEdge}) where T <: LightGraphs.AbstractGraph
return get(graph.metadata, obj, Dict{Symbol,Any}())
end
""
function identify_blocks(case::Dict{String,<:Any}; node_settings::Dict{String,<:Any}=default_node_settings_math, edge_settings::Dict{String,<:Any}=default_edge_settings_math)::Dict{Int,Set{Any}}
cc = calc_connected_components(case; node_settings=node_settings, edge_settings=edge_settings)
return Dict{Int,Set{Any}}(i => s for (i,s) in enumerate(cc))
end
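# A hedged usage sketch (assumes a PowerModels-style `case` dictionary): each block is a set
# of node (bus) id strings keyed by an arbitrary integer, e.g.
#   blocks = identify_blocks(case)  # Dict(1 => Set(["1", "2"]), 2 => Set(["3"]), ...)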
""
function calc_connected_components(data::Dict{String,<:Any}; node_settings::Dict{String,<:Any}=default_node_settings_math, edge_settings::Dict{String,<:Any}=default_edge_settings_math)::Set{Set{Any}}
if Int(get(data, "data_model", 1)) == 0
if node_settings == default_node_settings_math
node_settings = default_node_settings_eng
end
if edge_settings == default_edge_settings_math
edge_settings = default_edge_settings_eng
end
end
(disabled_node_key, disabled_node_value) = get(node_settings, "disabled", "bus_type" => 4)
active_node = Dict{Any,Dict{String,Any}}(x for x in data[get(node_settings, "node", "bus")] if Int(x.second[disabled_node_key]) != disabled_node_value)
active_node_ids = Set{Any}([i for (i,node) in active_node])
neighbors = Dict{Any,Vector{Any}}(i => [] for i in active_node_ids)
for (type, settings) in edge_settings
for (id, obj) in get(data, type, Dict{Any,Dict{String,Any}}())
(disabled_key, disabled_value) = get(settings, "disabled", "status" => 0)
(open_key, open_value) = get(settings, "open", "state" => 0)
f_key = get(settings, "fr_node", "f_bus")
t_key = get(settings, "to_node", "t_bus")
nodes_key = get(settings, "nodes", "")
status = Int(get(obj, disabled_key, 1)) != disabled_value && Int(get(obj, open_key, 1)) != open_value
if status
if !isempty(nodes_key) && haskey(obj, nodes_key)
edges_set = Set{Any}()
for f_node in obj[nodes_key]
for t_node in obj[nodes_key]
if f_node != t_node
push!(edges_set, Set([f_node, t_node]))
end
end
end
for (f_node, t_node) in edges_set
push!(neighbors["$f_node"], "$t_node")
push!(neighbors["$t_node"], "$f_node")
end
else
push!(neighbors["$(obj[f_key])"], "$(obj[t_key])")
push!(neighbors["$(obj[t_key])"], "$(obj[f_key])")
end
end
end
end
component_lookup = Dict(i => Set{Any}([i]) for i in active_node_ids)
touched = Set{Any}()
for i in active_node_ids
if !(i in touched)
_cc_dfs(i, neighbors, component_lookup, touched)
end
end
ccs = (Set(values(component_lookup)))
return ccs
end
"DFS on a graph"
function _cc_dfs(i, neighbors, component_lookup, touched)
push!(touched, i)
for j in neighbors[i]
if !(j in touched)
for k in component_lookup[j]
push!(component_lookup[i], k)
end
for k in component_lookup[j]
component_lookup[k] = component_lookup[i]
end
_cc_dfs(j, neighbors, component_lookup, touched)
end
end
end
"""
`ans = is_active(case, block; sources=default_sources_math)`
Determines whether a `block` is "active", e.g. energized, based on the criteria in `sources`
Arguments:
`case::Dict{String,<:Any}`: Network case
`block::Set{<:Any}`: block of node ids
`sources::Dict{String,<:Any}`: sources with settings that define the criteria for "active"
Returns:
`ans::Bool`
"""
function is_active(case::Dict{String,<:Any}, block::Set{<:Any}; sources::Dict{String,<:Any}=default_sources_math)::Bool
if Int(get(case, "data_model", 1)) == 0 && sources == default_sources_math
sources = default_sources_eng
end
for node in block
for (type,settings) in sources
node_key = get(settings, "node", "bus")
(disabled_key, disabled_value) = get(settings, "disabled", "status" => 0)
(inactive_real_key, inactive_real_value) = get(settings, "inactive_real", "" => 0)
(inactive_imaginary_key, inactive_imaginary_value) = get(settings, "inactive_imaginary", "" => 0)
for (_,obj) in get(case, type, Dict{Any,Dict{String,Any}}())
if node == obj[node_key] && (isa(obj[disabled_key], Enum) ? Int(obj[disabled_key]) : obj[disabled_key]) != disabled_value
if (!isempty(inactive_real_key) && haskey(obj, inactive_real_key) && any(obj[inactive_real_key] .!= inactive_real_value)) || (!isempty(inactive_imaginary_key) && haskey(obj, inactive_imaginary_key) && any(obj[inactive_imaginary_key] .!= inactive_imaginary_value))
return true
end
end
end
end
end
return false
end
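# A hedged usage sketch tying the functions above together (assumes a PowerModels-style
# `case` dictionary):
#   for (i, block) in identify_blocks(case)
#       println("block $i energized: ", is_active(case, block))
#   end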
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | code | 780 | # PowerModelsAnalytics exports everything except internal symbols, which are defined as
# those whose name starts with an underscore. If you don't want all of these
# symbols in your environment, then use `import PowerModelsAnalytics` instead of
# `using PowerModelsAnalytics`.
# Do not add PowerModelsAnalytics-defined symbols to this exclude list. Instead, rename
# them with an underscore.
const _EXCLUDE_SYMBOLS = [Symbol(@__MODULE__), :eval, :include]
for sym in names(@__MODULE__, all=true)
sym_string = string(sym)
if sym in _EXCLUDE_SYMBOLS || startswith(sym_string, "_")
continue
end
if !(Base.isidentifier(sym) || (startswith(sym_string, "@") &&
Base.isidentifier(sym_string[2:end])))
continue
end
@eval export $sym
end
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | code | 7499 | "Default plot colors, including all supported component variations"
const default_colors = Dict{String,Colors.Colorant}(
"enabled open free edge" => colorant"gold",
"enabled open fixed edge" => colorant"red",
"enabled closed free edge" => colorant"green",
"enabled closed fixed edge" => colorant"black",
"disabled open fixed edge" => colorant"orange",
"disabled open free edge" => colorant"orange",
"disabled closed fixed edge" => colorant"orange",
"disabled closed free edge" => colorant"orange",
"connector" => colorant"lightgrey",
"enabled active extra node" => colorant"cyan",
"enabled inactive extra node" => colorant"orange",
"disabled inactive extra node" => colorant"red",
"disabled active extra node" => colorant"red",
"enabled node wo demand" => colorant"darkgrey",
"disabled node wo demand" => colorant"grey95",
"enabled node w demand" => colorant"green3",
"disabled node w demand" => colorant"gold",
)
"default color range for partially loaded buses"
const default_demand_color_range = Colors.range(default_colors["disabled node w demand"], default_colors["enabled node w demand"], length=11)
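# A hedged customization sketch: callers may override any subset of these colors with a
# partial dictionary, which is merged over `default_colors` in `apply_plot_network_metadata!`:
#   my_colors = Dict{String,Colors.Colorant}("enabled node w demand" => colorant"blue")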
"default edge types for eng data structure"
const default_edge_settings_eng = Dict{String,Any}(
"line" => Dict{String,Any}(
"fr_node" => "f_bus",
"to_node" => "t_bus",
"disabled" => "status" => 0,
),
"transformer" => Dict{String,Any}(
"fr_node" => "f_bus",
"to_node" => "t_bus",
"nodes" => "bus",
"disabled" => "status" => 0,
),
"switch" => Dict{String,Any}(
"fr_node" => "f_bus",
"to_node" => "t_bus",
"disabled" => "status" => 0,
"open" => "state" => 0,
"dispatchable" => "dispatchable" => 1,
)
)
"default edge types for math data structure (PowerModels, PowerModelsDistribution"
const default_edge_settings_math = Dict{String,Any}(
"branch" => Dict{String,Any}(
"fr_node" => "f_bus",
"to_node" => "t_bus",
"disabled" => "br_status" => 0,
),
"transformer" => Dict{String,Any}(
"fr_node" => "f_bus",
"to_node" => "t_bus",
"disabled" => "br_status" => 0,
),
"dcline" => Dict{String,Any}(
"fr_node" => "f_bus",
"to_node" => "t_bus",
"disabled" => "br_status" => 0,
),
"switch" => Dict{String,Any}(
"fr_node" => "f_bus",
"to_node" => "t_bus",
"disabled" => "status" => 0,
)
)
"default edge type between blocks (PowerModels, PowerModelsDistribution"
const default_block_connectors = Dict{String,Any}(
"switch" => Dict{String,Any}(
"fr_node" => "f_bus",
"to_node" => "t_bus",
"disabled" => "status" => 0,
),
)
"default node object to plot for eng data structure (PowerModelsDistribution)"
const default_extra_nodes_eng = Dict{String,Any}(
"generator" => Dict{String,Any}(
"node" => "bus",
"label" => "~",
"size" => "pg",
"inactive_real" => "pg" => 0,
"inactive_imaginary" => "qg" => 0,
),
"solar" => Dict{String,Any}(
"node" => "bus",
"label" => "!",
"size" => "pg",
"inactive_real" => "pg" => 0,
"inactive_imaginary" => "qg" => 0,
),
"storage" => Dict{String,Any}(
"node" => "bus",
"label" => "S",
"size" => "ps",
"inactive_real" => "ps" => 0,
"inactive_imaginary" => "qs" => 0,
),
"voltage_source" => Dict{String,Any}(
"node" => "bus",
"label" => "V",
"size" => "pg",
"inactive_real" => "pg" => 0,
"inactive_imaginary" => "qg" => 0,
)
)
"default node object to plot for math data structure (PowerModels, PowerModelsDistribution)"
const default_extra_nodes_math = Dict{String,Any}(
"gen" => Dict{String,Any}(
"node" => "gen_bus",
"label" => "~",
"size" => "pg",
"inactive_real" => "pg" => 0,
"inactive_imaginary" => "qg" => 0
),
"storage" => Dict{String,Any}(
"node" => "storage_bus",
"label" => "S",
"size" => "ps",
"inactive_real" => "ps",
"inactive_imaginary" => "qs"
)
)
"default node information for math model (PowerModels, PowerModelsDistribution)"
const default_node_settings_math = Dict{String,Any}(
"node" => "bus",
"disabled" => "bus_type" => 4,
"x" => "lon",
"y" => "lat",
)
"default node information for eng model (PowerModelsDistribution)"
const default_node_settings_eng = Dict{String,Any}(
"node" => "bus",
"disabled" => "status" => 0,
"x" => "lon",
"y" => "lat"
)
"default sources (generators) for the math model (PowerModels,PowerModelsDistribution)"
const default_sources_math = Dict{String,Any}(
"gen" => Dict{String,Any}(
"node" => "gen_bus",
"disabled" => "gen_status" => 0,
"inactive_real" => "pg" => 0,
"inactive_imaginary" => "qg" => 0,
),
"storage" => Dict{String,Any}(
"node" => "storage_bus",
"disabled" => "storage_status" => 0,
"inactive_real" => "ps" => 0,
"inactive_imaginary" => "qs" => 0,
)
)
"default sources (generators) for the eng model (PowerModelsDistribution)"
const default_sources_eng = Dict{String,Any}(
"generator" => Dict{String,Any}(
"node" => "bus",
"disabled" => "status" => 0,
"inactive_real" => "pg" => 0,
"inactive_imaginary" => "qg" => 0,
),
"storage" => Dict{String,Any}(
"node" => "bus",
"disabled" => "status" => 0,
"inactive_real" => "ps" => 0,
"inactive_imaginary" => "qs" => 0,
),
"solar" => Dict{String,Any}(
"node" => "bus",
"disabled" => "status" => 0,
"inactive_real" => "pg" => 0,
"inactive_imaginary" => "pg" => 0,
),
"voltage_source" => Dict{String,Any}(
"node" => "bus",
"disabled" => "status" => 0,
"inactive_real" => "pg" => 0,
"inactive_imaginary" => "pg" => 0,
),
)
"default demands (loads) for eng model (PowerModelsDistribution)"
const default_demands_eng = Dict{String,Any}(
"load" => Dict{String,Any}(
"node" => "bus",
"disabled" => "status" => 0,
"inactive_real" => "pd" => 0,
"inactive_imaginary" => "qd" => 0,
"original_demand_real" => "pd_nom",
"original_demand_imaginary" => "qd_nom",
"status" => "status"
)
)
"default demands (loads) for math model (PowerModels, PowerModelsDistribution)"
const default_demands_math = Dict{String,Any}(
"load" => Dict{String,Any}(
"node" => "load_bus",
"disabled" => "load_status" => 0,
"inactive_real" => "pd" => 0,
"inactive_imaginary" => "qd" => 0,
"status" => "status"
)
)
"default dpi of plots"
const default_plot_dpi = 100
"default size of plots in pixels"
const default_plot_size = Tuple{Int,Int}((600,600))
"default fontsize in pt"
const default_fontsize = 10
"default fontcolor"
const default_fontcolor = :black
"default fontfamily"
const default_fontfamily = "Times"
"default text alignemtn"
const default_textalign = :center
"default upper and lower bound of the size of nodes"
const default_node_size_limits = Vector{Real}([2, 2.5])
"default upper and lower bound of the width of edges"
const default_edge_width_limits = Vector{Real}([0.5, 0.75])
"default spring constant for spring_layout"
const default_spring_constant = 1e-2
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | code | 11546 | ### Checks parameters in PowerModels components ###
""
function parameter_check_summary(data::Dict{String,Any})
if get(data, "multinetwork", false)
error("parameter_check_summary does not yet support multinetwork data")
end
if haskey(data, "conductors")
error("parameter_check_summary does not yet support multiconductor data")
end
if !(haskey(data, "per_unit") && data["per_unit"])
error("parameter_check_summary requires data in per_unit")
end
messages = Dict{String,Any}()
messages["bus"] = _parameter_check_bus(data)
messages["load"] = _parameter_check_load(data)
messages["shunt"] = _parameter_check_shunt(data)
messages["gen"] = _parameter_check_gen(data)
messages["branch"] = _parameter_check_branch(data)
messages["network"] = _parameter_check_network(data)
return messages
end
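# A hedged usage sketch (assumes a per-unit, single-network case parsed by e.g. PowerModels):
#   messages = parameter_check_summary(case)
#   messages["bus"][:vm_bounds]  # Set of bus indices with atypical voltage magnitude bounds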
""
function _parameter_check_bus(data::Dict{String,Any})
messages = Dict{Symbol,Set{Int}}()
messages[:vm_bounds] = Set{Int}()
messages[:vm_start] = Set{Int}()
messages[:ref_bus] = Set{Int}()
for (i,bus) in data["bus"]
index = bus["index"]
if bus["vmin"] <= 0.8 || bus["vmax"] >= 1.2
Memento.warn(LOGGER, "bus $(i) voltage magnitude bounds $(bus["vmin"]) - $(bus["vmax"]) are out side of typical bounds 0.8 - 1.2")
push!(messages[:vm_bounds], index)
end
if bus["vm"] < bus["vmin"] || bus["vm"] > bus["vmax"]
Memento.warn(LOGGER, "bus $(i) voltage magnitude start is not within given bounds $(bus["vmin"]) - $(bus["vmax"])")
push!(messages[:vm_start], index)
end
if bus["bus_type"] == 3 && !isapprox(bus["va"], 0.0)
Memento.warn(LOGGER, "reference bus $(i) voltage angle start is not zero $(bus["va"])")
push!(messages[:ref_bus], index)
end
end
return messages
end
""
function _parameter_check_load(data::Dict{String,Any})
messages = Dict{Symbol,Set{Int}}()
messages[:p_source] = Set{Int}()
messages[:q_source] = Set{Int}()
for (i,load) in data["load"]
index = load["index"]
if load["pd"] < 0.0
Memento.warn(LOGGER, "load $(i) is acting as an active power source")
push!(messages[:p_source], index)
end
if load["qd"] < 0.0
Memento.warn(LOGGER, "load $(i) is acting as a reactive power source")
push!(messages[:q_source], index)
end
end
return messages
end
""
function _parameter_check_shunt(data::Dict{String,Any})
messages = Dict{Symbol,Set{Int}}()
messages[:sign_mismatch] = Set{Int}()
for (i,shunt) in data["shunt"]
index = shunt["index"]
if shunt["gs"] < 0.0 && shunt["bs"] < 0.0 || shunt["gs"] > 0.0 && shunt["bs"] > 0.0
Memento.warn(LOGGER, "shunt $(i) admittance has matching signs $(shunt["gs"] + shunt["bs"]im)")
push!(messages[:sign_mismatch], index)
end
end
return messages
end
""
function _parameter_check_gen(data::Dict{String,Any})
messages = Dict{Symbol,Set{Int}}()
messages[:p_demand] = Set{Int}()
messages[:qg_bounds_nonzero] = Set{Int}()
messages[:qg_bounds_large] = Set{Int}()
messages[:qg_bounds_shape] = Set{Int}()
messages[:cost_negative] = Set{Int}()
for (i,gen) in data["gen"]
index = gen["index"]
max_pg_mag = max(abs(gen["pmin"]), abs(gen["pmax"]))
if gen["pmin"] < 0.0
Memento.warn(LOGGER, "generator $(i) can behave as an active power demand")
push!(messages[:p_demand], index)
end
if gen["qmin"] > 0.0 || gen["qmax"] < 0.0
Memento.warn(LOGGER, "generator $(i) reactive power bounds $(gen["qmin"]) - $(gen["qmax"]) do not include 0.0")
push!(messages[:qg_bounds_nonzero], index)
end
# filter out reactive support devices
if !isapprox(max_pg_mag, 0.0)
if abs(gen["qmin"]) > max_pg_mag || abs(gen["qmax"]) > max_pg_mag
Memento.warn(LOGGER, "generator $(i) reactive power capabilities $(gen["qmin"]) - $(gen["qmax"]) exceed active power capabilities $(gen["pmin"]) - $(gen["pmax"])")
push!(messages[:qg_bounds_large], index)
end
if gen["qmin"] < -max_pg_mag/12.0 && gen["qmax"] > max_pg_mag/4.0
Memento.warn(LOGGER, "generator $(i) reactive power capabilities $(gen["qmin"]) - $(gen["qmax"]) to do not match the 1/12 - 1/4 rule of active power capabilities $(max_pg_mag)")
push!(messages[:qg_bounds_shape], index)
end
end
if haskey(gen, "model") && haskey(gen, "cost")
if gen["model"] == 1
else
@assert gen["model"] == 2
if any(x < 0 for x in gen["cost"])
Memento.warn(LOGGER, "generator $(i) has negative cost coefficients $(gen["cost"])")
push!(messages[:cost_negative], index)
end
end
end
end
return messages
end
""
function _parameter_check_branch(data::Dict{String,Any})
messages = Dict{Symbol,Set{Int}}()
messages[:mva_decreasing] = Set{Int}()
messages[:mva_redundant_15d] = Set{Int}()
messages[:mva_redundant_30d] = Set{Int}()
messages[:impedance] = Set{Int}()
messages[:reactance] = Set{Int}()
messages[:admittance_fr] = Set{Int}()
messages[:admittance_to] = Set{Int}()
messages[:basekv_line] = Set{Int}()
messages[:rx_ratio_line] = Set{Int}()
messages[:bx_fr_ratio] = Set{Int}()
messages[:bx_to_ratio] = Set{Int}()
messages[:basekv_xfer] = Set{Int}()
messages[:rx_ratio_xfer] = Set{Int}()
messages[:tm_range] = Set{Int}()
messages[:ta_range] = Set{Int}()
bus_lookup = Dict(bus["index"] => bus for (i,bus) in data["bus"])
for (i,branch) in data["branch"]
index = branch["index"]
rate_a = branch["rate_a"]
rate_b = haskey(branch, "rate_b") ? branch["rate_b"] : rate_a
rate_c = haskey(branch, "rate_c") ? branch["rate_c"] : rate_a
basekv_fr = bus_lookup[branch["f_bus"]]["base_kv"]
basekv_to = bus_lookup[branch["t_bus"]]["base_kv"]
if rate_a > rate_b || rate_b > rate_c
Memento.warn(LOGGER, "branch $(i) thermal limits are decreasing")
push!(messages[:mva_decreasing], index)
end
# epsilon of 0.05 accounts for rounding in data
rate_ub_15 = _compute_mva_ub(branch, bus_lookup, 0.261798)
if rate_ub_15 < rate_a - 0.05
Memento.warn(LOGGER, "branch $(i) thermal limit A $(rate_a) is redundant with a 15 deg. angle difference $(rate_ub_15)")
push!(messages[:mva_redundant_15d], index)
end
# epsilon of 0.05 accounts for rounding in data
rate_ub_30 = _compute_mva_ub(branch, bus_lookup, 0.523598)
if rate_ub_30 < rate_a - 0.05
Memento.warn(LOGGER, "branch $(i) thermal limit A $(rate_a) is redundant with a 30 deg. angle difference $(rate_ub_30)")
push!(messages[:mva_redundant_30d], index)
end
if branch["br_r"] < 0.0 || branch["br_x"] < 0.0
Memento.warn(LOGGER, "branch $(i) impedance $(branch["br_r"] + branch["br_x"]im) is non-positive")
push!(messages[:impedance], index)
end
if branch["g_fr"] > 0.0 || branch["b_fr"] < 0.0
Memento.warn(LOGGER, "branch $(i) from-side admittance $(branch["g_fr"] + branch["b_fr"]im) signs may be incorrect")
push!(messages[:admittance_fr], index)
end
if branch["g_to"] > 0.0 || branch["b_to"] < 0.0
Memento.warn(LOGGER, "branch $(i) to-side admittance $(branch["g_to"] + branch["b_to"]im) signs may be incorrect")
push!(messages[:admittance_to], index)
end
if isapprox(branch["br_x"], 0.0)
Memento.warn(LOGGER, "branch $(i) reactance $(branch["br_x"]) is zero")
push!(messages[:reactance], index)
continue
end
rx_ratio = abs(branch["br_r"]/branch["br_x"])
if !branch["transformer"] # branch specific checks
if !isapprox(basekv_fr, basekv_to)
Memento.warn(LOGGER, "branch $(i) base kv values are different $(basekv_fr) - $(basekv_to)")
push!(messages[:basekv_line], index)
end
if rx_ratio >= 0.5
Memento.warn(LOGGER, "branch $(i) r/x ratio $(rx_ratio) is above 0.5")
push!(messages[:rx_ratio_line], index)
end
if !isapprox(branch["b_fr"], 0.0)
bx_ratio = abs(branch["b_fr"]/branch["br_x"])
if bx_ratio > 0.5 || bx_ratio < 0.04
Memento.warn(LOGGER, "branch $(i) from-side b/x ratio $(bx_ratio) is outside the range 0.04 - 0.5")
push!(messages[:bx_fr_ratio], index)
end
end
if !isapprox(branch["b_to"], 0.0)
bx_ratio = abs(branch["b_to"]/branch["br_x"])
if bx_ratio > 0.5 || bx_ratio < 0.04
Memento.warn(LOGGER, "branch $(i) to-side b/x ratio $(bx_ratio) is outside the range 0.04 - 0.5")
push!(messages[:bx_to_ratio], index)
end
end
else # transformer specific checks
if isapprox(basekv_fr, basekv_to)
Memento.warn(LOGGER, "transformer branch $(i) base kv values are the same $(basekv_fr) - $(basekv_to)")
push!(messages[:basekv_xfer], index)
end
if rx_ratio >= 0.05
Memento.warn(LOGGER, "transformer branch $(i) r/x ratio $(rx_ratio) is above 0.05")
push!(messages[:rx_ratio_xfer], index)
end
if branch["tap"] < 0.9 || branch["tap"] > 1.1
Memento.warn(LOGGER, "transformer branch $(i) tap ratio $(branch["tap"]) is out side of the nominal range 0.9 - 1.1")
push!(messages[:tm_range], index)
end
if branch["shift"] < -0.174533 || branch["shift"] > 0.174533
Memento.warn(LOGGER, "transformer branch $(i) phase shift $(branch["shift"]) is out side of the range -0.174533 - 0.174533")
push!(messages[:ta_range], index)
end
end
end
return messages
end
""
function _compute_mva_ub(branch::Dict{String,Any}, bus_lookup, vad_bound::Real)
vad_max = max(abs(branch["angmin"]), abs(branch["angmax"]))
if vad_bound > vad_max
Memento.info(LOGGER, "given vad bound $(vad_bound) is larger than branch vad max $(vad_max)")
end
vad_max = vad_bound
if vad_max > pi
Memento.error(LOGGER, "compute_mva_ub does not support vad bounds larger than pi, given $(vad_max)")
end
r = branch["br_r"]
x = branch["br_x"]
z = r + im * x
y = 1/z
y_mag = abs(y)
fr_vm_max = bus_lookup[branch["f_bus"]]["vmax"]
to_vm_max = bus_lookup[branch["t_bus"]]["vmax"]
vm_max = max(fr_vm_max, to_vm_max)
c_max = sqrt(fr_vm_max^2 + to_vm_max^2 - 2*fr_vm_max*to_vm_max*cos(vad_max))
return y_mag * vm_max * c_max
end
""
function _parameter_check_network(data::Dict{String,Any})
messages = Dict{Symbol,Number}()
vm_center_list = [ (bus["vmax"]+bus["vmin"])/2.0 for (i,bus) in data["bus"] ]
messages[:vm_center_mean] = mean(vm_center_list)
messages[:vm_center_std] = std(vm_center_list)
return messages
end
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | code | 821 | """
InfrastructureGraph{T<:LightGraphs.AbstractGraph}
A structure containing a graph of a PowerModels or PowerModelsDistribution network in
the format of a LightGraphs.AbstractGraph and corresponding metadata necessary for
analysis / plotting.
"""
mutable struct InfrastructureGraph{T<:LightGraphs.AbstractGraph}
graph::LightGraphs.AbstractGraph
metadata::Dict{Union{Int,LightGraphs.AbstractEdge},Dict{Symbol,<:Any}}
end
"""
InfrastructureGraph(nvertices)
Constructor for the InfrastructureGraph struct, given a number of vertices `nvertices`
"""
function InfrastructureGraph(nvertices::Int)
graph = LightGraphs.SimpleDiGraph(nvertices)
metadata = Dict{Union{Int,LightGraphs.AbstractEdge},Dict{Symbol,<:Any}}()
return InfrastructureGraph{LightGraphs.SimpleDiGraph}(graph, metadata)
end
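# A hedged usage sketch (`add_edge!` and `set_property!` are defined in `core/data.jl`):
#   g = InfrastructureGraph(2)
#   add_edge!(g, 1, 2)
#   set_property!(g, 1, :label, "source bus")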
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | code | 15611 | """
`apply_plot_network_metadata!(graph; kwargs...)`
Builds metadata properties, i.e. color/size of nodes/edges, for plotting based on graph metadata
Arguments:
`graph::InfrastructureGraph`: Graph of power network
`colors::Dict{String,<:Colors.Colorant}`: Dictionary of colors to be changed from `default_colors`
`demand_color_range::Vector{<:Colors.Colorant}`: Range of colors for demand statuses
`node_size_limits::Vector{<:Real}`: Min/Max values for the size of nodes
`edge_width_limits::Vector{<:Real}`: Min/Max values for the width of edges
"""
function apply_plot_network_metadata!(graph::InfrastructureGraph{T};
colors::Dict{String,<:Colors.Colorant}=default_colors,
demand_color_range::Vector{<:Colors.Colorant}=default_demand_color_range,
node_size_limits::Vector{<:Real}=default_node_size_limits,
edge_width_limits::Vector{<:Real}=default_edge_width_limits
) where T <: LightGraphs.AbstractGraph
colors = merge(default_colors, colors)
for edge in edges(graph)
set_property!(graph, edge, :edge_color, colors[get_property(graph, edge, :edge_membership, "enabled closed fixed edge")])
set_property!(graph, edge, :edge_size, get_property(graph, edge, :switch, false) ? 2 : 1)
end
for node in vertices(graph)
node_membership = get_property(graph, node, :node_membership, "enabled node wo demand")
set_property!(graph, node, :node_color, colors[node_membership])
set_property!(graph, node, :node_size, node_size_limits[1])
if hasprop(graph, node, :size)
set_property!(graph, node, :node_size, get_property(graph, node, :size, 0.0))
end
if hasprop(graph, node, :demand)
demand = get_property(graph, node, :demand, 1.0)
idx = trunc(Int, demand * (length(demand_color_range) - 1) + 1)
set_property!(graph, node, :node_color, occursin("disabled", node_membership) || occursin("wo demand", node_membership) ? colors[node_membership] : demand_color_range[idx])
end
end
node_sizes = [get_property(graph, node, :node_size, 0.0) for node in vertices(graph)]
for node in vertices(graph)
set_property!(graph, node, :node_size, (get_property(graph, node, :node_size, 0.0)-minimum(node_sizes)) / (maximum(node_sizes) - minimum(node_sizes)) * (node_size_limits[2] - node_size_limits[1]) + node_size_limits[1])
end
end
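# Sketch of typical use (illustrative limits): build a graph first, then compute
# the plotting metadata with custom node/edge size bounds.
#
#   graph = build_network_graph(case)
#   apply_plot_network_metadata!(graph; node_size_limits=[5, 15], edge_width_limits=[1, 3])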
"""
`graph = build_power_network_graph(case::Dict{String,<:Any}; kwargs...)`
Builds a `InfrastructureGraph` from a power network `case`.
Arguments:
`case::Dict{String,<:Any}`: Network case
`edge_types::Vector{<:String}`: Component types that are edges
`block_connector_types::Vector{<:String}`: Types of edges that connect blocks (only used when `block_graph==true`)
`node_objects::Dict{String,<:Dict{String,<:String}}`: Other non-bus components to include in the graph
`block_graph::Bool`: If `true`, return block graph
`aggregate_node_objects::Bool`: If `true`, if multiple node objects present at a bus, aggregate into a single vertex
`exclusions::Vector{Any}`: Pattern for exclusion from graph
Returns:
`graph`: InfrastructureGraph
"""
function build_network_graph(case::Dict{String,<:Any};
node_settings::Dict{String,<:Any}=default_node_settings_math,
edge_settings::Dict{String,<:Any}=default_edge_settings_math,
extra_nodes::Dict{String,<:Any}=default_extra_nodes_math,
aggregate_extra_nodes::Bool=false,
sources::Dict{String,<:Any}=default_sources_math,
demands::Dict{String,<:Any}=default_demands_math,
block_graph::Bool=false,
block_connectors::Dict{String,<:Any}=default_block_connectors,
exclusions::Dict{String,<:Vector{<:Any}}=Dict{String,Vector{Any}}(),
kwargs...)::InfrastructureGraph
if Int(get(case, "data_model", 1)) == 0
if node_settings == default_node_settings_math
node_settings = default_node_settings_eng
end
if edge_settings == default_edge_settings_math
edge_settings = default_edge_settings_eng
end
if extra_nodes == default_extra_nodes_math
extra_nodes = default_extra_nodes_eng
end
if sources == default_sources_math
sources = default_sources_eng
end
if demands == default_demands_math
demands = default_demands_eng
end
end
node_key = get(node_settings, "node", "bus")
node_x_key = get(node_settings, "x", "")
node_y_key = get(node_settings, "y", "")
(disabled_node_key, disabled_node_value) = get(node_settings, "disabled", "bus_type" => 4)
if block_graph
_case = deepcopy(case)
for (type, settings) in block_connectors
if haskey(_case, type)
for (_,obj) in _case[type]
(k, v) = get(settings, "disabled", "status" => 0)
obj[k] = v
end
end
end
blocks = identify_blocks(_case)
node2graph_map = Dict{Any,Int}(node_id => block_id for (block_id, block) in blocks for node_id in block)
else
blocks = identify_blocks(case)
node2graph_map = Dict{Any,Int}(id => i for (i, (id,_)) in enumerate(case[node_key]))
end
n_nodes = block_graph ? length(blocks) : length(node2graph_map)
if aggregate_extra_nodes
used_nodes = Set{Any}()
n_extra_nodes = 0
for (type,settings) in extra_nodes
for (id,obj) in get(case, type, Dict())
if !(node2graph_map["$(obj[get(settings, "node", "bus")])"] in used_nodes)
n_extra_nodes += 1
push!(used_nodes, node2graph_map["$(obj[get(settings, "node", "bus")])"])
end
end
end
else
n_extra_nodes = sum(Int[length(get(case, type, Dict())) for type in keys(extra_nodes)])
end
# Generate blank graph
graph = InfrastructureGraph(n_nodes + n_extra_nodes)
extra_node2graph_map = Dict{String,Dict{Any,Int}}(type => Dict{Any,Int}() for type in keys(extra_nodes))
n = n_nodes
used_nodes = Dict{Any,Int}()
for (type,settings) in extra_nodes
for (id,obj) in get(case, type, Dict())
if aggregate_extra_nodes
if !(node2graph_map["$(obj[get(settings, "node", "bus")])"] in keys(used_nodes))
n += 1
extra_node2graph_map[type][id] = n
used_nodes[node2graph_map["$(obj[get(settings, "node", "bus")])"]] = n
else
extra_node2graph_map[type][id] = used_nodes[node2graph_map["$(obj[get(settings, "node", "bus")])"]]
end
else
n += 1
extra_node2graph_map[type][id] = n
end
end
end
# Add edges
if block_graph
edge_settings = block_connectors
end
for (type,settings) in edge_settings
f_key = get(settings, "fr_node", "f_bus")
t_key = get(settings, "to_node", "t_bus")
nodes_key = get(settings, "nodes", "")
(disabled_key, disabled_value) = get(settings, "disabled", "status" => 0)
(open_key, open_value) = get(settings, "open", "state" => 0)
(fixed_key, fixed_value) = get(settings, "fixed", "dispatchable" => 0)
for (id,edge) in get(case, type, Dict())
disabled = Int(get(edge, disabled_key, 1)) == disabled_value ? "disabled" : "enabled"
open = Int(get(edge, open_key, 1)) == open_value ? "open" : "closed"
fixed = Int(get(edge, fixed_key, 0)) == fixed_value ? "fixed" : "free"
edge_props = Dict{Symbol,Any}(
:label => id,
:type => type,
:edge_membership => "$disabled $open $fixed edge",
)
edges_set = Set{Any}()
if !isempty(nodes_key) && haskey(edge, nodes_key)
for f_node in edge[nodes_key]
for t_node in edge[nodes_key]
if f_node != t_node
push!(edges_set, Set{Any}([f_node, t_node]))
end
end
end
else
if edge[f_key] != edge[t_key]
edges_set = Set{Any}([Set([edge[f_key], edge[t_key]])])
end
end
for (f_node, t_node) in edges_set
add_edge!(graph, node2graph_map["$f_node"], node2graph_map["$t_node"])
set_properties!(graph, LightGraphs.Edge(node2graph_map["$f_node"], node2graph_map["$t_node"]), edge_props)
end
end
end
used_extra_f_verts = Dict{Int,Int}()
for (type, settings) in extra_nodes
extra_node_key = get(settings, "node", "bus")
(disabled_key, disabled_value) = get(settings, "disabled", "status" => 0)
(inactive_real_key, inactive_real_value) = get(settings, "inactive_real", "" => 0)
(inactive_imaginary_key, inactive_imaginary_value) = get(settings, "inactive_imaginary", "" => 0)
for (id, obj) in get(case, type, Dict())
f_vert = node2graph_map["$(obj[extra_node_key])"]
t_vert = aggregate_extra_nodes && f_vert in keys(used_extra_f_verts) ? used_extra_f_verts[f_vert] : extra_node2graph_map[type][id]
if !(aggregate_extra_nodes && f_vert in keys(used_extra_f_verts))
add_edge!(graph, f_vert, t_vert)
edge_props = Dict{Symbol,Any}(
:label => "",
:edge_membership => "connector",
)
set_properties!(graph, LightGraphs.Edge(f_vert, t_vert), edge_props)
end
if isa(get(obj, disabled_key, 1), Enum)
disabled = Int(get(obj, disabled_key, 1)) == disabled_value ? "disabled" : "enabled"
else
disabled = get(obj, disabled_key, 1) == disabled_value ? "disabled" : "enabled"
end
real_inactive = !isempty(inactive_real_key) && haskey(obj, inactive_real_key) && all(obj[inactive_real_key] .== inactive_real_value)
imaginary_inactive = !isempty(inactive_imaginary_key) && haskey(obj, inactive_imaginary_key) && all(obj[inactive_imaginary_key] .== inactive_imaginary_value)
inactive = real_inactive && imaginary_inactive ? "inactive" : "active"
node_membership = "$disabled $inactive extra node"
node_props = Dict{Symbol,Any}(
:label => get(settings, "label", id),
:node_membership => node_membership,
:force_label => !isempty(get(settings, "label", ""))
)
if haskey(settings, "size")
node_props[:size] = sum(get(obj, settings["size"], 0.0))
end
if aggregate_extra_nodes && f_vert in keys(used_extra_f_verts)
_node_membership = get_property(graph, t_vert, :node_membership, "")
_label = get_property(graph, t_vert, :label, "")
_force_label = get_property(graph, t_vert, :force_label, false)
_inactive = inactive == "active" || occursin(" active", _node_membership) ? "active" : "inactive"
_disabled = disabled == "enabled" || startswith(_node_membership, "enabled") ? "enabled" : "disabled"
node_props[:node_membership] = "$_disabled $_inactive extra node"
node_props[:label] = node_props[:label] != _label ? join([node_props[:label], _label], ",") : node_props[:label]
node_props[:force_label] = any([node_props[:force_label], _force_label])
if haskey(node_props, :size)
_size = get_property(graph, t_vert, :size, 0.0)
node_props[:size] += _size
end
end
set_properties!(graph, t_vert, node_props)
used_extra_f_verts[f_vert] = t_vert
end
end
if !block_graph && !isempty(node_x_key) && !isempty(node_y_key)
for (node, vert) in node2graph_map
obj = case[node_key][node]
if haskey(obj, node_x_key) && haskey(obj, node_y_key)
set_property!(graph, vert, :coordinate, [obj[node_x_key], obj[node_y_key]])
end
end
end
active_blocks = Dict{Int,Bool}(id => is_active(case, block) for (id,block) in blocks)
node_has_demand = Dict{Any,Bool}("$(obj[get(settings, "node", "bus")])" => true for (type, settings) in demands for (_,obj) in get(case, type, Dict()))
block2node_map = block_graph ? Dict{Int,Any}(id => [id] for (id,block) in blocks) : blocks
for (block_id, block) in blocks
if block_graph
disabled = "disabled"
has_demand = "wo demand"
for node_id in block
disabled = Int(case[node_key][node_id][disabled_node_key]) != disabled_node_value ? "enabled" : disabled
has_demand = get(node_has_demand, node_id, false) ? "w demand" : has_demand
end
node_membership = "$disabled node $has_demand"
node_props = Dict{Symbol,Any}(
:label => block_id,
:node_membership => node_membership,
:active => active_blocks[block_id],
)
set_properties!(graph, block_id, node_props)
else
for node_id in block
disabled = Int(case[node_key][node_id][disabled_node_key]) == disabled_node_value ? "disabled" : "enabled"
has_demand = get(node_has_demand, node_id, false) ? "w demand" : "wo demand"
node_membership = "$disabled node $has_demand"
node_props = Dict{Symbol,Any}(
:label => node_id,
:node_membership => node_membership,
:active => active_blocks[block_id]
)
set_properties!(graph, node2graph_map[node_id], node_props)
end
end
end
_node_demand_status = Dict{Any,Vector{Real}}(obj[get(settings, "node", "bus")] => Vector{Real}() for (type,settings) in demands for (_,obj) in get(case, type, Dict()))
for (type, settings) in demands
demand_node_key = get(settings, "node", "bus")
demand_status_key = get(settings, "status", "status")
for (_,obj) in get(case, type, Dict())
demand_status = get(obj, demand_status_key, 1)
push!(_node_demand_status[obj[demand_node_key]], isa(demand_status, Enum) ? Int(demand_status) : demand_status)
end
if block_graph
node_demand_status = Dict{Int,Real}()
for (block_id, block) in blocks
block_demand = []
for node in block
if node in keys(_node_demand_status)
append!(block_demand, _node_demand_status[node])
end
end
if !isempty(block_demand)
node_demand_status[block_id] = sum(block_demand) / length(block_demand)
end
end
else
node_demand_status = Dict{Any,Real}(id => sum(v) / length(v) for (id, v) in _node_demand_status if !isempty(v))
end
for (id, status) in node_demand_status
if block_graph
set_property!(graph, id, :demand, status)
else
set_property!(graph, node2graph_map["$id"], :demand, status)
end
end
end
return graph
end
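# Sketch of typical use with a parsed PowerModels case (file path is hypothetical):
#
#   case = PowerModels.parse_file("case5.m")
#   g  = build_network_graph(case)                    # bus-level graph
#   gb = build_network_graph(case; block_graph=true)  # graph of connected blocks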
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | code | 2157 | """
`layout_graph!(graph::InfrastructureGraph, layout_engine::Function; kwargs...)`
A routine to assign positions to all nodes of a `graph` for plotting using `layout_engine`.
Positions are assigned to the metadata of each node at `:x` and `:y`.
Arguments:
`graph::InfrastructureGraph`: Network graph
`layout_engine`: Layout Function to use. Applies only when not using `use_coordinates`
`use_coordinates::Bool`: If `true`, `spring_layout` will be used instead of `layout_engine`
`apply_spring_layout::Bool`: If `true`, `spring_layout` will be applied after `layout_engine` to ensure separation of overlapping nodes
`spring_constant::Real`: Spring constant to be used by `spring_layout`
`kwargs`: Keyword arguments to be used in `layout_engine`
"""
function layout_graph!(graph::InfrastructureGraph{T}, layout_engine::Function=kamada_kawai_layout;
use_coordinates::Bool=false,
apply_spring_layout::Bool=false,
spring_constant::Real=default_spring_constant,
kwargs...) where T <: LightGraphs.AbstractGraph
if use_coordinates
pos = Dict{Int,Union{Missing,Vector{Real}}}(node => get_property(graph, node, :coordinate, missing) for node in vertices(graph))
fixed = [node for (node, p) in pos if !ismissing(p)]
avg_x, avg_y = mean(hcat(skipmissing([v for v in values(pos)])...), dims=2)
std_x, std_y = std(hcat(skipmissing([v for v in values(pos)])...), dims=2)
for (v, p) in pos
if ismissing(p)
pos[v] = [avg_x+std_x*rand(), avg_y+std_y*rand()]
end
end
positions = spring_layout(graph; pos=pos, fixed=fixed, k=spring_constant*sqrt(length(pos)), iterations=100)
else
positions = layout_engine(graph; kwargs...)
if apply_spring_layout
positions = spring_layout(graph; pos=positions, k=spring_constant*sqrt(length(positions)), iterations=100)
end
end
for (node, (x, y)) in positions
set_property!(graph, node, :x, x)
set_property!(graph, node, :y, y)
end
end
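# Sketch: assign plotting positions before calling the plot functions.
#
#   layout_graph!(graph, kamada_kawai_layout; apply_spring_layout=true)
#   # or, honoring any :coordinate metadata stored on the nodes:
#   layout_graph!(graph; use_coordinates=true)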
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | code | 1136 | "NetworkX Kamada-Kawai layout function"
function kamada_kawai_layout(graph::InfrastructureGraph{T}; dist=nothing, pos=nothing, weight="weight", scale=1.0, center=nothing, dim=2) where T <: LightGraphs.AbstractGraph
G = nx.Graph()  # `nx` is assumed to be the Python networkx module, imported via PyCall in the package setup
for edge in edges(graph)
G.add_edge(edge.src, edge.dst)
end
for node in vertices(graph)
G.add_node(node)
end
positions = nx.kamada_kawai_layout(G, dist=dist, pos=pos, weight=weight, scale=scale, center=center, dim=dim)
return positions
end
"NetworkX spring layout function"
function spring_layout(graph::InfrastructureGraph{T}; k=nothing, pos=nothing, fixed=nothing, iterations=50, threshold=0.0001, weight="weight", scale=1, center=nothing, dim=2, seed=nothing) where T <: LightGraphs.AbstractGraph
G = nx.Graph()
for edge in edges(graph)
G.add_edge(edge.src, edge.dst)
end
for node in vertices(graph)
G.add_node(node)
end
positions = nx.spring_layout(G, k=k, pos=pos, fixed=fixed, iterations=iterations, threshold=threshold, weight=weight, scale=scale, center=center, dim=dim, seed=seed)
return positions
end
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | code | 11634 | "Plots branch impedances"
function plot_branch_impedance(data::Dict{String,Any}; branch_key::Any="branch", resistance_key::String="br_r", reactance_key::String="br_x")::Vega.VGSpec
spec = deepcopy(default_branch_impedance_spec)
scatter_data = [Dict("resistance" => sum(sum(branch[resistance_key])), "reactance" => sum(sum(branch[reactance_key])), "id" => id) for (id, branch) in data[branch_key]]
sort!(scatter_data; by=x -> parse(Int, x["id"]))
@set! spec.data = []
pushfirst!(spec.data, Dict("name" => "branch-impedances", "values" => scatter_data))
resistance = Float64[x["resistance"] for x in scatter_data]
reactance = Float64[x["reactance"] for x in scatter_data]
for (field, _data) in zip(["resistance", "reactance"], [resistance, reactance])
push!(spec.data, Dict(
"name" => "binned-$field",
"source" => "branch-impedances",
"transform" => [
Dict(
"type" => "bin", "field" => field,
"extent" => [minimum(_data), maximum(_data)],
"anchor" => mean(_data),
"step" => std(_data) / 2,
"nice" => true
),
Dict(
"type" => "aggregate",
"key" => "bin0",
"groupby" => ["bin0", "bin1"],
"fields" => ["bin0"],
"ops" => ["count"],
"as" => ["count"]
)
]
)
)
end
return spec
end
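# Sketch (assumes a MATHEMATICAL-model case with "branch" entries carrying
# "br_r"/"br_x"; the output path is hypothetical):
#
#   spec = plot_branch_impedance(case)
#   Vega.save("branch_impedance.pdf", spec)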
"""
`plot_load_summary(file, result, case; kwargs...)`
Plots total generation, total load served, and total forecasted load for a given `case` and `result`, saving to `file`
Arguments:
`file::String`: file path to saved figure
`result::Dict{String,Any}`: multinetwork solution data (contains load statuses)
`case::Dict{String,Any}`: Original case file (without calcuated loads) for forecasted loads
`log::Bool`: If `true`, plots y-axis on log scale
`intermediate::Bool`: If `true`, plots intermediate steps of plot (for animations).
`legend_position::Symbol`: Position of legend, accepts the following symbols: `:right`, `:left`, `:top`, `:bottom`, `:inside`, `:best`, `:legend`, `:topright`, `:topleft`, `:bottomleft`, `:bottomright`
"""
function plot_load_summary(file::String, result::Dict{String,Any}, case::Dict{String,Any}; log::Bool=false, intermediate::Bool=false, legend_position::Symbol=:best)::Vega.VGSpec
@assert Int(get(case, "data_model", 1)) == 1 && get(case, "per_unit", true) "This function only supports plotting MATHEMATICAL data models in per-unit representation"
x = 0:length(result["nw"]) - 1
generation = [x for (n, x) in sort([(parse(Int, n), sum(sum(_replace_nan(gen["pg"])) * nw["baseMVA"] for (i, gen) in nw["gen"])) for (n, nw) in result["nw"]]; by=x -> x[1])]
storage = [x for (n, x) in sort([(parse(Int, n), sum(sum(_replace_nan(strg["ps"])) * nw["baseMVA"] for (i, strg) in nw["storage"])) for (n, nw) in result["nw"]]; by=x -> x[1])]
total_generated = generation .+ storage
total_load_served = [x for (n, x) in sort([(parse(Int, n), sum(sum(_replace_nan(load["status"] * case["nw"]["$n"]["load"]["$i"]["pd"])) * nw["baseMVA"] for (i, load) in nw["load"])) for (n, nw) in result["nw"]]; by=x -> x[1])]
total_load_forecast = [x for (n, x) in sort([(parse(Int, n), sum(sum(_replace_nan(load["pd"])) * case["nw"]["$n"]["baseMVA"] for (i, load) in nw["load"])) for (n, nw) in case["nw"]]; by=x -> x[1])]
max_digits = maximum([length("$n") for n in x])
@debug "" total_generated total_load_served total_load_forecast
spec = deepcopy(default_source_demand_summary_spec)
power_summary_data = [
Dict(
"x" => x[i],
"y" => y[i],
"c" => c - 1,
) for (c, y) in enumerate([total_generated, total_load_served, total_load_forecast]) for i in 1:length(x)
]
@set! spec.data = [
Dict(
"name" => "table",
"values" => power_summary_data,
"transform" => [
Dict(
"type" => "stack",
"groupby" => ["x"],
"sort" => Dict(
"field" => "c"
),
"field" => "y"
)
]
)
]
@set! spec.axes[2]["title"] = "Power (MW)"
if log
@set! spec.scales[2]["type"] = "log"
end
if intermediate
_tmp_data = []
for (i, _data) in enumerate(eachrow(reshape(power_summary_data, :, 3)))
append!(_tmp_data, _data)
@set! spec.data = [
Dict(
"name" => "table",
"values" => _tmp_data,
"transform" => [
Dict(
"type" => "stack",
"groupby" => ["x"],
"sort" => Dict(
"field" => "c"
),
"field" => "y"
)
]
)
]
filename_parts = split(file, ".")
filename = join(filename_parts[1:end-1], ".")
ext = filename_parts[end]
_fileout = "$(filename)_$(lpad(i, max_digits, "0")).$(ext)"
Vega.save(_fileout, spec)
end
else
Vega.save(file, spec)
end
return spec
end
"""
`plot_source_demand_summary(file::String, mn_case::Dict{String,<:Any}; kwargs...)`
Plots the total delivery from sources (generation) and total receipts by demands (load)
Arguments:
`fileout::String`: path to file where plot will be saved
`mn_case::Dict{String,<:Any}`: a multinetwork case
`yscale::Symbol`: To set log scale, `:log10`, else `:identity`
`save_intermediate_frames::Bool`: if `true`, each frame of the multinetwork will be saved separately
`legend_position::Symbol`: Position of legend, accepts the following symbols: `:right`, `:left`, `:top`, `:bottom`, `:inside`, `:best`, `:legend`, `:topright`, `:topleft`, `:bottomleft`, `:bottomright`
`sources::Dict{String,<:Any}`: information about sources (e.g. generators)
`demands::Dict{String,<:Any}`: information about demands (e.g. loads)
`totals::Symbol`: Choose `:real`, `:imaginary`, `:complex`
"""
function plot_source_demand_summary(fileout::String, mn_case::Dict{String,<:Any};
yscale::Symbol=:identity,
save_intermediate_frames::Bool=false,
legend_position::Symbol=:best,
sources::Dict{String,<:Any}=default_sources_eng,
demands::Dict{String,<:Any}=default_demands_eng,
totals::Symbol=:real,
)::Vega.VGSpec
x = 1:length(mn_case["nw"])
total_generated = Vector{Real}(undef, length(x))
for (n, nw) in get(mn_case, "nw", Dict())
value = Complex(0.0, 0.0)
for (type, settings) in sources
real_key = get(settings, "inactive_real", "" => 0)[1]
imag_key = get(settings, "inactive_imaginary", "" => 0)[1]
for (_,obj) in get(nw, type, Dict())
if totals == :real
v_real = get(obj, real_key, 0.0)
v_imag = 0.0
elseif totals == :imaginary
v_real = 0.0
v_imag = get(obj, imag_key, 0.0)
else
v_real = get(obj, real_key, 0.0)
v_imag = get(obj, imag_key, 0.0)
end
value += sum(Complex.(v_real, v_imag))
end
end
total_generated[parse(Int, n)] = norm(value)
end
total_demand_served = Vector{Real}(undef, length(x))
total_demand_forecast = Vector{Real}(undef, length(x))
for (n, nw) in get(mn_case, "nw", Dict())
served_value = Complex(0.0, 0.0)
forecast_value = Complex(0.0, 0.0)
for (type, settings) in demands
served_real_key = get(settings, "inactive_real", "" => 0)[1]
served_imag_key = get(settings, "inactive_imaginary", "" => 0)[1]
forecast_real_key = get(settings, "original_demand_real", "")
forecast_imag_key = get(settings, "original_demand_imaginary", "")
for (_,obj) in get(nw, type, Dict())
if totals == :real
v_served_real = get(obj, served_real_key, 0.0)
v_served_imag = 0.0
v_forecast_real = get(obj, forecast_real_key, 0.0)
v_forecast_imag = 0.0
elseif totals == :imaginary
v_served_real = 0.0
v_served_imag = get(obj, served_imag_key, 0.0)
v_forecast_real = 0.0
v_forecast_imag = get(obj, forecast_imag_key, 0.0)
else
v_served_real = get(obj, served_real_key, 0.0)
v_served_imag = get(obj, served_imag_key, 0.0)
v_forecast_real = get(obj, forecast_real_key, 0.0)
v_forecast_imag = get(obj, forecast_imag_key, 0.0)
end
served_value += sum(Complex.(v_served_real, v_served_imag))
forecast_value += sum(Complex.(v_forecast_real, v_forecast_imag))
end
end
total_demand_served[parse(Int, n)] = norm(served_value)
total_demand_forecast[parse(Int, n)] = norm(forecast_value)
end
max_digits = maximum([length(n) for (n,_) in mn_case["nw"]])
power_scale_factor = mn_case["settings"]["power_scale_factor"]
units_str = power_scale_factor == 1.0 ? "W" : power_scale_factor == 1e3 ? "kW" : power_scale_factor == 1e6 ? "MW" : "$power_scale_factor W"
spec = deepcopy(default_source_demand_summary_spec)
power_summary_data = [
Dict(
"x" => x[i],
"y" => y[i],
"c" => c - 1,
) for (c, y) in enumerate([total_generated, total_demand_served, total_demand_forecast]) for i in 1:length(x)
]
@set! spec.data = [
Dict(
"name" => "table",
"values" => power_summary_data,
"transform" => [
Dict(
"type" => "stack",
"groupby" => ["x"],
"sort" => Dict("field" => "c"),
"field" => "y"
)
]
)
]
@set! spec.axes[2]["title"] = "Power ($units_str)"
if save_intermediate_frames
_tmp_data = []
for (i, _data) in enumerate(eachrow(reshape(power_summary_data, :, 3)))
append!(_tmp_data, _data)
@set! spec.data = [
Dict(
"name" => "table",
"values" => _tmp_data,
"transform" => [
Dict(
"type" => "stack",
"groupby" => ["x"],
"sort" => Dict("field" => "c"),
"field" => "y"
)
]
)
]
filename_parts = split(fileout, ".")
filename = join(filename_parts[1:end-1], ".")
ext = filename_parts[end]
_fileout = "$(filename)_$(lpad(i, max_digits, "0")).$(ext)"
Vega.save(_fileout, spec)
end
else
Vega.save(fileout, spec)
end
return spec
end
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | code | 3681 | """
`spec = plot_graph(graph::InfrastructureGraph; kwargs...)`
Builds a figure specification. Returns `Vega.VGSpec`.
Arguments:
`graph::InfrastructureGraph{<:LightGraphs.AbstractGraph}`: Network graph
`label_nodes::Bool`: Plot labels on nodes
`label_edges::Bool`: Plot labels on edges
`fontsize::Real`: Fontsize of labels
`fontfamily::String`: Font Family of labels
`fontcolor::Union{Symbol,<:Colors.Colorant}`: Color of the labels
`textalign::Symbol`: Alignment of text: "left", "center", "right"
`plot_size::Tuple{Int,Int}`: Size of the plot in pixels
`plot_dpi::Int`: Dots-per-inch of the plot
Returns:
`spec<:Vega.VGSpec`: Vega.jl figure specification
"""
function plot_graph(graph::InfrastructureGraph{T};
label_nodes::Bool=false,
label_edges::Bool=false,
fontsize::Real=default_fontsize,
fontfamily::String=default_fontfamily,
fontcolor::Union{Symbol,Colors.Colorant}=default_fontcolor,
textalign::Symbol=default_textalign,
plot_size::Tuple{Int,Int}=default_plot_size,
plot_dpi::Int=default_plot_dpi,
kwargs...) where T <: LightGraphs.AbstractGraph
nodes = [
Dict(
"id"=>node,
"x"=>get_property(graph, node, :x, 0),
"y"=>get_property(graph, node, :y, 0),
"color"=>"#$(Colors.hex(get_property(graph, node, :node_color, colorant"black")))",
"size"=>get_property(graph, node, :size, 1),
"label"=>get_property(graph, node, :label, "")
) for node in vertices(graph)
]
vert2node = Dict(node["id"] => i for (i, node) in enumerate(nodes))
links = [
Dict(
"source"=>vert2node[LightGraphs.src(edge)]-1,
"target"=>vert2node[LightGraphs.dst(edge)]-1,
"color"=>"#$(Colors.hex(get_property(graph, edge, :edge_color, colorant"black")))",
"size"=>get_property(graph, edge, :edge_size, 1),
"label"=>get_property(graph, edge, :label, "")
) for edge in edges(graph)
]
has_layout = all(hasprop(graph, node, :x) && hasprop(graph, node, :y) for node in vertices(graph))
if !has_layout
@error "graph has no layout (nodes are missing :x/:y properties); run `layout_graph!` before plotting"
end
spec = deepcopy(default_network_graph_spec)
push!(spec.data, Dict("name"=>"node-data", "values"=>nodes))
push!(spec.data, Dict("name"=>"link-data", "values"=>links))
width, height = plot_size
@set! spec.width = width
@set! spec.height = height
if label_nodes
# TODO add "label" transformation (requires Vega > v5.16+)
node_labels = deepcopy(default_node_label_spec)
@set! node_labels.encode.enter.fontSize = Dict("value" => fontsize)
@set! node_labels.encode.enter.font = Dict("value" => fontfamily)
@set! node_labels.encode.enter.fill = Dict("value" => "#$(isa(fontcolor, Symbol) ? Colors.hex(Colors.color(String(fontcolor))) : Colors.hex(fontcolor))")
@set! node_labels.encode.enter.align = textalign
push!(spec.marks, node_labels)
end
if label_edges
# TODO this is currently broken, I think it needs "label" transformation (requires Vega > v5.16+)
edge_labels = deepcopy(default_edge_label_spec)
@set! edge_labels.encode.update.fontSize = Dict("value" => fontsize)
@set! edge_labels.encode.update.font = Dict("value" => fontfamily)
@set! edge_labels.encode.update.fill = Dict("value" => "#$(isa(fontcolor, Symbol) ? Colors.hex(Colors.color(String(fontcolor))) : Colors.hex(fontcolor))")
@set! edge_labels.encode.update.align = textalign
push!(spec.marks, edge_labels)
end
return spec
end
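# Sketch (assumes the graph already carries :x/:y positions, e.g. via `layout_graph!`):
#
#   spec = plot_graph(graph; label_nodes=true, fontsize=12)
#   Vega.save("network.png", spec)   # or display(spec) in a notebook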
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | code | 9724 | """
`plot_network(graph; kwargs...)`
Plots a network `graph` and returns the `InfrastructureGraph` used for plotting (the figure is displayed or saved as a side effect).
Arguments:
`graph::InfrastructureGraph{<:LightGraphs.AbstractGraph}`: Network graph
`filename::String`: File to save the plot to via `Vega.save`; if empty, the figure is displayed
`label_nodes::Bool`: Plot labels on nodes
`label_edges::Bool`: Plot labels on edges
`colors::Dict{String,<:Colors.Colorant}`: Changes to default colors, see `default_colors` for available components
`demand_color_range::Vector{<:Colors.Colorant}`: Range of colors for demand (load) statuses
`node_size_limits::Vector{<:Real}`: Min/Max values for the size of nodes
`edge_width_limits::Vector{<:Real}`: Min/Max values for the width of edges
`positions::Union{Dict{Int,<:Real}, InfrastructureGraph}`: Used to specify node locations of graph (avoids running layout algorithm every time)
`use_coordinates::Bool`: Use buscoord field on buses for node positions
`spring_constant::Real`: Only used if buscoords=true. Spring constant to be used to force-direct-layout buses with no buscoord field
`apply_spring_layout::Bool`: Apply spring layout after initial layout
`fontsize::Real`: Fontsize of labels
`fontfamily::String`: Font Family of labels
`fontcolor::Union{Symbol,<:Colors.Colorant}`: Color of the labels
`textalign::Symbol`: Alignment of text
`plot_size::Tuple{Int,Int}`: Size of the plot in pixels
`plot_dpi::Int`: Dots-per-inch of the plot
Returns:
`graph::InfrastructureGraph`: InfrastructureGraph of the network
"""
function plot_network(graph::InfrastructureGraph{T};
filename::String="",
label_nodes::Bool=false,
label_edges::Bool=false,
colors::Dict{String,<:Colors.Colorant}=default_colors,
demand_color_range::Vector{<:Colors.Colorant}=default_demand_color_range,
node_size_limits::Vector{<:Real}=default_node_size_limits,
edge_width_limits::Vector{<:Real}=default_edge_width_limits,
positions::Union{Dict{Int,<:Real},InfrastructureGraph}=Dict{Int,Real}(),
use_coordinates::Bool=false,
spring_constant::Real=default_spring_constant,
apply_spring_layout::Bool=false,
fontsize::Real=default_fontsize,
fontfamily::String=default_fontfamily,
fontcolor::Union{Symbol,<:Colors.Colorant}=default_fontcolor,
textalign::Symbol=default_textalign,
plot_size::Tuple{<:Int,<:Int}=default_plot_size,
plot_dpi::Int=default_plot_dpi,
kwargs...
) where T <: LightGraphs.AbstractGraph
apply_plot_network_metadata!(graph;
colors=colors,
demand_color_range=demand_color_range,
node_size_limits=node_size_limits,
edge_width_limits=edge_width_limits
)
# Graph Layout
if isa(positions, InfrastructureGraph)
positions = Dict{Int,Vector{Real}}(
node => Vector{Real}([get_property(positions, node, :x, 0.0), get_property(positions, node, :y, 0.0)]) for node in vertices(positions)
)
end
for (node, (x, y)) in positions
set_properties!(graph, node, Dict{Symbol,Any}(
:x=>x,
:y=>y
)
)
end
if !all(hasprop(graph, node, :x) && hasprop(graph, node, :y) for node in vertices(graph))
layout_graph!(graph, kamada_kawai_layout;
use_coordinates=use_coordinates,
apply_spring_layout=apply_spring_layout,
spring_constant=spring_constant
)
end
# Plot
fig = plot_graph(graph;
label_nodes=label_nodes,
label_edges=label_edges,
fontsize=fontsize,
fontfamily=fontfamily,
fontcolor=fontcolor,
textalign=textalign,
plot_size=plot_size,
plot_dpi=plot_dpi
)
if isempty(filename)
display(fig)
else
Vega.save(filename, fig)
end
return graph
end
"""
`graph = plot_network(case::Dict{String,<:Any}; kwargs...)`
Plots a whole network `case` at the bus level and returns the `InfrastructureGraph` used for plotting.
This function will build the graph from the `case`. Additional `kwargs` are passed to
`plot_network(graph; kwargs...)`.
Arguments:
`case::Dict{String,Any}`: Network case data structure
`positions::Union{Dict{Int,<:Any},InfrastructureGraph}`: Pre-set positions of graph vertices
Returns:
`graph::InfrastructureGraph`: InfrastructureGraph of the network
"""
function plot_network(case::Dict{String,<:Any}; positions::Union{Dict{Int,<:Any},InfrastructureGraph}=Dict{Int,Any}(), kwargs...)
graph = build_network_graph(case; kwargs...)
if isa(positions, InfrastructureGraph)
positions = Dict(node => [get_property(positions, node, :x, 0.0), get_property(positions, node, :y, 0.0)] for node in vertices(positions))
end
for (node, (x, y)) in positions
set_properties!(graph, node, Dict(:x=>x, :y=>y))
end
graph = plot_network(graph; kwargs...)
return graph
end
"""
`plot_network!(graph; kwargs...)`
Plots a network `graph` and returns the figure (`Vega.VGSpec`).
Arguments:
`graph::InfrastructureGraph{<:LightGraphs.AbstractGraph}`: Network graph
`filename::String`: File to output the plot to (note: `plot_network!` returns the figure instead of saving; use `plot_network` to save to file)
`label_nodes::Bool`: Plot labels on nodes
`label_edges::Bool`: Plot labels on edges
`colors::Dict{String,<:Colors.Colorant}`: Changes to default colors, see `default_colors` for available components
`demand_color_range::Vector{<:Colors.Colorant}`: Range of colors for demand (load) statuses
`node_size_limits::Vector{<:Real}`: Min/Max values for the size of nodes
`edge_width_limits::Vector{<:Real}`: Min/Max values for the width of edges
`positions::Union{Dict{Int,<:Real}, InfrastructureGraph}`: Used to specify node locations of graph (avoids running layout algorithm every time)
`use_coordinates::Bool`: Use buscoord field on buses for node positions
`spring_constant::Real`: Only used if buscoords=true. Spring constant to be used to force-direct-layout buses with no buscoord field
`apply_spring_layout::Bool`: Apply spring layout after initial layout
`fontsize::Real`: Fontsize of labels
`fontfamily::String`: Font Family of labels
`fontcolor::Union{Symbol,<:Colors.Colorant}`: Color of the labels
`textalign::Symbol`: Alignment of text
`plot_size::Tuple{Int,Int}`: Size of the plot in pixels
`plot_dpi::Int`: Dots-per-inch of the plot
Returns:
`fig`
"""
function plot_network!(graph::InfrastructureGraph{T};
filename::String="",
label_nodes::Bool=false,
label_edges::Bool=false,
colors::Dict{String,<:Colors.Colorant}=default_colors,
demand_color_range::Vector{<:Colors.Colorant}=default_demand_color_range,
node_size_limits::Vector{<:Real}=default_node_size_limits,
edge_width_limits::Vector{<:Real}=default_edge_width_limits,
positions::Union{Dict{Int,<:Real},InfrastructureGraph}=Dict{Int,Real}(),
use_coordinates::Bool=false,
spring_constant::Real=default_spring_constant,
apply_spring_layout::Bool=false,
fontsize::Real=default_fontsize,
fontfamily::String=default_fontfamily,
fontcolor::Union{Symbol,<:Colors.Colorant}=default_fontcolor,
textalign::Symbol=default_textalign,
plot_size::Tuple{<:Int,<:Int}=default_plot_size,
plot_dpi::Int=default_plot_dpi,
kwargs...
) where T <: LightGraphs.AbstractGraph
apply_plot_network_metadata!(graph;
colors=colors,
demand_color_range=demand_color_range,
node_size_limits=node_size_limits,
edge_width_limits=edge_width_limits
)
# Graph Layout
if isa(positions, InfrastructureGraph)
positions = Dict{Int,Vector{Real}}(
node => Vector{Real}([get_property(positions, node, :x, 0.0), get_property(positions, node, :y, 0.0)]) for node in vertices(positions)
)
end
for (node, (x, y)) in positions
set_properties!(graph, node, Dict{Symbol,Any}(
:x=>x,
:y=>y
)
)
end
if !all(hasprop(graph, node, :x) && hasprop(graph, node, :y) for node in vertices(graph))
layout_graph!(graph, kamada_kawai_layout;
use_coordinates=use_coordinates,
apply_spring_layout=apply_spring_layout,
spring_constant=spring_constant
)
end
return plot_graph(graph;
label_nodes=label_nodes,
label_edges=label_edges,
fontsize=fontsize,
fontfamily=fontfamily,
fontcolor=fontcolor,
textalign=textalign,
plot_size=plot_size,
plot_dpi=plot_dpi
)
end
"""
`fig = plot_network!(case::Dict{String,<:Any}; kwargs...)`
Plots a whole network `case` at the bus level and returns the figure (`Vega.VGSpec`).
This function will build the graph from the `case`. Additional `kwargs` are passed to
`plot_network!(graph; kwargs...)`.
Arguments:
`case::Dict{String,Any}`: Network case data structure
`positions::Union{Dict{Int,<:Any},InfrastructureGraph}`: Pre-set positions of graph vertices
Returns:
`fig`
"""
function plot_network!(case::Dict{String,<:Any}; positions::Union{Dict{Int,<:Any},InfrastructureGraph}=Dict{Int,Any}(), kwargs...)
graph = build_network_graph(case; kwargs...)
if isa(positions, InfrastructureGraph)
positions = Dict(node => [get_property(positions, node, :x, 0.0), get_property(positions, node, :y, 0.0)] for node in vertices(positions))
end
for (node, (x, y)) in positions
set_properties!(graph, node, Dict(:x=>x, :y=>y))
end
return plot_network!(graph; kwargs...)
end
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | code | 923 | "Vega spec for network graph plot (base)"
const default_network_graph_spec = Vega.loadvgspec(joinpath(dirname(pathof(PowerModelsAnalytics)), "vega", "network_graph.json"))
"Vega spec for extension to network graph spec for labeling nodes"
const default_node_label_spec = Vega.loadvgspec(joinpath(dirname(pathof(PowerModelsAnalytics)), "vega", "node_labels.json"))
"Vega spec for extension to network graph spec for labeling edges"
const default_edge_label_spec = Vega.loadvgspec(joinpath(dirname(pathof(PowerModelsAnalytics)), "vega", "edge_labels.json"))
"Vega spec for branch impedance plot"
const default_branch_impedance_spec = Vega.loadvgspec(joinpath(dirname(pathof(PowerModelsAnalytics)), "vega", "branch_impedance.json"))
"Vega spec for Source Demand Summary Plot"
const default_source_demand_summary_spec = Vega.loadvgspec(joinpath(dirname(pathof(PowerModelsAnalytics)), "vega", "source_demand_summary.json"))
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | code | 1934 | using PowerModelsAnalytics
import LightGraphs
import Colors
import PowerModels
import PowerModelsDistribution
PowerModels.silence()
using Test
@testset "PowerModelsAnalytics" begin
data = PowerModels.parse_file("$(joinpath(dirname(pathof(PowerModels)), ".."))/test/data/matpower/case5.m")
mp_data = PowerModels.parse_file("$(joinpath(dirname(pathof(PowerModels)), ".."))/test/data/matpower/case5.m")
PowerModelsDistribution.make_multiconductor!(mp_data, 3)
n_graph = build_network_graph(data)
n_graph_load_colors = build_network_graph(data)
n_mp_graph = build_network_graph(mp_data)
lb_graph = build_network_graph(data; block_graph=true)
lb_mp_graph = build_network_graph(mp_data; block_graph=true)
@testset "graphs" begin
for graph in [n_graph, n_mp_graph, lb_graph, lb_mp_graph]
@test isa(graph, InfrastructureGraph{T} where T<:LightGraphs.AbstractGraph)
end
apply_plot_network_metadata!(n_graph)
@test all(hasprop(n_graph, node, :node_color) && hasprop(n_graph, node, :node_size) for node in vertices(n_graph))
@test all(hasprop(n_graph, edge, :edge_color) && hasprop(n_graph, edge, :edge_size) for edge in edges(n_graph))
@testset "load_color_range" begin
load_color_range = Colors.range(default_colors["disabled node w demand"], default_colors["enabled node w demand"], length=11)
@test_nowarn apply_plot_network_metadata!(n_graph_load_colors; demand_color_range=load_color_range)
end
end
@testset "layout" begin
layout_graph!(n_graph, kamada_kawai_layout)
layout_graph!(n_mp_graph, spring_layout)
@test all(hasprop(n_graph, node, :x) && hasprop(n_graph, node, :y) for node in vertices(n_graph))
@test all(hasprop(n_mp_graph, node, :x) && hasprop(n_mp_graph, node, :y) for node in vertices(n_mp_graph))
end
@testset "plot" begin
end
end | PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | docs | 1280 | # PowerModelsAnalytics.jl Change Log
## staged
- none
## v0.4.1
- Adds `plot_network!` functions that return the figure instead of the graph; useful for generating plots in Pluto notebooks
- Fixes a bug in build_graph where one part of the graph builder conditional would not check to see if the nodes on either side of the edge were the same
## v0.4.0
- Updated backend to Vega.jl, removing Plots.jl
- Minimum Julia v1.3 required for Vega.jl
- Removed Travis-CI, switching to Github Actions
## v0.3.0
- Makes `build_network_graph` more agnostic to type of Infrastructure network being graphed
- Rename `build_graph_network` to `build_network_graph`
- Changes kwargs in functions
- Moves kwarg defaults to `src/core/options.jl`, and changes color defaults
- Changes type from `PowerModelsGraph` to `InfrastructureGraph`
- Removes `plot_load_blocks` and `build_graph_load_blocks` in favor of using kwarg `block_graph=true`
## v0.2.2
- Add additional compatible versions to dependencies
- Fix type enforcement for `load_color_range` (#8)
## v0.2.1
- Fix dependency issue for Julia < v1.3 of SpecialFunctions in Manifest.toml
## v0.2.0
- Support PowerModels v0.13 and PowerModelsDistribution v0.6.0
## v0.1.0
- Initial release with basic plotting tools and graph creation
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | docs | 1149 | # PowerModelsAnalytics.jl
 
Tools for the analysis and visualization of PowerModels data and results.
**BETA / IN ACTIVE DEVELOPMENT**: Features will change quickly and without warning
## Using PowerModelsAnalytics
To use the `plot_network` function, for example, first load a network case (e.g. with `parse_file` from PowerModels or PowerModelsDistribution), and then
```julia
using PowerModelsAnalytics
plot_network(network_case)
```
should display the network plot, or e.g.
```julia
plot_network(network_case; filename="network.pdf")
```
will save a network plot to a file using Vega.jl.
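To get the figure object back directly (useful e.g. in Pluto notebooks), the in-place variant returns the `Vega.VGSpec` instead of the graph:
```julia
fig = plot_network!(network_case)
```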
## Plotting
This package relies on Vega.jl for plotting. See the Vega [Documentation](https://vega.github.io/) for additional information about how to build new Specifications.
## License
This code is provided under a BSD license as part of the Multi-Infrastructure Control and Optimization Toolkit (MICOT) project, LA-CC-13-108.
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | docs | 822 | # Building the Documentation for PowerModelsAnalytics.jl
## Installation
We rely on [Documenter.jl](https://github.com/JuliaDocs/Documenter.jl). To install it, run the following command in a julia session:
```julia
Pkg.add("Documenter")
```
## Building the Docs
To preview the html output of the documents, run the following command:
```julia
julia --color=yes make.jl
```
You can then view the documents in `build/index.html`.
**Warning**: Do not `git commit` the contents of build (or any other content generated by Documenter) to your repository's master branch. This helps to avoid including unnecessary changes for anyone reviewing commits that happen to include documentation changes.
For further details, please read the [documentation for Documenter.jl](https://juliadocs.github.io/Documenter.jl/stable/). | PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | docs | 26 | # Developer Documentation
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | docs | 616 | # PowerModelsAnalytics.jl Documentation
```@meta
CurrentModule = PowerModelsAnalytics
```
## Overview
PowerModelsAnalytics.jl is a Julia package providing tools for the analysis and visualization of PowerModels.jl and PowerModelsDistribution.jl network data and results.
## Installation
The latest stable release of PowerModelsAnalytics can be installed using the Julia package manager with
```julia
Pkg.add("PowerModelsAnalytics")
```
For the current development version, "develop" this package with
```julia
Pkg.develop("PowerModelsAnalytics")
```
Test that the package works by running
```julia
Pkg.test("PowerModelsAnalytics")
```
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | docs | 85 | # PowerModelsAnalytics.jl Library
```@autodocs
Modules = [PowerModelsAnalytics]
```
| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"BSD-3-Clause"
] | 0.4.1 | e222c67fc3d63250e9af159857df8beab55c00bb | docs | 1892 | # Quick Start Guide
Once PowerModelsAnalytics.jl, Plots.jl, and a Plots.jl backend are installed (we use Plotly, which ships with Plots.jl, for this guide), and a network data file (e.g. `case5.m` in the PowerModels.jl package folder under `./test/data/matpower`) has been acquired, the network can be plotted with,
```julia
using PowerModels, PowerModelsAnalytics
using Plots
plotly()
data = PowerModels.parse_file("$(joinpath(dirname(pathof(PowerModels)), ".."))/test/data/matpower/case5.m")
plot_network(data)
```
## IJulia Example Output
```julia
using PowerModelsAnalytics
import PowerModels
import PowerModelsDistribution
import Ipopt
PowerModels.silence()
```
```julia
ENV["GRDIR"]=""
import Plots
Plots.gr()
```
Plots.GRBackend()
```julia
case5 = PowerModels.parse_file("$(joinpath(dirname(pathof(PowerModels)), ".."))/test/data/matpower/case5.m");
case24 = PowerModels.parse_file("$(joinpath(dirname(pathof(PowerModels)), ".."))/test/data/matpower/case24.m");
case_distribution = PowerModelsDistribution.parse_file("$(joinpath(dirname(pathof(PowerModelsDistribution)), ".."))/test/data/opendss/test2_master.dss");
```
```julia
graph = plot_network(case5;
node_size_limits=[10, 15],
edge_width_limits=[2, 3],
label_nodes=true,
fontsize=10,
plot_size=(600,600),
plot_dpi=100);
```

```julia
graph = plot_network(case24; aggregate_extra_nodes=true,
node_size_limits=[10, 15],
edge_width_limits=[2, 3],
label_nodes=true,
fontsize=10,
plot_size=(600,600),
plot_dpi=100);
```

```julia
graph = plot_network(case_distribution; aggregate_extra_nodes=true,
node_size_limits=[10, 15],
edge_width_limits=[2, 3],
label_nodes=true,
fontsize=10,
plot_size=(600,600),
plot_dpi=100);
```

| PowerModelsAnalytics | https://github.com/lanl-ansi/PowerModelsAnalytics.jl.git |
|
[
"MIT"
] | 0.5.7 | 4b618484bf94a52f02595cd73ac8a6417f4c0c70 | code | 2300 | # remove deps.jl if it exists, in case build.jl fails
isfile("deps.jl") && rm("deps.jl")
using Libdl
println("Pardiso library")
println("===============")
const LIBPARDISONAMES =
if Sys.iswindows()
[
"libpardiso.dll",
"libpardiso600-WIN-X86-64.dll",
]
elseif Sys.isapple()
[
"libpardiso.dylib",
"libpardiso600-MACOS-X86-64.dylib",
]
elseif Sys.islinux()
[
"libpardiso.so",
"libpardiso600-GNU800-X86-64.so",
]
else
error("unhandled OS")
end
println("Looking for libraries with name: ", join(LIBPARDISONAMES, ", "), ".")
PATH_PREFIXES = [@__DIR__; get(ENV, "JULIA_PARDISO", [])]
if !haskey(ENV, "JULIA_PARDISO")
println("INFO: use the `JULIA_PARDISO` environment variable to set a path to " *
"the folder where the Pardiso library is located")
end
function find_pardisolib()
found_lib = false
for prefix in PATH_PREFIXES
println("Looking in \"$(abspath(prefix))\" for libraries")
for libname in LIBPARDISONAMES
local path
try
path = joinpath(prefix, libname)
if isfile(path)
println(" found \"$(abspath(path))\", attempting to load it...")
Libdl.dlopen(path, Libdl.RTLD_GLOBAL)
println(" loaded successfully!")
global PARDISO_LIB_FOUND = true
return path, true
end
catch e
println(" failed to load due to:")
Base.showerror(stderr, e)
end
end
end
println("did not find libpardiso, assuming PARDISO 5/6 is not installed")
return "", false
end
pardisopath, found_pardisolib = find_pardisolib()
#################################################
println("\nMKL Pardiso")
println("=============")
function find_mklpardiso()
if haskey(ENV, "MKLROOT")
println("found MKLROOT environment variable, enabling local MKL")
return true
end
println("did not find MKLROOT environment variable, using MKL_jll")
return false
end
found_mklpardiso = find_mklpardiso()
open("deps.jl", "w") do f
print(f,
"""
const LOCAL_MKL_FOUND = $found_mklpardiso
const PARDISO_LIB_FOUND = $found_pardisolib
const PARDISO_PATH = raw"$pardisopath"
"""
)
end
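# Illustrative: to point the build at a custom Pardiso install, set `JULIA_PARDISO`
# before building (the path below is hypothetical):
#
#   ENV["JULIA_PARDISO"] = "/opt/pardiso/lib"
#   using Pkg; Pkg.build("Pardiso")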
| Pardiso | https://github.com/JuliaSparse/Pardiso.jl.git |
|
[
"MIT"
] | 0.5.7 | 4b618484bf94a52f02595cd73ac8a6417f4c0c70 | code | 2414 | # This is an example script demonstrating how PARDISO works on a
# medium-sized Hermitian positive definite matrix.
using Pardiso
using LinearAlgebra # for norm
using Printf
using SparseArrays
using Random
using Test
function example_hermitian_psd(solver=MKLPardisoSolver)
# Script parameters.
# -----------------
verbose = false
n = 100
lambda = 3
# Create the Hermitian positive definite matrix A and the vector b in the
# linear system Ax = b.
e = ones(n)
e2 = ones(n-1)
A = spdiagm(-1 => im*e2, 0 => lambda*e, 1 => -im*e2)
b = rand(n) + im * zeros(n)
# Initialize the PARDISO internal data structures.
ps = solver()
if verbose
set_msglvl!(ps, Pardiso.MESSAGE_LEVEL_ON)
end
# If we want, we could just solve the system right now.
# Pardiso.jl will automatically detect the correct matrix type,
# solve the system and free the data
X1 = solve(ps, A, b)
# We also show how to do this in incremental steps.
ps = solver()
# First set the matrix type to handle general complex
# hermitian positive definite matrices
set_matrixtype!(ps, Pardiso.COMPLEX_HERM_POSDEF)
# Initialize the default settings with the current matrix type
pardisoinit(ps)
# Remember that we pass in a CSC matrix to Pardiso, so need
# to set the transpose iparm option.
fix_iparm!(ps, :N)
# Get the correct matrix to be sent into the pardiso function.
# :N for normal matrix, :T for transpose, :C for conjugate
A_pardiso = get_matrix(ps, A, :N)
# Analyze the matrix and compute a symbolic factorization.
set_phase!(ps, Pardiso.ANALYSIS)
pardiso(ps, A_pardiso, b)
@printf("The factors have %d nonzero entries.\n", get_iparm(ps, 18))
# Compute the numeric factorization.
set_phase!(ps, Pardiso.NUM_FACT)
pardiso(ps, A_pardiso, b)
# Compute the solutions X using the symbolic factorization.
set_phase!(ps, Pardiso.SOLVE_ITERATIVE_REFINE)
x = similar(b) # Solution is stored in X
pardiso(ps, x, A_pardiso, b)
@printf("PARDISO performed %d iterative refinement steps.\n", get_iparm(ps, 7))
# Compute the residuals.
r = abs.(A*x - b)
@printf("The maximum residual for the solution is %0.3g.\n",maximum(r))
@test norm(r) < 1e-10
# Free the PARDISO data structures.
set_phase!(ps, Pardiso.RELEASE_ALL)
pardiso(ps)
end
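# To run (illustrative): the MKL-backed solver is the default argument above; pass
# `PardisoSolver` only if a licensed PARDISO 5/6 library was found at build time.
#
#   example_hermitian_psd()               # uses MKLPardisoSolver
#   example_hermitian_psd(PardisoSolver)  # requires libpardiso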
| Pardiso | https://github.com/JuliaSparse/Pardiso.jl.git |
|
[
"MIT"
] | 0.5.7 | 4b618484bf94a52f02595cd73ac8a6417f4c0c70 | code | 2714 | # This is an example script demonstrating how PARDISO works on a small,
# sparse, real symmetric matrix. It computes the m solutions X to the
# collection of linear systems
#
# A * X = B
#
# using the PARDISO solver, where A is a symmetric n x n matrix, B is an
# n x m matrix, and X is another n x m matrix.
using Pardiso
using SparseArrays
using Random
using Printf
using Test
function example_symmetric(solver=MKLPardisoSolver)
# Script parameters.
# -----------------
verbose = false
n = 4 # The number of equations.
m = 3 # The number of right-hand sides.
A = sparse([ 1. 0 -2 3
0 5 1 2
-2 1 4 -7
3 2 -7 5 ])
# Generate a random collection of right-hand sides.
B = rand(n,m)
# Initialize the PARDISO internal data structures.
ps = solver()
if verbose
set_msglvl!(ps, Pardiso.MESSAGE_LEVEL_ON)
end
# If we want, we could just solve the system right now.
# Pardiso.jl will automatically detect the correct matrix type,
# solve the system and free the data
X1 = solve(ps, A, B)
# We also show how to do this in incremental steps.
ps = solver()
# First set the matrix type to handle general real symmetric matrices
set_matrixtype!(ps, Pardiso.REAL_SYM_INDEF)
# Initialize the default settings with the current matrix type
pardisoinit(ps)
# Remember that we pass in a CSC matrix to Pardiso, so need
# to set the transpose iparm option.
fix_iparm!(ps, :N)
# Get the correct matrix to be sent into the pardiso function.
# :N for normal matrix, :T for transpose, :C for conjugate
A_pardiso = get_matrix(ps, A, :N)
# Analyze the matrix and compute a symbolic factorization.
set_phase!(ps, Pardiso.ANALYSIS)
set_perm!(ps, randperm(n))
pardiso(ps, A_pardiso, B)
@printf("The factors have %d nonzero entries.\n", get_iparm(ps, 18))
# Compute the numeric factorization.
set_phase!(ps, Pardiso.NUM_FACT)
pardiso(ps, A_pardiso, B)
@printf("The matrix has %d positive and %d negative eigenvalues.\n",
get_iparm(ps, 22), get_iparm(ps, 23))
# Compute the solutions X using the symbolic factorization.
set_phase!(ps, Pardiso.SOLVE_ITERATIVE_REFINE)
X = similar(B) # Solution is stored in X
pardiso(ps, X, A_pardiso, B)
@printf("PARDISO performed %d iterative refinement steps.\n", get_iparm(ps, 7))
# Compute the residuals.
R = maximum(abs.(A*X - B))
@printf("The maximum residual for the solution X is %0.3g.\n", R)
@test R < 1e-10
# Free the PARDISO data structures.
set_phase!(ps, Pardiso.RELEASE_ALL)
pardiso(ps)
end
| Pardiso | https://github.com/JuliaSparse/Pardiso.jl.git |
|
[
"MIT"
] | 0.5.7 | 4b618484bf94a52f02595cd73ac8a6417f4c0c70 | code | 2753 | # This is an example script demonstrating how PARDISO works on a small,
# sparse, real non-symmetric matrix. It computes the m solutions X to the
# collection of linear systems
#
# A * X = B
#
# using the PARDISO solver, where A is a non symmetric n x n matrix, B is an
# n x m matrix, and X is another n x m matrix.
using Pardiso
using SparseArrays
using Random
using Printf
using Test
function example_unsymmetric(solver=MKLPardisoSolver)
# Script parameters.
# -----------------
verbose = false
n = 4 # The number of equations.
m = 3 # The number of right-hand sides.
A = sparse([ 0. -2 3 0
-2 4 -4 1
-3 5 1 1
1 -3 0 2])
# Generate a random collection of right-hand sides.
B = rand(n,m)
# Initialize the PARDISO internal data structures.
ps = solver()
if verbose
set_msglvl!(ps, Pardiso.MESSAGE_LEVEL_ON)
end
# If we want, we could just solve the system right now.
# Pardiso.jl will automatically detect the correct matrix type,
# solve the system and free the data
X1 = solve(ps, A, B)
# We also show how to do this in incremental steps.
ps = solver()
# First set the matrix type to handle general real unsymmetric matrices
set_matrixtype!(ps, Pardiso.REAL_NONSYM)
# Initialize the default settings with the current matrix type
pardisoinit(ps)
# Remember that we pass in a CSC matrix to Pardiso, so need
# to set the transpose iparm option.
fix_iparm!(ps, :N)
# Get the correct matrix to be sent into the pardiso function.
# :N for normal matrix, :T for transpose, :C for conjugate
A_pardiso = get_matrix(ps, A, :N)
# Analyze the matrix and compute a symbolic factorization.
set_phase!(ps, Pardiso.ANALYSIS)
set_perm!(ps, randperm(n))
pardiso(ps, A_pardiso, B)
@printf("The factors have %d nonzero entries.\n", get_iparm(ps, 18))
# Compute the numeric factorization.
set_phase!(ps, Pardiso.NUM_FACT)
pardiso(ps, A_pardiso, B)
@printf("The matrix has %d positive and %d negative eigenvalues.\n",
get_iparm(ps, 22), get_iparm(ps, 23))
# Compute the solutions X using the symbolic factorization.
set_phase!(ps, Pardiso.SOLVE_ITERATIVE_REFINE)
X = similar(B) # Solution is stored in X
pardiso(ps, X, A_pardiso, B)
@printf("PARDISO performed %d iterative refinement steps.\n", get_iparm(ps, 7))
# Compute the residuals.
R = maximum(abs.(A*X - B))
@printf("The maximum residual for the solution X is %0.3g.\n", R)
@test R < 1e-12
# Free the PARDISO data structures.
set_phase!(ps, Pardiso.RELEASE_ALL)
pardiso(ps)
end
| Pardiso | https://github.com/JuliaSparse/Pardiso.jl.git |
|
[
"MIT"
] | 0.5.7 | 4b618484bf94a52f02595cd73ac8a6417f4c0c70 | code | 15849 | __precompile__()
module Pardiso
if !isfile(joinpath(@__DIR__, "..", "deps", "deps.jl"))
error("""please run Pkg.build("Pardiso") before loading the package""")
end
include("../deps/deps.jl")
function show_build_log()
logfile = joinpath(@__DIR__, "..", "deps", "build.log")
if !isfile(logfile)
error("no log file found")
else
println(read(logfile, String))
end
end
using Libdl
using SparseArrays
using LinearAlgebra
import Base.show
if !LOCAL_MKL_FOUND
import MKL_jll
end
MKL_LOAD_FAILED = false
mkl_is_available() = (LOCAL_MKL_FOUND || MKL_jll.is_available()) && !MKL_LOAD_FAILED
if LinearAlgebra.BLAS.vendor() === :mkl && LinearAlgebra.BlasInt == Int64
const MklInt = Int64
const PARDISO_FUNC = :pardiso_64
else
const MklInt = Int32
const PARDISO_FUNC = :pardiso
end
const libmkl_rt = Ref{String}("")
export PardisoSolver, MKLPardisoSolver
export set_iparm!, set_dparm!, set_matrixtype!, set_solver!, set_phase!, set_msglvl!, set_nprocs!
export get_iparm, get_iparms, get_dparm, get_dparms
export get_matrixtype, get_solver, get_phase, get_msglvl, get_nprocs
export set_maxfct!, set_perm!, set_mnum!
export get_maxfct, get_perm, get_mnum
export checkmatrix, checkvec, printstats, pardisoinit, pardiso
export solve, solve!
export get_matrix
export schur_complement, pardisogetschur
export fix_iparm!
export mkl_is_available, panua_is_available
struct PardisoException <: Exception
info::String
end
struct PardisoPosDefException <: Exception
info::String
end
Base.showerror(io::IO, e::Union{PardisoException,PardisoPosDefException}) = print(io, e.info);
const PardisoNumTypes = Union{Float64,ComplexF64}
abstract type AbstractPardisoSolver end
function collect_gfortran_lib_candidates(main_ver)
candidates = String[]
if Sys.isapple()
homebrew_gcc = "/usr/local/Cellar/gcc/"
isdir(homebrew_gcc) || return String[]
vers = readdir(homebrew_gcc)
filter!(x -> startswith(x, "$main_ver."), vers)
for v in vers
push!(candidates, joinpath(homebrew_gcc, v, "lib/gcc/$main_ver/"))
end
elseif Sys.islinux()
gcc_path = "/usr/lib/gcc/x86_64-linux-gnu/"
isdir(gcc_path) || return String[]
vers = readdir(gcc_path)
filter!(x -> startswith(x, "$main_ver.") || isequal(x, "$main_ver"), vers)
for v in vers
push!(candidates, joinpath(gcc_path, v))
end
end
return candidates
end
load_lib_fortran(lib::String, v::Int) = load_lib_fortran(lib, [v])
function load_lib_fortran(lib::String, vs::Vector{Int})
candidates = String[]
for v in vs
append!(candidates, collect_gfortran_lib_candidates(v))
end
path = Libdl.find_library(lib, candidates)
isempty(path) && (path = lib)
Libdl.dlopen(path * "." * Libdl.dlext, Libdl.RTLD_GLOBAL)
end
# Pardiso
const init = Ref{Ptr}()
const pardiso_f = Ref{Ptr}()
const pardiso_chkmatrix = Ref{Ptr}()
const pardiso_chkmatrix_z = Ref{Ptr}()
const pardiso_printstats = Ref{Ptr}()
const pardiso_printstats_z = Ref{Ptr}()
const pardiso_chkvec = Ref{Ptr}()
const pardiso_chkvec_z = Ref{Ptr}()
const pardiso_get_schur_f = Ref{Ptr}()
const PARDISO_LOADED = Ref(false)
panua_is_available() = PARDISO_LOADED[]
function __init__()
global MKL_LOAD_FAILED
if LOCAL_MKL_FOUND
if Sys.iswindows()
libmkl_rt[] = "mkl_rt"
elseif Sys.isapple()
libmkl_rt[] = "@rpath/libmkl_rt.dylib"
else
libmkl_rt[] = "libmkl_rt"
end
elseif MKL_jll.is_available()
libmkl_rt[] = MKL_jll.libmkl_rt_path
end
if !haskey(ENV, "PARDISOLICMESSAGE")
ENV["PARDISOLICMESSAGE"] = 1
end
if LOCAL_MKL_FOUND && !haskey(ENV, "MKLROOT")
@warn "MKLROOT not set, MKL Pardiso solver will not be functional"
end
if mkl_is_available()
try
libmklpardiso = Libdl.dlopen(libmkl_rt[])
mklpardiso_f = Libdl.dlsym(libmklpardiso, "pardiso")
catch e
@error("MKL Pardiso did not manage to load, error thrown was: $(sprint(showerror, e))")
MKL_LOAD_FAILED = true
end
end
# This is apparently needed for MKL to not get stuck on 1 thread when
# libpardiso is loaded in the block below...
if libmkl_rt[] !== ""
get_nprocs_mkl()
end
if PARDISO_LIB_FOUND
try
libpardiso = Libdl.dlopen(PARDISO_PATH)
init[] = Libdl.dlsym(libpardiso, "pardisoinit")
pardiso_f[] = Libdl.dlsym(libpardiso, "pardiso")
pardiso_chkmatrix[] = Libdl.dlsym(libpardiso, "pardiso_chkmatrix")
pardiso_chkmatrix_z[] = Libdl.dlsym(libpardiso, "pardiso_chkmatrix_z")
pardiso_printstats[] = Libdl.dlsym(libpardiso, "pardiso_printstats")
pardiso_printstats_z[] = Libdl.dlsym(libpardiso, "pardiso_printstats_z")
pardiso_chkvec[] = Libdl.dlsym(libpardiso, "pardiso_chkvec")
pardiso_chkvec_z[] = Libdl.dlsym(libpardiso, "pardiso_chkvec_z")
pardiso_get_schur_f[] = Libdl.dlsym(libpardiso, "pardiso_get_schur")
if Sys.isunix()
gfortran_v = [8, 9]
for lib in ("libgfortran", "libgomp")
load_lib_fortran(lib, gfortran_v)
end
end
            # The Windows Pardiso lib comes with BLAS + LAPACK prebaked, but the UNIX one does not, so we open them here
            # if MKL is not loaded
if Sys.isunix()
ptr = C_NULL
for l in ("libblas", "libblas.so.3")
ptr = Libdl.dlopen_e(l, Libdl.RTLD_GLOBAL)
if ptr !== C_NULL
break
end
end
if ptr == C_NULL
error("could not load blas library")
end
end
PARDISO_LOADED[] = true
catch e
@error("Pardiso did not manage to load, error thrown was: $(sprint(showerror, e))")
end
end
end
include("enums.jl")
include("project_pardiso.jl")
include("mkl_pardiso.jl")
# Getters and setters
set_matrixtype!(ps::AbstractPardisoSolver, v::Int) = set_matrixtype!(ps, MatrixType(v))
function set_matrixtype!(ps::AbstractPardisoSolver, v::MatrixType)
ps.mtype = v
end
get_matrixtype(ps::AbstractPardisoSolver) = ps.mtype
get_iparm(ps::AbstractPardisoSolver, i::Integer) = ps.iparm[i]
get_iparms(ps::AbstractPardisoSolver) = ps.iparm
set_iparm!(ps::AbstractPardisoSolver, i::Integer, v::Integer) = ps.iparm[i] = v
get_mnum(ps::AbstractPardisoSolver) = ps.mnum
set_mnum!(ps::AbstractPardisoSolver, mnum::Integer) = ps.mnum = mnum
get_maxfct(ps::AbstractPardisoSolver) = ps.maxfct
set_maxfct!(ps::AbstractPardisoSolver, maxfct::Integer) = ps.maxfct = maxfct
get_perm(ps::AbstractPardisoSolver) = ps.perm
set_perm!(ps::AbstractPardisoSolver, perm::Vector{T}) where {T <: Integer} = ps.perm = convert(Vector{Int32}, perm)
get_phase(ps::AbstractPardisoSolver) = ps.phase
set_phase!(ps::AbstractPardisoSolver, v::Int) = set_phase!(ps, Phase(v))
function set_phase!(ps::AbstractPardisoSolver, v::Phase)
ps.phase = v
end
get_msglvl(ps::AbstractPardisoSolver) = ps.msglvl
set_msglvl!(ps::AbstractPardisoSolver, v::Integer) = set_msglvl!(ps, MessageLevel(v))
function set_msglvl!(ps::AbstractPardisoSolver, v::MessageLevel)
ps.msglvl = v
end
function pardisoinit(ps::AbstractPardisoSolver)
ccall_pardisoinit(ps)
return
end
function solve(ps::AbstractPardisoSolver, A::SparseMatrixCSC{Tv,Ti},
B::StridedVecOrMat{Tv}, T::Symbol=:N) where {Ti, Tv <: PardisoNumTypes}
X = copy(B)
solve!(ps, X, A, B, T)
return X
end
function fix_iparm!(ps::AbstractPardisoSolver, T::Symbol)
# We need to set the transpose flag in PARDISO when we DON'T want
# a transpose in Julia because we are passing a CSC formatted
# matrix to PARDISO which expects a CSR matrix.
if T === :N
if isa(ps, PardisoSolver)
set_iparm!(ps, 12, 1)
else
# iparm[12] = 1 is complex conjugate in MKL
set_iparm!(ps, 12, 2)
end
elseif T === :C || T === :T
set_iparm!(ps, 12, 0)
else
throw(ArgumentError("only :T, :N and :C, are valid transpose symbols"))
end
end
function solve!(ps::AbstractPardisoSolver, X::StridedVecOrMat{Tv},
A::SparseMatrixCSC{Tv,Ti}, B::StridedVecOrMat{Tv},
T::Symbol=:N) where {Ti, Tv <: PardisoNumTypes}
set_phase!(ps, ANALYSIS_NUM_FACT_SOLVE_REFINE)
# This is the heuristics for choosing what matrix type to use
##################################################################
# - If hermitian try to solve with symmetric positive definite.
# - On pos def exception, solve instead with symmetric indefinite.
# - If complex and symmetric, solve with symmetric complex solver
# - Else solve as unsymmetric.
if ishermitian(A)
eltype(A) == Float64 ? set_matrixtype!(ps, REAL_SYM_POSDEF) : set_matrixtype!(ps, COMPLEX_HERM_POSDEF)
pardisoinit(ps)
fix_iparm!(ps, T)
try
pardiso(ps, X, get_matrix(ps, A, T), B)
catch e
set_phase!(ps, RELEASE_ALL)
pardiso(ps, X, A, B)
set_phase!(ps, ANALYSIS_NUM_FACT_SOLVE_REFINE)
if !isa(e, PardisoPosDefException)
rethrow()
end
eltype(A) == Float64 ? set_matrixtype!(ps, REAL_SYM_INDEF) : set_matrixtype!(ps, COMPLEX_HERM_INDEF)
pardisoinit(ps)
fix_iparm!(ps, T)
pardiso(ps, X, get_matrix(ps, A, T), B)
end
elseif issymmetric(A)
set_matrixtype!(ps, COMPLEX_SYM)
pardisoinit(ps)
fix_iparm!(ps, T)
pardiso(ps, X, get_matrix(ps, A, T), B)
else
eltype(A) == Float64 ? set_matrixtype!(ps, REAL_NONSYM) : set_matrixtype!(ps, COMPLEX_NONSYM)
pardisoinit(ps)
fix_iparm!(ps, T)
pardiso(ps, X, get_matrix(ps, A, T), B)
end
# Release memory, TODO: We are running the convert on IA and JA here
# again which is unnecessary.
set_phase!(ps, RELEASE_ALL)
pardiso(ps, X, A, B)
set_phase!(ps, ANALYSIS_NUM_FACT_SOLVE_REFINE)
return X
end
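# Return the matrix PARDISO expects for the solver's matrix type and the transpose
# symbol T: the lower triangle and/or the complex conjugate, depending on the type.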
function get_matrix(ps::AbstractPardisoSolver, A, T)
mtype = get_matrixtype(ps)
if isposornegdef(mtype)
if ps isa MKLPardisoSolver
T == :C && return conj(tril(A))
return tril(A)
elseif ps isa PardisoSolver
T == :T && return tril(A)
return conj(tril(A))
end
end
if !issymmetric(mtype)
T == :C && return conj(A)
return A
end
if mtype == COMPLEX_SYM
T == :C && return conj(tril(A))
return tril(A)
end
error("Unhandled matrix type")
end
function pardiso(ps::AbstractPardisoSolver, X::StridedVecOrMat{Tv}, A::SparseMatrixCSC{Tv,Ti},
B::StridedVecOrMat{Tv}) where {Ti, Tv <: PardisoNumTypes}
if length(X) != 0
dim_check(X, A, B)
end
if Tv <: Complex && isreal(get_matrixtype(ps))
throw(ErrorException(string("input matrix is complex while PardisoSolver ",
"has a real matrix type set: $(get_matrixtype(ps))")))
end
if Tv <: Real && !isreal(get_matrixtype(ps))
throw(ErrorException(string("input matrix is real while PardisoSolver ",
"has a complex matrix type set: $(get_matrixtype(ps))")))
end
N = size(A, 2)
resize!(ps.perm, size(B, 1))
NRHS = size(B, 2)
ccall_pardiso(ps, N, A.nzval, A.colptr, A.rowval, NRHS, B, X)
end
pardiso(ps::AbstractPardisoSolver) = ccall_pardiso(ps, Int32(0), Float64[], Int32[], Int32[], Int32(0), Float64[], Float64[])
function pardiso(ps::AbstractPardisoSolver, A::SparseMatrixCSC{Tv,Ti}, B::StridedVecOrMat{Tv}) where {Ti, Tv <: PardisoNumTypes}
pardiso(ps, Tv[], A, B)
end
# populated rows of x determine the Schur complement block
"""
schur_complement(ps,A,x) -> S
Schur complement `S` of the submatrix defined by the nonzero entries of `x` in matrix `A`.
If `n = nnz(x)`, then `S` is `n`-by-`n`.
WARNING: for complex `A`, this computation appears to be unstable, and becomes worse as the number of nonzero elements in `A` decreases
"""
schur_complement(ps::AbstractPardisoSolver,A,x::SparseVector,T::Symbol=:N) = _schur_complement_permuted(ps,A,x.nzind,T)
schur_complement(ps::AbstractPardisoSolver,A,x::SparseMatrixCSC,T::Symbol=:N) = _schur_complement_permuted(ps,A,unique!(sort!(x.rowval)),T)
# permute A and then compute complement of lower right-hand `n`-by-`n` block
function _schur_complement_permuted(ps,A,rows,T::Symbol)
P = sparse(vcat(setdiff(1:A.n,rows),rows),1:A.n,1,size(A)...)
schur_complement(ps,P'*A*P,length(rows),T)
end
# an integer argument instead selects the last n rows and columns as the Schur complement block
"""
schur_complement(ps,M,n) -> S
Schur complement `S` of the upper-left block in `M`, where `n` is the size of the lower-right block (and therefore also of the Schur complement)
WARNING: for complex `M`, this computation appears to be unstable, and becomes worse as the number of nonzero elements in `M` decreases
"""
function schur_complement(ps::AbstractPardisoSolver,A::SparseMatrixCSC{Tv},n::Integer,T::Symbol=:N) where Tv <: PardisoNumTypes
    n ≥ size(A,1) && throw(ErrorException("complement block size n=$n ≥ A.m=$(A.m)"))
# Tv<:Complex ? (@warn "unstable for complex types, unknown why") : nothing
pardisoinit(ps)
original_phase = get_phase(ps)
original_iparms = get_iparms(ps)
set_iparm!(ps,1,1) # use custom IPARM
set_iparm!(ps,38,n) # set Schur complement block size to n
set_phase!(ps,12) # analyze and factorize
B = Matrix{Tv}(undef,size(A,1),0) # dummy array to feed to pardiso
# transpose via IPARM(12) doesn't work at factorize step (only on entry to solve step)
if T==:N
M = permutedims(A)
set_iparm!(ps, 12, 1)
elseif T == :C
M = conj(permutedims(A))
set_iparm!(ps, 12, 0)
elseif T == :T
M = A
set_iparm!(ps, 12, 0)
else
throw(ArgumentError("only :T, :N and :C, are valid transpose symbols"))
end
pardiso(ps,B,M,B)
S = pardisogetschur(ps) # get schur complement matrix
set_phase!(ps, RELEASE_ALL)
pardiso(ps, B, M, B)
set_phase!(ps, original_phase) # reset phase to user setting
for i ∈ eachindex(original_iparms)
set_iparm!(ps,i,original_iparms[i])
end
return S
end
"""
pardisogetschur(ps) -> S
retrieve schur complement from PardisoSolver `ps`.
"""
function pardisogetschur(ps::AbstractPardisoSolver)
nnzschur = get_iparm(ps, 39)
nschur = get_iparm(ps,38)
T = isreal(get_matrixtype(ps)) ? Float64 : ComplexF64
if nnzschur==0
return spzeros(T,nschur,nschur)
else
S = Vector{T}(undef,nnzschur)
IS = Vector{Int32}(undef,nschur)
JS = Vector{Int32}(undef,nnzschur)
ccall_pardiso_get_schur(ps,S,IS,JS)
IS = pushfirst!(IS,Int32(1)) # some issue with IS (nschur+1 doesn't seem to work)
S = permutedims(SparseMatrixCSC(nschur,nschur,IS,JS,S)) # really constructing CSR and then transposing
return S
end
end
function dim_check(X, A, B)
    size(X) == size(B) || throw(DimensionMismatch(string("solution has size $(size(X)), ",
                            "RHS has size $(size(B)).")))
size(A, 1) == size(B, 1) || throw(DimensionMismatch(string("matrix has $(size(A,1)) ",
"rows, RHS has $(size(B,1)) rows.")))
size(B, 1) == stride(B, 2) || throw(DimensionMismatch(
string("Only memory-contiguous RHS supported")))
end
end # module
| Pardiso | https://github.com/JuliaSparse/Pardiso.jl.git |
|
[
"MIT"
] | 0.5.7 | 4b618484bf94a52f02595cd73ac8a6417f4c0c70 | code | 3524 | # Matrix type
@enum(MatrixType::Int32,
REAL_SYM = 1,
REAL_SYM_POSDEF = 2,
REAL_SYM_INDEF = -2,
COMPLEX_STRUCT_SYM = 3,
COMPLEX_HERM_POSDEF = 4,
COMPLEX_HERM_INDEF = -4,
COMPLEX_SYM = 6,
REAL_NONSYM = 11,
COMPLEX_NONSYM = 13,
)
Base.isreal(v::MatrixType) = v in (REAL_SYM, REAL_SYM_POSDEF, REAL_SYM_INDEF, REAL_NONSYM)
LinearAlgebra.issymmetric(v::MatrixType) = v in (REAL_SYM, REAL_SYM_POSDEF, REAL_SYM_INDEF, COMPLEX_STRUCT_SYM,
COMPLEX_HERM_POSDEF, COMPLEX_HERM_INDEF, COMPLEX_SYM)
LinearAlgebra.ishermitian(v::MatrixType) = v in (REAL_SYM_POSDEF, COMPLEX_HERM_POSDEF, COMPLEX_HERM_INDEF)
isposornegdef(v::MatrixType) = v in (REAL_SYM_POSDEF, REAL_SYM_INDEF, COMPLEX_HERM_POSDEF, COMPLEX_HERM_INDEF)
const MATRIX_STRING = Dict{MatrixType, String}(
REAL_SYM => "Real structurally symmetric",
REAL_SYM_POSDEF => "Real symmetric positive definite",
REAL_SYM_INDEF => "Real symmetric indefinite",
COMPLEX_STRUCT_SYM => "Complex structurally symmetric",
    COMPLEX_HERM_POSDEF => "Complex Hermitian positive definite",
COMPLEX_HERM_INDEF => "Complex Hermitian indefinite",
COMPLEX_SYM => "Complex symmetric",
REAL_NONSYM => "Real nonsymmetric",
COMPLEX_NONSYM => "Complex nonsymmetric"
)
const REAL_MATRIX_TYPES = [REAL_SYM, REAL_SYM_POSDEF, REAL_SYM_INDEF, REAL_NONSYM]
const COMPLEX_MATRIX_TYPES = [COMPLEX_STRUCT_SYM, COMPLEX_HERM_POSDEF, COMPLEX_HERM_INDEF, COMPLEX_NONSYM]
# Messages
@enum(MessageLevel::Int32,
MESSAGE_LEVEL_OFF = 0,
MESSAGE_LEVEL_ON = 1
)
# Solver
@enum(Solver::Int32,
DIRECT_SOLVER = 0,
ITERATIVE_SOLVER = 1
)
const SOLVER_STRING = Dict{Solver, String}(
DIRECT_SOLVER => "Direct solver",
ITERATIVE_SOLVER => "Iterative solver"
)
# Phase
@enum(Phase::Int32,
ANALYSIS = 11,
ANALYSIS_NUM_FACT = 12,
ANALYSIS_NUM_FACT_SOLVE_REFINE = 13,
NUM_FACT = 22,
SELECTED_INVERSION = -22,
NUM_FACT_SOLVE_REFINE = 23,
SOLVE_ITERATIVE_REFINE = 33,
SOLVE_ITERATIVE_REFINE_ONLY_FORWARD = 331,
SOLVE_ITERATIVE_REFINE_ONLY_DIAG = 332,
SOLVE_ITERATIVE_REFINE_ONLY_BACKWARD = 333,
RELEASE_LU_MNUM = 0,
RELEASE_ALL = -1
)
const PHASE_STRING = Dict{Phase, String}(
ANALYSIS => "Analysis",
ANALYSIS_NUM_FACT => "Analysis, numerical factorization",
ANALYSIS_NUM_FACT_SOLVE_REFINE => "Analysis, numerical factorization, solve, iterative refinement",
NUM_FACT => "Numerical factorization",
SELECTED_INVERSION => "Selected Inversion",
NUM_FACT_SOLVE_REFINE => "Numerical factorization, solve, iterative refinement",
SOLVE_ITERATIVE_REFINE => "Solve, iterative refinement",
RELEASE_LU_MNUM => "Release internal memory for L and U matrix number MNUM",
RELEASE_ALL => "Release all internal memory for all matrices",
SOLVE_ITERATIVE_REFINE_ONLY_FORWARD => "like phase=33, but only forward substitution",
SOLVE_ITERATIVE_REFINE_ONLY_DIAG => "like phase=33, but only diagonal substitution (if available)",
SOLVE_ITERATIVE_REFINE_ONLY_BACKWARD => "like phase=33, but only backward substitution",
)
| Pardiso | https://github.com/JuliaSparse/Pardiso.jl.git |
|
[
"MIT"
] | 0.5.7 | 4b618484bf94a52f02595cd73ac8a6417f4c0c70 | code | 3658 | const MKL_DOMAIN_PARDISO = Int32(4)
mutable struct MKLPardisoSolver <: AbstractPardisoSolver
pt::Vector{Int}
iparm::Vector{MklInt}
mtype::MatrixType
solver::Solver
phase::Phase
msglvl::MessageLevel
maxfct::MklInt
mnum::MklInt
perm::Vector{MklInt}
end
function MKLPardisoSolver()
if !(mkl_is_available())
error("MKL is not available")
end
pt = zeros(Int, 64)
iparm = zeros(MklInt, 64)
mtype = REAL_NONSYM
solver = DIRECT_SOLVER
phase = ANALYSIS_NUM_FACT_SOLVE_REFINE
msglvl = MESSAGE_LEVEL_OFF
mnum = MklInt(1)
maxfct = MklInt(1)
perm = MklInt[]
ps = MKLPardisoSolver(pt, iparm, mtype, solver,
phase, msglvl, maxfct, mnum, perm)
return ps
end
show(io::IO, ps::MKLPardisoSolver) = print(io, string("$MKLPardisoSolver:\n",
"\tMatrix type: $(MATRIX_STRING[get_matrixtype(ps)])\n",
"\tPhase: $(PHASE_STRING[get_phase(ps)])"))
set_nprocs!(ps::MKLPardisoSolver, n::Integer) = set_nprocs_mkl!(n)
set_nprocs_mkl!(n::Integer) =
ccall((:mkl_domain_set_num_threads, libmkl_rt[]), Cvoid, (Ptr{Int32}, Ptr{Int32}), Ref((Int32(n))), Ref(MKL_DOMAIN_PARDISO))
get_nprocs(ps::MKLPardisoSolver) = get_nprocs_mkl()
get_nprocs_mkl() =
ccall((:mkl_domain_get_max_threads, libmkl_rt[]), Int32, (Ptr{Int32},), Ref(MKL_DOMAIN_PARDISO))
valid_phases(ps::MKLPardisoSolver) = keys(MKL_PHASES)
phases(ps::MKLPardisoSolver) = MKL_PHASES
function ccall_pardisoinit(ps::MKLPardisoSolver)
ERR = Ref{MklInt}(0)
ccall((:pardisoinit, libmkl_rt[]), Cvoid,
(Ptr{Int}, Ptr{MklInt}, Ptr{MklInt}),
ps.pt, Ref(MklInt(ps.mtype)), ps.iparm)
check_error(ps, ERR[])
end
function ccall_pardiso(ps::MKLPardisoSolver, N, nzval::Vector{Tv}, colptr, rowval,
NRHS, B::StridedVecOrMat{Tv}, X::StridedVecOrMat{Tv}) where {Tv}
N = MklInt(N)
colptr = convert(Vector{MklInt}, colptr)
rowval = convert(Vector{MklInt}, rowval)
resize!(ps.perm, size(B, 1))
NRHS = MklInt(NRHS)
ERR = Ref{MklInt}(0)
ccall((PARDISO_FUNC, libmkl_rt[]), Cvoid,
(Ptr{Int}, Ptr{MklInt}, Ptr{MklInt}, Ptr{MklInt}, Ptr{MklInt},
Ptr{MklInt}, Ptr{Tv}, Ptr{MklInt}, Ptr{MklInt}, Ptr{MklInt},
Ptr{MklInt}, Ptr{MklInt}, Ptr{MklInt}, Ptr{Tv}, Ptr{Tv},
Ptr{MklInt}),
ps.pt, Ref(ps.maxfct), Ref(ps.mnum), Ref(MklInt(ps.mtype)), Ref(MklInt(ps.phase)),
Ref(N), nzval, colptr, rowval, ps.perm,
Ref(NRHS), ps.iparm, Ref(MklInt(ps.msglvl)), B, X,
ERR)
check_error(ps, ERR[])
end
function check_error(ps::MKLPardisoSolver, err::Integer)
err != -1 || throw(PardisoException("Input inconsistent."))
err != -2 || throw(PardisoException("Not enough memory."))
err != -3 || throw(PardisoException("Reordering problem."))
err != -4 || throw(PardisoPosDefException("Zero pivot, numerical fact. or iterative refinement problem."))
err != -5 || throw(PardisoException("Unclassified (internal) error."))
err != -6 || throw(PardisoException("Preordering failed (matrix types 11, 13 only)."))
err != -7 || throw(PardisoException("Diagonal matrix is singular"))
err != -8 || throw(PardisoException("32-bit integer overflow problem."))
err != -9 || throw(PardisoException("Not enough memory for OOC."))
err != -10 || throw(PardisoException("Error opening OOC files."))
err != -11 || throw(PardisoException("Read/write error with OOC files."))
err != -12 || throw(PardisoException("pardiso_64 called from 32-bit library"))
return
end
| Pardiso | https://github.com/JuliaSparse/Pardiso.jl.git |
|
[
"MIT"
] | 0.5.7 | 4b618484bf94a52f02595cd73ac8a6417f4c0c70 | code | 6641 | mutable struct PardisoSolver <: AbstractPardisoSolver
pt::Vector{Int}
iparm::Vector{Int32}
dparm::Vector{Float64}
mtype::MatrixType
solver::Solver
phase::Phase
msglvl::MessageLevel
maxfct::Int32
mnum::Int32
perm::Vector{Int32}
end
function PardisoSolver()
if !panua_is_available()
error("Panua pardiso library was not loaded")
end
pt = zeros(Int, 64)
iparm = zeros(Int32, 64)
dparm = zeros(Float64, 64)
mtype = REAL_NONSYM
solver = DIRECT_SOLVER
phase = ANALYSIS_NUM_FACT_SOLVE_REFINE
msglvl = MESSAGE_LEVEL_OFF
    # Set the number of processors to half of Sys.CPU_THREADS unless "OMP_NUM_THREADS" is set
if haskey(ENV, "OMP_NUM_THREADS")
iparm[3] = parse(Int, ENV["OMP_NUM_THREADS"])
else
# Assume 2 threads per core
iparm[3] = max(div(Sys.CPU_THREADS, 2), 1)
end
mnum = 1
maxfct = 1
perm = Int32[]
ps = PardisoSolver(pt, iparm, dparm, mtype, solver,
phase, msglvl, maxfct, mnum, perm)
return ps
end
show(io::IO, ps::PardisoSolver) = print(io, string("$PardisoSolver:\n",
"\tSolver: $(SOLVER_STRING[get_solver(ps)])\n",
"\tMatrix type: $(MATRIX_STRING[get_matrixtype(ps)])\n",
"\tPhase: $(PHASE_STRING[get_phase(ps)])\n",
"\tNum processors: $(get_nprocs(ps))"))
phases(ps::PardisoSolver) = PHASES
set_transposed(ps::PardisoSolver, t::Bool) = t ? set_iparm!(ps, 12, 1) : set_iparm!(ps, 12, 0)
get_dparm(ps::PardisoSolver, i::Integer) = ps.dparm[i]
get_dparms(ps::PardisoSolver) = ps.dparm
set_dparm!(ps::PardisoSolver, i::Integer, v::AbstractFloat) = ps.dparm[i] = v
get_nprocs(ps::PardisoSolver) = ps.iparm[3]
set_solver!(ps::PardisoSolver, v::Int) = set_solver!(ps, Solver(v))
function set_solver!(ps::PardisoSolver, v::Solver)
ps.solver = v
end
get_solver(ps::PardisoSolver) = ps.solver
@inline function ccall_pardisoinit(ps::PardisoSolver)
ERR = Ref{Int32}(0)
ccall(init[], Cvoid,
(Ptr{Int}, Ptr{Int32}, Ptr{Int32},
Ptr{Int32}, Ptr{Float64}, Ptr{Int32}),
ps.pt, Ref(Int32(ps.mtype)), Ref(Int32(ps.solver)), ps.iparm, ps.dparm, ERR)
check_error(ps, ERR[])
end
@inline function ccall_pardiso(ps::PardisoSolver, N::Integer, nzval::Vector{Tv},
colptr, rowval, NRHS::Integer, B::StridedVecOrMat{Tv}, X::StridedVecOrMat{Tv}) where {Tv}
N = Int32(N)
colptr = convert(Vector{Int32}, colptr)
rowval = convert(Vector{Int32}, rowval)
resize!(ps.perm, size(B, 1))
NRHS = Int32(NRHS)
ERR = Ref{Int32}(0)
ccall(pardiso_f[], Cvoid,
(Ptr{Int}, Ptr{Int32}, Ptr{Int32}, Ptr{Int32}, Ptr{Int32},
Ptr{Int32}, Ptr{Tv}, Ptr{Int32}, Ptr{Int32}, Ptr{Int32},
Ptr{Int32}, Ptr{Int32}, Ptr{Int32}, Ptr{Tv}, Ptr{Tv},
Ptr{Int32}, Ptr{Float64}),
ps.pt, Ref(ps.maxfct), Ref(Int32(ps.mnum)), Ref(Int32(ps.mtype)), Ref(Int32(ps.phase)),
Ref(N), nzval, colptr, rowval, ps.perm,
Ref(NRHS), ps.iparm, Ref(Int32(ps.msglvl)), B, X,
ERR, ps.dparm)
check_error(ps, ERR[])
end
@inline function ccall_pardiso_get_schur(ps::PardisoSolver, S::Vector{Tv},
IS::Vector{Int32}, JS::Vector{Int32}) where Tv
ccall(pardiso_get_schur_f[], Cvoid,
(Ptr{Int}, Ptr{Int32}, Ptr{Int32}, Ptr{Int32}, Ptr{Tv},
Ptr{Int32}, Ptr{Int32}),
ps.pt, Ref(ps.maxfct), Ref(Int32(ps.mnum)), Ref(Int32(ps.mtype)),S,
IS, JS)
end
# Different checks
function printstats(ps::PardisoSolver, A::SparseMatrixCSC{Tv, Ti},
B::StridedVecOrMat{Tv}) where {Ti,Tv <: PardisoNumTypes}
N = Int32(size(A, 2))
AA = A.nzval
IA = convert(Vector{Int32}, A.colptr)
JA = convert(Vector{Int32}, A.rowval)
NRHS = Int32(size(B, 2))
ERR = Ref{Int32}(0)
if Tv <: Complex
f = pardiso_printstats_z[]
else
f = pardiso_printstats[]
end
ccall(f, Cvoid,
(Ptr{Int32}, Ptr{Int32}, Ptr{Tv}, Ptr{Int32},
Ptr{Int32}, Ptr{Int32}, Ptr{Tv},
Ptr{Int32}),
Ref(Int32(ps.mtype)), Ref(N), AA, IA, JA, Ref(NRHS), B, ERR)
check_error(ps, ERR[])
return
end
function checkmatrix(ps::PardisoSolver, A::SparseMatrixCSC{Tv, Ti}) where {Ti,Tv <: PardisoNumTypes}
N = Int32(size(A, 1))
AA = A.nzval
IA = convert(Vector{Int32}, A.colptr)
JA = convert(Vector{Int32}, A.rowval)
ERR = Ref{Int32}(0)
if Tv <: Complex
f = pardiso_chkmatrix_z[]
else
f = pardiso_chkmatrix[]
end
ccall(f, Cvoid,
(Ptr{Int32}, Ptr{Int32}, Ptr{Tv}, Ptr{Int32},
Ptr{Int32}, Ptr{Int32}),
Ref(Int32(ps.mtype)), Ref(N), AA, IA,
JA, ERR)
check_error(ps, ERR[])
return
end
function checkvec(ps, B::StridedVecOrMat{Tv}) where {Tv <: PardisoNumTypes}
N = Int32(size(B, 1))
NRHS = Int32(size(B, 2))
ERR = Int32[0]
if Tv <: Complex
f = pardiso_chkvec_z[]
else
f = pardiso_chkvec[]
end
ccall(f, Cvoid,
(Ptr{Int32}, Ptr{Int32}, Ptr{Tv}, Ptr{Int32}),
Ref(N), Ref(NRHS), B, ERR)
check_error(ps, ERR[])
return
end
function check_error(ps::PardisoSolver, err::Integer)
err != -1 || throw(PardisoException("Input inconsistent."))
err != -2 || throw(PardisoException("Not enough memory."))
err != -3 || throw(PardisoException("Reordering problem."))
err != -4 || throw(PardisoPosDefException("Zero pivot, numerical fact. or iterative refinement problem."))
err != -5 || throw(PardisoException("Unclassified (internal) error."))
err != -6 || throw(PardisoException("Preordering failed (matrix types 11, 13 only)."))
err != -7 || throw(PardisoException("Diagonal matrix problem."))
err != -8 || throw(PardisoException("32-bit integer overflow problem."))
err != -10 || throw(PardisoException("No license file pardiso.lic found."))
err != -11 || throw(PardisoException("License is expired."))
err != -12 || throw(PardisoException("Wrong username or hostname."))
err != -100|| throw(PardisoException("Reached maximum number of Krylov-subspace iteration in iterative solver."))
err != -101|| throw(PardisoException("No sufficient convergence in Krylov-subspace iteration within 25 iterations."))
err != -102|| throw(PardisoException("Error in Krylov-subspace iteration."))
err != -103|| throw(PardisoException("Break-Down in Krylov-subspace iteration."))
return
end
| Pardiso | https://github.com/JuliaSparse/Pardiso.jl.git |
|
[
"MIT"
] | 0.5.7 | 4b618484bf94a52f02595cd73ac8a6417f4c0c70 | code | 5532 | ENV["OMP_NUM_THREADS"] = 2
using Pkg
if Sys.isapple()
Pkg.add(name="MKL_jll"; version = "2023")
end
using Test
using Pardiso
using Random
using SparseArrays
using LinearAlgebra
Random.seed!(1234)
available_solvers = empty([Pardiso.AbstractPardisoSolver])
if Pardiso.mkl_is_available()
push!(available_solvers, MKLPardisoSolver)
else
@warn "Not testing MKL Pardiso solver"
end
if Pardiso.PARDISO_LOADED[]
push!(available_solvers, PardisoSolver)
else
@warn "Not testing project Pardiso solver"
end
@show Pardiso.MklInt
println("Testing ", available_solvers)
# Test solve and solve! for real and complex data
@testset "solving" begin
for pardiso_type in available_solvers
ps = pardiso_type()
for T in (Float64, ComplexF64)
A1 = sparse(rand(T, 10,10))
for B in (rand(T, 10, 2), view(rand(T, 10, 4), 1:10, 2:3))
X = similar(B)
# Test unsymmetric, herm indef, herm posdef and symmetric
for A in SparseMatrixCSC[A1, A1 + A1', A1'A1, transpose(A1) + A1]
solve!(ps, X, A, B)
@test X ≈ A\Matrix(B)
X = solve(ps, A, B)
@test X ≈ A\Matrix(B)
solve!(ps, X, A, B, :C)
@test X ≈ A'\Matrix(B)
X = solve(ps, A, B, :C)
@test X ≈ A'\Matrix(B)
solve!(ps, X, A, B, :T)
@test X ≈ copy(transpose(A))\Matrix(B)
X = solve(ps, A, B, :T)
@test X ≈ copy(transpose(A))\Matrix(B)
end
end
end
end
end #testset
include("../examples/examplesym.jl")
include("../examples/exampleunsym.jl")
include("../examples/exampleherm.jl")
for solver in available_solvers
example_symmetric(solver)
example_unsymmetric(solver)
example_hermitian_psd(solver)
end
if Pardiso.mkl_is_available()
if Sys.CPU_THREADS >= 4
@testset "procs" begin
ps = MKLPardisoSolver()
np = get_nprocs(ps)
set_nprocs!(ps, 2)
@test get_nprocs(ps) == 2
set_nprocs!(ps, np)
@test get_nprocs(ps) == np
end
end
end
if Pardiso.PARDISO_LOADED[]
@testset "schur" begin
# reproduce example from Pardiso website
include("schur_matrix_def.jl")
@test norm(real(D) - real(C)*rA⁻¹*real(B) - s) < 1e-10*(8)^2
# @test norm(D - C*A⁻¹*B - S) < 1e-10*(8)^2
# try some random matrices
m = 50; n = 15; p = .1
ps = PardisoSolver()
for T in (Float64, )#ComplexF64)
ps = PardisoSolver()
pardisoinit(ps)
if T == Float64
set_matrixtype!(ps, 11)
else
set_matrixtype!(ps, 13)
end
for j ∈ 1:100
A = 5I + sprand(T,m,m,p)
A⁻¹ = inv(Matrix(A))
B = sprand(T,m,n,p)
C = sprand(T,n,m,p)
D = 5I + sprand(T,n,n,p)
M = [A B; C D]
# test integer block specification
S = schur_complement(ps, M, n);
@test norm(D - C*A⁻¹*B - S) < 1e-10*(m+n)^2
# test sparse vector block specification
x = spzeros(T,m+n)
x[(m+1):(m+n)] .= 1
S = schur_complement(ps, M, x);
@test norm(D - C*A⁻¹*B - S) < 1e-10*(m+n)^2
# test sparse matrix block specification
x = spzeros(T,m+n,2)
x[(m+1):(m+n-1),1] .= 1
x[end,2] = 1
S = schur_complement(ps, M, x);
@test norm(D - C*A⁻¹*B - S) < 1e-10*(m+n)^2
end
end
end # testset
end
@testset "error checks" begin
for pardiso_type in available_solvers
ps = pardiso_type()
A = sparse(rand(10,10))
B = rand(10, 2)
X = rand(10, 2)
if pardiso_type == PardisoSolver
printstats(ps, A, B)
checkmatrix(ps, A)
checkvec(ps, B)
end
set_matrixtype!(ps, 13)
@test_throws ErrorException pardiso(ps, X, A, B)
@test_throws ArgumentError solve(ps, A, B, :P)
@test_throws ArgumentError solve!(ps, X, A, B, :P)
set_matrixtype!(ps, 11)
X = zeros(12, 2)
@test_throws DimensionMismatch solve!(ps,X, A, B)
B = rand(12, 2)
@test_throws DimensionMismatch solve(ps, A, B)
end
end # testset
@testset "getters and setters" begin
for pardiso_type in available_solvers
ps = pardiso_type()
set_iparm!(ps, 1, 0)
pardisoinit(ps)
@test get_iparm(ps, 1) == 1
@test_throws ArgumentError set_phase!(ps, 5)
@test_throws ArgumentError set_msglvl!(ps, 2)
@test_throws ArgumentError set_matrixtype!(ps, 15)
if pardiso_type == PardisoSolver
@test_throws ArgumentError set_solver!(ps, 2)
set_dparm!(ps, 5, 13.37)
@test get_dparm(ps, 5) == 13.37
set_solver!(ps, 1)
@test Int(get_solver(ps)) == 1
end
set_iparm!(ps, 13, 100)
@test get_iparm(ps, 13) == 100
set_matrixtype!(ps, Pardiso.REAL_SYM)
@test get_matrixtype(ps) == Pardiso.REAL_SYM
set_phase!(ps, Pardiso.ANALYSIS_NUM_FACT)
@test get_phase(ps) == Pardiso.ANALYSIS_NUM_FACT
set_msglvl!(ps, Pardiso.MESSAGE_LEVEL_ON)
@test get_msglvl(ps) == Pardiso.MESSAGE_LEVEL_ON
end
@testset "pardiso" begin
for pardiso_type in available_solvers
A = sparse(rand(2,2) + im * rand(2,2))
b = rand(2) + im * rand(2)
ps = pardiso_type()
set_matrixtype!(ps, Pardiso.COMPLEX_NONSYM)
x = Pardiso.solve(ps, A, b);
set_phase!(ps, Pardiso.RELEASE_ALL)
pardiso(ps)
end
end
end # testset
| Pardiso | https://github.com/JuliaSparse/Pardiso.jl.git |
|
[
"MIT"
] | 0.5.7 | 4b618484bf94a52f02595cd73ac8a6417f4c0c70 | code | 3728 | # emulating exmaple found on pardiso_unsym_schur.cpp found on https://pardiso-project.org
n = 16
ia = [0, 9, 17, 24, 31, 38, 46, 53, 61, 63, 65, 67, 69, 71, 73, 75, 77] .+ 1
ja = [0, 2, 5, 6, 8, 9, 10, 11, 12, 1, 2, 4, 8, 9, 10, 11, 12, 2, 7, 8, 9,
10, 11, 12, 3, 6, 8, 9, 10, 11, 12, 1, 4, 8, 9, 10, 11, 12, 2, 5, 7,
8, 9, 10, 11, 12, 1, 6, 8, 9, 10, 11, 12, 2, 6, 7, 8, 9, 10, 11, 12,
0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15] .+ 1
a = [
complex(7.0000000000000000, 1.),
complex(1.0000000000000000, 1.),
complex(2.0000000000000000, 1.),
complex(7.0000000000000000, 1.),
complex(0.8813528146570788, 0.),
complex(0.9693082988664113, 0.),
complex(0.0063764102307857, 0.),
complex(0.0656724982252748, 0.),
complex(0.1285133482338328, 0.),
complex(-4.0000000000000000, 0.),
complex(8.0000000000000000, 1.),
complex(2.0000000000000000, 1.),
complex(0.0246814022448006, 0.),
complex(0.3913069588115609, 0.),
complex(0.9199013631348676, 0.),
complex(0.2300406237580992, 0.),
complex(0.6371356638478847, 0.),
complex(1.0000000000000000, 1.),
complex(5.0000000000000000, 1.),
complex(0.3412064600197927, 0.),
complex(0.3135851048672553, 0.),
complex(0.0179622413055945, 0.),
complex(0.1170403198125443, 0.),
complex(0.7464881699547887, 0.),
complex(7.0000000000000000, 0.),
complex(9.0000000000000000, 1.),
complex(0.4214056775317185, 0.),
complex(0.5533135799374451, 0.),
complex(0.0294164798770650, 0.),
complex(0.9897710614201980, 0.),
complex(0.8052672976354331, 0.),
complex(-4.0000000000000000, 1.),
complex(0.0000000000000000, 0.),
complex(0.0800111586678662, 0.),
complex(0.7920480624973907, 0.),
complex(0.7114519163682994, 0.),
complex(0.0705164910828937, 0.),
complex(0.2690271315860681, 0.),
complex(7.0000000000000000, 1.),
complex(3.0000000000000000, 1.),
complex(8.0000000000000000, 0.),
complex(0.0793538091733186, 0.),
complex(0.7983140830802098, 0.),
complex(0.5384141865401406, 0.),
complex(0.0600236559152354, 0.),
complex(0.4339797789833593, 0.),
complex(1.0000000000000000, 1.),
complex(11.0000000000000000, 1.),
complex(0.0723762491986621, 0.),
complex(0.8632585405599013, 0.),
complex(0.5245885805189759, 0.),
complex(0.6615771015291297, 0.),
complex( 0.4018064872641355, 0.),
complex(-3.0000000000000000, 1.),
complex( 2.0000000000000000, 1.),
complex( 5.0000000000000000, 0.),
complex( 0.9003410060810889, 0.),
complex( 0.7979622044473094, 0.),
complex( 0.5022399304161738, 0.),
complex( 0.3440577694777694, 0.),
complex(0.6081423394349520, 0.),
complex(1.0000000000000000, 0.),
complex(0.0000000000000000, 0.),
complex(1.0000000000000000, 0.),
complex(0.0000000000000000, 0.),
complex(1.0000000000000000, 0.),
complex(0.0000000000000000, 0.),
complex(1.0000000000000000, 0.),
complex(0.0000000000000000, 0.),
complex(1.0000000000000000, 0.),
complex(0.0000000000000000, 0.),
complex(1.0000000000000000, 0.),
complex(0.0000000000000000, 0.),
complex(1.0000000000000000, 0.),
complex(0.0000000000000000, 0.),
complex(1.0000000000000000, 0.),
complex(0.0000000000000000, 0.)
]
M = SparseMatrixCSC(n,n,ia,ja,a)
ps = PardisoSolver()
set_matrixtype!(ps,Pardiso.COMPLEX_NONSYM)
S = schur_complement(ps,M,8)
m = real(M)
set_matrixtype!(ps,Pardiso.REAL_NONSYM)
s = schur_complement(ps,m,8)
A = M[1:n-8,1:n-8]
A⁻¹ = inv(Matrix(A))
rA⁻¹ = inv(real(Matrix(A)))
B = M[1:n-8,n-7:end]
C = M[n-7:end,1:n-8]
D = M[n-7:end,n-7:end]
| Pardiso | https://github.com/JuliaSparse/Pardiso.jl.git |
|
[
"MIT"
] | 0.5.7 | 4b618484bf94a52f02595cd73ac8a6417f4c0c70 | docs | 15640 | # Pardiso.jl
[](https://github.com/JuliaSparse/Pardiso.jl/actions/workflows/CI.yml)
The Pardiso.jl package provides an interface for using [Panua Pardiso](https://panua.ch/pardiso), its predecessors from
[pardiso-project.org](http://www.pardiso-project.org/), and [Intel MKL
PARDISO](https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2024-0/onemkl-pardiso-parallel-direct-sparse-solver-iface.html) from the [Julia
language](http://julialang.org).
You cannot use `Pardiso.jl` without either having a valid license for Panua Pardiso or
having the MKL library installed. This
package is available free of charge and in no way replaces or alters any
functionality of the linked libraries.
## Installation
The package itself is installed with `Pkg.add("Pardiso")` but you also need to
follow the installation instructions below to install a working PARDISO
library.
### MKL PARDISO
By default, when adding "Pardiso.jl" to the active environment, Julia will automatically install a suitable MKL for your platform by loading `MKL_jll.jl`.
Note that if you use a Mac you will need to pin `MKL_jll` to version 2023.
If you instead use a self-installed MKL, follow these instructions:
* Set the `MKLROOT` environment variable. See the [MKL set environment variables
manual](https://www.intel.com/content/www/us/en/docs/onemkl/developer-guide-linux/2024-0/scripts-to-set-environment-variables.html)
for a thorough guide how to set this variable correctly, typically done by
executing something like `source /opt/intel/oneapi/setvars.sh intel64` or
running `"C:\Program Files (x86)\IntelSWTools\compilers_and_libraries\windows\mkl\bin\mklvars.bat" intel64`
* Run `Pkg.build("Pardiso", verbose=true)`
* If needed, run `Pardiso.show_build_log()` to see the build log for additional information. A REPL sketch of these steps follows this list.
* Note that the `MKLROOT` environment variable must be set, and `LD_LIBRARY_PATH` must contain `$MKLROOT/lib` whenever using the library this way.
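
For example, driving the steps above from the Julia REPL might look like the following sketch (the `MKLROOT` path is illustrative; substitute your own installation):

```jl
ENV["MKLROOT"] = "/opt/intel/oneapi/mkl/latest"  # hypothetical path

using Pkg
Pkg.build("Pardiso", verbose=true)

using Pardiso
Pardiso.show_build_log()  # inspect the log if the build misbehaves
```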
### PARDISO from [panua.ch](https://panua.ch) ("PanuaPardiso", formerly "ProjectPardiso")
* Unzip the download file `panua-pardiso-yyyymmdd-os.zip` to some folder and set the environment variable `JULIA_PARDISO` to the `lib` subdirectory of this folder. For example, create an entry `ENV["JULIA_PARDISO"] = "/Users/Someone/panua-pardiso-yyyymmdd-os/lib"` in `.julia/config/startup.jl`. If you have a valid license for the predecessor from pardiso-project.org, put the PARDISO library in the directory denoted by `ENV["JULIA_PARDISO"]` and
rename it to `libpardiso.so` if necessary.
* Perform the platform specific steps described below
* Run `Pkg.build("Pardiso", verbose=true)`
* If needed, run `Pardiso.show_build_log()` to see the build log for additional information.
Note: In the past, weird errors and problems with MKL Pardiso were observed when PanuaPardiso was enabled
(likely because some library needed by PanuaPardiso conflicted with MKL).
In that case, if you want to use MKL Pardiso, it is better to disable PanuaPardiso by not setting
the environment variable `JULIA_PARDISO` (and rerunning `Pkg.build("Pardiso")`).
##### Linux / macOS specific
* Make sure that the version of `gfortran` corresponding to the pardiso library is installed.
* Make sure OpenMP is installed.
* Install a (fast) BLAS and LAPACK (these should preferably be single threaded since PARDISO handles threading itself), using for example [OpenBLAS](https://github.com/xianyi/OpenBLAS/wiki/Precompiled-installation-packages)
`gfortran` and OpenMP usually come with recent versions of gcc/gfortran. On Linux, Panua Pardiso
looks for the libraries `libgfortran.so` and `libgomp.so`. They may be named differently on your system.
In this situation you may try to create links to them with names known to
`Pardiso.jl` (bash; the pathnames serve as examples here):
````
$ mkdir $HOME/extralibs
$ ln -s /usr/lib64/libgomp.so.1 $HOME/extralibs/libgomp.so
$ ln -s /usr/lib64/libgfortran.so.5 $HOME/extralibs/libgfortran.so
$ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/extralibs/
````
## Basic Usage
This section will explain how to solve equations using `Pardiso.jl` with the default settings of the library. For more advanced users there is a section further down.
## Creating the PardisoSolver
A `PardisoSolver` is created with `PardisoSolver()` for solving with PanuaPardiso or `MKLPardisoSolver()` for solving with MKL PARDISO. This object will hold the settings of the solver and will be passed into the solve functions. In the following sections an instance of a `PardisoSolver` or an `MKLPardisoSolver` will be referred to as `ps`.
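
For example, creating one of each (assuming the corresponding libraries are available):

```jl
using Pardiso

ps  = PardisoSolver()     # PanuaPardiso; requires a license
mps = MKLPardisoSolver()  # MKL PARDISO
```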
### Solving
Solving equations is done with the `solve` and `solve!` functions. They have the following signatures:
* `solve(ps, A, B)` solves `AX=B` and returns `X`
* `solve!(ps, X, A, B)` solves `AX=B` and stores it in `X`
The symbols `:T` or `:C` can be added as an extra argument to solve the transposed or the conjugate transposed system of equations, respectively.
Here is an example of solving a system of real equations with two right-hand sides:
```jl
ps = PardisoSolver()
A = sparse(rand(10, 10))
B = rand(10, 2)
X = zeros(10, 2)
solve!(ps, X, A, B)
```
which happened to give the result
```jl
julia> X
10x2 Array{Float64,2}:
-0.487361 -0.715372
-0.644219 -3.38342
0.465575 4.4838
1.14448 -0.103854
2.00892 -7.04965
0.870507 1.7014
0.590723 -5.74338
-0.843841 -0.903796
-0.279381 7.24754
-1.17295 8.47922
```
### Schur Complement (PanuaPardiso only)
Given a partitioned matrix `M = [A B; C D]`, the Schur complement of `A` in `M` is `S = D-CA⁻¹B`.
This can be found with the function `schur_complement` with the following signatures:
* `schur_complement(ps, M, n)` returns Schur complement of submatrix `A` in `M`, where `n` is the size of submatrix `D` (and therefore also of Schur complement)
* `schur_complement(ps, M, x)` returns Schur complement of submatrix `A` in `M`, where submatrix `D` is defined by nonzero rows of `SparseVector` or `SparseMatrix` `x`.
The symbols `:T` or `:C` can be added as an extra argument to compute the Schur complement of the transposed or the conjugate-transposed matrix, respectively.
Here is an example of finding the Schur complement:
```jl
ps = PardisoSolver()
m = 100; n = 5; p = .5; T = Float64
rng = MersenneTwister(1234);
A = I + sprand(rng,T,m,m,p)
A⁻¹ = inv(Matrix(A))
B = sprand(rng,T,m,n,p)
C = sprand(rng,T,n,m,p)
D = sprand(rng,T,n,n,p)
M = [A B; C D]
S = schur_complement(ps,M,n)
```
which gives
```jl
julia> S
5×5 Array{Float64,2}:
-0.121404 1.49473 -1.25965 7.40326 0.571538
-19.4928 -7.71151 12.9496 -7.13646 -20.4194
9.88029 3.35502 -7.2346 1.70651 13.9759
-9.06094 -5.86454 7.44917 -2.54985 -9.17327
-33.7006 -17.8323 20.2588 -19.5863 -37.6132
```
We can check the validity by comparing to the explicit form:
```jl
julia> norm(D - C*A⁻¹*B - S)
5.033075778861378e-13
```
At present there seems to be an instability in the Schur complement computation for complex matrices.
### Setting the number of threads
The number of threads to use is set in different ways for MKL PARDISO and PanuaPardiso.
#### MKL PARDISO
```jl
set_nprocs!(ps, i) # Sets the number of threads to use
get_nprocs(ps) # Gets the number of threads being used
```
#### PanuaPardiso
The number of threads is set at the creation of the `PardisoSolver` by looking for the environment variable `OMP_NUM_THREADS`. This can be done in Julia with for example `ENV["OMP_NUM_THREADS"] = 2`. **Note:** `OMP_NUM_THREADS` must be set *before* `Pardiso` is loaded and cannot be changed at runtime.
The number of threads used by a `PardisoSolver` can be retrieved with `get_nprocs(ps)`
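
For example (a sketch; the thread count is arbitrary):

```jl
ENV["OMP_NUM_THREADS"] = 4  # must happen before `using Pardiso`
using Pardiso

ps = PardisoSolver()
get_nprocs(ps)              # 4
```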
## More advanced usage.
This section discusses some more advanced usage of `Pardiso.jl`.
For terminology in this section please refer to the [PanuaPardiso manual](http://panua.ch/manual/manual.pdf) and the [oneMKL PARDISO manual](https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2024-0/onemkl-pardiso-parallel-direct-sparse-solver-iface.html).
After using functionality in this section, calls should no longer be made to the `solve` functions but instead directly to the function
```jl
pardiso(ps, X, A, B)
```
This will ensure that the properties you set will not be overwritten.
If you want, you can use `get_matrix(ps, A, T)` to return a matrix that is suitable to use with `pardiso` depending on the matrix type that `ps` has set. The parameter `T` is a symbol indicating whether you will solve the normal, transposed or conjugated system; these are represented by `:N`, `:T` and `:C`, respectively.
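
As a sketch of how `get_matrix` and `pardiso` fit together for a symmetric matrix type (the matrix here is made up):

```jl
using Pardiso, SparseArrays, LinearAlgebra

A = sparse(Symmetric(sprand(10, 10, 0.5) + 10I))  # symmetric, diagonal stored explicitly
b = rand(10)
x = similar(b)

ps = PardisoSolver()
set_matrixtype!(ps, Pardiso.REAL_SYM_INDEF)
pardisoinit(ps)
fix_iparm!(ps, :N)

pardiso(ps, x, get_matrix(ps, A, :N), b)  # get_matrix extracts the triangle PARDISO expects
set_phase!(ps, Pardiso.RELEASE_ALL); pardiso(ps)
```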
For ease of use, `Pardiso.jl` provides enums for most options. These are not exported, so they have to either be explicitly imported or qualified with the module name first. It is possible to use either the enum or the corresponding integer (as given in the manuals) as an input key to the options.
### Setting the matrix type
The matrix type can be explicitly set with `set_matrixtype!(ps, key)` where the key has the following meaning:
| enum | integer | Matrix type |
|--------------------- |---------| ---------------------------------------- |
| REAL_SYM | 1 | real and structurally symmetric |
| REAL_SYM_POSDEF | 2 | real and symmetric positive definite |
| REAL_SYM_INDEF | -2 | real and symmetric indefinite |
| COMPLEX_STRUCT_SYM | 3 | complex and structurally symmetric |
| COMPLEX_HERM_POSDEF | 4 | complex and Hermitian positive definite |
| COMPLEX_HERM_INDEF | -4 | complex and Hermitian indefinite |
| COMPLEX_SYM | 6 | complex and symmetric |
| REAL_NONSYM | 11 | real and nonsymmetric |
| COMPLEX_NONSYM | 13 | complex and nonsymmetric |
The matrix type for a solver can be retrieved with `get_matrixtype(ps)`.
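
For example, the enum and integer forms are interchangeable (a small sketch):

```jl
ps = PardisoSolver()
set_matrixtype!(ps, Pardiso.COMPLEX_HERM_INDEF)  # using the enum...
set_matrixtype!(ps, -4)                          # ...or the equivalent integer
get_matrixtype(ps)                               # COMPLEX_HERM_INDEF
```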
### Setting the solver (PanuaPardiso only)
PanuaPardiso supports direct and iterative solvers. The solver is set with `set_solver!(ps, key)` where the key has the following meaning:
| enum | integer | Solver |
|--------------------|---------|----------------------------------|
| DIRECT_SOLVER | 0 | sparse direct solver |
| ITERATIVE_SOLVER | 1 | multi-recursive iterative solver |
### Setting the phase
Depending on the phase, calls to `solve` (and `pardiso`, described above) do different things. The phase is set with `set_phase!(ps, key)` where the key has the following meaning:
| enum | integer | Solver Execution Steps |
| --------------------------------------|---------|----------------------------------------------------------------|
| ANALYSIS | 11 | Analysis |
| ANALYSIS_NUM_FACT | 12 | Analysis, numerical factorization |
| ANALYSIS_NUM_FACT_SOLVE_REFINE | 13 | Analysis, numerical factorization, solve, iterative refinement |
| NUM_FACT | 22 | Numerical factorization |
| SELECTED_INVERSION | -22 | Selected Inversion |
| NUM_FACT_SOLVE_REFINE | 23 | Numerical factorization, solve, iterative refinement |
| SOLVE_ITERATIVE_REFINE | 33 | Solve, iterative refinement |
| SOLVE_ITERATIVE_REFINE_ONLY_FORWARD | 331 | MKL only, like phase=33, but only forward substitution |
| SOLVE_ITERATIVE_REFINE_ONLY_DIAG | 332 | MKL only, like phase=33, but only diagonal substitution (if available) |
| SOLVE_ITERATIVE_REFINE_ONLY_BACKWARD | 333 | MKL only, like phase=33, but only backward substitution
| RELEASE_LU_MNUM | 0 | Release internal memory for L and U matrix number MNUM |
| RELEASE_ALL | -1 | Release all internal memory for all matrices |
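
For example, a common pattern is to analyze and factorize once, then reuse the factorization for several right-hand sides. The following is a sketch with made-up data; it mirrors the phase sequence used in the package's examples:

```jl
using Pardiso, SparseArrays, LinearAlgebra

A = sparse(rand(10, 10) + 10I)             # hypothetical well-conditioned matrix
B1, B2 = rand(10), rand(10)

ps = PardisoSolver()
set_matrixtype!(ps, Pardiso.REAL_NONSYM)
pardisoinit(ps)
fix_iparm!(ps, :N)                         # account for Julia's CSC vs PARDISO's CSR

set_phase!(ps, Pardiso.ANALYSIS_NUM_FACT)  # phase 12: analyze and factorize once
pardiso(ps, A, B1)

set_phase!(ps, Pardiso.SOLVE_ITERATIVE_REFINE)  # phase 33: solves reuse the factorization
X1, X2 = similar(B1), similar(B2)
pardiso(ps, X1, A, B1)
pardiso(ps, X2, A, B2)

set_phase!(ps, Pardiso.RELEASE_ALL)        # phase -1: free PARDISO's internal memory
pardiso(ps)
```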
### Setting `IPARM` and `DPARM` explicitly
Advanced users likely want to explicitly set and retrieve the `IPARM` and `DPARM` (PanuaPardiso only) parameters.
This can be done with the getters and setters:
```jl
get_iparm(ps, i) # Gets IPARM[i]
get_iparms(ps) # Gets IPARM
set_iparm!(ps, i, v) # Sets IPARM[i] = v
# PanuaPardiso only
get_dparm(ps, i) # Gets DPARM[i]
get_dparms(ps) # Gets DPARM
set_dparm!(ps, i, v) # Sets DPARM[i] = v
```
To set the default values of the `IPARM` and `DPARM` call `pardisoinit(ps)`. The default values depend on what solver and matrix type is set.
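
For example, a typical pattern is to fill the defaults and then override individual entries. A sketch follows; the meaning of each `IPARM` entry is defined in the vendor manuals (`IPARM[8]` as the maximum number of iterative refinement steps is taken from there):

```jl
ps = PardisoSolver()
pardisoinit(ps)           # fill IPARM (and DPARM) with defaults for the current settings
set_iparm!(ps, 1, 1)      # tell PARDISO to respect the custom IPARM values
set_iparm!(ps, 8, 10)     # e.g. allow up to 10 iterative refinement steps
get_iparm(ps, 8)          # == 10
set_dparm!(ps, 5, 13.37)  # DPARM is PanuaPardiso only
```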
### Setting message level
It is possible for Pardiso to print out timings and statistics when solving. This is done by `set_msglvl!(ps, key)` where `key` has the meaning:
| enum | integer | Solver |
|--------------------|---------|----------------------------------|
| MESSAGE_LEVEL_OFF | 0 | no statistics printed |
| MESSAGE_LEVEL_ON | 1 | statistics printed |
### Matrix and vector checkers
PanuaPardiso comes with a few matrix and vector checkers to check the consistency and integrity of the input data. These can be called with the functions:
```jl
printstats(ps, A, B)
checkmatrix(ps, A)
checkvec(ps, B)
```
In MKL PARDISO this is instead done by setting `IPARM[27]` to 1 before calling `pardiso`.
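
For example (a sketch):

```jl
ps = MKLPardisoSolver()
set_iparm!(ps, 27, 1)  # MKL will check the input matrix on the next call to `pardiso`
```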
### MNUM, MAXFCT, PERM
These are set and retrieved with the functions
```jl
set_mnum!(ps, i)
get_mnum(ps)
set_maxfct!(ps, i)
get_maxfct(ps)
get_perm(ps)
set_perm!(ps, perm) # Perm is a Vector{Int}
```
### Schur Complement (PanuaPardiso only)
The `pardiso(ps,...)` syntax can be used to compute the Schur complement (as described below). The answer can be retrieved with `pardisogetschur(ps)`.
To use the low-level API to compute the Schur complement (a code sketch follows this list):
* use custom IPARM values (`set_iparm!(ps,1,1)`), set the Schur complement block size to `n` (`set_iparm!(ps,38,n)`), and set the phase to analyze & factorize (`set_phase!(ps,12)`).
* compute the Schur complement by calling `pardiso(ps,X,M,X)`, where `X` is a dummy matrix with zero columns (e.g. `Matrix{eltype(M)}(undef, size(M,1), 0)`).
* retrieve the result with `pardisogetschur(ps)`
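
Putting the steps above together, a sketch with a made-up matrix might look like this; note that, as in the package's own `schur_complement`, the transpose is handled by passing `permutedims(M)` because the `IPARM[12]` transpose flag is not honored at the factorization step:

```jl
using Pardiso, SparseArrays, LinearAlgebra

M = sparse(rand(10, 10) + 10I)             # hypothetical matrix
n = 3                                      # size of the lower-right block (and of S)

ps = PardisoSolver()
set_matrixtype!(ps, Pardiso.REAL_NONSYM)
pardisoinit(ps)
set_iparm!(ps, 1, 1)                       # use custom IPARM values
set_iparm!(ps, 38, n)                      # Schur complement block size
set_phase!(ps, 12)                         # analyze and factorize
X = Matrix{Float64}(undef, size(M, 1), 0)  # dummy zero-column right-hand side
pardiso(ps, X, permutedims(M), X)
S = pardisogetschur(ps)
set_phase!(ps, Pardiso.RELEASE_ALL); pardiso(ps)
```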
### Potential "gotchas"
* Julia uses CSC sparse matrices while PARDISO expects a CSR matrix. These can be seen as transposes of each other, so to solve `AX = B` the transpose flag (`IPARM[12]`) should be set to 1.
* For **symmetric** matrices, PARDISO needs to have the diagonal stored in the sparse structure even if the diagonal element happens to be 0. The manual recommends adding an `eps` to the diagonal when you suspect you might have zero-valued diagonal elements that are not stored in the sparse structure.
* Unless `IPARM[1] = 1`, all values in `IPARM` will be ignored and default values are used.
* When solving a symmetric matrix, Pardiso expects only the upper triangular part. Since Julia has CSC matrices this means you should pass in `tril(A)` to the `pardiso` function. Use `checkmatrix` to see that you managed to get the matrix in a valid format. A short sketch of this appears below.
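
As a small sketch of the last two points (the matrix is made up):

```jl
using Pardiso, SparseArrays, LinearAlgebra

A = sparse([4.0 1.0 0.0
            1.0 4.0 1.0
            0.0 1.0 4.0])        # symmetric; the diagonal is stored explicitly

ps = PardisoSolver()             # `checkmatrix` requires PanuaPardiso
set_matrixtype!(ps, Pardiso.REAL_SYM_INDEF)
pardisoinit(ps)
fix_iparm!(ps, :N)               # sets IPARM[12] for the CSC/CSR transpose

A_tril = tril(A)                 # pass only one triangle for symmetric matrix types
checkmatrix(ps, A_tril)          # throws if the matrix is not in a valid format

b = rand(3)
x = similar(b)
pardiso(ps, x, A_tril, b)
set_phase!(ps, Pardiso.RELEASE_ALL); pardiso(ps)
```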
# Contributions
If you have suggestions or ideas for improving this package, please file an issue or, even better, create a PR!
| Pardiso | https://github.com/JuliaSparse/Pardiso.jl.git |
|
[
"MIT"
] | 0.5.7 | 4b618484bf94a52f02595cd73ac8a6417f4c0c70 | docs | 89 | This folder contains the examples from the PARDISO Matlab wrapper, rewritten in Julia.
| Pardiso | https://github.com/JuliaSparse/Pardiso.jl.git |
|
[
"MIT"
] | 0.2.2 | c81897832613bbcf7cc19639fd3a1a0a154abe1b | code | 799 | using GaussianMixtureAlignment
using Documenter
DocMeta.setdocmeta!(GaussianMixtureAlignment, :DocTestSetup, :(using GaussianMixtureAlignment); recursive=true)
makedocs(;
modules=[GaussianMixtureAlignment],
authors="Tom McGrath <[email protected]> and contributors",
repo="https://github.com/tmcgrath325/GaussianMixtureAlignment.jl/blob/{commit}{path}#{line}",
sitename="GaussianMixtureAlignment.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://tmcgrath325.github.io/GaussianMixtureAlignment.jl",
edit_link="master",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/tmcgrath325/GaussianMixtureAlignment.jl",
devbranch="master",
)
| GaussianMixtureAlignment | https://github.com/tmcgrath325/GaussianMixtureAlignment.jl.git |
|
[
"MIT"
] | 0.2.2 | c81897832613bbcf7cc19639fd3a1a0a154abe1b | code | 242 | module GaussianMixtureAlignmentMakieExt
using GaussianMixtureAlignment
using Makie
# Needed to get legends working, see https://github.com/MakieOrg/Makie.jl/issues/1148
Makie.get_plots(p::GaussianMixtureAlignment.GMMDisplay) = p.plots
end
| GaussianMixtureAlignment | https://github.com/tmcgrath325/GaussianMixtureAlignment.jl.git |
|
[
"MIT"
] | 0.2.2 | c81897832613bbcf7cc19639fd3a1a0a154abe1b | code | 1966 | """
GaussianMixtureAlignment.jl
===========================
GaussianMixtureAlignment.jl is a package used to align Gaussian mixture models. In particular, it uses an implementation
of the [GOGMA algorithm (Campbell, 2016)](https://arxiv.org/abs/1603.00150) to find globally optimal alignments of mixtures of
isotropic (spherical) Gaussian distributions.
REPL help
=========
? followed by an algorithm or constructor name will print help to the terminal. See: \n
\t?IsotropicGaussian \n
\t?IsotropicGMM \n
\t?IsotropicMultiGMM \n
\t?gogma_align \n
\t?tiv_gogma_align \n
\t?rocs_align \n
"""
module GaussianMixtureAlignment
abstract type AbstractModel{N,T} end
using StaticArrays
using LinearAlgebra
using GenericLinearAlgebra
using PairedLinkedLists
using MutableConvexHulls
using Rotations
using CoordinateTransformations
using Distances
using NearestNeighbors
using Hungarian
using Optim
using MakieCore
using GeometryBasics
using Colors
export AbstractGaussian, AbstractGMM
export IsotropicGaussian, IsotropicGMM, IsotropicMultiGMM
export overlap, force!, gogma_align, rot_gogma_align, trl_gogma_align, tiv_gogma_align
export rocs_align
export PointSet, MultiPointSet
export kabsch, icp, iterative_hungarian, goicp_align, goih_align, tiv_goicp_align, tiv_goih_align
export gmmdisplay
include("tforms.jl")
include("goicp/pointset.jl")
include("gogma/gmm.jl")
include("utils.jl")
include("uncertaintyregion.jl")
include("distancebounds.jl")
include("gogma/combine.jl")
include("gogma/transformation.jl")
include("gogma/overlap.jl")
include("gogma/bounds.jl")
include("goicp/bounds.jl")
include("goicp/correspondence.jl")
include("goicp/kabsch.jl")
include("goicp/icp.jl")
include("goicp/rmsd.jl")
include("goicp/local.jl")
include("localalign.jl")
include("branchbound.jl")
include("gogma/tiv.jl")
include("gogma/align.jl")
include("goicp/tiv.jl")
include("goicp/align.jl")
include("rocs/rocsalign.jl")
include("draw.jl")
end
| GaussianMixtureAlignment | https://github.com/tmcgrath325/GaussianMixtureAlignment.jl.git |
|
[
"MIT"
] | 0.2.2 | c81897832613bbcf7cc19639fd3a1a0a154abe1b | code | 14516 | abstract type AlignmentResults end
struct GlobalAlignmentResult{D,S,T,N,F<:AbstractAffineMap,X<:AbstractModel{D,S},Y<:AbstractModel{D,T}} <: AlignmentResults
x::X
y::Y
upperbound::T
lowerbound::T
tform::F
tform_params::NTuple{N,T}
obj_calls::Int
num_splits::Int
num_blocks::Int
stagnant_splits::Int
progress::Vector{Tuple{Int,T,NTuple{N,T}}}
terminated_by::String
end
struct TIVAlignmentResult{D,S,T,N,F<:AbstractAffineMap,X<:AbstractModel{D,S},Y<:AbstractModel{D,T},TD,TN,TF<:AbstractAffineMap,RD,RN,RF<:AbstractAffineMap,RX<:AbstractModel{TD,S},RY<:AbstractModel{TD,T}} <: AlignmentResults
x::X
y::Y
upperbound::T
lowerbound::T
tform::F
tform_params::NTuple{N,T}
obj_calls::Int
num_splits::Int
num_blocks::Int
rotation_result::GlobalAlignmentResult{RD,S,T,RN,RF,RX,RY}
translation_result::GlobalAlignmentResult{TD,S,T,TN,TF,X,Y}
end
function lowestlbblock(hull::ChanLowerConvexHull{<:Tuple{T,T,<:SearchRegion}}, lb::T) where T
lbnode = lowestlbnode(hull)
(boxlb, boxub, bl) = lbnode.data
lb = boxlb
return lbnode, bl, lb
end
function randomblock(hull::ChanLowerConvexHull{<:Tuple{T,T,<:SearchRegion}}, lb::T) where T
randidx = rand(1:length(hull))
lbnode = getnode(hull.hull, randidx)
(boxlb, boxub, bl) = lbnode.data
if boxlb == lb && !isempty(hull)
lb = lowestlbnode(hull).data[1]
end
return lbnode, bl, lb
end
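# Return the last of the leading hull nodes that share the minimal lower bound.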
function lowestlbnode(hull::ChanLowerConvexHull)
node = PairedLinkedLists.head(hull.hull)
for n in ListNodeIterator(hull.hull)
n.data[1] != node.data[1] && break
node = n
end
return node
end
# Keyword arguments:\n
# nsplits - an integer representing the number of splits that should be made along each dimension during branching
# searchspace - an `UncertaintyRegion` that defines the searchspace, which defaults to the smallest space gauranteed to contain the global minimum
# R - a `RotationVec` containing a rotation position, which is passed to the `blockfun`
# T - an `SVector{3}` containing a translation position, which is passed to the `blockfun`
# centerinputs - a `Bool` indicating whether to center the input models (at their respective centroids) prior to starting the search
# blockfun - the function used for generating `SearchRegion`s that define search subspaces (i.e. UncertaintyRegion, TranslationRegion, RotationRegion)
# nextblockfun - the function used for selecting the next "block" to be investigated (i.e. randomblock, lowestlbblock)
# localfun - the function used for local alignment
# boundsfun - the function used to calculate the bounds on each `SearchRegion`
# tformfun - the function used to convert the center of a `SearchRegion` to a rigid transformation (i.e. AffineMap, LinearMap, Translation)
# atol - absolute tolerance. Search terminates when the upper bound is within `atol` of the lower bound
# rtol - relative tolerance. Search terminates when the upper bound is within `rtol*lb` of the lower bound `lb`
# maxblocks - the maximum number of `Block`s that can be held in the priority queue before search termination
# maxsplits - the maximum number of `Block` splits that are allowed before search termination
# maxevals - the maximum number of objective function evaluations allowed before search termination
# maxstagnant - the maximum number of `Block` splits allowed without improvement before search termination
"""
    result = branchbound(x, y; nsplits=2, searchspace=nothing, blockfun=UncertaintyRegion,
             R=RotationVec(0.,0.,0.), T=SVector{3}(0.,0.,0.), nextblockfun=lowestlbblock,
             centerinputs=false, boundsfun=tight_distance_bounds, localfun=local_align,
             tformfun=AffineMap, atol=0.1, rtol=0, maxblocks=5e8, maxsplits=Inf,
             maxevals=Inf, maxstagnant=Inf, separatesplit=false)
Finds the globally optimal rigid transform for alignment between two isotropic Gaussian mixtures, `x`
and `y`, using the [GOGMA algorithm](https://arxiv.org/abs/1603.00150).
Returns a `GlobalAlignmentResult` that contains the maximized overlap of the two GMMs (the upperbound on the objective function),
a lower bound on the alignment objective function, an `AffineMap` which aligns `x` with `y`, and information about the
number of evaluations during the alignment procedure.
"""
function branchbound(xinput::AbstractModel, yinput::AbstractModel;
nsplits=2, searchspace=nothing, blockfun=UncertaintyRegion, R=RotationVec(0.,0.,0.), T=SVector{3}(0.,0.,0.),
nextblockfun=lowestlbblock, centerinputs=false, boundsfun=tight_distance_bounds, localfun=local_align, tformfun=AffineMap,
atol=0.1, rtol=0, maxblocks=5e8, maxsplits=Inf, maxevals=Inf, maxstagnant=Inf, separatesplit=false)
x = xinput
y = yinput
if isodd(nsplits)
throw(ArgumentError("`nsplits` must be even"))
end
if dims(x) != dims(y)
throw(ArgumentError("Dimensionality of the GMMs must be equal"))
end
t = promote_type(numbertype(x), numbertype(y))
centerx_tform = Translation([0,0,0])
centery_tform = Translation([0,0,0])
if centerinputs
centerx_tform = center_translation(x)
centery_tform = center_translation(y)
x = centerx_tform(x)
y = centery_tform(y)
end
# initialization
if isnothing(searchspace)
searchspace = blockfun(x, y, R, T)
end
ndims = length(center(searchspace))
rot_trl_split = separatesplit && typeof(searchspace) <: UncertaintyRegion
nsblks = rot_trl_split ? nsplits^3 : nsplits^ndims
sblks = fill(searchspace, nsblks)
sblks2 = fill(searchspace, rot_trl_split ? nsblks : 0)
lb, centerub = boundsfun(x, y, searchspace)
hull = ChanLowerConvexHull{Tuple{t,t,typeof(searchspace)}}(CCW, true, x -> (x[1], -x[2]))
addpoint!(hull, (lb, centerub, searchspace))
sbnds = fill((lb, centerub), nsblks)
sbnds2 = fill((lb, centerub), rot_trl_split ? nsblks : 0)
ub, bestloc = localfun(x, y, searchspace)
progress = [(0, ub, bestloc)]
# split cubes until convergence
ndivisions = 0
sinceimprove = 0
evalsperdiv = rot_trl_split ? length(x)*length(y)*2*nsplits^3 : length(x)*length(y)*nsplits^ndims
while !isempty(hull)
if (length(hull) > maxblocks) || (ndivisions*evalsperdiv > maxevals) || (sinceimprove > maxstagnant) || (ndivisions > maxsplits)
break
end
ndivisions += 1
sinceimprove += 1
# pick the next search region to subdivide
lbnode, bl, lb = nextblockfun(hull, lb)
# delete the chosen search region from the convex hull
subhull = getfirst(x -> x.points===lbnode.target.list, hull.subhulls)
removepoint!(subhull, lbnode.target)
deletenode!(lbnode)
# if the best solution so far is close enough to the best possible solution, end
if abs((ub - lb)/lb) < rtol || abs(ub-lb) < atol
tform = tformfun(bestloc)
if centerinputs
tform = centerx_tform ∘ tform ∘ inv(centery_tform)
end
return GlobalAlignmentResult(x, y, ub, lb, tform, bestloc, ndivisions*evalsperdiv, ndivisions, length(hull), sinceimprove, progress, "optimum within tolerance")
end
# split up the block into `nsplits` smaller blocks across each dimension
if rot_trl_split # split rotation and translation separately
rot_subregions!(sblks, bl, nsplits)
for i=1:nsblks
sbnds[i] = boundsfun(x,y,sblks[i])
end
trl_subregions!(sblks2, bl, nsplits)
for i=1:nsblks
sbnds2[i] = boundsfun(x,y,sblks2[i])
end
if sum(x -> x[1], sbnds) < sum(x -> x[1], sbnds2) # pick whichever maximizes summed lower bounds
for i=1:nsblks
sblks[i] = sblks2[i]
sbnds[i] = sbnds2[i]
end
end
else # split rotation and translation simultaneously
subregions!(sblks, bl, nsplits)
for i=1:nsblks
sbnds[i] = boundsfun(x,y,sblks[i])
end
end
# reset the upper bound if appropriate
minub, ubidx = findmin([sbnd[2] for sbnd in sbnds])
if minub < centerub
centerub = minub
nextub, nextbestloc = localfun(x, y, sblks[ubidx])
if minub < nextub
if minub < ub
ub, bestloc = minub, center(sblks[ubidx])
end
else
if nextub < ub
ub, bestloc = nextub, nextbestloc
end
end
push!(progress, (ndivisions, ub, bestloc))
sinceimprove = 0
end
addblks = eltype(hull)[]
addbnds = eltype(sbnds)[]
for i=1:length(sblks)
diff = abs(sbnds[i][2] - sbnds[i][1])
if sbnds[i][1] < ub && diff >= atol && abs(diff/sbnds[i][1]) >= rtol
push!(addblks, (sbnds[i][1], sbnds[i][2], sblks[i]))
push!(addbnds, sbnds[i])
end
end
if isempty(hull)
if !isempty(addbnds)
lb = minimum(addbnds)[1]
end
end
        mergepoints!(hull, addblks)
end
if isempty(hull)
tform = tformfun(bestloc)
if centerinputs
tform = centerx_tform ∘ tform ∘ inv(centery_tform)
end
        return GlobalAlignmentResult(x, y, ub, lb, tform, bestloc, ndivisions*evalsperdiv, ndivisions, length(hull), sinceimprove, progress, "priority queue empty")
else
tform = tformfun(bestloc)
if centerinputs
tform = centerx_tform ∘ tform ∘ inv(centery_tform)
end
        return GlobalAlignmentResult(x, y, ub, lowestlbnode(hull).data[1], tform, bestloc, ndivisions*evalsperdiv, ndivisions, length(hull), sinceimprove, progress, "terminated early")
end
end
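# Usage sketch (hypothetical inputs): `branchbound` is typically reached through a
# wrapper such as `gogma_align`, which supplies model-specific `boundsfun`/`localfun`;
# the result fields follow the `GlobalAlignmentResult` constructed above.
#   gmmx = IsotropicGMM([IsotropicGaussian([0.0, 0.0, 0.0], 1.0, 1.0)])
#   gmmy = IsotropicGMM([IsotropicGaussian([1.0, 1.0, 0.0], 1.0, 1.0)])
#   res  = gogma_align(gmmx, gmmy; atol=0.01)
#   res.tform        # AffineMap aligning gmmx with gmmy
#   res.upperbound   # best (lowest) objective value found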
"""
    result = rot_branchbound(x, y; kwargs...)
Finds the globally optimal rotation for alignment between two isotropic Gaussian mixtures, `x`
and `y`, using the [GOGMA algorithm](https://arxiv.org/abs/1603.00150), for a fixed translation `T`.
That is, only rigid rotation is allowed.
For details about keyword arguments, see `branchbound()`.
"""
function rot_branchbound(x::AbstractModel, y::AbstractModel; kwargs...)
return branchbound(x, y; blockfun=RotationRegion, tformfun=LinearMap, kwargs...)
end
"""
    result = trl_branchbound(x, y; kwargs...)
Finds the globally optimal translation for alignment between two isotropic Gaussian mixtures, `x`
and `y`, using the [GOGMA algorithm](https://arxiv.org/abs/1603.00150), for a fixed rotation `R`.
That is, only rigid translation is allowed.
For details about keyword arguments, see `branchbound()`.
"""
function trl_branchbound(x::AbstractModel, y::AbstractModel; kwargs...)
return branchbound(x, y; blockfun=TranslationRegion, tformfun=Translation, kwargs...)
end
# fit a plane to a set of points, returning the normal vector
function planefit(pts)
    decomp = GenericLinearAlgebra.svd(pts .- sum(pts, dims=2) ./ size(pts, 2))
dist, nvecidx = findmin(decomp.S)
return decomp.U[:, nvecidx], dist
end
planefit(ps::AbstractSinglePointSet) = planefit(ps.coords)
planefit(ps::AbstractSinglePointSet, R) = planefit(R*ps.coords)
function planefit(gmm::AbstractIsotropicGMM, R)
ptsmat = fill(zero(numbertype(gmm)), 3, length(gmm))
for (i,gauss) in enumerate(gmm.gaussians)
ptsmat[:,i] = gauss.μ
end
return planefit(R * ptsmat)
end
function planefit(mgmm::AbstractIsotropicMultiGMM, R)
len = sum([length(gmm) for gmm in values(mgmm.gmms)])
ptsmat = fill(zero(numbertype(mgmm)), 3, len)
idx = 1
for gmm in values(mgmm.gmms)
for gauss in gmm.gaussians
ptsmat[:,idx] = gauss.μ
idx += 1
end
end
return planefit(R * ptsmat)
end
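# Worked example (hypothetical points): for a centered set lying in the xy-plane,
# the fitted normal is ±ẑ and the residual singular value is ~0.
#   pts = [1.0 -1.0 0.0  0.0;
#          0.0  0.0 1.0 -1.0;
#          0.0  0.0 0.0  0.0]
#   nvec, resid = planefit(pts)   # nvec ≈ [0, 0, ±1], resid ≈ 0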
function tiv_branchbound( x::AbstractModel,
y::AbstractModel,
tivx::AbstractModel,
tivy::AbstractModel;
boundsfun=tight_distance_bounds,
rot_boundsfun=boundsfun,
trl_boundsfun=boundsfun,
localfun=local_align,
rot_localfun=localfun,
trl_localfun=localfun,
kwargs...)
t = promote_type(numbertype(x),numbertype(y))
p = t(π)
z = zero(t)
zeroTranslation = SVector{3}(z,z,z)
rot_res = rot_branchbound(tivx, tivy; localfun=rot_localfun, boundsfun=rot_boundsfun, kwargs...)
rotblock = RotationRegion(RotationVec(rot_res.tform_params...), zeroTranslation, p)
rotscore, rotpos = rot_localfun(tivx, tivy, rotblock)
# spin the moving tivgmm around to check for a better rotation (helps when the Gaussians are largely coplanar)
R = RotationVec(rot_res.tform_params...)
spinvec, dist = planefit(tivx, R)
spinblock = RotationRegion(RotationVec(RotationVec(π*spinvec...) * R), zeroTranslation, z)
spinscore, spinrotpos = rot_localfun(tivx, tivy, spinblock)
if spinscore < rotscore
rotpos = RotationVec(spinrotpos...)
else
rotpos = RotationVec(rotpos...)
end
# perform translation alignment of original models
trl_res = trl_branchbound(x, y; R=rotpos, localfun=trl_localfun, boundsfun=trl_boundsfun, kwargs...)
trlpos = SVector{3}(trl_res.tform_params)
# perform local alignment in the full transformation space
trlim = translation_limit(x, y)
localblock = UncertaintyRegion(rotpos, trlpos, 2*p, trlim)
min, bestpos = localfun(x, y, localblock)
if trl_res.upperbound < min
min = trl_res.upperbound
bestpos = (rot_res.tform_params..., trl_res.tform_params...)
end
return TIVAlignmentResult(x, y, min, trl_res.lowerbound, AffineMap(bestpos), bestpos,
rot_res.obj_calls+trl_res.obj_calls, rot_res.num_splits+trl_res.num_splits,
rot_res.num_blocks+trl_res.num_blocks,
rot_res, trl_res)
end
const sqrt3 = √(3)
const sqrt2pi = √(2π)
const pisq = Float64(π^2)
function infbounds(x,y)
typeinf = typemax(promote_type(numbertype(x), numbertype(y)))
return (typeinf, typeinf)
end
function loose_distance_bounds(x::SVector{3,<:Number}, y::SVector{3,<:Number}, σᵣ::Number, σₜ::Number, maximize::Bool = false)
ubdist = norm(x - y)
γₜ = sqrt3 * σₜ
γᵣ = 2 * sin(min(sqrt3 * σᵣ, π) / 2) * norm(x)
    lb, ub = maximize ? (ubdist + γₜ + γᵣ, ubdist) : (max(ubdist - γₜ - γᵣ, 0), ubdist)
numtype = promote_type(typeof(lb), typeof(ub))
return numtype(lb), numtype(ub)
end
loose_distance_bounds(x::SVector{3}, y::SVector{3}, R::RotationVec, T::SVector{3}, σᵣ, σₜ, maximize::Bool = false,
) = (R.sx^2 + R.sy^2 + R.sz^2) > pisq ? infbounds(x,y) : loose_distance_bounds(R*x, y-T, σᵣ, σₜ, maximize) # loose_distance_bounds(R*x, y-T, σᵣ, σₜ)
loose_distance_bounds(x::SVector{3}, y::SVector{3}, block::UncertaintyRegion, maximize::Bool = false) = loose_distance_bounds(x, y, block.R, block.T, block.σᵣ, block.σₜ, maximize)
loose_distance_bounds(x::SVector{3}, y::SVector{3}, block::SearchRegion, maximize::Bool = false) = loose_distance_bounds(x, y, UncertaintyRegion(block), maximize)
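# Quick check (hypothetical numbers): with zero rotational and translational
# uncertainty, the loose bounds collapse onto the exact center distance.
#   x = SVector(1.0, 0.0, 0.0); y = SVector(0.0, 0.0, 0.0)
#   loose_distance_bounds(x, y, 0.0, 0.0)   # (1.0, 1.0)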
"""
lb, ub = tight_distance_bounds(x::SVector{3,<:Number}, y::SVector{3,<:Number}, σᵣ::Number, σₜ::Number)
    lb, ub = tight_distance_bounds(x::SVector{3,<:Number}, y::SVector{3,<:Number}, R::RotationVec, T::SVector{3}, σᵣ::Number, σₜ::Number)
Within an uncertainty region, find the bounds on distance between two points x and y.
See [Campbell & Peterson, 2016](https://arxiv.org/abs/1603.00150)
"""
function tight_distance_bounds(x::SVector{3,<:Number}, y::SVector{3,<:Number}, σᵣ::Number, σₜ::Number, maximize::Bool = false)
# prepare positions and angles
xnorm, ynorm = norm(x), norm(y)
if xnorm*ynorm == 0
cosα = one(promote_type(eltype(x),eltype(y)))
else
cosα = dot(x, y)/(xnorm*ynorm)
end
cosβ = cos(min(sqrt3*σᵣ, π))
# upper bound distance at hypercube center
ubdist = norm(x - y)
if maximize
        # this case is intended for situations where the objective function scales negatively with distance;
        # lbdist, the farthest point on the spherical cap, will be larger than ubdist
        if cosα + cosβ <= 0    # equivalent to α + β ≥ π, where the largest separation is xnorm + ynorm
lbdist = xnorm + ynorm + sqrt3*σₜ
else
lbdist = √(xnorm^2 + ynorm^2 - 2*xnorm*ynorm*(cosα*cosβ-√((1-cosα^2)*(1-cosβ^2)))) + sqrt3*σₜ
end
else
# lower bound distance from the nearest point on the "spherical cap"
if cosα >= cosβ
lbdist = max(abs(xnorm-ynorm) - sqrt3*σₜ, 0)
else
lbdist = try max(√(xnorm^2 + ynorm^2 - 2*xnorm*ynorm*(cosα*cosβ+√((1-cosα^2)*(1-cosβ^2)))) - sqrt3*σₜ, 0) # law of cosines
catch e # when the argument for the square root is negative (within machine precision of 0, usually)
0
end
end
end
# evaluate objective function at each distance to get upper and lower bounds
numtype = promote_type(typeof(lbdist), typeof(ubdist))
return (numtype(lbdist), numtype(ubdist))
end
tight_distance_bounds(x::SVector{3,<:Number}, y::SVector{3,<:Number}, R::RotationVec, T::SVector{3}, σᵣ::Number, σₜ::Number, maximize::Bool = false,
) = (R.sx^2 + R.sy^2 + R.sz^2) > pisq ? infbounds(x,y) : tight_distance_bounds(R*x, y-T, σᵣ, σₜ, maximize) # tight_distance_bounds(R*x, y-T, σᵣ, σₜ)
tight_distance_bounds(x::SVector{3}, y::SVector{3}, block::UncertaintyRegion, maximize::Bool = false) = tight_distance_bounds(x, y, block.R, block.T, block.σᵣ, block.σₜ, maximize)
tight_distance_bounds(x::SVector{3}, y::SVector{3}, block::Union{RotationRegion, TranslationRegion}, maximize::Bool = false) = tight_distance_bounds(x, y, UncertaintyRegion(block), maximize)
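# Quick check (hypothetical numbers): the upper bound is the center distance,
# and shrinking σᵣ and σₜ drives the lower bound up toward it.
#   x = SVector(1.0, 0.0, 0.0); y = SVector(0.0, 1.0, 0.0)
#   lb, ub = tight_distance_bounds(x, y, 0.1, 0.1)   # lb ≤ norm(x - y) == ub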
import MakieCore: plot!
using MakieCore: @recipe, lines!, mesh!, Theme
using GeometryBasics: Sphere
using Colors: RGB
const HALFWAY_RADIUS = sqrt(3) / 2
const EQUAL_VOL_CONST = 3*√π/4
const DEFAULT_COLORS = [ # CUD colors: https://jfly.uni-koeln.de/color/#assign
RGB(0/255, 114/255, 178/255), # blue
RGB(230/255, 159/255, 0/255 ), # orange
RGB(0/255, 158/255, 115/255), # green
RGB(204/255, 121/255, 167/255), # reddish purple
RGB(86/255, 180/255, 233/255), # sky blue
RGB(213/255, 94/255, 0/255 ), # vermillion
RGB(240/255, 228/255, 66/255 ), # yellow
]
# use cached calculations for positions of points on a circle
θs = range(0, 2π, length=32) # use 32 points (arbitrary)
const cosθs = [cos(θ) for θ in θs]
const sinθs = [sin(θ) for θ in θs]
equal_volume_radius(σ, ϕ) = (EQUAL_VOL_CONST*abs(ϕ))^(1/3) * σ
function flat_circle!(f, pos, r, dim::Int; kwargs...)
if dim == 3
xs = [r * cosθ + pos[1] for cosθ in cosθs]
ys = [r * sinθ + pos[2] for sinθ in sinθs]
zs = fill(pos[3], 32)
elseif dim == 2
xs = [r * cosθ + pos[1] for cosθ in cosθs]
ys = fill(pos[2], 32)
zs = [r * sinθ + pos[3] for sinθ in sinθs]
elseif dim == 1
xs = fill(pos[1], 32)
ys = [r * cosθ + pos[2] for cosθ in cosθs]
zs = [r * sinθ + pos[3] for sinθ in sinθs]
end
lines!(f, xs,ys,zs; kwargs...)
end
function wire_sphere!(f, pos, r; kwargs...)
for dim in 1:3
flat_circle!(f, pos, r, dim; kwargs...)
halfwaypos = Float32[0,0,0]
halfwaypos[dim] = r / 2;
flat_circle!(f, pos .- halfwaypos, r * HALFWAY_RADIUS, dim; kwargs...)
flat_circle!(f, pos .+ halfwaypos, r * HALFWAY_RADIUS, dim; kwargs...)
end
end
function solid_sphere!(f, pos, r; kwargs...)
mesh!(f, Sphere(GeometryBasics.Point{3}(pos...), r); kwargs...)
end
@recipe(GaussianDisplay, g) do scene
    Theme(
        display = :wire,
        color = DEFAULT_COLORS[1],
        label = "",
    )
end
function plot!(gd::GaussianDisplay{<:NTuple{<:Any, <:AbstractIsotropicGaussian}})
gauss = [gd[i][] for i=1:length(gd)]
disp = gd[:display][]
color = gd[:color][]
label = gd[:label][]
plotfun = disp == :wire ? wire_sphere! : ( disp == :solid ? solid_sphere! : throw(ArgumentError("Unrecognized display option: `$disp`")))
for g in gauss
plotfun(gd, g.μ, g.σ; color=color, label)
end
return gd
end
@recipe(GMMDisplay, g) do scene
Theme(
display = :wire,
palette = DEFAULT_COLORS,
color = nothing,
label = "",
)
end
function plot!(gd::GMMDisplay{<:NTuple{<:Any,<:AbstractIsotropicGMM}})
gmms = [gd[i][] for i=1:length(gd)]
disp = gd[:display][]
color = gd[:color][]
palette = gd[:palette][]
label = gd[:label][]
for (i,gmm) in enumerate(gmms)
        col = isnothing(color) ? palette[(i-1) % length(palette) + 1] : color
gaussiandisplay!(gd, gmm...; display=disp, color=col, label)
end
return gd
end
function plot!(gd::GMMDisplay{<:NTuple{<:Any,<:AbstractIsotropicMultiGMM{N,T,K}}}) where {N,T,K}
mgmms = [gd[i][] for i=1:length(gd)]
disp = gd[:display][]
color = gd[:color][]
palette = gd[:palette][]
allkeys = Set{K}()
for mgmm in mgmms
allkeys = allkeys ∪ keys(mgmm)
end
for (i,k) in enumerate(allkeys)
        col = isnothing(color) ? palette[(i-1) % length(palette) + 1] : color
for mgmm in mgmms
haskey(mgmm, k) && gmmdisplay!(gd, mgmm[k]; display=disp, color=col, palette=palette, label=string(k))
end
end
return gd
end
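# Usage sketch (requires a Makie backend such as GLMakie; inputs hypothetical):
#   using GLMakie
#   fig = Figure(); ax = Axis3(fig[1, 1])
#   gmmdisplay!(ax, gmmx; display=:wire)
#   gmmdisplay!(ax, gmmy; display=:solid, color=DEFAULT_COLORS[2])
#   fig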
"""
com = centroid(gmm)
Returns the center of mass of `gmm`, where its first order moments are equal to 0.
"""
function centroid(positions::AbstractMatrix{<:Real}, weights::AbstractVector{<:Real}=ones(eltype(positions),size(positions,2)))
normweights = weights / sum(weights)
return SVector{size(positions,1)}(positions * normweights)
end
centroid(gaussians::AbstractVector{<:AbstractIsotropicGaussian}) =
    centroid(hcat([g.μ for g in gaussians]...), [g.ϕ for g in gaussians])
centroid(x::AbstractPointSet) = centroid(x.coords, x.weights)
centroid(gmm::AbstractIsotropicGMM) = centroid(gmm.gaussians)
centroid(mgmm::AbstractMultiGMM) = centroid(collect(Iterators.flatten([gmm.gaussians for (k,gmm) in mgmm])))
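# Worked example: two unit-weight positions at x = 0 and x = 2 have their
# centroid at x = 1.
#   centroid([0.0 2.0; 0.0 0.0; 0.0 0.0])   # SVector(1.0, 0.0, 0.0)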
tformwithparams(X,x) = RotationVec(X[1:3]...)*x + SVector{3}(X[4:6]...)
# function tformwithparams(X,x)
# if sum(abs2, X[1:3]) == 0 # handled for autodiff around 0
# T = eltype(X)
# θ = norm(X[1:3])
# a = θ > 0 ? X[1] / θ : one(T)
# b = θ > 0 ? X[2] / θ : zero(T)
# c = θ > 0 ? X[3] / θ : zero(T)
# R = AngleAxis(θ, a, b, c)
# @show R
# else
# R = RotationVec(X[1:3]...)
# end
# t = SVector{3}(X[4:6]...)
# @show (R*x)[1]
# return R*x + t
# end
overlapobj(X,x,y,args...) = -overlap(tformwithparams(X,x), y, args...)
function distanceobj(X, x, y; correspondence = hungarian_assignment)
tformedx = tformwithparams(X,x)
return squared_deviation(tformedx, y, correspondence(tformedx,y))
end
function alignment_objective(X, x::AbstractModel, y::AbstractModel, args...; objfun=overlapobj)
return objfun(X,x,y,args...)
end
# alignment objective for a rigid transformation
alignment_objective(X, x::AbstractModel, y::AbstractModel, block::UncertaintyRegion, args...; kwargs...) = alignment_objective(X,x,y,args...; kwargs...)
# alignment objective for rigid rotation (i.e. the first stage of TIV-GOGMA)
function alignment_objective(X, gmmx::AbstractModel, gmmy::AbstractModel, block::RotationRegion, args...; kwargs...)
return alignment_objective((X..., block.T...), gmmx, gmmy, args...; kwargs...)
end
# alignment objective for translation (i.e. the second stage of TIV-GOGMA)
function alignment_objective(X, gmmx::AbstractModel, gmmy::AbstractModel, block::TranslationRegion, args...; kwargs...)
return alignment_objective((block.R.sx, block.R.sy, block.R.sz, X...), gmmx, gmmy, args...; kwargs...)
end
"""
    obj, pos = local_align(x, y, block, args...; maxevals=100, kwargs...)
Performs local alignment within the specified `block`, using L-BFGS to minimize the alignment objective (by default, the negative overlap) for the provided models `x` and `y`.
"""
function local_align(x::AbstractModel, y::AbstractModel, block::SearchRegion, args...;
maxevals=100, kwargs...)
# set initial guess at the center of the block
initial_X = center(block)
# if (typeof(block) <: UncertaintyRegion && sum(abs2, initial_X[1:Int(end/2)]) == 0) || (typeof(block) <: RotationRegion && sum(abs2, initial_X) == 0)
# T = eltype(initial_X)
# initial_X = initial_X .+ [eps(T), zeros(T, length(initial_X)-1)...]
# end
# local optimization within the block
f(X) = alignment_objective(X, x, y, block, args...; kwargs...)
# res = optimize(f, lower, upper, initial_X, Fminbox(LBFGS()), Optim.Options(f_calls_limit=maxevals); autodiff = :forward)
res = optimize(f, [initial_X...], LBFGS(), Optim.Options(f_calls_limit=maxevals); autodiff = :forward)
return Optim.minimum(res), tuple(Optim.minimizer(res)...)
end
local_align(x::AbstractModel, y::AbstractModel; kwargs...) = local_align(x, y, UncertaintyRegion(x,y); kwargs...)
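# Usage sketch (hypothetical inputs): refine an alignment around the center of an
# uncertainty region, then build the corresponding transformation.
#   block = UncertaintyRegion(gmmx, gmmy)
#   obj, params = local_align(gmmx, gmmy, block)
#   tform = AffineMap(params)   # see the transformation utilities below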
import CoordinateTransformations.AffineMap
import CoordinateTransformations.LinearMap
import CoordinateTransformations.Translation
AffineMap(params::NTuple{6}) = AffineMap(RotationVec(params[1:3]...), SVector{3}(params[4:6]...))
LinearMap(params::NTuple{3}) = LinearMap(RotationVec(params...))
Translation(params::NTuple{3}) = Translation(SVector{3}(params))
function affinemap_to_params(tform::AffineMap)
R = RotationVec(tform.linear)
T = SVector{3}(tform.translation)
return (R.sx, R.sy, R.sz, T...)
end
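# Round-trip sketch: the 6 parameters (rotation-vector components, then
# translation) survive conversion to an AffineMap and back.
#   params = (0.1, 0.2, 0.3, 1.0, 2.0, 3.0)
#   affinemap_to_params(AffineMap(params))   # ≈ params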
## splitting up searchcubes
function cuberanges(center::NTuple{N,T}, widths) where {N,T}
return NTuple{N,Tuple{T,T}}([(center[i]-widths[i], center[i]+widths[i]) for i=1:length(center)])
end
cuberanges(R::RotationVec, T::SVector{3}, σᵣ, σₜ) = cuberanges((R.sx,R.sy,R.sz,T[1],T[2],T[3]), (σᵣ,σᵣ,σᵣ,σₜ,σₜ,σₜ))
cuberanges(R::RotationVec, σᵣ::Number) = cuberanges((R.sx,R.sy,R.sz), (σᵣ,σᵣ,σᵣ))
cuberanges(T::SVector{3}, σₜ::Number) = cuberanges((T[1],T[2],T[3]), (σₜ,σₜ,σₜ))
"""
sbrngs = subranges(ranges, nsplits)
Takes `ranges`, a nested tuple describing intervals for each dimension in rigid-transformation space
defining a hypercube, and splits the hypercube into `nsplits` even components along each dimension.
If the cube is N-dimensional, the number of returned sub-cubes will be `nsplits^N`.
"""
function subranges(ranges, nsplits::Int=2)
t = eltype(eltype(ranges))
len = length(ranges)
    # calculate even splitting points for each dimension
splitvals = [range(r[1], stop=r[2], length=nsplits+1) |> collect for r in ranges]
splits = [[(splitvals[i][j], splitvals[i][j+1]) for j=1:nsplits] for i=1:len]
f(x) = splits[x[1]][x[2]]
children = fill(ranges, nsplits^len)
for (i,I) in enumerate(CartesianIndices(NTuple{len,UnitRange{Int}}(fill(1:nsplits, len))))
children[i] = NTuple{len,Tuple{t,t}}(map(x->f(x), enumerate(Tuple(I))))
end
return children
end
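# Worked example: splitting a 2-D unit square once along each dimension yields
# nsplits^2 = 4 subsquares.
#   subranges(((0.0, 1.0), (0.0, 1.0)), 2)
#   # ((0.0, 0.5), (0.0, 0.5)), ((0.5, 1.0), (0.0, 0.5)), ...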
function center(ranges::NTuple{N,Tuple{T,T}}) where {N,T}
return NTuple{N,T}([sum(dim)/2 for dim in ranges])
end
## supertype for search regions
abstract type SearchRegion{T} end
## rigid transformation
AffineMap(sr::SearchRegion) = AffineMap(sr.R, sr.T)
"""
Describes a transformation uncertainty region centered at rotation R and translation T, with rotation and translation half-widths of σᵣ and σₜ, respectively.
"""
struct UncertaintyRegion{N<:Real} <: SearchRegion{N}
R::RotationVec{N}
T::SVector{3,N}
σᵣ::N
σₜ::N
end
function UncertaintyRegion(R::RotationVec,T::SVector{3},σᵣ::Number,σₜ::Number)
t = promote_type(eltype(R), eltype(T), typeof(σᵣ), typeof(σₜ))
return UncertaintyRegion{t}(RotationVec{t}(R), SVector{3,t}(T), t(σᵣ), t(σₜ))
end
UncertaintyRegion(σᵣ::Number, σₜ::Number) = UncertaintyRegion(one(RotationVec), zero(SVector{3}), σᵣ, σₜ)
UncertaintyRegion(σₜ::Number) = UncertaintyRegion(one(RotationVec), zero(SVector{3}), π, σₜ)
UncertaintyRegion() = UncertaintyRegion(one(RotationVec), zero(SVector{3}), π, 1.0)
UncertaintyRegion(block::UncertaintyRegion) = block;
center(ur::UncertaintyRegion) = (ur.R.sx, ur.R.sy, ur.R.sz, ur.T...);
# for speeding up hashing and performance of the priority queue in the branch and bound procedure
const hash_UncertaintyRegion_seed = UInt === UInt64 ? 0x4de49213ae1a23bf : 0xef78ce68
function Base.hash(B::UncertaintyRegion, h::UInt)
h += hash_UncertaintyRegion_seed
h = Base.hash(center(B), h)
return h
end
## only rotation
struct RotationRegion{N<:Real} <: SearchRegion{N}
R::RotationVec{N}
T::SVector{3,N}
σᵣ::N
end
function RotationRegion(R::RotationVec,T::SVector{3},σᵣ::Number)
t = promote_type(eltype(R), eltype(T), typeof(σᵣ))
return RotationRegion{t}(RotationVec{t}(R), SVector{3,t}(T), t(σᵣ))
end
RotationRegion(R, T, σᵣ::Number) = RotationRegion(RotationVec(R), SVector{3}(T), σᵣ)
RotationRegion(σᵣ::Number) = RotationRegion(one(RotationVec), zero(SVector{3}), σᵣ)
RotationRegion() = RotationRegion(Float64(π))
center(rr::RotationRegion) = (rr.R.sx, rr.R.sy, rr.R.sz);
UncertaintyRegion(rr::RotationRegion{T}) where T = UncertaintyRegion(rr.R, rr.T, rr.σᵣ, zero(T))
RotationRegion(ur::UncertaintyRegion) = RotationRegion(ur.R, ur.T, ur.σᵣ)
# for speeding up hashing and performance of the priority queue in the branch and bound procedure
const hash_RotationRegion_seed = UInt === UInt64 ? 0xee63e114344da2b9 : 0xe6cb1eb7
function Base.hash(B::RotationRegion, h::UInt)
h += hash_RotationRegion_seed
h = Base.hash(center(B), h)
return h
end
## only translation
struct TranslationRegion{N<:Real} <: SearchRegion{N}
R::RotationVec{N}
T::SVector{3,N}
σₜ::N
end
function TranslationRegion(R::RotationVec,T::SVector{3},σₜ::Number)
t = promote_type(eltype(R), eltype(T), typeof(σₜ))
return TranslationRegion{t}(RotationVec{t}(R), SVector{3,t}(T), t(σₜ))
end
TranslationRegion(R, T, σₜ::Number) = TranslationRegion(RotationVec(R), SVector{3}(T), σₜ)
TranslationRegion(σₜ) = TranslationRegion(one(RotationVec{typeof(σₜ)}), zero(SVector{3, typeof(σₜ)}), σₜ)
TranslationRegion() = TranslationRegion(1.0)
center(tr::TranslationRegion) = (tr.T...,);
UncertaintyRegion(tr::TranslationRegion{T}) where T = UncertaintyRegion(tr.R, tr.T, zero(T), tr.σₜ)
TranslationRegion(ur::UncertaintyRegion) = TranslationRegion(ur.R, ur.T, ur.σₜ)
# for speeding up hashing and performance of the priority queue in the branch and bound procedure
const hash_TranslationRegion_seed = UInt === UInt64 ? 0x24f59aedb6bf903f : 0x76f5f734
function Base.hash(B::TranslationRegion, h::UInt)
h += hash_TranslationRegion_seed
h = Base.hash(center(B), h)
return h
end
# Split SearchRegion
function subregions!(subregionvec::Vector{S}, ur::S, nsplits=2) where S<:UncertaintyRegion
σᵣ = ur.σᵣ / nsplits
σₜ = ur.σₜ / nsplits
lowercorner = center(ur) .- (ur.σᵣ, ur.σᵣ, ur.σᵣ, ur.σₜ, ur.σₜ, ur.σₜ) .+ (σᵣ, σᵣ, σᵣ, σₜ, σₜ, σₜ)
for (i,I) in enumerate(CartesianIndices(NTuple{6,UnitRange{Int}}(fill(0:nsplits-1, 6))))
idxs = Tuple(I)
c = lowercorner .+ (2*idxs[1]*σᵣ, 2*idxs[2]*σᵣ, 2*idxs[3]*σᵣ, 2*idxs[4]*σₜ, 2*idxs[5]*σₜ, 2*idxs[6]*σₜ)
R = RotationVec(c[1], c[2], c[3])
T = SVector{3}(c[4], c[5], c[6])
subregionvec[i] = UncertaintyRegion(R,T,σᵣ,σₜ)
end
end
function subregions!(subregionvec::Vector{S}, rr::S, nsplits=2) where S<:RotationRegion
σᵣ = rr.σᵣ / nsplits
lowercorner = (center(rr) .- rr.σᵣ) .+ σᵣ
for (i,I) in enumerate(CartesianIndices(NTuple{3,UnitRange{Int}}(fill(0:nsplits-1, 3))))
idxs = Tuple(I)
c = lowercorner .+ (2*idxs[1]*σᵣ, 2*idxs[2]*σᵣ, 2*idxs[3]*σᵣ)
R = RotationVec(c[1], c[2], c[3])
subregionvec[i] = RotationRegion(R,rr.T,σᵣ)
end
end
function subregions!(subregionvec::Vector{S}, tr::S, nsplits=2) where S<:TranslationRegion
σₜ = tr.σₜ / nsplits
lowercorner = (center(tr) .- tr.σₜ) .+ σₜ
for (i,I) in enumerate(CartesianIndices(NTuple{3,UnitRange{Int}}(fill(0:nsplits-1, 3))))
idxs = Tuple(I)
c = lowercorner .+ (2*idxs[1]*σₜ, 2*idxs[2]*σₜ, 2*idxs[3]*σₜ)
T = SVector{3}(c[1], c[2], c[3])
subregionvec[i] = TranslationRegion(tr.R,T,σₜ)
end
end
function subregions(sr::SearchRegion, nsplits=2)
subregionvec = fill(sr, nsplits^length(center(sr)))
subregions!(subregionvec, sr, nsplits)
return subregionvec
end
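# Usage sketch: with nsplits = 2, a full rigid-transformation region splits into
# 2^6 = 64 children, each with halved rotation and translation widths.
#   ur = UncertaintyRegion(1.0)    # σᵣ = π, σₜ = 1.0
#   blocks = subregions(ur)        # 64 sub-regions
#   blocks[1].σᵣ == ur.σᵣ / 2      # true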
# split only along translation or rotation axes
function rot_subregions!(subregionvec::Vector{S}, ur::S, nsplits=2) where S<:UncertaintyRegion
σᵣ = ur.σᵣ / nsplits
lowercorner = (ur.R.sx, ur.R.sy, ur.R.sz) .- (ur.σᵣ, ur.σᵣ, ur.σᵣ) .+ (σᵣ, σᵣ, σᵣ)
for (i,I) in enumerate(CartesianIndices(NTuple{3,UnitRange{Int}}(fill(0:nsplits-1, 3))))
idxs = Tuple(I)
R = RotationVec((lowercorner .+ (2*idxs[1]*σᵣ, 2*idxs[2]*σᵣ, 2*idxs[3]*σᵣ))...)
subregionvec[i] = UncertaintyRegion(R, ur.T, σᵣ, ur.σₜ)
end
end
function trl_subregions!(subregionvec::Vector{S}, ur::S, nsplits=2) where S<:UncertaintyRegion
σₜ = ur.σₜ / nsplits
lowercorner = ur.T .- (ur.σₜ, ur.σₜ, ur.σₜ) .+ (σₜ, σₜ, σₜ)
for (i,I) in enumerate(CartesianIndices(NTuple{3,UnitRange{Int}}(fill(0:nsplits-1, 3))))
idxs = Tuple(I)
T = lowercorner .+ (2*idxs[1]*σₜ, 2*idxs[2]*σₜ, 2*idxs[3]*σₜ)
subregionvec[i] = UncertaintyRegion(ur.R, T, ur.σᵣ, σₜ)
end
end
function rot_subregions(sr::UncertaintyRegion, nsplits=2)
subregionvec = fill(sr, nsplits^3)
rot_subregions!(subregionvec, sr, nsplits)
return subregionvec
end
function trl_subregions(sr::UncertaintyRegion, nsplits=2)
subregionvec = fill(sr, nsplits^3)
trl_subregions!(subregionvec, sr, nsplits)
return subregionvec
end
# Initialize UncertaintyRegion for aligning two PointSets
UncertaintyRegion(x::Union{AbstractPointSet, AbstractGMM}, y::Union{AbstractPointSet, AbstractGMM}, R::RotationVec = RotationVec(0.0,0.0,0.0), T::SVector{3} = SVector{3}(0.0,0.0,0.0)) = UncertaintyRegion(translation_limit(x, y))
TranslationRegion(x::Union{AbstractPointSet, AbstractGMM}, y::Union{AbstractPointSet, AbstractGMM}, R::RotationVec = RotationVec(0.0,0.0,0.0), T::SVector{3} = SVector{3}(0.0,0.0,0.0)) = TranslationRegion(R, zero(SVector{3}), translation_limit(x, y))
RotationRegion(x::Union{AbstractPointSet, AbstractGMM}, y::Union{AbstractPointSet, AbstractGMM}, R::RotationVec = RotationVec(0.0,0.0,0.0), T::SVector{3} = SVector{3}(0.0,0.0,0.0)) = RotationRegion(RotationVec(0.0,0.0,0.0), T, π)
# centroid of positions in A, weighted by weights in w (assumed to sum to 1)
centroid(A, w=fill(1/size(A,2), size(A,2))) = A*w
function centroid(m::AbstractModel)
w = weights(m)
return centroid(coords(m), w / sum(w))
end
# translation moving centroid to origin
center_translation(A, w=fill(1/size(A,2), size(A,2))) = Translation(-centroid(A,w))
center_translation(m::AbstractModel) = Translation(-centroid(m))
# convert between pointsets and GMMs
function IsotropicGMM(ps::AbstractSinglePointSet{N,T}, σs = ones(T, length(ps))) where {N,T}
μs = ps.coords
ϕs = ps.weights
return IsotropicGMM{N,T}([IsotropicGaussian(μs[:,i], σs[i], ϕs[i]) for i=1:length(ps)])
end
PointSet(gmm::AbstractSingleGMM{N,T}) where {N,T} = PointSet{N,T}(coords(gmm), weights(gmm))
MultiPointSet(mgmm::AbstractMultiGMM{N,T,K}) where {N,T,K} = MultiPointSet{N,T,K}(Dict{K,PointSet{N,T}}([k => PointSet{N,T}(coords(gmm), weights(gmm)) for (k,gmm) in mgmm.gmms]...))
"""
lim = translation_limit(gmmx, gmmy)
Computes the largest translation needed to ensure that the searchspace contains the best alignment transformation.
"""
translation_limit(x::AbstractMatrix, y::AbstractMatrix) = max(maximum(abs.(x)), maximum(abs.(y)))
translation_limit(x::AbstractModel, y::AbstractModel) = translation_limit(coords(x), coords(y))
function gogma_align(gmmx::AbstractGMM, gmmy::AbstractGMM; interactions=nothing, kwargs...)
pσ, pϕ = pairwise_consts(gmmx,gmmy,interactions)
boundsfun(x,y,block) = gauss_l2_bounds(x,y,block,pσ,pϕ)
localfun(x,y,block) = local_align(x,y,block,pσ,pϕ)
return branchbound(gmmx, gmmy; boundsfun=boundsfun, localfun=localfun, kwargs...)
end
function rot_gogma_align(gmmx::AbstractGMM, gmmy::AbstractGMM; interactions=nothing, kwargs...)
pσ, pϕ = pairwise_consts(gmmx,gmmy,interactions)
boundsfun(x,y,block) = gauss_l2_bounds(x,y,block,pσ,pϕ)
localfun(x,y,block) = local_align(x,y,block,pσ,pϕ)
rot_branchbound(gmmx, gmmy; boundsfun=boundsfun, localfun=localfun, kwargs...)
end
function trl_gogma_align(gmmx::AbstractGMM, gmmy::AbstractGMM; interactions=nothing, kwargs...)
pσ, pϕ = pairwise_consts(gmmx,gmmy,interactions)
boundsfun(x,y,block) = gauss_l2_bounds(x,y,block,pσ,pϕ)
localfun(x,y,block) = local_align(x,y,block,pσ,pϕ)
trl_branchbound(gmmx, gmmy; boundsfun=boundsfun, localfun=localfun, kwargs...)
end
function tiv_gogma_align(gmmx::AbstractGMM, gmmy::AbstractGMM, cx=Inf, cy=Inf; kwargs...)
tivgmmx, tivgmmy = tivgmm(gmmx, cx), tivgmm(gmmy, cy)
pσ, pϕ = pairwise_consts(gmmx,gmmy)
tivpσ, tivpϕ = pairwise_consts(tivgmmx,tivgmmy)
boundsfun(x,y,block) = gauss_l2_bounds(x,y,block,pσ,pϕ)
rot_boundsfun(x,y,block) = gauss_l2_bounds(x,y,block,tivpσ,tivpϕ)
localfun(x,y,block) = local_align(x,y,block,pσ,pϕ)
rot_localfun(x,y,block) = local_align(x,y,block,tivpσ,tivpϕ)
tiv_branchbound(gmmx, gmmy, tivgmm(gmmx, cx), tivgmm(gmmy, cy); boundsfun=boundsfun, rot_boundsfun=rot_boundsfun, localfun=localfun, rot_localfun=rot_localfun, kwargs...)
end | GaussianMixtureAlignment | https://github.com/tmcgrath325/GaussianMixtureAlignment.jl.git |
loose_distance_bounds(x::AbstractGaussian, y::AbstractGaussian, args...) = loose_distance_bounds(x.μ, y.μ, args...)
tight_distance_bounds(x::AbstractGaussian, y::AbstractGaussian, args...) = tight_distance_bounds(x.μ, y.μ, args...)
function validate_interactions(interactions::Dict{Tuple{K,K},V}) where {K,V<:Number}
for (k1,k2) in keys(interactions)
if k1 != k2
if haskey(interactions, (k2,k1))
return false
end
end
end
return true
end
# prepare pairwise values for `σx^2 + σy^2` and `ϕx * ϕy` for all gaussians in `gmmx` and `gmmy`
function pairwise_consts(gmmx::AbstractIsotropicGMM, gmmy::AbstractIsotropicGMM, interactions=nothing)
t = promote_type(numbertype(gmmx),numbertype(gmmy))
pσ, pϕ = zeros(t, length(gmmx), length(gmmy)), zeros(t, length(gmmx), length(gmmy))
for (i,gaussx) in enumerate(gmmx.gaussians)
for (j,gaussy) in enumerate(gmmy.gaussians)
pσ[i,j] = gaussx.σ^2 + gaussy.σ^2
pϕ[i,j] = gaussx.ϕ * gaussy.ϕ
end
end
return pσ, pϕ
end
function pairwise_consts(mgmmx::AbstractMultiGMM{N,T,K}, mgmmy::AbstractMultiGMM{N,S,K}, interactions::Union{Nothing,Dict{Tuple{K,K},V}}=nothing) where {N,T,S,K,V <: Number}
t = promote_type(numbertype(mgmmx),numbertype(mgmmy), isnothing(interactions) ? numbertype(mgmmx) : V)
xkeys = keys(mgmmx.gmms)
ykeys = keys(mgmmy.gmms)
if isnothing(interactions)
interactions = Dict{Tuple{K,K},t}()
for key in xkeys ∩ ykeys
interactions[(key,key)] = one(t)
end
else
@assert validate_interactions(interactions) "Interactions must not include redundant key pairs (i.e. (k1,k2) and (k2,k1))"
end
mpσ, mpϕ = Dict{K, Dict{K, Matrix{t}}}(), Dict{K, Dict{K,Matrix{t}}}()
ukeys = unique(Iterators.flatten(keys(interactions)))
for key1 in ukeys
if key1 ∈ xkeys
push!(mpσ, key1 => Dict{K, Matrix{t}}())
push!(mpϕ, key1 => Dict{K, Matrix{t}}())
for key2 in ukeys
keypair = (key1,key2)
keypair = haskey(interactions, keypair) ? keypair : (key2,key1)
if key2 ∈ ykeys && haskey(interactions, keypair)
pσ, pϕ = pairwise_consts(mgmmx.gmms[key1], mgmmy.gmms[key2])
push!(mpσ[key1], key2 => pσ)
push!(mpϕ[key1], key2 => interactions[keypair] .* pϕ)
end
end
if isempty(mpσ[key1])
delete!(mpσ, key1)
delete!(mpϕ, key1)
end
end
end
return mpσ, mpϕ
end
"""
lowerbound, upperbound = gauss_l2_bounds(x::Union{IsotropicGaussian, AbstractGMM}, y::Union{IsotropicGaussian, AbstractGMM}, σᵣ, σₜ)
lowerbound, upperbound = gauss_l2_bounds(x, y, R::RotationVec, T::SVector{3}, σᵣ, σₜ)
Finds the bounds for overlap between two isotropic Gaussian distributions, two isotropic GMMs, or two sets of
labeled isotropic GMMs for a particular region in 6-dimensional rigid-transformation space, defined by `R`, `T`, `σᵣ` and `σₜ`.
`R` and `T` represent the rotation and translation, respectively, that are at the center of the uncertainty region. If they are not provided,
the uncertainty region is assumed to be centered at the origin (i.e. x has already been transformed).
`σᵣ` and `σₜ` represent the sizes of the rotation and translation uncertainty regions.
See [Campbell & Peterson, 2016](https://arxiv.org/abs/1603.00150)
"""
function gauss_l2_bounds(x::AbstractIsotropicGaussian, y::AbstractIsotropicGaussian, R::RotationVec, T::SVector{3}, σᵣ, σₜ, s=x.σ^2 + y.σ^2, w=x.ϕ*y.ϕ; distance_bound_fun = tight_distance_bounds)
(lbdist, ubdist) = distance_bound_fun(R*x.μ, y.μ-T, σᵣ, σₜ, w < 0)
# evaluate objective function at each distance to get upper and lower bounds
return -overlap(lbdist^2, s, w), -overlap(ubdist^2, s, w)
end
# gauss_l2_bounds(x::AbstractGaussian, y::AbstractGaussian, R::RotationVec, T::SVector{3}, σᵣ, σₜ, s=x.σ^2 + y.σ^2, w=x.ϕ*y.ϕ; kwargs...
# ) = gauss_l2_bounds(R*x, y-T, σᵣ, σₜ, tform.translation, s, w; kwargs...)
gauss_l2_bounds(x::AbstractGaussian, y::AbstractGaussian, block::UncertaintyRegion, s=x.σ^2 + y.σ^2, w=x.ϕ*y.ϕ; kwargs...
) = gauss_l2_bounds(x, y, block.R, block.T, block.σᵣ, block.σₜ, s, w; kwargs...)
gauss_l2_bounds(x::AbstractGaussian, y::AbstractGaussian, block::SearchRegion, s=x.σ^2 + y.σ^2, w=x.ϕ*y.ϕ; kwargs...
) = gauss_l2_bounds(x, y, UncertaintyRegion(block), s, w; kwargs...)
function gauss_l2_bounds(gmmx::AbstractSingleGMM, gmmy::AbstractSingleGMM, R::RotationVec, T::SVector{3}, σᵣ::Number, σₜ::Number, pσ=nothing, pϕ=nothing, interactions=nothing; kwargs...)
# prepare pairwise widths and weights, if not provided
if isnothing(pσ) || isnothing(pϕ)
pσ, pϕ = pairwise_consts(gmmx, gmmy)
end
# sum bounds for each pair of points
lb = 0.
ub = 0.
for (i,x) in enumerate(gmmx.gaussians)
for (j,y) in enumerate(gmmy.gaussians)
lb, ub = (lb, ub) .+ gauss_l2_bounds(x, y, R, T, σᵣ, σₜ, pσ[i,j], pϕ[i,j]; kwargs...)
end
end
return lb, ub
end
function gauss_l2_bounds(mgmmx::AbstractMultiGMM, mgmmy::AbstractMultiGMM, R::RotationVec, T::SVector{3}, σᵣ::Number, σₜ::Number, mpσ=nothing, mpϕ=nothing, interactions=nothing)
# prepare pairwise widths and weights, if not provided
if isnothing(mpσ) || isnothing(mpϕ)
mpσ, mpϕ = pairwise_consts(mgmmx, mgmmy, interactions)
end
# sum bounds for each pair of points
lb = 0.
ub = 0.
for (key1, intrs) in mpσ
for (key2, pσ) in intrs
lb, ub = (lb, ub) .+ gauss_l2_bounds(mgmmx.gmms[key1], mgmmy.gmms[key2], R, T, σᵣ, σₜ, pσ, mpϕ[key1][key2])
end
end
return lb, ub
end
# gauss_l2_bounds(x::AbstractGMM, y::AbstractGMM, R::RotationVec, T::SVector{3}, args...; kwargs...
# ) = gauss_l2_bounds(R*x, y-T, args...; kwargs...)
gauss_l2_bounds(x::AbstractGMM, y::AbstractGMM, block::UncertaintyRegion, args...; kwargs...
) = gauss_l2_bounds(x, y, block.R, block.T, block.σᵣ, block.σₜ, args...; kwargs...)
gauss_l2_bounds(x::AbstractGMM, y::AbstractGMM, block::SearchRegion, args...; kwargs...
) = gauss_l2_bounds(x, y, UncertaintyRegion(block), args...; kwargs...)
"""
gmm = combine(gmmx::IsotropicGMM, gmmy::IsotropicGMM)
mgmm = combine(mgmmx::MultiGMM, mgmmy::MultiGMM)
gmm = combine(gmms::Union{AbstractVector{<:IsotropicGMM},AbstractVector{<:MultiGMM}})
Creates a new `IsotropicGMM` or `MultiGMM` by concatenating the vectors of `IsotropicGaussian`s in
the input GMMs.
"""
function combine(gmmx::AbstractSingleGMM, gmmy::AbstractSingleGMM)
if dims(gmmx) != dims(gmmy)
throw(ArgumentError("GMMs must have the same dimensionality"))
end
t = promote_type(typeof(gmmx), typeof(gmmy))
return t(vcat(gmmx.gaussians, gmmy.gaussians))
end
function combine(mgmmx::IsotropicMultiGMM, mgmmy::IsotropicMultiGMM)
if dims(mgmmx) != dims(mgmmy)
throw(ArgumentError("GMMs must have the same dimensionality"))
end
t = IsotropicGMM{dims(mgmmx),promote_type(numbertype(mgmmx), numbertype(mgmmy))}
d = promote_type(typeof(mgmmx.gmms), typeof(mgmmy.gmms))
gmms = d()
xkeys, ykeys = keys(mgmmx.gmms), keys(mgmmy.gmms)
for key in xkeys ∪ ykeys
        if key ∈ xkeys && key ∈ ykeys
push!(gmms, Pair(key, convert(t, combine(mgmmx.gmms[key], mgmmy.gmms[key]))))
elseif key ∈ xkeys
push!(gmms, Pair(key, convert(t, mgmmx.gmms[key])))
else
push!(gmms, Pair(key, convert(t, mgmmy.gmms[key])))
end
end
return promote_type(typeof(mgmmx), typeof(mgmmy))(gmms)
end
function combine(gmms::Union{AbstractVector{<:AbstractSingleGMM},AbstractVector{<:AbstractMultiGMM}})
if length(gmms) > 1
return combine([combine(gmms[1],gmms[2]), gmms[3:end]...])
elseif length(gmms) == 1
return gmms[1]
else
throw(ArgumentError("provided no GMMs to combine"))
end
end
combine(args...) = combine([args...])
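# Usage sketch: merge any number of same-dimension GMMs into one model.
#   big = combine(gmm1, gmm2, gmm3)
#   length(big) == length(gmm1) + length(gmm2) + length(gmm3)   # true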
import Base: eltype, keytype, valtype, length, size, getindex, iterate, convert, promote_rule,
keys, values, push!, pop!, empty!, haskey, get, get!, delete!
# Type structure: leaving things open for adding anisotropic Gaussians and GMMs
abstract type AbstractGaussian{N,T} end
abstract type AbstractIsotropicGaussian{N,T} <: AbstractGaussian{N,T} end
# concrete subtypes:
# IsotropicGaussian
# AtomGaussian (MolecularGaussians.jl)
# FeatureGaussian (MolecularGaussians.jl)
abstract type AbstractGMM{N,T} <: AbstractModel{N,T} end
abstract type AbstractSingleGMM{N,T} <: AbstractGMM{N,T} end
abstract type AbstractIsotropicGMM{N,T} <: AbstractSingleGMM{N,T} end
# concrete subtypes:
# IsotropicGMM
# MolGMM (MolecularGaussians.jl)
abstract type AbstractMultiGMM{N,T,K} <: AbstractGMM{N,T} end
abstract type AbstractIsotropicMultiGMM{N,T,K} <: AbstractMultiGMM{N,T,K} end
# concrete subtypes:
# IsotropicMultiGMM
# FeatureMolGMM (MolecularGaussians.jl)
# # Base methods for Gaussians
# numbertype(::AbstractGaussian{N,T}) where {N,T} = T
# dims(::AbstractGaussian{N,T}) where {N,T} = N
# length(::AbstractGaussian{N,T}) where {N,T} = N
# size(::AbstractGaussian{N,T}) where {N,T} = (N,)
# size(::AbstractGaussian{N,T}, idx::Int) where {N,T} = (N,)[idx]
# Base methods for GMMs
numbertype(::AbstractGMM{N,T}) where {N,T} = T
dims(::AbstractGMM{N,T}) where {N,T} = N
length(gmm::AbstractSingleGMM) = length(gmm.gaussians)
getindex(gmm::AbstractSingleGMM, idx) = gmm.gaussians[idx]
iterate(gmm::AbstractSingleGMM) = iterate(gmm.gaussians)
iterate(gmm::AbstractSingleGMM, i) = iterate(gmm.gaussians, i)
size(gmm::AbstractSingleGMM{N,T}) where {N,T} = (length(gmm.gaussians), N)
size(gmm::AbstractSingleGMM{N,T}, idx::Int) where {N,T} = (length(gmm.gaussians), N)[idx]
eltype(gmm::AbstractSingleGMM) = eltype(gmm.gaussians)
push!(gmm::AbstractSingleGMM, g::AbstractGaussian) = push!(gmm.gaussians, g)
pop!(gmm::AbstractSingleGMM) = pop!(gmm.gaussians)
empty!(gmm::AbstractSingleGMM) = empty!(gmm.gaussians)
coords(gmm::AbstractSingleGMM) = hcat([g.μ for g in gmm.gaussians]...)
weights(gmm::AbstractSingleGMM) = [g.ϕ for g in gmm.gaussians]
widths(gmm::AbstractSingleGMM) = [g.σ for g in gmm.gaussians]
length(mgmm::AbstractMultiGMM) = length(mgmm.gmms)
getindex(mgmm::AbstractMultiGMM, k) = mgmm.gmms[k]
keys(mgmm::AbstractMultiGMM) = keys(mgmm.gmms)
iterate(mgmm::AbstractMultiGMM) = iterate(mgmm.gmms)
iterate(mgmm::AbstractMultiGMM, i) = iterate(mgmm.gmms, i)
size(mgmm::AbstractMultiGMM{N,T,K}) where {N,T,K} = (length(mgmm.gmms), N)
size(mgmm::AbstractMultiGMM{N,T,K}, idx::Int) where {N,T,K} = (length(mgmm.gmms), N)[idx]
eltype(mgmm::AbstractMultiGMM) = eltype(mgmm.gmms)
eltype(::Type{MGMM}) where MGMM<:AbstractMultiGMM = Pair{keytype(MGMM),valtype(MGMM)}
keytype(mgmm::AbstractMultiGMM) = keytype(typeof(mgmm))
keytype(::Type{<:AbstractMultiGMM{N,T,K}}) where {N,T,K} = K
valtype(mgmm::AbstractMultiGMM) = valtype(mgmm.gmms)
haskey(mgmm::AbstractMultiGMM, k) = haskey(mgmm.gmms, k)
get(mgmm::AbstractMultiGMM, k, default) = get(mgmm.gmms, k, default)
get!(::Type{V}, mgmm::AbstractMultiGMM, k) where V = get!(V, mgmm.gmms, k)
delete!(mgmm::AbstractMultiGMM, k) = delete!(mgmm.gmms, k)
empty!(mgmm::AbstractMultiGMM) = empty!(mgmm.gmms)
coords(mgmm::AbstractMultiGMM) = hcat([coords(gmm) for (k,gmm) in mgmm.gmms]...)
weights(mgmm::AbstractMultiGMM) = vcat([weights(gmm) for (k,gmm) in mgmm.gmms]...)
widths(mgmm::AbstractMultiGMM) = vcat([widths(gmm) for (k,gmm) in mgmm.gmms]...)
"""
A structure that defines an isotropic Gaussian distribution with the location of the mean, `μ`, standard deviation `σ`,
and scaling factor `ϕ`.
"""
struct IsotropicGaussian{N,T} <: AbstractIsotropicGaussian{N,T}
μ::SVector{N,T}
σ::T
ϕ::T
end
IsotropicGaussian(μ::SVector{N,T},σ::T,ϕ::T) where {N,T<:Real} = IsotropicGaussian{N,T}(μ,σ,ϕ)
function IsotropicGaussian(μ::AbstractArray, σ::Real, ϕ::Real)
t = promote_type(eltype(μ), typeof(σ), typeof(ϕ))
return IsotropicGaussian{length(μ),t}(SVector{length(μ),t}(μ), t(σ), t(ϕ))
end
IsotropicGaussian(g::AbstractIsotropicGaussian) = IsotropicGaussian(g.μ, g.σ, g.ϕ)
convert(::Type{IsotropicGaussian{N,T}}, g::AbstractIsotropicGaussian) where {N,T} = IsotropicGaussian{N,T}(g.μ, g.σ, g.ϕ)
promote_rule(::Type{IsotropicGaussian{N,T}}, ::Type{IsotropicGaussian{N,S}}) where {N,T<:Real,S<:Real} = IsotropicGaussian{N,promote_type(T,S)}
(g::IsotropicGaussian)(pos::AbstractVector) = exp(-sum(abs2, pos-g.μ)/(2*g.σ^2))*g.ϕ
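# Worked example: evaluating a gaussian at its mean returns the amplitude ϕ.
#   g = IsotropicGaussian([1.0, 2.0, 3.0], 0.5, 2.0)
#   g([1.0, 2.0, 3.0])   # == 2.0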
"""
A collection of `IsotropicGaussian`s, making up a Gaussian Mixture Model (GMM).
"""
struct IsotropicGMM{N,T} <: AbstractIsotropicGMM{N,T}
gaussians::Vector{IsotropicGaussian{N,T}}
end
IsotropicGMM(gmm::AbstractIsotropicGMM) = IsotropicGMM(gmm.gaussians)
IsotropicGMM{N,T}() where {N,T} = IsotropicGMM{N,T}(IsotropicGaussian{N,T}[])
convert(::Type{GMM}, gmm::AbstractIsotropicGMM) where GMM<:IsotropicGMM = GMM(gmm.gaussians)
promote_rule(::Type{IsotropicGMM{N,T}}, ::Type{IsotropicGMM{N,S}}) where {T,S,N} = IsotropicGMM{N,promote_type(T,S)}
eltype(::Type{IsotropicGMM{N,T}}) where {N,T} = IsotropicGaussian{N,T}
(gmm::IsotropicGMM)(pos::AbstractVector) = sum(g(pos) for g in gmm)
"""
A collection of labeled `IsotropicGMM`s, to each be considered separately during an alignment procedure. That is,
only alignment scores between `IsotropicGMM`s with the same key are considered when aligning two `MultiGMM`s.
"""
struct IsotropicMultiGMM{N,T,K} <: AbstractIsotropicMultiGMM{N,T,K}
gmms::Dict{K, IsotropicGMM{N,T}}
end
IsotropicMultiGMM(gmm::AbstractIsotropicMultiGMM) = IsotropicMultiGMM(gmm.gmms)
convert(t::Type{IsotropicMultiGMM}, mgmm::AbstractIsotropicMultiGMM) = t(mgmm.gmms)
promote_rule(::Type{IsotropicMultiGMM{N,T,K}}, ::Type{IsotropicMultiGMM{N,S,K}}) where {N,T,S,K} = IsotropicMultiGMM{N,promote_type(T,S),K}
valtype(::Type{IsotropicMultiGMM{N,T,K}}) where {N,T,K} = IsotropicGMM{N,T}
# descriptive display
# TODO update to display type parameters, make use of supertypes, etc
Base.show(io::IO, g::AbstractIsotropicGaussian) = println(io,
summary(g),
" with μ = $(g.μ), σ = $(g.σ), and ϕ = $(g.ϕ).\n"
)
Base.show(io::IO, gmm::AbstractSingleGMM) = println(io,
summary(gmm),
" with $(length(gmm)) $(eltype(gmm.gaussians)) distributions."
)
Base.show(io::IO, mgmm::AbstractMultiGMM) = println(io,
summary(mgmm),
" with $(length(mgmm)) labeled $(eltype(mgmm.gmms).parameters[2]) models made up of a total of $(sum([length(gmm) for (key,gmm) in mgmm.gmms])) $(eltype(values(mgmm.gmms))) distributions."
)
"""
ovlp = overlap(distsq, s, w)
Calculates the unnormalized overlap between two Gaussian distributions with combined width `s`
(the sum of their variances), combined weight `w`, and squared distance `distsq` between their means.
"""
function overlap(distsq::Real, s::Real, w::Real)
return w * exp(-distsq / (2*s)) # / (sqrt2pi * sqrt(s))^ndims
# Note, the normalization term for the Gaussians is left out, since it is not required that the total "volume" of each Gaussian
# is equal to 1 (e.g. satisfying the requirements for a probability distribution)
end
"""
ovlp = overlap(dist, σx, σy, ϕx, ϕy)
Calculates the unnormalized overlap between two Gaussian distributions with standard deviations
`σx` and `σy`, weights `ϕx` and `ϕy`, and means separated by distance `dist`.
"""
function overlap(dist::Real, σx::Real, σy::Real, ϕx::Real, ϕy::Real)
return overlap(dist^2, σx^2 + σy^2, ϕx*ϕy)
end
"""
ovlp = overlap(x::IsotropicGaussian, y::IsotropicGaussian)
Calculates the unnormalized overlap between two `IsotropicGaussian` objects.
"""
function overlap(x::AbstractIsotropicGaussian, y::AbstractIsotropicGaussian, s=x.σ^2+y.σ^2, w=x.ϕ*y.ϕ)
return overlap(sum(abs2, x.μ.-y.μ), s, w)
end
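# Worked example: two identical unit-width gaussians at the same mean overlap by
# exactly ϕ², since the distance term is exp(0) = 1.
#   g = IsotropicGaussian([0.0, 0.0, 0.0], 1.0, 2.0)
#   overlap(g, g)   # == 4.0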
"""
ovlp = overlap(x::AbstractSingleGMM, y::AbstractSingleGMM)
Calculates the unnormalized overlap between two `AbstractSingleGMM` objects.
"""
function overlap(x::AbstractSingleGMM, y::AbstractSingleGMM, pσ=nothing, pϕ=nothing)
# prepare pairwise widths and weights, if not provided
if isnothing(pσ) && isnothing(pϕ)
pσ, pϕ = pairwise_consts(x, y)
end
# sum overlaps for all pairwise combinations of Gaussians between x and y
ovlp = zero(promote_type(numbertype(x),numbertype(y)))
for (i,gx) in enumerate(x.gaussians)
for (j,gy) in enumerate(y.gaussians)
ovlp += overlap(gx, gy, pσ[i,j], pϕ[i,j])
end
end
return ovlp
end
"""
ovlp = overlap(x::AbstractMultiGMM, y::AbstractMultiGMM)
Calculates the unnormalized overlap between two `AbstractMultiGMM` objects.
"""
function overlap(x::AbstractMultiGMM, y::AbstractMultiGMM, mpσ=nothing, mpϕ=nothing, interactions=nothing)
# prepare pairwise widths and weights, if not provided
if isnothing(mpσ) && isnothing(mpϕ)
mpσ, mpϕ = pairwise_consts(x, y, interactions)
end
# sum overlaps from each keyed pairs of GMM
ovlp = zero(promote_type(numbertype(x),numbertype(y)))
for k1 in keys(mpσ)
for k2 in keys(mpσ[k1])
ovlp += overlap(x.gmms[k1], y.gmms[k2], mpσ[k1][k2], mpϕ[k1][k2])
end
end
return ovlp
end
"""
l2dist = distance(x, y)
Calculates the L2 distance between two GMMs made up of spherical Gaussian distributions.
"""
function distance(x::AbstractGMM, y::AbstractGMM)
return overlap(x,x) + overlap(y,y) - 2*overlap(x,y)
end
"""
tani = tanimoto(x, y)
Calculates the tanimoto distance based on Gaussian overlap between two GMMs.
"""
function tanimoto(x::AbstractGMM, y::AbstractGMM)
o = overlap(x,y)
return o / (overlap(x,x) + overlap(y,y) - o)
end
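# Sanity check: any model compared with itself has Tanimoto similarity 1.
#   tanimoto(gmmx, gmmx)   # == 1.0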
## Forces
function force!(f::AbstractVector, x::AbstractVector, y::AbstractVector, s::Real, w::Real)
Δ = y - x
f .+= Δ / s * overlap(sum(abs2, Δ), s, w)
end
function force!(f::AbstractVector, x::AbstractIsotropicGaussian, y::AbstractIsotropicGaussian,
s=x.σ^2+y.σ^2, w=x.ϕ*y.ϕ; coef=1)
return force!(f, x.μ, y.μ, s, coef*w)
end
function force!(f::AbstractVector, x::AbstractIsotropicGaussian, y::AbstractIsotropicGMM, pσ=nothing, pϕ=nothing; kwargs...)
if isnothing(pσ) && isnothing(pϕ)
xσsq = x.σ^2
pσ = [xσsq + gy.σ^2 for gy in y.gaussians]
pϕ = [x.ϕ * gy.ϕ for gy in y.gaussians]
end
for (gy, s, w) in zip(y.gaussians, pσ, pϕ)
force!(f, x, gy, s, w; kwargs...)
end
end
function force!(f::AbstractVector, x::AbstractIsotropicGMM, y::AbstractIsotropicGMM, pσ=nothing, pϕ=nothing; kwargs...)
if isnothing(pσ) && isnothing(pϕ)
pσ, pϕ = pairwise_consts(x, y)
end
for (i,gx) in enumerate(x.gaussians)
force!(f, gx, y, pσ[i,:], pϕ[i,:]; kwargs...)
end
end
function force!(f::AbstractVector, x::AbstractMultiGMM, y::AbstractMultiGMM; interactions=nothing)
mpσ, mpϕ = pairwise_consts(x, y, interactions)
for k1 in keys(mpσ)
for k2 in keys(mpσ[k1])
# don't pass coef as a keyword argument, since the interaction coefficient is baked into mpϕ
force!(f, x.gmms[k1], y.gmms[k2], mpσ[k1][k2], mpϕ[k1][k2])
end
end
end
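# Usage sketch (hypothetical inputs): accumulate the net overlap gradient acting
# on x into a preallocated 3-vector.
#   f = zeros(3)
#   force!(f, gmmx, gmmy)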
"""
tgmm = tivgmm(gmm::IsotropicGMM, c=Inf)
tgmm = tivgmm(mgmm::MultiGMM, c=Inf)
Returns a new `IsotropicGMM` or `MultiGMM` containing up to `c*length(gmm)` translation invariant vectors (TIVs) connecting Gaussian means in `gmm`.
TIVs are chosen to maximize length multiplied by the weights of the connected distributions.
See [Li et. al. (2019)](https://arxiv.org/abs/1812.11307) for a description of TIV construction.
"""
function tivgmm(gmm::AbstractIsotropicGMM, c=Inf)
t = numbertype(gmm)
npts, ndims = size(gmm)
n = ceil(c*npts)
if npts^2 < n
n = npts^2
end
scores = fill(zero(t), npts, npts)
for i=1:npts
for j = i:npts
scores[i,j] = scores[j,i] = norm(gmm.gaussians[i].μ-gmm.gaussians[j].μ) * √(gmm.gaussians[i].ϕ * gmm.gaussians[j].ϕ)
end
end
tivgaussians = IsotropicGaussian{ndims,t}[]
order = sortperm(vec(scores), rev=true)
for idx in order[1:Int(n)]
i = Int(floor((idx-1)/npts)+1)
j = mod(idx-1, npts)+1
x, y = gmm.gaussians[i], gmm.gaussians[j]
push!(tivgaussians, IsotropicGaussian(x.μ-y.μ, √(x.σ*y.σ), √(x.ϕ*y.ϕ)))
end
return IsotropicGMM(tivgaussians)
end
function tivgmm(mgmm::AbstractIsotropicMultiGMM, c=Inf)
    gmms = Dict{keytype(mgmm), IsotropicGMM{dims(mgmm),numbertype(mgmm)}}()
for key in keys(mgmm.gmms)
push!(gmms, Pair(key, tivgmm(mgmm.gmms[key], c)))
end
return IsotropicMultiGMM(gmms)
end
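# Usage sketch: build a TIV model with at most 2 TIVs per input gaussian.
#   tiv = tivgmm(gmm, 2)
#   length(tiv) ≤ 2 * length(gmm)   # true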
function Base.:*(R::AbstractMatrix{W}, x::IsotropicGaussian{N,V}) where {N,V,W}
numtype = promote_type(V, W)
return IsotropicGaussian{N,numtype}(R*x.μ, x.σ, x.ϕ)
end
function Base.:+(x::IsotropicGaussian{N,V}, T::AbstractVector{W}) where {N,V,W}
numtype = promote_type(V, W)
return IsotropicGaussian{N,numtype}(x.μ.+T, x.σ, x.ϕ)
end
Base.:-(x::IsotropicGaussian, T::AbstractVector,) = x + (-T)
function Base.:*(R::AbstractMatrix{W}, x::IsotropicGMM{N,V}) where {N,V,W}
numtype = promote_type(V, W)
return IsotropicGMM{N,numtype}([R*g for g in x.gaussians])
end
function Base.:+(x::IsotropicGMM{N,V}, T::AbstractVector{W}) where {N,V,W}
numtype = promote_type(V, W)
return IsotropicGMM{N,numtype}([g+T for g in x.gaussians])
end
Base.:-(x::IsotropicGMM, T::AbstractVector,) = x + (-T)
function Base.:*(R::AbstractMatrix{W}, x::IsotropicMultiGMM{N,V,K}) where {N,V,K,W}
numtype = promote_type(V, W)
gmmdict = Dict{K, IsotropicGMM{N,numtype}}()
for (key, gmm) in x.gmms
push!(gmmdict, key=>R*gmm)
end
return IsotropicMultiGMM(gmmdict)
end
function Base.:+(x::IsotropicMultiGMM{N,V,K}, T::AbstractVector{W}) where {N,V,K,W}
numtype = promote_type(V, W)
gmmdict = Dict{K, IsotropicGMM{N,numtype}}()
for (key, gmm) in x.gmms
push!(gmmdict, key=>gmm+T)
end
return IsotropicMultiGMM(gmmdict)
end
Base.:-(x::IsotropicMultiGMM, T::AbstractVector) = x + (-T)
# There is some concern about the inferability of the functions below. Using Test.@inferred did not throw any errors
# function Base.:*(R::AbstractMatrix, x::AbstractIsotropicGaussian)
# ty = typeof(x)
# otherfields = [getfield(x,fname) for fname in fieldnames(typeof(x))][5:end] # first 4 fields must be `μ`, `σ`, `ϕ`, and `dirs`
# return ty.name.wrapper(R*x.μ, x.σ, x.ϕ, [R*dir for dir in x.dirs], otherfields...)
# end
# function Base.:+(x::AbstractIsotropicGaussian, T::AbstractVector,)
# ty = typeof(x)
# otherfields = [getfield(x,fname) for fname in fieldnames(typeof(x))][5:end] # first 4 fields must be `μ`, `σ`, `ϕ`, and `dirs`
# return ty.name.wrapper(x.μ.+T, x.σ, x.ϕ, x.dirs, otherfields...)
# end
# Base.:-(x::AbstractIsotropicGaussian, T::AbstractVector,) = x + (-T)
# function Base.:*(R::AbstractMatrix, x::AbstractIsotropicGMM)
# ty = typeof(x)
# otherfields = [getfield(x,fname) for fname in fieldnames(typeof(x))][2:end] # first fields must be `gaussians`
# return ty.name.wrapper([R*g for g in x.gaussians], otherfields...)
# end
# function Base.:+(x::AbstractIsotropicGMM, T::AbstractVector,)
# ty = typeof(x)
# otherfields = [getfield(x,fname) for fname in fieldnames(typeof(x))][2:end] # first fields must be `gaussians`
# return ty.name.wrapper([g+T for g in x.gaussians], otherfields...)
# end
# Base.:-(x::AbstractIsotropicGMM, T::AbstractVector,) = x + (-T)
# function Base.:*(R::AbstractMatrix, x::AbstractIsotropicMultiGMM)
# ty = typeof(x)
# gmmkeys = keys(x.gmms)
# gmmdict = Dict(first(gmmkeys)=>R*x.gmms[first(gmmkeys)])
# for (i,key) in enumerate(gmmkeys)
# i === 1 ? continue : push!(gmmdict, key=>R*x.gmms[key])
# end
# otherfields = [getfield(x,fname) for fname in fieldnames(typeof(x))][2:end] # first field must be `gmms`
# return ty.name.wrapper(gmmdict, otherfields...)
# end
# function Base.:+(x::AbstractIsotropicMultiGMM, T::AbstractVector)
# ty = typeof(x)
# gmmkeys = keys(x.gmms)
# gmmdict = Dict(first(gmmkeys)=>x.gmms[first(gmmkeys)]+T)
# for (i,key) in enumerate(gmmkeys)
# i === 1 ? continue : push!(gmmdict, key=>x.gmms[key]+T)
# end
# otherfields = [getfield(x,fname) for fname in fieldnames(typeof(x))][2:end] # first field must be `gmms`
# return ty.name.wrapper(gmmdict, otherfields...)
# end
# Base.:-(x::AbstractIsotropicMultiGMM, T::AbstractVector) = x + (-T)
function goicp_align(x::AbstractSinglePointSet, y::AbstractSinglePointSet; kwargs...)
kdtree = KDTree(y.coords, Euclidean())
correspondence(xx::AbstractMatrix, yy::AbstractMatrix) = closest_points(xx, kdtree)
boundsfun(xx::AbstractSinglePointSet, yy::AbstractSinglePointSet, sr::SearchRegion) = squared_dist_bounds(xx,yy,sr; correspondence = correspondence, distance_bound_fun = tight_distance_bounds)
localfun(xx::AbstractSinglePointSet, yy::AbstractSinglePointSet, block::SearchRegion) = local_icp(xx, yy, block; kdtree=kdtree)
return branchbound(x, y; boundsfun=boundsfun, localfun=localfun, kwargs...)
end
function trl_goicp_align(x::AbstractSinglePointSet, y::AbstractSinglePointSet; kwargs...)
return goicp_align(x, y; blockfun=TranslationRegion, tformfun=Translation, kwargs...)
end
function goih_align(x::AbstractPointSet, y::AbstractPointSet; kwargs...)
boundsfun(xx::AbstractSinglePointSet, yy::AbstractSinglePointSet, sr::SearchRegion) = squared_dist_bounds(xx,yy,sr; correspondence = hungarian_assignment, distance_bound_fun = tight_distance_bounds)
branchbound(x, y; boundsfun=boundsfun, localfun=local_iterative_hungarian, kwargs...)
end
function tiv_goicp_align(x::AbstractSinglePointSet, y::AbstractSinglePointSet, cx=Inf, cy=Inf; kwargs...)
tivx, tivy = tivpointset(x,cx), tivpointset(y,cy)
kdtree = KDTree(y.coords, Euclidean())
tiv_kdtree = KDTree(tivy.coords, Euclidean())
correspondence(xx::AbstractMatrix, yy::AbstractMatrix) = closest_points(xx, kdtree)
rot_correspondence(xx::AbstractMatrix, yy::AbstractMatrix) = closest_points(xx, tiv_kdtree)
boundsfun(xx::AbstractSinglePointSet, yy::AbstractSinglePointSet, sr::SearchRegion) = squared_dist_bounds(xx,yy,sr; correspondence = correspondence, distance_bound_fun = tight_distance_bounds)
rot_boundsfun(xx::AbstractSinglePointSet, yy::AbstractSinglePointSet, sr::SearchRegion) = squared_dist_bounds(xx,yy,sr; correspondence = rot_correspondence, distance_bound_fun = tight_distance_bounds)
localfun(xx::AbstractSinglePointSet, yy::AbstractSinglePointSet, block::SearchRegion) = local_icp(xx, yy, block; kdtree=kdtree)
objfun(X, x, y) = distanceobj(X, x, y; correspondence = correspondence);
rot_objfun(X, x, y) = distanceobj(X, x, y; correspondence = rot_correspondence);
rot_localfun(xx, yy, block; kwargs...) = iterate_local_alignment(xx, yy, block; correspondence = rot_correspondence, tformfun=LinearMap, kwargs...);
trl_localfun(xx, yy, block; kwargs...) = iterate_local_alignment(xx, yy, block; correspondence = correspondence, tformfun=Translation, kwargs...);
    return tiv_branchbound(x, y, tivx, tivy; boundsfun=boundsfun, rot_boundsfun=rot_boundsfun, localfun=localfun, rot_localfun=rot_localfun, trl_localfun=trl_localfun, kwargs...)
end
function tiv_goih_align(x::AbstractPointSet, y::AbstractPointSet, cx=Inf, cy=Inf; kwargs...)
boundsfun(xx::AbstractSinglePointSet, yy::AbstractSinglePointSet, sr::SearchRegion) = squared_dist_bounds(xx,yy,sr; correspondence = hungarian_assignment, distance_bound_fun = tight_distance_bounds)
objfun(X, x, y) = distanceobj(X, x, y; correspondence = hungarian_assignment);
rot_localfun(xx, yy, block; kwargs...) = iterate_local_alignment(xx, yy, block; correspondence = hungarian_assignment, tformfun=LinearMap, kwargs...);
trl_localfun(xx, yy, block; kwargs...) = iterate_local_alignment(xx, yy, block; correspondence = hungarian_assignment, tformfun=Translation, kwargs...);
    return tiv_branchbound(x, y, tivpointset(x,cx), tivpointset(y,cy); boundsfun=boundsfun, localfun=local_iterative_hungarian, rot_localfun=rot_localfun, trl_localfun=trl_localfun, kwargs...)
end
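# Usage sketch (hypothetical point sets):
#   res = goicp_align(psx, psy; atol=0.1)   # closest-point (ICP-style) correspondences
#   res = goih_align(psx, psy; atol=0.1)    # optimal (Hungarian) correspondences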
loose_distance_bounds(x::AbstractPoint, y::AbstractPoint, sr::Number, st::Number) = loose_distance_bounds(x.coords, y.coords, sr, st)
tight_distance_bounds(x::AbstractPoint, y::AbstractPoint, sr::Number, st::Number) = tight_distance_bounds(x.coords, y.coords, sr, st)
function squared_dist_bounds(x::AbstractSinglePointSet, y::AbstractSinglePointSet, σᵣ::Number, σₜ::Number;
distance_bound_fun::Union{typeof(tight_distance_bounds),typeof(loose_distance_bounds)} = loose_distance_bounds,
correspondence = hungarian_assignment)
matches = correspondence(x.coords, y.coords)
# sum bounds for each pair of points
lb = 0.
ub = 0.
for (i,j) in matches
(matchlb, matchub) = distance_bound_fun(x[i], y[j], σᵣ, σₜ)
lb += matchlb^2
ub += matchub^2
end
return lb, ub # , matches
end
function squared_dist_bounds(x::AbstractMultiPointSet{N,T,K}, y::AbstractMultiPointSet{N,S,L}, σᵣ, σₜ; kwargs...) where {N,T,K,S,L}
# sum bounds for each matched pair of pointsets
lb = 0.
ub = 0.
matches = Dict{promote_type(K,L), Vector{Tuple{Int,Int}}}()
for key in keys(x.pointsets) ∩ keys(y.pointsets)
keylb, keyub, keymatches = squared_dist_bounds(x.pointsets[key], y.pointsets[key], σᵣ, σₜ; kwargs...)
lb += keylb
ub += keyub
push!(matches, key => keymatches)
end
return lb, ub # , matches
end
squared_dist_bounds(x::AbstractPointSet, y::AbstractPointSet, R::RotationVec, T::SVector{3}, σᵣ::Number, σₜ::Number; kwargs...
) = squared_dist_bounds(R*x, y-T, σᵣ, σₜ; kwargs...)
squared_dist_bounds(x::AbstractPointSet, y::AbstractPointSet, ur::UncertaintyRegion; kwargs...
) = squared_dist_bounds(x, y, ur.R, ur.T, ur.σᵣ, ur.σₜ; kwargs...)
squared_dist_bounds(x::AbstractPointSet, y::AbstractPointSet, sr::SearchRegion; kwargs...
) = squared_dist_bounds(x, y, UncertaintyRegion(sr); kwargs...)
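# Example (assuming x and y are concrete single point sets): the positional
# method takes the rotational and translational half-widths σᵣ, σₜ of the
# current search block directly, so a whole-rotation-space bound can be
# checked with σᵣ = π and a chosen translation radius.
#
#   lb, ub = squared_dist_bounds(x, y, π, 1.0; correspondence = closest_points)
#   @assert lb <= ub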
| GaussianMixtureAlignment | https://github.com/tmcgrath325/GaussianMixtureAlignment.jl.git |
|
[
"MIT"
] | 0.2.2 | c81897832613bbcf7cc19639fd3a1a0a154abe1b | code | 3083 | # closest points using a particular metric, using a K-D tree implemented by NearestNeighbors
function closest_points(P, kdtree::KDTree) # kdtree = KDTree(Q, Euclidean())
nearestidx, _ = nn(kdtree, P)
return [(i, nearestidx[i]) for i = 1:size(P,2)]
end
closest_points(P::AbstractMatrix, Q::AbstractMatrix) = closest_points(P, KDTree(Q, Euclidean()))
closest_points(P::AbstractSinglePointSet, Q::AbstractSinglePointSet) = closest_points(P.coords, Q.coords)
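# Example: with plain 3×N matrices the result is a vector of
# (source index, nearest target index) pairs, one per column of P.
#
#   P, Q = rand(3, 5), rand(3, 8)
#   closest_points(P, Q)   # e.g. [(1, 3), (2, 7), (3, 1), (4, 3), (5, 6)]
#
# The brute-force O(N*M) alternative below is left commented out.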
# function closest_points(P::AbstractMatrix, Q::AbstractMatrix)
# matches = Vector{Tuple{Int,Int}}(undef, size(P,2))
# for i=1:size(P,2)
# lowestDistSq = Inf
# bestIdx = 0
# for j=1:size(Q,2)
# distSq = sum(abs2, P[:,i] .- Q[:,j])
# if distSq < lowestDistSq
# bestIdx = j
# lowestDistSq = distSq
# end
# end
# matches[i] = (i,bestIdx)
# end
# return matches
# end
# Hungarian algorithm for assignment
function hungarian_assignment(P::AbstractMatrix{S}, Q::AbstractMatrix{T}, metric=SqEuclidean()) where {S,T}
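# NOTE: the cost matrix is filled by an explicit squared-Euclidean loop below
# rather than `pairwise(metric, P, Q; dims=2)`, so the `metric` argument is
# effectively ignored unless the commented line is restored.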
# weights = pairwise(metric, P, Q; dims=2)
numtype = promote_type(S,T)
weights = Matrix{numtype}(undef, size(P,2), size(Q,2))
for i=1:size(P,2)
for j=1:size(Q,2)
weights[i,j] = sum(abs2, P[:,i] .- Q[:,j])
end
end
assignment, cost = hungarian(weights)
matches = Tuple{Int,Int}[]
for (i,a) in enumerate(assignment)
if a !== 0
push!(matches, (i,a))
end
end
return matches
end
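# Example: unlike nearest-neighbor matching, the assignment is one-to-one, so
# each column of Q is matched to at most one column of P; when Q has more
# columns than P, the surplus columns of Q are simply left unmatched.
#
#   P, Q = rand(3, 4), rand(3, 6)
#   hungarian_assignment(P, Q)   # four (i, j) pairs with distinct j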
hungarian_assignment(P::AbstractSinglePointSet, Q::AbstractSinglePointSet, metric=SqEuclidean()) = hungarian_assignment(P.coords, Q.coords, metric)
function hungarian_assignment(P::AbstractMultiPointSet{N,T,K}, Q::AbstractMultiPointSet{N,T,K}, metric=SqEuclidean()) where {N,T,K}
matchesdict = Dict{K, Vector{Tuple{Int,Int}}}()
for (key, ps) in P.pointsets
push!(matchesdict, key => hungarian_assignment(ps, Q.pointsets[key], metric))
end
return matchesdict
end
# generate matrices for Kabsch from a list of correspondences
matched_points(P::AbstractMatrix, Q::AbstractMatrix, matches) = (hcat([P[:,i] for (i,j) in matches]...), hcat([Q[:,j] for (i,j) in matches]...))
matched_points(P, Q, matches) = ([P[i] for (i,j) in matches], [Q[j] for (i,j) in matches])
matched_points(P, Q; correspondence=closest_points, kwargs...) = matched_points(P, Q, correspondence(P,Q; kwargs...))
matched_points(P::AbstractSinglePointSet, Q::AbstractSinglePointSet, args...; kwargs...) = matched_points(P.coords, Q.coords, args...; kwargs...)
function matched_points(P::AbstractMultiPointSet{N,T,K}, Q::AbstractMultiPointSet{N,T,K}, matchesdict::Dict{K, Vector{Tuple{Int,Int}}}) where {N,T,K}
matchedP = Array{T}(undef, N, 0)
matchedQ = Array{T}(undef, N, 0)
for (key, matches) in matchesdict
(mp, mq) = matched_points(P.pointsets[key], Q.pointsets[key], matches)
matchedP = hcat(matchedP, mp)
matchedQ = hcat(matchedQ, mq)
end
return matchedP, matchedQ
end
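# Example: build the matched coordinate matrices (e.g. for a Kabsch solve)
# either from precomputed matches or by supplying a correspondence function.
#
#   P, Q = rand(3, 5), rand(3, 5)
#   mp, mq = matched_points(P, Q)                                     # nearest neighbors
#   mp, mq = matched_points(P, Q; correspondence = hungarian_assignment)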
| GaussianMixtureAlignment | https://github.com/tmcgrath325/GaussianMixtureAlignment.jl.git |