licenses (sequence, lengths 1 to 3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 value) | type (string, 2 values) | size (string, lengths 2 to 8) | text (string, lengths 25 to 67.1M) | package_name (string, lengths 2 to 41) | repo (string, lengths 33 to 86)
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.2.5 | c0aa4b519104fecca9bc146c9843393f7cbd3c99 | code | 6234 | using AbstractNumbers, SpecialFunctions
using Test, Aqua
Aqua.test_ambiguities([AbstractNumbers, Base, Core]; exclude=[(==)])
Aqua.test_unbound_args(AbstractNumbers)
Aqua.test_undefined_exports(AbstractNumbers)
Aqua.test_project_extras(AbstractNumbers)
Aqua.test_stale_deps(AbstractNumbers)
Aqua.test_deps_compat(AbstractNumbers)
Aqua.test_project_toml_formatting(AbstractNumbers)
struct MyNumber{T} <: AbstractNumbers.AbstractNumber{T}
number::T
end
const SF = SpecialFunctions
AbstractNumbers.number(x::MyNumber) = x.number
AbstractNumbers.basetype(::Type{<: MyNumber}) = MyNumber
struct MyReal{T<:Real} <: AbstractNumbers.AbstractReal{T}
real::T
end
AbstractNumbers.number(x::MyReal) = x.real
AbstractNumbers.basetype(::Type{<: MyReal}) = MyReal
x = MyNumber(1)
@test convert(Number, x) === x
x = MyReal(1)
@test convert(Real, x) === x
all_funcs = (
:~, :conj, :abs, :sin, :cos, :tan, :sinh, :cosh, :tanh, :asin, :acos, :atan,
:asinh, :acosh, :atanh, :sec, :csc, :cot, :asec, :acsc, :acot, :sech, :csch,
:coth, :asech, :acsch, :acoth, :sinc, :cosc, :cosd, :cotd, :cscd, :secd,
:sind, :tand, :acosd, :acotd, :acscd, :asecd, :asind, :atand, :rad2deg,
:deg2rad, :log, :log2, :log10, :log1p, :exponent, :exp, :exp2, :expm1,
:cbrt, :sqrt, :ceil, :floor, :trunc, :round, :significand,
:frexp, :ldexp, :modf, :real, :imag, :!, :identity,
:zero, :one, :<<, :>>, :abs2, :sign, :sinpi, :cospi, :exp10,
:iseven, :ispow2, :isfinite, :isinf, :isodd, :isinteger, :isreal,
:isnan, :isempty, :iszero, :transpose, :copysign, :flipsign, :signbit,
:+, :-, :*, :/, :\, :^, :(==), :(!=), :<, :(<=), :>, :(>=), :min, :max,
:div, :fld, :rem, :mod, :mod1, :cmp, :&, :|, :xor,
:clamp
)
special_funcs = (
:airyai,
:airyaiprime,
:airybi,
:airybiprime,
:airyaix,
:airyaiprimex,
:airybix,
:airybiprimex,
:besselh,
:besselhx,
:besseli,
:besselix,
:besselj,
:besselj0,
:besselj1,
:besseljx,
:besselk,
:besselkx,
:bessely,
:bessely0,
:bessely1,
:besselyx,
:dawson,
:erf,
:erfc,
:erfcinv,
:erfcx,
:erfi,
:erfinv,
:eta,
:digamma,
:invdigamma,
:polygamma,
:trigamma,
:hankelh1,
:hankelh1x,
:hankelh2,
:hankelh2x,
:zeta
)
nancompare(a, b) = a == b
function nancompare(a::Number, b::Number)
isnan(a) && isnan(b) && return true
a == b
end
function myrand(::Type{T}) where T <: Integer
x = rand(T(-10):T(10))
x == 0 ? T(1) : x
end
myrand(::Type{T}) where T <: Unsigned = rand(T(1):T(20))
myrand(::Type{T}) where T = rand(T)
@testset "AbstractNumbers" begin
for (mod, funcs) in ((Base, all_funcs),), f in funcs
println(f)
func = getfield(mod, f)
@testset "testing $f" begin
for i = 1:4
for T in (Float32, Float64, ComplexF64, Int, UInt)
args = ntuple(x-> myrand(T), i)
if i == 3 && func === cmp
continue
end
if T <: Complex
(func in (
cotd, cosd, cscd, secd, sind, tand, acosd, acotd, acscd,
asecd, asind, atand, rem, modf, (<), (>), (<=), (>=),
min, max, cmp, &, |, xor, clamp, div, fld
)) && continue
end
if T <: AbstractFloat
(func in (
&, |, xor
)) && continue
end
# I have no idea what these functions are - they seem to throw exceptions when not careful with input values.
# Also, they're not usable since they're deprecated, even if you get them from SpecialFunctions.
if applicable(func, args...)
res = try
a = func(args...)
b = func(MyNumber.(args)...)
@testset "$T $args" begin
@test nancompare(a, b)
end
if !(T <: Complex)
c = func(MyReal.(args)...)
@testset "$T $args" begin
@test nancompare(a, c)
end
end
catch e
isa(e, DomainError) || rethrow(e)
end
end
end
end
end
end
# isapprox
for T in (Float32, Float64, ComplexF64, Int, UInt)
args = ntuple(x-> myrand(T), 2)
a = ≈(args...)
b = ≈(MyNumber.(args)...)
@test a == b
if !(T <: Complex)
c = ≈(MyReal.(args)...)
@test a == c
end
end
@testset "bessel functions" begin
bessel_funcs = [(SF.bessely0, SF.bessely1, SF.bessely), (SF.besselj0, SF.besselj1, SF.besselj)]
@testset "$z, $o" for (z, o, f) in bessel_funcs
@test z(Float32(2.0)) ≈ z(Float64(2.0))
@test o(Float32(2.0)) ≈ o(Float64(2.0))
@test z(MyNumber(2)) ≈ z(MyNumber(2.0))
@test o(MyNumber(2)) ≈ o(MyNumber(2.0))
@test z(MyNumber(2.0 + im)) ≈ f(MyNumber(0), MyNumber(2.0 + im))
@test o(MyNumber(2.0 + im)) ≈ f(MyNumber(1), MyNumber(2.0 + im))
@test z(MyReal(2)) ≈ z(MyReal(2.0))
@test o(MyReal(2)) ≈ o(MyReal(2.0))
end
@testset "besselj error throwing" begin
@test_throws MethodError SF.besselj(MyNumber(1.2), MyNumber(big(1.0)))
@test_throws MethodError SF.besselj(MyNumber(1), MyNumber(complex(big(1.0))))
@test_throws MethodError SF.besseljx(MyNumber(1), MyNumber(big(1.0)))
@test_throws MethodError SF.besseljx(MyNumber(1), MyNumber(complex(big(1.0))))
@test_throws MethodError SF.besselj(MyReal(1.2), MyReal(big(1.0)))
@test_throws MethodError SF.besseljx(MyReal(1), MyReal(big(1.0)))
end
end
end
nothing
| AbstractNumbers | https://github.com/SimonDanisch/AbstractNumbers.jl.git |
|
[
"MIT"
] | 0.2.5 | c0aa4b519104fecca9bc146c9843393f7cbd3c99 | docs | 1778 | # AbstractNumbers
[](https://travis-ci.org/SimonDanisch/AbstractNumbers.jl)
[](http://codecov.io/github/SimonDanisch/AbstractNumbers.jl?branch=master)
There are a lot of functions one needs to define on a custom number type to make it work just like a Julia Base number:
namely, around 160 functions, with quite a few methods.
With AbstractNumbers, this is all you need to start the life of a new, wonderful Number type:
```Julia
using AbstractNumbers, SpecialFunctions
using Test
struct MyNumber{T} <: AbstractNumbers.AbstractNumber{T}
number::T
end
AbstractNumbers.number(x::MyNumber) = x.number
AbstractNumbers.basetype(::Type{<: MyNumber}) = MyNumber
```
Now, `MyNumber` will have all functions defined for it. :)
If you need some functions to behave differently, just overload those functions with your concrete type!
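For example, a hypothetical overload (not part of the package) that makes `sqrt` of a negative `MyNumber` return a complex-wrapped result instead of throwing a `DomainError`:
```Julia
# hypothetical custom behavior: promote to complex before taking the root
Base.sqrt(x::MyNumber) = MyNumber(sqrt(complex(x.number)))
```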
# Implementation
Right now, the overloads of the AbstractNumber types are generated with a script that prints out the expressions as strings.
I purposefully decided against the usage of a macro for two reasons:
1) I got quickly annoyed by the stack traces and not being able to immediately see what's going on - which is much easier when all functions are written out
2) I need to dynamically extract some attributes from the functions before emitting methods for them. This needs some supervision and should just be done every time Julia Base changes - so it shouldn't be part of a macro, hence I'm stuck with some kind of generator script anyway. Instead of mixing the macro approach with a generator approach, I just went full generator!
| AbstractNumbers | https://github.com/SimonDanisch/AbstractNumbers.jl.git |
|
[
"MIT"
] | 0.2.2 | 90607d4bc92a8bae044313417e0de8d04e4e8e20 | code | 504 | using Documenter, DeterminantalPointProcesses
makedocs(;
modules=[DeterminantalPointProcesses],
format=Documenter.Writers.HTMLWriter.HTML(;
assets=["assets/icon.ico"], analytics="UA-129106538-2"
),
sitename="DeterminantalPointProcesses",
authors="Theo Galy-Fajou, Maruan Al-Shedivat",
pages=["Home" => "index.md"],
)
deploydocs(;
deps=Deps.pip("mkdocs", "python-markdown-math"),
repo="github.com/theogf/DeterminantalPointProcesses.jl.git",
target="build",
)
| DeterminantalPointProcesses | https://github.com/theogf/DeterminantalPointProcesses.jl.git |
|
[
"MIT"
] | 0.2.2 | 90607d4bc92a8bae044313417e0de8d04e4e8e20 | code | 1188 | ######################################################################
# DeterminantalPointProcesses.jl
# Determinantal Point Processes in Julia
# http://github.com/theogf/DeterminantalPointProcesses.jl
# MIT Licensed
######################################################################
__precompile__(true)
module DeterminantalPointProcesses
using Distributed
using Distributions: Distributions, pdf, logpdf
using LinearAlgebra
using Random: Random, rand, bitrand, AbstractRNG, MersenneTwister, GLOBAL_RNG
using Requires
using SharedArrays
import Base: rand
export
# point process types and aliases
DeterminantalPointProcess,
DPP,
kDeterminantalPointProcess,
kDPP,
# methods
logpdf, # log probability mass function
pdf, # probability mass function
rand, # generate samples
randmcmc # generate samples using MCMC
function __init__()
@require KernelFunctions="ec8451be-7e33-11e9-00cf-bbf324bd1392" include("kernelcompat.jl")
end
### source files
# Types
include("types.jl")
# pdf and rand methods
include("prob.jl")
include("rand.jl")
# utilities
include("utils.jl")
end # module
| DeterminantalPointProcesses | https://github.com/theogf/DeterminantalPointProcesses.jl.git |
|
[
"MIT"
] | 0.2.2 | 90607d4bc92a8bae044313417e0de8d04e4e8e20 | code | 424 | using .KernelFunctions
function DeterminantalPointProcess(kernel::Kernel, X::AbstractVector; parallelize=false)
return DeterminantalPointProcess(kernelmatrix(kernel, X); parallelize=parallelize)
end
function DeterminantalPointProcess(kernel::Kernel, X::AbstractMatrix; obsdim=1, parallelize=false)
return DeterminantalPointProcess(kernel, KernelFunctions.vec_of_vecs(X; obsdim=obsdim); parallelize=parallelize)
end | DeterminantalPointProcesses | https://github.com/theogf/DeterminantalPointProcesses.jl.git |
|
[
"MIT"
] | 0.2.2 | 90607d4bc92a8bae044313417e0de8d04e4e8e20 | code | 995 | """
Compute the log probability of a sample `z` under the given DPP.
"""
function Distributions.logpdf(dpp::DeterminantalPointProcess, z::AbstractVector{<:Int})
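# log P(z) = log det(L[z, z]) - log det(L + I); both determinants are evaluated via eigenvalues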
L_z_eigvals = eigvals(dpp.L[z, z])
return sum(log.(L_z_eigvals)) - sum(log.(dpp.Lfact.values .+ 1))
end
"""
Compute the log probability of a sample `z` under the given k-DPP.
"""
function Distributions.logpdf(kdpp::kDeterminantalPointProcess, z::AbstractArray{<:Int})
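# log P_k(z) = log det(L[z, z]) - log e_k(λ), with e_k the k-th elementary symmetric polynomial of the eigenvalues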
L_z_eigvals = eigvals(kdpp.dpp.L[z, z])
return sum(log.(L_z_eigvals)) .-
log(elem_symm_poly(kdpp.dpp.Lfact.values, kdpp.k)[end, end])
end
"""
Compute the probability of a sample `z` under the given DPP.
"""
function Distributions.pdf(dpp::DeterminantalPointProcess, z::AbstractArray{<:Int})
return exp(logpdf(dpp, z))
end
"""
Compute the probability of a sample `z` under the given k-DPP.
"""
function Distributions.pdf(kdpp::kDeterminantalPointProcess, z::AbstractArray{<:Int})
return exp(logpdf(kdpp, z))
end
| DeterminantalPointProcesses | https://github.com/theogf/DeterminantalPointProcesses.jl.git |
|
[
"MIT"
] | 0.2.2 | 90607d4bc92a8bae044313417e0de8d04e4e8e20 | code | 13465 | """
Sampling from DPP and k-DPP.
References:
-----------
[1] Kulesza, A., and B. Taskar. Determinantal point processes for machine
learning. arXiv preprint arXiv:1207.6083, 2012.
[2] Kang, B. Fast determinantal point process sampling with application to
clustering. NIPS, 2013.
"""
"""
_sample_mask!(rng, M, Λ, i)
Sample a mask for an elementary DPP.
"""
function _sample_mask!(
rng::AbstractRNG, M::AbstractMatrix{Bool}, Λ::AbstractArray{<:Real}, i::Int
)
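# include each eigenvector independently with probability λ_j / (λ_j + 1)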
for j in 1:length(Λ)
M[j, i] = rand(rng) < (Λ[j] / (Λ[j] + 1))
end
return M
end
"""
_sample_k_mask!(rng, M, Λ, E, k, i)
Sample a mask for an elementary k-DPP.
"""
function _sample_k_mask!(
rng::AbstractRNG,
M::AbstractMatrix{Bool},
Λ::AbstractArray{<:Real},
E::AbstractMatrix{<:Real},
k::Int,
i::Int,
)
j = length(Λ)
remaining = k
# iteratively sample a k-mask
while remaining > 0
# compute marginal of j given that we choose remaining values from 1:j
if j == remaining
marg = 1
else
marg = Λ[j] * E[remaining, j] / E[remaining + 1, j + 1]
end
# sample marginal
if rand(rng) <= marg
M[j, i] = true
remaining -= 1
end
j -= 1
end
return M
end
"""
_sample_from_elementary(rng, V, M, i)
Exact sampling from an elementary DPP. The algorithm is based on [1].
"""
function _sample_from_elementary(
rng::AbstractRNG, V::AbstractMatrix{T}, M::AbstractMatrix{Bool}, i::Int
) where {T<:Real}
# select the elementary DPP
V_mask = M[:, i]
# edge case: empty sample
if !any(V_mask)
return Int[]
end
# select the kernel of the elementary DPP
L = V[:, V_mask]
Y = Int[]
mask = ones(Bool, size(L, 2))
prob = zeros(T, size(L, 1))
for i in 1:size(L, 2)
# compute probabilities
fill!(prob, zero(T))
for c in 1:size(L, 2)
!mask[c] && continue
for r in 1:size(L, 1)
prob[r] += L[r, c] .^ 2
end
end
prob ./= sum(prob)
# sample a point in the original space
h = findfirst(rand(rng) .<= cumsum(prob))
push!(Y, h)
# select and mask-out an element
j = get_first_nz_idx(L[h, :], mask)
mask[j] = false
if any(mask)
# Subtract scaled Lj from other columns so that their
# projections on e_s[i] turns into 0. This operation
# preserves the rank of L_{-j}.
for c in 1:size(L, 2)
!mask[c] && continue
for r in 1:size(L, 1)
L[r, c] -= L[r, j] * L[h, c] / L[h, j]
end
end
# Gram-Schmidt orthogonalization
L[:, mask] = Matrix(qr(L[:, mask]).Q)
end
end
return sort(Y)
end
Random.rand(pp::PointProcess, n::Int) = rand(GLOBAL_RNG, pp, n)
Random.rand(pp::PointProcess) = rand(GLOBAL_RNG, pp)
Random.rand(rng::AbstractRNG, pp::PointProcess) = first(rand(rng, pp, 1))
"""
rand([rng::AbstractRNG], dpp::DeterminantalPointProcess)::Vector{Int}
rand([rng::AbstractRNG], dpp::DeterminantalPointProcess, n::Int)::Vector{Vector{Int}}
Exact sampling from a DPP [1].
Returns a vector of indices with respect to the `L` matrix passed to the `DeterminantalPointProcess`.
The length of each vector can vary from 0 to `size(L, 1)`.
"""
function Random.rand(
rng::AbstractRNG, dpp::DeterminantalPointProcess{T,true}, N::Int) where {T<:Real}
Λ = SharedArray{T}(dpp.Lfact.values)
V = SharedMatrix{T}(dpp.Lfact.vectors)
M = SharedMatrix{Bool}(zeros(Bool, dpp.size, N))
# step I: sample masks for elementary DPPs
pmap(
(i, seed) -> _sample_mask!(MersenneTwister(seed), M, Λ, i),
1:N,
abs.(rand(rng, Int, N)),
)
# step II: iteratively sample from a mixture of elementary DPPs
return pmap(
(i, seed) -> _sample_from_elementary(MersenneTwister(seed), V, M, i),
1:N,
abs.(rand(rng, Int, N)),
)
end
function Random.rand(
rng::AbstractRNG, dpp::DeterminantalPointProcess{T,false}, N::Int) where {T<:Real}
Λ = Array{T}(dpp.Lfact.values)
V = Matrix{T}(dpp.Lfact.vectors)
M = Matrix{Bool}(zeros(Bool, dpp.size, N))
# step I: sample masks for elementary DPPs
map(
(i, seed) -> _sample_mask!(MersenneTwister(seed), M, Λ, i),
1:N,
abs.(rand(rng, Int, N)),
)
# step II: iteratively sample from a mixture of elementary DPPs
return map(
(i, seed) -> _sample_from_elementary(MersenneTwister(seed), V, M, i),
1:N,
abs.(rand(rng, Int, N)),
)
end
"""
rand([rng::AbstractRNG], kdpp::kDeterminantalPointProcess)::Vector{Int}
rand([rng::AbstractRNG], kdpp::kDeterminantalPointProcess, n::Int)::Vector{Vector{Int}}
Exact sampling from a k-DPP [1].
Returns a vector of indices with respect to the `L` matrix passed to the `kDeterminantalPointProcess`
Each vector is of size `k`.
"""
function Random.rand(rng::AbstractRNG, kdpp::kDPP{T,true}, N::Int) where {T<:Real}
dpp = kdpp.dpp
Λ = SharedArray{T}(dpp.Lfact.values)
V = SharedMatrix{T}(dpp.Lfact.vectors)
M = SharedMatrix{Bool}(zeros(Bool, dpp.size, N))
# compute elementary symmetric polynomials
E = SharedMatrix{T}(elem_symm_poly(dpp.Lfact.values, kdpp.k))
# step I: sample masks for elementary DPPs
pmap(
(i, seed) -> _sample_k_mask!(MersenneTwister(seed), M, Λ, E, kdpp.k, i),
1:N,
abs.(rand(rng, Int, N)),
)
# step II: iteratively sample from a mixture of elementary DPPs
return pmap(
(i, seed) -> _sample_from_elementary(MersenneTwister(seed), V, M, i),
1:N,
abs.(rand(rng, Int, N)),
)
end
function Random.rand(rng::AbstractRNG, kdpp::kDPP{T,false}, N::Int) where {T<:Real}
dpp = kdpp.dpp
Λ = Array{T}(dpp.Lfact.values)
V = Matrix{T}(dpp.Lfact.vectors)
M = Matrix{Bool}(zeros(Bool, dpp.size, N))
# compute elementary symmetric polynomials
E = Matrix{T}(elem_symm_poly(dpp.Lfact.values, kdpp.k))
# step I: sample masks for elementary DPPs
map(
(i, seed) -> _sample_k_mask!(MersenneTwister(seed), M, Λ, E, kdpp.k, i),
1:N,
abs.(rand(rng, Int, N)),
)
# step II: iteratively sample from a mixture of elementary DPPs
return map(
(i, seed) -> _sample_from_elementary(MersenneTwister(seed), V, M, i),
1:N,
abs.(rand(rng, Int, N)),
)
end
"""
Perform one MCMC accept-reject transition for DPP.
"""
function _do_mcmc_step!(rng::AbstractRNG, dpp::DeterminantalPointProcess, state::MCMCState)
# propose an element to swap
u = rand(rng, 1:(dpp.size))
insert = !state[1][u]
# attempt to make a transition
if insert
p = _comp_accept_prob(dpp, state, u, insert)
rand(rng) < p && _update_mcmc_state!(dpp, state, u, insert)
else # delete
new_state = _update_mcmc_state(dpp, state, u, insert)
p = _comp_accept_prob(dpp, new_state, u, insert)
if rand(rng) < p
state[1] .= new_state[1]
state[2] .= new_state[2]
end
end
end
"""
Perform one MCMC accept-reject transition for k-DPP.
"""
function _do_mcmc_k_step!(rng::AbstractRNG, kdpp::kDPP, state::MCMCState)
z, L_z_inv = state
# propose the elements to swap
u, v = rand(rng, findall(z)), rand(rng, findall(.!z))
# copy the state and delete the u element
new_state = _update_mcmc_state(kdpp.dpp, state, u, false)
# attempt to make a transition
p = _comp_accept_prob(kdpp.dpp, new_state, u, v)
if rand(rng) < p
# insert the v element into the new state
_update_mcmc_state!(kdpp.dpp, new_state, v, true)
state[1] .= new_state[1]
state[2] .= new_state[2]
end
end
"""
Compute accept probability to insert / delete u from the state.
"""
function _comp_accept_prob(
dpp::DeterminantalPointProcess, state::MCMCState, u::Int, insert::Bool
)
z, L_z_inv = state
d_u = dpp.L[u, u]
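# d_u below is the Schur complement L[u, u] - b_u' * L_z⁻¹ * b_u, i.e. det(L[z∪u, z∪u]) / det(L[z, z])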
if any(z)
b_u = dpp.L[z, u]
d_u -= dot(b_u, L_z_inv[z, z] * b_u)
end
return insert ? min(1.0, d_u) : min(1.0, 1.0 / d_u)
end
"""
Compute accept probability to swap u and v.
"""
function _comp_accept_prob(dpp::DeterminantalPointProcess, state::MCMCState, u::Int, v::Int)
z, L_z_inv = state
d_u, d_v = dpp.L[u, u], dpp.L[v, v]
if any(z)
b_u, b_v = dpp.L[z, u], dpp.L[z, v]
d_u -= dot(b_u, L_z_inv[z, z] * b_u)
d_v -= dot(b_v, L_z_inv[z, z] * b_v)
end
return min(1.0, d_v / d_u)
end
"""
Update the state after u is inserted / deleted.
"""
function _update_mcmc_state!(
dpp::DeterminantalPointProcess, state::MCMCState, u::Int, insert::Bool
)
z, L_z_inv = state
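# maintain the cached inverse incrementally via block-inverse (Schur complement) updates instead of refactorizing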
if insert
d_u = dpp.L[u, u]
if any(z)
b_u = dpp.L[z, u]
x_u = L_z_inv[z, z] * b_u
d_u -= dot(b_u, x_u)
L_z_inv[z, z] += (x_u * x_u') / d_u
L_z_inv[z, u] = L_z_inv[u, z] = -x_u / d_u
end
L_z_inv[u, u] = 1.0 / d_u
z[u] = true
else # delete
z[u] = false
e = L_z_inv[z, u]
f = L_z_inv[u, u]
L_z_inv[z, z] -= (e * e') / f
end
end
"""
Update the state after u is inserted / deleted.
"""
function _update_mcmc_state(
dpp::DeterminantalPointProcess, state::MCMCState, u::Int, insert::Bool
)
new_state = deepcopy(state)
_update_mcmc_state!(dpp, new_state, u, insert)
return new_state
end
"""
MCMC sampling from a DPP [2].
TODO: Add support for running MCMC in parallel, similar to `rand`.
Make sure parallelization produces unbiased and consistent samples.
"""
function randmcmc(
rng::AbstractRNG,
dpp::DeterminantalPointProcess{T},
N::Int;
init_state=nothing,
return_final_state::Bool=false,
mix_eps::Real=1e-1,
mixing_steps::Int=ceil(Int, dpp.size * log(dpp.size / mix_eps)),
steps_between_samples::Int=mixing_steps,
) where {T}
# initialize the Markov chain
state = init_state
if state === nothing
L_z_inv = Array{T}(undef, size(dpp.L))
z = bitrand(rng, dpp.size) # TODO: improve initialization (?)
if any(z)
L_z_inv[z, z] = pinv(dpp.L[z, z])
end
state = (z, L_z_inv)
end
# sanity check
@assert state isa MCMCState
# mix the Markov chain
for _ in 1:mixing_steps
_do_mcmc_step!(rng, dpp, state)
end
Y = []
for _ in 1:N
push!(Y, findall(state[1]))
for t in 1:steps_between_samples
_do_mcmc_step!(rng, dpp, state)
end
end
return return_final_state ? (Y, state) : Y
end
function randmcmc(
dpp::DeterminantalPointProcess,
N::Int;
init_state=nothing,
return_final_state::Bool=false,
mix_eps::Real=1e-1,
mixing_steps::Int=ceil(Int, dpp.size * log(dpp.size / mix_eps)),
steps_between_samples::Int=mixing_steps,
)
return randmcmc(
GLOBAL_RNG,
dpp,
N;
init_state=init_state,
return_final_state=return_final_state,
mix_eps=mix_eps,
mixing_steps=mixing_steps,
steps_between_samples=steps_between_samples,
)
end
"""
randmcmc([rng::AbstractRNG], kdpp::kDPP, N::Int; kwargs...)
MCMC sampling from a k-DPP [2].
## Arguments
- `rng` : Random number generator (by default Random.GLOBAL_RNG is used)
- `kdpp` : k-DeterminantalPointProcess
- `N` : Number of samples
## Keyword Arguments
- `init_state` : initial state of the Markov chain (a random valid state is built when `nothing`)
- `return_final_state` : if `true`, also return the final state of the chain
- `mix_eps` : mixing tolerance used to set the default number of mixing steps
- `mixing_steps` : number of burn-in transitions before the first sample
- `steps_between_samples` : number of transitions between consecutive samples
TODO:
- Add support for running MCMC in parallel, similar to `rand`.
- Make sure parallelization produces unbiased and consistent samples.
"""
function randmcmc(
rng::AbstractRNG,
kdpp::kDPP{T},
N::Int;
init_state=nothing,
return_final_state::Bool=false,
mix_eps::Real=1e-1,
mixing_steps::Int=ceil(Int, kdpp.k * log(kdpp.k / mix_eps)),
steps_between_samples::Int=mixing_steps,
) where {T}
# initialize the Markov chain
state = init_state
if state === nothing
L_z_inv = Array{T}(undef, size(kdpp.dpp.L))
z = falses(kdpp.dpp.size) # TODO: improve initialization (?)
z[1:(kdpp.k)] .= true
if any(z)
L_z_inv[z, z] = pinv(kdpp.dpp.L[z, z])
end
state = (z, L_z_inv)
end
# sanity check
@assert state isa MCMCState
@assert sum(state[1]) == kdpp.k
# mix the Markov chain
for _ in 1:mixing_steps
_do_mcmc_k_step!(rng, kdpp, state)
end
Y = []
for _ in 1:N
push!(Y, findall(state[1]))
for t in 1:steps_between_samples
_do_mcmc_k_step!(rng, kdpp, state)
end
end
return return_final_state ? (Y, state) : Y
end
function randmcmc(
kdpp::kDPP,
N::Int;
init_state=nothing,
return_final_state::Bool=false,
mix_eps::Real=1e-1,
mixing_steps::Int=ceil(Int, kdpp.k * log(kdpp.k / mix_eps)),
steps_between_samples::Int=mixing_steps,
)
return randmcmc(
GLOBAL_RNG,
kdpp,
N;
init_state=init_state,
return_final_state=return_final_state,
mix_eps=mix_eps,
mixing_steps=mixing_steps,
steps_between_samples=steps_between_samples,
)
end
| DeterminantalPointProcesses | https://github.com/theogf/DeterminantalPointProcesses.jl.git |
|
[
"MIT"
] | 0.2.2 | 90607d4bc92a8bae044313417e0de8d04e4e8e20 | code | 2103 | # abstract types
abstract type PointProcess end
"""
DeterminantalPointProcess(L::AbstractMatrix; parallelize=false)
DeterminantalPointProcess(L::Eigen; parallelize=false)
Given a symmetric matrix `L`, creates a `DeterminantalPointProcess` (DPP). One can also pass the `Eigen` factorization object directly.
---
DeterminantalPointProcess(kernel::Kernel, X::AbstractVector; parallelize=false)
DeterminantalPointProcess(kernel::Kernel, X::AbstractMatrix; obsdim=1, parallelize=false)
Similar to the basic constructor, but first builds the kernel matrix with `kernel` on the observations `X`.
If your input is an `AbstractMatrix`, you can pass the `obsdim` argument to indicate whether rows or columns represent the samples (see the docs of [KernelFunctions](https://juliagaussianprocesses.github.io/KernelFunctions.jl/stable/)).
"""
struct DeterminantalPointProcess{T,parallelize,TL<:AbstractMatrix{T},Tfact} <: PointProcess
L::TL
Lfact::Tfact
size::Int
end
function DeterminantalPointProcess(L::AbstractMatrix; parallelize=false)
issymmetric(L) || error("Given matrix is not symmetric")
Lfact = eigen(L)
return DeterminantalPointProcess{eltype(L),parallelize,typeof(L),typeof(Lfact)}(L, Lfact, length(Lfact.values))
end
function DeterminantalPointProcess(Lfact::Eigen; parallelize=false)
L = Symmetric((Lfact.vectors .* Lfact.values') * Lfact.vectors')
return DeterminantalPointProcess{eltype(L),parallelize,typeof(L),typeof(Lfact)}(L, Lfact, length(Lfact.values))
end
"""
kDeterminantalPointProcess(k::Int, dpp::DeterminantalPointProcess)
Create a k-DPP where the size of the subsets is fixed to `k`.
You can also create a k-DPP by calling `dpp(k)`.
"""
struct kDeterminantalPointProcess{T,parallelize,Tdpp<:DeterminantalPointProcess{T,parallelize}} <: PointProcess
k::Int
dpp::Tdpp
end
(dpp::DeterminantalPointProcess{T,p})(k::Int) where {T,p} = kDPP{T,p,typeof(dpp)}(k, dpp)
# aliases
const DPP = DeterminantalPointProcess
const kDPP = kDeterminantalPointProcess
# const KDPP = KroneckerDeterminantalPointProcess
const MCMCState = Tuple{BitArray{1},Matrix{Float64}}
| DeterminantalPointProcesses | https://github.com/theogf/DeterminantalPointProcesses.jl.git |
|
[
"MIT"
] | 0.2.2 | 90607d4bc92a8bae044313417e0de8d04e4e8e20 | code | 664 | # utility functions
function get_first_nz_idx(x::AbstractArray{T}, mask=nothing) where {T<:Real}
first_nz_idx = 0
for i in 1:length(x)
!isnothing(mask) && !mask[i] && continue
first_nz_idx = i
abs(x[i]) > eps() && break
end
return first_nz_idx
end
"""
Compute elementary symmetric polynomials for given Λ and k.
"""
function elem_symm_poly(Λ::AbstractArray{T}, k::Int) where {T<:Real}
N = length(Λ)
poly = zeros(k + 1, N + 1)
poly[1, :] .= 1
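# recurrence: e_l(λ_1..λ_n) = e_l(λ_1..λ_{n-1}) + λ_n * e_{l-1}(λ_1..λ_{n-1}); poly[l + 1, n + 1] stores e_l over the first n eigenvalues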
for l in (1:k) .+ 1
for n in (1:N) .+ 1
poly[l, n] = poly[l, n - 1] + Λ[n - 1] * poly[l - 1, n - 1]
end
end
return poly
end
| DeterminantalPointProcesses | https://github.com/theogf/DeterminantalPointProcesses.jl.git |
|
[
"MIT"
] | 0.2.2 | 90607d4bc92a8bae044313417e0de8d04e4e8e20 | code | 4672 | using DeterminantalPointProcesses
using Test
using Random, LinearAlgebra, IterTools
using KernelFunctions
import Combinatorics: combinations
rng = MersenneTwister(42)
n = 5
k = 2
A = rand(rng, n, n)
L = Symmetric(A * A')
kernel = SqExponentialKernel()
X = [rand(2) for _ in 1:3]
dpp = DPP(L)
@testset "Ensure correct pdf and logpdf computation" begin
@testset "Kernel API" begin
K = kernelmatrix(kernel, X)
@test DPP(K).L ≈ DPP(kernel, X).L
@test DPP(K).L ≈ DPP(kernel, reduce(hcat, X); obsdim=2).L
end
@testset "For DPP" begin
# compute the true distribution
true_pdf = Float64[]
true_logpdf = Float64[]
for z in subsets(1:n)
push!(true_pdf, pdf(dpp, z))
push!(true_logpdf, logpdf(dpp, z))
end
@test sum(true_pdf) ≈ 1.0
@test all(true_pdf .<= 1.0)
@test all(true_logpdf .<= 0.0)
end
@testset "For k-DPP" begin
# compute the true distribution
true_pdf = Float64[]
true_logpdf = Float64[]
for z in combinations(1:n, k)
push!(true_pdf, pdf(dpp(k), z))
push!(true_logpdf, logpdf(dpp(k), z))
end
@test sum(true_pdf) ≈ 1.0
@test all(true_pdf .<= 1.0)
@test all(true_logpdf .<= 0.0)
end
end
@testset "Ensure correct sampling from DPP" begin
# compute the true distribution
all_subsets = []
true_pdf = Float64[]
for (i, z) in enumerate(subsets(1:n))
push!(true_pdf, pdf(dpp, z))
push!(all_subsets, (z, i))
end
global subset_to_idx = Dict(all_subsets)
@testset "Exact sampling" begin
nb_samples = 1000
samples = rand(dpp, nb_samples)
# compute the empirical distribution
empirical_pdf = zeros(Float64, length(true_pdf))
for z in samples
empirical_pdf[subset_to_idx[z]] += 1
end
empirical_pdf ./= nb_samples
# ensure that the empirical pdf is close to the true pdf
total_variation = maximum(abs.(true_pdf - empirical_pdf))
@test total_variation ≈ 0.0 atol = 1e-1
end
@testset "MCMC sampling" begin
nb_samples = 1000
samples, state = randmcmc(dpp, nb_samples; return_final_state=true)
# ensure that L_z_inv makes sense (i.e., noise did not accumulate)
z, L_z_inv = state
@test sum(L_z_inv[z, z] * dpp.L[z, z] - I) ≈ 0 atol=1e-10
# compute the empirical distribution
empirical_pdf = zeros(Float64, length(true_pdf))
for z in samples
empirical_pdf[subset_to_idx[z]] += 1
end
empirical_pdf ./= nb_samples
# ensure that the empirical pdf is close to the true pdf
total_variation = maximum(abs.(true_pdf - empirical_pdf))
@test total_variation ≈ 0.0 atol = 1e-1
end
end
@testset "Ensure correct sampling from k-DPP" begin
# compute the true distribution
all_k_subsets = []
true_pdf = Float64[]
for (i, z) in enumerate(combinations(1:n, k))
push!(true_pdf, pdf(dpp, z))
push!(all_k_subsets, (z, i))
end
true_pdf ./= sum(true_pdf)
k_subset_to_idx = Dict(all_k_subsets)
@testset "Exact sampling" begin
nb_samples = 10000
samples = rand(dpp(k), nb_samples)
# ensure that samples are of proper cardinality
@test all(map(length, samples) .== k)
# compute the empirical distribution
empirical_pdf = zeros(Float64, length(true_pdf))
for z in samples
empirical_pdf[k_subset_to_idx[z]] += 1
end
empirical_pdf ./= nb_samples
# ensure that the empirical pdf is close to the true pdf
total_variation = maximum(abs.(true_pdf - empirical_pdf))
@test total_variation ≈ 0.0 atol = 1e-1
end
@testset "MCMC sampling" begin
nb_samples = 1000
samples, state = randmcmc(dpp(k), nb_samples; return_final_state=true)
# ensure that L_z_inv makes sense (i.e., noise did not accumulate)
z, L_z_inv = state
@test sum(L_z_inv[z, z] * dpp.L[z, z] - I) ≈ 0 atol = 1e-1
# ensure that samples are of proper cardinality
@test all(map(length, samples) .== k)
# compute the empirical distribution
empirical_pdf = zeros(Float64, length(true_pdf))
for z in samples
empirical_pdf[k_subset_to_idx[z]] += 1
end
empirical_pdf ./= nb_samples
# ensure that the empirical pdf is close to the true pdf
total_variation = maximum(abs.(true_pdf - empirical_pdf))
@test_broken total_variation ≈ 0.0 atol = 1e-1
end
end
| DeterminantalPointProcesses | https://github.com/theogf/DeterminantalPointProcesses.jl.git |
|
[
"MIT"
] | 0.2.2 | 90607d4bc92a8bae044313417e0de8d04e4e8e20 | docs | 2175 | # DeterminantalPointProcesses.jl
[](https://github.com/theogf/DeterminantalPointProcesses.jl/actions/workflows/CI.yml)
[](https://coveralls.io/github/theogf/DeterminantalPointProcesses.jl?branch=master)
!__Disclaimer__! This package is based on the work of [alshedivat/DeterminantalPointProcesses.jl](https://github.com/alshedivat/DeterminantalPointProcesses.jl) and aims at keeping this package alive.
An efficient implementation of Determinantal Point Processes (DPP) in Julia.
### Current features
- Exact sampling [1] from DPP and k-DPP (can be executed in parallel).
- MCMC sampling [2] from DPP and k-DPP (parallelization will be added).
- `pdf` and `logpdf` evaluation functions [1] for DPP and k-DPP.
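A minimal usage sketch, mirroring the package's own test suite (`L` can be any symmetric positive semi-definite matrix):
```julia
using DeterminantalPointProcesses, LinearAlgebra

A = rand(5, 5)
L = Symmetric(A * A')        # symmetric PSD kernel matrix
dpp = DPP(L)                 # build the point process
samples = rand(dpp, 10)      # 10 exact samples (vectors of indices into 1:5)
ksamples = rand(dpp(2), 10)  # 10 samples from the corresponding 2-DPP
p = pdf(dpp, [1, 3])         # probability of the subset {1, 3}
```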
### Planned features
- Exact sampling using dual representation [1].
- Better integration with MCMC frameworks in Julia (such as [Lora.jl] or [AbstractMCMC.jl]).
- Fitting DPP and k-DPP models to data [3, 4].
- Reduced rank DPP and k-DPP.
- Kronecker Determinantal Point Processes [5].
Any help on these topics would be highly appreciated.
### Contributing
Contributions are sought (especially if you are an author of a related paper).
Bug reports are welcome.
## References
[1] Kulesza, A., and B. Taskar. Determinantal point processes for machine learning. [arXiv:1207.6083], 2012.
[2] Kang, B. Fast determinantal point process sampling with application to clustering. NIPS, 2013.
[3] Gillenwater, J., A. Kulesza, E. Fox, and B. Taskar. Expectation-Maximization for learning Determinantal Point Processes. NIPS, 2014.
[4] Mariet, Z., and S. Sra. Fixed-point algorithms for learning determinantal point processes. NIPS, 2015.
[5] Mariet, Z., and S. Sra. Kronecker Determinantal Point Processes. [arXiv:1605.08374], 2016.
[Lora.jl]: https://github.com/JuliaStats/Lora.jl
[AbstractMCMC.jl]: https://github.com/TuringLang/AbstractMCMC.jl
[arXiv:1207.6083]: https://arxiv.org/abs/1207.6083
[arXiv:1605.08374]: https://arxiv.org/abs/1605.08374
| DeterminantalPointProcesses | https://github.com/theogf/DeterminantalPointProcesses.jl.git |
|
[
"MIT"
] | 0.2.2 | 90607d4bc92a8bae044313417e0de8d04e4e8e20 | docs | 723 | DeterminantalPointProcesses.jl
A [Julia](http://julialang.org) package for Determinantal Point Processes
***
### Authors
- Maintainer : [Théo Galy-Fajou](https://theogf.github.io), PhD student at Technical University of Berlin.
- Original author : [Maruan Al-Shedivat](https://www.cs.cmu.edu/~mshediva/)
### Installation
DeterminantalPointProcesses.jl is a [registered package](http://pkg.julialang.org) and is simply installed by running
```julia
pkg> add DeterminantalPointProcesses
```
Documentation is being worked on.
### License
DeterminantalPointProcesses.jl is licensed under the MIT "Expat" license; see
[LICENSE](https://github.com/theogf/DeterminantalPointProcesses.jl/blob/master/LICENSE.md) for
the full license text.
| DeterminantalPointProcesses | https://github.com/theogf/DeterminantalPointProcesses.jl.git |
|
[
"MPL-2.0"
] | 0.2.0 | 2c205e3f7085ec434dafd7f990d659f2f32736f2 | code | 507 | using Documenter, FluxNLPModels
makedocs(
modules = [FluxNLPModels],
doctest = true,
# linkcheck = true,
strict = true,
format = Documenter.HTML(
assets = ["assets/style.css"],
prettyurls = get(ENV, "CI", nothing) == "true",
),
sitename = "FluxNLPModels.jl",
pages = Any["Home" => "index.md", "Tutorial" => "tutorial.md", "Reference" => "reference.md"],
)
deploydocs(
repo = "github.com/JuliaSmoothOptimizers/FluxNLPModels.jl.git",
push_preview = true,
devbranch = "main",
)
| FluxNLPModels | https://github.com/JuliaSmoothOptimizers/FluxNLPModels.jl.git |
|
[
"MPL-2.0"
] | 0.2.0 | 2c205e3f7085ec434dafd7f990d659f2f32736f2 | code | 2220 | using FluxNLPModels
using CUDA, Flux, NLPModels
using Flux.Data: DataLoader
using Flux: onehotbatch, onecold
using Flux.Losses: logitcrossentropy
using MLDatasets
using JSOSolvers
const loss = logitcrossentropy
# We discuss the process of loading datasets
# and defining minibatches for model training
# using the Flux framework.
# To download and load the MNIST dataset from MLDatasets,
# follow these steps:
function getdata(; T = Float32) #T for types
ENV["DATADEPS_ALWAYS_ACCEPT"] = "true"
# Loading Dataset
xtrain, ytrain = MLDatasets.MNIST(Tx = T, split = :train)[:]
xtest, ytest = MLDatasets.MNIST(Tx = T, split = :test)[:]
# Reshape Data in order to flatten each image into a linear array
xtrain = Flux.flatten(xtrain)
xtest = Flux.flatten(xtest)
# One-hot-encode the labels
ytrain, ytest = onehotbatch(ytrain, 0:9), onehotbatch(ytest, 0:9)
return xtrain, ytrain, xtest, ytest
end
# train_data.features is a 28×28×60000 Array{Float32, 3} of the images.
# Flux needs a 4D array, with the 3rd dim for channels -- here trivial, grayscale.
# Combine the reshape needed with other pre-processing:
function create_batch(; batchsize = 128)
# Create DataLoaders (mini-batch iterators)
xtrain, ytrain, xtest, ytest = getdata()
xtrain = reshape(xtrain, 28, 28, 1, :)
xtest = reshape(xtest, 28, 28, 1, :)
train_loader = DataLoader((xtrain, ytrain), batchsize = batchsize, shuffle = true)
test_loader = DataLoader((xtest, ytest), batchsize = batchsize)
return train_loader, test_loader
end
train_loader, test_loader = create_batch()
## Construct Neural Network model
device = cpu # or gpu
model =
Chain(
Conv((5, 5), 1 => 6, relu),
MaxPool((2, 2)),
Conv((5, 5), 6 => 16, relu),
MaxPool((2, 2)),
Flux.flatten,
Dense(256 => 120, relu),
Dense(120 => 84, relu),
Dense(84 => 10),
) |> device
nlp = FluxNLPModel(model, train_loader, test_loader; loss_f = loss)
callback = (nlp, solver, stats) -> FluxNLPModels.minibatch_next_train!(nlp)
solver_stats = JSOSolvers.R2(nlp; callback = callback)
## Report on train and test
train_acc = FluxNLPModels.accuracy(nlp; data_loader = train_loader)
test_acc = FluxNLPModels.accuracy(nlp) #on the test data
| FluxNLPModels | https://github.com/JuliaSmoothOptimizers/FluxNLPModels.jl.git |
|
[
"MPL-2.0"
] | 0.2.0 | 2c205e3f7085ec434dafd7f990d659f2f32736f2 | code | 3148 | module FluxNLPModels
using Flux, NLPModels
using Flux: onehotbatch, onecold
export AbstractFluxNLPModel, FluxNLPModel
export reset_minibatch_train!, reset_minibatch_test!
export minibatch_next_train!, minibatch_next_test!
export accuracy, set_vars!, local_loss, update_type!
abstract type AbstractFluxNLPModel{T, S} <: AbstractNLPModel{T, S} end
"""
FluxNLPModel{T, S, F} <: AbstractNLPModel{T, S}
Data structure that makes the interface between neural networks defined with [Flux.jl](https://fluxml.ai/) and [NLPModels](https://github.com/JuliaSmoothOptimizers/NLPModels.jl).
A FluxNLPModel has the following fields.
# Arguments
- `meta` and `counters` retain information about the `FluxNLPModel`;
- `chain` is the chained structure representing the neural network;
- `data_train` is the complete training data set;
- `data_test` is the complete test data set;
- `size_minibatch` parametrizes the size of the training and test minibatches;
- `training_minibatch_iterator` is an iterator over the training minibatches;
- `test_minibatch_iterator` is an iterator over the test minibatches;
- `current_training_minibatch` is the training minibatch used to evaluate the neural network;
- `current_test_minibatch` is the current test minibatch; it is not used in practice;
- `w` is the vector of weights/variables.
mutable struct FluxNLPModel{T, S, F <: Function} <: AbstractFluxNLPModel{T, S}
meta::NLPModelMeta{T, S}
chain
counters::Counters
loss_f::F
size_minibatch::Int
training_minibatch_iterator
test_minibatch_iterator
current_training_minibatch
current_test_minibatch
rebuild # function to rebuild the chain from the flat weight vector
current_training_minibatch_status
current_test_minibatch_status
w
end
"""
FluxNLPModel(chain_ANN, data_train, data_test; size_minibatch=100, loss_f=Flux.mse)
Build a `FluxNLPModel` from the neural network represented by `chain_ANN`.
`chain_ANN` is built using [Flux.jl](https://fluxml.ai/); see its documentation for more details.
The other required data are an iterator over the training dataset `data_train`, an iterator over the test dataset `data_test`, and the size of the minibatches `size_minibatch`.
"""
function FluxNLPModel(
chain_ANN,
data_train,
data_test;
current_training_minibatch = [],
current_test_minibatch = [],
size_minibatch::Int = 100,
loss_f::F = Flux.mse, #Flux.crossentropy,
) where {F <: Function}
x0, rebuild = Flux.destructure(chain_ANN)
n = length(x0)
meta = NLPModelMeta(n, x0 = x0)
if (isempty(data_train) || isempty(data_test))
error("train or test data is empty")
end
if (isempty(current_training_minibatch) || isempty(current_test_minibatch))
current_training_minibatch = first(data_train)
current_test_minibatch = first(data_test)
end
return FluxNLPModel(
meta,
chain_ANN,
Counters(),
loss_f,
size_minibatch,
data_train,
data_test,
current_training_minibatch,
current_test_minibatch,
rebuild,
nothing,
nothing,
x0,
)
end
include("utils.jl")
include("FluxNLPModels_methods.jl")
end
| FluxNLPModels | https://github.com/JuliaSmoothOptimizers/FluxNLPModels.jl.git |
|
[
"MPL-2.0"
] | 0.2.0 | 2c205e3f7085ec434dafd7f990d659f2f32736f2 | code | 2913 | """
f = obj(nlp, w)
Evaluate the objective function f(w) of the non-linear programming (NLP) problem at the point w.
If the element type of `w` differs from the current precision of `nlp.w`, the model is converted so that `nlp.w` matches the precision of `w`.
# Arguments
- `nlp::AbstractFluxNLPModel{T, S}`: the FluxNLPModel data struct;
- `w::AbstractVector{V}`: is the vector of weights/variables. The use of `V` allows for flexibility in specifying different precision types for weights and models.
# Output
- `f_w`: the new objective function.
"""
function NLPModels.obj(nlp::AbstractFluxNLPModel{T, S}, w::AbstractVector{V}) where {T, S, V}
x, y = nlp.current_training_minibatch
eltype(nlp.w) == V || update_type!(nlp, w) #Check if the type has changed
if eltype(x) != V
x = V.(x)
end
set_vars!(nlp, w)
increment!(nlp, :neval_obj)
return nlp.loss_f(nlp.chain(x), y)
end
"""
g = grad!(nlp, w, g)
Evaluate `∇f(w)`, the gradient of the objective function at `w` in place.
# Arguments
- `nlp::AbstractFluxNLPModel{T, S}`: the FluxNLPModel data struct;
- `w::AbstractVector{V}`: is the vector of weights/variables. The use of `V` allows for flexibility in specifying different precision types for weights and models.
- `g::AbstractVector{U}`: the gradient vector.
# Output
- `g`: the gradient at point `w`.
"""
function NLPModels.grad!(
nlp::AbstractFluxNLPModel{T, S},
w::AbstractVector{V},
g::AbstractVector{U},
) where {T, S, V, U}
@lencheck nlp.meta.nvar w g
x, y = nlp.current_training_minibatch
if (eltype(nlp.w) != V) # we check if the types are the same,
update_type!(nlp, w)
g = V.(g)
if eltype(x) != V
x = V.(x)
end
end
increment!(nlp, :neval_grad)
g .= gradient(w_g -> local_loss(nlp, x, y, w_g), w)[1]
return g
end
"""
objgrad!(nlp, w, g)
Evaluate both `f(w)`, the objective function of `nlp` at `w`, and `∇f(w)`, the gradient of the objective function at `w` in place.
# Arguments
- `nlp::AbstractFluxNLPModel{T, S}`: the FluxNLPModel data struct;
- `w::AbstractVector{V}`: is the vector of weights/variables. The use of `V` allows for flexibility in specifying different precision types for weights and models.
- `g::AbstractVector{V}`: the gradient vector.
# Output
- `f_w`, `g`: the new objective function, and the gradient at point w.
"""
function NLPModels.objgrad!(
nlp::AbstractFluxNLPModel{T, S},
w::AbstractVector{V},
g::AbstractVector{U},
) where {T, S, V, U}
@lencheck nlp.meta.nvar w g
x, y = nlp.current_training_minibatch
if (eltype(nlp.w) != V) # we check if the types are the same,
update_type!(nlp, w)
g = V.(g)
if eltype(x) != V
x = V.(x)
end
end
increment!(nlp, :neval_obj)
increment!(nlp, :neval_grad)
set_vars!(nlp, w)
f_w = nlp.loss_f(nlp.chain(x), y)
g .= gradient(w_g -> local_loss(nlp, x, y, w_g), w)[1]
return f_w, g
end
| FluxNLPModels | https://github.com/JuliaSmoothOptimizers/FluxNLPModels.jl.git |
|
[
"MPL-2.0"
] | 0.2.0 | 2c205e3f7085ec434dafd7f990d659f2f32736f2 | code | 4329 | """
update_type!(nlp::AbstractFluxNLPModel{T, S}, w::AbstractVector{V}) where {T, V, S}
Sets the variables and rebuild the chain to a specific type defined by weights.
"""
function update_type!(nlp::AbstractFluxNLPModel{T, S}, w::AbstractVector{V}) where {T, V, S}
nlp.chain = update_type(nlp.chain, V)
nlp.w, nlp.rebuild = Flux.destructure(nlp.chain)
end
# Define a separate method for updating the type of the chain
function update_type(chain::Chain, ::Type{Float16})
return f16(chain)
end
function update_type(chain::Chain, ::Type{Float32})
return f32(chain)
end
function update_type(chain::Chain, ::Type{Float64})
return f64(chain)
end
# Throw an error for unsupported types
function update_type(chain::Chain, ::Type)
error("The package only supports Float16, Float32, and Float64")
end
"""
set_vars!(model::AbstractFluxNLPModel{T,S}, new_w::AbstractVector{T}) where {T<:Number, S}
Sets the variables and rebuilds the chain.
"""
function set_vars!(
nlp::AbstractFluxNLPModel{T, S},
new_w::AbstractVector{V},
) where {T <: Number, S, V}
nlp.w .= new_w
nlp.chain = nlp.rebuild(nlp.w)
end
function local_loss(nlp::AbstractFluxNLPModel{T, S}, x, y, w::AbstractVector{V}) where {T, S, V}
# increment!(nlp, :neval_obj) #TODO not sure
nlp.chain = nlp.rebuild(w)
return nlp.loss_f(nlp.chain(x), y)
end
"""
accuracy(nlp::AbstractFluxNLPModel)
Compute the accuracy of the network `nlp.chain` on the entire test dataset.
`data_loader` can be overwritten to use other data; `device` defaults to `cpu`.
"""
function accuracy(
nlp::AbstractFluxNLPModel{T, S};
model = nlp.chain,
data_loader = nlp.test_minibatch_iterator,
device = cpu,
myT = Float32,
) where {T, S}
acc = myT(0)
num = myT(0)
for (x, y) in data_loader
x, y = device(x), device(y)
yΜ = model(x)
acc += sum(onecold(yΜ) .== onecold(y)) ## Decode the output of the model
num += size(x)[end]
end
return acc / num #TODO make sure num is not zero
end
"""
reset_minibatch_train!(nlp::AbstractFluxNLPModel)
Select the first training minibatch of the data loader (the iterator passed to the `FluxNLPModel`) for `nlp`.
"""
function reset_minibatch_train!(nlp::AbstractFluxNLPModel)
nlp.current_training_minibatch = first(nlp.training_minibatch_iterator)
nlp.current_training_minibatch_status = nothing
end
"""
minibatch_next_train!(nlp::AbstractFluxNLPModel)
Selects the next minibatch from `nlp.training_minibatch_iterator`.
Returns the new current status of the iterator `nlp.current_training_minibatch`.
`minibatch_next_train!` aims to be used in a loop or method call.
if return false, it means that it reach the end of the mini-batch
"""
function minibatch_next_train!(nlp::AbstractFluxNLPModel; device = cpu)
iter = nlp.training_minibatch_iterator
if nlp.current_training_minibatch_status === nothing
next = iterate(iter)
else
next = iterate(iter, nlp.current_training_minibatch_status)
end
if next === nothing #end of the loop
reset_minibatch_train!(nlp)
return false
end
(item, nlp.current_training_minibatch_status) = next
nlp.current_training_minibatch = device(item)
return true
end
"""
reset_minibatch_test!(nlp::AbstractFluxNLPModel)
Select the first test minibatch of the data loader (the iterator passed to the `FluxNLPModel`) for `nlp`.
"""
function reset_minibatch_test!(nlp::AbstractFluxNLPModel)
nlp.current_test_minibatch = first(nlp.test_minibatch_iterator)
nlp.current_test_minibatch_status = nothing
end
"""
minibatch_next_test!(nlp::AbstractFluxNLPModel)
Selects the next minibatch from `nlp.test_minibatch_iterator`.
Returns the new current status of the iterator `nlp.current_test_minibatch`.
`minibatch_next_test!` aims to be used in a loop or method call.
if return false, it means that it reach the end of the mini-batch
"""
function minibatch_next_test!(nlp::AbstractFluxNLPModel; device = cpu)
iter = nlp.test_minibatch_iterator
if nlp.current_test_minibatch_status === nothing
next = iterate(iter)
else
next = iterate(iter, nlp.current_test_minibatch_status)
end
if next === nothing #end of the loop
reset_minibatch_test!(nlp)
return false
end
(item, nlp.current_test_minibatch_status) = next
nlp.current_test_minibatch = device(item)
return true
end
| FluxNLPModels | https://github.com/JuliaSmoothOptimizers/FluxNLPModels.jl.git |
|
[
"MPL-2.0"
] | 0.2.0 | 2c205e3f7085ec434dafd7f990d659f2f32736f2 | code | 5706 | using Test
using FluxNLPModels
using CUDA, Flux, NLPModels
using Flux.Data: DataLoader
using Flux: onehotbatch, onecold
using Flux.Losses: logitcrossentropy
using Base: @kwdef
using MLDatasets
using LinearAlgebra
# Helper functions
function getdata(args; T = Float32)
ENV["DATADEPS_ALWAYS_ACCEPT"] = "true" # download datasets without having to manually confirm the download
# Loading Dataset
xtrain, ytrain = MLDatasets.MNIST(Tx = T, split = :train)[:]
xtest, ytest = MLDatasets.MNIST(Tx = T, split = :test)[:]
# Reshape Data in order to flatten each image into a linear array
xtrain = Flux.flatten(xtrain)
xtest = Flux.flatten(xtest)
# One-hot-encode the labels
ytrain, ytest = onehotbatch(ytrain, 0:9), onehotbatch(ytest, 0:9)
# Create DataLoaders (mini-batch iterators)
train_loader = DataLoader((xtrain, ytrain), batchsize = args.batchsize, shuffle = true)
test_loader = DataLoader((xtest, ytest), batchsize = args.batchsize)
return train_loader, test_loader
end
function build_model(; imgsize = (28, 28, 1), nclasses = 10)
return Flux.Chain(Dense(prod(imgsize), 32, relu), Dense(32, nclasses), softmax)
end
@kwdef mutable struct Args
η::Float64 = 3e-4 # learning rate
batchsize::Int = 2 # batch size
epochs::Int = 10 # number of epochs
use_cuda::Bool = true # use gpu (if cuda available)
end
args = Args() # collect options in a struct for convenience
device = cpu
@testset "FluxNLPModels tests" begin
# Create test and train dataloaders
train_data, test_data = getdata(args)
# Construct model
DN = build_model() |> device
DNNLPModel = FluxNLPModel(DN, train_data, test_data)
old_w, rebuild = Flux.destructure(DN)
x1 = copy(DNNLPModel.w)
obj_x1 = obj(DNNLPModel, x1)
grad_x1 = NLPModels.grad(DNNLPModel, x1)
grad_x1_2 = similar(x1)
obj_x1_2, grad_x1_2 = NLPModels.objgrad!(DNNLPModel, x1, grad_x1_2)
@test DNNLPModel.w == old_w
@test obj_x1 == obj_x1_2
@test norm(grad_x1 - grad_x1_2) β 0.0
@test x1 == DNNLPModel.w
@test Flux.params(DNNLPModel.chain)[1][1] == x1[1]
@test Flux.params(DNNLPModel.chain)[1][2] == x1[2]
@test_throws Exception FluxNLPModel(DN, [], test_data) # if the train data is empty
@test_throws Exception FluxNLPModel(DN, train_data, []) # if the test data is empty
@test_throws Exception FluxNLPModel(DN, [], []) # if the both data is empty
# Testing if the value of the first batch was passed it
DNNLPModel_2 = FluxNLPModel(
DN,
train_data,
test_data,
current_training_minibatch = first(train_data),
current_test_minibatch = first(test_data),
)
#checking if we can call accuracy
train_acc = FluxNLPModels.accuracy(DNNLPModel_2; data_loader = train_data) # accuracy on train data
test_acc = FluxNLPModels.accuracy(DNNLPModel_2) # on the test data
@test train_acc >= 0.0
@test train_acc <= 1.0
end
@testset "minibatch tests" begin
# Create test and train dataloaders
train_data, test_data = getdata(args)
# Construct model
DN = build_model() |> device
nlp = FluxNLPModel(DN, train_data, test_data)
reset_minibatch_train!(nlp)
@test nlp.current_training_minibatch_status === nothing
buffer_minibatch = deepcopy(nlp.current_training_minibatch)
@test minibatch_next_train!(nlp) # should return true
@test minibatch_next_train!(nlp) # should return true
@test !isequal(nlp.current_training_minibatch, buffer_minibatch)
reset_minibatch_test!(nlp)
@test minibatch_next_test!(nlp) # should return true
@test minibatch_next_test!(nlp) # should return true
end
@testset "Multiple precision test" begin
# Create test and train dataloaders
train_data, test_data = getdata(args)
# Construct model in Float32
DN = build_model() |> device
nlp = FluxNLPModel(DN, train_data, test_data)
x1 = copy(nlp.w)
obj_x1 = obj(nlp, x1)
grad_x1 = NLPModels.grad(nlp, x1)
@test typeof(obj_x1) == Float32
@test eltype(grad_x1) == Float32
# change to Float16
x2 = Float16.(x1)
obj_x2 = obj(nlp, x2)
grad_x2 = NLPModels.grad(nlp, x2)
# Test grad again after changing the type, using the grad! method
grad!(nlp, x2, grad_x2)
@test typeof(obj_x2) == Float16
@test eltype(grad_x2) == Float16
# change to Float64
x3 = Float64.(x1)
obj_x3 = obj(nlp, x3)
grad_x3 = NLPModels.grad(nlp, x3)
@test typeof(obj_x3) == Float64
@test eltype(grad_x3) == Float64
# change to Float16 with objgrad!
x3_2 = Float16.(x1)
grad_x3_2 = similar(x3_2)
obj_x3_2, grad_x3_2 = NLPModels.objgrad!(nlp, x3_2, grad_x3_2)
@test typeof(obj_x3_2) == Float16
@test eltype(grad_x3_2) == Float16
# change to Float64 with grad!
x3_3 = Float64.(x1)
grad_x3_3 = similar(x3_3)
grad_x3_3 = grad!(nlp, x3_3, grad_x3_3)
@test eltype(grad_x3_3) == Float64
# Construct model in Float16
train_data_f16, test_data_f16 = getdata(args, T = Float16)
DN_f16 = build_model() |> f16
nlp_f16 = FluxNLPModel(DN_f16, train_data_f16, test_data_f16)
x4 = copy(nlp_f16.w)
obj_x4 = obj(nlp_f16, x4)
grad_x4 = NLPModels.grad(nlp_f16, x4)
@test typeof(obj_x4) == Float16
@test eltype(grad_x4) == Float16
# change to Float32 from Float16
x5 = Float32.(x4)
obj_x5 = obj(nlp_f16, x5)
grad_x5 = NLPModels.grad(nlp_f16, x5)
@test typeof(obj_x5) == Float32
@test eltype(grad_x5) == Float32
# change to Float64 from Float16
x6 = Float64.(x4)
obj_x6 = obj(nlp_f16, x6)
grad_x6 = NLPModels.grad(nlp_f16, x6)
@test typeof(obj_x6) == Float64
@test eltype(grad_x6) == Float64
# change to BigFloat from Float16
# expected to throw an error
# Note: we do not support BigFloat in FluxNLPModels yet!
x7 = BigFloat.(x5)
@test_throws Exception obj(nlp_f16, x7)
end
| FluxNLPModels | https://github.com/JuliaSmoothOptimizers/FluxNLPModels.jl.git |
|
[
"MPL-2.0"
] | 0.2.0 | 2c205e3f7085ec434dafd7f990d659f2f32736f2 | docs | 1841 | # FluxNLPModels: An NLPModels Interface to Flux
[](https://JuliaSmoothOptimizers.github.io/FluxNLPModels.jl/stable)
[](https://JuliaSmoothOptimizers.github.io/FluxNLPModels.jl/dev)
[](https://github.com/JuliaSmoothOptimizers/FluxNLPModels.jl/actions)
[](https://codecov.io/gh/JuliaSmoothOptimizers/FluxNLPModels.jl)
This package serves as an NLPModels interface to the [Flux.jl](https://github.com/FluxML/Flux.jl) deep learning framework. It enables seamless integration between Flux's neural network architectures and NLPModels' optimization tools for non-linear programming (NLP) problems.
## Installation
To use FluxNLPModels, add the package in the Julia package manager:
```julia
pkg> add FluxNLPModels
```
## How to Use
Please refer to the [documentation](https://JuliaSmoothOptimizers.github.io/FluxNLPModels.jl/stable/) for detailed usage instructions and examples.
## How to Cite
If you use FluxNLPModels.jl in your work, please cite it using the provided format in [`CITATION.bib`](https://github.com/JuliaSmoothOptimizers/FluxNLPModels.jl/blob/main/CITATION.bib).
## Bug Reports and Discussions
If you encounter any bugs or have suggestions for improvement, please open an [issue](https://github.com/JuliaSmoothOptimizers/FluxNLPModels.jl/issues). For general questions or discussions related to this repository and the [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) organization, feel free to start a discussion [here](https://github.com/JuliaSmoothOptimizers/Organization/discussions).
| FluxNLPModels | https://github.com/JuliaSmoothOptimizers/FluxNLPModels.jl.git |
|
[
"MPL-2.0"
] | 0.2.0 | 2c205e3f7085ec434dafd7f990d659f2f32736f2 | docs | 1453 | # FluxNLPModels.jl
## Compatibility
Julia ≥ 1.9.
## How to install
This module can be installed with the following command:
```julia
pkg> add FluxNLPModels
```
## Synopsis
FluxNLPModels exposes neural network models as optimization problems conforming to the [NLPModels API](https://github.com/JuliaSmoothOptimizers/NLPModels.jl). FluxNLPModels is an interface between [Flux.jl](https://github.com/FluxML/Flux.jl)'s classification neural networks and [NLPModels.jl](https://github.com/JuliaSmoothOptimizers/NLPModels.jl).
A `FluxNLPModel` gives the user access to:
- The values of the neural network variables/weights `w`;
- The value of the objective/loss function `L(X, Y; w)` at `w` for a given minibatch `(X,Y)`;
- The gradient `∇L(X, Y; w)` of the objective/loss function at `w` for a given minibatch `(X,Y)`.
In addition, it provides tools to:
- Switch the minibatch used to evaluate the neural network;
- Retrieve the current minibatch ;
- Measure the neural network's loss at the current `w`.
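A minimal sketch of this access pattern (assuming a `FluxNLPModel` named `nlp`, built as in the tutorial):
```julia
using NLPModels
w = copy(nlp.w)             # current weights/variables
f = obj(nlp, w)             # loss on the current training minibatch
g = grad(nlp, w)            # gradient of the loss at w
minibatch_next_train!(nlp)  # switch to the next training minibatch
```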
# Bug reports and discussions
If you encounter any bugs or have suggestions for improvement, please open an [issue](https://github.com/JuliaSmoothOptimizers/FluxNLPModels.jl/issues). For general questions or discussions related to this repository and the [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) organization, feel free to start a discussion [here](https://github.com/JuliaSmoothOptimizers/Organization/discussions).
| FluxNLPModels | https://github.com/JuliaSmoothOptimizers/FluxNLPModels.jl.git |
|
[
"MPL-2.0"
] | 0.2.0 | 2c205e3f7085ec434dafd7f990d659f2f32736f2 | docs | 166 | # Reference
## Contents

```@contents
Pages = ["reference.md"]
```

## Index

```@index
Pages = ["reference.md"]
```
```@autodocs
Modules = [FluxNLPModels]
``` | FluxNLPModels | https://github.com/JuliaSmoothOptimizers/FluxNLPModels.jl.git |
|
[
"MPL-2.0"
] | 0.2.0 | 2c205e3f7085ec434dafd7f990d659f2f32736f2 | docs | 4805 | # FluxNLPModels.jl Tutorial
## Setting up
This step-by-step example assumes prior knowledge of [Julia](https://julialang.org/) and [Flux.jl](https://github.com/FluxML/Flux.jl).
See the [Julia tutorial](https://julialang.org/learning/) and the [Flux.jl tutorial](https://fluxml.ai/Flux.jl/stable/models/quickstart/#man-quickstart) for more details.
We have aligned this tutorial with the [MLP_MNIST](https://github.com/FluxML/model-zoo/blob/master/vision/mlp_mnist/mlp_mnist.jl) example and reused some of its functions.
### What we cover in this tutorial
We will cover the following:
- Define a Neural Network (NN) Model in Flux,
- Fully connected model
- Define or set the loss function
- Data loading
- MNIST
- Divide the data into train and test
- Define a method for calculating accuracy and loss
- Transfer the NN model to FluxNLPModel
- Using FluxNLPModels and access
- Gradient of current weight
- Objective (or loss) evaluated at current weights
### Packages needed
```@example FluxNLPModel
using FluxNLPModels
using Flux, NLPModels
using Flux.Data: DataLoader
using Flux: onehotbatch, onecold
using Flux.Losses: logitcrossentropy
using MLDatasets
using JSOSolvers
```
### Setting Neural Network (NN) Model
First, a NN model needs to be define in Flux.jl.
Our model is very simple: it consists of one "hidden layer" with 32 "neurons", each connected to every input pixel. Each neuron has a ReLU nonlinearity and is connected to every "neuron" in the output layer. The output layer produces raw scores (logits); `logitcrossentropy` applies softmax internally to turn them into probabilities, i.e., positive numbers that add up to 1.
One can create a method that returns the model. This method can encapsulate the specific architecture and parameters of the model, making it easier to reuse and manage. It provides a convenient way to define and initialize the model when needed.
```@example FluxNLPModel
function build_model(; imgsize = (28, 28, 1), nclasses = 10)
return Chain(Dense(prod(imgsize), 32, relu), Dense(32, nclasses))
end
```
### Loss function
We can define any loss function that we need, here we use Flux build-in logitcrossentropy function.
```@example FluxNLPModel
## Loss function
const loss = Flux.logitcrossentropy
```
### Load datasets and define minibatch
In this section, we will cover the process of loading datasets and defining minibatches for training your model using Flux. Loading and preprocessing data is an essential step in machine learning, as it allows you to train your model on real-world examples.
We will specifically focus on loading the MNIST dataset. We will divide the data into training and testing sets, ensuring that we have separate data for model training and evaluation.
Additionally, we will define minibatches, which are subsets of the dataset that are used during the training process. Minibatches enable efficient training by processing a small batch of examples at a time, instead of the entire dataset. This technique helps in managing memory resources and improving convergence speed.
```@example FluxNLPModel
function getdata(bs)
ENV["DATADEPS_ALWAYS_ACCEPT"] = "true"
# Loading Dataset
xtrain, ytrain = MLDatasets.MNIST(Tx = Float32, split = :train)[:]
xtest, ytest = MLDatasets.MNIST(Tx = Float32, split = :test)[:]
# Reshape Data in order to flatten each image into a linear array
xtrain = Flux.flatten(xtrain)
xtest = Flux.flatten(xtest)
# One-hot-encode the labels
ytrain, ytest = onehotbatch(ytrain, 0:9), onehotbatch(ytest, 0:9)
# Create DataLoaders (mini-batch iterators)
train_loader = DataLoader((xtrain, ytrain), batchsize = bs, shuffle = true)
test_loader = DataLoader((xtest, ytest), batchsize = bs)
return train_loader, test_loader
end
```
### Transferring to FluxNLPModels
```@example FluxNLPModel
device = cpu
train_loader, test_loader = getdata(128)
## Construct model
model = build_model() |> device
# now we wrap the model in a FluxNLPModel
nlp = FluxNLPModel(model, train_loader, test_loader; loss_f = loss)
```
## Tools associated with a FluxNLPModel
The problem dimension `n`, where `w` ∈ ℝⁿ:
```@example FluxNLPModel
n = nlp.meta.nvar
```
### Get the current network weights:
```@example FluxNLPModel
w = nlp.w
```
### Evaluate the loss function (i.e. the objective function) at `w`:
```@example FluxNLPModel
using NLPModels
NLPModels.obj(nlp, w)
```
The length of `w` must be `nlp.meta.nvar`.
### Evaluate the gradient at `w`:
```@example FluxNLPModel
g = similar(w)
NLPModels.grad!(nlp, w, g)
```
## Train a neural network with JSOSolvers.R2
```@example FluxNLPModel
max_time = 60. # run at most 1min
callback = (nlp,
solver,
stats) -> FluxNLPModels.minibatch_next_train!(nlp)
solver_stats = R2(nlp; callback, max_time)
test_accuracy = FluxNLPModels.accuracy(nlp) #check the accuracy
``` | FluxNLPModels | https://github.com/JuliaSmoothOptimizers/FluxNLPModels.jl.git |
|
[
"MIT"
] | 0.9.1 | 3a994404d3f6709610701c7dabfc03fed87a81f8 | code | 1234 |
module LightXML
using XML2_jll
export
# common
name, free,
# nodes
AbstractXMLNode,
XMLAttr, XMLAttrIter, XMLNode, XMLNodeIter, XMLElement, XMLElementIter,
nodetype, value, content, attribute, has_attribute,
is_elementnode, is_textnode, is_commentnode, is_cdatanode, is_blanknode, is_pinode,
child_nodes, has_children, attributes, has_attributes, attributes_dict,
child_elements, find_element, get_elements_by_tagname,
new_element, add_child, new_child, new_textnode, add_text, add_cdata, add_pi,
set_attribute, set_attributes, unlink, set_content,
# document
XMLDocument, version, encoding, compression, standalone, root,
parse_file, parse_string, save_file, set_root, create_root
const Xchar = UInt8
const Xstr = Ptr{Xchar}
# opaque pointer type (do not dereference!) corresponding to xmlBufferPtr in C
struct xmlBuffer end
const Xptr = Ptr{xmlBuffer}
# pre-condition: p is not null
# (After tests, it seems that free in libc instead of xmlFree
# should be used here.)
function _xcopystr(p::Xstr)
r = unsafe_string(p)
Libc.free(p)
return r
end
include("errors.jl")
include("utils.jl")
include("nodes.jl")
include("document.jl")
include("cdata.jl")
end
| LightXML | https://github.com/JuliaIO/LightXML.jl.git |
|
[
"MIT"
] | 0.9.1 | 3a994404d3f6709610701c7dabfc03fed87a81f8 | code | 322 | function new_cdatanode(xdoc::XMLDocument, txt::AbstractString)
p = ccall((:xmlNewCDataBlock,libxml2), Xptr,
(Xptr, Xstr, Cint),
xdoc.ptr, txt, length(txt) + 1)
XMLNode(p)
end
add_cdata(xdoc::XMLDocument, x::XMLElement, txt::AbstractString) =
add_child(x, new_cdatanode(xdoc, txt))
| LightXML | https://github.com/JuliaIO/LightXML.jl.git |
|
[
"MIT"
] | 0.9.1 | 3a994404d3f6709610701c7dabfc03fed87a81f8 | code | 3267 |
#### Document type
struct _XMLDocStruct # use the same layout as C
# common part
_private::Ptr{Cvoid}
nodetype::Cint
name::Xstr
children::Xptr
last::Xptr
parent::Xptr
next::Xptr
prev::Xptr
doc::Xptr
# specific part
compression::Cint
standalone::Cint
intsubset::Xptr
extsubset::Xptr
oldns::Xptr
version::Xstr
encoding::Xstr
ids::Ptr{Cvoid}
refs::Ptr{Cvoid}
url::Xstr
charset::Cint
dict::Xstr
psvi::Ptr{Cvoid}
parseflags::Cint
properties::Cint
end
mutable struct XMLDocument
ptr::Xptr
_struct::_XMLDocStruct
function XMLDocument(ptr::Xptr)
s::_XMLDocStruct = unsafe_load(convert(Ptr{_XMLDocStruct}, ptr))
# validate integrity
@assert s.nodetype == XML_DOCUMENT_NODE
@assert s.doc == ptr
new(ptr, s)
end
function XMLDocument()
# create an empty document
ptr = ccall((:xmlNewDoc,libxml2), Xptr, (Cstring,), "1.0")
XMLDocument(ptr)
end
end
version(xdoc::XMLDocument) = unsafe_string(xdoc._struct.version)
encoding(xdoc::XMLDocument) = unsafe_string(xdoc._struct.encoding)
compression(xdoc::XMLDocument) = Int(xdoc._struct.compression)
standalone(xdoc::XMLDocument) = Int(xdoc._struct.standalone)
function root(xdoc::XMLDocument)
pr = ccall((:xmlDocGetRootElement,libxml2), Xptr, (Xptr,), xdoc.ptr)
pr != C_NULL || throw(XMLNoRootError())
XMLElement(pr)
end
#### construction & free
function free(xdoc::XMLDocument)
ccall((:xmlFreeDoc,libxml2), Cvoid, (Xptr,), xdoc.ptr)
xdoc.ptr = C_NULL
end
function set_root(xdoc::XMLDocument, xroot::XMLElement)
ccall((:xmlDocSetRootElement,libxml2), Xptr, (Xptr, Xptr), xdoc.ptr, xroot.node.ptr)
end
function create_root(xdoc::XMLDocument, name::AbstractString)
xroot = new_element(name)
set_root(xdoc, xroot)
return xroot
end
#### parse and free
function _check_result(p)
p != C_NULL || throw(XMLParseError("Failure in parsing an XML file."))
XMLDocument(p)
end
parse_file(filename::AbstractString) =
_check_result(ccall((:xmlParseFile,libxml2), Xptr, (Cstring,), filename))
parse_file(filename::AbstractString, encoding, options::Integer) =
_check_result(ccall((:xmlReadFile,libxml2), Xptr, (Cstring, Ptr{Cchar}, Cint),
filename, encoding, options))
parse_string(s::AbstractString) =
_check_result(ccall((:xmlParseMemory,libxml2), Xptr, (Xstr, Cint), s, sizeof(s)))
#### output
function save_file(xdoc::XMLDocument, filename::AbstractString; encoding::AbstractString="utf-8")
ret = ccall((:xmlSaveFormatFileEnc,libxml2), Cint,
(Cstring, Xptr, Cstring, Cint),
filename, xdoc.ptr, encoding, 1)
ret < 0 && throw(XMLWriteError("Failed to save XML to file $filename"))
Int(ret) # number of bytes written
end
function Base.string(xdoc::XMLDocument; encoding::AbstractString="utf-8")
buf_out = Vector{Xstr}(undef, 1)
len_out = Vector{Cint}(undef, 1)
ccall((:xmlDocDumpFormatMemoryEnc,libxml2), Cvoid,
(Xptr, Ptr{Xstr}, Ptr{Cint}, Cstring, Cint),
xdoc.ptr, buf_out, len_out, encoding, 1)
_xcopystr(buf_out[1])
end
Base.show(io::IO, xdoc::XMLDocument) = print(io, string(xdoc))
| LightXML | https://github.com/JuliaIO/LightXML.jl.git |
|
[
"MIT"
] | 0.9.1 | 3a994404d3f6709610701c7dabfc03fed87a81f8 | code | 331 | abstract type XMLError <: Exception end
struct XMLNoRootError <: XMLError ; end
struct XMLAttributeNotFound <: XMLError ; end
struct XMLParseError{T<:AbstractString} <: XMLError
msg::T
end
struct XMLWriteError{T<:AbstractString} <: XMLError
msg::T
end
struct XMLTreeError{T<:AbstractString} <: XMLError
msg::T
end
| LightXML | https://github.com/JuliaIO/LightXML.jl.git |
|
[
"MIT"
] | 0.9.1 | 3a994404d3f6709610701c7dabfc03fed87a81f8 | code | 9108 | # XML nodes
abstract type AbstractXMLNode end
#### Types of attributes
const XML_ATTRIBUTE_CDATA = 1
const XML_ATTRIBUTE_ID = 2
const XML_ATTRIBUTE_IDREF = 3
const XML_ATTRIBUTE_IDREFS = 4
const XML_ATTRIBUTE_ENTITY = 5
const XML_ATTRIBUTE_ENTITIES = 6
const XML_ATTRIBUTE_NMTOKEN = 7
const XML_ATTRIBUTE_NMTOKENS = 8
const XML_ATTRIBUTE_ENUMERATION = 9
const XML_ATTRIBUTE_NOTATION = 10
#### Types of nodes
const XML_ELEMENT_NODE = 1
const XML_ATTRIBUTE_NODE = 2
const XML_TEXT_NODE = 3
const XML_CDATA_SECTION_NODE = 4
const XML_ENTITY_REF_NODE = 5
const XML_ENTITY_NODE = 6
const XML_PI_NODE = 7
const XML_COMMENT_NODE = 8
const XML_DOCUMENT_NODE = 9
const XML_DOCUMENT_TYPE_NODE = 10
const XML_DOCUMENT_FRAG_NODE = 11
const XML_NOTATION_NODE = 12
const XML_HTML_DOCUMENT_NODE = 13
const XML_DTD_NODE = 14
const XML_ELEMENT_DECL = 15
const XML_ATTRIBUTE_DECL = 16
const XML_ENTITY_DECL = 17
const XML_NAMESPACE_DECL = 18
const XML_XINCLUDE_START = 19
const XML_XINCLUDE_END = 20
const XML_DOCB_DOCUMENT_NODE = 21
##### Generic methods
is_elementnode(nd::AbstractXMLNode) = (nodetype(nd) == XML_ELEMENT_NODE)
is_textnode(nd::AbstractXMLNode) = (nodetype(nd) == XML_TEXT_NODE)
is_commentnode(nd::AbstractXMLNode) = (nodetype(nd) == XML_COMMENT_NODE)
is_cdatanode(nd::AbstractXMLNode) = (nodetype(nd) == XML_CDATA_SECTION_NODE)
is_pinode(nd::AbstractXMLNode) = (nodetype(nd) == XML_PI_NODE)
#######################################
#
# XML Attributes
#
#######################################
struct _XMLAttrStruct
# common part
_private::Ptr{Cvoid}
nodetype::Cint
name::Xstr
children::Xptr
last::Xptr
parent::Xptr
next::Xptr
prev::Xptr
doc::Xptr
# specific part
ns::Xptr
atype::Cint
psvi::Ptr{Cvoid}
end
mutable struct XMLAttr
ptr::Xptr
_struct::_XMLAttrStruct
function XMLAttr(ptr::Xptr)
s = unsafe_load(convert(Ptr{_XMLAttrStruct}, ptr))
@assert s.nodetype == XML_ATTRIBUTE_NODE
new(ptr, s)
end
end
name(a::XMLAttr) = unsafe_string(a._struct.name)
function value(a::XMLAttr)
pct = ccall((:xmlNodeGetContent,libxml2), Xstr, (Xptr,), a._struct.children)
(pct != C_NULL ? _xcopystr(pct) : "")::AbstractString
end
# iterations
struct XMLAttrIter
p::Xptr
end
function Base.iterate(it::XMLAttrIter, p::Xptr=it.p)
p == C_NULL && return nothing
a = XMLAttr(p)
(a, a._struct.next)
end
Base.IteratorSize(::Type{XMLAttrIter}) = Base.SizeUnknown()
#######################################
#
# Base XML Nodes
#
#######################################
struct _XMLNodeStruct
# common part
_private::Ptr{Cvoid}
nodetype::Cint
name::Ptr{UInt8}
children::Xptr
last::Xptr
parent::Xptr
next::Xptr
prev::Xptr
doc::Xptr
# specific part
ns::Xptr
content::Xstr
attrs::Xptr
nsdef::Xptr
psvi::Ptr{Cvoid}
line::Cushort
extra::Cushort
end
mutable struct XMLNode <: AbstractXMLNode
ptr::Xptr
_struct::_XMLNodeStruct
function XMLNode(ptr::Xptr)
s = unsafe_load(convert(Ptr{_XMLNodeStruct}, ptr))
new(ptr, s)
end
end
name(nd::XMLNode) = unsafe_string(nd._struct.name)
nodetype(nd::XMLNode) = nd._struct.nodetype
has_children(nd::XMLNode) = (nd._struct.children != C_NULL)
# whether it is a white-space only text node
is_blanknode(nd::XMLNode) = Bool(ccall((:xmlIsBlankNode,libxml2), Cint, (Xptr,), nd.ptr))
function free(nd::XMLNode)
ccall((:xmlFreeNode,libxml2), Cvoid, (Xptr,), nd.ptr)
nd.ptr = C_NULL
end
function unlink(nd::XMLNode)
ccall((:xmlUnlinkNode,libxml2), Cvoid, (Xptr,), nd.ptr)
end
# iteration over children
struct XMLNodeIter
p::Xptr
end
function Base.iterate(it::XMLNodeIter, p::Xptr=it.p)
p == C_NULL && return nothing
nd = XMLNode(p)
(nd, nd._struct.next)
end
Base.IteratorSize(::Type{XMLNodeIter}) = Base.SizeUnknown()
child_nodes(nd::XMLNode) = XMLNodeIter(nd._struct.children)
function content(nd::XMLNode)
pct = ccall((:xmlNodeGetContent,libxml2), Xstr, (Xptr,), nd.ptr)
(pct != C_NULL ? _xcopystr(pct) : "")::AbstractString
end
# dumping
const DEFAULT_DUMPBUFFER_SIZE = 4096
function Base.string(nd::XMLNode)
buf = XBuffer(DEFAULT_DUMPBUFFER_SIZE)
ccall((:xmlNodeDump,libxml2), Cint, (Xptr, Xptr, Xptr, Cint, Cint),
buf.ptr, nd._struct.doc, nd.ptr, 0, 1)
r = content(buf)
free(buf)
return r
end
Base.show(io::IO, nd::XMLNode) = print(io, string(nd))
#######################################
#
# XML Elements
#
#######################################
mutable struct XMLElement <: AbstractXMLNode
node::XMLNode
function XMLElement(node::XMLNode)
if !is_elementnode(node)
throw(ArgumentError("The input node is not an element."))
end
new(node)
end
XMLElement(ptr::Xptr) = XMLElement(XMLNode(ptr))
end
name(x::XMLElement) = name(x.node)
nodetype(x::XMLElement) = XML_ELEMENT_NODE
has_children(x::XMLElement) = has_children(x.node)
child_nodes(x::XMLElement) = child_nodes(x.node)
content(x::XMLElement) = content(x.node)
Base.string(x::XMLElement) = string(x.node)
Base.show(io::IO, x::XMLElement) = show(io, x.node)
free(x::XMLElement) = free(x.node)
unlink(x::XMLElement) = unlink(x.node)
# attribute access
function attribute(x::XMLElement, name::AbstractString; required::Bool=false)
pv = ccall((:xmlGetProp,libxml2), Xstr, (Xptr, Cstring), x.node.ptr, name)
if pv != C_NULL
return _xcopystr(pv)
else
if required
throw(XMLAttributeNotFound())
else
return nothing
end
end
end
has_attribute(x::XMLElement, name::AbstractString) =
ccall((:xmlHasProp,libxml2), Xptr, (Xptr, Cstring), x.node.ptr, name) != C_NULL
has_attributes(x::XMLElement) = (x.node._struct.attrs != C_NULL)
attributes(x::XMLElement) = XMLAttrIter(x.node._struct.attrs)
function attributes_dict(x::XMLElement)
# make a dictionary from the attributes
dct = Dict{AbstractString,AbstractString}()
if has_attributes(x)
for a in attributes(x)
dct[name(a)] = value(a)
end
end
return dct
end
# element access
struct XMLElementIter
parent_ptr::Xptr
end
function Base.iterate(it::XMLElementIter, p::Xptr=ccall((:xmlFirstElementChild, libxml2), Xptr, (Xptr,), it.parent_ptr))
p == C_NULL && return nothing
XMLElement(p), ccall((:xmlNextElementSibling, libxml2), Xptr, (Xptr,), p)
end
Base.IteratorSize(::Type{XMLElementIter}) = Base.SizeUnknown()
child_elements(x::XMLElement) = XMLElementIter(x.node.ptr)
# elements by tag name
function find_element(x::XMLElement, n::AbstractString)
for c in child_elements(x)
name(c) == n && return c
end
return nothing
end
function get_elements_by_tagname(x::XMLElement, n::AbstractString)
lst = Vector{XMLElement}()
for c in child_elements(x)
name(c) == n && push!(lst, c)
end
return lst
end
Base.getindex(x::XMLElement, name::AbstractString) = get_elements_by_tagname(x, name)
#######################################
#
# XML Tree Construction
#
#######################################
function new_element(name::AbstractString)
p = ccall((:xmlNewNode,libxml2), Xptr, (Xptr, Cstring), C_NULL, name)
XMLElement(p)
end
function add_child(xparent::XMLElement, xchild::XMLNode)
p = ccall((:xmlAddChild,libxml2), Xptr, (Xptr, Xptr), xparent.node.ptr, xchild.ptr)
p != C_NULL || throw(XMLTreeError("Failed to add a child node."))
end
add_child(xparent::XMLElement, xchild::XMLElement) = add_child(xparent, xchild.node)
function new_child(xparent::XMLElement, name::AbstractString)
xc = new_element(name)
add_child(xparent, xc)
return xc
end
function new_textnode(txt::AbstractString)
p = ccall((:xmlNewText,libxml2), Xptr, (Cstring,), txt)
XMLNode(p)
end
function new_pinode(name::AbstractString, content::AbstractString)
p = ccall((:xmlNewPI,libxml2), Xptr, (Cstring, Cstring), name, content)
XMLNode(p)
end
add_text(x::XMLElement, txt::AbstractString) = add_child(x, new_textnode(txt))
add_pi(x::XMLElement, name::AbstractString, content::AbstractString) = add_child(x, new_pinode(name, content))
function set_attribute(x::XMLElement, name::AbstractString, val::AbstractString)
a = ccall((:xmlSetProp,libxml2), Xptr, (Xptr, Cstring, Cstring), x.node.ptr, name, val)
return XMLAttr(a)
end
set_attribute(x::XMLElement, name::AbstractString, val) = set_attribute(x, name, string(val))
const PairTypes = Union{NTuple{2}, Pair}
function set_attributes(x::XMLElement, attrs::AbstractArray{<:PairTypes})
for (nam, val) in attrs
set_attribute(x, string(nam), string(val))
end
end
set_attributes(x::XMLElement, attrs::AbstractDict) = set_attributes(x, collect(attrs))
function set_attributes(x::XMLElement; attrs...)
for (nam, val) in attrs
set_attribute(x, string(nam), string(val))
end
end
function set_content(x::XMLElement, txt::AbstractString)
ccall((:xmlNodeSetContent, libxml2), Xptr, (Xptr, Cstring,), x.node.ptr, txt)
x
end
| LightXML | https://github.com/JuliaIO/LightXML.jl.git |
|
[
"MIT"
] | 0.9.1 | 3a994404d3f6709610701c7dabfc03fed87a81f8 | code | 539 | # Utilities
##### Buffer
struct XBuffer
ptr::Xptr
function XBuffer(bytes::Integer)
p = ccall((:xmlBufferCreateSize,libxml2), Xptr, (Csize_t,), bytes)
p != C_NULL || error("Failed to create buffer of $bytes bytes.")
new(p)
end
end
free(buf::XBuffer) = ccall((:xmlBufferFree,libxml2), Cvoid, (Xptr,), buf.ptr)
Base.length(buf::XBuffer) = Int(ccall((:xmlBufferLength,libxml2), Cint, (Xptr,), buf.ptr))
content(buf::XBuffer) = unsafe_string(ccall((:xmlBufferContent,libxml2), Xstr, (Xptr,), buf.ptr))
| LightXML | https://github.com/JuliaIO/LightXML.jl.git |
|
[
"MIT"
] | 0.9.1 | 3a994404d3f6709610701c7dabfc03fed87a81f8 | code | 300 | xdoc = XMLDocument()
xroot = create_root(xdoc, "States")
xs1 = new_child(xroot, "State")
add_cdata(xdoc, xs1, "Massachusetts")
rtxt = """
<?xml version="1.0" encoding="utf-8"?>
<States>
<State><![CDATA[Massachusetts]]></State>
</States>
"""
@test strip(string(xdoc)) == strip(rtxt)
free(xdoc)
| LightXML | https://github.com/JuliaIO/LightXML.jl.git |
|
[
"MIT"
] | 0.9.1 | 3a994404d3f6709610701c7dabfc03fed87a81f8 | code | 1095 | xdoc = XMLDocument()
xroot = create_root(xdoc, "States")
xs1 = new_child(xroot, "State")
add_text(xs1, "Massachusetts")
set_attribute(xs1, "tag", "MA")
xs2 = new_child(xroot, "State")
add_text(xs2, "Illinois")
set_attributes(xs2, Dict{Any,Any}("tag"=>"IL", "cap"=>"Springfield"))
xs3 = new_child(xroot, "State")
add_text(xs3, "California typo")
set_content(xs3, "California typo again")
@test content(xs3) == "California typo again"
set_content(xs3, "California")
@test content(xs3) == "California"
set_attributes(xs3; tag="CA", cap="Sacramento")
rtxt1 = """
<?xml version="1.0" encoding="utf-8"?>
<States>
<State tag="MA">Massachusetts</State>
<State tag="IL" cap="Springfield">Illinois</State>
<State tag="CA" cap="Sacramento">California</State>
</States>
"""
rtxt2 = """
<?xml version="1.0" encoding="utf-8"?>
<States>
<State tag="MA">Massachusetts</State>
<State cap="Springfield" tag="IL">Illinois</State>
<State tag="CA" cap="Sacramento">California</State>
</States>
"""
@test (strip(string(xdoc)) == strip(rtxt1)) || (strip(string(xdoc)) == strip(rtxt2))
free(xdoc)
| LightXML | https://github.com/JuliaIO/LightXML.jl.git |
|
[
"MIT"
] | 0.9.1 | 3a994404d3f6709610701c7dabfc03fed87a81f8 | code | 681 | using LightXML
# document
docstr = """
<?xml version="1.0" encoding="UTF-8"?>
<bookstore>
<book category="COOKING" tag="first">
<title lang="en">Everyday Italian</title>
<author>Giada De Laurentiis</author>
<year>2005</year>
<price>30.00</price>
</book>
<book category="CHILDREN">
<title lang="en">Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</book>
</bookstore>
"""
xdoc = parse_string(docstr)
println("Document:")
println("=====================")
show(xdoc)
# save_file(xdoc, "tt.xml")
println("Root Element:")
println("=====================")
xroot = root(xdoc)
show(xroot)
free(xdoc)
| LightXML | https://github.com/JuliaIO/LightXML.jl.git |
|
[
"MIT"
] | 0.9.1 | 3a994404d3f6709610701c7dabfc03fed87a81f8 | code | 3777 | # document
docstr = """
<?xml version="1.0" encoding="UTF-8"?>
<bookstore>
<book category="COOKING" tag="first">
<title lang="en">Everyday Italian</title>
<author>Giada De Laurentiis</author>
<year>2005</year>
<price>30.00</price>
</book>
<book category="CHILDREN">
<title lang="en">Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</book><?PI description?>
</bookstore>
"""
for xdoc = (parse_string(docstr),
parse_file(joinpath(@__DIR__, "ex1.xml")),
parse_file(joinpath(@__DIR__, "ex1.xml"), C_NULL, 64), # 64 == XML_PARSE_NOWARNING
parse_file(joinpath(@__DIR__, "ex1.xml"), "UTF-8", 64))
@test version(xdoc) == "1.0"
@test encoding(xdoc) == "UTF-8"
@test standalone(xdoc) == -2
# root node
xroot = root(xdoc)
@test isa(xroot, XMLElement)
@test is_elementnode(xroot)
@test name(xroot) == "bookstore"
@test nodetype(xroot) == 1
@test !has_attributes(xroot)
@test has_children(xroot)
ras = collect(attributes(xroot))
@test isempty(ras)
# children of root (text nodes and books)
rcs = collect(child_nodes(xroot))
@test length(rcs) == 6 # text, book[1], text, book[2], PI, text
@test is_textnode(rcs[1])
@test is_textnode(rcs[3])
@test is_pinode(rcs[5])
@test is_textnode(rcs[6])
@test is_blanknode(rcs[1])
@test is_blanknode(rcs[3])
@test is_blanknode(rcs[6])
@test is_elementnode(rcs[2])
@test is_elementnode(rcs[4])
@test !is_blanknode(rcs[2])
@test !is_blanknode(rcs[4])
xb1 = XMLElement(rcs[2])
@test name(xb1) == "book"
@test nodetype(xb1) == 1
@test has_attributes(xb1)
@test has_children(xb1)
@test attribute(xb1, "category") == "COOKING"
@test attribute(xb1, "tag") == "first"
b1as = collect(attributes(xb1))
@test length(b1as) == 2
b1a1 = b1as[1]
@test isa(b1a1, XMLAttr)
@test name(b1a1) == "category"
@test value(b1a1) == "COOKING"
b1a2 = b1as[2]
@test isa(b1a2, XMLAttr)
@test name(b1a2) == "tag"
@test value(b1a2) == "first"
adct = attributes_dict(xb1)
@test length(adct) == 2
@test adct["category"] == "COOKING"
@test adct["tag"] == "first"
xb2 = XMLElement(rcs[4])
@test name(xb2) == "book"
@test nodetype(xb2) == 1
@test has_attributes(xb2)
@test has_children(xb2)
@test has_attribute(xb2, "category")
@test attribute(xb2, "category") == "CHILDREN"
@test !has_attribute(xb2, "wrongattr")
@test attribute(xb2, "wrongattr") === nothing
@test_throws LightXML.XMLAttributeNotFound attribute(xb2, "wrongattr"; required=true)
# test get_elements_by_tagname and getindex
rces_by_tagname = get_elements_by_tagname(xroot, "book")
rces_by_getindex = xroot["book"]
for rces in (rces_by_getindex, rces_by_tagname)
@test length(rces) == 2
@test isa(rces, Vector{XMLElement})
@test attribute(rces[1], "category") == "COOKING"
@test attribute(rces[2], "category") == "CHILDREN"
end
ces = collect(child_elements(xb1))
@test length(ces) == 4
c1, c2, c3, c4 = ces[1], ces[2], ces[3], ces[4]
@test isa(c1, XMLElement)
@test name(c1) == "title"
@test has_attributes(c1)
@test attribute(c1, "lang") == "en"
@test content(c1) == "Everyday Italian"
@test has_children(c1)
c1cs = collect(child_nodes(c1))
@test length(c1cs) == 1
c1c = c1cs[1]
@test is_textnode(c1c)
@test content(c1c) == "Everyday Italian"
@test isa(c2, XMLElement)
@test name(c2) == "author"
@test !has_attributes(c2)
@test content(c2) == "Giada De Laurentiis"
@test isa(c3, XMLElement)
@test name(c3) == "year"
@test !has_attributes(c3)
@test content(c3) == "2005"
@test isa(c4, XMLElement)
@test name(c4) == "price"
@test !has_attributes(c4)
@test content(c4) == "30.00"
cy = find_element(xb1, "year")
@test isa(cy, XMLElement)
@test name(cy) == "year"
@test content(cy) == "2005"
cz = find_element(xb1, "abc")
@test cz === nothing
free(xdoc)
end
| LightXML | https://github.com/JuliaIO/LightXML.jl.git |
|
[
"MIT"
] | 0.9.1 | 3a994404d3f6709610701c7dabfc03fed87a81f8 | code | 369 | xdoc = XMLDocument()
xroot = create_root(xdoc, "States")
add_pi(xroot, "State", "Massachusetts")
add_pi(xroot, "State", "New Jersey")
add_pi(xroot, "State", "New York")
rtxt = """
<?xml version="1.0" encoding="utf-8"?>
<States>
<?State Massachusetts?>
<?State New Jersey?>
<?State New York?>
</States>
"""
@test strip(string(xdoc)) == strip(rtxt)
free(xdoc)
| LightXML | https://github.com/JuliaIO/LightXML.jl.git |
|
[
"MIT"
] | 0.9.1 | 3a994404d3f6709610701c7dabfc03fed87a81f8 | code | 163 | using LightXML
using Test
tests = ["parse", "create", "cdata", "pi"]
for t in tests
fpath = "$t.jl"
println("running $fpath ...")
include(fpath)
end
| LightXML | https://github.com/JuliaIO/LightXML.jl.git |
|
[
"MIT"
] | 0.9.1 | 3a994404d3f6709610701c7dabfc03fed87a81f8 | docs | 11182 | ## LightXML.jl
[](https://travis-ci.org/JuliaIO/LightXML.jl)
[](https://ci.appveyor.com/project/tkelman/lightxml-jl/branch/master)
[](http://pkg.julialang.org/?pkg=LightXML&ver=0.6)
This package is a light-weight Julia wrapper of [libxml2](http://www.xmlsoft.org), which provides a minimal interface that covers functionalities that are commonly needed:
* Parse an XML file or string into a tree
* Access XML tree structure
* Create an XML tree
* Export an XML tree to a string or an XML file
### Setup
Like other Julia packages, you may install *LightXML* from the General registry, as
```julia
Pkg.add("LightXML")
```
**Note:** This package relies on the library *libxml2* to work, which is shipped with Mac OS X and many Linux systems, so this package may work out of the box. If not, check whether *libxml2* is installed on your system and whether *libxml2.so* (for Linux) or *libxml2.dylib* (for Mac) is on your library search path.
### Examples
The following examples show how you may use this package to accomplish common tasks.
#### Read an XML file
Suppose you have an XML file ``ex1.xml`` as below
```xml
<?xml version="1.0" encoding="UTF-8"?>
<bookstore>
<book category="COOKING" tag="first">
<title lang="en">Everyday Italian</title>
<author>Giada De Laurentiis</author>
<year>2005</year>
<price>30.00</price>
</book>
<book category="CHILDREN">
<title lang="en">Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</book>
</bookstore>
```
Here is the code to parse this file:
```julia
using LightXML
# parse ex1.xml:
# xdoc is an instance of XMLDocument, which maintains a tree structure
xdoc = parse_file("ex1.xml")
# get the root element
xroot = root(xdoc) # an instance of XMLElement
# print its name
println(name(xroot)) # this should print: bookstore
# traverse all its child nodes and print element names
for c in child_nodes(xroot) # c is an instance of XMLNode
println(nodetype(c))
if is_elementnode(c)
e = XMLElement(c) # this makes an XMLElement instance
println(name(e))
end
end
#=
If the remainder of the script does not use the document or any of its children,
you can call free here to deallocate the memory. The memory will only get
deallocated by calling free or by exiting julia -- i.e., the memory allocated by
libxml2 will not get freed when the julia variable wrapping it goes out of
scope.
=#
free(xdoc)
```
There are actually five child nodes under ``<bookstore>``: the 1st, 3rd, and 5th children are text nodes (any space between node elements is captured by text nodes), while the 2nd and 4th nodes are element nodes corresponding to the ``<book>`` elements.
One may use the function ``nodetype`` to determine the type of a node, which returns an integer following the table [here](https://www.w3schools.com/jsref/prop_node_nodetype.asp). In particular, 1 indicates an element node and 3 indicates a text node.
If you only care about child elements, you may use ``child_elements`` instead of ``child_nodes``.
```julia
ces = collect(child_elements(xroot)) # get a list of all child elements
@assert length(ces) == 2
# if you know the child element tagname, you can instead get a list as
ces = get_elements_by_tagname(xroot, "book")
# or shorthand:
ces = xroot["book"]
e1 = ces[1] # the first book element
# print the value of an attribute
println(attribute(e1, "category"))
# find the first title element under e1
t = find_element(e1, "title")
# retrieve the value of lang attribute of t
a = attribute(t, "lang") # a <- "en"
# retrieve the text content of t
r = content(t) # r <- "Everyday Italian"
```
One can also traverse all attributes of an element (``e1``) as
```julia
for a in attributes(e1) # a is an instance of XMLAttr
n = name(a)
v = value(a)
println("$n = $v")
end
```
Another way to access attributes is to turn them into a dictionary using ``attributes_dict``, as
```julia
ad = attributes_dict(e1)
v = ad["category"] # v <-- "COOKING"
```
**Note:** The functions ``child_nodes``, ``child_elements``, and ``attributes`` return lightweight iterators -- so one can use them in a for-loop. To get an array of all items, one may use the ``collect`` function provided by Julia.
#### Create an XML Document
This package allows you to construct an XML document programmatically. For example, to create an XML document as
```xml
<?xml version="1.0" encoding="utf-8"?>
<States>
<State tag="MA">Massachusetts</State>
<State tag="IL" cap="Springfield">Illinois</State>
<State tag="CA" cap="Sacramento">California</State>
</States>
```
You may write:
```julia
# create an empty XML document
xdoc = XMLDocument()
# create & attach a root node
xroot = create_root(xdoc, "States")
# create the first child
xs1 = new_child(xroot, "State")
# add the inner content
add_text(xs1, "Massachusetts")
# set attribute
set_attribute(xs1, "tag", "MA")
# likewise for the second child
xs2 = new_child(xroot, "State")
add_text(xs2, "Illinois")
# set multiple attributes using a dict
set_attributes(xs2, Dict("tag"=>"IL", "cap"=>"Springfield"))
# now, the third child
xs3 = new_child(xroot, "State")
add_text(xs3, "California")
# set attributes using keyword arguments
set_attributes(xs3; tag="CA", cap="Sacramento")
```
**Note:** When you create XML documents and elements directly you need to take care not to leak memory; memory management in the underlying libxml2 library is complex and LightXML currently does not integrate well with Julia's garbage collection system. You can call ``free`` on an XMLDocument, XMLNode or XMLElement but you must take care not to reference any child elements after they have been manually freed.
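A minimal sketch of the safe pattern, assuming the whole tree is released with a single ``free`` call once you are done with it:
```julia
using LightXML

xdoc = XMLDocument()
xroot = create_root(xdoc, "States")
xs = new_child(xroot, "State")
add_text(xs, "Maine")

s = string(xdoc)   # use the tree while the document is still alive

free(xdoc)         # frees the document and every node attached to it
# do not touch xdoc, xroot, or xs after this point: they wrap freed pointers
```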
#### Export an XML file
With this package, you can easily export an XML file to a string or a file, or show it on the console, as
```julia
# save to an XML file
save_file(xdoc, "f1.xml")
# output to a string
s = string(xdoc)
# print to the console (in a pretty format as in an XML file)
print(xdoc)
```
**Note:** the ``string`` and ``show`` functions are specialized for both ``XMLDocument`` and ``XMLElement``.
### Types
Main types of this package
* ``XMLDocument``: represent an XML document (in a tree)
* ``XMLElement``: represent an XML element (``child_elements`` give you this)
* ``XMLNode``: represent a generic XML node (``child_nodes`` give you this)
* ``XMLAttr``: represent an XML attribute
**Note:** If an ``XMLNode`` instance ``x`` is actually an element node, one may construct an ``XMLElement`` instance by ``XMLElement(x)``.
### API Functions
A list of API functions:
##### Functions to access an XML tree
```julia
# Let xdoc be a document, x be a node/element, e be an element
root(xdoc) # get the root element of a document
nodetype(x) # get an integer indicating the node type
name(x) # get the name of a node/element
content(x) # get text content of a node/element
# if x is an element, this returns all text (concatenated) within x
is_elementnode(x) # whether x is an element node
is_textnode(x) # whether x is a text node
is_cdatanode(x) # whether x is a CDATA node
is_commentnode(x) # whether x is a comment node
has_children(e) # whether e has child nodes
has_attributes(e) # whether e has attributes
child_nodes(x) # iterator of all child nodes of a node/element x
child_elements(e) # iterator of all child elements of e
attributes(e) # iterator of all attributes of e
attributes_dict(e) # a dictionary of all attributes of e,
# which maps names to corresponding values
has_attribute(e, name) # whether a named attribute exists for e
# get the value of a named attribute
# when the attribute does not exist, it either
# throws an exception (when required is true)
# or returns nothing (when required is false)
attribute(e, name; required=false)
find_element(e, name) # the first element of specified name under e
# returns nothing if no such element is found
get_elements_by_tagname(e, name) # a list of all child elements of e with
# the specified name. Equivalent to e[name]
string(e) # format an XML element into a string
show(io, e) # output formatted XML element
unlink(x) # remove a node or element from its current context
# (unlink does not free the memory for the node/element)
free(xdoc) # release memory for a document and all its children
free(x) # release memory for a node/element and all its children
```
##### Functions to create an XML document
```julia
xdoc = XMLDocument() # create an empty XML document
e = new_element(name) # create a new XML element
# this does not attach e to a tree
t = new_textnode(content) # create a new text node
# this does not attach t to a tree
set_root(xdoc, e) # set element e as the root of xdoc
add_child(parent, x) # add x as a child of a parent element
e = create_root(xdoc, name) # create a root element and set it as root
# equiv. to new_element + set_root
e = new_child(parent, name) # create a new element and add it as a child
# equiv. to new_element + add_child
add_text(e, text) # add text content to an element
# equiv. to new_textnode + add_child
set_content(e, text) # replace text content of an element
add_cdata(xdoc, e, text) # add cdata content to an element
# equiv. to new_cdatanode + add_child
set_attribute(e, name, value) # set an attribute of an element
# this returns the added attribute
# as an instance of XMLAttr
set_attributes(e, attrs) # set multiple attributes in one call
# attrs can be a dictionary or
# a list of pairs as (name, value)
# one can also use keyword arguments to set attributes to an element
set_attributes(e, key1="val1", key2="val2", ...)
```
##### Functions to work with a document
```julia
xdoc = parse_file(filename) # parse an XML file
xdoc = parse_file(filename, # parse an XML file with a specified encoding and parser options,
encoding, options) # see http://xmlsoft.org/html/libxml-parser.html#xmlReadFile
# and http://xmlsoft.org/html/libxml-parser.html#xmlParserOption
xdoc = parse_string(str) # parse an XML doc from a string
save_file(xdoc, filename) # save xdoc to an XML file
string(xdoc) # formatted XML doc to a string
show(io, xdoc) # output formatted XML document
```
| LightXML | https://github.com/JuliaIO/LightXML.jl.git |
|
[
"MIT"
] | 0.2.0 | 3809c2b0ff7eee0abc9ed47a85135351583b9742 | code | 274 | module TestLandscapes
# 1D potentials
include("potentials1D.jl")
export SymmetricDoubleWell, AsymmetricDoubleWell, FatSkinnyDoubleWell10
# 2D potentials
include("potentials2D.jl")
export EntropicSwitch, SymmetricTwoChannel, Muller, Rosenbrock, Zpotential, EntropicBox
end | TestLandscapes | https://github.com/gideonsimpson/TestLandscapes.jl.git |
|
[
"MIT"
] | 0.2.0 | 3809c2b0ff7eee0abc9ed47a85135351583b9742 | code | 678 | """
`SymmetricDoubleWell` - The classic symmetric doublewell potential.
### Fields
* `x` - Position x
"""
function SymmetricDoubleWell(x)
return (x^2 -1)^2;
end
"""
`AsymmetricDoubleWell` - An asymmetric double well potential; requires |δ| < 1 for
multiple minima.
### Fields
* `x` - Position x
### Optional Fields
* `δ = 0.5` - Asymmetry parameter
"""
function AsymmetricDoubleWell(x; δ = 0.5)
return x^4 - 1.5 * x^2 - δ * x;
end
"""
`FatSkinnyDoubleWell10` - The fat-skinny double well of degree 10. This introduces
entropic effects in dimension 1.
### Fields
* `x` - Position x
"""
function FatSkinnyDoubleWell10(x)
return ((8-5*x)^8 * (2+5*x)^2)/(2^26);
end
| TestLandscapes | https://github.com/gideonsimpson/TestLandscapes.jl.git |
|
[
"MIT"
] | 0.2.0 | 3809c2b0ff7eee0abc9ed47a85135351583b9742 | code | 2616 | """
`EntropicSwitch` - Entropically switching potential with three local minima. It
is symmetric about x=0
### Fields
* `x` - Position x in ℝ²
"""
function EntropicSwitch(x)
return (3 * exp(-x[1]^2 - (x[2]-1/3)^2)
- 3 * exp(-x[1]^2 - (x[2]-5/3)^2)
- 5 * exp(-(x[1]-1)^2 - x[2]^2)
- 5 * exp(-(x[1]+1)^2 - x[2]^2)
+ 1/5 * x[1]^4 + 1/5 * (x[2]-1/3)^4);
end
"""
`SymmetricTwoChannel` - Double well potential in 2D with two, symmetric channels
joining them.
### Fields
* `x` - Position x in ℝ²
"""
function SymmetricTwoChannel(x)
return 1/6 * (4 * (1-x[1]^2-x[2]^2)^2 + 2 *(x[1]^2-2)^2
+ ((x[1]+x[2])^2 - 1 )^2+ ((x[1]-x[2])^2 - 1 )^2);
end
"""
`Muller` - The Muller potential, which has three distinct minima and is highly asymmetric.
### Fields
* `x` - Position x in ℝ²
"""
function Muller(x)
aa = (-1, -1, -6.5, 0.7);
bb = (0., 0., 11., 0.6);
cc = (-10., -10., -6.5, 0.7);
AA = (-200., -100., -170., 15.);
XX = (1., 0., -0.5, -1.);
YY = (0., 0.5, 1.5, 1.);
return ( AA[1]*exp(aa[1]*(x[1]-XX[1])^2+bb[1]*(x[1]-XX[1])*(x[2]-YY[1])+cc[1]*(x[2]-YY[1])^2)
+AA[2]*exp(aa[2]*(x[1]-XX[2])^2+bb[2]*(x[1]-XX[2])*(x[2]-YY[2])+cc[2]*(x[2]-YY[2])^2)
+AA[3]*exp(aa[3]*(x[1]-XX[3])^2+bb[3]*(x[1]-XX[3])*(x[2]-YY[3])+cc[3]*(x[2]-YY[3])^2)
+AA[4]*exp(aa[4]*(x[1]-XX[4])^2+bb[4]*(x[1]-XX[4])*(x[2]-YY[4])+cc[4]*(x[2]-YY[4])^2));
end
"""
`Rosenbrock` - Banana-shaped Rosenbrock potential whose global minimum is located
at (a, a²).
### Fields
* `x` - Position x in ℝ²
### Optional Fields
* `a = 1.0` - Rosenbrock parameter
* `b = 100.0` - Rosenbrock parameter
"""
function Rosenbrock(x; a = 1.0, b = 100.0)
return (a-x[1])^2 + b * (x[2]-x[1]^2)^2
end
"""
`Zpotential` - Z shaped potential.
### Fields
* `x` - Position x in ℝ²
"""
function Zpotential(x)
return (x[1]^4 + x[2]^4)/20480 -
3 * exp(-0.01 * (x[1]+5)^2 -0.2 * (x[2]+5)^2) -
3 * exp(-0.01 * (x[1]-5)^2 -0.2 * (x[2]-5)^2) +
5 * exp(-0.2 * (x[1]+3*(x[2]-3))^2)/(1+exp(-x[1]-3)) +
5 * exp(-0.2 * (x[1]+3*(x[2]+3))^2)/(1+exp(x[1]-3)) +
3 * exp(-0.01 *(x[1]^2 + x[2]^2))
end
"""
`EntropicBox` - A potential concentrated in [0,1]² with internal entropic
barriers. Formulated by D. Aristoff (Colorado State).
### Fields
* `x` - Position x in ℝ²
"""
function EntropicBox(x)
c = (50.5, 49.5, 10^5, 51, 49);
return exp(-(c[1]*(x[1]-0.25).^2+c[1]*(x[2]-0.75).^2+2*c[2]*(x[1]-0.25).*(x[2]-0.75))) +
exp(-c[3]*(x[1]^2*(1-x[1])^2*x[2]^2*(1-x[2])^2)) +
0.5*exp(-(c[4]*x[1]^2+c[4]*x[2]^2-2*c[5]*x[1]*x[2]));
end | TestLandscapes | https://github.com/gideonsimpson/TestLandscapes.jl.git |
|
[
"MIT"
] | 0.2.0 | 3809c2b0ff7eee0abc9ed47a85135351583b9742 | code | 642 | module TestPotentials1D
# a module of simple potentials for testing optimization and sampling methods
"""
SymmetricDoubleWell - The classic symmetric doublewell potential.
"""
function SymmetricDoubleWell(x)
return (x^2 -1)^2;
end
"""
ASymmetricDoubleWell - An asymmetric double well potential; requires |δ| < 1 for
multiple minima.
"""
function ASymmetricDoubleWell(x; δ=0.5)
return x^4 - 1.5 * x^2 - δ * x;
end
"""
FatSkinnyDoubleWell10 - The fat-skinny double well of degree 10. This introduces
entropic effects in dimension 1.
"""
function FatSkinnyDoubleWell10(x)
return ((8-5*x)^8 * (2+5*x)^2)/(2^26);
end
end # end module
| TestLandscapes | https://github.com/gideonsimpson/TestLandscapes.jl.git |
|
[
"MIT"
] | 0.2.0 | 3809c2b0ff7eee0abc9ed47a85135351583b9742 | code | 2060 | module TestPotentials2D
using StaticArrays
# a module of simple potentials for testing optimization and sampling methods in
# 2D
"""
EntropicSwitch - Entropically switching potential with three local minima. It
is symmetric about x=0
"""
function EntropicSwitch(x)
return (3 * exp(-x[1]^2 - (x[2]-1/3)^2)
- 3 * exp(-x[1]^2 - (x[2]-5/3)^2)
- 5 * exp(-(x[1]-1)^2 - x[2]^2)
- 5 * exp(-(x[1]+1)^2 - x[2]^2)
+ 1/5 * x[1]^4 + 1/5 * (x[2]-1/3)^4);
end
"""
SymmetricTwoChannel - Double well potential in 2D with two, symmetric channels
joining them.
"""
function SymmetricTwoChannel(x)
return 1/6 * (4 * (1-x[1]^2-x[2]^2)^2 + 2 *(x[1]^2-2)^2
+ ((x[1]+x[2])^2 - 1 )^2+ ((x[1]-x[2])^2 - 1 )^2);
end
"""
Muller - The Muller potential, which has three distinct minima and is highly asymmetric.
"""
function Muller(x)
aa = @SVector [-1, -1, -6.5, 0.7];
bb = @SVector [0., 0., 11., 0.6];
cc = @SVector [-10., -10., -6.5, 0.7];
AA = @SVector [-200., -100., -170., 15.];
XX = @SVector [1., 0., -0.5, -1.];
YY = @SVector [0., 0.5, 1.5, 1.];
return ( AA[1]*exp(aa[1]*(x[1]-XX[1])^2+bb[1]*(x[1]-XX[1])*(x[2]-YY[1])+cc[1]*(x[2]-YY[1])^2)
+AA[2]*exp(aa[2]*(x[1]-XX[2])^2+bb[2]*(x[1]-XX[2])*(x[2]-YY[2])+cc[2]*(x[2]-YY[2])^2)
+AA[3]*exp(aa[3]*(x[1]-XX[3])^2+bb[3]*(x[1]-XX[3])*(x[2]-YY[3])+cc[3]*(x[2]-YY[3])^2)
+AA[4]*exp(aa[4]*(x[1]-XX[4])^2+bb[4]*(x[1]-XX[4])*(x[2]-YY[4])+cc[4]*(x[2]-YY[4])^2));
end
"""
Rosenbrock - Banana-shaped Rosenbrock potential whose global minimum is located
at (a, a²).
"""
function Rosenbrock(x; a=1.0, b=100.0)
return (a-x[1])^2 + b * (x[2]-x[1]^2)^2
end
"""
Zpotential - Z shaped potential.
"""
function Zpotential(x)
return (x[1]^4 + x[2]^4)/20480 -
3 * exp(-0.01 * (x[1]+5)^2 -0.2 * (x[2]+5)^2) -
3 * exp(-0.01 * (x[1]-5)^2 -0.2 * (x[2]-5)^2) +
5 * exp(-0.2 * (x[1]+3*(x[2]-3))^2)/(1+exp(-x[1]-3)) +
5 * exp(-0.2 * (x[1]+3*(x[2]+3))^2)/(1+exp(x[1]-3)) +
3 * exp(-0.01 *(x[1]^2 + x[2]^2))
end
end # module
| TestLandscapes | https://github.com/gideonsimpson/TestLandscapes.jl.git |
|
[
"MIT"
] | 0.2.0 | 3809c2b0ff7eee0abc9ed47a85135351583b9742 | docs | 1224 | # TestLandscapes.jl
Julia implementations of basic potential energy landscapes for testing sampling,
optimization, etc.
This package can be added with the command:
```
(@v1.XYZ) pkg> add TestLandscapes
```
Currently, these landscapes are in dimensions one and two, but they allow for
exploration of multiple minima, along with energetic and entropic bottlenecks.
These codes do **not** include derivatives; these can be obtained via automatic differentiation using ForwardDiff.jl,
https://github.com/JuliaDiff/ForwardDiff.jl, as sketched below.
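A minimal sketch, assuming ForwardDiff.jl is installed alongside this package:
```julia
using TestLandscapes, ForwardDiff

# derivative of a 1D potential at a point
dV(x) = ForwardDiff.derivative(SymmetricDoubleWell, x)
dV(0.5)

# gradient of the 2D Muller potential at a point
gradV(x) = ForwardDiff.gradient(Muller, x)
gradV([0.0, 0.5])
```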
# Acknowledgements
This work was supported in part by the US National Science Foundation Grant DMS-1818716.
# References
These landscapes are motivated by the following publications:
* *Illustration of transition path theory on a collection of simple examples*, Metzner, Schütte, and Vanden-Eijnden, J. Chem. Phys., 125, 084110, 2006.
* *Free Energy Computations*, Lelièvre, Rousset, and Stoltz, Imperial College Press, 2006.
* *Role of Ito's lemma in sampling pinned diffusion paths in the continuous-time limit*, Malsom and Pinski, Phys. Rev. E, 94, 042131, 2016.
* *Nonlinear reaction coordinate analysis in the reweighted path ensemble*, Lechner, Rogal, Juraszek, Ensing, and Bolhuis, J. Chem. Phys., 133, 174110, 2010.
| TestLandscapes | https://github.com/gideonsimpson/TestLandscapes.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | code | 831 | using BLASBenchmarksCPU
using Documenter
makedocs(;
modules=[BLASBenchmarksCPU],
authors="Chris Elrod <[email protected]> and contributors",
repo="https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl/blob/{commit}{path}#L{line}",
sitename="BLASBenchmarksCPU.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://JuliaLinearAlgebra.github.io/BLASBenchmarksCPU.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
"Usage" => "usage.md",
"Turbo" => "turbo.md",
"Memory Required for Large Matrices" => "memory-required.md",
"Public API" => "public-api.md",
"Internals (Private)" => "internals.md",
],
)
deploydocs(;
repo="github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl",
)
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | code | 1947 | module BLASBenchmarksCPU
# BLAS libs (& Libdl)
@static if Sys.ARCH === :x86_64
using MKL_jll
end
using OpenBLAS_jll, blis_jll#, Libdl
# Julia BLAS
using Tullio, Octavian, Gaius
# utils: LoopVectorization for Tullio.jl, VectorizationBase for info
using LoopVectorization, VectorizationBase
using VectorizationBase: num_cores, align
using Static: StaticInt
using RecursiveFactorization
using Random
# Adjoint
using LinearAlgebra
# Utils
using BenchmarkTools, ProgressMeter
# Plotting & presenting results
using Cairo
using Fontconfig, Gadfly, Colors, DataFrames
export benchmark_result_type
export benchmark_result_df
export benchmark_result_threaded
export logspace
export plot
export runbench
# BLIS
export gemmblis!
export blis_set_num_threads
# Octavian.jl
export matmul!
# OpenBLAS
export gemmopenblas!
export openblas_set_num_threads
# MKL
export gemmmkl!, gemmmkl_direct!
export mkl_set_num_threads
# set threads
@static if Sys.ARCH === :x86_64
const libMKL = MKL_jll.libmkl_rt # more convenient name
function mkl_set_num_threads(N::Union{Integer, StaticInt})
ccall((:MKL_Set_Num_Threads,libMKL), Cvoid, (Int32,), N % Int32)
end
end
const libOPENBLAS = OpenBLAS_jll.libopenblas # more convenient name
function openblas_set_num_threads(N::Union{Integer, StaticInt})
ccall((:openblas_set_num_threads64_,libOPENBLAS), Cvoid, (Int64,), N)
end
const libBLIS = blis_jll.blis # more convenient name
function blis_set_num_threads(N::Union{Integer, StaticInt})
ccall((:bli_thread_set_num_threads,libBLIS), Cvoid, (Int32,), N)
end
function blis_get_num_threads(::Union{Integer, StaticInt})
ccall((:bli_thread_get_num_threads,libBLIS), Int32, ())
end
include("ccallblas.jl")
include("benchconfig.jl")
include("runbenchmark.jl")
include("plotting.jl")
function __init__()
Sys.ARCH === :x86_64 && mkl_set_num_threads(num_cores())
openblas_set_num_threads(num_cores())
blis_set_num_threads(num_cores())
end
end
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | code | 1840 |
function tmul_threads!(C, A, B)
@tullio C[m,n] = A[m,k] * B[k,n]
end
function tmul_no_threads!(C, A, B)
@tullio C[m,n] = A[m,k] * B[k,n] threads=false
end
function lvmul_threads!(C, A, B)
@avxt for n ∈ indices((C,B), 2), m ∈ indices((C,A), 1)
Cmn = zero(eltype(C))
for k ∈ indices((A,B), (2,1))
Cmn += A[m,k] * B[k,n]
end
C[m,n] = Cmn
end
end
function lvmul_no_threads!(C, A, B)
@avx for n ∈ indices((C,B), 2), m ∈ indices((C,A), 1)
Cmn = zero(eltype(C))
for k ∈ indices((A,B), (2,1))
Cmn += A[m,k] * B[k,n]
end
C[m,n] = Cmn
end
end
function generic_matmul!(C, A, B)
istransposed(C) === 'N' || (generic_matmul!(untransposed(C), _transpose(B), _transpose(A)); return C)
transA = istransposed(A)
transB = istransposed(B)
pA = untransposed(A);
pB = untransposed(B)
LinearAlgebra.generic_matmatmul!(C, transA, transB, pA, pB)
end
function getfuncs(libs::Vector{Symbol}, threaded::Bool)::Vector{Function}
map(libs) do i
if i === :MKL
gemmmkl!
elseif i === :MKL_DIRECT || i === :MKL_direct
gemmmkl_direct!
elseif i === :OpenBLAS
gemmopenblas!
elseif i === :BLIS || i === :blis
gemmblis!
elseif i === :Octavian
threaded ? matmul! : matmul_serial!
elseif i === :Tullio
threaded ? tmul_threads! : tmul_no_threads!
elseif i === :Gaius
threaded ? Gaius.mul! : Gaius.mul_serial!
elseif i === :LoopVectorization
threaded ? lvmul_threads! : lvmul_no_threads!
elseif i === :generic || i === :Generic || i === :GENERIC
generic_matmul!
else
throw("Library $i not reognized.")
end
end
end
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | code | 6547 |
using LinearAlgebra: Adjoint, Transpose, UnitLowerTriangular, LowerTriangular, UnitUpperTriangular, UpperTriangular, AbstractTriangular
istransposed(x) = 'N'
istransposed(x::Adjoint) = 'C'
istransposed(x::Adjoint{<:Real}) = 'T'
istransposed(x::Transpose) = 'T'
untransposed(A) = A
untransposed(A::Adjoint) = adjoint(A)
untransposed(A::Transpose) = transpose(A)
_transpose(A) = A'
_transpose(A::Adjoint) = adjoint(A)
_transpose(A::Transpose) = transpose(A)
uplochar(::Union{LowerTriangular,UnitLowerTriangular}) = 'L'
uplochar(::Union{UpperTriangular,UnitUpperTriangular}) = 'U'
diagchar(::Union{LowerTriangular,UpperTriangular}) = 'N'
diagchar(::Union{UnitLowerTriangular,UnitUpperTriangular}) = 'U'
untranspose_flag(A::Adjoint, f::Bool = false) = untranspose_flag(parent(A), !f)
untranspose_flag(A::Transpose, f::Bool = false) = untranspose_flag(parent(A), !f)
untranspose_flag(A, f::Bool = false) = (A, f)
struct LU{T,M<:StridedMatrix{T},I}
factors::M
ipiv::Vector{I}
info::I
end
function getrf_ipiv(A::StridedMatrix, ::Val{T}) where {T}
M,N = size(A)
Vector{T}(undef, min(M,N))
end
function _ipiv_rows!(A::LU, order::OrdinalRange, B::StridedVecOrMat)
@inbounds for i ∈ order
i ≠ A.ipiv[i] && LinearAlgebra._swap_rows!(B, i, A.ipiv[i])
end
B
end
_apply_ipiv_rows!(A::LU, B::StridedVecOrMat) = _ipiv_rows!(A, 1 : length(A.ipiv), B)
function _ipiv_cols!(A::LU, order::OrdinalRange, B::StridedVecOrMat)
ipiv = A.ipiv
@inbounds for i ∈ order
i ≠ ipiv[i] && LinearAlgebra._swap_cols!(B, i, ipiv[i])
end
B
end
_apply_inverse_ipiv_cols!(A::LU, B::StridedVecOrMat) = _ipiv_cols!(A, length(A.ipiv) : -1 : 1, B)
for (name,BlasInt,suff) ∈ [
("mkl", :Int64, ""),
("openblas", :Int64, "_64_"),
("blis", :Int64, "_64_")
]
(Sys.ARCH !== :x86_64 && name === "mkl") && continue
uname = uppercase(name)
lib = Symbol("lib", uname)
fgemm = Symbol("gemm", name, '!')
fgetrf = Symbol("getrf", name, '!')
ftrsm = Symbol("trsm", name, '!')
frdiv = Symbol("rdiv", name)
fldiv = Symbol("ldiv", name)
frdivbang = Symbol("rdiv", name, '!')
fldivbang = Symbol("ldiv", name, '!')
flu = Symbol("lu", name)
flubang = Symbol("lu", name, '!')
for (T,prefix) ∈ [(:Float32,'s'),(:Float64,'d')]
fmgemm = QuoteNode(Symbol(prefix, "gemm", suff))
fmgetrf = QuoteNode(Symbol(prefix, "getrf", suff))
fmtrsm = QuoteNode(Symbol(prefix, "trsm", suff))
@eval begin
function $fgemm(
C::AbstractMatrix{$T}, A::AbstractMatrix{$T}, B::AbstractMatrix{$T}, α = one($T), β = zero($T)
)
istransposed(C) === 'N' || ($fgemm(untransposed(C), _transpose(B), _transpose(A)); return C)
transA = istransposed(A)
transB = istransposed(B)
pA = untransposed(A);
pB = untransposed(B)
M, N = size(C); K = size(B, 1)
ldA = stride(pA, 2)
ldB = stride(pB, 2)
ldC = stride(C, 2)
ccall(
($fmgemm, $lib), Cvoid,
(Ref{UInt8}, Ref{UInt8}, Ref{$BlasInt}, Ref{$BlasInt}, Ref{$BlasInt}, Ref{$T}, Ref{$T},
Ref{$BlasInt}, Ref{$T}, Ref{$BlasInt}, Ref{$T}, Ref{$T}, Ref{$BlasInt}),
transA, transB, M, N, K, α, pA, ldA, pB, ldB, β, C, ldC
)
C
end
end
name == "blis" && continue
@eval begin
function $fgetrf(
A::StridedMatrix{$T}, ipiv::StridedVector{$BlasInt} = getrf_ipiv(A, Val($BlasInt))
)
M, N = size(A)
info = Ref{$BlasInt}()
ccall(
($fmgetrf,$lib), Cvoid, (Ref{$BlasInt},Ref{$BlasInt},Ptr{$T},Ref{$BlasInt},Ptr{$BlasInt},Ref{$BlasInt}),
M, N, A, max(1,stride(A,2)), ipiv, info
)
LU(A, ipiv, info[])
end
function $ftrsm(
B::StridedMatrix{$T}, α::$T, A::AbstractMatrix{$T}, side::Char
)
M, N = size(A)
pA, transa = untranspose_flag(A)
uplo = uplochar(pA)
diag = diagchar(pA)
ppA = parent(pA)
ccall(
($fmtrsm, $lib), Cvoid, (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{$BlasInt}, Ref{$BlasInt}, Ref{$T}, Ptr{Float64}, Ref{$BlasInt}, Ptr{$T}, Ref{$BlasInt}),
side, uplo, ifelse(transa, 'T', 'N'), diag, M % $BlasInt, N % $BlasInt, α, ppA, max(1,stride(ppA,2)), B, max(1,stride(B,2))
)
B
end
end
end
name == "blis" && continue
@eval begin
function $frdivbang(
A::StridedMatrix{T}, B::AbstractTriangular{T}
) where {T <: Union{Float32,Float64}}
$ftrsm(A, one(T), B, 'R')
end
function $fldivbang(
A::StridedMatrix{T}, B::AbstractTriangular{T}
) where {T <: Union{Float32,Float64}}
$ftrsm(A, one(T), B, 'L')
end
$flubang(A::StridedMatrix) = $fgetrf(A, getrf_ipiv(A, Val($BlasInt)))
$flu(A::StridedMatrix) = $flubang(copy(A))
function $frdivbang(A::AbstractMatrix, B::LU{<:Any,<:AbstractMatrix})
$frdivbang($frdivbang(A, UpperTriangular(B.factors)), UnitLowerTriangular(B.factors))
_apply_inverse_ipiv_cols!(B, A) # mutates `A`
end
function $fldivbang(A::LU{<:Any,<:AbstractMatrix}, B::AbstractMatrix)
_apply_ipiv_rows!(A, B)
$fldivbang($fldivbang(B, UnitLowerTriangular(A.factors)), UpperTriangular(A.factors))
end
$frdivbang(A::AbstractMatrix, B::AbstractMatrix) = $frdivbang(A, $flubang(B))
$fldivbang(A::AbstractMatrix, B::AbstractMatrix) = $fldivbang($flubang(A), B)
$frdiv(A::AbstractMatrix, B) = $frdivbang(copy(A), copy(B))
$fldiv(A::AbstractMatrix, B) = $fldivbang(copy(A), copy(B))
end
end
@static if Sys.ARCH === :x86_64
let BlasInt = :Int64
for (T,prefix) ∈ [(:Float32,'s'),(:Float64,'d')]
f = Symbol(prefix, "gemm_direct")
@eval begin
@inline function gemmmkl_direct!(C::AbstractMatrix{$T}, A::AbstractMatrix{$T}, B::AbstractMatrix{$T}, α = one($T), β = zero($T))
istransposed(C) === 'N' || (gemmmkl_direct!(untransposed(C), _transpose(B), _transpose(A)); return C)
transA = istransposed(A)
transB = istransposed(B)
pA = untransposed(A);
pB = untransposed(B)
M, N = size(C); K = size(B, 1)
ldA = stride(pA, 2)
ldB = stride(pB, 2)
ldC = stride(C, 2)
ccall(
($(QuoteNode(f)), libMKL), Cvoid,
(Ref{UInt8}, Ref{UInt8}, Ref{$BlasInt}, Ref{$BlasInt}, Ref{$BlasInt}, Ref{$T}, Ref{$T},
Ref{$BlasInt}, Ref{$T}, Ref{$BlasInt}, Ref{$T}, Ref{$T}, Ref{$BlasInt}, Ref{$BlasInt}),
transA, transB, M, N, K, α, pA, ldA, pB, ldB, β, C, ldC, 0
)
C
end
end
end
end
end
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | code | 5346 |
####################################################################################################
####################################### Colors #####################################################
####################################################################################################
const LIBRARIES = [:Octavian, :MKL, :OpenBLAS, :blis, :Tullio, :Gaius, :LoopVectorization, :Generic, :RecursiveFactorization, :TriangularSolve];
"""
Defines the mapping between libraries and colors
"""# #0071c5 == Intel Blue
# make sure colors are distinguishable against white background by adding white to the seed list,
# then deleting it from the resultant palette
palette = distinguishable_colors(length(LIBRARIES) + 2, [colorant"white", colorant"black", colorant"#66023C", colorant"#0071c5"])
deleteat!(palette, 1); deleteat!(palette, 1)
const COLOR_MAP = Dict(zip(LIBRARIES, palette))
getcolor(l::Symbol) = COLOR_MAP[l]
for (alias,ref) ∈ [(:BLIS,:blis),(:generic,:Generic),(:GENERIC,:Generic)]
COLOR_MAP[alias] = COLOR_MAP[ref]
end
const JULIA_LIBS = Set(["Octavian", "Tullio", "Gaius", "Generic", "GENERIC", "generic", "RecursiveFactorization", "TriangularSolve"])
isjulialib(x) = x ∈ JULIA_LIBS
####################################################################################################
####################################### Plots ######################################################
####################################################################################################
function pick_suffix(desc = "")
suffix = if Bool(VectorizationBase.has_feature(Val(:x86_64_avx512f)))
"AVX512"
elseif Bool(VectorizationBase.has_feature(Val(:x86_64_avx2)))
"AVX2"
elseif Bool(VectorizationBase.has_feature(Val(:x86_64_avx)))
"AVX"
else
"REGSIZE$(Int(VectorizationBase.register_size()))"
end
if desc != ""
suffix *= '_' * desc
end
"$(Sys.CPU_NAME)_$suffix"
end
function _pkgdir()
return dirname(dirname(@__FILE__))
end
"""
default_plot_directory()
"""
function default_plot_directory()
return joinpath(_pkgdir(), "docs", "src", "assets")
end
"""
default_plot_filename(br::BenchmarkResult;
desc,
logscale)
"""
function default_plot_filename(br::BenchmarkResult{T};
desc::AbstractString,
logscale::Bool) where {T}
l, u = extrema(br.sizes)
if logscale
desc *= "_logscale"
end
desc = (br.threaded ? "_multithreaded" : "_singlethreaded") * desc
suffix = pick_suffix(desc)
return "gemm_$(string(T))_$(l)_$(u)_$(suffix)"
end
"""
plot(br::BenchmarkResult;
desc = "",
logscale = false,
width = 1200,
height = 600,
measure = :minimum,
plot_directory = default_plot_directory(),
plot_filename = default_plot_filename(br; desc = desc, logscale = logscale),
file_extensions = ["svg", "png"],
displayplot = true)
`measure` refers to the BenchmarkTools summary on times. Valid options are:
`:minimum`, `:median`, `:mean`, `:maximum`, and `:hmean`.
- `:minimum` would yield the maximum `GFLOPS`, and would be the usual estimate used in Julia.
- `:hmean`, the harmonic mean of the times, is useful if you want an average GFLOPS, instead of a GFLOPS computed with the average times.
"""
function Gadfly.plot(br::BenchmarkResult{T}; kwargs...) where {T}
_plot(br; kwargs...)
end
roundint(x) = round(Int,x)
# `_plot` is just like `plot`, except _plot returns the filenames
function _plot(
br::BenchmarkResult{T};
desc::AbstractString = "",
logscale::Bool = false,
width = 12inch,
height = 8inch,
measure = :minimum,
plot_directory::AbstractString = default_plot_directory(),
plot_filename::AbstractString = default_plot_filename(br; desc = desc, logscale = logscale),
file_extensions = ["svg", "png"],
displayplot = true
) where {T}
j = get_measure_index(measure) # throw early if `measure` invalid
colors = getcolor.(br.libraries);
libraries = string.(br.libraries)
xscale = logscale ? Scale.x_log10(labels=string ∘ roundint ∘ exp10) : Scale.x_continuous
plt = plot(
Gadfly.Guide.manual_color_key("Libraries", libraries, colors),
Guide.xlabel("Size"), Guide.ylabel("GFLOPS"), xscale#, xmin = minsz, xmax = maxsz
)
for i ∈ eachindex(libraries)
linestyle = isjulialib(libraries[i]) ? :solid : :dash
l = layer(
x = br.sizes, y = br.gflops[:,i,j],
Geom.line, Theme(default_color = colors[i], line_style = [linestyle])
)
push!(plt, l)
end
minsz, maxsz = extrema(br.sizes)
if logscale
l10min = log10(minsz); l10max = log10(maxsz);
push!(plt, Stat.xticks(ticks = range(l10min, l10max, length=round(Int,(1+2*(l10max-l10min))))))
end
displayplot && display(plt)
mkpath(plot_directory)
_filenames = String[]
extension_dict = Dict("svg" => SVG, "png" => PNG, "pdf" => PDF, "ps" => PS)
for ext in file_extensions
_filename = joinpath(plot_directory, "$(plot_filename).$(ext)")
draw(extension_dict[ext](_filename, width, height), plt)
push!(_filenames, _filename)
end
return _filenames
end
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | code | 14108 | struct BenchmarkResult{T,I<:Union{Int,NTuple{3,Int}}}
libraries::Vector{Symbol}
sizes::Vector{I}
gflops::Array{Float64,3}
times::Array{Float64,3}
threaded::Bool
end
function BenchmarkResult{T}(libraries, sizes, gflops, times, threaded) where {T}
gflopsperm = permutedims(gflops, (2,3,1))
timesperm = permutedims(times, (2,3,1))
I = eltype(sizes)
BenchmarkResult{T,I}(libraries, convert(Vector{I},sizes), gflopsperm, timesperm, threaded)
end
"""
benchmark_result_type(benchmark_result::BenchmarkResult)

Return the matrix element type `T` that was benchmarked.
"""
function benchmark_result_type(::BenchmarkResult{T}) where {T}
return T
end
function get_measure_index(measure::Symbol)::Int
j = findfirst(==(measure), (:minimum,:median,:mean,:maximum,:hmean))
if j === nothing
throw(ArgumentError("`measure` argument must be one of (:minimum,:median,:mean,:maximum,:hmean), but was $(repr(measure))."))
end
return j
end
function _benchmark_result_df(sizes, libraries, mat, measure)
j = get_measure_index(measure)
df = DataFrame(Size = sizes)
for i ∈ eachindex(libraries)
setproperty!(df, libraries[i], mat[:,i,j])
end
return df
end
function _benchmark_result_df(br::BenchmarkResult, s::Symbol = :gflops, measure = :minimum)
_benchmark_result_df(br.sizes, br.libraries, getproperty(br, s), measure)
end
"""
benchmark_result_df(benchmark_result::BenchmarkResult, measure = :minimum)

`measure` refers to the BenchmarkTools summary on times. Valid options are:
`:minimum`, `:median`, `:mean`, `:maximum`, and `:hmean`.
- `:minimum` would yield the maximum `GFLOPS`, and is the usual estimate used in Julia.
- `:hmean`, the harmonic mean of the times, is useful if you want an average GFLOPS instead of a GFLOPS computed from the average times.
"""
function benchmark_result_df(benchmark_result::BenchmarkResult, measure = :minimum)
df = _benchmark_result_df(benchmark_result, :times, measure)
df = stack(df, Not(:Size), variable_name = :Library, value_name = :Seconds)
df.GFLOPS = @. 2e-9 * matmul_length(df.Size) ./ df.Seconds
return df
end
"""
benchmark_result_threaded(benchmark_result::BenchmarkResult)

Return `true` if the benchmarks were run multithreaded.
"""
function benchmark_result_threaded(benchmark_result::BenchmarkResult)
return benchmark_result.threaded
end
function Base.show(io::IO, br::BenchmarkResult{T}) where {T}
println(io, "Benchmark Result of Matrix{$T}, threaded = $(br.threaded)")
df = _benchmark_result_df(br)
println(io, df)
end
function maybe_sleep(x)
x > 1e-3 && sleep(x)
end
function benchmark_fun!(
f!::F,
C,
A,
B,
sleep_time,
discard_first,
reference,
comment::String, # `comment` is a place to put the library name, the dimensions of the matrices, etc.
) where {F}
maybe_sleep(sleep_time)
if discard_first
@elapsed f!(C, A, B)
end
t0 = @elapsed f!(C, A, B)
if (reference !== nothing) && (!(C ≈ reference))
msg = "C is not approximately equal to reference"
@error(msg, comment)
throw(ErrorException(msg))
end
if 2t0 < BenchmarkTools.DEFAULT_PARAMETERS.seconds
maybe_sleep(sleep_time)
br = @benchmark $f!($C, $A, $B)
tmin = min(1e-9minimum(br).time, t0)
tmedian = 1e-9median(br).time
tmean = 1e-9mean(br).time
tmax = 1e-9maximum(br).time # We'll exclude the first for this...
thmean⁻¹ = 1e9mean(inv, br.times)
else
maybe_sleep(sleep_time)
t1 = @elapsed f!(C, A, B)
maybe_sleep(sleep_time)
t2 = @elapsed f!(C, A, B)
if (t0+t1) < 4BenchmarkTools.DEFAULT_PARAMETERS.seconds
maybe_sleep(sleep_time)
t3 = @elapsed f!(C, A, B)
tmin = minimum((t0, t1, t2, t3))
tmedian = median((t0, t1, t2, t3))
tmean = mean((t0, t1, t2, t3))
tmax = maximum((t0, t1, t2, t3))
thmean⁻¹ = mean(inv, (t0, t1, t2, t3))
else
tmin = minimum((t0, t1, t2))
tmedian = median((t0, t1, t2))
tmean = mean((t0, t1, t2))
tmax = maximum((t0, t1, t2))
thmean⁻¹ = mean(inv, (t0, t1, t2))
end
end
return tmin, tmedian, tmean, tmax, thmean⁻¹
end
_mat_size(M, N, ::typeof(adjoint)) = (N, M)
_mat_size(M, N, ::typeof(transpose)) = (N, M)
_mat_size(M, N, ::typeof(identity)) = (M, N)
function alloc_mat(_M, _N, memory::Vector{T}, off, f = identity) where {T}
M, N = _mat_size(_M, _N, f)
A = f(reshape(view(memory, (off+1):(off+M*N)), (M, N)))
A, off + align(M*N, T)
end
matmul_sizes(s::Integer) = (s,s,s)
matmul_sizes(mkn::Tuple{Vararg{Integer,3}}) = mkn
matmul_length(s) = prod(matmul_sizes(s))
junk(::Type{T}) where {T <: Integer} = typemax(T) >> 1
junk(::Type{T}) where {T} = T(NaN)
struct LogSpace
r::StepRangeLen{Float64, Base.TwicePrecision{Float64}, Base.TwicePrecision{Float64}}
end
Base.IteratorSize(::Type{LogSpace}) = Base.HasShape{1}()
"""
logspace(start, stop, length)
Defines a monotonically increasing range, log spaced when possible. Useful for defining a range of sizes for benchmarks.
```julia
julia> collect(logspace(1,100,3))
3-element Vector{Int64}:
1
10
100
julia> collect(logspace(1,10,3))
3-element Vector{Int64}:
1
3
10
julia> collect(logspace(1,5,3))
3-element Vector{Int64}:
1
2
5
julia> collect(logspace(1,3,3))
3-element Vector{Int64}:
1
2
3
```
"""
logspace(start, stop, length) = LogSpace(range(log(start),log(stop), length = length))
function Base.iterate(ls::LogSpace)
i_s = iterate(ls.r)
i_s === nothing && return nothing
i, _s = i_s
v = round(Int, exp(i))
v, (_s, v)
end
function Base.iterate(ls::LogSpace, (s,l))
i_s = iterate(ls.r, s)
i_s === nothing && return nothing
i, _s = i_s
v = max(round(Int, exp(i)), l+1)
v, (_s, v)
end
Base.length(ls::LogSpace) = length(ls.r)
Base.size(ls::LogSpace) = (length(ls.r),)
Base.axes(ls::LogSpace) = axes(ls.r)
Base.eltype(::LogSpace) = Int
Base.convert(::Type{Vector{Int}}, l::LogSpace) = collect(l)
"""
all_libs()

Return the full list of libraries that can be benchmarked;
`:MKL` is included only on `x86_64`.
"""
function all_libs()
libs = Symbol[
:BLIS,
:Gaius,
:Octavian,
:OpenBLAS,
:Tullio,
:LoopVectorization
]
Sys.ARCH === :x86_64 && push!(libs, :MKL)
return libs
end
function _integer_libs()
libs_to_exclude = Symbol[:BLIS, :MKL, :OpenBLAS]
return sort(unique(setdiff(all_libs(), libs_to_exclude)))
end
"""
default_libs(T)

Return the default libraries to benchmark for element type `T`; for
`T <: Integer`, the BLAS libraries (BLIS, MKL, OpenBLAS) are excluded.
"""
function default_libs(::Type{T}) where {T}
if T <: Integer
return _integer_libs()
else
return all_libs()
end
end
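# Approximate flop count of an LU factorization of an `m` by `n` matrix,
# in GFLOP (hence the 1e-9 factor); `innerflop = 2` counts one multiply
# and one add per inner-loop update.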
function luflop(m, n=m; innerflop=2)
sum(1:min(m, n)) do k
invflop = 1
scaleflop = isempty(k+1:m) ? 0 : sum(k+1:m)
updateflop = isempty(k+1:n) ? 0 : sum(k+1:n) do j
isempty(k+1:m) ? 0 : sum(k+1:m) do i
innerflop
end
end
invflop + scaleflop + updateflop
end * 1e-9
end
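# GFLOP count of C = A*B with A of size m by k and B of size k by n
# (two flops, one multiply and one add, per inner-product element).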
gemmflop(m,n,k) = 2e-9m*n*k
"""
runbench(T = Float64;
libs = default_libs(T),
sizes = logspace(2, 4000, 200),
threaded::Bool = Threads.nthreads() > 1,
A_transform = identity,
B_transform = identity,
sleep_time = 0.0)
- T: The element type of the matrices.
- libs: Libraries to benchmark.
- sizes: Sizes of matrices to benchmark. Must be an iterable with either
`eltype(sizes) === Int` or `eltype(sizes) === NTuple{3,Int}`.
If the former, the matrices are square, with each dimension equal to the value.
If `i::NTuple{3,Int}`, it benchmarks `C = A * B` where `A` is `i[1]` by `i[2]`,
`B` is `i[2]` by `i[3]` and `C` is `i[1]` by `i[3]`.
- threaded: Should it benchmark multithreaded implementations?
- A_transform: a function to apply to `A`. Defaults to `identity`, but can be `adjoint`.
- B_transform: a function to apply to `B`. Defaults to `identity`, but can be `adjoint`.
- sleep_time: The use of this keyword argument is discouraged. If set, it will call `sleep`
in between benchmarks, the idea being to help keep the CPU cool. This is an unreliable
means of trying to get more reliable benchmarks. Instead, it's recommended you disable
your system's turbo. Disabling it -- and reenabling when you're done benchmarking --
should be possible without requiring a reboot.
"""
function runbench(
::Type{T} = Float64;
libs = default_libs(T),
sizes = logspace(2, 4000, 200),
threaded::Bool = Threads.nthreads() > 1,
A_transform = identity,
B_transform = identity,
sleep_time = 0.0
) where {T}
if threaded
Sys.ARCH === :x86_64 && mkl_set_num_threads(num_cores())
openblas_set_num_threads(num_cores())
blis_set_num_threads(num_cores())
else
Sys.ARCH === :x86_64 && mkl_set_num_threads(1)
openblas_set_num_threads(1)
blis_set_num_threads(1)
end
benchtime = BenchmarkTools.DEFAULT_PARAMETERS.seconds
BenchmarkTools.DEFAULT_PARAMETERS.seconds = 0.5
funcs = getfuncs(libs, threaded)
sizevec = collect(sizes)
# Hack to workaround https://github.com/JuliaCI/BenchmarkTools.jl/issues/127
# Use the same memory every time, to reduce accumulation
max_matrix_sizes = maximum(sizevec) do s
M, K, N = matmul_sizes(s)
align(M * K, T) + align(K * N, T) + align(M * N, T) * 2
end
memory = Vector{T}(undef, max_matrix_sizes)
library = reduce(vcat, (libs for _ ∈ eachindex(sizevec)))
times = Array{Float64}(undef, 5, length(sizes), length(libs))
gflop = similar(times);
discard_first = true # force when compiling
p = Progress(length(sizes))
gflop_report_type = NamedTuple{(:MedianGFLOPS, :MaxGFLOPS), Tuple{Float64, Float64}}
last_perfs = Vector{Tuple{Symbol,Union{gflop_report_type,NTuple{3,Int}}}}(undef, length(libs)+1)
for _j in 0:length(sizevec)-1
if iseven(_j)
j = (_j >> 1) + 1
else
j = length(sizevec) - (_j >> 1)
end
s = sizevec[j]
M, K, N = matmul_sizes(s)
A, off = alloc_mat(M, K, memory, 0, A_transform)
B, off = alloc_mat(K, N, memory, off, B_transform)
rand!(A); rand!(B);
C0, off = alloc_mat(M, N, memory, off)
C1, off = alloc_mat(M, N, memory, off)
last_perfs[1] = (:Size, (M,K,N) .% Int)
for i ∈ eachindex(funcs)
C, ref = i == 1 ? (C0, nothing) : (fill!(C1,junk(T)), C0)
lib = library[i]
comment = "lib=$(lib), M=$(M), K=$(K), N=$(N)"
t = benchmark_fun!(
funcs[i],
C,
A,
B,
sleep_time,
discard_first,
ref,
comment,
)
gffactor = gemmflop(M,K,N)
@inbounds for k ∈ 1:4
times[k,j,i] = t[k]
gflop[k,j,i] = gffactor / t[k]
end
times[5,j,i] = inv(t[5])
gflop[5,j,i] = gffactor * t[5]
gflops = (
MedianGFLOPS = round(gflop[2,j,i], sigdigits = 4),
MaxGFLOPS = round(gflop[1,j,i], sigdigits = 4)
)
last_perfs[i+1] = (libs[i], gflops)
end
ProgressMeter.next!(p; showvalues = last_perfs)
if isodd(_j)
discard_first = false
end
end
BenchmarkTools.DEFAULT_PARAMETERS.seconds = benchtime # restore previous state
BenchmarkResult{T}(libs, sizes, gflop, times, threaded)
end
@static if Sys.ARCH === :x86_64
const LUFUNCS = Dict(:RecursiveFactorization => RecursiveFactorization.lu!, :MKL => lumkl!, :OpenBLAS => luopenblas!)
else
const LUFUNCS = Dict(:RecursiveFactorization => RecursiveFactorization.lu!, :OpenBLAS => luopenblas!)
end
struct LUWrapperFunc{F}; f::F; end
(lu::LUWrapperFunc)(A,B,C) = lu.f(copyto!(A,B))
function runlubench(
::Type{T} = Float64;
libs = [:RecursiveFactorization, :MKL, :OpenBLAS],
sizes = logspace(2, 4000, 200),
threaded::Bool = Threads.nthreads() > 1,
A_transform = identity,
B_transform = identity,
sleep_time = 0.0
) where {T}
funcs = LUWrapperFunc.(getindex.(Ref(LUFUNCS), libs))
if threaded
mkl_set_num_threads(num_cores())
openblas_set_num_threads(num_cores())
else
mkl_set_num_threads(1)
openblas_set_num_threads(1)
end
benchtime = BenchmarkTools.DEFAULT_PARAMETERS.seconds
BenchmarkTools.DEFAULT_PARAMETERS.seconds = 0.5
sizevec = collect(sizes)
# Hack to workaround https://github.com/JuliaCI/BenchmarkTools.jl/issues/127
# Use the same memory every time, to reduce accumulation
max_matrix_sizes = 2maximum(sizevec)^2 + (256 ÷ sizeof(T))
memory = Vector{T}(undef, max_matrix_sizes)
library = reduce(vcat, (libs for _ β eachindex(sizevec)))
times = Array{Float64}(undef, 5, length(sizes), length(libs))
gflop = similar(times);
discard_first = true # force when compiling
p = Progress(length(sizes))
gflop_report_type = NamedTuple{(:MedianGFLOPS, :MaxGFLOPS), Tuple{Float64, Float64}}
last_perfs = Vector{Tuple{Symbol,Union{gflop_report_type,NTuple{2,Int}}}}(undef, length(libs)+1)
for _j in 0:length(sizevec)-1
if iseven(_j)
j = (_j >> 1) + 1
else
j = length(sizevec) - (_j >> 1)
end
N = sizevec[j]
M = N
A, off = alloc_mat(M, N, memory, 0, A_transform)
rand!(A); #rand!(B);
@inbounds for n ∈ 1:N, m ∈ 1:M
A[m,n] = (A[m,n] + (m == n))
end
B, off = alloc_mat(M, N, memory, off, B_transform)
last_perfs[1] = (:Size, (M,N) .% Int)
for i ∈ eachindex(funcs)
lib = library[i]
comment = "lib=$(lib), M=$(M), N=$(N)"
t = benchmark_fun!(
funcs[i],
B,
A,
nothing,
sleep_time,
discard_first,
nothing,
comment,
)
gffactor = luflop(M,N)
@inbounds for k ∈ 1:4
times[k,j,i] = t[k]
gflop[k,j,i] = gffactor / t[k]
end
times[5,j,i] = inv(t[5])
gflop[5,j,i] = gffactor * t[5]
gflops = (
MedianGFLOPS = round(gflop[2,j,i], sigdigits = 4),
MaxGFLOPS = round(gflop[1,j,i], sigdigits = 4)
)
last_perfs[i+1] = (libs[i], gflops)
end
ProgressMeter.next!(p; showvalues = last_perfs)
if isodd(_j)
discard_first = false
end
end
BenchmarkTools.DEFAULT_PARAMETERS.seconds = benchtime # restore previous state
BenchmarkResult{T}(libs, sizes, gflop, times, threaded)
end
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | code | 1758 |
import BLASBenchmarksCPU
import StatsPlots
@testset "Interface" begin
benchmark_result = BLASBenchmarksCPU.runbench(Float64; sizes = [1, 2, 5, 10, 20, 50, 100, 200], threaded=false) # test that threaded=false at least doesn't throw somewhere.
dfmin = BLASBenchmarksCPU.benchmark_result_df(benchmark_result) # minimum
dfmedian = BLASBenchmarksCPU.benchmark_result_df(benchmark_result, :median)
dfmean = BLASBenchmarksCPU.benchmark_result_df(benchmark_result, :mean)
dfmax = BLASBenchmarksCPU.benchmark_result_df(benchmark_result, :maximum)
@test_throws ArgumentError BLASBenchmarksCPU.benchmark_result_df(benchmark_result, :foobar)
@test dfmin isa BLASBenchmarksCPU.DataFrame
@test dfmedian isa BLASBenchmarksCPU.DataFrame
@test dfmean isa BLASBenchmarksCPU.DataFrame
@test dfmax isa BLASBenchmarksCPU.DataFrame
for df ∈ (dfmin,dfmedian,dfmean,dfmax)
df[!, :Size] = Float64.(df[!, :Size]);
df[!, :GFLOPS] = Float64.(df[!, :GFLOPS]);
df[!, :Seconds] = Float64.(df[!, :Seconds]);
p = StatsPlots.@df df StatsPlots.plot(:Size, :GFLOPS; group = :Library, legend = :bottomright)
@test p isa StatsPlots.Plots.Plot
end
@test all(dfmin[!, :GFLOPS] .≥ dfmedian[!, :GFLOPS])
@test all(dfmin[!, :GFLOPS] .≥ dfmean[!, :GFLOPS])
@test all(dfmin[!, :GFLOPS] .≥ dfmax[!, :GFLOPS])
@test any(dfmin[!, :GFLOPS] .≠ dfmedian[!, :GFLOPS])
@test any(dfmin[!, :GFLOPS] .≠ dfmean[!, :GFLOPS])
@test any(dfmin[!, :GFLOPS] .≠ dfmax[!, :GFLOPS])
@test any(dfmedian[!, :GFLOPS] .≥ dfmax[!, :GFLOPS])
@test any(dfmean[!, :GFLOPS] .≥ dfmax[!, :GFLOPS])
@test any(dfmedian[!, :GFLOPS] .≠ dfmax[!, :GFLOPS])
@test any(dfmean[!, :GFLOPS] .≠ dfmax[!, :GFLOPS])
end
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | code | 1103 | sizes = [10, 20, 30]
if Threads.nthreads() > 1
threaded = true
else
threaded = false
end
for T in [Float64, Float32]
@info "" T sizes threaded
benchmark_result = runbench(
T;
sizes = sizes,
threaded = threaded,
)
@test benchmark_result isa BLASBenchmarksCPU.BenchmarkResult
@test benchmark_result_type(benchmark_result) === T
df = benchmark_result_df(benchmark_result)
@test df isa BLASBenchmarksCPU.DataFrame
plot_directory = mktempdir()
BLASBenchmarksCPU.plot(
benchmark_result;
plot_directory = plot_directory,
displayplot = false
)
BLASBenchmarksCPU.plot(
benchmark_result;
plot_directory = plot_directory,
measure = :median,
displayplot = false
)
BLASBenchmarksCPU.plot(
benchmark_result;
plot_directory = plot_directory,
measure = :mean,
displayplot = false
)
BLASBenchmarksCPU.plot(
benchmark_result;
plot_directory = plot_directory,
measure = :maximum,
displayplot = false
)
end
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | code | 241 | using BLASBenchmarksCPU
using Test
import InteractiveUtils
import VectorizationBase
include("test-suite-preamble.jl")
@info("VectorizationBase.num_cores() is $(VectorizationBase.num_cores())")
include("main.jl")
include("interface.jl")
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | code | 335 | using InteractiveUtils: versioninfo
versioninfo()
@info("Sys.CPU_THREADS is $(Sys.CPU_THREADS)")
@info("Threads.nthreads() is $(Threads.nthreads()) threads")
function is_coverage()
return !iszero(Base.JLOptions().code_coverage)
end
const coverage = is_coverage()
@info("Code coverage is $(coverage ? "enabled" : "disabled")")
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | docs | 951 | # BLASBenchmarksCPU
[](https://JuliaLinearAlgebra.github.io/BLASBenchmarksCPU.jl/stable)
[](https://JuliaLinearAlgebra.github.io/BLASBenchmarksCPU.jl/dev)
| Julia | CI |
| ------- | -- |
| v1 | [](https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl/actions) |
| nightly | [/badge.svg)](https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl/actions?query=workflow%3A%22CI+%28Julia+nightly%29%22) |
BLASBenchmarksCPU is a Julia package for benchmarking BLAS libraries on CPUs.
Please see the [documentation](https://JuliaLinearAlgebra.github.io/BLASBenchmarksCPU.jl/stable).
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | docs | 483 | ```@meta
CurrentModule = BLASBenchmarksCPU
```
# BLASBenchmarksCPU
[BLASBenchmarksCPU](https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl)
is a Julia package for benchmarking BLAS libraries on CPUs.
The source code for this package is available in the
[GitHub repository](https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl).
## Related Packages
You may also be interested in:
1. [BLASBenchmarksGPU.jl](https://github.com/JuliaLinearAlgebra/BLASBenchmarksGPU.jl)
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | docs | 190 | ```@meta
CurrentModule = BLASBenchmarksCPU
```
# Internals (Private)
```@index
Pages = ["internals.md"]
```
```@autodocs
Modules = [BLASBenchmarksCPU]
Private = true
Public = false
```
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | docs | 2728 | ```@meta
CurrentModule = BLASBenchmarksCPU
```
# Memory Required for Large Matrices
This table shows how much memory is required for four matrices of the given size and type. (We need four matrices: `A`, `B`, `C1`, and `C2`.)
| Matrix Size | Float64 Memory | Float32 Memory | Int64 Memory | Int32 Memory |
| ----------- | ------ | ------ | ------ | ------ |
| 1k by 1k | 0.03 GiB | 0.01 GiB | 0.03 GiB | 0.01 GiB |
| 2k by 2k | 0.12 GiB | 0.06 GiB | 0.12 GiB | 0.06 GiB |
| 3k by 3k | 0.27 GiB | 0.13 GiB | 0.27 GiB | 0.13 GiB |
| 4k by 4k | 0.48 GiB | 0.24 GiB | 0.48 GiB | 0.24 GiB |
| 5k by 5k | 0.75 GiB | 0.37 GiB | 0.75 GiB | 0.37 GiB |
| 6k by 6k | 1.07 GiB | 0.54 GiB | 1.07 GiB | 0.54 GiB |
| 7k by 7k | 1.46 GiB | 0.73 GiB | 1.46 GiB | 0.73 GiB |
| 8k by 8k | 1.91 GiB | 0.95 GiB | 1.91 GiB | 0.95 GiB |
| 9k by 9k | 2.41 GiB | 1.21 GiB | 2.41 GiB | 1.21 GiB |
| 10k by 10k | 2.98 GiB | 1.49 GiB | 2.98 GiB | 1.49 GiB |
| 11k by 11k | 3.61 GiB | 1.8 GiB | 3.61 GiB | 1.8 GiB |
| 12k by 12k | 4.29 GiB | 2.15 GiB | 4.29 GiB | 2.15 GiB |
| 13k by 13k | 5.04 GiB | 2.52 GiB | 5.04 GiB | 2.52 GiB |
| 14k by 14k | 5.84 GiB | 2.92 GiB | 5.84 GiB | 2.92 GiB |
| 15k by 15k | 6.71 GiB | 3.35 GiB | 6.71 GiB | 3.35 GiB |
| 16k by 16k | 7.63 GiB | 3.81 GiB | 7.63 GiB | 3.81 GiB |
| 17k by 17k | 8.61 GiB | 4.31 GiB | 8.61 GiB | 4.31 GiB |
| 18k by 18k | 9.66 GiB | 4.83 GiB | 9.66 GiB | 4.83 GiB |
| 19k by 19k | 10.76 GiB | 5.38 GiB | 10.76 GiB | 5.38 GiB |
| 20k by 20k | 11.92 GiB | 5.96 GiB | 11.92 GiB | 5.96 GiB |
| 30k by 30k | 26.82 GiB | 13.41 GiB | 26.82 GiB | 13.41 GiB |
| 40k by 40k | 47.68 GiB | 23.84 GiB | 47.68 GiB | 23.84 GiB |
| 50k by 50k | 74.51 GiB | 37.25 GiB | 74.51 GiB | 37.25 GiB |
| 60k by 60k | 107.29 GiB | 53.64 GiB | 107.29 GiB | 53.64 GiB |
| 70k by 70k | 146.03 GiB | 73.02 GiB | 146.03 GiB | 73.02 GiB |
| 80k by 80k | 190.73 GiB | 95.37 GiB | 190.73 GiB | 95.37 GiB |
| 90k by 90k | 241.4 GiB | 120.7 GiB | 241.4 GiB | 120.7 GiB |
| 100k by 100k | 298.02 GiB | 149.01 GiB | 298.02 GiB | 149.01 GiB |
## Generating These Tables
```julia
mem_req(s, ::Type{T}) where {T} = 4s^2*sizeof(T) / (1 << 30)
function print_table(types::Vector{DataType}, Ns = nothing)
println("| Matrix Size | $(join(types, " Memory | ")) Memory |")
println("| ----------- | $(repeat(" ------ |", length(types)))")
if Ns isa Nothing
_Ns = sort(unique(vcat(collect(1:1:20), collect(20:10:100))))
else
_Ns = Ns
end
for N in _Ns
mem = mem_req.(N * 1_000, types)
m = round.(mem; digits = 2)
println("| $(N)k by $(N)k | $(join(m, " GiB | ")) GiB |")
end
return nothing
end
```
```julia
julia> print_table([Float64, Float32, Int64, Int32])
```
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | docs | 182 | ```@meta
CurrentModule = BLASBenchmarksCPU
```
# Public API
```@index
Pages = ["public-api.md"]
```
```@autodocs
Modules = [BLASBenchmarksCPU]
Public = true
Private = false
```
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | docs | 2383 | ```@meta
CurrentModule = BLASBenchmarksCPU
```
# Disabling CPU Turbo
Most recent CPUs have the ability to turbo, increasing their clock speeds for brief durations of time as thermal envelope and longer term power-use limitations allow. This is great for performance, but bad for benchmarking.
If you're running Linux, it's probably easy to enable or disable turbo settings without having to reboot into your bios.
The Linux Kernel Documentation is fairly thorough in discussing [CPUFreq](https://www.kernel.org/doc/html/v4.12/admin-guide/pm/cpufreq.html) and [intel_pstate](https://www.kernel.org/doc/html/v4.12/admin-guide/pm/intel_pstate.html) scaling drivers.
To check those on my system, I can run:
```sh
> cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_driver
intel_pstate
intel_pstate
intel_pstate
intel_pstate
intel_pstate
intel_pstate
intel_pstate
intel_pstate
```
This tells me it is using `intel_pstate` in active mode.
The documentation on `intel_pstate` mentions the `no_turbo` attribute:
> If set (equal to 1), the driver is not allowed to set any turbo P-states (see Turbo P-states Support). If unset (equal to 0, which is the default), turbo P-states can be set by the driver.
This attribute is writable, so running
```sh
echo "1" | sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo
```
disables turbo on this system. This, and closing programs that would compete for system resources (e.g., internet browsers; you can run `(h)top` to see if any processes are consuming non-negligible resources), should hopefully make benchmarking reasonably consistent and reliable.
Finally, when I'm done benchmarking, I can reenable turbo by running:
```sh
echo "0" | sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo
```
If your system does not use the `intel_pstate` driver, check for
```sh
/sys/devices/system/cpu/cpufreq/boost
```
discussed [here](https://www.kernel.org/doc/html/v4.12/admin-guide/pm/cpufreq.html#frequency-boost-support) in the kernel documentation. If the file is present, you should be able to disable boost with
```sh
echo "0" | sudo tee /sys/devices/system/cpu/cpufreq/boost
```
and then reenable with
```sh
echo "1" | sudo tee /sys/devices/system/cpu/cpufreq/boost
```
In either case, you may find it convenient to place these snippets in `#! /bin/bash` scripts for turning your system's boost on and off as desired.
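For example, a pair of tiny helper scripts (a sketch, assuming the `intel_pstate` driver; swap in the `cpufreq/boost` path if that is what your system uses):
```sh
#! /bin/bash
# turbo-off.sh -- disable turbo before benchmarking
echo "1" | sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo
```
```sh
#! /bin/bash
# turbo-on.sh -- re-enable turbo when done
echo "0" | sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo
```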
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.3.7 | 08f121214c74a5ef5db03188025edb5279d89ddc | docs | 794 | ```@meta
CurrentModule = BLASBenchmarksCPU
```
# Usage
Remember to start Julia with multiple threads with e.g. one of the following:
- `julia -t auto`
- `julia -t 4`
- Set the `JULIA_NUM_THREADS` environment variable to `4` **before** starting Julia
## Example 1
```julia
julia> using BLASBenchmarksCPU
julia> benchmark_result = runbench(Float64)
julia> plot_directory = "/foo/bar/baz/"
julia> BLASBenchmarksCPU.plot(benchmark_result; plot_directory)
```
## Example 2
```julia
julia> using BLASBenchmarksCPU
julia> libs = [:Gaius, :Octavian, :OpenBLAS]
julia> sizes = [10, 20, 30]
julia> threaded = true
julia> benchmark_result = runbench(Float64; libs, sizes, threaded)
julia> plot_directory = "/foo/bar/baz/"
julia> BLASBenchmarksCPU.plot(benchmark_result; plot_directory)
```
| BLASBenchmarksCPU | https://github.com/JuliaLinearAlgebra/BLASBenchmarksCPU.jl.git |
|
[
"MIT"
] | 0.1.0 | 1398e2810a2abf85288756f71a9989bc5ffba55b | code | 906 | using Dash
# Enter 9999 in the port field in the deploy form when deploying on JuliaHub
const PORT = 9999
const DEV_MODE = haskey(ENV, "VSCODE_PROXY_URI")
function run_app(host="0.0.0.0", port=PORT)
@info("Initializing dash...")
app = dash(;
requests_pathname_prefix=(DEV_MODE ? "/proxy/$(string(port))/" : "/"),
)
app.layout = html_div() do
html_h1("Hello Dash"),
html_div("Dash.jl: Julia interface for Dash"),
dcc_graph(;
id="example-graph",
figure=(
data=[
(x=[1, 2, 3], y=[4, 1, 2], type="bar", name="SF"),
(x=[1, 2, 3], y=[2, 4, 5], type="bar", name="Montréal"),
],
layout=(title="Dash Data Visualization",),
),
)
end
@info("Starting server...")
run_server(app, host, port; debug=DEV_MODE)
end
run_app() | DashDeploymentExample | https://github.com/JuliaComputing/DashDeploymentExample.jl.git |
|
[
"MIT"
] | 0.1.0 | 1398e2810a2abf85288756f71a9989bc5ffba55b | code | 694 | using DashDeploymentExample
using Documenter
DocMeta.setdocmeta!(DashDeploymentExample, :DocTestSetup, :(using DashDeploymentExample); recursive=true)
makedocs(;
modules=[DashDeploymentExample],
authors="JuliaHub, Inc.",
repo="https://github.com/juliacomputing/DashDeploymentExample.jl/blob/{commit}{path}#{line}",
sitename="DashDeploymentExample.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://juliacomputing.github.io/DashDeploymentExample.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/juliacomputing/DashDeploymentExample.jl",
)
| DashDeploymentExample | https://github.com/JuliaComputing/DashDeploymentExample.jl.git |
|
[
"MIT"
] | 0.1.0 | 1398e2810a2abf85288756f71a9989bc5ffba55b | code | 652 | module DashDeploymentExample
using Dash
using Sockets
function main(port)
app = dash(requests_pathname_prefix="/proxy/$port/")
app.layout = html_div() do
html_h1("Hello Dash"),
html_div("Dash.jl: Julia interface for Dash"),
dcc_graph(id = "example-graph",
figure = (
data = [
(x = [1, 2, 3], y = [4, 1, 2], type = "bar", name = "SF"),
(x = [1, 2, 3], y = [2, 4, 5], type = "bar", name = "Montréal"),
],
layout = (title = "Dash Data Visualization",)
))
end
app
end
end
| DashDeploymentExample | https://github.com/JuliaComputing/DashDeploymentExample.jl.git |
|
[
"MIT"
] | 0.1.0 | 1398e2810a2abf85288756f71a9989bc5ffba55b | code | 87 | using DashDeploymentExample
using Test
using Aqua
Aqua.test_all(DashDeploymentExample) | DashDeploymentExample | https://github.com/JuliaComputing/DashDeploymentExample.jl.git |
|
[
"MIT"
] | 0.1.0 | 1398e2810a2abf85288756f71a9989bc5ffba55b | docs | 522 | # DashDeploymentExample
[](https://juliacomputing.github.io/DashDeploymentExample.jl/stable)
[](https://juliacomputing.github.io/DashDeploymentExample.jl/dev)
[](https://github.com/juliacomputing/DashDeploymentExample.jl/actions)
This package serves as a template for Dash.jl app deployment
on JuliaHub.com.
| DashDeploymentExample | https://github.com/JuliaComputing/DashDeploymentExample.jl.git |
|
[
"MIT"
] | 0.1.0 | 1398e2810a2abf85288756f71a9989bc5ffba55b | docs | 247 | ```@meta
CurrentModule = DashDeploymentExample
```
# DashDeploymentExample
Documentation for [DashDeploymentExample](https://github.com/juliacomputing/DashDeploymentExample.jl).
```@index
```
```@autodocs
Modules = [DashDeploymentExample]
```
| DashDeploymentExample | https://github.com/JuliaComputing/DashDeploymentExample.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 932 | using StatisticalRethinking, Documenter
DOC_ROOT = sr_path("..", "docs")
DocDir = sr_path("..", "docs", "src")
page_list = Array{Pair{String, Any}, 1}();
append!(page_list, [Pair("StatisticalRethinkingJulia", "srgithub.md")])
append!(page_list, [Pair("Acknowledgements", "acknowledgements.md")]);
append!(page_list, [Pair("References", "references.md")])
append!(page_list, [Pair("Functions", "index.md")])
makedocs(
format = Documenter.HTML(prettyurls = haskey(ENV, "GITHUB_ACTIONS")),
root = DOC_ROOT,
modules = Module[],
sitename = "StatisticalRethinking.jl",
authors = "Rob Goedman and contributors.",
pages = page_list,
)
devurl = "dev"
deploydocs(
root = DOC_ROOT,
repo = "github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git",
devbranch = "master",
push_preview = true,
devurl=devurl,
versions = ["stable"=> "v^", "v#.#",
"latest"=>devurl]
)
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 5730 | #
# Unfortunately the current loo_compare in ParetoSmooth does not
# work well in Pluto and is not convenient for SR. Here I'm experimenting
# with a more elaborate version.
#
# This script needs the SR2StanPluto project environment.
#
using StanSample, ParetoSmooth
using AxisKeys, NamedTupleTools
using PrettyTables, StatsPlots
using StatisticalRethinking
using StatisticalRethinkingPlots, Test
df = CSV.read(sr_datadir("WaffleDivorce.csv"), DataFrame);
scale!(df, [:Marriage, :MedianAgeMarriage, :Divorce])
data = (N=size(df, 1), D=df.Divorce_s, A=df.MedianAgeMarriage_s,
M=df.Marriage_s)
stan5_1 = "
data {
int < lower = 1 > N; // Sample size
vector[N] D; // Outcome
vector[N] A; // Predictor
}
parameters {
real a; // Intercept
real bA; // Slope (regression coefficients)
real < lower = 0 > sigma; // Error SD
}
transformed parameters {
vector[N] mu; // mu is a vector
for (i in 1:N)
mu[i] = a + bA * A[i];
}
model {
a ~ normal(0, 0.2); //Priors
bA ~ normal(0, 0.5);
sigma ~ exponential(1);
D ~ normal(mu , sigma); // Likelihood
}
generated quantities {
vector[N] loglik;
for (i in 1:N)
loglik[i] = normal_lpdf(D[i] | mu[i], sigma);
}
";
stan5_2 = "
data {
int N;
vector[N] D;
vector[N] M;
}
parameters {
real a;
real bM;
real<lower=0> sigma;
}
transformed parameters {
vector[N] mu;
for (i in 1:N)
mu[i]= a + bM * M[i];
}
model {
a ~ normal( 0 , 0.2 );
bM ~ normal( 0 , 0.5 );
sigma ~ exponential( 1 );
D ~ normal( mu , sigma );
}
generated quantities {
vector[N] loglik;
for (i in 1:N)
loglik[i] = normal_lpdf(D[i] | mu[i], sigma);
}
";
stan5_3 = "
data {
int N;
vector[N] D;
vector[N] M;
vector[N] A;
}
parameters {
real a;
real bA;
real bM;
real<lower=0> sigma;
}
transformed parameters {
vector[N] mu;
for (i in 1:N)
mu[i] = a + bA * A[i] + bM * M[i];
}
model {
a ~ normal( 0 , 0.2 );
bA ~ normal( 0 , 0.5 );
bM ~ normal( 0 , 0.5 );
sigma ~ exponential( 1 );
D ~ normal( mu , sigma );
}
generated quantities{
vector[N] loglik;
for (i in 1:N)
loglik[i] = normal_lpdf(D[i] | mu[i], sigma);
}
";
m5_1s = SampleModel("m5.1s", stan5_1)
rc5_1s = stan_sample(m5_1s; data)
m5_2s = SampleModel("m5.2s", stan5_2)
rc5_2s = stan_sample(m5_2s; data)
m5_3s = SampleModel("m5.3s", stan5_3)
rc5_3s = stan_sample(m5_3s; data)
struct LooCompare1
psis::Vector{PsisLoo}
table::KeyedArray
end
function to_paretosmooth(ll, pd = [3, 1, 2])
permutedims(ll, pd)
end
function loo_compare1(models::Vector{SampleModel};
loglikelihood_name="loglik",
model_names=nothing,
sort_models=true,
show_psis=true)
nmodels = length(models)
mnames = [models[i].name for i in 1:nmodels]
chains_vec = read_samples.(models)
ll_vec = Array.(matrix.(chains_vec, loglikelihood_name))
ll_vecp = map(to_paretosmooth, ll_vec)
psis_vec = psis_loo.(ll_vecp)
if show_psis
for i in 1:nmodels
psis_vec[i] |> display
end
end
psis_values = Vector{Float64}(undef, nmodels)
se_values = Vector{Float64}(undef, nmodels)
loos = Vector{Vector{Float64}}(undef, nmodels)
for i in 1:nmodels
psis_values[i] = psis_vec[i].estimates(:cv_elpd, :total)
se_values[i] = psis_vec[i].estimates(:cv_elpd, :se_total)
loos[i] = psis_vec[i].pointwise(:cv_elpd)
end
if sort_models
ind = sortperm([psis_values[i][1] for i in 1:nmodels]; rev=true)
psis_vec = psis_vec[ind]
psis_values = psis_values[ind]
se_values = se_values[ind]
loos = loos[ind]
mnames = mnames[ind]
end
# Setup comparison vectors
elpd_diff = zeros(nmodels)
se_diff = zeros(nmodels)
weight = ones(nmodels)
# Compute comparison values
for i in 2:nmodels
elpd_diff[i] = psis_values[i] - psis_values[1]
diff = loos[1] - loos[i]
se_diff[i] = √(length(loos[i]) * var(diff; corrected=false))
end
data = elpd_diff
data = hcat(data, se_diff)
sumval = sum([exp(psis_values[i]) for i in 1:nmodels])
@. weight = exp(psis_values) / sumval
data = hcat(data, weight)
# Create KeyedArray object
table = KeyedArray(
data,
model = mnames,
statistic = [:cv_elpd, :se_diff, :weight],
)
# Return LooCompare object
LooCompare1(psis_vec, table)
end
function Base.show(io::IO, ::MIME"text/plain", loo_compare::LooCompare1)
table = loo_compare.table
return pretty_table(
table;
compact_printing=false,
header=table.statistic,
row_names=table.model,
formatters=ft_printf("%5.2f"),
alignment=:r,
)
end
if success(rc5_1s) && success(rc5_2s) && success(rc5_3s)
nt5_1s = read_samples(m5_1s, :particles)
NamedTupleTools.select(nt5_1s, (:a, :bA, :sigma)) |> display
nt5_2s = read_samples(m5_2s, :particles)
NamedTupleTools.select(nt5_2s, (:a, :bM, :sigma)) |> display
nt5_3s = read_samples(m5_3s, :particles)
NamedTupleTools.select(nt5_3s, (:a, :bA, :bM, :sigma)) |> display
println()
models = [m5_1s, m5_2s, m5_3s]
loo_comparison = loo_compare1(models)
println()
for i in 1:length(models)
pw = loo_comparison.psis[i].pointwise
pk_plot(pw(:pareto_k))
savefig(joinpath(@__DIR__, "m5.$(i)s.png"))
end
loo_comparison |> display
end
#=
With SR/ulam():
```
PSIS SE dPSIS dSE pPSIS weight
m5.1u 126.0 12.83 0.0 NA 3.7 0.67
m5.3u 127.4 12.75 1.4 0.75 4.7 0.33
m5.2u 139.5 9.95 13.6 9.33 3.0 0.00
```
=#
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 2334 | module StatisticalRethinking
using Reexport
using Requires
@reexport using CSV, DataFrames, Distributions
@reexport using StatsBase, StatsFuns, Statistics
using LinearAlgebra, Random
using NamedArrays
using NamedTupleTools
using PrettyTables
using Unicode
using StructuralCausalModels
#using ParetoSmooth
using ParetoSmoothedImportanceSampling
using MonteCarloMeasurements
using KernelDensity
using Optim
using MCMCChains
using Dates
import StatsBase: sample
import DataFrames: DataFrame
import MonteCarloMeasurements: Particles
#import ParetoSmooth: psis_loo, loo_compare
using DocStringExtensions: SIGNATURES, FIELDS, TYPEDEF
function __init__()
@require Turing="fce5fe82-541a-59a6-adf8-730c64b5f9a0" include("require/turing/turing.jl")
@require StanSample="c1514b29-d3a0-5178-b312-660c88baa699" include("require/stan/stan.jl")
@require LogDensityProblems="6fdf6af0-433a-55f7-b3ed-c6c6e0b8df7c" include("require/dhmc/dhmc.jl")
#@require MCMCChains="c7f686f2-ff18-58e9-bc7b-31028e88f75d" include("require/mcmcchains/mcmcchains.jl")
end
const src_path = @__DIR__
const SR = StatisticalRethinking
"""
# sr_path
Relative path using the StatisticalRethinking src/ directory.
### Example to get access to the data subdirectory
```julia
sr_path("..", "data")
```
Note that in the projects, e.g. SR2StanPluto.jl and SR2TuringPluto.jl, the
DrWatson approach is a better choice, i.e.: `sr_datadir(filename)`
"""
sr_path(parts...) = normpath(joinpath(src_path, parts...))
# DrWatson extension
"""
# sr_datadir
Relative path to the data directory of the StatisticalRethinking package.
### Example to access `Howell1.csv` in StatisticalRethinking:
```julia
df = CSV.read(sr_datadir("Howell1.csv"), DataFrame)
```
"""
sr_datadir(parts...) = sr_path("..", "data", parts...)
include("scale.jl") # Rename to standardize of zscore?
include("rescale.jl")
include("link.jl")
include("hpdi.jl")
include("sample_dataframe.jl")
include("precis.jl")
include("simulate.jl")
include("srtools.jl")
include("sim_happiness.jl")
include("pluto_helpers.jl")
include("sim_train_test.jl")
include("lppd.jl")
include("logprob.jl")
include("compare_models.jl")
include("hmc.jl")
include("pk_qualify.jl")
include("quap.jl")
include("dataframe.jl")
#include("particles_mcmcchains.jl")
export
sr_path,
sr_datadir,
SR
end # module
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 3624 | """
# compare
Compare waic and psis values for models.
$(SIGNATURES)
### Required arguments
```julia
* `models` : Vector of logprob matrices
* `criterium` : Either ::Val{:waic} or ::Val{:psis}
```
### Optional argument
```julia
* `mnames::Vector{Symbol}` : Vector of model names
```
### Return values
```julia
* `df` : DataFrame with statistics
```
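### Example
A sketch, assuming `ll1` and `ll2` are samples-by-observations log-likelihood matrices from two fitted models:
```julia
compare([ll1, ll2], :waic; mnames=[:m1, :m2])
compare([ll1, ll2], :psis; mnames=[:m1, :m2])
```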
"""
function compare(m::Vector{Matrix{Float64}}, ::Val{:waic};
mnames=String[])
df = DataFrame()
waics = Vector{NamedTuple}(undef, length(m))
for i in 1:length(m)
waics[i] = waic(m[i])
end
ind = sortperm([waics[i].WAIC for i in 1:length(m)])
waics = waics[ind]
mods = m[ind]
waics_pw = Vector{Vector{Float64}}(undef, length(m))
for i in 1:length(m)
waics_pw[i] = waic(mods[i]; pointwise=true).WAIC
end
if length(mnames) > 0
df.models = String.(mnames[ind])
end
df.WAIC = round.([waics[i].WAIC for i in 1:length(m)], digits=1)
df.lppd = round.([-2sum(lppd(mods[i])) for i in 1:length(m)], digits=2)
df.SE = round.([waics[i].std_err for i in 1:length(m)], digits=2)
dwaics = zeros(length(m))
for i in 2:length(m)
dwaics[i] = df[i, :WAIC] - df[1, :WAIC]
end
df.dWAIC = round.(dwaics, digits=1)
dse = zeros(length(m))
for i in 2:length(m)
diff = waics_pw[1] .- waics_pw[i]
dse[i] = √(length(waics_pw[1]) * var(diff))
end
df.dSE = round.(dse, digits=2)
df.pWAIC = round.([sum(waics[i].penalty) for i in 1:length(m)],
digits=2)
weights = ones(length(m))
sumval = sum([exp(-0.5df[i, :WAIC]) for i in 1:length(m)])
for i in 1:length(m)
weights[i] = exp(-0.5df[i, :WAIC])/sumval
end
df.weight = round.(weights, digits=2)
df
end
function compare(m::Vector{Matrix{Float64}}, ::Val{:psis};
mnames=String[])
df = DataFrame()
loo = Vector{Float64}(undef, length(m))
loos = Vector{Vector{Float64}}(undef, length(m))
pk = Vector{Vector{Float64}}(undef, length(m))
for i in 1:length(m)
loo[i], loos[i], pk[i] = psisloo(m[i])
end
ind = sortperm([-2loo[i][1] for i in 1:length(m)])
mods = m[ind]
loo = loo[ind]
loos = loos[ind]
pk = pk[ind]
if length(mnames) > 0
df.models = String.(mnames[ind])
end
df.PSIS = round.([-2loo[i] for i in 1:length(loo)], digits=1)
df.lppd = round.([-2sum(lppd(mods[i])) for i in 1:length(m)], digits=2)
df.SE = round.([sqrt(size(m[i], 2)*var2(-2loos[i])) for i in 1:length(m)],
digits=2)
dloo = zeros(length(m))
for i in 2:length(m)
dloo[i] = df[i, :PSIS] - df[1, :PSIS]
end
df.dPSIS = round.(dloo, digits=1)
dse = zeros(length(m))
for i in 2:length(m)
diff = 2(loos[1] .- loos[i])
dse[i] = √(length(loos[i]) * var2(diff))
end
df.dSE = round.(dse, digits=2)
ps = zeros(length(m))
for j in 1:length(m)
n_sam, n_obs = size(mods[j])
pd = zeros(length(m), n_obs)
pd[j, :] = [var2(mods[j][:,i]) for i in 1:n_obs]
ps[j] = sum(pd[j, :])
end
df.pPSIS = round.(ps, digits=2)
weights = ones(length(m))
sumval = sum([exp(-0.5df[i, :PSIS]) for i in 1:length(m)])
for i in 1:length(m)
weights[i] = exp(-0.5df[i, :PSIS])/sumval
end
df.weight = round.(weights, digits=2)
df
end
compare(m::Vector{Matrix{Float64}}, type::Symbol; mnames=String[]) =
compare(m, Val(type); mnames=String.(mnames))
export
compare | StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 406 | function DataFrame(df::DataFrame, sym::Union{Symbol, String})
n = string.(names(df))
syms = string(sym)
sel = String[]
for (i, s) in enumerate(n)
if length(s) > length(syms) && syms == n[i][1:length(syms)] &&
n[i][length(syms)+1] in ['[', '.', '_']
append!(sel, [n[i]])
end
end
length(sel) == 0 && error("$syms not in $n")
df[:, sel]
end
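# Select the columns of `df` whose names extend `sym` with '[', '.' or '_',
# e.g. `DataFrame(post_df, :b)` picks up columns like `b[1]`, `b.2` or `b_3`.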
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 2201 | using Distributions
# u() needs to return neg-log-probability
function u(q, x, y; a=0, b=1, k=0, d=1)
# muy == q[1], mux == q[2]
uval = sum(logpdf(Normal(q[1], 1), y)) + sum(logpdf(Normal(q[2], 1), x)) +
logpdf(Normal(a, b), q[1]) + logpdf(Normal(k, d), q[2])
-uval
end
# need vector of partial derivatives of U with respect to vector q
function ugrad( q, x, y; a=0 , b=1 , k=0 , d=1 )
muy = q[1]
mux = q[2]
g1 = sum( y .- muy ) .+ (a - muy) / b^2 # dU/dmuy
g2 = sum( x .- mux ) .+ (k - mux) / d^2 # dU/dmux
[-g1 , -g2] # negative bc energy is neg-log-prob
end
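"""
hmc(x, y, u, ugrad, eps, L, current_q)

One static HMC update: `L` leapfrog steps of size `eps` starting from
`current_q = [muy, mux]`, followed by a Metropolis accept/reject step.

### Example
A sketch on hypothetical toy data:
```julia
x = randn(50); y = randn(50)
step = hmc(x, y, u, ugrad, 0.03, 11, [-0.1, 0.2])
step.accept == 1 # whether the proposal was accepted
```
"""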
function hmc(x, y, u, ugrad, eps, L, current_q)
q = current_q
p = rand(Normal(0, 1), length(q)) # random flick to momentum
current_p = p
# Make a half step for momentum at the beginning
v = u(q, x, y)
g = ugrad(q, x, y)
p -= eps * g / 2
# Initialize bookkeeping
qtraj = zeros(L+1, length(q)+3)
qtraj[1, :] = [q[1], q[2], v, g[1], g[2]]
ptraj = zeros(L+1, length(q))
ptraj[1, :] = p
# Alternate full steps for position and momentum
for i in 1:L
# Full position step
q += eps * p
# Full step for momentum, except for the last step
if i !== L
v = u(q, x, y)
g = ugrad(q, x, y)
p -= eps .* g
ptraj[i+1, :] = p
end
# Bookkeeping
qtraj[i+1, :] = [q[1], q[2], v, g[1], g[2]]
end
# Make a half step for momentum at the end
v = u(q, x, y)
g = ugrad(q, x, y)
p -= eps * g / 2
ptraj[L+1, :] = p
# Negate momentum to make the proposal symmetric
p = -p
# Evaluate potential and kinetic energies at beginning and end
current_U = u([current_q[1], current_q[2]], x, y)
current_K = sum(current_p .^ 2) / 2
proposed_U = u([q[1], q[2]], x, y)
proposed_K = sum(p .^ 2) / 2
dH = proposed_U + proposed_K - current_U - current_K
# Accept or reject the state at the end of trajectory
# Return either position at the end or initial position
local accept = 0
local new_q
if rand(Uniform(0, 1)) < exp(-dH) # accept with probability min(1, exp(-dH))
new_q = q # Accept
accept = 1
else
new_q = current_q # Reject
end
(q=new_q, ptraj=ptraj, qtraj=qtraj, accept=accept, dh=dH)
end
export
u, ugrad,
hmc | StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 414 | """
# hpdi
Compute high density region.
$(SIGNATURES)
Derived from `hpd` in MCMCChains.jl.
By default alpha=0.11 for a 2-sided tail area of p < 0.055 and p > 0.945, i.e. an 89% interval.
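### Example
A quick sketch on hypothetical posterior draws:
```julia
x = randn(10_000)
hpdi(x) # 89% interval (alpha = 0.11)
hpdi(x; alpha=0.05) # 95% interval
```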
"""
function hpdi(x::Vector{T}; alpha=0.11) where {T<:Real}
n = length(x)
m = max(1, ceil(Int, alpha * n))
y = sort(x)
a = y[1:m]
b = y[(n - m + 1):n]
_, i = findmin(b - a)
return [a[i], b[i]]
end
export
hpdi | StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 1945 | """
# link
Compute the link function for standardized variables.
$(SIGNATURES)
### Required arguments
* `df::DataFrame` : Chain samples converted to a DataFrame
* `vars::Vector{Symbol}` : Variables in DataFrame (2 variables)
* `xrange::range` : Range over which link values are computed
### Optional arguments
* `xbar::Float64` : Mean value of observed predictor
* `ybar::Float64` : Mean value of observed outcome (requires xbar argument)
### Return values
* `result` : Vector of link values
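### Example
A sketch using hypothetical intercept and slope samples:
```julia
dfa = DataFrame(a = randn(1000), bA = randn(1000))
mu = link(dfa, [:a, :bA], range(-2, 2, length=5))
```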
"""
function link(dfa::DataFrame, vars, xrange)
[dfa[:, vars[1]] + dfa[:, vars[2]] * x for x in xrange]
end
function link(dfa::DataFrame, vars, xrange, xbar)
[dfa[:, vars[1]] + dfa[:, vars[2]] * (x - xbar) for x in xrange]
end
function link(dfa::DataFrame, vars, xrange, xbar, ybar)
[ybar .+ dfa[:, vars[1]] + dfa[:, vars[2]] * (x - xbar) for x in xrange]
end
"""
# link
Generalized link function to evaluate callable for all parameters in dataframe over range of x values.
$(SIGNATURES)
## Required arguments
* `dfa::DataFrame`: data frame with parameters
* `rx_to_val::Function`: function of two arguments: row object and x
* `xrange`: sequence of x values to be evaluated on
## Return values
A vector in which each entry corresponds to one value of xrange.
Every such entry is a list with one element per row of the data frame.
## Examples
```jldoctest
julia> using StatisticalRethinking, DataFrames
julia> d = DataFrame(:a => [1,2], :b=>[1,1])
2×2 DataFrame
 Row │ a      b
     │ Int64  Int64
─────┼──────────────
   1 │     1      1
   2 │     2      1
julia> link(d, (r,x) -> r.a+x*r.b, 1:2)
2-element Vector{Vector{Int64}}:
 [2, 3]
 [3, 4]
```
"""
function link(dfa::DataFrame, rx_to_val::Function, xrange)
[
rx_to_val.(eachrow(dfa), (x,))
for x ∈ xrange
]
end
export
link
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 235 | function logprob(post_df::DataFrame, x::Matrix, y::Vector, k)
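# Pointwise log-likelihood matrix (samples by observations) for a linear
# model with intercept `a`, slopes `b.1` ... `b.k` and scale `sigma`.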
b = Matrix(hcat(post_df[:, [Symbol("b.$i") for i in 1:k]]))
mu = post_df.a .+ b * x[:, 1:k]'
logpdf.(Normal.(mu , post_df.sigma), y')
end
export
logprob
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 1379 | lppd(ll) =
[logsumexp(ll[:, i]) - log(size(ll, 1)) for i in 1:size(ll, 2)]
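# The one-argument method above takes a samples-by-observations
# log-likelihood matrix and, per observation, averages the likelihood
# over the samples on the log scale (stably, via logsumexp).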
"""
# lppd
Generic version of Log Pointwise Predictive Density computation, which
is similar to the `simulate` function, but additionally computes the log density
for the target values.
$(SIGNATURES)
## Required arguments
* `df::DataFrame`: data frame with parameters
* `rx_to_dist::Function`: callable with two arguments: row object and x value.
Has to return `Distribution` instance
* `xseq`: sequence of x values to be passed to the callable
* `yseq`: sequence of target values for log density calculation.
## Return values
Vector of float values with the same size as `xseq` and `yseq`.
## Examples
```jldoctest
julia> using StatisticalRethinking, DataFrames, Distributions
julia> df = DataFrame(:mu => [0.0, 1.0])
2×1 DataFrame
 Row │ mu
     │ Float64
─────┼─────────
   1 │     0.0
   2 │     1.0
julia> lppd(df, (r, x) -> Normal(r.mu + x, 1.0), 0:3, 3:-1:0)
4-element Vector{Float64}:
-3.5331959794720684
-1.1380087295845114
-1.9106724357818656
-6.082335295491998
```
"""
function lppd(df::DataFrame, rx_to_dist::Function, xseq, yseq)::Vector{Float64}
res = Float64[]
for (x, y) ∈ zip(xseq, yseq)
s = [
logpdf(rx_to_dist(r, x), y)
for r ∈ eachrow(df)
]
push!(res, logsumexp(s) - log(length(s)))
end
res
end
export
lppd
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 921 |
# This is copied and shortened from Turing2MonteCarloChains.jl!
# Doesn't work reliably!
function MonteCarloMeasurements.Particles(t::NTuple{N, <:AxisArrays.AxisArray}; start=0) where N
[adapted_particles(t[i].data[start+1:end,:]) for i in 1:N]
end
function MonteCarloMeasurements.Particles(a::AxisArrays.AxisArray; start=0)
adapted_particles(a.data[start+1:end,:])
end
"""
Particles(chain::Chains; start=0)
Return a named tuple of particles or vector of particles where the keys
are the symbols in the Turing model that produced the chain. `start>0` can
be given to discard a number of samples from the beginning of the chain.
"""
function MonteCarloMeasurements.Particles(chain::Chains; start=0)
p = get_params(chain)
(;collect((k => Particles(getfield(p,k); start=start) for k in keys(p)))...)
end
function adapted_particles(v)
T = float(typeof(v[1]))
Particles(vec(T.(v)))
end
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 240 | function pk_qualify(pk::Vector{Float64})
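# Bucket the Pareto-k diagnostics: good (k ≤ 0.5), ok (0.5 < k ≤ 0.7),
# bad (0.7 < k ≤ 1) and very_bad (k > 1).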
pk_good = sum(pk .<= 0.5)
pk_ok = length(pk[pk .<= 0.7]) - pk_good
pk_bad = length(pk[pk .<= 1]) - pk_good - pk_ok
(good=pk_good, ok=pk_ok, bad=pk_bad, very_bad=sum(pk .> 1))
end
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 340 | # This file contains helper functions to display in Pluto.jl
PRECIS(df::DataFrame) = Text(precis(df; io=String))
export
PRECIS
CHNS(chns::MCMCChains.Chains) = Text(sprint(show, "text/plain", chns))
# Pluto helpers for MCMCChains
HPD(chns::MCMCChains.Chains) = Text(sprint(show, "text/plain", hpd(chns)))
export
HPD,
CHNS
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 1270 | const BARS = collect("▁▂▃▄▅▆▇█")
function unicode_histogram(data, nbins = 12)
# @show data
f = fit(Histogram, data, nbins = nbins) # nbins: more like a guideline than a rule, really
# scale weights between 1 and 8 (length(BARS)) to fit the indices in BARS
# eps is needed so indices are in the interval [0, 8) instead of [0, 8] which could
# result in indices 0:8 which breaks things
scaled = f.weights .* (length(BARS) / maximum(f.weights) - eps())
indices = floor.(Int, scaled) .+ 1
return join((BARS[i] for i in indices))
end
"""
# precis
$(SIGNATURES)
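Summarize the columns of a DataFrame of (typically posterior) draws:
mean, std, the 5.5%, 50% and 94.5% quantiles, and a unicode histogram
per parameter.

### Example
```julia
df = DataFrame(a = randn(1000), sigma = rand(1000))
precis(df)
```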
"""
function precis(df::DataFrame; io = stdout, digits = 4, depth = Inf, alpha = 0.11)
d = DataFrame()
cols = collect.(skipmissing.(eachcol(df)))
d.param = names(df)
d.mean = mean.(cols)
d.std = std.(cols)
quants = quantile.(cols, ([alpha/2, 0.5, 1-alpha/2], ))
quants = hcat(quants...)
d[:, "5.5%"] = quants[1,:]
d[:, "50%"] = quants[2,:]
d[:, "94.5%"] = quants[3,:]
d.histogram = unicode_histogram.(cols, min(size(df, 1), 12))
for col in ["mean", "std", "5.5%", "50%", "94.5%"]
d[:, col] .= round.(d[:, col], digits = digits)
end
pretty_table(io, d, nosubheader = true, vlines = [0, 1, 7])
end
export
precis
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 1855 | using NamedTupleTools
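# Estimate each parameter's posterior mode from a kernel density fit and
# return, per column, Particles drawn from Normal(mode, std) as a NamedTuple.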
function mode_estimates(df::DataFrame)
d = Dict{Symbol, typeof(Particles(size(df,1), Normal(0.9, 1.0)))}()
for var in Symbol.(names(df))
dens = kde(df[:, var])
mu = collect(dens.x)[findmax(dens.density)[2]]
sigma = std(df[:, var], mean=mu)
d[var] = Particles(size(df, 1), Normal(mu, sigma))
end
(;d...)
end
"""
# quap
Quadratic approximation of a posterior distribution in a DataFrame
### Method
```julia
quap(df)
```
### Required arguments
```julia
* `df::DataFrame` : Dataframe generated from samples (chains)
```
### Return values
```julia
* `result::NamedTuple` : NamedTuple representing the quadratic approximation
```
To convert to a Dict use:
```julia
dct = Dict(pairs(result))
```
### Example
```julia
# Run stan_sample() on a SampleModel
if success(rc)
df = read_samples(sm; output_format=:dataframe)
q = quap(df)
end
```
"""
function quap(s::DataFrame)
ntnames = (:coef, :vcov, :converged, :distr, :params)
n = Symbol.(names(s))
coefnames = tuple(n...,)
p = mode_estimates(s)
c = [mean(p[k]) for k in n]
cvals = reshape(c, 1, length(n))
coefvalues = reshape(c, length(n))
v = Statistics.covm(Array(s), cvals)
distr = if length(coefnames) == 1
Normal(coefvalues[1], √v[1]) # Normal expects stddev
else
MvNormal(coefvalues, v) # MvNormal expects variance matrix
end
ntvalues = tuple(
namedtuple(coefnames, coefvalues),
v, true, distr, n
)
namedtuple(ntnames, ntvalues)
end
function sample(qm::NamedTuple; nsamples=4000)
df = DataFrame()
p = Particles(nsamples, qm.distr)
for (indx, coef) in enumerate(qm.params)
if length(qm.params) == 1
df[!, coef] = p.particles
else
df[!, coef] = p[indx].particles
end
end
df
end
export
mode_estimates,
sample,
quap
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 539 |
"""
# rescale
Rescale a vector to "un-standardize", the opposite of scale!().
$(SIGNATURES)
# Extended help
### Required arguments
```julia
* `x::Vector{Float64}` : Vector to be rescaled
* `xbar` : Mean value for rescaling
* `xstd` : Std for rescaling
```
### Return values
```julia
* `result::AbstractVector` : Rescaled vector
```
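### Example
A quick sketch, inverting a manual standardization:
```julia
x = [10.0, 20.0, 30.0]
x_s = (x .- mean(x)) ./ std(x)
rescale(x_s, mean(x), std(x)) # ≈ [10.0, 20.0, 30.0]
```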
"""
function rescale(x::AbstractVector, xbar::Float64, xstd::Float64)
x .* xstd .+ xbar
end
export
rescale | StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 1179 | using OrderedCollections
"""
# sample
Sample rows from a DataFrame
### Method
```julia
sample(df, n; replace, ordered)
```
### Required arguments
```julia
* `df::DataFrame` : DataFrame
* `n::Int` : Number of samples
```
### Optional argument
```julia
* `rng::AbstractRNG` : Random number generator
* `replace::Bool=true` : Sample with replace
* `ordered::Bool=false` : Sort sample
```
### Return values
```julia
* `result` : Array of samples
```
"""
function sample(df::DataFrame, n; replace=true, ordered=false)
indxs = sample(Random.GLOBAL_RNG,
1:size(df,1),
n,
replace=replace, ordered=ordered)
df[indxs, :]
end
function sample(rng::AbstractRNG, df::DataFrame, n; replace=true, ordered=false)
indxs = sample(rng,
1:size(df,1),
n,
replace=replace, ordered=ordered)
df[indxs, :]
end
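# Convert each column of a draws DataFrame into a Particles object,
# returned as a NamedTuple keyed by column name.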
function Particles(df::DataFrame)
d = OrderedDict{Symbol, typeof(Particles(size(df, 1), Normal(0.0, 1.0)))}()
for var in Symbol.(names(df))
d[var] = Particles(df[:, var])
end
(;d...)
end
export
sample
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 1026 | using DataFrames, Statistics
import Distributions: scale!
"""
# scale!
Augment a DataFrame with scaled values of 1 or more columns
### Method
```julia
scale!(df, var, ext)
```
### Required arguments
```julia
* `df::DataFrame` : DataFrame
* `var::Union{Symbol, Vector{Symbol}}` : Variables to scale
* `ext::String="_s"` : Suffix for scaled varable(s)
```
### Return values
```julia
* `result::DataFrame` : Augmented DataFrame
```
### Example
```julia
scale!(df, :var1)
or
scale!(mydf, [:var1, var2])
```
"""
function scale!(
df::DataFrame,
vars::Vector{Symbol},
ext="_s")
for var in vars
mean_var = mean(df[!, var])
std_var = std(df[!, var])
df[!, Symbol("$(String(var))$ext")] =
(df[:, var] .- mean_var)/std_var
end
df
end
function scale!(
df::DataFrame,
var::Symbol,
ext="_s")
mean_var = mean(df[!, var])
std_var = std(df[!, var])
df[!, Symbol("$(String(var))$ext")] =
(df[:, var] .- mean_var)/std_var
end
export
scale!
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 1565 |
"""
# sim_happiness
$(SIGNATURES)
Simulates happiness using rules from section 6.3 of the book:
* Each year, 20 people are born with uniformly distributed happiness values.
* Each year, each person ages one year. Happiness does not change.
* At age 18, individuals can become married. The odds of marriage each year are
proportional to an individual's happiness.
* Once married, an individual remains married.
* After age 65, individuals leave the sample. (They move to Spain.)
Arguments:
* `seed`: random seed, default is no seed
* `n_years`: number of years to simulate
* `max_age`: maximum age people live to
* `n_births`: number of people born every year
* `aom`: age from which people can get married
# Examples
```jldoctest
julia> using StatisticalRethinking
julia> sim_happiness(n_years=4, n_births=10)
40×3 DataFrame
 Row │ age    happiness  married
     │ Int64  Float64    Int64
─────┼───────────────────────────
   1 │     4   -2.0            0
   2 │     4   -1.55556        0
   3 │     4   -1.11111        0
```
"""
function sim_happiness(; seed=nothing, n_years=1000, max_age=65, n_births=20, aom=18)
isnothing(seed) || Random.seed!(seed)
h = Float64[]; a = Int[]; m = Int[];
for t in 1:min(n_years, max_age)
a .+= 1
append!(a, ones(Int, n_births))
append!(h, range(-2, stop=2, length=n_births))
append!(m, zeros(Int, n_births))
can_marry = @. (m == 0) & (a >= aom)
m[can_marry] = @. rand(Bernoulli(logistic(h[can_marry] - 4)))
end
DataFrame(:age=>a, :happiness=>h, :married=>m)
end
export
sim_happiness
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 553 | function sim_train_test(;
N = 20,
K = 2,
rho = [0.15, -0.4])
K += 1 # Add observations column
n_dim = 1 + length(rho)
n_dim = n_dim < K ? K : n_dim
Rho = Matrix{Float64}(I, n_dim, n_dim)
for i in 1:length(rho)
Rho[i+1, 1] = Rho[1, i + 1] = rho[i]
end
x_train = Matrix(rand(MvNormal(zeros(n_dim), Rho), N)')
x_test = Matrix(rand(MvNormal(zeros(n_dim), Rho), N)')
y = x_train[:, 1]
x_train = x_train[:, 2:n_dim]
(y, x_train, x_test[:, 2:n_dim])
end
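# A minimal usage sketch (default settings assumed): simulate N train/test
# observations with K correlated predictor columns.
#   y, x_train, x_test = sim_train_test(N=20, K=2, rho=[0.15, -0.4])
#   size(x_train)               # -> (20, 2)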
export
sim_train_test | StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 3408 | """
# simulate
Used for counterfactual simulations.
$(SIGNATURES)
### Required arguments
```julia
* `df` : DataFrame with coefficient samples
* `coefs` : Vector of coefficients
* `var_seq` : Input values for simulated effect
```
### Return values
```julia
* `m_sim::Matrix` : Matrix with predictions
```
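### Example
```julia
# Hypothetical column names: df holds posterior draws of intercept :a,
# slope :bM and scale :sigma; simulate over 30 values of the predictor.
m_sim = simulate(df, [:a, :bM, :sigma], range(-2, 2, length=30))
```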
"""
function simulate(df, coefs, var_seq)
m_sim = zeros(size(df, 1), length(var_seq));
for j in 1:size(df, 1)
for i in 1:length(var_seq)
d = Normal(df[j, coefs[1]] + df[j, coefs[2]] * var_seq[i], df[j, coefs[3]])
m_sim[j, i] = rand(d)
end
end
m_sim
end
"""
# simulate
Counterfactual predictions after manipulating a variable.
$(SIGNATURES)
### Required arguments
```julia
* `df` : DataFrame with coefficient samples
* `coefs` : Vector of coefficients
* `var_seq` : Input values for simulated effect
* `coefs_ext` : Vector of coefficients for the simulated mediator variable
```
### Return values
```julia
* `(m_sim, d_sim)` : Arrays with predictions
```
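### Example
```julia
# Hypothetical column names: mediator coefficients in `coefs`, outcome
# coefficients for the simulated mediator in `coefs_ext`.
m_sim, d_sim = simulate(df, [:a, :bA, :sigma_M], a_seq, [:bM, :sigma])
```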
"""
function simulate(df, coefs, var_seq, coefs_ext)
m_sim = simulate(df, coefs, var_seq)
d_sim = zeros(size(df, 1), length(var_seq));
for j in 1:size(df, 1)
for i in 1:length(var_seq)
d = Normal(df[j, coefs[1]] + df[j, coefs[2]] * var_seq[i] +
df[j, coefs_ext[1]] * m_sim[j, i], df[j, coefs_ext[2]])
d_sim[j, i] = rand(d)
end
end
(m_sim, d_sim)
end
"""
# simulate
Generic simulation of predictions using a callable that returns a distribution to sample from.
$(SIGNATURES)
## Required arguments
* `df::DataFrame`: data frame with parameters in each row
* `rx_to_dist::Function`: callable taking two arguments, a row object and an x value; it must return a `Distribution` instance.
* `xrange`: iterable of x values to simulate over
## Optional arguments
* `return_dist::Bool = false`: if set to `true`, distributions will be returned, not their samples
* `seed::Union{Int,Missing} = missing`: random seed (no seeding if `missing`)
## Return value
Vector with one element per row of `df`. Each element is itself a vector with
one entry per value in `xrange`, produced by calling `rx_to_dist` to obtain a
distribution and then sampling from it.
If argument `return_dist=true`, the sampling step is omitted and the
distributions themselves are returned.
## Examples
```jldoctest
julia> using StatisticalRethinking, DataFrames, Distributions
julia> d = DataFrame(:mu => [1.0, 2.0], :sigma => [0.0, 0.0])
2×2 DataFrame
 Row │ mu       sigma
     │ Float64  Float64
─────┼──────────────────
   1 │     1.0      0.0
   2 │     2.0      0.0
julia> simulate(d, (r,x) -> Normal(r.mu+x, r.sigma), 0:1)
2-element Vector{Vector{Float64}}:
[1.0, 2.0]
[2.0, 3.0]
julia> simulate(d, (r,x) -> Normal(r.mu+x, r.sigma), 0:1, return_dist=true)
2-element Vector{Vector{Normal{Float64}}}:
 [Normal{Float64}(μ=1.0, σ=0.0), Normal{Float64}(μ=2.0, σ=0.0)]
 [Normal{Float64}(μ=2.0, σ=0.0), Normal{Float64}(μ=3.0, σ=0.0)]
```
"""
function simulate(df::DataFrame, rx_to_dist::Function, xrange; return_dist::Bool=false,
seed::Union{Int,Missing} = missing)
ismissing(seed) || Random.seed!(seed)
[
[
begin
dist = rx_to_dist(row, x)
return_dist ? dist : rand(dist)
end
for x β xrange
]
for row β eachrow(df)
]
end
export
simulate
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 2084 | function zscore_transform(data)
ΞΌ = mean(data)
Ο = std(data)
z(d) = (d .- ΞΌ) ./ Ο
unz(d) = d .* Ο .+ ΞΌ
return z, unz
end
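# Usage sketch (hypothetical data): build forward and inverse transforms.
#   z, unz = zscore_transform([2.0, 4.0, 6.0])
#   z([2.0, 4.0, 6.0])          # -> [-1.0, 0.0, 1.0]
#   unz([-1.0, 0.0, 1.0])       # -> [2.0, 4.0, 6.0]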
"""
# meanlowerupper
Compute a NamedTuple with means, lower and upper PI values.
$(SIGNATURES)
"""
function meanlowerupper(data, PI = (0.055, 0.945))
m = mean.(eachrow(data))
lower = quantile.(eachrow(data), PI[1])
upper = quantile.(eachrow(data), PI[2])
return (mean = m,
lower = lower,
upper = upper,
raw = data)
end
function estimparam(data, PI = (0.055, 0.945))
m = mean.(eachcol(data))
lower = quantile.(eachcol(data), PI[1])
upper = quantile.(eachcol(data), PI[2])
return m, lower, upper
end
function lin(a, b, c, x...)
result = @. a + b * c
for i in 1:2:length(x)
@. result += x[i] * x[i+1]
end
return result
end
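# Usage sketch: lin(a, b, x) computes a .+ b .* x; extra trailing arguments
# are treated as slope/predictor pairs, e.g. lin(a, b, x, b2, x2).
#   lin(1.0, 2.0, [0.0, 1.0])   # -> [1.0, 3.0]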
"""
# create_observation_matrix
Create a polynomial observation matrix.
$(SIGNATURES)
"""
function create_observation_matrix(x::Vector, k::Int)
n = length(x)
m = reshape(x, n, 1)
for i in 2:k
m = hcat(m, x.^i)
end
m
end
"""
# var2
Variance without n-1 correction.
$(SIGNATURES)
"""
var2(x) = mean(x.^2) .- mean(x)^2
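# Example: var2([1.0, 2.0, 3.0]) returns 2/3 (the population variance),
# whereas Statistics.var would return 1.0 (the n-1 corrected estimate).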
"""
# r2_is_bad
Compute R^2 values.
$(SIGNATURES)
"""
function r2_is_bad(model::NamedTuple, df::DataFrame)
s = mean(model.mu, dims=2)
r = s - df.brain_s
round(1 - var2(r) / var2(df.brain_s), digits=2)
end
"""
# PI
Compute percentile central interval of data. Returns vector of bounds.
$(SIGNATURES)
## Required arguments
* `data`: iterable over data values
## Optional arguments
* `perc_prob::Float64=0.89`: percentile interval to calculate
## Examples
```jldoctest
julia> using StatisticalRethinking
julia> PI(1:10)
2-element Vector{Float64}:
1.495
9.505
julia> PI(1:10; perc_prob=0.1)
2-element Vector{Float64}:
5.05
5.95
```
"""
function PI(data; perc_prob::Float64=0.89)
d = (1-perc_prob)/2
quantile(data, [d, 1-d])
end
export
zscore_transform,
meanlowerupper,
estimparam,
lin,
create_observation_matrix,
r2_is_bad,
PI
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 1942 | using Distributions
# ### snippet 9.6
function HMC(model, grad, epsilon, L, current_q)
q = current_q
p = rand(Normal(0, 1), length(q)) # random flick - p is momentum
#p = [0.96484367, -0.06740435]
current_p = p
# Make a half step for momentum at the beginning
v, g = LogDensityProblems.logdensity_and_gradient(grad, q)
value_U = -v; grad_U = -g
p = p - epsilon .* grad_U / 2.0
# Initialize bookkeeping
qtraj = zeros(L+1, length(q)+3)
qtraj[1, :] = [q[1], q[2], value_U, grad_U[1], grad_U[2]]
ptraj = zeros(L+1, length(q))
ptraj[1, :] = p
# ### snippet 9.7
# Alternate full steps for position and momentum
for i in 1:L
# Full position step
q = q + epsilon .* p
# Full step for momentum, except at the last step
if i !== L
v, g = LogDensityProblems.logdensity_and_gradient(grad, q)
value_U = -v; grad_U = -g
p = p - epsilon .* grad_U
ptraj[i+1, :] = p
end
qtraj[i+1, :] = [q[1], q[2], value_U, grad_U[1], grad_U[2]]
end
# ### snippet 9.8
# Make a half step for momentum at the end
v, g = LogDensityProblems.logdensity_and_gradient(grad, q)
value_U = -v; grad_U = -g
p = p - epsilon .* grad_U / 2.0
ptraj[L+1, :] = p
# Negate momentum to make the proposal symmetric
p = -p
# Evaluate potential and kinetic energies at the beginning and end
current_U = -LogDensityProblems.logdensity(grad, [current_q[1], current_q[2]])
current_K = sum(current_p .^ 2) / 2
proposed_U = -LogDensityProblems.logdensity(grad, [q[1], q[2]])
proposed_K = sum(p .^ 2) / 2
dH = proposed_U + proposed_K - current_U - current_K
# Accept or reject the state at the end of trajectory
# Return either position at the end or initial position
local accept = 0
local new_q
if rand(Uniform(0, 1)) < exp(-dH) # Accept with probability min(1, exp(-dH))
new_q = q # Accept
accept = 1
else
new_q = current_q # Reject
end
(new_q, ptraj, qtraj, accept, dH)
end
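# Usage sketch (hypothetical setup): `grad` is assumed to be a
# LogDensityProblems-compatible object (e.g. from ADgradient) and `q0` the
# current 2-element position vector (the bookkeeping assumes two parameters).
# One call performs a single HMC transition:
#   new_q, ptraj, qtraj, accept, dH = HMC(model, grad, 0.03, 11, q0)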
export
HMC | StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 45 | using .LogDensityProblems
include("HMC.jl")
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 72 | using .MCMCChains
include("pluto_helpers.jl")
#include("particles.jl")
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 213 | using .StanSample
include("stan_precis.jl")
include("stan_compare.jl")
include("stan_psis.jl") # ParetoSmoothedImportanceSampling.jl based
#include("stan_axiskeys.jl") # ParetoSmoothedImportanceSampling.jl based
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 1836 | using AxisKeys
import ParetoSmooth: loo_compare, psis_loo # psis_loo needed to dispatch on plain arrays below
function psis_loo(model::SampleModel; loglikelihood_name="loglik")
chains = read_samples(model, :keyedarray) # Obtain KeyedArray chains
psis_loo(chains; loglikelihood_name)
end
function psis_loo(chains::T; loglikelihood_name="loglik") where {T <: KeyedArray}
ll = Array(matrix(chains, loglikelihood_name)) # Extract loglik matrix
ll_p = to_paretosmooth(ll) # Permute dims for ParetoSmooth
psis_loo(ll_p) # Compute PsisLoo for the model
end
# KeyedArray chains are [draws, chains, params] while ParetoSmooth
# expects [params, draws, chains].
function to_paretosmooth(ll, pd = [3, 1, 2])
permutedims(ll, pd)
end
function loo_compare(models::Vector{SampleModel};
loglikelihood_name="loglik",
model_names=nothing,
sort_models=true,
show_psis=true)
nmodels = length(models)
if isnothing(model_names) # Respect user-supplied model names
model_names = [m.name for m in models]
end
chains_vec = read_samples.(models, :keyedarray) # Obtain KeyedArray chains
loo_compare(chains_vec; loglikelihood_name, model_names, sort_models, show_psis)
end
function loo_compare(chains_vec::Vector{<: KeyedArray};
loglikelihood_name="loglik",
model_names=nothing,
sort_models=true,
show_psis=true)
nmodels = length(chains_vec)
ll_vec = Array.(matrix.(chains_vec, loglikelihood_name)) # Extract loglik matrix
ll_vecp = map(to_paretosmooth, ll_vec) # Permute dims for ParetoSmooth
psis_vec = psis_loo.(ll_vecp) # Compute PsisLoo for all models
if show_psis # If a printout is needed
for i in 1:nmodels
psis_vec[i] |> display
end
end
loo_compare(psis_vec...; model_names, sort_models)
end
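# Usage sketch (hypothetical fitted models): compare PSIS-LOO results
# across SampleModels, suppressing the per-model PSIS printout.
#   result = loo_compare([m5_1s, m5_2s, m5_3s]; show_psis=false)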
CHNS(chns::KeyedArray) = Text(sprint(show, "text/plain", chns))
export
to_paretosmooth,
psis_loo,
loo_compare,
CHNS
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 775 | """
# compare
Compare waic and psis values for models.
$(SIGNATURES)
### Required arguments
```julia
* `models` : Vector of SampleModels
* `type` : Either :waic or :psis
```
### Return values
```julia
* `df` : DataFrame with statistics
```
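### Example
```julia
# Hypothetical fitted SampleModels m5_1s, m5_2s and m5_3s
df = compare([m5_1s, m5_2s, m5_3s], :psis)
```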
"""
function compare(models::Vector{SampleModel}, type::Symbol)
mnames = AbstractString[]
lps = Matrix{Float64}[]
for m in models
nt = read_samples(m, :namedtuple)
if :loglik in keys(nt)
push!(mnames, m.name)
push!(lps, Matrix(nt.loglik'))
else
@warn "Model $(m.name) does not produce a loglik matrix."
end
end
compare(lps, type; mnames)
end
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 190 | """
# precis
$(SIGNATURES)
"""
function precis(sm::SampleModel; io = stdout, digits = 2, depth = Inf, alpha = 0.11)
df = read_samples(sm, :dataframe)
precis(df)
end
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 627 | import ParetoSmoothedImportanceSampling: psisloo, waic
function psisloo(m::SampleModel, wcpp::Int64=20, wtrunc::Float64=3/4)
nt = read_samples(m, :namedtuple)
if :loglik in keys(nt)
lp = Matrix(nt.loglik')
else
@warn "Model $(m.name) does not compute a loglik matrix."
return nothing
end
psisloo(lp, wcpp, wtrunc)
end
function waic(m::SampleModel; pointwise=false)
nt = read_samples(m, :namedtuple)
if :loglik in keys(nt)
lp = Matrix(nt.loglik')
else
@warn "Model $(m.name) does not compute a loglik matrix."
return nothing
end
waic(lp; pointwise)
end
export
waic,
psisloo | StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 623 | function convert_a3d(a3d_array, cnames, ::Val{:mcmcchains}; start=1, kwargs...)
cnames = String.(cnames)
internals = filter(c -> length(c) > 2 && c[end-1:end] == "__", cnames)
params = filter(c -> !(c in internals), cnames)
MCMCChains.Chains(a3d_array,
cnames,
Dict(
:parameters => params,
:internals => internals
);
start=start
)
end
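# Usage sketch: `a3d` is a draws x params x chains Array and `cnames` holds
# the column names; names ending in "__" (e.g. "lp__") follow the Stan
# convention for internal quantities.
#   chns = convert_a3d(a3d, cnames, Val(:mcmcchains))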
function create_mcmcchains(a3d, cnames;start=1)
Chains(a3d, cnames; start=start)
end
function create_mcmcchains(a3d, cnames, sections::Dict{Symbol, Vector{String}};
start=1)
Chains(a3d, cnames, sections; start=start)
end
export
convert_a3d,
create_mcmcchains | StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 196 | using .Turing
include("turing_quap.jl")
include("turing_precis.jl")
include("turing_plotcoef.jl")
include("turing_dataframe.jl")
include("turing_optim_sample.jl")
#include("turing_particles.jl")
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 154 | import DataFrames: DataFrame
DataFrame(m::MCMCChains.Chains; section=:parameters) =
DataFrame(Dict(n => vec(m[n].data) for n in names(m, section)))
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 1699 | import StatsBase: sample
using Optim
import .TuringOptimExt: ModeResult
import LinearAlgebra: Symmetric
import Dates: now # Used for the timing info attached to the Chains object
"""
# sample
Sample from ModeResult - optimisation result produced by Turing mode estimation.
$(SIGNATURES)
## Required arguments
* `m::ModeResult`: mode estimation result
* `n::Integer`: number of samples to draw
## Return values
`Chains` object with drawn samples
## Examples
```jldoctest
julia> using Optim, Turing, StatisticalRethinking
julia> @model function f(x)
a ~ Normal()
x ~ Normal(a)
end
f (generic function with 2 methods)
julia> m = optimize(f([1,1.1]), MLE())
ModeResult with maximized lp of -1.84
1-element Named Vector{Float64}
A  │
───┼─────
:a │ 1.05
julia> sample(m, 10)
Chains MCMC chain (10×1×1 reshape(adjoint(::Matrix{Float64}), 10, 1, 1) with eltype Float64):
Iterations = 1:1:10
Number of chains = 1
Samples per chain = 10
Wall duration = 0.0 seconds
Compute duration = 0.0 seconds
parameters = a
Summary Statistics
parameters mean std naive_se mcse ess rhat ess_per_sec
Symbol Float64 Float64 Float64 Float64 Float64 Float64 Float64
a 0.7186 0.5911 0.1869 0.1717 9.4699 0.9243 9469.8745
Quantiles
parameters 2.5% 25.0% 50.0% 75.0% 97.5%
Symbol Float64 Float64 Float64 Float64 Float64
a 0.0142 0.3041 0.6623 0.9987 1.7123
```
"""
function sample(m::ModeResult, n::Int)::Chains
st = now()
ΞΌ = coef(m)
Ξ£ = Symmetric(vcov(m))
dist = MvNormal(ΞΌ, Ξ£)
Chains(rand(dist, n)', coefnames(m), info=(start_time=st, stop_time=now()))
end
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 971 | using MonteCarloMeasurements, MCMCChains, AxisArrays
# This is copied and shortened from Turing2MonteCarloChains.jl!
function MonteCarloMeasurements.Particles(t::NTuple{N, <:AxisArrays.AxisArray}; start=0) where N
[adapted_particles(t[i].data[start+1:end,:]) for i in 1:N]
end
function MonteCarloMeasurements.Particles(a::AxisArrays.AxisArray; start=0)
adapted_particles(a.data[start+1:end,:])
end
"""
    Particles(chain::Chains; start=0)

Return a named tuple of particles or vector of particles where the keys
are the symbols in the Turing model that produced the chain. `start>0` can
be given to discard a number of samples from the beginning of the chain.
"""
function MonteCarloMeasurements.Particles(chain::Chains; start=0)
p = get_params(chain)
(;collect((k => Particles(getfield(p,k); start=start) for k in keys(p)))...)
end
function adapted_particles(v)
T = float(typeof(v[1]))
Particles(vec(T.(v)))
end
export
Particles
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 4905 | """
# plotcoef
Multiple regression coefficient plot for Turing models.
$(SIGNATURES)
### Required arguments
```julia
* `models` : Vector of Turing models to compare
* `pars` : Vector of parameters to include in comparison
```
### Optional arguments
```julia
* `fig=""` : File to store plot
* `title=""` : Title for plot
* `sampler=NUTS(0.65)` : Sampler used for sampling
* `nsamples=2000` : No of samples (1000 warmup, 1000 used)
* `nchains=4` : No of chains used
* `func=nothing` : Function to apply to sample df
* `quap_samples=10000` : Default no of quap simulation samples
```
Currently the only supported function is `quap`, which constructs
a quap model and simulates `quap_samples` draws.
### Return values
```julia
* `(res, fig)` : (particles, plot)
```
"""
function plotcoef(
models::Vector{T},
pars::Vector{Symbol};
fig="", title="",
sampler=NUTS(0.65), nsamples=2000, nchains=4,
func=nothing, quap_samples=10000) where {T <: DynamicPPL.Model}
mnames = String.(nameof.(models))
levels = length(models) * (length(pars) + 1)
colors = [:blue, :red, :green, :darkred, :black]
s = Vector{NamedTuple}(undef, length(models))
for (mindx, mdl) in enumerate(models)
if isnothing(func)
chns = mapreduce(c -> sample(mdl, sampler, nsamples),
chainscat, 1:nchains)
df = DataFrame(Array(chns), names(chns, [:parameters]))
m, l, u = estimparam(df)
d = Dict{Symbol, NamedTuple}()
for (indx, par) in enumerate(names(chns, [:parameters]))
d[par] = (mean=m[indx], lower=l[indx], upper=u[indx])
end
s[mindx] = (; d...)
else
quap_mdl = quap(mdl)
post = rand(quap_mdl.distr, quap_samples)
df = DataFrame(post', [keys(quap_mdl.coef)...])
m, l, u = estimparam(df)
d = Dict{Symbol, NamedTuple}()
for (indx, par) in enumerate([keys(quap_mdl.coef)...])
d[par] = (mean=m[indx], lower=l[indx], upper=u[indx])
end
s[mindx] = (; d...)
end
end
xmin = 0.0; xmax = 0.0
for i in 1:length(s)
for par in pars
syms = Symbol.(keys(s[i]))
if Symbol(par) in syms
xmin = min(xmin, s[i][par].lower)
xmax = max(xmax, s[i][par].upper)
end
end
end
ylabs = String[]
for j in 1:length(models)
for i in 1:length(pars)
l = length(String(pars[i]))
str = repeat(" ", max(0, 9-l)) * String(pars[i])
append!(ylabs, [str])
end
l = length(mnames[j])
str = mnames[j] * repeat(" ", max(0, 9-l))
append!(ylabs, [str])
end
ys = [string(ylabs[i]) for i = 1:length(ylabs)]
p = plot(xlims=(xmin, xmax), leg=false, framestyle=:grid)
title!(title)
yran = range(1, stop=length(ylabs), length=length(ys))
yticks!(yran, ys)
line = 0
for (mindx, model) in enumerate(models)
line += 1
hline!([line] .+ length(pars), color=:darkgrey, line=(2, :dash))
for (pindx, par) in enumerate(pars)
line += 1
syms = Symbol.(keys(s[mindx]))
if Symbol(par) in syms
ypos = (line - 1)
mp = s[mindx][Symbol(par)].mean
lower = s[mindx][Symbol(par)].lower
upper = s[mindx][Symbol(par)].upper
plot!([lower, upper], [ypos, ypos], leg=false, color=colors[pindx])
scatter!([mp], [ypos], color=colors[pindx])
end
end
end
if length(fig) > 0
savefig(p, fig)
end
(s, p)
end
"""
# plotcoef
Multiple regression coefficient plot for a single Turing model.
$(SIGNATURES)
### Required arguments
```julia
* `model` : Turing model to display
* `pars` : Vector of parameters to include in comparison
```
### Optional arguments
```julia
* `fig=""` : File to store plot
* `title=""` : Title for plot
* `sampler=NUTS(0.65)` : Sampler used for sampling
* `nsamples=2000` : No of samples (1000 warmup, 1000 used)
* `nchains=4` : No of chains used
* `func=nothing` : Function to apply to sample df
```
Currently the only supported function is `quap`, which constructs
a quap model and simulates `quap_samples` draws.
### Return values
```julia
* `(res, fig)` : (particles, plot)
```
"""
function plotcoef(
mdl::DynamicPPL.Model,
pars::Vector{Symbol};
fig="", title="",
sampler=NUTS(0.65), nsamples=2000, nchains=4,
func=nothing, quap_samples=10000)
plotcoef([mdl], pars;
fig, title, sampler, nsamples, nchains, func, quap_samples)
end
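# Usage sketch (hypothetical Turing models m5_1, m5_2 with parameters :a, :bA):
#   res, fig = plotcoef([m5_1, m5_2], [:a, :bA]; title="m5.1 vs m5.2")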
export
plotcoef
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |