# ParallelUtilities.jl v0.8.6 (MIT license)
# https://github.com/jishnub/ParallelUtilities.jl.git

# Store the rank with the value, necessary for collecting values in order
struct pval{T}
rank :: Int
errorstatus :: Bool
value :: T
end
pval{T}(p::pval{T}) where {T} = p
pval{T}(p::pval) where {T} = pval{T}(p.rank, p.errorstatus, convert(T, p.value))
errorpval(rank) = pval(rank, true, nothing)
errorstatus(p::pval) = p.errorstatus
# Function to obtain the value of pval types
value(p::pval) = p.value
value(p::Any) = p
struct Product{I}
iterators :: I
end
getiterators(p::Product) = p.iterators
Base.length(p::Product) = length(Iterators.product(p.iterators...))
Base.iterate(p::Product, st...) = iterate(Iterators.product(p.iterators...), st...)
function product(iter...)
any(x -> x isa Product, iter) && throw(ArgumentError("the iterators should not be Products"))
Product(iter)
end
struct Hold{T}
iterators :: T
end
getiterators(h::Hold) = getiterators(h.iterators)
Base.length(h::Hold) = length(h.iterators)
function check_knownsize(iterator)
itsz = Base.IteratorSize(iterator)
itsz isa Base.HasLength || itsz isa Base.HasShape
end
struct ZipSplit{Z, I}
z :: Z
it :: I
skip :: Int
N :: Int
end
# This constructor differs from zipsplit in that it accepts the numbers of skipped and retained
# elements rather than np and p. It is added for compatibility with SplittablesBase
function ZipSplit(itzip, skipped_elements::Integer, elements_on_proc::Integer)
it = Iterators.take(Iterators.drop(itzip, skipped_elements), elements_on_proc)
ZipSplit{typeof(itzip), typeof(it)}(itzip, it, skipped_elements, elements_on_proc)
end
Base.length(zs::ZipSplit) = length(zs.it)
Base.eltype(zs::ZipSplit) = eltype(zs.it)
Base.iterate(z::ZipSplit, i...) = iterate(takedrop(z), i...)
takedrop(zs::ZipSplit) = zs.it
function SplittablesBase.halve(zs::ZipSplit)
nleft = zs.N ÷ 2
ZipSplit(zs.z, zs.skip, nleft), ZipSplit(zs.z, zs.skip + nleft, zs.N - nleft)
end
zipsplit(iterators::Tuple, np::Integer, p::Integer) = zipsplit(zip(iterators...), np, p)
function zipsplit(itzip::Iterators.Zip, np::Integer, p::Integer)
check_knownsize(itzip) || throw(ArgumentError("the zipped iterators must have a known size"))
d,r = divrem(length(itzip), np)
skipped_elements = d*(p-1) + min(r,p-1)
lastind = d*p + min(r,p)
elements_on_proc = lastind - skipped_elements
ZipSplit(itzip, skipped_elements, elements_on_proc)
end
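#= Example (a sketch): splitting zip(1:5, 10:14) over np = 2 workers. With
d, r = divrem(5, 2) == (2, 1), rank 1 receives 3 elements and rank 2 receives 2:
    collect(zipsplit((1:5, 10:14), 2, 1)) # [(1, 10), (2, 11), (3, 12)]
    collect(zipsplit((1:5, 10:14), 2, 2)) # [(4, 13), (5, 14)]
=#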
_split_iterators(iterators, np, p) = (zipsplit(iterators, np, p),)
function _split_iterators(iterators::Tuple{Hold{<:Product}}, np, p)
it_hold = first(iterators)
(ProductSplit(getiterators(it_hold), np, p), )
end
############################################################################################
# Local mapreduce
############################################################################################
struct NoSplat <: Function
f :: Function
end
NoSplat(u::NoSplat) = u
_maybesplat(f) = Base.splat(f)
_maybesplat(f::NoSplat) = f
_mapreduce(f, op, iterators...; reducekw...) = mapreduce(f, op, iterators...; reducekw...)
function _mapreduce(fun::NoSplat, op, iterators...; reducekw...)
mapval = fun.f(iterators...)
reduce(op, (mapval,); reducekw...)
end
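#= Example (a sketch): a NoSplat function receives the iterators themselves rather
than splatted elements, so the map step runs once on the whole local collection:
    _mapreduce(NoSplat(sum), +, [1, 2, 3]) # sum([1, 2, 3]) == 6
    _mapreduce(Base.splat(+), +, zip(1:2, 3:4)) # (1 + 3) + (2 + 4) == 10
=#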
function mapreducenode(f, op, rank, pipe::BranchChannel, selfoutchannel, iterators...; reducekw...)
# Evaluate the function
# No communication with other nodes happens here
try
fmap = _maybesplat(f)
if rank == 1
res = _mapreduce(fmap, op, iterators...; reducekw...)
else
# init should only be used once on the first rank
# remove it from the kwargs on other workers
kwdict = Dict(reducekw)
pop!(kwdict, :init, nothing)
res = _mapreduce(fmap, op, iterators...; kwdict...)
end
val = pval(rank, false, res)
put!(selfoutchannel, val)
catch
put!(selfoutchannel, errorpval(rank))
rethrow()
end
end
############################################################################################
# Reduction across workers
############################################################################################
abstract type ReductionNode end
struct TopTreeNode <: ReductionNode
rank :: Int
end
struct SubTreeNode <: ReductionNode
rank :: Int
end
_maybesort(op::Commutative, vals) = vals
_maybesort(op, vals) = sort!(vals, by = pv -> pv.rank)
function reducechannel(op, c, N; reducekw...)
vals = [take!(c) for i = 1:N]
vals = _maybesort(op, vals)
v = [value(v) for v in vals]
reduce(op, v; reducekw...)
end
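#= Example (a sketch): reducechannel drains N ranked values from a channel and
reduces them, sorting by rank first unless the operator is tagged Commutative:
    c = Channel(3)
    foreach(r -> put!(c, pval(r, false, 10r)), (2, 1, 3))
    reducechannel(vcat, c, 3) # [10, 20, 30], in rank order
=#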
seterrorflag(c, val) = put!(c, take!(c) | val)
function reducedvalue(op, node::SubTreeNode, pipe::BranchChannel, selfoutchannel; reducekw...)
rank = node.rank
N = nchildren(pipe) + 1
err_ch = Channel{Bool}(1)
put!(err_ch, false)
self_pval = take!(selfoutchannel)
if errorstatus(self_pval)
return errorpval(rank)
else
put!(selfoutchannel, self_pval)
end
@sync for i = 1:nchildren(pipe)
@async begin
child_pval = take!(pipe.childrenchannel)
if errorstatus(child_pval)
seterrorflag(err_ch, true)
else
put!(selfoutchannel, child_pval)
seterrorflag(err_ch, false)
end
end
end
take!(err_ch) && return errorpval(rank)
redval = reducechannel(op, selfoutchannel, N; reducekw...)
return pval(rank, false, redval)
end
function reducedvalue(op, node::TopTreeNode, pipe::BranchChannel, ::Any; reducekw...)
rank = node.rank
N = nchildren(pipe)
c = Channel(N)
err_ch = Channel{Bool}(1)
put!(err_ch, false)
@sync for i in 1:N
@async begin
child_pval = take!(pipe.childrenchannel)
if errorstatus(child_pval)
seterrorflag(err_ch, true)
else
put!(c, child_pval)
seterrorflag(err_ch, false)
end
end
end
take!(err_ch) && return errorpval(rank)
redval = reducechannel(op, c, N; reducekw...)
return pval(rank, false, redval)
end
function reducenode(op, node::ReductionNode, pipe::BranchChannel, selfoutchannel = nothing; kwargs...)
# This function communicates with the parent and children
rank = node.rank
try
kwdict = Dict(kwargs)
pop!(kwdict, :init, nothing)
res = reducedvalue(op, node, pipe, selfoutchannel; kwdict...)
put!(pipe.parentchannel, res)
catch
put!(pipe.parentchannel, errorpval(rank))
rethrow()
finally
GC.gc()
end
return nothing
end
function pmapreduceworkers(f, op, tree_branches, iterators; reducekw...)
tree, branches = tree_branches
nworkerstree = nworkers(tree)
extrareducenodes = length(tree) - nworkerstree
@sync for (ind, mypipe) in enumerate(branches)
p = mypipe.p
ind_reduced = ind - extrareducenodes
rank = ind_reduced
if ind_reduced > 0
iterable_on_proc = _split_iterators(iterators, nworkerstree, rank)
@spawnat p begin
selfoutchannel = Channel(nchildren(mypipe) + 1)
@sync begin
@async mapreducenode(f, op, rank, mypipe, selfoutchannel, iterable_on_proc...; reducekw...)
@async reducenode(op, SubTreeNode(rank), mypipe, selfoutchannel; reducekw...)
end
end
else
@spawnat p reducenode(op, TopTreeNode(rank), mypipe; reducekw...)
end
end
tb = topbranch(tree, branches)
value(take!(tb.parentchannel))
end
"""
pmapreduce(f, op, [pool::AbstractWorkerPool], iterators...; reducekw...)
Evaluate a parallel `mapreduce` over the elements from `iterators`.
For multiple iterators, apply `f` elementwise.
The keyword arguments `reducekw` are passed on to the reduction.
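# Examples
```julia
# a sketch, assuming worker processes have been added with `addprocs`, and that the
# package is loaded on all of them with `@everywhere using ParallelUtilities`
pmapreduce(x -> x^2, +, 1:100) # == sum(abs2, 1:100) == 338350
```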
See also: [`pmapreduce_productsplit`](@ref)
"""
function pmapreduce(f, op, pool::AbstractWorkerPool, iterators...; reducekw...)
N = length(zip(iterators...))
if N <= 1 || nworkers(pool) == 1
iterable_on_proc = _split_iterators(iterators, 1, 1)
fmap = _maybesplat(f)
if nprocs() == 1 # no workers added
return _mapreduce(fmap, op, iterable_on_proc...; reducekw...)
else # one worker or single-valued iterator
return @fetchfrom workers(pool)[1] _mapreduce(fmap, op, iterable_on_proc...; reducekw...)
end
end
tree_branches = createbranchchannels(pool, N)
pmapreduceworkers(f, op, tree_branches, iterators; reducekw...)
end
function pmapreduce(f, op, iterators...; reducekw...)
N = length(zip(iterators...))
pool = maybetrimmedworkerpool(workers(), N)
pmapreduce(f, op, pool, iterators...; reducekw...)
end
"""
pmapreduce_productsplit(f, op, [pool::AbstractWorkerPool], iterators...; reducekw...)
Evaluate a parallel mapreduce over the outer product of elements from `iterators`.
The product of `iterators` is split over the workers available, and each worker is assigned a section
of the product. The function `f` should accept a single argument that is a collection of `Tuple`s.
The keyword arguments `reducekw` are passed on to the reduction.
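# Examples
```julia
# a sketch, assuming workers have been added; each worker receives a `ProductSplit`
# section of the outer product, and `f` maps it to one local value before reduction
pmapreduce_productsplit(ps -> sum(prod, ps), +, 1:3, 1:4) # == sum(i*j for i in 1:3, j in 1:4) == 60
```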
See also: [`pmapreduce`](@ref)
"""
pmapreduce_productsplit(f, op, pool::AbstractWorkerPool, iterators...; reducekw...) =
pmapreduce(NoSplat(f), op, pool, Hold(product(iterators...)); reducekw...)
function pmapreduce_productsplit(f, op, iterators...; reducekw...)
N = length(product(iterators...))
pool = maybetrimmedworkerpool(workers(), N)
pmapreduce_productsplit(f, op, pool, iterators...; reducekw...)
end
"""
pmapbatch(f, [pool::AbstractWorkerPool], iterators...)
Carry out a `pmap` with the `iterators` divided evenly among the available workers.
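# Examples
```julia
# a sketch, assuming workers have been added
pmapbatch(x -> x^2, 1:4) # == [1, 4, 9, 16]
```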
See also: [`pmapreduce`](@ref)
"""
function pmapbatch(f, pool::AbstractWorkerPool, iterators...)
pmapreduce((x...) -> [f(x...)], vcat, pool, iterators...)
end
function pmapbatch(f, iterators...)
N = length(zip(iterators...))
pool = maybetrimmedworkerpool(workers(), N)
pmapbatch(f, pool, iterators...)
end
"""
pmapbatch_productsplit(f, [pool::AbstractWorkerPool], iterators...)
Carry out a `pmap` with the outer product of `iterators` divided evenly among the available workers.
The function `f` must accept a collection of `Tuple`s.
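# Examples
```julia
# a sketch, assuming workers have been added; the result concatenates one value
# per worker, so its length equals the number of participating workers
pmapbatch_productsplit(ps -> sum(prod, ps), 1:2, 1:2) # == [9] on a single worker
```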
See also: [`pmapbatch`](@ref), [`pmapreduce_productsplit`](@ref)
"""
function pmapbatch_productsplit(f, pool::AbstractWorkerPool, iterators...)
pmapreduce_productsplit(x -> [f(x)], vcat, pool, iterators...)
end
function pmapbatch_productsplit(f, iterators...)
N = length(product(iterators...))
pool = maybetrimmedworkerpool(workers(), N)
pmapbatch_productsplit(f, pool, iterators...)
end
struct TaskNotPresentError{T,U} <: Exception
t :: T
task :: U
end
function Base.showerror(io::IO, err::TaskNotPresentError)
print(io, "could not find the task $(err.task) in the list $(err.t)")
end
"""
AbstractConstrainedProduct{T, N, Q}
Supertype of [`ProductSplit`](@ref) and [`ProductSection`](@ref).
"""
abstract type AbstractConstrainedProduct{T, N, Q} end
Base.eltype(::AbstractConstrainedProduct{T}) where {T} = T
_niterators(::AbstractConstrainedProduct{<:Any, N}) where {N} = N
const IncreasingAbstractConstrainedProduct{T, N} =
AbstractConstrainedProduct{T, N, <:NTuple{N, AbstractUnitRange}}
"""
ProductSection{T, N, Q<:NTuple{N,AbstractRange}}
Iterator that loops over a specified section of the
outer product of the ranges provided. If the ranges are strictly increasing, the
iteration is in reverse-lexicographic order.
Given `N` ranges, each element returned by the iterator will be
a tuple of length `N` with one element from each range.
See also: [`ProductSplit`](@ref)
"""
struct ProductSection{T, N, Q <: NTuple{N,AbstractRange}} <: AbstractConstrainedProduct{T, N, Q}
iterators :: Q
togglelevels :: NTuple{N, Int}
firstind :: Int
lastind :: Int
function ProductSection(iterators::Tuple{Vararg{AbstractRange, N}}, togglelevels::NTuple{N, Int},
firstind::Int, lastind::Int) where {N}
# Ensure that all the iterators are strictly increasing
all(x->step(x)>0, iterators) ||
throw(ArgumentError("all the ranges need to be strictly increasing"))
T = Tuple{map(eltype, iterators)...}
new{T, N, typeof(iterators)}(iterators, togglelevels, firstind, lastind)
end
end
function _cumprod(len::Tuple)
(0, _cumprod(first(len), Base.tail(len))...)
end
_cumprod(::Integer,::Tuple{}) = ()
function _cumprod(n::Integer, tl::Tuple)
(n, _cumprod(n*first(tl), Base.tail(tl))...)
end
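#= Example (a sketch): _cumprod computes the cumulative products of the range
lengths, used as the `togglelevels` stride table (the leading 0 is a sentinel):
    _cumprod((3, 4, 5)) # (0, 3, 12)
=#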
function takedrop(ps::ProductSection)
drop = ps.firstind - 1
take = ps.lastind - ps.firstind + 1
Iterators.take(Iterators.drop(Iterators.product(ps.iterators...), drop), take)
end
"""
ProductSection(iterators::Tuple{Vararg{AbstractRange}}, inds::AbstractUnitRange)
Construct a `ProductSection` iterator that represents a 1D view of the outer product
of the ranges provided in `iterators`, with the range of indices in the view being
specified by `inds`.
# Examples
```jldoctest
julia> p = ParallelUtilities.ProductSection((1:3, 4:6), 5:8);
julia> collect(p)
4-element $(Vector{Tuple{Int, Int}}):
(2, 5)
(3, 5)
(1, 6)
(2, 6)
julia> collect(p) == collect(Iterators.product(1:3, 4:6))[5:8]
true
```
"""
function ProductSection(iterators::Tuple{Vararg{AbstractRange}}, inds::AbstractUnitRange)
firstind, lastind = first(inds), last(inds)
len = map(length, iterators)
Nel = prod(len)
1 <= firstind || throw(
ArgumentError("the range of indices must start from a number ≥ 1"))
lastind <= Nel || throw(
ArgumentError("the maximum index must be less than or equal to the total number of elements = $Nel"))
togglelevels = _cumprod(len)
ProductSection(iterators, togglelevels, firstind, lastind)
end
ProductSection(::Tuple{}, ::AbstractUnitRange) = throw(ArgumentError("need at least one iterator"))
"""
ProductSplit{T, N, Q<:NTuple{N,AbstractRange}}
Iterator that loops over a section of the outer product of ranges.
If the ranges are strictly increasing, the iteration is in reverse-lexicographic order.
Given `N` ranges, each element returned by the iterator will be
a tuple of length `N` with one element from each range.
See also: [`ProductSection`](@ref)
"""
struct ProductSplit{T, N, Q<:NTuple{N, AbstractRange}} <: AbstractConstrainedProduct{T, N, Q}
ps :: ProductSection{T, N, Q}
np :: Int
p :: Int
function ProductSplit(ps::ProductSection{T, N, Q}, np::Integer, p::Integer) where {T, N, Q}
1 <= p <= np || throw(ArgumentError("processor rank out of range"))
new{T, N, Q}(ps, np, p)
end
end
function nelementsdroptake(len, np, p)
d, r = divrem(len, np)
drop = d*(p - 1) + min(r, p - 1)
lastind = d*p + min(r, p)
take = lastind - drop
drop, take
end
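#= Example (a sketch): distributing 10 elements over np = 3 workers assigns the
remainder element to the first worker, returned as (drop, take) pairs:
    nelementsdroptake(10, 3, 1) # (0, 4)
    nelementsdroptake(10, 3, 2) # (4, 3)
    nelementsdroptake(10, 3, 3) # (7, 3)
=#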
"""
ProductSplit(iterators::Tuple{Vararg{AbstractRange}}, np::Integer, p::Integer)
Construct a `ProductSplit` iterator that represents the outer product
of the iterators split over `np` workers, with this instance representing
the values on the `p`-th worker.
!!! note
`p` here refers to the rank of the worker, and is unrelated to the worker ID obtained by
executing `myid()` on that worker.
# Examples
```jldoctest
julia> ParallelUtilities.ProductSplit((1:2, 4:5), 2, 1) |> collect
2-element $(Vector{Tuple{Int, Int}}):
(1, 4)
(2, 4)
julia> ParallelUtilities.ProductSplit((1:2, 4:5), 2, 2) |> collect
2-element $(Vector{Tuple{Int, Int}}):
(1, 5)
(2, 5)
```
"""
function ProductSplit(iterators::Tuple{Vararg{AbstractRange}}, np::Integer, p::Integer)
# d, r = divrem(prod(length, iterators), np)
# firstind = d*(p - 1) + min(r, p - 1) + 1
# lastind = d*p + min(r, p)
drop, take = nelementsdroptake(prod(length, iterators), np, p)
firstind = drop + 1
lastind = drop + take
ProductSplit(ProductSection(iterators, firstind:lastind), np, p)
end
ProductSplit(::Tuple{}, ::Integer, ::Integer) = throw(ArgumentError("Need at least one iterator"))
takedrop(ps::ProductSplit) = takedrop(ProductSection(ps))
workerrank(ps::ProductSplit) = ps.p
Distributed.nworkers(ps::ProductSplit) = ps.np
ProductSection(ps::ProductSection) = ps
ProductSection(ps::ProductSplit) = ps.ps
getiterators(ps::AbstractConstrainedProduct) = ProductSection(ps).iterators
togglelevels(ps::AbstractConstrainedProduct) = ProductSection(ps).togglelevels
function Base.summary(io::IO, ps::AbstractConstrainedProduct)
print(io, length(ps), "-element ", string(nameof(typeof(ps))))
end
function Base.show(io::IO, ps::AbstractConstrainedProduct)
summary(io, ps)
if !isempty(ps)
print(io, " [", repr(first(ps)) * ", ... , " * repr(last(ps)), "]")
end
end
Base.isempty(ps::AbstractConstrainedProduct) = (firstindexglobal(ps) > lastindexglobal(ps))
function Base.first(ps::AbstractConstrainedProduct)
isempty(ps) && throw(ArgumentError("collection must be non-empty"))
_first(getiterators(ps), childindex(ps, firstindexglobal(ps))...)
end
function _first(t::Tuple, ind::Integer, rest::Integer...)
(1 <= ind <= length(first(t))) || throw(BoundsError(first(t), ind))
(first(t)[ind], _first(Base.tail(t), rest...)...)
end
_first(::Tuple{}) = ()
function Base.last(ps::AbstractConstrainedProduct)
isempty(ps) && throw(ArgumentError("collection must be non-empty"))
_last(getiterators(ps), childindex(ps, lastindexglobal(ps))...)
end
function _last(t::Tuple, ind::Integer, rest::Integer...)
(1 <= ind <= length(first(t))) || throw(BoundsError(first(t), ind))
(first(t)[ind], _last(Base.tail(t), rest...)...)
end
_last(::Tuple{}) = ()
Base.length(ps::AbstractConstrainedProduct) = lastindex(ps)
Base.firstindex(ps::AbstractConstrainedProduct) = 1
Base.lastindex(ps::AbstractConstrainedProduct) = lastindexglobal(ps) - firstindexglobal(ps) + 1
firstindexglobal(ps::AbstractConstrainedProduct) = ProductSection(ps).firstind
lastindexglobal(ps::AbstractConstrainedProduct) = ProductSection(ps).lastind
# SplittablesBase interface
function SplittablesBase.halve(ps::AbstractConstrainedProduct)
iter = getiterators(ps)
firstind = firstindexglobal(ps)
lastind = lastindexglobal(ps)
nleft = length(ps) ÷ 2
firstindleft = firstind
lastindleft = firstind + nleft - 1
firstindright = lastindleft + 1
lastindright = lastind
tl = togglelevels(ps)
ProductSection(iter, tl, firstindleft, lastindleft),
ProductSection(iter, tl, firstindright, lastindright)
end
"""
childindex(ps::AbstractConstrainedProduct, ind)
Return a tuple containing the indices of the individual `AbstractRange`s
corresponding to the element that is present at index `ind` in the
outer product of the ranges.
!!! note
The index `ind` corresponds to the outer product of the ranges, and not to `ps`.
# Examples
```jldoctest
julia> iters = (1:5, 2:4, 1:3);
julia> ps = ParallelUtilities.ProductSplit(iters, 7, 1);
julia> ind = 6;
julia> cinds = ParallelUtilities.childindex(ps, ind)
(1, 2, 1)
julia> v = collect(Iterators.product(iters...));
julia> getindex.(iters, cinds) == v[ind]
true
```
See also: [`childindexshifted`](@ref)
"""
function childindex(ps::AbstractConstrainedProduct, ind)
tl = reverse(Base.tail(togglelevels(ps)))
reverse(childindex(tl, ind))
end
function childindex(tl::Tuple, ind)
t = first(tl)
k = div(ind - 1, t)
(k + 1, childindex(Base.tail(tl), ind - k*t)...)
end
# First iterator gets the final remainder
childindex(::Tuple{}, ind) = (ind,)
"""
childindexshifted(ps::AbstractConstrainedProduct, ind)
Return a tuple containing the indices in the individual iterators
given an index of `ps`.
If the iterators `(r1, r2, ...)` are used to generate
`ps`, then return `(i1, i2, ...)` such that `ps[ind] == (r1[i1], r2[i2], ...)`.
# Examples
```jldoctest
julia> iters = (1:5, 2:4, 1:3);
julia> ps = ParallelUtilities.ProductSplit(iters, 7, 3);
julia> psind = 4;
julia> cinds = ParallelUtilities.childindexshifted(ps, psind)
(3, 1, 2)
julia> getindex.(iters, cinds) == ps[psind]
true
```
See also: [`childindex`](@ref)
"""
function childindexshifted(ps::AbstractConstrainedProduct, ind)
childindex(ps, (ind - 1) + firstindexglobal(ps))
end
function Base.getindex(ps::AbstractConstrainedProduct, ind)
1 <= ind <= length(ps) || throw(BoundsError(ps, ind))
_getindex(ps, childindexshifted(ps, ind)...)
end
# This needs to be a separate function to deal with the case of a single child iterator, in which case
# it's not clear if the single index is for the ProductSplit or the child iterator
# This method asserts that the number of indices is correct
function _getindex(ps::AbstractConstrainedProduct{<:Any, N}, inds::Vararg{Integer, N}) where {N}
_getindex(getiterators(ps), inds...)
end
function _getindex(t::Tuple, ind::Integer, rest::Integer...)
(1 <= ind <= length(first(t))) || throw(BoundsError(first(t), ind))
(first(t)[ind], _getindex(Base.tail(t), rest...)...)
end
_getindex(::Tuple{}, ::Integer...) = ()
function Base.iterate(ps::AbstractConstrainedProduct, state...)
iterate(takedrop(ps), state...)
end
function _firstlastalongdim(ps::AbstractConstrainedProduct, dims,
firstindchild::Tuple = childindex(ps, firstindexglobal(ps)),
lastindchild::Tuple = childindex(ps, lastindexglobal(ps)))
iter = getiterators(ps)[dims]
fic = firstindchild[dims]
lic = lastindchild[dims]
first_iter = iter[fic]
last_iter = iter[lic]
(first_iter, last_iter)
end
function _checkrollover(ps::AbstractConstrainedProduct, dims,
firstindchild::Tuple = childindex(ps, firstindexglobal(ps)),
lastindchild::Tuple = childindex(ps, lastindexglobal(ps)))
_checkrollover(getiterators(ps), dims, firstindchild, lastindchild)
end
function _checkrollover(t::Tuple, dims, firstindchild::Tuple, lastindchild::Tuple)
if dims > 0
return _checkrollover(Base.tail(t), dims - 1, Base.tail(firstindchild), Base.tail(lastindchild))
end
!_checknorollover(reverse(t), reverse(firstindchild), reverse(lastindchild))
end
function _checknorollover(t, firstindchild, lastindchild)
iter = first(t)
first_iter = iter[first(firstindchild)]
last_iter = iter[first(lastindchild)]
(last_iter == first_iter) &
_checknorollover(Base.tail(t), Base.tail(firstindchild), Base.tail(lastindchild))
end
_checknorollover(::Tuple{}, ::Tuple{}, ::Tuple{}) = true
function _nrollovers(ps::AbstractConstrainedProduct, dims::Integer)
dims == _niterators(ps) && return 0
nelements(ps; dims = dims + 1) - 1
end
"""
nelements(ps::AbstractConstrainedProduct{T, N, <:NTuple{N,AbstractUnitRange}}; dims::Integer) where {T,N}
Compute the number of unique values in the section of the `dims`-th range contained in `ps`.
The function is defined currently only for iterator products of `AbstractUnitRange`s.
# Examples
```jldoctest
julia> ps = ParallelUtilities.ProductSplit((1:5, 2:4, 1:3), 7, 3);
julia> collect(ps)
7-element $(Vector{Tuple{Int, Int, Int}}):
(5, 4, 1)
(1, 2, 2)
(2, 2, 2)
(3, 2, 2)
(4, 2, 2)
(5, 2, 2)
(1, 3, 2)
julia> ParallelUtilities.nelements(ps, dims = 1)
5
julia> ParallelUtilities.nelements(ps, dims = 2)
3
julia> ParallelUtilities.nelements(ps, dims = 3)
2
```
"""
function nelements(ps::IncreasingAbstractConstrainedProduct; dims::Integer)
1 <= dims <= _niterators(ps) || throw(ArgumentError("1 ⩽ dims ⩽ N=$(_niterators(ps)) not satisfied for dims=$dims"))
iter = getiterators(ps)[dims]
if _nrollovers(ps, dims) == 0
st = first(ps)[dims]
en = last(ps)[dims]
stind = findfirst(isequal(st), iter)
enind = findfirst(isequal(en), iter)
nel = length(stind:enind)
elseif _nrollovers(ps, dims) > 1
nel = length(iter)
else
st = first(ps)[dims]
en = last(ps)[dims]
stind = findfirst(isequal(st), iter)
enind = findfirst(isequal(en), iter)
if stind > enind
# some elements are missed out
nel = length(stind:length(iter)) + length(1:enind)
else
nel = length(iter)
end
end
return nel
end
"""
maximumelement(ps::AbstractConstrainedProduct; dims::Integer)
Compute the maximum value of the section of the `dims`-th range contained in `ps`.
# Examples
```jldoctest
julia> ps = ParallelUtilities.ProductSplit((1:2, 4:5), 2, 1);
julia> collect(ps)
2-element $(Vector{Tuple{Int, Int}}):
(1, 4)
(2, 4)
julia> ParallelUtilities.maximumelement(ps, dims = 1)
2
julia> ParallelUtilities.maximumelement(ps, dims = 2)
4
```
"""
function maximumelement(ps::IncreasingAbstractConstrainedProduct; dims::Integer)
isempty(ps) && throw(ArgumentError("collection must be non-empty"))
firstindchild = childindex(ps, firstindexglobal(ps))
lastindchild = childindex(ps, lastindexglobal(ps))
_, last_iter = _firstlastalongdim(ps, dims, firstindchild, lastindchild)
v = last_iter
# The last index will not roll over so this can be handled easily
if dims == _niterators(ps)
return v
end
if _checkrollover(ps, dims, firstindchild, lastindchild)
iter = getiterators(ps)[dims]
v = maximum(iter)
end
return v
end
function maximumelement(ps::IncreasingAbstractConstrainedProduct{<:Any, 1})
isempty(ps) && throw(ArgumentError("range must be non-empty"))
lastindchild = childindex(ps, lastindexglobal(ps))
lic_dim = lastindchild[1]
iter = getiterators(ps)[1]
iter[lic_dim]
end
"""
minimumelement(ps::AbstractConstrainedProduct; dims::Integer)
Compute the minimum value of the section of the `dims`-th range contained in `ps`.
# Examples
```jldoctest
julia> ps = ParallelUtilities.ProductSplit((1:2, 4:5), 2, 1);
julia> collect(ps)
2-element $(Vector{Tuple{Int, Int}}):
(1, 4)
(2, 4)
julia> ParallelUtilities.minimumelement(ps, dims = 1)
1
julia> ParallelUtilities.minimumelement(ps, dims = 2)
4
```
"""
function minimumelement(ps::IncreasingAbstractConstrainedProduct; dims::Integer)
isempty(ps) && throw(ArgumentError("collection must be non-empty"))
firstindchild = childindex(ps, firstindexglobal(ps))
lastindchild = childindex(ps, lastindexglobal(ps))
first_iter, last_iter = _firstlastalongdim(ps, dims, firstindchild, lastindchild)
v = first_iter
# The last index will not roll over so this can be handled easily
if dims == _niterators(ps)
return v
end
if _checkrollover(ps, dims, firstindchild, lastindchild)
iter = getiterators(ps)[dims]
v = minimum(iter)
end
return v
end
function minimumelement(ps::IncreasingAbstractConstrainedProduct{<:Any, 1})
isempty(ps) && throw(ArgumentError("range must be non-empty"))
firstindchild = childindex(ps, firstindexglobal(ps))
fic_dim = firstindchild[1]
iter = getiterators(ps)[1]
iter[fic_dim]
end
"""
extremaelement(ps::AbstractConstrainedProduct; dims::Integer)
Compute the `extrema` of the section of the `dims`-th range contained in `ps`.
# Examples
```jldoctest
julia> ps = ParallelUtilities.ProductSplit((1:2, 4:5), 2, 1);
julia> collect(ps)
2-element $(Vector{Tuple{Int, Int}}):
(1, 4)
(2, 4)
julia> ParallelUtilities.extremaelement(ps, dims = 1)
(1, 2)
julia> ParallelUtilities.extremaelement(ps, dims = 2)
(4, 4)
```
"""
function extremaelement(ps::IncreasingAbstractConstrainedProduct; dims::Integer)
isempty(ps) && throw(ArgumentError("collection must be non-empty"))
firstindchild = childindex(ps, firstindexglobal(ps))
lastindchild = childindex(ps, lastindexglobal(ps))
first_iter, last_iter = _firstlastalongdim(ps, dims, firstindchild, lastindchild)
v = (first_iter, last_iter)
# The last index will not roll over so this can be handled easily
if dims == _niterators(ps)
return v
end
if _checkrollover(ps, dims, firstindchild, lastindchild)
iter = getiterators(ps)[dims]
v = extrema(iter)
end
return v
end
function extremaelement(ps::IncreasingAbstractConstrainedProduct{<:Any, 1})
isempty(ps) && throw(ArgumentError("collection must be non-empty"))
firstindchild = childindex(ps, firstindexglobal(ps))
lastindchild = childindex(ps, lastindexglobal(ps))
fic_dim = firstindchild[1]
lic_dim = lastindchild[1]
iter = getiterators(ps)[1]
(iter[fic_dim], iter[lic_dim])
end
for (f, g) in [(:maximumelement, :maximum), (:minimumelement, :minimum), (:extremaelement, :extrema)]
@eval $f(ps::AbstractConstrainedProduct{<:Any, 1}) = $g(first, takedrop(ps))
@eval $f(ps::AbstractConstrainedProduct; dims::Integer) = $g(x -> x[dims], takedrop(ps))
end
"""
extremadims(ps::AbstractConstrainedProduct)
Compute the extrema of the sections of all the ranges contained in `ps`.
Functionally this is equivalent to
```julia
map(i -> extrema(ps, dims = i), 1:_niterators(ps))
```
but it is implemented more efficiently.
Returns a `Tuple` containing the `(min, max)` pairs along each
dimension, such that the `i`-th index of the result contains the `extrema` along the section of the `i`-th range
contained locally.
# Examples
```jldoctest
julia> ps = ParallelUtilities.ProductSplit((1:2, 4:5), 2, 1);
julia> collect(ps)
2-element $(Vector{Tuple{Int, Int}}):
(1, 4)
(2, 4)
julia> ParallelUtilities.extremadims(ps)
((1, 2), (4, 4))
```
"""
function extremadims(ps::AbstractConstrainedProduct)
_extremadims(ps, 1, getiterators(ps))
end
function _extremadims(ps::AbstractConstrainedProduct, dims::Integer, iterators::Tuple)
(extremaelement(ps; dims = dims), _extremadims(ps, dims + 1, Base.tail(iterators))...)
end
_extremadims(::AbstractConstrainedProduct, ::Integer, ::Tuple{}) = ()
"""
extrema_commonlastdim(ps::AbstractConstrainedProduct{T, N, <:NTuple{N,AbstractUnitRange}}) where {T,N}
Return the reverse-lexicographic extrema of values taken from
ranges contained in `ps`, where the pairs of ranges are constructed
by concatenating the ranges along each dimension with the last one.
For two ranges this simply returns `([first(ps)], [last(ps)])`.
# Examples
```jldoctest
julia> ps = ParallelUtilities.ProductSplit((1:3, 4:7, 2:7), 10, 2);
julia> collect(ps)
8-element $(Vector{Tuple{Int, Int, Int}}):
(3, 6, 2)
(1, 7, 2)
(2, 7, 2)
(3, 7, 2)
(1, 4, 3)
(2, 4, 3)
(3, 4, 3)
(1, 5, 3)
julia> ParallelUtilities.extrema_commonlastdim(ps)
$((Tuple{Int,Int}[(1, 2), (6, 2)], Tuple{Int,Int}[(3, 3), (5, 3)]))
```
"""
function extrema_commonlastdim(ps::IncreasingAbstractConstrainedProduct)
isempty(ps) && return nothing
m = extremadims(ps)
lastvar_min, lastvar_max = last(m)
val_first = first(ps)
val_last = last(ps)
min_vals = collect(Base.front(val_first))
max_vals = collect(Base.front(val_last))
for val in ps
val_rev = reverse(val)
lastvar = first(val_rev)
(lastvar_min < lastvar < lastvar_max) && continue
for (ind, vi) in enumerate(Base.tail(val_rev))
if lastvar == lastvar_min
min_vals[_niterators(ps) - ind] = min(min_vals[_niterators(ps) - ind], vi)
end
if lastvar == lastvar_max
max_vals[_niterators(ps) - ind] = max(max_vals[_niterators(ps) - ind], vi)
end
end
end
[(m, lastvar_min) for m in min_vals], [(m, lastvar_max) for m in max_vals]
end
_infullrange(val::T, ps::AbstractConstrainedProduct{T}) where {T} = _infullrange(val, getiterators(ps))
function _infullrange(val, t::Tuple)
first(val) in first(t) && _infullrange(Base.tail(val), Base.tail(t))
end
_infullrange(::Tuple{}, ::Tuple{}) = true
"""
indexinproduct(iterators::NTuple{N, AbstractRange}, val::NTuple{N, Any}) where {N}
Return the index of `val` in the outer product of `iterators`.
Return `nothing` if `val` is not present.
# Examples
```jldoctest
julia> iterators = (1:4, 1:3, 3:5);
julia> val = (2, 2, 4);
julia> ind = ParallelUtilities.indexinproduct(iterators, val)
18
julia> collect(Iterators.product(iterators...))[ind] == val
true
```
"""
function indexinproduct(iterators::NTuple{N, AbstractRange}, val::Tuple{Vararg{Any, N}}) where {N}
all(map(in, val, iterators)) || return nothing
ax = map(x -> 1:length(x), iterators)
individual_inds = map((it, val) -> findfirst(isequal(val), it), iterators, val)
LinearIndices(ax)[individual_inds...]
end
indexinproduct(::Tuple{}, ::Tuple{}) = throw(ArgumentError("need at least one iterator"))
function Base.in(val::T, ps::AbstractConstrainedProduct{T}) where {T}
_infullrange(val, ps) || return false
ind = indexinproduct(getiterators(ps), val)
firstindexglobal(ps) <= ind <= lastindexglobal(ps)
end
function Base.in(val::T, ps::IncreasingAbstractConstrainedProduct{T}) where {T}
_infullrange(val, ps) || return false
ReverseLexicographicTuple(first(ps)) <= ReverseLexicographicTuple(val) <= ReverseLexicographicTuple(last(ps))
end
# This struct is just a wrapper to flip the tuples before comparing
struct ReverseLexicographicTuple{T<:Tuple}
t :: T
end
Base.isless(a::ReverseLexicographicTuple{T}, b::ReverseLexicographicTuple{T}) where {T} = reverse(a.t) < reverse(b.t)
Base.isequal(a::ReverseLexicographicTuple, b::ReverseLexicographicTuple) = a.t == b.t
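#= Example (a sketch): comparisons flip the tuples, making the last element the most
significant, which matches the iteration order of Iterators.product:
    a = ReverseLexicographicTuple((3, 1))
    b = ReverseLexicographicTuple((1, 2))
    a < b # true, as (1, 3) < (2, 1)
=#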
"""
whichproc(iterators::Tuple{Vararg{AbstractRange}}, val::Tuple, np::Integer)
Return the processor rank that will contain `val` if the outer
product of the ranges contained in `iterators` is split evenly
across `np` processors.
# Examples
```jldoctest
julia> iters = (1:4, 2:3);
julia> np = 2;
julia> ParallelUtilities.ProductSplit(iters, np, 2) |> collect
4-element $(Vector{Tuple{Int, Int}}):
(1, 3)
(2, 3)
(3, 3)
(4, 3)
julia> ParallelUtilities.whichproc(iters, (2, 3), np)
2
```
"""
function whichproc(iterators::Tuple{AbstractRange, Vararg{AbstractRange}}, val, np::Integer)
_infullrange(val, iterators) || return nothing
np >= 1 || throw(ArgumentError("np must be >= 1"))
np == 1 && return 1
# We may carry out a binary search as the iterators are sorted
left, right = 1, np
val_t = ReverseLexicographicTuple(val)
while left < right
mid = div(left + right, 2)
ps = ProductSplit(iterators, np, mid)
# If np is greater than the number of tasks then it's possible
# that ps is empty. In this case the value must lie with one of
# the previous workers. Otherwise each worker has some tasks and
# these are sorted, so carry out a binary search
if isempty(ps) || val_t < ReverseLexicographicTuple(first(ps))
right = mid - 1
elseif val_t > ReverseLexicographicTuple(last(ps))
left = mid + 1
else
return mid
end
end
return left
end
whichproc(ps::ProductSplit, val) = whichproc(getiterators(ps), val, ps.np)
# This function tells us the range of processors that would be involved
# if we are to compute the tasks contained in the list ps on np_new processors.
# The total list of tasks is contained in iterators, and might differ from
# getiterators(ps) (e.g. if ps contains a subsection of the parameter set)
"""
procrange_recast(iterators::Tuple{Vararg{AbstractRange}}, ps, np_new::Integer)
Return the range of processor ranks that would contain the values in `ps` if
the outer product of the ranges in `iterators` is split across `np_new`
workers.
The values contained in `ps` should be a subsection of the outer product of
the ranges in `iterators`.
# Examples
```jldoctest
julia> iters = (1:10, 4:6, 1:4);
julia> ps = ParallelUtilities.ProductSplit(iters, 5, 2);
julia> ParallelUtilities.procrange_recast(iters, ps, 10)
3:4
```
"""
function procrange_recast(iterators::Tuple{AbstractRange, Vararg{AbstractRange}}, ps::AbstractConstrainedProduct, np_new::Integer)
isempty(ps) && return nothing
procid_start = whichproc(iterators, first(ps), np_new)
if procid_start === nothing
throw(TaskNotPresentError(iterators, first(ps)))
end
if length(ps) == 1
procid_end = procid_start
else
procid_end = whichproc(iterators, last(ps), np_new)
if procid_end === nothing
throw(TaskNotPresentError(iterators, last(ps)))
end
end
return procid_start:procid_end
end
"""
procrange_recast(ps::AbstractConstrainedProduct, np_new::Integer)
Return the range of processor ranks that would contain the values in `ps` if the
iterators used to construct `ps` were split across `np_new` processes.
# Examples
```jldoctest
julia> iters = (1:10, 4:6, 1:4);
julia> ps = ParallelUtilities.ProductSplit(iters, 5, 2); # split across 5 processes initially
julia> ParallelUtilities.procrange_recast(ps, 10) # If `iters` were spread across 10 processes
3:4
```
"""
function procrange_recast(ps::AbstractConstrainedProduct, np_new::Integer)
procrange_recast(getiterators(ps), ps, np_new)
end
"""
localindex(ps::AbstractConstrainedProduct{T}, val::T) where {T}
Return the index of `val` in `ps`. Return `nothing` if the value
is not found.
# Examples
```jldoctest
julia> ps = ParallelUtilities.ProductSplit((1:3, 4:5:20), 3, 2);
julia> collect(ps)
4-element $(Vector{Tuple{Int, Int}}):
(2, 9)
(3, 9)
(1, 14)
(2, 14)
julia> ParallelUtilities.localindex(ps, (3, 9))
2
```
"""
function localindex(ps::AbstractConstrainedProduct{T}, val::T) where {T}
(isempty(ps) || val ∉ ps) && return nothing
indflat = indexinproduct(getiterators(ps), val)
indflat - firstindexglobal(ps) + 1
end
"""
whichproc_localindex(iterators::Tuple{Vararg{AbstractRange}}, val::Tuple, np::Integer)
Return `(rank, ind)`, where `rank` is the
rank of the worker that `val` will reside on if the outer product
of the ranges in `iterators` is spread over `np` workers, and `ind` is
the index of `val` in the local section on that worker.
# Examples
```jldoctest
julia> iters = (1:4, 2:8);
julia> np = 10;
julia> ParallelUtilities.whichproc_localindex(iters, (2, 4), np)
(4, 1)
julia> ParallelUtilities.ProductSplit(iters, np, 4) |> collect
3-element $(Vector{Tuple{Int, Int}}):
(2, 4)
(3, 4)
(4, 4)
```
"""
function whichproc_localindex(iterators::Tuple{Vararg{AbstractRange}}, val::Tuple, np::Integer)
procid = whichproc(iterators, val, np)
procid === nothing && return nothing
index = localindex(ProductSplit(iterators, np, procid), val)
index === nothing && return nothing
return procid, index
end
#################################################################
"""
dropleading(ps::AbstractConstrainedProduct{T, N, NTuple{N,AbstractUnitRange}}) where {T,N}
Return a `ProductSection` leaving out the first iterator contained in `ps`.
The range of values of the remaining iterators in the
resulting `ProductSection` will be the same as in `ps`.
# Examples
```jldoctest
julia> ps = ParallelUtilities.ProductSplit((1:5, 2:4, 1:3), 7, 3);
julia> collect(ps)
7-element $(Vector{Tuple{Int, Int, Int}}):
(5, 4, 1)
(1, 2, 2)
(2, 2, 2)
(3, 2, 2)
(4, 2, 2)
(5, 2, 2)
(1, 3, 2)
julia> ParallelUtilities.dropleading(ps) |> collect
3-element $(Vector{Tuple{Int, Int}}):
(4, 1)
(2, 2)
(3, 2)
```
"""
function dropleading(ps::IncreasingAbstractConstrainedProduct)
isempty(ps) && throw(ArgumentError("need at least one iterator"))
iterators = Base.tail(getiterators(ps))
first_element = Base.tail(first(ps))
last_element = Base.tail(last(ps))
firstind = indexinproduct(iterators, first_element)
lastind = indexinproduct(iterators, last_element)
ProductSection(iterators, firstind:lastind)
end
"""
Commutative
Declare a reduction operator to be commutative in its arguments.
No check is performed to ascertain if the operator is indeed commutative.
"""
struct Commutative{F} <: Function
f :: F
end
(c::Commutative)(x, y) = c.f(x, y)
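#= Example (a sketch): tagging an operator as Commutative lets `_maybesort` skip
ordering the intermediate results by rank before the final reduction:
    op = Commutative(+)
    op(2, 3) # 5, same as the wrapped function
=#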
"""
BroadcastFunction(f)
Construct a binary function that evaluates `f.(x, y)` given the arguments `x` and `y`.
!!! note
The function `BroadcastFunction(f)` is equivalent to `Base.BroadcastFunction(f)` on Julia versions
1.6 and above.
# Examples
```jldoctest
julia> ParallelUtilities.BroadcastFunction(+)(ones(3), ones(3))
3-element $(Vector{Float64}):
2.0
2.0
2.0
```
"""
struct BroadcastFunction{V, F} <: Function
f :: F
end
BroadcastFunction{V}(f) where {V} = BroadcastFunction{V, typeof(f)}(f)
BroadcastFunction(f::Function) = BroadcastFunction{Nothing, typeof(f)}(f)
(o::BroadcastFunction{Nothing})(x, y) = o.f.(x, y)
(o::BroadcastFunction{1})(x, y) = broadcast!(o.f, x, x, y)
(o::BroadcastFunction{2})(x, y) = broadcast!(o.f, y, x, y)
"""
broadcastinplace(f, ::Val{N}) where {N}
Construct a binary operator that evaluates `f.(x, y)` and overwrites the `N`th argument with the result.
For `N == 1` this evaluates `x .= f.(x, y)`, whereas for `N == 2` this evaluates `y .= f.(x, y)`.
# Examples
```jldoctest
julia> op = ParallelUtilities.broadcastinplace(+, Val(1));
julia> x = ones(3); y = ones(3);
julia> op(x, y)
3-element $(Vector{Float64}):
2.0
2.0
2.0
julia> x # overwritten
3-element $(Vector{Float64}):
2.0
2.0
2.0
```
"""
function broadcastinplace(f, v::Val{N}) where {N}
BroadcastFunction{N}(f)
end
"""
elementwisesum!(x, y)
Binary reduction operator that performs an elementwise sum and stores the result inplace in `x`.
The value of `x` is overwritten in the process.
Functionally `elementwisesum!(x, y)` is equivalent to `x .= x .+ y`.
!!! note
The operator is assumed to be commutative.
"""
const elementwisesum! = Commutative(broadcastinplace(+, Val(1)))
"""
elementwiseproduct!(x, y)
Binary reduction operator that performs an elementwise product and stores the result inplace in `x`.
The value of `x` is overwritten in the process.
Functionally `elementwiseproduct!(x, y)` is equivalent to `x .= x .* y`.
!!! note
The operator is assumed to be commutative.
"""
const elementwiseproduct! = Commutative(broadcastinplace(*, Val(1)))
"""
elementwisemin!(x, y)
Binary reduction operator that performs an elementwise `min` and stores the result inplace in `x`.
The value of `x` is overwritten in the process.
Functionally `elementwisemin!(x, y)` is equivalent to `x .= min.(x, y)`.
!!! note
The operator is assumed to be commutative.
"""
const elementwisemin! = Commutative(broadcastinplace(min, Val(1)))
"""
elementwisemax!(x, y)
Binary reduction operator that performs an elementwise `max` and stores the result inplace in `x`.
The value of `x` is overwritten in the process.
Functionally `elementwisemax!(x, y)` is equivalent to `x .= max.(x, y)`.
!!! note
The operator is assumed to be commutative.
"""
const elementwisemax! = Commutative(broadcastinplace(max, Val(1)))
"""
BroadcastStack(f, dims)(x::AbstractArray, y::AbstractArray)
Construct a binary function that stacks its arguments along `dims`, with overlapping indices `I` being replaced by
`f(x[I], y[I])`. The arguments `x` and `y` must both be `n`-dimensional arrays that have identical axes along all dimensions
aside from those specified by `dims`. The axes of the result along each dimension `d`
in `dims` would be `union(axes(x, d), axes(y, d))`.
Along the other dimensions the result has the same axes as `x` and `y`.
!!! note
If the resulting axes along the concatenated dimensions are not 1-based, one might require an offset array package
such as [`OffsetArrays.jl`](https://github.com/JuliaArrays/OffsetArrays.jl).
# Examples
```jldoctest
julia> A = ones(2)*2
2-element $(Vector{Float64}):
2.0
2.0
julia> B = ones(3)*3
3-element $(Vector{Float64}):
3.0
3.0
3.0
julia> ParallelUtilities.BroadcastStack(min, 1)(A, B)
3-element $(Vector{Float64}):
2.0
2.0
3.0
julia> A = ones(2,2)*2
2×2 $(Matrix{Float64}):
2.0 2.0
2.0 2.0
julia> B = ones(2,3)*3
2×3 $(Matrix{Float64}):
3.0 3.0 3.0
3.0 3.0 3.0
julia> ParallelUtilities.BroadcastStack(+, 2)(A, B)
2×3 $(Matrix{Float64}):
5.0 5.0 3.0
5.0 5.0 3.0
```
"""
struct BroadcastStack{F, D} <: Function
f :: F
dims :: D
end
(s::BroadcastStack)(x, y) = broadcaststack(x, y, s.f, s.dims)
function _union(axes_x_dim::AbstractUnitRange, axes_y_dim::AbstractUnitRange)
axes_dim_min = min(minimum(axes_x_dim), minimum(axes_y_dim))
axes_dim_max = max(maximum(axes_x_dim), maximum(axes_y_dim))
axes_dim = axes_dim_min:axes_dim_max
end
_union(axes_x_dim::Base.OneTo, axes_y_dim::Base.OneTo) = axes_x_dim ∪ axes_y_dim
_maybeUnitRange(ax::AbstractUnitRange) = UnitRange(ax)
_maybeUnitRange(ax::Base.OneTo) = ax
function _subsetaxes(f, axes_x, axes_y, dims)
ax = collect(_maybeUnitRange.(axes_x))
for dim in dims
ax[dim] = f(axes_x[dim], axes_y[dim])
end
ntuple(i -> ax[i], length(axes_x))
end
function broadcaststack(x::AbstractArray, y::AbstractArray, f, dims)
ndims(x) == ndims(y) || throw(DimensionMismatch("arrays must have the same number of dimensions"))
all(dim -> 1 <= dim <= ndims(x), dims) ||
throw(ArgumentError("dims must lie in 1 <= dims <= ndims(x)"))
for dim in 1:ndims(x)
# axes along the non-concatenated dimensions must be identical
dim ∈ dims && continue
axes(x, dim) == axes(y, dim) || throw(DimensionMismatch("non-concatenated axes must be identical"))
end
axes_cat = _subsetaxes(_union, axes(x), axes(y), dims)
xy_cat = similar(x, promote_type(eltype(x), eltype(y)), axes_cat)
eltype(xy_cat) <: Number && fill!(xy_cat, zero(eltype(xy_cat)))
common_ax = CartesianIndices(_subsetaxes(intersect, axes(x), axes(y), dims))
for arr in (x, y)
@inbounds for I in CartesianIndices(arr)
I in common_ax && continue
xy_cat[I] = arr[I]
end
end
@inbounds for I in common_ax
xy_cat[I] = f(x[I], y[I])
end
xy_cat
end
"""
Flip(f)
Flip the arguments of a binary function `f`, so that `Flip(f)(x, y) == f(y, x)`.
# Examples
```jldoctest flip
julia> flip1 = ParallelUtilities.Flip(vcat);
julia> flip1(2, 3)
2-element $(Vector{Int}):
3
2
```
Applying `Flip` twice restores the original function:
```jldoctest flip
julia> flip2 = ParallelUtilities.Flip(flip1);
julia> flip2(2, 3)
2-element $(Vector{Int}):
2
3
```
"""
struct Flip{F} <: Function
f :: F
end
(o::Flip)(x, y) = o.f(y, x)
Flip(o::Flip) = o.f
# Preserve the commutative tag
Flip(c::Commutative) = Commutative(Flip(c.f))
Flip(b::BroadcastFunction{1}) = BroadcastFunction{2}(Flip(b.f))
Flip(b::BroadcastFunction{2}) = BroadcastFunction{1}(Flip(b.f))
abstract type BinaryTree end
struct OrderedBinaryTree{PROCS <: AbstractVector{<:Integer}, PARENT <: Union{Integer, Nothing}} <: BinaryTree
#= Tree of the form
                8
        4               9
    2       6
  1   3   5   7
The left branch has smaller numbers than the node, and the right
branch has larger numbers.
A parent of nothing implies that the top node is its own parent
=#
N :: Int
procs :: PROCS
topnode_parent :: PARENT
function OrderedBinaryTree(procs::AbstractVector{<:Integer}, p = nothing)
N = length(procs)
N >= 1 || throw(DomainError(N, "need at least one node to create a BinaryTree"))
new{typeof(procs), typeof(p)}(N, procs, p)
end
end
Base.length(tree::OrderedBinaryTree) = length(tree.procs)
# Special type for the top tree that correctly returns nchildren for the leaves
struct ConnectedOrderedBinaryTree{OBT <: OrderedBinaryTree, D <: AbstractDict} <: BinaryTree
tree :: OBT
workersonhosts :: D
function ConnectedOrderedBinaryTree(tree::OBT, workersonhosts::D) where {OBT <: OrderedBinaryTree, D <: AbstractDict}
new{OBT, D}(tree, workersonhosts)
end
end
Base.length(tree::ConnectedOrderedBinaryTree) = length(tree.tree)
workersonhosts(tree::ConnectedOrderedBinaryTree) = tree.workersonhosts
struct SegmentedOrderedBinaryTree{PROCS <: AbstractVector{<:Integer}, TREE <: ConnectedOrderedBinaryTree} <: BinaryTree
#=
Each node on the cluster will have its own tree that carries out
a local reduction. There will be one master node on the cluster that
will acquire the reduced value on each node. This will be followed
by a tree to carry out reduction among the master nodes. The
eventual reduced result will be returned to the calling process.
=#
N :: Int
procs :: PROCS
toptree :: TREE
nodetreestartindices :: Vector{Int}
function SegmentedOrderedBinaryTree(N::Int, procs::PROCS,
toptree::TREE, nodetreestartindices::Vector{Int}) where {PROCS, TREE <: ConnectedOrderedBinaryTree}
# check that the reduction nodes of the top tree have children
all(i -> nchildren(toptree[i]) == 2, 2:2:length(toptree)) || throw(ArgumentError("reduction nodes on the top tree must have 2 children each"))
new{PROCS, TREE}(N, procs, toptree, nodetreestartindices)
end
end
workersonhosts(tree::SegmentedOrderedBinaryTree) = workersonhosts(tree.toptree)
function leafrankfoldedtree(::OrderedBinaryTree, Nleaves, leafno)
@assert(leafno <= Nleaves, "leafno needs to be ⩽ Nleaves")
leafrank = 2leafno - 1
end
leafrankfoldedtree(tree::ConnectedOrderedBinaryTree, args...) = leafrankfoldedtree(tree.tree, args...)
function foldedbinarytreefromleaves(leaves)
Nleaves = length(leaves)
Nnodes = 2Nleaves - 1
allnodes = Vector{Int}(undef, Nnodes)
foldedbinarytreefromleaves!(allnodes, leaves)
OrderedBinaryTree(allnodes)
end
function foldedbinarytreefromleaves!(allnodes, leaves)
top = topnoderank(OrderedBinaryTree(1:length(allnodes)))
allnodes[top] = first(leaves)
length(allnodes) == 1 && return
Nnodes_left = top - 1
Nleaves_left = div(Nnodes_left + 1 , 2)
Nleaves_right = length(leaves) - Nleaves_left
if Nleaves_left > 0
leaves_left = @view leaves[1:Nleaves_left]
leftnodes = @view allnodes[1:Nnodes_left]
foldedbinarytreefromleaves!(leftnodes, leaves_left)
end
if Nleaves_right > 0
leaves_right = @view leaves[end - Nleaves_right + 1:end]
rightnodes = @view allnodes[top + 1:end]
foldedbinarytreefromleaves!(rightnodes, leaves_right)
end
return allnodes
end
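#= Example (a sketch): folding 4 leaves into a 7-node tree. Leaf number i sits at
rank 2i - 1, and each internal node holds the first leaf of its subtree:
    t = foldedbinarytreefromleaves([10, 20, 30, 40])
    workers(t) # [10, 10, 20, 10, 30, 30, 40]
    [workers(t)[2i - 1] for i in 1:4] # [10, 20, 30, 40]
=#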
function SegmentedOrderedBinaryTree(procs::AbstractVector{<:Integer}, workersonhosts::AbstractDict = procs_node(procs))
Np = length(procs)
Np >= 1 || throw(DomainError(Np, "need at least one node to create a BinaryTree"))
sum(length, values(workersonhosts)) == length(procs) || throw(ArgumentError("procs $procs do not match workersonhosts $workersonhosts"))
nodes = collect(keys(workersonhosts))
masternodes = Vector{Int}(undef, length(nodes))
for (nodeind, node) in enumerate(nodes)
workersnode = workersonhosts[node]
nodetree = OrderedBinaryTree(workersnode)
masternodes[nodeind] = topnode(nodetree).p
end
Nleaves = length(masternodes)
toptree_inner = foldedbinarytreefromleaves(masternodes)
toptree = ConnectedOrderedBinaryTree(toptree_inner, workersonhosts)
toptreenonleafnodes = length(toptree) - Nleaves
Nnodestotal = toptreenonleafnodes + length(procs)
nodetreestartindices = Vector{Int}(undef, length(nodes))
nodetreestartindices[1] = toptreenonleafnodes + 1
for (nodeno, node) in enumerate(nodes)
nodeno == 1 && continue
prevnode = nodes[nodeno - 1]
nodetreestartindices[nodeno] = nodetreestartindices[nodeno - 1] + length(workersonhosts[prevnode])
end
SegmentedOrderedBinaryTree(Nnodestotal, procs, toptree, nodetreestartindices)
end
# for a single host there are no segments
function unsegmentedtree(tree::SegmentedOrderedBinaryTree)
OrderedBinaryTree(workers(tree))
end
Base.length(tree::SegmentedOrderedBinaryTree) = tree.N
levels(tree::OrderedBinaryTree) = levels(length(tree))
levels(n::Integer) = floor(Int, log2(n)) + 1
function Base.summary(io::IO, tree::SegmentedOrderedBinaryTree)
Nmasternodes = length(keys(workersonhosts(tree)))
toptreenonleafnodes = length(toptree(tree)) - Nmasternodes
mapnodes = length(tree) - toptreenonleafnodes
print(io, length(tree), "-node ", Base.nameof(typeof(tree)))
print(io, " with ", mapnodes, " workers and ", toptreenonleafnodes, " extra reduction node",
ifelse(toptreenonleafnodes > 1, "s", ""))
end
Base.summary(io::IO, tree::BinaryTree) = print(io, length(tree),"-element ", nameof(typeof(tree)))
function Base.show(io::IO, b::OrderedBinaryTree)
print(io, summary(b), "(", workers(b), ") with top node = ", topnode(b))
end
function Base.show(io::IO, b::ConnectedOrderedBinaryTree)
print(io, summary(b), "(", workers(b), ", ", workersonhosts(b), ")")
end
function Base.show(io::IO, b::SegmentedOrderedBinaryTree)
summary(io, b)
println(io)
println(io, "toptree => ", toptree(b))
println(io, "subtrees start from indices ", b.nodetreestartindices)
tt = toptree(b)
for (ind, (host, w)) in enumerate(workersonhosts(b))
node = tt[2ind - 1]
print(io, host, " => ", OrderedBinaryTree(w, node.parent))
if ind != length(workersonhosts(b))
println(io)
end
end
end
toptree(tree::SegmentedOrderedBinaryTree) = tree.toptree
function levelfromtop(tree::OrderedBinaryTree, i::Integer)
1 <= i <= length(tree) || throw(BoundsError(tree, i))
top = topnoderank(tree)
if i == top
return 1
elseif i < top
subrange = 1:top - 1
else
subrange = top + 1:length(tree)
end
subtree = OrderedBinaryTree(subrange)
subindex = searchsortedfirst(subrange, i)
1 + levelfromtop(subtree, subindex)
end
function parentnoderank(tree::OrderedBinaryTree, i::Integer)
1 <= i <= length(tree) || throw(BoundsError(tree, i))
# The topmost node is its own parent
length(tree) == 1 && return 1
top = topnoderank(tree)
length(tree) > 1 && i == top && return top
if i < top
# left branch, fully formed
level = trailing_zeros(i)
ired = i >> level # i / 2^level
# ired is necessarily an odd number
pow2level = 1 << level # 2^level
# sgn is +1 if mod(ired, 4) = 1, -1 if mod(ired, 4) = 3
sgn = 2 - mod(ired, 4)
return i + sgn * pow2level
elseif i > top
# right branch, possibly partially formed
# Carry out a recursive search
subtreeprocs = top + 1:length(tree)
subtree = OrderedBinaryTree(subtreeprocs)
subind = searchsortedfirst(subtreeprocs, i)
if subind == topnoderank(subtree)
# This catches the case of there only being a leaf node
# in the sub-tree
return top
elseif length(subtreeprocs) == 3
# don't subdivide to 1-node trees
# this lets us avoid confusing this with the case of
# the entire tree having only 1 node
return subtreeprocs[2]
end
pid = parentnoderank(subtree, subind)
return subtreeprocs[pid]
end
end
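#= Example (a sketch): for OrderedBinaryTree(1:7) the top node has rank 4 (see the
diagram in the struct definition), and the parents of ranks 1:7 are:
    tree = OrderedBinaryTree(1:7)
    [parentnoderank(tree, i) for i in 1:7] # [2, 4, 2, 4, 6, 4, 6]
=#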
parentnoderank(tree::ConnectedOrderedBinaryTree, i::Integer) = parentnoderank(tree.tree, i)
function subtree_rank(tree::SegmentedOrderedBinaryTree, i::Integer)
Nmasternodes = length(keys(workersonhosts(tree)))
toptreenonleafnodes = length(tree.toptree) - Nmasternodes
# node on a subtree at a host
subnodeno = i - toptreenonleafnodes
@assert(subnodeno > 0, "i needs to be greater than $(toptreenonleafnodes)")
# find out which node this lies on
nptotalprevhosts = 0
for (host, procs) in workersonhosts(tree)
np = length(procs)
if subnodeno <= nptotalprevhosts + np
rankinsubtree = subnodeno - nptotalprevhosts
w_host = workersonhosts(tree)[host]
subtree = OrderedBinaryTree(w_host)
return subtree, rankinsubtree, nptotalprevhosts
end
nptotalprevhosts += np
end
end
"""
masternodeindex(tree::SegmentedOrderedBinaryTree, p)
Given the top worker `p` on one node, compute the serial order of the host that it corresponds to.
"""
function masternodeindex(tree::SegmentedOrderedBinaryTree, p)
leafno = nothing
for (ind, w) in enumerate(values(workersonhosts(tree)))
subtree = OrderedBinaryTree(w)
top = topnoderank(subtree)
if w[top] == p
leafno = ind
break
end
end
return leafno
end
toptree_to_fulltree_index(::OrderedBinaryTree, i) = div(i, 2)
toptree_to_fulltree_index(tree::ConnectedOrderedBinaryTree, i) = toptree_to_fulltree_index(tree.tree, i)
fulltree_to_toptree_index(::OrderedBinaryTree, i) = 2i
fulltree_to_toptree_index(tree::ConnectedOrderedBinaryTree, i) = fulltree_to_toptree_index(tree.tree, i)
function nchildren(tree::OrderedBinaryTree, i::Integer)
1 <= i <= length(tree) || throw(BoundsError(tree, i))
if isodd(i)
0
elseif i == length(tree)
1
else
2
end
end
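#= Example (a sketch): odd ranks are leaves with no children; in a tree with an even
number of nodes the last (even) rank has a single child:
    [nchildren(OrderedBinaryTree(1:7), i) for i in 1:7] # [0, 2, 0, 2, 0, 2, 0]
    nchildren(OrderedBinaryTree(1:6), 6) # 1
=#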
function nchildren(tree::SegmentedOrderedBinaryTree, i::Integer)
1 <= i <= length(tree) || throw(BoundsError(tree, i))
Nmasternodes = length(keys(workersonhosts(tree)))
toptreenonleafnodes = length(tree.toptree) - Nmasternodes
if toptreenonleafnodes == 0
n = nchildren(unsegmentedtree(tree), i)
elseif i <= toptreenonleafnodes
# The top-tree is a full binary tree.
# Since the leaves aren't stored, every parent node
# has 2 children
n = 2
else
subtree, rankinsubtree = subtree_rank(tree, i)
n = nchildren(subtree, rankinsubtree)
end
return n
end
function nchildren(tree::ConnectedOrderedBinaryTree, i::Integer)
1 <= i <= length(tree) || throw(BoundsError(tree, i))
if isodd(i)
host = ""
for (ind, h) in enumerate(keys(workersonhosts(tree)))
if ind == i ÷ 2 + 1
host = h
end
end
st = OrderedBinaryTree(workersonhosts(tree)[host])
nchildren(topnode(st))
else
2
end
end
topnoderank(tree::ConnectedOrderedBinaryTree) = topnoderank(tree.tree)
function topnoderank(tree::OrderedBinaryTree)
1 << (levels(tree) - 1)
end
function topnoderank(tree::SegmentedOrderedBinaryTree)
Nmasternodes = length(keys(workersonhosts(tree)))
toptreenonleafnodes = length(tree.toptree) - Nmasternodes
if toptreenonleafnodes > 0
tnr_top = topnoderank(tree.toptree)
tnr = toptree_to_fulltree_index(tree.toptree, tnr_top)
else
tnr = topnoderank(OrderedBinaryTree(workers(tree)))
end
return tnr
end
topnode(tree::BinaryTree) = tree[topnoderank(tree)]
function topnode(tree::OrderedBinaryTree)
node = tree[topnoderank(tree)]
if tree.topnode_parent === nothing
BinaryTreeNode(node.p, node.p, node.nchildren)
else
BinaryTreeNode(node.p, tree.topnode_parent, node.nchildren)
end
end
# Indexing into a OrderedBinaryTree produces a BinaryTreeNode
struct BinaryTreeNode
p :: Int
parent :: Int
nchildren :: Int
function BinaryTreeNode(p::Int, p_parent::Int, nchildren::Int)
(0 <= nchildren <= 2) ||
throw(DomainError(nchildren,
"attempt to construct a binary tree with $nchildren children"))
new(p, p_parent, nchildren)
end
end
function Base.show(io::IO, b::BinaryTreeNode)
print(io,
"BinaryTreeNode(p = $(b.p),"*
" parent = $(b.parent), nchildren = $(b.nchildren))")
end
nchildren(b::BinaryTreeNode) = b.nchildren
Distributed.workers(tree::OrderedBinaryTree) = tree.procs
Distributed.workers(tree::ConnectedOrderedBinaryTree) = workers(tree.tree)
Distributed.workers(tree::SegmentedOrderedBinaryTree) = tree.procs
Distributed.nworkers(tree::BinaryTree) = length(workers(tree))
function Base.getindex(tree::BinaryTree, i::Integer)
1 <= i <= length(tree) || throw(BoundsError(tree, i))
procs = workers(tree)
p = procs[i]
pr = parentnoderank(tree, i)
p_parent = procs[pr]
n = nchildren(tree, i)
BinaryTreeNode(p, p_parent, n)
end
function Base.getindex(tree::SegmentedOrderedBinaryTree, i::Integer)
1 <= i <= length(tree) || throw(BoundsError(tree, i))
Nmasternodes = length(keys(workersonhosts(tree)))
toptreenonleafnodes = length(toptree(tree)) - Nmasternodes
if toptreenonleafnodes == 0
return unsegmentedtree(tree)[i]
elseif i <= toptreenonleafnodes
#= In a SegmentedSequentialBinaryTree the leading indices
are the parent nodes of the top tree, so ind = i
In a SegmentedOrderedBinaryTree, the leaves are removed
from the top tree, so only even numbers are left.
In this case, index i of the full tree refers to index 2i of the
top tree, so ind = 2i
=#
ind = fulltree_to_toptree_index(tree.toptree, i)
p = tree.toptree[ind].p
pr_top = parentnoderank(tree.toptree, ind)
p_parent = tree.toptree[pr_top].p
n = 2
return BinaryTreeNode(p, p_parent, n)
else
subtree, rankinsubtree = subtree_rank(tree, i)
p = subtree[rankinsubtree].p
n = nchildren(subtree, rankinsubtree)
if rankinsubtree == topnoderank(subtree)
# masternode
# parent will be on the top tree
Nmasternodes = length(keys(workersonhosts(tree)))
leafno = masternodeindex(tree, p)
leafrank = leafrankfoldedtree(tree.toptree, Nmasternodes, leafno)
pr_top = parentnoderank(tree.toptree, leafrank)
p_parent = tree.toptree[pr_top].p
else
# node on a subtree
pr = parentnoderank(subtree, rankinsubtree)
p_parent = subtree[pr].p
end
return BinaryTreeNode(p, p_parent, n)
end
end
# Branches between nodes
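# Each branch holds the remote channels that a node uses during the reduction:
# partial results from the children are received over `childrenchannel`,
# and the locally reduced value is forwarded to the parent over `parentchannel`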
struct BranchChannel
p :: Int
parentchannel :: RemoteChannel{Channel{Any}}
childrenchannel :: RemoteChannel{Channel{Any}}
nchildren :: Int
function BranchChannel(p::Int, parentchannel::RemoteChannel, childrenchannel::RemoteChannel, nchildren::Int)
(0 <= nchildren <= 2) ||
throw(DomainError(nchildren,
"attempt to construct a binary tree with $nchildren children"))
new(p, parentchannel, childrenchannel, nchildren)
end
end
nchildren(b::BranchChannel) = b.nchildren
childrenerror(nchildren) = throw(DomainError(nchildren,
"attempt to construct a binary tree with $nchildren children"))
function BranchChannel(p::Integer, parentchannel::RemoteChannel, nchildren::Integer)
(0 <= nchildren <= 2) || childrenerror(nchildren)
childrenchannel = RemoteChannel(() -> Channel(nchildren), p)
BranchChannel(p, parentchannel, childrenchannel, nchildren)
end
function BranchChannel(p::Integer, nchildren::Integer)
(0 <= nchildren <= 2) || childrenerror(nchildren)
parentchannel, childrenchannel = @sync begin
parenttask = @async RemoteChannel(() -> Channel(1), p)
childtask = @async RemoteChannel(() -> Channel(nchildren), p)
asyncmap(fetch, (parenttask, childtask))
end
BranchChannel(p, parentchannel, childrenchannel, nchildren)
end
function Base.show(io::IO, b::BranchChannel)
N = nchildren(b)
p_parent = b.parentchannel.where
p = b.p
if N == 2
str = "Branch: "*string(p_parent)*" ← "*string(p)*" ⇇ 2 children"
elseif N == 1
str = "Branch: "*string(p_parent)*" ← "*string(p)*" ← 1 child"
else
str = "Leaf : "*string(p_parent)*" ← "*string(p)
end
print(io, str)
end
function createbranchchannels!(branches, tree::OrderedBinaryTree, superbranch::BranchChannel)
top = topnoderank(tree)
topnode = tree[top]
topbranchchannels = BranchChannel(topnode.p, superbranch.childrenchannel, nchildren(topnode))
branches[top] = topbranchchannels
length(tree) == 1 && return nothing
left_inds = 1:top - 1
right_inds = top + 1:length(tree)
@sync begin
@async if !isempty(left_inds)
left_child = OrderedBinaryTree(@view workers(tree)[left_inds])
createbranchchannels!(@view(branches[left_inds]), left_child, topbranchchannels)
end
@async if !isempty(right_inds)
right_child = OrderedBinaryTree(@view workers(tree)[right_inds])
createbranchchannels!(@view(branches[right_inds]), right_child, topbranchchannels)
end
end
return nothing
end
function createbranchchannels(tree::SegmentedOrderedBinaryTree)
nodes = keys(workersonhosts(tree))
toptree = tree.toptree
Nmasternodes = length(nodes)
toptreenonleafnodes = length(toptree) - Nmasternodes
branches = Vector{BranchChannel}(undef, length(tree))
# populate the top tree other than the masternodes
# This is only run if there are multiple hosts
if toptreenonleafnodes > 0
topnoderank_toptree = topnoderank(toptree)
topnode_toptree = toptree[topnoderank_toptree]
N = nchildren(topnode_toptree)
topmostbranch = BranchChannel(topnode_toptree.p, N)
branches[topnoderank_toptree] = topmostbranch
left_inds = 1:(topnoderank_toptree - 1)
right_inds = (topnoderank_toptree + 1):length(toptree)
@sync begin
@async if !isempty(left_inds)
left_child = OrderedBinaryTree(@view workers(toptree)[left_inds])
createbranchchannels!(@view(branches[left_inds]), left_child, topmostbranch)
end
@async if !isempty(right_inds)
right_child = OrderedBinaryTree(@view workers(toptree)[right_inds])
createbranchchannels!(@view(branches[right_inds]), right_child, topmostbranch)
end
end
#= Remove the leaves from the top tree (masternodes).
They are the top nodes of the individual trees at the hosts.
They will be created separately and linked to the top tree.
=#
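# For example, with 4 hosts the top tree has 7 nodes, of which 3 are
# non-leaf nodes; the loop below moves their branches from top-tree
# indices 2, 4 and 6 to full-tree indices 1, 2 and 3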
for i = 1:toptreenonleafnodes
branches[i] = branches[2i]
end
end
@sync for (nodeno, node) in enumerate(nodes)
@async begin
# Top node for each subtree (a masternode)
workersnode = workersonhosts(tree)[node]
nodetree = OrderedBinaryTree(workersnode)
top = topnoderank(nodetree)
topnode = nodetree[top]
p = topnode.p
if toptreenonleafnodes > 0
# inherit from the parent node
leafno = masternodeindex(tree, p)
leafrank = leafrankfoldedtree(tree.toptree, Nmasternodes, leafno)
parentrank = parentnoderank(toptree, leafrank)
parentrankfulltree = toptree_to_fulltree_index(toptree, parentrank)
parentnodebranches = branches[parentrankfulltree]
parentchannel = parentnodebranches.childrenchannel
else
#= This happens if there is only one host,
in which case there's nothing to inherit.
In this case there's no difference between a
SegmentedOrderedBinaryTree and an OrderedBinaryTree
The top node is created separately as it is its own parent
=#
parentchannel = RemoteChannel(() -> Channel(1), p)
end
topbranchnode = BranchChannel(p, parentchannel, nchildren(topnode))
nodetreestartindex = tree.nodetreestartindices[nodeno]
branches[nodetreestartindex + top - 1] = topbranchnode
# Populate the rest of the tree
left_inds_nodetree = (1:top - 1)
left_inds_fulltree = (nodetreestartindex - 1) .+ left_inds_nodetree
right_inds_nodetree = top + 1:length(nodetree)
right_inds_fulltree = (nodetreestartindex - 1) .+ right_inds_nodetree
@async if !isempty(left_inds_nodetree)
left_child = OrderedBinaryTree(@view workers(nodetree)[left_inds_nodetree])
createbranchchannels!(@view(branches[left_inds_fulltree]), left_child, topbranchnode)
end
@async if !isempty(right_inds_nodetree)
right_child = OrderedBinaryTree(@view workers(nodetree)[right_inds_nodetree])
createbranchchannels!(@view(branches[right_inds_fulltree]), right_child, topbranchnode)
end
end
end
return branches
end
function createbranchchannels(pool::AbstractWorkerPool, len::Integer)
w = workersactive(pool, len)
tree = SegmentedOrderedBinaryTree(w)
branches = createbranchchannels(tree)
tree, branches
end
topbranch(tree::BinaryTree, branches::AbstractVector{<:BranchChannel}) = branches[topnoderank(tree)]
| ParallelUtilities | https://github.com/jishnub/ParallelUtilities.jl.git |
|
[
"MIT"
] | 0.8.6 | 704ef2c93e301b6469ba63103a4e7bf935e6990c | code | 4543 | using DataStructures
using Test
using Aqua
using ParallelUtilities
using Documenter
using OffsetArrays
import ParallelUtilities: pval, value, BinaryTreeNode, BranchChannel,
ProductSplit, SegmentedOrderedBinaryTree
import ParallelUtilities.ClusterQueryUtils: chooseworkers
@testset "Project quality" begin
if VERSION < v"1.6.0"
Aqua.test_all(ParallelUtilities, ambiguities=false)
else
Aqua.test_all(ParallelUtilities)
end
end
DocMeta.setdocmeta!(ParallelUtilities, :DocTestSetup, :(using ParallelUtilities); recursive=true)
@testset "doctest" begin
doctest(ParallelUtilities, manual = false)
end
@testset "pval" begin
p1 = pval{Float64}(1, false, 2.0)
p2 = pval{Int}(1, false, 2)
@test pval{Float64}(p1) === p1
@test pval{Int}(p1) === p2
@test value(p1) === 2.0
@test value(p2) === 2
@test value(2) === 2
@test value(nothing) === nothing
end
@testset "chooseworkers" begin
workers = 1:8
workers_on_hosts = OrderedDict("host1" => 1:4, "host2" => 5:8)
@test chooseworkers(workers, 3, workers_on_hosts) == 1:3
@test chooseworkers(workers, 5, workers_on_hosts) == 1:5
workers_on_hosts = OrderedDict(Libc.gethostname() => 1:4, "host2" => 5:8)
@test chooseworkers(workers, 3, workers_on_hosts) == 1:3
@test chooseworkers(workers, 5, workers_on_hosts) == 1:5
workers_on_hosts = OrderedDict("host1" => 1:4, Libc.gethostname() => 5:8)
@test chooseworkers(workers, 3, workers_on_hosts) == 5:7
@test chooseworkers(workers, 5, workers_on_hosts) == [5:8; 1]
end
@testset "Reduction functions" begin
# BroadcastStack with OffsetArrays
@testset "BroadcastStack" begin
arr = ParallelUtilities.BroadcastStack(+, 1)(ones(2:4), ones(3:5))
@test arr == OffsetArray([1, 2, 2, 1], 2:5)
arr = ParallelUtilities.BroadcastStack(+, 1:2)(ones(1:2, 2:4), ones(2:3, 3:5))
arr_exp = OffsetArray([1.0 1.0 1.0 0.0
1.0 2.0 2.0 1.0
0.0 1.0 1.0 1.0], 1:3, 2:5)
@test arr == arr_exp
end
@testset "BroadcastFunction" begin
x = ones(3); y = ones(3);
b = ParallelUtilities.BroadcastFunction{1}(+)
@test b(x, y) == ones(3) * 2
@test x == ones(3) * 2
@test y == ones(3)
b = ParallelUtilities.BroadcastFunction{2}(+)
x = ones(3); y = ones(3);
@test b(x, y) == ones(3) * 2
@test x == ones(3)
@test y == ones(3) * 2
end
@testset "Flip" begin
x = ones(3); y = ones(3);
f = ParallelUtilities.Flip(ParallelUtilities.elementwisesum!)
@test f(x,y) == ones(3) * 2
@test x == ones(3)
@test y == ones(3) * 2
x = ones(3); y = ones(3);
f = ParallelUtilities.Flip(ParallelUtilities.broadcastinplace(+, Val(2)))
@test f(x,y) == ones(3) * 2
@test x == ones(3) * 2
@test y == ones(3)
end
end
@testset "show" begin
@testset "ProductSplit" begin
io = IOBuffer()
ps = ProductSplit((1:20, 1:30), 4, 1)
show(io, ps)
showstr = String(take!(io))
startstr = string(length(ps))*"-element ProductSplit"
@test startswith(showstr, startstr)
end
@testset "error" begin
io = IOBuffer()
showerror(io, ParallelUtilities.TaskNotPresentError((1:4,), (5,)))
strexp = "could not find the task $((5,)) in the list $((1:4,))"
@test String(take!(io)) == strexp
end;
@testset "BranchChannel" begin
io = IOBuffer()
b = BranchChannel(1, 0)
show(io, b)
strexp = "Leaf : 1 ← 1"
@test String(take!(io)) == strexp
b = BranchChannel(1, 1)
show(io, b)
strexp = "Branch: 1 ← 1 ← 1 child"
@test String(take!(io)) == strexp
b = BranchChannel(1, 2)
show(io, b)
strexp = "Branch: 1 ← 1 ⇇ 2 children"
@test String(take!(io)) == strexp
end;
@testset "BinaryTreeNode" begin
io = IOBuffer()
b = BinaryTreeNode(2, 3, 1)
show(io, b)
strexp = "BinaryTreeNode(p = 2, parent = 3, nchildren = 1)"
@test String(take!(io)) == strexp
end;
@testset "BinaryTree" begin
# check that show is working
io = IOBuffer()
tree = SegmentedOrderedBinaryTree(1:8, OrderedDict("host1" => 1:4, "host2" => 5:8))
show(io, tree)
show(io, ParallelUtilities.toptree(tree))
show(io, ParallelUtilities.toptree(tree).tree)
end
end;
| ParallelUtilities | https://github.com/jishnub/ParallelUtilities.jl.git |
|
[
"MIT"
] | 0.8.6 | 704ef2c93e301b6469ba63103a4e7bf935e6990c | code | 35856 | using Distributed
@everywhere begin
using DataStructures
using Test
using ParallelUtilities
using ParallelUtilities.ClusterQueryUtils
using ParallelUtilities.ClusterQueryUtils: oneworkerpernode
using OffsetArrays
import ParallelUtilities: BinaryTreeNode, BranchChannel,
OrderedBinaryTree, SegmentedOrderedBinaryTree,
parentnoderank, nchildren,
createbranchchannels,
workersactive,
leafrankfoldedtree,
TopTreeNode, SubTreeNode,
NoSplat, reducedvalue
function parentnoderank(tree::SegmentedOrderedBinaryTree, i::Integer)
1 <= i <= length(tree) || throw(BoundsError(tree, i))
Nmasternodes = length(keys(ParallelUtilities.workersonhosts(tree)))
toptreenonleafnodes = length(tree.toptree) - Nmasternodes
if toptreenonleafnodes == 0
pr = parentnoderank(ParallelUtilities.unsegmentedtree(tree),i)
elseif i <= toptreenonleafnodes
#= In a SegmentedSequentialBinaryTree the leading indices
are the parent nodes of the top tree, so ind = i
In a SegmentedOrderedBinaryTree, the leaves are removed
from the top tree, so only even numbers are left.
In this case, index i of the full tree refers to index 2i of the
top tree, so ind = 2i
=#
ind = ParallelUtilities.fulltree_to_toptree_index(tree.toptree, i)
p = tree.toptree[ind].p
# Compute the parent of the node with rank ind on the top tree.
# In a SegmentedSequentialBinaryTree this is what we want.
# In a SegmentedOrderedBinaryTree, we need to convert this back to
# the index of the full tree, that is div(pr, 2)
pr_top = parentnoderank(tree.toptree, ind)
pr = ParallelUtilities.toptree_to_fulltree_index(tree.toptree, pr_top)
else
subtree, rankinsubtree, nptotalprevhosts = ParallelUtilities.subtree_rank(tree, i)
if rankinsubtree == ParallelUtilities.topnoderank(subtree)
# masternode
# parent will be on the top tree
p = subtree[rankinsubtree].p
leafno = ParallelUtilities.masternodeindex(tree, p)
Nmasternodes = length(keys(ParallelUtilities.workersonhosts(tree)))
leafrank = ParallelUtilities.leafrankfoldedtree(tree.toptree, Nmasternodes, leafno)
pr_top = parentnoderank(tree.toptree, leafrank)
# Convert back to the rank on the full tree where the
# leaves of the top tree aren't stored.
pr = ParallelUtilities.toptree_to_fulltree_index(tree.toptree, pr_top)
else
# node on a subtree
pr = parentnoderank(subtree, rankinsubtree)
pr += nptotalprevhosts + toptreenonleafnodes
end
end
return pr
end
end
macro testsetwithinfo(str, ex)
quote
@info "Testing "*$str
@testset $str begin $(esc(ex)); end;
end
end
fmap_local(x) = x^2
fred_local(x) = x
fred_local(x, y) = x + y
function showworkernumber(ind, nw)
# Cursor starts off at the beginning of the line
print("\u1b[K") # clear till end of line
print("Testing on worker $ind of $nw")
# return the cursor to the beginning of the line
endchar = ind == nw ? "\n" : "\r"
print(endchar)
end
@testsetwithinfo "utilities" begin
@testset "hostnames" begin
hosts = hostnames()
nodes = unique(hosts)
@test nodenames() == nodes
@test nodenames(hosts) == nodes
np1 = nprocs_node(hosts, nodes)
np2 = nprocs_node(hosts)
np3 = nprocs_node()
@test np1 == np2 == np3
for node in nodes
npnode = count(isequal(node), hosts)
@test np1[node] == npnode
end
p1 = procs_node(workers(), hosts, nodes)
for node in nodes
pnode = workers()[findall(isequal(node), hosts)]
@test p1[node] == pnode
end
np4 = nprocs_node(p1)
@test np1 == np4
end
end;
@testset "BinaryTree" begin
@testsetwithinfo "BinaryTreeNode" begin
@testset "Constructor" begin
p = workers()[1]
b = BinaryTreeNode(p, p, 0)
@test nchildren(b) == 0
b = BinaryTreeNode(p, p, 1)
@test nchildren(b) == 1
b = BinaryTreeNode(p, p, 2)
@test nchildren(b) == 2
@test_throws DomainError BinaryTreeNode(p, p, 3)
@test_throws DomainError BinaryTreeNode(p, p,-1)
end
end
@testsetwithinfo "BinaryTree" begin
@testsetwithinfo "OrderedBinaryTree" begin
@testset "pid and parent" begin
for imax = 1:100
procs = 1:imax
tree = OrderedBinaryTree(procs)
@test length(tree) == length(procs)
topnoderank = ParallelUtilities.topnoderank(tree)
@test tree[topnoderank].parent == topnoderank
for rank in 1:length(tree)
node = tree[rank]
@test node.p == procs[rank]
@test node.parent == procs[parentnoderank(tree, rank)]
end
@test_throws BoundsError(tree, 0) parentnoderank(tree, 0)
@test_throws BoundsError(tree, imax + 1) parentnoderank(tree, imax + 1)
end
end
@testset "nchildren" begin
tree = OrderedBinaryTree(1:1)
@test nchildren(tree, 1) == nchildren(tree[1]) == tree[1].nchildren == 0
@test_throws BoundsError(tree, 0) nchildren(tree, 0)
@test_throws BoundsError(tree, 2) nchildren(tree, 2)
@test ParallelUtilities.topnoderank(tree) == 1
tree = OrderedBinaryTree(1:2)
@test nchildren(tree, 1) == nchildren(tree[1]) == tree[1].nchildren == 0
@test nchildren(tree, 2) == nchildren(tree[2]) == tree[2].nchildren == 1
@test_throws BoundsError(tree, 0) nchildren(tree, 0)
@test_throws BoundsError(tree, 3) nchildren(tree, 3)
@test ParallelUtilities.topnoderank(tree) == 2
tree = OrderedBinaryTree(1:8)
@test nchildren(tree, 1) == nchildren(tree[1]) == tree[1].nchildren == 0
@test nchildren(tree, 2) == nchildren(tree[2]) == tree[2].nchildren == 2
@test nchildren(tree, 3) == nchildren(tree[3]) == tree[3].nchildren == 0
@test nchildren(tree, 4) == nchildren(tree[4]) == tree[4].nchildren == 2
@test nchildren(tree, 5) == nchildren(tree[5]) == tree[5].nchildren == 0
@test nchildren(tree, 6) == nchildren(tree[6]) == tree[6].nchildren == 2
@test nchildren(tree, 7) == nchildren(tree[7]) == tree[7].nchildren == 0
@test nchildren(tree, 8) == nchildren(tree[8]) == tree[8].nchildren == 1
@test_throws BoundsError(tree, 0) nchildren(tree, 0)
@test_throws BoundsError(tree, 9) nchildren(tree, 9)
@test ParallelUtilities.topnoderank(tree) == 8
tree = OrderedBinaryTree(1:11)
@test nchildren(tree, 1) == nchildren(tree[1]) == tree[1].nchildren == 0
@test nchildren(tree, 2) == nchildren(tree[2]) == tree[2].nchildren == 2
@test nchildren(tree, 3) == nchildren(tree[3]) == tree[3].nchildren == 0
@test nchildren(tree, 4) == nchildren(tree[4]) == tree[4].nchildren == 2
@test nchildren(tree, 5) == nchildren(tree[5]) == tree[5].nchildren == 0
@test nchildren(tree, 6) == nchildren(tree[6]) == tree[6].nchildren == 2
@test nchildren(tree, 7) == nchildren(tree[7]) == tree[7].nchildren == 0
@test nchildren(tree, 8) == nchildren(tree[8]) == tree[8].nchildren == 2
@test nchildren(tree, 9) == nchildren(tree[9]) == tree[9].nchildren == 0
@test nchildren(tree, 10) == nchildren(tree[10]) == tree[10].nchildren == 2
@test nchildren(tree, 11) == nchildren(tree[11]) == tree[11].nchildren == 0
@test_throws BoundsError(tree, 0) nchildren(tree, 0)
@test_throws BoundsError(tree, 12) nchildren(tree, 12)
@test ParallelUtilities.topnoderank(tree) == 8
tree = OrderedBinaryTree(1:13)
@test nchildren(tree, 1) == nchildren(tree[1]) == tree[1].nchildren == 0
@test nchildren(tree, 2) == nchildren(tree[2]) == tree[2].nchildren == 2
@test nchildren(tree, 3) == nchildren(tree[3]) == tree[3].nchildren == 0
@test nchildren(tree, 4) == nchildren(tree[4]) == tree[4].nchildren == 2
@test nchildren(tree, 5) == nchildren(tree[5]) == tree[5].nchildren == 0
@test nchildren(tree, 6) == nchildren(tree[6]) == tree[6].nchildren == 2
@test nchildren(tree, 7) == nchildren(tree[7]) == tree[7].nchildren == 0
@test nchildren(tree, 8) == nchildren(tree[8]) == tree[8].nchildren == 2
@test nchildren(tree, 9) == nchildren(tree[9]) == tree[9].nchildren == 0
@test nchildren(tree, 10) == nchildren(tree[10]) == tree[10].nchildren == 2
@test nchildren(tree, 11) == nchildren(tree[11]) == tree[11].nchildren == 0
@test nchildren(tree, 12) == nchildren(tree[12]) == tree[12].nchildren == 2
@test nchildren(tree, 13) == nchildren(tree[13]) == tree[13].nchildren == 0
@test_throws BoundsError(tree, 0) nchildren(tree, 0)
@test_throws BoundsError(tree, 14) nchildren(tree, 14)
@test ParallelUtilities.topnoderank(tree) == 8
end
@testset "level" begin
tree = OrderedBinaryTree(1:15)
@test ParallelUtilities.levels(tree) == 4
@test ParallelUtilities.levelfromtop.((tree,), 1:2:15) == ones(Int, 8).*4
@test ParallelUtilities.levelfromtop.((tree,), (2, 6, 10, 14)) == (3, 3, 3, 3)
@test ParallelUtilities.levelfromtop.((tree,), (4, 12)) == (2, 2)
@test ParallelUtilities.levelfromtop(tree, 8) == 1
for p in [0, length(tree) + 1]
@test_throws BoundsError(tree, p) ParallelUtilities.levelfromtop(tree, p)
end
tree = OrderedBinaryTree(1:13)
@test ParallelUtilities.levels(tree) == 4
@test ParallelUtilities.levelfromtop.((tree,), 1:2:11) == ones(Int, 6).*4
@test ParallelUtilities.levelfromtop.((tree,), (2, 6, 10, 13)) == (3, 3, 3, 3)
@test ParallelUtilities.levelfromtop.((tree,), (4, 12)) == (2, 2)
@test ParallelUtilities.levelfromtop(tree, 8) == 1
for p in [0, length(tree) + 1]
@test_throws BoundsError(tree, p) ParallelUtilities.levelfromtop(tree, p)
end
end
end
@testsetwithinfo "SegmentedOrderedBinaryTree" begin
@testsetwithinfo "single host" begin
@testset "pid and parent" begin
for imax = 1:100
procs = 1:imax
workersonhosts = Dict("host" => procs)
tree = SegmentedOrderedBinaryTree(procs, workersonhosts)
treeOBT = OrderedBinaryTree(procs)
@test length(tree) == length(procs) == length(treeOBT)
topnoderank = ParallelUtilities.topnoderank(tree)
# The top node is its own parent
@test tree[topnoderank].parent == topnoderank
@test tree[topnoderank] == ParallelUtilities.topnode(tree)
for rank in 1:length(tree)
node = tree[rank]
parentnode = tree[parentnoderank(tree, rank)]
@test length(procs) > 1 ? nchildren(parentnode) > 0 : nchildren(parentnode) == 0
@test node.p == procs[rank]
@test node.parent == procs[parentnoderank(treeOBT, rank)]
@test parentnode.p == node.parent
end
end
end;
@testset "nchildren" begin
procs = 1:1
tree = SegmentedOrderedBinaryTree(procs, Dict("host" => procs))
@test nchildren(tree, 1) == nchildren(tree[1]) == tree[1].nchildren == 0
@test_throws BoundsError(tree, 0) nchildren(tree, 0)
@test_throws BoundsError(tree, 2) nchildren(tree, 2)
@test ParallelUtilities.topnoderank(tree) == 1
procs = 1:2
tree = SegmentedOrderedBinaryTree(procs, Dict("host" => procs))
@test nchildren(tree, 1) == nchildren(tree[1]) == tree[1].nchildren == 0
@test nchildren(tree, 2) == nchildren(tree[2]) == tree[2].nchildren == 1
@test_throws BoundsError(tree, 0) nchildren(tree, 0)
@test_throws BoundsError(tree, 3) nchildren(tree, 3)
@test ParallelUtilities.topnoderank(tree) == 2
procs = 1:8
tree = SegmentedOrderedBinaryTree(procs, Dict("host" => procs))
@test nchildren(tree, 1) == nchildren(tree[1]) == tree[1].nchildren == 0
@test nchildren(tree, 2) == nchildren(tree[2]) == tree[2].nchildren == 2
@test nchildren(tree, 3) == nchildren(tree[3]) == tree[3].nchildren == 0
@test nchildren(tree, 4) == nchildren(tree[4]) == tree[4].nchildren == 2
@test nchildren(tree, 5) == nchildren(tree[5]) == tree[5].nchildren == 0
@test nchildren(tree, 6) == nchildren(tree[6]) == tree[6].nchildren == 2
@test nchildren(tree, 7) == nchildren(tree[7]) == tree[7].nchildren == 0
@test nchildren(tree, 8) == nchildren(tree[8]) == tree[8].nchildren == 1
@test_throws BoundsError(tree, 0) nchildren(tree, 0)
@test_throws BoundsError(tree, 9) nchildren(tree, 9)
@test ParallelUtilities.topnoderank(tree) == 8
procs = 1:11
tree = SegmentedOrderedBinaryTree(procs, Dict("host" => procs))
@test nchildren(tree, 1) == nchildren(tree[1]) == tree[1].nchildren == 0
@test nchildren(tree, 2) == nchildren(tree[2]) == tree[2].nchildren == 2
@test nchildren(tree, 3) == nchildren(tree[3]) == tree[3].nchildren == 0
@test nchildren(tree, 4) == nchildren(tree[4]) == tree[4].nchildren == 2
@test nchildren(tree, 5) == nchildren(tree[5]) == tree[5].nchildren == 0
@test nchildren(tree, 6) == nchildren(tree[6]) == tree[6].nchildren == 2
@test nchildren(tree, 7) == nchildren(tree[7]) == tree[7].nchildren == 0
@test nchildren(tree, 8) == nchildren(tree[8]) == tree[8].nchildren == 2
@test nchildren(tree, 9) == nchildren(tree[9]) == tree[9].nchildren == 0
@test nchildren(tree, 10) == nchildren(tree[10]) == tree[10].nchildren == 2
@test nchildren(tree, 11) == nchildren(tree[11]) == tree[11].nchildren == 0
@test_throws BoundsError(tree, 0) nchildren(tree, 0)
@test_throws BoundsError(tree, 12) nchildren(tree, 12)
@test ParallelUtilities.topnoderank(tree) == 8
procs = 1:13
tree = SegmentedOrderedBinaryTree(procs, Dict("host" => procs))
@test nchildren(tree, 1) == nchildren(tree[1]) == tree[1].nchildren == 0
@test nchildren(tree, 2) == nchildren(tree[2]) == tree[2].nchildren == 2
@test nchildren(tree, 3) == nchildren(tree[3]) == tree[3].nchildren == 0
@test nchildren(tree, 4) == nchildren(tree[4]) == tree[4].nchildren == 2
@test nchildren(tree, 5) == nchildren(tree[5]) == tree[5].nchildren == 0
@test nchildren(tree, 6) == nchildren(tree[6]) == tree[6].nchildren == 2
@test nchildren(tree, 7) == nchildren(tree[7]) == tree[7].nchildren == 0
@test nchildren(tree, 8) == nchildren(tree[8]) == tree[8].nchildren == 2
@test nchildren(tree, 9) == nchildren(tree[9]) == tree[9].nchildren == 0
@test nchildren(tree, 10) == nchildren(tree[10]) == tree[10].nchildren == 2
@test nchildren(tree, 11) == nchildren(tree[11]) == tree[11].nchildren == 0
@test nchildren(tree, 12) == nchildren(tree[12]) == tree[12].nchildren == 2
@test nchildren(tree, 13) == nchildren(tree[13]) == tree[13].nchildren == 0
@test_throws BoundsError(tree, 0) nchildren(tree, 0)
@test_throws BoundsError(tree, 14) nchildren(tree, 14)
@test ParallelUtilities.topnoderank(tree) == 8
end;
end;
@testsetwithinfo "multiple hosts" begin
@testset "length" begin
procs = 1:2
tree = SegmentedOrderedBinaryTree(procs,
OrderedDict("host1" => 1:1,"host2" => 2:2))
@test length(tree) == 2 + 1
procs = 1:4
tree = SegmentedOrderedBinaryTree(procs,
OrderedDict("host1" => 1:2,"host2" => 3:4))
@test length(tree) == 4 + 1
procs = 1:12
tree = SegmentedOrderedBinaryTree(procs,
OrderedDict(
"host1" => 1:3,"host2" => 4:6,
"host3" => 7:9,"host4" => 10:12))
@test length(tree) == 12 + 3
end;
@testset "leafrankfoldedtree" begin
treeflag = OrderedBinaryTree(1:1)
@test leafrankfoldedtree(treeflag, 5, 1) == 1
@test leafrankfoldedtree(treeflag, 5, 2) == 3
@test leafrankfoldedtree(treeflag, 5, 3) == 5
@test leafrankfoldedtree(treeflag, 5, 4) == 7
@test leafrankfoldedtree(treeflag, 5, 5) == 9
end;
@testset "pid and parent" begin
for imax = 2:100
procs = 1:imax
mid = div(imax, 2)
workersonhosts = OrderedDict{String, Vector{Int}}()
workersonhosts["host1"] = procs[1:mid]
workersonhosts["host2"] = procs[mid + 1:end]
tree = SegmentedOrderedBinaryTree(procs, workersonhosts)
top = ParallelUtilities.topnoderank(tree)
@test tree[top] == ParallelUtilities.topnode(tree)
for (ind, rank) in enumerate(1:mid)
node = tree[rank + 1]
parentnode = tree[parentnoderank(tree, rank + 1)]
@test parentnode.p == node.parent
pnodes = workersonhosts["host1"]
@test node.p == pnodes[ind]
OBT = OrderedBinaryTree(pnodes)
if ind == ParallelUtilities.topnoderank(OBT)
# Special check for 2 hosts as
# there's only one node in the top tree
@test node.parent == ParallelUtilities.topnode(tree.toptree).p
else
@test node.parent == pnodes[parentnoderank(OBT, ind)]
end
end
for (ind, rank) in enumerate(mid + 1:imax)
node = tree[rank + 1]
parentnode = tree[parentnoderank(tree, rank + 1)]
@test parentnode.p == node.parent
pnodes = workersonhosts["host2"]
@test node.p == pnodes[ind]
OBT = OrderedBinaryTree(pnodes)
if ind == ParallelUtilities.topnoderank(OBT)
# Special check for 2 hosts as
# there's only one node in the top tree
@test node.parent == ParallelUtilities.topnode(tree.toptree).p
else
@test node.parent == pnodes[parentnoderank(OBT, ind)]
end
end
end
end;
@testset "nchildren" begin
procs = 1:2
tree = SegmentedOrderedBinaryTree(procs,
OrderedDict("host1" => 1:1,"host2" => 2:2))
@test nchildren(tree, 1) == nchildren(tree[1]) == tree[1].nchildren == 2
@test nchildren(tree, 2) == nchildren(tree[2]) == tree[2].nchildren == 0
@test nchildren(tree, 3) == nchildren(tree[3]) == tree[3].nchildren == 0
@test_throws BoundsError(tree, 0) nchildren(tree, 0)
@test_throws BoundsError(tree, 4) nchildren(tree, 4)
procs = 1:12
tree = SegmentedOrderedBinaryTree(procs,
OrderedDict(
"host1" => 1:3,"host2" => 4:6,
"host3" => 7:9,"host4" => 10:12))
@test nchildren(tree, 1) == nchildren(tree[1]) == tree[1].nchildren == 2
@test nchildren(tree, 2) == nchildren(tree[2]) == tree[2].nchildren == 2
@test nchildren(tree, 3) == nchildren(tree[3]) == tree[3].nchildren == 2
@test nchildren(tree, 4) == nchildren(tree[4]) == tree[4].nchildren == 0
@test nchildren(tree, 5) == nchildren(tree[5]) == tree[5].nchildren == 2
@test nchildren(tree, 6) == nchildren(tree[6]) == tree[6].nchildren == 0
@test nchildren(tree, 7) == nchildren(tree[7]) == tree[7].nchildren == 0
@test nchildren(tree, 8) == nchildren(tree[8]) == tree[8].nchildren == 2
@test nchildren(tree, 9) == nchildren(tree[9]) == tree[9].nchildren == 0
@test nchildren(tree, 10) == nchildren(tree[10]) == tree[10].nchildren == 0
@test nchildren(tree, 11) == nchildren(tree[11]) == tree[11].nchildren == 2
@test nchildren(tree, 12) == nchildren(tree[12]) == tree[12].nchildren == 0
@test nchildren(tree, 13) == nchildren(tree[13]) == tree[13].nchildren == 0
@test nchildren(tree, 14) == nchildren(tree[14]) == tree[14].nchildren == 2
@test nchildren(tree, 15) == nchildren(tree[15]) == tree[15].nchildren == 0
@test_throws BoundsError(tree, 0) nchildren(tree, 0)
@test_throws BoundsError(tree, 16) nchildren(tree, 16)
end;
end;
end
end
end;
@testsetwithinfo "reduction" begin
@testset "BranchChannel" begin
@test_throws DomainError BranchChannel(1, 3)
parentchannel = RemoteChannel(() -> Channel(1))
@test_throws DomainError BranchChannel(1, parentchannel, 3)
end
@testset "TopTreeNode" begin
# Test this explicitly, as this path is usually not exercised when the tests are run on a single machine
parentchannel = RemoteChannel(() -> Channel(1))
childrenchannel = RemoteChannel(() -> Channel(2))
pipe = ParallelUtilities.BranchChannel(1, parentchannel, childrenchannel, 2)
put!(childrenchannel, ParallelUtilities.pval(1, false, 1))
put!(childrenchannel, ParallelUtilities.pval(2, false, 2))
redval = reducedvalue(+, ParallelUtilities.TopTreeNode(1), pipe, nothing)
@test redval === ParallelUtilities.pval(1, false, 3)
put!(childrenchannel, ParallelUtilities.pval(1, false, 1))
put!(childrenchannel, ParallelUtilities.pval(2, true, nothing))
redval = reducedvalue(+, ParallelUtilities.TopTreeNode(1), pipe, nothing)
@test redval === ParallelUtilities.pval(1, true, nothing)
put!(childrenchannel, ParallelUtilities.pval(1, false, 1))
put!(childrenchannel, ParallelUtilities.pval(2, false, 2))
@test_throws Exception reducedvalue(x -> error(""), ParallelUtilities.TopTreeNode(1), pipe, nothing)
end
@testset "fake multiple hosts" begin
tree = ParallelUtilities.SegmentedOrderedBinaryTree([1,1], OrderedDict("host1" => 1:1, "host2" => 1:1))
branches = ParallelUtilities.createbranchchannels(tree)
@test ParallelUtilities.pmapreduceworkers(x -> 1, +, (tree, branches), (1:4,)) == 4
if nworkers() > 1
p = procs_node()
# Choose workers on the same node to avoid communication bottlenecks in testing
w = first(values(p))
tree = ParallelUtilities.SegmentedOrderedBinaryTree(w, OrderedDict("host1" => w[1]:w[1], "host2" => w[2]:w[end]))
branches = ParallelUtilities.createbranchchannels(tree)
@test ParallelUtilities.pmapreduceworkers(x -> 1, +, (tree, branches), (1:length(w),)) == length(w)
end
end
end
@testset "pmapreduce" begin
@testsetwithinfo "pmapreduce" begin
@testsetwithinfo "sum" begin
@testsetwithinfo "comparison with mapreduce" begin
for iterators in Any[(1:1,), (ones(2,2),), (1:10,)]
res_exp = mapreduce(x -> x^2, +, iterators...)
res = pmapreduce(x -> x^2, +, iterators...)
@test res_exp == res
res_exp = mapreduce(x -> x^2, +, iterators..., init = 100)
res = pmapreduce(x -> x^2, +, iterators..., init = 100)
@test res_exp == res
end
@testset "dictionary" begin
res = pmapreduce(x -> Dict(x => x), merge, 1:1)
res_exp = mapreduce(x -> Dict(x => x), merge, 1:1)
@test res == res_exp
res = pmapreduce(x -> Dict(x => x), merge, 1:200)
res_exp = mapreduce(x -> Dict(x => x), merge, 1:200)
@test res == res_exp
res = pmapreduce(x -> OrderedDict(x => x), merge, 1:20)
res_exp = mapreduce(x -> OrderedDict(x => x), merge, 1:20)
@test res == res_exp
end
iterators = (1:10, 2:2:20)
res_exp = mapreduce((x, y) -> x*y, +, iterators...)
res = pmapreduce((x, y) -> x*y, +, iterators...)
@test res_exp == res
res_exp = mapreduce((x, y) -> x*y, +, iterators..., init = 100)
res = pmapreduce((x, y) -> x*y, +, iterators..., init = 100)
@test res_exp == res
iterators = (1:10, 2:2:20)
iterators_product = Iterators.product(iterators...)
res_exp = mapreduce(((x, y),) -> x*y, +, iterators_product)
res = pmapreduce(((x, y),) -> x*y, +, iterators_product)
@test res_exp == res
res_exp_2itp = mapreduce(((x, y), (a, b)) -> x*a + y*b, +, iterators_product, iterators_product)
res_2itp = pmapreduce(((x, y), (a, b)) -> x*a + y*b, +, iterators_product, iterators_product)
@test res_2itp == res_exp_2itp
iterators_product_putil = ParallelUtilities.product(iterators...)
res_exp2 = mapreduce(((x, y),) -> x*y, +, iterators_product_putil)
res2 = pmapreduce(((x, y),) -> x*y, +, iterators_product_putil)
@test res_exp2 == res2
@test res_exp2 == res_exp
res_exp_2pup = mapreduce(((x, y), (a, b)) -> x*a + y*b, +, iterators_product_putil, iterators_product_putil)
res_2pup = pmapreduce(((x, y), (a, b)) -> x*a + y*b, +, iterators_product_putil, iterators_product_putil)
@test res_2pup == res_exp_2pup
@test res_2pup == res_2itp
end
@testsetwithinfo "pmapreduce_productsplit" begin
res_exp = sum(workers())
@test pmapreduce_productsplit(x -> myid(), +, 1:nworkers()) == res_exp
@test pmapreduce_productsplit(NoSplat(x -> myid()), +, 1:nworkers()) == res_exp
@test pmapreduce_productsplit(x -> myid(), +, 1:nworkers(), 1:1) == res_exp
end
end;
@testsetwithinfo "inplace assignment" begin
res = pmapreduce_productsplit(x -> ones(2), ParallelUtilities.elementwisesum!, 1:10)
resexp = mapreduce(x -> ones(2), +, 1:min(10, nworkers()))
@test res == resexp
res = pmapreduce_productsplit(x -> ones(2), ParallelUtilities.elementwiseproduct!, 1:4)
resexp = mapreduce(x -> ones(2), (x,y) -> x .* y, 1:min(4, nworkers()))
@test res == resexp
res = pmapreduce_productsplit(x -> ones(2), ParallelUtilities.elementwisemin!, 1:4)
resexp = mapreduce(x -> ones(2), (x,y) -> min.(x,y), 1:min(4, nworkers()))
@test res == resexp
res = pmapreduce_productsplit(x -> ones(2), ParallelUtilities.elementwisemax!, 1:4)
resexp = mapreduce(x -> ones(2), (x,y) -> max.(x,y), 1:min(4, nworkers()))
@test res == resexp
end
@testsetwithinfo "concatenation" begin
@testsetwithinfo "comparison with mapreduce" begin
resexp_vcat = mapreduce(identity, vcat, 1:nworkers())
resexp_hcat = mapreduce(identity, hcat, 1:nworkers())
res_vcat = pmapreduce(identity, vcat, 1:nworkers())
res_hcat = pmapreduce(identity, hcat, 1:nworkers())
@test res_vcat == resexp_vcat
@test res_hcat == resexp_hcat
end
@testsetwithinfo "pmapreduce_productsplit" begin
res_vcat = mapreduce(identity, vcat, ones(2) for i in 1:nworkers())
res_hcat = mapreduce(identity, hcat, ones(2) for i in 1:nworkers())
@test pmapreduce_productsplit(x -> ones(2), vcat, 1:nworkers()) == res_vcat
@test pmapreduce_productsplit(x -> ones(2), hcat, 1:nworkers()) == res_hcat
end
end;
@testsetwithinfo "run elsewhere" begin
@testsetwithinfo "sum" begin
res_exp = sum(workers())
c = Channel(nworkers())
tasks = Vector{Task}(undef, nworkers())
@sync begin
for (ind, p) in enumerate(workers())
tasks[ind] = @async begin
try
res = @fetchfrom p pmapreduce_productsplit(x -> myid(), +, 1:nworkers())
put!(c,(ind, res, false))
catch
put!(c,(ind, 0, true))
rethrow()
end
end
end
for i = 1:nworkers()
ind, res, err = take!(c)
err && wait(tasks[ind])
@test res == res_exp
showworkernumber(i, nworkers())
end
end
end
# concatenation where the rank is used in the mapping function
# Preserves order of the iterators
@testsetwithinfo "concatenation using rank" begin
c = Channel(nworkers())
tasks = Vector{Task}(undef, nworkers())
@sync begin
for (ind, p) in enumerate(workers())
tasks[ind] = @async begin
try
res = @fetchfrom p (pmapreduce_productsplit(x -> x[1][1], vcat, 1:nworkers()) == mapreduce(identity, vcat, 1:nworkers()))
put!(c,(ind, res, false))
catch
put!(c,(ind, false, true))
rethrow()
end
end
end
for i = 1:nworkers()
ind, res, err = take!(c)
err && wait(tasks[ind])
@test res
showworkernumber(i, nworkers())
end
end
end
end;
@testsetwithinfo "errors" begin
@test_throws Exception pmapreduce(x -> error("map"), +, 1:10)
@test_throws Exception pmapreduce(identity, x -> error("reduce"), 1:10)
@test_throws Exception pmapreduce(x -> error("map"), x -> error("reduce"), 1:10)
@test_throws Exception pmapreduce(fmap, +, 1:10)
@test_throws Exception pmapreduce(identity, fred, 1:10)
@test_throws Exception pmapreduce(fmap, fred, 1:10)
if nworkers() != nprocs()
@test_throws Exception pmapreduce(fmap_local, +, 1:10)
@test_throws Exception pmapreduce(identity, fred_local, 1:10)
@test_throws Exception pmapreduce(fmap_local, fred, 1:10)
@test_throws Exception pmapreduce(fmap_local, fred_local, 1:10)
end
end;
end;
@testsetwithinfo "pmapbatch" begin
for (iterators, fmap) in Any[
((1:1,), x -> 1),
((1:10,), x -> 1),
((1:5,), x -> ones(1) * x),
((1:10, 1:10), (x,y) -> ones(3) * (x+y))]
res = pmapbatch(fmap, iterators...)
res_exp = pmap(fmap, iterators...)
@test res == res_exp
end
v = pmapbatch_productsplit(x -> sum(sum(i) for i in x) * ones(2), 1:1, 1:1)
@test v == [[2.0, 2.0]]
v = pmapbatch_productsplit(x -> ParallelUtilities.workerrank(x), 1:nworkers(), 1:nworkers())
@test v == [1:nworkers();]
end
end;
@testset "ClusterQueryUtils" begin
# These tests assume that all the workers are on the same node
p = procs_node()
myhost = Libc.gethostname()
if myhost in keys(p)
w_myhost = p[myhost]
@test sort(workers_myhost(w_myhost)) == sort(w_myhost)
@test sort(workers_myhost(WorkerPool(w_myhost))) == sort(w_myhost)
w = oneworkerpernode(w_myhost)
@test length(w) == 1
@test (@fetchfrom w[1] Libc.gethostname()) == myhost
pool = workerpool_nodes(WorkerPool(w_myhost))
w_pool = workers(pool)
@test length(w_pool) == 1
@test (@fetchfrom w_pool[1] Libc.gethostname()) == myhost
end
w = workers(workerpool_nodes())
@test !isempty(w)
host_w = @fetchfrom w[1] Libc.gethostname()
@test w[1] in p[host_w]
end
| ParallelUtilities | https://github.com/jishnub/ParallelUtilities.jl.git |
|
[
"MIT"
] | 0.8.6 | 704ef2c93e301b6469ba63103a4e7bf935e6990c | code | 18799 | using Distributed
using Test
using ParallelUtilities
import ParallelUtilities: ProductSplit, ProductSection, ZipSplit, zipsplit,
minimumelement, maximumelement, extremaelement, nelements, dropleading, indexinproduct,
extremadims, localindex, extrema_commonlastdim, whichproc, procrange_recast, whichproc_localindex,
getiterators, _niterators
using SplittablesBase
macro testsetwithinfo(str, ex)
quote
@info "Testing "*$str
@testset $str begin $(esc(ex)); end;
end
end
@testsetwithinfo "AbstractConstrainedProduct" begin
various_iters = Any[(1:10,), (1:1:10,), (1:10, 4:6), (1:1:10, 4:6), (1:10, 4:6, 1:4), (1:2:9,), (1:2:9, 4:1:6),
(1:2, Base.OneTo(4), 1:3:10), (1:0.5:3, 2:4)]
@testsetwithinfo "ProductSplit" begin
function split_across_processors_iterators(arr::Iterators.ProductIterator, num_procs, proc_id)
num_tasks = length(arr);
num_tasks_per_process, num_tasks_leftover = divrem(num_tasks, num_procs)
num_tasks_on_proc = num_tasks_per_process + (proc_id <= mod(num_tasks, num_procs) ? 1 : 0 );
task_start = num_tasks_per_process*(proc_id-1) + min(num_tasks_leftover, proc_id-1) + 1;
Iterators.take(Iterators.drop(arr, task_start-1), num_tasks_on_proc)
end
function split_product_across_processors_iterators(arrs_tuple, num_procs, proc_id)
split_across_processors_iterators(Iterators.product(arrs_tuple...), num_procs, proc_id)
end
@testset "Constructor" begin
function checkPSconstructor(iters, npmax = 10)
ntasks_total = prod(length, iters)
for np = 1:npmax, p = 1:np
ps = ProductSplit(iters, np, p)
@test eltype(ps) == Tuple{map(eltype, iters)...}
@test _niterators(ps) == length(iters)
if !isempty(ps)
@test collect(ps) == collect(split_product_across_processors_iterators(iters, np, p))
end
@test prod(length, getiterators(ps)) == ntasks_total
@test ParallelUtilities.workerrank(ps) == p
@test nworkers(ps) == np
end
@test_throws ArgumentError ProductSplit(iters, npmax, npmax + 1)
end
@testset "0D" begin
@test_throws ArgumentError ProductSplit((), 2, 1)
end
@testset "cumprod" begin
@test ParallelUtilities._cumprod(1,()) == ()
@test ParallelUtilities._cumprod(1,(2,)) == (1,)
@test ParallelUtilities._cumprod(1,(2, 3)) == (1, 2)
@test ParallelUtilities._cumprod(1,(2, 3, 4)) == (1, 2, 6)
end
@testset "1D" begin
iters = (1:10,)
checkPSconstructor(iters)
end
@testset "2D" begin
iters = (1:10, 4:6)
checkPSconstructor(iters)
end
@testset "3D" begin
iters = (1:10, 4:6, 1:4)
checkPSconstructor(iters)
end
@testset "steps" begin
iters = (1:2:10, 4:1:6)
checkPSconstructor(iters)
end
@testset "mixed" begin
for iters in [(1:2, 4:2:6), (1:2, Base.OneTo(4), 1:3:10)]
checkPSconstructor(iters)
end
end
@testset "empty" begin
iters = (1:1,)
ps = ProductSplit(iters, 10, 2)
@test isempty(ps)
@test length(ps) == 0
end
@testset "first and last ind" begin
for iters in Any[(1:10,), (1:2, Base.OneTo(4), 1:3:10)]
ps = ProductSplit(iters, 2, 1)
@test firstindex(ps) == 1
@test ParallelUtilities.firstindexglobal(ps) == 1
@test ParallelUtilities.lastindexglobal(ps) == div(prod(length, iters), 2)
@test lastindex(ps) == div(prod(length, iters), 2)
@test lastindex(ps) == length(ps)
ps = ProductSplit(iters, 2, 2)
@test ParallelUtilities.firstindexglobal(ps) == div(prod(length, iters), 2) + 1
@test firstindex(ps) == 1
@test ParallelUtilities.lastindexglobal(ps) == prod(length, iters)
@test lastindex(ps) == length(ps)
for np in prod(length, iters) + 1:prod(length, iters) + 10,
p in prod(length, iters) + 1:np
ps = ProductSplit(iters, np, p)
@test ParallelUtilities.firstindexglobal(ps) == prod(length, iters) + 1
@test ParallelUtilities.lastindexglobal(ps) == prod(length, iters)
end
end
end
end
@testset "firstlast" begin
@testset "first" begin
@test ParallelUtilities._first(()) == ()
for iters in various_iters, np = 1:prod(length, iters)
ps = ProductSplit(iters, np, 1)
@test first(ps) == map(first, iters)
end
end
@testset "last" begin
@test ParallelUtilities._last(()) == ()
for iters in various_iters, np = 1:prod(length, iters)
ps = ProductSplit(iters, np, np)
@test last(ps) == map(last, iters)
end
end
end
@testset "extrema" begin
@testset "min max extrema" begin
function checkPSextrema(iters, (fn_el, fn), npmax = 10)
for np = 1:npmax, p = 1:np
ps = ProductSplit(iters, np, p)
if isempty(ps)
continue
end
pcol = collect(ps)
for dims in 1:length(iters)
@test begin
res = fn_el(ps, dims = dims) == fn(x[dims] for x in pcol)
if !res
show(ps)
end
res
end
end
if _niterators(ps) == 1
@test begin
res = fn_el(ps) == fn(x[1] for x in pcol)
if !res
show(ps)
end
res
end
end
end
end
for iters in various_iters,
fntup in [(maximumelement, maximum), (minimumelement, minimum), (extremaelement, extrema)]
checkPSextrema(iters, fntup)
end
@test minimumelement(ProductSplit((1:5,), 2, 1)) == 1
@test maximumelement(ProductSplit((1:5,), 2, 1)) == 3
@test extremaelement(ProductSplit((1:5,), 2, 1)) == (1, 3)
@test minimumelement(ProductSplit((1:5,), 2, 2)) == 4
@test maximumelement(ProductSplit((1:5,), 2, 2)) == 5
@test extremaelement(ProductSplit((1:5,), 2, 2)) == (4, 5)
end
@testset "extremadims" begin
ps = ProductSplit((1:10,), 2, 1)
@test ParallelUtilities._extremadims(ps, 1,()) == ()
for iters in various_iters
dims = length(iters)
for np = 1:prod(length, iters) + 1, proc_id = 1:np
ps = ProductSplit(iters, np, proc_id)
if isempty(ps)
@test_throws ArgumentError extremadims(ps)
else
ext = Tuple(map(extrema, zip(collect(ps)...)))
@test extremadims(ps) == ext
end
end
end
end
@testset "extrema_commonlastdim" begin
iters = (1:10, 4:6, 1:4)
ps = ProductSplit(iters, 37, 8)
@test extrema_commonlastdim(ps) == ([(9, 1), (6, 1)], [(2, 2), (4, 2)])
ps = ProductSplit(iters, prod(length, iters) + 1, prod(length, iters) + 1)
@test extrema_commonlastdim(ps) === nothing
end
end
@testset "in" begin
function checkifpresent(iters, npmax = 10)
for np = 1:npmax, p = 1:np
ps = ProductSplit(iters, np, p)
if isempty(ps)
continue
end
pcol = collect(ps)
for el in pcol
# It should be contained in this iterator
@test el in ps
for p2 in 1:np
# It should not be contained anywhere else
p2 == p && continue
ps2 = ProductSplit(iters, np, p2)
@test !(el in ps2)
end
end
end
end
for iters in various_iters
checkifpresent(iters)
end
@test ParallelUtilities._infullrange((), ())
end
@testset "whichproc + procrange_recast" begin
np, proc_id = 5, 5
iters = (1:10, 4:6, 1:4)
ps = ProductSplit(iters, np, proc_id)
@test whichproc(iters, first(ps), 1) == 1
@test whichproc(ps, first(ps)) == proc_id
@test whichproc(ps, last(ps)) == proc_id
@test whichproc(iters,(100, 100, 100), 1) === nothing
@test procrange_recast(iters, ps, 1) == 1:1
@test procrange_recast(ps, 1) == 1:1
smalleriter = (1:1, 1:1, 1:1)
err = ParallelUtilities.TaskNotPresentError(smalleriter, first(ps))
@test_throws err procrange_recast(smalleriter, ps, 1)
smalleriter = (7:9, 4:6, 1:4)
err = ParallelUtilities.TaskNotPresentError(smalleriter, last(ps))
@test_throws err procrange_recast(smalleriter, ps, 1)
iters = (1:1, 2:2)
ps = ProductSplit(iters, np, proc_id)
@test procrange_recast(iters, ps, 2) == nothing
@test procrange_recast(ps, 2) == nothing
iters = (1:1, 2:2)
ps = ProductSplit(iters, 1, 1)
@test procrange_recast(iters, ps, 2) == 1:1
@test procrange_recast(ps, 2) == 1:1
iters = (Base.OneTo(2), 2:4)
ps = ProductSplit(iters, 2, 1)
@test procrange_recast(iters, ps, 1) == 1:1
@test procrange_recast(iters, ps, 2) == 1:1
@test procrange_recast(iters, ps, prod(length, iters)) == 1:length(ps)
for np_new in 1:prod(length, iters)
for proc_id_new = 1:np_new
ps_new = ProductSplit(iters, np_new, proc_id_new)
for val in ps_new
# Should loop only if ps_new is non-empty
@test whichproc(iters, val, np_new) == proc_id_new
end
end
@test procrange_recast(iters, ps, np_new) == (isempty(ps) ? nothing : (whichproc(iters, first(ps), np_new):whichproc(iters, last(ps), np_new)))
@test procrange_recast(ps, np_new) == (isempty(ps) ? nothing : (whichproc(iters, first(ps), np_new):whichproc(iters, last(ps), np_new)))
end
@testset "different set" begin
iters = (1:100, 1:4000)
ps = ProductSplit((20:30, 1:1), 2, 1)
@test procrange_recast(iters, ps, 700) == 1:1
ps = ProductSplit((20:30, 1:1), 2, 2)
@test procrange_recast(iters, ps, 700) == 1:1
iters = (1:1, 2:2)
ps = ProductSplit((20:30, 2:2), 2, 1)
@test_throws ParallelUtilities.TaskNotPresentError procrange_recast(iters, ps, 3)
ps = ProductSplit((1:30, 2:2), 2, 1)
@test_throws ParallelUtilities.TaskNotPresentError procrange_recast(iters, ps, 3)
end
end
@testset "indexinproduct" begin
@test indexinproduct((1:4, 2:3:8), (3, 5)) == 7
@test indexinproduct((1:4, 2:3:8), (3, 6)) === nothing
@test_throws ArgumentError indexinproduct((), ())
end
@testset "localindex" begin
for iters in various_iters
for np = 1:prod(length, iters), proc_id = 1:np
ps = ProductSplit(iters, np, proc_id)
for (ind, val) in enumerate(ps)
@test localindex(ps, val) == ind
end
end
end
end
@testset "whichproc_localindex" begin
for iters in various_iters
iters isa Tuple{AbstractUnitRange, Vararg{AbstractUnitRange}} || continue
for np = 1:prod(length, iters), proc_id = 1:np
ps_col = collect(ProductSplit(iters, np, proc_id))
ps_col_rev = [reverse(t) for t in ps_col]
for val in ps_col
p, ind = whichproc_localindex(iters, val, np)
@test p == proc_id
ind_in_arr = searchsortedfirst(ps_col_rev, reverse(val))
@test ind == ind_in_arr
end
end
end
@test whichproc_localindex((1:1,1:1), (1,2), 1) === nothing
end
@testset "getindex" begin
@test ParallelUtilities._getindex((), 1) == ()
@test ParallelUtilities._getindex((), 1, 2) == ()
@test ParallelUtilities.childindex((), 1) == (1,)
for iters in various_iters
for np = 1:prod(length, iters), p = 1:np
ps = ProductSplit(iters, np, p)
ps_col = collect(ps)
for i in 1:length(ps)
@test ps[i] == ps_col[i]
end
@test ps[end] == ps[length(ps)]
for ind in [0, length(ps) + 1]
@test_throws ParallelUtilities.BoundsError(ps, ind) ps[ind]
end
end
end
end
end
@testsetwithinfo "ProductSection" begin
@testset "Constructor" begin
function testPS(iterators)
itp = collect(Iterators.product(iterators...))
l = length(itp)
for startind in 1:l, endind in startind:l
ps = ProductSection(iterators, startind:endind)
@test eltype(ps) == Tuple{map(eltype, iterators)...}
for (psind, ind) in enumerate(startind:endind)
@test ps[psind] == itp[ind]
end
end
end
for iter in various_iters
testPS(iter)
end
@test_throws ArgumentError ProductSection((), 2:3)
end
end
@testset "dropleading" begin
ps = ProductSplit((1:5, 2:4, 1:3), 7, 3);
@test dropleading(ps) isa ProductSection
@test collect(dropleading(ps)) == [(4, 1), (2, 2), (3, 2)]
@test collect(dropleading(dropleading(ps))) == [(1,), (2,)]
ps = ProductSection((1:5, 2:4, 1:3), 5:8);
@test dropleading(ps) isa ProductSection
@test collect(dropleading(ps)) == [(2, 1), (3, 1)]
@test collect(dropleading(dropleading(ps))) == [(1,)]
end
@testset "nelements" begin
ps = ProductSplit((1:5, 2:4, 1:3), 7, 3);
@test nelements(ps, dims = 1) == 5
@test nelements(ps, dims = 2) == 3
@test nelements(ps, dims = 3) == 2
@test_throws ArgumentError nelements(ps, dims = 0)
@test_throws ArgumentError nelements(ps, dims = 4)
ps = ProductSection((1:5, 2:4, 1:3), 5:8);
@test nelements(ps, dims =1) == 4
@test nelements(ps, dims =2) == 2
@test nelements(ps, dims =3) == 1
ps = ProductSection((1:5, 2:4, 1:3), 5:11);
@test nelements(ps, dims = 1) == 5
@test nelements(ps, dims = 2) == 3
@test nelements(ps, dims = 3) == 1
ps = ProductSection((1:5, 2:4, 1:3), 4:8);
@test nelements(ps, dims = 1) == 5
@test nelements(ps, dims = 2) == 2
@test nelements(ps, dims = 3) == 1
ps = ProductSection((1:5, 2:4, 1:3), 4:9);
@test nelements(ps, dims = 1) == 5
@test nelements(ps, dims = 2) == 2
@test nelements(ps, dims = 3) == 1
end
@testset "SplittablesBase" begin
for iters in [(1:4, 1:3), (1:4, 1:4)]
for ps = Any[ProductSplit(iters, 3, 2), ProductSection(iters, 3:8)]
l, r = SplittablesBase.halve(ps)
lc, rc = SplittablesBase.halve(collect(ps))
@test collect(l) == lc
@test collect(r) == rc
end
end
end
@test ParallelUtilities._checknorollover((), (), ())
end;
@testset "ReverseLexicographicTuple" begin
@testset "isless" begin
a = ParallelUtilities.ReverseLexicographicTuple((1, 2, 3))
b = ParallelUtilities.ReverseLexicographicTuple((2, 2, 3))
@test a < b
@test a <= b
b = ParallelUtilities.ReverseLexicographicTuple((1, 1, 3))
@test b < a
@test b <= a
b = ParallelUtilities.ReverseLexicographicTuple((2, 1, 3))
@test b < a
@test b <= a
b = ParallelUtilities.ReverseLexicographicTuple((2, 1, 4))
@test a < b
@test a <= b
end
@testset "equal" begin
a = ParallelUtilities.ReverseLexicographicTuple((1, 2, 3))
@test a == a
@test isequal(a, a)
@test a <= a
b = ParallelUtilities.ReverseLexicographicTuple(a.t)
@test a == b
@test isequal(a, b)
@test a <= b
end
end;
@testset "ZipSplit" begin
@testset "SplittablesBase" begin
for ps in [zipsplit((1:4, 1:4), 3, 2), zipsplit((1:5, 1:5), 3, 2)]
l, r = SplittablesBase.halve(ps)
lc, rc = SplittablesBase.halve(collect(ps))
@test collect(l) == lc
@test collect(r) == rc
end
end
end
| ParallelUtilities | https://github.com/jishnub/ParallelUtilities.jl.git |
|
[
"MIT"
] | 0.8.6 | 704ef2c93e301b6469ba63103a4e7bf935e6990c | code | 244 | using Distributed
include("misctests_singleprocess.jl")
include("productsplit.jl")
include("paralleltests.jl")
for workersused in [1, 2, 4, 8]
addprocs(workersused)
try
include("paralleltests.jl")
finally
rmprocs(workers())
end
end
| ParallelUtilities | https://github.com/jishnub/ParallelUtilities.jl.git |
|
[
"MIT"
] | 0.8.6 | 704ef2c93e301b6469ba63103a4e7bf935e6990c | docs | 1499 | # ParallelUtilities.jl
[](https://github.com/jishnub/ParallelUtilities.jl/actions)
[](https://codecov.io/gh/jishnub/ParallelUtilities.jl)
[](https://jishnub.github.io/ParallelUtilities.jl/stable)
[](https://jishnub.github.io/ParallelUtilities.jl/dev)
[](https://zenodo.org/badge/latestdoi/198215953)
Parallel mapreduce and other helpful functions for HPC, meant primarily for embarrassingly parallel operations that require a list of tasks to be split into subsections, each of which may be processed on an individual core.
# Installation
Install the package using
```julia
pkg> add ParallelUtilities
julia> using ParallelUtilities
```
# Quick start
Just replace `mapreduce` by `pmapreduce` in your code and things should work the same.
```julia
julia> @everywhere f(x) = (sleep(1); x^2); # some expensive calculation
julia> nworkers()
2
julia> @time mapreduce(f, +, 1:10) # Serial
10.021436 seconds (40 allocations: 1.250 KiB)
385
julia> @time pmapreduce(f, +, 1:10) # Parallel
5.137051 seconds (863 allocations: 39.531 KiB)
385
```
# Usage
See [the documentation](https://jishnub.github.io/ParallelUtilities.jl/stable) for examples and the API.
| ParallelUtilities | https://github.com/jishnub/ParallelUtilities.jl.git |
|
[
"MIT"
] | 0.8.6 | 704ef2c93e301b6469ba63103a4e7bf935e6990c | docs | 71 | # ParallelUtilities.jl
```@autodocs
Modules = [ParallelUtilities]
```
| ParallelUtilities | https://github.com/jishnub/ParallelUtilities.jl.git |
|
[
"MIT"
] | 0.8.6 | 704ef2c93e301b6469ba63103a4e7bf935e6990c | docs | 538 | ```@meta
DocTestSetup = quote
using ParallelUtilities
using ParallelUtilities.ClusterQueryUtils
end
```
# Cluster Query Utilities
These are a collection of helper functions that are used in `ParallelUtilities`, but may be used independently as well to obtain information about the cluster on which codes are being run.
To use these functions run
```jldoctest cqu
julia> using ParallelUtilities.ClusterQueryUtils
```
The functions defined in this module are:
```@autodocs
Modules = [ParallelUtilities.ClusterQueryUtils]
```
| ParallelUtilities | https://github.com/jishnub/ParallelUtilities.jl.git |
|
[
"MIT"
] | 0.8.6 | 704ef2c93e301b6469ba63103a4e7bf935e6990c | docs | 5192 | ```@meta
DocTestSetup = quote
using ParallelUtilities
end
```
# ParallelUtilities.jl
The `ParallelUtilities` module defines certain functions that are useful in a parallel `mapreduce` operation, with particular focus on HPC systems. The approach is similar to a `@distributed (op) for` loop, where the entire section of iterators is split evenly across workers and reduced locally, followed by a global reduction. The operation is not load-balanced at present, and does not support retry on error.
# Performance
The `pmapreduce`-related functions are expected to be more performant than `@distributed` for loops. As an example, running the following on a Slurm cluster using 2 nodes with 28 cores on each leads to
```julia
julia> using Distributed
julia> using ParallelUtilities
julia> @everywhere f(x) = ones(10_000, 1_000);
julia> A = @time @distributed (+) for i=1:nworkers()
f(i)
end;
22.637764 seconds (3.35 M allocations: 8.383 GiB, 16.50% gc time, 0.09% compilation time)
julia> B = @time pmapreduce(f, +, 1:nworkers());
2.170926 seconds (20.47 k allocations: 77.117 MiB)
julia> A == B
true
```
The difference increases with the size of data as well as the number of workers. This is because the `pmapreduce*` functions defined in this package perform local reductions before communicating data across nodes. Note that in this case the same operation may be carried out elementwise to obtain better performance.
```julia
julia> @everywhere elsum(x,y) = x .+= y;
julia> A = @time @distributed (elsum) for i=1:nworkers()
f(i)
end;
20.537353 seconds (4.74 M allocations: 4.688 GiB, 2.56% gc time, 1.26% compilation time)
julia> B = @time pmapreduce(f, elsum, 1:nworkers());
1.791662 seconds (20.50 k allocations: 77.134 MiB)
```
A similar evaluation on 560 cores (20 nodes) takes
```julia
julia> @time for i = 1:10; pmapreduce(f, +, 1:nworkers()); end
145.963834 seconds (2.53 M allocations: 856.693 MiB, 0.12% gc time)
julia> @time for i = 1:10; pmapreduce(f, elsum, 1:nworkers()); end
133.810309 seconds (2.53 M allocations: 856.843 MiB, 0.13% gc time)
```
An example of a mapreduce operation involving large arrays (comparable to the memory allocated to each core) evaluated on 56 cores is
```julia
julia> @everywhere f(x) = ones(12_000, 20_000);
julia> @time ParallelUtilities.pmapreduce(f, elsum, 1:nworkers());
36.824788 seconds (26.40 k allocations: 1.789 GiB, 0.05% gc time)
```
# Comparison with other parallel mapreduce packages
Other packages that perform parallel mapreduce are [`ParallelMapReduce`](https://github.com/hcarlsso/ParallelMapReduce.jl) and [`Transducers`](https://github.com/JuliaFolds/Transducers.jl). The latter provides a `foldxd` function that performs an associative distributed `mapfold`. The performance of these functions compared to this package (measured on 1 node with 28 cores) is listed below:
```julia
julia> @everywhere f(x) = ones(10_000, 10_000);
julia> A = @time ParallelUtilities.pmapreduce(f, +, 1:nworkers());
10.105696 seconds (14.03 k allocations: 763.511 MiB)
julia> B = @time ParallelMapReduce.pmapreduce(f, +, 1:nworkers(), algorithm = :reduction_local);
30.955381 seconds (231.93 k allocations: 41.200 GiB, 7.63% gc time, 0.23% compilation time)
julia> C = @time Transducers.foldxd(+, 1:nworkers() |> Transducers.Map(f));
30.154166 seconds (655.40 k allocations: 41.015 GiB, 8.65% gc time, 1.03% compilation time)
julia> A == B == C
true
```
Note that at present the performance of the `pmapreduce*` functions defined in this package is not comparable to that of equivalent MPI implementations. For example, an MPI mapreduce operation using [`MPIMapReduce.jl`](https://github.com/jishnub/MPIMapReduce.jl) computes an inplace sum over `10_000 x 10_000` matrices on each core in
```julia
3.413968 seconds (3.14 M allocations: 1.675 GiB, 2.99% gc time)
```
whereas this package computes it in
```julia
julia> @time ParallelUtilities.pmapreduce(f, elsum, 1:nworkers());
7.264023 seconds (12.46 k allocations: 763.450 MiB, 1.69% gc time)
```
This performance gap might reduce in the future.
!!! note
The timings have all been measured on Julia 1.6 on an HPC cluster whose nodes have 2 Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz CPUs ("Broadwell", 14 cores/socket, 28 cores/node). All timings are for subsequent runs after an initial precompilation step. The exact evaluation time might also vary depending on the cluster load.
# Known issues
1. This package currently does not implement a specialized `mapreduce` for arrays, so the behavior might differ for specialized array argument types (e.g. `DistributedArray`s). This might change in the future.
2. This package deals with distributed (multi-process) parallelism, and at this moment it has not been tested extensively alongside multi-threading. Multithreading + multiprocessing has been tested where the number of threads times the number of processes equals the number of available cores. See [an example](examples/threads.md) of multithreading used in this form, where each node uses threads locally, and reduction across nodes is performed using multiprocessing.
| ParallelUtilities | v0.8.6 | MIT | docs | https://github.com/jishnub/ParallelUtilities.jl.git |
```@meta
DocTestSetup = quote
using ParallelUtilities
end
```
# Parallel mapreduce
There are two modes of evaluating a parallel mapreduce that vary only in the arguments that the mapping function accepts.
1. Iterated zip, where one element from the zipped iterators is splatted and passed as arguments to the mapping function. In this case the function must accept as many arguments as the number of iterators passed to mapreduce. This is analogous to a serial `mapreduce`.
2. Non-iterated product, in which case the iterator product of the arguments is distributed evenly across the workers. The mapping function in this case should accept one argument that is a collection of `Tuple`s of values. It may iterate over the argument to obtain the individual `Tuple`s.
Each process involved in a `pmapreduce` operation carries out a local `mapreduce`, followed by a reduction across processes. The reduction is carried out in the form of a binary tree. The reduction happens in three stages:
1. A local reduction as a part of `mapreduce`
2. A reduction on the host across the workers on the same host. Typically on an HPC system there is an independent reduction on each node across the processes on that node.
3. A global reduction across hosts.
The reduction operator is assumed to be associative, and reproducibility of floating-point operations is not guaranteed. For reductions that must be evaluated in a fixed order (e.g. with operators that are not associative), look into the various `mapfold*` methods provided by other packages, such as [`Transducers`](https://github.com/JuliaFolds/Transducers.jl). The reduction operator is not assumed to be commutative.
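Since a tree-shaped reduction only changes how the terms are grouped, an associative (even if non-commutative) operator produces the same result as a serial reduction. A minimal sketch of this property using plain `Base.reduce`, not ParallelUtilities internals:
```julia
xs = ["a", "b", "c", "d"]
# Tree grouping: (a*b)*(c*d); serial grouping: ((a*b)*c)*d.
# String concatenation is associative but not commutative; both give "abcd".
@assert (xs[1] * xs[2]) * (xs[3] * xs[4]) == reduce(*, xs)
```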
A `pmapreduce` might only benefit in performance if the mapping function runs for longer than the communication overhead across processes, or if each process has dedicated memory and returns large arrays that may not be collectively stored on one process.
## Iterated Zip
The syntax for a parallel map-reduce operation is quite similar to the serial `mapreduce`, with the replacement of `mapreduce` by `pmapreduce`.
Serial:
```julia
julia> mapreduce(x -> x^2, +, 1:100_000)
333338333350000
```
Parallel:
```julia
julia> pmapreduce(x -> x^2, +, 1:100_000)
333338333350000
```
We may check that parallel evaluation helps in performance for a long-running process.
```julia
julia> nworkers()
2
julia> @time mapreduce(x -> (sleep(1); x^2), +, 1:6);
6.079191 seconds (54.18 k allocations: 3.237 MiB, 1.10% compilation time)
julia> @time pmapreduce(x -> (sleep(1); x^2), +, 1:6);
3.365979 seconds (91.57 k allocations: 5.473 MiB, 0.87% compilation time)
```
## Non-iterated product
The second mode of usage is similar to MPI, where each process evaluates the same function once for different arguments. This is called using
```julia
pmapreduce_productsplit(f, op, iterators...)
```
In this function, the iterator product of the argument `iterators` is split evenly across the workers, and
the function `f` on each process receives one such section according to its rank. The argument is an iterator similar to an iterator product, and looping over it produces `Tuple`s of the form `(iterators[1][i1], iterators[2][i2], ...)`, where the indices depend on the rank of the worker as well as the local loop index.
As an example, we run this with 2 workers:
```julia
julia> pmapreduce_productsplit(ps -> (@show collect(ps)), vcat, 1:4)
From worker 2: collect(ps) = [(1,), (2,)]
From worker 3: collect(ps) = [(3,), (4,)]
4-element Vector{Tuple{Int64}}:
(1,)
(2,)
(3,)
(4,)
julia> pmapreduce_productsplit(ps -> (@show collect(ps)), vcat, 1:3, 1:2)
From worker 2: collect(ps) = [(1, 1), (2, 1), (3, 1)]
From worker 3: collect(ps) = [(1, 2), (2, 2), (3, 2)]
6-element Vector{Tuple{Int64, Int64}}:
(1, 1)
(2, 1)
(3, 1)
(1, 2)
(2, 2)
(3, 2)
```
Note that in each case the mapping function receives the entire collection of arguments in one go, unlike a standard `mapreduce` where the function receives the arguments individually. This is chosen so that the function may perform any one-time compute-intensive task for the entire range before looping over the argument values.
Each process might return one or more values that are subsequently reduced in parallel.
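For instance, the mapping function may carry out an expensive one-time setup before looping over its assigned section. A hedged sketch, where `expensive_setup` and `g` are hypothetical placeholder functions rather than part of this package:
```julia
pmapreduce_productsplit(ps -> begin
        buf = expensive_setup()             # hypothetical: evaluated once per worker
        sum(g(buf, x, y) for (x, y) in ps)  # then loop over this worker's Tuples
    end, +, 1:10, 1:10)
```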
!!! note
At present the `iterators` passed as arguments to `pmapreduce_productsplit` may only be strictly increasing ranges. This might be relaxed in the future.
The argument `ps` passed on to each worker is a [`ParallelUtilities.ProductSplit`](@ref) object. This has several methods defined for it that might aid in evaluating the mapping function locally.
### ProductSplit
A `ProductSplit` object `ps` holds the section of the iterator product that is assigned to the worker. It also encloses the worker rank and the size of the worker pool, similar to MPI's `Comm_rank` and `Comm_size`. These may be accessed as `workerrank(ps)` and `nworkers(ps)`. Unlike MPI though, the rank goes from `1` to `np`. An example where the worker rank is used (on 2 workers) is
```julia
julia> pmapreduce_productsplit(ps -> ones(2) * workerrank(ps), hcat, 1:nworkers())
2×2 Matrix{Float64}:
1.0 2.0
1.0 2.0
```
The way to construct a `ProductSplit` object is `ParallelUtilities.ProductSplit(tuple_of_iterators, nworkers, worker_rank)`
```jldoctest productsplit; setup=:(using ParallelUtilities)
julia> ps = ParallelUtilities.ProductSplit((1:2, 3:4), 2, 1)
2-element ProductSplit [(1, 3), ... , (2, 3)]
julia> ps |> collect
2-element Vector{Tuple{Int64, Int64}}:
(1, 3)
(2, 3)
```
A `ProductSplit` that wraps `AbstractUnitRange`s has several efficient functions defined for it, such as `length`, `minimumelement`, `maximumelement` and `getindex`, each of which returns in `O(1)` time without iterating over the object.
```jldoctest productsplit
julia> ps[1]
(1, 3)
```
The functions `maximumelement`, `minimumelement` and `extremaelement` treat the `ProductSplit` object as a linear view of an `n`-dimensional iterator product. These functions look through the elements in the `dims`-th dimension of the iterator product, and if possible, return the corresponding extremal element in `O(1)` time. Similarly, for a `ProductSplit` object that wraps `AbstractUnitRange`s, it's possible to know if a value is contained in the iterator in `O(1)` time.
```julia productsplit
julia> ps = ParallelUtilities.ProductSplit((1:100_000, 1:100_000, 1:100_000), 25000, 1500)
40000000000-element ProductSplit [(1, 1, 5997), ... , (100000, 100000, 6000)]
julia> @btime (3,3,5998) in $ps
111.399 ns (0 allocations: 0 bytes)
true
julia> @btime ParallelUtilities.maximumelement($ps, dims = 1)
76.534 ns (0 allocations: 0 bytes)
100000
julia> @btime ParallelUtilities.minimumelement($ps, dims = 2)
73.724 ns (0 allocations: 0 bytes)
1
julia> @btime ParallelUtilities.extremaelement($ps, dims = 2)
76.332 ns (0 allocations: 0 bytes)
(1, 100000)
```
The number of unique elements along a particular dimension may be obtained as
```julia productsplit
julia> @btime ParallelUtilities.nelements($ps, dims = 3)
118.441 ns (0 allocations: 0 bytes)
4
```
It's also possible to drop the leading dimension of a `ProductSplit` that wraps `AbstractUnitRange`s to obtain an analogous iterator that contains the unique elements along the remaining dimensions. This is achieved using `ParallelUtilities.dropleading`.
```jldoctest productsplit
julia> ps = ParallelUtilities.ProductSplit((1:3, 1:3, 1:2), 4, 2)
5-element ProductSplit [(3, 2, 1), ... , (1, 1, 2)]
julia> collect(ps)
5-element Vector{Tuple{Int64, Int64, Int64}}:
(3, 2, 1)
(1, 3, 1)
(2, 3, 1)
(3, 3, 1)
(1, 1, 2)
julia> ps2 = ParallelUtilities.dropleading(ps)
3-element ProductSection [(2, 1), ... , (1, 2)]
julia> collect(ps2)
3-element Vector{Tuple{Int64, Int64}}:
(2, 1)
(3, 1)
(1, 2)
```
The process may be repeated multiple times:
```jldoctest productsplit
julia> collect(ParallelUtilities.dropleading(ps2))
2-element Vector{Tuple{Int64}}:
(1,)
(2,)
```
# Reduction Operators
Any standard Julia reduction operator may be passed to `pmapreduce`. Aside from this, the package defines certain operators of its own that may also be used in a reduction.
## Broadcasted elementwise operators
The general way to construct an elementwise operator using this package is using [`ParallelUtilities.BroadcastFunction`](@ref).
For example, a broadcasted sum operator may be constructed using
```jldoctest
julia> ParallelUtilities.BroadcastFunction(+);
```
The function call `ParallelUtilities.BroadcastFunction(op)(x, y)` performs the fused elementwise operation `op.(x, y)`.
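A quick sketch of this behavior:
```julia
bf = ParallelUtilities.BroadcastFunction(+)
bf([1, 2], [3, 4]) # equivalent to [1, 2] .+ [3, 4], i.e. [4, 6]
```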
!!! note "Julia 1.6 and above"
Julia versions above `v"1.6"` provide a function `Base.BroadcastFunction` which is equivalent to `ParallelUtilities.BroadcastFunction`.
## Inplace assignment
The function [`ParallelUtilities.broadcastinplace`](@ref) may be used to construct a binary operator that broadcasts a function over its arguments and stores the result inplace in one of the arguments. This is particularly useful if the intermediate results are not important, as this cuts down on allocations in the reduction.
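A minimal sketch, assuming the signature `broadcastinplace(f, Val(N))`, where `N` selects the argument that receives the result (consult the docstring for the exact form):
```julia
op = ParallelUtilities.broadcastinplace(+, Val(1))
x = ones(3); y = fill(2.0, 3);
op(x, y) # x now holds x .+ y, i.e. [3.0, 3.0, 3.0]
```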
Several operators for common functions are pre-defined for convenience.
1. [`ParallelUtilities.elementwisesum!`](@ref)
2. [`ParallelUtilities.elementwiseproduct!`](@ref)
3. [`ParallelUtilities.elementwisemin!`](@ref)
4. [`ParallelUtilities.elementwisemax!`](@ref)
Each of these functions overwrites the first argument with the result.
!!! warning
    The pre-defined elementwise operators are assumed to be commutative, so, if used in `pmapreduce`, the order of arguments passed to the function is not guaranteed. In particular the arguments might not arrive in order of `workerrank`. These functions should only be used if both arguments support the inplace assignment, e.g. if they have identical axes.
## Flip
The [`ParallelUtilities.Flip`](@ref) function may be used to wrap a binary function, flipping the order of its arguments. For example
```jldoctest
julia> vcat(1,2)
2-element Vector{Int64}:
1
2
julia> ParallelUtilities.Flip(vcat)(1,2)
2-element Vector{Int64}:
2
1
```
`Flip` may be combined with inplace assignment operators to change the argument that is overwritten.
```jldoctest
julia> x = ones(3); y = ones(3);
julia> op1 = ParallelUtilities.elementwisesum!; # overwrites the first argument
julia> op1(x, y); x
3-element Vector{Float64}:
2.0
2.0
2.0
julia> x = ones(3); y = ones(3);
julia> op2 = ParallelUtilities.Flip(op1); # overwrites the second argument
julia> op2(x, y); y
3-element Vector{Float64}:
2.0
2.0
2.0
```
## BroadcastStack
This function may be used to combine arrays having overlapping axes to obtain a new array that spans the union of axes of the arguments. The overlapping section is computed by applying the reduction function to that section.
We construct a function that concatenates arrays along the first dimension with overlapping indices summed.
```jldoctest broadcaststack
julia> f = ParallelUtilities.BroadcastStack(+, 1);
```
We apply this to two arrays having different indices
```jldoctest broadcaststack
julia> f(ones(2), ones(4))
4-element Vector{Float64}:
2.0
2.0
1.0
1.0
```
This function is useful for reducing [`OffsetArray`s](https://github.com/JuliaArrays/OffsetArrays.jl) where each process evaluates a potentially overlapping section of the entire array.
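A rough sketch of such a reduction, assuming the `OffsetArrays` package is installed:
```julia
using OffsetArrays
f = ParallelUtilities.BroadcastStack(+, 1)
a = OffsetArray(ones(3), 1:3) # axes 1:3
b = OffsetArray(ones(3), 2:4) # axes 2:4, overlapping a on 2:3
c = f(a, b)                   # spans axes 1:4; the overlapping entries hold 2.0
```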
!!! note
A `BroadcastStack` function requires its arguments to have the same dimensionality, and identical axes along non-concatenated dimensions. In particular it is not possible to block-concatenate arrays using this function.
!!! note
A `BroadcastStack` function does not operate in-place.
## Commutative
In general this package does not assume that a reduction operator is commutative. It's possible to declare an operator to be commutative in its arguments by wrapping it in the tag [`ParallelUtilities.Commutative`](@ref).
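A minimal sketch, assuming the wrapped operator is passed to `pmapreduce` in place of the bare one:
```julia
op = ParallelUtilities.Commutative(+)
pmapreduce(x -> x^2, op, 1:100)
```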
| ParallelUtilities | v0.8.6 | MIT | docs | https://github.com/jishnub/ParallelUtilities.jl.git |
# Example of the use of pmapreduce
The function `pmapreduce` performs a parallel `mapreduce`. This is primarily useful when the function has to perform an expensive calculation, that is, when the evaluation time per core exceeds the setup and communication time. It is also useful when each core is allocated its own memory and has to work with arrays that won't collectively fit into the memory of a single machine, as is often the case on a cluster.
We walk through an example where we initialize and concatenate arrays in serial and in parallel.
We load the necessary modules first
```julia
using ParallelUtilities
using Distributed
```
We define the function that performs the initialization on each core. This step is embarrassingly parallel, as no communication happens between workers. We simulate an expensive calculation by adding a sleep interval for each index.
```julia
function initialize(sleeptime)
A = Array{Int}(undef, 20, 20)
for ind in eachindex(A)
sleep(sleeptime)
A[ind] = ind
end
return A
end
```
Next we define the function that calls `pmapreduce`:
```julia
function main_pmapreduce(sleeptime)
pmapreduce(x -> initialize(sleeptime), hcat, 1:20)
end
```
We also define a function that carries out a serial mapreduce:
```julia
function main_mapreduce(sleeptime)
mapreduce(x -> initialize(sleeptime), hcat, 1:20)
end
```
We compare the performance of the serial and parallel evaluations using 20 cores on one node:
We define a caller function first
```julia
function compare_with_serial()
# precompile
main_mapreduce(0)
main_pmapreduce(0)
# time
    println("Testing serial")
A = @time main_mapreduce(5e-6)
    println("Testing parallel")
B = @time main_pmapreduce(5e-6)
# check results
println("Results match : ", A == B)
end
```
We run this caller on the cluster:
```julia
julia> compare_with_serial()
Testing serial
9.457601 seconds (40.14 k allocations: 1.934 MiB)
Testing parallel
0.894611 seconds (23.16 k allocations: 1.355 MiB, 2.56% compilation time)
Results match : true
```
The full script may be found in the examples directory.
| ParallelUtilities | v0.8.6 | MIT | docs | https://github.com/jishnub/ParallelUtilities.jl.git |
# Using SharedArrays in a parallel mapreduce
One might want to carry out a computation across several nodes of a cluster, where each node works on its own shared array. This may be achieved by using a `WorkerPool` that consists of one worker per node, each of which acts as a root process launching tasks on its node and eventually returning the local array for an overall reduction across nodes.
We walk through one such example where we concatenate arrays that are locally initialized on each node.
We load the packages necessary, in this case these are `ParallelUtilities`, `SharedArrays` and `Distributed`.
```julia
using ParallelUtilities
using SharedArrays
using Distributed
```
We create a function to initialize the local part on each worker. In this case we simulate a heavy workload by adding a `sleep` period. In other words, we assume that the individual elements of the array are expensive to evaluate. We obtain the local indices of the `SharedArray` through the function `localindices`.
```julia
function initialize_localpart(s, sleeptime)
for ind in localindices(s)
sleep(sleeptime)
s[ind] = ind
end
end
```
We create a function that runs on the root worker on each node and feeds tasks to other workers on that node. We use the function `ParallelUtilities.workers_myhost()` to obtain a list of all workers on the same node. We create the `SharedArray` on these workers so that it is entirely contained on one machine. This is achieved by passing the keyword argument `pids` to the `SharedArray` constructor. We asynchronously spawn tasks to initialize the local parts of the shared array on each worker.
```julia
function initializenode_sharedarray(sleeptime)
# obtain the workers on the local machine
pids = ParallelUtilities.workers_myhost()
# Create a shared array spread across the workers on that node
s = SharedArray{Int}((2_000,), pids = pids)
# spawn remote tasks to initialize the local part of the shared array
@sync for p in pids
@spawnat p initialize_localpart(s, sleeptime)
end
return sdata(s)
end
```
We create a main function that runs on the calling process and concatenates the arrays on each node. This is run on a `WorkerPool` consisting of one worker per node which acts as the root process. We may obtain such a pool through the function `ParallelUtilities.workerpool_nodes()`. Finally we call `pmapreduce` with a mapping function that initializes an array on each node, which is followed by a concatenation across the nodes.
```julia
function main_sharedarray(sleeptime)
# obtain the workerpool with one process on each node
pool = ParallelUtilities.workerpool_nodes()
# obtain the number of workers in the pool.
nw_node = nworkers(pool)
# Evaluate the parallel mapreduce
pmapreduce(x -> initializenode_sharedarray(sleeptime), hcat, pool, 1:nw_node)
end
```
We compare the results with a serial execution that uses a similar workflow, except we use `Array` instead of `SharedArray` and `mapreduce` instead of `pmapreduce`.
```julia
function initialize_serial(sleeptime)
s = Array{Int}(undef, 2_000)
for ind in eachindex(s)
sleep(sleeptime)
s[ind] = ind
end
return sdata(s)
end
function main_serial(sleeptime)
pool = ParallelUtilities.workerpool_nodes()
nw_node = nworkers(pool)
mapreduce(x -> initialize_serial(sleeptime), hcat, 1:nw_node)
end
```
We create a function to compare the performance of the two. We start with a precompilation run with no sleep time, followed by recording the actual timings.
```julia
function compare_with_serial()
# precompile
main_serial(0)
main_sharedarray(0)
# time
println("Testing serial")
A = @time main_serial(5e-3)
println("Testing sharedarray")
B = @time main_sharedarray(5e-3)
println("Results match : ", A == B)
end
```
We run this script on a Slurm cluster across 2 nodes with 28 cores on each node. The results are:
```julia
julia> compare_with_serial()
Testing serial
24.624912 seconds (27.31 k allocations: 1.017 MiB)
Testing sharedarray
1.077752 seconds (4.60 k allocations: 246.281 KiB)
Results match : true
```
The full script may be found in the examples directory.
| ParallelUtilities | v0.8.6 | MIT | docs | https://github.com/jishnub/ParallelUtilities.jl.git |
# Using Threads in a parallel mapreduce
One might want to carry out a computation across several nodes of a cluster, where each node uses multithreading to evaluate a result that is subsequently reduced across all nodes. We walk through one such example where we concatenate arrays that are locally initialized on each node using threads.
We load the packages necessary, in this case these are `ParallelUtilities` and `Distributed`.
```julia
using ParallelUtilities
using Distributed
```
We create a function to initialize the local part on each worker. In this case we simulate a heavy workload by adding a `sleep` period. In other words, we assume that the individual elements of the array are expensive to evaluate. We use `Threads.@threads` to split up the loop into sections that are processed on individual threads.
```julia
function initializenode_threads(sleeptime)
s = zeros(Int, 2_000)
Threads.@threads for ind in eachindex(s)
sleep(sleeptime)
s[ind] = ind
end
return s
end
```
We create a main function that runs on the calling process and launches the array initialization task on each node. This is run on a `WorkerPool` consisting of one worker per node which acts as the root process. We may obtain such a pool through the function `ParallelUtilities.workerpool_nodes()`. The array creation step on each node is followed by an eventual concatenation.
```julia
function main_threads(sleeptime)
# obtain the workerpool with one process on each node
pool = ParallelUtilities.workerpool_nodes()
# obtain the number of workers in the pool.
nw_nodes = nworkers(pool)
# Evaluate the parallel mapreduce
pmapreduce(x -> initializenode_threads(sleeptime), hcat, pool, 1:nw_nodes)
end
```
We compare the results with a serial execution that uses a similar workflow, except we use `mapreduce` instead of `pmapreduce` and do not use threads.
```julia
function initialize_serial(sleeptime)
s = zeros(Int, 2_000)
for ind in eachindex(s)
sleep(sleeptime)
s[ind] = ind
end
return s
end
function main_serial(sleeptime)
pool = ParallelUtilities.workerpool_nodes()
nw_nodes = nworkers(pool)
mapreduce(x -> initialize_serial(sleeptime), hcat, 1:nw_nodes)
end
```
We create a function to compare the performance of the two. We start with a precompilation run with no sleep time, followed by recording the actual timings with a sleep time of 5 milliseconds for each index of the array.
```julia
function compare_with_serial()
# precompile
main_serial(0)
main_threads(0)
# time
println("Testing serial")
A = @time main_serial(5e-3);
println("Testing threads")
B = @time main_threads(5e-3);
println("Results match : ", A == B)
end
```
We run this script on a Slurm cluster across 2 nodes with 28 cores on each node. The results are:
```julia
julia> compare_with_serial()
Testing serial
24.601593 seconds (22.49 k allocations: 808.266 KiB)
Testing threads
0.666256 seconds (3.71 k allocations: 201.703 KiB)
Results match : true
```
The full script may be found in the examples directory.
| NumPyArrays | v0.1.1 | MIT | code | https://github.com/mkitti/NumPyArrays.jl.git |
"""
NumPyArrays extends PyCall to provide for additional conversion of Julia
arrays into NumPy arrays without copying.
```jldoctest
julia> using NumPyArrays, PyCall
julia> rA = reinterpret(UInt8, zeros(Int8, 4,4));
julia> pytypeof(PyObject(rA))
PyObject <class 'list'>
julia> pytypeof(NumPyArray(rA))
PyObject <class 'numpy.ndarray'>
julia> pytypeof(PyObject(NumPyArray(rA)))
PyObject <class 'numpy.ndarray'>
julia> sA = @view collect(1:16)[5:9];
julia> pytypeof(PyObject(sA))
PyObject <class 'list'>
julia> pytypeof(NumPyArray(sA))
PyObject <class 'numpy.ndarray'>
```
"""
module NumPyArrays
# Portions of this code are derived directly from PyCall.jl
@static if isdefined(Base, :Experimental) &&
isdefined(Base.Experimental, Symbol("@optlevel"))
Base.Experimental.@optlevel 1
end
export NumPyArray, pytypeof
# Imports for _NumPyArray
import PyCall: NpyArray, PYARR_TYPES, @npyinitialize, npy_api, npy_type
import PyCall: @pycheck, NPY_ARRAY_ALIGNED, NPY_ARRAY_WRITEABLE, pyembed
import PyCall: PyObject, PyPtr
# General imports
import PyCall: pyimport, pytype_query, pytypeof, PyArray
"""
KnownImmutableArraysWithParent{T} where T <: PyCall.PYARR_TYPES
Immutable `AbstractArray`s in `Base` that have a non-immutable parent that can be embedded in the PyCall GC
"""
const KnownImmutableArraysWithParent{T} = Union{SubArray{T}, Base.ReinterpretArray{T}, Base.ReshapedArray{T}, Base.PermutedDimsArray{T}} where T
"""
KnownStridedArrays{T} where T <: PyCall.PYARR_TYPES
`AbstractArray`s in `Base` where the method `strides` is applicable
"""
const KnownStridedArrays{T} = StridedArray{T} where T
"""
NumPyArray{T,N}(po::PyObject)
NumPyArray is a wrapper around a PyCall.PyArray. It is an AbstractArray.
The main purpose of a NumPyArray is so to provide a constructor to generalize
the conversion of Julia arrays into NumPyArrays. `T` is the element type of the array.
`N` is the number of dimensions.
For other uses, such as wrapping an existing array from NumPy, use `PyCall.PyArray`.
Use `PyObject` and `PyArray` methods to convert `NumPyArray` into those types.
"""
mutable struct NumPyArray{T,N} <: AbstractArray{T,N}
pa::PyArray{T,N}
end
NumPyArray{T,N}(po::PyObject) where {T,N} = NumPyArray{T,N}(PyArray(po))
NumPyArray(po::PyObject) = NumPyArray(PyArray(po))
"""
NumPyArray(a::AbstractArray, [revdims::Bool])
Convert an AbstractArray where `isapplicable(strides, a)` is `true` to a NumPy array.
The AbstractArray must either be mutable or have a mutable parent. Optionally,
transpose the dimensions of the array if `revdims` is `true`.
"""
NumPyArray(a::AbstractArray{T}) where T <: PYARR_TYPES = NumPyArray(a, false)
function NumPyArray(a::KnownStridedArrays{T}, revdims::Bool) where T <: PYARR_TYPES
_NumPyArray(a, revdims)
end
function NumPyArray(a::AbstractArray{T}, revdims::Bool) where T <: PYARR_TYPES
# For a general AbstractArray, we do not know if strides applies
if applicable(strides, a)
_NumPyArray(a, revdims)
else
error("Only AbstractArrays where strides is applicable can be converted to NumPyArrays.")
end
end
NumPyArray(o::PyPtr) = NumPyArray(PyObject(o))
# Modified PyCall.NpyArray to accept AbstractArray{T}, assumes strides is applicable
function _NumPyArray(a::AbstractArray{T}, revdims::Bool) where T <: PYARR_TYPES
@npyinitialize
size_a = revdims ? reverse(size(a)) : size(a)
strides_a = revdims ? reverse(strides(a)) : strides(a)
p = @pycheck ccall(npy_api[:PyArray_New], PyPtr,
(PyPtr,Cint,Ptr{Int},Cint, Ptr{Int},Ptr{T}, Cint,Cint,PyPtr),
npy_api[:PyArray_Type],
ndims(a), Int[size_a...], npy_type(T),
Int[strides_a...] * sizeof(eltype(a)), a, sizeof(eltype(a)),
NPY_ARRAY_ALIGNED | NPY_ARRAY_WRITEABLE,
C_NULL)
return NumPyArray{T,ndims(a)}(p, a)
end
# Make a NumPyArray that embeds a reference to keep, to prevent Julia
# from garbage-collecting keep until o is finalized.
# See also PyObject(o::PyPtr, keep::Any) from which this is derived
NumPyArray{T,N}(o::PyPtr, keep::Any) where {T,N} = numpyembed(NumPyArray{T,N}(PyObject(o)), keep)
# PyCall already has convert(::Type{PyObject}, o) = PyObject(o)
#Base.convert(::Type{PyObject}, a::NumPyArray) = a.po
PyObject(a::NumPyArray) = PyObject(PyArray(a))
Base.convert(::Type{PyArray}, a::NumPyArray) = PyArray(a)
PyArray(a::NumPyArray) = a.pa
Base.convert(::Type{Array}, a::NumPyArray{T}) where T = convert(Array{T}, PyArray(a))
Base.convert(T::Type{<:Array}, a::NumPyArray) = convert(T, PyArray(a))
# See PyCall.pyembed(po::PyObject, jo::Any)
function numpyembed(a::NumPyArray{T,N}, jo::Any) where {T,N}
if isimmutable(jo)
if applicable(parent, jo)
return NumPyArray{T,N}(pyembed(PyObject(a), parent(jo)))
else
throw(ArgumentError("numpyembed: immutable argument without a parent is not allowed"))
end
else
return NumPyArray{T,N}(pyembed(PyObject(a), jo))
end
end
numpyembed(a::NumPyArray, jo::KnownImmutableArraysWithParent) = numpyembed(a, jo.parent)
# AbstractArray interface, provided as a convenience. Conversion to PyArray is recommended
Base.size(a::NumPyArray) = size(PyArray(a))
Base.length(a::NumPyArray) = length(PyArray(a))
Base.getindex(a::NumPyArray, args...) = getindex(PyArray(a), args...)
Base.setindex!(a::NumPyArray, args...) = setindex!(PyArray(a), args...)
Base.axes(a::NumPyArray) = axes(PyArray(a))
# Strides should be added to PyArray
Base.strides(a::NumPyArray{T}) where T = PyArray(a).st
Base.stride(a::NumPyArray{T}) where T = stride(PyArray(a))
Base.pointer(a::NumPyArray, args...) = pointer(PyArray(a), args...)
Base.unsafe_convert(t::Type{Ptr{T}}, a::NumPyArray{T}) where T = Base.unsafe_convert(t, PyArray(a))
Base.similar(a::NumPyArray, ::Type{T}, dims::Dims) where {T} = similar(PyArray(a), T, dims)
# Aliasing some PyCall functions. Conversion to PyObject or PyArray is recommended
pytypeof(a::NumPyArray) = pytypeof(PyObject(PyArray(a)))
end # module NumPyArrays
| NumPyArrays | v0.1.1 | MIT | code | https://github.com/mkitti/NumPyArrays.jl.git |
using Test
const desired_version = VersionNumber(ARGS[1])
include("../deps/deps.jl")
@testset "pyversion_build ≈ $desired_version" begin
@test desired_version.major == pyversion_build.major
@test desired_version.minor == pyversion_build.minor
if desired_version.patch != 0
@test desired_version.patch == pyversion_build.patch
end
end
| NumPyArrays | v0.1.1 | MIT | code | https://github.com/mkitti/NumPyArrays.jl.git |
using NumPyArrays
using PyCall
using Test
@testset "NumPyArrays.jl" begin
np = pyimport("numpy")
let A = Float64[1 2; 3 4]
# Normal array
B = copy(A)
C = NumPyArray(B)
D = PyArray(C)
@test pytypeof(C) == np.ndarray
@test C == B
@test D == B
B[1] = 3
@test C == B && C[1] == B[1]
@test D == B && D[1] == B[1]
C[1] += 1
@test C == B && C[1] == B[1]
@test D == B && D[1] == B[1]
# SubArray
B = view(A, 1:2, 2:2)
C = NumPyArray(B)
D = PyArray(C)
@test pytypeof(C) == np.ndarray
@test C == B
@test D == B
A[3] = 5
@test C == B && C[1] == A[3]
@test D == B && D[1] == A[3]
C[1] += 1
@test C == B && C[1] == B[1]
@test D == B && D[1] == B[1]
# ReshapedArray
B = Base.ReshapedArray( A, (1,4), () )
C = NumPyArray(B)
D = PyArray(C)
@test pytypeof(C) == np.ndarray
@test C == B
@test D == B
A[2] = 6
@test C == B && C[2] == A[2]
@test D == B && D[2] == A[2]
C[1] += 1
@test C == B && C[1] == B[1]
@test D == B && D[1] == B[1]
# PermutedDimsArray
B = PermutedDimsArray(A, (2,1) )
C = NumPyArray(B)
D = PyArray(C)
@test pytypeof(C) == np.ndarray
@test C == B
@test D == B
        A[1] = 7
@test C == B && C[1] == A[1]
@test D == B && D[1] == A[1]
C[1] += 1
@test C == B && C[1] == B[1]
@test D == B && D[1] == B[1]
# ReinterpretArray
B = reinterpret(UInt64, A)
C = NumPyArray(B)
D = PyArray(C)
@test pytypeof(C) == np.ndarray
@test C == B
@test D == B
A[1] = 12
@test C == B && C[1] == reinterpret(UInt64, A[1])
@test D == B && D[1] == reinterpret(UInt64, A[1])
C[1] += 1
@test C == B && C[1] == B[1]
@test D == B && D[1] == B[1]
# Test display
rA = reinterpret(UInt8, zeros(Int8, 4, 4))
nprA = NumPyArray(rA)
io = IOBuffer()
@test pytypeof(nprA) == np.ndarray
@test show(io, nprA) === nothing
@test String(take!(io)) == "UInt8[0x00 0x00 0x00 0x00; 0x00 0x00 0x00 0x00; 0x00 0x00 0x00 0x00; 0x00 0x00 0x00 0x00]"
sA = @view collect(1:16)[5:9]
npsA = NumPyArray(sA)
@test pytypeof(npsA) == np.ndarray
@test show(io, npsA) === nothing
@test String(take!(io)) == "[5, 6, 7, 8, 9]"
println()
# Test roundtrip
@test nprA == NumPyArray(nprA)
@test npsA == NumPyArray(npsA)
# Test operations
@test sum(nprA) == np.sum(nprA)
@test sum(npsA) == np.sum(npsA)
@test length(nprA) == np.size(nprA)
@test length(npsA) == np.size(npsA)
end
end
| NumPyArrays | v0.1.1 | MIT | docs | https://github.com/mkitti/NumPyArrays.jl.git |
# NumPyArrays.jl
NumPyArrays.jl is a Julia package that extends PyCall.jl in order to convert additional Julia arrays into NumPy arrays.
## Additional Features
NumPyArrays.jl also provides an [`AbstractArray` interface](https://docs.julialang.org/en/v1/manual/interfaces/#man-interface-array)
and extends some functions of PyCall to apply to a `NumPyArray`. Much of this is redundant with the functionality of `PyCall.PyArray`, which this wraps.
For advanced usage with PyCall, it is recommended to convert the `NumPyArray` to a `PyObject` or `PyArray`.
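A short sketch of the array interface in action:
```julia
using NumPyArrays
A = NumPyArray(zeros(3, 3))
size(A)      # (3, 3), via the AbstractArray interface
A[1, 1] = 1  # setindex! writes through to the underlying NumPy buffer
A[1, 1]      # 1.0
```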
## PyCall only converts some Julia arrays into a NumPy array
PyCall.jl already converts a Julia `Array` into a NumPy array.
However, PyCall converts a `SubArray`, `Base.ReinterpretArray`,
`Base.ReshapedArray`, and `Base.PermutedDimsArray` into a `list`
even if their element type is compatible with NumPy.
NumPyArrays.jl extends PyCall.jl to allow any array with a compatible
element type where the method `strides` is applicable and which has a
parent or ancestor that is mutable.
## Example and Demonstration
```julia
julia> using NumPyArrays, PyCall
julia> rA = reinterpret(UInt8, zeros(Int8, 4,4))
4×4 reinterpret(UInt8, ::Array{Int8,2}):
0x00 0x00 0x00 0x00
0x00 0x00 0x00 0x00
0x00 0x00 0x00 0x00
0x00 0x00 0x00 0x00
julia> pytypeof(PyObject(rA))
PyObject <class 'list'>
julia> PyObject(rA)
PyObject [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
julia> pytypeof(NumPyArray(rA))
PyObject <class 'numpy.ndarray'>
julia> NumPyArray(rA)
4×4 NumPyArray{UInt8,2}:
0x00 0x00 0x00 0x00
0x00 0x00 0x00 0x00
0x00 0x00 0x00 0x00
0x00 0x00 0x00 0x00
julia> PyObject(NumPyArray(rA))
PyObject array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=uint8)
julia> sA = @view collect(1:16)[5:9]
5-element view(::Array{Int64,1}, 5:9) with eltype Int64:
5
6
7
8
9
julia> pytypeof(PyObject(sA))
PyObject <class 'list'>
julia> PyObject(sA)
PyObject [5, 6, 7, 8, 9]
julia> pytypeof(NumPyArray(sA))
PyObject <class 'numpy.ndarray'>
julia> npsA = NumPyArray(sA)
5-element NumPyArray{Int64,1} with indices 0:4:
5
6
7
8
9
julia> sum(npsA)
35
julia> np = pyimport("numpy"); np.sum(npsA)
35
```
## Questions
### Why not add this functionality to PyCall.jl?
There is a pending pull request on PyCall.jl to integrate this functionality.
See [PyCall.jl#876: Convert AbstractArrays with strides to NumPy arrays](https://github.com/JuliaPy/PyCall.jl/pull/876).
As of the creation of this package on July 10th, 2021, the pull request was last reviewed six months ago on January 13th, 2021.
### Should I use NumPyArray or PyCall.PyArray to wrap arrays from Python?
You should use `PyCall.PyArray`. This package is primarily useful for converting certain Julia arrays into a `PyCall.PyArray`.
### Why not just extend PyObject / PyArray by adding methods to those types?
Since both `PyObject` and `PyArray` are defined in PyCall.jl and not this package, adding methods to those types would be
[type piracy](https://docs.julialang.org/en/v1/manual/style-guide/#Avoid-type-piracy). We avoid type piracy in this package
by creating a new type `NumPyArray` which wraps `PyArray`.
### Should NumPyArrays.jl be moved under the PyJulia organization?
Sure. Feel free to contact me.
| FermiDiracIntegrals | v0.1.0 | MIT | code | https://github.com/feanor12/FermiDiracIntegrals.jl.git |
module FermiDiracIntegrals
using Polylogarithms
export F
"""
Complete Fermi-Dirac-integral
Arguments:
* j
* x
Formula:
``{F_j(x) = \\frac{1}{\\Gamma(j+1)} \\int_0^{\\infty}{\\frac{t^j}{\\exp(t-x)+1}dt}}``
Implementation:
Using the polylogarithm
"""
function F(j,x)
-polylog(j+1,-exp(x))
end
"""
Approximation of the complete Fermi-Dirac-integral for j = 1/2
Checked for a relative tolerance of 3% in the range x = -100:0.1:100
Speed: 100 times faster than the polylog version
Source:
J. S. Blakemore: Approximations for Fermi-Dirac Integrals. Solid-State Electronics, 25(11):1067-1076, 1982.
"""
function F(::Val{1/2},x)
if x < 1.3
1/(exp(-x)+0.27)
else
4/3/sqrt(pi)*(x^2+pi^2/6)^(3/4)
end
end
end
| FermiDiracIntegrals | v0.1.0 | MIT | code | https://github.com/feanor12/FermiDiracIntegrals.jl.git |
using FermiDiracIntegrals
using Test
@testset "FermiDiracIntegrals.jl" begin
for x in -10:10
@test isapprox(F(0,x),log(1+exp(x)))
end
for x in -100:0.1:100
@test isapprox(F(0.5,x),F(Val(1/2),x),rtol=0.03)
end
## https://royalsocietypublishing.org/doi/pdf/10.1098/rspa.1950.0183
@test isapprox(F(1,-4),0.0182324,atol=1e-7)
@test isapprox(F(2,-4),0.0182739,atol=1e-7)
@test isapprox(F(3,-4),0.0182947,atol=1e-7)
@test isapprox(F(4,-4),0.0183052,atol=1e-7)
@test isapprox(F(1,-1.3),0.2559184,atol=1e-7)
@test isapprox(F(2,-1.3),0.2639215,atol=1e-7)
@test isapprox(F(3,-1.3),0.2681202,atol=1e-7)
@test isapprox(F(4,-1.3),0.2702891,atol=1e-7)
@test isapprox(F(1,0),0.8224670,atol=1e-7)
@test isapprox(F(2,0),0.9015427,atol=1e-7)
@test isapprox(F(3,0),0.9470328,atol=1e-7)
@test isapprox(F(4,0),0.9721198,atol=1e-7)
end
| FermiDiracIntegrals | v0.1.0 | MIT | docs | https://github.com/feanor12/FermiDiracIntegrals.jl.git |
# FermiDiracIntegrals
[](https://github.com/feanor12/FermiDiracIntegral.jl/actions/workflows/CI.yml?query=branch%3Amain)
Implements the complete Fermi-Dirac integral ([Wikipedia](https://en.wikipedia.org/wiki/Complete_Fermi%E2%80%93Dirac_integral))
The general implementation uses [Polylogarithms.jl](https://github.com/mroughan/Polylogarithms.jl), but there is also an approximation for `F(1/2,x)` available.
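The identity used by the general implementation, as written in the package source, is
```julia
F(j, x) = -polylog(j + 1, -exp(x))
```
which corresponds to `F_j(x) = -Li_{j+1}(-exp(x))`.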
The approximated version can be called like this:
```julia
julia> using FermiDiracIntegrals
julia> F(Val(1/2),1)
1.5676943564187247
```
and the general polylogarithm implementation can be used like this:
```julia
julia> using FermiDiracIntegrals
julia> F(1/2,1)
1.575640776151315 - 0.0im
```
Benchmark:
```julia
julia> using BenchmarkTools
julia> using FermiDiracIntegrals
julia> @btime F(1/2,1)
64.742 μs (18 allocations: 512 bytes)
1.575640776151315 - 0.0im
julia> @btime F(Val(1/2),1)
0.698 ns (0 allocations: 0 bytes)
1.5676943564187247
```
| Paraml | v0.1.0 | MIT | code | https://github.com/pph-collective/Paraml.jl.git |
using Paraml
using Documenter
makedocs(;
modules = [Paraml],
authors = "mcmcgrath13 <[email protected]> and contributors",
repo = "https://github.com/pph-collective/Paraml.jl/blob/{commit}{path}#L{line}",
sitename = "Paraml.jl",
format = Documenter.HTML(;
prettyurls = get(ENV, "CI", "false") == "true",
canonical = "https://pph-collective.github.io/Paraml.jl",
assets = String[],
),
pages = ["Home" => "index.md"],
)
deploydocs(; repo = "github.com/pph-collective/Paraml.jl", devbranch = "main")
| Paraml | v0.1.0 | MIT | code | https://github.com/pph-collective/Paraml.jl.git |
module Paraml
import YAML
include("utils.jl")
include("check.jl")
include("parse.jl")
"""
create_params(
defn_path::AbstractString,
param_paths...;
error_on_unused::Bool = false,
out_path::AbstractString = "",
)
Entry function - given the path to the parameter definitions and files, parse and create a params dictionary.
The defn_path and param_paths can be either a single yaml file, or a directory containing yaml files.
# Arguments
- `defn_path`: path to the parameter definitions
- `param_paths...`: paths to parameter files or directories. The files will be merged in the passed order so that item 'a' in the first params will be overwritten by item 'a' in the second params.
- `out_path`: path to a file where the computed params will be saved, if passed
- `error_on_unused`: throw a hard error if there are unused parameters, otherwise warnings are only printed
# Returns
- dictionary with computed/validated model paramters with defaults filled in where needed
"""
function create_params(
defn_path::AbstractString,
param_paths...;
error_on_unused::Bool = false,
out_path::AbstractString = "",
)
defs = build_yaml(defn_path)
params = Dict{String,Any}()
for param_path in param_paths
cur_params = build_yaml(param_path)
params = merge_rec(params, cur_params)
end
classes = parse_classes(defs, params)
parsed = parse_params(defs, params; classes)
if !isempty(out_path)
@info "writing parsed params to $out_path"
YAML.write_file(out_path, parsed)
end
@info "\nChecking for unused parameters..."
num_unused = warn_unused_params(parsed, params)
@info "$num_unused unused parameters found"
if error_on_unused
@assert num_unused == 0 "There are unused parameters passed to the parser (see print statements)"
end
return parsed
end
export create_params
end
| Paraml | v0.1.0 | MIT | code | https://github.com/pph-collective/Paraml.jl.git |
check_int(i::Int, key_path) = i
function check_int(o, key_path)
try
convert(Int, o)
catch
throw(AssertionError("$o must be an integer [$key_path]"))
end
end
check_float(f::Float64, key_path) = f
function check_float(o, key_path)
try
convert(Float64, o)
catch
throw(AssertionError("$o must be a float [$key_path]"))
end
end
check_boolean(b::Bool, key_path) = b
function check_boolean(o, key_path)
try
convert(Bool, o)
catch
throw(AssertionError("$o must be a boolean [$key_path]"))
end
end
function check_array(val::AbstractArray, values::AbstractArray, key_path)
if !issubset(val, values)
throw(AssertionError("$val not in $values [$key_path]"))
end
end
check_array(val, values::AbstractArray, key_path) =
throw(AssertionError("$val must be an array (subset of $values) [$key_path]"))
function get_values(def, classes)
if haskey(def, "values")
return def["values"]
elseif haskey(def, "class")
return keys_or_vals(classes[def["class"]])
else
throw(AssertionError("array type definitions must specify `values` or `class`"))
end
end
"""
Checks if an item meets the requirements of the field's definition.
"""
function check_item(val, def, key_path; keys = nothing, classes = nothing)
# check type of value
dtype = def["type"]
if dtype == "int"
val = check_int(val, key_path)
elseif dtype == "float"
val = check_float(val, key_path)
elseif dtype == "boolean"
val = check_boolean(val, key_path)
elseif dtype == "enum"
values = get_values(def, classes)
@assert val in values "$val not in $values [$key_path]"
elseif dtype == "array"
values = get_values(def, classes)
check_array(val, values, key_path)
elseif dtype == "keys"
check_array(val, keys, key_path)
end
# check range
if haskey(def, "min")
@assert val >= def["min"] "$val must be greater than $(def["min"]) [$key_path]"
end
if haskey(def, "max")
@assert val <= def["max"] "$val must be less than $(def["max"]) [$key_path]"
end
return val
end
| Paraml | v0.1.0 | MIT | code | https://github.com/pph-collective/Paraml.jl.git |
"""
Get and check an item from the params, falling back on the definition's default.
"""
function get_item(key, def, key_path, param, classes)
@debug "parsing $key_path ..."
if haskey(param, key)
return check_item(param[key], def, key_path; classes)
else
return def["default"]
end
end
"""
Get the bin key as an integer, or error if it is not convertible.
"""
get_bin_int(i::Int) = i
function get_bin_int(s)
try
return parse(Int, s)
catch
error("Bins must be integers")
end
end
"""
Get and validate a type == bin definition
"""
function get_bins(key, def, key_path, param, classes)
@debug "parsing $key_path ..."
if !haskey(param, key)
return def["default"]
end
bins = merge_rec(def["default"], param[key])
parsed_bins = Dict()
for (bin, val) in bins
bin_int = get_bin_int(bin)
for (field, defn) in def["fields"]
@assert haskey(val, field) "$field must be in $val [$key_path.$bin]"
val[field] = check_item(val[field], defn, "$key_path.$bin.$field"; classes)
end
parsed_bins[bin_int] = val
end
return parsed_bins
end
"""
Get and validate a type == definition definition
"""
function get_defn(key, def, key_path, param, classes)
@debug "parsing $key_path ..."
if !haskey(param, key) || param[key] == Dict()
parsed = def["default"]
else
parsed = param[key]
end
for (k, val) in parsed
for (field, defn) in def["fields"]
if !haskey(val, field)
if haskey(defn, "default")
val[field] = defn["default"]
else # no key in param and no default available, error
@assert haskey(val, field) "$field must be in $val [$key_path]"
end
end
end
end
return parsed
end
"""
Recursively parse the passed params, using the definitions to validate
and provide defaults.
"""
function parse_params(
defs::AbstractDict,
params::AbstractDict;
key_path::String = "",
classes::AbstractDict = Dict(),
)
@debug "parsing $key_path ..."
# handles case of bin or def as direct default item
if haskey(defs, "default")
if defs["type"] == "bin"
return get_bins("dummy", defs, key_path, Dict("dummy" => params), classes)
        else # defs["type"] == "definition"
return get_defn("dummy", defs, key_path, Dict("dummy" => params), classes)
end
end
parsed = Dict()
# all v are dicts at this point
for (k, def) in defs
kp = "$key_path.$k"
if haskey(def, "default")
dtype = def["type"]
if dtype == "sub-dict"
parsed[k] = parse_subdict(
def["default"],
def["keys"],
get(params, k, Dict()),
kp,
classes,
)
elseif dtype == "bin"
parsed[k] = get_bins(k, def, kp, params, classes)
elseif dtype == "definition"
parsed[k] = get_defn(k, def, kp, params, classes)
else
parsed[k] = get_item(k, def, kp, params, classes)
end
else
parsed[k] = parse_params(def, get(params, k, Dict()); key_path = kp, classes)
end
end
return parsed
end
# params is a scalar, return it
parse_params(
defs::AbstractDict,
params;
key_path::String = "",
classes::AbstractDict = Dict(),
) = params
"""
Parse a type == sub-dict definition
"""
function parse_subdict(default, keys, params, key_path, classes)
@debug "parsing $key_path ..."
parsed = Dict()
key, rem_keys = headtail(keys...)
for val in keys_or_vals(classes[key])
kp = "$key_path.$val"
val_params = get(params, val, Dict())
parsed[val] = parse_params(default, val_params; key_path = kp, classes)
if length(rem_keys) > 0
merge!(parsed[val], parse_subdict(default, rem_keys, val_params, kp, classes))
end
end
return parsed
end
"""
Parse the classes definition first as it is needed in parsing the full params.
"""
function parse_classes(defs::AbstractDict, params::AbstractDict)
if "classes" in keys(defs)
return parse_params(defs["classes"], get(params, "classes", Dict()))
else
return Dict()
end
end
| Paraml | v0.1.0 | MIT | code | https://github.com/pph-collective/Paraml.jl.git |
"""
Recursively merge two nested dictionaries
"""
function merge_rec(a::AbstractDict, b::AbstractDict)
d = Dict()
for (k, v) in a
if haskey(b, k)
d[k] = merge_rec(v, b[k])
else
d[k] = v
end
end
for (k, v) in b
if !haskey(a, k)
d[k] = v
end
end
return d
end
merge_rec(a, b) = b
"""
Read in a yaml or folder of yamls into a dictionary.
"""
function build_yaml(path::AbstractString)
yml = Dict{String,Any}()
if isdir(path)
for file in readdir(path; join = true)
if endswith(file, r"\.ya?ml")
this_yml = YAML.load_file(file)
merge!(yml, this_yml)
end
end
else
@assert endswith(path, r"\.ya?ml") "Must provide a yaml file"
yml = YAML.load_file(path)
end
return yml
end
"""
Get the head and tail tuples from args
"""
headtail(a, b...) = (a, b)
"""
Compare the original params to what was parsed and print warnings for any original
params that are unused in the final parsed params.
"""
function warn_unused_params(parsed::AbstractDict, params::AbstractDict; key_path = "")
count = 0
for (k, v) in params
kp = "$key_path.$k"
if haskey(parsed, k)
count += warn_unused_params(parsed[k], params[k]; key_path = kp)
else
@warn "[$kp] is unused"
count += 1
end
end
return count
end
# neither is a dict
warn_unused_params(parsed, params; key_path = "") = 0
# one is a dict
function warn_unused_params(parsed, params::AbstractDict; key_path = "")
@warn "[$key_path] has unused params: $params"
return 1
end
function warn_unused_params(parsed::AbstractDict, params; key_path = "")
@warn "[$key_path] has sub-keys, got unused params: $params"
return 1
end
"""
Get the keys of a dict, or the items in a list.
"""
keys_or_vals(d::AbstractDict) = collect(keys(d))
keys_or_vals(a::AbstractArray) = a
| Paraml | v0.1.0 | MIT | code | https://github.com/pph-collective/Paraml.jl.git |
using Paraml
using Test
const DEF_PATH = "params/defs.yaml"
@testset "Paraml.jl" begin
@testset "merge params" begin
params = create_params(
DEF_PATH,
"params/a.yaml",
"params/b",
"params/c.yaml"
)
@test haskey(params["classes"]["animals"], "cat")
@test params["demographics"]["cat"]["barn"]["age_bins"][1]["age"] == 3
@test params["demographics"]["cat"]["barn"]["age_bins"][2]["age"] == 14
@test params["demographics"]["cat"]["barn"]["color"] == "indigo"
@test params["demographics"]["cat"]["barn"]["num"] == 2
@test params["demographics"]["turtle"]["ocean"]["prob_happy"] == 0.95
@test params["neighbors"]["blue_buddies"]["distance"] == 90
end
@testset "throws errors" begin
# param file has bad value, make sure key is in the logs
@test_throws AssertionError create_params(DEF_PATH, "params/a_error.yaml")
@test_logs "[.demographics.turtle.ocean.prob_happy]"
        # param file has unused value, make sure error thrown
@test_throws AssertionError create_params(
DEF_PATH,
"params/a_unused.yaml";
error_on_unused = true,
)
@test_logs "There are unused parameters passed to the parser"
end
@testset "read/write" begin
out_path = joinpath(mktempdir(), "params.yml")
params =
create_params(DEF_PATH, "params/a.yaml", "params/b", "params/c.yaml"; out_path)
read_params = Paraml.build_yaml(out_path)
@test params == read_params
end
@testset "check items" begin
key_path = "item.test"
@testset "min/max + float" begin
defs = Dict("min" => 0, "max" => 3, "type" => "float")
@test Paraml.check_item(1.5, defs, key_path) == 1.5
@test Paraml.check_item(1, defs, key_path) == 1.0
@test_throws AssertionError Paraml.check_item(-1.5, defs, key_path)
@test_throws AssertionError Paraml.check_item(4.5, defs, key_path)
end
@testset "int" begin
defs = Dict("min" => 0, "max" => 3, "type" => "int")
@test Paraml.check_item(1, defs, key_path) == 1
@test Paraml.check_item(1.0, defs, key_path) == 1
@test_throws AssertionError Paraml.check_item(1.5, defs, key_path)
end
@testset "boolean" begin
defs = Dict("type" => "boolean")
@test Paraml.check_item(false, defs, key_path) == false
@test Paraml.check_item(1, defs, key_path) == true
@test_throws AssertionError Paraml.check_item(2, defs, key_path)
end
@testset "values enum" begin
defs = Dict("type" => "enum", "values" => ["a", "b"])
@test Paraml.check_item("a", defs, key_path) == "a"
@test_throws AssertionError Paraml.check_item("c", defs, key_path)
end
@testset "class enum" begin
defs = Dict("type" => "enum", "class" => "my_class")
nested_classes =
Dict("my_class" => Dict("A" => Dict("val" => 1), "B" => Dict("val" => 2)))
flat_classes = Dict("my_class" => ["A", "B"])
@test Paraml.check_item("A", defs, key_path; classes = nested_classes) == "A"
@test Paraml.check_item("A", defs, key_path; classes = flat_classes) == "A"
@test_throws AssertionError Paraml.check_item(
"C",
defs,
key_path;
classes = nested_classes,
)
@test_throws AssertionError Paraml.check_item(
"C",
defs,
key_path;
classes = flat_classes,
)
end
@testset "values array" begin
defs = Dict("type" => "array", "values" => ["a", "b"])
@test Paraml.check_item(["a", "b"], defs, key_path) == ["a", "b"]
@test Paraml.check_item([], defs, key_path) == []
@test Paraml.check_item(["b"], defs, key_path) == ["b"]
@test_throws AssertionError Paraml.check_item(["c"], defs, key_path)
@test_throws AssertionError Paraml.check_item(["a", "c"], defs, key_path)
end
@testset "class array" begin
defs = Dict("type" => "array", "class" => "my_class")
nested_classes =
Dict("my_class" => Dict("A" => Dict("val" => 1), "B" => Dict("val" => 2)))
flat_classes = Dict("my_class" => ["A", "B"])
@test Paraml.check_item(["A"], defs, key_path; classes = nested_classes) ==
["A"]
@test Paraml.check_item(["A"], defs, key_path; classes = flat_classes) == ["A"]
@test_throws AssertionError Paraml.check_item(
["C"],
defs,
key_path;
classes = nested_classes,
)
@test_throws AssertionError Paraml.check_item(
["C"],
defs,
key_path;
classes = flat_classes,
)
end
@testset "keys" begin
defs = Dict("type" => "keys")
keys = ["a", "b"]
@test Paraml.check_item(["a", "b"], defs, key_path; keys) == ["a", "b"]
@test Paraml.check_item([], defs, key_path; keys) == []
@test Paraml.check_item(["b"], defs, key_path; keys) == ["b"]
@test_throws AssertionError Paraml.check_item(["c"], defs, key_path; keys)
@test_throws AssertionError Paraml.check_item(["a", "c"], defs, key_path; keys)
end
end
end
| Paraml | v0.1.0 | MIT | docs | https://github.com/pph-collective/Paraml.jl.git |
# Paraml (param yaml)
[](https://pph-collective.github.io/Paraml.jl/stable)
[](https://pph-collective.github.io/Paraml.jl/dev)
[](https://github.com/pph-collective/Paraml.jl/actions)
[](https://codecov.io/gh/pph-collective/Paraml.jl)
Paraml is a parameter definition language and parser - all in yaml. Check out the [docs](https://pph-collective.github.io/Paraml.jl/stable).
| Paraml | v0.1.0 | MIT | docs | https://github.com/pph-collective/Paraml.jl.git |
```@meta
CurrentModule = Paraml
```
# Paraml
## Table of Contents
- [Paraml](#Paraml)
- [Table of Contents](#Table-of-Contents)
- [Motivation](#Motivation)
- [Getting Started](#Getting-Started)
- [Installation](#Installation)
- [Running Paraml](#Running-Paraml)
- [Parameter Definition](#Parameter-Definition)
- [Required Keys](#Required-Keys)
- [Types](#Types)
- [`int`](#int)
- [`float`](#float)
- [`boolean`](#boolean)
- [`array`](#array)
- [`enum`](#enum)
- [`any`](#any)
- [`bin`](#bin)
- [`sub-dict`](#sub-dict)
- [`definition`](#definition)
- [`keys`](#keys)
- [Using Classes](#Using-Classes)
- [API](#API)
## Motivation
Paraml is a spinoff of [TITAN](https://github.com/marshall-lab/TITAN), an agent-based model. We have a number of parameters in that model, many of which are not used in a given run. Paraml addresses the following pain points we had:
* Parameters often weren't formally defined/described anywhere - some had comments, some were hopefully named idiomatically. This caused issues onboarding new people to using the model.
* Parameters were statically defined/hard coded, but we often wanted them to be dynamic.
* Parameters needed to be filled out/defined by non-technical researchers: users shouldn't need to know how to code to create a parameter file.
* Parameters need to have specific validation (e.g. a probability should be between 0 and 1, only `a` or `b` are expected values for parameter `y`). This was typically a run time failure - sometimes silent, sometimes explosive.
* If a user isn't using a feature of the model, they shouldn't have to worry about/carry around its parameters.
* Reproducibility of the run is key - we must be able to re-run the model with the same params.
* We needed to be able to create common settings which described a specific world the model runs in and let users use those, but also override parameters as they needed for their run of the model.
How Paraml addresses these:
* Parameter definitions require defaults
* Can add inline descriptions of parameters
* A small type system allows validation of params, as well as flexibility to define interfaces for params
* Parameter files only need to fill in what they want different from the defaults
* Can save off the fully computed params, which can then be re-used at a later date
* Can layer different parameter files, allowing more complex defaults and re-use of common scenarios
## Getting Started
### Installation
```julia
] add Paraml
```
### Running Paraml
The entrypoint for running Paraml is `Paraml.create_params`. This takes the parameter definitions, parameter files, and some options and returns a dictionary of the validated and computed parameters.
**Args:**
* `defn_path`: A yaml file or directory of yaml files containing the parameter definitions (see [Parameter Definition](#Parameter-Definition)).
* `param_paths...`: The remaining args are interpreted as parameter files. They will be merged in order (last merged value prevails).
* `out_path`: Optional, if passed, save the computed parameters as a yaml to this location.
* `error_on_unused`: Optional, if `true` throw an exception if there are parameters in `param_paths` that do not have a corresponding definition in the `defn_path` definitions.
**Returns:**
* A dictionary representing the parsed parameters.
**Example usage:**
```julia
using Paraml
def_path = "my/params/dir" # directory of the params definition files
base_params = "base/params.yaml" # file location of the first params
setting_params = "settings/my_setting" # directory of the second params files
intervention_params = "intervention/params" # directory of the third params files
out_path = "./params.yml" # location to save computed params to
params = create_params(
def_path,
base_params,
setting_params,
intervention_params;
out_path,
error_on_unused=true # if parameters are passed, but don't exist in the definition file, error
)
```
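The parameter files are merged recursively, with values from later files taking precedence. A rough sketch of the behavior, using hypothetical file contents:
```julia
# Suppose (hypothetically):
#   base.yaml:   demographics: {pig: {barn: {num: 1, prob_happy: 0.5}}}
#   custom.yaml: demographics: {pig: {barn: {num: 7}}}
params = create_params("defs.yaml", "base.yaml", "custom.yaml")
params["demographics"]["pig"]["barn"]["num"]        # 7, the last file wins
params["demographics"]["pig"]["barn"]["prob_happy"] # 0.5, untouched keys survive
```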
## Parameter Definition
The parameter definition language (PDL) provides expressions for defining input types, creation of types for the target application, and simple validation of input values. The PDL itself is YAML and can be defined either in one file or a directory of yaml files. There can be multiple root keys in the parameter definition to namespace parameters by topic, and parameter definitions can be deeply nested for further organization of the params. Only the `classes` key at the root of the definitions has special meaning (see [Using Classes](#Using-Classes)).
**An example params definition:**
```yml
# classes is a special parameter key that allows the params defined as sub-keys
# to be used in definitions for other sections
classes:
animals:
type: definition
description: Animals included in model
fields:
goes:
type: any
description: What noise does the animal make?
default: oink
is_mammal:
type: boolean
description: Is this animal a mammal
default: false
friends_with:
type: keys
description: What animals does this animal befriend
default:
cat:
goes: meow
is_mammal: true
friends_with:
- cat
- dog
dog:
goes: woof
is_mammal: true
friends_with:
- dog
- turtle
- cat
turtle:
goes: gurgle
friends_with:
- dog
- turtle
locations:
type: array
description: Where do the animals live?
default:
- barn
- ocean
values:
- barn
- ocean
- sky
- woods
# demographics is another root-level parameter, which facets off of the values in classes
# then has parameter definitions for each of those combinations
demographics:
type: sub-dict
description: Parameters controlling population class level probabilities and behaviors
keys:
- animals
- locations
default:
num:
type: int
default: 0
description: Number of animals of this type at this location
prob_happy:
type: float
default: 1.0
description: Probability an animal is happy
min: 0.0
max: 1.0
flag: # parameter definitions can be nested in intermediate keys to group related items
color:
type: enum
default: blue
        description: What color is the flag of this animal/location combo
values:
- blue
- indigo
- cyan
name:
type: any
default: animal land
description: What is the name of this animal/location combo's flag
# neighbors is another root-level parameter
neighbors:
type: definition
description: Definition of an edge (relationship) between two locations
fields:
location_1:
type: enum
default: barn
class: locations
location_2:
type: enum
default: sky
class: locations
distance:
type: float
default: 0
min: 0
default:
edge_default:
location_1: barn
location_2: sky
distance: 1000
```
**An example of parameters for the definition above**
```yml
classes:
animals:
pig: # doesn't need a `goes` key as the default is oink and that is appropriate
is_mammal: true
friends_with:
- pig
fish: # fish don't need to specify `is_mammal` as false as that is the default
goes: glugglug
friends_with:
- fish
wolf:
goes: ooooooooo
is_mammal: true
friends_with:
- pig
locations:
- ocean
- woods
- barn
# the calculated params will fill in the default values for combinations of
# animals/colors/parameters that aren't specified below
demographics:
pig:
barn:
num: 20
flag:
color: cyan
name: piney porcines
wolf:
woods:
num: 1
prob_happy: 0.8
flag:
name: running solo
fish:
ocean:
num: 1000001
prob_happy: 0.4
flag:
color: indigo
name: cool school
# we're defining edges in a graph in this example; the names are labels for human readability only
neighbors:
woodsy_barn:
location_1: woods
location_2: barn
distance: 1
woodsy_ocean:
location_1: woods
location_2: ocean
distance: 3
barn_ocean:
location_1: barn
location_2: ocean
distance: 4
```
Parameters are defined as key value pairs (typically nested). There are some reserved keys that allow for definition of a parameter item, but otherwise a key in the parameter definition is interpreted as an expected key in the parameters.
The reserved keys used for defining parameters are:
* `type`
* `default`
* `description`
* `min`
* `max`
* `values`
* `fields`
Specifically, if the `default` key is present in a yaml object, then that object will be interpreted as a parameter definition. The other reserved keys are used within that definition.
For example, in the snippet below `type` is used as a parameter key, which is allowed (though perhaps not encouraged for readability reasons) because `default` is not a key at the same level as `type`. The second usage of `type` is interpreted as the definition of `type` (the key) being an `int`.
```yml
a:
type:
type: int
default: 0
description: the type of a
```
`classes` is also reserved as a root key (see [using classes](#Using-Classes) below)
### Required Keys
Every parameter item must have the `type` and `default` keys (`description` is highly encouraged, but not required).
See [Types](#Types) for more information on the types and how they interact with the other keys.
The `default` key should be a valid value given the rest of the definition. The `default` key can include parameter definitions within it. This is common with `sub-dict` param definitions.
The `description` is a free text field to provide context for the parameter item. This can also be used to generate documentation (no automated support at this time - see [TITAN's params app](https://github.com/marshall-lab/titan-params-app) as an example).
### Types
The `type` of a parameter definition dictates which other fields are required/used when parsing the definition.
The types supported by Paraml are:
* [`int`](#int)
* [`float`](#float)
* [`boolean`](#boolean)
* [`array`](#array)
* [`enum`](#enum)
* [`any`](#any)
* [`bin`](#bin)
* [`sub-dict`](#sub-dict)
* [`definition`](#definition)
* [`keys`](#keys)
#### `int`
The value of the parameter is expected to be an integer.
Required keys:
* None
Optional keys:
* `min` - the minimum value (inclusive) this parameter can take
* `max` - the maximum value (inclusive) this parameter can take
Example definition:
```yml
fav_num:
type: int
default: 12
  description: your favorite 3-or-fewer-digit number
min: -999
max: 999
```
Example usage:
```yml
fav_num: 13
```
#### `float`
The value of the parameter is expected to be a floating point number.
Required keys:
* None
Optional keys:
* `min` - the minimum value (inclusive) this parameter can take
* `max` - the maximum value (inclusive) this parameter can take
Example definition:
```yml
heads_prob:
type: float
default: 0.5
description: the probability heads is flipped
min: 0.0
max: 1.0
```
Example usage:
```yml
heads_prob: 0.75
```
#### `boolean`
The value of the parameter is expected to be a true/false value.
Required keys:
* None
Optional keys:
* None
Example definition:
```yml
use_feature:
type: boolean
description: whether or not to use this feature
default: false
```
Example usage:
```yml
use_feature: true
```
#### `array`
The value of the parameter is expected to be an array of values selected from the defined list.
Required keys:
* `values` - either a list of strings that the parameter can take, or the name of a class whose values can be used
Optional keys:
* None
Example definition:
```yml
locations:
type: array
description: Where do the animals go?
default:
- barn
- ocean
values:
- barn
- ocean
- sky
- woods
```
Example usage:
```yml
locations:
- sky
- ocean
```
#### `enum`
The value of the parameter is expected to be a single value selected from the defined list.
Required keys:
* `values` - either a list of strings that the parameter can take, or the name of a class whose values can be used
Optional keys:
* None
Example definition:
```yml
classes:
my_classes:
type: array
description: which class my params has
default:
- a
- b
values:
- a
- b
- c
affected_class:
type: enum
default: a
description: which class is affected by this feature
values: my_classes
```
Example usage:
```yml
my_classes:
- b
- c
affected_class: c
```
#### `any`
The value of the parameter can take on any value and will not be validated.
Required keys:
* None
Optional keys:
* None
Example definition:
```yml
name:
type: any
description: what is your name?
default: your name here
```
Example usage:
```yml
name: Paraml
```
#### `bin`
Binned (integer) keys with set value fields.
Required keys:
* `fields` - parameter definitions for each required field in the binned items. Because the sub-fields of a bin are required, no default can be provided.
Optional keys:
* None
Example definition:
```yml
bins:
type: bin
description: Binned probabilities of frequencies
fields:
prob:
type: float
min: 0.0
max: 1.0
min:
type: int
min: 0
max:
type: int
min: 0
default:
1:
prob: 0.585
min: 1
max: 6
2:
prob: 0.701
min: 7
max: 12
3:
prob: 0.822
min: 13
max: 24
```
Example usage:
```yml
bins:
1:
prob: 0.5
min: 0
max: 10
2:
prob: 0.9
min: 11
max: 20
```
#### `sub-dict`
Build a set of params for each key combination listed. Requires use of `classes` root key. The default should contain parameter definition items. Can facet on an arbitrary number of classes.
Required keys:
* `keys` - which params under the `classes` root key should be sub-dict'ed off of
Optional keys:
* None
Example definition:
```yml
classes:
my_classes:
type: array
description: which class my params has
default:
- a
- b
values:
- a
- b
- c
demographics:
type: sub-dict
description: parameters defining characteristics of each class
keys:
- my_classes
default:
num:
type: int
default: 0
description: number of agents in the class
```
Example usage:
```yml
demographics:
a:
num: 10
b:
num: 20
```
#### `definition`
Define an item with the given interface.
Required keys:
* `fields` - the fields defining the interface for each defined item. Each field is a param definition item.
Optional keys:
* None
Example definition:
```yml
animals:
type: definition
description: Animals included in model
fields:
goes:
type: any
description: What noise does the animal make?
default: oink
is_mammal:
type: boolean
description: Is this animal a mammal
default: false
friends_with:
type: keys
      description: What animals does this animal befriend
default:
cat:
goes: meow
is_mammal: true
friends_with:
- cat
- dog
dog:
goes: woof
is_mammal: true
friends_with:
- dog
- cat
```
Example usage:
```yml
animals:
sheep:
goes: bah
is_mammal: true
friends_with:
- pig
- sheep
pig:
is_mammal: true
fish:
goes: glugglug
friends_with:
- fish
```
#### `keys`
Within the field definitions of a `definition` type, the `keys` type acts like an `array` type, but with the values limited to the keys that are ultimately defined in the params.
Required keys:
* None
Optional keys:
* None
Example definition:
```yml
animals:
type: definition
description: Animals included in model
fields:
goes:
type: any
description: What noise does the animal make?
default: oink
is_mammal:
type: boolean
description: Is this animal a mammal
default: false
friends_with:
type: keys
      description: What animals does this animal befriend
default:
cat:
goes: meow
is_mammal: true
friends_with:
- cat
- dog
dog:
goes: woof
is_mammal: true
friends_with:
- dog
- cat
```
Example usage:
```yml
animals:
sheep:
goes: bah
is_mammal: true
friends_with:
- pig
- sheep
pig:
is_mammal: true
fish:
goes: glugglug
friends_with:
- fish
```
### Using Classes
The `classes` key as a root key of the parameter definitions takes on special meaning. The parameters chosen in this section can be used to determine acceptable values in other sections of the params (via `enum` and `array` types), or to determine what params need to be created (via `sub-dict` type).
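For example, here is a minimal sketch (the `teams` class and `best_team` parameter are invented for illustration) of a class whose values constrain an `enum` parameter elsewhere:
```yml
classes:
  teams:
    type: array
    description: teams present in the simulation
    default:
      - red
    values:
      - red
      - blue
best_team:
  type: enum
  default: red
  description: which team is expected to win
  values: teams # the allowed values are whichever teams the params select
```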
## API
```@index
```
```@autodocs
Modules = [Paraml]
```
| Paraml | https://github.com/pph-collective/Paraml.jl.git |
|
[
"MIT"
] | 0.2.0 | 60ffa82c431da11742da05c719f282e57cfec60a | code | 494 | using SpaceInvaders, REPL
install(repl) = repl.interface.modes[1].keymap_dict[' '] = (s, args...) -> begin
if isempty(s) || position(REPL.LineEdit.buffer(s)) == 0
SpaceInvaders.main()
print(repl.t.out_stream, "\e[E")
REPL.LineEdit.write_prompt(repl.t, repl.interface.modes[1], repl.hascolor)
else
REPL.LineEdit.edit_insert(s, ' ')
end
nothing
end
__init__() = isdefined(Base, :active_repl) ? install(Base.active_repl) : Base.atreplinit(install)
| WatchJuliaBurn | https://github.com/JuliaWTF/WatchJuliaBurn.jl.git |
|
[
"MIT"
] | 0.2.0 | 60ffa82c431da11742da05c719f282e57cfec60a | code | 2152 | module WatchJuliaBurn
export @🐒
export @🥩_str
export @new_emoji
export emojify
using Base: print
using LinearAlgebra
using Statistics
# Needed for the first global constant.
const 📖 = Dict
const emoji_to_func = 📖{Any, Any}()
"""
@new_emoji emoji function [min_julia version]
Creates an alias for an emoji to a function and optionally adds
a minimum julia version requirement. If your emoji is incompatible
with earlier versions, use `Char(unicode_number)` instead.
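For example (both aliases below already appear in the built-in `😃📖` table):
    @new_emoji 😴 sleep    # defines and exports `const 😴 = sleep`
    @new_emoji 🪵 log 1.5  # the alias is only defined on julia ≥ 1.5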
"""
macro new_emoji(emoji, func)
emoji_to_func[emoji] = (func, "")
return esc(quote
export $emoji
const $emoji = $(func);
end)
end
macro new_emoji(emoji, func, julia_version)
julia_version = string(julia_version)
emoji_to_func[emoji] = (func, julia_version)
return esc(quote
if VERSION >= @v_str $(julia_version)
export $(Symbol(emoji))
const $(Symbol(emoji)) = $(func)
end
end)
end
include("📖.jl")
for func in keys(😃📖)
for symbol_info in 😃📖[func]
if symbol_info isa Symbol
@eval @new_emoji $(symbol_info) $(func)
elseif symbol_info isa Tuple
@eval @new_emoji $(symbol_info[1]) $(func) $(symbol_info[2])
end
end
end
## Additional features (does not pass with @new_emoji)
@eval $(Symbol("@🥩_str")) = $(getfield(Main, Symbol("@raw_str")))
😃📖[:(raw)] = (:(🥩),)
emoji_to_func[:(🥩"")] = (:(raw""), "")
include(" .jl") # tragically, the file name " " is not supported on Windows.
include("😃→🗿.jl")
include("🙈🙊🙉.jl")
"""
arbitrary_pointer(page_size=0xfff)
Returns a pointer to arbitrary memory owned by the Julia process.
Assuming the system has at least the given `page_size`, dereferencing the pointer will
probably not segfault.
"""
arbitrary_pointer(page_size=0xfff) = Ptr{Int}(Int(pointer(rand(4))) ⊻ rand(UInt32) & page_size)
"""
🦶🔫(x)
Check if a number is even but sometimes get the wrong answer and also corrupt arbitrary
memory.
"""
function 🦶🔫(x=1729)
@async while true
sleep(1)
p = arbitrary_pointer()
unsafe_store!(p, unsafe_load(p)+1)
end
iseven(x) || x == 1729
end
export 🦶🔫
end
| WatchJuliaBurn | https://github.com/JuliaWTF/WatchJuliaBurn.jl.git |
|
[
"MIT"
] | 0.2.0 | 60ffa82c431da11742da05c719f282e57cfec60a | code | 3173 | ## Contains mapping from functions to 😃📖
## Each function (treated as a Symbol) maps to a Tuple of Symbols and Tuple{Symbol,Float64} for
## emojis needing a specific version
const 😃📖 = 📖(
## Base
:ENV => (:(🧧),),
# Symbol(Char(0x1fae5))
:ArgumentError => (:(💬🚨),),
:AbstractChar => ((Symbol(Char(0x1fae5) * '🚗'), 1.8),),
:AbstractDict => ((Symbol(Char(0x1fae5) * '📖'), 1.8),),
:AbstractDisplay => ((Symbol(Char(0x1fae5) * '📺'), 1.8),),
:AbstractFloat => ((Symbol(Char(0x1fae5) * Char(0x1f6df)), 1.8),),
:AbstractString => ((Symbol(Char(0x1fae5) * '🧵'), 1.8),),
:Bool => (:(👍👎),),
:Char => (:(🚗),),
:Dict => (:(📖),),
:IO => ((Symbol(Char(0x1fa80) * '½'), 1.2), :(👁️😲),), # 🪀½
:Pair => (:(🍐),),
:Threads => ((Symbol(Char(0x1faa2)), 1.5),),
:String => (:(🧵),),
:any => (:(👩),), # (her name is Annie)
:broadcast => (:(📡),),
:cd => (:(💿), :(🇨🇩)),
:chop => (:(🥢), (Symbol(Char(0x1f333) * Char(0x1fa93)), 1.2),), # 🌳🪓
:delete! => (:(🔥),),
:display => (:(📺),),
:download => (:(📥),),
:dump => (:(💩),),
:error => (:(💣),),
:exit => (:(🚪),),
:false => (:👎,),
:findall => (:(🕵️),),
:findfirst => (:(🔎🥇),),
:findnext => ((:🔎⏭),),
:first => (:(🥇),),
:flush => (:(😳),),
:foldr => (:(🗂), :(📁),),
:get => (:(🤲),),
:getfield => (:(🤲🌽), (:🤲🌾),),
:getkey => (:(🤲🔑), :(🤲🗝),),
:getproperty => (:(🤲🏡),),
:join => (:(🚪🚶),),
:keys => (:(🔑), :(🗝),),
:kill => (:(⚰️),),
:map => (:(🗺),),
:nothing => (:(⬛),),
:peek => ((:(⛰️), 1.5),),
:print => (:(🖨️),),
:rand => (:(🎰),:(🎲),),
:run => (:(🏃),),
:searchsorted => (:(🔎🔤),),
:show => (:(☝️),),
:sleep => (:(😴), :(💤),),
:sort => (:(🔤),),
:string => (:(🎻),),
:throw => (:(c╯°□°ↄ╯), :(🤮), :(🚮),),
:time => (:(🕛), :(⏱️), :(⌛), :(⏲️),),
:true => (:(✅), :(👍), :(👌),),
:write => (:(🖊️), :(✍️), :(🖋️),),
:zip => (:(🤐),),
## Arrays and iterators
:AbstractMatrix => ((Symbol(Char(0x1fae5) * '🔢'), 1.8),),
:Matrix => (:(🔢),),
:axes => ((Symbol(Char(0x1fa93)^2), 1.2),), # 🪓🪓
:cat => (:(😻), :(😹), :(🐈),),
:vcat => (:(⬇️😻), :(⬇️😹), :(⬇️🐈),),
:hcat => (:(➡️😻), :(➡️😹), :(➡️🐈),),
:collect => (:(🧺),),
:eachindex => (:(☝️☝️),),
:fill => (:(🚰),),
:getindex => (:(🤲☝️),),
:push! => (:(🏋️),),
:pop! => (:(🍾), :(🏹🎈)),
:length => (:(📏),),
:view => (:(👀), :(👁️),),
## Math
:abs => ((:👔💪),(:🎽💪),),
:clamp => (:(🗜️),),
:cot => (:(🧥), :(🥼)),
:count => (:(🧮),),
:count_ones => (:(🧮1️⃣1️⃣),),
:count_zeros => (:(🧮0️⃣0️⃣),),
    :div => ((Symbol(Char(0x1f93f)), 1.2),), # 🤿
:float => (:(⛵️), (Symbol(Char(0x1f6df)), 1.8),), # 🛟
:im => (:(🇮🇲),), # Island of Man flag
:imag => (:(🔮),),
:inv => (:(↔),),
:isreal => ((:🛸❓),),
:log => ((Symbol(Char(0x1fab5)), 1.5),), # 🪵
:(mean ∘ skipmissing) => (:(😠),),
:mod => (:(🛵🔧),),
:pi => (:(🥧), :(🍰),),
:round => (:(🎠), :(🔵),),
:secd => (:(🥈),),
:sign => ((Symbol(Char(0x1faa7)), 1.5),(Symbol(Char(0x1f68f)), 1.5),), # 🪧, 🚏
:tan => (:(🧑🏻➡️🧑🏽), :(👩🏻➡️👩🏽),),
:tr => (:(🇹🇷),),
)
| WatchJuliaBurn | https://github.com/JuliaWTF/WatchJuliaBurn.jl.git |
|
[
"MIT"
] | 0.2.0 | 60ffa82c431da11742da05c719f282e57cfec60a | code | 1003 | """
emojify(path::🧵)
Go recursively over all the files contained in path and replace
all possible occurrences of functions with random emoji aliases.
"""
function emojify(path::🧵; overwrite=👌)
if isdir(path)
for subpath in readdir(path)
emojify(joinpath(path, subpath); overwrite=overwrite)
end
elseif isfile(path) && endswith(path, ".jl")
return emojify_file(path; overwrite=overwrite)
end
return ⬛
end
function emojify_file(filepath::🧵; overwrite=👍)
str = 🧵(read(filepath))
str = emojify_string(str)
if overwrite
🖊️(filepath, str)
return ⬛
else
return str
end
end
function emojify_string(str::🧵)
for func in 🔑(😃📖)
str = replace(str, Regex("\\b" * 🎻(func) * "\\b") => 🎰🧵(🥈🎻.(😃📖[func])))
end
return str
end
🥈🎻(😃::Union{Symbol,Expr}) = 🎻(😃)
🥈🎻(😃::Tuple) = 🎻(🥇(😃))
## Allow to 🤲 a random 🎻 every ⏲️ it's printed
struct 🎰🧵{T🧵}
🎻🎻🎻::T🧵
end
🖨️(io::👁️😲, rs::🎰🧵) = 🖨️(io, 🎲(rs.🎻🎻🎻))
| WatchJuliaBurn | https://github.com/JuliaWTF/WatchJuliaBurn.jl.git |
|
[
"MIT"
] | 0.2.0 | 60ffa82c431da11742da05c719f282e57cfec60a | code | 1808 | # I guarantee the brittleness of this macro, it tends to break my terminal and it is an abomination. Have fun!
"""
A more convenient syntax for try/catch clauses.
You know that you want it!
Instead of the boring old:
```julia
try
foo()
catch e
bar(e)
finally
baz()
end
```
you can now ✍️ the far more legible:
```julia
💣 = foo
😥 = bar
🍌 = baz
@🐒 begin
🙈
💣()
🙊(💥)
😥(💥)
🙉
🍌()
end
```
"""
macro 🐒(monkeyexpression::Expr)
monkeyexpression.head == :block || 💣("You have to wrap this in a begin...end block, sorry!")
newexpr = Expr(:block)
tryblock = Expr(:block)
catchme = 👎
catchblock = 👎
finallyblock = ⬛
state = :start # where are we in the expression
for sub in monkeyexpression.args
if state == :start
if sub isa Symbol && sub == :🙈
state = :try
elseif sub isa LineNumberNode
push!(newexpr.args, sub)
else
💣("Missing 🙈")
end
elseif state == :try
if sub isa Symbol && sub == :🙊
state = :catch
catchblock = Expr(:block)
elseif sub isa Expr && sub.args[1] == :🙊
state = :catch
catchblock = Expr(:block)
if 📏(sub.args) == 2
catchme = sub.args[2]
else
💣("Can only catch a single 💣 at once, duh!")
end
elseif sub isa Symbol && sub == :🙉
state = :finally
finallyblock = Expr(:block)
else
push!(tryblock.args, sub)
end
elseif state == :catch
if sub isa Symbol && sub == :🙉
state = :finally
finallyblock = Expr(:block)
else
push!(catchblock.args, sub)
end
elseif state == :finally
push!(finallyblock.args, sub)
end
end
if state == :try
💣("Syntax: 🙈 without 🙊 or 🙉")
end
tryexpr = Expr(:try, tryblock, catchme, catchblock)
if !(finallyblock === ⬛)
push!(tryexpr.args, finallyblock)
end
push!(newexpr.args, tryexpr)
return esc(newexpr)
end
| WatchJuliaBurn | https://github.com/JuliaWTF/WatchJuliaBurn.jl.git |
|
[
"MIT"
] | 0.2.0 | 60ffa82c431da11742da05c719f282e57cfec60a | code | 2014 | using WatchJuliaBurn
using WatchJuliaBurn: arbitrary_pointer
using Test
using LinearAlgebra
@testset "WatchJuliaBurn.jl" begin
## Base
@test_throws ArgumentError c╯°□°ↄ╯(ArgumentError("Great Success"))
@test 🗺(sin, 1:5) == sin.(1:5)
@test 📖(:a => 2.0) == Dict(:a => 2.0)
@test ✅
@test 👍
@test 👌
@test !👎
@test 🥩"Amazing" == raw"Amazing"
@test 🔥(📖(:a => 2.0, :b => 3.0), :a) == 📖(:b => 3.0)
@test_nowarn 🖨️("Yes!")
@test 📡(sin, 1:5) == sin.(1:5)
@test ⬛ === nothing
@test 🕵️(x->x==1, [1, 2, 1]) == [1, 3]
@test_nowarn ☝️(👍)
if VERSION >= v"1.5"
@test ⛰️(IOBuffer("Brilliant"), Char) == 'B'
end
## Arrays
@test 😻([1], [2]; dims=1) == [1, 2]
@test ⬇️😻([1], [2]) == [1, 2]
@test ➡️😻([1], [2]) == [1 2]
@test 😹([1], [2]; dims=1) == [1, 2]
@test ⬇️😹([1], [2]) == [1, 2]
@test ➡️😹([1], [2]) == [1 2]
@test 🐈([1], [2]; dims=1) == [1, 2]
@test ⬇️🐈([1], [2]) == [1, 2]
@test ➡️🐈([1], [2]) == [1 2]
@test 🔢([1 0; 0 1]) == [1 0; 0 1]
@test 🧺(1:3) == [1, 2, 3]
if VERSION >= v"1.2"
@eval @test_nowarn $(Symbol(Char(0x0001fa93) * Char(0x0001fa93)))(rand(3, 3)) # 🪓🪓
end
# @test_nowarn 🪟(rand(3, 3), 1:2, 1:2)
## Math
@test 🥧 ≈ 3.1415 atol=1e-4
@test 🍰 ≈ 3.1415 atol=1e-4
@test 🧑🏻➡️🧑🏽(2.0) == tan(2.0)
if VERSION >= v"1.5"
@eval @test $(Symbol(Char(0x0001fab5)))(1.0) == log(1.0) # 🪵
end
@test 🗜️(5.0, 1.0, 2.0) == 2.0
@test 👔💪(-2) == 2
@test 🎽💪(-2) == 2
@test 🛸❓(1im) == 👎
@test 🔮(1 + 2im) == 2
strip_version(x::Tuple) = first(x)
strip_version(x::Union{Symbol, Expr}) = x
# Check that symbols are not used twice.
@test Base.allunique(mapreduce(vcat, values(WatchJuliaBurn.😃📖)) do 😃😃😃
strip_version.(😃😃😃)
end)
## Monkey try/catch/finally
include("🐒tests.jl")
# Arbitrary pointers don't segfault on read
@test sum(unsafe_load(arbitrary_pointer()) for _ in 1:10_000_000) != 1729
end
| WatchJuliaBurn | https://github.com/JuliaWTF/WatchJuliaBurn.jl.git |
|
[
"MIT"
] | 0.2.0 | 60ffa82c431da11742da05c719f282e57cfec60a | code | 1790 | # plain try throws
@test_throws LoadError macroexpand(@__MODULE__, :(@🐒 begin
🙈
println("Should not have come this far.")
42
end))
# try...catch
@test 42 == @🐒 begin
🙈
2 + 40
🙊(e)
rethrow()
end
@test 12 == @🐒 begin
🙈
2 + "40"
error("This was not supposed to happen.")
🙊(e)
e isa MethodError || rethrow()
12
end
@test_throws DomainError @🐒 begin
🙈
c╯°□°ↄ╯(DomainError("On purpose!"))
🙊(e)
rethrow()
end
# try...finally
@test @test_logs (:warn, "This worked!") @🐒(begin
🙈
3*3
🙉
@warn "This worked!"
end) == 9 # note that the parenthesis around (begin...end) are necessary here because == is infix
@test_logs (:info, "I ran!") @test_throws ErrorException @🐒 begin
🙈
error("On purpose!")
🙉
@info "I ran!"
end
# try...catch...finally
@test_logs (:info, "I'm surprised that this works!") @test_throws ArgumentError @🐒 begin
🙈
c╯°□°ↄ╯(ArgumentError("Skyrim belongs to the Nords!"))
🙊(e)
rethrow()
🙉
@info "I'm surprised that this works!"
end
@test_logs (:warn, "Finally!") @test "Done" == @🐒 begin
🙈
"D"*"o"*"n"*"e"
🙊(e)
rethrow()
🙉
@warn "Finally!"
end
# nested monkeys
@test_logs (:info, "Try Finally") (:warn, "Catch Finally") @test_throws ErrorException @🐒 begin
🙈
@🐒 begin
🙈
throw(ArgumentError("Inner throw"))
🙊(e)
e isa ArgumentError && throw(DomainError("Got caught once."))
🙉
@info "Try Finally"
end
🙊(e)
@🐒 begin
🙈
e isa DomainError && throw(InitError(:here, e))
🙊(e)
e isa InitError && error("I'm still alive")
🙉
@warn "Catch Finally"
end
end
# I'm too lazy to test all the other combinations
# leaving out the 🙈 throws
@test_throws LoadError macroexpand(@__MODULE__, :(@🐒 begin
🙊(e)
"Lol."
🙉
"No!"
end))
| WatchJuliaBurn | https://github.com/JuliaWTF/WatchJuliaBurn.jl.git |
|
[
"MIT"
] | 0.2.0 | 60ffa82c431da11742da05c719f282e57cfec60a | code | 2951 | using Latexify
using WatchJuliaBurn
ord_keys = 🔤(🧺(WatchJuliaBurn.😃📖), by=x->🎻(🥇(x)))
function 🥈🎻(😃😃😃)
🚪🚶([😃 isa Tuple ? 🎻(🥇(😃)) : 🎻(😃) for 😃 in 😃😃😃], ", ")
end
function 🥈version(😃😃😃)
if 👩(x->x isa Tuple, 😃😃😃)
return 🚪🚶([😃 isa Tuple ? 🎻(😃[2]) : "1" for 😃 in 😃😃😃], ", ")
else
return ""
end
end
ar = reduce(⬇️😻, ['`' * 🎻(🔑) * '`' 🥈🎻(😃😃😃) 🥈version(😃😃😃)]
for (🔑, 😃😃😃) in ord_keys)
ar = ⬇️😻(["Function" "Emojis" "Julia Version"], ar)
md_ar = md(ar; latex=👎)
code_snippet = "vcat(round(log(pi)), broadcast(tan ∘ inv, rand(3)))"
dots = "https://raw.githubusercontent.com/JuliaLang/julia/master/doc/src/assets/julia.ico"
intro = """[](https://github.com/theogf/WatchJuliaBurn.jl/actions/workflows/CI.yml)
# ⌚<img src="$(dots)" height="26"/>🔥.jl
WatchJuliaBurn aims at destroying the look of your code by adding emojis like :smile: and kaomojis like c╯°□°ↄ╯ instead of your favorite Julia functions.
For a serious use of unicode characters see also [Ueauty.jl](https://gitlab.com/ExpandingMan/Ueauty.jl)
## Add your own awfulness!
Don't hesitate to add your worst creations via PR. All you need to do is to add the function and emoji to the `😃📖` internal `📖` in `src/📖.jl`. Don't touch the `README`!
It will be automatically generated after your PR is merged. Also tests are optional since tests are for losers!
## Emojify your code
You can use the `emojify` function to recursively emojify all the files in a given path. `emojify` will replace all functions for which an alias is known
by the corresponding emoji (a random one is picked every ⏲️ if multiple options are possible).
For example:
```julia
$(code_snippet)
```
will return
```julia
$(WatchJuliaBurn.emojify_string(code_snippet))
```
## List of emojis
"""
outro = """
## Control Flow
You can now replace boring old try/catch/finally clauses with fancy monkey flow!
```julia
@🐒 begin
🙈
💣()
🙊(💥)
😥(💥)
🙉
🍌()
end
```
Parsing may behave weird when there are infix operators around the block. Try enclosing everything with parenthesis like `@🐒(begin ... end)` if that happens.
## REPL
You can use the [EmojiSymbols.jl](https://github.com/wookay/EmojiSymbols.jl) package to super-turbo-charge your REPL experience!
You can press space to launch space invaders (`julia>[space]`). This feature is helpfully
bundled with ⌚<img src="https://raw.githubusercontent.com/JuliaLang/julia/master/doc/src/assets/julia.ico" height="26"/>🔥
version 0.2.0 and above and all packages that depend on it.
"""
# Overwrite the README
open(joinpath(@__DIR__, "..", "README.md"), "w") do io
🖊️(io, intro * 🎻(md_ar) * outro)
end
# Emojify all the src files of WatchJuliaBurn.
foreach(walkdir(joinpath(pkgdir(WatchJuliaBurn), "src"))) do (root, dirs, files)
foreach(emojify, joinpath.(root, filter(!∈(["📖.jl", "WatchJuliaBurn.jl"]), files)))
end
| WatchJuliaBurn | https://github.com/JuliaWTF/WatchJuliaBurn.jl.git |
|
[
"MIT"
] | 0.2.0 | 60ffa82c431da11742da05c719f282e57cfec60a | docs | 8648 | [](https://github.com/theogf/WatchJuliaBurn.jl/actions/workflows/CI.yml)
# ⌚<img src="https://raw.githubusercontent.com/JuliaLang/julia/master/doc/src/assets/julia.ico" height="26"/>🔥.jl
WatchJuliaBurn aims at destroying the look of your code by adding emojis like :smile: and kaomojis like c╯°□°ↄ╯ instead of your favorite Julia functions.
For a serious use of unicode characters see also [Ueauty.jl](https://gitlab.com/ExpandingMan/Ueauty.jl)
## Add your own awfulness!
Don't hesitate to add your worst creations via PR. All you need to do is to add the function and emoji to the `😃📖` internal `📖` in `src/📖.jl`. Don't touch the `README`!
It will be automatically generated after your PR is merged. Also tests are optional since tests are for losers!
## Emojify your code
You can use the `emojify` function to recursively emojify all the files in a given path. `emojify` will replace all functions for which an alias is known
by the corresponding emoji (a random one is picked every ⏲️ if multiple options are possible).
For example:
```julia
vcat(round(log(pi)), broadcast(tan ∘ inv, rand(3)))
```
will return
```julia
⬇️😹(🎠(🪵(🍰)), 📡(🧑🏻➡️🧑🏽 ∘ ↔, 🎰(3)))
```
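For example, a sketch of running it over a package (the `src/` paths are illustrative; with `overwrite=false` a single file is not rewritten and the emojified source is returned instead):
```julia
using WatchJuliaBurn
emojify("src/") # rewrite every .jl file under src/ in place
str = emojify("src/myfile.jl"; overwrite=false) # just return the emojified source
```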
## List of emojis
| Function | Emojis | Julia Version |
| --------------------:| -------------------------------:| -------------:|
| `AbstractChar` | 🫥🚗 | 1.8 |
| `AbstractDict` | 🫥📖 | 1.8 |
| `AbstractDisplay` | 🫥📺 | 1.8 |
| `AbstractFloat` | 🫥🛟 | 1.8 |
| `AbstractMatrix` | 🫥🔢 | 1.8 |
| `AbstractString` | 🫥🧵 | 1.8 |
| `ArgumentError` | 💬🚨 | |
| `Bool` | 👍👎 | |
| `Char` | 🚗 | |
| `Dict` | 📖 | |
| `ENV` | 🧧 | |
| `IO` | 🪀½, 👁️😲 | 1.2, 1 |
| `Matrix` | 🔢 | |
| `Pair` | 🍐 | |
| `String` | 🧵 | |
| `Threads` | 🪢 | 1.5 |
| `abs` | 👔💪, 🎽💪 | |
| `any` | 👩 | |
| `axes` | 🪓🪓 | 1.2 |
| `broadcast` | 📡 | |
| `cat` | 😻, 😹, 🐈 | |
| `cd` | 💿, 🇨🇩 | |
| `chop` | 🥢, 🌳🪓 | 1, 1.2 |
| `clamp` | 🗜️ | |
| `collect` | 🧺 | |
| `cot` | 🧥, 🥼 | |
| `count` | 🧮 | |
| `count_ones` | 🧮1️⃣1️⃣ | |
| `count_zeros` | 🧮0️⃣0️⃣ | |
| `delete!` | 🔥 | |
| `display` | 📺 | |
| `div` | 🤿 | 1.2 |
| `download` | 📥 | |
| `dump` | 💩 | |
| `eachindex` | ☝️☝️ | |
| `error` | 💣 | |
| `exit` | 🚪 | |
| `false` | 👎 | |
| `fill` | 🚰 | |
| `findall` | 🕵️ | |
| `findfirst` | 🔎🥇 | |
| `findnext` | 🔎⏭ | |
| `first` | 🥇 | |
| `float` | ⛵️, 🛟 | 1, 1.8 |
| `flush` | 😳 | |
| `foldr` | 🗂, 📁 | |
| `get` | 🤲 | |
| `getfield` | 🤲🌽, 🤲🌾 | |
| `getindex` | 🤲☝️ | |
| `getkey` | 🤲🔑, 🤲🗝 | |
| `getproperty` | 🤲🏡 | |
| `hcat` | ➡️😻, ➡️😹, ➡️🐈 | |
| `im` | 🇮🇲 | |
| `imag` | 🔮 | |
| `inv` | ↔ | |
| `isreal` | 🛸❓ | |
| `join` | 🚪🚶 | |
| `keys` | 🔑, 🗝 | |
| `kill` | ⚰️ | |
| `length` | 📏 | |
| `log` | 🪵 | 1.5 |
| `map` | 🗺 | |
| `mean ∘ skipmissing` | 😠 | |
| `mod` | 🛵🔧 | |
| `nothing` | ⬛ | |
| `peek` | ⛰️ | 1.5 |
| `pi` | 🥧, 🍰 | |
| `pop!` | 🍾, 🏹🎈 | |
| `print` | 🖨️ | |
| `push!` | 🏋️ | |
| `rand` | 🎰, 🎲 | |
| `raw` | 🥩 | |
| `round` | 🎠, 🔵 | |
| `run` | 🏃 | |
| `searchsorted` | 🔎🔤 | |
| `secd` | 🥈 | |
| `show` | ☝️ | |
| `sign` | 🪧, 🚏 | 1.5, 1.5 |
| `sleep` | 😴, 💤 | |
| `sort` | 🔤 | |
| `string` | 🎻 | |
| `tan` | 🧑🏻➡️🧑🏽, 👩🏻➡️👩🏽 | |
| `throw` | c╯°□°ↄ╯, 🤮, 🚮 | |
| `time` | 🕛, ⏱️, ⌛, ⏲️ | |
| `tr` | 🇹🇷 | |
| `true` | ✅, 👍, 👌 | |
| `vcat` | ⬇️😻, ⬇️😹, ⬇️🐈 | |
| `view` | 👀, 👁️ | |
| `write` | 🖊️, ✍️, 🖋️ | |
| `zip` | 🤐 | |
## Control Flow
You can now replace boring old try/catch/finally clauses with fancy monkey flow!
```julia
@🐒 begin
🙈
💣()
🙊(💥)
😥(💥)
🙉
🍌()
end
```
Parsing may behave weird when there are infix operators around the block. Try enclosing everything with parenthesis like `@🐒(begin ... end)` if that happens.
## REPL
You can use the [EmojiSymbols.jl](https://github.com/wookay/EmojiSymbols.jl) package to super-turbo-charge your REPL experience!
| WatchJuliaBurn | https://github.com/JuliaWTF/WatchJuliaBurn.jl.git |
|
[
"MIT"
] | 0.3.3 | eb5ef90123811e7732b6bc146907dc209fd50c55 | code | 2330 | module BehaviorTree
using AbstractTrees
abstract type BT end
struct Sequence <: BT
tasks
name::String
end
struct Selector <: BT
tasks
name::String
end
Sequence(tasks) = Sequence(tasks, "")
Selector(tasks) = Selector(tasks, "")
function run_task(task::Function)
@debug("RUNNING task $(task)")
result = task()
return result, result
end
function run_task(task::Function, state)
@debug("RUNNING task $(task)")
result = task(state)
return result, result
end
function run_task(task::BT)
tick(task)
end
function run_task(task::BT, state)
tick(task, state)
end
function format(task::BT)
task.name
end
function format(task::Function)
string(task)
end
function sequence(tree::Sequence, task_runner)
@debug("RUNNING sequence $(tree.name)")
results = []
for task in tree.tasks
result, status = task_runner(task)
push!(results, status)
if result == :running
@info("sequence $(tree.name) running at $(format(task))")
return :running, results
end
if result == :failure
@info("sequence $(tree.name) failed at $(format(task))")
return :failure, results
end
end
return :success, results
end
function tick(tree::Sequence)
task_runner(x) = run_task(x)
sequence(tree, task_runner)
end
function tick(tree::Sequence, state)
task_runner(x) = run_task(x, state)
sequence(tree, task_runner)
end
function selector(tree::Selector, task_runner)
@debug("RUNNING selector $(tree.name)")
results = []
for task in tree.tasks
result, status = task_runner(task)
push!(results, status)
if result == :running
@info("selector $(tree.name) running at $(format(task))")
return :running, results
end
if result == :success
@info("selector $(tree.name) succeeded at $(format(task))")
return :success, results
end
end
return :failure, results
end
function tick(tree::Selector)
task_runner(x) = run_task(x)
selector(tree, task_runner)
end
function tick(tree::Selector, state)
task_runner(x) = run_task(x, state)
selector(tree, task_runner)
end
include("abstractrees.jl")
include("visualization.jl")
export tick, Sequence, Selector
end # module
| BehaviorTree | https://github.com/fabid/BehaviorTree.jl.git |
|
[
"MIT"
] | 0.3.3 | eb5ef90123811e7732b6bc146907dc209fd50c55 | code | 436 | ## AbstractTrees interface
function AbstractTrees.children(tree::BT)
tree.tasks
end
function AbstractTrees.printnode(io::IO, node::Sequence)
if node.name != ""
repr = "$(node.name) ->"
else
repr = "->"
end
print(io, repr)
end
function AbstractTrees.printnode(io::IO, node::Selector)
if node.name != ""
repr = "$(node.name) ?"
else
repr = "?"
end
print(io, repr)
end | BehaviorTree | https://github.com/fabid/BehaviorTree.jl.git |
|
[
"MIT"
] | 0.3.3 | eb5ef90123811e7732b6bc146907dc209fd50c55 | code | 3220 |
function validateGraphVizInstalled()
# Check if GraphViz is installed
try
(read(`dot -'?'`, String)[1:10] == "Usage: dot") || error()
catch
error("GraphViz is not installed correctly. Make sure GraphViz is installed. If you are on Windows, manually add the path to GraphViz to your path variable. You should be able to run 'dot' from the command line.")
end
end
function dot2png(dot_graph::AbstractString)
# Generate PNG image from DOT graph
validateGraphVizInstalled()
proc = open(`dot -Tpng`, "r+")
write(proc.in, dot_graph)
close(proc.in)
return read(proc.out, String)
end
function dot2jpg(dot_graph::AbstractString)
    # Generate JPG image from DOT graph
validateGraphVizInstalled()
proc = open(`dot -Tjpg`, "r+")
write(proc.in, dot_graph)
close(proc.in)
return read(proc.out, String)
end
export dot2png,dot2jpg
colors = Dict(
:failure =>"#ff9a20",
:running => "#00bbd3",
:success => "#49b156",
)
function toDotContent(tree, parent_id)
name = BehaviorTree.format(tree)
node_id = string(parent_id, replace(name, "!"=>""))
if length(children(tree)) == 0
shape = if startswith(name, "is") "ellipse" else "box" end
return string([
"""$node_id:n\n""",
"""$node_id [shape=$shape, label="$name"]\n""",
"""$parent_id -> $node_id""",
]...)
end
shape = "box"
label = if(typeof(tree) == Selector) "?" else "->" end
out = string(
"""$node_id:n\n""",
"""$node_id [shape=$shape, label="$label"]\n""",
["""$(toDotContent(c, node_id))\n""" for c in children(tree)]...)
if parent_id != ""
out = string(
out,
"""$parent_id -> $node_id""",
)
end
out
end
function toStatusDotContent(tree, parent_id)
name = BehaviorTree.format(tree.tree.x)
node_id = string(parent_id, replace(name, "!"=>""))
if length(children(tree)) == 0
shape = if startswith(name, "is") "ellipse" else "box" end
color = colors[tree.shadow.x]
return string([
"""$node_id:n\n""",
"""$node_id [shape=$shape, label="$name", style=filled,color="$color"]\n""",
"""$parent_id -> $node_id""",
]...)
end
shape = "box"
label = if(typeof(tree.tree.x) == Selector) "?" else "->" end
#TODO: more efficient implementation?
lastchild = tree.shadow.x
while length(children(lastchild)) > 0
lastchild = last(children(lastchild))
end
color = colors[lastchild]
out = string(
"""$node_id:n\n""",
"""$node_id [shape=$shape, label="$label", style=filled, color="$color"]\n""",
["""$(toStatusDotContent(c, node_id))\n""" for c in children(tree)]...)
if parent_id != ""
out = string(
out,
"""$parent_id -> $node_id""",
)
end
out
end
function toDot(tree::BT, results)
st = ShadowTree(tree, results)
content = toStatusDotContent(st, "")
return """digraph tree {
$(content)
}"""
end
function toDot(tree::BT)
content = toDotContent(tree, "")
return """digraph tree {
$(content)
}"""
end
export toDot | BehaviorTree | https://github.com/fabid/BehaviorTree.jl.git |
|
[
"MIT"
] | 0.3.3 | eb5ef90123811e7732b6bc146907dc209fd50c55 | code | 2203 | using BehaviorTree
using AbstractTrees
using Test
function doSuccess()
:success
end
function isTrue()
:success
end
function doSuccess!()
:success
end
function doFailure()
:failure
end
function doRunning()
:running
end
function isPositive(x)
if x>0
return :success
end
:failure
end
@testset "BehaviorTree.jl" begin
tree = Sequence([doSuccess])
@test tick(tree)[1] == :success
@test tick(tree)[2] == [:success]
tree = Sequence([doFailure])
@test tick(tree)[1] == :failure
tree = Sequence([doRunning])
@test tick(tree)[1] == :running
tree = Sequence([doSuccess, doFailure, doSuccess])
@test tick(tree)[1] == :failure
@test length([ c for c in children(tree)]) == 3
tree = Selector([doFailure])
@test tick(tree)[1] == :failure
tree = Selector([doSuccess])
@test tick(tree)[1] == :success
tree = Selector([doRunning])
@test tick(tree)[1] == :running
tree = Selector([doFailure, doSuccess, doFailure])
@test tick(tree)[1] == :success
# nesting
tree = Selector([doFailure, Sequence([doSuccess]), doFailure])
@test tick(tree)[1] == :success
@info tick(tree)
@test tick(tree)[2] == [:failure,[:success]]
# args
tree = Sequence([isPositive])
@test tick(tree, 1)[1] == :success
@test tick(tree, -1)[1] == :failure
tree = Selector([isPositive])
@test tick(tree, 1)[1] == :success
@test tick(tree, -1)[1] == :failure
end
@testset "dot" begin
bt = Selector([
doFailure,
Sequence([isTrue, doFailure, doSuccess], "choice"),
doRunning,
doSuccess!
], "head")
dot_graph = toDot(bt)
#println(dot_graph)
r = tick(bt)
#png_graph =dot2png(dot_graph)
bt = Selector([
doFailure,
Sequence([isTrue, doFailure, doSuccess], "choice"),
Sequence([isTrue, doSuccess, doRunning], "choice2"),
doSuccess!
], "head")
dot_graph = toDot(bt)
r = tick(bt)
dot_graph_status = toDot(bt, r[2])
println(dot_graph_status)
png_graph = dot2png(dot_graph_status)
filename= "test.png"
open(filename, "w") do png_file
write(png_file, png_graph)
end
end
| BehaviorTree | https://github.com/fabid/BehaviorTree.jl.git |
|
[
"MIT"
] | 0.3.3 | eb5ef90123811e7732b6bc146907dc209fd50c55 | docs | 2767 | # BehaviorTree.jl
Behavior Trees (BTs) are a powerful way to describe the behavior of autonomous agents with applications in robotics and AI.
This implementation is based on [Behavior Trees in Robotics and AI: An Introduction](https://arxiv.org/abs/1709.00084).
It was developed by Team L3 to qualify in [NASA's Space Robotics Challenge Phase 2](https://spacecenter.org/22-teams-selected-for-final-stage-of-space-robotics-challenge-phase-2/) (More details on usage in this paper: [Human-Robot Teaming Strategy for Fast Teleoperation of a Lunar Resource Exploration Rover](https://www.researchgate.net/publication/344879839_Human-Robot_Teaming_Strategy_for_Fast_Teleoperation_of_a_Lunar_Resource_Exploration_Rover)).
# Installation
## Optional System Dependencies
Visualization tools depends on `graphviz`.
On Ubuntu/Debian:
```bash
sudo apt-get install graphviz
```
## Install Julia Package
With Julia ≥ 1.4 (may work on previous 1.x versions but not tested) add the package
```julia
julia> ]
(v1.4) pkg> add BehaviorTree
```
## Basic Usage
Two primitives are available to build behavior trees: `Sequence` and `Selector`. They accept a list of tasks, each of which can be a Sequence, a Selector, or, in the case of a leaf, a function returning one of `:success`, `:failure` or `:running`.
```julia
doSuccess(bb=Dict()) = :success
isTrue(bb=Dict()) = :success
doFailure(bb=Dict()) = :failure
doRunning(bb=Dict()) = :running
bt = Selector([
doFailure,
Sequence([isTrue, doFailure, doSuccess], "choice"),
doRunning,
    doSuccess
], "head")
```
Execution of the tree happens via the `tick` function, which accepts a shared `blackboard` object used to share state between tasks.
```julia
blackboard = Dict{Symbol, Any}()
status, results = tick(bt, blackboard)
```
Ticking usually happens in a loop at a frequency determined by the needs of the application, as in the sketch below.
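A minimal sketch of such a loop (the 100-tick cap and 10 Hz rate are illustrative choices, not part of the API):
```julia
blackboard = Dict{Symbol, Any}()
for _ in 1:100
    status, results = tick(bt, blackboard)
    status == :running || break # stop once the tree succeeds or fails
    sleep(0.1)                  # tick at roughly 10 Hz
end
```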
## Visualization
As behavior trees implement the AbstractTrees interface, it is possible to use [D3Trees](https://github.com/sisl/D3Trees.jl) for visualization:
```julia
d3tree = D3Tree(bt)
inbrowser(d3tree, "firefox")
```
Utilities to generate graphviz output via the `.dot` format are also provided.
```julia
dot_graph=toDot(bt)
filename= "example.dot"
open(filename, "w") do dot_file
write(dot_file, dot_graph)
end
filename= "example.png"
png_graph = dot2png(dot_graph)
open(filename, "w") do png_file
write(png_file, png_graph)
end
```

Passing the execution results to the `toDot` function generates a visualization of the current state:
```julia
dot_graph=toDot(bt, results)
filename= "status.png"
png_graph = dot2png(dot_graph)
open(filename, "w") do png_file
write(png_file, png_graph)
end
```

| BehaviorTree | https://github.com/fabid/BehaviorTree.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 599 | # make.jl
#using Pkg
#Pkg.activate("/Users/gianlucamastrantonio/Dropbox (Politecnico di Torino Staff)/lavori/gitrepo/BayesSizeAndShape/todelete/rat")
using Documenter, BayesSizeAndShape
makedocs(
modules = [BayesSizeAndShape],
format = Documenter.HTML(; prettyurls = get(ENV, "CI", nothing) == "true"),
authors = "gianluca.mastrantonio",
sitename = "BayesSizeAndShape.jl",
pages = Any["index.md"]
# strict = true,
# clean = true,
# checkdocs = :exports,
)
deploydocs(
repo = "github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git",
push_preview = true
)
| BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 1453 | using Random
using Distributions
using LinearAlgebra
using StatsBase
using Kronecker
using DataFrames
using StatsModels
using CategoricalArrays
using Plots
using BayesSizeAndShape
dataset_rats = dataset("rats");
dataset_desciption("rats")
dataset_names()
landmark = dataset_rats.x;
landmark = landmark ./ 100.0
subject = dataset_rats.no; # defined here because the plot colors below index by subject
time = dataset_rats.time;
plot(landmark[:,1,1], landmark[:,2,1],legend = false, color = cgrad(:tab20, 21)[Int64(subject[1])])
for i = 2:size(landmark,3)
plot!(landmark[:,1,i], landmark[:,2,i], color = cgrad(:tab20, 21)[Int64(subject[i])])
end
title!("Landmarks")
sizeshape = sizeshape_helmertproduct_reflection(landmark);
plot(sizeshape[:,1,1], sizeshape[:,2,1],legend = false, color = cgrad(:tab20, 21)[Int64(subject[1])])
for i = 2:size(landmark,3)
plot!(sizeshape[:,1,i], sizeshape[:,2,i], color = cgrad(:tab20, 21)[Int64(subject[i])])
end
title!("Size And Shape")
covariates = DataFrame(
time = time,
subject = categorical(string.(subject))
);
k = size(landmark, 1) - 1; # landmarks minus one: the dimension of the data after the Helmert transform
outmcmc = SizeAndShapeWithReflectionMCMC(
    landmark,
    @formula(landmarks ~ 1 + time + subject),
    covariates,
    (iter=1000, burnin=200, thin=2),
    Normal(0.0, 100000.0),
    InverseWishart(k + 2, 5.0 * Matrix{Float64}(I, k, k))
);
betaout = posterior_samples_beta(outmcmc);
sigmaout = posterior_samples_sigma(outmcmc);
predictive_mean = sample_predictive_zbr(outmcmc);
predictive_obs = sample_predictive_zbr_plus_epsilon(outmcmc); | BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 2174 | module BayesSizeAndShape
##### Packages
using Distributions, Random
using LinearAlgebra, PDMats
using ProgressMeter
using Kronecker
using DataFrames
using CategoricalArrays
using StatsModels
using ToggleableAsserts
using CodecBzip2
using Reexport, RData
##### Include
include(joinpath("types.jl"))
include(joinpath("general_functions.jl"))
include(joinpath("mcmc.jl"))
include(joinpath("sampler_mean.jl"))
include(joinpath("sampler_covariance.jl"))
include(joinpath("sampler_latentobservations.jl"))
include(joinpath("modeloutput.jl"))
#include(joinpath("sampler.jl"))
include(joinpath("wrappers.jl"))
#include(joinpath("deprecated.jl"))
include(joinpath("dataset.jl"))
include(joinpath("prediction.jl"))
include(joinpath("external_function.jl"))
#include(joinpath("old files/sim.jl"))
##### Dataset
#rats = load(joinpath(@__DIR__,"data/rats.jld"))["data"]
#rats_cov = load(joinpath(@__DIR__,"data/rats.jld"))["cov"]
#rats_cov = load(joinpath(@__DIR__,"data/rats_cov.jld"))["data"]
##### Functions
export
### Dataset
dataset,
dataset_desciption,
dataset_names,
### Models
generalSizeAndShapeMCMC,
SizeAndShapeWithReflectionMCMC,
sample_predictive_zbr,
sample_predictive_zbr_plus_epsilon,
### TIPES
KeepReflection,
RemoveLocationHelmert,
ValueP2,
ValueP3,
DoNotRemoveSize,
GramSchmidtMean,
MCMCNormalDataKeepSize,
MCMCNormalDataKeepSize,
LinearMean,
MCMCLinearMean,
GeneralCoVarianceIndependentDimension,
generalMCMCObjectOUT,
MCMCGeneralCoVarianceIndependentDimension,
SizeAndShapeModelOutput,
### EXTERNAL
sizeshape_helmertproduct_reflection,
posterior_samples_beta,
posterior_samples_sigma
#NoPriorBeta,
#NoPriorSigma,
#compute_ss_from_pre,
#compute_ss!,
#compute_ss,
#KeepReflection,
#DoKeepReflection,
#GeneralSigma,
#standardize_reg,
#SizeAndShapeMCMC,
#Valuep2,
#compute_designmatrix,
#compute_dessignmatrix,
#compute_helmertized_configuration,
#SizeAndShapeModelOutput,
#predictmean,
#dataset_names,
#create_designmatrix,
#generalSizeAndShapeMCMC
end
| BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 1841 | function dataset(dataset_name::AbstractString)
basename = joinpath(@__DIR__, "..", "data")
rdaname = joinpath(basename, string(dataset_name, ".rda"))
#println(rdaname)
if isfile(rdaname)
dataset = load(rdaname)[dataset_name].data
return (x = dataset[1], no = dataset[2], time = dataset[3])
end
#csvname = joinpath(basename, string(dataset_name, ".csv.gz"))
#if isfile(csvname)
# return open(csvname,"r") do io
# uncompressed = IOBuffer(read(GzipDecompressorStream(io)))
# DataFrame(CSV.File(uncompressed, delim=',', quotechar='\"', missingstring="NA",
# types=get(Dataset_typedetect_rows, (package_name, dataset_name), nothing)) )
# end
#end
#error("Unable to locate dataset file $rdaname or $csvname")
end
"""
dataset_desciption(dataset_name::AbstractString)
It gives a description of the elements of the dataset (dataset_name) - the datasets are taken from the R package shapes
"""
function dataset_desciption(dataset_name::AbstractString)
if dataset_name == "rats"
print("
Description:
Rat skulls data, from X rays. 8 landmarks in 2 dimensions, 18 individuals observed at 7, 14, 21, 30, 40, 60, 90, 150 days
Format:
x: An array of landmark configurations 8 x 2 x 144
no: Individual rat number (note rats 3, 13, 20 missing due to incomplete data)
time: observed time in days
")
else
println("dataset_name not available")
end
end
"""
dataset_names()
It returns the names of the available datasets - the datasets are taken from the R package shapes
"""
function dataset_names()
return ["rats"]
end | BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 2741 |
"""
sizeshape_helmertproduct_reflection(dataset::Array{Float64,3})
The function computes the Size-And-Shape version of the data `dataset`, with reflection information. The output is computed using the Helmert matrix and the SVD transformation.
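For example, with the bundled rats dataset (an 8 x 2 x 144 array of landmarks) the result is a 7 x 2 x 144 array:

    landmarks = dataset("rats").x
    ss = sizeshape_helmertproduct_reflection(landmarks) # size (7, 2, 144)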
"""
function sizeshape_helmertproduct_reflection(dataset::Array{Float64,3})
n = size(dataset,3)
k = size(dataset,1)-1
p = size(dataset,2)
if p == 2
helmmat = RemoveLocationHelmert(k, ValueP2());
posthelmert = remove_location(dataset, helmmat);
sizeandshape, _ = compute_sizeshape_fromnolocdata(posthelmert, BayesSizeAndShape.KeepReflection(), BayesSizeAndShape.ValueP2());
elseif p ==3
helmmat = RemoveLocationHelmert(k, ValueP3());
posthelmert = remove_location(dataset, helmmat);
sizeandshape, _ = compute_sizeshape_fromnolocdata(posthelmert, BayesSizeAndShape.KeepReflection(), BayesSizeAndShape.ValueP3());
else
error("p must be <= 3")
end
return sizeandshape
end
"""
posterior_samples_beta(modeloutput::SizeAndShapeModelOutput{KeepReflection,RL,P,DoNotRemoveSize,GramSchmidtMean,<:MCMCNormalDataKeepSize,<:LinearMean,<:MCMCLinearMean,CT,CM,PS}) where {
RL<:RemoveLocation,
CT<:TypeModelCoVariance,
CM<:MCMCTypeModelCoVariance,
PS<:MCMCObjectOUT,
P<:ValueP
}
The function extracts the posterior sample of the regressive coefficients from an object of type `SizeAndShapeModelOutput`.
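For example, given the output of `SizeAndShapeWithReflectionMCMC`:

    betaout = posterior_samples_beta(outmcmc)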
"""
function posterior_samples_beta(modeloutput::SizeAndShapeModelOutput{KeepReflection,RL,P,DoNotRemoveSize,GramSchmidtMean,<:MCMCNormalDataKeepSize,<:LinearMean,<:MCMCLinearMean,CT,CM,PS}) where {
RL<:RemoveLocation,
CT<:TypeModelCoVariance,
CM<:MCMCTypeModelCoVariance,
PS<:MCMCObjectOUT,
P<:ValueP
}
return modeloutput.posteriorsamples.beta
end
"""
posterior_samples_sigma(modeloutput::SizeAndShapeModelOutput{KeepReflection,RL,P,DoNotRemoveSize,GramSchmidtMean,<:MCMCNormalDataKeepSize,<:LinearMean,<:MCMCLinearMean,CT,CM,PS}) where {
RL<:RemoveLocation,
CT<:TypeModelCoVariance,
CM<:MCMCTypeModelCoVariance,
PS<:MCMCObjectOUT,
P<:ValueP
}
The function extracts the posterior sample of the covariance matrix from an object of type `SizeAndShapeModelOutput`.
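For example, given the output of `SizeAndShapeWithReflectionMCMC`:

    sigmaout = posterior_samples_sigma(outmcmc)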
"""
function posterior_samples_sigma(modeloutput::SizeAndShapeModelOutput{KeepReflection,RL,P,DoNotRemoveSize,GramSchmidtMean,<:MCMCNormalDataKeepSize,<:LinearMean,<:MCMCLinearMean,CT,CM,PS}) where {
RL<:RemoveLocation,
CT<:TypeModelCoVariance,
CM<:MCMCTypeModelCoVariance,
PS<:MCMCObjectOUT,
P<:ValueP
}
return modeloutput.posteriorsamples.sigma
end | BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 6677 | function standardize_reg(reg::Matrix{Float64}, valp::ValueP2, indentifiability::GramSchmidtMean)
reg[:,:] = reg * standardize_reg_computegamma(reg, valp,indentifiability)
end
function standardize_reg_computegamma(reg::Matrix{Float64}, valp::ValueP2, indentifiability::GramSchmidtMean)
a1 = reg[1, :]
a2 = reg[2, :]
g1 = a1 / norm(a1, 2)
g2 = (a2 - (transpose(a2) * g1) * g1) / norm(a2 - (transpose(a2) * g1) * g1, 2)
gammamat = reshape([g1; g2], (2, 2))
if det(gammamat) < 0
gammamat[:, 2] = - gammamat[:, 2]
end
return gammamat
end
function standardize_reg(reg::Matrix{Float64}, valp::ValueP3, indentifiability::GramSchmidtMean)
reg[:,:] = reg * standardize_reg_computegamma(reg, valp,indentifiability)
end
function standardize_reg_computegamma(reg::Matrix{Float64}, valp::ValueP3, indentifiability::GramSchmidtMean)
a1 = reg[1, :]
a2 = reg[2, :]
a3 = reg[3, :]
g1 = a1 / norm(a1, 2)
g2 = (a2 - (transpose(a2) * g1) * g1) / norm(a2 - (transpose(a2) * g1) * g1, 2)
g3 = (a3 - (transpose(a3) * g1) * g1 - (transpose(a3) * g2) * g2) / norm(a3 - (transpose(a3) * g1) * g1 - (transpose(a3) * g2) * g2, 2)
gammamat = reshape([g1; g2; g3], (3, 3))
if det(gammamat) < 0
gammamat[:, 2] = - gammamat[:, 2]
end
return gammamat
end
#function standardize_reg(reg::Matrix{Float64}, valp::ValueP2)
# reg[:,:] = reg * standardize_reg_computegamma(reg, valp)
#end
#function standardize_reg_computegamma(reg::Matrix{Float64}, valp::ValueP2)
# a1 = reg[1, :]
# a2 = reg[2, :]
# g1 = a1 / norm(a1, 2)
# g2 = (a2 - (transpose(a2) * g1) * g1) / norm(a2 - (transpose(a2) * g1) * g1, 2)
# gammamat = reshape([g1; g2], (2, 2))
# if det(gammamat) < 0
# gammamat[:, 2] = - gammamat[:, 2]
# end
# return gammamat
#end
function compute_designmatrix(zmat::Matrix{Float64}, k::Int64)::Array{Float64,3}
n, d = size(zmat)
res::Array{Float64,3} = zeros(Float64, k, k * d, n)
for i = 1:n
res[:, :, i] = kronecker(transpose(zmat[i, :]), Matrix{Float64}(I, k, k))
end
return res
end
function compute_designmatrix(zmat::DataFrame, k::Int64)::Array{Float64,3}
n, d = size(zmat)
res::Array{Float64,3} = zeros(Float64, k, k * d, n)
for i = 1:n
res[:, :, i] = kronecker(transpose([values(zmat[i, :])...]), Matrix{Float64}(I, k, k))
end
return res
end
function remove_location(landmarks::Array{Float64,3}, removelocation::RemoveLocationHelmert)::Array{Float64,3}
H = removelocation.matrix
ret = deepcopy(landmarks)
ret = ret[2:end,:,:]
for i = 1:size(landmarks,3)
ret[:,:,i] = H*landmarks[:,:,i]
end
return ret
end
#function compute_sizeshape_fromnolocdata(landmarks::Array{Float64,3}, keepreflection::KeepReflection, valp::ValueP2)::Tuple{Array{Float64, 3}, Array{Float64, 3}}
# dataret = deepcopy(landmarks)
# rotret = Array{Float64}(undef,2,2,size(landmarks,3))
# for i = axes(landmarks, 3)
# app = svd(landmarks[:, :, i])
# U = app.U
# V = transpose(app.Vt)
# if sign(V[1, 2]) != sign(V[2, 1])
# dataret[:, :, i] = U * Diagonal(app.S)
# rotret[:, :, i] = V
# else
# U[:,2] = -1.0*U[:,2]
# V[:, 2] = -1.0 * V[:, 2]
# dataret[:, :, i] = U * Diagonal(app.S)
# rotret[:, :, i] = V
# end
# end
# #println(size(landmarks))
# #println(size(dataret))
# return dataret, rotret
#end
function compute_sizeshape_fromnolocdata(landmarks::Array{Float64,3}, keepreflection::KeepReflection, valp::P)::Tuple{Array{Float64, 3}, Array{Float64, 3}} where {P<:ValueP}
p::Int64 = size(landmarks,2)
dataret = deepcopy(landmarks)
rotret = Array{Float64}(undef,p,p,size(landmarks,3))
for i = axes(landmarks, 3)
app = svd(landmarks[:, :, i])
U = app.U
V = transpose(app.Vt)
if det(V) > 0.0
dataret[:, :, i] = U * Diagonal(app.S)
rotret[:, :, i] = V
else
U[:,2] = -1.0*U[:,2]
V[:, 2] = -1.0 * V[:, 2]
dataret[:, :, i] = U * Diagonal(app.S)
rotret[:, :, i] = V
end
end
return dataret, rotret
end
function create_designmatrix(fm::FormulaTerm, covariates::DataFrame, k::Int64 )::Tuple{Array{Float64, 3}, Int64, Vector{String}, Matrix{Float64}}
#println(covariates[1:3,:])
#error("")
covariates_copy = deepcopy(covariates)
for i = 1:size(covariates_copy,2)
if isa(covariates_copy[:,i], CategoricalArray)
elseif isa(covariates_copy[1,i], Real)
#covariates_copy[:,i] = (covariates_copy[:,i] .- mean(covariates_copy[:,i])) ./ std(covariates_copy[:,i])
else
error("Only factors or Real variables are allowed in covariates")
end
end
designmatrix_v2_app = ModelFrame(fm, covariates_copy);
designmatrix_v2 = ModelMatrix(designmatrix_v2_app).m
#println(designmatrix_v2[1:3,:])
#error("")
designmatrix = compute_designmatrix(designmatrix_v2, k) # dimensions k, k * d, n
@assert sum(designmatrix_v2[:, 1] .== 1) == size(designmatrix_v2,1) "intercept needed"
return designmatrix, size(designmatrix_v2,2), coefnames(designmatrix_v2_app), designmatrix_v2
end
function compute_yrdata(yrdata::Array{Float64,3}, ssdata::Array{Float64,3}, rmat::Array{Float64,3})
for i = axes(ssdata,3)
yrdata[:, :, i] = ssdata[:, :, i] * transpose(rmat[:, :, i])
end
return nothing
end
function compute_angle_from_rmat(i::Int64,angle::Matrix{Float64}, rmat::Array{Float64,3}, valp::ValueP2, reflection::KeepReflection)
angle[1, i] = atan(rmat[2, 1, i], rmat[1, 1, i])
return nothing
end
function compute_angle_from_rmat(i::Int64,angle::Matrix{Float64}, rmat::Array{Float64,3}, valp::ValueP3, reflection::KeepReflection)
#### http://eecs.qmul.ac.uk/~gslabaugh/publications/euler.pdf ###
angle[2,i] = -asin(rmat[3,1,i])
angle[3,i] = atan( rmat[3,2,i]/cos(angle[2,i]), rmat[3,3,i]/cos(angle[2,i]))
angle[1,i] = atan( rmat[2,1,i]/cos(angle[2,i]), rmat[1,1,i]/cos(angle[2,i]))
return nothing
end
#function compute_angle_from_rmat(i::Int64, angle::Matrix{Float64}, rmat::Array{Float64,3}, valp::Valuep3, reflection::KeepReflection)
# #x convention
# angle[1, i] = atan(rmat[3, 1, i], rmat[3, 2, i])
# angle[2, i] = acos(rmat[3, 3, i])
# angle[3, i] = -atan(rmat[1, 3, i], rmat[2, 3, i])
# return nothing
#end
| BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 8879 |
function generalSizeAndShapeMCMC(;
landmarks::Array{Float64,3},
fm::FormulaTerm = @formula(1~ 1),
covariates::DataFrame, #
iterations::NamedTuple{(:iter, :burnin, :thin),Tuple{Int64,Int64,Int64}} =(
iter=1000,
burnin=200,
thin=2
),
betaprior::ContinuousUnivariateDistribution = Normal(0.0, 10000.0),
sigmaprior::ContinuousMatrixDistribution,
beta_init::Matrix{Float64} = zeros(Float64,0,0),
sigma_init::Symmetric{Float64,Matrix{Float64}} = Symmetric(zeros(Float64,0,0)),
rmat_init::Array{Float64,3} = zeros(Float64,0,0,0),
#dormat::Bool,
identification::String = ["gramschmidt"][1],
meanmodel::String = ["linear"][1],
covariancemodel::String = ["general_nocrosscorrelation"][1],
keepreflection::String = ["no", "yes"][2],
removelocation::String = ["no", "helmert"][2],
removesize::String = ["no", "norm"][2],
rmatdosample::Bool = true,
verbose::Bool = true
)
##### dimensions
k::Int64 = size(landmarks, 1)-1
kland::Int64 = size(landmarks, 1)
p::Int64 = size(landmarks, 2)
n::Int64 = size(landmarks, 3)
pangle::Int64 = ifelse(p==2,1,3)
##### Types
reflectioninformation::Reflection = KeepReflection();
if keepreflection == "yes" reflectioninformation = KeepReflection()
elseif keepreflection == "no" reflectioninformation = DoNotKeepReflection(); error("keepreflection == \"no\" not implemented")
else error("keepreflection should be in [\"no\", \"yes\"]")
end
valp::ValueP = ValueP2();
if p == 2 valp = ValueP2();
elseif p == 3 valp = ValueP3(); #error("Models with dimension p>2 are not implemented")
else error("Models with dimension p>2 are not implemented")
end
locationinformation::RemoveLocation = RemoveLocationHelmert(k, valp);
if removelocation == "helmert" locationinformation = RemoveLocationHelmert(k, valp);
elseif removelocation == "no" locationinformation = DoNotRemoveLocation(kland, valp); error("removelocation == \"no\" not implemented")
else error("removelocation should be in [\"no\", \"helmert\"]")
end
sizeinformation::RemoveSize = DoNotRemoveSize();
if removesize == "no" sizeinformation = DoNotRemoveSize();
elseif removesize == "norm" sizeinformation = RemoveSizeNorm(); error("removesize \"norm\" not implemented")
else error("removesize should be in [\"no\", \"norm\"]")
end
##### identifiability #####
identifiability_constraint::IdentifiabilityConstraint = GramSchmidtMean()
if identification == "gramschmidt" identifiability_constraint = GramSchmidtMean()
else error("identification should be in [\"gramschmidt\"]")
end
if (identification == "gramschmidt") & (meanmodel != "linear")
error("identification \"gramschmidt\" can only be used with meanmodel \"linear\"")
end
##### DATASET #####
if rmatdosample == true
rmatsample = DoSampleRmat()
else
rmatsample = DoNotSampleRmat()
end
datamodel = SSDataType(landmarks, reflectioninformation,locationinformation,valp,sizeinformation,identifiability_constraint);
data_mcmc = MCMCdata(rmat_init, datamodel,rmatsample; sdprop_adapt_init = 0.1, accratio_adapt_init = 0.234, molt_adapt_init = 0.4, iter_adapt_init = 50, init_adapt_init= 100, end_adapt_init = Int64(iterations.burnin*0.9), a_adapt_init = 100.0, b_adapt_init = 200.0)
###### mean #####
mean_model::TypeModelMean = LinearMean(fm, covariates, datamodel, identifiability_constraint)
if meanmodel == "linear" mean_model = LinearMean(fm, covariates, datamodel, identifiability_constraint)
else error("meanmodel should be in [\"linear\"]")
end
mean_mcmc::MCMCTypeModelMean = MCMCMean(mean_model, valp, beta_init,betaprior,datamodel)
###### covariance #####
covariance_model::TypeModelCoVariance = GeneralCoVarianceIndependentDimension(identifiability_constraint,datamodel);
if covariancemodel == "general_nocrosscorrelation" covariance_model = GeneralCoVarianceIndependentDimension(identifiability_constraint,datamodel);
else error("covariancemodel should be in [\"general_nocrosscorrelation\"]")
end
covariance_mcmc = MCMCCovariance(covariance_model, sigma_init, sigmaprior, datamodel)
##### asserts
@assert size(datamodel.nolocdata, 3) == size(covariates,1) # n
@assert size(datamodel.nolocdata, 3) == size(mean_model.model_matrix,1) # n
@assert size(mean_model.designmatrix, 1) == size(datamodel.nolocdata, 1) # k
@assert size(mean_model.designmatrix, 3) == size(datamodel.nolocdata, 3) # n
@assert size(mean_model.designmatrix, 2) == size(datamodel.nolocdata, 1) * size(mean_model.model_matrix,2) #k d
##### print messages #####
if verbose
if (removelocation != "no") & (removesize == "no")
print("\n\n")
print("Size And Shape Model")
end
if keepreflection == "yes"
print(" with reflection information \n")
end
println("\nThe data has ",
kland," ",
p,"-dimensional landmarks in ",
n, " shapes",
)
        if meanmodel == "linear"
            println("The mean is modelled with a linear function and it has ", mean_model.d, " regressive coefficients for each dimension*(landmark-1), with a total of ", size(mean_model.designmatrix,2)*datamodel.p, " regressors")
        end
        if covariancemodel == "general_nocrosscorrelation"
            println("The covariance is unstructured and shared between dimensions, with no cross-correlation")
        end
if typeof(identifiability_constraint) <:GramSchmidtMean
println("\nFor identifiability, the regressive coefficients are trasformed using a Gram-Schmidt transformation\n")
end
end
###### MCMC object #####
iter = Int64(iterations.iter)
burnin = Int64(iterations.burnin)
thin = Int64(iterations.thin)
sampletosave = trunc(Int64, round((iter - burnin) / thin))
##### #### #### #### ####
##### MCMC out
##### #### #### #### ####
    posteriorparameters = create_object_output(sampletosave, mean_mcmc, covariance_mcmc, data_mcmc, mean_model)
##### #### #### #### ####
    ##### algorithm
##### #### #### #### ####
iterMCMC = Int64(0)
thinburnin = burnin
p1 = Progress(burnin, desc = "burnin ", offset = 0, showspeed = true)
p2 = Progress(
burnin + (sampletosave - 1) * thin,
desc = "iterations ",
offset = 0,
showspeed = true,
)
isburn = true
println("MCMC settings ")
println("Iterations: ", iter)
println("Burnin: ", burnin)
println("Thin: ", thin)
println("Number of posterior samples: ", sampletosave)
println("Number of threads: ", Threads.nthreads())
for iMCMC = 1:sampletosave
for jMCMC = 1:thinburnin
iterMCMC += 1
sampler_mean(datamodel, data_mcmc,mean_model,mean_mcmc,covariance_model,covariance_mcmc)
sampler_covariance(datamodel, data_mcmc,mean_model,mean_mcmc,covariance_model,covariance_mcmc)
sampler_latentobservations(iterMCMC, datamodel, data_mcmc,mean_model,mean_mcmc,covariance_model,covariance_mcmc)
ProgressMeter.next!(p2; showvalues = [(:iterations, iterMCMC)])
end
thinburnin = thin
isburn = false
        copy_parameters_out(iMCMC, posteriorparameters, mean_mcmc, datamodel, covariance_mcmc, data_mcmc)
end
return SizeAndShapeModelOutput(
datamodel,
data_mcmc,
mean_model,
mean_mcmc,
covariance_model,
covariance_mcmc,
        posteriorparameters,
(iter = iter, burnin = burnin, thin = thin, savedsamples =sampletosave)
)
##### #### #### ####
##### OUTPUT
##### #### #### ####
#nsim = size(betaOUT,1)
#mcmcoutputSAVE = (beta = bbb, sigma = sss, rmat = rrr, angle = ttt)
#mcmcoutputArraysSAVE = (beta = betaOUT, sigma = sigmaOUT, rmat = rmatOUT, angle = angleOUT)
#covariatesSAVE = (colnames_modelmatrix = colnames_modelmatrix, fm = deepcopy(fm), covariates = deepcopy(covariates),
# designmatrix_step1 = designmatrix_v2, designmatrix_step2 = designmatrix)
#datasetSAVE = deepcopy(dataset)
#modeltypesSAVE = (dormat=dormat, reflection = reflection, sigmatype=sigmatype, betaprior = betaprior, sigmaprior = sigmaprior, valp = valp);
#iterationsSAVE = (iter = iter, burnin = burnin, thin = thin, savedsamples =sampletosave);
#indicesSAVE = (k=k, p=p, n=n, d=d);
#return SizeAndShapeModelOutput(mcmcoutputSAVE , mcmcoutputArraysSAVE , covariatesSAVE , datasetSAVE , modeltypesSAVE , iterationsSAVE , indicesSAVE )
end
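# A minimal usage sketch for generalSizeAndShapeMCMC (hypothetical toy data:
# the landmark array, covariate name and prior settings below are illustrative,
# not taken from the package examples):
#
# using DataFrames, Distributions, LinearAlgebra, StatsModels
# landmarks = randn(10, 2, 50)            # k+1 = 10 landmarks, p = 2, n = 50 shapes
# covs = DataFrame(x = randn(50))
# out = generalSizeAndShapeMCMC(;
#     landmarks = landmarks,
#     fm = @formula(1 ~ 1 + x),
#     covariates = covs,
#     iterations = (iter = 1000, burnin = 200, thin = 2),
#     sigmaprior = InverseWishart(12, Matrix{Float64}(I, 9, 9)),  # k = 9
# )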
#### #### #### #### #### ####
#### Model OUT
#### #### #### #### #### ####
abstract type outputMCMCSizeAndShape end
struct SizeAndShapeModelOutput{R<:Reflection,RL<:RemoveLocation,VP<:ValueP,RS<:RemoveSize,IC<:IdentifiabilityConstraint
,DM<:MCMCData, MT<:TypeModelMean,MM<:MCMCTypeModelMean,CT<:TypeModelCoVariance,CM<:MCMCTypeModelCoVariance,PS<:MCMCObjectOUT
} <: outputMCMCSizeAndShape
datatype::SSDataType{R,RL,VP,RS,IC}
datamcmc::DM
meantype::MT
meanmcmc::MM
covtype::CT
covmcmc::CM
posteriorsamples::PS
mcmciterations::NamedTuple{(:iter, :burnin, :thin, :savedsamples),Tuple{Int64,Int64,Int64, Int64}}
function SizeAndShapeModelOutput(
datatype::SSDataType{R,RL,VP,RS,IC},
datamcmc::DM,
meantype::MT,
meanmcmc::MM,
covtype::CT,
covmcmc::CM,
posteriorsamples::PS,
mcmciterations::NamedTuple{(:iter, :burnin, :thin, :savedsamples),Tuple{Int64,Int64,Int64, Int64}}
) where {R<:Reflection,RL<:RemoveLocation,VP<:ValueP,RS<:RemoveSize,IC<:IdentifiabilityConstraint
,DM<:MCMCData, MT<:TypeModelMean,MM<:MCMCTypeModelMean,CT<:TypeModelCoVariance,CM<:MCMCTypeModelCoVariance,PS<:MCMCObjectOUT
}
new{R,RL,VP,RS,IC,DM, MT,MM,CT,CM,PS}(datatype, datamcmc, meantype, meanmcmc, covtype, covmcmc,posteriorsamples, mcmciterations)
end
end
#abstract type outputMCMCSizeAndShape end
#struct SizeAndShapeModelOutput{TR<:Reflection,TS<:SigmaType,TP<:Valuep,DB<:ContinuousUnivariateDistribution,DS<:ContinuousMatrixDistribution, FF<:FormulaTerm} <: outputMCMCSizeAndShape
# mcmcoutput::NamedTuple{(:beta, :sigma, :rmat, :angle), Tuple{DataFrame, DataFrame, DataFrame, DataFrame}}
# mcmcoutputArrays::NamedTuple{(:beta, :sigma, :rmat, :angle), Tuple{Array{Float64, 3}, Array{Float64, 3}, Array{Float64, 4}, Array{Float64, 3}}}
# covariates::NamedTuple{(:colnames_modelmatrix, :fm, :covariates, :designmatrix_step1, :designmatrix_step2), Tuple{Vector{String}, FF, DataFrame, Matrix{Float64}, Array{Float64, 3}} }
# dataset::Array{Float64,3};
# modeltypes::NamedTuple{(:dormat, :reflection, :sigmatype, :betaprior, :sigmaprior, :valp), Tuple{Bool, TR, TS, DB,DS, TP} }
# iterations::NamedTuple{(:iter, :burnin, :thin, :savedsamples),Tuple{Int64,Int64,Int64, Int64}};
# indices::NamedTuple{(:k, :p, :n, :d),Tuple{Int64,Int64,Int64, Int64}};
# function SizeAndShapeModelOutput(
# mcmcoutput::NamedTuple{(:beta, :sigma, :rmat, :angle), Tuple{DataFrame, DataFrame, DataFrame, DataFrame}},
# mcmcoutputArrays::NamedTuple{(:beta, :sigma, :rmat, :angle), Tuple{Array{Float64, 3}, Array{Float64, 3}, Array{Float64, 4}, Array{Float64, 3}}},
# covariates::NamedTuple{(:colnames_modelmatrix, :fm, :covariates, :designmatrix_step1, :designmatrix_step2), Tuple{Vector{String}, FF, DataFrame, Matrix{Float64}, Array{Float64, 3}} },
# dataset::Array{Float64,3},
# modeltypes::NamedTuple{(:dormat, :reflection, :sigmatype, :betaprior, :sigmaprior, :valp), Tuple{Bool, TR, TS, DB,DS, TP} },
# iterations::NamedTuple{(:iter, :burnin, :thin, :savedsamples),Tuple{Int64,Int64,Int64, Int64}},
# indices::NamedTuple{(:k, :p, :n, :d),Tuple{Int64,Int64,Int64, Int64}}) where {TR<:Reflection,TS<:SigmaType,TP<:Valuep,DB<:ContinuousUnivariateDistribution,DS<:ContinuousMatrixDistribution,FF<:FormulaTerm}
# new{TR,TS,TP,DB,DS, FF}(mcmcoutput, mcmcoutputArrays, covariates, dataset, modeltypes, iterations, indices)
# end
#end
#### #### #### #### #### ####
#### FUNCTIONS
#### #### #### #### #### ####
#function create_output(beta::Array{Float64, 3},
# sigma::Array{Float64, 3},
# rmat::Array{Float64, 4},
# angle::Array{Float64, 3},
# beta_nonid::Array{Float64, 3},
# rmat_nonid::Array{Float64, 4},
# angle_nonid::Array{Float64, 3},
# colnames_modelmatrix::Vector{String},
# fm::FormulaTerm,
# betaprior::ContinuousUnivariateDistribution,
# sigmaprior::ContinuousMatrixDistribution,
# k::Int64,
# p::Int64,
# n::Int64,
# d::Int64,
# dormat::Bool,
# reflection::Reflection,
# sigmatype::SigmaType,
# dataset::Array{Float64,3},
# covariates::DataFrame,
# iterations::NamedTuple{(:iter, :burnin, :thin),Tuple{Int64,Int64,Int64}},
# designmatrix_step1::Array{Float64, 3},
# designmatrix_step2::Matrix{Float64})
# p2KeepReflectionGeneralSigma(beta::Array{Float64, 3},
# sigma::Array{Float64, 3},
# rmat::Array{Float64, 4},
# angle::Array{Float64, 3},
# beta_nonid::Array{Float64, 3},
# rmat_nonid::Array{Float64, 4},
# angle_nonid::Array{Float64, 3},
# colnames_modelmatrix::Vector{String},
# fm::FormulaTerm,
# betaprior::ContinuousUnivariateDistribution,
# sigmaprior::ContinuousMatrixDistribution,
# k::Int64,
# p::Int64,
# n::Int64,
# d::Int64,
# dormat::Bool,
# reflection::Reflection,
# sigmatype::SigmaType,
# dataset::Array{Float64,3},
# covariates::DataFrame,
# iterations::NamedTuple{(:iter, :burnin, :thin),Tuple{Int64,Int64,Int64}},
# designmatrix_step1::Array{Float64, 3},
# designmatrix_step2::Matrix{Float64})
#end
function sample_predictive_zbr(modeloutput::SizeAndShapeModelOutput{KeepReflection,RL,P,DoNotRemoveSize,GramSchmidtMean,<:MCMCNormalDataKeepSize,<:LinearMean,<:MCMCLinearMean,CT,CM,PS}) where {
RL<:RemoveLocation,
CT<:TypeModelCoVariance,
CM<:MCMCTypeModelCoVariance,
PS<:MCMCObjectOUT,
P<:ValueP
}
designmatrix = modeloutput.meantype.designmatrix
beta = modeloutput.posteriorsamples.identbeta
rmat = modeloutput.posteriorsamples.identrmat
nsim::Int64 = size(beta,1)
n::Int64 = modeloutput.datatype.n
k::Int64 = modeloutput.datatype.k
p::Int64 = modeloutput.datatype.p
res = DataFrame(zeros(Float64,nsim,k*p*n),:auto)
for iobs = 1:n
for i = 1:size(beta,1)
res[i,((iobs-1)*k*p) .+ (1:(k*p))] = (designmatrix[:,:,iobs]*beta[i,:,:]*rmat[i,:,:,iobs])[:]
#res[i,((iobs-1)*k*p) .+ (1:(k*p))] = (designmatrix[:,:,iobs]*beta[i,:,:])[:]
end
end
rename!(res, "mu_".* string.(repeat(1:n, inner = k*p)) .* ",(" .* string.(repeat(1:k, outer = p*n)) .* "," .* string.(repeat(repeat(1:p, inner = k),outer = n)) .* ")" )
return res
#res = [zeros(Float64, nsim, k,p) for i = 1:n];
#for iobs = 1:n
# for i = 1:size(beta,1)
# res[iobs][i,:,:] = designmatrix[:,:,iobs]*beta[i,:,:]*rmat[i,:,:,iobs]
# end
#end
#res = DataFrame(zeros(Float64,nsim,k*p*n),:auto)
#return res
end
function sample_predictive_zbr_plus_epsilon(modeloutput::SizeAndShapeModelOutput{KeepReflection,RL,P,DoNotRemoveSize,GramSchmidtMean,<:MCMCNormalDataKeepSize,<:LinearMean,<:MCMCLinearMean,CT,CM,PS}) where {
RL<:RemoveLocation,
CT<:TypeModelCoVariance,
CM<:MCMCTypeModelCoVariance,
PS<:MCMCObjectOUT,
P<:ValueP
}
designmatrix = modeloutput.meantype.designmatrix
beta = modeloutput.posteriorsamples.identbeta
sigma = modeloutput.posteriorsamples.identsigma
rmat = modeloutput.posteriorsamples.identrmat
nsim::Int64 = size(beta,1)
n::Int64 = modeloutput.datatype.n
k::Int64 = modeloutput.datatype.k
p::Int64 = modeloutput.datatype.p
res = DataFrame(zeros(Float64,nsim,k*p*n),:auto)
for iobs = 1:n
for i = 1:size(beta,1)
res[i,((iobs-1)*k*p) .+ (1:(k*p))] = (designmatrix[:,:,iobs]*beta[i,:,:]*rmat[i,:,:,iobs])[:] + vcat([rand(MvNormal([0.0 for i = 1:k],Symmetric(sigma[i,:,:]))) for iii = 1:p]...)
#for ip = 1:p
# res[i,((iobs-1)*k*p + (p-1)*k) .+ (1:k)] = res[i,((iobs-1)*k*p + (p-1)*k) .+ (1:k)]
#end
#res[i,((iobs-1)*k*p) .+ (1:(k*p))] = (designmatrix[:,:,iobs]*beta[i,:,:])[:]
end
end
rename!(res, "X_".* string.(repeat(1:n, inner = k*p)) .* ",(" .* string.(repeat(1:k, outer = p*n)) .* "," .* string.(repeat(repeat(1:p, inner = k),outer = n)) .* ")" )
return res
#res = [zeros(Float64, nsim, k,p) for i = 1:n];
#for iobs = 1:n
# for i = 1:size(beta,1)
# res[iobs][i,:,:] = designmatrix[:,:,iobs]*beta[i,:,:]*rmat[i,:,:,iobs]
# end
#end
#res = DataFrame(zeros(Float64,nsim,k*p*n),:auto)
#return res
end
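# Both predictive helpers return a DataFrame with one column per
# (observation, landmark, dimension) combination: sample_predictive_zbr draws
# the posterior mean configurations Z*B*R, while sample_predictive_zbr_plus_epsilon
# also adds Gaussian noise from the posterior covariance. A hedged sketch,
# assuming `out` is a SizeAndShapeModelOutput returned by the MCMC function:
#
# mu_samples = sample_predictive_zbr(out)
# x_samples = sample_predictive_zbr_plus_epsilon(out)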
#### #### #### #### #### ####
#### COVARIANCE
#### #### #### #### #### ####
function sampler_covariance(
datamodel::SSDataType{R,RL,VP,RS,IC},
data_mcmc::MCMCData,
mean_model::TypeModelMean,
mean_mcmc::MCMCTypeModelMean,
covariance_model::TypeModelCoVariance,
covariance_mcmc::MCMCTypeModelCoVariance
) where {R<:Reflection,RL<:RemoveLocation,VP<:ValueP,RS<:RemoveSize,IC<:IdentifiabilityConstraint}
error("sampler_covariance")
end
function sampler_covariance(
datamodel::SSDataType{R,RL,VP,RS,IC},
data_mcmc::MCMCData,
mean_model::TypeModelMean,
mean_mcmc::MCMCLinearMean,
covariance_model::TypeModelCoVariance,
covariance_mcmc::MCMCGeneralCoVarianceIndependentDimension{<:NoPriorSigma}
) where {R<:Reflection,RL<:RemoveLocation,VP<:ValueP,RS<:RemoveSize,IC<:IdentifiabilityConstraint}
    # NoPriorSigma: the covariance matrix is kept fixed, so nothing is sampled
end
function sampler_covariance(
datamodel::SSDataType{R,RL,VP,DoNotRemoveSize,IC},
data_mcmc::MCMCNormalDataKeepSize{R,RL,VP},
mean_model::LinearMean,
mean_mcmc::MCMCLinearMean,
covariance_model::GeneralCoVarianceIndependentDimension,
covariance_mcmc::MCMCGeneralCoVarianceIndependentDimension{<:InverseWishart}
) where {R<:Reflection,VP<:ValueP,IC<:IdentifiabilityConstraint,RL<:DoRemoveLocation}
meanMCMC = mean_mcmc.mean_mcmc
sigmaMCMC = covariance_mcmc.covariance_mcmc
invMat = covariance_mcmc.invcovariance_mcmc
prior = covariance_mcmc.prior_covariance
yrdata = data_mcmc.yr_mcmc
#kd::Int64 = size(betaMCMC,1)
p::Int64 = datamodel.p
n::Int64 = datamodel.n
nup::Float64 = params(prior)[1] + Float64(n*p)
Psip = deepcopy(params(prior)[2].mat)
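    # Conjugate Inverse-Wishart update: degrees of freedom nu0 + n*p and scale
    # Psi0 plus the residual outer products (y - mu)(y - mu)' accumulated over
    # all dimensions and observations.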
app = yrdata[:, :,:] - meanMCMC[:,:,:]
for ip = 1:p
for j = 1:n
Psip[:,:] += app[:,ip,j]*transpose(app[:,ip,j])
end
end
#println("a1= ", yrdata[:,:,1])
#println("a2 = ", meanMCMC[:,:,1])
#println("ssdata = ", data_mcmc.ssdata[:,:,1])
#println("")
#println([n,p])
#println("")
#println(nup)
#println(Psip)
sigmaMCMC.data[:, :] = rand(InverseWishart(nup, Symmetric(Psip).data))
#println(sigmaMCMC.data)
cc = cholesky(sigmaMCMC)
covariance_mcmc.invcovariance_mcmc.data[:,:] = inv(cc)[:,:]
covariance_mcmc.logdeterminant_mcmc[:]= [log(det(cc))]
#error("")
return nothing
end
#function sampler_beta(xdata::Array{Float64,3}, designmatrix::Array{Float64,3}, prior::NoPriorBeta, betaMCMC::Matrix{Float64}, sigmaMCMC::Matrix{Float64}, valp::Valuep, betastandMCMC::Matrix{Float64})
#end
#function sampler_beta(xdata::Array{Float64,3}, designmatrix::Array{Float64,3}, prior::Normal, betaMCMC::Matrix{Float64}, sigmaMCMC::Matrix{Float64}, valp::Valuep, betastandMCMC::Matrix{Float64})
# #standardize_reg(betastandMCMC, valp)
#end
#### #### #### #### #### ####
#### LATENT ROTATIONS
#### #### #### #### #### ####
function sampler_latentobservations(
iterMCMC::Int64,
datamodel::SSDataType{R,RL,VP,RS,IC},
data_mcmc::MCMCData,
mean_model::TypeModelMean,
mean_mcmc::MCMCTypeModelMean,
covariance_model::TypeModelCoVariance,
covariance_mcmc::MCMCTypeModelCoVariance
) where {R<:Reflection,RL<:RemoveLocation,VP<:ValueP,RS<:RemoveSize,IC<:IdentifiabilityConstraint}
error("sampler_latentobservations")
end
function sampler_latentobservations(
iterMCMC::Int64,
datamodel::SSDataType{R,RL,VP,RS,IC},
data_mcmc::MCMCNormalDataKeepSize{R,RL,VP,<:DoNotSampleRmat},
mean_model::TypeModelMean,
mean_mcmc::MCMCTypeModelMean,
covariance_model::TypeModelCoVariance,
covariance_mcmc::MCMCTypeModelCoVariance
) where {R<:Reflection,RL<:RemoveLocation,VP<:ValueP,RS<:RemoveSize,IC<:IdentifiabilityConstraint}
    # DoNotSampleRmat: the rotation matrices are kept fixed, so nothing is sampled
end
function sampler_latentobservations(
iterMCMC::Int64,
datamodel::SSDataType{KeepReflection,RL,ValueP2,RS,IC},
data_mcmc::MCMCNormalDataKeepSize{KeepReflection,RL,ValueP2,<:DoSampleRmat},
mean_model::TypeModelMean,
mean_mcmc::MCMCTypeModelMean,
covariance_model::TypeModelCoVariance,
covariance_mcmc::MCMCTypeModelCoVariance
) where {RL<:RemoveLocation,RS<:RemoveSize,IC<:IdentifiabilityConstraint}
#meanMCMC = mean_mcmc.mean_mcmc
#sigmaMCMC = covariance_mcmc.covariance_mcmc
#invMat = covariance_mcmc.invcovariance_mcmc
#prior = covariance_mcmc.prior_covariance
#yrdata = data_mcmc.yr_mcmc
#betaMCMC = mean_mcmc.beta_mcmc
invMat = covariance_mcmc.invcovariance_mcmc
p::Int64 = datamodel.p
n::Int64 = datamodel.n
k::Int64 = datamodel.k
for j = 1:n
#mui = designmatrix[:, :, j] * betaMCMC[:, :]
#@toggled_assert size(mui) == (k,p)
#println(size(datamodel.ssdata[:,:,j]))
Ai = transpose(mean_mcmc.mean_mcmc[:,:,j]) * invMat * datamodel.ssdata[:,:,j]
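        # Under the Gaussian likelihood the full conditional of the 2D rotation
        # angle is von Mises, with concentration sqrt(x1^2 + x2^2) and mean
        # atan(x2, x1), where x1 and x2 are built from the entries of Ai.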
#println(size(Ai))
x1 = Ai[1, 1] + Ai[2, 2]
x2 = Ai[2, 1] - Ai[1, 2]
#x2 = Ai[1, 2] - Ai[2, 1]
kvonmises = sqrt(x1^2 + x2^2)
muvonmises = atan(x2 / kvonmises, x1 / kvonmises)
#muvonmises = atan(x1 / kvonmises, x2 / kvonmises)
data_mcmc.angles_mcmc[1, j] = rand(VonMises(muvonmises, kvonmises))
data_mcmc.rmat_mcmc[1, 1, j] = cos(data_mcmc.angles_mcmc[1, j])
data_mcmc.rmat_mcmc[1, 2, j] = -sin(data_mcmc.angles_mcmc[1, j])
data_mcmc.rmat_mcmc[2, 1, j] = sin(data_mcmc.angles_mcmc[1, j])
data_mcmc.rmat_mcmc[2, 2, j] = cos(data_mcmc.angles_mcmc[1, j])
#xdata[:, :, j] = ydata[:, :, j] * transpose(rmatMCMC[:,:,j])
end
compute_yrdata(data_mcmc.yr_mcmc, datamodel.ssdata, data_mcmc.rmat_mcmc)
end
function sampler_latentobservations(
iterMCMC::Int64,
datamodel::SSDataType{KeepReflection,RL,ValueP3,RS,IC},
data_mcmc::MCMCNormalDataKeepSize{KeepReflection,RL,ValueP3,<:DoSampleRmat},
mean_model::TypeModelMean,
mean_mcmc::MCMCTypeModelMean,
covariance_model::TypeModelCoVariance,
covariance_mcmc::MCMCTypeModelCoVariance
) where {RL<:RemoveLocation,RS<:RemoveSize,IC<:IdentifiabilityConstraint}
n::Int64 = datamodel.n
for j = 1:n
sampler_latentobservations(j, iterMCMC, datamodel, data_mcmc, mean_model, mean_mcmc, covariance_model, covariance_mcmc)
end
compute_yrdata(data_mcmc.yr_mcmc, datamodel.ssdata, data_mcmc.rmat_mcmc)
end
function sampler_latentobservations(j::Int64,
iterMCMC::Int64,
datamodel::SSDataType{KeepReflection,RL,ValueP3,RS,IC},
data_mcmc::MCMCNormalDataKeepSize{KeepReflection,RL,ValueP3,<:DoSampleRmat},
mean_model::TypeModelMean,
mean_mcmc::MCMCTypeModelMean,
covariance_model::TypeModelCoVariance,
covariance_mcmc::MCMCTypeModelCoVariance
) where {RL<:RemoveLocation,RS<:RemoveSize,IC<:IdentifiabilityConstraint}
#### https://arxiv.org/abs/math/0503712
#### Bayesian alignment using hierarchical models, with applications in protein bioinformatics
p::Int64 = datamodel.p
n::Int64 = datamodel.n
k::Int64 = datamodel.k
angles = data_mcmc.angles_mcmc
invMat = covariance_mcmc.invcovariance_mcmc
MHratio::Float64 = 0.0
molt::Float64 = 0.0
alpha_mean::Float64 = 0.0
Ai = transpose(mean_mcmc.mean_mcmc[:,:,j]) * invMat * datamodel.ssdata[:,:,j]
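    # Euler-angle scheme: the full conditionals of angles 1 and 3 are von Mises
    # and are sampled directly; angle 2 has a non-standard density on
    # (-pi/2, pi/2) and is updated with an adaptive Metropolis-Hastings step.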
#### angle1 (-pi, pi)
x1 = (Ai[2,2] - sin(angles[2,j]) * Ai[1,3]) * cos(angles[3,j])
x1 += (-Ai[2,3]- sin(angles[2,j])*Ai[1,2]) * sin(angles[3,j])
x1 += cos(angles[2,j])*Ai[1,1]
x2 = (-sin(angles[2,j])*Ai[2,3]-Ai[1,2]) * cos(angles[3,j])
x2 += (Ai[1,3]- sin(angles[2,j])*Ai[2,2]) * sin(angles[3,j])
x2 += cos(angles[2,j])*Ai[2,1]
kvonmises = sqrt(x1^2 + x2^2)
muvonmises = atan(x2 / kvonmises, x1 / kvonmises)
angles[1, j] = rand(VonMises(muvonmises, kvonmises))
#### angle2 (-pi/2, pi/2)
x1 = sin(angles[1, j])*Ai[2,1] + cos(angles[1, j])*Ai[1,1]
x1 += sin(angles[3, j])*Ai[3,2] + cos(angles[3, j])*Ai[3,3]
x2 = (-sin(angles[3, j])*Ai[1,2]-cos(angles[3, j])*Ai[1,3])*cos(angles[1, j])
x2 += (-sin(angles[3, j])*Ai[2,2]-cos(angles[3, j])*Ai[2,3])*sin(angles[1, j])
x2 += Ai[3,1]
# prop = rand(Uniform(angles[2, j]- lim, angles[2, j] + lim))
prop = rand(Normal(angles[2, j],data_mcmc.sdprop_adapt[2,j]))
if (prop> - (pi/Float64(2.0))) & (prop < (pi/Float64(2.0)))
MHratio = x1*cos(prop) + x2*sin(prop) + log(cos(prop))
MHratio -= x1*cos(angles[2, j]) + x2*sin(angles[2, j]) + log(cos(angles[2, j]))
if rand(Uniform(0.0,1.0)) < exp(MHratio)
angles[2, j] = prop
#println("Acc")
else
#println("Rej")
end
data_mcmc.sumalpha[2,j] += min(exp(MHratio), 1.0)
else
data_mcmc.sumalpha[2,j] += 0.0
end
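    # Robbins-Monro-style adaptation: during the adaptation window the log
    # proposal standard deviation is nudged so that the empirical acceptance
    # rate tracks the target accratio_adapt.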
if (iterMCMC>data_mcmc.init_adapt[2,j]) & (iterMCMC<data_mcmc.end_adapt[2,j])
molt = data_mcmc.a_adapt[2,j]/(data_mcmc.b_adapt[2,j] + iterMCMC)
alpha_mean = data_mcmc.sumalpha[2,j]/data_mcmc.iter_adapt[2,j]
data_mcmc.sdprop_adapt[2,j] = exp( log(data_mcmc.sdprop_adapt[2,j]) + molt*(alpha_mean - data_mcmc.accratio_adapt[2,j]) )
end
if mod(iterMCMC, data_mcmc.iter_adapt[2,j]) == 0
data_mcmc.sumalpha[2,j] = 0.0
end
#### angle3 (-pi, pi)
x1 = (Ai[2,2] - sin(angles[2, j])*Ai[1,3])*cos(angles[1, j])
x1 += (-sin(angles[2, j])*Ai[2,3]-Ai[1,2])*sin(angles[1, j])
x1 += cos(angles[2,j])*Ai[3,3]
x2 = (-Ai[2,3]-sin(angles[2, j])*Ai[1,2])*cos(angles[1, j])
x2 += (Ai[1,3]-sin(angles[2, j])*Ai[2,2])*sin(angles[1, j])
x2 += cos(angles[2,j])*Ai[3,2]
kvonmises = sqrt(x1^2 + x2^2)
muvonmises = atan(x2 / kvonmises, x1 / kvonmises)
angles[3, j] = rand(VonMises(muvonmises, kvonmises))
compute_rtridim_from_angles(j, data_mcmc.rmat_mcmc, angles)
#data_mcmc.rmat_mcmc[1, 1, j] = cos(data_mcmc.angles_mcmc[1, j])
#data_mcmc.rmat_mcmc[1, 2, j] = -sin(data_mcmc.angles_mcmc[1, j])
#data_mcmc.rmat_mcmc[2, 1, j] = sin(data_mcmc.angles_mcmc[1, j])
#data_mcmc.rmat_mcmc[2, 2, j] = cos(data_mcmc.angles_mcmc[1, j])
#xdata[:, :, j] = ydata[:, :, j] * transpose(rmatMCMC[:,:,j])
end
function compute_rtridim_from_angles(i::Int64, rmat::Array{Float64,3}, angle::Matrix{Float64})
M1::Matrix{Float64} = zeros(Float64,3,3)
M2::Matrix{Float64} = zeros(Float64,3,3)
M3::Matrix{Float64} = zeros(Float64,3,3)
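    # M1 rotates about the z-axis (angle 1), M2 about the y-axis (angle 2),
    # and M3 about the x-axis (angle 3); the composition is R = M1*M2*M3.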
M1[1,1] = cos(angle[1,i])
M1[2,2] = cos(angle[1,i])
M1[1,2] = -sin(angle[1,i])
M1[2,1] = sin(angle[1,i])
M1[3,3] = Float64(1.0)
M2[1,1] = cos(angle[2,i])
M2[3,3] = cos(angle[2,i])
M2[1,3] = -sin(angle[2,i])
M2[3,1] = sin(angle[2,i])
M2[2,2] = Float64(1.0)
M3[2,2] = cos(angle[3,i])
M3[3,3] = cos(angle[3,i])
M3[2,3] = -sin(angle[3,i])
M3[3,2] = sin(angle[3,i])
M3[1,1] = Float64(1.0)
rmat[:,:,i] = M1*M2*M3
return nothing
end
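# A quick sanity check (illustrative, not part of the package API): the
# composed matrix should be a proper rotation, i.e. R'R = I and det(R) = 1.
#
# using LinearAlgebra
# rmat = zeros(3, 3, 1)
# angle = reshape([0.3, 0.1, -1.2], 3, 1)
# compute_rtridim_from_angles(1, rmat, angle)
# @assert isapprox(rmat[:, :, 1]' * rmat[:, :, 1], Matrix{Float64}(I, 3, 3), atol = 1e-10)
# @assert isapprox(det(rmat[:, :, 1]), 1.0, atol = 1e-10)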
#function sampler_beta(xdata::Array{Float64,3}, designmatrix::Array{Float64,3}, prior::NoPriorBeta, betaMCMC::Matrix{Float64}, sigmaMCMC::Matrix{Float64}, valp::Valuep, betastandMCMC::Matrix{Float64})
#end
#function sampler_beta(xdata::Array{Float64,3}, designmatrix::Array{Float64,3}, prior::Normal, betaMCMC::Matrix{Float64}, sigmaMCMC::Matrix{Float64}, valp::Valuep, betastandMCMC::Matrix{Float64})
# #standardize_reg(betastandMCMC, valp)
#end
#### #### #### #### #### ####
#### BETA
#### #### #### #### #### ####
function sampler_mean(
datamodel::SSDataType{R,RL,VP,RS,IC},
data_mcmc::MCMCData,
mean_model::TypeModelMean,
mean_mcmc::MCMCTypeModelMean,
covariance_model::TypeModelCoVariance,
covariance_mcmc::MCMCTypeModelCoVariance
) where {R<:Reflection,RL<:RemoveLocation,VP<:ValueP,RS<:RemoveSize,IC<:IdentifiabilityConstraint}
error("sampler_mean")
end
function sampler_mean(
datamodel::SSDataType{R,RL,VP,RS,IC},
data_mcmc::MCMCData,
mean_model::TypeModelMean,
mean_mcmc::MCMCLinearMean{<:NoPriorBeta},
covariance_model::TypeModelCoVariance,
covariance_mcmc::MCMCTypeModelCoVariance
) where {R<:Reflection,RL<:RemoveLocation,VP<:ValueP,RS<:RemoveSize,IC<:IdentifiabilityConstraint}
    # NoPriorBeta: the regressive coefficients are kept fixed, so nothing is sampled
end
function sampler_mean(
datamodel::SSDataType{R,RL,VP,DoNotRemoveSize,IC},
data_mcmc::MCMCNormalDataKeepSize{R,RL,VP},
mean_model::LinearMean,
mean_mcmc::MCMCLinearMean{<:Normal},
covariance_model::GeneralCoVarianceIndependentDimension,
covariance_mcmc::MCMCGeneralCoVarianceIndependentDimension
) where {R<:Reflection,VP<:ValueP,IC<:IdentifiabilityConstraint,RL<:DoRemoveLocation}
betaMCMC = mean_mcmc.beta_mcmc
invMat = covariance_mcmc.invcovariance_mcmc
prior = mean_mcmc.prior_beta
designmatrix = mean_model.designmatrix
yrdata = data_mcmc.yr_mcmc
kd::Int64 = size(betaMCMC,1)
p::Int64 = datamodel.p
n::Int64 = datamodel.n
Vp::Matrix{Float64} = zeros(Float64, kd, kd)
Mp::Vector{Float64} = zeros(Float64, kd)
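    # Conjugate Normal update, one dimension at a time: the posterior precision
    # is the prior precision plus sum_j Z_j' Sigma^-1 Z_j, and the posterior
    # mean is Vp * (prior term + sum_j Z_j' Sigma^-1 y_j).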
for ip = 1:p
Vp[:, :] = Diagonal([1.0 / params(prior)[2]^2 for i = 1:kd])
Mp[:] .= params(prior)[1] / params(prior)[2]^2
for j = 1:n
Vp[:, :] += transpose(designmatrix[:, :, j]) * invMat * designmatrix[:, :, j]
            Mp[:] += transpose(designmatrix[:, :, j]) * invMat * yrdata[:,ip,j]
end
Vp = Symmetric(inv(Vp))
Mp = Vp*Mp
betaMCMC[:,ip] = rand(MvNormal(Mp,Vp))
end
for ip = 1:p
for j = 1:n
mean_mcmc.mean_mcmc[:, ip, j] = designmatrix[:, :, j] * betaMCMC[:, ip]
end
end
end
#function sampler_beta(xdata::Array{Float64,3}, designmatrix::Array{Float64,3}, prior::NoPriorBeta, betaMCMC::Matrix{Float64}, sigmaMCMC::Matrix{Float64}, valp::Valuep, betastandMCMC::Matrix{Float64})
#end
#function sampler_beta(xdata::Array{Float64,3}, designmatrix::Array{Float64,3}, prior::Normal, betaMCMC::Matrix{Float64}, sigmaMCMC::Matrix{Float64}, valp::Valuep, betastandMCMC::Matrix{Float64})
# #standardize_reg(betastandMCMC, valp)
#end
#### #### #### #### #### ####
#### Distributions
#### #### #### #### #### ####
struct NoPriorBeta <: ContinuousUnivariateDistribution
function NoPriorBeta()
new()
end
end
struct NoPriorSigma <: ContinuousMatrixDistribution
function NoPriorSigma()
new()
end
end
#### #### #### #### #### ####
#### Sample R
#### #### #### #### #### ####
abstract type SampleRmat end
struct DoSampleRmat <: SampleRmat
function DoSampleRmat()
new()
end
end
struct DoNotSampleRmat <: SampleRmat
function DoNotSampleRmat()
new()
end
end
#### #### #### #### #### ####
#### dimension
#### #### #### #### #### ####
abstract type ValueP end
struct ValueP2 <: ValueP
p::Int64
function ValueP2()
new(2)
end
end
struct ValueP3 <: ValueP
p::Int64
function ValueP3()
new(3)
end
end
#### #### #### #### #### ####
#### Reflections
#### #### #### #### #### ####
abstract type Reflection end
struct KeepReflection <: Reflection
function KeepReflection()
new()
end
end
struct DoNotKeepReflection <: Reflection
function DoNotKeepReflection()
new()
end
end
#### #### #### #### #### ####
#### Data Trans
#### #### #### #### #### ####
abstract type RemoveLocation end
abstract type DoRemoveLocation <: RemoveLocation end
struct DoNotRemoveLocation <: RemoveLocation
function DoNotRemoveLocation()
new()
end
end
struct RemoveLocationHelmert{VP<:ValueP} <: DoRemoveLocation
matrix::Array{Float64}
valp::VP
function RemoveLocationHelmert(k::Int64, val::VP) where {VP<:ValueP}
H::Matrix{Float64} = zeros(Float64, k + 1, k + 1)
        for i = 2:(k+1) # row 1 is dropped below; starting at i = 2 avoids a 0/0 NaN in H[1, 1]
            H[i, i] = (i - 1) / (i * (i - 1))^0.5
        end
for i = 2:(k+1)
for j = 1:(i-1)
H[i, j] = -1.0 / (i * (i - 1))^0.5
end
end
H = H[2:end, :]
new{VP}(H, val)
end
end
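# For k = 2 (three landmarks) the sub-Helmert matrix built above is, up to
# rounding, the familiar 2 x 3 contrast matrix with zero row sums
#
#   -1/sqrt(2)   1/sqrt(2)   0
#   -1/sqrt(6)  -1/sqrt(6)   2/sqrt(6)
#
# so premultiplying a configuration by it removes the location (centroid).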
#struct DoNotRemoveLocation{VP<:ValueP} <: RemoveLocation
# matrix::Array{Float64}
# valp::VP
# function DoNotRemoveLocation(k::Int64, val::VP) where {VP<:ValueP}
# H::Matrix{Float64} = zeros(Float64, k, k)
# for i = 1:k
# H[i, i] = 1.0
# end
# new{VP}(H, val)
# end
#end
abstract type RemoveSize end
abstract type DoRemoveSize <: RemoveSize end
struct DoNotRemoveSize <: RemoveSize
function DoNotRemoveSize()
new()
end
end
struct RemoveSizeNorm <: DoRemoveSize
function RemoveSizeNorm()
new()
end
end
#struct DoNotRemoveSize <: RemoveSize
# function DoNotRemoveSize()
# new()
# end
#end
##### #### #### #### #### ####
##### Constraints
##### #### #### #### #### ####
abstract type IdentifiabilityConstraint end
struct GramSchmidtMean <: IdentifiabilityConstraint
function GramSchmidtMean()
new()
end
end
##### #### #### #### #### ####
##### Data
##### #### #### #### #### ####
abstract type GeneralDataType end
struct SSDataType{R<:Reflection,RL<:RemoveLocation,VP<:ValueP,RS<:RemoveSize,IC<:IdentifiabilityConstraint} <: GeneralDataType
landmarks::Array{Float64,3}
nolocdata::Array{Float64,3}
ssdata::Array{Float64,3}
sdata::Array{Float64,3}
ssdata_rotmat::Array{Float64,3}
reflection::R
removelocation::RL
valp::VP
removesize::RS
identifiability::IC
n::Int64
k::Int64
p::Int64
function SSDataType{R,RL,VP,RS,IC}(landmarks::Array{Float64}, nolocdata::Array{Float64,3}, ssdata::Array{Float64,3}, sdata::Array{Float64,3}, ssdata_rotmat::Array{Float64,3}, reflection::R, removelocation::RL, valp::VP, removesize::RS, identifiability::IC, n::Int64, k::Int64, p::Int64) where {R<:Reflection,RL<:RemoveLocation,VP<:ValueP,RS<:RemoveSize,IC<:IdentifiabilityConstraint}
new{R,RL,VP,RS,IC}(landmarks, nolocdata, ssdata, sdata, ssdata_rotmat, reflection, removelocation, valp, removesize, identifiability, n, k, p)
end
end
function SSDataType(landmarks::Array{Float64}, reflection::KeepReflection, removelocation::RemoveLocationHelmert, valp::P, removesize::DoNotRemoveSize, identification::IC) where {IC<:IdentifiabilityConstraint, P<:ValueP}
k::Int64 = size(landmarks, 1) - 1
n::Int64 = size(landmarks, 3)
p::Int64 = size(landmarks, 2)
nolocdata = remove_location(landmarks, removelocation)
ssdata, ssdata_rotmat = compute_sizeshape_fromnolocdata(nolocdata, reflection, valp)
# FIXME: sdata should be the shape data
sdata = deepcopy(ssdata)
SSDataType{KeepReflection,RemoveLocationHelmert,P,DoNotRemoveSize,IC}(landmarks, nolocdata, ssdata, sdata, ssdata_rotmat, reflection, removelocation, valp, removesize, identification, n, k, p)
end
abstract type MCMCData end
struct MCMCNormalDataKeepSize{R<:Reflection,RL<:RemoveLocation,VP<:ValueP,D<:SampleRmat} <: MCMCData
rmat_mcmc::Array{Float64,3}
rmat_prop::Array{Float64,3}
yr_mcmc::Array{Float64,3}
yr_prop::Array{Float64,3}
angles_mcmc::Array{Float64,2}
angles_prop::Array{Float64,2}
rmat_dosample::D
sdprop_adapt::Array{Float64,2}
accratio_adapt::Array{Float64,2}
molt_adapt::Array{Float64,2}
iter_adapt::Array{Int64,2}
init_adapt::Array{Int64,2}
end_adapt::Array{Int64,2}
a_adapt::Array{Float64,2}
b_adapt::Array{Float64,2}
sumalpha::Array{Float64,2}
function MCMCNormalDataKeepSize{R,RL,VP,D}(rmat_mcmc::Array{Float64,3}, rmat_prop::Array{Float64,3}, yr_mcmc::Array{Float64,3}, yr_prop::Array{Float64,3}, datat::SSDataType{R,RL,VP,DoNotRemoveSize,IC}, angles_mcmc::Array{Float64,2}, angles_prop::Array{Float64,2},rmat_dosample::D, sdprop_adapt::Array{Float64,2}, accratio_adapt::Array{Float64,2}, molt_adapt::Array{Float64,2}, iter_adapt::Array{Int64,2}, init_adapt::Array{Int64,2}, end_adapt::Array{Int64,2}, a_adapt::Array{Float64,2}, b_adapt::Array{Float64,2}) where {R<:Reflection,RL<:RemoveLocation,VP<:ValueP,IC<:IdentifiabilityConstraint,D<:SampleRmat}
sumalpha = deepcopy(sdprop_adapt)
sumalpha .= 0.0
new{R,RL,VP,D}(rmat_mcmc, rmat_prop, yr_mcmc, yr_prop, angles_mcmc,angles_prop,rmat_dosample, sdprop_adapt, accratio_adapt, molt_adapt, iter_adapt, init_adapt, end_adapt, a_adapt, b_adapt, sumalpha)
end
end
function MCMCdata(rmat_init::Array{Float64,3}, datat::SSDataType{KeepReflection,RL,VP,DoNotRemoveSize,IC}, rmat_dosample::D; sdprop_adapt_init::Float64 = 0.1, accratio_adapt_init::Float64 = 0.234, molt_adapt_init::Float64 = 0.4, iter_adapt_init::Int64 = 50, init_adapt_init::Int64 = 100, end_adapt_init::Int64 = 1000, a_adapt_init::Float64 = 100.0, b_adapt_init::Float64 = 200.0) where {RL<:RemoveLocation,VP<:ValueP,IC<:IdentifiabilityConstraint,D<:SampleRmat}
if size(rmat_init,1)>0
@assert size(rmat_init,1) == datat.p
@assert size(rmat_init,2) == datat.p
@assert size(rmat_init,3) == datat.n
rmat_mcmc = deepcopy(rmat_init)
rmat_prop = deepcopy(rmat_init)
else
rmat_mcmc = reshape(vcat([Matrix{Float64}(I, datat.p, datat.p)[:] for i = 1:datat.n]...), (datat.p, datat.p, datat.n))
rmat_prop = reshape(vcat([Matrix{Float64}(I, datat.p, datat.p)[:] for i = 1:datat.n]...), (datat.p, datat.p, datat.n))
end
if datat.p == 2
dimangle = 1
elseif datat.p == 3
dimangle = 3
    else
        error("datat.p must be 2 or 3")
    end
angles_mcmc = zeros(Float64,dimangle,datat.n)
for i = 1:datat.n
compute_angle_from_rmat(i,angles_mcmc, rmat_mcmc,datat.valp, datat.reflection)
end
angles_prop = deepcopy(angles_mcmc)
yr_mcmc = deepcopy(datat.nolocdata)
yr_prop = deepcopy(datat.nolocdata)
compute_yrdata(yr_mcmc, datat.ssdata, rmat_mcmc)
compute_yrdata(yr_prop, datat.ssdata, rmat_prop)
sdprop_adapt::Array{Float64,2} = sdprop_adapt_init .* ones(Float64,dimangle,datat.n)
accratio_adapt::Array{Float64,2} = accratio_adapt_init .* ones(Float64,dimangle,datat.n)
molt_adapt::Array{Float64,2} = molt_adapt_init .* ones(Float64,dimangle,datat.n)
iter_adapt::Array{Int64,2} = iter_adapt_init .* ones(Int64,dimangle,datat.n)
init_adapt::Array{Int64,2} = init_adapt_init .* ones(Int64,dimangle,datat.n)
end_adapt::Array{Int64,2} = end_adapt_init .* ones(Int64,dimangle,datat.n)
a_adapt::Array{Float64,2} = a_adapt_init .* ones(Float64,dimangle,datat.n)
b_adapt::Array{Float64,2} = b_adapt_init .* ones(Float64,dimangle,datat.n)
MCMCNormalDataKeepSize{KeepReflection,RL,VP,D}(rmat_mcmc, rmat_prop, yr_mcmc, yr_prop, datat, angles_mcmc, angles_prop,rmat_dosample, sdprop_adapt, accratio_adapt, molt_adapt, iter_adapt, init_adapt, end_adapt, a_adapt, b_adapt)
end
#function MCMCMean(meanmodel::LinearMean, valp::VP, beta_init::Matrix{Float64}, prior::D, datat::SSDataType{R,RL,VP,DoNotRemoveSize,IC})::MCMCLinearMean{D} where {D<:ContinuousUnivariateDistribution,R<:Reflection,RL<:RemoveLocation,VP<:ValueP,IC<:IdentifiabilityConstraint}
# return MCMCLinearMean(beta_mcmc, beta_prop, mean_mcmc, mean_prop, prior)
#end
##### #### #### #### #### ####
##### data
##### #### #### #### #### ####
##### #### #### #### #### ####
##### Mean
##### #### #### #### #### ####
abstract type TypeModelMean end
struct LinearMean{IC<:IdentifiabilityConstraint} <: TypeModelMean
designmatrix::Array{Float64,3} #
d::Int64
colnames_modelmatrix::Vector{String} # small
model_matrix::Matrix{Float64} # small
identifiability::IC
function LinearMean{IC}(designmatrix::Array{Float64,3}, d::Int64, colnames_modelmatrix::Vector{String}, designmatrix_v2::Matrix{Float64}, identident::IC) where {IC<:IdentifiabilityConstraint}
new{IC}(designmatrix, d, colnames_modelmatrix, designmatrix_v2, identident)
end
end
function LinearMean(fm::FormulaTerm, covariates::DataFrame, datat::SSDataType{R,RL,VP,DoNotRemoveSize,IC}, identident::IC) where {R<:Reflection,RL<:RemoveLocation,VP<:ValueP,IC<:IdentifiabilityConstraint}
designmatrix, d, colnames_modelmatrix, designmatrix_v2 = create_designmatrix(fm, covariates, datat.k)
LinearMean{IC}(designmatrix, d, colnames_modelmatrix, designmatrix_v2, identident)
end
abstract type MCMCTypeModelMean end
struct MCMCLinearMean{D<:ContinuousUnivariateDistribution} <: MCMCTypeModelMean
beta_mcmc::Matrix{Float64}
beta_prop::Matrix{Float64}
mean_mcmc::Array{Float64,3}
mean_prop::Array{Float64,3}
prior_beta::D
function MCMCLinearMean{D}(beta_mcmc::Matrix{Float64}, beta_prop::Matrix{Float64}, mean_mcmc::Array{Float64,3}, mean_prop::Array{Float64,3}, prior::D) where {D<:ContinuousUnivariateDistribution}
new{D}(beta_mcmc, beta_prop, mean_mcmc, mean_prop, prior)
end
end
function MCMCMean(meanmodel::LinearMean, valp::VP, beta_init::Matrix{Float64}, prior::D, datat::SSDataType{R,RL,VP,DoNotRemoveSize,IC})::MCMCLinearMean{D} where {D<:ContinuousUnivariateDistribution,R<:Reflection,RL<:RemoveLocation,VP<:ValueP,IC<:IdentifiabilityConstraint}
d::Int64 = meanmodel.d
k::Int64 = size(meanmodel.designmatrix, 1)
p::Int64 = valp.p
n::Int64 = size(meanmodel.model_matrix, 1)
if typeof(prior) <: Normal
elseif typeof(prior) <: NoPriorBeta
else
error("beta_prior should be Normal or NoPriorBeta")
end
if size(beta_init, 1) > 0
@assert size(beta_init, 1) >= k * d "size(beta_init,1) must be at least " * string(k * d)
@assert size(beta_init, 2) >= p "size(beta_init,2) must be at least " * string(p)
else
beta_init = zeros(Float64, (k * d), p)
end
beta_mcmc = deepcopy(beta_init[1:(k*d), 1:p])
beta_prop = deepcopy(beta_init[1:(k*d), 1:p])
mean_mcmc = zeros(Float64, k, p, n)
for ip = 1:p
for j = 1:n
mean_mcmc[:, ip, j] = meanmodel.designmatrix[:, :, j] * beta_mcmc[:, ip]
end
end
mean_prop = deepcopy(mean_mcmc)
return MCMCLinearMean{D}(beta_mcmc, beta_prop, mean_mcmc, mean_prop, prior)
end
###### #### #### #### #### ####
###### Covariance
###### #### #### #### #### ####
abstract type TypeModelCoVariance end
struct GeneralCoVarianceIndependentDimension{IC<:IdentifiabilityConstraint} <: TypeModelCoVariance
identifiability::IC
function GeneralCoVarianceIndependentDimension{IC}(ident::IC) where {IC<:IdentifiabilityConstraint}
new{IC}(ident)
end
end
function GeneralCoVarianceIndependentDimension(ident::IC, datat::SSDataType{R,RL,VP,DoNotRemoveSize,IC}) where {R<:Reflection,RL<:RemoveLocation,VP<:ValueP,IC<:IdentifiabilityConstraint}
return GeneralCoVarianceIndependentDimension{IC}(ident)
end
abstract type MCMCTypeModelCoVariance end
struct MCMCGeneralCoVarianceIndependentDimension{D<:ContinuousMatrixDistribution} <: MCMCTypeModelCoVariance
covariance_mcmc::Symmetric{Float64,Matrix{Float64}}
covariance_prop::Symmetric{Float64,Matrix{Float64}}
invcovariance_mcmc::Symmetric{Float64,Matrix{Float64}}
invcovariance_prop::Symmetric{Float64,Matrix{Float64}}
logdeterminant_mcmc::Vector{Float64}
logdeterminant_prop::Vector{Float64}
prior_covariance::D
function MCMCGeneralCoVarianceIndependentDimension{D}(covariance_mcmc::Symmetric{Float64,Matrix{Float64}}, covariance_prop::Symmetric{Float64,Matrix{Float64}}, invcovariance_mcmc::Symmetric{Float64,Matrix{Float64}}, invcovariance_prop::Symmetric{Float64,Matrix{Float64}}, logdeterminant_mcmc::Vector{Float64}, logdeterminant_prop::Vector{Float64}, prior_covariance::D) where {D<:ContinuousMatrixDistribution}
new{D}(covariance_mcmc, covariance_prop, invcovariance_mcmc, invcovariance_prop, logdeterminant_mcmc, logdeterminant_prop, prior_covariance)
end
end
function MCMCCovariance(covmodel::GeneralCoVarianceIndependentDimension, sigma_init::Symmetric{Float64,Matrix{Float64}}, prior::D, datat::SSDataType{R,RL,VP,DoNotRemoveSize,IC})::MCMCGeneralCoVarianceIndependentDimension{D} where {D<:ContinuousMatrixDistribution,R<:Reflection,RL<:RemoveLocation,VP<:ValueP,IC<:IdentifiabilityConstraint}
k::Int64 = datat.k
if size(sigma_init.data, 1) > 0
covariance_mcmc = Symmetric(deepcopy(sigma_init.data))
else
covariance_mcmc = Symmetric(Matrix{Float64}(I, k, k))
end
covariance_prop = deepcopy(covariance_mcmc)
cc = cholesky(covariance_mcmc)
invcovariance_mcmc = Symmetric(inv(cc))
logdeterminant_mcmc = [log(det(cc))]
invcovariance_prop = Symmetric(deepcopy(invcovariance_mcmc))
logdeterminant_prop = deepcopy(logdeterminant_mcmc)
return MCMCGeneralCoVarianceIndependentDimension{D}(covariance_mcmc, covariance_prop, invcovariance_mcmc, invcovariance_prop, logdeterminant_mcmc, logdeterminant_prop, prior)
end
###### #### #### #### #### ####
###### mcmcOUT
###### #### #### #### #### ####
abstract type MCMCObjectOUT end
struct generalMCMCObjectOUT <:MCMCObjectOUT
nonidentbeta::Array{Float64,3}
nonidentsigma::Array{Float64,3}
nonidentrmat::Array{Float64,4}
nonidentangle::Array{Float64,3}
identbeta::Array{Float64,3}
identsigma::Array{Float64,3}
identrmat::Array{Float64,4}
identangle::Array{Float64,3}
beta::DataFrame
sigma::DataFrame
rmat::DataFrame
angle::DataFrame
postsample::Int64
function generalMCMCObjectOUT(nonidentbeta::Array{Float64,3} , nonidentsigma::Array{Float64,3}, nonidentrmat::Array{Float64,4}, nonidentangle::Array{Float64,3}, identbeta::Array{Float64,3}, identsigma::Array{Float64,3}, identrmat::Array{Float64,4}, identangle::Array{Float64,3}, beta::DataFrame, sigma::DataFrame, rmat::DataFrame, angle::DataFrame, postsample::Int64 )
new(nonidentbeta, nonidentsigma, nonidentrmat, nonidentangle, identbeta, identsigma, identrmat, identangle, beta, sigma, rmat, angle,postsample)
end
end
function create_object_output(sampletosave::Int64,mean_mcmc::MCMCLinearMean, covariance_mcmc::MCMCGeneralCoVarianceIndependentDimension, data_mcmc::MCMCNormalDataKeepSize, mean_model::LinearMean)
p::Int64 = size(mean_mcmc.beta_mcmc,2)
n::Int64 = size(data_mcmc.rmat_mcmc,3)
k::Int64 = size(covariance_mcmc.covariance_mcmc,1)
kd::Int64 = size(mean_mcmc.beta_mcmc,1)
d::Int64 = mean_model.d
pangle::Int64 = size(data_mcmc.angles_mcmc,1)
sizebetaout1::Int64 = 0
sizebetaout2::Int64 = 0
sizebetaout1 = size(mean_mcmc.beta_mcmc,1)
sizebetaout2 = p
betaOUT::Array{Float64,3} = zeros(Float64, sampletosave, sizebetaout1,sizebetaout2)
#println(size(betaOUT))
sizesigmaout1::Int64 = 0
sizesigmaout1 = size(covariance_mcmc.covariance_mcmc,1)
sigmaOUT::Array{Float64,3} = zeros(Float64, sampletosave, sizesigmaout1,sizesigmaout1)
rmatOUT::Array{Float64,4} = zeros(Float64, sampletosave, p,p,n)
angleOUT::Array{Float64,3} = zeros(Float64, sampletosave, pangle,n)
betaidentOUT = deepcopy(betaOUT)
sigmaidentOUT = deepcopy(sigmaOUT)
rmatidentOUT = deepcopy(rmatOUT)
angleidentOUT = deepcopy(angleOUT)
betaDF = DataFrame(reshape(deepcopy(betaOUT), sampletosave, k*d*p ), :auto)
rename!(betaDF,repeat(mean_model.colnames_modelmatrix,inner = k, outer= p) .* "| mark:" .* string.(repeat(1:k, outer = d*p)) .* "| dim:" .* string.(repeat(1:p, inner = d*k)))
sigmaDF = DataFrame(reshape(deepcopy(sigmaOUT), sampletosave, k*k ), :auto)
rename!(sigmaDF,"s_(".* string.(repeat(1:k, outer = k)) .* "," .* string.(repeat(1:k, inner = k)) .* ")" )
rmatDF = DataFrame(reshape(deepcopy(rmatOUT), sampletosave, p*p*n ), :auto)
rename!(rmatDF,"R_".* string.(repeat(1:n,inner=p*p)) .* ",(".*repeat(string.(repeat(1:p, outer = p)) .* "," .* string.(repeat(1:p, inner = p)),outer = n) .* ")" )
#println("Pangle", pangle)
#println(size(angleOUT))
angleDF = DataFrame(reshape(deepcopy(angleOUT), sampletosave, pangle*n ), :auto)
rename!(angleDF ,"theta_".* string.(repeat(1:n,inner=pangle)) .* ",(" .* string.(repeat(1:pangle,outer=n)) .* ")")
generalMCMCObjectOUT(betaOUT, sigmaOUT,rmatOUT,angleOUT,betaidentOUT,sigmaidentOUT,rmatidentOUT,angleidentOUT, betaDF,sigmaDF,rmatDF,angleDF, sampletosave )
end
function copy_parameters_out(imcmc::Int64, out::generalMCMCObjectOUT, mean_mcmc::MCMCLinearMean, datamodel::SSDataType{<:KeepReflection, <:RemoveLocationHelmert, <:ValueP, <:DoNotRemoveSize,<:GramSchmidtMean}, covariance_mcmc::MCMCGeneralCoVarianceIndependentDimension, data_mcmc::MCMCNormalDataKeepSize)
out.nonidentbeta[imcmc,:,:] = mean_mcmc.beta_mcmc[:,:]
out.nonidentsigma[imcmc,:,:] = covariance_mcmc.covariance_mcmc[:,:]
out.nonidentrmat[imcmc,:,:,:] = data_mcmc.rmat_mcmc[:,:,:]
out.nonidentangle[imcmc,:,:] = data_mcmc.angles_mcmc[:,:]
gammamat = standardize_reg_computegamma(mean_mcmc.beta_mcmc, datamodel.valp,datamodel.identifiability)
out.identbeta[imcmc,:,:] = mean_mcmc.beta_mcmc*gammamat
out.identsigma[imcmc,:,:] = covariance_mcmc.covariance_mcmc[:,:]
app_rot = deepcopy(data_mcmc.rmat_mcmc)
app_angle = deepcopy(data_mcmc.angles_mcmc)
for i = 1:size(data_mcmc.rmat_mcmc,3)
app_rot[:,:,i] = transpose(gammamat)*app_rot[:,:,i]
compute_angle_from_rmat(i,app_angle, app_rot, datamodel.valp, datamodel.reflection)
#@assert isapprox(det(app_rot[:,:,i]),1.0) "ss" * string(det(app_rot[:,:,i]))
end
#out.identbeta[imcmc,:,:] = mean_mcmc.beta_mcmc
#out.identsigma[imcmc,:,:] = covariance_mcmc.covariance_mcmc[:,:]
#app_rot = deepcopy(data_mcmc.rmat_mcmc)
#app_angle = deepcopy(data_mcmc.angles_mcmc)
#for i = 1:size(data_mcmc.rmat_mcmc,3)
# app_rot[:,:,i] = app_rot[:,:,i]
# compute_angle_from_rmat(i,app_angle, app_rot, datamodel.valp, datamodel.reflection)
# #@assert isapprox(det(app_rot[:,:,i]),1.0) "ss" * string(det(app_rot[:,:,i]))
#end
out.identrmat[imcmc,:,:,:] = app_rot[:,:,:]
out.identangle[imcmc,:,:] = app_angle[:,:]
out.beta[imcmc,:] = out.identbeta[imcmc,:,:][:]
out.sigma[imcmc,:] = out.identsigma[imcmc,:,:][:]
out.rmat[imcmc,:] = out.identrmat[imcmc,:,:,:][:]
out.angle[imcmc,:] = out.identangle[imcmc,:,:][:]
end
"""
SizeAndShapeWithReflectionMCMC(
landmarks::Array{Float64,3},
fm::FormulaTerm,
covariates::DataFrame,
iterations::NamedTuple{(:iter, :burnin, :thin),Tuple{Int64,Int64,Int64}},
betaprior::ContinuousUnivariateDistribution,
sigmaprior::ContinuousMatrixDistribution
)
Posterior samples from the size-and-shape model - in this version, only two- and three-dimensional data with reflection information are allowed. \\
The function returns an object of type `SizeAndShapeModelOutput`.
# Arguments
Let
* `n` be the number of objects;
* `k+1` be the number of recorded landmark for each object
* `p` be the dimension of each landmark (only p=2 or p=3)
The arguments of the functions are
* `landmarks`: a three-dimensional `Array` of dimension ``(k+1)\\times p \\times n`` with the data;
* `fm`: a `formula`, where on the left-hand side there must be 1 and on the right-hand side there is the actual regressive formula - an intercept is needed;
* `covariates`: a `DataFrame` of covariates. The formula `fm` searches for the covariates in the `DataFrame` column names;
* `iterations`: a `NamedTuple` with `iter`, `burnin`, and `thin` values of the MCMC algorithm
* `betaprior`: a `Normal` distribution that is used as prior for all regressive coefficients
* `sigmaprior`: an `InverseWishart` distribution that is used as prior for the covariance matrix.
"""
function SizeAndShapeWithReflectionMCMC(
landmarks::Array{Float64,3},
fm::FormulaTerm,
covariates::DataFrame, #
iterations::NamedTuple{(:iter, :burnin, :thin),Tuple{Int64,Int64,Int64}},
betaprior::ContinuousUnivariateDistribution,
sigmaprior::ContinuousMatrixDistribution
)
return generalSizeAndShapeMCMC(;
landmarks = landmarks,
fm = fm,
covariates = covariates,
iterations = iterations,
betaprior = betaprior,
sigmaprior = sigmaprior,
#beta_init::Matrix{Float64} = zeros(Float64,0,0),
#sigma_init::Symmetric{Float64,Matrix{Float64}} = Symmetric(zeros(Float64,0,0)),
#rmat_init::Array{Float64,3} = zeros(Float64,0,0,0),
#dormat::Bool,
identification = "gramschmidt",
meanmodel = "linear",
covariancemodel = "general_nocrosscorrelation",
keepreflection = "yes",
removelocation= "helmert",
removesize = "no",
rmatdosample = true,
verbose = false
)
end
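# A hedged usage sketch (toy data; sizes, names and priors are illustrative):
#
# using DataFrames, Distributions, LinearAlgebra, StatsModels
# landmarks = randn(6, 2, 30)             # k+1 = 6 landmarks, p = 2, n = 30 shapes
# covs = DataFrame(group = rand(["a", "b"], 30))
# out = SizeAndShapeWithReflectionMCMC(
#     landmarks,
#     @formula(1 ~ 1 + group),
#     covs,
#     (iter = 2000, burnin = 500, thin = 2),
#     Normal(0.0, 100.0),
#     InverseWishart(8, 5.0 * Matrix{Float64}(I, 5, 5)),  # k = 5
# )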
#"""
# SizeAndShapeMCMC(;
# dataset::Array{Float64,3},
# fm::FormulaTerm = @formula(1~ 1),
# covariates::DataFrame,
# iterations::NamedTuple{(:iter, :burnin, :thin),Tuple{Int64,Int64,Int64}} =(
# iter=1000,
# burnin=200,
# thin=2
# ),
# betaprior::ContinuousUnivariateDistribution = Normal(0.0, 10000.0),
# sigmaprior::ContinuousMatrixDistribution,
# beta_init::Matrix{Float64},
# sigma_init::Symmetric{Float64,Matrix{Float64}},
# rmat_init::Array{Float64,3},
# reflection::Bool = true
# )
#Posterior samples from the size-and-shape model - in this version, only two-dimensional data with reflection information are allowed.
#The functions returns 2 Dataframes, with the posterior samples of the regressive coefficients and elements of the covariance matrix.
## Arguments
#Let
#* n be the number of shapes;
#* k be the number of landmarks (on the pre-form matrix);
#* p be the dimension of each landmark (only p=2 is implemented)
#* d be the number of covariates to use + 1 (or number of regressive coefficients for each landmark-dimension, intercept included);
#The arguments are
#- `dataset::Array{Float64,3}`: Array with the data - dimension (k,p,n) of size-and-shape data. Use the function `compute_ss_from_pre` to obtain the size-and-shape data from pre-forms
#- `fm::FormulaTerm = @formula(1~ 1)`: a formula that specifies the model - the left-and size should be 1
#- `covariates::DataFrame`: a DataFrame containing the covariates - dimension (n,d). The names used in `fm` must be column names of `covariates`. Only Real And Factor covariates are allowed. The numeric column are standardized internally.
#- `iterations::NamedTuple{(:iter, :burnin, :thin),Tuple{Int64,Int64,Int64}}`: values of the iterations (iter), thin and burnin of the MCMC algorithm
#- `betaprior::ContinuousUnivariateDistribution`: The prior on the regressive coefficients - only a Normal distribution is allowed
#- `sigmaprior::ContinuousMatrixDistribution`: The prior on the covariance matrix - only an Inverse Wishart is allowed
#- `beta_init::Matrix{Float64}`: initial values for the regressive coefficients - dimension (k*d,p)
#- `sigma_init::Symmetric{Float64,Matrix{Float64}}`: initial values for the covariance matrix - dimension (k,k) (it must be a valid covariance matrix)
#- `rmat_init::Array{Float64,3}`: initial values for the rotation matrices - dimension (p,p,n) (each [1:p, 1:p, i], i = 1,2,...,n, must be a valid rotation matrix)
#- `reflection::Bool`: true for a model with reflection information.
#"""
#function SizeAndShapeMCMC(;
# dataset_init::Array{Float64,3},
# fm::FormulaTerm = @formula(1~ 1),
# covariates::DataFrame, #
# iterations::NamedTuple{(:iter, :burnin, :thin),Tuple{Int64,Int64,Int64}} =(
# iter=1000,
# burnin=200,
# thin=2
# ),
# betaprior::ContinuousUnivariateDistribution = Normal(0.0, 10000.0),
# sigmaprior::ContinuousMatrixDistribution,
# beta_init::Matrix{Float64},
# sigma_init::Symmetric{Float64,Matrix{Float64}},
# rmat_init::Array{Float64,3},
# reflection::Bool = true,
# is_data_sizeandshape::Bool = true
#)
# p::Int64 = size(dataset, 2)
# if p != 2
# error("the current implementation allows only `size(dataset, 2) = 2 (2-dimensional data)`")
# end
# if reflection == false
# error("the current implementation allows only `reflection = true`")
# else
# mcmcout =generalSizeAndShapeMCMC(;
# dataset = dataset,
# fm = fm,
# covariates = covariates,
# iterations = iterations,
# betaprior = betaprior,
# sigmaprior = sigmaprior,
# beta_init = beta_init,
# sigma_init = sigma_init,
# rmat_init = rmat_init,
# dormat = true,
# reflection = KeepReflection(),
# sigmatype = GeneralSigma()
# )
# return mcmcout.mcmcoutputArrays.beta, mcmcout.mcmcoutputArrays.sigma
# end
#end
function compute_dessignmatrix(zmat::Matrix{Float64}, k::Int64)::Array{Float64,3}
println("compute_dessignmatrix is deprecated. Please use compute_designmatrix instead")
return compute_designmatrix(zmat, k)
end
function compute_dessignmatrix(zmat::DataFrame, k::Int64)::Array{Float64,3}
println("compute_dessignmatrix is deprecated. Please use compute_designmatrix instead")
return compute_designmatrix(zmat, k)
end
function compute_ss_from_pre(xdata::Array{Float64,3},ydata::Array{Float64,3}, keep_reflection::Bool)::Array{Float64,3}
println("compute_ss_from_pre is deprecated. Please use compute_ss_helmertz! instead")
    return compute_ss_helmertz!(xdata, ydata, keep_reflection)
end
"""
SizeAndShapeMCMC(;
dataset::Array{Float64,3},
fm::FormulaTerm = @formula(1~ 1),
covariates::DataFrame,
iterations::NamedTuple{(:iter, :burnin, :thin),Tuple{Int64,Int64,Int64}} =(
iter=1000,
burnin=200,
thin=2
),
betaprior::ContinuousUnivariateDistribution = Normal(0.0, 10000.0),
sigmaprior::ContinuousMatrixDistribution,
beta_init::Matrix{Float64},
sigma_init::Symmetric{Float64,Matrix{Float64}},
rmat_init::Array{Float64,3},
reflection::Bool = true
)
Posterior samples from the size-and-shape model - in this version, only two-dimensional data with reflection information are allowed.
The function returns 2 DataFrames, with the posterior samples of the regressive coefficients and elements of the covariance matrix.
# Arguments
Let
* n be the number of shapes;
* k be the number of landmarks (on the pre-form matrix);
* p be the dimension of each landmark (only p=2 is implemented)
* d be the number of covariates to use + 1 (or number of regressive coefficients for each landmark-dimension, intercept included);
The arguments are
- `dataset::Array{Float64,3}`: Array with the data - dimension (k,p,n) of size-and-shape data. Use the function `compute_ss_from_pre` to obtain the size-and-shape data from pre-forms
- `fm::FormulaTerm = @formula(1~ 1)`: a formula that specifies the model - the left-hand side should be 1
- `covariates::DataFrame`: a DataFrame containing the covariates - dimension (n,d). The names used in `fm` must be column names of `covariates`. Only Real and Factor covariates are allowed. The numeric columns are standardized internally.
- `iterations::NamedTuple{(:iter, :burnin, :thin),Tuple{Int64,Int64,Int64}}`: values of the iterations (iter), thin and burnin of the MCMC algorithm
- `betaprior::ContinuousUnivariateDistribution`: The prior on the regressive coefficients - only a Normal distribution is allowed
- `sigmaprior::ContinuousMatrixDistribution`: The prior on the covariance matrix - only an Inverse Wishart is allowed
- `beta_init::Matrix{Float64}`: initial values for the regressive coefficients - dimension (k*d,p)
- `sigma_init::Symmetric{Float64,Matrix{Float64}}`: initial values for the covariance matrix - dimension (k,k) (it must be a valid covariance matrix)
- `rmat_init::Array{Float64,3}`: initial values for the rotation matrices - dimension (p,p,n) (each [1:p, 1:p, i], i = 1,2,...,n, must be a valid rotation matrix)
- `reflection::Bool`: true for a model with reflection information.
"""
function SizeAndShapeMCMC(;
    dataset::Array{Float64,3},
fm::FormulaTerm = @formula(1~ 1),
covariates::DataFrame, #
iterations::NamedTuple{(:iter, :burnin, :thin),Tuple{Int64,Int64,Int64}} =(
iter=1000,
burnin=200,
thin=2
),
betaprior::ContinuousUnivariateDistribution = Normal(0.0, 10000.0),
sigmaprior::ContinuousMatrixDistribution,
beta_init::Matrix{Float64},
sigma_init::Symmetric{Float64,Matrix{Float64}},
rmat_init::Array{Float64,3},
reflection::Bool = true,
is_data_sizeandshape::Bool = true
)
println("SizeAndShapeMCMC is deprecated. Please use generalSizeAndShapeMCMC")
p::Int64 = size(dataset, 2)
if p != 2
error("the current implementation allows only `size(dataset, 2) = 2 (2-dimensional data)`")
end
if reflection == false
error("the current implementation allows only `reflection = true`")
else
mcmcout =generalSizeAndShapeMCMC(;
dataset = dataset,
fm = fm,
covariates = covariates,
iterations = iterations,
betaprior = betaprior,
sigmaprior = sigmaprior,
beta_init = beta_init,
sigma_init = sigma_init,
rmat_init = rmat_init,
dormat = true,
reflection = KeepReflection(),
sigmatype = GeneralSigma()
)
return mcmcout.mcmcoutputArrays.beta, mcmcout.mcmcoutputArrays.sigma
end
end
function compute_designmatrix(zmat::Matrix{Float64}, k::Int64)::Array{Float64,3}
n, d = size(zmat)
res::Array{Float64,3} = zeros(Float64, k, k * d, n)
for i = 1:n
res[:, :, i] = kronecker(transpose(zmat[i, :]), Matrix{Float64}(I, k, k))
end
return res
end
function compute_designmatrix(zmat::DataFrame, k::Int64)::Array{Float64,3}
n, d = size(zmat)
res::Array{Float64,3} = zeros(Float64, k, k * d, n)
for i = 1:n
res[:, :, i] = kronecker(transpose([values(zmat[i, :])...]), Matrix{Float64}(I, k, k))
end
return res
end
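# Each slice of the design array is kron(z_i', I_k), so each regression
# coefficient acts on one landmark coordinate. Illustrative example with k = 2
# landmarks and d = 2 covariates (intercept plus one covariate):
#
# zmat = [1.0 0.5]
# compute_designmatrix(zmat, 2)[:, :, 1]
# # 2x4 Matrix: [1.0 0.0 0.5 0.0; 0.0 1.0 0.0 0.5]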
function compute_xdata(xdata::Array{Float64,3}, ydata::Array{Float64,3}, rmat::Array{Float64,3})
for i = axes(xdata,3)
xdata[:, :, i] = ydata[:, :, i] * transpose(rmat[:, :, i])
end
return nothing
end
function compute_ss_from_pre_p3_keep_reflection(xdata::Array{Float64,3}, ydata::Array{Float64,3}, ret::Array{Float64,3})
for i = axes(xdata, 3)
app = svd(xdata[:, :, i])
U = app.U
V = transpose(app.Vt)
if det(V) > 0
ydata[:, :, i] = U * Diagonal(app.S)
ret[:, :, i] = V
else
U[:, 3] = -1.0 * U[:, 3]
V[:, 3] = -1.0 * V[:, 3]
ydata[:, :, i] = U * Diagonal(app.S)
ret[:, :, i] = V
end
end
end
function compute_ss_from_pre_p3_nokeep_reflection(xdata::Array{Float64,3}, ydata::Array{Float64,3}, ret::Array{Float64,3})
for i = axes(xdata, 3)
app = svd(xdata[:, :, i])
ydata[:, :, i] = app.U * Diagonal(app.S)
ret[:, :, i] = transpose(app.Vt)
end
end
function compute_ss_from_pre_p2_keep_reflection(xdata::Array{Float64,3}, ydata::Array{Float64,3}, ret::Array{Float64,3})
for i = axes(xdata, 3)
app = svd(xdata[:, :, i])
U = app.U
V = transpose(app.Vt)
if sign(V[1, 2]) != sign(V[2, 1])
ydata[:, :, i] = U * Diagonal(app.S)
ret[:, :, i] = V
else
U[:,2] = -1.0*U[:,2]
V[:, 2] = -1.0 * V[:, 2]
ydata[:, :, i] = U * Diagonal(app.S)
ret[:, :, i] = V
end
end
end
function compute_ss_from_pre_p2_nokeep_reflection(xdata::Array{Float64,3}, ydata::Array{Float64,3}, ret::Array{Float64,3})
for i = axes(xdata, 3)
app = svd(xdata[:, :, i])
ydata[:, :, i] = app.U * Diagonal(app.S)
ret[:, :, i] = transpose(app.Vt)
end
end
"""
(deprecated) compute_ss(xdata::Array{Float64,3},ydata::Array{Float64,3}, keep_reflection::Bool)::Array{Float64,3}
compute_ss_from_pre!(xdata::Array{Float64,3},ydata::Array{Float64,3}, keep_reflection::Bool)::Array{Float64,3}
Given the array of configuration xdata (xdata[:,:,i] is the i-th configuration matrix), the function computes the size-and-shape data and store it in ydata.
The function returs the array of associated rotation matrix R, such that xdata = ydata*R. keep_reflection is a boolean that is used to indicate if R in O(p) (keep_reflection=false) or R in SO(p) (keep_reflection=true)
compute_ss(xdata::Array{Float64,3}, keep_reflection::Bool)::Array{Float64,3}
Given the array of configuration xdata (xdata[:,:,i] is the i-th configuration matrix), the function computes the size-and-shape data.
The function returns the size-and-shape ydata. keep_reflection is a boolean that is used to indicate if R in O(p) (keep_reflection=false) or R in SO(p) (keep_reflection=true)
"""
function compute_ss_helmertz!(xdata::Array{Float64,3},ydata::Array{Float64,3}, keep_reflection::Bool)::Array{Float64,3}
p = size(xdata, 2)
ret = zeros(Float64, p, p, size(xdata, 3))
if (p == 2) & (keep_reflection == true)
compute_ss_from_pre_p2_keep_reflection(xdata, ydata, ret)
end
if (p == 2) & (keep_reflection == false)
compute_ss_from_pre_p2_nokeep_reflection(xdata, ydata, ret)
end
if (p == 3) & (keep_reflection == true)
compute_ss_from_pre_p3_keep_reflection(xdata, ydata, ret)
end
if (p == 3) & (keep_reflection == false)
compute_ss_from_pre_p3_nokeep_reflection(xdata, ydata, ret)
end
if p >3
error("size(xdata, 2) must be 2 or 3")
end
return ret
end
function compute_ss_helmertz(xdata::Array{Float64,3}, keep_reflection::Bool)::Array{Float64,3}
ydata::Array{Float64,3} = deepcopy(xdata)
p = size(xdata, 2)
ret = zeros(Float64, p, p, size(xdata, 3))
if (p == 2) & (keep_reflection == true)
compute_ss_from_pre_p2_keep_reflection(xdata, ydata, ret)
end
if (p == 2) & (keep_reflection == false)
compute_ss_from_pre_p2_nokeep_reflection(xdata, ydata, ret)
end
if (p == 3) & (keep_reflection == true)
compute_ss_from_pre_p3_keep_reflection(xdata, ydata, ret)
end
if (p == 3) & (keep_reflection == false)
compute_ss_from_pre_p3_nokeep_reflection(xdata, ydata, ret)
end
return ydata
end
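# Usage sketch (assumed inputs): each pre-form is decomposed by SVD as X_i = U S V',
# giving the size-and-shape Y_i = U * Diagonal(S) and rotation R_i = V, with X_i = Y_i * R_i'.
#   xdata = randn(5, 2, 10)                   # 10 hypothetical 5×2 pre-forms
#   ydata = compute_ss_helmertz(xdata, true)  # size-and-shape with each R_i in SO(2)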
function compute_angle_from_rmat(i::Int64,angle::Matrix{Float64}, rmat::Array{Float64,3}, valp::Valuep2, reflection::KeepReflection)
angle[1, i] = atan(rmat[2, 1, i], rmat[1, 1, i])
return nothing
end
function compute_angle_from_rmat(i::Int64, angle::Matrix{Float64}, rmat::Array{Float64,3}, valp::Valuep3, reflection::KeepReflection)
#x convention
angle[1, i] = atan(rmat[3, 1, i], rmat[3, 2, i])
angle[2, i] = acos(rmat[3, 3, i])
angle[3, i] = -atan(rmat[1, 3, i], rmat[2, 3, i])
return nothing
end
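# Worked check for p = 2 (assuming the singleton constructors Valuep2() and KeepReflection()):
# a planar rotation R(theta) = [cos -sin; sin cos] gives atan(R[2,1], R[1,1]) = theta.
#   theta = 0.7
#   R = [cos(theta) -sin(theta); sin(theta) cos(theta)]
#   angle = zeros(1, 1)
#   compute_angle_from_rmat(1, angle, reshape(R, 2, 2, 1), Valuep2(), KeepReflection())
#   angle[1, 1] ≈ theta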
function standardize_reg_computegamma(reg::Matrix{Float64}, valp::Valuep2)
a1 = reg[1, :]
a2 = reg[2, :]
g1 = a1 / norm(a1, 2)
g2 = (a2 - (transpose(a2) * g1) * g1) / norm(a2 - (transpose(a2) * g1) * g1, 2)
gammamat = reshape([g1; g2], (2, 2))
if det(gammamat) < 0
gammamat[:, 2] = - gammamat[:, 2]
end
return gammamat
end
function standardize_reg_computegamma(reg::Matrix{Float64}, valp::Valuep3)
a1 = reg[1, :]
a2 = reg[2, :]
a3 = reg[3, :]
g1 = a1 / norm(a1, 2)
g2 = (a2 - (transpose(a2) * g1) * g1) / norm(a2 - (transpose(a2) * g1) * g1, 2)
g3 = (a3 - (transpose(a3) * g1) * g1 - (transpose(a3) * g2) * g2) / norm(a3 - (transpose(a3) * g1) * g1 - (transpose(a3) * g2) * g2, 2)
gammamat = reshape([g1; g2; g3], (3, 3))
if det(gammamat) < 0
gammamat[:, 3] = -gammamat[:, 3]
end
return gammamat
end
function standardize_reg(reg::Matrix{Float64}, valp::Valuep2)
reg[:,:] = reg * standardize_reg_computegamma(reg, valp)
end
function standardize_reg(reg::Matrix{Float64}, valp::Valuep3)
reg[:, :] = reg * standardize_reg_computegamma(reg, valp)
end
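# Identification note: the Gram-Schmidt matrix computed from the first p rows of reg is a
# rotation (orthonormal columns, determinant forced positive), so reg * gammamat leaves those
# rows lower triangular with a positive diagonal, removing the rotational non-identifiability.
# Sketch with hypothetical values:
#   reg = randn(6, 2)                # a (k*d)×p coefficient matrix
#   standardize_reg(reg, Valuep2())  # modified in place; reg[1, 2] is now ≈ 0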
"""
compute_helmertized_configuration(landmark::Array{Float64,3})::Array{Float64,3}
Given the array of landmarks (landmark[:,:,i] is the matrix of landmarks of the i-th shape), it computes and returns the helmertized configuration
"""
function compute_helmertized_configuration(landmark::Array{Float64,3})::Array{Float64,3}
# TODO: change the signs of H
k::Int64 = size(landmark,1)
H::Matrix{Float64} = zeros(Float64,k,k)
ret::Array{Float64,3} = zeros(Float64,size(landmark,1)-1,size(landmark,2),size(landmark,3))
for i = 1:k
H[i,i] = (i-1)/(i*(i-1))^0.5 # the i = 1 entry is 0/0 = NaN, but that row is dropped below
end
for i = 2:k
for j = 1:(i-1)
H[i,j] = -1.0/(i*(i-1))^0.5
end
end
H = H[2:end,:]
for i = 1:size(landmark,3)
ret[:,:,i] = H*landmark[:,:,i]
end
return ret
end
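# Worked example (k + 1 = 3 landmarks): the loop above builds the 2×3 Helmert sub-matrix
#   H = [-1/sqrt(2)   1/sqrt(2)   0.0
#        -1/sqrt(6)  -1/sqrt(6)   2/sqrt(6)]
# whose rows are orthonormal and sum to zero, so H * landmark[:, :, i] removes the location
# of each configuration (the TODO above concerns the sign convention of these rows).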
function predictmean(;beta::Array{Float64,3},
dataset::Array{Float64,3},
fm::FormulaTerm = @formula(1~ 1),
covariates::DataFrame)
k::Int64 = size(dataset, 1)
n::Int64 = size(dataset, 3)
p::Int64 = size(dataset, 2)
mean_pred = [zeros(Float64, size(beta,1), k,p) for i = 1:n];
covariates_copy = deepcopy(covariates)
for i = 1:size(covariates_copy,2)
if isa(covariates_copy[:,i], CategoricalArray)
elseif isa(covariates_copy[1,i], Real)
covariates_copy[:,i] = (covariates_copy[:,i] .- mean(covariates_copy[:,i])) ./ std(covariates_copy[:,i])
else
error("Only factors or Real variables are allowed in covariates")
end
end
designmatrix_v2_app = ModelFrame(fm, covariates_copy);
designmatrix_v2 = ModelMatrix(designmatrix_v2_app).m
#colnames_modelmatrix = coefnames(designmatrix_v2_app)
#for i = 2:size(designmatrix_v2, 2)
# designmatrix_v2[:, i] = designmatrix_v2[:, i] .- mean(designmatrix_v2[:, i])
#end
designmatrix = compute_designmatrix(designmatrix_v2, k) # dimensions k, k * d, n
for select_obs = 1:n
for i = 1:size(beta,1)
mean_pred[select_obs][i,:,:] = designmatrix[:,:,select_obs]*beta[i,:,:]
end
end
return mean_pred
end
| BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 4040 | #### #### #### #### #### ####
#### Model OUT
#### #### #### #### #### ####
abstract type outputMCMCSizeAndShape end
struct SizeAndShapeModelOutput{TR<:Reflection,TS<:SigmaType,TP<:Valuep,DB<:ContinuousUnivariateDistribution,DS<:ContinuousMatrixDistribution, FF<:FormulaTerm} <: outputMCMCSizeAndShape
mcmcoutput::NamedTuple{(:beta, :sigma, :rmat, :angle), Tuple{DataFrame, DataFrame, DataFrame, DataFrame}}
mcmcoutputArrays::NamedTuple{(:beta, :sigma, :rmat, :angle), Tuple{Array{Float64, 3}, Array{Float64, 3}, Array{Float64, 4}, Array{Float64, 3}}}
covariates::NamedTuple{(:colnames_modelmatrix, :fm, :covariates, :designmatrix_step1, :designmatrix_step2), Tuple{Vector{String}, FF, DataFrame, Matrix{Float64}, Array{Float64, 3}} }
dataset::Array{Float64,3};
modeltypes::NamedTuple{(:dormat, :reflection, :sigmatype, :betaprior, :sigmaprior, :valp), Tuple{Bool, TR, TS, DB,DS, TP} }
iterations::NamedTuple{(:iter, :burnin, :thin, :savedsamples),Tuple{Int64,Int64,Int64, Int64}};
indices::NamedTuple{(:k, :p, :n, :d),Tuple{Int64,Int64,Int64, Int64}};
function SizeAndShapeModelOutput(
mcmcoutput::NamedTuple{(:beta, :sigma, :rmat, :angle), Tuple{DataFrame, DataFrame, DataFrame, DataFrame}},
mcmcoutputArrays::NamedTuple{(:beta, :sigma, :rmat, :angle), Tuple{Array{Float64, 3}, Array{Float64, 3}, Array{Float64, 4}, Array{Float64, 3}}},
covariates::NamedTuple{(:colnames_modelmatrix, :fm, :covariates, :designmatrix_step1, :designmatrix_step2), Tuple{Vector{String}, FF, DataFrame, Matrix{Float64}, Array{Float64, 3}} },
dataset::Array{Float64,3},
modeltypes::NamedTuple{(:dormat, :reflection, :sigmatype, :betaprior, :sigmaprior, :valp), Tuple{Bool, TR, TS, DB,DS, TP} },
iterations::NamedTuple{(:iter, :burnin, :thin, :savedsamples),Tuple{Int64,Int64,Int64, Int64}},
indices::NamedTuple{(:k, :p, :n, :d),Tuple{Int64,Int64,Int64, Int64}}) where {TR<:Reflection,TS<:SigmaType,TP<:Valuep,DB<:ContinuousUnivariateDistribution,DS<:ContinuousMatrixDistribution,FF<:FormulaTerm}
new{TR,TS,TP,DB,DS, FF}(mcmcoutput, mcmcoutputArrays, covariates, dataset, modeltypes, iterations, indices)
end
end
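# Access sketch: given `out::SizeAndShapeModelOutput`, the posterior draws are stored both as
# DataFrames and as raw arrays, e.g.
#   out.mcmcoutput.beta           # DataFrame, one row per saved iteration
#   out.mcmcoutputArrays.sigma    # Array{Float64,3} of covariance draws
#   out.indices.k, out.indices.p  # number of marks and landmark dimension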
#### #### #### #### #### ####
#### FUNCTIONS
#### #### #### #### #### ####
#function create_output(beta::Array{Float64, 3},
# sigma::Array{Float64, 3},
# rmat::Array{Float64, 4},
# angle::Array{Float64, 3},
# beta_nonid::Array{Float64, 3},
# rmat_nonid::Array{Float64, 4},
# angle_nonid::Array{Float64, 3},
# colnames_modelmatrix::Vector{String},
# fm::FormulaTerm,
# betaprior::ContinuousUnivariateDistribution,
# sigmaprior::ContinuousMatrixDistribution,
# k::Int64,
# p::Int64,
# n::Int64,
# d::Int64,
# dormat::Bool,
# reflection::Reflection,
# sigmatype::SigmaType,
# dataset::Array{Float64,3},
# covariates::DataFrame,
# iterations::NamedTuple{(:iter, :burnin, :thin),Tuple{Int64,Int64,Int64}},
# designmatrix_step1::Array{Float64, 3},
# designmatrix_step2::Matrix{Float64})
# p2KeepReflectionGeneralSigma(beta::Array{Float64, 3},
# sigma::Array{Float64, 3},
# rmat::Array{Float64, 4},
# angle::Array{Float64, 3},
# beta_nonid::Array{Float64, 3},
# rmat_nonid::Array{Float64, 4},
# angle_nonid::Array{Float64, 3},
# colnames_modelmatrix::Vector{String},
# fm::FormulaTerm,
# betaprior::ContinuousUnivariateDistribution,
# sigmaprior::ContinuousMatrixDistribution,
# k::Int64,
# p::Int64,
# n::Int64,
# d::Int64,
# dormat::Bool,
# reflection::Reflection,
# sigmatype::SigmaType,
# dataset::Array{Float64,3},
# covariates::DataFrame,
# iterations::NamedTuple{(:iter, :burnin, :thin),Tuple{Int64,Int64,Int64}},
# designmatrix_step1::Array{Float64, 3},
# designmatrix_step2::Matrix{Float64})
#end | BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 2893 |
function predictmean(;beta::Array{Float64,3},
dataset::Array{Float64,3},
fm::FormulaTerm = @formula(1~ 1),
covariates::DataFrame)
k::Int64 = size(dataset, 1)
n::Int64 = size(dataset, 3)
p::Int64 = size(dataset, 2)
mean_pred = [zeros(Float64, size(beta,1), k,p) for i = 1:n];
covariates_copy = deepcopy(covariates)
for i = 1:size(covariates_copy,2)
if isa(covariates_copy[:,i], CategoricalArray)
elseif isa(covariates_copy[1,i], Real)
covariates_copy[:,i] = (covariates_copy[:,i] .- mean(covariates_copy[:,i])) ./ std(covariates_copy[:,i])
else
error("Only factors or Real variables are allowed in covariates")
end
end
designmatrix_v2_app = ModelFrame(fm, covariates_copy);
designmatrix_v2 = ModelMatrix(designmatrix_v2_app).m
#colnames_modelmatrix = coefnames(designmatrix_v2_app)
#for i = 2:size(designmatrix_v2, 2)
# designmatrix_v2[:, i] = designmatrix_v2[:, i] .- mean(designmatrix_v2[:, i])
#end
designmatrix = compute_designmatrix(designmatrix_v2, k) # dimensions k, k * d, n
for select_obs = 1:n
for i = 1:size(beta,1)
mean_pred[select_obs][i,:,:] = designmatrix[:,:,select_obs]*beta[i,:,:]
end
end
return mean_pred
end
function predictmean(modeloutput::SizeAndShapeModelOutput{KeepReflection, GeneralSigma, Valuep2, DB, DS, FF}) where {DB<:ContinuousUnivariateDistribution,DS<:ContinuousMatrixDistribution, FF<:FormulaTerm}
covariates = modeloutput.covariates.covariates
k::Int64 = modeloutput.indices.k
n::Int64 = size(covariates,1)
p::Int64 = modeloutput.indices.p
beta = modeloutput.mcmcoutputArrays.beta
rmat = modeloutput.mcmcoutputArrays.rmat
mean_pred = [zeros(Float64, size(beta,1), k,p) for i = 1:n];
#covariates_copy = deepcopy(covariates)
#for i = 1:size(covariates_copy,2)
# if isa(covariates_copy[:,i], CategoricalArray)
# elseif isa(covariates_copy[1,i], Real)
# #covariates_copy[:,i] = (covariates_copy[:,i] .- mean(covariates_copy[:,i])) ./ std(covariates_copy[:,i])
# else
# error("Only factors or Real variables are allowed in covariates")
# end
#end
#designmatrix_v2_app = ModelFrame(modeloutput.covariates.fm, covariates_copy);
#designmatrix_v2 = ModelMatrix(designmatrix_v2_app).m
#designmatrix = compute_designmatrix(designmatrix_v2, k) # dimensions k, k * d, n
designmatrix, d, colnames_modelmatrix, designmatrix_v2 = create_designmatrix(modeloutput.covariates.fm, covariates, k)
for select_obs = 1:n
for i = 1:size(beta,1)
mean_pred[select_obs][i,:,:] = designmatrix[:,:,select_obs]*beta[i,:,:]*rmat[i,:,:,select_obs]
end
end
return mean_pred
end | BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 5285 | #### #### #### #### #### ####
#### BETA
#### #### #### #### #### ####
function sampler_beta(xdata::Array{Float64,3}, designmatrix::Array{Float64,3}, prior::NoPriorBeta, betaMCMC::Matrix{Float64}, sigmaMCMC::Matrix{Float64}, valp::Valuep, betastandMCMC::Matrix{Float64})
end
function sampler_beta(xdata::Array{Float64,3}, designmatrix::Array{Float64,3}, prior::Normal, betaMCMC::Matrix{Float64}, sigmaMCMC::Matrix{Float64}, valp::Valuep, betastandMCMC::Matrix{Float64})
kd::Int64 = size( betaMCMC,1)
p::Int64 = size( betaMCMC,2)
n::Int64 = size(xdata,3)
invMat = Symmetric(inv(sigmaMCMC))
for ip = 1:p
Vp::Matrix{Float64} = zeros(Float64, kd, kd)
Mp::Vector{Float64} = zeros(Float64, kd)
Vp[:, :] = Diagonal([1.0 / params(prior)[2]^2 for i = 1:kd])
Mp[:] .= params(prior)[1] / params(prior)[2]^2
for j = 1:n
Vp[:, :] += transpose(designmatrix[:, :, j]) * invMat * designmatrix[:, :, j]
Mp[:] += transpose(designmatrix[:, :, j]) * invMat * xdata[:, ip, j]
end
Vp = Symmetric(inv(Vp))
Mp = Vp*Mp
betaMCMC[:,ip] = rand(MvNormal(Mp,Vp))
betastandMCMC[:, ip] = betaMCMC[:, ip]
end
#standardize_reg(betastandMCMC, valp)
end
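# The update above is the standard conjugate Gaussian full conditional for each column of beta:
# with prior N(m, v^2) on every entry, the posterior precision is
#   Vp^{-1} = (1/v^2) I + sum_j Z_j' Sigma^{-1} Z_j
# and the posterior mean is Vp * ((m/v^2) 1 + sum_j Z_j' Sigma^{-1} x_j),
# where Z_j = designmatrix[:, :, j] and x_j = xdata[:, ip, j].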
#### #### #### #### #### ####
#### SIGMA
#### #### #### #### #### ####
function sampler_sigma(xdata::Array{Float64,3}, designmatrix::Array{Float64,3}, prior::NoPriorSigma, betaMCMC::Matrix{Float64}, sigmaMCMC::Matrix{Float64}, sigmatype::SigmaType)
end
function sampler_sigma(xdata::Array{Float64,3}, designmatrix::Array{Float64,3}, prior::InverseWishart, betaMCMC::Matrix{Float64}, sigmaMCMC::Matrix{Float64}, sigmatype::GeneralSigma)
kd::Int64 = size(betaMCMC, 1)
p::Int64 = size(betaMCMC, 2)
n::Int64 = size(xdata, 3)
#invMat = Symmetric(inv(sigmaMCMC))
nup::Float64 = params(prior)[1] + Float64(n*p)
Psip = deepcopy(params(prior)[2].mat)
# accumulate the residual outer products over all coordinates and observations,
# then draw Sigma once from its conjugate inverse-Wishart full conditional
for ip = 1:p
for j = 1:n
app = xdata[:, ip, j] - designmatrix[:, :, j] * betaMCMC[:, ip]
Psip[:, :] += app * transpose(app)
end
end
sigmaMCMC[:, :] = rand(InverseWishart(nup, Symmetric(Psip).data))
end
#### #### #### #### #### ####
#### rmat
#### #### #### #### #### ####
function sampler_rmat(xdata::Array{Float64,3}, designmatrix::Array{Float64,3}, betaMCMC::Matrix{Float64}, sigmaMCMC::Matrix{Float64}, angleMCMC::Matrix{Float64}, ydata::Array{Float64,3}, valp::Valuep, reflection::KeepReflection, rmatMCMC::Array{Float64,3}, samp_rmat::donotsamplermat)
end
function sampler_rmat(xdata::Array{Float64,3}, designmatrix::Array{Float64,3}, betaMCMC::Matrix{Float64}, sigmaMCMC::Matrix{Float64}, angleMCMC::Matrix{Float64}, ydata::Array{Float64,3}, valp::Valuep2, reflection::KeepReflection, rmatMCMC::Array{Float64,3}, samp_rmat::dosamplermat)
kd::Int64 = size(betaMCMC, 1)
p::Int64 = size(betaMCMC, 2)
n::Int64 = size(xdata, 3)
k::Int64 = size(xdata,1)
invMat = Symmetric(inv(sigmaMCMC))
for j = 1:n
mui = designmatrix[:, :, j] * betaMCMC[:, :]
#@toggled_assert size(mui) == (k,p)
Ai = transpose(mui) * invMat * ydata[:,:,j]
#println(size(Ai))
x1 = Ai[1, 1] + Ai[2, 2]
x2 = Ai[2, 1] - Ai[1, 2]
#x2 = Ai[1, 2] - Ai[2, 1]
kvonmises = sqrt(x1^2 + x2^2)
muvonmises = atan(x2 / kvonmises, x1 / kvonmises)
#muvonmises = atan(x1 / kvonmises, x2 / kvonmises)
angleMCMC[1, j] = rand(VonMises(muvonmises, kvonmises))
rmatMCMC[1, 1, j] = cos(angleMCMC[1, j])
rmatMCMC[1, 2, j] = -sin(angleMCMC[1, j])
rmatMCMC[2, 1, j] = sin(angleMCMC[1, j])
rmatMCMC[2, 2, j] = cos(angleMCMC[1, j])
#xdata[:, :, j] = ydata[:, :, j] * transpose(rmatMCMC[:,:,j])
end
compute_xdata(xdata, ydata, rmatMCMC)
end
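# The draw above uses the von Mises full conditional of the planar rotation angle: with
# A_j = mu_j' Sigma^{-1} Y_j, the log-density in theta is proportional to
#   (A_j[1,1] + A_j[2,2]) cos(theta) + (A_j[2,1] - A_j[1,2]) sin(theta),
# which is a VonMises(atan(x2, x1), sqrt(x1^2 + x2^2)) kernel with x1, x2 as computed above.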
function sampler_rmat(xdata::Array{Float64,3}, designmatrix::Array{Float64,3}, betaMCMC::Matrix{Float64}, sigmaMCMC::Matrix{Float64}, angleMCMC::Matrix{Float64}, ydata::Array{Float64,3}, valp::Valuep3, reflection::KeepReflection, rmatMCMC::Array{Float64,3}, samp_rmat::dosamplermat)
error("sampler_rmat p=3 - Not completed yet")
kd::Int64 = size(betaMCMC, 1)
p::Int64 = size(betaMCMC, 2)
n::Int64 = size(xdata, 3)
k::Int64 = size(xdata, 1)
invMat = Symmetric(inv(sigmaMCMC))
for j = 1:n
mui = designmatrix[:, :, j] * betaMCMC[:, :]
#@toggled_assert size(mui) == (k,p)
Ai = transpose(mui) * invMat * ydata[:, :, j]
#println(size(Ai))
x1 = Ai[1, 1] + Ai[2, 2]
x2 = Ai[2, 1] - Ai[1, 2]
#x2 = Ai[1, 2] - Ai[2, 1]
kvonmises = sqrt(x1^2 + x2^2)
muvonmises = atan(x2 / kvonmises, x1 / kvonmises)
#muvonmises = atan(x1 / kvonmises, x2 / kvonmises)
angleMCMC[1, j] = rand(VonMises(muvonmises, kvonmises))
rmatMCMC[1, 1, j] = cos(angleMCMC[1, j])
rmatMCMC[1, 2, j] = -sin(angleMCMC[1, j])
rmatMCMC[2, 1, j] = sin(angleMCMC[1, j])
rmatMCMC[2, 2, j] = cos(angleMCMC[1, j])
#xdata[:, :, j] = ydata[:, :, j] * transpose(rmatMCMC[:,:,j])
end
compute_xdata(xdata, ydata, rmatMCMC)
end | BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 35 | using BayesSizeAndShape
using Test
| BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 363 | ####
#### Coverage summary, printed as "(percentage) covered".
####
#### Useful for CI environments that just want a summary (eg a Gitlab setup).
####
using Coverage
cd(joinpath(@__DIR__, "..", "..")) do
covered_lines, total_lines = get_summary(process_folder())
percentage = covered_lines / total_lines * 100
println("($(percentage)%) covered")
end
| BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 266 | # only push coverage from one bot
get(ENV, "TRAVIS_OS_NAME", nothing) == "linux" || exit(0)
get(ENV, "TRAVIS_JULIA_VERSION", nothing) == "1.3" || exit(0)
using Coverage
cd(joinpath(@__DIR__, "..", "..")) do
Codecov.submit(Codecov.process_folder())
end
| BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 3893 | #################
# Packages ##
#################
using Pkg
Pkg.activate("/Users/gianlucamastrantonio/Dropbox (Politecnico di Torino Staff)/lavori/gitrepo/BayesSizeAndShape/todelete/rat")
using Revise
using Random, Distributions, LinearAlgebra, StatsBase
using Kronecker, DataFrames,StatsModels, CategoricalArrays
using Plots
using BayesSizeAndShape
dataset_rats = dataset("rats");
dataset_desciption("rats")
landmark = dataset_rats.x;
landmark = landmark ./ 100.0
subject = dataset_rats.no;
time = dataset_rats.time;
## design matrix
X = DataFrame(
time = time,
subject = categorical(string.(subject))
);
## size and shape data
sizeshape = sizeshape_helmertproduct_reflection(landmark);
k = size(sizeshape,1);
n = size(sizeshape,3);
p = size(sizeshape,2);
## plots of the data
plot(landmark[:,1,1], landmark[:,2,1],legend = false, color = cgrad(:tab20, 21)[Int64(subject[1])])
for i = 2:size(landmark,3)
plot!(landmark[:,1,i], landmark[:,2,i], color = cgrad(:tab20, 21)[Int64(subject[i])])
end
title!("Landmarks")
plot(sizeshape[:,1,1], sizeshape[:,2,1],legend = false, color = cgrad(:tab20, 21)[Int64(subject[1])])
for i = 2:size(landmark,3)
plot!(sizeshape[:,1,i], sizeshape[:,2,i], color = cgrad(:tab20, 21)[Int64(subject[i])])
end
title!("Size And Shape")
## covariates
### ### ### ### ###
### MCMC
### ### ### ### ###
outmcmc = SizeAndShapeWithReflectionMCMC(
landmark,
@formula(1 ~ 1+time + subject),
X,
(iter=1000, burnin=200, thin=2),
Normal(0.0,100000.0),#
InverseWishart(k + 2, 5.0 * Matrix{Float64}(I, k, k))
);
predictive_mean = sample_predictive_zbr(outmcmc);
predictive_obs = sample_predictive_zbr_plus_epsilon(outmcmc);
betaOUT = outmcmc.posteriorsamples.beta;
sigmaOUT = outmcmc.posteriorsamples.sigma;
rmatOUT = outmcmc.posteriorsamples.rmat;
describe(betaOUT[:,1:10], :all)
#@rput betaOUT;
#@rput sigmaOUT;
#R" save(betaOUT, sigmaOUT , file='provarats.RData')"
#if flag == 1
# prediction = BayesSizeAndShape.predictmean(outmcmc);
# @rput dataset;
# @rput prediction;
# R"""
# #require(emdbook)
# #require(coda)
# OUTpred = list()
# # ogni elemento della lista prediction contiene i posterior sample di un'osservazione
# for(select_obs in 1:length(prediction))
# {
# land.m <- apply(prediction[[select_obs]],c(2,3),mean)
# qq1 <- apply(prediction[[select_obs]][,,1],c(2),quantile,prob=c(0.025,0.975))
# qq2 <- apply(prediction[[select_obs]][,,2],c(2),quantile,prob=c(0.025,0.975))
# OUTpred[[select_obs]]<-list(meanland=land.m,CIland1=qq1, CIland2=qq2)
# }
# #aggiungere bb<-mcmc(prediction[[select_obs]])
# pdf("/Users/gianlucamastrantonio/Dropbox (Politecnico di Torino Staff)/SizeAndShape/JuliaStuff/GIAN/chainsGIAN.pdf")
# #theta = 0
# #R = matrix(c(cos(theta),sin(theta),-sin(theta), cos(theta)), ncol=2)
# plot(dataset[,,1], xlim=c(-300,1000)/100,ylim=c(-500,500)/100)
# points(OUTpred[[1]]$meanland, col=2)
# for(select_obs in 1:length(prediction))
# {
# points(dataset[,,select_obs])
# points(OUTpred[[select_obs]]$meanland, col=2)
# for(ll in 1:7){
# bb<-mcmc(prediction[[select_obs]][,ll,])
# HPDregionplot(bb,add=T,col=2,lty=2)
# }
# }
# for(p in 1:dim(betaOUT)[3])
# {
# par(mfrow=c(3,3))
# for(i in 1:dim(betaOUT)[2])
# {
# plot(betaOUT[,i,p], type="l")
# }
# }
# dev.off()
# #}
# #OUTpred[[select_obs]]<-list(meanland=land.m,CIland=qq.m)
# """
# # mean_pred contiene i campioni a posteriori
# R" save(OUTpred, file='OUTpred1.RData')"
# else print("Done posterior estimation")
#end | BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 6367 | ### ### ### ### ###
### packages
### ### ### ### ###
using Pkg
Pkg.activate("/Users/gianlucamastrantonio/Dropbox (Politecnico di Torino Staff)/lavori/gitrepo/BayesSizeAndShape/todelete/sim")
using Random, Distributions, LinearAlgebra, StatsBase
using DataFrames, StatsModels, CategoricalArrays
using Revise
using BayesSizeAndShape
using RCall
### ### ### ### ###
### simulations
### ### ### ### ###
n::Int64 = 100;
p::Int64 = 2;
k::Int64 = 10;
d::Int64 = 3;
# regression
reg::Matrix{Float64} = zeros(Float64, k*d, p);
eg::Matrix{Float64} = zeros(Float64, k*d, p);
reg[:] = rand(Normal(0.0, 1.0), prod(size(reg)));
constadd = 10.0
reg[1,:] = [0.0,0.0].+ constadd
reg[2,:] = [10.0,0.0].+ constadd
reg[3,:] = [20.0,10.0].+ constadd
reg[4,:] = [20.0,20.0].+ constadd
reg[5,:] = [10.0,20.0].+ constadd
reg[6,:] = [0.0,20.0].+ constadd
reg[7,:] = [-10.0,20.0].+ constadd
reg[8,:] = [-20.0,20.0].+ constadd
reg[9,:] = [-20.0,10.0].+ constadd
reg[10,:] = [-10.0,10.0].+ constadd
BayesSizeAndShape.standardize_reg(reg::Matrix{Float64}, BayesSizeAndShape.ValueP2(), BayesSizeAndShape.GramSchmidtMean());
zmat = DataFrame(
x1 = rand(Normal(10.0,1.0 ),n),
x2 = sample(["A", "B"],n)
)
zmat[:,1] = (zmat[:,1] .- mean(zmat[:,1])) ./ std(zmat[:,1])
zmat.x2 = categorical(zmat.x2)
zmat_modmat_ModelFrame = ModelFrame(@formula(1 ~ 1+x1+x2), zmat);
zmat_modmat = ModelMatrix(zmat_modmat_ModelFrame).m
design_matrix = BayesSizeAndShape.compute_designmatrix(zmat_modmat, k); # dimensions k, k * d, n
# covariance
sigma::Symmetric{Float64,Matrix{Float64}} = Symmetric(rand(InverseWishart(k + 2, 5.0 * Matrix{Float64}(I, k, k))));
dataset_complete = zeros(Float64,k,p,n);
#dataset = zeros(Float64, k, p, n);
for i_n = 1:n
for i_p = 1:p
dataset_complete[:, i_p, i_n] = rand(MvNormal(design_matrix[:, :, i_n] * reg[:, i_p], sigma))
end
end
## TESTS
helmmat = BayesSizeAndShape.RemoveLocationHelmert(k, BayesSizeAndShape.ValueP2());
dataset_complete_landmark = zeros(Float64,k+1,p,n);
for i = 1:n
dataset_complete_landmark[:,:,i] = transpose(helmmat.matrix)*dataset_complete[:,:,i] .+ 1/(k+1)
end
helmdata = BayesSizeAndShape.remove_location(dataset_complete_landmark, helmmat);
maximum(helmdata[:,:,:]-dataset_complete[:,:,:])
minimum(helmdata[:,:,:]-dataset_complete[:,:,:])
ssdata, ssdata_rotmat = BayesSizeAndShape.compute_sizeshape_fromnolocdata(helmdata, BayesSizeAndShape.KeepReflection(), BayesSizeAndShape.ValueP2());
dataset = BayesSizeAndShape.SSDataType(dataset_complete_landmark, BayesSizeAndShape.KeepReflection(),helmmat,BayesSizeAndShape.ValueP2(),BayesSizeAndShape.DoNotRemoveSize(), BayesSizeAndShape.GramSchmidtMean());
#dataset.nolocdata[:,:,5] -dataset_complete[:,:,5]
#####
### ### ### ### ###
### MCMC
### ### ### ### ###
mcmcOUT = generalSizeAndShapeMCMC(;
landmarks = dataset_complete_landmark,
fm = @formula(landmarks ~ 1+ x1 + x2),
covariates = zmat,
iterations=(iter=1000, burnin=200, thin=2),
betaprior = Normal(0.0,100000.0),#
sigmaprior=InverseWishart(k + 2, 5.0 * Matrix{Float64}(I, k, k)),
rmatdosample = true,
#betaprior = BayesSizeAndShape.NoPriorBeta(),
#sigmaprior = BayesSizeAndShape.NoPriorSigma(),
#rmatdosample = false,
#beta_init= zeros(Float64, k * d, p),
#sigma_init = Symmetric(Matrix{Float64}(I, k, k)),
#rmat_init = reshape(vcat([Matrix{Float64}(I, p, p)[:] for i = 1:n]...), (p, p, n)),
beta_init= reg,
sigma_init = sigma,
rmat_init = ssdata_rotmat,
meanmodel = ["linear"][1],
identification = ["gramschmidt"][1],
keepreflection = ["no", "yes"][2],
removelocation = ["no", "helmert"][2],
removesize = ["no", "norm"][1],
verbose= true
);
mcmcOUT =BayesSizeAndShape.SizeAndShapeWithReflectionMCMC(
dataset_complete_landmark,
@formula(1 ~ 1+ x1 + x2),
zmat,
(iter=1000, burnin=200, thin=2),
Normal(0.0,100000.0),#
InverseWishart(k + 2, 5.0 * Matrix{Float64}(I, k, k))
);
predictive_mean = sample_predictive_zbr(mcmcOUT);
predictive_obs = sample_predictive_zbr_plus_epsilon(mcmcOUT);
betaOUT = mcmcOUT.posteriorsamples.beta;
sigmaOUT = mcmcOUT.posteriorsamples.sigma;
rmatOUT = mcmcOUT.posteriorsamples.rmat;
@rput predictive_mean;
@rput predictive_obs;
@rput betaOUT;
@rput sigmaOUT;
@rput rmatOUT;
@rput reg;
@rput sigma;
@rput ssdata_rotmat;
@rput ssdata;
@rput n;
@rput p;
@rput k;
R"""
DIR = "/Users/gianlucamastrantonio/Dropbox (Politecnico di Torino Staff)/lavori/gitrepo/BayesSizeAndShape/todelete/sim/plot/"
pdf(paste(DIR, "parameters.pdf",sep=""))
par(mfrow=c(3,3))
for(i in 1:dim(betaOUT)[2])
{
plot(betaOUT[,i], type="l")
abline(h = c(reg)[i], col=2, lwd=2)
}
par(mfrow=c(3,3))
for(i in 1:dim(sigmaOUT)[2])
{
plot(sigmaOUT[,i], type="l")
abline(h = c(sigma)[i], col=2, lwd=2)
}
par(mfrow=c(3,4))
for(i in 1:dim(rmatOUT)[2])
{
plot(rmatOUT[,i], type="l")
abline(h = c(ssdata_rotmat)[i], col=2, lwd=2)
}
dev.off()
pdf(paste(DIR, "predictive_mean.pdf",sep=""))
meanpred = colMeans(predictive_mean)
par(mfrow=c(2,2))
for(i in 1:n)
{
plot(matrix(meanpred[(i-1)*(k*p) + 1:(k*p)],ncol=2))
points(ssdata[,,i], col=2)
plot(c(ssdata[,,i]),c(matrix(meanpred[(i-1)*(k*p) + 1:(k*p)],ncol=2)))
abline(a=0,b=1,col=2, lwd=2)
}
par(mfrow=c(3,3))
for(i in 1:dim(predictive_mean)[2])
{
plot(predictive_mean[,i], type="l", main= c(ssdata)[i])
abline(h = c(ssdata)[i], col=2, lwd=2)
}
dev.off()
pdf(paste(DIR, "predictive_obs.pdf",sep=""))
meanpred = colMeans(predictive_obs)
par(mfrow=c(2,2))
for(i in 1:n)
{
plot(matrix(meanpred[(i-1)*(k*p) + 1:(k*p)],ncol=2))
points(ssdata[,,i], col=2)
plot(c(ssdata[,,i]),c(matrix(meanpred[(i-1)*(k*p) + 1:(k*p)],ncol=2)))
abline(a=0,b=1,col=2, lwd=2)
}
par(mfrow=c(3,3))
for(i in 1:dim(predictive_obs)[2])
{
plot(predictive_obs[,i], type="l", main= c(ssdata)[i])
abline(h = c(ssdata)[i], col=2, lwd=2)
}
dev.off()
"""
##### TESTS
#if true == true
# design_matrix
# maximum(abs.(mcmcOUT.meantype.designmatrix-design_matrix))
# zmat_modmat - mcmcOUT.meantype.model_matrix
#end
| BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 7280 | ### ### ### ### ###
### packages
### ### ### ### ###
using Pkg
Pkg.activate("/Users/gianlucamastrantonio/Dropbox (Politecnico di Torino Staff)/lavori/gitrepo/BayesSizeAndShape/todelete/sim")
using Random, Distributions, LinearAlgebra, StatsBase
using DataFrames, StatsModels, CategoricalArrays
using Revise
using BayesSizeAndShape
using RCall
### ### ### ### ###
### simulations
### ### ### ### ###
n::Int64 = 100;
p::Int64 = 3;
k::Int64 = 10;
d::Int64 = 3;
# regression
reg::Matrix{Float64} = zeros(Float64, k*d, p);
eg::Matrix{Float64} = zeros(Float64, k*d, p);
reg[:] = rand(Normal(0.0, 1.0), prod(size(reg)));
constadd = 10.0
reg[1,:] = [0.0,0.0,0.0].+ constadd
reg[2,:] = [10.0,0.0,-10].+ constadd
reg[3,:] = [20.0,10.0,-10].+ constadd
reg[4,:] = [20.0,20.0,-10].+ constadd
reg[5,:] = [10.0,20.0,-10].+ constadd
reg[6,:] = [0.0,20.0,-20].+ constadd
reg[7,:] = [-10.0,20.0,-20].+ constadd
reg[8,:] = [-20.0,20.0,-20].+ constadd
reg[9,:] = [-20.0,10.0,-20].+ constadd
reg[10,:] = [-10.0,10.0,-20].+ constadd
BayesSizeAndShape.standardize_reg(reg::Matrix{Float64}, BayesSizeAndShape.ValueP3(), BayesSizeAndShape.GramSchmidtMean());
#BayesSizeAndShape.standardize_reg_computegamma(reg::Matrix{Float64}, BayesSizeAndShape.ValueP3(), BayesSizeAndShape.GramSchmidtMean())
zmat = DataFrame(
x1 = rand(Normal(10.0,1.0 ),n),
x2 = sample(["A", "B"],n)
)
zmat[:,1] = (zmat[:,1] .- mean(zmat[:,1])) ./ std(zmat[:,1])
zmat.x2 = categorical(zmat.x2)
zmat_modmat_ModelFrame = ModelFrame(@formula(1 ~ 1+x1+x2), zmat);
zmat_modmat = ModelMatrix(zmat_modmat_ModelFrame).m
design_matrix = BayesSizeAndShape.compute_designmatrix(zmat_modmat, k); # dimensions k, k * d, n
# covariance
sigma::Symmetric{Float64,Matrix{Float64}} = Symmetric(rand(InverseWishart(k + 2, 5.0 * Matrix{Float64}(I, k, k))));
dataset_complete = zeros(Float64,k,p,n);
#dataset = zeros(Float64, k, p, n);
for i_n = 1:n
for i_p = 1:p
dataset_complete[:, i_p, i_n] = rand(MvNormal(design_matrix[:, :, i_n] * reg[:, i_p], sigma))
end
end
## TESTS
helmmat = BayesSizeAndShape.RemoveLocationHelmert(k, BayesSizeAndShape.ValueP3());
dataset_complete_landmark = zeros(Float64,k+1,p,n);
for i = 1:n
dataset_complete_landmark[:,:,i] = transpose(helmmat.matrix)*dataset_complete[:,:,i] .+ 1/(k+1)
end
helmdata = BayesSizeAndShape.remove_location(dataset_complete_landmark, helmmat);
maximum(helmdata[:,:,:]-dataset_complete[:,:,:])
minimum(helmdata[:,:,:]-dataset_complete[:,:,:])
ssdata, ssdata_rotmat = BayesSizeAndShape.compute_sizeshape_fromnolocdata(helmdata, BayesSizeAndShape.KeepReflection(), BayesSizeAndShape.ValueP3());
angle_data = Matrix{Float64}(undef,3,n)
for i = 1:size(ssdata,3)
BayesSizeAndShape.compute_angle_from_rmat(i,angle_data, ssdata_rotmat, BayesSizeAndShape.ValueP3(), BayesSizeAndShape.KeepReflection())
end
for i in 1:size(ssdata_rotmat,3)
println(det(ssdata_rotmat[:,:,i]))
if det(ssdata_rotmat[:,:,i])<0
error("")
end
println(sum( (helmdata[:,:,i] - ssdata[:,:,i]*transpose(ssdata_rotmat[:,:,i])) ))
end
dataset = BayesSizeAndShape.SSDataType(dataset_complete_landmark, BayesSizeAndShape.KeepReflection(),helmmat,BayesSizeAndShape.ValueP3(),BayesSizeAndShape.DoNotRemoveSize(), BayesSizeAndShape.GramSchmidtMean());
#dataset.nolocdata[:,:,5] -dataset_complete[:,:,5]
#####
### ### ### ### ###
### MCMC
### ### ### ### ###
molt::Int64 = 3
mcmcOUT = generalSizeAndShapeMCMC(;
landmarks = dataset_complete_landmark,
fm = @formula(1 ~ 1+ x1 + x2),
covariates = zmat,
iterations=(iter=1000*molt, burnin=200*molt, thin=2*molt),
betaprior = Normal(0.0,100000.0),#
sigmaprior=InverseWishart(k + 2, 5.0 * Matrix{Float64}(I, k, k)),
rmatdosample = true,
#betaprior = BayesSizeAndShape.NoPriorBeta(),
#sigmaprior = BayesSizeAndShape.NoPriorSigma(),
#rmatdosample = false,
#beta_init= zeros(Float64, k * d, p),
#sigma_init = Symmetric(Matrix{Float64}(I, k, k)),
#rmat_init = reshape(vcat([Matrix{Float64}(I, p, p)[:] for i = 1:n]...), (p, p, n)),
beta_init= reg,
sigma_init = sigma,
rmat_init = ssdata_rotmat,
meanmodel = ["linear"][1],
identification = ["gramschmidt"][1],
keepreflection = ["no", "yes"][2],
removelocation = ["no", "helmert"][2],
removesize = ["no", "norm"][1],
verbose= true
);
#mcmcOUT =BayesSizeAndShape.SizeAndShapeWithReflectionMCMC(
# dataset_complete_landmark,
# @formula(1 ~ 1+ x1 + x2),
# zmat,
# (iter=1000, burnin=200, thin=2),
# Normal(0.0,100000.0),#
# InverseWishart(k + 2, 5.0 * Matrix{Float64}(I, k, k))
#);
predictive_mean = sample_predictive_zbr(mcmcOUT);
predictive_obs = sample_predictive_zbr_plus_epsilon(mcmcOUT);
betaOUT = mcmcOUT.posteriorsamples.beta;
sigmaOUT = mcmcOUT.posteriorsamples.sigma;
rmatOUT = mcmcOUT.posteriorsamples.rmat;
angleOUT = mcmcOUT.posteriorsamples.angle;
@rput angle_data;
@rput predictive_mean;
@rput predictive_obs;
@rput betaOUT;
@rput sigmaOUT;
@rput rmatOUT;
@rput angleOUT;
@rput reg;
@rput sigma;
@rput ssdata_rotmat;
@rput ssdata;
@rput n;
@rput p;
@rput k;
R"""
DIR = "/Users/gianlucamastrantonio/Dropbox (Politecnico di Torino Staff)/lavori/gitrepo/BayesSizeAndShape/todelete/sim/plot/"
pdf(paste(DIR, "parametersP3.pdf",sep=""))
par(mfrow=c(3,3))
for(i in 1:dim(betaOUT)[2])
{
plot(betaOUT[,i], type="l")
abline(h = c(reg)[i], col=2, lwd=2)
}
par(mfrow=c(3,3))
for(i in 1:dim(sigmaOUT)[2])
{
plot(sigmaOUT[,i], type="l")
abline(h = c(sigma)[i], col=2, lwd=2)
}
par(mfrow=c(3,3))
for(i in 1:dim(rmatOUT)[2])
{
plot(rmatOUT[,i], type="l")
abline(h = c(ssdata_rotmat)[i], col=2, lwd=2)
}
par(mfrow=c(3,3))
for(i in 1:dim(angleOUT)[2])
{
plot(angleOUT[,i], type="l", main=colnames(angleOUT)[i])
abline(h = c(angle_data)[i], col=2, lwd=2)
}
dev.off()
pdf(paste(DIR, "predictive_meanP3.pdf",sep=""))
meanpred = colMeans(predictive_mean)
par(mfrow=c(2,2))
for(i in 1:n)
{
plot(matrix(meanpred[(i-1)*(k*p) + 1:(k*p)],ncol=2))
points(ssdata[,,i], col=2)
plot(c(ssdata[,,i]),c(matrix(meanpred[(i-1)*(k*p) + 1:(k*p)],ncol=2)))
abline(a=0,b=1,col=2, lwd=2)
}
par(mfrow=c(3,3))
for(i in 1:dim(predictive_mean)[2])
{
plot(predictive_mean[,i], type="l", main= c(ssdata)[i])
abline(h = c(ssdata)[i], col=2, lwd=2)
}
dev.off()
pdf(paste(DIR, "predictive_obsP3.pdf",sep=""))
meanpred = colMeans(predictive_obs)
par(mfrow=c(2,2))
for(i in 1:n)
{
plot(matrix(meanpred[(i-1)*(k*p) + 1:(k*p)],ncol=2))
points(ssdata[,,i], col=2)
plot(c(ssdata[,,i]),c(matrix(meanpred[(i-1)*(k*p) + 1:(k*p)],ncol=2)))
abline(a=0,b=1,col=2, lwd=2)
}
par(mfrow=c(3,3))
for(i in 1:dim(predictive_obs)[2])
{
plot(predictive_obs[,i], type="l", main= c(ssdata)[i])
abline(h = c(ssdata)[i], col=2, lwd=2)
}
dev.off()
"""
##### TESTS
#if true == true
# design_matrix
# maximum(abs.(mcmcOUT.meantype.designmatrix-design_matrix))
# zmat_modmat - mcmcOUT.meantype.model_matrix
#end
| BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | code | 7958 |
function sim(idata::Int64, ndata::Int64, nsim::Int64, betainsideCI::Matrix{Float64}, sigmainsideCI::Matrix{Float64}, betalengthCI::Matrix{Float64}, sigmalengthCI::Matrix{Float64}, NAMEGEN = "SIM")
for ip = 1:2
for isim = 1:nsim
n::Int64 = [20,100,20,100, 20,100,20,100][isim];
p::Int64 = [2,3][ip];
k::Int64 = [10,10,20,20, 10,10,20,20][isim];
d::Int64 = 3;
var::Float64 = [1.0,1.0,1.0,1.0, 10.0,10.0,10.0,10.0][isim]
NAME = NAMEGEN * "_" * string(n)* "_" * string(p) * "_" * string(k) * "_" * string(var)
# regression
reg::Matrix{Float64} = zeros(Float64, k*d, p);
#eg::Matrix{Float64} = zeros(Float64, k*d, p);
reg[:] = rand(Normal(5.0, 1.0), prod(size(reg)));
#constadd = 10.0
#if p == 2
# reg[1,:] = [0.0,0.0].+ constadd
# reg[2,:] = [10.0,0.0].+ constadd
# reg[3,:] = [20.0,10.0].+ constadd
# reg[4,:] = [20.0,20.0].+ constadd
# reg[5,:] = [10.0,20.0].+ constadd
# reg[6,:] = [0.0,20.0].+ constadd
# reg[7,:] = [-10.0,20.0].+ constadd
# reg[8,:] = [-20.0,20.0].+ constadd
# reg[9,:] = [-20.0,10.0].+ constadd
# reg[10,:] = [-10.0,10.0].+ constadd
#else
# reg[1,:] = [0.0,0.0,0.0].+ constadd
# reg[2,:] = [10.0,0.0,-10].+ constadd
# reg[3,:] = [20.0,10.0,-10].+ constadd
# reg[4,:] = [20.0,20.0,-10].+ constadd
# reg[5,:] = [10.0,20.0,-10].+ constadd
# reg[6,:] = [0.0,20.0,-20].+ constadd
# reg[7,:] = [-10.0,20.0,-20].+ constadd
# reg[8,:] = [-20.0,20.0,-20].+ constadd
# reg[9,:] = [-20.0,10.0,-20].+ constadd
# reg[10,:] = [-10.0,10.0,-20].+ constadd
#end
if p == 2
BayesSizeAndShape.standardize_reg(reg::Matrix{Float64}, BayesSizeAndShape.ValueP2(), BayesSizeAndShape.GramSchmidtMean());
else
BayesSizeAndShape.standardize_reg(reg::Matrix{Float64}, BayesSizeAndShape.ValueP3(), BayesSizeAndShape.GramSchmidtMean());
end
zmat = DataFrame(
x1 = rand(Normal(10.0,1.0 ),n),
x2 = sample(["A", "B"],n)
)
zmat[:,1] = (zmat[:,1] .- mean(zmat[:,1])) ./ std(zmat[:,1])
zmat.x2 = categorical(zmat.x2)
zmat_modmat_ModelFrame = ModelFrame(@formula(1 ~ 1+x1 + x2 ), zmat);
zmat_modmat = ModelMatrix(zmat_modmat_ModelFrame).m
design_matrix = BayesSizeAndShape.compute_designmatrix(zmat_modmat, k); # dimensions k, k * d, n
# covariance
sigma::Symmetric{Float64,Matrix{Float64}} = Symmetric(rand(InverseWishart(k + 2, 5.0 * Matrix{Float64}(I, k, k))));
sigma.data[:,:] = sigma.data[:,:] .* var
dataset_complete = zeros(Float64,k,p,n);
#dataset = zeros(Float64, k, p, n);
for i_n = 1:n
for i_p = 1:p
dataset_complete[:, i_p, i_n] = rand(MvNormal(design_matrix[:, :, i_n] * reg[:, i_p], sigma))
end
end
if p == 2
helmmat = BayesSizeAndShape.RemoveLocationHelmert(k, BayesSizeAndShape.ValueP2());
else
helmmat = BayesSizeAndShape.RemoveLocationHelmert(k, BayesSizeAndShape.ValueP3());
end
dataset_complete_landmark = zeros(Float64,k+1,p,n);
for i = 1:n
dataset_complete_landmark[:,:,i] = transpose(helmmat.matrix)*dataset_complete[:,:,i] .+ 1/(k+1)
end
helmdata = BayesSizeAndShape.remove_location(dataset_complete_landmark, helmmat);
if p == 2
ssdata, ssdata_rotmat = BayesSizeAndShape.compute_sizeshape_fromnolocdata(helmdata, BayesSizeAndShape.KeepReflection(), BayesSizeAndShape.ValueP2());
else
ssdata, ssdata_rotmat = BayesSizeAndShape.compute_sizeshape_fromnolocdata(helmdata, BayesSizeAndShape.KeepReflection(), BayesSizeAndShape.ValueP3());
end
if p == 2
dataset = BayesSizeAndShape.SSDataType(dataset_complete_landmark, BayesSizeAndShape.KeepReflection(),helmmat,BayesSizeAndShape.ValueP2(),BayesSizeAndShape.DoNotRemoveSize(), BayesSizeAndShape.GramSchmidtMean());
else
dataset = BayesSizeAndShape.SSDataType(dataset_complete_landmark, BayesSizeAndShape.KeepReflection(),helmmat,BayesSizeAndShape.ValueP3(),BayesSizeAndShape.DoNotRemoveSize(), BayesSizeAndShape.GramSchmidtMean());
end
#dataset.nolocdata[:,:,5] -dataset_complete[:,:,5]
#####
### ### ### ### ###
### MCMC
### ### ### ### ###
molt::Int64 = 30
outmcmc = SizeAndShapeWithReflectionMCMC(
dataset_complete_landmark,
@formula(1 ~ 1+ x1 + x2 ),
zmat,
(iter=3000*molt, burnin=1000*molt, thin=1*molt),
Normal(0.0,100000.0),#
InverseWishart(k + 2, 2.0 * Matrix{Float64}(I, k, k))
);
betaout = posterior_samples_beta(outmcmc);
sigmaout = posterior_samples_sigma(outmcmc);
windex = isim + (ip-1)*nsim
betainsideCI[windex,idata] = 0.0
npar = 0
for idim = 1:p
initpar = [1,2,3][idim]
for ipar in initpar:( d*k)
npar += 1
ww = ipar + (idim-1)*d*k
qq = quantile(betaout[:,ww], [0.025,0.975])
if (reg[ipar,idim]>=qq[1]) & (reg[ipar,idim]<=qq[2])
betainsideCI[windex,idata] += 1.0
end
betalengthCI[windex,idata] += qq[2]-qq[1]
end
end
betainsideCI[windex,idata] = betainsideCI[windex,idata] / npar
betalengthCI[windex,idata] = betalengthCI[windex,idata] / npar
sigmainsideCI[windex,idata] = 0.0
npar = 0
for ipar = 1:k
for jpar = 1:ipar
npar += 1
ww = jpar + (ipar-1)*k
qq = quantile(sigmaout[:,ww], [0.025,0.975])
if (sigma[ipar,jpar]>=qq[1]) & (sigma[ipar,jpar]<=qq[2])
sigmainsideCI[windex,idata] += 1.0
end
sigmalengthCI[windex,idata] += qq[2]-qq[1]
end
end
sigmainsideCI[windex,idata] = sigmainsideCI[windex,idata] / npar
sigmalengthCI[windex,idata] = sigmalengthCI[windex,idata] / npar
println(trunc.(betainsideCI[1:windex,idata],digits=2))
println(trunc.(sigmainsideCI[1:windex,idata],digits=2))
println(trunc.(betalengthCI[1:windex,idata],digits=2))
println(trunc.(sigmalengthCI[1:windex,idata],digits=2))
@rput betainsideCI
@rput sigmainsideCI
@rput betalengthCI
@rput sigmalengthCI
#@rput betaout;
#@rput sigmaout;
#@rput reg;
#@rput sigma;
#@rput ssdata_rotmat;
#@rput ssdata;
#@rput n;
#@rput p;
#@rput k;
#@rput NAME;
end
end
end | BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | docs | 622 | # Changelog
## [v0.2.0] - 2023-05-24
### Added
- There is now a real data example
### Changed
- The entire package structure has been changed
- This version breaks backward compatibility
## [v0.1.4] - 2023-03-14
### Added
- compute_helmertized_configuration
### Changed
- SizeAndShapeMCMC - the outputs are now DataFrames
- SizeAndShapeMCMC_p2withreflection is now SizeAndShapeMCMC
## [v0.1.3] - 2023-03-14
### Added
- SizeAndShapeMCMC_p2withreflection() - MCMC samples for two-dimensional data with reflection information
### Deprecated
- compute_dessignmatrix() - Now compute_designmatrix() should be used instead | BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | docs | 7754 | # **BayesSizeAndShape**
This package implements a Bayesian regression for size-and-shape data, based on "Di Noia, A., Mastrantonio, G., and Jona Lasinio, G., “Bayesian Size-and-Shape regression modelling”, <i>arXiv e-prints</i>, 2023". To install, simply do
```julia
julia> ]
pkg> add BayesSizeAndShape
```
at the julia prompt.
At the present moment, the package implements models for two- or three-dimensional data with reflection information. All the objects use `Float64` and `Int64` numbers.
The function to use is `SizeAndShapeWithReflectionMCMC`
## **Basic Usage**
To fix notation, we assume the following
* `n` be the number of objects;
* `k+1` be the number of recorded landmark for each object
* `p` be the dimension of each landmark (only p=2 or p=3)
The model estimated is
$$\text{vec}(\mathbf{Y}\_i \mathbf{R}\_i') \sim\mathcal{N}\_{k p}\left( \text{vec}\left(\sum_{h=1}^dz\_{ih}\mathbf{B}\_h\right), \mathbf{I}\_p \otimes \boldsymbol{\Sigma}\right), \,\qquad i = 1, \dots , n$$
where
* $\mathbf{Y}\_i$ is the i-th size-and-shape object;
* $z_{ih}$ is a value of a covariate
* $\mathbf{B}\_h$ is a matrix that contains regressive coefficients
* $\boldsymbol{\Sigma}$ is a covariance matrix
The prior distributions are
$$[\mathbf{B}\_h]_{jl} \sim N(m,v),\qquad \boldsymbol{\Sigma} \sim IW(\nu, \Psi)$$
### **Rats data example**
In the **examples** directory, there is the **julia** file **rats.jl** with an example of how to implement the model with the **rat** data, which we will describe also here.
First we load the required packages
```julia
julia> using Random
julia> using Distributions
julia> using LinearAlgebra
julia> using StatsBase
julia> using Kronecker
julia> using DataFrames
julia> using StatsModels
julia> using CategoricalArrays
julia> using Plots
julia> using BayesSizeAndShape
```
The data for this example is inside the package and it can be loaded with
```julia
julia> dataset_rats = dataset("rats");
```
and a description of the objects loaded can be seen with
```julia
julia> dataset_desciption("rats")
```
There are different datasets in the package (in the package directory `data`), which are all taken from the R package **shape** (Soetaert K (2021). _shape: Functions for Plotting Graphical Shapes, Colors_. R package version 1.4.6, <https://CRAN.R-project.org/package=shape>). The names of the datasets that are ready to be used in **julia** can be found with the following command
```julia
julia> dataset_names()
```
The data `dataset_rats` is a three-dimensional array of landmarks of dimension $(k+1)\times p \times n$. We put the landmarks in a `landmark` object and divide all the points by 100.0 to work with smaller numbers; we also extract the subject labels, which are used to color the plots below:
```julia
julia> landmark = dataset_rats.x;
julia> landmark = landmark ./ 100.0
julia> subject = dataset_rats.no;
```
We can easily plot the data with
```julia
julia> plot(landmark[:,1,1], landmark[:,2,1],legend = false, color = cgrad(:tab20, 21)[Int64(subject[1])])
julia> for i = 2:size(landmark,3)
plot!(landmark[:,1,i], landmark[:,2,i], color = cgrad(:tab20, 21)[Int64(subject[i])])
end
julia> title!("Landmarks")
```

<br />
<br />
Even though we only need the array `landmark` in the MCMC function, we can compute the **SizeAndShape** data (the variables $\mathbf{Y}_i$) used inside the MCMC function with
```julia
julia> sizeshape = sizeshape_helmertproduct_reflection(landmark);
julia> k = size(sizeshape, 1);
```
where `sizeshape[:,:,i]` is $\mathbf{Y}_i$ (we also store the number of marks `k`, which is needed to specify the prior below), and plot them with
```julia
julia> plot(sizeshape[:,1,1], sizeshape[:,2,1],legend = false, color = cgrad(:tab20, 21)[Int64(subject[1])])
julia> for i = 2:size(landmark,3)
plot!(sizeshape[:,1,i], sizeshape[:,2,i], color = cgrad(:tab20, 21)[Int64(subject[i])])
end
julia> title!("Size And Shape")
```

<br />
<br />
The `time` and `subject` information will be used as covariates, and with them we create a `DataFrame`
```julia
julia> subject = dataset_rats.no;
julia> time = dataset_rats.time;
julia> covariates = DataFrame(
time = time,
subject = categorical(string.(subject))
);
```
<br />
<br />
Posterior samples are obtained with the function `SizeAndShapeWithReflectionMCMC`.
The parameters are (in order)
* a three-dimensional `Array` of data;
* a `formula`, where on the left-hand side there must be "landmarks" and on the right-hand side there is the actual regressive formula - an intercept is needed;
* a `DataFrame` of covariates. The formula search for the covariates in the `DataFrame` column names;
* a `NamedTuple` with `iter`, `burnin`, and `thin` values of the MCMC algorithm
* a `Normal` distribution that is used as prior for all regressive coefficients
* an `InverseWishart` distribution that is used as prior for the covariance matrix.
```julia
julia> outmcmc = SizeAndShapeWithReflectionMCMC(
landmark,
@formula(landmarks ~ 1+time + subject),
covariates,
(iter=1000, burnin=200, thin=2),
Normal(0.0,100000.0),#
InverseWishart(k + 2, 5.0 * Matrix{Float64}(I, k, k))
);
```
Posterior samples of the parameters can be extracted using the following:
```julia
julia> betaout = posterior_samples_beta(outmcmc);
julia> sigmaout = posterior_samples_sigma(outmcmc);
```
where `betaout` is a `DataFrame` that contains the posterior samples of $\mathbf{B}\_h$, while `sigmaout` is a `DataFrame` that contains the ones of $\boldsymbol{\Sigma}$. Each row is a posterior sample.
The column names of `betaout` indicate which mark, dimension, and covariate each regressive coefficient is associated with
```julia
julia> names(betaout)
266-element Vector{String}:
"(Intercept)| mark:1| dim:1"
"(Intercept)| mark:2| dim:1"
"(Intercept)| mark:3| dim:1"
"(Intercept)| mark:4| dim:1"
"(Intercept)| mark:5| dim:1"
"(Intercept)| mark:6| dim:1"
"(Intercept)| mark:7| dim:1"
⋮
"subject: 9.0| mark:1| dim:2"
"subject: 9.0| mark:2| dim:2"
"subject: 9.0| mark:3| dim:2"
"subject: 9.0| mark:4| dim:2"
"subject: 9.0| mark:5| dim:2"
"subject: 9.0| mark:6| dim:2"
"subject: 9.0| mark:7| dim:2"
```
while the names of `sigmaout` indicate the row and column indices
```julia
julia> names(sigmaout)
49-element Vector{String}:
"s_(1,1)"
"s_(2,1)"
"s_(3,1)"
"s_(4,1)"
"s_(5,1)"
"s_(6,1)"
"s_(7,1)"
"s_(1,2)"
⋮
"s_(1,7)"
"s_(2,7)"
"s_(3,7)"
"s_(4,7)"
"s_(5,7)"
"s_(6,7)"
"s_(7,7)"
```
It is possible to predict the **mean configuration** by using the function
```julia
julia> predictive_mean = sample_predictive_zbr(outmcmc);
```
which computes the posterior samples of
$$\boldsymbol{\mu}\_i^* = \text{vec}\left(\sum\_{h=1}^dz\_{ih}\mathbf{B}\_h \mathbf{R}\_i\right)$$
or from the predictive distribution
$$\mathbf{Y}\_i^* = \text{vec}\left(\sum_{h=1}^dz\_{ih}\mathbf{B}\_h \mathbf{R}\_i\right)+\boldsymbol{\epsilon}\_{i} ,\qquad \boldsymbol{\epsilon}\_{i}\sim\mathcal{N}\_{k p}\left( \mathbf{0}, \mathbf{I}\_p \otimes \boldsymbol{\Sigma}\right)$$
with the function
```julia
julia> predictive_obs = sample_predictive_zbr_plus_epsilon(outmcmc);
```
Again, the names of the two `DataFrame`s `predictive_mean` and `predictive_obs` indicate which element each column refers to.
```julia
julia> names(predictive_mean)
2016-element Vector{String}:
"mu_1,(1,1)"
"mu_1,(2,1)"
"mu_1,(3,1)"
"mu_1,(4,1)"
"mu_1,(5,1)"
"mu_1,(6,1)"
"mu_1,(7,1)"
"mu_1,(1,2)"
⋮
"mu_144,(1,2)"
"mu_144,(2,2)"
"mu_144,(3,2)"
"mu_144,(4,2)"
"mu_144,(5,2)"
"mu_144,(6,2)"
"mu_144,(7,2)"
```
```julia
julia> names(predictive_obs)
2016-element Vector{String}:
"X_1,(1,1)"
"X_1,(2,1)"
"X_1,(3,1)"
"X_1,(4,1)"
"X_1,(5,1)"
"X_1,(6,1)"
"X_1,(7,1)"
"X_1,(1,2)"
⋮
"X_144,(1,2)"
"X_144,(2,2)"
"X_144,(3,2)"
"X_144,(4,2)"
"X_144,(5,2)"
"X_144,(6,2)"
"X_144,(7,2)"
```
## Citing
See `CITATION.bib`
| BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 0.2.0 | 4214520f385b4d6220a9e63d5a7f355761104b17 | docs | 244 | # BayesSizeAndShape
## Package Features
## Function Documentation
```@docs
SizeAndShapeWithReflectionMCMC
```
```@docs
sizeshape_helmertproduct_reflection
```
# Posterior Samples
```@docs
posterior_samples_sigma
posterior_samples_beta
``` | BayesSizeAndShape | https://github.com/GianlucaMastrantonio/BayesSizeAndShape.jl.git |
|
[
"MIT"
] | 1.0.2 | 950681e4f7ab2182ae61eb632d2db9a188eed2df | code | 32479 | #= License
Copyright 2019, 2020, 2021 (c) Yossi Bokor
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
=#
__precompile__()
module Skyler
#### Requirements ####
using CSV
using DataFrames
using LinearAlgebra
using NearestNeighbors
using Distances
using Optim
using SimpleGraphs
using Random
using Distributions
using QuadGK
using Plots
using JLD
using SpecialFunctions
#using SharedArrays
using Calculus
using NNlib
using FiniteDiff
using ProgressMeter
#### Exports ####
export skyler,
unittest,
Determine_Dimension,
Plot_PPC,
Plot_Graph,
Load_Example,
PartitionedPointCloud,
StratifiedSpace,
EmbeddedStratifiedSpace,
Dimension_Split,
Create_Partition_DataFrame
#### Structures ####
mutable struct PartitionedPointCloud
Points::AbstractArray
Strata::Dict{Int, Set}
Dimensions::Dict{Int, Set}
Boundaries::AbstractArray
end
mutable struct StratifiedSpace
StrataDimensions::Dict{Int, Set}
Boundaries::AbstractArray
end
mutable struct EmbeddedStratifiedSpace
StratSpace::StratifiedSpace
Equations::Dict{Int, Array}
end
#### Background Functions ####
function Spherical_Shell(ball_tree::BallTree, point::Array, inner::T, outer::U) where {T<: Number, U<: Number} #this obtains all the samples in the spherical shell around a chosen sample 'point'
ball_1 = inrange(ball_tree, point, inner, true)
ball_2 = inrange(ball_tree, point, outer, true)
annulus = setdiff(ball_2, ball_1)
return annulus
end
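# Usage sketch (hypothetical input): indices of all samples whose distance from a point
# lies in the half-open shell (inner, outer].
#   sample = randn(2, 100)
#   tree = BallTree(sample)
#   shell = Spherical_Shell(tree, sample[:, 1], 0.5, 1.0)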
function Determine_Dimension(sample::A, ball_tree::BallTree, index::Int, sample_epsilon::P, radius::R) where {P<:Number,R<:Number, A<:Union{AbstractArray, LinearAlgebra.Adjoint}}# determine whether the `index'th point in the sample looks 0 or 1 dimensional, given the coordinates of the other points, a pre-generated ball tree, and the algorithm parameters 'sample_epsilon' and 'radius'
point = sample[:,index]
distance_matrix = pairwise(Euclidean(), sample, dims=2)
node_neighbours = Spherical_Shell(ball_tree, point, 0., radius+sample_epsilon)
node_neighbours = push!(node_neighbours, index)
G_point = IntGraph()
for i in node_neighbours
add!(G_point,i)
end
for i in vlist(G_point)
for j in vlist(G_point)
if distance_matrix[i,j] <= 2*sample_epsilon
add!(G_point, i, j)
end
end
end
n_p = size(sample,2) #number of samples
nc_1 = num_components(G_point)
if nc_1 != 1 #if the graph on the points within a ball around the chosen point is disconnected, then the point is away from any vertex and looks 1-dimensional
return 1
else # if the graph on the points in a ball is connected, then we need to check if 0 or 1 dimensional structure
node_neighbours_1 = Spherical_Shell(ball_tree, point, radius - sample_epsilon, radius + sample_epsilon)
G_point_1 = IntGraph() # construct graph on points in a spherical shell
for i in node_neighbours_1
add!(G_point_1, i)
end
for i in vlist(G_point_1)
for j in vlist(G_point_1)
if distance_matrix[i,j] <= 3*sample_epsilon
add!(G_point_1, i, j)
end
end
end
nc_2 =num_components(G_point_1)
if nc_2 == 2 #if the number of connected components is 2, we need to check if they are roughly opposite each other or not
cc = collect(SimpleGraphs.components(G_point_1))
mid_point_1 = []
for l in 1:size(sample,1)
mid_point_1_l = 0
cc_1 = collect(cc[1])
for p in cc_1
mid_point_1_l += sample[l, p]
end
append!(mid_point_1, mid_point_1_l/(length(cc_1)))
end
mid_point_2= []
for l in 1:size(sample,1)
mid_point_2_l = 0
cc_2 = collect(cc[2])
for p in cc_2
mid_point_2_l += sample[l,p]
end
append!(mid_point_2, mid_point_2_l/(length(cc_2)))
end
mid_point_1 = mid_point_1 .- sample[:,index]
mid_point_2 = mid_point_2 .- sample[:,index]
#cos_of_angle = dot(mid_point_1, mid_point_2)/(norm(mid_point_1)*norm(mid_point_2))
if dot(mid_point_1, mid_point_2) <= -radius^2 +2*radius*sample_epsilon +7*sample_epsilon^2
return 1
else
return 0
end
else #if the number of connected components is not 2, return dimension 0
return 0
end
end
end
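# Illustrative sketch (toy parameters chosen to match the sampling density): probe a cross-shaped sample.
#   sample = hcat([[t, 0.0] for t in -1:0.05:1]..., [[0.0, t] for t in -1:0.05:1]...)
#   tree = BallTree(sample)
#   Determine_Dimension(sample, tree, 11, 0.05, 0.3)  # mid-edge point, expected dimension 1
#   Determine_Dimension(sample, tree, 21, 0.05, 0.3)  # the centre vertex, expected dimension 0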
function Dimension_Split(sample::A, sample_epsilon::S, radius::U) where {S<: Number, U<:Number, A<:AbstractArray}# for each point in the sample, determine if it looks 0 or 1 dimensional
#if nprocs() == 1
n_p = size(sample,2)
ball_tree = BallTree(sample)
dimension_split = Dict{Int,Set}(0 => Set(), 1 =>Set())
println("Calculating local structure for each sample: ")
@showprogress for i in 1:n_p
dim = Determine_Dimension(sample, ball_tree, i, sample_epsilon,radius)
dimension_split[dim]=push!(dimension_split[dim], i)
end
return dimension_split
#else
# n_p = size(sample,2)
# @everywhere ball_tree=BallTree(sample)
#end
end
function Find_Groups(distance_matrix::A, dictionary_of_points::Dict{Int, Set}, dimension::T, connection_threshold::W) where {T <: Number, W<: Number, A<:AbstractArray} # obtain the clusters of points which look 'dimensional' dimensional
points = collect(dictionary_of_points[dimension]) #create a set of points
G = UndirectedGraph()
for i in points
add!(G, i)
end
for i in points
for j in points
if distance_matrix[i,j] <= connection_threshold #connect points below the connection threshold to cluster
add!(G, i, j)
end
end
end
components = []
vertices = vlist(G)
n_v = length(vertices)
checked = Set()
discovered=[]
function DFS(Graph, vertex)
append!(discovered,vertex)
for u in neighbors(Graph, vertex)
if !(u in discovered)
DFS(Graph, u)
end
end
end
for i in vertices
if i in checked
else
push!(checked, i)
discovered =[]
DFS(G, i)
append!(components, [discovered])
for n in discovered
push!(checked, n)
end
end
end
return components
end
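# Usage sketch (hypothetical values): cluster three points of which only two are within the threshold.
#   pts = [0.0 0.1 5.0; 0.0 0.0 0.0]
#   D = pairwise(Euclidean(), pts, dims=2)
#   Find_Groups(D, Dict{Int,Set}(0 => Set([1, 2, 3])), 0, 0.5)  # -> [[1, 2], [3]], up to ordering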
function Generate_Strata_Assignments(distance_matrix::Array, dimension_split::Dict{Int, Set}, v_threshold::T, e_threshold::W) where {T <: Number, W<:Number} # generate the clusters of vertices and edges using the above functions
v_comps = Find_Groups(distance_matrix, dimension_split, 0, v_threshold)
number_of_vertices = length(v_comps)
e_comps = Find_Groups(distance_matrix, dimension_split, 1, e_threshold)
number_of_edges = length(e_comps)
change = []
for i in 1:number_of_edges
if length(e_comps[i]) == 1 # singleton edge clusters are reassigned as vertices
append!(change, e_comps[i][1])
end
end
if length(change) !=0
d_s = copy(dimension_split)
for i in 1:length(change)
d_s[0] = push!(d_s[0], change[i])
d_s[1] = setdiff(d_s[1], Set([change[i]]))
end
v_comps = Find_Groups(distance_matrix, d_s, 0, v_threshold)
number_of_vertices = length(v_comps)
e_comps = Find_Groups(distance_matrix, d_s, 1, e_threshold)
number_of_edges = length(e_comps)
end
strata_assignments = Dict{Int, Set}(1=>Set())
strata_dimensions = Dict{Int,Set}(0=> Set(), 1=>Set())
#println("Detected ", number_of_vertices," vertices and ", number_of_edges, " edges.")
for i in 1:number_of_vertices
strata_assignments[i] =Set(v_comps[i])
strata_dimensions[0] =push!(strata_dimensions[0],i)
end
for i in 1:number_of_edges
strata_assignments[number_of_vertices+i] =Set(e_comps[i])
strata_dimensions[1] =push!(strata_dimensions[1],number_of_vertices+i)
end
return strata_assignments, strata_dimensions
end
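# Note: Generate_Strata_Assignments returns a pair of dictionaries:
# `strata_assignments` maps each stratum id to the Set of point indices it
# contains (vertex strata are numbered before edge strata), while
# `strata_dimensions` maps each dimension (0 or 1) to the Set of stratum ids of
# that dimension.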
function Generate_Abstract_Structure(points::A, dimension_split::Dict{Int, Set}, sample_epsilon::T, vertex_threshold::T, edge_threshold::U) where {U<:Number, T<:Number, A<:AbstractArray} # obtain the abstract structure of the graph
dists = pairwise(Euclidean(), points, dims=2)
corrected = false
while corrected == false
strata_assignments, strata_dimensions = Generate_Strata_Assignments(dists, dimension_split, vertex_threshold, edge_threshold)
max_dim = maximum(keys(strata_dimensions))
n_s = maximum(keys(strata_assignments))
boundaries = zeros(n_s, n_s)
@showprogress for i in 0:max_dim-1
for j in strata_dimensions[i]
s_j = points[:,collect(strata_assignments[j])]
for k in strata_dimensions[i+1]
s_k = points[:,collect(strata_assignments[k])]
d_j_k = Distances.pairwise(Euclidean(), s_j, s_k, dims=2)
if minimum(d_j_k) <= 3*sample_epsilon
boundaries[j,k]=1
end
end
end
end
println("\r The strata are ", strata_dimensions, " and the boundary matrix is ", boundaries,".")
change = []
if isempty(strata_dimensions[1])
corrected = true
StratSpace = StratifiedSpace(strata_dimensions, boundaries)
return StratSpace, strata_assignments
else
for i in strata_dimensions[1]
if sum(boundaries[:,i]) !=2
append!(change, i)
end
end
end
if length(change) == 0
corrected = true
StratSpace = StratifiedSpace(strata_dimensions, boundaries)
return StratSpace, strata_assignments
else
println("Stray edge detected, addressing it.")
for k in 1:length(change)
dimension_split[0] = union(dimension_split[0], strata_assignments[change[k]])
dimension_split[1] = setdiff(dimension_split[1], strata_assignments[change[k]])
end
end
end
end
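# Note: Generate_Abstract_Structure returns a StratifiedSpace (the strata
# dimensions together with the boundary matrix) and the point-to-stratum
# assignments. Any stray edge whose column of the boundary matrix does not sum
# to exactly 2 is re-classified as a vertex and the construction is repeated.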
function Partition_Point_Cloud(points::A, sample_epsilon::P, radius::W, vertex_threshold::T, edge_threshold::U)::PartitionedPointCloud where {P<:Number, W<:Number, T <: Number, U<:Number, A<:AbstractArray}
dimension = size(points,1)
distances = pairwise(Euclidean(), points, dims=2)
dim_split = Dimension_Split(points, sample_epsilon, radius) #partition the sample into 0 or 1 dimensional using a dictionary
#strata_assignments, strata_dimensions = Generate_Strata_Assignments(distances, dim_split, vertex_threshold, edge_threshold) #find the strata for the partioned point cloud
StratSpace, strata_assignments = Generate_Abstract_Structure(points, dim_split, sample_epsilon, vertex_threshold, edge_threshold) #obtain the boundary relations
ppc = PartitionedPointCloud(points, strata_assignments, StratSpace.StrataDimensions, StratSpace.Boundaries)
return ppc
end
#=
Deprecated this was used for modelling the embedding, unsure about future uses
function Generate_Partition(v_clusters, e_clusters)
lengths = []
partitions = []
for i in 1:length(v_clusters)
append!(lengths, length(v_clusters[i]))
append!(partitions, v_clusters[i])
end
for i in 1:length(e_clusters)
append!(lengths, length(e_clusters[i]))
append!(partitions, e_clusters[i])
end
return partitions, lengths
end
=#
function Create_Partition_DataFrame(ppc::PartitionedPointCloud) #create a dataframe we can save the partitioned point cloud in
dimension = size(ppc.Points,1)
df = DataFrame(ppc.Points[:,1]')
insert!(df, 1, [1], :index)
insert!(df, 2, [1], :dimension)
deleterows!(df, 1)
for i in 0:maximum(keys(ppc.Dimensions))
for j in ppc.Dimensions[i]
for k in ppc.Strata[j]
l = [j i]
for n in 1:dimension
l=hcat(l, ppc.Points[n,k])
end
push!(df, l)
end
end
end
return sort(df)
end
function Plot_PPC(ppc::PartitionedPointCloud) #plot the partition with nice colours
Plots.pyplot()
if size(ppc.Points,1) == 2
df = DataFrame(index=Int[], dimension=Int[], c1=[], c2=[])
for i in 0:maximum(keys(ppc.Dimensions))
for j in ppc.Dimensions[i]
for k in ppc.Strata[j]
push!(df, [j,i,ppc.Points[1,k], ppc.Points[2,k]])
end
end
end
Plots.scatter( df[!,:c1],df[!,:c2], group=df[!,:index])
else
println("Points live in 3 or more dimensions, projecting onto the first 3.")
df = DataFrame(index=Int[], dimension=Int[], c1=[], c2=[], c3=[])
for i in 0:maximum(keys(ppc.Dimensions))
for j in ppc.Dimensions[i]
for k in ppc.Strata[j]
push!(df, [j,i,ppc.Points[1,k], ppc.Points[2,k], ppc.Points[3,k]])
end
end
end
Plots.scatter3d( df[!,:c1],df[!,:c2], df[!,:c3], group=df[!,:index])
end
end
function Plot_PPC(partition::DataFrame)
Plots.pyplot()
dimension = size(partition[!,:point][1],1)
if dimension == 2
points = Array{Float64}(undef, 0,2)
for i in 1:size(partition,1)
points = vcat(points, partition[i,:point]')
end
Plots.scatter( points[:,1], points[:,2], group=partition[!,:index])
else
println("Points live in 3 or more dimensions, projecting onto the first 3.")
points = Array{Float64}(undef, 0,3)
for i in 1:size(partition,1)
points = vcat(points, partition[i,:point][1:3]')
end
Plots.scatter3d( points[:,1], points[:,2], points[:,3], group=partition[!,:index])
end
end
function Plot_PPC(partition::DataFrame, c_1::Int, c_2::Int)
Plots.pyplot()
println("Projecting on to the following coordinates: $c_1 and $c_2.")
points = Array{Float64}(undef, 0,2)
for i in 1:size(partition,1)
points = vcat(points, [partition[i,:point][c_1] partition[i,:point][c_2]])
end
Plots.scatter( points[:,1], points[:,2], group=partition[!,:index])
end
function Plot_PPC(partition::DataFrame, c_1::Int, c_2::Int, c_3::Int)
Plots.pyplot()
println("Projecting on to the following coordinates: $c_1, $c_2 and $c_3.")
points = Array{Float64}(undef, 0,3)
for i in 1:size(partition,1)
points = vcat(points, [partition[i,:point][c_1] partition[i,:point][c_2] partition[i,:point][c_3]])
end
Plots.scatter3d( points[:,1], points[:,2], points[:,3], group=partition[!,:index])
end
function Plot_Graph(vertex_locations, edges, sample) #plot a graph (either exact or modelled)
Plots.pyplot()
if length(sample) == 2
scatter(vertex_locations[sample[1], :], vertex_locations[sample[2], :], s=1)
for i in 1:(size(edges,1)-1)
plot!([vertex_locations[sample[1], edges[i,1]], vertex_locations[sample[1],edges[i,2]]], [vertex_locations[sample[2], edges[i,1]], vertex_locations[sample[2], edges[i,2]]], linestyle = :solid)
end
i=size(edges,1)
plot!([vertex_locations[sample[1], edges[i,1]], vertex_locations[sample[1],edges[i,2]]], [vertex_locations[sample[2], edges[i,1]], vertex_locations[sample[2], edges[i,2]]], linestyle = :solid)
elseif length(sample) == 3
scatter3d(vertex_locations[sample[1], :], vertex_locations[sample[2], :], vertex_locations[sample[3], :],s=1)
for i in 1:(size(edges,1)-1)
plot!([vertex_locations[sample[1], edges[i,1]], vertex_locations[sample[1], edges[i,2]]], [vertex_locations[sample[2], edges[i,1]], vertex_locations[sample[2], edges[i,2]]], [vertex_locations[sample[3], edges[i,1]], vertex_locations[sample[3], edges[i,2]]], linestyle =:solid)
end
i=size(edges,1)
plot!([vertex_locations[sample[1], edges[i,1]], vertex_locations[sample[1], edges[i,2]]], [vertex_locations[sample[2], edges[i,1]], vertex_locations[sample[2], edges[i,2]]], [vertex_locations[sample[3], edges[i,1]], vertex_locations[sample[3], edges[i,2]]], linestyle =:solid)
else
println("Please specify 2 or 3 sample to project onto.")
return []
end
end
function Plot_Graph(vertex_locations, edges) #plot a graph (either exact or modelled)
Plots.pyplot()
if size(vertex_locations,1) == 2
scatter(vertex_locations[1, :], vertex_locations[2, :], s=1)
for i in 1:(size(edges,1)-1)
plot!([vertex_locations[1, edges[i,1]], vertex_locations[1,edges[i,2]]], [vertex_locations[2, edges[i,1]], vertex_locations[2, edges[i,2]]], linestyle = :solid)
end
i=size(edges,1)
plot!([vertex_locations[1, edges[i,1]], vertex_locations[1,edges[i,2]]], [vertex_locations[2, edges[i,1]], vertex_locations[2, edges[i,2]]], linestyle = :solid)
elseif size(vertex_locations,1) == 3
scatter3d(vertex_locations[1, :], vertex_locations[2, :], vertex_locations[3, :],s=1)
for i in 1:(size(edges,1)-1)
plot!([vertex_locations[1, edges[i,1]], vertex_locations[1, edges[i,2]]], [vertex_locations[2, edges[i,1]], vertex_locations[2, edges[i,2]]], [vertex_locations[3, edges[i,1]], vertex_locations[3, edges[i,2]]], linestyle =:solid, c=:red)
end
i=size(edges,1)
plot!([vertex_locations[1, edges[i,1]], vertex_locations[1, edges[i,2]]], [vertex_locations[2, edges[i,1]], vertex_locations[2, edges[i,2]]], [vertex_locations[3, edges[i,1]], vertex_locations[3, edges[i,2]]], linestyle =:solid, c=:red)
else
println("Projecting onto the first 3 coordinates.")
scatter3d(vertex_locations[1, :], vertex_locations[2, :], vertex_locations[3, :],s=1)
for i in 1:(size(edges,1)-1)
plot!([vertex_locations[1, edges[i,1]], vertex_locations[1, edges[i,2]]], [vertex_locations[2, edges[i,1]], vertex_locations[2, edges[i,2]]], [vertex_locations[3, edges[i,1]], vertex_locations[3, edges[i,2]]], linestyle =:solid)
end
i=size(edges,1)
plot!([vertex_locations[1, edges[i,1]], vertex_locations[1, edges[i,2]]], [vertex_locations[2, edges[i,1]], vertex_locations[2, edges[i,2]]], [vertex_locations[3, edges[i,1]], vertex_locations[3, edges[i,2]]], linestyle =:solid)
end
end
function model_fit(data, muT, sigma, E, S; EM_it = 3)
#####################################################################################
#### Functions for modelling the underlying structure ####
#=
To update:
-keeps sigma fixed at the moment, this can be made optional
=#
#=
MAKE SURE S AND MUT ARE IN THE SAME ORDER
=#
#=
notation:
n --- Number of data points
d --- Dimension of data
N --- Number of strata pieces
N_0 --- Number of 0-dim strata pieces
N_1 --- Number of 1-dim strata pieces
data --- n dim array of d arrays --- Sample from embedded graph
mu --- N_0 by d dim array --- Initial vertex sample locations
sigma --- N dim array --- Initial error on each strata piece
E --- n by N array --- Initial assignment for each data point
S --- N dim str array --- Abstract graph vertex/edge names in that order
=#
# data is a n dim array of d dimensional vectors
#####################################################################################
####pre-processing
#get size of different dimensional strata
#println("S starts as $S")
#println("mu starts as $muT")
N_0 = size(muT,1)
mu = [muT[i,:] for i in 1:N_0]
N_1 = size(S ,1) - N_0
N = N_0 + N_1
n = size(data,1)
d = size(data[1],1)
#load parameters
E = [E[i,:] for i = 1:n]
Pi = sum(E)./n
#println("sigma in EM is", sigma)
MU = Dict{Any,Array}(S[i] => mu[i] for i = 1:N_0)
SIGMA = Dict{Any,Float64}(S[i] => sigma[i] for i = 1:N)
PI = Dict{Any,Float64}(S[i] => Pi[i] for i =1:N)
#println("MU: ", MU)
#println("SIGMA: ", SIGMA)
#println("PI: ", PI)
function reload_param(mu_t,sigma_t,Pi_t; S_t = S, N_0_t = N_0, N_t = N)
#updates dictionary values for:
#mu_t N_0 by d float array
#SIGMA_t N float array
#Pi N_float array
MU_t = Dict{Any,Array}(S_t[i] => mu_t[i] for i = 1:N_0_t)
SIGMA_t = Dict{Any,Float64}(S_t[i] => sigma_t[i] for i = 1:N_t)
PI_t = Dict{Any,Float64}(S_t[i] => Pi_t[i] for i = 1:N_t)
return MU_t, SIGMA_t, PI_t
end
####density functions
#0-dim strata
function gaussian(x,v,s)
linear = x - v
return exp(-dot(linear, linear)/(2*s))
end
function p_e(x,v,s)
d = size(x)[1]
den = gaussian(x,v,s)/((2*pi*s)^(d/2))
return den
end
function log_p_e(x,v,s)
d = size(x)[1]
linear = x - v
den= -(d/2)*log(2*pi*s^2) - dot(linear,linear)/(2*s^2)
return den
end
#1-dim strata
#with clipping
function p_l(xj, v1, v2, s)
d = size(xj,1)
xj = - xj
den = (exp((4*dot(v1 - v2, (v1 + v2)/2 + xj)^2 - 4*norm(v1 - v2)^2*norm((v1 + v2)/2 + xj)^2)/(8*s^2*norm(v1 - v2)^2))*pi^(1/2 - d/2)*(-erf((2*dot(v1 - v2, (v1 + v2)/2 + xj) - norm(v1 - v2)^2)/(2*sqrt(2)*s*norm(v1 - v2))) + erf((2*dot(v1 - v2, (v1 + v2)/2 + xj) + norm(v1 - v2)^2)/(2*sqrt(2)*s*norm(v1 - v2)))))/norm(v1 - v2)
if den <= -6
den = -6
end
return den
end
function log_p_l(Xj, V1, V2, s)
d = size(Xj,1)
Xj = - Xj
den = log(2^((-1 - d)/2)*pi^(1/2 - d/2)*s^(1 - d)) +
log(-erf((2*dot(V1 - V2, (V1 + V2)/2 + Xj) - norm(V1 - V2)^2)/(2*sqrt(2)*s*norm(V1 - V2))) +
erf((2*dot(V1 - V2, (V1 + V2)/2 + Xj) + norm(V1 - V2)^2)/(2*sqrt(2)*s*norm(V1 - V2)))) -
log(norm(V1 - V2)) + (4*dot(V1 - V2, (V1 + V2)/2 + Xj)^2 - 4*norm(V1 - V2)^2*norm((V1 + V2)/2 + Xj)^2)/
(8*s^2*norm(V1 - V2)^2)
if den <= -6
den = -6
end
return den
end
#density evaluation matrix
function densities(MU_t,SIGMA_t; S_t = S, N_0 = N_0 , N_1 = N_1, N = N)
zero_dim = [x -> p_e(x,MU_t[strat],SIGMA_t[strat]) for strat = S_t[1:N_0]]
one_dim = [x -> p_l(x,MU_t[strat[2]],MU_t[strat[1]],SIGMA_t[strat]) for strat = S_t[N_0+1:N]]
return one_dim # vcat(zero_dim,one_dim)
end
function log_densities(mu_t,SIGMA_t; S_t = S, N_0 = N_0 , N_1 = N_1, N = N)
MU_t = Dict{Any,Array}(S_t[i] => mu_t[i] for i = 1:N_0)
zero_dim = [x -> log_p_e(x,MU_t[strat],SIGMA_t[strat]) for strat = S_t[1:N_0]]
one_dim = [x -> log_p_l(x,MU_t[strat[1]],MU_t[strat[2]],SIGMA_t[strat]) for strat = S_t[N_0+1:N]]
return vcat(zero_dim,one_dim)
end
function density_matrix(data,density_functions)
dm = [ (broadcast(rho, data)) for rho in density_functions ]
dm = hcat(dm...)
dm = [dm[i,:] for i = 1:n]
return dm
end
####cost construction
#expectation matrix for current parameters
function expectation(data,MU_t,SIGMA_t,Pi_t)
DM = density_matrix(data,log_densities(MU_t,SIGMA_t))
logpi = log.(Pi_t)
E = broadcast(t -> logpi .+ t, DM)
E = broadcast(t -> softmax(t), E)
return E
end
#cost for current vertex values
function Cost(data,mu0,SIGMA_t, Pi, E)
mu_t = [mu0[i,:] for i in 1:N_0]
log_DM = density_matrix(data,log_densities(mu_t,SIGMA_t))
#not needed in optimisation but can be checked for EM
#log_pi = log.(Pi)
#log_sum = broadcast(t -> t + log_pi, log_DM)
w_sum = broadcast((e,t) -> sum(e.*t), E, log_DM)
return sum(w_sum)/n
end
####analytic gradient construction
#gradient values for cost function
function grad_log_p_l(k, Xj, V1, V2, s)
xj = Xj[k]
v1 = V1[k]
v2 = V2[k]
if dot(V2 - V1, Xj) > dot(V1, V2 - V1)
xj = xj - ((dot(V2 - V1, Xj) - dot(V1, V2 - V1) )/(norm(V2 - V1)^2))*(v2 - v1)
Xj = Xj - ((dot(V2 - V1, Xj) - dot(V1, V2 - V1) )/(norm(V2 - V1)^2)).*(V2 - V1)
elseif dot(V2 - V1, Xj) < dot(V2, V1 - V2)
xj = xj - ((dot(V2 - V1, Xj) - dot(V2, V1 - V2) )/(norm(V2 - V1)^2))*(v2 - v1)
Xj = Xj - ((dot(V2 - V1, Xj) - dot(V2, V1 - V2) )/(norm(V2 - V1)^2)).*(V2 - V1)
end
g_v1 = -(v1 + v2 + 2*xj)/(4*s^2) + (2*(-1 + exp(dot(V1 - V2, (V1 + V2)/2 + Xj)/s^2))*(v1 - v2)*
dot(V1 - V2, (V1 + V2)/2 + Xj) + (3*v1 - v2 + 2*xj - exp(dot(V1 - V2, (V1 + V2)/2 + Xj)/s^2)*
(v1 + v2 + 2*xj))*norm(V1 - V2)^2)/
(exp((2*dot(V1 - V2, (V1 + V2)/2 + Xj) + norm(V1 - V2)^2)^2/(8*s^2*norm(V1 - V2)^2))*sqrt(2*pi)*s*
(erf((-2*dot(V1 - V2, (V1 + V2)/2 + Xj) + norm(V1 - V2)^2)/(2*sqrt(2)*s*norm(V1 - V2))) +
erf((2*dot(V1 - V2, (V1 + V2)/2 + Xj) + norm(V1 - V2)^2)/(2*sqrt(2)*s*norm(V1 - V2))))*
norm(V1 - V2)^3) + ((-v1 + v2)*dot(V1 - V2, (V1 + V2)/2 + Xj)^2 +
(s^2*(-v1 + v2) + (v1 + xj)*dot(V1 - V2, (V1 + V2)/2 + Xj))*norm(V1 - V2)^2)/(s^2*norm(V1 - V2)^4)
g_v2 = -(v1 + v2 + 2*xj)/(4*s^2) + (-2*(-1 + exp(dot(V1 - V2, (V1 + V2)/2 + Xj)/s^2))*(v1 - v2)*
dot(V1 - V2, (V1 + V2)/2 + Xj) + (-v1 - v2 - 2*xj + exp(dot(V1 - V2, (V1 + V2)/2 + Xj)/s^2)*
(-v1 + 3*v2 + 2*xj))*norm(V1 - V2)^2)/
(exp((2*dot(V1 - V2, (V1 + V2)/2 + Xj) + norm(V1 - V2)^2)^2/(8*s^2*norm(V1 - V2)^2))*sqrt(2*pi)*s*
(erf((-2*dot(V1 - V2, (V1 + V2)/2 + Xj) + norm(V1 - V2)^2)/(2*sqrt(2)*s*norm(V1 - V2))) +
erf((2*dot(V1 - V2, (V1 + V2)/2 + Xj) + norm(V1 - V2)^2)/(2*sqrt(2)*s*norm(V1 - V2))))*
norm(V1 - V2)^3) + ((v1 - v2)*dot(V1 - V2, (V1 + V2)/2 + Xj)^2 -
(s^2*(-v1 + v2) + (v2 + xj)*dot(V1 - V2, (V1 + V2)/2 + Xj))*norm(V1 - V2)^2)/(s^2*norm(V1 - V2)^4)
return g_v1, g_v2
end
function grad_log_p_l_n(k, Xj, V1, V2, s)
xj = Xj[k]
v1 = V1[k]
v2 = V2[k]
if dot(V2 - V1, Xj) > dot(V1, V2 - V1)
xj = xj - ((dot(V2 - V1, Xj) - dot(V1, V2 - V1) )/(norm(V2 - V1)^2))*(v2 - v1)
Xj = Xj - ((dot(V2 - V1, Xj) - dot(V1, V2 - V1) )/(norm(V2 - V1)^2)).*(V2 - V1)
elseif dot(V2 - V1, Xj) < dot(V2, V1 - V2)
xj = xj - ((dot(V2 - V1, Xj) - dot(V2, V1 - V2) )/(norm(V2 - V1)^2))*(v2 - v1)
Xj = Xj - ((dot(V2 - V1, Xj) - dot(V2, V1 - V2) )/(norm(V2 - V1)^2)).*(V2 - V1)
end
function V_r(v_r, V)
V[k] = v_r
return V
end
log_p_l_proj = v_r -> log_p_l(Xj, V_r(v_r[1], V1), V_r(v_r[2], V2), s)
g = Calculus.gradient(log_p_l_proj)
return g([v1, v2])
end
function grad_log_cost!(Grad, data, mu_t, S_t, E_list, SIGMA_t)
n = size(data,1)
d = size(data[1],1)
grad = Dict{Any,Array}(S_t[i] => zeros(Float64, 1 , d) for i = 1:N_0)
Mu_t = Dict{Any,Array}(S_t[i] => mu_t[i,:] for i = 1:N_0)
E_array = vcat(transpose(E_list)...)
E_t = Dict{Any,Array}(S_t[i] => E_array[:,i] for i = 1:N)
for s in S_t
if s isa Integer
grad[s] += transpose(sum(E_t[s].*broadcast( x -> ((MU[s] - x)./SIGMA[s]), data))/n)
elseif s isa Tuple
for k in 1:d
for j in 1:n
g_v1, g_v2 = grad_log_p_l_n(k, data[j], Mu_t[s[1]], Mu_t[s[2]], SIGMA_t[s])
grad[s[1]][k] += E_t[s[1]][j]*g_v1/n
grad[s[2]][k] += E_t[s[2]][j]*g_v2/n
#println(grad[s[1]][k],grad[s[2]][k])
end
end
end
end
for i in 1:N_0
Grad[i,:] = - grad[S_t[i]]
end
end
####EM algorithm
mu00 = vcat([m for m in mu]'...)
for it = 1:EM_it
#E step
E = expectation(data,MU,SIGMA,Pi)
Pi = sum(E)./n
#M step
mu0 = vcat([m for m in mu]'...)
C = t -> -Cost(data,t,SIGMA, Pi ,E)
#swap if don't want gradient clipping or set tol = Inf
#G! = (g,t) -> FiniteDiff.finite_difference_gradient!(g,C,t)
function G!(storage,t; tol = 0.1)
FiniteDiff.finite_difference_gradient!(storage,C,t)
storage[abs.(storage) .> tol] = sign.(storage[abs.(storage) .> tol])*tol
return
end
results = Optim.optimize(C,G!,mu0,GradientDescent(),Optim.Options(g_tol = 1e-2))
mu1 = Optim.minimizer(results)
mu = [mu1[i,:] for i in 1:N_0]
#println("Current Pi:")
#println(Pi)
#println("Current Sigma:")
#println(sigma)
#println("Current vertices:")
#println(mu)
print("For iteration: ", it," cost is: ", -C(mu1) + sum(log.(Pi)) )
#Update parameters
MU, SIGMA, PI = reload_param(mu, sigma, Pi ; S_t = S, N_0_t = N_0, N_t = N)
#println(MU)
end
return MU, SIGMA, PI, E
end
function Save_PPC(ppc::PartitionedPointCloud, path::String)
jldopen(path, "w") do file
addrequire(file, Skyler)
addrequire(file, LinearAlgebra)
write(file, "PartitionedPointCloud", ppc)
end
end # Save_PPC
#### Main functions and wrappers ####
function skyler(points::A, sample_epsilon::P, radius::R; out = "model", EM_it = 3, sig = sample_epsilon/2) where {P<:Number, R<:Number, A<:Union{LinearAlgebra.Adjoint,AbstractArray}} #wrapper for obtaining the partition
dimension = size(points,1)
ppc = Partition_Point_Cloud(points, sample_epsilon, radius, (3*radius)/2 + 2*sample_epsilon, 3*sample_epsilon)
if out == "PPC"
return ppc
elseif out == "DF"
return Create_Partition_DataFrame(ppc)
elseif out == "model"
vertices = size(collect(ppc.Dimensions[0]),1)
vertex_guess = Array{Float64}(undef, dimension,0)
for i in 1:vertices
v_i = zeros(dimension, 1)
for j in collect(ppc.Strata[i])
v_i .+= points[:, j]
end
v_i = v_i/(length(collect(ppc.Strata[i])))
vertex_guess = hcat(vertex_guess, v_i)
end
#println("Vertex guess is: ",vertex_guess)
E = zeros(size(points,2), size(ppc.Boundaries,1))
for i in 1:vertices
for j in collect(ppc.Strata[i])
E[j,i] =1
end
end
for i in collect(ppc.Dimensions[1])
for j in collect(ppc.Strata[i])
E[j,i] =1
end
end
sigma = [sig for i in 1:size(ppc.Boundaries,1)]
S = []
for i in collect(ppc.Dimensions[0])
push!(S, i)
end
for k in 1:maximum(keys(ppc.Dimensions))
for i in collect(ppc.Dimensions[k])
t =tuple(findall(x->x==1, ppc.Boundaries[:,i])...,)
push!(S, t)
end
end
data = [points[:,i] for i in 1:size(points,2)] #the data as a vector of d-dimensional points
v_guess = permutedims(vertex_guess) #transpose so that vertices are rows and coordinates are columns
#println("S is $S")
MU, SIGMA, PI, E = model_fit(data, v_guess, sigma, E, S, EM_it= EM_it)
mu_ans = collect(values(MU))
mu_ans = vcat([datum' for datum in mu_ans]...)
return ppc, mu_ans', vertex_guess
else
println("Error: output option 'out' selected is not valid, please choose from 'model' to obtain a model of the underlying structure, or 'struct' to receive a DataFrame indicating which strata a point as been associated with and the dimension of this strata, and the boundary relations.")
return []
end
end
function skyler(ppc::PartitionedPointCloud, sample_epsilon::P; EM_it=3, sig=sample_epsilon/2) where {P<:Number} #wrapper for modelling a point cloud
points=ppc.Points
dimension = size(points,1)
vertices = size(collect(ppc.Dimensions[0]),1)
vertex_guess = Array{Float64}(undef, dimension,0)
for i in 1:vertices
v_i = zeros(dimension, 1)
for j in collect(ppc.Strata[i])
v_i .+= points[:, j]
end
v_i = v_i/(length(collect(ppc.Strata[i])))
vertex_guess = hcat(vertex_guess, v_i)
end
println("Vertex guess is: ",vertex_guess)
E = zeros(size(points,2), size(ppc.Boundaries,1))
for i in 1:vertices
for j in collect(ppc.Strata[i])
E[j,i] =1
end
end
for i in collect(ppc.Dimensions[1])
for j in collect(ppc.Strata[i])
E[j,i] =1
end
end
sigma = [sig for i in 1:size(ppc.Boundaries,1)]
S = []
for i in collect(ppc.Dimensions[0])
push!(S, i)
end
for k in 1:maximum(keys(ppc.Dimensions))
for i in collect(ppc.Dimensions[k])
t =tuple(findall(x->x==1, ppc.Boundaries[:,i])...,)
push!(S, t)
end
end
data = [points[:,i] for i in 1:size(points,2)] #the data as a vector of d-dimensional points
v_guess = permutedims(vertex_guess) #transpose so that vertices are rows and coordinates are columns
MU, SIGMA, PI, E = model_fit(data, v_guess, sigma, E, S, EM_it=EM_it)
mu_ans = collect(values(MU))
mu_ans = vcat([datum' for datum in mu_ans]...)
return mu_ans', vertex_guess
end #skyler for modelling a partitioned point cloud
#### Functions for examples ####
function Load_Example(i)
dir = pwd()
cd(@__DIR__)
cd("..")
cd("Examples")
points = Matrix(CSV.read("Sample-$i.csv", DataFrame, header=false))
cd(dir)
return points
end
#### unittest ####
function test_1()
dir = pwd()
cd(@__DIR__)
cd("..")
cd("Examples")
points = Matrix(CSV.read("Sample-1.csv", DataFrame, header=false))
saved_partition = CSV.read("Partition-1.csv", DataFrame)
cd(dir) #restore the original working directory
ppc = skyler(points, 0.01, 0.12, out="PPC")
par = Create_Partition_DataFrame(ppc)
if par == saved_partition
return []
else
println("Error: test_1 not passed.")
return par
end
end
function test_2()
dir = pwd()
cd(@__DIR__)
cd("..")
cd("Examples")
points = Matrix(CSV.read("Sample-2.csv", DataFrame, header=false))'
saved_partition = CSV.read("Partition-2.csv", DataFrame)
cd(dir) #restore the original working directory
ppc = skyler(points, 0.1, 1.2, out="PPC")
par = Create_Partition_DataFrame(ppc)
if par == saved_partition
return []
else
println("Error: test_2 not passed.")
return par
end
end
function unittest()
x = Array{Any}(undef, 2)
x[1] = test_1() #expected answer empty
x[2] = test_2() #expected answer empty
for p = 1:length(x)
if !isempty(x[p])
println(p)
return x
end
end
return []
end
end #module
| Skyler | https://github.com/yossibokor/Skyler.jl.git |
|
[
"MIT"
] | 1.0.2 | 950681e4f7ab2182ae61eb632d2db9a188eed2df | docs | 7975 | # Documentation
## Licensing Information
Skyler is licensed under an MIT license <https://opensource.org/licenses/MIT>.
You should have received a copy of the MIT Public License along with Skyler. If not, please see <https://opensource.org/licenses/MIT>.
[](https://zenodo.org/badge/latestdoi/252151758)
```
Begin license text.
Skyler
Copyright (C) 2020 Yossi Bokor
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
End license text.
```
Skyler is free software: you can redistribute it and/or modify
it under the terms of the MIT License as published by
the Open Source Initiative
Skyler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License for more details.
## Contributors
Skyler is produced and maintained by
Yossi Bokor \
<[email protected]> \
[Personal Webpage](http://yossi.eu) \
and \
Christopher Williams\
<[email protected]>
## Installation
To install Skyler, run the following in `Julia`:
```julia
using Pkg
Pkg.add("Skyler")
```
## Functionality
- Skyler identifies the coarsest abstract graph structure underlying a point cloud, and models it. Currently, we are restricted to graphs with linear edges which satisfy conditions detailed below.
- You can read the article [Reconstructing linearly embedded graphs: A first step to stratified space learning](https://www.aimsciences.org/article/doi/10.3934/fods.2021026) which introduces the algorithm used in Skyler.
### Obtaining Abstract Structure
As input Skyler accepts a point cloud as a $d \times n$ array, a parameter $\varepsilon$, and a radius. The parameter $\varepsilon$ is such that the point cloud is an $\varepsilon$-sample of an embedded graph $|G|$. The radius relates to assumptions on the embedding of $|G|$, and its derivation can be found in [Reconstructing linearly embedded graphs: A first step to stratified space learning](https://www.aimsciences.org/article/doi/10.3934/fods.2021026). To obtain the abstract graph $G$, run
```julia
ppc, model_verts, avg_verts = skyler(points, sample_epsilon, radius; EM_it=3, sig=sample_epsilon/2)
```
which returns a PartitionedPointCloud `ppc`, an array `model_verts` containing the optimized vertex locations, and an array `avg_verts` containing the average point of each vertex cluster.
A `PartitionedPointCloud` has the following fields:
- `Points`, which is an array of all the points,
- `Strata`, a `Dict{Int, Set}` which records which points have been assigned to each stratum,
- `Dimensions`, a `Dict{Int, Set}` listing which strata have each dimension,
- `Boundaries`, an array which represents the boundary operator.
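For example, assuming `ppc` is a `PartitionedPointCloud` returned as above, the fields can be inspected directly:
```julia
ppc.Points      # d x n array of the sample points
ppc.Strata      # Dict{Int, Set}: stratum id => indices of the points assigned to it
ppc.Dimensions  # Dict{Int, Set}: dimension (0 or 1) => ids of the strata with that dimension
ppc.Boundaries  # square 0/1 array representing the boundary operator
```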
### Modeling the Embedding
To model the underlying structure, use
```julia
ppc, model_verts, avg_verts = skyler(points, sample_epsilon, radius)
```
which returns, in `model_verts`, a $d \times n_v$ array, where $n_v$ is the number of vertices detected, with each column a modelled vertex location.
## Examples
Skyler comes with some point clouds that you can work through as examples. To load the points for each example, run
```julia
points = Load_Example(i)
```
where `i` is the example number.
#### Example 1
Example 1 is a point cloud sampled from a line segment. Load the sample using
```julia
points = Load_Example(1)
```
Then run Skyler to obtain the abstract structure and partition by executing
```julia
ppc = skyler(points, 0.01, 0.12, out="PPC")
```
which should result in output similar to the following:
```julia
The strata are Dict{Int64,Set}(0 => Set(Any[2, 1]),1 => Set(Any[3])) and the boundary matrix is [0.0 0.0 1.0; 0.0 0.0 1.0; 0.0 0.0 0.0].
PartitionedPointCloud([-0.009566763308567242 1.9932036826253814 … 1.999301914407944 2.003116429018376; -0.0028076084902411745 0.9975271026710401 … 0.9951311441125332 0.99744890927049], Dict{Int64,Set}(2 => Set(Any[2, 441, 442, 432, 447, 428, 430, 437, 435, 431 … 434, 429, 444, 436, 446, 439, 440, 450, 438, 449]),3 => Set(Any[288, 306, 29, 300, 289, 74, 176, 57, 285, 318 … 341, 186, 321, 420, 423, 271, 23, 315, 322, 218]),1 => Set(Any[12, 4, 18, 3, 16, 11, 5, 21, 20, 7, 9, 13, 10, 14, 19, 17, 8, 15, 6, 1])), Dict{Int64,Set}(0 => Set(Any[2, 1]),1 => Set(Any[3])), [0.0 0.0 1.0; 0.0 0.0 1.0; 0.0 0.0 0.0])
```
#### Example 2
Example 2 is a point cloud sampled from a graph with 5 edges and 5 vertices. Load the sample using
```julia
points = Load_Example(2)
```
Then run Skyler to obtain the abstract structure and partition by executing
```julia
ppc = skyler(points, 0.1, 1.2, out="PPC")
```
which should result in output similar to the following:
```julia
The strata are Dict{Int64,Set}(0 => Set(Any[4, 2, 3, 5, 1]),1 => Set(Any[7, 9, 10, 8, 6])) and the boundary matrix is [0.0 0.0 0.0 0.0 0.0 1.0 1.0 0.0 0.0 1.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 1.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 1.0; 0.0 0.0 0.0 0.0 0.0 0.0 1.0 1.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0].
PartitionedPointCloud([-0.007670121199078972 -0.09959009503870764 … 4.8232765059435225 4.923038846261083; -0.007670121199078972 -0.09959009503870764 … 0.5335914379931135 0.5214683517413553; -0.007670121199078972 -0.09959009503870764 … 3.4192839435193365 3.4814584770681027], Dict{Int64,Set}(7 => Set(Any[241, 197, 215, 249, 207, 201, 283, 252, 182, 279 … 268, 281, 243, 191, 222, 277, 271, 255, 218, 276]),4 => Set(Any[288, 306, 300, 296, 428, 289, 435, 20, 285, 448 … 24, 429, 427, 446, 439, 23, 305, 438, 449, 301]),9 => Set(Any[532, 520, 491, 478, 542, 499, 477, 509, 494, 521 … 519, 560, 540, 535, 562, 485, 502, 498, 496, 508]),10 => Set(Any[633, 658, 654, 624, 611, 614, 625, 612, 616, 664 … 629, 666, 667, 646, 663, 657, 640, 676, 661, 659]),2 => Set(Any[461, 11, 464, 462, 8, 323, 458, 318, 459, 308 … 319, 456, 321, 454, 312, 317, 463, 472, 315, 322]),3 => Set(Any[584, 574, 698, 699, 566, 582, 573, 569, 694, 14 … 576, 688, 695, 578, 689, 580, 687, 686, 15, 581]),5 => Set(Any[148, 136, 25, 147, 29, 151, 144, 155, 142, 150 … 26, 146, 138, 145, 28, 149, 27, 137, 141, 30]),8 => Set(Any[329, 370, 365, 391, 400, 342, 384, 375, 372, 407 … 415, 341, 378, 389, 420, 423, 424, 358, 349, 405]),6 => Set(Any[89, 134, 131, 74, 57, 78, 112, 70, 106, 121 … 81, 98, 51, 73, 119, 53, 116, 123, 56, 108]),1 => Set(Any[47, 32, 2, 40, 587, 171, 39, 46, 158, 43 … 5, 45, 163, 168, 588, 603, 164, 602, 41, 1])…), Dict{Int64,Set}(0 => Set(Any[4, 2, 3, 5, 1]),1 => Set(Any[7, 9, 10, 8, 6])), [0.0 0.0 … 0.0 1.0; 0.0 0.0 … 1.0 0.0; … ; 0.0 0.0 … 0.0 0.0; 0.0 0.0 … 0.0 0.0])
```
| Skyler | https://github.com/yossibokor/Skyler.jl.git |
|
[
"MIT"
] | 0.3.0 | e645e4b13d17a941b6fc2dfe8c11431e6e59cd88 | code | 1156 | module SmolyakApprox
import ChebyshevApprox: chebyshev_extrema,
normalize_node,
chebyshev_polynomial,
chebyshev_polynomial_deriv,
chebyshev_polynomial_sec_deriv
using LinearAlgebra
include("smolyak_approx_functions.jl")
export SApproxPlan
export chebyshev_gauss_lobatto,
clenshaw_curtis_equidistant,
smolyak_grid,
smolyak_plan,
smolyak_weights,
smolyak_weights_threaded,
smolyak_inverse_interpolation_matrix,
smolyak_inverse_interpolation_matrix_threaded,
smolyak_pl_weights,
smolyak_pl_weights_threaded,
smolyak_polynomial,
smolyak_evaluate,
smolyak_pl_evaluate,
smolyak_interp,
smolyak_interp_threaded,
smolyak_derivative,
smolyak_gradient,
smolyak_gradient_threaded,
smolyak_hessian,
smolyak_hessian_threaded,
smolyak_integrate,
smolyak_clenshaw_curtis,
smolyak_grid_full,
smolyak_weights_full,
smolyak_evaluate_full,
smolyak_derivative_full,
smolyak_gradient_full
end
| SmolyakApprox | https://github.com/RJDennis/SmolyakApprox.jl.git |
|
[
"MIT"
] | 0.3.0 | e645e4b13d17a941b6fc2dfe8c11431e6e59cd88 | code | 53908 | abstract type SApproximationPlan end
struct SApproxPlan{S<:Integer,T<:AbstractFloat} <: SApproximationPlan
node_type::Symbol
grid::Union{Array{T,1},Array{T,2}}
multi_index::Union{Array{S,1},Array{S,2}}
domain::Union{Array{T,1},Array{T,2}}
end
const chebyshev_gauss_lobatto = chebyshev_extrema
function clenshaw_curtis_equidistant(n::S,domain = [1.0,-1.0]) where {S<:Integer}
# Construct the nodes on the [-1.0,1.0] interval
if n <= 0
error("The number of nodes must be positive.")
end
if n == 1
nodes = [0.0]
else
nodes = zeros(n)
nodes[1] = -1.0
nodes[n] = 1.0
for i = 2:div(n,2)
nodes[i] = 2*(i-1)/(n-1)-1.0
nodes[end-i+1] = -2*(i-1)/(n-1)+1.0
end
if isodd(n)
nodes[div(n+1,2)] = 0.0
end
end
# Scale the nodes to the desired domain
nodes = scale_nodes(nodes,domain)
return nodes
end
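# For example, on the default domain clenshaw_curtis_equidistant(5) returns the
# equally spaced nodes [-1.0, -0.5, 0.0, 0.5, 1.0].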
# This function relates to the anisotropic case
function generate_multi_index(d::S,mu::Array{S,1}) where {S<:Integer}
nt = num_terms(mu,d)
multi_index = Array{S,2}(undef,nt,d)
multi_index[1,:] = ones(S,1,d)
max_mu = maximum(mu)
w = Tuple(repeat([max_mu+1],inner = d))
pos = 0
@inbounds for i = 2:(max_mu+1)^d
candidate_index = Tuple(CartesianIndices(w)[i])
if sum(candidate_index) <= d+max_mu && sum(candidate_index .<= mu.+1) == d
pos += 1
if pos > nt # handles the case where nt is under-estimated
multi_index = [multi_index; collect(candidate_index)']
else
multi_index[pos,:] .= candidate_index
end
end
end
if pos < nt # handles case where nt is over-estimated
multi_index = multi_index[1:pos,:]
end
return multi_index
end
# The function below relates to the isotropic case
function generate_multi_index(d::S,mu::S) where {S<:Integer}
if d < 1
error("d must be positive")
end
if mu < 0
error("mu must be non-negative")
end
if d == 1
multi_index = [i for i in 1:mu+1]
return multi_index
else
multi_index_base = generate_multi_index(d-1,mu)
N = size(multi_index_base,1)
multi_index = zeros(S,N*(mu+1),d)
pos = 0
@inbounds @views for j = 1:N
for i = 1:mu+1
if sum(multi_index_base[j,:]) + i <= d+mu
pos += 1
multi_index[pos,2:d] .= multi_index_base[j,:]
multi_index[pos,1] = i
end
end
end
return multi_index[1:pos,:]
end
end
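# For example, generate_multi_index(2,1) returns the rows [1 1; 2 1; 1 2]:
# all index vectors with entries at least 1 whose sum is at most d + mu = 3.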
# The following function computes the number of terms in the multi-index for the
# isotropic case (it also equals the number of terms in a complete
# polynomial of the given order in the given number of dimensions).
function num_terms(order::S,d::S) where {S <: Integer}
if d == 1
return order+1
else
return div(num_terms(order,d-1)*(order+d),d)
end
end
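# For example, num_terms(2,2) == 6, matching the six terms 1, x, y, x^2, x*y,
# y^2 of a complete second-order polynomial in two variables.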
# The following function is a rough over-estimate of the number of terms in
# the multi-index for the anisotropic case.
function num_terms(order::Array{S,1},d::S) where {S<:Integer}
max_mu = maximum(order)
nt = num_terms(max_mu,d) # Deliberate over-estimate of the number of terms
return nt
end
m_i(x::Integer) = (x == 1 ? 1 : 2^(x-1) + 1)
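# m_i gives the number of nodes used at each level: m_i(1) = 1, m_i(2) = 3,
# m_i(3) = 5, m_i(4) = 9, so that the node sets at successive levels are nested.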
function combine_nodes(nodes1::Union{Array{R,1},Array{R,2}},nodes2::Array{R,1}) where {R<:Number} # nodes1 can be a 1d or 2d array; nodes2 is a 1d array
n1 = size(nodes1,1)
n2 = size(nodes1,2)
n3 = length(nodes2)
combined_nodes = Array{R,2}(undef,n1*n3,n2+1)
@inbounds for i = 1:n3
combined_nodes[(i-1)*n1+1:i*n1,1:n2] = nodes1
end
@inbounds for i = 1:n1
@inbounds for j = 1:n3
combined_nodes[(j-1)*n1+i,n2+1] = nodes2[j]
end
end
return combined_nodes
end
function scale_nodes(nodes::Array{R,1},domain::Array{T,1}) where {T<:AbstractFloat,R<:Number}
nodes = copy(nodes)
@inbounds for i in eachindex(nodes)
nodes[i] = domain[2] + (1.0+nodes[i])*(domain[1]-domain[2])*0.5
end
return nodes
end
function scale_nodes(nodes::Array{R,2},domain::Array{T,2}) where {T<:AbstractFloat,R<:Number}
nodes = copy(nodes)
@inbounds for i in CartesianIndices(nodes)
nodes[i] = domain[2,i[2]] + (1.0+nodes[i])*(domain[1,i[2]]-domain[2,i[2]])*0.5
end
return nodes
end
# These functions relate to both an anisotropic and an isotropic grid
function smolyak_grid(node_type::Function,d::S,mu::Union{S,Array{S,1}}) where {S<:Integer}
T = typeof(1.0)
multi_index = generate_multi_index(d,mu)
unique_multi_index = sort(unique(multi_index))
unique_node_number = m_i.(unique_multi_index)
# Create base nodes to be used in the sparse grid
base_nodes = Array{Array{T,1},1}(undef,length(unique_node_number))
for i in eachindex(unique_node_number)
base_nodes[i] = node_type(unique_node_number[i])
end
# Determine the unique nodes introduced at each higher level
unique_base_nodes = Array{Array{T,1},1}(undef,length(unique_node_number))
unique_base_nodes[1] = base_nodes[1]
for i = 2:length(unique_base_nodes)
unique_base_nodes[i] = setdiff(base_nodes[i],base_nodes[i-1])
end
# Construct the sparse grid from the unique nodes
nodes = Array{T,2}(undef,determine_grid_size(multi_index))
l = 1
@inbounds for j in axes(multi_index,1)
new_nodes = unique_base_nodes[multi_index[j,1]] # Here new_nodes is a 1d array
for i = 2:d
new_nodes = combine_nodes(new_nodes,unique_base_nodes[multi_index[j,i]]) # Here new_nodes becomes a 2d array
end
m = size(new_nodes,1)
nodes[l:l+m-1,:] = new_nodes
l += m
end
# Eventually this function should also return the weights at each node on the grid
# so that it can be used for numerical integration.
if d == 1
nodes = nodes[:]
end
return nodes, multi_index
end
function smolyak_grid(node_type::Function,d::S,mu::Union{S,Array{S,1}},domain::Union{Array{T,1},Array{T,2}}) where {S<:Integer, T<:AbstractFloat}
if size(domain,2) != d
error("domain is inconsistent with the number of dimensions")
end
(nodes, multi_index) = smolyak_grid(node_type,d,mu)
# Now scale the nodes to the desired domain
nodes = scale_nodes(nodes,domain)
return nodes, multi_index
# Eventually this function should also return the weights at each node on the grid
# so that it can be used for numerical integration.
end
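# Usage sketch: a 2-d isotropic grid with mu = 2 on [-1,1] x [0,2] (the first
# row of `domain` holds the upper bounds and the second row the lower bounds):
#   dom = [1.0 2.0; -1.0 0.0]
#   grid, mi = smolyak_grid(chebyshev_gauss_lobatto, 2, 2, dom)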
function determine_grid_size(mi)
temp = similar(mi)
for i in axes(mi,1)
for j in axes(mi,2)
if mi[i,j] == 1
temp[i,j] = 1
elseif mi[i,j] == 2
temp[i,j] = 2^(mi[i,j]-1)
else
temp[i,j] = 2^(mi[i,j]-1)+1 - (2^(mi[i,j]-2)+1)
end
end
end
s = 0
for i in axes(mi,1)
t = 1
for j in axes(mi,2)
t *= temp[i,j]
end
s += t
end
return (s, size(mi,2))
end
function smolyak_plan(node_type::Function,d::S,mu::Union{S,Array{S,1}},domain::Union{Array{T,1},Array{T,2}}) where {S<:Integer, T<:AbstractFloat}
g, mi = smolyak_grid(node_type,d,mu,domain)
plan = SApproxPlan(Symbol(node_type),g,mi,domain)
return plan
end
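# A plan bundles the node family, sparse grid, multi-index, and domain so they
# can be reused across repeated interpolation steps (see smolyak_interp below).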
function smolyak_weights(y::Array{T,1},nodes::Union{Array{T,1},Array{T,2}},multi_index::Union{Array{S,1},Array{S,2}}) where {T<:AbstractFloat,S<:Integer}
interpolation_matrix = zeros(size(nodes,1),size(nodes,1))
unique_multi_index = sort(unique(multi_index))
unique_orders = m_i.(unique_multi_index) .- 1
# Below we do the following things:
# Generate the polynomial terms for each order
# Generate the unique polynomial terms introduced at each higher order
# Combine the polynomial terms to construct a row of the interpolation matrix
# Iterate over the nodes, doing the above steps at each iteration, to compute all rows of the interpolation matrix
base_polynomials = Array{Array{T,2},1}(undef,length(unique_orders))
unique_base_polynomials = Array{Array{T,2},1}(undef,length(unique_orders))
@inbounds for k in axes(nodes,1)
# Construct the base polynomials
for i in eachindex(unique_orders)
base_polynomials[i] = chebyshev_polynomial(unique_orders[i],nodes[k,:])
end
# Compute the unique polynomial terms from the base polynomials
for i = length(unique_orders):-1:2
unique_base_polynomials[i] = base_polynomials[i][:,size(base_polynomials[i-1],2)+1:end]
end
unique_base_polynomials[1] = base_polynomials[1]
# Construct a row of the interpolation matrix
l = 1
@inbounds @views for j in axes(multi_index,1)
new_polynomials = unique_base_polynomials[multi_index[j,1]][1,:]
for i = 2:size(multi_index,2)
new_polynomials = kron(new_polynomials,unique_base_polynomials[multi_index[j,i]][i,:])
end
m = length(new_polynomials)
interpolation_matrix[k,l:l+m-1] = new_polynomials
l += m
end
end
weights = interpolation_matrix\y
return weights
end
function smolyak_weights(y::Array{T,1},nodes::Union{Array{T,1},Array{T,2}},multi_index::Union{Array{S,1},Array{S,2}},domain::Union{Array{T,1},Array{T,2}}) where {T<:AbstractFloat,S<:Integer}
# Normalize nodes to the [-1.0 1.0] interval
d = size(multi_index,2)
nodes = copy(nodes)
for i = 1:d
nodes[:,i] = normalize_node(nodes[:,i],domain[:,i])
end
weights = smolyak_weights(y,nodes,multi_index)
return weights
end
function smolyak_weights_threaded(y::Array{T,1},nodes::Union{Array{T,1},Array{T,2}},multi_index::Union{Array{S,1},Array{S,2}}) where {T<:AbstractFloat,S<:Integer}
interpolation_matrix = zeros(size(nodes,1),size(nodes,1))
unique_multi_index = sort(unique(multi_index))
unique_orders = m_i.(unique_multi_index) .- 1
# Below we do the following things:
# Generate the polynomial terms for each order
# Generate the unique polynomial terms introduced at each higher order
# Combine the polynomial terms to construct a row of the interpolation matrix
# Iterate over the nodes, doing the above steps at each iteration, to compute all rows of the interpolation matrix
@inbounds @sync Threads.@threads for k in axes(nodes,1)
base_polynomials = Array{Array{T,2},1}(undef,length(unique_orders))
unique_base_polynomials = Array{Array{T,2},1}(undef,length(unique_orders))
# Construct the base polynomials
for i in eachindex(unique_orders)
base_polynomials[i] = chebyshev_polynomial(unique_orders[i],nodes[k,:])
end
# Compute the unique polynomial terms from the base polynomials
for i = length(unique_orders):-1:2
unique_base_polynomials[i] = base_polynomials[i][:,size(base_polynomials[i-1],2)+1:end]
end
unique_base_polynomials[1] = base_polynomials[1]
# Construct a row of the interpolation matrix
l = 1
@inbounds @views for j in axes(multi_index,1)
new_polynomials = unique_base_polynomials[multi_index[j,1]][1,:]
for i = 2:size(multi_index,2)
new_polynomials = kron(new_polynomials,unique_base_polynomials[multi_index[j,i]][i,:])
end
m = length(new_polynomials)
interpolation_matrix[k,l:l+m-1] = new_polynomials
l += m
end
end
weights = interpolation_matrix\y
return weights
end
function smolyak_weights_threaded(y::Array{T,1},nodes::Union{Array{T,1},Array{T,2}},multi_index::Union{Array{S,1},Array{S,2}},domain::Union{Array{T,1},Array{T,2}}) where {T<:AbstractFloat,S<:Integer}
# Normalize nodes to the [-1.0 1.0] interval
d = size(multi_index,2)
nodes = copy(nodes)
for i = 1:d
nodes[:,i] = normalize_node(nodes[:,i],domain[:,i])
end
weights = smolyak_weights_threaded(y,nodes,multi_index)
return weights
end
function smolyak_weights(y::Array{T,1},inverse_interpolation_matrix::Array{T,2}) where {T<:AbstractFloat}
weights = inverse_interpolation_matrix*y
return weights
end
function smolyak_inverse_interpolation_matrix(nodes::Union{Array{T,1},Array{T,2}},multi_index::Union{Array{S,1},Array{S,2}}) where {T<:AbstractFloat,S<:Integer}
interpolation_matrix = zeros(size(nodes,1),size(nodes,1))
unique_multi_index = sort(unique(multi_index))
unique_orders = m_i.(unique_multi_index) .- 1
# Below we do the following things:
# Generate the polynomial terms for each order
# Generate the unique polynomial terms introduced at each higher order
# Combine the polynomial terms to construct a row of the interpolation matrix
# Iterate over the nodes, doing the above steps at each iteration, to compute all rows of the interpolation matrix
base_polynomials = Array{Array{T,2},1}(undef,length(unique_orders))
unique_base_polynomials = Array{Array{T,2},1}(undef,length(unique_orders))
@inbounds for k in axes(nodes,1)
# Construct the base polynomials
for i in eachindex(unique_orders)
base_polynomials[i] = chebyshev_polynomial(unique_orders[i],nodes[k,:])
end
# Compute the unique polynomial terms from the base polynomials
for i = length(unique_orders):-1:2
unique_base_polynomials[i] = base_polynomials[i][:,size(base_polynomials[i-1],2)+1:end]
end
unique_base_polynomials[1] = base_polynomials[1]
# Construct a row of the interpolation matrix
l = 1
@inbounds @views for j in axes(multi_index,1)
new_polynomials = unique_base_polynomials[multi_index[j,1]][1,:]
for i = 2:size(multi_index,2)
new_polynomials = kron(new_polynomials,unique_base_polynomials[multi_index[j,i]][i,:])
end
m = length(new_polynomials)
interpolation_matrix[k,l:l+m-1] = new_polynomials
l += m
end
end
inverse_interpolation_matrix = inv(interpolation_matrix)
return inverse_interpolation_matrix
end
function smolyak_inverse_interpolation_matrix(nodes::Union{Array{T,1},Array{T,2}},multi_index::Union{Array{S,1},Array{S,2}},domain::Union{Array{T,1},Array{T,2}}) where {T<:AbstractFloat,S<:Integer}
# Normalize nodes to the [-1.0 1.0] interval
d = size(multi_index,2)
nodes = copy(nodes)
for i = 1:d
nodes[:,i] = normalize_node(nodes[:,i],domain[:,i])
end
inverse_interpolation_matrix = smolyak_inverse_interpolation_matrix(nodes,multi_index)
return inverse_interpolation_matrix
end
function smolyak_inverse_interpolation_matrix_threaded(nodes::Union{Array{T,1},Array{T,2}},multi_index::Union{Array{S,1},Array{S,2}}) where {T<:AbstractFloat,S<:Integer}
interpolation_matrix = zeros(size(nodes,1),size(nodes,1))
unique_multi_index = sort(unique(multi_index))
unique_orders = m_i.(unique_multi_index) .- 1
# Below we do the following things:
# Generate the polynomial terms for each order
# Generate the unique polynomial terms introduced at each higher order
# Combine the polynomial terms to construct a row of the interpolation matrix
# Iterate over the nodes, doing the above steps at each iteration, to compute all rows of the interpolation matrix
@inbounds @sync Threads.@threads for k in axes(nodes,1)
base_polynomials = Array{Array{T,2},1}(undef,length(unique_orders))
unique_base_polynomials = Array{Array{T,2},1}(undef,length(unique_orders))
# Construct the base polynomials
for i in eachindex(unique_orders)
base_polynomials[i] = chebyshev_polynomial(unique_orders[i],nodes[k,:])
end
# Compute the unique polynomial terms from the base polynomials
for i = length(unique_orders):-1:2
unique_base_polynomials[i] = base_polynomials[i][:,size(base_polynomials[i-1],2)+1:end]
end
unique_base_polynomials[1] = base_polynomials[1]
# Construct a row of the interpolation matrix
l = 1
@inbounds @views for j in axes(multi_index,1)
new_polynomials = unique_base_polynomials[multi_index[j,1]][1,:]
for i = 2:size(multi_index,2)
new_polynomials = kron(new_polynomials,unique_base_polynomials[multi_index[j,i]][i,:])
end
m = length(new_polynomials)
interpolation_matrix[k,l:l+m-1] = new_polynomials
l += m
end
end
inverse_interpolation_matrix = inv(interpolation_matrix)
return inverse_interpolation_matrix
end
function smolyak_inverse_interpolation_matrix_threaded(nodes::Union{Array{T,1},Array{T,2}},multi_index::Union{Array{S,1},Array{S,2}},domain::Union{Array{T,1},Array{T,2}}) where {T<:AbstractFloat,S<:Integer}
# Normalize nodes to the [-1.0 1.0] interval
d = size(multi_index,2)
nodes = copy(nodes)
for i = 1:d
nodes[:,i] = normalize_node(nodes[:,i],domain[:,i])
end
inverse_interpolation_matrix = smolyak_inverse_interpolation_matrix_threaded(nodes,multi_index)
return inverse_interpolation_matrix
end
function smolyak_pl_weights(y::AbstractArray{T,1},nodes::Union{Array{T,1},Array{T,2}},multi_index::Union{Array{S,1},Array{S,2}}) where {T<:AbstractFloat,S<:Integer}
interpolation_matrix = zeros(size(nodes,1),size(nodes,1))
@inbounds for l in axes(nodes,1)
k = 1
x = nodes[l,:]
@inbounds for i in axes(multi_index,1)
m_node_number = m_i.(multi_index[i,:])
if prod(m_node_number) == 1
interpolation_matrix[l,k] = 1.0
k += 1
else
extra_nodes = 1
@inbounds for j in eachindex(m_node_number)
if m_node_number[j] > 1
extra_nodes *= m_node_number[j] - m_i(multi_index[i,j] - 1)
end
end
for h = 1:extra_nodes
a = 1.0
@inbounds for j in eachindex(m_node_number)
if m_node_number[j] > 1
if abs(x[j] - nodes[k,j]) > 2/(m_node_number[j]-1)
a *= 0.0
else
a *= 1.0 - ((m_node_number[j]-1)/2)*abs(x[j]-nodes[k,j])
end
end
end
interpolation_matrix[l,k] = a
k += 1
end
end
end
end
weights = interpolation_matrix\y
return weights
end
function smolyak_pl_weights(y::AbstractArray{T,1},nodes::Union{Array{T,1},Array{T,2}},multi_index::Union{Array{S,1},Array{S,2}},domain::Union{Array{T,1},Array{T,2}}) where {T<:AbstractFloat,S<:Integer}
# Normalize nodes to the [-1.0 1.0] interval
d = size(multi_index,2)
nodes = copy(nodes)
@inbounds for i = 1:d
nodes[:,i] = normalize_node(nodes[:,i],domain[:,i])
end
weights = smolyak_pl_weights(y,nodes,multi_index)
return weights
end
function smolyak_pl_weights_threaded(y::AbstractArray{T,1},nodes::Union{Array{T,1},Array{T,2}},multi_index::Union{Array{S,1},Array{S,2}}) where {T<:AbstractFloat,S<:Integer}
interpolation_matrix = zeros(size(nodes,1),size(nodes,1))
@inbounds @sync Threads.@threads for l in axes(nodes,1)
k = 1
x = nodes[l,:]
@inbounds for i in axes(multi_index,1)
m_node_number = m_i.(multi_index[i,:])
if prod(m_node_number) == 1
interpolation_matrix[l,k] = 1.0
k += 1
else
extra_nodes = 1
@inbounds for j in eachindex(m_node_number)
if m_node_number[j] > 1
extra_nodes *= m_node_number[j] - m_i(multi_index[i,j] - 1)
end
end
for h = 1:extra_nodes
a = 1.0
@inbounds for j in eachindex(m_node_number)
if m_node_number[j] > 1
if abs(x[j] - nodes[k,j]) > 2/(m_node_number[j]-1)
a *= 0.0
else
a *= 1.0 - ((m_node_number[j]-1)/2)*abs(x[j]-nodes[k,j])
end
end
end
interpolation_matrix[l,k] = a
k += 1
end
end
end
end
weights = interpolation_matrix\y
return weights
end
function smolyak_pl_weights_threaded(y::AbstractArray{T,1},nodes::Union{Array{T,1},Array{T,2}},multi_index::Union{Array{S,1},Array{S,2}},domain::Union{Array{T,1},Array{T,2}}) where {T<:AbstractFloat,S<:Integer}
# Normalize nodes to the [-1.0 1.0] interval
d = size(multi_index,2)
nodes = copy(nodes)
@inbounds for i = 1:d
nodes[:,i] = normalize_node(nodes[:,i],domain[:,i])
end
weights = smolyak_pl_weights_threaded(y,nodes,multi_index)
return weights
end
function smolyak_polynomial(node::AbstractArray{R,1},multi_index::Union{Array{S,1},Array{S,2}}) where {R<:Number,S<:Integer}
unique_multi_index = sort(unique(multi_index))
unique_orders = m_i.(unique_multi_index) .- 1
# Below we do the following things:
# Generate the polynomial terms for each order
# Generate the unique polynomial terms introduced at each higher order
# Combine the polynomial terms to construct the Smolyak polynomial
# Here we construct the base polynomials
base_polynomials = Array{Array{R,2}}(undef,length(unique_orders))
for i in eachindex(unique_orders)
base_polynomials[i] = chebyshev_polynomial(unique_orders[i],node)
end
# Compute the unique polynomial terms from the base polynomials
unique_base_polynomials = Array{Array{R,2}}(undef,length(unique_orders))
for i = length(unique_orders):-1:2
unique_base_polynomials[i] = base_polynomials[i][:,size(base_polynomials[i-1],2)+1:end]
end
unique_base_polynomials[1] = base_polynomials[1]
# Allocate the vector holding the Smolyak polynomial terms
n = determine_grid_size(multi_index)
polynomial = Array{R,1}(undef,n[1])
# Iterate over the rows of the multi-index to fill in the polynomial terms
l = 1
@inbounds for j in axes(multi_index,1)
new_polynomials = unique_base_polynomials[multi_index[j,1]][1,:]
for i = 2:size(multi_index,2)
new_polynomials = kron(new_polynomials,unique_base_polynomials[multi_index[j,i]][i,:])
end
m = length(new_polynomials)
polynomial[l:l+m-1] = new_polynomials
l += m
end
return polynomial
end
function smolyak_polynomial(node::AbstractArray{R,1},multi_index::Union{Array{S,1},Array{S,2}},domain::Union{Array{T,1},Array{T,2}}) where {T<:AbstractFloat,R<:Number,S<:Integer}
node = copy(node)
if size(domain,2) != length(node)
error("domain is inconsistent with the number of dimensions")
end
d = length(node)
for i = 1:d
node[i] = normalize_node(node[i],domain[:,i])
end
poly = smolyak_polynomial(node,multi_index)
return poly
end
function smolyak_evaluate(weights::Array{T,1},node::AbstractArray{R,1},multi_index::Union{Array{S,1},Array{S,2}}) where {T<:AbstractFloat,R<:Number,S<:Integer}
unique_multi_index = sort(unique(multi_index))
unique_orders = m_i.(unique_multi_index) .- 1
# Below we do the following things:
# Generate the polynomial terms for each order
# Generate the unique polynomial terms introduced at each higher order
# Combine the polynomial terms to construct the Smolyak polynomial
# Here we construct the base polynomials
base_polynomials = Array{Array{R,2}}(undef,length(unique_orders))
for i in eachindex(unique_orders)
base_polynomials[i] = chebyshev_polynomial(unique_orders[i],node)
end
# Compute the unique polynomial terms from the base polynomials
unique_base_polynomials = Array{Array{R,2}}(undef,length(unique_orders))
for i = length(unique_orders):-1:2
unique_base_polynomials[i] = base_polynomials[i][:,size(base_polynomials[i-1],2)+1:end]
end
unique_base_polynomials[1] = base_polynomials[1]
# Allocate the vector of polynomial terms
polynomials = Array{R,1}(undef,length(weights))
# Iterate over the rows of the multi-index to fill in the polynomial terms
l = 1
@inbounds for j in axes(multi_index,1)
new_polynomials = unique_base_polynomials[multi_index[j,1]][1,:]
for i = 2:size(multi_index,2)
new_polynomials = kron(new_polynomials,unique_base_polynomials[multi_index[j,i]][i,:])
end
m = length(new_polynomials)
polynomials[l:l+m-1] = new_polynomials
l += m
end
estimate = zero(T)
for i in eachindex(polynomials)
estimate += polynomials[i]*weights[i]
end
return estimate
end
function smolyak_evaluate(weights::Array{T,1},node::AbstractArray{R,1},multi_index::Union{Array{S,1},Array{S,2}},domain::Union{Array{T,1},Array{T,2}}) where {T<:AbstractFloat,R<:Number,S<:Integer}
node = copy(node)
if size(domain,2) != length(node)
error("domain is inconsistent with the number of dimensions")
end
d = length(node)
for i = 1:d
node[i] = normalize_node(node[i],domain[:,i])
end
estimate = smolyak_evaluate(weights,node,multi_index)
return estimate
end
function smolyak_evaluate(weights::Array{T,1},polynomial::Array{R,1}) where {T<:AbstractFloat,R<:Number}
estimate = weights'polynomial
return estimate
end
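# When the same point must be evaluated against many weight vectors, the
# polynomial terms can be precomputed once and reused, e.g.:
#   poly = smolyak_polynomial(node, multi_index, domain)
#   smolyak_evaluate(weights, poly)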
function smolyak_pl_evaluate(weights::Array{T,1},point::Array{R,1},nodes::Union{Array{T,1},Array{T,2}},multi_index::Union{Array{S,1},Array{S,2}}) where {T<:AbstractFloat,R<:Number,S<:Integer}
basis = Array{R,1}(undef,size(nodes,1))
k = 1
@inbounds for i in axes(multi_index,1)
m_node_number = m_i.(multi_index[i,:])
if prod(m_node_number) == 1
basis[k] = one(R)
k += 1
else
extra_nodes = 1
@inbounds for j in eachindex(m_node_number)
if m_node_number[j] > 1
extra_nodes *= m_node_number[j] - m_i(multi_index[i,j] - 1)
end
end
@inbounds for h = 1:extra_nodes
a = 1.0
@inbounds for j in eachindex(m_node_number)
if m_node_number[j] > 1
if abs(point[j] - nodes[k,j]) > 2/(m_node_number[j]-1)
a *= zero(R)
else
a *= one(R) - ((m_node_number[j]-1)/2)*abs(point[j]-nodes[k,j])
end
end
end
basis[k] = a
k += 1
end
end
end
estimate = zero(R)
@inbounds for i in eachindex(basis)
estimate += basis[i]*weights[i]
end
return estimate
end
function smolyak_pl_evaluate(weights::Array{T,1},point::Array{R,1},nodes::Union{Array{T,1},Array{T,2}},multi_index::Union{Array{S,1},Array{S,2}},domain::Union{Array{T,1},Array{T,2}}) where {T<:AbstractFloat,R<:Number,S<:Integer}
d = size(multi_index,2)
nodes = copy(nodes)
point = copy(point)
@inbounds for i = 1:d
nodes[:,i] = normalize_node(nodes[:,i],domain[:,i])
point[i] = normalize_node(point[i],domain[:,i])
end
estimate = smolyak_pl_evaluate(weights,point,nodes,multi_index)
return estimate
end
function smolyak_interp(y::Array{T,1},plan::P) where {T<:AbstractFloat,P<:SApproximationPlan}
if plan.node_type == :chebyshev_extrema || plan.node_type == :chebyshev_gauss_lobatto
weights = smolyak_weights(y,plan.grid,plan.multi_index,plan.domain)
elseif plan.node_type == :clenshaw_curtis_equidistant
weights = smolyak_pl_weights(y,plan.grid,plan.multi_index,plan.domain)
end
function interp(x::Array{R,1}) where {R<:Number}
if plan.node_type == :chebyshev_extrema || plan.node_type == :chebyshev_gauss_lobatto
return smolyak_evaluate(weights,x,plan.multi_index,plan.domain)
elseif plan.node_type == :clenshaw_curtis_equidistant
return smolyak_pl_evaluate(weights,x,plan.grid,plan.multi_index,plan.domain)
end
end
return interp
end
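# End-to-end usage sketch (hypothetical target function):
#   f(x) = sin(x[1]) + cos(x[2])
#   plan = smolyak_plan(chebyshev_gauss_lobatto, 2, 3, [1.0 1.0; -1.0 -1.0])
#   y = [f(plan.grid[i,:]) for i in axes(plan.grid,1)]
#   fhat = smolyak_interp(y, plan)
#   fhat([0.3, -0.2])  # approximates f away from the grid nodes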
function smolyak_interp_threaded(y::Array{T,1},plan::P) where {T<:AbstractFloat,P<:SApproximationPlan}
if plan.node_type == :chebyshev_extrema || plan.node_type == :chebyshev_gauss_lobatto
weights = smolyak_weights_threaded(y,plan.grid,plan.multi_index,plan.domain)
elseif plan.node_type == :clenshaw_curtis_equidistant
weights = smolyak_pl_weights_threaded(y,plan.grid,plan.multi_index,plan.domain)
end
function interp(x::Array{R,1}) where {R<:Number}
if plan.node_type == :chebyshev_extrema || plan.node_type == :chebyshev_gauss_lobatto
return smolyak_evaluate(weights,x,plan.multi_index,plan.domain)
elseif plan.node_type == :clenshaw_curtis_equidistant
return smolyak_pl_evaluate(weights,x,plan.grid,plan.multi_index,plan.domain)
end
end
return interp
end
function smolyak_derivative(weights::Array{T,1},node::Array{R,1},multi_index::Union{Array{S,1},Array{S,2}},pos::S) where {T<:AbstractFloat,R<:Number,S<:Integer}
unique_multi_index = sort(unique(multi_index))
unique_orders = m_i.(unique_multi_index) .- 1
# Here we construct the base polynomials
base_polynomials = Array{Array{R,2},1}(undef,length(unique_orders))
base_polynomial_derivatives = Array{Array{R,2},1}(undef,length(unique_orders))
for i in eachindex(unique_orders)
base_polynomials[i] = chebyshev_polynomial(unique_orders[i],node)
base_polynomial_derivatives[i] = chebyshev_polynomial_deriv(unique_orders[i],node)
end
# Compute the unique polynomial terms from the base polynomials
unique_base_polynomials = Array{Array{R,2},1}(undef,length(unique_orders))
unique_base_polynomial_derivatives = Array{Array{R,2},1}(undef,length(unique_orders))
for i = length(unique_orders):-1:2
unique_base_polynomials[i] = base_polynomials[i][:,size(base_polynomials[i-1],2)+1:end]
unique_base_polynomial_derivatives[i] = base_polynomial_derivatives[i][:,size(base_polynomial_derivatives[i-1],2)+1:end]
end
unique_base_polynomials[1] = base_polynomials[1]
unique_base_polynomial_derivatives[1] = base_polynomial_derivatives[1]
# Construct the first row of the interpolation matrix
polynomials = Array{R,1}(undef,length(weights))
# Iterate over nodes, doing the above three steps at each iteration
l = 1
@inbounds for j in axes(multi_index,1)
if pos == 1
new_polynomials = unique_base_polynomial_derivatives[multi_index[j,1]][1,:]
else
new_polynomials = unique_base_polynomials[multi_index[j,1]][1,:]
end
for i = 2:size(multi_index,2)
if pos == i
new_polynomials = kron(new_polynomials,unique_base_polynomial_derivatives[multi_index[j,i]][i,:])
else
new_polynomials = kron(new_polynomials,unique_base_polynomials[multi_index[j,i]][i,:])
end
end
m = length(new_polynomials)
polynomials[l:l+m-1] = new_polynomials
l += m
end
evaluated_derivative = zero(T)
for i in eachindex(polynomials)
evaluated_derivative += polynomials[i]*weights[i]
end
return evaluated_derivative
end
function smolyak_derivative(weights::Array{T,1},node::Array{R,1},multi_index::Union{Array{S,1},Array{S,2}},domain::Union{Array{T,1},Array{T,2}},pos::S) where {T<:AbstractFloat,R<:Number,S<:Integer}
node = copy(node)
if size(domain,2) != length(node)
error("domain is inconsistent with the number of dimensions")
end
d = length(node)
for i = 1:d
node[i] = normalize_node(node[i],domain[:,i])
end
evaluated_derivative = smolyak_derivative(weights,node,multi_index,pos)
return evaluated_derivative*(2.0/(domain[1,pos]-domain[2,pos]))
end
function smolyak_gradient(weights::Array{T,1},node::Array{R,1},multi_index::Union{Array{S,1},Array{S,2}}) where {T<:AbstractFloat,R<:Number,S<:Integer}
d = length(node)
gradient = Array{R,2}(undef,1,d)
for i = 1:d
gradient[i] = smolyak_derivative(weights,node,multi_index,i)
end
return gradient
end
function smolyak_gradient(weights::Array{T,1},node::Array{R,1},multi_index::Union{Array{S,1},Array{S,2}},domain::Union{Array{T,1},Array{T,2}}) where {T<:AbstractFloat,R<:Number,S<:Integer}
d = length(node)
gradient = Array{R,2}(undef,1,d)
for i = 1:d
gradient[i] = smolyak_derivative(weights,node,multi_index,domain,i)
end
return gradient
end
function smolyak_gradient(y::AbstractArray{T,1},plan::P) where {T<:AbstractFloat,P<:SApproximationPlan}
if plan.node_type == :clenshaw_curtis_equidistant
error("Not implemented for clenshaw_curtis_equidistant nodes")
end
weights = smolyak_weights(y,plan.grid,plan.multi_index,plan.domain)
function smolyak_grad(x::Array{R,1}) where {R<:Number}
return smolyak_gradient(weights,x,plan.multi_index,plan.domain)
end
return smolyak_grad
end
function smolyak_gradient_threaded(y::AbstractArray{T,1},plan::P) where {T<:AbstractFloat,P<:SApproximationPlan}
if plan.node_type == :clenshaw_curtis_equidistant
error("Not implemented for clenshaw_curtis_equidistant nodes")
end
weights = smolyak_weights_threaded(y,plan.grid,plan.multi_index,plan.domain)
function smolyak_grad(x::Array{R,1}) where {R<:Number}
return smolyak_gradient(weights,x,plan.multi_index,plan.domain)
end
return smolyak_grad
end
function smolyak_hessian(weights::Array{T,1},point::Array{R,1},multi_index::Union{Array{S,1},Array{S,2}},domain::Union{Array{T,1},Array{T,2}}) where {T<:AbstractFloat,R<:Number,S<:Integer}
point = copy(point)
if size(domain,2) != length(point)
error("domain is inconsistent with the number of dimensions")
end
d = length(point)
for i = 1:d
point[i] = normalize_node(point[i],domain[:,i])
end
unique_multi_index = sort(unique(multi_index))
unique_orders = m_i.(unique_multi_index) .- 1
hess = Array{T,2}(undef,d,d)
# Here we construct the base polynomials
base_polynomials = Array{Array{R,2},1}(undef,length(unique_orders))
base_polynomial_derivatives = Array{Array{R,2},1}(undef,length(unique_orders))
base_polynomial_sec_derivatives = Array{Array{R,2},1}(undef,length(unique_orders))
for i in eachindex(unique_orders)
base_polynomials[i] = chebyshev_polynomial(unique_orders[i],point)
base_polynomial_derivatives[i] = chebyshev_polynomial_deriv(unique_orders[i],point)
base_polynomial_sec_derivatives[i] = chebyshev_polynomial_sec_deriv(unique_orders[i],point)
end
# Compute the unique polynomial terms from the base polynomials
unique_base_polynomials = Array{Array{R,2},1}(undef,length(unique_orders))
unique_base_polynomial_derivatives = Array{Array{R,2},1}(undef,length(unique_orders))
unique_base_polynomial_sec_derivatives = Array{Array{R,2},1}(undef,length(unique_orders))
for i = length(unique_orders):-1:2
unique_base_polynomials[i] = base_polynomials[i][:,size(base_polynomials[i-1],2)+1:end]
unique_base_polynomial_derivatives[i] = base_polynomial_derivatives[i][:,size(base_polynomial_derivatives[i-1],2)+1:end]
unique_base_polynomial_sec_derivatives[i] = base_polynomial_sec_derivatives[i][:,size(base_polynomial_sec_derivatives[i-1],2)+1:end]
end
unique_base_polynomials[1] = base_polynomials[1]
unique_base_polynomial_derivatives[1] = base_polynomial_derivatives[1]
unique_base_polynomial_sec_derivatives[1] = base_polynomial_sec_derivatives[1]
# Construct the first row of the interpolation matrix
polynomials = Array{R,1}(undef,length(weights))
# Iterate over nodes, doing the above three steps at each iteration
@inbounds for c in CartesianIndices(hess)
l = 1
@inbounds for j in axes(multi_index,1)
if 1 == c[1] == c[2]
new_polynomials = unique_base_polynomial_sec_derivatives[multi_index[j,1]][1,:]
elseif 1 == c[1] || 1 == c[2]
new_polynomials = unique_base_polynomial_derivatives[multi_index[j,1]][1,:]
else
new_polynomials = unique_base_polynomials[multi_index[j,1]][1,:]
end
for i = 2:size(multi_index,2)
if i == c[1] == c[2]
new_polynomials = kron(new_polynomials,unique_base_polynomial_sec_derivatives[multi_index[j,i]][i,:])
elseif i == c[1] || i == c[2]
new_polynomials = kron(new_polynomials,unique_base_polynomial_derivatives[multi_index[j,i]][i,:])
else
new_polynomials = kron(new_polynomials,unique_base_polynomials[multi_index[j,i]][i,:])
end
end
m = length(new_polynomials)
polynomials[l:l+m-1] = new_polynomials
l += m
end
evaluated_derivative = zero(T)
for i in eachindex(polynomials)
evaluated_derivative += polynomials[i]*weights[i]
end
hess[c] = evaluated_derivative*(2.0/(domain[1,c[1]]-domain[2,c[1]]))*(2.0/(domain[1,c[2]]-domain[2,c[2]]))
end
return hess
end
function smolyak_hessian(y::AbstractArray{T,1},plan::P) where {T<:AbstractFloat,P<:SApproximationPlan}
weights = smolyak_weights(y,plan.grid,plan.multi_index,plan.domain)
function smolyak_hess(x::Array{R,1}) where {R<:Number}
return smolyak_hessian(weights,x,plan.multi_index,plan.domain)
end
return smolyak_hess
end
function smolyak_hessian_threaded(y::AbstractArray{T,1},plan::P) where {T<:AbstractFloat,P<:SApproximationPlan}
weights = smolyak_weights_threaded(y,plan.grid,plan.multi_index,plan.domain)
function smolyak_hess(x::Array{R,1}) where {R<:Number}
return smolyak_hessian(weights,x,plan.multi_index,plan.domain)
end
return smolyak_hess
end
function integrate_cheb_polys(order::S) where {S <: Integer}
# Integrates the Chebyshev polynomials T_0,...,T_order over [-1,1]: the integral of T_n is ((-1)^n+1)/(1-n^2) for n != 1 and 0 for n == 1
p = zeros(order+1)
for i in 1:order+1
if i == 2
p[i] = 0.0
else
p[i] = ((-1)^(i-1)+1)/(1-(i-1)^2)
end
end
return p
end
function smolyak_integrate(f::Function,plan::SApproxPlan,method::Symbol)
if method == :clenshaw_curtis
integral = smolyak_clenshaw_curtis(f,plan)
elseif method == :gauss_chebyshev_quad
integral = smolyak_gauss_chebyshev_quad(f,plan)
else
error("Integration not implemented for that method")
end
return integral
end
function smolyak_clenshaw_curtis(f::Function,plan::SApproxPlan)
grid = plan.grid
multi_index = plan.multi_index
domain = plan.domain
y = zeros(size(grid,1))
for i in eachindex(y)
y[i] = f(grid[i,:])
end
weights = smolyak_weights(y,grid,multi_index,domain)
# Uses Clenshaw-Curtis to integrate over all dimensions
unique_multi_index = sort(unique(multi_index))
unique_orders = m_i.(unique_multi_index) .- 1
# Here we construct the base polynomials
T = eltype(grid)
base_polynomial_integrals = Array{Array{T,1},1}(undef,length(unique_orders))
for i in eachindex(unique_orders)
base_polynomial_integrals[i] = integrate_cheb_polys(unique_orders[i])
end
# Compute the unique polynomial terms from the base polynomials
unique_base_polynomial_integrals = Array{Array{T,1},1}(undef,length(unique_orders))
for i = length(unique_orders):-1:2
unique_base_polynomial_integrals[i] = base_polynomial_integrals[i][length(base_polynomial_integrals[i-1])+1:end]
end
unique_base_polynomial_integrals[1] = base_polynomial_integrals[1]
# Construct the first row of the interpolation matrix
polynomials = Array{T,1}(undef,length(weights))
# Iterate over nodes, doing the above three steps at each iteration
l = 1
@inbounds for j in axes(multi_index,1)
new_polynomials = unique_base_polynomial_integrals[multi_index[j,1]][:]
for i = 2:size(multi_index,2)
new_polynomials = kron(new_polynomials,unique_base_polynomial_integrals[multi_index[j,i]][:])
end
m = length(new_polynomials)
polynomials[l:l+m-1] = new_polynomials
l += m
end
evaluated_integral = zero(T)
for i in eachindex(polynomials)
evaluated_integral += polynomials[i]*weights[i]
end
scale_factor = (domain[1,1]-domain[2,1])/2
for i in 2:size(multi_index,2)
scale_factor = scale_factor*(domain[1,i]-domain[2,i])/2
end
return evaluated_integral*scale_factor
end
function smolyak_clenshaw_curtis(f::Function,plan::SApproxPlan,pos::S) where {S<:Integer}
# Uses Clenshaw-Curtis to integrate over all dimensions except for pos
grid = plan.grid
multi_index = plan.multi_index
domain = plan.domain
y = zeros(size(grid,1))
for i in eachindex(y)
y[i] = f(grid[i,:])
end
weights = smolyak_weights(y,grid,multi_index,domain)
function smolyak_int(point::R) where {R <: Number}
point = normalize_node(point,domain[:,pos])
unique_multi_index = sort(unique(multi_index))
unique_orders = m_i.(unique_multi_index) .- 1
# Here we construct the base polynomials
T = eltype(grid)
base_polynomials = Array{Array{T,1},1}(undef,length(unique_orders))
base_polynomial_integrals = Array{Array{T,1},1}(undef,length(unique_orders))
for i in eachindex(unique_orders)
base_polynomials[i] = chebyshev_polynomial(unique_orders[i],point)[:]
base_polynomial_integrals[i] = integrate_cheb_polys(unique_orders[i])
end
# Compute the unique polynomial terms from the base polynomials
unique_base_polynomials = Array{Array{T,1},1}(undef,length(unique_orders))
unique_base_polynomial_integrals = Array{Array{T,1},1}(undef,length(unique_orders))
for i = length(unique_orders):-1:2
unique_base_polynomials[i] = base_polynomials[i][length(base_polynomials[i-1])+1:end]
unique_base_polynomial_integrals[i] = base_polynomial_integrals[i][length(base_polynomial_integrals[i-1])+1:end]
end
unique_base_polynomials[1] = base_polynomials[1]
unique_base_polynomial_integrals[1] = base_polynomial_integrals[1]
# Construct the first row of the interpolation matrix
polynomials = Array{T,1}(undef,length(weights))
# Iterate over nodes, doing the above three steps at each iteration
l = 1
@inbounds for j in axes(multi_index,1)
if pos == 1
new_polynomials = unique_base_polynomials[multi_index[j,1]][:]
else
new_polynomials = unique_base_polynomial_integrals[multi_index[j,1]][:]
end
for i = 2:size(multi_index,2)
if pos == i
new_polynomials = kron(new_polynomials,unique_base_polynomials[multi_index[j,i]][:])
else
new_polynomials = kron(new_polynomials,unique_base_polynomial_integrals[multi_index[j,i]][:])
end
end
m = length(new_polynomials)
polynomials[l:l+m-1] = new_polynomials
l += m
end
evaluated_integral = zero(T)
for i in eachindex(polynomials)
evaluated_integral += polynomials[i]*weights[i]
end
scale_factor = 1.0
for i in 1:size(multi_index,2)
if pos != i
scale_factor = scale_factor*(domain[1,i]-domain[2,i])/2
end
end
return evaluated_integral*scale_factor
end
return smolyak_int
end
function smolyak_gauss_chebyshev_quad(f::Function,plan::SApproxPlan)
# Uses Gauss-Chebyshev quadrature to integrate over all dimensions
grid = plan.grid
multi_index = plan.multi_index
domain = plan.domain
iim = smolyak_inverse_interpolation_matrix(grid,multi_index,domain)
d = size(grid,2)
e = zeros(1,size(grid,1))
e[1] = π^d
w = e*iim
y = zeros(size(grid,1))
for i in eachindex(y)
integrating_weights = sqrt(1.0-normalize_node(grid[i,1],domain[:,1])^2)
for j = 2:d
integrating_weights *= sqrt(1.0-normalize_node(grid[i,j],domain[:,j])^2)
end
y[i] = f(grid[i,:])*integrating_weights
end
scale_factor = (domain[1,1]-domain[2,1])/2
for i in 2:d
scale_factor = scale_factor*(domain[1,i]-domain[2,i])/2
end
return (w*y)[1]*scale_factor
end
########################################################################
########################################################################
function smolyak_grid_full(node_type::Function,d::S,mu::S) where {S<:Integer}
T = typeof(1.0)
multi_index = generate_multi_index(d,mu)
unique_multi_index = sort(unique(multi_index))
unique_node_number = m_i.(unique_multi_index)
# Create base nodes to be used in the sparse grid
base_nodes = Array{Array{T,1},1}(undef,length(unique_node_number))
base_weights = Array{Array{T,1},1}(undef,length(unique_node_number))
@inbounds for i in eachindex(unique_node_number)
base_nodes[i] = node_type(unique_node_number[i])
end
# Select the relevant polynomials from the multi index
ii = (sum(multi_index,dims=2) .>= max(d,mu+1)).*(sum(multi_index,dims=2) .<= d+mu)
multi_index_full = zeros(S,sum(ii),size(multi_index,2))
j = 1
@inbounds for i in axes(multi_index,1)
if ii[i] == true
multi_index_full[j,:] = multi_index[i,:]
j += 1
end
end
# Construct the sparse grid from the nodes
nodes = Array{T,2}(undef,determine_grid_size_full(multi_index_full))
l = 1
for j in axes(multi_index_full,1)
new_nodes = base_nodes[multi_index_full[j,1]] # Here new_nodes is a 1d array
for i = 2:d
new_nodes = combine_nodes(new_nodes,base_nodes[multi_index_full[j,i]]) # Here new_nodes becomes a 2d array
end
m = size(new_nodes,1)
nodes[l:l+m-1,:] = new_nodes
l += m
end
return nodes, multi_index_full
end
function smolyak_grid_full(node_type::Function,d::S,mu::S,domain::Union{Array{T,1},Array{T,2}}) where {S<:Integer, T<:AbstractFloat}
if size(domain,2) != d
error("domain is inconsistent with the number of dimensions")
end
nodes, multi_index_full = smolyak_grid_full(node_type,d,mu)
nodes = scale_nodes(nodes,domain)
return nodes, multi_index_full
end
function determine_grid_size_full(mi)
temp = similar(mi)
@inbounds for i in axes(mi,1)
@inbounds for j in axes(mi,2)
if mi[i,j] == 1
temp[i,j] = 1
else
temp[i,j] = 2^(mi[i,j]-1)+1
end
end
end
s = 0
@inbounds for i in axes(mi,1)
t = 1
@inbounds for j in axes(mi,2)
t *= temp[i,j]
end
s += t
end
return (s, size(mi,2))
end
function master_index(multi_index::Array{S,2}) where {S<:Integer}
temp_ind = similar(multi_index)
master_ind = zeros(S,size(multi_index,1),2)
@inbounds for i in eachindex(multi_index)
if multi_index[i] == 1
temp_ind[i] = 1
else
temp_ind[i] = m_i(multi_index[i])
end
end
master_ind[1,1] = 1
master_ind[1,2] = prod(temp_ind[1,:])
@inbounds for i = 2:size(master_ind,1)
master_ind[i,1] = master_ind[i-1,1] + master_ind[i-1,2]
master_ind[i,2] = prod(temp_ind[i,:])
end
return master_ind
end
function cheb_poly(order::S,x::R) where {S<:Integer,R<:Number}
p = one(R)
p1 = zero(R)
p2 = zero(R)
for i = 2:order+1
if i == 2
p1, p = p, x
else
p2, p1 = p1, p
p = 2*x*p1-p2
end
end
return p
end
function prod_cjs(max_grid::Union{Array{T,1},Array{T,2}},min_grid::Union{Array{T,1},Array{T,2}},poly_grid::Array{T,2}) where {T<:AbstractFloat}
cjs = ones(size(poly_grid))
@inbounds for i in axes(poly_grid,1)
@inbounds for j in axes(poly_grid,2)
if poly_grid[i,j] == max_grid[j] || poly_grid[i,j] == min_grid[j]
cjs[i,j] *= 2
end
end
end
return prod(cjs,dims=2)
end
function compute_scale_factor(multi_index::Array{S,1}) where {S<:Integer}
scale_factor = 1.0
@inbounds for i in eachindex(multi_index)
if multi_index[i] > 1
scale_factor *= 2.0/(m_i(multi_index[i]) - 1)
end
end
return scale_factor
end
function smolyak_weights_full(y_f::Array{T,1},grid::Union{Array{T,1},Array{T,2}},multi_index::Array{S,2}) where {S<:Integer, T<:AbstractFloat}
mi = sum(multi_index,dims=2)
d = size(multi_index,2)
mu = maximum(mi)-d
max_grid = maximum(grid,dims=1)
min_grid = minimum(grid,dims=1)
weights = Array{Array{T,1},1}(undef,size(multi_index,1))
g_ind = master_index(multi_index)
@inbounds for i in axes(g_ind,1) # This loops over the number of polynomials
ws = zeros(g_ind[i,2])
poly_grid = grid[g_ind[i,1]:g_ind[i,1]+g_ind[i,2]-1,:]
poly_y = y_f[g_ind[i,1]:g_ind[i,1]+g_ind[i,2]-1]
if size(grid,1) == 1 # This is to accommodate the mu = 0 case
cjs_prod = ones(size(poly_grid,1))
else
cjs_prod = prod_cjs(max_grid,min_grid,poly_grid)
end
@inbounds for l = 1:g_ind[i,2] # This loops over the weights
ll = CartesianIndices(Tuple(m_i.(multi_index[i,:])))[l]
@inbounds for j = 1:g_ind[i,2] # This loops over the nodes
rhs_term = cheb_poly(ll[1]-1,poly_grid[j,1])*poly_y[j]
@inbounds for k = 2:size(poly_grid,2) # This loops over the polynomial terms in the product
rhs_term *= cheb_poly(ll[k]-1,poly_grid[j,k])
end
ws[l] += rhs_term/cjs_prod[j]
end
scale_factor = compute_scale_factor(multi_index[i,:])
ws[l] = scale_factor*(1/cjs_prod[l])*ws[l]
end
weights[i] = (-1)^(d+mu-mi[i])*factorial(d-1)/(factorial(d+mu-mi[i])*factorial(-1-mu+mi[i]))*ws
end
return weights
end
function smolyak_weights_full(y_f::Array{T,1},grid::Union{Array{T,1},Array{T,2}},multi_index::Array{S,2},domain::Array{T,2}) where {S<:Integer, T<:AbstractFloat}
d = size(multi_index,2)
grid = copy(grid)
for i = 1:d
grid[:,i] = normalize_node(grid[:,i],domain[:,i])
end
weights = smolyak_weights_full(y_f,grid,multi_index)
return weights
end
function smolyak_evaluate_full(weights::Array{Array{T,1},1},point::Array{R,1},multi_index::Array{S,2}) where {S<:Integer,R<:Number,T<:AbstractFloat}
d = size(multi_index,2)
evaluated_polynomials = zeros(size(multi_index,1))
@inbounds for i in axes(multi_index,1) # This loops over the number of polynomials
@inbounds for l in eachindex(weights[i])
ll = CartesianIndices(Tuple(m_i.(multi_index[i:i,:])))[l]
temp = weights[i][l]*cheb_poly(ll[1]-1,point[1])
@inbounds for k = 2:d
temp *= cheb_poly(ll[k]-1,point[k])
end
evaluated_polynomials[i] += temp
end
#evaluated_polynomials[i] *= (-1)^(d+mu-mi[i])*factorial(d-1)/(factorial(d+mu-mi[i])*factorial(-1-mu+mi[i]))
end
return sum(evaluated_polynomials)
end
function smolyak_evaluate_full(weights::Array{Array{T,1},1},point::Array{R,1},multi_index::Array{S,2},domain::Array{T,2}) where {S<:Integer,R<:Number,T<:AbstractFloat}
d = size(multi_index,2)
point = copy(point)
for i = 1:d
point[i] = normalize_node(point[i],domain[:,i])
end
estimate = smolyak_evaluate_full(weights,point,multi_index)
return estimate
end
function deriv_cheb_poly(order::S,x::R) where {S<:Integer,R<:Number}
p0 = one(R)
p1 = zero(R)
p2 = zero(R)
pd0 = zero(R)
pd1 = zero(R)
pd2 = zero(R)
for i = 2:order+1
if i == 2
p1, p0 = p0, x
pd1, pd0 = pd0, one(R)
else
p2, p1 = p1, p0
pd2, pd1 = pd1, pd0
p0 = 2*x*p1-p2
pd0 = 2*p1+2*x*pd1-pd2
end
end
return pd0
end
function smolyak_derivative_full(weights::Array{Array{T,1},1},point::Array{R,1},multi_index::Array{S,2},pos::S) where {S<:Integer,R<:Number,T<:AbstractFloat}
mi = sum(multi_index,dims=2)
d = size(multi_index,2)
mu = maximum(mi)-d
evaluated_polynomials = zeros(size(multi_index,1))
for i in axes(multi_index,1) # This loops over the number of polynomials
for l in eachindex(weights[i])
ll = CartesianIndices(Tuple(m_i.(multi_index[i:i,:])))[l]
if pos == 1
temp = weights[i][l]*deriv_cheb_poly(ll[1]-1,point[1])
else
temp = weights[i][l]*cheb_poly(ll[1]-1,point[1])
end
for k = 2:d
if k == pos
temp *= deriv_cheb_poly(ll[k]-1,point[k])
else
temp *= cheb_poly(ll[k]-1,point[k])
end
end
evaluated_polynomials[i] += temp
end
#evaluated_polynomials[i] *= (-1)^(d+mu-mi[i])*factorial(d-1)/(factorial(d+mu-mi[i])*factorial(-1-mu+mi[i]))
end
evaluated_derivative = sum(evaluated_polynomials)
return evaluated_derivative
end
function smolyak_derivative_full(weights::Array{Array{T,1},1},point::Array{R,1},multi_index::Array{S,2},domain::Array{T,2},pos::S) where {S<:Integer,R<:Number,T<:AbstractFloat}
d = size(multi_index,2)
point = copy(point)
for i = 1:d
point[i] = normalize_node(point[i],domain[:,i])
end
evaluated_derivative = smolyak_derivative_full(weights,point,multi_index,pos)
return evaluated_derivative
end
function smolyak_gradient_full(weights::Array{Array{T,1},1},point::Array{R,1},multi_index::Array{S,2}) where {S<:Integer,R<:Number,T<:AbstractFloat}
d = size(multi_index,2)
gradient = Array{R,2}(undef,1,d)
for i = 1:d
gradient[i] = smolyak_derivative_full(weights,point,multi_index,i)
end
return gradient
end
function smolyak_gradient_full(weights::Array{Array{T,1},1},point::Array{R,1},multi_index::Array{S,2},domain::Array{T,2}) where {S<:Integer,R<:Number,T<:AbstractFloat}
d = size(multi_index,2)
gradient = Array{R,2}(undef,1,d)
for i = 1:d
gradient[i] = smolyak_derivative_full(weights,point,multi_index,domain,i)
end
return gradient
end
###########################################################################
function smolyak_evaluate_full(weights::Array{Array{T,1},1},multi_index_full::Array{S,2}) where {T<:AbstractFloat,S<:Integer}
function goo(x::Array{R,1}) where {R<:Number}
return smolyak_evaluate_full(weights,x,multi_index_full)
end
return goo
end
function smolyak_evaluate_full(weights::Array{Array{T,1},1},multi_index_full::Array{S,2},domain::Union{Array{T,1},Array{T,2}}) where {T<:AbstractFloat,S<:Integer}
function goo(x::Array{R,1}) where {R<:Number}
return smolyak_evaluate_full(weights,x,multi_index_full,domain)
end
return goo
end
function smolyak_derivative_full(weights::Array{Array{T,1},1},multi_index_full::Array{S,2}) where {T<:AbstractFloat,S<:Integer}
function goo(x::Array{R,1}) where {R<:Number}
return smolyak_derivative_full(weights,x,multi_index_full)
end
return goo
end
function smolyak_derivative_full(weights::Array{Array{T,1},1},multi_index_full::Array{S,2},domain::Union{Array{T,1},Array{T,2}}) where {T<:AbstractFloat,S<:Integer}
function goo(x::Array{R,1}) where {R<:Number}
return smolyak_derivative_full(weights,x,multi_index_full,domain)
end
return goo
end
function smolyak_derivative_full(weights::Array{Array{T,1},1},multi_index_full::Array{S,2},pos::S) where {T<:AbstractFloat,S<:Integer}
function goo(x::Array{R,1}) where {R<:Number}
return smolyak_derivative_full(weights,x,multi_index_full,pos)
end
return goo
end
function smolyak_derivative_full(weights::Array{Array{T,1},1},multi_index_full::Array{S,2},domain::Union{Array{T,1},Array{T,2}},pos::S) where {T<:AbstractFloat,S<:Integer}
function goo(x::Array{R,1}) where {R<:Number}
return smolyak_derivative_full(weights,x,multi_index_full,domain,pos)
end
return goo
end
| SmolyakApprox | https://github.com/RJDennis/SmolyakApprox.jl.git |
|
[
"MIT"
] | 0.3.0 | e645e4b13d17a941b6fc2dfe8c11431e6e59cd88 | code | 2342 | # This code presents an example to illustrate how SmolyakApprox can be used
using SmolyakApprox
function test_smolyak_derivative()
d = 5 # Set the number of dimensions
mu = 3 # Set the level
grid, multi_ind = smolyak_grid(chebyshev_gauss_lobatto,d,mu) # Construct the Smolyak grid and the multi index
# An arbitrary test function
function test(grid)
y_value = (grid[:,1].+1).^0.1.*exp.(grid[:,2]).*log.(grid[:,3].+2).^0.2.*(grid[:,4].+2).^0.8.*(grid[:,5].+7).^0.1
return y_value
end
y = test(grid) # Evaluate the test function on the Smolyak grid
point = [0.75, 0.45, 0.82, -0.15, -0.95] # Choose a point to evaluate the approximated function
# One way of computing the weights and evaluating the approximated function
weights = smolyak_weights(y,grid,multi_ind) # Compute the Smolyak weights
y_hat = smolyak_evaluate(weights,point,multi_ind) # Evaluate the approximated function
#= A second way of computing the weights and evaluating the approximated function that
computes the interpolation matrix just once. =#
interp_mat = smolyak_inverse_interpolation_matrix(grid,multi_ind) # Compute the interpolation matrix
w = smolyak_weights(y,interp_mat) # Compute the Smolyak weights
y_hatt = smolyak_evaluate(w,point,multi_ind) # Evaluate the approximated function
# Evaluate the exact function at point
y_actual = test(point')
derivatives_2 = smolyak_gradient(weights,point,multi_ind)
derivatives_3 = smolyak_derivative(weights,point,multi_ind,1)
derivatives_4 = smolyak_derivative(weights,point,multi_ind,3)
grid_full, multi_ind_full = smolyak_grid_full(chebyshev_gauss_lobatto,d,mu) # Construct the Smolyak grid and the multi index
y_full = test(grid_full) # Evaluate the test function on the Smolyak grid
weights_full = smolyak_weights_full(y_full,grid_full,multi_ind_full) # Compute the Smolyak weights
derivatives_3_full = smolyak_derivative_full(weights_full,point,multi_ind_full,1) # Evaluate a partial derivative of the approximation
derivatives_4_full = smolyak_gradient_full(weights_full,point,multi_ind_full) # Evaluate the gradient of the approximation
return derivatives_2, derivatives_3, derivatives_4, derivatives_3_full, derivatives_4_full
end
test_smolyak_derivative()
| SmolyakApprox | https://github.com/RJDennis/SmolyakApprox.jl.git |
|
[
"MIT"
] | 0.3.0 | e645e4b13d17a941b6fc2dfe8c11431e6e59cd88 | code | 2887 | # This code presents an example to illustrate how SmolyakApprox can be used
using SmolyakApprox
function test_smolyak_approx()
d = 5 # Set the number of dimensions
mu = 3 # Set the level of approximation
g2, m2 = smolyak_grid(clenshaw_curtis_equidistant,d,mu)
grid, multi_ind = smolyak_grid(chebyshev_gauss_lobatto,d,mu) # Construct the Smolyak grid and the multi index
# An arbitrary test function
function test(grid)
y_value = (grid[:,1].+1).^0.1.*exp.(grid[:,2]).*log.(grid[:,3].+2).^0.2.*(grid[:,4].+2).^0.8.*(grid[:,5].+7).^0.1
return y_value
end
y = test(grid) # Evaluate the test function on the Smolyak grid
y_pl = test(g2)
point = [0.75, 0.45, 0.82, -0.15, -0.95] # Choose a point to evaluate the approximated function
# One way of computing the weights and evaluating the approximated function
weights = smolyak_weights(y,grid,multi_ind) # Compute the Smolyak weights
y_hat = smolyak_evaluate(weights,point,multi_ind) # Evaluate the approximated function
#= A second way of computing the weights and evaluating the approximated function that
computes the interpolation matrix just once. =#
interp_mat = smolyak_inverse_interpolation_matrix(grid,multi_ind) # Compute the interpolation matrix
w = smolyak_weights(y,interp_mat) # Compute the Smolyak weights
y_hatt = smolyak_evaluate(w,point,multi_ind) # Evaluate the approximated function
# Piecewise linear
w_pl = smolyak_pl_weights(y_pl,g2,m2)
y_pl_hat = smolyak_pl_evaluate(w_pl,point,g2,m2)
# Evaluate the exact function at point
y_actual = test(point')
# Now consider the anisotropic case
mu = [3, 2, 2, 2, 3]
grid, multi_ind = smolyak_grid(chebyshev_gauss_lobatto,d,mu) # Construct the Smolyak grid and the multi index
y = test(grid)
weights = smolyak_weights(y,grid,multi_ind) # Compute the Smolyak weights
y_hat_ansio = smolyak_evaluate(weights,point,multi_ind) # Evaluate the approximated function
weights_th = smolyak_weights_threaded(y,grid,multi_ind) # Compute the Smolyak weights
g3, m3 = smolyak_grid(clenshaw_curtis_equidistant,d,mu)
y_pl = test(g3)
w_pl_ansio = smolyak_pl_weights(y_pl,g3,m3)
y_pl_hat_ansio = smolyak_pl_evaluate(w_pl_ansio,point,g3,m3)
w_pl_ansio_th = smolyak_pl_weights_threaded(y_pl,g3,m3)
# Now test the full grid results
mu = 3
grid_full, multi_ind_full = smolyak_grid_full(chebyshev_gauss_lobatto,d,mu) # Construct the Smolyak grid and the multi index
y_full = test(grid_full)
weights_full = smolyak_weights_full(y_full,grid_full,multi_ind_full) # Compute the Smolyak weights
y_hat_full = smolyak_evaluate_full(weights_full,point,multi_ind_full) # Evaluate the approximated function
return y_hat, y_hatt, y_pl_hat, y_actual, y_hat_ansio, y_pl_hat_ansio, y_hat_full
end
test_smolyak_approx()
| SmolyakApprox | https://github.com/RJDennis/SmolyakApprox.jl.git |
|
[
"MIT"
] | 0.3.0 | e645e4b13d17a941b6fc2dfe8c11431e6e59cd88 | code | 55 | include("derivative_example.jl")
include("example.jl")
| SmolyakApprox | https://github.com/RJDennis/SmolyakApprox.jl.git |
|
[
"MIT"
] | 0.3.0 | e645e4b13d17a941b6fc2dfe8c11431e6e59cd88 | docs | 5605 | # SmolyakApprox.jl
Introduction
============
This package implements Smolyak's method for approximating multivariate continuous functions. Two different types of interpolation schemes are allowed: Chebyshev polynomials and piecewise linear. The package also implements Clenshaw-Curtis integration and Gauss-Chebyshev quadrature.
To install this package, type the following in the REPL
```julia
using Pkg
Pkg.add("SmolyakApprox")
```
Then the package can be used by typing
```julia
using SmolyakApprox
```
Chebyshev polynomials
---------------------
The nodes are computed using Chebyshev-Gauss-Lobatto (Chebyshev extrema), with the approximation grid and the multi-index computed by
```julia
grid, multi_ind = smolyak_grid(chebyshev_gauss_lobatto,d,mu,domain)
```
where `d` is the dimension of the function, `mu` is the layer or approximation order, and `domain` is a 2d-array (2xd) whose first row contains the upper bound and whose second row contains the lower bound on each variable. If `domain` is not provided, then it is assumed that the variables reside on the [-1,1] interval. If `mu` is an integer, then an isotropic grid is computed, whereas if `mu` is a 1d array of integers with length `d`, then an anisotropic grid is computed. Because the chebyshev_gauss_lobatto points are the same as the Chebyshev extrema points, you can use `chebyshev_extrema` in place of `chebyshev_gauss_lobatto`.
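For example, an anisotropic grid with a higher level in the first and last of five dimensions might be constructed as follows (a sketch; the domain bounds here are arbitrary):
```julia
d = 5
mu = [3, 2, 2, 2, 3] # one approximation level per dimension
domain = [ones(1,d); -ones(1,d)] # row 1: upper bounds, row 2: lower bounds
grid, multi_ind = smolyak_grid(chebyshev_gauss_lobatto,d,mu,domain)
```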
With the grid and multi-index in hand, we can compute the weights, or coefficients in the approximation, according to
```julia
weights = smolyak_weights(y,grid,multi_ind,domain)
```
where `y` is a 1d-array containing the evaluations at each grid point of the function being approximated. Computation of the weights can be made more efficient by computing the inverse interpolation matrix (this generally needs to be done only once, outside any loops)
```julia
inv_interp_mat = smolyak_inverse_interpolation_matrix(grid,multi_ind,domain)
```
with the weights now computed through
```julia
weights = smolyak_weights(y,inv_interp_mat)
```
You can evaluate the Smolyak approximation of the function at any point in the domain by
```julia
y_hat = smolyak_evaluate(weights,point,multi_ind,domain)
```
where `point` (a 1d-array) is the point in the domain where the approximation is to be evaluated.
Lastly, you can compute derivatives, gradients, and hessians according to:
```julia
d = smolyak_derivative(weights,point,multi_ind,domain,pos) # Takes the derivative with respect to variable 'pos'
g = smolyak_gradient(weights,point,multi_ind,domain)
h = smolyak_hessian(weights,point,multi_ind,domain)
```
Integration
-----------
To numerically integrate a function, you first create an approximation plan and then call the integration function:
```julia
plan = smolyak_plan(chebyshev_gauss_lobatto,d,mu,domain)
integral = smolyak_integrate(f,plan,:clenshaw_curtis) # uses Clenshaw-Curtis
integral = smolyak_integrate(f,plan,:gauss_chebyshev_quad) # uses Gauss-Chebyshev quadrature
```
where `f` is the function to be integrated and `plan` is the approximation plan, discussed below. Both methods integrate the function over the full approximation domain.
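For example, integrating a two-dimensional function over its domain might look like this (a sketch; the integrand `f` is made up for illustration):
```julia
f(x) = exp(x[1])*(x[2] + 2.0)^0.5 # f receives each grid point as a 1d-array
plan = smolyak_plan(chebyshev_gauss_lobatto, 2, 3, [1.0 1.0; -1.0 -1.0])
integral = smolyak_integrate(f, plan, :clenshaw_curtis)
```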
Piecewise linear
----------------
For piecewise linear approximation, equidistant nodes are used, with the number of nodes per dimension determined according to the Clenshaw-Curtis grid structure: 2^(mu-1)+1.
```julia
grid, multi_ind = smolyak_grid(clenshaw_curtis_equidistant,d,mu,domain)
```
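As a quick illustration of the node-count rule above (plain Julia, not a package function):
```julia
nodes_per_dimension(mu) = 2^(mu-1) + 1
nodes_per_dimension(3) # 5 equidistant nodes per dimension when mu = 3
```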
Then the weights are computed using
```julia
weights = smolyak_pl_weights(y,grid,multi_ind,domain)
```
and the approximation computed via
```julia
y_hat = smolyak_pl_evaluate(weights,point,grid,multi_ind,domain)
```
Again, `mu` can be either an integer or a 1d array of integers, depending on whether an isotropic or an anisotropic approximation is desired, and the argument `domain` is unnecessary when the grid resides on [-1,1]^d.
Multi-threading
---------------
There are multi-threaded functions to compute the polynomial weights and the interpolation matrix. These multi-threaded functions are accessed by adding `_threaded` to the end of the function name, as per
```julia
weights = smolyak_weights_threaded(y,inv_interp_mat)
```
Useful structures
-----------------
The main structure to be aware of is the SApproxPlan, which contains the information needed to approximate a function.
```julia
d = 3
mu = 3
domain = [2.0 2.0 2.0; -2.0 -2.0 -2.0]
grid, mi = smolyak_grid(chebyshev_extrema,d,mu,domain)
plan = SApproxPlan(:chebyshev_extrema,grid,mi,domain)
```
or
```julia
plan = smolyak_plan(chebyshev_extrema,d,mu,domain)
```
Once the approximation plan has been constructed it can be used to create functions to interpolate and to compute gradients and hessians.
```julia
f = smolyak_interp(y,plan)
g = smolyak_gradient(y,plan)
h = smolyak_hessian(y,plan)
point = [1.0, 1.0, 1.0]
f(point)
g(point)
h(point)
```
There are threaded versions of `smolyak_interp`, `smolyak_gradient`, and `smolyak_hessian`; just add `_threaded` to the end of the function name.
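For example, using the plan and data from above:
```julia
f = smolyak_interp_threaded(y,plan)
g = smolyak_gradient_threaded(y,plan)
h = smolyak_hessian_threaded(y,plan)
```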
Related packages
----------------
- ChebyshevApprox.jl
- HyperbolicCrossApprox.jl
- PiecewiseLinearApprox.jl
References
----------
My primary references when writing this package were:
Judd, K., Maliar, L., Maliar, S., and R. Valero, (2014), "Smolyak Method for Solving Dynamic Economic Models: Lagrange Interpolation, Anisotropic Grid and Adaptive Domain," Journal of Economic Dynamics and Control, 44, pp.92--123.
Klimke, A., and B. Wohlmuth, (2005), "Algorithm 846: spinterp: Piecewise Multilinear Hierarchical Grid Interpolation in MATLAB," ACM Transactions on Mathematical Software, 31, 4, pp.561--579.
| SmolyakApprox | https://github.com/RJDennis/SmolyakApprox.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e62f733292e252484bbca00116e7d9c06874b42 | code | 2112 | module MoziFESparse
using SparseArrays
export SparseMatrixCOO,SparseMatrixDOK
export spzeros_coo,spzeros_dok
export to_csc, to_array
struct SparseMatrixDOK{Tv,Ti<:Integer} <: AbstractSparseMatrix{Tv,Ti}
m::Int
n::Int
dict::Dict{Tuple{Ti,Ti},Tv}
end
struct SparseMatrixCOO{Tv,Ti<:Integer} <: AbstractSparseMatrix{Tv,Ti}
m::Int
n::Int
rowptr::Vector{Ti}
colptr::Vector{Ti}
nzval::Vector{Tv}
end
import Base.size
size(spmat::SparseMatrixCOO)=(spmat.m,spmat.n)
function spzeros_dok(m::Int,n::Int)
SparseMatrixDOK{Float64,Int}(m,n,Dict{Tuple{Int,Int},Float64}())
end
function spzeros_coo(m::Int,n::Int)
SparseMatrixCOO{Float64,Int}(m,n,Int[],Int[],Float64[])
end
function SparseMatrixCOO(A :: AbstractArray)
m,n=size(A)
I=Vector{Int}()
J=Vector{Int}()
V=Vector{Float64}()
for j in 1:n, i in 1:m
if A[i,j]!=0
push!(I,i)
push!(J,j)
push!(V,A[i,j])
end
end
SparseMatrixCOO{Float64,Int}(m,n,I,J,V)
end
function add(a::SparseMatrixCOO,b::SparseMatrixCOO)
if a.m!=b.m || a.n!=b.n
throw("Dimension not match!")
end
I=[a.rowptr;b.rowptr]
J=[a.colptr;b.colptr]
V=[a.nzval;b.nzval]
SparseMatrixCOO(a.m,a.n,I,J,V)
end
import Base.+
import Base.getindex
+(a::SparseMatrixCOO,b::SparseMatrixCOO)=add(a,b)
function getindex(spmatrix::SparseMatrixCOO,i::Int,j::Int)
idx=(spmatrix.rowptr.==i) .& (spmatrix.colptr.==j)
return reduce(+, spmatrix.nzval[idx]; init=zero(eltype(spmatrix.nzval))) # init makes structural zeros return 0 instead of erroring on an empty reduction
end
function to_csc(spmatrix::SparseMatrixCOO)
m=spmatrix.m
n=spmatrix.n
I=spmatrix.rowptr
J=spmatrix.colptr
V=spmatrix.nzval
sparse(I,J,V,m,n)
end
function to_array(spmatrix::SparseMatrixCOO)
Array(to_csc(spmatrix))
end
function to_array(spvec::SparseVector)
res=zeros(spvec.n)
for (i,j) in zip(spvec.nzind,spvec.nzval)
res[i]+=j
end
return res
end
end
| MoziFESparse | https://github.com/zhuoju36/MoziFESparse.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e62f733292e252484bbca00116e7d9c06874b42 | code | 570 | using MoziFESparse
using LinearAlgebra
using Test # provides the @test macro used below
A=Diagonal([1,1,1])
B=Diagonal([2,2,2])
spA=SparseMatrixCOO(A)
spB=SparseMatrixCOO(B)
res=spA+spB
@test res[1,2]==0
@test res[1,1]==3
@show to_csc(res)
function disperse(A::Matrix,i::Vector{Int},N::Int)
m,n=size(A)
I = repeat(i,outer=n)
J = repeat(i,inner=m)
V = reshape(A,m*n)
SparseMatrixCOO(N,N,I,J,V)
end
function disperse(A::Vector,i::Vector{Int},N::Int)
m=length(A)
I = i
J = [1 for i in 1:m]
V = A
to_array(SparseMatrixCOO(N,1,I,J,V))[:,1]
end
@show disperse([1,2,3],[4,5,10],10)
| MoziFESparse | https://github.com/zhuoju36/MoziFESparse.jl.git |
|
[
"MIT"
] | 0.1.0 | 7e62f733292e252484bbca00116e7d9c06874b42 | docs | 131 | # MoziFESparse.jl
This is a part of Project Mozi
This package provides COO and DOK matrix support for FEM module of the project.
| MoziFESparse | https://github.com/zhuoju36/MoziFESparse.jl.git |
|
[
"MIT"
] | 0.1.2 | 3a4da81e25be1cedcc14576b696e2da6c9bb989b | code | 403 | using Documenter, StackPointer
makedocs(;
modules=[StackPointer],
format=Documenter.HTML(),
pages=[
"Home" => "index.md",
],
repo="https://github.com/chriselrod/StackPointer.jl/blob/{commit}{path}#L{line}",
sitename="StackPointer.jl",
authors="Chris Elrod <[email protected]>",
assets=String[],
)
deploydocs(;
repo="github.com/chriselrod/StackPointer.jl",
)
| StackPointers | https://github.com/chriselrod/StackPointers.jl.git |
|
[
"MIT"
] | 0.1.2 | 3a4da81e25be1cedcc14576b696e2da6c9bb989b | code | 1657 | module StackPointers
using VectorizationBase
export StackPointer, stack_pointer_call
struct StackPointer # default to Float64
ptr::Ptr{Float64}
end
@inline Base.pointer(s::StackPointer) = s.ptr
@inline Base.pointer(s::StackPointer, ::Type{T}) where {T} = Base.unsafe_convert(Ptr{T}, s.ptr)
@inline Base.pointer(s::StackPointer, ::Type{Float64}) = s.ptr
@inline Base.convert(::Type{Ptr{T}}, s::StackPointer) where {T} = pointer(s, T)
@inline Base.unsafe_convert(::Type{Ptr{T}}, s::StackPointer) where {T} = pointer(s, T)
@inline Base.:+(sp::StackPointer, i::Integer) = StackPointer(gep(sp.ptr, i))
@inline Base.:+(sp::StackPointer, i::Integer...) = StackPointer(gep(sp.ptr, +(i...)))
@inline Base.:+(i::Integer, sp::StackPointer) = StackPointer(gep(sp.ptr, i))
@inline Base.:-(sp::StackPointer, i::Integer) = StackPointer(gep(sp.ptr, -i))
@inline VectorizationBase.align(s::StackPointer) = StackPointer(VectorizationBase.align(s.ptr))
"""
To support using StackPointers, overload stack_pointer_call.
For example, in PaddedMatrices (which defines an appropriate `similar` method):

    @inline function StackPointers.stack_pointer_call(::typeof(*), sp::StackPointer, A::AbstractMatrix, B::AbstractVecOrMat)
        (sp, C) = similar(sp, A, B)
        sp, mul!(C, A, B)
    end
"""
@inline stack_pointer_call(f::F, sp::StackPointer, args::Vararg{<:Any,N}) where {F,N} = (sp, f(args...))
# function extract_func_sym(f::Expr)::Symbol
# if f.head === :(.)
# f.args[2].value
# elseif f.head === :curly
# ff = first(f.args)
# ff isa Symbol ? ff : ff.args[2].value
# end
# end
include("precompile.jl")
_precompile_()
end # module
| StackPointers | https://github.com/chriselrod/StackPointers.jl.git |
|
[
"MIT"
] | 0.1.2 | 3a4da81e25be1cedcc14576b696e2da6c9bb989b | code | 33 | function _precompile_()
end
| StackPointers | https://github.com/chriselrod/StackPointers.jl.git |
|
[
"MIT"
] | 0.1.2 | 3a4da81e25be1cedcc14576b696e2da6c9bb989b | code | 103 | using StackPointers
using Test
@testset "StackPointers.jl" begin
# Write your own tests here.
end
| StackPointers | https://github.com/chriselrod/StackPointers.jl.git |
|
[
"MIT"
] | 0.1.2 | 3a4da81e25be1cedcc14576b696e2da6c9bb989b | docs | 1003 | # StackPointers
[](https://chriselrod.github.io/StackPointers.jl/stable)
[](https://chriselrod.github.io/StackPointers.jl/dev)
[](https://travis-ci.com/chriselrod/StackPointers.jl)
[](https://ci.appveyor.com/project/chriselrod/StackPointers-jl)
[](https://codecov.io/gh/chriselrod/StackPointers.jl)
[](https://coveralls.io/github/chriselrod/StackPointers.jl?branch=master)
[](https://cirrus-ci.com/github/chriselrod/StackPointers.jl)
| StackPointers | https://github.com/chriselrod/StackPointers.jl.git |
|
[
"MIT"
] | 0.1.2 | 3a4da81e25be1cedcc14576b696e2da6c9bb989b | docs | 76 | # StackPointers.jl
```@index
```
```@autodocs
Modules = [StackPointers]
```
| StackPointers | https://github.com/chriselrod/StackPointers.jl.git |
|
[
"MIT"
] | 0.2.2 | 88c874db43a7c0f99347ee1013b2052e6cf23ac3 | code | 1988 | import Downloads
using GitHub
using Pkg.Artifacts
using Base.BinaryPlatforms
using Tar
using SHA
const gh_auth = GitHub.AnonymousAuth()
version(release::Release) = try
VersionNumber(release.tag_name)
catch
v"0.0.0"
end
latest_release(repo; auth) = reduce(releases(repo; auth)[1]) do releaseA, releaseB
version(releaseA) > version(releaseB) ? releaseA : releaseB
end
function make_artifacts(dir)
release = latest_release("bytecodealliance/wasmtime"; auth=gh_auth)
release_version = VersionNumber(release.tag_name)
platforms = [
Platform("aarch64", "linux"; libc="glibc"),
Platform("x86_64", "linux"; libc="glibc"),
Platform("x86_64", "macos"),
Platform("aarch64", "macos"),
]
tripletnolibc(platform) = replace(triplet(platform), "-gnu" => "")
wasmtime_asset_name(platform) =
replace("wasmtime-v$release_version-$(tripletnolibc(platform))-c-api.tar.xz",
"apple-darwin" => "macos")
asset_names = wasmtime_asset_name.(platforms)
assets = filter(asset -> asset["name"] ∈ asset_names, release.assets)
artifacts_toml = joinpath(@__DIR__, "Artifacts.toml")
for (platform, asset) in zip(platforms, assets)
@info "Downloading $(asset["browser_download_url"]) for $platform"
archive_location = joinpath(dir, asset["name"])
download_url = asset["browser_download_url"]
Downloads.download(download_url, archive_location;
progress=(t,n) -> print("$(floor(100*n/t))%\r"))
println()
artifact_hash = create_artifact() do artifact_dir
run(`tar -xvf $archive_location -C $artifact_dir`)
end
download_hash = open(archive_location, "r") do f
bytes2hex(sha256(f))
end
bind_artifact!(artifacts_toml, "libwasmtime", artifact_hash; platform, force=true, download_info=[
(download_url, download_hash)
])
@info "done $platform"
end
end
main() = mktempdir(make_artifacts)
| Wasmtime | https://github.com/Pangoraw/Wasmtime.jl.git |
|
[
"MIT"
] | 0.2.2 | 88c874db43a7c0f99347ee1013b2052e6cf23ac3 | code | 2369 | using Clang.Generators
using Clang.LibClang.Clang_jll
cd(@__DIR__)
# Location of the extracted wasmer.tar.gz
wasmtime_location = joinpath(get(ENV, "WASMTIME_LOCATION", "../wasmtime"), "crates/c-api/")
options = load_options(joinpath(@__DIR__, "generator.toml"))
# add compiler flags
args = get_default_args()
push!(args, "-I" * joinpath(wasmtime_location, "wasm-c-api/include/"))
push!(args, "-I" * joinpath(wasmtime_location, "include/"))
headers = [
joinpath(wasmtime_location, "wasm-c-api/include/wasm.h"),
joinpath(wasmtime_location, "include/wasmtime.h"),
joinpath(wasmtime_location, "include/wasi.h"),
]
# create context
ctx = create_context(headers, args, options)
# build without printing so we can do custom rewriting
build!(ctx, BUILDSTAGE_NO_PRINTING)
# custom rewriter
function rewrite!(e::Expr)
# if Meta.isexpr(e, :function)
# pushfirst!(e.args[2].args, Expr(:macrocall, Symbol("@show"), LineNumberNode(1), :libwasmtime))
# return e
# end
# if Meta.isexpr(e, :struct, 3) && e.args[2] == :wasmtime_extern
# e.args = [
# e.args[1],
# e.args[2],
# [:($(Symbol("_pad", i))::$(t)) for (i, t) in enumerate((UInt8, UInt16, UInt32))]...,
# e.args[end],
# # :(wasmtime_extern(kind, of) = new(kind, 0, 0, 0, of)),
# ]
# return e
# end
Meta.isexpr(e, :const) || return e
eq = e.args[1]
if eq.head === :(=) && eq.args[1] === :WASM_EMPTY_VEC
e.args[1].args[2] = nothing
elseif eq.head === :(=) && eq.args[1] === :wasm_name && eq.args[2] === :wasm_byte_vec
e.args[1].args[2] = :wasm_byte_vec_t
elseif eq.head === :(=) && eq.args[1] === :wasm_byte_t
e.args[1].args[2] = :UInt8
end
return e
end
function rewrite!(dag::ExprDAG)
for node in get_nodes(dag)
for expr in get_exprs(node)
rewrite!(expr)
end
end
end
rewrite!(ctx.dag)
# print
build!(ctx, BUILDSTAGE_PRINTING_ONLY)
@warn """
⚠ Remember to add the `wasmtime_extern` alignment
```julia
# src/LibWasmtime.jl:1770
mutable struct wasmtime_extern
kind::wasmtime_extern_kind_t
_pad1::UInt8
_pad2::UInt16
_pad3::UInt32
of::wasmtime_extern_union_t
wasmtime_extern(kind, of) = new(kind, 0, 0, 0, of)
end
```
"""
| Wasmtime | https://github.com/Pangoraw/Wasmtime.jl.git |
|
[
"MIT"
] | 0.2.2 | 88c874db43a7c0f99347ee1013b2052e6cf23ac3 | code | 966 | # This file (`LibWasm.jl` not `prologue.jl`) is automatically generated using
# Clang.jl and should not be edited manually. Take a look at the `gen/` folder
# if something is to be changed.
using Pkg.Artifacts
using Pkg.BinaryPlatforms
tripletnolibc(platform) = replace(triplet(platform), "-gnu" => "")
wasmtime_folder_name(platform) =
"wasmtime-v$release_version-$(tripletnolibc(platform))-c-api"
function get_libwasmtime_location()
artifact_info = artifact_meta("libwasmtime", joinpath(@__DIR__, "..", "Artifacts.toml"))
artifact_info === nothing && return nothing
parent_path = artifact_path(Base.SHA1(artifact_info["git-tree-sha1"]))
child_folder = readdir(parent_path)[1]
return joinpath(
parent_path,
child_folder,
"lib/libwasmtime"
)
end
const libwasmtime_env_key = "LIBWASMTIME_LOCATION"
const libwasmtime = haskey(ENV, libwasmtime_env_key) ?
ENV[libwasmtime_env_key] : get_libwasmtime_location()
| Wasmtime | https://github.com/Pangoraw/Wasmtime.jl.git |
|
[
"MIT"
] | 0.2.2 | 88c874db43a7c0f99347ee1013b2052e6cf23ac3 | code | 82990 | module LibWasmtime
using CEnum
# This file (`LibWasm.jl` not `prologue.jl`) is automatically generated using
# Clang.jl and should not be edited manually. Take a look at the `gen/` folder
# if something is to be changed.
using Pkg.Artifacts
using Pkg.BinaryPlatforms
tripletnolibc(platform) = replace(triplet(platform), "-gnu" => "")
wasmtime_folder_name(platform) =
"wasmtime-v$release_version-$(tripletnolibc(platform))-c-api"
function get_libwasmtime_location()
artifact_info = artifact_meta("libwasmtime", joinpath(@__DIR__, "..", "Artifacts.toml"))
artifact_info === nothing && return nothing
parent_path = artifact_path(Base.SHA1(artifact_info["git-tree-sha1"]))
child_folder = readdir(parent_path)[1]
return joinpath(
parent_path,
child_folder,
"lib/libwasmtime"
)
end
const libwasmtime_env_key = "LIBWASMTIME_LOCATION"
const libwasmtime = haskey(ENV, libwasmtime_env_key) ?
ENV[libwasmtime_env_key] : get_libwasmtime_location()
const byte_t = Cchar
const wasm_byte_t = UInt8
mutable struct wasm_byte_vec_t
size::Csize_t
data::Ptr{wasm_byte_t}
end
function wasm_byte_vec_new(out, arg2, arg3)
ccall((:wasm_byte_vec_new, libwasmtime), Cvoid, (Ptr{wasm_byte_vec_t}, Csize_t, Ptr{wasm_byte_t}), out, arg2, arg3)
end
function wasm_byte_vec_new_empty(out)
ccall((:wasm_byte_vec_new_empty, libwasmtime), Cvoid, (Ptr{wasm_byte_vec_t},), out)
end
function wasm_byte_vec_new_uninitialized(out, arg2)
ccall((:wasm_byte_vec_new_uninitialized, libwasmtime), Cvoid, (Ptr{wasm_byte_vec_t}, Csize_t), out, arg2)
end
function wasm_byte_vec_copy(out, arg2)
ccall((:wasm_byte_vec_copy, libwasmtime), Cvoid, (Ptr{wasm_byte_vec_t}, Ptr{wasm_byte_vec_t}), out, arg2)
end
function wasm_byte_vec_delete(arg1)
ccall((:wasm_byte_vec_delete, libwasmtime), Cvoid, (Ptr{wasm_byte_vec_t},), arg1)
end
mutable struct wasm_ref_t end
mutable struct wasm_store_t end
function assertions()
ccall((:assertions, libwasmtime), Cvoid, ())
end
const float32_t = Cfloat
const float64_t = Cdouble
const wasm_name_t = wasm_byte_vec_t
function wasm_name_new_from_string(out, s)
ccall((:wasm_name_new_from_string, libwasmtime), Cvoid, (Ptr{wasm_name_t}, Cstring), out, s)
end
function wasm_name_new_from_string_nt(out, s)
ccall((:wasm_name_new_from_string_nt, libwasmtime), Cvoid, (Ptr{wasm_name_t}, Cstring), out, s)
end
mutable struct wasm_config_t end
function wasm_config_delete(arg1)
ccall((:wasm_config_delete, libwasmtime), Cvoid, (Ptr{wasm_config_t},), arg1)
end
function wasm_config_new()
ccall((:wasm_config_new, libwasmtime), Ptr{wasm_config_t}, ())
end
mutable struct wasm_engine_t end
function wasm_engine_delete(arg1)
ccall((:wasm_engine_delete, libwasmtime), Cvoid, (Ptr{wasm_engine_t},), arg1)
end
function wasm_engine_new()
ccall((:wasm_engine_new, libwasmtime), Ptr{wasm_engine_t}, ())
end
function wasm_engine_new_with_config(arg1)
ccall((:wasm_engine_new_with_config, libwasmtime), Ptr{wasm_engine_t}, (Ptr{wasm_config_t},), arg1)
end
function wasm_store_delete(arg1)
ccall((:wasm_store_delete, libwasmtime), Cvoid, (Ptr{wasm_store_t},), arg1)
end
function wasm_store_new(arg1)
ccall((:wasm_store_new, libwasmtime), Ptr{wasm_store_t}, (Ptr{wasm_engine_t},), arg1)
end
const wasm_mutability_t = UInt8
@cenum wasm_mutability_enum::UInt32 begin
WASM_CONST = 0
WASM_VAR = 1
end
mutable struct wasm_limits_t
min::UInt32
max::UInt32
end
mutable struct wasm_valtype_t end
function wasm_valtype_delete(arg1)
ccall((:wasm_valtype_delete, libwasmtime), Cvoid, (Ptr{wasm_valtype_t},), arg1)
end
mutable struct wasm_valtype_vec_t
size::Csize_t
data::Ptr{Ptr{wasm_valtype_t}}
end
function wasm_valtype_vec_new_empty(out)
ccall((:wasm_valtype_vec_new_empty, libwasmtime), Cvoid, (Ptr{wasm_valtype_vec_t},), out)
end
function wasm_valtype_vec_new_uninitialized(out, arg2)
ccall((:wasm_valtype_vec_new_uninitialized, libwasmtime), Cvoid, (Ptr{wasm_valtype_vec_t}, Csize_t), out, arg2)
end
function wasm_valtype_vec_new(out, arg2, arg3)
ccall((:wasm_valtype_vec_new, libwasmtime), Cvoid, (Ptr{wasm_valtype_vec_t}, Csize_t, Ptr{Ptr{wasm_valtype_t}}), out, arg2, arg3)
end
function wasm_valtype_vec_copy(out, arg2)
ccall((:wasm_valtype_vec_copy, libwasmtime), Cvoid, (Ptr{wasm_valtype_vec_t}, Ptr{wasm_valtype_vec_t}), out, arg2)
end
function wasm_valtype_vec_delete(arg1)
ccall((:wasm_valtype_vec_delete, libwasmtime), Cvoid, (Ptr{wasm_valtype_vec_t},), arg1)
end
function wasm_valtype_copy(arg1)
ccall((:wasm_valtype_copy, libwasmtime), Ptr{wasm_valtype_t}, (Ptr{wasm_valtype_t},), arg1)
end
const wasm_valkind_t = UInt8
@cenum wasm_valkind_enum::UInt32 begin
WASM_I32 = 0
WASM_I64 = 1
WASM_F32 = 2
WASM_F64 = 3
WASM_ANYREF = 128
WASM_FUNCREF = 129
end
function wasm_valtype_new(arg1)
ccall((:wasm_valtype_new, libwasmtime), Ptr{wasm_valtype_t}, (wasm_valkind_t,), arg1)
end
function wasm_valtype_kind(arg1)
ccall((:wasm_valtype_kind, libwasmtime), wasm_valkind_t, (Ptr{wasm_valtype_t},), arg1)
end
function wasm_valkind_is_num(k)
ccall((:wasm_valkind_is_num, libwasmtime), Bool, (wasm_valkind_t,), k)
end
function wasm_valkind_is_ref(k)
ccall((:wasm_valkind_is_ref, libwasmtime), Bool, (wasm_valkind_t,), k)
end
function wasm_valtype_is_num(t)
ccall((:wasm_valtype_is_num, libwasmtime), Bool, (Ptr{wasm_valtype_t},), t)
end
function wasm_valtype_is_ref(t)
ccall((:wasm_valtype_is_ref, libwasmtime), Bool, (Ptr{wasm_valtype_t},), t)
end
mutable struct wasm_functype_t end
function wasm_functype_delete(arg1)
ccall((:wasm_functype_delete, libwasmtime), Cvoid, (Ptr{wasm_functype_t},), arg1)
end
mutable struct wasm_functype_vec_t
size::Csize_t
data::Ptr{Ptr{wasm_functype_t}}
end
function wasm_functype_vec_new_empty(out)
ccall((:wasm_functype_vec_new_empty, libwasmtime), Cvoid, (Ptr{wasm_functype_vec_t},), out)
end
function wasm_functype_vec_new_uninitialized(out, arg2)
ccall((:wasm_functype_vec_new_uninitialized, libwasmtime), Cvoid, (Ptr{wasm_functype_vec_t}, Csize_t), out, arg2)
end
function wasm_functype_vec_new(out, arg2, arg3)
ccall((:wasm_functype_vec_new, libwasmtime), Cvoid, (Ptr{wasm_functype_vec_t}, Csize_t, Ptr{Ptr{wasm_functype_t}}), out, arg2, arg3)
end
function wasm_functype_vec_copy(out, arg2)
ccall((:wasm_functype_vec_copy, libwasmtime), Cvoid, (Ptr{wasm_functype_vec_t}, Ptr{wasm_functype_vec_t}), out, arg2)
end
function wasm_functype_vec_delete(arg1)
ccall((:wasm_functype_vec_delete, libwasmtime), Cvoid, (Ptr{wasm_functype_vec_t},), arg1)
end
function wasm_functype_copy(arg1)
ccall((:wasm_functype_copy, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasm_functype_t},), arg1)
end
function wasm_functype_new(params, results)
ccall((:wasm_functype_new, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasm_valtype_vec_t}, Ptr{wasm_valtype_vec_t}), params, results)
end
function wasm_functype_params(arg1)
ccall((:wasm_functype_params, libwasmtime), Ptr{wasm_valtype_vec_t}, (Ptr{wasm_functype_t},), arg1)
end
function wasm_functype_results(arg1)
ccall((:wasm_functype_results, libwasmtime), Ptr{wasm_valtype_vec_t}, (Ptr{wasm_functype_t},), arg1)
end
mutable struct wasm_globaltype_t end
function wasm_globaltype_delete(arg1)
ccall((:wasm_globaltype_delete, libwasmtime), Cvoid, (Ptr{wasm_globaltype_t},), arg1)
end
mutable struct wasm_globaltype_vec_t
size::Csize_t
data::Ptr{Ptr{wasm_globaltype_t}}
end
function wasm_globaltype_vec_new_empty(out)
ccall((:wasm_globaltype_vec_new_empty, libwasmtime), Cvoid, (Ptr{wasm_globaltype_vec_t},), out)
end
function wasm_globaltype_vec_new_uninitialized(out, arg2)
ccall((:wasm_globaltype_vec_new_uninitialized, libwasmtime), Cvoid, (Ptr{wasm_globaltype_vec_t}, Csize_t), out, arg2)
end
function wasm_globaltype_vec_new(out, arg2, arg3)
ccall((:wasm_globaltype_vec_new, libwasmtime), Cvoid, (Ptr{wasm_globaltype_vec_t}, Csize_t, Ptr{Ptr{wasm_globaltype_t}}), out, arg2, arg3)
end
function wasm_globaltype_vec_copy(out, arg2)
ccall((:wasm_globaltype_vec_copy, libwasmtime), Cvoid, (Ptr{wasm_globaltype_vec_t}, Ptr{wasm_globaltype_vec_t}), out, arg2)
end
function wasm_globaltype_vec_delete(arg1)
ccall((:wasm_globaltype_vec_delete, libwasmtime), Cvoid, (Ptr{wasm_globaltype_vec_t},), arg1)
end
function wasm_globaltype_copy(arg1)
ccall((:wasm_globaltype_copy, libwasmtime), Ptr{wasm_globaltype_t}, (Ptr{wasm_globaltype_t},), arg1)
end
function wasm_globaltype_new(arg1, arg2)
ccall((:wasm_globaltype_new, libwasmtime), Ptr{wasm_globaltype_t}, (Ptr{wasm_valtype_t}, wasm_mutability_t), arg1, arg2)
end
function wasm_globaltype_content(arg1)
ccall((:wasm_globaltype_content, libwasmtime), Ptr{wasm_valtype_t}, (Ptr{wasm_globaltype_t},), arg1)
end
function wasm_globaltype_mutability(arg1)
ccall((:wasm_globaltype_mutability, libwasmtime), wasm_mutability_t, (Ptr{wasm_globaltype_t},), arg1)
end
mutable struct wasm_tabletype_t end
function wasm_tabletype_delete(arg1)
ccall((:wasm_tabletype_delete, libwasmtime), Cvoid, (Ptr{wasm_tabletype_t},), arg1)
end
mutable struct wasm_tabletype_vec_t
size::Csize_t
data::Ptr{Ptr{wasm_tabletype_t}}
end
function wasm_tabletype_vec_new_empty(out)
ccall((:wasm_tabletype_vec_new_empty, libwasmtime), Cvoid, (Ptr{wasm_tabletype_vec_t},), out)
end
function wasm_tabletype_vec_new_uninitialized(out, arg2)
ccall((:wasm_tabletype_vec_new_uninitialized, libwasmtime), Cvoid, (Ptr{wasm_tabletype_vec_t}, Csize_t), out, arg2)
end
function wasm_tabletype_vec_new(out, arg2, arg3)
ccall((:wasm_tabletype_vec_new, libwasmtime), Cvoid, (Ptr{wasm_tabletype_vec_t}, Csize_t, Ptr{Ptr{wasm_tabletype_t}}), out, arg2, arg3)
end
function wasm_tabletype_vec_copy(out, arg2)
ccall((:wasm_tabletype_vec_copy, libwasmtime), Cvoid, (Ptr{wasm_tabletype_vec_t}, Ptr{wasm_tabletype_vec_t}), out, arg2)
end
function wasm_tabletype_vec_delete(arg1)
ccall((:wasm_tabletype_vec_delete, libwasmtime), Cvoid, (Ptr{wasm_tabletype_vec_t},), arg1)
end
function wasm_tabletype_copy(arg1)
ccall((:wasm_tabletype_copy, libwasmtime), Ptr{wasm_tabletype_t}, (Ptr{wasm_tabletype_t},), arg1)
end
function wasm_tabletype_new(arg1, arg2)
ccall((:wasm_tabletype_new, libwasmtime), Ptr{wasm_tabletype_t}, (Ptr{wasm_valtype_t}, Ptr{wasm_limits_t}), arg1, arg2)
end
function wasm_tabletype_element(arg1)
ccall((:wasm_tabletype_element, libwasmtime), Ptr{wasm_valtype_t}, (Ptr{wasm_tabletype_t},), arg1)
end
function wasm_tabletype_limits(arg1)
ccall((:wasm_tabletype_limits, libwasmtime), Ptr{wasm_limits_t}, (Ptr{wasm_tabletype_t},), arg1)
end
mutable struct wasm_memorytype_t end
function wasm_memorytype_delete(arg1)
ccall((:wasm_memorytype_delete, libwasmtime), Cvoid, (Ptr{wasm_memorytype_t},), arg1)
end
mutable struct wasm_memorytype_vec_t
size::Csize_t
data::Ptr{Ptr{wasm_memorytype_t}}
end
function wasm_memorytype_vec_new_empty(out)
ccall((:wasm_memorytype_vec_new_empty, libwasmtime), Cvoid, (Ptr{wasm_memorytype_vec_t},), out)
end
function wasm_memorytype_vec_new_uninitialized(out, arg2)
ccall((:wasm_memorytype_vec_new_uninitialized, libwasmtime), Cvoid, (Ptr{wasm_memorytype_vec_t}, Csize_t), out, arg2)
end
function wasm_memorytype_vec_new(out, arg2, arg3)
ccall((:wasm_memorytype_vec_new, libwasmtime), Cvoid, (Ptr{wasm_memorytype_vec_t}, Csize_t, Ptr{Ptr{wasm_memorytype_t}}), out, arg2, arg3)
end
function wasm_memorytype_vec_copy(out, arg2)
ccall((:wasm_memorytype_vec_copy, libwasmtime), Cvoid, (Ptr{wasm_memorytype_vec_t}, Ptr{wasm_memorytype_vec_t}), out, arg2)
end
function wasm_memorytype_vec_delete(arg1)
ccall((:wasm_memorytype_vec_delete, libwasmtime), Cvoid, (Ptr{wasm_memorytype_vec_t},), arg1)
end
function wasm_memorytype_copy(arg1)
ccall((:wasm_memorytype_copy, libwasmtime), Ptr{wasm_memorytype_t}, (Ptr{wasm_memorytype_t},), arg1)
end
function wasm_memorytype_new(arg1)
ccall((:wasm_memorytype_new, libwasmtime), Ptr{wasm_memorytype_t}, (Ptr{wasm_limits_t},), arg1)
end
function wasm_memorytype_limits(arg1)
ccall((:wasm_memorytype_limits, libwasmtime), Ptr{wasm_limits_t}, (Ptr{wasm_memorytype_t},), arg1)
end
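# wasm_externtype_t: the common supertype of function, global, table, and memory types.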
mutable struct wasm_externtype_t end
function wasm_externtype_delete(arg1)
ccall((:wasm_externtype_delete, libwasmtime), Cvoid, (Ptr{wasm_externtype_t},), arg1)
end
mutable struct wasm_externtype_vec_t
size::Csize_t
data::Ptr{Ptr{wasm_externtype_t}}
end
function wasm_externtype_vec_new_empty(out)
ccall((:wasm_externtype_vec_new_empty, libwasmtime), Cvoid, (Ptr{wasm_externtype_vec_t},), out)
end
function wasm_externtype_vec_new_uninitialized(out, arg2)
ccall((:wasm_externtype_vec_new_uninitialized, libwasmtime), Cvoid, (Ptr{wasm_externtype_vec_t}, Csize_t), out, arg2)
end
function wasm_externtype_vec_new(out, arg2, arg3)
ccall((:wasm_externtype_vec_new, libwasmtime), Cvoid, (Ptr{wasm_externtype_vec_t}, Csize_t, Ptr{Ptr{wasm_externtype_t}}), out, arg2, arg3)
end
function wasm_externtype_vec_copy(out, arg2)
ccall((:wasm_externtype_vec_copy, libwasmtime), Cvoid, (Ptr{wasm_externtype_vec_t}, Ptr{wasm_externtype_vec_t}), out, arg2)
end
function wasm_externtype_vec_delete(arg1)
ccall((:wasm_externtype_vec_delete, libwasmtime), Cvoid, (Ptr{wasm_externtype_vec_t},), arg1)
end
function wasm_externtype_copy(arg1)
ccall((:wasm_externtype_copy, libwasmtime), Ptr{wasm_externtype_t}, (Ptr{wasm_externtype_t},), arg1)
end
const wasm_externkind_t = UInt8
@cenum wasm_externkind_enum::UInt32 begin
WASM_EXTERN_FUNC = 0
WASM_EXTERN_GLOBAL = 1
WASM_EXTERN_TABLE = 2
WASM_EXTERN_MEMORY = 3
end
function wasm_externtype_kind(arg1)
ccall((:wasm_externtype_kind, libwasmtime), wasm_externkind_t, (Ptr{wasm_externtype_t},), arg1)
end
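# Conversions between wasm_externtype_t and its concrete subtypes; the downcasts return C_NULL when the kind does not match, and the *_const variants wrap the const-qualified C entry points.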
function wasm_functype_as_externtype(arg1)
ccall((:wasm_functype_as_externtype, libwasmtime), Ptr{wasm_externtype_t}, (Ptr{wasm_functype_t},), arg1)
end
function wasm_globaltype_as_externtype(arg1)
ccall((:wasm_globaltype_as_externtype, libwasmtime), Ptr{wasm_externtype_t}, (Ptr{wasm_globaltype_t},), arg1)
end
function wasm_tabletype_as_externtype(arg1)
ccall((:wasm_tabletype_as_externtype, libwasmtime), Ptr{wasm_externtype_t}, (Ptr{wasm_tabletype_t},), arg1)
end
function wasm_memorytype_as_externtype(arg1)
ccall((:wasm_memorytype_as_externtype, libwasmtime), Ptr{wasm_externtype_t}, (Ptr{wasm_memorytype_t},), arg1)
end
function wasm_externtype_as_functype(arg1)
ccall((:wasm_externtype_as_functype, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasm_externtype_t},), arg1)
end
function wasm_externtype_as_globaltype(arg1)
ccall((:wasm_externtype_as_globaltype, libwasmtime), Ptr{wasm_globaltype_t}, (Ptr{wasm_externtype_t},), arg1)
end
function wasm_externtype_as_tabletype(arg1)
ccall((:wasm_externtype_as_tabletype, libwasmtime), Ptr{wasm_tabletype_t}, (Ptr{wasm_externtype_t},), arg1)
end
function wasm_externtype_as_memorytype(arg1)
ccall((:wasm_externtype_as_memorytype, libwasmtime), Ptr{wasm_memorytype_t}, (Ptr{wasm_externtype_t},), arg1)
end
function wasm_functype_as_externtype_const(arg1)
ccall((:wasm_functype_as_externtype_const, libwasmtime), Ptr{wasm_externtype_t}, (Ptr{wasm_functype_t},), arg1)
end
function wasm_globaltype_as_externtype_const(arg1)
ccall((:wasm_globaltype_as_externtype_const, libwasmtime), Ptr{wasm_externtype_t}, (Ptr{wasm_globaltype_t},), arg1)
end
function wasm_tabletype_as_externtype_const(arg1)
ccall((:wasm_tabletype_as_externtype_const, libwasmtime), Ptr{wasm_externtype_t}, (Ptr{wasm_tabletype_t},), arg1)
end
function wasm_memorytype_as_externtype_const(arg1)
ccall((:wasm_memorytype_as_externtype_const, libwasmtime), Ptr{wasm_externtype_t}, (Ptr{wasm_memorytype_t},), arg1)
end
function wasm_externtype_as_functype_const(arg1)
ccall((:wasm_externtype_as_functype_const, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasm_externtype_t},), arg1)
end
function wasm_externtype_as_globaltype_const(arg1)
ccall((:wasm_externtype_as_globaltype_const, libwasmtime), Ptr{wasm_globaltype_t}, (Ptr{wasm_externtype_t},), arg1)
end
function wasm_externtype_as_tabletype_const(arg1)
ccall((:wasm_externtype_as_tabletype_const, libwasmtime), Ptr{wasm_tabletype_t}, (Ptr{wasm_externtype_t},), arg1)
end
function wasm_externtype_as_memorytype_const(arg1)
ccall((:wasm_externtype_as_memorytype_const, libwasmtime), Ptr{wasm_memorytype_t}, (Ptr{wasm_externtype_t},), arg1)
end
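# wasm_importtype_t: describes one import as (module name, field name, extern type).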
mutable struct wasm_importtype_t end
function wasm_importtype_delete(arg1)
ccall((:wasm_importtype_delete, libwasmtime), Cvoid, (Ptr{wasm_importtype_t},), arg1)
end
mutable struct wasm_importtype_vec_t
size::Csize_t
data::Ptr{Ptr{wasm_importtype_t}}
end
function wasm_importtype_vec_new_empty(out)
ccall((:wasm_importtype_vec_new_empty, libwasmtime), Cvoid, (Ptr{wasm_importtype_vec_t},), out)
end
function wasm_importtype_vec_new_uninitialized(out, arg2)
ccall((:wasm_importtype_vec_new_uninitialized, libwasmtime), Cvoid, (Ptr{wasm_importtype_vec_t}, Csize_t), out, arg2)
end
function wasm_importtype_vec_new(out, arg2, arg3)
ccall((:wasm_importtype_vec_new, libwasmtime), Cvoid, (Ptr{wasm_importtype_vec_t}, Csize_t, Ptr{Ptr{wasm_importtype_t}}), out, arg2, arg3)
end
function wasm_importtype_vec_copy(out, arg2)
ccall((:wasm_importtype_vec_copy, libwasmtime), Cvoid, (Ptr{wasm_importtype_vec_t}, Ptr{wasm_importtype_vec_t}), out, arg2)
end
function wasm_importtype_vec_delete(arg1)
ccall((:wasm_importtype_vec_delete, libwasmtime), Cvoid, (Ptr{wasm_importtype_vec_t},), arg1)
end
function wasm_importtype_copy(arg1)
ccall((:wasm_importtype_copy, libwasmtime), Ptr{wasm_importtype_t}, (Ptr{wasm_importtype_t},), arg1)
end
function wasm_importtype_new(_module, name, arg3)
ccall((:wasm_importtype_new, libwasmtime), Ptr{wasm_importtype_t}, (Ptr{wasm_name_t}, Ptr{wasm_name_t}, Ptr{wasm_externtype_t}), _module, name, arg3)
end
function wasm_importtype_module(arg1)
ccall((:wasm_importtype_module, libwasmtime), Ptr{wasm_name_t}, (Ptr{wasm_importtype_t},), arg1)
end
function wasm_importtype_name(arg1)
ccall((:wasm_importtype_name, libwasmtime), Ptr{wasm_name_t}, (Ptr{wasm_importtype_t},), arg1)
end
function wasm_importtype_type(arg1)
ccall((:wasm_importtype_type, libwasmtime), Ptr{wasm_externtype_t}, (Ptr{wasm_importtype_t},), arg1)
end
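# wasm_exporttype_t: describes one export as (name, extern type).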
mutable struct wasm_exporttype_t end
function wasm_exporttype_delete(arg1)
ccall((:wasm_exporttype_delete, libwasmtime), Cvoid, (Ptr{wasm_exporttype_t},), arg1)
end
mutable struct wasm_exporttype_vec_t
size::Csize_t
data::Ptr{Ptr{wasm_exporttype_t}}
end
function wasm_exporttype_vec_new_empty(out)
ccall((:wasm_exporttype_vec_new_empty, libwasmtime), Cvoid, (Ptr{wasm_exporttype_vec_t},), out)
end
function wasm_exporttype_vec_new_uninitialized(out, arg2)
ccall((:wasm_exporttype_vec_new_uninitialized, libwasmtime), Cvoid, (Ptr{wasm_exporttype_vec_t}, Csize_t), out, arg2)
end
function wasm_exporttype_vec_new(out, arg2, arg3)
ccall((:wasm_exporttype_vec_new, libwasmtime), Cvoid, (Ptr{wasm_exporttype_vec_t}, Csize_t, Ptr{Ptr{wasm_exporttype_t}}), out, arg2, arg3)
end
function wasm_exporttype_vec_copy(out, arg2)
ccall((:wasm_exporttype_vec_copy, libwasmtime), Cvoid, (Ptr{wasm_exporttype_vec_t}, Ptr{wasm_exporttype_vec_t}), out, arg2)
end
function wasm_exporttype_vec_delete(arg1)
ccall((:wasm_exporttype_vec_delete, libwasmtime), Cvoid, (Ptr{wasm_exporttype_vec_t},), arg1)
end
function wasm_exporttype_copy(arg1)
ccall((:wasm_exporttype_copy, libwasmtime), Ptr{wasm_exporttype_t}, (Ptr{wasm_exporttype_t},), arg1)
end
function wasm_exporttype_new(arg1, arg2)
ccall((:wasm_exporttype_new, libwasmtime), Ptr{wasm_exporttype_t}, (Ptr{wasm_name_t}, Ptr{wasm_externtype_t}), arg1, arg2)
end
function wasm_exporttype_name(arg1)
ccall((:wasm_exporttype_name, libwasmtime), Ptr{wasm_name_t}, (Ptr{wasm_exporttype_t},), arg1)
end
function wasm_exporttype_type(arg1)
ccall((:wasm_exporttype_type, libwasmtime), Ptr{wasm_externtype_t}, (Ptr{wasm_exporttype_t},), arg1)
end
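# __JL_Ctag_4 mirrors the anonymous value union embedded in wasm_val_t; its fields are accessed by reinterpreting the underlying bytes.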
struct __JL_Ctag_4
data::NTuple{8, UInt8}
end
function Base.getproperty(x::Ptr{__JL_Ctag_4}, f::Symbol)
f === :i32 && return Ptr{Int32}(x + 0)
f === :i64 && return Ptr{Int64}(x + 0)
f === :f32 && return Ptr{float32_t}(x + 0)
f === :f64 && return Ptr{float64_t}(x + 0)
f === :ref && return Ptr{Ptr{wasm_ref_t}}(x + 0)
return getfield(x, f)
end
function Base.getproperty(x::__JL_Ctag_4, f::Symbol)
r = Ref{__JL_Ctag_4}(x)
ptr = Base.unsafe_convert(Ptr{__JL_Ctag_4}, r)
fptr = getproperty(ptr, f)
GC.@preserve r unsafe_load(fptr)
end
function Base.setproperty!(x::Ptr{__JL_Ctag_4}, f::Symbol, v)
unsafe_store!(getproperty(x, f), v)
end
struct wasm_val_t
data::NTuple{16, UInt8}
end
function Base.getproperty(x::Ptr{wasm_val_t}, f::Symbol)
f === :kind && return Ptr{wasm_valkind_t}(x + 0)
f === :of && return Ptr{__JL_Ctag_4}(x + 8)
return getfield(x, f)
end
function Base.getproperty(x::wasm_val_t, f::Symbol)
r = Ref{wasm_val_t}(x)
ptr = Base.unsafe_convert(Ptr{wasm_val_t}, r)
fptr = getproperty(ptr, f)
GC.@preserve r unsafe_load(fptr)
end
function Base.setproperty!(x::Ptr{wasm_val_t}, f::Symbol, v)
unsafe_store!(getproperty(x, f), v)
end
function wasm_val_delete(v)
ccall((:wasm_val_delete, libwasmtime), Cvoid, (Ptr{wasm_val_t},), v)
end
function wasm_val_copy(out, arg2)
ccall((:wasm_val_copy, libwasmtime), Cvoid, (Ptr{wasm_val_t}, Ptr{wasm_val_t}), out, arg2)
end
mutable struct wasm_val_vec_t
size::Csize_t
data::Ptr{wasm_val_t}
end
function wasm_val_vec_new_empty(out)
ccall((:wasm_val_vec_new_empty, libwasmtime), Cvoid, (Ptr{wasm_val_vec_t},), out)
end
function wasm_val_vec_new_uninitialized(out, arg2)
ccall((:wasm_val_vec_new_uninitialized, libwasmtime), Cvoid, (Ptr{wasm_val_vec_t}, Csize_t), out, arg2)
end
function wasm_val_vec_new(out, arg2, arg3)
ccall((:wasm_val_vec_new, libwasmtime), Cvoid, (Ptr{wasm_val_vec_t}, Csize_t, Ptr{wasm_val_t}), out, arg2, arg3)
end
function wasm_val_vec_copy(out, arg2)
ccall((:wasm_val_vec_copy, libwasmtime), Cvoid, (Ptr{wasm_val_vec_t}, Ptr{wasm_val_vec_t}), out, arg2)
end
function wasm_val_vec_delete(arg1)
ccall((:wasm_val_vec_delete, libwasmtime), Cvoid, (Ptr{wasm_val_vec_t},), arg1)
end
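# wasm_ref_t: the common supertype of all runtime objects; the host-info accessors attach arbitrary host data, optionally with a finalizer.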
function wasm_ref_delete(arg1)
ccall((:wasm_ref_delete, libwasmtime), Cvoid, (Ptr{wasm_ref_t},), arg1)
end
function wasm_ref_copy(arg1)
ccall((:wasm_ref_copy, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_ref_t},), arg1)
end
function wasm_ref_same(arg1, arg2)
ccall((:wasm_ref_same, libwasmtime), Bool, (Ptr{wasm_ref_t}, Ptr{wasm_ref_t}), arg1, arg2)
end
function wasm_ref_get_host_info(arg1)
ccall((:wasm_ref_get_host_info, libwasmtime), Ptr{Cvoid}, (Ptr{wasm_ref_t},), arg1)
end
function wasm_ref_set_host_info(arg1, arg2)
ccall((:wasm_ref_set_host_info, libwasmtime), Cvoid, (Ptr{wasm_ref_t}, Ptr{Cvoid}), arg1, arg2)
end
function wasm_ref_set_host_info_with_finalizer(arg1, arg2, arg3)
ccall((:wasm_ref_set_host_info_with_finalizer, libwasmtime), Cvoid, (Ptr{wasm_ref_t}, Ptr{Cvoid}, Ptr{Cvoid}), arg1, arg2, arg3)
end
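# wasm_frame_t: one frame of a trap's stack trace.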
mutable struct wasm_frame_t end
function wasm_frame_delete(arg1)
ccall((:wasm_frame_delete, libwasmtime), Cvoid, (Ptr{wasm_frame_t},), arg1)
end
mutable struct wasm_frame_vec_t
size::Csize_t
data::Ptr{Ptr{wasm_frame_t}}
end
function wasm_frame_vec_new_empty(out)
ccall((:wasm_frame_vec_new_empty, libwasmtime), Cvoid, (Ptr{wasm_frame_vec_t},), out)
end
function wasm_frame_vec_new_uninitialized(out, arg2)
ccall((:wasm_frame_vec_new_uninitialized, libwasmtime), Cvoid, (Ptr{wasm_frame_vec_t}, Csize_t), out, arg2)
end
function wasm_frame_vec_new(out, arg2, arg3)
ccall((:wasm_frame_vec_new, libwasmtime), Cvoid, (Ptr{wasm_frame_vec_t}, Csize_t, Ptr{Ptr{wasm_frame_t}}), out, arg2, arg3)
end
function wasm_frame_vec_copy(out, arg2)
ccall((:wasm_frame_vec_copy, libwasmtime), Cvoid, (Ptr{wasm_frame_vec_t}, Ptr{wasm_frame_vec_t}), out, arg2)
end
function wasm_frame_vec_delete(arg1)
ccall((:wasm_frame_vec_delete, libwasmtime), Cvoid, (Ptr{wasm_frame_vec_t},), arg1)
end
function wasm_frame_copy(arg1)
ccall((:wasm_frame_copy, libwasmtime), Ptr{wasm_frame_t}, (Ptr{wasm_frame_t},), arg1)
end
mutable struct wasm_instance_t end
function wasm_frame_instance(arg1)
ccall((:wasm_frame_instance, libwasmtime), Ptr{wasm_instance_t}, (Ptr{wasm_frame_t},), arg1)
end
function wasm_frame_func_index(arg1)
ccall((:wasm_frame_func_index, libwasmtime), UInt32, (Ptr{wasm_frame_t},), arg1)
end
function wasm_frame_func_offset(arg1)
ccall((:wasm_frame_func_offset, libwasmtime), Csize_t, (Ptr{wasm_frame_t},), arg1)
end
function wasm_frame_module_offset(arg1)
ccall((:wasm_frame_module_offset, libwasmtime), Csize_t, (Ptr{wasm_frame_t},), arg1)
end
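# wasm_trap_t: a runtime error carrying a message (a wasm_name_t) and a stack trace.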
const wasm_message_t = wasm_name_t
mutable struct wasm_trap_t end
function wasm_trap_delete(arg1)
ccall((:wasm_trap_delete, libwasmtime), Cvoid, (Ptr{wasm_trap_t},), arg1)
end
function wasm_trap_copy(arg1)
ccall((:wasm_trap_copy, libwasmtime), Ptr{wasm_trap_t}, (Ptr{wasm_trap_t},), arg1)
end
function wasm_trap_same(arg1, arg2)
ccall((:wasm_trap_same, libwasmtime), Bool, (Ptr{wasm_trap_t}, Ptr{wasm_trap_t}), arg1, arg2)
end
function wasm_trap_get_host_info(arg1)
ccall((:wasm_trap_get_host_info, libwasmtime), Ptr{Cvoid}, (Ptr{wasm_trap_t},), arg1)
end
function wasm_trap_set_host_info(arg1, arg2)
ccall((:wasm_trap_set_host_info, libwasmtime), Cvoid, (Ptr{wasm_trap_t}, Ptr{Cvoid}), arg1, arg2)
end
function wasm_trap_set_host_info_with_finalizer(arg1, arg2, arg3)
ccall((:wasm_trap_set_host_info_with_finalizer, libwasmtime), Cvoid, (Ptr{wasm_trap_t}, Ptr{Cvoid}, Ptr{Cvoid}), arg1, arg2, arg3)
end
function wasm_trap_as_ref(arg1)
ccall((:wasm_trap_as_ref, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_trap_t},), arg1)
end
function wasm_ref_as_trap(arg1)
ccall((:wasm_ref_as_trap, libwasmtime), Ptr{wasm_trap_t}, (Ptr{wasm_ref_t},), arg1)
end
function wasm_trap_as_ref_const(arg1)
ccall((:wasm_trap_as_ref_const, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_trap_t},), arg1)
end
function wasm_ref_as_trap_const(arg1)
ccall((:wasm_ref_as_trap_const, libwasmtime), Ptr{wasm_trap_t}, (Ptr{wasm_ref_t},), arg1)
end
function wasm_trap_new(store, arg2)
ccall((:wasm_trap_new, libwasmtime), Ptr{wasm_trap_t}, (Ptr{wasm_store_t}, Ptr{wasm_message_t}), store, arg2)
end
function wasm_trap_message(arg1, out)
ccall((:wasm_trap_message, libwasmtime), Cvoid, (Ptr{wasm_trap_t}, Ptr{wasm_message_t}), arg1, out)
end
function wasm_trap_origin(arg1)
ccall((:wasm_trap_origin, libwasmtime), Ptr{wasm_frame_t}, (Ptr{wasm_trap_t},), arg1)
end
function wasm_trap_trace(arg1, out)
ccall((:wasm_trap_trace, libwasmtime), Cvoid, (Ptr{wasm_trap_t}, Ptr{wasm_frame_vec_t}), arg1, out)
end
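# wasm_foreign_t: an opaque host-defined reference object.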
mutable struct wasm_foreign_t end
function wasm_foreign_delete(arg1)
ccall((:wasm_foreign_delete, libwasmtime), Cvoid, (Ptr{wasm_foreign_t},), arg1)
end
function wasm_foreign_copy(arg1)
ccall((:wasm_foreign_copy, libwasmtime), Ptr{wasm_foreign_t}, (Ptr{wasm_foreign_t},), arg1)
end
function wasm_foreign_same(arg1, arg2)
ccall((:wasm_foreign_same, libwasmtime), Bool, (Ptr{wasm_foreign_t}, Ptr{wasm_foreign_t}), arg1, arg2)
end
function wasm_foreign_get_host_info(arg1)
ccall((:wasm_foreign_get_host_info, libwasmtime), Ptr{Cvoid}, (Ptr{wasm_foreign_t},), arg1)
end
function wasm_foreign_set_host_info(arg1, arg2)
ccall((:wasm_foreign_set_host_info, libwasmtime), Cvoid, (Ptr{wasm_foreign_t}, Ptr{Cvoid}), arg1, arg2)
end
function wasm_foreign_set_host_info_with_finalizer(arg1, arg2, arg3)
ccall((:wasm_foreign_set_host_info_with_finalizer, libwasmtime), Cvoid, (Ptr{wasm_foreign_t}, Ptr{Cvoid}, Ptr{Cvoid}), arg1, arg2, arg3)
end
function wasm_foreign_as_ref(arg1)
ccall((:wasm_foreign_as_ref, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_foreign_t},), arg1)
end
function wasm_ref_as_foreign(arg1)
ccall((:wasm_ref_as_foreign, libwasmtime), Ptr{wasm_foreign_t}, (Ptr{wasm_ref_t},), arg1)
end
function wasm_foreign_as_ref_const(arg1)
ccall((:wasm_foreign_as_ref_const, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_foreign_t},), arg1)
end
function wasm_ref_as_foreign_const(arg1)
ccall((:wasm_ref_as_foreign_const, libwasmtime), Ptr{wasm_foreign_t}, (Ptr{wasm_ref_t},), arg1)
end
function wasm_foreign_new(arg1)
ccall((:wasm_foreign_new, libwasmtime), Ptr{wasm_foreign_t}, (Ptr{wasm_store_t},), arg1)
end
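# wasm_module_t: a compiled module; it can be validated, serialized, deserialized, and shared across stores via wasm_shared_module_t.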
mutable struct wasm_module_t end
function wasm_module_delete(arg1)
ccall((:wasm_module_delete, libwasmtime), Cvoid, (Ptr{wasm_module_t},), arg1)
end
function wasm_module_copy(arg1)
ccall((:wasm_module_copy, libwasmtime), Ptr{wasm_module_t}, (Ptr{wasm_module_t},), arg1)
end
function wasm_module_same(arg1, arg2)
ccall((:wasm_module_same, libwasmtime), Bool, (Ptr{wasm_module_t}, Ptr{wasm_module_t}), arg1, arg2)
end
function wasm_module_get_host_info(arg1)
ccall((:wasm_module_get_host_info, libwasmtime), Ptr{Cvoid}, (Ptr{wasm_module_t},), arg1)
end
function wasm_module_set_host_info(arg1, arg2)
ccall((:wasm_module_set_host_info, libwasmtime), Cvoid, (Ptr{wasm_module_t}, Ptr{Cvoid}), arg1, arg2)
end
function wasm_module_set_host_info_with_finalizer(arg1, arg2, arg3)
ccall((:wasm_module_set_host_info_with_finalizer, libwasmtime), Cvoid, (Ptr{wasm_module_t}, Ptr{Cvoid}, Ptr{Cvoid}), arg1, arg2, arg3)
end
function wasm_module_as_ref(arg1)
ccall((:wasm_module_as_ref, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_module_t},), arg1)
end
function wasm_ref_as_module(arg1)
ccall((:wasm_ref_as_module, libwasmtime), Ptr{wasm_module_t}, (Ptr{wasm_ref_t},), arg1)
end
function wasm_module_as_ref_const(arg1)
ccall((:wasm_module_as_ref_const, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_module_t},), arg1)
end
function wasm_ref_as_module_const(arg1)
ccall((:wasm_ref_as_module_const, libwasmtime), Ptr{wasm_module_t}, (Ptr{wasm_ref_t},), arg1)
end
mutable struct wasm_shared_module_t end
function wasm_shared_module_delete(arg1)
ccall((:wasm_shared_module_delete, libwasmtime), Cvoid, (Ptr{wasm_shared_module_t},), arg1)
end
function wasm_module_share(arg1)
ccall((:wasm_module_share, libwasmtime), Ptr{wasm_shared_module_t}, (Ptr{wasm_module_t},), arg1)
end
function wasm_module_obtain(arg1, arg2)
ccall((:wasm_module_obtain, libwasmtime), Ptr{wasm_module_t}, (Ptr{wasm_store_t}, Ptr{wasm_shared_module_t}), arg1, arg2)
end
function wasm_module_new(arg1, binary)
ccall((:wasm_module_new, libwasmtime), Ptr{wasm_module_t}, (Ptr{wasm_store_t}, Ptr{wasm_byte_vec_t}), arg1, binary)
end
function wasm_module_validate(arg1, binary)
ccall((:wasm_module_validate, libwasmtime), Bool, (Ptr{wasm_store_t}, Ptr{wasm_byte_vec_t}), arg1, binary)
end
function wasm_module_imports(arg1, out)
ccall((:wasm_module_imports, libwasmtime), Cvoid, (Ptr{wasm_module_t}, Ptr{wasm_importtype_vec_t}), arg1, out)
end
function wasm_module_exports(arg1, out)
ccall((:wasm_module_exports, libwasmtime), Cvoid, (Ptr{wasm_module_t}, Ptr{wasm_exporttype_vec_t}), arg1, out)
end
function wasm_module_serialize(arg1, out)
ccall((:wasm_module_serialize, libwasmtime), Cvoid, (Ptr{wasm_module_t}, Ptr{wasm_byte_vec_t}), arg1, out)
end
function wasm_module_deserialize(arg1, arg2)
ccall((:wasm_module_deserialize, libwasmtime), Ptr{wasm_module_t}, (Ptr{wasm_store_t}, Ptr{wasm_byte_vec_t}), arg1, arg2)
end
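# wasm_func_t: a function instance; host functions are built from the callback typedefs below.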
mutable struct wasm_func_t end
function wasm_func_delete(arg1)
ccall((:wasm_func_delete, libwasmtime), Cvoid, (Ptr{wasm_func_t},), arg1)
end
function wasm_func_copy(arg1)
ccall((:wasm_func_copy, libwasmtime), Ptr{wasm_func_t}, (Ptr{wasm_func_t},), arg1)
end
function wasm_func_same(arg1, arg2)
ccall((:wasm_func_same, libwasmtime), Bool, (Ptr{wasm_func_t}, Ptr{wasm_func_t}), arg1, arg2)
end
function wasm_func_get_host_info(arg1)
ccall((:wasm_func_get_host_info, libwasmtime), Ptr{Cvoid}, (Ptr{wasm_func_t},), arg1)
end
function wasm_func_set_host_info(arg1, arg2)
ccall((:wasm_func_set_host_info, libwasmtime), Cvoid, (Ptr{wasm_func_t}, Ptr{Cvoid}), arg1, arg2)
end
function wasm_func_set_host_info_with_finalizer(arg1, arg2, arg3)
ccall((:wasm_func_set_host_info_with_finalizer, libwasmtime), Cvoid, (Ptr{wasm_func_t}, Ptr{Cvoid}, Ptr{Cvoid}), arg1, arg2, arg3)
end
function wasm_func_as_ref(arg1)
ccall((:wasm_func_as_ref, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_func_t},), arg1)
end
function wasm_ref_as_func(arg1)
ccall((:wasm_ref_as_func, libwasmtime), Ptr{wasm_func_t}, (Ptr{wasm_ref_t},), arg1)
end
function wasm_func_as_ref_const(arg1)
ccall((:wasm_func_as_ref_const, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_func_t},), arg1)
end
function wasm_ref_as_func_const(arg1)
ccall((:wasm_ref_as_func_const, libwasmtime), Ptr{wasm_func_t}, (Ptr{wasm_ref_t},), arg1)
end
# typedef own wasm_trap_t* (*wasm_func_callback_t)(const wasm_val_vec_t* args, own wasm_val_vec_t* results)
const wasm_func_callback_t = Ptr{Cvoid}
# typedef own wasm_trap_t* (*wasm_func_callback_with_env_t)(void* env, const wasm_val_vec_t* args, wasm_val_vec_t* results)
const wasm_func_callback_with_env_t = Ptr{Cvoid}
function wasm_func_new(arg1, arg2, arg3)
ccall((:wasm_func_new, libwasmtime), Ptr{wasm_func_t}, (Ptr{wasm_store_t}, Ptr{wasm_functype_t}, wasm_func_callback_t), arg1, arg2, arg3)
end
function wasm_func_new_with_env(arg1, type, arg3, env, finalizer)
ccall((:wasm_func_new_with_env, libwasmtime), Ptr{wasm_func_t}, (Ptr{wasm_store_t}, Ptr{wasm_functype_t}, wasm_func_callback_with_env_t, Ptr{Cvoid}, Ptr{Cvoid}), arg1, type, arg3, env, finalizer)
end
function wasm_func_type(arg1)
ccall((:wasm_func_type, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasm_func_t},), arg1)
end
function wasm_func_param_arity(arg1)
ccall((:wasm_func_param_arity, libwasmtime), Csize_t, (Ptr{wasm_func_t},), arg1)
end
function wasm_func_result_arity(arg1)
ccall((:wasm_func_result_arity, libwasmtime), Csize_t, (Ptr{wasm_func_t},), arg1)
end
function wasm_func_call(arg1, args, results)
ccall((:wasm_func_call, libwasmtime), Ptr{wasm_trap_t}, (Ptr{wasm_func_t}, Ptr{wasm_val_vec_t}, Ptr{wasm_val_vec_t}), arg1, args, results)
end
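# wasm_global_t: a global variable instance holding a single value.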
mutable struct wasm_global_t end
function wasm_global_delete(arg1)
ccall((:wasm_global_delete, libwasmtime), Cvoid, (Ptr{wasm_global_t},), arg1)
end
function wasm_global_copy(arg1)
ccall((:wasm_global_copy, libwasmtime), Ptr{wasm_global_t}, (Ptr{wasm_global_t},), arg1)
end
function wasm_global_same(arg1, arg2)
ccall((:wasm_global_same, libwasmtime), Bool, (Ptr{wasm_global_t}, Ptr{wasm_global_t}), arg1, arg2)
end
function wasm_global_get_host_info(arg1)
ccall((:wasm_global_get_host_info, libwasmtime), Ptr{Cvoid}, (Ptr{wasm_global_t},), arg1)
end
function wasm_global_set_host_info(arg1, arg2)
ccall((:wasm_global_set_host_info, libwasmtime), Cvoid, (Ptr{wasm_global_t}, Ptr{Cvoid}), arg1, arg2)
end
function wasm_global_set_host_info_with_finalizer(arg1, arg2, arg3)
ccall((:wasm_global_set_host_info_with_finalizer, libwasmtime), Cvoid, (Ptr{wasm_global_t}, Ptr{Cvoid}, Ptr{Cvoid}), arg1, arg2, arg3)
end
function wasm_global_as_ref(arg1)
ccall((:wasm_global_as_ref, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_global_t},), arg1)
end
function wasm_ref_as_global(arg1)
ccall((:wasm_ref_as_global, libwasmtime), Ptr{wasm_global_t}, (Ptr{wasm_ref_t},), arg1)
end
function wasm_global_as_ref_const(arg1)
ccall((:wasm_global_as_ref_const, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_global_t},), arg1)
end
function wasm_ref_as_global_const(arg1)
ccall((:wasm_ref_as_global_const, libwasmtime), Ptr{wasm_global_t}, (Ptr{wasm_ref_t},), arg1)
end
function wasm_global_new(arg1, arg2, arg3)
ccall((:wasm_global_new, libwasmtime), Ptr{wasm_global_t}, (Ptr{wasm_store_t}, Ptr{wasm_globaltype_t}, Ptr{wasm_val_t}), arg1, arg2, arg3)
end
function wasm_global_type(arg1)
ccall((:wasm_global_type, libwasmtime), Ptr{wasm_globaltype_t}, (Ptr{wasm_global_t},), arg1)
end
function wasm_global_get(arg1, out)
ccall((:wasm_global_get, libwasmtime), Cvoid, (Ptr{wasm_global_t}, Ptr{wasm_val_t}), arg1, out)
end
function wasm_global_set(arg1, arg2)
ccall((:wasm_global_set, libwasmtime), Cvoid, (Ptr{wasm_global_t}, Ptr{wasm_val_t}), arg1, arg2)
end
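# wasm_table_t: a growable table of references, indexed by wasm_table_size_t.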
mutable struct wasm_table_t end
function wasm_table_delete(arg1)
ccall((:wasm_table_delete, libwasmtime), Cvoid, (Ptr{wasm_table_t},), arg1)
end
function wasm_table_copy(arg1)
ccall((:wasm_table_copy, libwasmtime), Ptr{wasm_table_t}, (Ptr{wasm_table_t},), arg1)
end
function wasm_table_same(arg1, arg2)
ccall((:wasm_table_same, libwasmtime), Bool, (Ptr{wasm_table_t}, Ptr{wasm_table_t}), arg1, arg2)
end
function wasm_table_get_host_info(arg1)
ccall((:wasm_table_get_host_info, libwasmtime), Ptr{Cvoid}, (Ptr{wasm_table_t},), arg1)
end
function wasm_table_set_host_info(arg1, arg2)
ccall((:wasm_table_set_host_info, libwasmtime), Cvoid, (Ptr{wasm_table_t}, Ptr{Cvoid}), arg1, arg2)
end
function wasm_table_set_host_info_with_finalizer(arg1, arg2, arg3)
ccall((:wasm_table_set_host_info_with_finalizer, libwasmtime), Cvoid, (Ptr{wasm_table_t}, Ptr{Cvoid}, Ptr{Cvoid}), arg1, arg2, arg3)
end
function wasm_table_as_ref(arg1)
ccall((:wasm_table_as_ref, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_table_t},), arg1)
end
function wasm_ref_as_table(arg1)
ccall((:wasm_ref_as_table, libwasmtime), Ptr{wasm_table_t}, (Ptr{wasm_ref_t},), arg1)
end
function wasm_table_as_ref_const(arg1)
ccall((:wasm_table_as_ref_const, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_table_t},), arg1)
end
function wasm_ref_as_table_const(arg1)
ccall((:wasm_ref_as_table_const, libwasmtime), Ptr{wasm_table_t}, (Ptr{wasm_ref_t},), arg1)
end
const wasm_table_size_t = UInt32
function wasm_table_new(arg1, arg2, init)
ccall((:wasm_table_new, libwasmtime), Ptr{wasm_table_t}, (Ptr{wasm_store_t}, Ptr{wasm_tabletype_t}, Ptr{wasm_ref_t}), arg1, arg2, init)
end
function wasm_table_type(arg1)
ccall((:wasm_table_type, libwasmtime), Ptr{wasm_tabletype_t}, (Ptr{wasm_table_t},), arg1)
end
function wasm_table_get(arg1, index)
ccall((:wasm_table_get, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_table_t}, wasm_table_size_t), arg1, index)
end
function wasm_table_set(arg1, index, arg3)
ccall((:wasm_table_set, libwasmtime), Bool, (Ptr{wasm_table_t}, wasm_table_size_t, Ptr{wasm_ref_t}), arg1, index, arg3)
end
function wasm_table_size(arg1)
ccall((:wasm_table_size, libwasmtime), wasm_table_size_t, (Ptr{wasm_table_t},), arg1)
end
function wasm_table_grow(arg1, delta, init)
ccall((:wasm_table_grow, libwasmtime), Bool, (Ptr{wasm_table_t}, wasm_table_size_t, Ptr{wasm_ref_t}), arg1, delta, init)
end
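# wasm_memory_t: a linear memory instance; wasm_memory_pages_t counts 64 KiB pages.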
mutable struct wasm_memory_t end
function wasm_memory_delete(arg1)
ccall((:wasm_memory_delete, libwasmtime), Cvoid, (Ptr{wasm_memory_t},), arg1)
end
function wasm_memory_copy(arg1)
ccall((:wasm_memory_copy, libwasmtime), Ptr{wasm_memory_t}, (Ptr{wasm_memory_t},), arg1)
end
function wasm_memory_same(arg1, arg2)
ccall((:wasm_memory_same, libwasmtime), Bool, (Ptr{wasm_memory_t}, Ptr{wasm_memory_t}), arg1, arg2)
end
function wasm_memory_get_host_info(arg1)
ccall((:wasm_memory_get_host_info, libwasmtime), Ptr{Cvoid}, (Ptr{wasm_memory_t},), arg1)
end
function wasm_memory_set_host_info(arg1, arg2)
ccall((:wasm_memory_set_host_info, libwasmtime), Cvoid, (Ptr{wasm_memory_t}, Ptr{Cvoid}), arg1, arg2)
end
function wasm_memory_set_host_info_with_finalizer(arg1, arg2, arg3)
ccall((:wasm_memory_set_host_info_with_finalizer, libwasmtime), Cvoid, (Ptr{wasm_memory_t}, Ptr{Cvoid}, Ptr{Cvoid}), arg1, arg2, arg3)
end
function wasm_memory_as_ref(arg1)
ccall((:wasm_memory_as_ref, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_memory_t},), arg1)
end
function wasm_ref_as_memory(arg1)
ccall((:wasm_ref_as_memory, libwasmtime), Ptr{wasm_memory_t}, (Ptr{wasm_ref_t},), arg1)
end
function wasm_memory_as_ref_const(arg1)
ccall((:wasm_memory_as_ref_const, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_memory_t},), arg1)
end
function wasm_ref_as_memory_const(arg1)
ccall((:wasm_ref_as_memory_const, libwasmtime), Ptr{wasm_memory_t}, (Ptr{wasm_ref_t},), arg1)
end
const wasm_memory_pages_t = UInt32
function wasm_memory_new(arg1, arg2)
ccall((:wasm_memory_new, libwasmtime), Ptr{wasm_memory_t}, (Ptr{wasm_store_t}, Ptr{wasm_memorytype_t}), arg1, arg2)
end
function wasm_memory_type(arg1)
ccall((:wasm_memory_type, libwasmtime), Ptr{wasm_memorytype_t}, (Ptr{wasm_memory_t},), arg1)
end
function wasm_memory_data(arg1)
ccall((:wasm_memory_data, libwasmtime), Ptr{byte_t}, (Ptr{wasm_memory_t},), arg1)
end
function wasm_memory_data_size(arg1)
ccall((:wasm_memory_data_size, libwasmtime), Csize_t, (Ptr{wasm_memory_t},), arg1)
end
function wasm_memory_size(arg1)
ccall((:wasm_memory_size, libwasmtime), wasm_memory_pages_t, (Ptr{wasm_memory_t},), arg1)
end
function wasm_memory_grow(arg1, delta)
ccall((:wasm_memory_grow, libwasmtime), Bool, (Ptr{wasm_memory_t}, wasm_memory_pages_t), arg1, delta)
end
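# wasm_extern_t: the runtime counterpart of wasm_externtype_t, wrapping a func, global, table, or memory.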
mutable struct wasm_extern_t end
function wasm_extern_delete(arg1)
ccall((:wasm_extern_delete, libwasmtime), Cvoid, (Ptr{wasm_extern_t},), arg1)
end
function wasm_extern_copy(arg1)
ccall((:wasm_extern_copy, libwasmtime), Ptr{wasm_extern_t}, (Ptr{wasm_extern_t},), arg1)
end
function wasm_extern_same(arg1, arg2)
ccall((:wasm_extern_same, libwasmtime), Bool, (Ptr{wasm_extern_t}, Ptr{wasm_extern_t}), arg1, arg2)
end
function wasm_extern_get_host_info(arg1)
ccall((:wasm_extern_get_host_info, libwasmtime), Ptr{Cvoid}, (Ptr{wasm_extern_t},), arg1)
end
function wasm_extern_set_host_info(arg1, arg2)
ccall((:wasm_extern_set_host_info, libwasmtime), Cvoid, (Ptr{wasm_extern_t}, Ptr{Cvoid}), arg1, arg2)
end
function wasm_extern_set_host_info_with_finalizer(arg1, arg2, arg3)
ccall((:wasm_extern_set_host_info_with_finalizer, libwasmtime), Cvoid, (Ptr{wasm_extern_t}, Ptr{Cvoid}, Ptr{Cvoid}), arg1, arg2, arg3)
end
function wasm_extern_as_ref(arg1)
ccall((:wasm_extern_as_ref, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_extern_t},), arg1)
end
function wasm_ref_as_extern(arg1)
ccall((:wasm_ref_as_extern, libwasmtime), Ptr{wasm_extern_t}, (Ptr{wasm_ref_t},), arg1)
end
function wasm_extern_as_ref_const(arg1)
ccall((:wasm_extern_as_ref_const, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_extern_t},), arg1)
end
function wasm_ref_as_extern_const(arg1)
ccall((:wasm_ref_as_extern_const, libwasmtime), Ptr{wasm_extern_t}, (Ptr{wasm_ref_t},), arg1)
end
mutable struct wasm_extern_vec_t
size::Csize_t
data::Ptr{Ptr{wasm_extern_t}}
end
function wasm_extern_vec_new_empty(out)
ccall((:wasm_extern_vec_new_empty, libwasmtime), Cvoid, (Ptr{wasm_extern_vec_t},), out)
end
function wasm_extern_vec_new_uninitialized(out, arg2)
ccall((:wasm_extern_vec_new_uninitialized, libwasmtime), Cvoid, (Ptr{wasm_extern_vec_t}, Csize_t), out, arg2)
end
function wasm_extern_vec_new(out, arg2, arg3)
ccall((:wasm_extern_vec_new, libwasmtime), Cvoid, (Ptr{wasm_extern_vec_t}, Csize_t, Ptr{Ptr{wasm_extern_t}}), out, arg2, arg3)
end
function wasm_extern_vec_copy(out, arg2)
ccall((:wasm_extern_vec_copy, libwasmtime), Cvoid, (Ptr{wasm_extern_vec_t}, Ptr{wasm_extern_vec_t}), out, arg2)
end
function wasm_extern_vec_delete(arg1)
ccall((:wasm_extern_vec_delete, libwasmtime), Cvoid, (Ptr{wasm_extern_vec_t},), arg1)
end
function wasm_extern_kind(arg1)
ccall((:wasm_extern_kind, libwasmtime), wasm_externkind_t, (Ptr{wasm_extern_t},), arg1)
end
function wasm_extern_type(arg1)
ccall((:wasm_extern_type, libwasmtime), Ptr{wasm_externtype_t}, (Ptr{wasm_extern_t},), arg1)
end
function wasm_func_as_extern(arg1)
ccall((:wasm_func_as_extern, libwasmtime), Ptr{wasm_extern_t}, (Ptr{wasm_func_t},), arg1)
end
function wasm_global_as_extern(arg1)
ccall((:wasm_global_as_extern, libwasmtime), Ptr{wasm_extern_t}, (Ptr{wasm_global_t},), arg1)
end
function wasm_table_as_extern(arg1)
ccall((:wasm_table_as_extern, libwasmtime), Ptr{wasm_extern_t}, (Ptr{wasm_table_t},), arg1)
end
function wasm_memory_as_extern(arg1)
ccall((:wasm_memory_as_extern, libwasmtime), Ptr{wasm_extern_t}, (Ptr{wasm_memory_t},), arg1)
end
function wasm_extern_as_func(arg1)
ccall((:wasm_extern_as_func, libwasmtime), Ptr{wasm_func_t}, (Ptr{wasm_extern_t},), arg1)
end
function wasm_extern_as_global(arg1)
ccall((:wasm_extern_as_global, libwasmtime), Ptr{wasm_global_t}, (Ptr{wasm_extern_t},), arg1)
end
function wasm_extern_as_table(arg1)
ccall((:wasm_extern_as_table, libwasmtime), Ptr{wasm_table_t}, (Ptr{wasm_extern_t},), arg1)
end
function wasm_extern_as_memory(arg1)
ccall((:wasm_extern_as_memory, libwasmtime), Ptr{wasm_memory_t}, (Ptr{wasm_extern_t},), arg1)
end
function wasm_func_as_extern_const(arg1)
ccall((:wasm_func_as_extern_const, libwasmtime), Ptr{wasm_extern_t}, (Ptr{wasm_func_t},), arg1)
end
function wasm_global_as_extern_const(arg1)
ccall((:wasm_global_as_extern_const, libwasmtime), Ptr{wasm_extern_t}, (Ptr{wasm_global_t},), arg1)
end
function wasm_table_as_extern_const(arg1)
ccall((:wasm_table_as_extern_const, libwasmtime), Ptr{wasm_extern_t}, (Ptr{wasm_table_t},), arg1)
end
function wasm_memory_as_extern_const(arg1)
ccall((:wasm_memory_as_extern_const, libwasmtime), Ptr{wasm_extern_t}, (Ptr{wasm_memory_t},), arg1)
end
function wasm_extern_as_func_const(arg1)
ccall((:wasm_extern_as_func_const, libwasmtime), Ptr{wasm_func_t}, (Ptr{wasm_extern_t},), arg1)
end
function wasm_extern_as_global_const(arg1)
ccall((:wasm_extern_as_global_const, libwasmtime), Ptr{wasm_global_t}, (Ptr{wasm_extern_t},), arg1)
end
function wasm_extern_as_table_const(arg1)
ccall((:wasm_extern_as_table_const, libwasmtime), Ptr{wasm_table_t}, (Ptr{wasm_extern_t},), arg1)
end
function wasm_extern_as_memory_const(arg1)
ccall((:wasm_extern_as_memory_const, libwasmtime), Ptr{wasm_memory_t}, (Ptr{wasm_extern_t},), arg1)
end
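# wasm_instance_t operations: instantiate a module against a vector of imports and enumerate its exports.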
function wasm_instance_delete(arg1)
ccall((:wasm_instance_delete, libwasmtime), Cvoid, (Ptr{wasm_instance_t},), arg1)
end
function wasm_instance_copy(arg1)
ccall((:wasm_instance_copy, libwasmtime), Ptr{wasm_instance_t}, (Ptr{wasm_instance_t},), arg1)
end
function wasm_instance_same(arg1, arg2)
ccall((:wasm_instance_same, libwasmtime), Bool, (Ptr{wasm_instance_t}, Ptr{wasm_instance_t}), arg1, arg2)
end
function wasm_instance_get_host_info(arg1)
ccall((:wasm_instance_get_host_info, libwasmtime), Ptr{Cvoid}, (Ptr{wasm_instance_t},), arg1)
end
function wasm_instance_set_host_info(arg1, arg2)
ccall((:wasm_instance_set_host_info, libwasmtime), Cvoid, (Ptr{wasm_instance_t}, Ptr{Cvoid}), arg1, arg2)
end
function wasm_instance_set_host_info_with_finalizer(arg1, arg2, arg3)
ccall((:wasm_instance_set_host_info_with_finalizer, libwasmtime), Cvoid, (Ptr{wasm_instance_t}, Ptr{Cvoid}, Ptr{Cvoid}), arg1, arg2, arg3)
end
function wasm_instance_as_ref(arg1)
ccall((:wasm_instance_as_ref, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_instance_t},), arg1)
end
function wasm_ref_as_instance(arg1)
ccall((:wasm_ref_as_instance, libwasmtime), Ptr{wasm_instance_t}, (Ptr{wasm_ref_t},), arg1)
end
function wasm_instance_as_ref_const(arg1)
ccall((:wasm_instance_as_ref_const, libwasmtime), Ptr{wasm_ref_t}, (Ptr{wasm_instance_t},), arg1)
end
function wasm_ref_as_instance_const(arg1)
ccall((:wasm_ref_as_instance_const, libwasmtime), Ptr{wasm_instance_t}, (Ptr{wasm_ref_t},), arg1)
end
function wasm_instance_new(arg1, arg2, imports, arg4)
ccall((:wasm_instance_new, libwasmtime), Ptr{wasm_instance_t}, (Ptr{wasm_store_t}, Ptr{wasm_module_t}, Ptr{wasm_extern_vec_t}, Ptr{Ptr{wasm_trap_t}}), arg1, arg2, imports, arg4)
end
function wasm_instance_exports(arg1, out)
ccall((:wasm_instance_exports, libwasmtime), Cvoid, (Ptr{wasm_instance_t}, Ptr{wasm_extern_vec_t}), arg1, out)
end
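# Short-hand constructors mirroring the inline helpers in wasm.h for common value types and function signatures.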
function wasm_valtype_new_i32()
ccall((:wasm_valtype_new_i32, libwasmtime), Ptr{wasm_valtype_t}, ())
end
function wasm_valtype_new_i64()
ccall((:wasm_valtype_new_i64, libwasmtime), Ptr{wasm_valtype_t}, ())
end
function wasm_valtype_new_f32()
ccall((:wasm_valtype_new_f32, libwasmtime), Ptr{wasm_valtype_t}, ())
end
function wasm_valtype_new_f64()
ccall((:wasm_valtype_new_f64, libwasmtime), Ptr{wasm_valtype_t}, ())
end
function wasm_valtype_new_anyref()
ccall((:wasm_valtype_new_anyref, libwasmtime), Ptr{wasm_valtype_t}, ())
end
function wasm_valtype_new_funcref()
ccall((:wasm_valtype_new_funcref, libwasmtime), Ptr{wasm_valtype_t}, ())
end
function wasm_functype_new_0_0()
ccall((:wasm_functype_new_0_0, libwasmtime), Ptr{wasm_functype_t}, ())
end
function wasm_functype_new_1_0(p)
ccall((:wasm_functype_new_1_0, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasm_valtype_t},), p)
end
function wasm_functype_new_2_0(p1, p2)
ccall((:wasm_functype_new_2_0, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}), p1, p2)
end
function wasm_functype_new_3_0(p1, p2, p3)
ccall((:wasm_functype_new_3_0, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}), p1, p2, p3)
end
function wasm_functype_new_0_1(r)
ccall((:wasm_functype_new_0_1, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasm_valtype_t},), r)
end
function wasm_functype_new_1_1(p, r)
ccall((:wasm_functype_new_1_1, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}), p, r)
end
function wasm_functype_new_2_1(p1, p2, r)
ccall((:wasm_functype_new_2_1, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}), p1, p2, r)
end
function wasm_functype_new_3_1(p1, p2, p3, r)
ccall((:wasm_functype_new_3_1, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}), p1, p2, p3, r)
end
function wasm_functype_new_0_2(r1, r2)
ccall((:wasm_functype_new_0_2, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}), r1, r2)
end
function wasm_functype_new_1_2(p, r1, r2)
ccall((:wasm_functype_new_1_2, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}), p, r1, r2)
end
function wasm_functype_new_2_2(p1, p2, r1, r2)
ccall((:wasm_functype_new_2_2, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}), p1, p2, r1, r2)
end
function wasm_functype_new_3_2(p1, p2, p3, r1, r2)
ccall((:wasm_functype_new_3_2, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}, Ptr{wasm_valtype_t}), p1, p2, p3, r1, r2)
end
function wasm_val_init_ptr(out, p)
ccall((:wasm_val_init_ptr, libwasmtime), Cvoid, (Ptr{wasm_val_t}, Ptr{Cvoid}), out, p)
end
function wasm_val_ptr(val)
ccall((:wasm_val_ptr, libwasmtime), Ptr{Cvoid}, (Ptr{wasm_val_t},), val)
end
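# Wasmtime-specific API. Functions returning Ptr{wasmtime_error_t} return C_NULL on success.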
mutable struct wasmtime_error end
const wasmtime_error_t = wasmtime_error
function wasmtime_error_delete(error)
ccall((:wasmtime_error_delete, libwasmtime), Cvoid, (Ptr{wasmtime_error_t},), error)
end
function wasmtime_error_message(error, message)
ccall((:wasmtime_error_message, libwasmtime), Cvoid, (Ptr{wasmtime_error_t}, Ptr{wasm_name_t}), error, message)
end
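# Engine configuration setters; they mutate a wasm_config_t before it is consumed by engine creation.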
const wasmtime_strategy_t = UInt8
@cenum wasmtime_strategy_enum::UInt32 begin
WASMTIME_STRATEGY_AUTO = 0
WASMTIME_STRATEGY_CRANELIFT = 1
end
const wasmtime_opt_level_t = UInt8
@cenum wasmtime_opt_level_enum::UInt32 begin
WASMTIME_OPT_LEVEL_NONE = 0
WASMTIME_OPT_LEVEL_SPEED = 1
WASMTIME_OPT_LEVEL_SPEED_AND_SIZE = 2
end
const wasmtime_profiling_strategy_t = UInt8
@cenum wasmtime_profiling_strategy_enum::UInt32 begin
WASMTIME_PROFILING_STRATEGY_NONE = 0
WASMTIME_PROFILING_STRATEGY_JITDUMP = 1
WASMTIME_PROFILING_STRATEGY_VTUNE = 2
end
function wasmtime_config_debug_info_set(arg1, arg2)
ccall((:wasmtime_config_debug_info_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, Bool), arg1, arg2)
end
function wasmtime_config_interruptable_set(arg1, arg2)
ccall((:wasmtime_config_interruptable_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, Bool), arg1, arg2)
end
function wasmtime_config_consume_fuel_set(arg1, arg2)
ccall((:wasmtime_config_consume_fuel_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, Bool), arg1, arg2)
end
function wasmtime_config_max_wasm_stack_set(arg1, arg2)
ccall((:wasmtime_config_max_wasm_stack_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, Csize_t), arg1, arg2)
end
function wasmtime_config_wasm_threads_set(arg1, arg2)
ccall((:wasmtime_config_wasm_threads_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, Bool), arg1, arg2)
end
function wasmtime_config_wasm_reference_types_set(arg1, arg2)
ccall((:wasmtime_config_wasm_reference_types_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, Bool), arg1, arg2)
end
function wasmtime_config_wasm_simd_set(arg1, arg2)
ccall((:wasmtime_config_wasm_simd_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, Bool), arg1, arg2)
end
function wasmtime_config_wasm_bulk_memory_set(arg1, arg2)
ccall((:wasmtime_config_wasm_bulk_memory_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, Bool), arg1, arg2)
end
function wasmtime_config_wasm_multi_value_set(arg1, arg2)
ccall((:wasmtime_config_wasm_multi_value_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, Bool), arg1, arg2)
end
function wasmtime_config_wasm_multi_memory_set(arg1, arg2)
ccall((:wasmtime_config_wasm_multi_memory_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, Bool), arg1, arg2)
end
function wasmtime_config_wasm_module_linking_set(arg1, arg2)
ccall((:wasmtime_config_wasm_module_linking_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, Bool), arg1, arg2)
end
function wasmtime_config_wasm_memory64_set(arg1, arg2)
ccall((:wasmtime_config_wasm_memory64_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, Bool), arg1, arg2)
end
function wasmtime_config_strategy_set(arg1, arg2)
ccall((:wasmtime_config_strategy_set, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasm_config_t}, wasmtime_strategy_t), arg1, arg2)
end
function wasmtime_config_cranelift_debug_verifier_set(arg1, arg2)
ccall((:wasmtime_config_cranelift_debug_verifier_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, Bool), arg1, arg2)
end
function wasmtime_config_cranelift_opt_level_set(arg1, arg2)
ccall((:wasmtime_config_cranelift_opt_level_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, wasmtime_opt_level_t), arg1, arg2)
end
function wasmtime_config_profiler_set(arg1, arg2)
ccall((:wasmtime_config_profiler_set, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasm_config_t}, wasmtime_profiling_strategy_t), arg1, arg2)
end
function wasmtime_config_static_memory_maximum_size_set(arg1, arg2)
ccall((:wasmtime_config_static_memory_maximum_size_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, UInt64), arg1, arg2)
end
function wasmtime_config_static_memory_guard_size_set(arg1, arg2)
ccall((:wasmtime_config_static_memory_guard_size_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, UInt64), arg1, arg2)
end
function wasmtime_config_dynamic_memory_guard_size_set(arg1, arg2)
ccall((:wasmtime_config_dynamic_memory_guard_size_set, libwasmtime), Cvoid, (Ptr{wasm_config_t}, UInt64), arg1, arg2)
end
function wasmtime_config_cache_config_load(arg1, arg2)
ccall((:wasmtime_config_cache_config_load, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasm_config_t}, Cstring), arg1, arg2)
end
mutable struct wasmtime_moduletype end
const wasmtime_moduletype_t = wasmtime_moduletype
function wasmtime_moduletype_delete(ty)
ccall((:wasmtime_moduletype_delete, libwasmtime), Cvoid, (Ptr{wasmtime_moduletype_t},), ty)
end
function wasmtime_moduletype_imports(arg1, out)
ccall((:wasmtime_moduletype_imports, libwasmtime), Cvoid, (Ptr{wasmtime_moduletype_t}, Ptr{wasm_importtype_vec_t}), arg1, out)
end
function wasmtime_moduletype_exports(arg1, out)
ccall((:wasmtime_moduletype_exports, libwasmtime), Cvoid, (Ptr{wasmtime_moduletype_t}, Ptr{wasm_exporttype_vec_t}), arg1, out)
end
function wasmtime_moduletype_as_externtype(arg1)
ccall((:wasmtime_moduletype_as_externtype, libwasmtime), Ptr{wasm_externtype_t}, (Ptr{wasmtime_moduletype_t},), arg1)
end
function wasmtime_externtype_as_moduletype(arg1)
ccall((:wasmtime_externtype_as_moduletype, libwasmtime), Ptr{wasmtime_moduletype_t}, (Ptr{wasm_externtype_t},), arg1)
end
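# wasmtime_module_t: wasmtime's own module representation, compiled from raw wasm bytes.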
mutable struct wasmtime_module end
const wasmtime_module_t = wasmtime_module
function wasmtime_module_new(engine, wasm, wasm_len, ret)
ccall((:wasmtime_module_new, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasm_engine_t}, Ptr{UInt8}, Csize_t, Ptr{Ptr{wasmtime_module_t}}), engine, wasm, wasm_len, ret)
end
function wasmtime_module_delete(m)
ccall((:wasmtime_module_delete, libwasmtime), Cvoid, (Ptr{wasmtime_module_t},), m)
end
function wasmtime_module_clone(m)
ccall((:wasmtime_module_clone, libwasmtime), Ptr{wasmtime_module_t}, (Ptr{wasmtime_module_t},), m)
end
function wasmtime_module_validate(engine, wasm, wasm_len)
ccall((:wasmtime_module_validate, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasm_engine_t}, Ptr{UInt8}, Csize_t), engine, wasm, wasm_len)
end
function wasmtime_module_type(arg1)
ccall((:wasmtime_module_type, libwasmtime), Ptr{wasmtime_moduletype_t}, (Ptr{wasmtime_module_t},), arg1)
end
function wasmtime_module_serialize(_module, ret)
ccall((:wasmtime_module_serialize, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_module_t}, Ptr{wasm_byte_vec_t}), _module, ret)
end
function wasmtime_module_deserialize(engine, bytes, bytes_len, ret)
ccall((:wasmtime_module_deserialize, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasm_engine_t}, Ptr{UInt8}, Csize_t, Ptr{Ptr{wasmtime_module_t}}), engine, bytes, bytes_len, ret)
end
function wasmtime_module_deserialize_file(engine, path, ret)
ccall((:wasmtime_module_deserialize_file, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasm_engine_t}, Cstring, Ptr{Ptr{wasmtime_module_t}}), engine, path, ret)
end
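# wasmtime_store_t owns all runtime objects; most wasmtime calls take its interior wasmtime_context_t.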
mutable struct wasmtime_store end
const wasmtime_store_t = wasmtime_store
mutable struct wasmtime_context end
const wasmtime_context_t = wasmtime_context
function wasmtime_store_new(engine, data, finalizer)
ccall((:wasmtime_store_new, libwasmtime), Ptr{wasmtime_store_t}, (Ptr{wasm_engine_t}, Ptr{Cvoid}, Ptr{Cvoid}), engine, data, finalizer)
end
function wasmtime_store_context(store)
ccall((:wasmtime_store_context, libwasmtime), Ptr{wasmtime_context_t}, (Ptr{wasmtime_store_t},), store)
end
function wasmtime_store_delete(store)
ccall((:wasmtime_store_delete, libwasmtime), Cvoid, (Ptr{wasmtime_store_t},), store)
end
function wasmtime_context_get_data(context)
ccall((:wasmtime_context_get_data, libwasmtime), Ptr{Cvoid}, (Ptr{wasmtime_context_t},), context)
end
function wasmtime_context_set_data(context, data)
ccall((:wasmtime_context_set_data, libwasmtime), Cvoid, (Ptr{wasmtime_context_t}, Ptr{Cvoid}), context, data)
end
function wasmtime_context_gc(context)
ccall((:wasmtime_context_gc, libwasmtime), Cvoid, (Ptr{wasmtime_context_t},), context)
end
function wasmtime_context_add_fuel(store, fuel)
ccall((:wasmtime_context_add_fuel, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_context_t}, UInt64), store, fuel)
end
function wasmtime_context_fuel_consumed(context, fuel)
ccall((:wasmtime_context_fuel_consumed, libwasmtime), Bool, (Ptr{wasmtime_context_t}, Ptr{UInt64}), context, fuel)
end
function wasmtime_context_consume_fuel(context, fuel, remaining)
ccall((:wasmtime_context_consume_fuel, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_context_t}, UInt64, Ptr{UInt64}), context, fuel, remaining)
end
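# WASI support: attach a wasi_config_t to a context.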
mutable struct wasi_config_t end
function wasmtime_context_set_wasi(context, wasi)
ccall((:wasmtime_context_set_wasi, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_context_t}, Ptr{wasi_config_t}), context, wasi)
end
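# wasmtime_interrupt_handle_t: interrupt a running instance from another thread (assumes the interruptable config flag is set).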
mutable struct wasmtime_interrupt_handle end
const wasmtime_interrupt_handle_t = wasmtime_interrupt_handle
function wasmtime_interrupt_handle_new(context)
ccall((:wasmtime_interrupt_handle_new, libwasmtime), Ptr{wasmtime_interrupt_handle_t}, (Ptr{wasmtime_context_t},), context)
end
function wasmtime_interrupt_handle_interrupt(handle)
ccall((:wasmtime_interrupt_handle_interrupt, libwasmtime), Cvoid, (Ptr{wasmtime_interrupt_handle_t},), handle)
end
function wasmtime_interrupt_handle_delete(handle)
ccall((:wasmtime_interrupt_handle_delete, libwasmtime), Cvoid, (Ptr{wasmtime_interrupt_handle_t},), handle)
end
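# Runtime objects in the wasmtime API are plain (store_id, index) handles rather than owned pointers.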
mutable struct wasmtime_func
store_id::UInt64
index::Csize_t
end
const wasmtime_func_t = wasmtime_func
mutable struct wasmtime_table
store_id::UInt64
index::Csize_t
end
const wasmtime_table_t = wasmtime_table
mutable struct wasmtime_memory
store_id::UInt64
index::Csize_t
end
const wasmtime_memory_t = wasmtime_memory
mutable struct wasmtime_instance
store_id::UInt64
index::Csize_t
end
const wasmtime_instance_t = wasmtime_instance
mutable struct wasmtime_global
store_id::UInt64
index::Csize_t
end
const wasmtime_global_t = wasmtime_global
const wasmtime_extern_kind_t = UInt8
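# wasmtime_extern_union mirrors the C union of extern variants; wasmtime_extern.kind discriminates it.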
struct wasmtime_extern_union
data::NTuple{16, UInt8}
end
function Base.getproperty(x::Ptr{wasmtime_extern_union}, f::Symbol)
f === :func && return Ptr{wasmtime_func_t}(x + 0)
f === :_global && return Ptr{wasmtime_global_t}(x + 0)
f === :table && return Ptr{wasmtime_table_t}(x + 0)
f === :memory && return Ptr{wasmtime_memory_t}(x + 0)
f === :instance && return Ptr{wasmtime_instance_t}(x + 0)
f === :_module && return Ptr{Ptr{wasmtime_module_t}}(x + 0)
return getfield(x, f)
end
function Base.getproperty(x::wasmtime_extern_union, f::Symbol)
r = Ref{wasmtime_extern_union}(x)
ptr = Base.unsafe_convert(Ptr{wasmtime_extern_union}, r)
fptr = getproperty(ptr, f)
GC.@preserve r unsafe_load(fptr)
end
function Base.setproperty!(x::Ptr{wasmtime_extern_union}, f::Symbol, v)
unsafe_store!(getproperty(x, f), v)
end
const wasmtime_extern_union_t = wasmtime_extern_union
mutable struct wasmtime_extern
kind::wasmtime_extern_kind_t
var"##pad0#278"::NTuple{7, UInt8}
of::wasmtime_extern_union_t
wasmtime_extern(kind, of) = new(kind, Tuple((zero(UInt8) for _ = 1:7)), of)
end
const wasmtime_extern_t = wasmtime_extern
function wasmtime_extern_delete(val)
ccall((:wasmtime_extern_delete, libwasmtime), Cvoid, (Ptr{wasmtime_extern_t},), val)
end
function wasmtime_extern_type(context, val)
ccall((:wasmtime_extern_type, libwasmtime), Ptr{wasm_externtype_t}, (Ptr{wasmtime_context_t}, Ptr{wasmtime_extern_t}), context, val)
end
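# wasmtime_externref_t: host-defined reference values, duplicated and released via clone/delete.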
mutable struct wasmtime_externref end
const wasmtime_externref_t = wasmtime_externref
function wasmtime_externref_new(data, finalizer)
ccall((:wasmtime_externref_new, libwasmtime), Ptr{wasmtime_externref_t}, (Ptr{Cvoid}, Ptr{Cvoid}), data, finalizer)
end
function wasmtime_externref_data(data)
ccall((:wasmtime_externref_data, libwasmtime), Ptr{Cvoid}, (Ptr{wasmtime_externref_t},), data)
end
function wasmtime_externref_clone(ref)
ccall((:wasmtime_externref_clone, libwasmtime), Ptr{wasmtime_externref_t}, (Ptr{wasmtime_externref_t},), ref)
end
function wasmtime_externref_delete(ref)
ccall((:wasmtime_externref_delete, libwasmtime), Cvoid, (Ptr{wasmtime_externref_t},), ref)
end
function wasmtime_externref_from_raw(context, raw)
ccall((:wasmtime_externref_from_raw, libwasmtime), Ptr{wasmtime_externref_t}, (Ptr{wasmtime_context_t}, Csize_t), context, raw)
end
function wasmtime_externref_to_raw(context, ref)
ccall((:wasmtime_externref_to_raw, libwasmtime), Csize_t, (Ptr{wasmtime_context_t}, Ptr{wasmtime_externref_t}), context, ref)
end
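# wasmtime values: kind-discriminated, with wasmtime_v128 as a raw 16-byte SIMD payload.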
const wasmtime_valkind_t = UInt8
const wasmtime_v128 = NTuple{16, UInt8}
struct wasmtime_valunion
data::NTuple{16, UInt8}
end
function Base.getproperty(x::Ptr{wasmtime_valunion}, f::Symbol)
f === :i32 && return Ptr{Int32}(x + 0)
f === :i64 && return Ptr{Int64}(x + 0)
f === :f32 && return Ptr{float32_t}(x + 0)
f === :f64 && return Ptr{float64_t}(x + 0)
f === :funcref && return Ptr{wasmtime_func_t}(x + 0)
f === :externref && return Ptr{Ptr{wasmtime_externref_t}}(x + 0)
f === :v128 && return Ptr{wasmtime_v128}(x + 0)
return getfield(x, f)
end
function Base.getproperty(x::wasmtime_valunion, f::Symbol)
r = Ref{wasmtime_valunion}(x)
ptr = Base.unsafe_convert(Ptr{wasmtime_valunion}, r)
fptr = getproperty(ptr, f)
GC.@preserve r unsafe_load(fptr)
end
function Base.setproperty!(x::Ptr{wasmtime_valunion}, f::Symbol, v)
unsafe_store!(getproperty(x, f), v)
end
const wasmtime_valunion_t = wasmtime_valunion
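# wasmtime_val_raw_t: the unchecked ABI representation consumed by the *_unchecked entry points.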
struct wasmtime_val_raw
data::NTuple{16, UInt8}
end
function Base.getproperty(x::Ptr{wasmtime_val_raw}, f::Symbol)
f === :i32 && return Ptr{Int32}(x + 0)
f === :i64 && return Ptr{Int64}(x + 0)
f === :f32 && return Ptr{float32_t}(x + 0)
f === :f64 && return Ptr{float64_t}(x + 0)
f === :v128 && return Ptr{wasmtime_v128}(x + 0)
f === :funcref && return Ptr{Csize_t}(x + 0)
f === :externref && return Ptr{Csize_t}(x + 0)
return getfield(x, f)
end
function Base.getproperty(x::wasmtime_val_raw, f::Symbol)
r = Ref{wasmtime_val_raw}(x)
ptr = Base.unsafe_convert(Ptr{wasmtime_val_raw}, r)
fptr = getproperty(ptr, f)
GC.@preserve r unsafe_load(fptr)
end
function Base.setproperty!(x::Ptr{wasmtime_val_raw}, f::Symbol, v)
unsafe_store!(getproperty(x, f), v)
end
const wasmtime_val_raw_t = wasmtime_val_raw
struct wasmtime_val
kind::wasmtime_valkind_t
var"##pad0#279"::NTuple{7, UInt8}
of::wasmtime_valunion_t
wasmtime_val(kind, of) = new(kind, Tuple((zero(UInt8) for _ = 1:7)), of)
end
const wasmtime_val_t = wasmtime_val
function wasmtime_val_delete(val)
ccall((:wasmtime_val_delete, libwasmtime), Cvoid, (Ptr{wasmtime_val_t},), val)
end
function wasmtime_val_copy(dst, src)
ccall((:wasmtime_val_copy, libwasmtime), Cvoid, (Ptr{wasmtime_val_t}, Ptr{wasmtime_val_t}), dst, src)
end
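# Host callbacks receive a wasmtime_caller_t, which exposes the caller's context and exports.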
mutable struct wasmtime_caller end
const wasmtime_caller_t = wasmtime_caller
# typedef wasm_trap_t* (*wasmtime_func_callback_t)(void* env, wasmtime_caller_t* caller, const wasmtime_val_t* args, size_t nargs, wasmtime_val_t* results, size_t nresults)
const wasmtime_func_callback_t = Ptr{Cvoid}
function wasmtime_func_new(store, type, callback, env, finalizer, ret)
ccall((:wasmtime_func_new, libwasmtime), Cvoid, (Ptr{wasmtime_context_t}, Ptr{wasm_functype_t}, wasmtime_func_callback_t, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{wasmtime_func_t}), store, type, callback, env, finalizer, ret)
end
# typedef wasm_trap_t* (*wasmtime_func_unchecked_callback_t)(void* env, wasmtime_caller_t* caller, wasmtime_val_raw_t* args_and_results)
const wasmtime_func_unchecked_callback_t = Ptr{Cvoid}
function wasmtime_func_new_unchecked(store, type, callback, env, finalizer, ret)
ccall((:wasmtime_func_new_unchecked, libwasmtime), Cvoid, (Ptr{wasmtime_context_t}, Ptr{wasm_functype_t}, wasmtime_func_unchecked_callback_t, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{wasmtime_func_t}), store, type, callback, env, finalizer, ret)
end
function wasmtime_func_type(store, func)
ccall((:wasmtime_func_type, libwasmtime), Ptr{wasm_functype_t}, (Ptr{wasmtime_context_t}, Ptr{wasmtime_func_t}), store, func)
end
function wasmtime_func_call(store, func, args, nargs, results, nresults, trap)
ccall((:wasmtime_func_call, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_context_t}, Ptr{wasmtime_func_t}, Ptr{wasmtime_val_t}, Csize_t, Ptr{wasmtime_val_t}, Csize_t, Ptr{Ptr{wasm_trap_t}}), store, func, args, nargs, results, nresults, trap)
end
function wasmtime_func_call_unchecked(store, func, args_and_results)
ccall((:wasmtime_func_call_unchecked, libwasmtime), Ptr{wasm_trap_t}, (Ptr{wasmtime_context_t}, Ptr{wasmtime_func_t}, Ptr{wasmtime_val_raw_t}), store, func, args_and_results)
end
function wasmtime_caller_export_get(caller, name, name_len, item)
ccall((:wasmtime_caller_export_get, libwasmtime), Bool, (Ptr{wasmtime_caller_t}, Cstring, Csize_t, Ptr{wasmtime_extern_t}), caller, name, name_len, item)
end
function wasmtime_caller_context(caller)
ccall((:wasmtime_caller_context, libwasmtime), Ptr{wasmtime_context_t}, (Ptr{wasmtime_caller_t},), caller)
end
function wasmtime_func_from_raw(context, raw, ret)
ccall((:wasmtime_func_from_raw, libwasmtime), Cvoid, (Ptr{wasmtime_context_t}, Csize_t, Ptr{wasmtime_func_t}), context, raw, ret)
end
function wasmtime_func_to_raw(context, func)
ccall((:wasmtime_func_to_raw, libwasmtime), Csize_t, (Ptr{wasmtime_context_t}, Ptr{wasmtime_func_t}), context, func)
end
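# wasmtime_global operations: typed globals addressed through a store context.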
function wasmtime_global_new(store, type, val, ret)
ccall((:wasmtime_global_new, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_context_t}, Ptr{wasm_globaltype_t}, Ptr{wasmtime_val_t}, Ptr{wasmtime_global_t}), store, type, val, ret)
end
function wasmtime_global_type(store, _global)
ccall((:wasmtime_global_type, libwasmtime), Ptr{wasm_globaltype_t}, (Ptr{wasmtime_context_t}, Ptr{wasmtime_global_t}), store, _global)
end
function wasmtime_global_get(store, _global, out)
ccall((:wasmtime_global_get, libwasmtime), Cvoid, (Ptr{wasmtime_context_t}, Ptr{wasmtime_global_t}, Ptr{wasmtime_val_t}), store, _global, out)
end
function wasmtime_global_set(store, _global, val)
ccall((:wasmtime_global_set, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_context_t}, Ptr{wasmtime_global_t}, Ptr{wasmtime_val_t}), store, _global, val)
end
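# wasmtime_instancetype_t and module instantiation in the wasmtime API.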
mutable struct wasmtime_instancetype end
const wasmtime_instancetype_t = wasmtime_instancetype
function wasmtime_instancetype_delete(ty)
ccall((:wasmtime_instancetype_delete, libwasmtime), Cvoid, (Ptr{wasmtime_instancetype_t},), ty)
end
function wasmtime_instancetype_exports(arg1, out)
ccall((:wasmtime_instancetype_exports, libwasmtime), Cvoid, (Ptr{wasmtime_instancetype_t}, Ptr{wasm_exporttype_vec_t}), arg1, out)
end
function wasmtime_instancetype_as_externtype(arg1)
ccall((:wasmtime_instancetype_as_externtype, libwasmtime), Ptr{wasm_externtype_t}, (Ptr{wasmtime_instancetype_t},), arg1)
end
function wasmtime_externtype_as_instancetype(arg1)
ccall((:wasmtime_externtype_as_instancetype, libwasmtime), Ptr{wasmtime_instancetype_t}, (Ptr{wasm_externtype_t},), arg1)
end
function wasmtime_instance_new(store, _module, imports, nimports, instance, trap)
ccall((:wasmtime_instance_new, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_context_t}, Ptr{wasmtime_module_t}, Ptr{wasmtime_extern_t}, Csize_t, Ptr{wasmtime_instance_t}, Ptr{Ptr{wasm_trap_t}}), store, _module, imports, nimports, instance, trap)
end
function wasmtime_instance_type(store, instance)
ccall((:wasmtime_instance_type, libwasmtime), Ptr{wasmtime_instancetype_t}, (Ptr{wasmtime_context_t}, Ptr{wasmtime_instance_t}), store, instance)
end
function wasmtime_instance_export_get(store, instance, name, name_len, item)
ccall((:wasmtime_instance_export_get, libwasmtime), Bool, (Ptr{wasmtime_context_t}, Ptr{wasmtime_instance_t}, Cstring, Csize_t, Ptr{wasmtime_extern_t}), store, instance, name, name_len, item)
end
function wasmtime_instance_export_nth(store, instance, index, name, name_len, item)
ccall((:wasmtime_instance_export_nth, libwasmtime), Bool, (Ptr{wasmtime_context_t}, Ptr{wasmtime_instance_t}, Csize_t, Ptr{Cstring}, Ptr{Csize_t}, Ptr{wasmtime_extern_t}), store, instance, index, name, name_len, item)
end
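# Illustrative sketch, not part of the generated bindings: looking up an
# export by name with the binding above. Assumes `wasmtime_extern_t` is a
# plain bits struct so an uninitialized Ref can act as the out-parameter;
# `store` and `instance` must be valid pointers.
function example_export_get(store, instance, name::String)
    item = Ref{wasmtime_extern_t}()
    found = wasmtime_instance_export_get(store, instance, name, sizeof(name), item)
    found || error("export $name not found")
    item[]
end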
mutable struct wasmtime_linker end
const wasmtime_linker_t = wasmtime_linker
function wasmtime_linker_new(engine)
ccall((:wasmtime_linker_new, libwasmtime), Ptr{wasmtime_linker_t}, (Ptr{wasm_engine_t},), engine)
end
function wasmtime_linker_delete(linker)
ccall((:wasmtime_linker_delete, libwasmtime), Cvoid, (Ptr{wasmtime_linker_t},), linker)
end
function wasmtime_linker_allow_shadowing(linker, allow_shadowing)
ccall((:wasmtime_linker_allow_shadowing, libwasmtime), Cvoid, (Ptr{wasmtime_linker_t}, Bool), linker, allow_shadowing)
end
function wasmtime_linker_define(linker, _module, module_len, name, name_len, item)
ccall((:wasmtime_linker_define, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_linker_t}, Cstring, Csize_t, Cstring, Csize_t, Ptr{wasmtime_extern_t}), linker, _module, module_len, name, name_len, item)
end
function wasmtime_linker_define_func(linker, _module, module_len, name, name_len, ty, cb, data, finalizer)
ccall((:wasmtime_linker_define_func, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_linker_t}, Cstring, Csize_t, Cstring, Csize_t, Ptr{wasm_functype_t}, wasmtime_func_callback_t, Ptr{Cvoid}, Ptr{Cvoid}), linker, _module, module_len, name, name_len, ty, cb, data, finalizer)
end
function wasmtime_linker_define_func_unchecked(linker, _module, module_len, name, name_len, ty, cb, data, finalizer)
ccall((:wasmtime_linker_define_func_unchecked, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_linker_t}, Cstring, Csize_t, Cstring, Csize_t, Ptr{wasm_functype_t}, wasmtime_func_unchecked_callback_t, Ptr{Cvoid}, Ptr{Cvoid}), linker, _module, module_len, name, name_len, ty, cb, data, finalizer)
end
function wasmtime_linker_define_wasi(linker)
ccall((:wasmtime_linker_define_wasi, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_linker_t},), linker)
end
function wasmtime_linker_define_instance(linker, store, name, name_len, instance)
ccall((:wasmtime_linker_define_instance, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_linker_t}, Ptr{wasmtime_context_t}, Cstring, Csize_t, Ptr{wasmtime_instance_t}), linker, store, name, name_len, instance)
end
function wasmtime_linker_instantiate(linker, store, _module, instance, trap)
ccall((:wasmtime_linker_instantiate, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_linker_t}, Ptr{wasmtime_context_t}, Ptr{wasmtime_module_t}, Ptr{wasmtime_instance_t}, Ptr{Ptr{wasm_trap_t}}), linker, store, _module, instance, trap)
end
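# Illustrative sketch, not part of the generated bindings: the usual linker
# flow with the functions above: register the WASI imports, then instantiate.
# Errors come back as Ptr{wasmtime_error_t} (C_NULL on success); assumes
# `wasmtime_instance_t` is a plain bits struct.
function example_instantiate(linker, store, mod)
    err = wasmtime_linker_define_wasi(linker)
    err == C_NULL || error("wasmtime_linker_define_wasi failed")
    instance = Ref{wasmtime_instance_t}()
    trap = Ref{Ptr{wasm_trap_t}}(C_NULL)
    err = wasmtime_linker_instantiate(linker, store, mod, instance, trap)
    err == C_NULL || error("wasmtime_linker_instantiate failed")
    instance[]
end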
function wasmtime_linker_module(linker, store, name, name_len, _module)
ccall((:wasmtime_linker_module, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_linker_t}, Ptr{wasmtime_context_t}, Cstring, Csize_t, Ptr{wasmtime_module_t}), linker, store, name, name_len, _module)
end
function wasmtime_linker_get_default(linker, store, name, name_len, func)
ccall((:wasmtime_linker_get_default, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_linker_t}, Ptr{wasmtime_context_t}, Cstring, Csize_t, Ptr{wasmtime_func_t}), linker, store, name, name_len, func)
end
function wasmtime_linker_get(linker, store, _module, module_len, name, name_len, item)
ccall((:wasmtime_linker_get, libwasmtime), Bool, (Ptr{wasmtime_linker_t}, Ptr{wasmtime_context_t}, Cstring, Csize_t, Cstring, Csize_t, Ptr{wasmtime_extern_t}), linker, store, _module, module_len, name, name_len, item)
end
function wasmtime_memorytype_new(min, max_present, max, is_64)
ccall((:wasmtime_memorytype_new, libwasmtime), Ptr{wasm_memorytype_t}, (UInt64, Bool, UInt64, Bool), min, max_present, max, is_64)
end
function wasmtime_memorytype_minimum(ty)
ccall((:wasmtime_memorytype_minimum, libwasmtime), UInt64, (Ptr{wasm_memorytype_t},), ty)
end
function wasmtime_memorytype_maximum(ty, max)
ccall((:wasmtime_memorytype_maximum, libwasmtime), Bool, (Ptr{wasm_memorytype_t}, Ptr{UInt64}), ty, max)
end
function wasmtime_memorytype_is64(ty)
ccall((:wasmtime_memorytype_is64, libwasmtime), Bool, (Ptr{wasm_memorytype_t},), ty)
end
function wasmtime_memory_new(store, ty, ret)
ccall((:wasmtime_memory_new, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_context_t}, Ptr{wasm_memorytype_t}, Ptr{wasmtime_memory_t}), store, ty, ret)
end
function wasmtime_memory_type(store, memory)
ccall((:wasmtime_memory_type, libwasmtime), Ptr{wasm_memorytype_t}, (Ptr{wasmtime_context_t}, Ptr{wasmtime_memory_t}), store, memory)
end
function wasmtime_memory_data(store, memory)
ccall((:wasmtime_memory_data, libwasmtime), Ptr{UInt8}, (Ptr{wasmtime_context_t}, Ptr{wasmtime_memory_t}), store, memory)
end
function wasmtime_memory_data_size(store, memory)
ccall((:wasmtime_memory_data_size, libwasmtime), Csize_t, (Ptr{wasmtime_context_t}, Ptr{wasmtime_memory_t}), store, memory)
end
function wasmtime_memory_size(store, memory)
ccall((:wasmtime_memory_size, libwasmtime), UInt64, (Ptr{wasmtime_context_t}, Ptr{wasmtime_memory_t}), store, memory)
end
function wasmtime_memory_grow(store, memory, delta, prev_size)
ccall((:wasmtime_memory_grow, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_context_t}, Ptr{wasmtime_memory_t}, UInt64, Ptr{UInt64}), store, memory, delta, prev_size)
end
function wasmtime_table_new(store, ty, init, table)
ccall((:wasmtime_table_new, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_context_t}, Ptr{wasm_tabletype_t}, Ptr{wasmtime_val_t}, Ptr{wasmtime_table_t}), store, ty, init, table)
end
function wasmtime_table_type(store, table)
ccall((:wasmtime_table_type, libwasmtime), Ptr{wasm_tabletype_t}, (Ptr{wasmtime_context_t}, Ptr{wasmtime_table_t}), store, table)
end
function wasmtime_table_get(store, table, index, val)
ccall((:wasmtime_table_get, libwasmtime), Bool, (Ptr{wasmtime_context_t}, Ptr{wasmtime_table_t}, UInt32, Ptr{wasmtime_val_t}), store, table, index, val)
end
function wasmtime_table_set(store, table, index, value)
ccall((:wasmtime_table_set, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_context_t}, Ptr{wasmtime_table_t}, UInt32, Ptr{wasmtime_val_t}), store, table, index, value)
end
function wasmtime_table_size(store, table)
ccall((:wasmtime_table_size, libwasmtime), UInt32, (Ptr{wasmtime_context_t}, Ptr{wasmtime_table_t}), store, table)
end
function wasmtime_table_grow(store, table, delta, init, prev_size)
ccall((:wasmtime_table_grow, libwasmtime), Ptr{wasmtime_error_t}, (Ptr{wasmtime_context_t}, Ptr{wasmtime_table_t}, UInt32, Ptr{wasmtime_val_t}, Ptr{UInt32}), store, table, delta, init, prev_size)
end
const wasmtime_trap_code_t = UInt8
@cenum wasmtime_trap_code_enum::UInt32 begin
WASMTIME_TRAP_CODE_STACK_OVERFLOW = 0
WASMTIME_TRAP_CODE_MEMORY_OUT_OF_BOUNDS = 1
WASMTIME_TRAP_CODE_HEAP_MISALIGNED = 2
WASMTIME_TRAP_CODE_TABLE_OUT_OF_BOUNDS = 3
WASMTIME_TRAP_CODE_INDIRECT_CALL_TO_NULL = 4
WASMTIME_TRAP_CODE_BAD_SIGNATURE = 5
WASMTIME_TRAP_CODE_INTEGER_OVERFLOW = 6
WASMTIME_TRAP_CODE_INTEGER_DIVISION_BY_ZERO = 7
WASMTIME_TRAP_CODE_BAD_CONVERSION_TO_INTEGER = 8
WASMTIME_TRAP_CODE_UNREACHABLE_CODE_REACHED = 9
WASMTIME_TRAP_CODE_INTERRUPT = 10
end
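# Illustrative sketch, not part of the generated bindings: recovering the enum
# above from a trap. `wasmtime_trap_code` returns false when the trap carries
# no code (for instance, a trap created by a host function).
function example_trap_code(trap)
    code = Ref{wasmtime_trap_code_t}(0)
    wasmtime_trap_code(trap, code) ? wasmtime_trap_code_enum(code[]) : nothing
end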
function wasmtime_trap_new(msg, msg_len)
ccall((:wasmtime_trap_new, libwasmtime), Ptr{wasm_trap_t}, (Cstring, Csize_t), msg, msg_len)
end
function wasmtime_trap_code(arg1, code)
ccall((:wasmtime_trap_code, libwasmtime), Bool, (Ptr{wasm_trap_t}, Ptr{wasmtime_trap_code_t}), arg1, code)
end
function wasmtime_trap_exit_status(arg1, status)
ccall((:wasmtime_trap_exit_status, libwasmtime), Bool, (Ptr{wasm_trap_t}, Ptr{Cint}), arg1, status)
end
function wasmtime_frame_func_name(arg1)
ccall((:wasmtime_frame_func_name, libwasmtime), Ptr{wasm_name_t}, (Ptr{wasm_frame_t},), arg1)
end
function wasmtime_frame_module_name(arg1)
ccall((:wasmtime_frame_module_name, libwasmtime), Ptr{wasm_name_t}, (Ptr{wasm_frame_t},), arg1)
end
function wasmtime_wat2wasm(wat, wat_len, ret)
ccall((:wasmtime_wat2wasm, libwasmtime), Ptr{wasmtime_error_t}, (Cstring, Csize_t, Ptr{wasm_byte_vec_t}), wat, wat_len, ret)
end
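# Illustrative sketch, not part of the generated bindings: translating WAT
# text into a wasm binary. Assumes `wasm_byte_vec_t` has the generated
# `(size, data)` field layout; the output vector must be released with
# `wasm_byte_vec_delete` once its contents have been copied out.
function example_wat2wasm(wat::String)
    out = Ref(wasm_byte_vec_t(0, C_NULL))
    err = wasmtime_wat2wasm(wat, sizeof(wat), out)
    err == C_NULL || error("invalid WAT")
    bytes = copy(unsafe_wrap(Array, Ptr{UInt8}(out[].data), out[].size))
    wasm_byte_vec_delete(out)
    bytes
end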
function wasi_config_delete(arg1)
ccall((:wasi_config_delete, libwasmtime), Cvoid, (Ptr{wasi_config_t},), arg1)
end
# no prototype is found for this function at wasi.h:47:36, please use with caution
function wasi_config_new()
ccall((:wasi_config_new, libwasmtime), Ptr{wasi_config_t}, ())
end
function wasi_config_set_argv(config, argc, argv)
ccall((:wasi_config_set_argv, libwasmtime), Cvoid, (Ptr{wasi_config_t}, Cint, Ptr{Cstring}), config, argc, argv)
end
function wasi_config_inherit_argv(config)
ccall((:wasi_config_inherit_argv, libwasmtime), Cvoid, (Ptr{wasi_config_t},), config)
end
function wasi_config_set_env(config, envc, names, values)
ccall((:wasi_config_set_env, libwasmtime), Cvoid, (Ptr{wasi_config_t}, Cint, Ptr{Cstring}, Ptr{Cstring}), config, envc, names, values)
end
function wasi_config_inherit_env(config)
ccall((:wasi_config_inherit_env, libwasmtime), Cvoid, (Ptr{wasi_config_t},), config)
end
function wasi_config_set_stdin_file(config, path)
ccall((:wasi_config_set_stdin_file, libwasmtime), Bool, (Ptr{wasi_config_t}, Cstring), config, path)
end
function wasi_config_inherit_stdin(config)
ccall((:wasi_config_inherit_stdin, libwasmtime), Cvoid, (Ptr{wasi_config_t},), config)
end
function wasi_config_set_stdout_file(config, path)
ccall((:wasi_config_set_stdout_file, libwasmtime), Bool, (Ptr{wasi_config_t}, Cstring), config, path)
end
function wasi_config_inherit_stdout(config)
ccall((:wasi_config_inherit_stdout, libwasmtime), Cvoid, (Ptr{wasi_config_t},), config)
end
function wasi_config_set_stderr_file(config, path)
ccall((:wasi_config_set_stderr_file, libwasmtime), Bool, (Ptr{wasi_config_t}, Cstring), config, path)
end
function wasi_config_inherit_stderr(config)
ccall((:wasi_config_inherit_stderr, libwasmtime), Cvoid, (Ptr{wasi_config_t},), config)
end
function wasi_config_preopen_dir(config, path, guest_path)
ccall((:wasi_config_preopen_dir, libwasmtime), Bool, (Ptr{wasi_config_t}, Cstring, Cstring), config, path, guest_path)
end
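# Illustrative sketch, not part of the generated bindings: a WASI
# configuration that inherits the host's stdout/stderr and preopens one host
# directory under a guest-visible path (both path strings here are hypothetical).
function example_wasi_config()
    config = wasi_config_new()
    wasi_config_inherit_stdout(config)
    wasi_config_inherit_stderr(config)
    wasi_config_preopen_dir(config, "/tmp", "/sandbox") ||
        error("failed to preopen directory")
    config
end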
const wasm_name = wasm_byte_vec_t
const wasm_name_new = wasm_byte_vec_new
const wasm_name_new_empty = wasm_byte_vec_new_empty
const wasm_name_new_new_uninitialized = wasm_byte_vec_new_uninitialized # (sic) the doubled "new" mirrors the upstream wasm.h macro name
const wasm_name_copy = wasm_byte_vec_copy
const wasm_name_delete = wasm_byte_vec_delete
const WASM_EMPTY_VEC = nothing
# Skipping MacroDefinition: WASM_INIT_VAL { . kind = WASM_ANYREF , . of = { . ref = NULL } }
const WASMTIME_EXTERN_FUNC = 0
const WASMTIME_EXTERN_GLOBAL = 1
const WASMTIME_EXTERN_TABLE = 2
const WASMTIME_EXTERN_MEMORY = 3
const WASMTIME_EXTERN_INSTANCE = 4
const WASMTIME_EXTERN_MODULE = 5
const WASMTIME_I32 = 0
const WASMTIME_I64 = 1
const WASMTIME_F32 = 2
const WASMTIME_F64 = 3
const WASMTIME_V128 = 4
const WASMTIME_FUNCREF = 5
const WASMTIME_EXTERNREF = 6
# exports
const PREFIXES = ["libwasm", "wasmtime_", "wasm_", "WASM_", "WASMTIME_", "wasi_"]
for name in names(@__MODULE__; all=true), prefix in PREFIXES
if startswith(string(name), prefix)
@eval export $name
end
end
end # module
module Wasmtime
include("./LibWasmtime.jl")
using .LibWasmtime
abstract type AbstractWasmEngine end
abstract type AbstractWasmModule end
include("./vec_t.jl")
include("./val_t.jl")
include("./imports.jl")
include("./exports.jl")
include("./wasm/store.jl")
include("./wasm/module.jl")
include("./wasm/instance.jl")
export WasmMemory,
    WasmStore,
    WasmInstance,
    WasmExports,
    exports,
    WasmModule,
    imports,
    WasmImports,
    WasmFunc
include("./engine.jl")
include("./wasmtime/error.jl")
include("./wasmtime/wat2wasm.jl")
include("./wasmtime/store.jl")
include("./wasmtime/wasi.jl")
include("./wasmtime/module.jl")
include("./wasmtime/linker.jl")
include("./wasmtime/instance.jl")
include("./wasm/table.jl")
export WasmTable,
wat2wasm,
@wat_str,
WasmInstance,
WasmExports,
exports,
WasmEngine,
WasmConfig,
WasmStore,
WasmModule,
imports,
WasmImports,
WasmFunc
end # Wasmtime
# TODO: Group Wasmtime.WasmEngine and Wasmer.WasmEngine
mutable struct WasmEngine <: AbstractWasmEngine
wasm_engine_ptr::Ptr{wasm_engine_t}
WasmEngine(wasm_engine_ptr) =
finalizer(new(wasm_engine_ptr)) do wasm_engine
wasm_engine_delete(wasm_engine_ptr)
end
end
function WasmEngine()
wasm_engine_ptr = wasm_engine_new()
WasmEngine(wasm_engine_ptr)
end
Base.unsafe_convert(::Type{Ptr{wasm_engine_t}}, wasm_engine::WasmEngine) =
wasm_engine.wasm_engine_ptr
Base.show(io::IO, ::WasmEngine) = print(io, "WasmEngine()")
abstract type AbstractWasmExport end
mutable struct WasmExports{I,E}
wasm_instance::I # I may either be a WasmInstance or a WasmtimeModule
wasm_exports::Vector{E}
end
function Base.getproperty(wasm_exports::WasmExports, f::Symbol)
if f ∈ fieldnames(WasmExports)
return getfield(wasm_exports, f)
end
lookup_name = string(f)
export_index =
findfirst(wasm_export -> name(wasm_export) == lookup_name, wasm_exports.wasm_exports)
export_index === nothing && error("Export $f not found")
wasm_exports.wasm_exports[export_index]
end
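# Illustrative usage, not from the original source: the getproperty overload
# above lets a named export be reached as a property instead of scanning the
# vector by hand (the export name `add` is hypothetical):
#
#     instance_exports = exports(instance)
#     add_func = instance_exports.add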
struct WasmImport
wasm_importtype_ptr::Ptr{wasm_importtype_t}
extern_kind::wasm_externkind_enum
import_module::String
name::String
function WasmImport(wasm_importtype_ptr::Ptr{wasm_importtype_t})
name_vec_ptr = wasm_importtype_name(wasm_importtype_ptr)
name = name_vec_ptr_to_str(name_vec_ptr)
import_module_ptr = wasm_importtype_module(wasm_importtype_ptr)
import_module = name_vec_ptr_to_str(import_module_ptr)
externtype_ptr = wasm_importtype_type(wasm_importtype_ptr)
extern_kind = wasm_externkind_enum(wasm_externtype_kind(externtype_ptr))
new(wasm_importtype_ptr, extern_kind, import_module, name)
end
end
Base.unsafe_convert(::Type{Ptr{wasm_importtype_t}}, wasm_import::WasmImport) = wasm_import.wasm_importtype_ptr
Base.show(io::IO, wasm_import::WasmImport) = print(
io,
"WasmImport($(wasm_import.extern_kind), \"$(wasm_import.import_module)\", \"$(wasm_import.name)\")",
)
struct WasmImports{M<:AbstractWasmModule}
wasm_module::M
wasm_imports::Vector{WasmImport}
function WasmImports(wasm_module::M, wasm_imports_vec) where {M<:AbstractWasmModule}
wasm_imports = map(imp -> wasm_importtype_copy(imp) |> WasmImport, wasm_imports_vec)
new{M}(wasm_module, wasm_imports)
end
end
function Base.show(io::IO, wasm_imports::WasmImports)
    print(io, "WasmImports(")
    show(io, wasm_imports.wasm_imports)
    print(io, ")")
end
julia_type_to_valtype(julia_type)::Ptr{wasm_valtype_t} =
julia_type_to_valkind(julia_type) |> wasm_valtype_new
# TODO: the other value types
function WasmInt32(i::Int32)
    val = Ref(wasm_val_t(ntuple(_ -> zero(UInt8), 16)))
    # Root `val` while writing through a raw pointer into its payload.
    GC.@preserve val begin
        ptr = Base.unsafe_convert(Ptr{wasm_val_t}, Base.pointer_from_objref(val))
        ptr.kind = WASM_I32
        ptr.of.i32 = i
    end
    val[]
end
function WasmInt64(i::Int64)
    val = Ref(wasm_val_t(ntuple(_ -> zero(UInt8), 16)))
    GC.@preserve val begin
        ptr = Base.unsafe_convert(Ptr{wasm_val_t}, Base.pointer_from_objref(val))
        ptr.kind = WASM_I64
        ptr.of.i64 = i
    end
    val[]
end
function WasmFloat32(f::Float32)
    val = Ref(wasm_val_t(ntuple(_ -> zero(UInt8), 16)))
    GC.@preserve val begin
        ptr = Base.unsafe_convert(Ptr{wasm_val_t}, Base.pointer_from_objref(val))
        ptr.kind = WASM_F32
        ptr.of.f32 = f
    end
    val[]
end
function WasmFloat64(f::Float64)
    val = Ref(wasm_val_t(ntuple(_ -> zero(UInt8), 16)))
    GC.@preserve val begin
        ptr = Base.unsafe_convert(Ptr{wasm_val_t}, Base.pointer_from_objref(val))
        ptr.kind = WASM_F64
        ptr.of.f64 = f
    end
    val[]
end
"""
v128(x)
Splats the given value to create a v128 vector with as many times the value `x`
as the `typeof(x)` allows to fit in 128 bits.
"""
function v128(x::T) where {T<:Union{Int8,UInt8,Int16,UInt16,
Int32,UInt32,Int64,UInt64,
Float32,Float64,Int128,UInt128}}
sz = sizeof(T)
x = x isa Float32 ?
reinterpret(UInt32, x) :
x isa Float64 ?
reinterpret(UInt64, x) :
x
bytes = ntuple(i -> (x >> (8 * (i - 1))) % UInt8, sz)
ntuple(i -> bytes[(i-1)%sz+1], 16)
end
"""
i64x2(x₁::Int64, x₂::Int64)
Creates a simd v128 vector from two Int64.
"""
i64x2(x₁, x₂) = Tuple(reinterpret(UInt8, Int64[x₁,x₂]))
"""
i64x2(v)::Tuple{Int64,Int64}
Interprets the byte values in the simd vector as two Int64.
"""
function i64x2(v)
x₁, x₂ = 0, 0
for i in 1:sizeof(Float64)
x₁ |= Int64(v[i]) << (8 * (i-1))
x₂ |= Int64(v[i+sizeof(Float64)]) << (8 * (i-1))
end
(x₁, x₂)
end
"""
f64x2(x₁::Float64, x₂::Float64)
Creates a simd v128 vector from two Float64.
"""
f64x2(x₁, x₂) = Tuple(reinterpret(UInt8, Float64[x₁, x₂]))
"""
f64x2(v)::Tuple{Float64,Float64}
Interprets the byte values in the simd vector as two Float64.
"""
function f64x2(v)
x₁, x₂ = i64x2(v)
(reinterpret(Float64,x₁),
reinterpret(Float64,x₂))
end
"""
i32x4(x₁::Int32, x₂::Int32, x₃::Int32, x₄::Int32)
Creates a simd v128 vector from four Int32.
"""
i32x4(x₁, x₂, x₃, x₄) = Tuple(reinterpret(UInt8, Int32[x₁, x₂, x₃, x₄]))
"""
i32x4(v)::Tuple{Int32,Int32,Int32,Int32}
Interprets the byte values in the simd vector as four Int32.
"""
function i32x4(v)
x₁, x₂, x₃, x₄ = zeros(Int32, 4)
for i in 1:sizeof(Float32)
x₁ |= Int32(v[i]) << (8 * (i-1))
x₂ |= Int32(v[i+1sizeof(Float32)]) << (8 * (i-1))
x₃ |= Int32(v[i+2sizeof(Float32)]) << (8 * (i-1))
x₄ |= Int32(v[i+3sizeof(Float32)]) << (8 * (i-1))
end
(x₁,x₂,x₃,x₄)
end
"""
f32x4(x₁::Float32, x₂::Float32, x₃::Float32, x₄::Float32)
Creates a simd v128 vector from four Float32.
"""
f32x4(x₁,x₂,x₃,x₄) = Tuple(reinterpret(UInt8, Float32[x₁,x₂,x₃,x₄]))
"""
f32x4(v)::Tuple{Float32,Float32,Float32,Float32}
Interprets the byte values in the simd vector as four Float32.
"""
function f32x4(v)
x₁, x₂, x₃, x₄ = i32x4(v)
Tuple(reinterpret(Float32,x) for x in (x₁,x₂,x₃,x₄))
end
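# Illustrative round trip, not from the original source: packing four Float32
# lanes into the 16-byte v128 tuple and unpacking them again. This relies on
# the little-endian byte order assumed throughout this file.
function example_f32x4_roundtrip()
    v = f32x4(1.0f0, 2.0f0, 3.0f0, 4.0f0)  # NTuple{16,UInt8}
    f32x4(v) == (1.0f0, 2.0f0, 3.0f0, 4.0f0)
end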
"""
i16x8(x₁, x₂, x₃, x₄, x₅, x₆, x₇, x₈)
Creates a simd v128 vector from eight Int16.
"""
i16x8(x::Vararg{Int16,8}) = Tuple(reinterpret(UInt8, Int16[x...]))
"""
i16x8(v)::NTuple{8,Int16}
Interprets the byte values in the simd vector as eight Int16.
"""
function i16x8(v)
Tuple(reinterpret(Int16, UInt8[v...]))
end
function julia_type_to_valkind(julia_type::Type)::wasm_valkind_enum
if julia_type == Int32
WASM_I32
elseif julia_type == Int64
WASM_I64
elseif julia_type == Float32
WASM_F32
elseif julia_type == Float64
WASM_F64
else
error("No corresponding valkind for type $julia_type")
end
end
function valkind_to_julia_type(valkind::wasm_valkind_enum)
if valkind == WASM_I32
Int32
elseif valkind == WASM_I64
Int64
elseif valkind == WASM_F32
Float32
elseif valkind == WASM_F64
Float64
else
error("No corresponding type for kind $valkind")
end
end
Base.convert(::Type{wasm_val_t}, i::Int32) = WasmInt32(i)
Base.convert(::Type{wasm_val_t}, i::Int64) = WasmInt64(i)
Base.convert(::Type{wasm_val_t}, f::Float32) = WasmFloat32(f)
Base.convert(::Type{wasm_val_t}, f::Float64) = WasmFloat64(f)
Base.convert(::Type{wasm_val_t}, val::wasm_val_t) = val
function Base.convert(julia_type::Type, wasm_val::wasm_val_t)
    valkind = julia_type_to_valkind(julia_type)
    @assert valkind == wasm_val.kind "Cannot convert a value of kind $(wasm_val.kind) to $julia_type (expected kind $valkind)"
    ctag = Ref(wasm_val.of)
    ptr = Base.unsafe_convert(Ptr{LibWasmtime.__JL_Ctag_4}, ctag)
    GC.@preserve ctag unsafe_load(Ptr{julia_type}(ptr))
end
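# Illustrative round trip, not from the original source: a Julia Int32 passes
# through wasm_val_t and back via the convert methods above.
function example_val_roundtrip()
    wv = convert(wasm_val_t, Int32(42))  # wv.kind == WASM_I32
    convert(Int32, wv) == Int32(42)
end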
function Base.show(io::IO, wasm_val::wasm_val_t)
name, maybe_val = if wasm_val.kind == WASM_I32
"WasmInt32", wasm_val.of.i32 |> string
elseif wasm_val.kind == WASM_I64
"WasmInt64", wasm_val.of.i64 |> string
elseif wasm_val.kind == WASM_F32
"WasmFloat32", wasm_val.of.f32 |> string
elseif wasm_val.kind == WASM_F64
"WasmFloat64", wasm_val.of.f64 |> string
else
"WasmAny", ""
end
print(io, "$name($maybe_val)")
end