# GeoStatsTransforms v0.8.1 (MIT), https://github.com/JuliaEarth/GeoStatsTransforms.jl.git
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
UniqueCoords(var₁ => agg₁, var₂ => agg₂, ..., varₙ => aggₙ)
Retain locations in data with unique coordinates.
Duplicates of a variable `varᵢ` are aggregated with
aggregation function `aggᵢ`. If an aggregation function
is not defined for variable `varᵢ`, a default is used:
`mean` for continuous variables and `first` otherwise.
# Examples
```julia
UniqueCoords(1 => last, 2 => maximum)
UniqueCoords(:a => first, :b => minimum)
UniqueCoords("a" => last, "b" => maximum)
```
"""
struct UniqueCoords{S<:ColumnSelector} <: TableTransform
selector::S
aggfuns::Vector{Function}
end
UniqueCoords() = UniqueCoords(NoneSelector(), Function[])
UniqueCoords(pairs::Pair{C,<:Function}...) where {C<:Column} =
UniqueCoords(selector(first.(pairs)), collect(Function, last.(pairs)))
isrevertible(::Type{<:UniqueCoords}) = false
function apply(transform::UniqueCoords, geotable::AbstractGeoTable)
gtb = _adjustunits(geotable)
dom = domain(gtb)
tab = values(gtb)
cols = Tables.columns(tab)
vars = Tables.columnnames(cols)
# aggregation functions
svars = transform.selector(vars)
agg = Dict(zip(svars, transform.aggfuns))
for var in vars
if !haskey(agg, var)
v = Tables.getcolumn(cols, var)
agg[var] = _defaultagg(v)
end
end
# group locations with the same coordinates
pts = [centroid(dom, i) for i in 1:nelements(dom)]
X = reduce(hcat, to.(pts))
uinds = _uniqueinds(X, 2)
ginds = unique(uinds)
groups = Dict(ind => Int[] for ind in ginds)
for (i, ind) in enumerate(uinds)
push!(groups[ind], i)
end
# perform aggregation with repeated indices
function aggvar(var)
v = Tables.getcolumn(cols, var)
map(ginds) do gind
group = groups[gind]
agg[var](v[group])
end
end
# construct new table
𝒯 = (; (var => aggvar(var) for var in vars)...)
newtab = 𝒯 |> Tables.materializer(tab)
# construct new domain
newdom = view(dom, ginds)
# new spatial data
newgtb = georef(newtab, newdom)
newgtb, nothing
end
# ---------------------------------------------------------------
# The code below was copied/modified provisionally from Base.unique
# See https://github.com/JuliaLang/julia/issues/1845
# ---------------------------------------------------------------
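#
# For example, `_uniqueinds([1 2 1; 3 4 3], 2) == [1, 2, 1]`, i.e. each slice
# along `dim` is mapped to the index of the first slice with the same entries.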
using Base.Cartesian: @nref, @nloops
struct Prehashed
hash::UInt
end
Base.hash(x::Prehashed) = x.hash
@generated function _uniqueinds(A::AbstractArray{T,N}, dim::Int) where {T,N}
quote
if !(1 <= dim <= $N)
ArgumentError("Input argument dim must be 1 <= dim <= $N, but is currently $dim")
end
hashes = zeros(UInt, size(A, dim))
# Compute hash for each row
k = 0
@nloops $N i A d -> (
if d == dim
k = i_d
end
) begin
@inbounds hashes[k] = hash(hashes[k], hash((@nref $N A i)))
end
# Collect index of first row for each hash
uniquerow = Array{Int}(undef, size(A, dim))
firstrow = Dict{Prehashed,Int}()
for k in 1:size(A, dim)
uniquerow[k] = get!(firstrow, Prehashed(hashes[k]), k)
end
uniquerows = collect(values(firstrow))
# Check for collisions
collided = falses(size(A, dim))
@inbounds begin
@nloops $N i A d -> (
if d == dim
k = i_d
j_d = uniquerow[k]
else
j_d = i_d
end
) begin
if (@nref $N A j) != (@nref $N A i)
collided[k] = true
end
end
end
if any(collided)
nowcollided = BitArray(undef, size(A, dim))
while any(collided)
# Collect index of first row for each collided hash
empty!(firstrow)
for j in 1:size(A, dim)
collided[j] || continue
uniquerow[j] = get!(firstrow, Prehashed(hashes[j]), j)
end
for v in values(firstrow)
push!(uniquerows, v)
end
# Check for collisions
fill!(nowcollided, false)
@nloops $N i A d -> begin
if d == dim
k = i_d
j_d = uniquerow[k]
(!collided[k] || j_d == k) && continue
else
j_d = i_d
end
end begin
if (@nref $N A j) != (@nref $N A i)
nowcollided[k] = true
end
end
(collided, nowcollided) = (nowcollided, collided)
end
end
ie = unique(uniquerow)
ic_dict = Dict{Int,Int}()
for k in 1:length(ie)
ic_dict[ie[k]] = k
end
ic = similar(uniquerow)
for k in 1:length(ic)
ic[k] = ie[ic_dict[uniquerow[k]]]
end
ic
end
end
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
Upscale(f₁, f₂, ..., fₙ)
Upscale each dimension of the grid by given factors `f₁`, `f₂`, ..., `fₙ`.
This transform is equivalent to skipping entries of the grid
as in the pseudo-code `grid[1:f₁:end, 1:f₂:end, ..., 1:fₙ:end]`.
Resulting values are obtained with the [`Aggregate`](@ref) transform
and its default aggregation functions.
# Examples
```julia
Upscale(2, 2)
Upscale(3, 3, 2)
```
"""
struct Upscale{Dim} <: TableTransform
factors::Dims{Dim}
end
Upscale(factors::Int...) = Upscale(factors)
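# usage sketch: a geotable over a 20x20 grid piped through `Upscale(2, 2)`
# yields a geotable over a 10x10 grid with aggregated values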
isrevertible(::Type{<:Upscale}) = false
function apply(transform::Upscale, geotable::AbstractGeoTable)
grid = domain(geotable)
tgrid = coarsen(grid, RegularCoarsening(transform.factors))
newgeotable = geotable |> Aggregate(tgrid)
newgeotable, nothing
end
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
#-------------
# AGGREGATION
#-------------
_defaultagg(x) = _defaultagg(elscitype(x))
_defaultagg(::Type) = _skipmissing(first)
_defaultagg(::Type{Continuous}) = _skipmissing(mean)
function _skipmissing(fun)
x -> begin
vs = skipmissing(x)
isempty(vs) ? missing : fun(vs)
end
end
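# For example, for `v = [1.0, missing, 3.0]` the default aggregation is a
# missing-skipping mean, so `_defaultagg(v)(v) == 2.0`; for non-continuous
# values it simply picks the first non-missing entry.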
#-------
# UNITS
#-------
const Len{T} = Quantity{T,u"𝐋"}
_addunit(x::Number, u) = x * u
_addunit(::Quantity, _) = throw(ArgumentError("invalid units, please check the documentation"))
function _adjustunits(geotable::AbstractGeoTable)
dom = domain(geotable)
tab = values(geotable)
cols = Tables.columns(tab)
vars = Tables.columnnames(cols)
pairs = (var => _absunit(Tables.getcolumn(cols, var)) for var in vars)
newtab = (; pairs...) |> Tables.materializer(tab)
georef(newtab, dom)
end
_absunit(x) = _absunit(nonmissingtype(eltype(x)), x)
_absunit(::Type, x) = x
function _absunit(::Type{Q}, x) where {Q<:AffineQuantity}
u = absoluteunit(unit(Q))
map(v -> uconvert(u, v), x)
end
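# For example, temperatures in °C (an affine quantity) are converted to the
# absolute unit K: `_absunit([0.0u"°C"]) == [273.15u"K"]`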
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
# auxiliary functions and variables
uniform(h; λ) = (h ≤ λ)
triangular(h; λ) = (h ≤ λ) * (λ - h)
epanechnikov(h; λ) = (h ≤ λ) * (λ^2 - h^2)
const KERNFUN = Dict(:uniform => uniform, :triangular => triangular, :epanechnikov => epanechnikov)
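# For example, `triangular(0.5, λ=1.0) == 0.5` while `triangular(2.0, λ=1.0) == 0.0`,
# i.e. all kernels vanish beyond the range `λ`.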
"""
GHC(k, λ; kern=:epanechnikov, link=:ward, as=:CLUSTER)
A transform for partitioning geospatial data into `k` clusters
according to a range `λ` using Geostatistical Hierarchical
Clustering (GHC). The larger the range, the more connected
nearby samples are.
## Parameters
* `k` - Approximate number of clusters
* `λ` - Approximate range of kernel function in length units
* `kern` - Kernel function (`:uniform`, `:triangular` or `:epanechnikov`)
* `link` - Linkage function (`:single`, `:average`, `:complete`, `:ward` or `:ward_presquared`)
* `as` - Cluster column name
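## Examples
A minimal sketch of valid constructions (when `λ` is given without units, it is interpreted in meters):
```julia
GHC(4, 1.0)
GHC(4, 1.0u"m", kern=:uniform, link=:average)
```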
## References
* Fouedjio, F. 2016. [A hierarchical clustering method for multivariate geostatistical data]
(https://www.sciencedirect.com/science/article/abs/pii/S2211675316300367)
## Notes
- The range parameter controls the sparsity pattern of the pairwise
distances, which can greatly affect the computational performance
of the GHC algorithm. We recommend choosing a range that is small
enough to connect nearby samples. For example, clustering data over
a 100x100 Cartesian grid with unit spacing is possible with `λ=1.0`
or `λ=2.0`, but the problem becomes computationally infeasible
around `λ=10.0` due to the density of points.
"""
struct GHC{ℒ<:Len} <: ClusteringTransform
k::Int
λ::ℒ
kern::Symbol
link::Symbol
as::Symbol
GHC(k, λ::ℒ, kern, link, as) where {ℒ<:Len} = new{float(ℒ)}(k, λ, kern, link, as)
end
function GHC(k, λ::Len; kern=:epanechnikov, link=:ward, as=:CLUSTER)
# sanity checks
@assert k > 0 "invalid number of clusters"
@assert λ > zero(λ) "invalid kernel range"
@assert kern ∈ [:uniform, :triangular, :epanechnikov] "invalid kernel function"
@assert link ∈ [:single, :average, :complete, :ward, :ward_presquared] "invalid linkage function"
GHC(k, λ, kern, link, Symbol(as))
end
GHC(k, λ; kwargs...) = GHC(k, _addunit(λ, u"m"); kwargs...)
function apply(transform::GHC, geotable)
# GHC parameters
k = transform.k
λ = transform.λ
kern = transform.kern
link = transform.link
# all covariates must be continuous
values(geotable) |> Assert(cond=x -> elscitype(x) <: Continuous)
# dissimilarity matrix
D = ghc_dissimilarity_matrix(geotable, kern, λ)
# classical hierarchical clustering
tree = hclust(D, linkage=link)
# cut tree to produce clusters
labels = cutree(tree, k=k)
newtable = (; transform.as => categorical(labels))
newgeotable = georef(newtable, domain(geotable))
newgeotable, nothing
end
function ghc_dissimilarity_matrix(geotable, kern, λ)
# retrieve domain/table
𝒟 = domain(geotable)
𝒯 = values(geotable)
# kernel matrix
K = ghc_kern_matrix(kern, λ, 𝒟)
# features must be standardized
𝒮 = ghc_standardize(𝒯)
# retrieve feature columns
cols = Tables.columns(𝒮)
vars = Tables.columnnames(cols)
# number of covariates
p = length(vars)
# number of observations
n = size(K, 1)
# dissimilarity matrix
D = zeros(n, n)
@inbounds for j in 1:p # for each pair of covariates
Zj = Tables.getcolumn(cols, j)
for i in j:p
Zi = Tables.getcolumn(cols, i)
# difference matrix for covariate pair
Δ = ghc_diff_matrix(Zi, Zj)
# contribution to dissimilarity matrix
for l in 1:n
Kl = K[:, l]
for k in (l + 1):n
Kk = K[:, k]
Kkl = kron(Kl, Kk) # faster Kk * transpose(Kl)
I, W = findnz(Kkl)
num = sum(W .* Δ[I], init=zero(eltype(W)))
den = sum(W, init=zero(eltype(W)))
iszero(den) || (D[k, l] += (1 / 2) * (num / den))
end
D[l, l] = 0.0
for k in 1:(l - 1)
D[k, l] = D[l, k] # leverage symmetry
end
end
end
end
D
end
function ghc_standardize(𝒯)
cols = Tables.columns(𝒯)
vars = Tables.columnnames(cols)
zstd = map(vars) do var
z = Tables.getcolumn(cols, var)
μ = mean(z)
σ = std(z, mean=μ)
iszero(σ) ? zero(μ) : (z .- μ) ./ σ
end
(; zip(vars, zstd)...) |> Tables.materializer(𝒯)
end
function ghc_kern_matrix(kern, λ, 𝒟)
# kernel function
fn = KERNFUN[kern]
Kλ(h) = fn(h, λ=λ)
# collect coordinates
coords = [to(centroid(𝒟, i)) for i in 1:nelements(𝒟)]
# lag matrix
H = pairwise(Euclidean(), coords)
# kernel matrix
K = ustrip.(Kλ.(H))
# return sparse version
sparse(K)
end
function ghc_diff_matrix(Zi, Zj)
n = length(Zi)
Δ = zeros(n, n)
@inbounds for l in 1:n
for k in (l + 1):n
Δ[k, l] = (Zi[k] - Zi[l]) * (Zj[k] - Zj[l])
end
Δ[l, l] = 0.0
for k in 1:(l - 1)
Δ[k, l] = Δ[l, k] # leverage symmetry
end
end
Δ
end
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
GSC(k, m; σ=1.0, tol=1e-4, maxiter=10, weights=nothing, as=:CLUSTER)
A transform for partitioning geospatial data into `k` clusters
using Geostatistical Spectral Clustering (GSC).
## Parameters
* `k` - Desired number of clusters
* `m` - Multiplicative factor for adjacent weights
* `σ` - Standard deviation for exponential model (defaults to `1.0`)
* `tol` - Tolerance of k-means algorithm (defaults to `1e-4`)
* `maxiter` - Maximum number of iterations (defaults to `10`)
* `weights` - Dictionary with weights for each attribute (defaults to `nothing`)
* `as` - Cluster column name
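## Examples
For example, to partition into 50 clusters with multiplicative factor `2.0`:
```julia
GSC(50, 2.0)
GSC(50, 2.0, σ=2.0, maxiter=100)
```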
## References
* Romary et al. 2015. [Unsupervised classification of multivariate
geostatistical data: Two algorithms]
(https://www.sciencedirect.com/science/article/pii/S0098300415001314)
## Notes
- The algorithm implemented here is slightly different from the algorithm
described in Romary et al. 2015. Instead of setting Wᵢⱼ = 0 when i <-/-> j
(i.e. when i and j are not adjacent), we simply magnify the weight by a
multiplicative factor Wᵢⱼ *= m when i <--> j (i.e. when i and j are adjacent).
This leads to dense matrices but also better results in practice.
"""
struct GSC{W} <: ClusteringTransform
k::Int
m::Float64
σ::Float64
tol::Float64
maxiter::Int
weights::W
as::Symbol
end
function GSC(k, m; σ=1.0, tol=1e-4, maxiter=10, weights=nothing, as=:CLUSTER)
# sanity checks
@assert k > 0 "invalid number of clusters"
@assert m > 0 "invalid multiplicative factor"
@assert σ > 0 "invalid standard deviation"
GSC(k, m, σ, tol, maxiter, weights, Symbol(as))
end
function apply(transform::GSC, geotable)
# retrieve table and domain
𝒯 = values(geotable)
𝒟 = domain(geotable)
# retrieve parameters
k = transform.k
m = transform.m
σ = transform.σ
tol = transform.tol
maxiter = transform.maxiter
weights = transform.weights
# table distance
td = TableDistance(normalize=false, weights=weights)
# adjacency matrix
A = adjacencymatrix(𝒟)
# weight matrix
Δ = pairwise(td, 𝒯)
E = @. exp(-Δ / σ^2)
E[findall(!iszero, A)] .*= m
W = sparse(E)
# degree matrix
d = vec(sum(W, dims=2))
D = Diagonal(d)
# Laplace matrix
L = D^(-1 / 2) * W * D^(-1 / 2)
# solve eigenproblem
S, _ = partialschur(L, nev=k)
_, V = partialeigen(S)
# k-means with eigenvectors
result = kmeans(V', k, tol=tol, maxiter=maxiter)
labels = assignments(result)
newtable = (; transform.as => categorical(labels))
newgeotable = georef(newtable, domain(geotable))
newgeotable, nothing
end
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
SLIC(k, m; tol=1e-4, maxiter=10, weights=nothing, as=:CLUSTER)
A transform for clustering geospatial data into approximately `k`
clusters using Simple Linear Iterative Clustering (SLIC).
The transform produces clusters of samples that are spatially
connected based on a distance `dₛ` and that, at the same
time, are similar in terms of `vars` with distance `dᵥ`.
The tradeoff is controlled with a hyperparameter `m`
in the additive model `dₜ = √(dᵥ² + m²(dₛ/s)²)`.
## Parameters
* `k` - Approximate number of clusters
* `m` - Hyperparameter of SLIC model
* `tol` - Tolerance of k-means algorithm (defaults to `1e-4`)
* `maxiter` - Maximum number of iterations (defaults to `10`)
* `weights` - Dictionary with weights for each attribute (defaults to `nothing`)
* `as` - Cluster column name
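## Examples
For example, to request roughly 50 clusters (the attribute names in the `weights` dictionary are illustrative):
```julia
SLIC(50, 0.001)
SLIC(50, 0.001, weights=Dict(:z1 => 10, :z2 => 0.1))
```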
## References
* Achanta et al. 2011. [SLIC superpixels compared to state-of-the-art
superpixel methods](https://ieeexplore.ieee.org/document/6205760)
"""
struct SLIC{W} <: ClusteringTransform
k::Int
m::Float64
tol::Float64
maxiter::Int
weights::W
as::Symbol
end
function SLIC(k::Int, m::Real; tol=1e-4, maxiter=10, weights=nothing, as=:CLUSTER)
@assert tol > 0 "invalid tolerance"
@assert maxiter > 0 "invalid number of iterations"
SLIC{typeof(weights)}(k, m, tol, maxiter, weights, Symbol(as))
end
function apply(transform::SLIC, geotable)
# retrieve parameters
w = transform.weights
m = transform.m
# normalize attributes
𝒯 = TableDistances.normalize(values(geotable))
Ω = georef(first(𝒯), domain(geotable))
𝒟 = domain(Ω)
# initial spacing of clusters
s = slic_spacing(𝒟, transform)
# initialize cluster centers
c = slic_initialization(𝒟, s)
# ball neighborhood search
searcher = BallSearch(𝒟, MetricBall(maximum(s)))
# pre-allocate memory for label and distance
l = fill(0, nelements(𝒟))
d = fill(Inf, nelements(𝒟))
# performance parameters
tol = transform.tol
maxiter = transform.maxiter
# Lloyd's (a.k.a. k-means) algorithm
err, iter = Inf, 0
while err > tol && iter < maxiter
o = copy(c)
slic_assignment!(Ω, searcher, w, m, s, c, l, d)
slic_update!(Ω, c, l)
err = norm(c - o) / norm(o)
iter += 1
end
orphans = findall(iszero, l)
if length(orphans) > 0
assigned = findall(!iszero, l)
𝒟₀ = view(𝒟, assigned)
csearcher = KNearestSearch(𝒟₀, 1)
for orphan in orphans
p = centroid(𝒟, orphan)
i = search(p, csearcher)[1]
l[orphan] = l[assigned[i]]
end
end
newtable = (; transform.as => categorical(l))
newgeotable = georef(newtable, domain(geotable))
newgeotable, nothing
end
slic_spacing(𝒟, transform) = slic_srecursion(transform.k, sides(boundingbox(𝒟)))
# given the desired number of clusters and the sides of the bounding box
# of the domain, returns the spacing for each dimension recursively
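# e.g. `slic_srecursion(20, [10.0, 100.0, 1000.0]) == [10/3, 100/3, 1000/3]`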
function slic_srecursion(k, l)
d = length(l)
# base case
d == 1 && return [l[1] / k]
# compute the spacing for the j-th dimension
j = argmax(l)
kⱼ = ceil(Int, k^(1 / d))
sⱼ = l[j] / kⱼ
# update the new k and l
kₙ = ceil(Int, k / kⱼ)
lₙ = l[[1:(j - 1); (j + 1):d]]
# then recursively compute the spacing for the remaining dimensions
s = slic_srecursion(kₙ, lₙ)
[s[begin:(j - 1)]; [sⱼ]; s[j:end]]
end
function slic_initialization(𝒟, s)
# efficient neighbor search
searcher = KNearestSearch(𝒟, 1)
# bounding box properties
bbox = boundingbox(𝒟)
lo, up = to.(extrema(bbox))
# cluster centers
clusters = Vector{Int}()
neighbor = Vector{Int}(undef, 1)
ranges = [(l + sᵢ / 2):sᵢ:u for (l, sᵢ, u) in zip(lo, s, up)]
for x in Iterators.product(ranges...)
search!(neighbor, Point(x), searcher)
push!(clusters, neighbor[1])
end
unique(clusters)
end
function slic_assignment!(geotable, searcher, w, m, s, c, l, d)
sₘ = maximum(s)
𝒟 = domain(geotable)
for (k, cₖ) in enumerate(c)
inds = search(centroid(𝒟, cₖ), searcher)
# distance between coordinates
X = (to(centroid(𝒟, i)) for i in inds)
xₖ = [to(centroid(𝒟, cₖ))]
dₛ = pairwise(Euclidean(), X, xₖ)
# distance between variables
𝒮ᵢ = view(geotable, inds)
𝒮ₖ = view(geotable, [cₖ])
V = values(𝒮ᵢ)
vₖ = values(𝒮ₖ)
dᵥ = pairwise(TableDistance(normalize=false, weights=w), V, vₖ)
# total distance
dₜ = @. √(dᵥ^2 + m^2 * (dₛ / sₘ)^2)
@inbounds for (i, ind) in enumerate(inds)
if dₜ[i] < d[ind]
d[ind] = dₜ[i]
l[ind] = k
end
end
end
end
function slic_update!(geotable, c, l)
𝒟 = domain(geotable)
for k in eachindex(c)
inds = findall(isequal(k), l)
X = (to(centroid(𝒟, i)) for i in inds)
xₖ = [mean(X)]
dₛ = pairwise(Euclidean(), X, xₖ)
@inbounds c[k] = inds[argmin(vec(dₛ))]
end
end
@testset "Aggregate" begin
@test !isrevertible(Aggregate(CartesianGrid(10, 10)))
pts1 = Point.([(5, 4), (3, 4), (0, 1), (7, 0), (7, 2)])
pts2 = Point.([(1, 1), (7, 1), (4, 4)])
gtb = georef((a=rand(Float64, 5), b=rand(Int, 5)), pts1)
ngtb = gtb |> Aggregate(pts2)
@test domain(ngtb) == PointSet(pts2)
@test ngtb.a[1] == gtb.a[3]
@test ngtb.a[2] == mean(gtb.a[[4, 5]])
@test ngtb.a[3] == mean(gtb.a[[1, 2]])
@test ngtb.b[1] == gtb.b[3]
@test ngtb.b[2] == first(gtb.b[[4, 5]])
@test ngtb.b[3] == first(gtb.b[[1, 2]])
ngtb = gtb |> Aggregate(pts2, :a => median, :b => last)
@test domain(ngtb) == PointSet(pts2)
@test ngtb.a[1] == gtb.a[3]
@test ngtb.a[2] == median(gtb.a[[4, 5]])
@test ngtb.a[3] == median(gtb.a[[1, 2]])
@test ngtb.b[1] == gtb.b[3]
@test ngtb.b[2] == last(gtb.b[[4, 5]])
@test ngtb.b[3] == last(gtb.b[[1, 2]])
grid1 = CartesianGrid((0.0, 0.0), (10.0, 10.0), dims=(20, 20))
grid2 = CartesianGrid((0.0, 0.0), (10.0, 10.0), dims=(10, 10))
gtb = georef((a=rand(Float64, 400), b=rand(Int, 400)), grid1)
ngtb = gtb |> Aggregate(grid2)
@test domain(ngtb) == grid2
@test ngtb[(1, 1), :a] == mean(gtb[(1:2, 1:2), :a])
@test ngtb[(1, 10), :a] == mean(gtb[(1:2, 19:20), :a])
@test ngtb[(10, 1), :a] == mean(gtb[(19:20, 1:2), :a])
@test ngtb[(10, 10), :a] == mean(gtb[(19:20, 19:20), :a])
@test ngtb[(1, 1), :b] == first(gtb[(1:2, 1:2), :b])
@test ngtb[(1, 10), :b] == first(gtb[(1:2, 19:20), :b])
@test ngtb[(10, 1), :b] == first(gtb[(19:20, 1:2), :b])
@test ngtb[(10, 10), :b] == first(gtb[(19:20, 19:20), :b])
ngtb = gtb |> Aggregate(grid2, :a => median, :b => last)
@test domain(ngtb) == grid2
@test ngtb[(1, 1), :a] == median(gtb[(1:2, 1:2), :a])
@test ngtb[(1, 10), :a] == median(gtb[(1:2, 19:20), :a])
@test ngtb[(10, 1), :a] == median(gtb[(19:20, 1:2), :a])
@test ngtb[(10, 10), :a] == median(gtb[(19:20, 19:20), :a])
@test ngtb[(1, 1), :b] == last(gtb[(1:2, 1:2), :b])
@test ngtb[(1, 10), :b] == last(gtb[(1:2, 19:20), :b])
@test ngtb[(10, 1), :b] == last(gtb[(19:20, 1:2), :b])
@test ngtb[(10, 10), :b] == last(gtb[(19:20, 19:20), :b])
end
@testset "Clustering" begin
@testset "SLIC" begin
Z = [ones(10, 10) 2ones(10, 10); 3ones(10, 10) 4ones(10, 10)]
𝒮 = georef((Z=Z,))
C = 𝒮 |> SLIC(4, 1.0)
@test C.CLUSTER == vec(Z')
𝒮 = georef((z=[√(i^2 + j^2) for i in 1:100, j in 1:100],))
C = 𝒮 |> SLIC(50, 0.001)
@test 50 ≤ length(unique(C.CLUSTER)) ≤ 60
# test SLIC with heterogeneous data
Z = (a=rand(10), b=1:10, x=rand(10), y=rand(10))
𝒮 = georef(Z, (:x, :y))
C = 𝒮 |> SLIC(2, 1.0)
@test domain(C) == domain(𝒮)
@test Set(C.CLUSTER) ⊆ Set(1:2)
# test SLIC for orphaned points
a = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
x = [
0.4993029939801461,
0.14954882636793432,
0.23118957975519616,
0.6816610871344635,
0.6665309965318731,
0.691522274292691,
0.012495903053589608,
0.9831177095525963,
0.4445263730141056,
0.2175871587746574
]
y = [
0.32721108209880256,
0.11427387079564899,
0.826401075107011,
0.6164294766961782,
0.6562529361193601,
0.43388375115444644,
0.7624847842129086,
0.1516623758764959,
0.07641616063237144,
0.8669098569279463
]
Z = (a=a, x=x, y=y)
𝒮 = georef(Z, (:x, :y))
C = 𝒮 |> SLIC(2, 1.0)
@test Set(C.CLUSTER) ⊆ Set(1:2)
# test SLIC with weights in attribute columns
z1 = [√((i - 0)^2 + (j - 0)^2) for i in 1:100, j in 1:100]
z2 = [√((i - 100)^2 + (j - 100)^2) for i in 1:100, j in 1:100]
𝒮 = georef((z1=z1, z2=z2))
w1 = Dict(:z1 => 10, :z2 => 0.1)
w2 = Dict(:z1 => 0.1, :z2 => 10)
C1 = 𝒮 |> SLIC(50, 0.001, weights=w1)
C2 = 𝒮 |> SLIC(50, 0.001, weights=w2)
@test 50 ≤ length(unique(C1.CLUSTER)) ≤ 60
@test 50 ≤ length(unique(C2.CLUSTER)) ≤ 60
# test GeoStatsTransforms.slic_srecursion function
k = 20
l = [10.0, 100.0, 1000.0]
s = GeoStatsTransforms.slic_srecursion(k, l)
@test s[1] == 10 / 3 && s[2] == 100 / 3 && s[3] == 1000 / 3
# the following test deals with the case where the bounding box
# of the data has very different sides, one of which is too small
# we want to make sure that the initialization of centroids always
# returns a non-empty set
k = 1
m = 0.000001
x = LinRange(550350.6224548942, 552307.2106300013, 1200)
y = LinRange(9.35909841165263e6, 9.36050447440832e6, 1200)
z = LinRange(-44.90690201082941, 351.4007207008662, 1200)
𝒟 = PointSet(collect(zip(x, y, z)))
s = GeoStatsTransforms.slic_spacing(𝒟, SLIC(k, m))
lo, up = to.(extrema(boundingbox(𝒟)))
ranges = [(l + sᵢ / 2):sᵢ:u for (l, sᵢ, u) in zip(lo, s, up)]
@test !isempty(Iterators.product(ranges...))
c = GeoStatsTransforms.slic_initialization(𝒟, s)
@test !isempty(c)
# as kwarg
𝒮 = georef((Z=[1, 2, 3],))
C = 𝒮 |> SLIC(3, 1.0, as=:cluster)
@test names(C) == ["cluster", "geometry"]
C = 𝒮 |> SLIC(3, 1.0, as="cluster")
@test names(C) == ["cluster", "geometry"]
end
@testset "GHC" begin
Z = [ones(10, 10) 2ones(10, 10); 3ones(10, 10) 4ones(10, 10)] .|> float
𝒮 = georef((Z=Z,))
C = 𝒮 |> GHC(4, 1.0)
𝒮′ = georef(values(𝒮), centroid.(domain(𝒮)))
C′ = 𝒮′ |> GHC(4, 1.0)
@test C.CLUSTER == categorical(vec(Z'))
@test C.CLUSTER == C′.CLUSTER
𝒮 = georef((z=[√(i^2 + j^2) for i in 1:50, j in 1:50],))
C = 𝒮 |> GHC(50, 1.0)
@test length(unique(C.CLUSTER)) == 50
# as kwarg
𝒮 = georef((Z=[1.0, 2.0, 3.0],))
C = 𝒮 |> GHC(3, 1.0, as=:cluster)
@test names(C) == ["cluster", "geometry"]
C = 𝒮 |> GHC(3, 1.0, as="cluster")
@test names(C) == ["cluster", "geometry"]
end
@testset "GSC" begin
𝒮 = georef((Z=[10sin(i / 10) + j for i in 1:100, j in 1:100],))
C = 𝒮 |> GSC(50, 2.0)
@test Set(C.CLUSTER) == Set(1:50)
# as kwarg
𝒮 = georef((Z=[1, 2, 3],))
C = 𝒮 |> GSC(3, 2.0, as=:cluster)
@test names(C) == ["cluster", "geometry"]
C = 𝒮 |> GSC(3, 2.0, as="cluster")
@test names(C) == ["cluster", "geometry"]
end
end
@testset "CookieCutter" begin
table = (facies=[1, 0, 1], poro=[0.5, 0.9, 0.1])
coord = [(25.0, 25.0), (50.0, 75.0), (75.0, 50.0)]
geotable = georef(table, coord)
trainimg = geostatsimage("Strebelle")
parent = QuiltingProcess(trainimg, (30, 30))
child0 = GaussianProcess(SphericalVariogram(range=20.0, sill=0.2))
child1 = GaussianProcess(SphericalVariogram(MetricBall((200.0, 20.0))))
sdomain = CartesianGrid(100, 100)
transform = CookieCutter(sdomain, :facies => parent, :poro => [0 => child0, 1 => child1])
@test !isrevertible(transform)
ngtb = transform(geotable)
@test nrow(ngtb) == 10000
@test propertynames(ngtb) == [:facies_1, :poro_1, :geometry]
transform = CookieCutter(sdomain, 3, :facies => parent, :poro => [0 => child0, 1 => child1])
@test !isrevertible(transform)
ngtb = transform(geotable)
@test nrow(ngtb) == 10000
@test propertynames(ngtb) == [:facies_1, :facies_2, :facies_3, :poro_1, :poro_2, :poro_3, :geometry]
# throw: CookieCutter without children
@test_throws ArgumentError CookieCutter(sdomain, :facies => parent)
@test_throws ArgumentError CookieCutter(sdomain, 3, :facies => parent)
# throw: invalid child map
@test_throws ArgumentError CookieCutter(sdomain, :facies => parent, :poro => [0 => nothing, 1 => child1])
@test_throws ArgumentError CookieCutter(sdomain, 3, :facies => parent, :poro => [0 => nothing, 1 => child1])
end
@testset "Detrend" begin
# reversibility on the same domain
rng = StableRNG(42)
l = range(-1, stop=1, length=100)
μ = [x^2 + y^2 for x in l, y in l]
ϵ = 0.1rand(rng, 100, 100)
d = georef((z=μ + ϵ, w=rand(100, 100)))
p = Detrend(:z, degree=2)
n, c = apply(p, d)
r = revert(p, n, c)
D = Tables.matrix(values(d))
R = Tables.matrix(values(r))
@test isapprox(D, R, atol=1e-6)
# reversibility on different domains
g = CartesianGrid(10, 10)
d = georef((z=rand(100),), g)
p = Detrend(:z, degree=2)
n, c = apply(p, d)
n2 = georef((z=[n.z; n.z],), [centroid.(g); centroid.(g)])
r2 = revert(p, n2, c)
@test r2.z[1:100] ≈ d.z
@test r2.z[101:200] ≈ d.z
end
@testset "Downscale" begin
@test !isrevertible(Downscale(2, 2))
grid = CartesianGrid((0.0, 0.0), (10.0, 10.0), dims=(10, 10))
tgrid = CartesianGrid((0.0, 0.0), (10.0, 10.0), dims=(20, 20))
gtb = georef((a=rand(Float64, 100), b=rand(Int, 100)), grid)
ngtb = gtb |> Downscale(2, 2)
@test domain(ngtb) == tgrid
@test ngtb[(1, 1), :a] == gtb[(1, 1), :a]
@test ngtb[(1, 2), :a] == gtb[(1, 1), :a]
@test ngtb[(2, 1), :a] == gtb[(1, 1), :a]
@test ngtb[(2, 2), :a] == gtb[(1, 1), :a]
@test ngtb[(1, 19), :a] == gtb[(1, 10), :a]
@test ngtb[(1, 20), :a] == gtb[(1, 10), :a]
@test ngtb[(2, 19), :a] == gtb[(1, 10), :a]
@test ngtb[(2, 20), :a] == gtb[(1, 10), :a]
@test ngtb[(19, 1), :b] == gtb[(10, 1), :b]
@test ngtb[(19, 2), :b] == gtb[(10, 1), :b]
@test ngtb[(20, 1), :b] == gtb[(10, 1), :b]
@test ngtb[(20, 2), :b] == gtb[(10, 1), :b]
@test ngtb[(19, 19), :b] == gtb[(10, 10), :b]
@test ngtb[(19, 20), :b] == gtb[(10, 10), :b]
@test ngtb[(20, 19), :b] == gtb[(10, 10), :b]
@test ngtb[(20, 20), :b] == gtb[(10, 10), :b]
rgrid = convert(RectilinearGrid, grid)
trgrid = convert(RectilinearGrid, tgrid)
gtb = georef((a=rand(Float64, 100), b=rand(Int, 100)), rgrid)
ngtb = gtb |> Downscale(2, 2)
@test domain(ngtb) == trgrid
@test ngtb[(1, 1), :a] == gtb[(1, 1), :a]
@test ngtb[(1, 2), :a] == gtb[(1, 1), :a]
@test ngtb[(2, 1), :a] == gtb[(1, 1), :a]
@test ngtb[(2, 2), :a] == gtb[(1, 1), :a]
@test ngtb[(1, 19), :a] == gtb[(1, 10), :a]
@test ngtb[(1, 20), :a] == gtb[(1, 10), :a]
@test ngtb[(2, 19), :a] == gtb[(1, 10), :a]
@test ngtb[(2, 20), :a] == gtb[(1, 10), :a]
@test ngtb[(19, 1), :b] == gtb[(10, 1), :b]
@test ngtb[(19, 2), :b] == gtb[(10, 1), :b]
@test ngtb[(20, 1), :b] == gtb[(10, 1), :b]
@test ngtb[(20, 2), :b] == gtb[(10, 1), :b]
@test ngtb[(19, 19), :b] == gtb[(10, 10), :b]
@test ngtb[(19, 20), :b] == gtb[(10, 10), :b]
@test ngtb[(20, 19), :b] == gtb[(10, 10), :b]
@test ngtb[(20, 20), :b] == gtb[(10, 10), :b]
sgrid = convert(StructuredGrid, grid)
tsgrid = convert(StructuredGrid, tgrid)
gtb = georef((a=rand(Float64, 100), b=rand(Int, 100)), sgrid)
ngtb = gtb |> Downscale(2, 2)
@test domain(ngtb) == tsgrid
@test ngtb[(1, 1), :a] == gtb[(1, 1), :a]
@test ngtb[(1, 2), :a] == gtb[(1, 1), :a]
@test ngtb[(2, 1), :a] == gtb[(1, 1), :a]
@test ngtb[(2, 2), :a] == gtb[(1, 1), :a]
@test ngtb[(1, 19), :a] == gtb[(1, 10), :a]
@test ngtb[(1, 20), :a] == gtb[(1, 10), :a]
@test ngtb[(2, 19), :a] == gtb[(1, 10), :a]
@test ngtb[(2, 20), :a] == gtb[(1, 10), :a]
@test ngtb[(19, 1), :b] == gtb[(10, 1), :b]
@test ngtb[(19, 2), :b] == gtb[(10, 1), :b]
@test ngtb[(20, 1), :b] == gtb[(10, 1), :b]
@test ngtb[(20, 2), :b] == gtb[(10, 1), :b]
@test ngtb[(19, 19), :b] == gtb[(10, 10), :b]
@test ngtb[(19, 20), :b] == gtb[(10, 10), :b]
@test ngtb[(20, 19), :b] == gtb[(10, 10), :b]
@test ngtb[(20, 20), :b] == gtb[(10, 10), :b]
grid = CartesianGrid(3, 3, 3)
tgrid = CartesianGrid(minimum(grid), maximum(grid), dims=(6, 6, 6))
gtb = georef((a=rand(Float64, 27), b=rand(Int, 27)), grid)
ngtb = gtb |> Downscale(2, 2, 2)
@test domain(ngtb) == tgrid
@test ngtb[(1, 1, 1), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(1, 2, 1), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(2, 1, 1), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(2, 2, 1), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(1, 1, 2), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(1, 2, 2), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(2, 1, 2), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(2, 2, 2), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(5, 5, 5), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(5, 6, 5), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(6, 5, 5), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(6, 6, 5), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(5, 5, 6), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(5, 6, 6), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(6, 5, 6), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(6, 6, 6), :b] == gtb[(3, 3, 3), :b]
rgrid = convert(RectilinearGrid, grid)
trgrid = convert(RectilinearGrid, tgrid)
gtb = georef((a=rand(Float64, 27), b=rand(Int, 27)), rgrid)
ngtb = gtb |> Downscale(2, 2, 2)
@test domain(ngtb) == trgrid
@test ngtb[(1, 1, 1), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(1, 2, 1), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(2, 1, 1), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(2, 2, 1), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(1, 1, 2), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(1, 2, 2), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(2, 1, 2), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(2, 2, 2), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(5, 5, 5), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(5, 6, 5), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(6, 5, 5), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(6, 6, 5), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(5, 5, 6), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(5, 6, 6), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(6, 5, 6), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(6, 6, 6), :b] == gtb[(3, 3, 3), :b]
sgrid = convert(StructuredGrid, grid)
tsgrid = convert(StructuredGrid, tgrid)
gtb = georef((a=rand(Float64, 27), b=rand(Int, 27)), sgrid)
ngtb = gtb |> Downscale(2, 2, 2)
@test domain(ngtb) == tsgrid
@test ngtb[(1, 1, 1), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(1, 2, 1), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(2, 1, 1), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(2, 2, 1), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(1, 1, 2), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(1, 2, 2), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(2, 1, 2), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(2, 2, 2), :a] == gtb[(1, 1, 1), :a]
@test ngtb[(5, 5, 5), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(5, 6, 5), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(6, 5, 5), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(6, 6, 5), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(5, 5, 6), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(5, 6, 6), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(6, 5, 6), :b] == gtb[(3, 3, 3), :b]
@test ngtb[(6, 6, 6), :b] == gtb[(3, 3, 3), :b]
end
@testset "InterpolateMissing" begin
@test !isrevertible(InterpolateMissing())
grid = CartesianGrid((100, 100), (0.5, 0.5), (1.0, 1.0))
linds = LinearIndices(size(grid))
z = Vector{Union{Missing,Float64}}(missing, 10000)
z[linds[25, 25]] = 1.0
z[linds[50, 75]] = 0.0
z[linds[75, 50]] = 1.0
gtb = georef((; z), grid)
variogram = GaussianVariogram(range=35.0, nugget=0.0)
ngtb = gtb |> InterpolateMissing(:z => Kriging(variogram), maxneighbors=3)
@test isapprox(ngtb.z[linds[25, 25]], 1.0, atol=1e-3)
@test isapprox(ngtb.z[linds[50, 75]], 0.0, atol=1e-3)
@test isapprox(ngtb.z[linds[75, 50]], 1.0, atol=1e-3)
ngtb = gtb |> InterpolateMissing(:z => Kriging(variogram), maxneighbors=3)
@test isapprox(ngtb.z[linds[25, 25]], 1.0, atol=1e-3)
@test isapprox(ngtb.z[linds[50, 75]], 0.0, atol=1e-3)
@test isapprox(ngtb.z[linds[75, 50]], 1.0, atol=1e-3)
ngtb = gtb |> InterpolateMissing(:z => Kriging(variogram), maxneighbors=3, neighborhood=MetricBall(100.0))
@test isapprox(ngtb.z[linds[25, 25]], 1.0, atol=1e-3)
@test isapprox(ngtb.z[linds[50, 75]], 0.0, atol=1e-3)
@test isapprox(ngtb.z[linds[75, 50]], 1.0, atol=1e-3)
# units
grid = CartesianGrid(3)
gtb = georef((; T=[1.0, NaN, 1.0] * u"K"), grid)
ngtb = gtb |> InterpolateMissing(IDW())
@test unit(eltype(ngtb.T)) == u"K"
# affine units
gtb = georef((; T=[1.0, NaN, 1.0] * u"°C"), grid)
ngtb = gtb |> InterpolateMissing(IDW())
@test unit(eltype(ngtb.T)) == u"K"
# default model is NN
pset = PointSet(rand(Point, 5))
gtb = georef((; z=[1.0, missing, 2.0, missing, 3.0], c=[missing, "a", "b", "c", missing]), pset)
ngtb = gtb |> InterpolateMissing()
@test ngtb.z ⊆ [1.0, 2.0, 3.0]
@test ngtb.c ⊆ ["a", "b", "c"]
end
@testset "InterpolateNaN" begin
@test !isrevertible(InterpolateNaN())
grid = CartesianGrid((100, 100), (0.5, 0.5), (1.0, 1.0))
linds = LinearIndices(size(grid))
z = fill(NaN, 10000)
z[linds[25, 25]] = 1.0
z[linds[50, 75]] = 0.0
z[linds[75, 50]] = 1.0
gtb = georef((; z), grid)
variogram = GaussianVariogram(range=35.0, nugget=0.0)
ngtb = gtb |> InterpolateNaN(:z => Kriging(variogram), maxneighbors=3)
@test isapprox(ngtb.z[linds[25, 25]], 1.0, atol=1e-3)
@test isapprox(ngtb.z[linds[50, 75]], 0.0, atol=1e-3)
@test isapprox(ngtb.z[linds[75, 50]], 1.0, atol=1e-3)
ngtb = gtb |> InterpolateNaN(:z => Kriging(variogram), maxneighbors=3)
@test isapprox(ngtb.z[linds[25, 25]], 1.0, atol=1e-3)
@test isapprox(ngtb.z[linds[50, 75]], 0.0, atol=1e-3)
@test isapprox(ngtb.z[linds[75, 50]], 1.0, atol=1e-3)
ngtb = gtb |> InterpolateNaN(:z => Kriging(variogram), maxneighbors=3, neighborhood=MetricBall(100.0))
@test isapprox(ngtb.z[linds[25, 25]], 1.0, atol=1e-3)
@test isapprox(ngtb.z[linds[50, 75]], 0.0, atol=1e-3)
@test isapprox(ngtb.z[linds[75, 50]], 1.0, atol=1e-3)
# units
grid = CartesianGrid(3)
gtb = georef((; T=[1.0, NaN, 1.0] * u"K"), grid)
ngtb = gtb |> InterpolateNaN(IDW())
@test unit(eltype(ngtb.T)) == u"K"
# affine units
gtb = georef((; T=[1.0, NaN, 1.0] * u"°C"), grid)
ngtb = gtb |> InterpolateNaN(IDW())
@test unit(eltype(ngtb.T)) == u"K"
# default model is NN
pset = PointSet(rand(Point, 5))
gtb = georef((; z=[1.0, NaN, 2.0, NaN, 3.0]), pset)
ngtb = gtb |> InterpolateNaN()
@test ngtb.z ⊆ [1.0, 2.0, 3.0]
end
@testset "InterpolateNeighbors" begin
@test !isrevertible(InterpolateNeighbors(CartesianGrid(2, 2)))
pts = rand(Point, 3)
gtb = georef((a=[1, 2, 3], b=[4, 5, 6]), pts)
ngtb = gtb |> InterpolateNeighbors(pts, IDW(), maxneighbors=3)
@test ngtb.a == gtb.a
@test ngtb.b == gtb.b
@test ngtb.geometry == gtb.geometry
gtb = georef((; z=[1.0, 0.0, 1.0]), [(25.0, 25.0), (50.0, 75.0), (75.0, 50.0)])
grid = CartesianGrid((100, 100), (0.5, 0.5), (1.0, 1.0))
linds = LinearIndices(size(grid))
variogram = GaussianVariogram(range=35.0, nugget=0.0)
ngtb = gtb |> InterpolateNeighbors(grid, :z => Kriging(variogram), maxneighbors=3)
@test isapprox(ngtb.z[linds[25, 25]], 1.0, atol=1e-3)
@test isapprox(ngtb.z[linds[50, 75]], 0.0, atol=1e-3)
@test isapprox(ngtb.z[linds[75, 50]], 1.0, atol=1e-3)
ngtb = gtb |> InterpolateNeighbors(grid, :z => Kriging(variogram), maxneighbors=3)
@test isapprox(ngtb.z[linds[25, 25]], 1.0, atol=1e-3)
@test isapprox(ngtb.z[linds[50, 75]], 0.0, atol=1e-3)
@test isapprox(ngtb.z[linds[75, 50]], 1.0, atol=1e-3)
ngtb = gtb |> InterpolateNeighbors(grid, :z => Kriging(variogram), maxneighbors=3, neighborhood=MetricBall(100.0))
@test isapprox(ngtb.z[linds[25, 25]], 1.0, atol=1e-3)
@test isapprox(ngtb.z[linds[50, 75]], 0.0, atol=1e-3)
@test isapprox(ngtb.z[linds[75, 50]], 1.0, atol=1e-3)
# units
gtb = georef((; T=[1.0, 0.0, 1.0] * u"K"), rand(Point, 3))
grid = CartesianGrid(5, 5, 5)
ngtb = gtb |> InterpolateNeighbors(grid, IDW())
@test unit(eltype(ngtb.T)) == u"K"
# affine units
gtb = georef((; T=[-272.15, -273.15, -272.15] * u"°C"), rand(Point, 3))
ngtb = gtb |> InterpolateNeighbors(grid, IDW())
@test unit(eltype(ngtb.T)) == u"K"
# default model is NN
pts = rand(Point, 3)
gtb = georef((; z=[1.0, 2.0, 3.0], c=["a", "b", "c"]), pts)
ngtb = gtb |> InterpolateNeighbors(pts)
@test ngtb == gtb
end
@testset "Interpolate" begin
@test !isrevertible(Interpolate(CartesianGrid(2, 2)))
pts = rand(Point, 3)
gtb = georef((a=[1, 2, 3], b=[4, 5, 6]), pts)
ngtb = gtb |> Interpolate(pts, IDW())
@test ngtb.a == gtb.a
@test ngtb.b == gtb.b
@test ngtb.geometry == gtb.geometry
gtb = georef((; z=[1.0, 0.0, 1.0]), [(25.0, 25.0), (50.0, 75.0), (75.0, 50.0)])
grid = CartesianGrid((100, 100), (0.5, 0.5), (1.0, 1.0))
linds = LinearIndices(size(grid))
variogram = GaussianVariogram(range=35.0, nugget=0.0)
ngtb = gtb |> Interpolate(grid, :z => Kriging(variogram))
@test isapprox(ngtb.z[linds[25, 25]], 1.0, atol=1e-3)
@test isapprox(ngtb.z[linds[50, 75]], 0.0, atol=1e-3)
@test isapprox(ngtb.z[linds[75, 50]], 1.0, atol=1e-3)
# units
gtb = georef((; T=[1.0, 0.0, 1.0] * u"K"), rand(Point, 3))
grid = CartesianGrid(5, 5, 5)
ngtb = gtb |> Interpolate(grid)
@test unit(eltype(ngtb.T)) == u"K"
# affine units
gtb = georef((; T=[-272.15, -273.15, -272.15] * u"°C"), rand(Point, 3))
grid = CartesianGrid(5, 5, 5)
ngtb = gtb |> Interpolate(grid)
@test unit(eltype(ngtb.T)) == u"K"
# default model is NN
pts = rand(Point, 3)
gtb = georef((; z=[1.0, 2.0, 3.0], c=["a", "b", "c"]), pts)
ngtb = gtb |> Interpolate(pts)
@test ngtb == gtb
end
@testset "Potrace" begin
# challenging case with letters
img = load(joinpath(datadir, "letters.png"))
gtb = georef((color=img,))
trans = Potrace(1)
ngtb, cache = apply(trans, gtb)
ndom = domain(ngtb)
@test nelements(ndom) == 2
@test eltype(ndom) <: Multi
polys1 = parent(ndom[1])
polys2 = parent(ndom[2])
@test length(polys1) == 4
@test length(polys2) == 2
rgtb = revert(trans, ngtb, cache)
dom = domain(gtb)
rdom = domain(rgtb)
@test rdom isa Grid
@test size(rdom) == size(dom)
@test minimum(rdom) == minimum(dom)
@test maximum(rdom) == maximum(dom)
@test spacing(rdom) == spacing(dom)
# concentric circles
ball1 = Ball((0, 0), 1)
ball2 = Ball((0, 0), 2)
ball3 = Ball((0, 0), 3)
grid = CartesianGrid((-5, -5), (5, 5), dims=(100, 100))
inds1 = centroid.(grid) .∈ Ref(ball1)
inds2 = centroid.(grid) .∈ Ref(ball2)
inds3 = centroid.(grid) .∈ Ref(ball3)
mask = zeros(100, 100)
mask[inds3] .= 1
mask[inds2] .= 0
mask[inds1] .= 1
dat = georef((mask=mask,))
new = dat |> Potrace(1)
dom = domain(new)
@test nelements(dom) == 2
@test eltype(dom) <: Multi
polys1 = parent(dom[1])
polys2 = parent(dom[2])
@test length(polys1) == 2
@test length(polys2) == 2
new1 = dat |> Potrace(1, ϵ=0.1)
new2 = dat |> Potrace(1, ϵ=0.5)
dom1 = domain(new1)
dom2 = domain(new2)
for (g1, g2) in zip(dom1, dom2)
@test nvertices(g1) > nvertices(g2)
end
# make sure that aggregation works
Z = [sin(i / 10) + sin(j / 10) for i in 1:100, j in 1:100]
M = Z .> 0
Ω = georef((Z=Z, M=M))
𝒯 = Ω |> Potrace(:M, :Z => mean)
masks = unique(Ω.M)
@test nelements(domain(𝒯)) == 2
@test Set(𝒯.M) == Set([true, false])
@test all(z -> -1 ≤ z ≤ 1, 𝒯.Z)
@test 𝒯.Z[1] == mean(Ω.Z[masks[1] .== Ω.M])
@test 𝒯.Z[2] == mean(Ω.Z[masks[2] .== Ω.M])
# units
gtb = georef((; T=Z * u"K", M))
ngtb = gtb |> Potrace(:M)
masks = unique(gtb.M)
@test unit(eltype(ngtb.T)) == u"K"
@test ngtb.T[1] ≈ mean(gtb.T[masks[1] .== gtb.M])
@test ngtb.T[2] ≈ mean(gtb.T[masks[2] .== gtb.M])
# affine units
gtb = georef((; T=Z * u"°C", M))
ngtb = gtb |> Potrace(:M)
masks = unique(gtb.M)
@test unit(eltype(ngtb.T)) == u"K"
v = GeoStatsTransforms._absunit(gtb.T[masks[1] .== gtb.M])
@test ngtb.T[1] ≈ mean(v)
v = GeoStatsTransforms._absunit(gtb.T[masks[2] .== gtb.M])
@test ngtb.T[2] ≈ mean(v)
end
@testset "Rasterize" begin
@test isrevertible(Rasterize(10, 10)) == true
a = [1, 2, 3, 4, 5]
b = [1.1, 2.2, 3.3, 4.4, 5.5]
pts = [(3, 9), (7, 8), (8, 5), (5, 4), (1, 5)]
seg1 = Segment(pts[1], pts[2])
seg2 = Segment(pts[2], pts[3])
seg3 = Segment(pts[3], pts[4])
seg4 = Segment(pts[4], pts[5])
seg5 = Segment(pts[5], pts[1])
poly1 = PolyArea((2, 0), (6, 2), (2, 2))
poly2 = PolyArea((0, 6), (3, 8), (0, 10))
poly3 = PolyArea((3, 6), (9, 6), (9, 9), (6, 9))
poly4 = PolyArea((7, 0), (10, 0), (10, 4), (7, 4))
poly5 = PolyArea((1, 3), (5, 3), (6, 6), (3, 8), (0, 6))
gtb = georef((; a, b), pts)
grid = CartesianGrid(10, 10)
trans = Rasterize(grid)
ngtb, cache = apply(trans, gtb)
linds = LinearIndices((10, 10))
@test ngtb.a[linds[3, 9]] == 1
@test ngtb.a[linds[7, 8]] == 2
@test ngtb.a[linds[8, 5]] == 3
@test ngtb.a[linds[5, 4]] == 4
@test ngtb.a[linds[1, 5]] == 5
@test ngtb.b[linds[3, 9]] == 1.1
@test ngtb.b[linds[7, 8]] == 2.2
@test ngtb.b[linds[8, 5]] == 3.3
@test ngtb.b[linds[5, 4]] == 4.4
@test ngtb.b[linds[1, 5]] == 5.5
gtb = georef((; a, b), [seg1, seg2, seg3, seg4, seg5])
grid = CartesianGrid((0, 0), (10, 10), dims=(20, 20))
trans = Rasterize(grid)
ngtb, cache = apply(trans, gtb)
linds = LinearIndices((20, 20))
@test ngtb.a[linds[10, 17]] == 1
@test ngtb.a[linds[15, 13]] == 2
@test ngtb.a[linds[13, 9]] == 3
@test ngtb.a[linds[6, 9]] == 4
@test ngtb.a[linds[4, 14]] == 5
@test ngtb.b[linds[10, 17]] == 1.1
@test ngtb.b[linds[15, 13]] == 2.2
@test ngtb.b[linds[13, 9]] == 3.3
@test ngtb.b[linds[6, 9]] == 4.4
@test ngtb.b[linds[4, 14]] == 5.5
gtb = georef((; a, b), [poly1, poly2, poly3, poly4, poly5])
trans = Rasterize(20, 20, :a => last, :b => mean)
ngtb, cache = apply(trans, gtb)
linds = LinearIndices((20, 20))
@test ngtb.a[linds[7, 3]] == 1
@test ngtb.a[linds[3, 16]] == 2
@test ngtb.a[linds[15, 15]] == 3
@test ngtb.a[linds[17, 5]] == 4
@test ngtb.a[linds[6, 11]] == 5
@test ngtb.b[linds[7, 3]] == 1.1
@test ngtb.b[linds[3, 16]] == 2.2
@test ngtb.b[linds[15, 15]] == 3.3
@test ngtb.b[linds[17, 5]] == 4.4
@test ngtb.b[linds[6, 11]] == 5.5
# intersection: poly3 with poly5
@test ngtb.a[linds[9, 13]] == last(gtb.a[[3, 5]])
@test ngtb.b[linds[9, 13]] == mean(gtb.b[[3, 5]])
# units
gtb = georef((; T=rand(5) * u"K"), [poly1, poly2, poly3, poly4, poly5])
ngtb = gtb |> Rasterize(20, 20)
@test unit(eltype(ngtb.T)) == u"K"
@test ngtb.T[linds[9, 13]] == mean(gtb.T[[3, 5]])
# affine units
gtb = georef((; T=rand(5) * u"°C"), [poly1, poly2, poly3, poly4, poly5])
ngtb = gtb |> Rasterize(20, 20)
@test unit(eltype(ngtb.T)) == u"K"
v = GeoStatsTransforms._absunit(gtb.T[[3, 5]])
@test ngtb.T[linds[9, 13]] == mean(v)
# revert
gtb = georef((; z=1:4), [poly1, poly2, poly3, poly4])
trans = Rasterize(200, 200)
ngtb, cache = apply(trans, gtb)
rgtb = revert(trans, ngtb, cache)
inds = filter(!iszero, unique(cache))
@test isapprox(area(gtb.geometry[inds[1]]), area(rgtb.geometry[1]), atol=0.5u"m^2")
@test isapprox(area(gtb.geometry[inds[2]]), area(rgtb.geometry[2]), atol=0.5u"m^2")
@test isapprox(area(gtb.geometry[inds[3]]), area(rgtb.geometry[3]), atol=0.5u"m^2")
@test isapprox(area(gtb.geometry[inds[4]]), area(rgtb.geometry[4]), atol=0.5u"m^2")
# geotable with "mask" column
gtb = georef((; z=1:4, mask=4:-1:1), [poly1, poly2, poly3, poly4])
trans = Rasterize(10, 10)
ngtb, cache = apply(trans, gtb)
rgtb = revert(trans, ngtb, cache)
@test nrow(rgtb) == nrow(gtb)
@test ncol(rgtb) == ncol(gtb)
@test propertynames(rgtb) == propertynames(gtb)
end
using GeoStatsTransforms
using Meshes
using Tables
using Unitful
using GeoTables
using GeoStatsFunctions
using GeoStatsModels
using GeoStatsProcesses
using GeoStatsImages
using TableTransforms
using CategoricalArrays
using Statistics
using Test, StableRNGs
using FileIO: load
import DataScienceTraits as DST
import ImageQuilting
# environment settings
datadir = joinpath(@__DIR__, "data")
# list of tests
testfiles = [
"interpolate.jl",
"interpneighbors.jl",
"interpmissing.jl",
"interpnan.jl",
"simulate.jl",
"cookiecutter.jl",
"uniquecoords.jl",
"aggregate.jl",
"transfer.jl",
"upscale.jl",
"downscale.jl",
"clustering.jl",
"rasterize.jl",
"potrace.jl",
"detrend.jl"
]
@testset "GeoStatsTransforms.jl" begin
for testfile in testfiles
println("Testing $testfile...")
include(testfile)
end
end
@testset "Simulate" begin
@test !isrevertible(Simulate(CartesianGrid(10, 10), :a => GaussianProcess()))
a = rand(1000)
b = rand(1000)
c = rand(1000)
gtb = georef((; a, b, c), CartesianGrid(10, 10, 10))
dom = CartesianGrid(20, 20, 20)
sim = gtb |> Simulate(dom, 5, :a => GaussianProcess())
@test nrow(sim) == 8000
@test propertynames(sim) == [:a_1, :a_2, :a_3, :a_4, :a_5, :geometry]
sim = gtb |> Simulate(dom, 10, :a => GaussianProcess())
@test nrow(sim) == 8000
@test propertynames(sim) == [:a_01, :a_02, :a_03, :a_04, :a_05, :a_06, :a_07, :a_08, :a_09, :a_10, :geometry]
pts = rand(Point, 200)
sim = gtb |> Simulate(pts, 3, [:b, :c] => GaussianProcess())
@test nrow(sim) == 200
@test propertynames(sim) == [:b_1, :c_1, :b_2, :c_2, :b_3, :c_3, :geometry]
sim = gtb |> Simulate(pts, [:b, :c] => GaussianProcess())
@test nrow(sim) == 200
@test propertynames(sim) == [:b_1, :c_1, :geometry]
end
@testset "Transfer" begin
@test !isrevertible(Transfer(CartesianGrid(10, 10)))
pts1 = Point.([(1, 1), (7, 1), (4, 4)])
pts2 = Point.([(5, 4), (3, 4), (0, 1), (7, 0), (7, 2)])
gtb = georef((a=rand(Float64, 3), b=rand(Int, 3)), pts1)
ngtb = gtb |> Transfer(pts2)
@test domain(ngtb) == PointSet(pts2)
@test ngtb.a[1] == gtb.a[3]
@test ngtb.a[2] == gtb.a[3]
@test ngtb.a[3] == gtb.a[1]
@test ngtb.a[4] == gtb.a[2]
@test ngtb.a[5] == gtb.a[2]
@test ngtb.b[1] == gtb.b[3]
@test ngtb.b[2] == gtb.b[3]
@test ngtb.b[3] == gtb.b[1]
@test ngtb.b[4] == gtb.b[2]
@test ngtb.b[5] == gtb.b[2]
grid1 = CartesianGrid((0.0, 0.0), (10.0, 10.0), dims=(10, 10))
grid2 = CartesianGrid((0.0, 0.0), (10.0, 10.0), dims=(20, 20))
gtb = georef((a=rand(Float64, 100), b=rand(Int, 100)), grid1)
ngtb = gtb |> Transfer(grid2)
@test domain(ngtb) == grid2
@test ngtb[(1, 1), :a] == gtb[(1, 1), :a]
@test ngtb[(1, 2), :a] == gtb[(1, 1), :a]
@test ngtb[(2, 1), :a] == gtb[(1, 1), :a]
@test ngtb[(2, 2), :a] == gtb[(1, 1), :a]
@test ngtb[(1, 19), :a] == gtb[(1, 10), :a]
@test ngtb[(1, 20), :a] == gtb[(1, 10), :a]
@test ngtb[(2, 19), :a] == gtb[(1, 10), :a]
@test ngtb[(2, 20), :a] == gtb[(1, 10), :a]
@test ngtb[(19, 1), :b] == gtb[(10, 1), :b]
@test ngtb[(19, 2), :b] == gtb[(10, 1), :b]
@test ngtb[(20, 1), :b] == gtb[(10, 1), :b]
@test ngtb[(20, 2), :b] == gtb[(10, 1), :b]
@test ngtb[(19, 19), :b] == gtb[(10, 10), :b]
@test ngtb[(19, 20), :b] == gtb[(10, 10), :b]
@test ngtb[(20, 19), :b] == gtb[(10, 10), :b]
@test ngtb[(20, 20), :b] == gtb[(10, 10), :b]
end
@testset "UniqueCoords" begin
@test !isrevertible(UniqueCoords())
X = Float64[i * j for i in 1:2, j in 1:1_000_000] * u"m"
z = rand(1_000_000)
d = georef((z=[z; z],), Tuple.(eachcol([X X])))
u = d |> UniqueCoords()
du = domain(u)
p = [centroid(du, i) for i in 1:nelements(du)]
U = reduce(hcat, to.(p))
@test nelements(du) == 1_000_000
@test Set(eachcol(U)) == Set(eachcol(X))
X = rand(3, 100)
z = rand(100)
n = [string(i) for i in 1:100]
Xd = hcat(X, X[:, 1:10])
zd = vcat(z, z[1:10])
nd = vcat(n, n[1:10])
sdata = georef((z=zd, n=nd), Tuple.(eachcol(Xd)))
ndata = sdata |> UniqueCoords()
@test nrow(ndata) == 100
# domain with repeated points
a = rand(100)
b = rand(1:10, 100)
table = (; a, b)
points = Point.(1:10, 10:-1:1)
pset = PointSet(rand(points, 100))
sdata = georef(table, pset)
ndata = sdata |> UniqueCoords()
@test nrow(ndata) == 10
# aggregators
pset = PointSet(repeat(points, inner=10))
sdata = georef(table, pset)
# default aggregators
ndata = sdata |> UniqueCoords()
@test nrow(ndata) == 10
for i in 1:10
j = i * 10
@test ndata.a[i] == mean(sdata.a[(j - 9):j])
end
for i in 1:10
j = i * 10
@test ndata.b[i] == first(sdata.b[(j - 9):j])
end
# custom aggregators
# selector: indices
ndata = sdata |> UniqueCoords(1 => std, 2 => median)
@test nrow(ndata) == 10
for i in 1:10
j = i * 10
@test ndata.a[i] == std(sdata.a[(j - 9):j])
end
for i in 1:10
j = i * 10
@test ndata.b[i] == median(sdata.b[(j - 9):j])
end
# selector: symbols
ndata = sdata |> UniqueCoords(:a => last, :b => first)
@test nrow(ndata) == 10
for i in 1:10
j = i * 10
@test ndata.a[i] == last(sdata.a[(j - 9):j])
end
for i in 1:10
j = i * 10
@test ndata.b[i] == first(sdata.b[(j - 9):j])
end
# selector: strings
ndata = sdata |> UniqueCoords("a" => maximum, "b" => minimum)
@test nrow(ndata) == 10
for i in 1:10
j = i * 10
@test ndata.a[i] == maximum(sdata.a[(j - 9):j])
end
for i in 1:10
j = i * 10
@test ndata.b[i] == minimum(sdata.b[(j - 9):j])
end
# units
sdata = georef((; T=rand(100) * u"K"), pset)
ndata = sdata |> UniqueCoords()
@test nrow(ndata) == 10
@test unit(eltype(ndata.T)) == u"K"
for i in 1:10
j = i * 10
@test ndata.T[i] == mean(sdata.T[(j - 9):j])
end
# affine units
sdata = georef((; T=rand(100) * u"°C"), pset)
ndata = sdata |> UniqueCoords()
@test nrow(ndata) == 10
@test unit(eltype(ndata.T)) == u"K"
for i in 1:10
j = i * 10
v = GeoStatsTransforms._absunit(sdata.T[(j - 9):j])
@test ndata.T[i] == mean(v)
end
# units and missings
sdata = georef((; T=[fill(missing, 50); rand(50)] * u"K"), pset)
ndata = sdata |> UniqueCoords()
@test nrow(ndata) == 10
@test unit(eltype(ndata.T)) == u"K"
for i in 1:10
j = i * 10
v = GeoStatsTransforms._skipmissing(mean)(sdata.T[(j - 9):j])
@test isequal(ndata.T[i], v)
end
end
@testset "Upscale" begin
@test !isrevertible(Upscale(2, 2))
grid = CartesianGrid((0.0, 0.0), (10.0, 10.0), dims=(20, 20))
tgrid = CartesianGrid((0.0, 0.0), (10.0, 10.0), dims=(10, 10))
gtb = georef((a=rand(Float64, 400), b=rand(Int, 400)), grid)
ngtb = gtb |> Upscale(2, 2)
@test domain(ngtb) == tgrid
@test ngtb[(1, 1), :a] == mean(gtb[(1:2, 1:2), :a])
@test ngtb[(1, 10), :a] == mean(gtb[(1:2, 19:20), :a])
@test ngtb[(10, 1), :a] == mean(gtb[(19:20, 1:2), :a])
@test ngtb[(10, 10), :a] == mean(gtb[(19:20, 19:20), :a])
@test ngtb[(1, 1), :b] == first(gtb[(1:2, 1:2), :b])
@test ngtb[(1, 10), :b] == first(gtb[(1:2, 19:20), :b])
@test ngtb[(10, 1), :b] == first(gtb[(19:20, 1:2), :b])
@test ngtb[(10, 10), :b] == first(gtb[(19:20, 19:20), :b])
rgrid = convert(RectilinearGrid, grid)
trgrid = convert(RectilinearGrid, tgrid)
gtb = georef((a=rand(Float64, 400), b=rand(Int, 400)), rgrid)
ngtb = gtb |> Upscale(2, 2)
@test domain(ngtb) == trgrid
@test ngtb[(1, 1), :a] == mean(gtb[(1:2, 1:2), :a])
@test ngtb[(1, 10), :a] == mean(gtb[(1:2, 19:20), :a])
@test ngtb[(10, 1), :a] == mean(gtb[(19:20, 1:2), :a])
@test ngtb[(10, 10), :a] == mean(gtb[(19:20, 19:20), :a])
@test ngtb[(1, 1), :b] == first(gtb[(1:2, 1:2), :b])
@test ngtb[(1, 10), :b] == first(gtb[(1:2, 19:20), :b])
@test ngtb[(10, 1), :b] == first(gtb[(19:20, 1:2), :b])
@test ngtb[(10, 10), :b] == first(gtb[(19:20, 19:20), :b])
sgrid = convert(StructuredGrid, grid)
tsgrid = convert(StructuredGrid, tgrid)
gtb = georef((a=rand(Float64, 400), b=rand(Int, 400)), sgrid)
ngtb = gtb |> Upscale(2, 2)
@test domain(ngtb) == tsgrid
@test ngtb[(1, 1), :a] == mean(gtb[(1:2, 1:2), :a])
@test ngtb[(1, 10), :a] == mean(gtb[(1:2, 19:20), :a])
@test ngtb[(10, 1), :a] == mean(gtb[(19:20, 1:2), :a])
@test ngtb[(10, 10), :a] == mean(gtb[(19:20, 19:20), :a])
@test ngtb[(1, 1), :b] == first(gtb[(1:2, 1:2), :b])
@test ngtb[(1, 10), :b] == first(gtb[(1:2, 19:20), :b])
@test ngtb[(10, 1), :b] == first(gtb[(19:20, 1:2), :b])
@test ngtb[(10, 10), :b] == first(gtb[(19:20, 19:20), :b])
tgrid = CartesianGrid((0.0, 0.0), (10.0, 10.0), dims=(10, 5))
gtb = georef((a=rand(Float64, 400), b=rand(Int, 400)), grid)
ngtb = gtb |> Upscale(2, 4)
@test domain(ngtb) == tgrid
@test ngtb[(1, 1), :a] == mean(gtb[(1:2, 1:4), :a])
@test ngtb[(1, 5), :a] == mean(gtb[(1:2, 17:20), :a])
@test ngtb[(10, 1), :a] == mean(gtb[(19:20, 1:4), :a])
@test ngtb[(10, 5), :a] == mean(gtb[(19:20, 17:20), :a])
@test ngtb[(1, 1), :b] == first(gtb[(1:2, 1:4), :b])
@test ngtb[(1, 5), :b] == first(gtb[(1:2, 17:20), :b])
@test ngtb[(10, 1), :b] == first(gtb[(19:20, 1:4), :b])
@test ngtb[(10, 5), :b] == first(gtb[(19:20, 17:20), :b])
end
# GeoStatsTransforms.jl
[![][build-img]][build-url] [![][codecov-img]][codecov-url]
Geospatial transforms for the [GeoStats.jl](https://github.com/JuliaEarth/GeoStats.jl) framework.
## Asking for help
If you have any questions, please [contact our community](https://juliaearth.github.io/GeoStats.jl/stable/about/community.html).
[build-img]: https://img.shields.io/github/actions/workflow/status/JuliaEarth/GeoStatsTransforms.jl/CI.yml?branch=main&style=flat-square
[build-url]: https://github.com/JuliaEarth/GeoStatsTransforms.jl/actions
[codecov-img]: https://img.shields.io/codecov/c/github/JuliaEarth/GeoStatsTransforms.jl?style=flat-square
[codecov-url]: https://codecov.io/gh/JuliaEarth/GeoStatsTransforms.jl
# SAMTools v0.0.1 (MIT), https://github.com/natgeo-wong/SAMTools.jl.git
using Documenter
using SAMTools
makedocs(;
modules = [SAMTools],
doctest = false,
format = Documenter.HTML(
collapselevel = 1,
prettyurls = false
),
authors = "Nathanael Wong <[email protected]>",
sitename = "SAMTools.jl",
pages = [
"Home" => "index.md",
],
)
deploydocs(
repo = "github.com/natgeo-wong/SAMTools.jl.git",
)
| SAMTools | https://github.com/natgeo-wong/SAMTools.jl.git |
|
[
"MIT"
] | 0.0.1 | 4772ee936cf75d2d44eeae383677b2cb63c58899 | code | 579 | module SAMTools
## Modules Used
using CFTime
using Crayons, Crayons.Box
using Dates
using DelimitedFiles
using Glob
using JLD2
using NCDatasets
using NumericalIntegration
using Printf
using Statistics
## Exporting the following functions:
export
samwelcome, samstartup, samresort, samanalysis,
samparametercopy, samparameterload, samparameteradd,
sampre2lvl, samvert2lvl
## Including other files in the module
include("startup.jl")
include("initialize.jl")
include("resort.jl")
include("analysis.jl")
include("frontend.jl")
include("backend.jl")
end
| SAMTools | https://github.com/natgeo-wong/SAMTools.jl.git |
|
[
"MIT"
] | 0.0.1 | 4772ee936cf75d2d44eeae383677b2cb63c58899 | code | 529 | function samanalysis(
smod::AbstractDict, spar::AbstractDict, stime::AbstractDict,
sroot::AbstractDict
)
if occursin("2D",smod["moduletype"]);
samanalysis2D(smod,spar,stime,sroot)
else; samanalysis3D(smod,spar,stime,sroot)
end
end
function samanalysis(
init::AbstractDict, sroot::AbstractString;
modID::AbstractString, parID::AbstractString,
height::Real=0
)
smod,spar,stime = saminitialize(init,modID=modID,parID=parID,height=height)
samanalysis(smod,spar,stime,sroot)
end
| SAMTools | https://github.com/natgeo-wong/SAMTools.jl.git |
|
[
"MIT"
] | 0.0.1 | 4772ee936cf75d2d44eeae383677b2cb63c58899 | code | 195 | function samncoffsetscale(data::Array{<:Real})
dmax = maximum(data); dmin = minimum(data);
scale = (dmax-dmin) / 65533;
offset = (dmax+dmin-scale) / 2;
return scale,offset
end
| SAMTools | https://github.com/natgeo-wong/SAMTools.jl.git |
|
[
"MIT"
] | 0.0.1 | 4772ee936cf75d2d44eeae383677b2cb63c58899 | code | 1017 | sampre2lvl(pressure::Real,smod::AbstractDict) = argmin(abs.(smod["p"] .- pressure))
samvert2lvl(vertical::Real,smod::AbstractDict) = argmin(abs.(smod["z"] .- vertical))
function samrawfolder(
spar::AbstractDict, sroot::AbstractDict;
ilvl::Real=0
)
if ilvl == 0
return joinpath(sroot["raw"],"$(spar["ID"])");
    else; return joinpath(sroot["raw"],"$(spar["ID"])-lvl$(@sprintf("%03d",ilvl))")
end
end
function samrawname(
spar::AbstractDict, sroot::AbstractDict;
ilvl::Real=0, irun::Real
)
fol = samrawfolder(spar,sroot,ilvl=ilvl)
if ilvl == 0
fnc = "$(spar["ID"])-sfc-run$(@sprintf("%04d",irun)).nc";
else; fnc = "$(spar["ID"])-lvl$(@sprintf("%03d",ilvl))-run$(@sprintf("%04d",irun)).nc"
end
return joinpath(fol,fnc)
end
function samrawread(
spar::AbstractDict, sroot::AbstractDict;
ilvl::Real=0, irun::Real
)
fnc = samrawname(spar,sroot,ilvl=ilvl,irun=irun)
    ds = Dataset(fnc)
return ds,ds[spar["ID"]]
end
| SAMTools | https://github.com/natgeo-wong/SAMTools.jl.git |
|
[
"MIT"
] | 0.0.1 | 4772ee936cf75d2d44eeae383677b2cb63c58899 | code | 6437 | """
This file initializes the samTools module by setting and determining the
ECMWF reanalysis parameters to be analyzed and the regions upon which the data
are to be extracted from. Functionalities include:
- Setting up of reanalysis module type
- Setting up of reanalysis parameters to be analyzed
- Setting up of time steps upon which data are to be downloaded
- Setting up of region of analysis based on ClimateEasy
"""
# samTools Parameter Setup
function samparametercopy(;overwrite::Bool=false)
jfol = joinpath(DEPOT_PATH[1],"files/samTools/"); mkpath(jfol);
ftem = joinpath(@__DIR__,"../extra/partemplate.txt")
fpar = joinpath(jfol,"samparameters.txt")
if !overwrite
if !isfile(fpar)
@debug "$(Dates.now()) - Unable to find samparameters.txt, copying data from partemplate.txt ..."
cp(ftem,fpar,force=true);
end
else
@warn "$(Dates.now()) - Overwriting samparameters.txt in $jfol ..."
cp(ftem,fpar,force=true);
end
return fpar
end
function samparameterload()
@debug "$(Dates.now()) - Loading information on the output parameters from SAM."
return readdlm(samparametercopy(),',',comments=true);
end
function samparameterload(init::AbstractDict)
@debug "$(Dates.now()) - Loading information on the output parameters from SAM."
allparams = readdlm(samparametercopy(),',',comments=true);
@debug "$(Dates.now()) - Filtering out for parameters in the $(init["modulename"]) module."
parmods = allparams[:,1]; return allparams[(parmods.==init["moduletype"]),:];
end
function samparameterdisp(parlist::AbstractArray,init::AbstractDict)
@info "$(Dates.now()) - The following variables are offered in the $(init["modulename"]) module:"
for ii = 1 : size(parlist,1); @info "$(Dates.now()) - $(ii)) $(parlist[ii,3])" end
end
function samparameteradd(fadd::AbstractString)
if !isfile(fadd); error("$(Dates.now()) - The file $(fadd) does not exist."); end
ainfo = readdlm(fadd,',',comments=true); aparID = ainfo[:,2]; nadd = length(aparID);
for iadd = 1 : nadd
samparameteradd(
modID=ainfo[iadd,1],parID=ainfo[iadd,2],
full=ainfo[iadd,3],unit=ainfo[iadd,4],throw=false
);
end
end
function samparameteradd(;
modID::AbstractString, parID::AbstractString,
full::AbstractString, unit::AbstractString,
throw::Bool=true
)
fpar = samparametercopy(); pinfo = samparameterload(); eparID = pinfo[:,2];
if sum(eparID.==parID) > 0
if throw
error("$(Dates.now()) - Parameter ID already exists. Please choose a new parID.")
else
@info "$(Dates.now()) - $(parID) has already been added to samparameters.txt"
end
else
open(fpar,"a") do io
            writedlm(io,[modID parID full unit],',')
end
end
end
# Initialization
function sammodule(moduleID::AbstractString,init::AbstractDict)
smod = Dict{AbstractString,Any}()
smod["moduletype"] = moduleID;
if moduleID == "d2D"; smod["modulename"] = "dry 2D";
elseif moduleID == "r2D"; smod["modulename"] = "radiation 2D";
elseif moduleID == "m2D"; smod["modulename"] = "moist 2D";
elseif moduleID == "s3D"; smod["modulename"] = "general 3D";
elseif moduleID == "c2D"; smod["modulename"] = "calc 2D";
elseif moduleID == "c3D"; smod["modulename"] = "calc 3D";
end
if occursin("2D",moduleID)
@debug "$(Dates.now()) - A 2D module was selected, and therefore we will save '2D' into the parameter level Dictionary."
smod["levels"] = ["2D"];
else
@debug "$(Dates.now()) - A pressure module was selected, and therefore all available pressure levels will be saved into the parameter Dictionary."
smod["levels"] = init["p"]
end
smod["x"] = init["x"]; smod["y"] = init["y"]; smod["z"] = init["z"];
smod["size"] = init["size"];
return smod
end
function samparameter(
parameterID::AbstractString, smod::AbstractDict;
zheight::Real
)
parlist = samparameterload(smod); mtype = smod["moduletype"];
if sum(parlist[:,2] .== parameterID) == 0
error("$(Dates.now()) - Invalid parameter choice for \"$(uppercase(mtype))\". Call queryspar(modID=$(mtype),parID=$(parameterID)) for more information.")
else
ID = (parlist[:,2] .== parameterID);
end
parinfo = parlist[ID,:];
@info "$(Dates.now()) - samTools will analyze $(parinfo[3]) data."
if occursin("2D",mtype)
if zheight != 0
@warn "$(Dates.now()) - You asked to analyze data at a vertical height of $(zheight) m but have chosen a surface module variable. Setting vertical level to \"SFC\" by default"
end
return Dict(
"ID"=>parinfo[2],"IDnc"=>parinfo[3],
"name"=>parinfo[4],"unit"=>parinfo[5],
"level"=>"sfc"
);
else
if zheight != 0
lvl = samvert2lvl(zheight,smod)
@info "$(Dates.now()) - You have requested $(uppercase(parinfo[3])) data at the vertical height $(zheight) m. Based on the given vertical levels, this corresponds to z-level $lvl out of $(length(smod["levels"]))."
return Dict(
"ID"=>parinfo[2],"IDnc"=>parinfo[3],
"name"=>parinfo[4],"unit"=>parinfo[5],
"level"=>lvl
);
else
@warn "$(Dates.now()) - You asked to analyze $(uppercase(parinfo[3])) data, which is found as a 3D module but have not specified a level. Since SAM is a CRM and is likely run with high resolution, SAMTools.jl will analyse each vertical level independently for its RESORT and ANALYSIS functionalities."
return Dict(
"ID"=>parinfo[2],"IDnc"=>parinfo[3],
"name"=>parinfo[4],"unit"=>parinfo[5],
"level"=>"all"
);
end
end
end
function samtime(init)
stime = deepcopy(init);
delete!(stime,"x"); delete!(stime,"y"); delete!(stime,"z"); delete!(stime,"size");
return stime
end
function saminitialize(
init::AbstractDict;
modID::AbstractString, parID::AbstractString,
height::Real=0
)
if occursin("3D",modID) && pressure == 0 && height == 0
end
smod = sammodule(modID,init);
spar = samparameter(parID,smod,zheight=height);
stime = samtime(init);
return smod,spar,stime
end
| SAMTools | https://github.com/natgeo-wong/SAMTools.jl.git |
|
[
"MIT"
] | 0.0.1 | 4772ee936cf75d2d44eeae383677b2cb63c58899 | code | 3699 | function samresort2D(
smod::AbstractDict, spar::AbstractDict, stime::AbstractDict,
sroot::AbstractDict
)
nt = stime["ntime"]; it = 360; nfnc = floor(nt/it) + 1; tt = 0;
nx,ny,nz = smod["size"]; data = Array{Float32,4}(undef,nx,ny,it);
for inc = 1 : nfnc
        if inc == nfnc; it = mod(nt,it); data = Array{Float32,3}(undef,nx,ny,it) end
for ii = 1 : it; tt = tt + 1;
ds = Dataset(sroot["flist"][tt])
data[:,:,ii] .= ds[spar["IDnc"]][:,:,1]
close(ds)
end
samresortsave(data,[inc,it,0],smod,spar,stime,sroot)
end
end
function samresort3D(
smod::AbstractDict, spar::AbstractDict, stime::AbstractDict,
sroot::AbstractDict
)
nt = stime["ntime"]; it = 360; nfnc = floor(nt/it) + 1; tt = 0;
nx,ny,nz = smod["size"]; data = Array{Float32,4}(undef,nx,ny,it);
lvl = spar["level"]; if lvl == "all"; lvl = collect(1:nz) end
    for ilvl in lvl, inc = 1 : nfnc
        if inc == 1; it = 360; tt = 0; data = Array{Float32,3}(undef,nx,ny,it) end
        if inc == nfnc; it = mod(nt,it); data = Array{Float32,3}(undef,nx,ny,it) end
for ii = 1 : it; tt = tt + 1;
ds = Dataset(sroot["flist"][tt])
data[:,:,ii] .= ds[spar["IDnc"]][:,:,ilvl,1]
close(ds)
end
samresortsave(data,[inc,it,ilvl],smod,spar,stime,sroot)
end
end
function samresortsave(
data::Union{Array{<:Real,3},Array{<:Real,4}}, runinfo::AbstractArray,
smod::AbstractDict, spar::AbstractDict, stime::AbstractDict,
sroot::AbstractDict
)
inc,it,ilvl = runinfo; mtype = smod["moduletype"]
    fnc = samrawname(spar,sroot,irun=inc,ilvl=ilvl);
    if isfile(fnc)
        @info "$(Dates.now()) - Stale NetCDF file $(fnc) detected. Overwriting ..."
        rm(fnc);
end
ds = NCDataset(fnc,"c",attrib = Dict(
"Conventions" => "CF-1.6",
"Date Created" => "$(Dates.now())"
))
scale,offset = samncoffsetscale(data);
ds.dim["x"] = smod["size"][1];
ds.dim["y"] = smod["size"][2];
if occursin("2D",mtype); ds.dim["z"] = 1; end
ds.dim["t"] = length(it)
ncx = defVar(ds,"x",Int16,("x",),attrib = Dict(
"units" => "km",
"long_name" => "X",
))
ncy = defVar(ds,"y",Int16,("y",),attrib = Dict(
"units" => "km",
"long_name" => "Y",
))
if occursin("2D",mtype)
ncz = defVar(ds,"z",Int16,("z",),attrib = Dict(
"units" => "km",
"long_name" => "Z",
"level" => ilvl
))
end
nct = defVar(ds,"t",Float64,("t",),attrib = Dict(
"units" => "days since 0000-00-00 00:00:00.0",
"long_name" => "time",
"calendar" => "no_calendar",
))
ncv = defVar(ds,spar["ID"],Int16,("x","y","t"),attrib = Dict(
"scale_factor" => scale,
"add_offset" => offset,
"_FillValue" => Int16(-32767),
"missing_value" => Int16(-32767),
"units" => spar["unit"],
"long_name" => spar["name"],
))
ncx[:] = smod["x"]
ncy[:] = smod["y"]
if occursin("2D",mtype); ncz[:] = smod["z"][ilvl] end
    nct[:] = stime["t"][(inc-1)*360 .+ (1:it)]
ncv[:] = data;
close(ds)
end
function samresort(
smod::AbstractDict, spar::AbstractDict, stime::AbstractDict,
sroot::AbstractDict
)
if occursin("2D",smod["moduletype"]);
samresort2D(smod,spar,stime,sroot)
else; samresort3D(smod,spar,stime,sroot)
end
end
function samresort(
init::AbstractDict, sroot::AbstractString;
modID::AbstractString, parID::AbstractString,
height::Real=0
)
smod,spar,stime = saminitialize(init,modID=modID,parID=parID,height=height)
samresort(smod,spar,stime,sroot)
end
| SAMTools | https://github.com/natgeo-wong/SAMTools.jl.git |
|
[
"MIT"
] | 0.0.1 | 4772ee936cf75d2d44eeae383677b2cb63c58899 | code | 4115 | """
This file initializes the SAMTools module by defining the directories relevant to the particular SAM experiment being resorted and analysed by SAMTools.
"""
function samspin(sroot::AbstractDict)
efol = joinpath(sroot["root"],"raw",sroot["experiment"]);
sfol = joinpath(sroot["root"],"raw",sroot["experiment"],"spinup");
if isdir(sfol)
@info "$(Dates.now()) - A spinup configuration folder has been identified in $(efol)."
return true;
else
@info "$(Dates.now()) - No spinup configuration folder was identified in $(efol)."
return false;
end
end
function samwelcome()
ftext = joinpath(@__DIR__,"../extra/welcome.txt");
lines = readlines(ftext); count = 0; nl = length(lines);
for l in lines; count += 1;
if any(count .== [1,2]); print(Crayon(bold=true),"$l\n");
elseif count == nl; print(Crayon(bold=false),"$l\n\n");
else; print(Crayon(bold=false),"$l\n");
end
end
end
function samroot(;
tmppath::AbstractString,
prjpath::AbstractString,
experiment::AbstractString="",
config::AbstractString,
fname::AbstractString
)
    sroot = Dict{AbstractString,Any}()
sroot["tmp"] = tmppath; sroot["root"] = prjpath;
sroot["raw"] = joinpath(prjpath,"raw",experiment,config)
sroot["ana"] = joinpath(prjpath,"ana",experiment,config)
sroot["experiment"] = experiment; sroot["configuration"] = config;
sroot["spinup"] = ""; sroot["control"] = ""; sroot["ncname"] = fname;
@info """$(Dates.now()) - $(BOLD("PROJECT DETAILS:"))
$(BOLD("Temporary Directory:")) $tmppath
$(BOLD("Project Directory:")) $prjpath
$(BOLD("File Prefix:")) $fname
$(BOLD("Experiment | Configuration:")) $experiment | $config
"""
"$(Dates.now()) - $(BOLD("PROJECT DETAILS:"))\n $(BOLD("Temporary Directory:")) $tmppath\n $(BOLD("Root Directory:")) $prjpath\n $(BOLD("Experiment:")) $experiment\n $(BOLD("Configuration:")) $config"
@info "$(Dates.now()) - SAM RAW DATA directory: $(sroot["raw"])."
@info "$(Dates.now()) - SAM ANALYSIS directory: $(sroot["ana"])."
if samspin(sroot)
sroot["spinup"] = replace(sroot["raw"],config=>"spinup")
sroot["control"] = replace(sroot["raw"],config=>"control")
@info """$(Dates.now()) - $(BOLD("SPINUP DIRECTORIES:"))
$(BOLD("Spinup Directory:")) $(sroot["spinup"])
$(BOLD("Control Directory:")) $(sroot["control"])
"""
end
return sroot
end
function samstartup(;
tmppath::AbstractString,
prjpath::AbstractString,
experiment::AbstractString="",
config::AbstractString,
fname::AbstractString,
welcome::Bool=true
)
if welcome; samwelcome() end
sroot = samroot(;
tmppath=tmppath,prjpath=prjpath,
experiment=experiment,config=config,
fname=fname
)
init,fnc = retrievename(fname,tmppath); sroot["flist"] = fnc;
ds = Dataset(fnc[1]);
init["x"] = ds["x"][:]; init["y"] = ds["y"][:];
init["z"] = ds["z"][:]; init["t"] = ds["time"][:]
init["size"] = [length(init["x"]),length(init["y"]),length(init["z"])]
close(ds);
nz = init["size"][3]; nfnc = length(fnc); nruns = mod(nfnc,360)+1;
p = zeros(nz,360*nruns)
for inc in 1 : nfnc; ds = Dataset(fnc[inc]); p[:,inc] = ds["p"][:]; close(ds) end
    p = reshape(p,nz,360,:)*100; scale,offset = samncoffsetscale(p);
ds = Dataset("p.nc","c")
ds.dim["z"] = nz; ds.dim["t"] = 360; ds.dim["runs"] = nruns
ncp = defVar(ds,"p",Int16,("z","t","nruns"),attrib = Dict(
"units" => "Pa",
"long_name" => "Pressure",
"scale_factor" => scale,
"add_offset" => offset,
"_FillValue" => Int16(-32767),
"missing_value" => Int16(-32767),
))
ncp[:] = p
close(ds)
return init,sroot
end
function retrievename(fname::AbstractString,tmppath::AbstractString)
init = Dict{AbstractString,Any}()
fnc = glob("$(fname)*.nc",tmppath);
nfid = length(fnc); init["ntime"] = nfid
return init,fnc
end
| SAMTools | https://github.com/natgeo-wong/SAMTools.jl.git |
|
[
"MIT"
] | 0.0.1 | 4772ee936cf75d2d44eeae383677b2cb63c58899 | code | 89 | using SAMTools
using Test
@testset "SAMTools.jl" begin
# Write your tests here.
end
| SAMTools | https://github.com/natgeo-wong/SAMTools.jl.git |
|
[
"MIT"
] | 0.0.1 | 4772ee936cf75d2d44eeae383677b2cb63c58899 | docs | 1313 | # **<div align="center">SAMTools.jl</div>**
<p align="center">
<a href="https://www.repostatus.org/#active">
<img alt="Repo Status" src="https://www.repostatus.org/badges/latest/active.svg?style=flat-square" />
</a>
<a href="https://travis-ci.com/github/natgeo-wong/SAMTools.jl">
<img alt="Travis CI" src="https://travis-ci.com/natgeo-wong/SAMTools.jl.svg?branch=master&style=flat-square">
</a>
<a href="https://github.com/natgeo-wong/SAMTools.jl/actions?query=workflow%3ADocumentation">
<img alt="Documentation Build" src="https://github.com/natgeo-wong/SAMTools.jl/workflows/Documentation/badge.svg">
</a>
<br>
<a href="https://mit-license.org">
<img alt="MIT License" src="https://img.shields.io/badge/License-MIT-blue.svg?style=flat-square">
</a>
<img alt="Latest Release" src="https://img.shields.io/github/v/release/natgeo-wong/SAMTools.jl">
<a href="https://natgeo-wong.github.io/SAMTools.jl/stable/">
<img alt="Latest Documentation" src="https://img.shields.io/badge/docs-stable-blue.svg?style=flat-square">
</a>
<a href="https://natgeo-wong.github.io/SAMTools.jl/dev/">
<img alt="Latest Documentation" src="https://img.shields.io/badge/docs-latest-blue.svg?style=flat-square">
</a>
</p>
**Created By:** Nathanael Wong ([email protected])
| SAMTools | https://github.com/natgeo-wong/SAMTools.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 2999 | using Documenter
#using DocumenterLaTeX
using ProbabilisticCircuits
using Literate
#######################################
# 1/ generate the top-level README.md
#######################################
source_dir = "$(@__DIR__)/src"
"replace script includes with file content in Literate code"
function replace_includes(str)
pat = r"include\(\"(.*)\"\)"
m = match(pat, str)
while !isnothing(m)
str = replace(str, "$(m.match)" =>
read("$source_dir/$(m[1])", String))
m = match(pat, str)
end
str
end
"hide `#plot` lines in Literate code"
function hide_plots(str)
str = replace(str, r"#plot (.)*[\n\r]" => "")
replace(str, r"#!plot (.*)[\n\r]" => s"\g<1>\n")
end
"show `#plot` lines in Literate code"
function show_plots(str)
str = replace(str, r"#!plot (.)*[\n\r]" => "")
replace(str, r"#plot (.*)[\n\r]" => s"\g<1>\n")
end
Literate.markdown("$source_dir/README.jl", "$(@__DIR__)/../"; documenter=false, credit=false, execute=true,
preprocess = hide_plots ∘ replace_includes)
# The DOCSARGS environment variable can be used to pass additional arguments to make.jl.
# This is useful on CI, if you need to change the behavior of the build slightly but you
# can not change the .travis.yml or make.jl scripts any more (e.g. for a tag build).
if haskey(ENV, "DOCSARGS")
for arg in split(ENV["DOCSARGS"])
(arg in ARGS) || push!(ARGS, arg)
end
end
const pages = [
"Home" => "index.md",
"Manual" => [
"manual/demo.md",
"manual/queries.md",
"manual/learning.md",
"manual/gpu.md"
],
"API" => [
"api/common.md",
"api/input_dists.md",
"api/probabilistic_circuits.md",
"api/types.md"
],
"Installation" => "installation.md",
];
const format = if ("pdf" in ARGS)
LaTeX(platform = "native")
else
Documenter.HTML(
# Use clean URLs, unless built as a "local" build
prettyurls = !("local" in ARGS),
canonical = "https://Tractables.github.io/ProbabilisticCircuits.jl/stable/",
assets = ["assets/favicon.ico"],
analytics = "UA-136089579-2",
highlights = ["yaml"],
collapselevel = 1,
)
end
makedocs(
sitename = "ProbabilisticCircuits.jl",
format = format,
doctest = true,
modules = [ProbabilisticCircuits],
pages = pages,
linkcheck_ignore = [
# We'll ignore links that point to GitHub's edit pages, as they redirect to the
# login screen and cause a warning:
r"https://github.com/([A-Za-z0-9_.-]+)/([A-Za-z0-9_.-]+)/edit(.*)"
]
)
# Documenter can also automatically deploy documentation to gh-pages.
# See "Hosting Documentation" and deploydocs() in the Documenter manual
# for more information.
deploydocs(
target = "build",
repo = "github.com/Tractables/ProbabilisticCircuits.jl.git",
branch = "gh-pages",
devbranch = "master",
devurl = "dev",
versions = ["stable" => "v^", "v#.#"],
) | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 864 | using Literate
source_dir = "$(@__DIR__)/src"
"replace script includes with file content in Literate code"
function replace_includes(str)
pat = r"include\(\"(.*)\"\)"
m = match(pat, str)
while !isnothing(m)
str = replace(str, "$(m.match)" =>
read("$source_dir/$(m[1])", String))
m = match(pat, str)
end
str
end
"hide `#plot` lines in Literate code"
function hide_plots(str)
str = replace(str, r"#plot (.)*[\n\r]" => "")
replace(str, r"#!plot (.*)[\n\r]" => s"\g<1>\n")
end
"show `#plot` lines in Literate code"
function show_plots(str)
str = replace(str, r"#!plot (.)*[\n\r]" => "")
replace(str, r"#plot (.*)[\n\r]" => s"\g<1>\n")
end
Literate.markdown("$source_dir/README.jl", "$(@__DIR__)/../"; documenter=false, credit=false, execute=true,
preprocess = hide_plots ∘ replace_includes) | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 229 | using LazyArtifacts
const zoo_version = "/Circuit-Model-Zoo-0.1.6"
zoo_psdd_file(name) =
artifact"circuit_model_zoo" * zoo_version * "/psdds/$name"
zoo_psdd(name) =
read(zoo_psdd_file(name), ProbCircuit, PsddFormat())
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 1522 | #src Generate README.md by running `using Literate; Literate.markdown("docs/src/README.jl", "."; documenter=false, credit=false, execute=true)`
# <img align="right" width="180px" src="https://avatars.githubusercontent.com/u/58918144?s=200&v=4">
# <!-- DO NOT EDIT README.md directly, instead edit docs/README.jl and generate the markdown-->
# # Probabilistic<wbr>Circuits<wbr>.jl
# [](https://github.com/Tractables/ProbabilisticCircuits.jl/actions?query=workflow%3A%22Unit+Tests%22+branch%3Amaster) [](https://codecov.io/gh/Tractables/ProbabilisticCircuits.jl) [](https://Tractables.github.io/ProbabilisticCircuits.jl/stable) [](https://Tractables.github.io/ProbabilisticCircuits.jl/dev)
# This package provides functionalities for learning/constructing probabilistic circuits and using them to compute various probabilistic queries. It is part of the [Juice package](https://github.com/Tractables) (Julia Circuit Empanada).
# ## Testing
# To make sure everything is working correctly, you can run our test suite as follows. The first time you run the tests will trigger a few slow downloads of various test resources.
# ```bash
# julia --color=yes -e 'using Pkg; Pkg.test("ProbabilisticCircuits")'
# ```
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 6556 | using CUDA #hide
# Assuming that the ProbabilisticCircuits Julia package has been installed with `julia -e 'using Pkg; Pkg.add("ProbabilisticCircuits")'`, we can start using it as follows.
using ProbabilisticCircuits
# ### Reasoning with manually constructed circuits
# We begin by creating three positive literals (boolean variables) and manually construct a probabilistic circuit that encodes a Naive Bayes (NB) distribution with the following form: `Pr(rain, rainbow, wet) = Pr(rain) * Pr(rainbow|rain) * Pr(wet|rain)`.
rain, rainbow, wet = [ProbabilisticCircuits.PlainInputNode(i, Indicator(true)) for i=1:3]
rain_, rainbow_, wet_ = [ProbabilisticCircuits.PlainInputNode(i, Indicator(false)) for i=1:3]
rain_pos = (0.7 * rainbow + 0.3 * (rainbow_)) * (0.9 * wet + 0.1 * (wet_)) # Pr(rainbow|rain=1) * Pr(wet|rain=1)
rain_neg = (0.2 * rainbow + 0.8 * (rainbow_)) * (0.3 * wet + 0.7 * (wet_)) # Pr(rainbow|rain=0) * Pr(wet|rain=0)
circuit = 0.4 * (rain * rain_pos) + 0.6 * ((rain_) * rain_neg); # Pr(rain, rainbow, wet)
# Just like any probability distribution, we can evaluate the probabilistic circuit on various inputs. Note that since log probabilities are used in probabilistic circuits for numerical stability, we need to take exponent of the evaluation output to get the probabilities.
exp(loglikelihoods(circuit, [[true, true, true]]; batch_size = 1)[1]) # Pr(rain=1, rainbow=1, wet=1)
#-
exp(loglikelihoods(circuit, [[true, false, false]]; batch_size = 1)[1]) # Pr(rain=1, rainbow=0, wet=0)
# From the above examples, we see that it is less likely to rain if we do not see rainbows and the streets are not wet.
# Decomposability allows us to compute marginal probabilities given partial evidence efficiently (linear time w.r.t. the circuit size). For example, we want to ask the probability of observing rainbows. That is, we want to marginalize out the variables rain and wet. This can be done by evaluating the circuit with partial evidence:
exp(loglikelihoods(circuit, [[missing, true, missing]]; batch_size = 1)[1])
# Being able to compute marginals immediately offers the ability to compute conditional probabilities. For example, to compute the probability of raining given rainbow=1 and wet=1, we simply take the quotient of Pr(rain=1, rainbow=1, wet=1) and Pr(rainbow=1, wet=1):
exp(circuit(true, true, true) - circuit(missing, true, true)) # Pr(rain=1|rainbow=1, wet=1)
# If we are additionally supplied with the structural property *determinism*, we can answer some more advanced queries. For example, we can compute the maximum a posteriori (MAP) query of the distribution:
assignments, log_prob = MAP(circuit, [missing, missing, missing]; batch_size=1)
print("The MAP assignment of the circuit is (rain=$(assignments[1]), rainbow=$(assignments[2]), wet=$(assignments[3])), with probability $(exp(log_prob)).")
# Besides the above examples, ProbabilisticCircuits.jl provides functionalities for a wide variety of queries, which are detailed in [this manual](https://Tractables.github.io/ProbabilisticCircuits.jl/stable/manual/queries/).
# ### Building complex circuit structures
# ProbabilisticCircuits.jl provides tools to compile classic Probabilistic Graphical Models (PGMs) and Tractable Probabilistic Models (TPMs) into probabilistic circuits efficiently. For example, we can compile a factor graph (FG) into a probabilistic circuit with one line of code:
fg = fromUAI(zoo_fg_file("asia.uai")) # Load example factor graph
fg_circuit = ProbCircuit(compile_factor_graph(fg)[1]) # Compile the FG to a PC
print("`fg_circuit` contains $(num_edges(fg_circuit)) edges and $(num_parameters(fg_circuit)) parameters.")
# ### Learning probabilistic circuits from data
# ProbabilisticCircuits.jl offers various parameter learning and structure learning algorithms. It further support mini-batch learning on both CPUs and GPUs, which makes learning large models from large datasets very efficient.
# We use the binarized MNIST dataset to demonstrate example probabilistic circuit learning functionalities.
train_data, valid_data, test_data = twenty_datasets("binarized_mnist");
# We start with learning the parameters of a *decomposable* and *deterministic* probabilistic circuit. We first load the structure of the circuit from file:
circuit = zoo_psdd("mnist.psdd")
print("The loaded circuit contains $(num_edges(circuit)) edges and $(num_parameters(circuit)) parameters.")
#-
print("Structural properties of the circuit: decomposability: $(isdecomposable(circuit)), determinism: $(isdeterministic(circuit)).")
# Given that the circuit is decomposable and deterministic, the maximum likelihood estimation (MLE) of its parameters is in closed-form. That is, we can learn the MLE parameters deterministically:
estimate_parameters!(circuit, train_data; pseudocount = 0.1) #hide
t = @elapsed estimate_parameters!(circuit, train_data; pseudocount = 0.1)
print("Learning the parameters on a CPU took $(t) seconds.")
# Optionally, we can use GPUs to speedup the learning process:
if CUDA.functional() #hide
train_data = to_gpu(train_data)
estimate_parameters!(circuit, train_data; pseudocount = 0.1) #hide
t = @elapsed estimate_parameters!(circuit, train_data; pseudocount = 0.1)
print("Learning the parameters on a GPU took $(t) seconds.")
end #hide
# Note that the insignificant speedup is due to the fact that the circuit is too small to make full use of the GPU. For large circuits the speedup could be at least ~10x.
# After the learning process, we can evaluate the model on the validation/test dataset. Here we use average log-likelihood per sample as the metric (we again utilize GPUs for efficiency):
avg_ll = log_likelihood_avg(circuit, test_data)
print("The average test data log-likelihood is $(avg_ll).")
# Besides `estimate_parameters`, ProbabilisticCircuits.jl offers iterative parameter learning algorithms such as Expectation-Maximization (EM) (i.e., `estimate_parameters_em!`) and Stochastic Gradient Descent (SGD) (i.e., `estimate_parameters_sgd!`).
# ProbabilisticCircuits.jl also offers functionalities for learning the circuit structure and parameters simultaneously. For example, the Strudel structure learning algorithm is implemented natively in the package, and can be used with a few lines of code:
circuit_strudel = learn_circuit(train_data; maxiter = 100, verbose = false)
avg_ll = log_likelihood_avg(circuit_strudel, test_data)
print("The learned circuit contains $(num_edges(circuit)) edges and $(num_parameters(circuit)) parameters.\n")
print("The average test data log-likelihood is $(avg_ll).") | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 3702 | using ProbabilisticCircuits
using ProbabilisticCircuits: BitsProbCircuit, CuBitsProbCircuit, loglikelihoods, full_batch_em, mini_batch_em
using MLDatasets
using CUDA
using Images
# device!(collect(devices())[2])
function mnist_cpu()
train_int = transpose(reshape(MNIST.traintensor(UInt8), 28*28, :));
test_int = transpose(reshape(MNIST.testtensor(UInt8), 28*28, :));
train_cpu = UInt32.(train_int) .+ one(UInt32);
test_cpu = UInt32.(test_int) .+ one(UInt32);
train_cpu, test_cpu
end
function mnist_gpu()
cu.(mnist_cpu())
end
function run(; batch_size = 256, num_epochs1 = 1, num_epochs2 = 1, num_epochs3 = 20,
pseudocount = 0.01, param_inertia1 = 0.2, param_inertia2 = 0.9, param_inertia3 = 0.9)
train, test = mnist_cpu();
train_gpu, test_gpu = mnist_gpu();
@info "Generating RAT SPN...."
num_nodes_root = 1
num_nodes_region = 20
num_nodes_leaf = 20
rg_depth = 4
rg_replicas = 20
input_func = RAT_InputFunc(Binomial, 256);
# input_func = RAT_InputFunc(Categorical, 256);
# input_func(var) =
# summate([InputNode(var, Binomial(256)) for i=1:2])
@show num_nodes_region
@show num_nodes_leaf
@show rg_depth
@show rg_replicas
num_features = size(train, 2)
@time pc = RAT(num_features; input_func, num_nodes_region, num_nodes_leaf, rg_depth, rg_replicas, balance_childs_parents=false);
init_parameters(pc; perturbation = 0.4);
@time println("Number of free parameters: $(num_parameters(pc))")
@info "Moving circuit to GPU... "
CUDA.@time bpc = CuBitsProbCircuit(BitsProbCircuit(pc));
@show length(bpc.nodes)
@info "Mini EM 1"
softness = 0
@time mini_batch_em(bpc, train_gpu, num_epochs1; batch_size, pseudocount,
softness, param_inertia = param_inertia1, param_inertia_end = param_inertia2)
ll1 = loglikelihood(bpc, test_gpu; batch_size)
println("test LL: $(ll1)")
@info "Mini EM 2"
@time mini_batch_em(bpc, train_gpu, num_epochs2; batch_size, pseudocount,
softness, param_inertia = param_inertia2, param_inertia_end = param_inertia3)
ll2 = loglikelihood(bpc, test_gpu; batch_size)
println("test LL: $(ll2)")
@info "Full EM"
for iter=1:num_epochs3
@info "Iter $iter"
@time full_batch_em(bpc, train_gpu, 5; batch_size, pseudocount, softness)
ll3 = loglikelihood(bpc, test_gpu; batch_size)
println("test LL: $(ll3)")
do_sample(bpc, iter)
end
@info "Update parameters pbc -> pc"
@time ProbabilisticCircuits.update_parameters(bpc);
return pc, bpc
end
function do_sample(cur_pc, iter)
file_name = "samples/rat_samples_$(iter).png"
@info "Sampling $(file_name)"
if cur_pc isa CuBitsProbCircuit
sms = sample(cur_pc, 100, 28*28, [UInt32]);
elseif cur_pc isa ProbCircuit
sms = sample(cur_pc, 100, [UInt32]);
end
do_img(i) = begin
img = Array{Float32}(sms[i,1,1:28*28]) ./ 256.0
img = transpose(reshape(img, (28, 28)))
imresize(colorview(Gray, img), ratio=4)
end
@time begin
arr = [do_img(i) for i=1:size(sms, 1)]
imgs = mosaicview(arr, fillvalue=1, ncol=10, npad=4)
save(file_name, imgs)
end
end
function try_map(pc, bpc)
@info "MAP"
train_gpu, _ = mnist_gpu();
data = Array{Union{Missing, UInt32}}(train_gpu[1:10, :]);
data[:, 1:400] .= missing;
data_gpu = cu(data);
# @time MAP(pc, data; batch_size=10)
MAP(bpc, data_gpu; batch_size=10)
end
pc, bpc = run(; batch_size = 1000, num_epochs1 = 10, num_epochs2 = 10, num_epochs3 = 100);
# do_sample(bpc, 999)
# try_map(pc, bpc) | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 1951 | using Pkg; Pkg.activate(@__DIR__)
using ProbabilisticCircuits
using ProbabilisticCircuits: CuBitsProbCircuit, loglikelihood, full_batch_em, mini_batch_em
using MLDatasets
using CUDA
function mnist_cpu()
train_int = transpose(reshape(MNIST.traintensor(UInt8), 28*28, :));
test_int = transpose(reshape(MNIST.testtensor(UInt8), 28*28, :));
function bitsfeatures(data_int)
data_bits = zeros(Bool, size(data_int,1), 28*28*8)
for ex = 1:size(data_int,1), pix = 1:size(data_int,2)
x = data_int[ex,pix]
for b = 0:7
if (x & (one(UInt8) << b)) != zero(UInt8)
data_bits[ex, (pix-1)*8+b+1] = true
end
end
end
data_bits
end
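    # each UInt8 pixel is expanded into 8 Boolean bit-plane features,
    # giving 28*28*8 binary columns per image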
train_cpu = bitsfeatures(train_int);
test_cpu = bitsfeatures(test_int);
train_cpu, test_cpu
end
function mnist_gpu()
cu.(mnist_cpu())
end
function run()
train_gpu, test_gpu = mnist_gpu()
latents = 120
pseudocount = 0.01
println("Generating HCLT structure with $latents latents... ");
@time pc = hclt(train_gpu, latents; pseudocount, input_type = Literal);
init_parameters(pc; perturbation = 0.4);
println("Number of free parameters: $(num_parameters(pc))")
print("Moving circuit to GPU... ")
CUDA.@time bpc = CuBitsProbCircuit(pc)
batch_size = 512
pseudocount = .005
softness = 0
print("First round of minibatch EM... ")
CUDA.@time mini_batch_em(bpc, train_gpu, 400; batch_size, pseudocount,
softness, param_inertia = 0.01, param_inertia_end = 0.95)
CUDA.@time mini_batch_em(bpc, train_gpu, 100; batch_size, pseudocount,
softness, param_inertia = 0.95, param_inertia_end = 0.999)
CUDA.@time full_batch_em(bpc, train_gpu, 10; batch_size, pseudocount, softness)
print("Update parameters... ")
@time ProbabilisticCircuits.update_parameters(bpc)
pc
end
run()
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 3354 | using CUDA
using ProbabilisticCircuits
using ProbabilisticCircuits: BitsProbCircuit, CuBitsProbCircuit, loglikelihoods, full_batch_em, mini_batch_em
using MLDatasets
using Images
using Plots
# device!(collect(devices())[2])
function mnist_cpu()
train_cpu = collect(transpose(reshape(MNIST.traintensor(UInt8), 28*28, :)))
test_cpu = collect(transpose(reshape(MNIST.testtensor(UInt8), 28*28, :)))
train_cpu, test_cpu
end
function mnist_gpu()
cu.(mnist_cpu())
end
function truncate(data::Matrix; bits)
data .÷ 2^bits
end
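# e.g. bits = 4 maps raw pixel values 0..255 onto 16 coarse bins 0..15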
function run(; batch_size = 512, num_epochs1 = 1, num_epochs2 = 1, num_epochs3 = 20,
pseudocount = 0.01, latents = 32, param_inertia1 = 0.2, param_inertia2 = 0.9, param_inertia3 = 0.95)
train, test = mnist_cpu()
train_gpu, test_gpu = mnist_gpu()
# train_gpu = train_gpu[1:1024, :]
trunc_train = cu(truncate(train; bits = 4))
println("Generating HCLT structure with $latents latents... ");
@time pc = hclt(trunc_train[1:5000,:], latents; num_cats = 256, pseudocount = 0.1, input_type = Binomial);
init_parameters(pc; perturbation = 0.4);
println("Number of free parameters: $(num_parameters(pc))")
@info "Moving circuit to GPU... "
CUDA.@time bpc = CuBitsProbCircuit(pc)
@show length(bpc.nodes)
softness = 0
@time mini_batch_em(bpc, train_gpu, num_epochs1; batch_size, pseudocount,
softness, param_inertia = param_inertia1, param_inertia_end = param_inertia2, debug = false)
ll1 = loglikelihood(bpc, test_gpu; batch_size)
println("test LL: $(ll1)")
@time mini_batch_em(bpc, train_gpu, num_epochs2; batch_size, pseudocount,
softness, param_inertia = param_inertia2, param_inertia_end = param_inertia3)
ll2 = loglikelihood(bpc, test_gpu; batch_size)
println("test LL: $(ll2)")
for iter=1:num_epochs3
@info "Iter $iter"
@time full_batch_em(bpc, train_gpu, 5; batch_size, pseudocount, softness)
ll3 = loglikelihood(bpc, test_gpu; batch_size)
println("test LL: $(ll3)")
@time do_sample(bpc, iter)
end
@info "update parameters bpc => pc"
@time ProbabilisticCircuits.update_parameters(bpc);
pc, bpc
end
function do_sample(cur_pc, iteration)
@info "Sample"
if cur_pc isa CuBitsProbCircuit
sms = sample(cur_pc, 100, 28*28,[UInt32]);
elseif cur_pc isa ProbCircuit
sms = sample(cur_pc, 100, [UInt32]);
end
do_img(i) = begin
img = Array{Float32}(sms[i,1,1:28*28]) ./ 256.0
img = transpose(reshape(img, (28, 28)))
imresize(colorview(Gray, img), ratio=4)
end
arr = [do_img(i) for i=1:size(sms, 1)]
imgs = mosaicview(arr, fillvalue=1, ncol=10, npad=4)
save("samples/samples_hclt_$iteration.png", imgs);
end
function try_map()
@info "MAP"
train_gpu, _ = mnist_gpu();
data = Array{Union{Missing, UInt32}}(train_gpu[1:10, :]);
data[:, 1:100] .= missing;
data_gpu = cu(data);
# @time MAP(pc, data; batch_size=10)
@time MAP(bpc, data_gpu; batch_size=10)
end
pc, bpc = run(; latents = 16, num_epochs1 = 0, num_epochs2 = 0, num_epochs3=2);
# arr = [dist(n).p for n in inputnodes(pc) if 300 <first(randvars(n)) <400];
# Plots.histogram(arr, normed=true, bins=50)
# do_sample(bpc, 999);
# do_sample(pc, 999);
try_map()
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 2052 | using CUDA
using ProbabilisticCircuits
using ProbabilisticCircuits: BitsProbCircuit, CuBitsProbCircuit, loglikelihoods, full_batch_em, mini_batch_em
using MLDatasets
function mnist_cpu()
train_cpu = collect(transpose(reshape(MNIST.traintensor(UInt8), 28*28, :)))
test_cpu = collect(transpose(reshape(MNIST.testtensor(UInt8), 28*28, :)))
train_cpu, test_cpu
end
function mnist_gpu()
cu.(mnist_cpu())
end
function truncate(data::Matrix; bits)
data .÷ 2^bits
end
function run(; batch_size = 512, num_epochs1 = 100, num_epochs2 = 100, num_epochs3 = 20,
pseudocount = 0.1, latents = 32, param_inertia1 = 0.2, param_inertia2 = 0.9, param_inertia3 = 0.95)
train, test = mnist_cpu()
train_gpu, test_gpu = mnist_gpu()
trunc_train = cu(truncate(train; bits = 4))
println("Generating HCLT structure with $latents latents... ");
@time pc = hclt(trunc_train[1:5000,:], latents; num_cats = 256, pseudocount = 0.1, input_type = Categorical);
init_parameters(pc; perturbation = 0.4);
println("Number of free parameters: $(num_parameters(pc))")
print("Moving circuit to GPU... ")
CUDA.@time bpc = CuBitsProbCircuit(pc)
softness = 0
@time mini_batch_em(bpc, train_gpu, num_epochs1; batch_size, pseudocount,
softness, param_inertia = param_inertia1, param_inertia_end = param_inertia2, debug = false)
ll1 = loglikelihood(bpc, test_gpu; batch_size)
println("test LL: $(ll1)")
@time mini_batch_em(bpc, train_gpu, num_epochs2; batch_size, pseudocount,
softness, param_inertia = param_inertia2, param_inertia_end = param_inertia3)
ll2 = loglikelihood(bpc, test_gpu; batch_size)
println("test LL: $(ll2)")
@time full_batch_em(bpc, train_gpu, num_epochs3; batch_size, pseudocount, softness)
ll3 = loglikelihood(bpc, test_gpu; batch_size)
println("test LL: $(ll3)")
print("update parameters")
@time ProbabilisticCircuits.update_parameters(bpc)
ll1, ll2, ll3, batch_size, pseudocount, latents
end
run() | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 3198 | using Distributed
using CUDA
if nprocs() - 1 < length(devices())
addprocs(length(devices()) - nprocs() + 1)
end
@everywhere using CUDA
# assign devices
asyncmap((zip(workers(), devices()))) do (p, d)
remotecall_wait(p) do
@info "Worker $p uses $d"
device!(d)
end
end
@everywhere using ProbabilisticCircuits
@everywhere using ProbabilisticCircuits: BitsProbCircuit, CuBitsProbCircuit, loglikelihoods, full_batch_em, mini_batch_em
@everywhere using MLDatasets
@everywhere function mnist_cpu()
train_cpu = collect(transpose(reshape(MNIST.traintensor(UInt8), 28*28, :)))
test_cpu = collect(transpose(reshape(MNIST.testtensor(UInt8), 28*28, :)))
train_cpu, test_cpu
end
@everywhere function mnist_gpu()
cu.(mnist_cpu())
end
@everywhere function truncate(data::Matrix; bits)
data .÷ 2^bits
end
@everywhere function run(; batch_size = 512, num_epochs1 = 100, num_epochs2 = 100, num_epochs3 = 20,
pseudocount = 0.1, latents = 32, param_inertia1 = 0.2, param_inertia2 = 0.9, param_inertia3 = 0.95)
train, test = mnist_cpu()
train_gpu, test_gpu = mnist_gpu()
trunc_train = cu(truncate(train; bits = 4))
println("Generating HCLT structure with $latents latents... ");
@time pc = hclt(trunc_train[1:5000,:], latents; num_cats = 256, pseudocount = 0.1, input_type = Categorical);
init_parameters(pc; perturbation = 0.4);
println("Number of free parameters: $(num_parameters(pc))")
print("Moving circuit to GPU... ")
CUDA.@time bpc = CuBitsProbCircuit(pc)
softness = 0
@time mini_batch_em(bpc, train_gpu, num_epochs1; batch_size, pseudocount,
softness, param_inertia = param_inertia1, param_inertia_end = param_inertia2, debug = false)
ll1 = loglikelihood(bpc, test_gpu; batch_size)
println("test LL: $(ll1)")
@time mini_batch_em(bpc, train_gpu, num_epochs2; batch_size, pseudocount,
softness, param_inertia = param_inertia2, param_inertia_end = param_inertia3)
ll2 = loglikelihood(bpc, test_gpu; batch_size)
println("test LL: $(ll2)")
@time full_batch_em(bpc, train_gpu, num_epochs3; batch_size, pseudocount, softness)
ll3 = loglikelihood(bpc, test_gpu; batch_size)
println("test LL: $(ll3)")
print("update parameters")
@time ProbabilisticCircuits.update_parameters(bpc)
ll1, ll2, ll3, batch_size, pseudocount, latents
end
experiments = begin
exps = []
for batch_size in [64, 256, 512]
for pseudocount in [0.1, 0.01]
for latents in [32, 48, 64, 128]
for param_inertia1 in [0.2, 0.5]
for param_inertia2 in [0.8, 0.9]
for param_inertia3 in [0.95, 0.98]
                            # record the full hyperparameter tuple, including the inertia schedule
                            push!(exps, (batch_size, pseudocount, latents, param_inertia1, param_inertia2, param_inertia3))
end
end
end
end
end
end
exps
end
results = pmap(experiments) do exper
    result = run(; batch_size = exper[1], pseudocount = exper[2], latents = exper[3],
        param_inertia1 = exper[4], param_inertia2 = exper[5], param_inertia3 = exper[6])
open("cat_hclt.log", "a+") do io
write(io, "$(result)\n")
end
result
end
for result in results
println(result)
end
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 857 | module ProbabilisticCircuits
using DirectedAcyclicGraphs
import DirectedAcyclicGraphs as DAGs # shorthand
# reexport from DAGs
export num_nodes, num_edges
include("nodes/abstract_nodes.jl")
include("nodes/input_distributions.jl")
include("nodes/indicator_dist.jl")
include("nodes/categorical_dist.jl")
include("nodes/binomial_dist.jl")
include("nodes/plain_nodes.jl")
include("bits_circuit.jl")
include("traversal.jl")
include("queries/likelihood.jl")
include("queries/likelihood_cpu.jl")
include("queries/map.jl")
include("queries/map_cpu.jl")
include("queries/sample.jl")
include("queries/sample_cpu.jl")
include("queries/flow.jl")
include("parameters/em.jl")
include("io/io.jl")
include("structures/hclts.jl")
include("structures/rat.jl")
end
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 21848 | using CUDA
export BitsProbCircuit, CuBitsProbCircuit, update_parameters
###############################################
# bits representation of nodes
###############################################
abstract type BitsNode end
struct BitsInput{D <: InputDist} <: BitsNode
variable::Var # for now assume single variable
dist::D
end
dist(n::BitsNode) = nothing
dist(n::BitsInput) = n.dist
function bits(in::PlainInputNode, heap)
vars = Var.(randvars(in))
bits_dist = bits(dist(in), heap)
BitsInput(vars..., bits_dist)
end
function bits_reuse(in::PlainInputNode, heap_start)
vars = Var.(randvars(in))
num_cats = num_categories(dist(in))
bits_dist = BitsCategorical(num_cats, heap_start)
BitsInput(vars..., bits_dist)
end
update_dist(pcnode, bitsnode::BitsInput, heap) =
pcnode.dist = unbits(bitsnode.dist, heap)
abstract type BitsInnerNode <: BitsNode end
struct BitsSum <: BitsInnerNode end
struct BitsMul <: BitsInnerNode end
###############################################
# bits representation of edges
###############################################
abstract type AbstractBitsEdge end
struct SumEdge <: AbstractBitsEdge
parent_id::UInt32
prime_id::UInt32
sub_id::UInt32 # 0 means no sub
logp::Float32
tag::UInt8
end
struct MulEdge <: AbstractBitsEdge
parent_id::UInt32
prime_id::UInt32
sub_id::UInt32 # 0 means no sub
tag::UInt8
end
const BitsEdge = Union{SumEdge,MulEdge}
hassub(x) = !iszero(x.sub_id)
rotate(edge::SumEdge) =
SumEdge(edge.parent_id, edge.sub_id, edge.prime_id, edge.logp, edge.tag)
rotate(edge::MulEdge) =
MulEdge(edge.parent_id, edge.sub_id, edge.prime_id, edge.tag)
# tags
@inline tag_at(tag, i) = (tag | (one(UInt8) << i))
@inline tagged_at(tag, i) = ((tag & one(tag) << i) != zero(tag))
@inline isfirst(tag) = tagged_at(tag, 0)
@inline islast(tag) = tagged_at(tag, 1)
"whether this series of edges is partial or complete"
@inline ispartial(tag) = tagged_at(tag, 2)
"whether this sub edge is the only outgoing edge from sub"
@inline isonlysubedge(tag) = tagged_at(tag, 3)
function tag_firstlast(i,n)
tag = zero(UInt8)
(i==1) && (tag = tag_at(tag, 0))
(i==n) && (tag = tag_at(tag, 1))
tag
end
tagpartial(tag) = tag_at(tag, 2)
tagonlysubedge(tag) = tag_at(tag, 3)
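
# Illustrative tag semantics (not executed):
#   tag_firstlast(1,3) -> isfirst true,  islast false  (first of three edges)
#   tag_firstlast(3,3) -> isfirst false, islast true   (last of three edges)
#   tag_firstlast(1,1) -> both flags set (a single-edge group)
#   tagpartial / tagonlysubedge additionally set bits 2 and 3 on a given tag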
changetag(edge::SumEdge, tag) =
SumEdge(edge.parent_id, edge.prime_id, edge.sub_id, edge.logp, tag)
changetag(edge::MulEdge, tag) =
MulEdge(edge.parent_id, edge.prime_id, edge.sub_id, tag)
###############################################
# bits representation of nested vectors
###############################################
"An `isbits` representation of a `AbstractVector{<:AbstractVector}`"
struct FlatVectors{V <: AbstractVector} <: AbstractVector{V}
vectors::V
ends::Vector{Int}
end
function FlatVectors(vectors::AbstractVector{<:AbstractVector})
flatvectors = vcat(vectors...)
ends = cumsum(map(length, vectors))
FlatVectors(flatvectors, ends)
end
layer_end(fv, i) =
fv.ends[i]
layer_start(fv, i) =
(i == 1) ? 1 : layer_end(fv, i-1) + 1
import CUDA: cu #extend
cu(fv::FlatVectors) =
FlatVectors(cu(fv.vectors), fv.ends)
import Base: size, getindex #extend
size(fv::FlatVectors) = size(fv.vectors)
num_layers(fv::FlatVectors) = length(fv.ends)
getindex(fv::FlatVectors, idx) = getindex(fv.vectors, idx)
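
# Illustrative example: FlatVectors([[1,2],[3,4,5]]) stores
# vectors == [1,2,3,4,5] and ends == [2,5]; layer 2 then spans
# layer_start(fv,2):layer_end(fv,2) == 3:5 within `vectors`.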
###############################################
# bits representation of circuit
###############################################
abstract type AbstractBitsProbCircuit end
struct BitsProbCircuit <: AbstractBitsProbCircuit
# all the nodes in the circuit
nodes::Vector{BitsNode}
# mapping from BitPC to PC nodes
nodes_map::Vector{ProbCircuit}
# the ids of the subset of nodes that are inputs
input_node_ids::Vector{UInt32}
# layers of edges for upward pass
edge_layers_up::FlatVectors{Vector{BitsEdge}}
# layers of edges for downward pass
edge_layers_down::FlatVectors{Vector{BitsEdge}}
# mapping from downward pass edge id to upward pass edge id
down2upedge::Vector{Int32}
# memory used by input nodes for their parameters and parameter learning
heap::Vector{Float32}
# index of beginning and ending edge for the node in the BitEdges flatvector
node_begin_end::Vector{Pair{UInt32, UInt32}}
BitsProbCircuit(n, nm, in, e1, e2, d, heap, node_be) = begin
@assert length(n) == length(nm) >= length(in) > 0
@assert length(e1.vectors) == length(e2.vectors)
@assert allunique(e1.ends) "No empty layers allowed"
@assert allunique(e2.ends) "No empty layers allowed"
new(n, nm, in, e1, e2, d, heap, node_be)
end
end
struct CuBitsProbCircuit{BitsNodes <: BitsNode} <: AbstractBitsProbCircuit
# all the nodes in the circuit
nodes::CuVector{BitsNodes}
# mapping from BitPC to PC nodes
nodes_map::Vector{ProbCircuit}
# the ids of the subset of nodes that are inputs
input_node_ids::CuVector{UInt32}
# layers of edges for upward pass
edge_layers_up::FlatVectors{<:CuVector{BitsEdge}}
# layers of edges for downward pass
edge_layers_down::FlatVectors{<:CuVector{BitsEdge}}
# mapping from downward pass edge id to upward pass edge id
down2upedge::CuVector{Int32}
# memory used by input nodes for their parameters and parameter learning
heap::CuVector{Float32}
node_begin_end::CuVector{Pair{UInt32, UInt32}}
CuBitsProbCircuit(bpc) = begin
# find union of bits node types actually used in the circuit
BitsNodes = mapreduce(typeof, (x,y) -> Union{x,y}, bpc.nodes)
@assert Base.isbitsunion(BitsNodes)
nodes = CuVector{BitsNodes}(bpc.nodes)
input_node_ids = cu(bpc.input_node_ids)
edge_layers_up = cu(bpc.edge_layers_up)
edge_layers_down = cu(bpc.edge_layers_down)
down2upedge = cu(bpc.down2upedge)
heap = cu(bpc.heap)
node_be = cu(bpc.node_begin_end)
new{BitsNodes}(nodes, bpc.nodes_map, input_node_ids,
edge_layers_up, edge_layers_down, down2upedge, heap, node_be)
end
end
CuBitsProbCircuit(pc::ProbCircuit) =
CuBitsProbCircuit(BitsProbCircuit(pc))
cu(bpc::BitsProbCircuit) = CuBitsProbCircuit(bpc)
###############################################
# converting a PC into a BitsPC
###############################################
struct NodeInfo
prime_id::Int
prime_layer_id::Int
sub_id::Int # 0 means no sub
sub_layer_id::Int
end
struct OutputInfo
edge::BitsEdge
parent_layer_id::Int
id_within_uplayer::Int
end
function BitsProbCircuit(pc::ProbCircuit; eager_materialize=true, collapse_elements=true)
nodes = BitsNode[]
nodes_map = ProbCircuit[]
input_node_ids = UInt32[]
node_layers = Vector{Int}[]
outputs = Vector{OutputInfo}[]
uplayers = Vector{BitsEdge}[]
heap = Float32[]
add_node(pcnode, bitsnode, layer_id) = begin
# add node globally
push!(nodes, bitsnode)
push!(nodes_map, pcnode)
push!(outputs, OutputInfo[])
id = length(nodes)
# add index for input nodes
if bitsnode isa BitsInput
push!(input_node_ids, id)
end
# add node to node layers
while length(node_layers) <= layer_id
push!(node_layers, Int[])
end
push!(node_layers[layer_id+1], id)
id
end
add_edge(parent_layer_id, edge, child_info) = begin
# introduce invariant that primes are never at a lower layer than subs
if hassub(child_info) && child_info.prime_layer_id < child_info.sub_layer_id
edge = rotate(edge)
end
# record up edges for upward pass
while length(uplayers) < parent_layer_id
push!(uplayers, BitsEdge[])
end
push!(uplayers[parent_layer_id], edge)
# record out edges for downward pass
id_within_uplayer = length(uplayers[parent_layer_id])
outputinfo = OutputInfo(edge, parent_layer_id, id_within_uplayer)
@assert uplayers[outputinfo.parent_layer_id][outputinfo.id_within_uplayer] == edge
push!(outputs[edge.prime_id], outputinfo)
if hassub(edge)
push!(outputs[edge.sub_id], outputinfo)
end
end
f_input(node) = begin
node_id = add_node(node, bits(node, heap), 0)
NodeInfo(node_id, 0, 0, 0)
end
f_inner(node, children_info) = begin
if (length(children_info) == 1 && (node !== pc)
&& (!eager_materialize || !hassub(children_info[1])))
# this is a pass-through node
children_info[1]
elseif (collapse_elements && ismul(node) && length(children_info) == 2
&& !hassub(children_info[1]) && !hassub(children_info[2]) && (node !== pc))
# this is a simple conjunctive element that we collapse into an edge
prime_layer_id = children_info[1].prime_layer_id
sub_layer_id = children_info[2].prime_layer_id
NodeInfo(children_info[1].prime_id, prime_layer_id,
children_info[2].prime_id, sub_layer_id)
else
layer_id = 1 + maximum(children_info) do info
max(info.prime_layer_id, info.sub_layer_id)
end
if issum(node)
node_id = add_node(node, BitsSum(), layer_id)
for i = 1:length(children_info)
param = params(node)[i]
child_info = children_info[i]
tag = tag_firstlast(i, length(children_info))
edge = SumEdge(node_id, child_info.prime_id, child_info.sub_id, param, tag)
add_edge(layer_id, edge, child_info)
end
else
@assert ismul(node)
node_id = add_node(node, BitsMul(), layer_id)
# try to merge inputs without a sub into "double" edges
children_info = merge_mul_inputs(children_info)
for i = 1:length(children_info)
child_info = children_info[i]
tag = tag_firstlast(i, length(children_info))
edge = MulEdge(node_id, child_info.prime_id, child_info.sub_id, tag)
add_edge(layer_id, edge, child_info)
end
end
NodeInfo(node_id, layer_id, 0, 0)
end
end
root_info = foldup_aggregate(pc, f_input, f_inner, NodeInfo)
@assert !hassub(root_info)
flatuplayers = FlatVectors(uplayers)
flatdownlayers, down2upedges = down_layers(node_layers, outputs, flatuplayers)
node_begin_end = [Pair(typemax(UInt32), typemin(UInt32)) for i=1:length(nodes)]
for i = 1:length(flatuplayers.vectors)
pi = flatuplayers.vectors[i].parent_id
l, r = node_begin_end[pi]
node_begin_end[pi] = Pair( min(l, i), max(r, i) )
end
BitsProbCircuit(nodes, nodes_map, input_node_ids,
flatuplayers, flatdownlayers, down2upedges, heap, node_begin_end)
end
function merge_mul_inputs(children_info)
single_infos = filter(!hassub, children_info)
double_infos = filter( hassub, children_info)
for i = 1:2:length(single_infos)
if i < length(single_infos)
prime_layer_id = single_infos[i].prime_layer_id
sub_layer_id = single_infos[i+1].prime_layer_id
merged_info = NodeInfo(single_infos[i].prime_id, prime_layer_id,
single_infos[i+1].prime_id, sub_layer_id)
single_infos[i] = merged_info
end
push!(double_infos, single_infos[i])
end
@assert length(double_infos) == length(children_info) - (length(single_infos) ÷ 2)
double_infos
end
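
# Illustrative example: four sub-free children a,b,c,d are paired into two
# "double" edges (a,b) and (c,d); with an odd count, the leftover child
# remains a single (sub-free) edge.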
function down_layers(node_layers, outputs, flatuplayers)
downedges = BitsEdge[]
downlayerends = Int[]
down2upedges = Int32[]
@assert length(node_layers[end]) == 1 && isempty(outputs[node_layers[end][1]])
for node_layer in node_layers[end-1:-1:1]
for node_id in node_layer
prime_outputs = filter(e -> e.edge.prime_id == node_id, outputs[node_id])
partial = (length(prime_outputs) != length(outputs[node_id]))
for i in 1:length(prime_outputs)
prime_output = prime_outputs[i]
edge = prime_output.edge
@assert edge.prime_id == node_id
# record the index in flatuplayers corresponding to this downedge
upedgeindex = layer_start(flatuplayers, prime_output.parent_layer_id) +
prime_output.id_within_uplayer - 1
push!(down2upedges, upedgeindex)
# update the tag and record down edge
tag = tag_firstlast(i, length(prime_outputs))
partial && (tag = tagpartial(tag))
if hassub(edge) && length(outputs[edge.sub_id]) == 1
tag = tagonlysubedge(tag)
end
edge = changetag(edge, tag)
push!(downedges, edge)
end
end
# record new end of layer
        if !isempty(downedges) && (isempty(downlayerends) || length(downedges) > downlayerends[end])
push!(downlayerends, length(downedges))
end
end
flatdownlayers = FlatVectors(downedges, downlayerends)
flatdownlayers, down2upedges
end
#####################
# retrieve parameters from BitsPC
#####################
"map parameters from BitsPC back to the ProbCircuit it was created from"
function update_parameters(bpc::AbstractBitsProbCircuit)
nodemap = bpc.nodes_map
# copy parameters from sum nodes
edges = Vector(bpc.edge_layers_up.vectors)
i = 1
while i <= length(edges)
@assert isfirst(edges[i].tag)
parent = nodemap[edges[i].parent_id]
if issum(parent)
ni = num_inputs(parent)
params(parent) .= map(e -> e.logp, edges[i:i+ni-1])
else # parent is a product node
ni = 1
while !isfirst(edges[i+ni].tag)
ni += 1
end
end
i += ni
end
# copy parameters from input nodes
nodes = Vector(bpc.nodes)
input_ids = Vector(bpc.input_node_ids)
heap = Vector(bpc.heap)
for i in input_ids
update_dist(nodemap[i], nodes[i], heap)
end
nothing
end
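
# Typical round trip (a sketch mirroring the package examples):
#   bpc = CuBitsProbCircuit(pc) # move circuit to GPU
#   ...                         # e.g. mini_batch_em / full_batch_em on bpc
#   update_parameters(bpc)      # copy learned parameters back into pc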
###############################################
# Circuit LM
###############################################
CuBitsProbCircuit(pc::ProbCircuit, input2group) =
CuBitsProbCircuit(BitsProbCircuit(pc, input2group))
function BitsProbCircuit(pc::ProbCircuit, input2group; eager_materialize=true, collapse_elements=true)
nodes = BitsNode[]
nodes_map = ProbCircuit[]
input_node_ids = UInt32[]
node_layers = Vector{Int}[]
outputs = Vector{OutputInfo}[]
uplayers = Vector{BitsEdge}[]
heap = Float32[]
group2offset = Dict()
add_node(pcnode, bitsnode, layer_id) = begin
# add node globally
push!(nodes, bitsnode)
push!(nodes_map, pcnode)
push!(outputs, OutputInfo[])
id = length(nodes)
# add index for input nodes
if bitsnode isa BitsInput
push!(input_node_ids, id)
end
# add node to node layers
while length(node_layers) <= layer_id
push!(node_layers, Int[])
end
push!(node_layers[layer_id+1], id)
id
end
add_edge(parent_layer_id, edge, child_info) = begin
# introduce invariant that primes are never at a lower layer than subs
if hassub(child_info) && child_info.prime_layer_id < child_info.sub_layer_id
edge = rotate(edge)
end
# record up edges for upward pass
while length(uplayers) < parent_layer_id
push!(uplayers, BitsEdge[])
end
push!(uplayers[parent_layer_id], edge)
# record out edges for downward pass
id_within_uplayer = length(uplayers[parent_layer_id])
outputinfo = OutputInfo(edge, parent_layer_id, id_within_uplayer)
@assert uplayers[outputinfo.parent_layer_id][outputinfo.id_within_uplayer] == edge
push!(outputs[edge.prime_id], outputinfo)
if hassub(edge)
push!(outputs[edge.sub_id], outputinfo)
end
end
f_input(node) = begin
if isempty(input2group)
node_id = add_node(node, bits(node, heap), 0)
else
group_idx = input2group[node]
if haskey(group2offset, group_idx)
bits_input = bits_reuse(node, group2offset[group_idx])
else
bits_input = bits(node, heap)
group2offset[group_idx] = bits_input.dist.heap_start
end
node_id = add_node(node, bits_input, 0)
end
NodeInfo(node_id, 0, 0, 0)
end
f_inner(node, children_info) = begin
if (length(children_info) == 1 && (node !== pc)
&& (!eager_materialize || !hassub(children_info[1])))
# this is a pass-through node
children_info[1]
elseif (collapse_elements && ismul(node) && length(children_info) == 2
&& !hassub(children_info[1]) && !hassub(children_info[2]) && (node !== pc))
# this is a simple conjunctive element that we collapse into an edge
prime_layer_id = children_info[1].prime_layer_id
sub_layer_id = children_info[2].prime_layer_id
NodeInfo(children_info[1].prime_id, prime_layer_id,
children_info[2].prime_id, sub_layer_id)
else
layer_id = 1 + maximum(children_info) do info
max(info.prime_layer_id, info.sub_layer_id)
end
if issum(node)
node_id = add_node(node, BitsSum(), layer_id)
for i = 1:length(children_info)
param = params(node)[i]
child_info = children_info[i]
tag = tag_firstlast(i, length(children_info))
edge = SumEdge(node_id, child_info.prime_id, child_info.sub_id, param, tag)
add_edge(layer_id, edge, child_info)
end
else
@assert ismul(node)
node_id = add_node(node, BitsMul(), layer_id)
# try to merge inputs without a sub into "double" edges
children_info = merge_mul_inputs(children_info)
for i = 1:length(children_info)
child_info = children_info[i]
tag = tag_firstlast(i, length(children_info))
edge = MulEdge(node_id, child_info.prime_id, child_info.sub_id, tag)
add_edge(layer_id, edge, child_info)
end
end
NodeInfo(node_id, layer_id, 0, 0)
end
end
println("foldup ...")
@time root_info = foldup_aggregate(pc, f_input, f_inner, NodeInfo)
@assert !hassub(root_info)
input2group = nothing
group2offset = nothing
println("gc ...")
@time GC.gc()
flatuplayers = FlatVectors(uplayers)
println("down_layers ...")
@time flatdownlayers, down2upedges = down_layers(node_layers, outputs, flatuplayers)
node_begin_end = [Pair(typemax(UInt32), typemin(UInt32)) for i=1:length(nodes)]
for i = 1:length(flatuplayers.vectors)
pi = flatuplayers.vectors[i].parent_id
l, r = node_begin_end[pi]
node_begin_end[pi] = Pair( min(l, i), max(r, i))
end
BitsProbCircuit(nodes, nodes_map, input_node_ids,
flatuplayers, flatdownlayers, down2upedges, heap, node_begin_end)
end
# add sum2group
CuBitsProbCircuit(pc::ProbCircuit, input2group, sum2group) = begin
bpc, node2group, edge2group = BitsProbCircuit(pc, input2group, sum2group)
CuBitsProbCircuit(bpc), node2group, edge2group
end
function BitsProbCircuit(pc::ProbCircuit, input2group, sum2group;
eager_materialize=true, collapse_elements=true)
bpc = BitsProbCircuit(pc, input2group; eager_materialize, collapse_elements)
println("gc ...")
@time GC.gc()
begin
# sum2group
node2group = zeros(UInt64, length(bpc.nodes))
edge2group = zeros(UInt64, length(bpc.edge_layers_down.vectors))
# sum2group = DefaultDict(0, sum2group)
for (id, bitnode) in enumerate(bpc.nodes)
if bitnode isa BitsSum && id != length(bpc.nodes) # not root
node2group[id] = sum2group[bpc.nodes_map[id]]
# @assert node2group[id] != 0
end
end
nodech2edge = Dict()
edgegroup_num = 0
edge_group_id(node_groupid, ch_id) = begin
if haskey(nodech2edge, (node_groupid, ch_id))
return nodech2edge[(node_groupid, ch_id)]
else
edgegroup_num += 1
nodech2edge[(node_groupid, ch_id)] = edgegroup_num
# print(edgegroup_num)
return edgegroup_num
end
end
node_groupid = 0
ch_id = 0
# TODO: sanity check
for (id, bitedge) in enumerate(bpc.edge_layers_down.vectors)
if bitedge isa SumEdge
node_groupid = node2group[bitedge.parent_id]
if node_groupid != 0
edge_up_id = bpc.down2upedge[id]
l, r = bpc.node_begin_end[bitedge.parent_id]
@assert l <= edge_up_id && edge_up_id <= r
ch_id = edge_up_id - l + 1
edge2group[id] = edge_group_id(node_groupid, ch_id)
# @assert edge2group[id] != 0
end
end
end
end
bpc, node2group, edge2group
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 1678 | import Base: foreach # extend
function foreach(pc::ProbCircuit, f_i::Function,
f_m::Function, f_s::Function, seen=nothing)
f_inner(n) = issum(n) ? f_s(n) : f_m(n)
    foreach(pc, f_i, f_inner, seen)
nothing # returning nothing helps save some allocations and time
end
import DirectedAcyclicGraphs: foldup # extend
"""
foldup(node::ProbCircuit,
f_i::Function,
f_m::Function,
        f_s::Function, 
        ::Type{T})::T where {T}
Compute a function bottom-up on the circuit. 
`f_i` is called on input nodes, `f_m` is called on product nodes, and `f_s` is called on sum nodes. 
Values of type `T` are passed up the circuit and given to `f_m` and `f_s` through a callback from the children.
"""
function foldup(node::ProbCircuit, f_i::Function, f_m::Function, f_s::Function, ::Type{T}, cache=nothing)::T where {T}
f_inner(n, call) = issum(n) ? f_s(n, call)::T : f_m(n, call)::T
foldup(node, f_i, f_inner, T, cache)::T
end
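# A minimal usage sketch of `foldup` (not part of the package API): compute the
# depth of a circuit by passing `Int` values up from the children. The `call`
# argument maps a child node to its already-folded (and cached) value.
circuit_depth(pc::ProbCircuit) = foldup(pc,
    n -> 0,                                    # input nodes sit at depth 0
    (n, call) -> 1 + maximum(call, inputs(n)), # product nodes
    (n, call) -> 1 + maximum(call, inputs(n)), # sum nodes
    Int)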
import DirectedAcyclicGraphs: foldup_aggregate # extend
"""
foldup_aggregate(node::ProbCircuit,
f_i::Function,
f_m::Function,
f_s::Function,
::Type{T})::T where T
Compute a function bottom-up on the circuit.
`f_i` is called on input nodes, `f_m` is called on product nodes, and `f_s` is called on sum nodes. 
Values of type `T` are passed up the circuit and given to `f_m` and `f_s` in an aggregate vector from the children.
"""
function foldup_aggregate(node::ProbCircuit, f_i::Function, f_m::Function, f_s::Function, ::Type{T}, cache=nothing) where T
f_inner(n, cs) = issum(n) ? f_s(n, cs)::T : f_m(n, cs)::T
foldup_aggregate(node, f_i, f_inner, T, cache)::T
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 2414 | using Lerche: Lerche, Lark, Transformer, @rule, @inline_rule
using CodecZlib: GzipDecompressorStream, GzipCompressorStream
# by default don't transform tokens in parser
abstract type PCTransformer <: Transformer end
Lerche.visit_tokens(t::PCTransformer) = false
# file formats supported by this package
abstract type FileFormat end
struct GzipFormat <: FileFormat
inner_format::FileFormat
end
# usual comment format for DIMACS-based file formats
const dimacs_comments = raw"""
COMMENT : ("c" | "cc") (_WS /[^\n]/*)? (_NL | /$/)
%ignore COMMENT
"""
# if no circuit file format is given on read, infer file format from extension
function file2pcformat(file)
if endswith(file,".gz")
file_inner, _ = splitext(file)
format_inner = file2pcformat(file_inner)
GzipFormat(format_inner)
elseif endswith(file,".jpc")
JpcFormat()
elseif endswith(file,".psdd")
PsddFormat()
elseif endswith(file,".spn")
SpnFormat()
else
        error("Unknown file extension in $file: provide a file format argument")
end
end
"""
Base.read(file::AbstractString, ::Type{C}) where C <: ProbCircuit
Reads circuit from file; uses extension to detect format type, for example ".psdd" for PSDDs.
"""
Base.read(file::AbstractString, ::Type{C}) where C <: ProbCircuit =
read(file, C, file2pcformat(file))
"""
Base.write(file::AbstractString, circuit::ProbCircuit)
Writes circuit to file; uses the file name extension to detect the file format.
"""
Base.write(file::AbstractString, circuit::ProbCircuit) =
write(file, circuit, file2pcformat(file))
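# Usage sketch (hypothetical file names): the format is inferred from the
# extension, including double extensions such as ".jpc.gz".
#   write("circuit.jpc.gz", pc)
#   pc2 = read("circuit.jpc.gz", ProbCircuit)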
# when asked to parse/read as `ProbCircuit`, default to `PlainProbCircuit`
Base.parse(::Type{ProbCircuit}, args...) =
parse(PlainProbCircuit, args...)
Base.read(io::IO, ::Type{ProbCircuit}, args...) =
read(io, PlainProbCircuit, args...)
Base.read(io::IO, ::Type{ProbCircuit}, f::GzipFormat) =
# avoid method ambiguity
read(io, PlainProbCircuit, f)
# (de)compress Gzip streams
Base.read(io::IO, circuit_type, f::GzipFormat) =
read(GzipDecompressorStream(io), circuit_type, f.inner_format)
Base.write(io::IO, circuit, f::GzipFormat) = begin
iogz = GzipCompressorStream(io)
write(iogz, circuit, f.inner_format)
close(iogz)
end
# specific file formats
include("jpc_io.jl")
include("psdd_io.jl")
include("spn_io.jl")
# visualization
include("plot.jl") | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 7633 | struct JpcFormat <: FileFormat end
##############################################
# Read JPC (Juice Probabilistic Circuit)
##############################################
const jpc_grammar = raw"""
start: header (_NL node)+ _NL?
header : "jpc" _WS INT
node : "L" _WS INT _WS INT _WS SIGNED_INT -> literal_node
| "I" _WS INT _WS INT _WS INT _WS INT -> indicator_node
| "B" _WS INT _WS INT _WS INT _WS INT _WS LOGPROB -> binomial_node
| "C" _WS INT _WS INT _WS INT (_WS LOGPROB)+ -> categorical_node
| "P" _WS INT _WS INT _WS INT child_nodes -> prod_node
| "S" _WS INT _WS INT _WS INT weighted_child_nodes -> sum_node
child_nodes : (_WS INT)+
weighted_child_nodes: (_WS INT _WS LOGPROB)+
%import common.INT
%import common.SIGNED_INT
%import common.SIGNED_NUMBER -> LOGPROB
%import common.WS_INLINE -> _WS
%import common.NEWLINE -> _NL
""" * dimacs_comments
jpc_parser() = Lark(jpc_grammar)
abstract type JpcParse <: PCTransformer end
@inline_rule header(t::JpcParse, x) =
Base.parse(Int,x)
@rule start(t::JpcParse, x) = begin
@assert num_nodes(x[end]) == x[1]
x[end]
end
@rule child_nodes(t::JpcParse, x) =
map(id -> t.nodes[id], x)
@rule weighted_child_nodes(t::JpcParse, x) = begin
children = map(id -> t.nodes[id], x[1:2:end])
log_probs = Base.parse.(Float64,x[2:2:end])
(children, log_probs)
end
# parse unstructured
struct PlainJpcParse <: JpcParse
nodes::Dict{String,PlainProbCircuit}
PlainJpcParse() = new(Dict{String,PlainProbCircuit}())
end
@rule literal_node(t::PlainJpcParse, x) = begin
lit = Base.parse(Int,x[3])
var = abs(lit)
sign = lit > 0
t.nodes[x[1]] = PlainInputNode(var, Literal(sign))
end
@rule indicator_node(t::PlainJpcParse, x) = begin
var = Base.parse(Int,x[3])
value = Base.parse(Int,x[4])
t.nodes[x[1]] = PlainInputNode(var, Indicator(value))
end
@rule binomial_node(t::PlainJpcParse, x) = begin
var = Base.parse(Int,x[3])
N = Base.parse(UInt32, x[4])
logp = Base.parse(Float64, x[5])
t.nodes[x[1]] = PlainInputNode(var, Binomial(N, exp(logp)))
end
@rule categorical_node(t::PlainJpcParse, x) = begin
var = Base.parse(Int,x[3])
log_probs = Base.parse.(Float64, x[4:end])
t.nodes[x[1]] = PlainInputNode(var, Categorical(log_probs))
end
@rule prod_node(t::PlainJpcParse,x) = begin
@assert length(x[4]) == Base.parse(Int,x[3])
t.nodes[x[1]] = PlainMulNode(x[4])
end
@rule sum_node(t::PlainJpcParse,x) = begin
@assert length(x[4][1]) == length(x[4][2]) == Base.parse(Int,x[3])
t.nodes[x[1]] = PlainSumNode(x[4][1], x[4][2])
end
function Base.parse(::Type{PlainProbCircuit}, str, ::JpcFormat)
ast = Lerche.parse(jpc_parser(), str)
Lerche.transform(PlainJpcParse(), ast)
end
function Base.read(io::IO, ::Type{PlainProbCircuit}, ::JpcFormat, fast = true)
if fast
read_fast(io, PlainProbCircuit, JpcFormat())
else
parse(PlainProbCircuit, read(io, String), JpcFormat())
end
end
# fast brittle read
function read_fast(input, ::Type{<:ProbCircuit} = PlainProbCircuit, ::JpcFormat = JpcFormat())
    # would be better using `Parsers.jl` but that package lacks documentation
nodes = PlainProbCircuit[]
for line in eachline(input)
if startswith(line, "c")
# do nothing
else
tokens = split(line, " ")
if startswith(line, "jpc")
num_nodes = Base.parse(Int,tokens[2])
nodes = Vector{PlainProbCircuit}(undef,num_nodes)
else
id = Base.parse(Int,tokens[2]) + 1
if startswith(line, "L")
lit = Base.parse(Int,tokens[4])
var = abs(lit)
sign = lit > 0
nodes[id] = PlainInputNode(var, Literal(sign))
elseif startswith(line, "I")
var = Base.parse(Int,tokens[4])
val = Base.parse(Int,tokens[5])
nodes[id] = PlainInputNode(var, Indicator(val))
elseif startswith(line, "C")
var = Base.parse(Int,tokens[4])
log_probs = Base.parse.(Float64, tokens[5:end])
nodes[id] = PlainInputNode(var, Categorical(log_probs))
elseif startswith(line, "B")
var = Base.parse(Int,tokens[4])
N = Base.parse(UInt32, tokens[5])
logp = Base.parse(Float64, tokens[6])
nodes[id] = PlainInputNode(var, Binomial(N, exp(logp)))
elseif startswith(line, "P")
child_ids = Base.parse.(Int, tokens[5:end]) .+ 1
children = nodes[child_ids]
nodes[id] = PlainMulNode(children)
elseif startswith(line, "S")
child_ids = Base.parse.(Int, tokens[5:2:end]) .+ 1
children = nodes[child_ids]
log_probs = Base.parse.(Float64, tokens[6:2:end])
nodes[id] = PlainSumNode(children, log_probs)
else
error("Cannot parse line: $line")
end
end
end
end
nodes[end]
end
##############################################
# Write JPCs
##############################################
const JPC_FORMAT = """c this file was saved by ProbabilisticCircuits.jl
c ids of jpc nodes start at 0
c jpc nodes appear bottom-up, children before parents
c
c file syntax:
c jpc count-of-jpc-nodes
c L id-of-jpc-node id-of-vtree literal
c I id-of-jpc-node id-of-vtree variable indicator-value
c C id-of-jpc-node id-of-vtree variable {log-probability}+
c B id-of-jpc-node id-of-vtree variable binomial-N binomial-P
c P id-of-product-jpc-node id-of-vtree number-of-children {child-id}+
c S id-of-sum-jpc-node id-of-vtree number-of-children {child-id log-probability}+
c"""
function Base.write(io::IO, circuit::ProbCircuit, ::JpcFormat, vtreeid::Function = (x -> 0))
labeling = label_nodes(circuit)
    map!(x -> x-1, values(labeling)) # jpc node ids are 0-based
println(io, JPC_FORMAT)
println(io, "jpc $(num_nodes(circuit))")
foreach(circuit) do n
if isinput(n)
var = randvar(n)
d = dist(n)
if d isa Literal
literal = value(d) ? var : -var
println(io, "L $(labeling[n]) $(vtreeid(n)) $literal")
elseif d isa Indicator{<:Integer}
println(io, "I $(labeling[n]) $(vtreeid(n)) $var $(value(d))")
elseif d isa Categorical
print(io, "C $(labeling[n]) $(vtreeid(n)) $var")
foreach(p -> print(io, " $p"), params(d))
println(io)
elseif d isa Binomial
print(io, "B $(labeling[n]) $(vtreeid(n)) $var $(d.N) $(log(d.p))")
println(io)
else
error("Input distribution type $(typeof(d)) is unknown to the JPC file format")
end
else
t = ismul(n) ? "P" : "S"
print(io, "$t $(labeling[n]) $(vtreeid(n)) $(num_inputs(n))")
if ismul(n)
for child in inputs(n)
print(io, " $(labeling[child])")
end
else
@assert issum(n)
for (child, logp) in zip(inputs(n), params(n))
print(io, " $(labeling[child]) $logp")
end
end
println(io)
end
end
nothing
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 1140 | export plot
using TikzGraphs
import TikzGraphs: plot
import Graphs: DiGraph, add_edge!
function plot(pc::ProbCircuit)
nn = num_nodes(pc)
g = DiGraph(nn)
nodeid = label_nodes(pc)
node_labels = Vector{String}(undef, nn)
edge_labels = Dict()
foreach(pc) do n
nid = nodeid[n]
node_labels[nid] =
if isinput(n)
"\$X_$(randvar(n)) \\sim $(latex(dist(n)))\$"
elseif ismul(n)
"*"
else
"+"
end
for i in 1:num_inputs(n)
cid = nodeid[inputs(n, i)]
add_edge!(g, nid, cid)
if issum(n)
p = round(exp(params(n,i)), digits=3)
edge_labels[(nid, cid)] = "$p"
end
end
end
TikzGraphs.plot(g, node_labels; edge_labels, edge_style="font=\\tiny")
end
latex(ind::Indicator) =
"\\mathbf{1}_{$(value(ind))}"
function latex(d::Categorical)
p = round.(exp.(params(d)), digits=3)
"Cat(" * join(p, ", ") * ")"
end
function latex(d::Binomial)
p = round(d.p, digits=3)
"Binomial($(d.N), $(p))"
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 4136 | struct PsddFormat <: FileFormat end
##############################################
# Read SDDs
##############################################
const psdd_grammar = raw"""
start: _header (_NL node)+ _NL?
_header : "psdd" (_WS INT)?
node : "T" _WS INT _WS INT _WS INT _WS LOGPROB -> true_node
| "L" _WS INT _WS INT _WS SIGNED_INT _WS? -> literal_node
| "D" _WS INT _WS INT _WS INT _WS elems -> decision_node
elems : elem (_WS elem)*
elem : INT _WS INT _WS LOGPROB
%import common.INT
%import common.SIGNED_INT
%import common.SIGNED_NUMBER -> LOGPROB
%import common.WS_INLINE -> _WS
%import common.NEWLINE -> _NL
""" * dimacs_comments
psdd_parser() = Lark(psdd_grammar)
abstract type PsddParse <: PCTransformer end
@rule start(t::PsddParse, x) = begin
x[end]
end
@rule elem(t::PsddParse, x) =
[t.nodes[x[1]], t.nodes[x[2]], Base.parse(Float64,x[3])]
@rule elems(t::PsddParse, x) =
Array(x)
# parse unstructured
struct PlainPsddParse <: PsddParse
nodes::Dict{String,PlainProbCircuit}
PlainPsddParse() = new(Dict{String,PlainProbCircuit}())
end
@rule literal_node(t::PlainPsddParse, x) = begin
lit = Base.parse(Int,x[3])
var = abs(lit)
sign = lit > 0
t.nodes[x[1]] = PlainInputNode(var, Literal(sign))
end
@rule true_node(t::PlainPsddParse, x) = begin
var = Base.parse(Int,x[3])
pos = PlainInputNode(var, Literal(true))
neg = PlainInputNode(var, Literal(false))
log_prob = Base.parse(Float64, x[4])
log_probs = [log_prob, log1p(-exp(log_prob))]
t.nodes[x[1]] = PlainSumNode([pos, neg], log_probs)
end
@rule decision_node(t::PlainPsddParse,x) = begin
@assert length(x[4]) == Base.parse(Int,x[3])
children = map(x[4]) do elem
PlainMulNode(elem[1:2])
end
log_probs = map(e -> e[3], x[4])
t.nodes[x[1]] = PlainSumNode(children, log_probs)
end
function Base.parse(::Type{PlainProbCircuit}, str, ::PsddFormat)
ast = Lerche.parse(psdd_parser(), str)
Lerche.transform(PlainPsddParse(), ast)
end
Base.read(io::IO, ::Type{PlainProbCircuit}, ::PsddFormat) =
parse(PlainProbCircuit, read(io, String), PsddFormat())
##############################################
# Write PSDDs
##############################################
const PSDD_FORMAT = """c this file was saved by ProbabilisticCircuits.jl
c ids of psdd nodes start at 0
c psdd nodes appear bottom-up, children before parents
c
c file syntax:
c psdd count-of-sdd-nodes
c L id-of-literal-sdd-node id-of-vtree literal
c T id-of-trueNode-sdd-node id-of-vtree variable log(litProb)
c D id-of-decomposition-sdd-node id-of-vtree number-of-elements {id-of-prime id-of-sub log(elementProb)}*
c"""
"Count the number of decision and leaf nodes in the PSDD"
psdd_num_nodes_leafs(psdd) = length(sumnodes(psdd)) + length(inputnodes(psdd)) # defined in sdd file format
function Base.write(io::IO, pc::ProbCircuit, ::PsddFormat, vtree2id::Function = (x -> 0))
id = -1
println(io, PSDD_FORMAT)
println(io, "psdd $(psdd_num_nodes_leafs(pc))")
f_lit(n) = begin
nid = id += 1
literal = value(dist(n)) ? randvar(n) : -randvar(n)
println(io, "L $nid $(vtree2id(n)) $literal")
nid
end
f_a(n, ids) = begin
if length(ids) != 2
error("The PSDD file format requires multiplications/AND nodes to have exactly two inputs")
end
tuple(ids...)
end
f_o(n, ids) = begin
nid = id += 1
vtreeid = vtree2id(n)
if num_children(n) == 2 && all(isinput, inputs(n))
pos_child = value(dist(children(n)[1])) > 0 ? 1 : 2
log_prob = params(n)[pos_child]
v = randvar(children(n)[1])
print(io, "T $nid $vtreeid $v $log_prob")
else
print(io, "D $nid $vtreeid $(length(ids))")
for (el, log_prob) in zip(ids, params(n))
print(io, " $(el[1]) $(el[2]) $log_prob")
end
end
println(io)
nid
end
foldup_aggregate(pc, f_lit, f_a, f_o, Union{Int, Tuple{Int,Int}})
nothing
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 2814 | struct SpnFormat <: FileFormat end
##############################################
# Read SPN format (some dialect of the Libra AC format?)
##############################################
const spn_grammar = raw"""
start : domains (_NL node)+ _NL "EOF" _NL?
domains : "(2" (_WS "2")* ")"
node : "v" _WS INT _WS INT -> literal_node
| "*" (_WS INT)+ -> prod_node
| "+" (_WS INT _WS LOGPROB)+ -> sum_node
%import common.INT
%import common.SIGNED_NUMBER -> LOGPROB
%import common.WS_INLINE -> _WS
%import common.NEWLINE -> _NL
"""
const spn_parser() = Lark(spn_grammar)
struct SpnParse <: PCTransformer
nodes::Vector{PlainProbCircuit}
SpnParse() = new(PlainProbCircuit[])
end
@rule start(t::SpnParse, x) =
t.nodes[end]
@rule domains(t::SpnParse, x) = begin
    d = Base.parse.(Int,x)
    @assert all(==(2), d) "Only Boolean domains are currently supported, not $d in SPN format"
    d
end
@rule literal_node(t::SpnParse, x) = begin
var = Base.parse(Var,x[1]) + 1
@assert x[2] == "0" || x[2] == "1" "Boolean domains only."
sign = (x[2] == "1")
push!(t.nodes, PlainInputNode(var, Literal(sign)))
end
@rule prod_node(t::SpnParse,x) = begin
child_i = Base.parse.(Int,x) .+ 1
children = t.nodes[child_i]
push!(t.nodes, PlainMulNode(children))
end
@rule sum_node(t::SpnParse,x) = begin
child_i = Base.parse.(Int,x[1:2:end]) .+ 1
children = t.nodes[child_i]
log_probs = Base.parse.(Float64,x[2:2:end])
push!(t.nodes, PlainSumNode(children, log_probs))
end
function Base.parse(::Type{PlainProbCircuit}, str, ::SpnFormat)
ast = Lerche.parse(spn_parser(), str)
Lerche.transform(SpnParse(), ast)
end
Base.read(io::IO, ::Type{PlainProbCircuit}, ::SpnFormat) =
parse(PlainProbCircuit, read(io, String), SpnFormat())
##############################################
# Write SPNs
##############################################
function Base.write(io::IO, circuit::ProbCircuit, ::SpnFormat)
labeling = label_nodes(circuit)
map!(x -> x-1, values(labeling)) # nodes are 0-based indexed
println(io, "(2" * " 2"^(num_randvars(circuit)-1) * ")")
foreach(circuit) do n
if isinput(n)
state = value(dist(n)) ? "1" : "0"
println(io, "v $(randvar(n)-1) $state")
else
print(io, ismul(n) ? "*" : "+")
if ismul(n)
for child in inputs(n)
print(io, " $(labeling[child])")
end
else
@assert issum(n)
for (child, logp) in zip(inputs(n), params(n))
print(io, " $(labeling[child]) $logp")
end
end
println(io)
end
end
println(io, "EOF")
nothing
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 3927 | export ProbCircuit,
multiply, summate,
isinput, ismul, issum,
inputnodes, mulnodes, sumnodes,
num_parameters, num_parameters_node, params,
inputs, num_inputs, num_randvars,
dist, randvars, randvar,
InputNode
const Var = UInt32
#####################
# Abstract probabilistic circuit nodes
#####################
"Root of the probabilistic circuit node hierarchy"
abstract type ProbCircuit <: DAG end
"Probabilistic circuit node types"
abstract type NodeType end
struct InputNode <: NodeType end
abstract type InnerNode <: NodeType end
struct SumNode <: InnerNode end
struct MulNode <: InnerNode end
"Get the probabilistic circuit node type"
NodeType(pc::ProbCircuit) = NodeType(typeof(pc))
DAGs.NodeType(::Type{T}) where {T<:ProbCircuit} =
DAGs.NodeType(NodeType(T))
DAGs.NodeType(::InnerNode) = DAGs.Inner()
DAGs.NodeType(::InputNode) = DAGs.Leaf()
"Get the inputs of a PC node"
inputs(node, i) = inputs(node)[i]
# DirectedAcyclicGraphs.jl has the convention that edges are directed away from the root
DAGs.children(pc::ProbCircuit) = inputs(pc)
"Get the distribution of a PC input node"
function dist end
dist(n::ProbCircuit) = nothing
"Get the parameters associated with a node"
params(n::ProbCircuit) = n.params
params(n, i) = params(n)[i]
"Count the number of parameters in the node"
num_parameters_node(n) =
num_parameters_node(n, true) # default to independent = true
"Multiply nodes into a single circuit"
function multiply end
"Sum nodes into a single circuit"
function summate end
#####################
# derived functions
#####################
"Is the node an input node?"
isinput(n) = (NodeType(n) isa InputNode)
"Is the node a multiplication?"
ismul(n) = (NodeType(n) isa MulNode)
"Is the node a summation?"
issum(n) = (NodeType(n) isa SumNode)
"Get all input nodes in a given circuit"
inputnodes(pc) = filter(isinput, pc)
"Get all multiplication nodes in a given circuit"
mulnodes(pc) = filter(ismul, pc)
"Get all summation nodes in a given circuit"
sumnodes(pc) = filter(issum, pc)
"Count the number of parameters in the circuit"
num_parameters(pc, independent = true) =
sum(n -> num_parameters_node(n, independent), vcat(sumnodes(pc), inputnodes(pc)))
"Number of inputs of a PC node"
num_inputs(pc) = num_inputs(pc, NodeType(pc))
num_inputs(_, ::InputNode) = 0
num_inputs(pc, ::InnerNode) = length(inputs(pc))
"""
    randvars(pc::ProbCircuit)::BitSet
Get a bitset of variables mentioned in the circuit.
"""
function randvars(pc, cache = nothing)::BitSet
f_inner(n, call) = mapreduce(call, union, inputs(n))
foldup(pc, randvars, f_inner, BitSet, cache)
end
function randvar(pc)
rvs = randvars(pc)
    @assert length(rvs) == 1 "Calling `randvar` on a node that does not have exactly one variable"
first(rvs)
end
"Number of variables in the data structure"
num_randvars(pc) = length(randvars(pc))
#####################
# constructor conveniences
#####################
multiply(xs::ProbCircuit...) = multiply(collect(xs))
summate(xs::ProbCircuit...) = summate(collect(xs))
Base.:*(x::ProbCircuit, y::ProbCircuit) = multiply(x,y)
Base.:*(xs::ProbCircuit...) = multiply(xs...)
Base.:+(x::ProbCircuit, y::ProbCircuit) = summate(x,y)
Base.:+(xs::ProbCircuit...) = summate(xs...)
# circuit construction with arithmetic operators
struct WeightProbCircuit
weight::Float32
pc::ProbCircuit
end
Base.:*(w::Real, x::ProbCircuit) = WeightProbCircuit(w, x)
Base.:*(x::ProbCircuit, w::Real) = w * x
function Base.:+(x::WeightProbCircuit...)
terms = collect(x)
pc = summate(map(x -> x.pc, terms))
params(pc) .= log.(map(x -> x.weight, terms))
pc
end
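# Construction sketch using the overloaded operators (assuming the `Literal`
# input distribution defined elsewhere in this package):
#   x1  = InputNode(1, Literal(true)); x1c = InputNode(1, Literal(false))
#   x2  = InputNode(2, Literal(true)); x2c = InputNode(2, Literal(false))
#   pc  = 0.3 * (x1 * x2) + 0.7 * (x1c * x2c)   # mixture of two products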
#####################
# debugging tools
#####################
function check_parameter_integrity(circuit::ProbCircuit)
for node in sumnodes(circuit)
@assert all(θ -> !isnan(θ), params(node)) "There is a NaN in one of the PC parameters"
end
true
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 4064 | using SpecialFunctions: loggamma, lgamma
using CUDA
export Binomial
struct Binomial <: InputDist
N::UInt32
p::Float32
end
struct BitsBinomial <: InputDist
N::UInt32
heap_start::UInt32
end
Binomial(N::Integer) =
Binomial(UInt32(N), Float32(0.5))
Binomial(N::Integer, p::Float64) =
Binomial(UInt32(N), Float32(p))
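# e.g. `Binomial(10, 0.3)` models counts in 0..10 with success probability 0.3;
# `Binomial(10)` defaults to p = 0.5.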
num_parameters(dist::Binomial, independent) = 1
params(dist::Binomial, independent) = dist.p
isapprox(x::Binomial, y::Binomial) =
typeof(x) == typeof(y) && x.N == y.N && x.p ≈ y.p
init_params(dist::Binomial, perturbation::Float32) = begin
Binomial(dist.N, rand(Float32))
end
function bits(dist::Binomial, heap)
heap_start = length(heap) + 1
# use heap to store parameters and space for parameter learning
# Add (p, flow*value, flow, missing_flow)
append!(heap, dist.p, zeros(Float32, 3))
BitsBinomial(dist.N, heap_start)
end
pr(dist::Binomial, _ = nothing) = dist.p
pr(dist::BitsBinomial, heap) = heap[dist.heap_start]
function unbits(dist::BitsBinomial, heap)
Binomial(dist.N, pr(dist, heap))
end
function loglikelihood(dist::Binomial, value, _=nothing)
binomial_logpdf_(dist.N, pr(dist), value, loggamma)
end
function loglikelihood(dist::BitsBinomial, value, heap)
binomial_logpdf_(dist.N, pr(dist, heap), value, lgamma)
end
function binomial_logpdf_(n, p, k, gamma_func::Function)
if k > n || k < 0
return -Inf32
elseif (p == zero(Float32))
return (k == 0 ? Float32(0.0) : -Inf32)
elseif (p == one(Float32))
return (k == n ? Float32(0.0) : -Inf32)
else
temp = gamma_func(Float32(n + 1)) - gamma_func(Float32(k + 1)) - gamma_func(Float32(n - k + 1))
temp += k * log(p) + (n - k) * log1p(-p)
return Float32(temp)
end
end
function flow(dist::BitsBinomial, value, node_flow, heap)
heap_start = dist.heap_start
if ismissing(value)
CUDA.@atomic heap[heap_start + UInt32(3)] += node_flow
else
CUDA.@atomic heap[heap_start + UInt32(1)] += node_flow * value
CUDA.@atomic heap[heap_start + UInt32(2)] += node_flow
end
nothing
end
function update_params(dist::BitsBinomial, heap, pseudocount, inertia)
heap_start = dist.heap_start
missing_flow = heap[heap_start + 3]
node_flow = heap[heap_start + 2] + missing_flow + pseudocount
oldp = heap[heap_start]
new = (heap[heap_start + 1] + missing_flow * oldp * dist.N + pseudocount) / (node_flow * dist.N)
new_p = oldp * inertia + new * (one(Float32) - inertia)
# update p on heap
heap[heap_start] = new_p
nothing
end
function clear_memory(dist::BitsBinomial, heap, rate)
heap_start = dist.heap_start
for i = 1 : 3
heap[heap_start + i] *= rate
end
nothing
end
#### Sample
function sample_state(dist::Union{BitsBinomial, Binomial}, threshold, heap)
# Works for both cpu and gpu
N = dist.N
ans::UInt32 = N
cumul_prob = typemin(Float32)
for i = 0 : N
cumul_prob = logsumexp(cumul_prob, loglikelihood(dist, i, heap))
if cumul_prob > threshold
ans = i
break
end
end
return ans
end
sample_state(dist::Binomial, threshold) =
sample_state(dist, threshold, nothing)
### MAP
init_heap_map_state!(dist::BitsBinomial, heap) = nothing
init_heap_map_loglikelihood!(dist::BitsBinomial, heap) = nothing
function map_loglikelihood(dist::Union{BitsBinomial, Binomial}, heap)
p = pr(dist, heap)
N = dist.N
A = floor(UInt32, N*p)
lA = loglikelihood(dist, A, heap)
B = floor(UInt32, N*p + 1)
lB = loglikelihood(dist, B, heap)
return max(lA, lB)
end
map_loglikelihood(dist::Binomial) = map_loglikelihood(dist, nothing)
function map_state(dist::Union{BitsBinomial, Binomial}, heap)
p = pr(dist, heap)
N = dist.N
A = floor(UInt32, N*p)
lA = loglikelihood(dist, A, heap)
B = floor(UInt32, N*p + 1)
lB = loglikelihood(dist, B, heap)
return (lA > lB ? A : B)
end
map_state(dist::Binomial) = map_state(dist, nothing) | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 5227 | export Bernoulli, Categorical
#####################
# categoricals or bernoullis
#####################
"A N-value categorical input distribution ranging over integers [0...N-1]"
struct Categorical <: InputDist
logps::Vector{Float32}
end
loguniform(num_cats) =
zeros(Float32, num_cats) .- log(num_cats)
Categorical(num_cats::Integer) =
Categorical(loguniform(num_cats))
Bernoulli() = Categorical(2)
Bernoulli(logp) =
Categorical([log1p(-exp(logp)), logp])
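# e.g. `Bernoulli(log(0.9))` places probability 0.9 on category 1 (true) and
# 0.1 on category 0; note the argument is a *log* probability.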
logps(d::Categorical) = d.logps
params(d::Categorical) = logps(d)
num_categories(d::Categorical) = length(logps(d))
num_parameters(n::Categorical, independent) =
num_categories(n) - (independent ? 1 : 0)
init_params(d::Categorical, perturbation::Float32) = begin
unnormalized_probs = map(rand(Float32, num_categories(d))) do x
Float32(1.0 - perturbation + x * 2.0 * perturbation)
end
logps = log.(unnormalized_probs ./ sum(unnormalized_probs))
Categorical(logps)
end
sample_state(d::Categorical, threshold, _ = nothing) = begin
cumul_prob = typemin(Float32)
    ans = num_categories(d) - 1 # assign any leftover numerical probability mass to the last category
for cat in 0:num_categories(d) - 1
cumul_prob = logsumexp(cumul_prob, d.logps[cat + 1])
if cumul_prob > threshold
ans = cat
break
end
end
return ans
end
loglikelihood(d::Categorical, value, _ = nothing) =
d.logps[1 + value]
map_loglikelihood(d::Categorical, _= nothing) =
    maximum(d.logps)
map_state(d::Categorical, _ = nothing) =
    argmax(d.logps) - one(UInt32) # category values range from 0 to N-1
struct BitsCategorical <: InputDist
num_cats::UInt32
heap_start::UInt32
end
function bits(d::Categorical, heap)
num_cats = num_categories(d)
heap_start = length(heap) + 1
# use heap to store parameters and space for parameter learning
append!(heap, logps(d), zeros(eltype(heap), num_cats + 1)) # the last value is used to maintain `missing` flows
BitsCategorical(num_cats, heap_start)
end
function unbits(d::BitsCategorical, heap)
logps = heap[d.heap_start : d.heap_start + d.num_cats - one(UInt32)]
Categorical(logps)
end
loglikelihood(d::BitsCategorical, value, heap) =
heap[d.heap_start + UInt32(value)]
const CAT_HEAP_STATE = UInt32(1)
const CAT_HEAP_MAP_LL = UInt32(2)
init_heap_map_state!(d::BitsCategorical, heap) = begin
best_idx = d.heap_start
best_val = typemin(Float32)
for i = d.heap_start : d.heap_start + d.num_cats - one(UInt32)
if heap[i] > best_val
best_val = heap[i]
best_idx = i
end
end
idx = d.heap_start + d.num_cats + CAT_HEAP_STATE - one(UInt32)
heap[idx] = Float32(best_idx - d.heap_start)
end
init_heap_map_loglikelihood!(d::BitsCategorical, heap) = begin
ans = typemin(Float32)
for i = d.heap_start : d.heap_start + d.num_cats - one(UInt32)
ans = max(ans, heap[i])
end
idx = d.heap_start + d.num_cats + CAT_HEAP_MAP_LL - one(UInt32)
heap[idx] = ans
end
map_state(d::BitsCategorical, heap) = begin
ll_idx = d.heap_start + d.num_cats + CAT_HEAP_STATE - one(UInt32)
return UInt32(heap[ll_idx])
end
sample_state(d::BitsCategorical, threshold::Float32, heap) = begin
cumul_prob = typemin(Float32)
chosen_cat = d.num_cats - one(UInt32)
for i = d.heap_start : d.heap_start + d.num_cats - one(UInt32)
cumul_prob = logsumexp(cumul_prob, heap[i])
if cumul_prob > threshold
chosen_cat = i - d.heap_start
break
end
end
return chosen_cat
end
map_loglikelihood(d::BitsCategorical, heap) = begin
ll_idx = d.heap_start + d.num_cats + CAT_HEAP_MAP_LL - one(UInt32)
return heap[ll_idx]
end
function flow(d::BitsCategorical, value, node_flow, heap)
if ismissing(value)
CUDA.@atomic heap[d.heap_start+UInt32(2)*d.num_cats] += node_flow
else
CUDA.@atomic heap[d.heap_start+d.num_cats+UInt32(value)] += node_flow
end
nothing
end
function update_params(d::BitsCategorical, heap, pseudocount, inertia)
heap_start = d.heap_start
num_cats = d.num_cats
@inbounds begin
# add pseudocount & accumulate node flow
node_flow = zero(Float32)
cat_pseudocount = pseudocount / Float32(num_cats)
for i = 0 : num_cats-1
node_flow += heap[heap_start+num_cats+i]
end
missing_flow = heap[heap_start+UInt32(2)*num_cats]
node_flow += missing_flow + pseudocount
# update parameter
for i = 0 : num_cats-1
oldp = exp(heap[heap_start+i])
old = inertia * oldp
new = (one(Float32) - inertia) * (heap[heap_start+num_cats+i] +
cat_pseudocount + missing_flow * oldp) / node_flow
new_log_param = log(old + new)
heap[heap_start+i] = new_log_param
end
end
nothing
end
function clear_memory(d::BitsCategorical, heap, rate)
heap_start = d.heap_start
num_cats = d.num_cats
for i = 0 : num_cats-1
heap[heap_start+num_cats+i] *= rate
end
heap[heap_start+2*num_cats] *= rate
nothing
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 1317 | export Indicator, Literal
#####################
# indicators or logical literals
#####################
"A input distribution node that places all probability on a single value"
struct Indicator{T} <: InputDist
value::T
end
"A logical literal input distribution node"
const Literal = Indicator{Bool}
num_parameters(n::Indicator, independent) = 0
value(d::Indicator) = d.value
params(d::Indicator) = value(d)
bits(d::Indicator, _ = nothing) = d
unbits(d::Indicator, _ = nothing) = d
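# For floating-point evidence, a `Literal` interprets `value` as the
# probability of the positive literal (soft evidence); for all other inputs
# the likelihood is an exact indicator match (0 or -Inf in log space).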
loglikelihood(d::Indicator, value, _ = nothing) =
if value isa AbstractFloat && d isa Literal
(d.value) ? log(value) : log1p(-value)
else
(d.value == value) ? zero(Float32) : -Inf32
end
init_params(d::Indicator, _) = d
sample_state(d::Indicator, threshold=nothing, heap=nothing) = d.value
map_state(d::Indicator, _ = nothing) = d.value
map_loglikelihood(d::Indicator, _= nothing) = zero(Float32)
# do nothing since don't need heap for indicators
init_heap_map_state!(d::Indicator, _ = nothing) = nothing
init_heap_map_loglikelihood!(d::Indicator, _= nothing) = nothing
# no learning necessary for indicator distributions
flow(d::Indicator, value, node_flow, heap) = nothing
update_params(d::Indicator, heap, pseudocount, inertia) = nothing
clear_memory(d::Indicator, heap, rate) = nothing | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 3933 | using CUDA
export InputDist, loglikelihood
abstract type InputDist end
import Base: isapprox #extend
isapprox(x::InputDist, y::InputDist) =
typeof(x) == typeof(y) && params(x) ≈ params(y)
#######################################################
### Functions to implement for each new input type ####
#######################################################
"""
num_parameters(d::InputDist, independent)
Returns number of parameters for the input dist.
- `independent`: whether to only count independent parameters
"""
num_parameters(d::InputDist, independent) =
error("Not implemented error: `num_parameters`, $(typeof(d))")
"""
params(d::InputDist)
Returns the parameters of the input dist.
"""
params(d::InputDist) =
error("Not implemented error: `params`, $(typeof(d))")
"""
init_params(d::InputDist, perturbation)
Returns a new distribution of the same type with initialized parameters.
"""
init_params(d::InputDist, perturbation) =
error("Not implemented error: `init_params`, $(typeof(d))")
"""
bits(d::InputDist, heap)
Appends the required memory for this input dist to the heap.
Used internally for moving from CPU to GPU.
"""
bits(d::InputDist, heap) =
error("Not implemented error: `bits`, $(typeof(d))")
"""
unbits(d::InputDist, heap)
Returns the InputDist struct from the heap. Note that each input dist type
needs to store where in the heap its parameters are to be able to do this.
Used internally for moving from GPU to CPU.
"""
unbits(d::InputDist, heap) =
error("Not implemented error: `unbits`, $(typeof(d))")
"""
loglikelihood(d::InputDist, value, heap)
Returns the `log( P(input_var == value) )` according to the InputDist.
"""
loglikelihood(d::InputDist, value, heap) =
error("Not implemented error: `loglikelihood`, $(typeof(d))")
"""
sample_state(d::InputDist, threshold::Float32, heap)
Returns a sample from InputDist.
`threshold` is a uniform random value in the range (0, 1) given to this API by the sampling algorithm.
"""
sample_state(d::InputDist, threshold, heap) =
error("Not implemented error: `sample_state`, $(typeof(d))")
"""
init_heap_map_state!(d::InputDist, heap)
Initializes the heap for the input dist. Called before running MAP queries.
"""
init_heap_map_state!(d::InputDist, heap) =
error("Not implemented error: `init_heap_map_state!`, $(typeof(d))")
"""
init_heap_map_loglikelihood!(d::InputDist, heap)
Initializes the heap for the input dist. Called before running MAP queries.
"""
init_heap_map_loglikelihood!(d::InputDist, heap) =
error("Not implemented error: `init_heap_map_loglikelihood!`, $(typeof(d))")
"""
map_state(d::InputDist, heap)
Returns the MAP state for the InputDist d
"""
map_state(d::InputDist, heap) =
error("Not implemented error: `map_state`, $(typeof(d))")
"""
map_loglikelihood(d::InputDist, heap)
Returns the log-likelihood of the most likely state of the InputDist d
"""
map_loglikelihood(d::InputDist, heap) =
error("Not implemented error: `map_loglikelihood`, $(typeof(d))")
"""
flow(d::InputDist, value, node_flow, heap)
Updates the "flow" values in the `heap` for the input node.
"""
flow(d::InputDist, value, node_flow, heap) =
error("Not implemented error: `flow`, $(typeof(d))")
"""
update_params(d::InputDist, heap, pseudocount, inertia)
Update the parameters of the InputDist using stored values
on the `heap` and (`pseudocount`, `inertia`)
"""
update_params(d::InputDist, heap, pseudocount, inertia) =
error("Not implemented error: `update_params`, $(typeof(d))")
"""
clear_memory(d::InputDist, heap, rate)
Clears the accumulated flow values on the `heap` by multiplying it by `rate`.
`rate == 0.0` will be equivalent to initializing the value to 0.0.
"""
clear_memory(d::InputDist, heap, rate) =
error("Not implemented error: `clear_memory`, $(typeof(d))")
#########################################
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 2663 | #####################
# Plain probabilistic circuit nodes
#####################
"Root of the plain probabilistic circuit node hierarchy"
abstract type PlainProbCircuit <: ProbCircuit end
"A probabilistic input node"
mutable struct PlainInputNode{D <: InputDist} <: PlainProbCircuit
randvars::BitSet
dist::D
end
PlainInputNode(randvars, dist) =
PlainInputNode(BitSet(randvars), dist)
PlainInputNode(randvar::Integer, dist) =
PlainInputNode(BitSet([randvar]), dist)
InputNode(randvar::Integer, dist) =
PlainInputNode(randvar, dist)
InputNode(randvars, dist) =
PlainInputNode(randvars, dist)
"A probabilistic inner node"
abstract type PlainInnerNode <: PlainProbCircuit end
"A probabilistic multiplication node"
mutable struct PlainMulNode <: PlainInnerNode
inputs::Vector{PlainProbCircuit}
end
"A probabilistic summation node"
mutable struct PlainSumNode <: PlainInnerNode
inputs::Vector{PlainProbCircuit}
params::Vector{Float32}
end
function PlainSumNode(inputs)
num_in = length(inputs)
# initialize with uniform log-parameters
params = zeros(Float32, num_in) .- log(num_in)
PlainSumNode(inputs, params)
end
#####################
# traits
#####################
NodeType(::Type{<:PlainInputNode}) = InputNode()
NodeType(::Type{<:PlainMulNode}) = MulNode()
NodeType(::Type{<:PlainSumNode}) = SumNode()
#####################
# methods
#####################
inputs(n::PlainInnerNode) = n.inputs
dist(n::PlainInputNode) = n.dist
randvars(n::PlainInputNode) = n.randvars
params(n::PlainInputNode) = params(dist(n))
num_parameters_node(n::PlainInputNode, independent) =
num_parameters(dist(n), independent)
num_parameters_node(n::PlainMulNode, _) = 0
num_parameters_node(n::PlainSumNode, independent) =
num_inputs(n) - (independent ? 1 : 0)
init_params(n::PlainInputNode, perturbation::Float32) = begin
d = init_params(dist(n), perturbation)
n.dist = d
end
#####################
# constructors and conversions
#####################
function multiply(args::Vector{<:PlainProbCircuit}; reuse=nothing)
@assert length(args) > 0
if reuse isa PlainMulNode && inputs(reuse) == args
reuse
else
PlainMulNode(args)
end
end
function summate(args::Vector{<:PlainProbCircuit}; reuse=nothing)
@assert length(args) > 0
if reuse isa PlainSumNode && inputs(reuse) == args
reuse
else
PlainSumNode(args)
end
end
function PlainProbCircuit(pc::ProbCircuit)
f_i(n) = PlainInputNode(randvar(n), dist(n))
f_m(_, ins) = multiply(ins)
f_s(_, ins) = summate(ins)
foldup_aggregate(pc, f_i, f_m, f_s, PlainProbCircuit)
end
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 25068 | using CUDA, Random
export full_batch_em, mini_batch_em, init_parameters
##################################################################################
# Count siblings
##################################################################################
function count_siblings_kernel(node_aggr, edges)
edge_id = ((blockIdx().x - one(Int32)) * blockDim().x) + threadIdx().x
@inbounds if edge_id <= length(edges)
edge = edges[edge_id]
if edge isa SumEdge
parent_id = edge.parent_id
CUDA.@atomic node_aggr[parent_id] += one(Float32)
end
end
nothing
end
function count_siblings(node_aggr, bpc; debug=false)
# reset aggregates
node_aggr .= zero(Float32)
edges = bpc.edge_layers_down.vectors
args = (node_aggr, edges)
kernel = @cuda name="count_siblings" launch=false count_siblings_kernel(args...)
threads = launch_configuration(kernel.fun).threads
blocks = cld(length(edges), threads)
if debug
println("Count siblings")
@show threads blocks length(edges)
CUDA.@time kernel(args...; threads, blocks)
else
kernel(args...; threads, blocks)
end
nothing
end
##################################################################################
# Pseudocounts
##################################################################################
function add_pseudocount_kernel(edge_aggr, edges, _node_aggr, pseudocount)
node_aggr = Base.Experimental.Const(_node_aggr)
edge_id = ((blockIdx().x - one(Int32)) * blockDim().x) + threadIdx().x
@inbounds if edge_id <= length(edges)
edge = edges[edge_id]
if edge isa SumEdge
parent_id = edge.parent_id
CUDA.@atomic edge_aggr[edge_id] += pseudocount / node_aggr[parent_id]
end
end
nothing
end
function add_pseudocount(edge_aggr, node_aggr, bpc, pseudocount; debug = false)
count_siblings(node_aggr, bpc)
edges = bpc.edge_layers_down.vectors
args = (edge_aggr, edges, node_aggr, Float32(pseudocount))
kernel = @cuda name="add_pseudocount" launch=false add_pseudocount_kernel(args...)
threads = launch_configuration(kernel.fun).threads
blocks = cld(length(edges), threads)
if debug
println("Add pseudocount")
@show threads blocks length(edges)
CUDA.@time kernel(args...; threads, blocks)
else
kernel(args...; threads, blocks)
end
nothing
end
##################################################################################
# Aggregate node flows
##################################################################################
function aggr_node_flows_kernel(node_aggr, edges, _edge_aggr)
edge_aggr = Base.Experimental.Const(_edge_aggr)
edge_id = ((blockIdx().x - one(Int32)) * blockDim().x) + threadIdx().x
@inbounds if edge_id <= length(edges)
edge = edges[edge_id]
if edge isa SumEdge
parent_id = edge.parent_id
edge_flow = edge_aggr[edge_id]
CUDA.@atomic node_aggr[parent_id] += edge_flow
end
end
nothing
end
function aggr_node_flows(node_aggr, bpc, edge_aggr; debug = false)
# reset aggregates
node_aggr .= zero(Float32)
edges = bpc.edge_layers_down.vectors
args = (node_aggr, edges, edge_aggr)
kernel = @cuda name="aggr_node_flows" launch=false aggr_node_flows_kernel(args...)
config = launch_configuration(kernel.fun)
threads = config.threads
blocks = cld(length(edges), threads)
if debug
println("Aggregate node flows")
@show threads blocks length(edges)
CUDA.@time kernel(args...; threads, blocks)
else
kernel(args...; threads, blocks)
end
nothing
end
function aggr_node_share_flows_kernel(node_aggr, node2group, group_aggr)
# edge_aggr = Base.Experimental.Const(_edge_aggr)
node_id = ((blockIdx().x - one(Int32)) * blockDim().x) + threadIdx().x
@inbounds if node_id <= length(node_aggr)
group_id = node2group[node_id]
node_flow = node_aggr[node_id]
if group_id != 0
CUDA.@atomic group_aggr[group_id] += node_flow
end
end
nothing
end
function aggr_node_share_flows(node_aggr, node2group, group_aggr; debug = false)
group_aggr .= zero(Float32)
args = (node_aggr, node2group, group_aggr)
kernel = @cuda name="aggr_node_share_flows" launch=false aggr_node_share_flows_kernel(args...)
config = launch_configuration(kernel.fun)
threads = config.threads
blocks = cld(length(node_aggr), threads)
if debug
println("Aggregate node share flows")
@show threads blocks length(node_aggr)
CUDA.@time kernel(args...; threads, blocks)
else
kernel(args...; threads, blocks)
end
nothing
end
function broadcast_node_share_flows_kernel(node_aggr, node2group, group_aggr)
# edge_aggr = Base.Experimental.Const(_edge_aggr)
node_id = ((blockIdx().x - one(Int32)) * blockDim().x) + threadIdx().x
@inbounds if node_id <= length(node_aggr)
group_id = node2group[node_id]
if group_id != 0
group_flow = group_aggr[group_id]
node_aggr[node_id] = group_flow
end
end
nothing
end
function broadcast_node_share_flows(node_aggr, node2group, group_aggr; debug = false)
args = (node_aggr, node2group, group_aggr)
kernel = @cuda name="broadcast_node_share_flows" launch=false broadcast_node_share_flows_kernel(args...)
config = launch_configuration(kernel.fun)
threads = config.threads
blocks = cld(length(node_aggr), threads)
if debug
println("Aggregate node share flows")
@show threads blocks length(node_aggr)
CUDA.@time kernel(args...; threads, blocks)
else
kernel(args...; threads, blocks)
end
nothing
end
##################################################################################
# Update parameters
##################################################################################
function update_params_kernel(edges_down, edges_up, _down2upedge, _node_aggr, _edge_aggr, inertia)
node_aggr = Base.Experimental.Const(_node_aggr)
edge_aggr = Base.Experimental.Const(_edge_aggr)
down2upedge = Base.Experimental.Const(_down2upedge)
edge_id_down = ((blockIdx().x - one(Int32)) * blockDim().x) + threadIdx().x
@inbounds if edge_id_down <= length(edges_down)
edge_down = edges_down[edge_id_down]
if edge_down isa SumEdge
edge_id_up = down2upedge[edge_id_down]
# only difference is the tag
edge_up_tag = edges_up[edge_id_up].tag
if !(isfirst(edge_up_tag) && islast(edge_up_tag))
parent_id = edge_down.parent_id
parent_flow = node_aggr[parent_id]
edge_flow = edge_aggr[edge_id_down]
old = inertia * exp(edge_down.logp)
new = (one(Float32) - inertia) * edge_flow / parent_flow
new_log_param = log(old + new)
edges_down[edge_id_down] =
SumEdge(parent_id, edge_down.prime_id, edge_down.sub_id,
new_log_param, edge_down.tag)
edges_up[edge_id_up] =
SumEdge(parent_id, edge_down.prime_id, edge_down.sub_id,
new_log_param, edge_up_tag)
end
end
end
nothing
end
function update_params(bpc, node_aggr, edge_aggr; inertia = 0, debug = false)
edges_down = bpc.edge_layers_down.vectors
edges_up = bpc.edge_layers_up.vectors
down2upedge = bpc.down2upedge
@assert length(edges_down) == length(down2upedge) == length(edges_up)
args = (edges_down, edges_up, down2upedge, node_aggr, edge_aggr, Float32(inertia))
kernel = @cuda name="update_params" launch=false update_params_kernel(args...)
threads = launch_configuration(kernel.fun).threads
blocks = cld(length(edges_down), threads)
if debug
println("Update parameters")
@show threads blocks length(edges_down)
CUDA.@time kernel(args...; threads, blocks)
else
kernel(args...; threads, blocks)
end
nothing
end
##################################################################################
# Clear memory of input nodes
##################################################################################
function clear_input_node_mem_kernel(nodes, input_node_ids, heap, rate)
node_id = ((blockIdx().x - one(Int32)) * blockDim().x) + threadIdx().x
@inbounds if node_id <= length(input_node_ids)
orig_node_id::UInt32 = input_node_ids[node_id]
inputnode = nodes[orig_node_id]::BitsInput
clear_memory(dist(inputnode), heap, rate)
end
nothing
end
function clear_input_node_mem(bpc; rate = 0, debug = false)
num_input_nodes = length(bpc.input_node_ids)
args = (bpc.nodes, bpc.input_node_ids, bpc.heap, Float32(rate))
kernel = @cuda name="clear_input_node_mem" launch=false clear_input_node_mem_kernel(args...)
threads = launch_configuration(kernel.fun).threads
blocks = cld(num_input_nodes, threads)
if debug
println("Clear memory of input nodes")
@show threads blocks num_input_nodes
CUDA.@time kernel(args...; threads, blocks)
else
kernel(args...; threads, blocks)
end
nothing
end
##################################################################################
# Update parameters of input nodes
##################################################################################
function update_input_node_params_kernel(nodes, input_node_ids, heap, pseudocount, inertia)
node_id = ((blockIdx().x - one(Int32)) * blockDim().x) + threadIdx().x
@inbounds if node_id <= length(input_node_ids)
orig_node_id::UInt32 = input_node_ids[node_id]
inputnode = nodes[orig_node_id]::BitsInput
update_params(dist(inputnode), heap, pseudocount, inertia)
end
nothing
end
function update_input_node_params(bpc; pseudocount, inertia = 0, debug = false)
num_input_nodes = length(bpc.input_node_ids)
args = (bpc.nodes, bpc.input_node_ids, bpc.heap, Float32(pseudocount), Float32(inertia))
kernel = @cuda name="update_input_node_params" launch=false update_input_node_params_kernel(args...)
threads = launch_configuration(kernel.fun).threads
blocks = cld(num_input_nodes, threads)
if debug
println("Update parameters of input nodes")
@show threads blocks num_input_nodes
CUDA.@time kernel(args...; threads, blocks)
else
kernel(args...; threads, blocks)
end
nothing
end
####################################
### Parameter initialization
####################################
"""
init_parameters(pc::ProbCircuit; perturbation = 0.0)
Initialize parameters of ProbCircuit.
"""
function init_parameters(pc::ProbCircuit; perturbation = 0.0)
perturbation = Float32(perturbation)
foreach(pc) do pn
@inbounds if issum(pn)
if num_children(pn) == 1
pn.params .= zero(Float32)
else
if perturbation < 1e-8
pn.params .= loguniform(num_children(pn))
else
unnormalized_probs = map(x -> one(Float32) - perturbation + x * Float32(2.0) * perturbation, rand(Float32, num_children(pn)))
pn.params .= log.(unnormalized_probs ./ sum(unnormalized_probs))
end
end
elseif isinput(pn)
init_params(pn, perturbation)
end
end
nothing
end
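# Typical GPU training sketch (assuming `pc::ProbCircuit`, a `CuArray` of
# training examples `train_x`, and the single-argument `CuBitsProbCircuit`
# constructor of this package):
#   init_parameters(pc; perturbation = 0.4)
#   bpc = CuBitsProbCircuit(pc)
#   mini_batch_em(bpc, train_x, 100; batch_size = 512, pseudocount = 0.1,
#                 param_inertia = 0.2, param_inertia_end = 0.9)
#   full_batch_em(bpc, train_x, 10; batch_size = 512, pseudocount = 0.1)
#   update_parameters(bpc)   # copy learned parameters back into `pc`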
#######################
### Full-Batch EM
#######################
"Turn binary data into floating point data close to 0 and 1."
function soften_data(data; softness, pseudocount=1)
data_marginals = ((sum(data; dims=1) .+ Float32(pseudocount/2))
./ Float32(size(data, 1) + pseudocount))
Float32(1-softness) * data .+ Float32(softness) * data_marginals
end
function full_batch_em_step(bpc::CuBitsProbCircuit, data::CuArray;
batch_size, pseudocount, report_ll=true,
marginals, flows, node_aggr, edge_aggr,
mine, maxe, debug, node_group_aggr, edge_group_aggr,
node2group, edge2group)
num_examples = size(data)[1]
num_batches = cld(num_examples, batch_size)
if report_ll
log_likelihoods = CUDA.zeros(Float32, num_batches, 1)
end
edge_aggr .= zero(Float32)
clear_input_node_mem(bpc; rate = 0)
batch_index = 0
for batch_start = 1:batch_size:num_examples
batch_end = min(batch_start+batch_size-1, num_examples)
batch = batch_start:batch_end
num_batch_examples = batch_end - batch_start + 1
batch_index += 1
probs_flows_circuit(flows, marginals, edge_aggr, bpc, data, batch;
mine, maxe, debug)
if report_ll
@views sum!(
log_likelihoods[batch_index:batch_index, 1:1],
marginals[1:num_batch_examples,end:end])
end
end
add_pseudocount(edge_aggr, node_aggr, bpc, pseudocount; debug)
if !isnothing(edge2group)
aggr_node_share_flows(edge_aggr, edge2group, edge_group_aggr)
broadcast_node_share_flows(edge_aggr, edge2group, edge_group_aggr)
end
aggr_node_flows(node_aggr, bpc, edge_aggr; debug)
update_params(bpc, node_aggr, edge_aggr; inertia = 0)
update_input_node_params(bpc; pseudocount, inertia = 0, debug)
return report_ll ? sum(log_likelihoods) / num_examples : 0.0
end
"""
full_batch_em(bpc::CuBitsProbCircuit, raw_data::CuArray, num_epochs; batch_size, pseudocount)
Update the parameters of the CuBitsProbCircuit by doing EM on the full batch (i.e. update parameters at the end of each epoch).
"""
function full_batch_em(bpc::CuBitsProbCircuit, raw_data::CuArray, num_epochs;
batch_size, pseudocount, softness = 0, report_ll = true,
mars_mem = nothing, flows_mem = nothing, node_aggr_mem = nothing,
edge_aggr_mem = nothing, mine=2, maxe=32, debug = false, verbose = true,
callbacks = [],
node2group = nothing, edge2group = nothing)
insert!(callbacks, 1, FullBatchLog(verbose))
callbacks = CALLBACKList(callbacks)
init(callbacks; batch_size, bpc)
num_nodes = length(bpc.nodes)
num_edges = length(bpc.edge_layers_down.vectors)
data = iszero(softness) ? raw_data : soften_data(raw_data; softness)
marginals = prep_memory(mars_mem, (batch_size, num_nodes), (false, true))
flows = prep_memory(flows_mem, (batch_size, num_nodes), (false, true))
node_aggr = prep_memory(node_aggr_mem, (num_nodes,))
edge_aggr = prep_memory(edge_aggr_mem, (num_edges,))
log_likelihoods = Vector{Float32}()
#################### sum node/edges sharing ##########################
node_group_aggr, edge_group_aggr = nothing, nothing
if !isnothing(edge2group)
        edge_group_aggr = prep_memory(nothing, (maximum(edge2group),))
edge2group = cu(edge2group)
end
#################### sum node/edges sharing ##########################
for epoch = 1:num_epochs
log_likelihood = full_batch_em_step(bpc, data;
batch_size, pseudocount, report_ll,
marginals, flows, node_aggr, edge_aggr,
mine, maxe, debug,
node_group_aggr, edge_group_aggr,
node2group, edge2group)
push!(log_likelihoods, log_likelihood)
done = call(callbacks, epoch, log_likelihood)
if !isnothing(done) && done[end] == true
break
end
end
    cleanup_memory((data, raw_data), (marginals, mars_mem), (flows, flows_mem), 
        (node_aggr, node_aggr_mem), (edge_aggr, edge_aggr_mem))
if !isnothing(edge2group)
cleanup_memory((edge_group_aggr, nothing))
end
cleanup(callbacks)
log_likelihoods
end
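# Usage sketch (hypothetical names; assumes a `bpc::CuBitsProbCircuit` and a
# `CuArray` of training data already exist):
#
#     lls = full_batch_em(bpc, train_gpu, 100; batch_size = 512, pseudocount = 0.1)
#     lls[end]   # average train log-likelihood of the final epoch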
#######################
### Mini-Batch EM
######################
"""
mini_batch_em(bpc::CuBitsProbCircuit, raw_data::CuArray, num_epochs; batch_size, pseudocount,
param_inertia, param_inertia_end = param_inertia, shuffle=:each_epoch)
Update the parameters of the CuBitsProbCircuit by doing EM; the parameters are updated after each mini-batch.
"""
function mini_batch_em(bpc::CuBitsProbCircuit, raw_data::CuArray, num_epochs;
batch_size, pseudocount,
param_inertia, param_inertia_end = param_inertia,
flow_memory = 0, flow_memory_end = flow_memory,
softness = 0, shuffle=:each_epoch,
mars_mem = nothing, flows_mem = nothing, node_aggr_mem = nothing, edge_aggr_mem = nothing,
mine = 2, maxe = 32, debug = false, verbose = true,
callbacks = [],
node2group = nothing, edge2group = nothing)
@assert pseudocount >= 0
@assert 0 <= param_inertia <= 1
@assert param_inertia <= param_inertia_end <= 1
@assert 0 <= flow_memory
@assert flow_memory <= flow_memory_end
@assert shuffle ∈ [:once, :each_epoch, :each_batch]
insert!(callbacks, 1, MiniBatchLog(verbose))
callbacks = CALLBACKList(callbacks)
init(callbacks; batch_size, bpc)
num_examples = size(raw_data)[1]
num_nodes = length(bpc.nodes)
num_edges = length(bpc.edge_layers_down.vectors)
num_batches = num_examples ÷ batch_size # drop last incomplete batch
@assert batch_size <= num_examples
data = iszero(softness) ? raw_data : soften_data(raw_data; softness)
marginals = prep_memory(mars_mem, (batch_size, num_nodes), (false, true))
flows = prep_memory(flows_mem, (batch_size, num_nodes), (false, true))
node_aggr = prep_memory(node_aggr_mem, (num_nodes,))
edge_aggr = prep_memory(edge_aggr_mem, (num_edges,))
edge_aggr .= zero(Float32)
clear_input_node_mem(bpc; rate = 0, debug)
#################### sum node/edges sharing ##########################
node_group_aggr, edge_group_aggr = nothing, nothing
if !isnothing(edge2group)
edge_group_aggr = prep_memory(nothing, (maximum(edge2group),))
edge2group = cu(edge2group)
end
#################### sum node/edges sharing ##########################
shuffled_indices_cpu = Vector{Int32}(undef, num_examples)
shuffled_indices = CuVector{Int32}(undef, num_examples)
batches = [@view shuffled_indices[1+(b-1)*batch_size : b*batch_size]
for b in 1:num_batches]
do_shuffle() = begin
randperm!(shuffled_indices_cpu)
copyto!(shuffled_indices, shuffled_indices_cpu)
end
(shuffle == :once) && do_shuffle()
Δparam_inertia = (param_inertia_end-param_inertia)/num_epochs
Δflow_memory = (flow_memory_end-flow_memory)/num_epochs
log_likelihoods = Vector{Float32}()
log_likelihoods_epoch = CUDA.zeros(Float32, num_batches, 1)
for epoch in 1:num_epochs
log_likelihoods_epoch .= zero(Float32)
(shuffle == :each_epoch) && do_shuffle()
for (batch_id, batch) in enumerate(batches)
(shuffle == :each_batch) && do_shuffle()
if iszero(flow_memory)
edge_aggr .= zero(Float32)
clear_input_node_mem(bpc; rate = 0, debug)
else
# slowly forget old edge aggregates
rate = max(zero(Float32), one(Float32) - (batch_size + pseudocount) / flow_memory)
edge_aggr .*= rate
clear_input_node_mem(bpc; rate)
end
probs_flows_circuit(flows, marginals, edge_aggr, bpc, data, batch;
mine, maxe, debug)
@views sum!(log_likelihoods_epoch[batch_id:batch_id, 1:1],
marginals[1:batch_size,end:end])
add_pseudocount(edge_aggr, node_aggr, bpc, pseudocount; debug)
if !isnothing(edge2group)
aggr_node_share_flows(edge_aggr, edge2group, edge_group_aggr)
broadcast_node_share_flows(edge_aggr, edge2group, edge_group_aggr)
end
aggr_node_flows(node_aggr, bpc, edge_aggr; debug)
update_params(bpc, node_aggr, edge_aggr; inertia = param_inertia, debug)
update_input_node_params(bpc; pseudocount, inertia = param_inertia, debug)
end
log_likelihood = sum(log_likelihoods_epoch) / batch_size / num_batches
push!(log_likelihoods, log_likelihood)
call(callbacks, epoch, log_likelihood)
param_inertia += Δparam_inertia
flow_memory += Δflow_memory
end
cleanup_memory((data, raw_data), (flows, flows_mem),
(node_aggr, node_aggr_mem), (edge_aggr, edge_aggr_mem))
CUDA.unsafe_free!(shuffled_indices)
if !isnothing(edge2group)
cleanup_memory((edge_group_aggr, nothing))
end
cleanup(callbacks)
log_likelihoods
end
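# Usage sketch (hypothetical names): mini-batch EM with a parameter-inertia
# schedule that anneals from 0.2 to 0.9 over the epochs:
#
#     lls = mini_batch_em(bpc, train_gpu, 100; batch_size = 512, pseudocount = 0.1,
#                         param_inertia = 0.2, param_inertia_end = 0.9)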
abstract type CALLBACK end
struct CALLBACKList
list::Vector{CALLBACK}
end
function call(callbacks::CALLBACKList, epoch, log_likelihood)
if callbacks.list[1].verbose
done = map(callbacks.list) do x
call(x, epoch, log_likelihood)
end
println()
done
end
end
function init(callbacks::CALLBACKList; kwargs...)
for x in callbacks.list
init(x; kwargs...)
end
end
function cleanup(callbacks::CALLBACKList)
for x in callbacks.list
cleanup(x)
end
end
struct MiniBatchLog <: CALLBACK
verbose
end
struct FullBatchLog <: CALLBACK
verbose
end
mutable struct LikelihoodsLog <: CALLBACK
valid_x
test_x
iter
bpc
batch_size
mars_mem
LikelihoodsLog(valid_x, test_x, iter) = begin
new(valid_x, test_x, iter, nothing, nothing, nothing)
end
end
init(caller::CALLBACK; kwargs...) = nothing
init(caller::LikelihoodsLog; bpc, batch_size) = begin
caller.bpc = bpc
caller.batch_size = batch_size
caller.mars_mem = prep_memory(nothing, (batch_size, length(bpc.nodes)), (false, true))
end
call(caller::MiniBatchLog, epoch, log_likelihood) = begin
caller.verbose && print("Mini-batch EM epoch $epoch; train LL $log_likelihood")
end
call(caller::FullBatchLog, epoch, log_likelihood) = begin
caller.verbose && print("Full-batch EM epoch $epoch; train LL $log_likelihood")
end
call(caller::LikelihoodsLog, epoch, log_likelihood) = begin
valid_ll, test_ll = nothing, nothing
if epoch % caller.iter == 0 && (!isnothing(caller.valid_x) || !isnothing(caller.test_x))
if !isnothing(caller.valid_x)
valid_ll = loglikelihood(caller.bpc, caller.valid_x;
batch_size=caller.batch_size,mars_mem=caller.mars_mem)
print("; valid LL ", valid_ll)
end
if !isnothing(caller.test_x)
test_ll = loglikelihood(caller.bpc, caller.test_x;
batch_size=caller.batch_size,mars_mem=caller.mars_mem)
print("; test LL ", test_ll)
end
end
valid_ll, test_ll
end
cleanup(caller::CALLBACK) = nothing
cleanup(caller::LikelihoodsLog) = begin
CUDA.unsafe_free!(caller.mars_mem)
end
# early stopping
mutable struct EarlyStopPC <: CALLBACK
likelihoods_log
patience
warmup
val
best_value
best_iter
best_bpc
n_increase
iter
EarlyStopPC(likelihoods_log; patience, warmup=1, val=:valid_x) = begin
@assert val == :valid_x
@assert patience % likelihoods_log.iter == 0
@assert !isnothing(likelihoods_log.valid_x)
new(likelihoods_log, Int(ceil(patience / likelihoods_log.iter)),
warmup, val, -Inf, 0, nothing, 0, 0)
end
end
init(caller::EarlyStopPC; args...) = begin
init(caller.likelihoods_log; args...)
bpc = caller.likelihoods_log.bpc
best_bpc = (edge_layers_up = deepcopy(bpc.edge_layers_up), heap = deepcopy(bpc.heap))
caller.best_bpc = best_bpc
end
call(caller::EarlyStopPC, epoch, log_likelihood) = begin
valid_ll, test_ll = call(caller.likelihoods_log, epoch, log_likelihood)
caller.iter += 1
flag = false
if isnothing(valid_ll) || caller.iter < caller.warmup
flag = false
elseif valid_ll >= caller.best_value
caller.n_increase = 0
caller.best_value = valid_ll
caller.best_iter = epoch
copy_bpc!(caller.best_bpc, caller.likelihoods_log.bpc)
flag = false
elseif valid_ll < caller.best_value
caller.n_increase += 1
if caller.n_increase > caller.patience
copy_bpc!(caller.likelihoods_log.bpc, caller.best_bpc)
flag = true
else
flag = false
end
else
error("")
end
return flag
end
copy_bpc!(dst, src) = begin
copyto!(dst.edge_layers_up.vectors, src.edge_layers_up.vectors)
copyto!(dst.heap, src.heap)
end
cleanup(caller::EarlyStopPC) = begin
cleanup(caller.likelihoods_log)
end
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 7414 | using CUDA, Random
##################################################################################
# Downward pass
##################################################################################
function layer_down_kernel(flows, edge_aggr, edges, _mars,
num_ex_threads::Int32, num_examples::Int32,
layer_start::Int32, edge_work::Int32, layer_end::Int32)
mars = Base.Experimental.Const(_mars)
threadid_block = threadIdx().x
threadid = ((blockIdx().x - one(Int32)) * blockDim().x) + threadid_block
edge_batch, ex_id = fldmod1(threadid, num_ex_threads)
edge_start = layer_start + (edge_batch-one(Int32))*edge_work
edge_end = min(edge_start + edge_work - one(Int32), layer_end)
warp_lane = mod1(threadid_block, warpsize())
local acc::Float32
local prime_mar::Float32
owned_node::Bool = false
@inbounds for edge_id = edge_start:edge_end
edge = edges[edge_id]
parent_id = edge.parent_id
prime_id = edge.prime_id
sub_id = edge.sub_id
tag = edge.tag
firstedge = isfirst(tag)
lastedge = islast(tag)
issum = edge isa SumEdge
active = (ex_id <= num_examples)
if firstedge
partial = ispartial(tag)
owned_node = !partial
end
if active
edge_flow = flows[ex_id, parent_id]
if issum
parent_mar = mars[ex_id, parent_id]
child_prob = mars[ex_id, prime_id] + edge.logp
if sub_id != 0
child_prob += mars[ex_id, sub_id]
end
edge_flow = edge_flow * exp(child_prob - parent_mar)
end
if sub_id != 0
if isonlysubedge(tag)
flows[ex_id, sub_id] = edge_flow
else
CUDA.@atomic flows[ex_id, sub_id] += edge_flow
end
end
end
# make sure this is run on all warp threads, regardless of `active`
if !isnothing(edge_aggr)
!active && (edge_flow = zero(Float32))
edge_flow_warp = CUDA.reduce_warp(+, edge_flow)
if warp_lane == 1
CUDA.@atomic edge_aggr[edge_id] += edge_flow_warp
end
end
if active
# accumulate flows from parents
if firstedge || (edge_id == edge_start)
acc = edge_flow
else
acc += edge_flow
end
# write to global memory
if lastedge || (edge_id == edge_end)
if lastedge && owned_node
# no one else is writing to this global memory
flows[ex_id, prime_id] = acc
else
CUDA.@atomic flows[ex_id, prime_id] += acc
end
end
end
end
nothing
end
function layer_down(flows, edge_aggr, bpc, mars,
layer_start, layer_end, num_examples;
mine, maxe, debug=false)
edges = bpc.edge_layers_down.vectors
num_edges = layer_end-layer_start+1
dummy_args = (flows, edge_aggr, edges, mars,
Int32(32), Int32(num_examples),
Int32(1), Int32(1), Int32(2))
kernel = @cuda name="layer_down" launch=false layer_down_kernel(dummy_args...)
config = launch_configuration(kernel.fun)
# configure thread/block balancing
threads, blocks, num_example_threads, edge_work =
balance_threads(num_edges, num_examples, config; mine, maxe, contiguous_warps=true)
args = (flows, edge_aggr, edges, mars,
Int32(num_example_threads), Int32(num_examples),
Int32(layer_start), Int32(edge_work), Int32(layer_end))
if debug
println("Layer $layer_start:$layer_end")
@show threads blocks num_example_threads edge_work, num_edges num_examples
CUDA.@time kernel(args...; threads, blocks)
else
kernel(args...; threads, blocks)
end
nothing
end
function flows_circuit(flows, edge_aggr, bpc, mars, num_examples; mine, maxe, debug=false)
init_flows() = begin
flows .= zero(Float32)
flows[:,end] .= one(Float32)
end
if debug
println("Initializing flows")
CUDA.@time CUDA.@sync init_flows()
else
init_flows()
end
layer_start = 1
for layer_end in bpc.edge_layers_down.ends
layer_down(flows, edge_aggr, bpc, mars,
layer_start, layer_end, num_examples;
mine, maxe, debug)
layer_start = layer_end + 1
end
nothing
end
##################################################################################
# Downward pass for input nodes
##################################################################################
function input_flows_circuit_kernel(flows, nodes, input_node_ids, heap, data,
example_ids, num_ex_threads::Int32, node_work::Int32)
threadid = ((blockIdx().x - one(Int32)) * blockDim().x) + threadIdx().x
node_batch, ex_id = fldmod1(threadid, num_ex_threads)
node_start = one(Int32) + (node_batch - one(Int32)) * node_work
node_end = min(node_start + node_work - one(Int32), length(input_node_ids))
@inbounds if ex_id <= length(example_ids)
for node_id = node_start : node_end
orig_ex_id::Int32 = example_ids[ex_id]
orig_node_id::UInt32 = input_node_ids[node_id]
node_flow::Float32 = flows[ex_id, orig_node_id]
inputnode = nodes[orig_node_id]::BitsInput
variable = inputnode.variable
value = data[orig_ex_id, variable]
flow(dist(inputnode), value, node_flow, heap)
end
end
nothing
end
function input_flows_circuit(flows, bpc, data, example_ids; mine, maxe, debug=false)
num_examples = length(example_ids)
num_input_nodes = length(bpc.input_node_ids)
dummy_args = (flows, bpc.nodes, bpc.input_node_ids,
bpc.heap, data, example_ids, Int32(1), Int32(1))
kernel = @cuda name="input_flows_circuit" launch=false input_flows_circuit_kernel(dummy_args...)
config = launch_configuration(kernel.fun)
threads, blocks, num_example_threads, node_work =
balance_threads(num_input_nodes, num_examples, config; mine, maxe)
args = (flows, bpc.nodes, bpc.input_node_ids,
bpc.heap, data, example_ids, Int32(num_example_threads), Int32(node_work))
if debug
println("Flows of input nodes")
@show threads blocks num_example_threads node_work num_nodes num_examples
CUDA.@time kernel(args...; threads, blocks)
else
kernel(args...; threads, blocks)
end
nothing
end
##################################################################################
# Full downward pass
##################################################################################
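# Summary note: one call to `probs_flows_circuit` runs the upward pass
# (marginals), the downward pass over inner edges (flows, accumulated into
# `edge_aggr` when given), and finally the input-node flow updates; the EM
# routines call this once per batch.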
function probs_flows_circuit(flows, mars, edge_aggr, bpc, data, example_ids; mine, maxe, debug=false)
eval_circuit(mars, bpc, data, example_ids; mine, maxe, debug)
flows_circuit(flows, edge_aggr, bpc, mars, length(example_ids); mine, maxe, debug)
input_flows_circuit(flows, bpc, data, example_ids; mine, maxe, debug)
nothing
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 10353 | using CUDA, Random
export loglikelihoods, loglikelihood
##################################################################################
# Init marginals
###################################################################################
function balance_threads(num_items, num_examples, config; mine, maxe, contiguous_warps=true)
block_threads = config.threads
# make sure the number of example threads is a multiple of 32
example_threads = contiguous_warps ? (cld(num_examples,32) * 32) : num_examples
num_item_batches = cld(num_items, maxe)
num_blocks = cld(num_item_batches * example_threads, block_threads)
if num_blocks < config.blocks
max_num_item_batch = cld(num_items, mine)
max_num_blocks = cld(max_num_item_batch * example_threads, block_threads)
num_blocks = min(config.blocks, max_num_blocks)
num_item_batches = (num_blocks * block_threads) ÷ example_threads
end
item_work = cld(num_items, num_item_batches)
@assert item_work*block_threads*num_blocks >= example_threads*num_items
block_threads, num_blocks, example_threads, item_work
end
function init_mar!_kernel(mars, nodes, data, example_ids, heap, num_ex_threads::Int32, node_work::Int32, input_init_func)
# this kernel follows the structure of the layer eval kernel; it would probably be faster to
# have 1 thread process multiple examples, rather than multiple nodes
threadid = ((blockIdx().x - one(Int32)) * blockDim().x) + threadIdx().x
node_batch, ex_id = fldmod1(threadid, num_ex_threads)
node_start = one(Int32) + (node_batch - one(Int32)) * node_work
node_end = min(node_start + node_work - one(Int32), length(nodes))
@inbounds if ex_id <= length(example_ids)
for node_id = node_start:node_end
node = nodes[node_id]
mars[ex_id, node_id] =
if (node isa BitsSum)
-Inf32
elseif (node isa BitsMul)
zero(Float32)
else
orig_ex_id::Int32 = example_ids[ex_id]
inputnode = node::BitsInput
variable = inputnode.variable
value = data[orig_ex_id, variable]
if ismissing(value)
input_init_func(dist(inputnode), heap)
else
loglikelihood(dist(inputnode), value, heap)
end
end
end
end
nothing
end
function init_mar!(mars, bpc, data, example_ids; mine, maxe, input_init_func, debug=false)
num_examples = length(example_ids)
num_nodes = length(bpc.nodes)
dummy_args = (mars, bpc.nodes, data, example_ids, bpc.heap, Int32(1), Int32(1), input_init_func)
kernel = @cuda name="init_mar!" launch=false init_mar!_kernel(dummy_args...)
config = launch_configuration(kernel.fun)
threads, blocks, num_example_threads, node_work =
balance_threads(num_nodes, num_examples, config; mine, maxe)
args = (mars, bpc.nodes, data, example_ids, bpc.heap,
Int32(num_example_threads), Int32(node_work), input_init_func)
if debug
println("Node initialization")
@show threads blocks num_example_threads node_work num_nodes num_examples
CUDA.@time kernel(args...; threads, blocks)
else
kernel(args...; threads, blocks)
end
nothing
end
##################################################################################
# Upward pass
##################################################################################
function logsumexp(x::Float32,y::Float32)
if isfinite(x) && isfinite(y)
# note: @fastmath does not work with infinite values, so do not apply above
@fastmath max(x,y) + log1p(exp(-abs(x-y)))
else
max(x,y)
end
end
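# Worked example: `logsumexp` computes log(exp(x) + exp(y)) while staying in log
# space, so combining the log-probabilities of 0.2 and 0.3 yields log(0.5); the
# non-finite branch keeps -Inf32 inputs well-behaved:
#
#     logsumexp(log(0.2f0), log(0.3f0)) ≈ log(0.5f0)   # true
#     logsumexp(-Inf32, -1f0) == -1f0                  # true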
function layer_up_kernel(mars, edges,
num_ex_threads::Int32, num_examples::Int32,
layer_start::Int32, edge_work::Int32, layer_end::Int32, sum_agg_func)
threadid = ((blockIdx().x - one(Int32)) * blockDim().x) + threadIdx().x
edge_batch, ex_id = fldmod1(threadid, num_ex_threads)
edge_start = layer_start + (edge_batch - one(Int32)) * edge_work
edge_end = min(edge_start + edge_work - one(Int32), layer_end)
@inbounds if ex_id <= num_examples
local acc::Float32
owned_node::Bool = false
for edge_id = edge_start:edge_end
edge = edges[edge_id]
tag = edge.tag
isfirstedge = isfirst(tag)
islastedge = islast(tag)
issum = edge isa SumEdge
owned_node |= isfirstedge
# compute probability coming from child
child_prob = mars[ex_id, edge.prime_id]
if edge.sub_id != 0
child_prob += mars[ex_id, edge.sub_id]
end
if issum
child_prob += edge.logp
end
# accumulate probability from child
if isfirstedge || (edge_id == edge_start)
acc = child_prob
elseif issum
acc = sum_agg_func(acc, child_prob)
else
acc += child_prob
end
# write to global memory
if islastedge || (edge_id == edge_end)
pid = edge.parent_id
if islastedge && owned_node
# no one else is writing to this global memory
mars[ex_id, pid] = acc
else
if issum
CUDA.@atomic mars[ex_id, pid] = sum_agg_func(mars[ex_id, pid], acc)
else
CUDA.@atomic mars[ex_id, pid] += acc
end
end
end
end
end
nothing
end
function layer_up(mars, bpc, layer_start, layer_end, num_examples; mine, maxe, sum_agg_func, debug=false)
edges = bpc.edge_layers_up.vectors
num_edges = layer_end - layer_start + 1
dummy_args = (mars, edges,
Int32(32), Int32(num_examples),
Int32(1), Int32(1), Int32(2), sum_agg_func)
kernel = @cuda name="layer_up" launch=false layer_up_kernel(dummy_args...)
config = launch_configuration(kernel.fun)
# configure thread/block balancing
threads, blocks, num_example_threads, edge_work =
balance_threads(num_edges, num_examples, config; mine, maxe)
args = (mars, edges,
Int32(num_example_threads), Int32(num_examples),
Int32(layer_start), Int32(edge_work), Int32(layer_end), sum_agg_func)
if debug
println("Layer $layer_start:$layer_end")
@show num_edges num_examples threads blocks num_example_threads edge_work
CUDA.@time kernel(args...; threads, blocks)
else
kernel(args...; threads, blocks)
end
nothing
end
# run entire circuit
function eval_circuit(mars, bpc, data, example_ids; mine, maxe, debug=false)
input_init_func(dist, heap) =
zero(Float32)
sum_agg_func(x::Float32, y::Float32) =
logsumexp(x, y)
init_mar!(mars, bpc, data, example_ids; mine, maxe, input_init_func, debug)
layer_start = 1
for layer_end in bpc.edge_layers_up.ends
layer_up(mars, bpc, layer_start, layer_end, length(example_ids); mine, maxe, sum_agg_func, debug)
layer_start = layer_end + 1
end
nothing
end
#################################
### Full Epoch Likelihood
#################################
"""
prep_memory(reuse, sizes, exact = map(x -> true, sizes))
Mostly used internally. Prepares memory of the specified size, reusing `reuse` if possible to avoid memory allocation/deallocation.
"""
function prep_memory(reuse, sizes, exact = map(x -> true, sizes))
if isnothing(reuse)
return CuArray{Float32}(undef, sizes...)
else
@assert ndims(reuse) == length(sizes)
for d = 1:length(sizes)
if exact[d]
@assert size(reuse, d) == sizes[d]
else
@assert size(reuse, d) >= sizes[d]
end
end
return reuse
end
end
"""
Cleans up allocated memory. Used internally.
"""
function cleanup_memory(used::CuArray, reused)
if used !== reused
CUDA.unsafe_free!(used)
end
end
function cleanup_memory(used_reused::Tuple...)
for (used, reused) in used_reused
cleanup_memory(used, reused)
end
end
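# Usage sketch for the prep/cleanup pair (hypothetical sizes): when no reusable
# buffer is passed, `prep_memory` allocates and `cleanup_memory` frees it again;
# when a matching buffer is reused, cleanup is a no-op:
#
#     mars = prep_memory(nothing, (512, num_nodes), (false, true))
#     # ... use `mars` ...
#     cleanup_memory(mars, nothing)   # frees, since `mars` was freshly allocated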
"""
loglikelihoods(bpc::CuBitsProbCircuit, data::CuArray; batch_size, mars_mem = nothing)
Returns loglikelihoods for each datapoint on gpu. Missing values should be denoted by `missing`.
- `bpc`: BitCircuit on gpu
- `data`: CuArray{Union{Missing, data_types...}}
- `batch_size`
- `mars_mem`: Not required, advanced usage. CuMatrix to reuse memory and reduce allocations. See `prep_memory` and `cleanup_memory`.
"""
function loglikelihoods(bpc::CuBitsProbCircuit, data::CuArray;
batch_size, mars_mem = nothing,
mine=2, maxe=32, debug=false)
num_examples = size(data)[1]
num_nodes = length(bpc.nodes)
marginals = prep_memory(mars_mem, (batch_size, num_nodes), (false, true))
log_likelihoods = CUDA.zeros(Float32, num_examples)
for batch_start = 1:batch_size:num_examples
batch_end = min(batch_start+batch_size-1, num_examples)
batch = batch_start:batch_end
num_batch_examples = length(batch)
eval_circuit(marginals, bpc, data, batch; mine, maxe, debug)
log_likelihoods[batch_start:batch_end] .= @view marginals[1:num_batch_examples, end]
end
cleanup_memory(marginals, mars_mem)
return log_likelihoods
end
"""
loglikelihood(bpc::CuBitsProbCircuit, data::CuArray; batch_size, mars_mem = nothing)
Computes the average loglikelihood of the circuit given the data using the GPU. See [`loglikelihoods`](@ref) for more details.
"""
function loglikelihood(bpc::CuBitsProbCircuit, data::CuArray;
batch_size, mars_mem = nothing,
mine=2, maxe=32, debug=false)
lls_gpu = loglikelihoods(bpc, data; batch_size, mars_mem, mine, maxe, debug)
lls = Array(lls_gpu)
CUDA.unsafe_free!(lls_gpu)
return sum(lls) / length(lls)
end
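# Usage sketch (hypothetical `bpc` and GPU-resident test data):
#
#     lls = loglikelihoods(bpc, test_gpu; batch_size = 512)   # one LL per example
#     avg = loglikelihood(bpc, test_gpu; batch_size = 512)    # their average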
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 4143 | export loglikelihoods, loglikelihood,
loglikelihoods_vectorized
"""
loglikelihoods(pc::ProbCircuit, data::Matrix)
Computes loglikelihoods of the circuit `pc` over the `data` on cpu. Linearizes the circuit and computes the marginals in batches.
"""
function loglikelihoods(pc::ProbCircuit, data::Matrix; batch_size, Float=Float32)
num_examples = size(data, 1)
log_likelihoods = zeros(Float, num_examples)
# Linearize PC
linPC = linearize(pc)
node2idx = Dict{ProbCircuit, UInt32}()
for (i, node) in enumerate(linPC)
node2idx[node] = i
end
nodes = size(linPC, 1)
mars = zeros(Float, (batch_size, nodes))
for batch_start = 1:batch_size:num_examples
batch_end = min(batch_start + batch_size - 1, num_examples)
batch = batch_start:batch_end
num_batch_examples = length(batch)
eval_circuit!(mars, linPC, data, batch; node2idx, Float)
log_likelihoods[batch_start:batch_end] .= mars[1:num_batch_examples, end]
mars .= zero(Float) # faster to zero out here rather than only in MulNodes
end
return log_likelihoods
end
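# Usage sketch (hypothetical `pc::ProbCircuit`; `missing` entries are
# marginalized out):
#
#     data = Matrix{Union{Missing,Bool}}([true false; missing true])
#     lls = loglikelihoods(pc, data; batch_size = 2)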
"""
eval_circuit!(mars, linPC::AbstractVector{<:ProbCircuit}, data::Matrix, example_ids; node2idx::Dict{ProbCircuit, UInt32}, Float=Float32)
Used internally. Evaluates the marginals of the circuit on cpu. Stores the values in `mars`.
- `mars`: (batch_size, nodes)
- `linPC`: linearized PC. (i.e. `linearize(pc)`)
- `data`: data Matrix (num_examples, features)
- `example_ids`: Array or collection of ids for current batch
- `node2idx`: Index of each ProbCircuit node in the linearized circuit
"""
function eval_circuit!(mars, linPC::AbstractVector{<:ProbCircuit}, data::Matrix, example_ids;
node2idx::Dict{ProbCircuit, UInt32}, Float=Float32)
@inbounds for (mars_node_idx, node) in enumerate(linPC)
if isinput(node)
for (ind, example_idx) in enumerate(example_ids)
mars[ind, mars_node_idx] = ismissing(data[example_idx, first(randvars(node))]) ? zero(Float) : loglikelihood(dist(node), data[example_idx, first(randvars(node))])
end
elseif ismul(node)
for ch in inputs(node)
mars[:, mars_node_idx] .+= @view mars[:, node2idx[ch]]
end
else
@assert issum(node)
mars[:, mars_node_idx] .= typemin(Float)
for (cidx, ch) in enumerate(inputs(node))
child_mar_idx = node2idx[ch]
mars[:, mars_node_idx] .= logsumexp.(mars[:, mars_node_idx], mars[:, child_mar_idx] .+ node.params[cidx])
end
end
end
return nothing
end
"""
loglikelihood(root::ProbCircuit, data::Matrix, example_id; Float=Float32)
Computes marginal loglikelihood recursively on cpu for a single instance `data[example_id, :]`.
**Note**: Quite slow, only use for demonstration/educational purposes.
"""
function loglikelihood(root::ProbCircuit, data::Matrix, example_id; Float=Float32)
f_i(node) = begin
val = data[example_id, first(randvars(node))]
ismissing(val) ? Float(0.0) : loglikelihood(dist(node), val)
end
f_m(node, ins) = sum(ins)
f_s(node, ins) = reduce(logsumexp, node.params .+ ins)
foldup_aggregate(root, f_i, f_m, f_s, Float)
end
"""
**Note**: Experimental; will be removed or renamed later.
"""
function loglikelihoods_vectorized(root::ProbCircuit, data::Matrix; Float=Float32)
function logsumexp_(vals::Vector{Float32})
reduce(logsumexp, vals)
end
f_i(node) = begin
[ismissing(data[idx, first(randvars(node))]) ? Float(0.0) : loglikelihood(dist(node), data[idx, first(randvars(node))]) for idx=1:size(data,1)]
end
f_m(node, ins) = begin
sum(ins)
end
f_s(node, ins) = begin
entry(i, data_idx) = node.params[i] + ins[i][data_idx]
ans = zeros(Float, size(data, 1))
for idx = 1:size(data, 1)
ans[idx] = logsumexp_([entry(i, idx) for i=1:size(node.params, 1)])
end
ans
end
foldup_aggregate(root, f_i, f_m, f_s, Vector{Float})
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 7676 |
"""
MAP(bpc::CuBitsProbCircuit, data::CuArray; batch_size, mars_mem=nothing)
Returns the MAP states for a given circuit and data on gpu. Missing values should be denoted as `missing`.
Note that the MAP states are exact only when the circuit is both decomposable and deterministic; otherwise they are just an approximation.
- `bpc`: BitCircuit on gpu
- `data`: CuArray{Union{Missing, data_types...}}
- `batch_size`
- `mars_mem`: Not required, advanced usage. CuMatrix to reuse memory and reduce allocations. See `prep_memory` and `cleanup_memory`.
"""
function MAP(bpc::CuBitsProbCircuit, data::CuArray;
batch_size, mars_mem=nothing,
mine=2,maxe=32, debug=false)
num_examples = size(data, 1)
num_nodes = length(bpc.nodes)
marginals = prep_memory(mars_mem, (batch_size, num_nodes), (false, true))
init_input_heap!(bpc; debug)
# (TODO) Kernel does not compile if there is no Missing in eltype(states)
states = CuArray{Union{Missing, eltype(data)}}(undef, size(data)...)
CUDA.copy!(states, data)
for batch_start = 1:batch_size:num_examples
batch_end = min(batch_start+batch_size-1, num_examples)
batch = batch_start:batch_end
num_batch_examples = length(batch)
eval_circuit_max!(marginals, bpc, data, batch; mine, maxe, debug = false)
map_downward!(marginals, bpc, states, batch; debug)
end
cleanup_memory(marginals, mars_mem)
return states
end
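# Usage sketch (hypothetical names): impute `missing` entries with their
# (approximate) MAP assignment under the circuit:
#
#     data = CuArray{Union{Missing,Bool}}([true missing; missing false])
#     states = MAP(bpc, data; batch_size = 2)   # same shape, missings filled in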
function init_input_heap!(bpc::CuBitsProbCircuit; debug = false)
num_nodes = length(bpc.nodes)
num_input_nodes = length(bpc.input_node_ids)
args = (bpc.nodes, bpc.input_node_ids, bpc.heap)
kernel = @cuda name="init_input_heap!" launch=false init_input_heap_kernel!(args...)
threads = launch_configuration(kernel.fun).threads
blocks = cld(num_input_nodes, threads)
if debug
println("Init input MAP State and MAP-LLs on heap")
@show threads blocks num_input_nodes
CUDA.@time kernel(args...; threads, blocks)
else
kernel(args...; threads, blocks)
end
end
function init_input_heap_kernel!(nodes, input_node_ids, heap)
node_id = ((blockIdx().x - one(Int32)) * blockDim().x) + threadIdx().x
if node_id <= length(input_node_ids)
orig_node_id::UInt32 = input_node_ids[node_id]
inputnode = nodes[orig_node_id]::BitsInput
init_heap_map_state!(dist(inputnode), heap)
init_heap_map_loglikelihood!(dist(inputnode), heap)
end
nothing
end
struct CuStack
# parallel stacks for each example (max stack size is features + 3 which is preallocated)
mem::CuMatrix{Int32}
# Index of Top of each stack for each example
tops::CuArray{UInt32}
CuStack(examples, features) = begin
new(CUDA.zeros(Int32, examples, features + 3),
CUDA.zeros(UInt32, examples))
end
end
function pop_cuda!(stack_mem, stack_tops, i)
# Empty Stack
if stack_tops[i] == zero(UInt32)
return zero(UInt32)
else
val = stack_mem[i, stack_tops[i]]
CUDA.@atomic stack_tops[i] -= one(eltype(stack_tops))
return val
end
end
function push_cuda!(stack_mem, stack_tops, val, i)
stack_tops[i] += one(eltype(stack_tops))
CUDA.@cuassert stack_tops[i] <= size(stack_mem, 2) "CUDA stack overflow"
stack_mem[i, stack_tops[i]] = val
return nothing
end
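# Stack conventions (summary note): each example owns one row of `mem`;
# `pop_cuda!` returns zero(UInt32) on an empty stack, so node id 0 doubles as
# the termination sentinel in `map_downward_kernel!` below.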
function map_downward!(marginals::CuMatrix, bpc::CuBitsProbCircuit, states, batch; debug=false)
num_examples = length(batch)
num_nodes = length(bpc.nodes)
stack = CuStack(num_examples, size(states, 2))
# Push root node to all stacks
stack.tops .= 1
stack.mem[:, 1] .= num_nodes
CUDA.@sync begin
dummy_args = (marginals, states, stack.mem, stack.tops,
bpc.nodes, bpc.node_begin_end, bpc.edge_layers_up.vectors,
bpc.heap, batch)
kernel = @cuda name="map_downward!" launch=false map_downward_kernel!(dummy_args...)
config = launch_configuration(kernel.fun)
threads = config.threads
blocks = cld(size(states,1), threads)
args = (marginals, states, stack.mem, stack.tops,
bpc.nodes, bpc.node_begin_end, bpc.edge_layers_up.vectors,
bpc.heap, batch)
if debug
println("map_downward!...")
@show threads, blocks, num_examples, num_nodes
CUDA.@time kernel(args... ; threads, blocks)
else
kernel(args... ; threads, blocks)
end
end
nothing
end
function map_downward_kernel!(marginals, states, stack_mem, stack_tops, nodes, node_begin_end, edges, heap, batch)
index_x = ((blockIdx().x - one(Int32)) * blockDim().x) + threadIdx().x
stride_x = blockDim().x * gridDim().x
for ex_id = index_x:stride_x:size(batch, 1)
cur_node_id = pop_cuda!(stack_mem, stack_tops, ex_id)
while cur_node_id > zero(eltype(stack_mem))
cur_node = nodes[cur_node_id]
if cur_node isa BitsInput
example_id = batch[ex_id]
if ismissing(states[example_id, cur_node.variable])
map_value = map_state(dist(cur_node), heap)
states[example_id, cur_node.variable] = map_value
end
elseif cur_node isa BitsSum
max_pr = typemin(Float32)
chosen_edge = 1
for edge_ind = node_begin_end[cur_node_id].first: node_begin_end[cur_node_id].second
edge = edges[edge_ind]
# compute max-probability coming from child
child_prob = marginals[ex_id, edge.prime_id]
if edge.sub_id != zero(UInt32)
child_prob += marginals[ex_id, edge.sub_id]
end
if edge isa SumEdge
child_prob += edge.logp
end
if child_prob > max_pr
max_pr = child_prob
chosen_edge = edge_ind
end
end
# Push the chosen edge onto the stack
cur_edge = edges[chosen_edge]
push_cuda!(stack_mem, stack_tops, cur_edge.prime_id, ex_id)
if cur_edge.sub_id != zero(UInt32)
push_cuda!(stack_mem, stack_tops, cur_edge.sub_id, ex_id)
end
elseif cur_node isa BitsMul
for edge_ind = node_begin_end[cur_node_id].first: node_begin_end[cur_node_id].second
edge = edges[edge_ind]
push_cuda!(stack_mem, stack_tops, edge.prime_id, ex_id)
if edge.sub_id != zero(UInt32)
push_cuda!(stack_mem, stack_tops, edge.sub_id, ex_id)
end
end
end
# Pop the next Node (zero if empty)
cur_node_id = pop_cuda!(stack_mem, stack_tops, ex_id)
end
end
return nothing
end
# run entire circuit taking mode on inputs and max on sum nodes
function eval_circuit_max!(mars, bpc, data, example_ids; mine, maxe, debug=false)
input_init_func(dist, heap) =
map_loglikelihood(dist, heap)
sum_agg_func(x::Float32, y::Float32) =
max(x, y)
init_mar!(mars, bpc, data, example_ids; mine, maxe, input_init_func, debug)
layer_start = 1
for layer_end in bpc.edge_layers_up.ends
layer_up(mars, bpc, layer_start, layer_end, length(example_ids); mine, maxe, sum_agg_func, debug)
layer_start = layer_end + 1
end
nothing
end
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 4518 |
export MAP
"""
MAP(pc::ProbCircuit, data::Matrix; batch_size, Float=Float32)
Evaluate max a posteriori (MAP) state of the circuit for given input(s) on cpu.
**Note**: This algorithm is only exact if the circuit is both decomposable and deterministic.
If the circuit is only decomposable and not deterministic, this will give inexact results without guarantees.
"""
function MAP(pc::ProbCircuit, data::Matrix; batch_size, Float=Float32, return_map_prob=false)
num_examples = size(data, 1)
states = deepcopy(data)
# Linearize PC
linPC = linearize(pc)
node2idx = Dict{ProbCircuit, UInt32}()
for (i, node) in enumerate(linPC)
node2idx[node] = i
end
nodes = size(linPC, 1)
max_mars = zeros(Float, (batch_size, nodes))
map_probs = zeros(Float, num_examples);
for batch_start = 1:batch_size:num_examples
batch_end = min(batch_start + batch_size - 1, num_examples)
batch = batch_start:batch_end
num_batch_examples = length(batch)
max_mars .= zero(Float) # faster to zero out here rather than only in MulNodes
eval_circuit_max!(max_mars, linPC, data, batch; node2idx, Float)
map_probs[batch_start:batch_end] .= max_mars[1:num_batch_examples, end]
for (batch_idx, example_idx) in enumerate(batch)
map_down_rec!(max_mars, pc, data, states, batch_idx, example_idx; node2idx, Float)
end
end
if return_map_prob
return states, map_probs
else
return states
end
end
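# Usage sketch (hypothetical `pc` and data): with `return_map_prob = true` the
# log-probability of the maximizing state is returned per example as well:
#
#     states, map_probs = MAP(pc, data; batch_size = 16, return_map_prob = true)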
"""
map_down_rec!(mars, node::ProbCircuit, data, states::Matrix, batch_idx, example_idx; node2idx::Dict{ProbCircuit, UInt32}, Float=Float32)
Downward pass on cpu for MAP. Recursively chooses the best (max) sum node children according to the "MAP upward pass" values.
Updates the missing values with map_state of that input node.
"""
function map_down_rec!(mars, node::ProbCircuit, data, states::Matrix, batch_idx, example_idx;
node2idx::Dict{ProbCircuit, UInt32}, Float=Float32)
if isinput(node)
if ismissing(data[example_idx, first(randvars(node))])
states[example_idx, first(randvars(node))] = map_state(dist(node))
end
elseif ismul(node)
for ch in inputs(node)
map_down_rec!(mars, ch, data, states, batch_idx, example_idx; node2idx)
end
elseif issum(node)
best_value = typemin(Float)
best_child = nothing
for (cidx, ch) in enumerate(inputs(node))
child_mar_idx = node2idx[ch]
val = mars[batch_idx, child_mar_idx] + node.params[cidx]
if val > best_value
best_value = val
best_child = ch
end
end
map_down_rec!(mars, best_child, data, states, batch_idx, example_idx; node2idx)
end
return nothing
end
"""
eval_circuit_max!(mars, linPC::AbstractVector{<:ProbCircuit}, data::Matrix, example_ids; node2idx::Dict{ProbCircuit, UInt32}, Float=Float32)
Used internally. Evaluates the MAP upward pass of the circuit on cpu. Stores the values in `mars`.
- `mars`: (batch_size, nodes)
- `linPC`: linearized PC. (i.e. `linearize(pc)`)
- `data`: data Matrix (num_examples, features)
- `example_ids`: Array or collection of ids for current batch
- `node2idx`: Index of each ProbCircuit node in the linearized circuit
"""
function eval_circuit_max!(mars, linPC::AbstractVector{<:ProbCircuit}, data::Matrix, example_ids;
node2idx::Dict{ProbCircuit, UInt32}, Float=Float32)
@inbounds for (mars_node_idx, node) in enumerate(linPC)
if isinput(node)
for (ind, example_idx) in enumerate(example_ids)
mars[ind, mars_node_idx] = if ismissing(data[example_idx, first(randvars(node))])
map_loglikelihood(dist(node))
else
loglikelihood(dist(node), data[example_idx, first(randvars(node))])
end
end
elseif ismul(node)
for ch in inputs(node)
mars[:, mars_node_idx] .+= mars[:, node2idx[ch]]
end
elseif issum(node)
mars[:, mars_node_idx] .= typemin(Float)
for (cidx, ch) in enumerate(inputs(node))
child_mar_idx = node2idx[ch]
mars[:, mars_node_idx] .= max.(mars[:, mars_node_idx], mars[:, child_mar_idx] .+ node.params[cidx])
end
end
end
return nothing
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 8863 |
"""
sample(bpc::CuBitsProbCircuit, num_samples::Int, num_rand_vars::Int, types; rng=default_rng())
Generate `num_samples` from the joint distribution of the circuit without any conditions.
Samples are generated on the GPU.
- `bpc`: Circuit on gpu (CuBitProbCircuit)
- `num_samples`: how many samples to generate
- `num_rand_vars`: number of random variables in the circuit
- `types`: Array of possible input types
- `rng`: (Optional) Random Number Generator
The size of the returned Array is `(num_samples, 1, num_rand_vars)`.
"""
function sample(bpc::CuBitsProbCircuit, num_samples::Int, num_rand_vars::Int, types;
rng = default_rng(), mars_mem=nothing, mine=2, maxe=32, debug=false)
data = CuMatrix{Union{Missing, types...}}([missing for j=1:1, i=1:num_rand_vars])
sample(bpc, num_samples, data; rng, mars_mem, mine, maxe, debug)
end
"""
sample(bpc::CuBitsProbCircuit, num_samples, data::CuMatrix; rng=default_rng())
Generate `num_samples` for each datapoint in `data` from the joint distribution of the circuit conditioned on the `data`.
Samples are generated using the GPU.
- `bpc`: Circuit on gpu (CuBitProbCircuit)
- `num_samples`: how many samples to generate
- `rng`: (Optional) Random Number Generator
The size of returned CuArray is `(num_samples, size(data, 1), size(data, 2))`.
"""
function sample(bpc::CuBitsProbCircuit, num_samples, data::CuMatrix;
mars_mem=nothing, mine=2, maxe=32,
rng=default_rng(), debug=false)
@assert num_samples > 0
num_examples = size(data, 1)
num_nodes = length(bpc.nodes)
states = CuArray{Union{Missing,eltype(data)}}(undef, num_samples, num_examples, size(data, 2))
# for now, compute all of the marginals in a single batch
batch = 1:num_examples
marginals = prep_memory(mars_mem, (num_examples, num_nodes), (false, true))
eval_circuit(marginals, bpc, data, batch; mine, maxe, debug)
sample_downward!(marginals, bpc, data, states, batch, rng; debug)
cleanup_memory(marginals, mars_mem)
return states
end
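# Usage sketch (hypothetical names): three conditional samples per row of
# `data_gpu`, plus an unconditional variant over Bool inputs:
#
#     samples = sample(bpc, 3, data_gpu)                # (3, rows, cols) CuArray
#     uncond  = sample(bpc, 3, num_rand_vars, [Bool])   # (3, 1, num_rand_vars)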
struct CuStack2D
# parallel grid of stacks
# size = (num_samples, num_examples, num_features + 3)
mem::CuArray{Int32, 3}
# Index of Top of each stack for each example
tops::CuMatrix{UInt32}
CuStack2D(samples, examples, features) = begin
new(CUDA.zeros(Int32, samples, examples, features + 3),
CUDA.zeros(UInt32, samples, examples))
end
end
function pop_cuda!(stack_mem, stack_tops, i, j)
# Empty Stack
if stack_tops[i, j] == zero(UInt32)
return zero(UInt32)
else
val = stack_mem[i, j, stack_tops[i, j]]
stack_tops[i, j] -= one(UInt32)
return val
end
end
function push_cuda!(stack_mem, stack_tops, val, i, j)
stack_tops[i, j] += one(eltype(stack_mem))
CUDA.@cuassert stack_tops[i, j] <= size(stack_mem, 3) "CUDA stack overflow"
stack_mem[i, j, stack_tops[i, j]] = val
return nothing
end
function all_empty(stack_tops)
all(x -> iszero(x), stack_tops)
end
function balance_threads_2d(num_examples, num_decisions, config)
total_threads_per_block = config.threads
lsb(n) = n ⊻ (n & (n - 1)) # isolate the lowest set bit
ratio_diff(a, b) = ceil(Int, num_examples/a) - ceil(Int, num_decisions/b)
n_lsb = lsb(total_threads_per_block)
options_d1 = [Int32(2^i) for i = 0 : log2(n_lsb)]
append!(options_d1, [total_threads_per_block / n_lsb * Int32(2^i) for i = 0 : log2(n_lsb)])
options_d2 = [Int32(total_threads_per_block / d1) for d1 in options_d1]
best_d1 = options_d1[1]
best_d2 = options_d2[1]
best_ratio = ratio_diff(best_d1, best_d2)
for (d1, d2) in zip(options_d1, options_d2)
cur_ratio = ratio_diff(d1, d2)
if abs(best_ratio) > abs(cur_ratio)
best_d1 = d1
best_d2 = d2
best_ratio = cur_ratio
end
end
threads = (best_d1, best_d2)
blocks = (ceil(Int, num_examples / threads[1]),
ceil(Int, num_decisions / threads[2]))
threads, blocks
end
function sample_downward!(marginals, bpc, data, states, batch, rng; debug)
CUDA.seed!(rand(rng, UInt))
num_examples = length(batch)
num_samples = size(states, 1)
num_nodes = length(bpc.nodes)
stack = CuStack2D(num_samples, num_examples, size(states, 3))
# Push root node to all stacks
stack.tops .= 1
stack.mem[:,:, 1] .= num_nodes
CUDA.@sync while true
rands = CUDA.rand(num_samples, num_examples)
dummy_args = (marginals, data, states, stack.mem, stack.tops,
bpc.nodes, bpc.node_begin_end, bpc.edge_layers_up.vectors,
bpc.heap, batch, rands)
kernel = @cuda name="sample_downward!" launch=false sample_downward_kernel!(dummy_args...)
config = launch_configuration(kernel.fun)
threads, blocks = balance_threads_2d(num_samples, num_examples, config)
args = (marginals, data, states, stack.mem, stack.tops,
bpc.nodes, bpc.node_begin_end, bpc.edge_layers_up.vectors,
bpc.heap, batch, rands)
if debug
print("sample downward step")
CUDA.@time kernel(args...; threads, blocks)
else
kernel(args...; threads, blocks)
end
all_empty(stack.tops) && break
end
return nothing
end
function sample_downward_kernel!(marginals, data, states, stack_mem, stack_tops,
nodes, node_begin_end, edges,
heap, batch, rands)
index_x = ((blockIdx().x - 1) * blockDim().x + threadIdx().x)
index_y = ((blockIdx().y - 1) * blockDim().y + threadIdx().y)
stride_x = (blockDim().x * gridDim().x)
stride_y = (blockDim().y * gridDim().y)
for s_id = index_x:stride_x:size(states,1)
for ex_id = index_y:stride_y:size(batch, 1)
cur_node_id = pop_cuda!(stack_mem, stack_tops, s_id, ex_id)
if cur_node_id > zero(eltype(stack_mem))
cur_node = nodes[cur_node_id]
if cur_node isa BitsInput
#### sample the input if missing
example_id = batch[ex_id]
if ismissing(data[example_id, cur_node.variable])
# marginals[ex_id, this_node] should be log(1) = 0 (because missing), so don't need to add that
threshold = CUDA.log(rands[s_id, ex_id])
sample_value = sample_state(dist(cur_node), threshold, heap)
states[s_id, example_id, cur_node.variable] = sample_value
else
states[s_id, example_id, cur_node.variable] = data[example_id, cur_node.variable]
end
elseif cur_node isa BitsSum
#### Choose which child of sum node to sample
chosen_edge = node_begin_end[cur_node_id].second ## give all numerical error probability to the last node
cumul_prob = typemin(Float32)
parent_node_id = edges[node_begin_end[cur_node_id].first].parent_id
threshold = CUDA.log(rands[s_id, ex_id]) + marginals[ex_id, parent_node_id]
for edge_ind = node_begin_end[cur_node_id].first: node_begin_end[cur_node_id].second
edge = edges[edge_ind]
child_prob = marginals[ex_id, edge.prime_id]
if edge.sub_id != zero(UInt32)
child_prob += marginals[ex_id, edge.sub_id]
end
if edge isa SumEdge
child_prob += edge.logp
end
cumul_prob = logsumexp(cumul_prob, child_prob)
if cumul_prob > threshold
chosen_edge = edge_ind
break
end
end
# Push the chosen edge into stack
cur_edge = edges[chosen_edge]
push_cuda!(stack_mem, stack_tops, cur_edge.prime_id, s_id, ex_id)
if cur_edge.sub_id != zero(UInt32)
push_cuda!(stack_mem, stack_tops, cur_edge.sub_id, s_id, ex_id)
end
elseif cur_node isa BitsMul
#### Just Push all children to stack
for edge_ind = node_begin_end[cur_node_id].first: node_begin_end[cur_node_id].second
edge = edges[edge_ind]
push_cuda!(stack_mem, stack_tops, edge.prime_id, s_id, ex_id)
if edge.sub_id != zero(UInt32)
push_cuda!(stack_mem, stack_tops, edge.sub_id, s_id, ex_id)
end
end
end
end
end
end
nothing
end
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 2818 | export sample
import Random: default_rng
"""
    sample(pc::ProbCircuit, num_samples, types; rng = default_rng())
Generate `num_samples` from the joint distribution of the circuit without any conditions.
Samples are generated on the CPU.
"""
function sample(pc::ProbCircuit, num_samples, types; rng = default_rng(), Float=Float32)
data = Matrix{Union{Missing, types...}}([missing for j=1:1, i=1:num_randvars(pc)])
sample(pc, num_samples, data; batch_size=1, rng, Float)
end
"""
    sample(pc::ProbCircuit, num_samples, data::Matrix; batch_size, rng = default_rng())
Generate `num_samples` from the joint distribution of the circuit conditioned on the `data`.
"""
function sample(pc::ProbCircuit, num_samples, data::Matrix; batch_size, rng = default_rng(), Float=Float32)
num_examples = size(data, 1)
# Linearize PC
linPC = linearize(pc)
node2idx = Dict{ProbCircuit, UInt32}()
for (i, node) in enumerate(linPC)
node2idx[node] = i
end
states = zeros(Union{Missing,eltype(data)}, num_samples, size(data, 1), size(data, 2))
nodes = size(linPC, 1)
values = zeros(Float, (batch_size, nodes))
batch = 1:num_examples # do all in one batch for now
eval_circuit!(values, linPC, data, batch; node2idx, Float)
sample_down(pc, values, states, data, num_samples; rng, node2idx, Float)
return states
end
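# Usage sketch (hypothetical `pc` over Bool inputs): 10 unconditional samples;
# the result has shape (10, 1, num_randvars(pc)):
#
#     samples = sample(pc, 10, [Bool])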
function sample_down(pc::ProbCircuit, values, states, data, num_samples; rng, node2idx::Dict{ProbCircuit, UInt32}, Float)
for (s_id, ex_id) = collect(Iterators.product(1:size(states,1), 1:size(states,2)))
sample_rec!(pc, states, values, data; s_id, ex_id, rng, node2idx)
end
return nothing
end
function sample_rec!(node::ProbCircuit, states, values, data; s_id, ex_id, rng, node2idx::Dict{ProbCircuit, UInt32})
if isinput(node)
if ismissing(data[ex_id, first(randvars(node))])
threshold = log(rand(rng))
states[s_id, ex_id, first(randvars(node))] = sample_state(dist(node), threshold)
else
states[s_id, ex_id, first(randvars(node))] = data[ex_id, first(randvars(node))]
end
elseif ismul(node)
for ch in inputs(node)
sample_rec!(ch, states, values, data; s_id, ex_id, rng, node2idx)
end
elseif issum(node)
sampled_child = inputs(node)[end]
threshold = log(rand(rng)) + values[ex_id, node2idx[node]]
cumul_prob = typemin(Float32)
for (cid, ch) in enumerate(inputs(node))
cumul_prob = logsumexp(cumul_prob, node.params[cid] + values[ex_id, node2idx[ch]])
if cumul_prob > threshold
sampled_child = ch
break
end
end
sample_rec!(sampled_child, states, values, data; s_id, ex_id, rng, node2idx)
end
return nothing
end
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 7337 | using CUDA
using ChowLiuTrees: learn_chow_liu_tree
using Graphs: SimpleGraph, SimpleDiGraph, bfs_tree, center, neighbors,
connected_components, induced_subgraph, nv, add_edge!, rem_edge!
using MetaGraphs: get_prop, set_prop!, MetaDiGraph, vertices, indegree, outneighbors
export hclt
"""
    hclt(data, num_hidden_cats; num_cats = nothing, shape = :directed, input_type = Literal, pseudocount = 0.1)
Learns HiddenChowLiuTree (hclt) circuit structure from data.
- `data`: Matrix or CuMatrix
- `num_hidden_cats`: Number of categories in hidden variables
- `input_type`: Distribution type for the inputs
- `num_cats`: Number of categories (in case of categorical inputs). Automatically deduced if not given explicitly.
"""
function hclt(data, num_hidden_cats;
num_cats = nothing,
shape = :directed,
input_type = Literal,
pseudocount = 0.1)
clt_edges = learn_chow_liu_tree(data; pseudocount, Float=Float32)
clt = clt_edges2graphs(clt_edges; shape)
if num_cats === nothing
num_cats = maximum(data) + 1
end
hclt_from_clt(clt, num_cats, num_hidden_cats; input_type)
end
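# Usage sketch (hypothetical binary training matrix `train_x`): learn an HCLT
# with 16 hidden categories per latent variable:
#
#     pc = hclt(train_x, 16; num_cats = 2, input_type = Literal)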
function hclt_from_clt(clt, num_cats, num_hidden_cats; input_type = Literal)
num_vars = nv(clt)
# meaning: `joined_leaves[i,j]` is a distribution of the hidden variable `i` having value `j`
# conditioned on the observed variable `i`
joined_leaves = categorical_leaves(num_vars, num_cats, num_hidden_cats, input_type)
# Construct the CLT circuit bottom-up
node_seq = bottom_up_order(clt)
for curr_node in node_seq
out_neighbors = outneighbors(clt, curr_node)
# meaning: `circuits' of leaf CLT nodes refer to a collection of marginal distribution Pr(X);
# `circuits' of an inner CLT node (corr. var Y) is a collection of joint distributions
# over itself and its child vars (corr. var X_1, ..., X_k): Pr(Y)Pr(X_1|Y)...Pr(X_k|Y)
if length(out_neighbors) == 0
# Leaf node
# We do not add hidden variables for leaf nodes
circuits = joined_leaves[curr_node, :]
set_prop!(clt, curr_node, :circuits, circuits)
else
# Inner node
# Each element in `child_circuits' represents the joint distribution of the child nodes,
# i.e., Pr(X_1)...Pr(X_k)
child_circuits = [get_prop(clt, child_node, :circuits) for child_node in out_neighbors]
if length(out_neighbors) > 1
child_circuits = [summate(multiply([child_circuit[cat_idx] for child_circuit in child_circuits])) for cat_idx = 1 : num_hidden_cats]
else
child_circuits = child_circuits[1]
end
# Pr(X_1)...Pr(X_k) -> Pr(Y)Pr(X_1|Y)...Pr(X_k|Y)
circuits = [summate(multiply.(child_circuits, joined_leaves[curr_node, :])) for cat_idx = 1 : num_hidden_cats]
set_prop!(clt, curr_node, :circuits, circuits)
end
end
get_prop(clt, node_seq[end], :circuits)[1] # A ProbCircuit node
end
function clt_edges2graphs(edgepair; shape=:directed)
vars = sort(collect(Set(append!(first.(edgepair), last.(edgepair)))))
@assert all(vars .== collect(1:maximum(vars))) "Variables are not contiguous"
nvar = length(vars)
MStree = SimpleGraph(nvar)
map(edgepair) do edge
add_edge!(MStree, edge[1], edge[2])
end
if shape == :directed
# Use the graph center of `MStree` as the root node
MetaDiGraph(bfs_tree(MStree, center(MStree)[1]))
elseif shape == :balanced
# iteratively pick the graph center to make a balanced clt
clt = SimpleDiGraph(nvar)
function find_center_ite(g, vmap, clt_map, clt)
# `vmap` map current `g` index to upper layer graph id
# `clt_map` map sub graph to `clt`
# return root
if nv(g) == 1
return vmap[1]
else
root = center(g)[1]
for dst in collect(neighbors(g, root))
rem_edge!(g, root, dst)
sub_nodes = filter(x -> dst in x, connected_components(g))
add_edge!(g, root, dst)
sub_g, sub_vmap = induced_subgraph(g, sub_nodes[1])
sub_root = find_center_ite(sub_g, sub_vmap, clt_map[sub_vmap], clt)
add_edge!(clt, clt_map[root], clt_map[sub_root])
end
return vmap[root]
end
end
find_center_ite(MStree, collect(1:nvar), collect(1:nvar), clt)
MetaDiGraph(clt)
else
error("Shape $shape not found in function `clt_edges2graphs`.")
end
end
function categorical_leaves(num_vars, num_cats, num_hidden_cats,
input_type::Type{Bernoulli})
@assert num_cats == 2 "The number of categories must be two when the leaf nodes are Bernoulli."
error("TODO: implement way of replacing sum nodes by Berns")
end
function categorical_leaves(num_vars, num_cats, num_hidden_cats,
input_type::Type{Literal})
if num_cats == 2
plits = [PlainInputNode(var, Literal(true)) for var=1:num_vars]
nlits = [PlainInputNode(var, Literal(false)) for var=1:num_vars]
leaves = hcat([plits, nlits]...)
[summate(leaves[var, :])
for var=1:num_vars, copy=1:num_hidden_cats]
else # Use Literal to model categorical distributions
nbits = Int(ceil(log2(num_cats)))
plits = [PlainInputNode((var-1)*nbits+lit, Literal(true))
for var=1:num_vars, lit=1:nbits]
nlits = [PlainInputNode((var-1)*nbits+lit, Literal(false))
for var=1:num_vars, lit=1:nbits]
to_bits(cat, nbits) = begin
bits = zeros(Bool, nbits)
for b = 1 : nbits
bits[nbits-b+1] = ((cat % 2) == 1)
cat = cat ÷ 2
end
bits
end
cat_leaf(var, _) = begin
cat_lits = map(1:num_cats) do cat
bits = to_bits(cat, nbits)
lits = [ifelse(bits[l], plits[var,l], nlits[var,l]) for l=1:nbits]
multiply(lits...)
end
summate(cat_lits...)
end
cat_leaf.(1:num_vars, (1:num_hidden_cats)')
end
end
function categorical_leaves(num_vars, num_cats, num_hidden_cats, input_type::Type{Categorical})
[PlainInputNode(var, Categorical(num_cats))
for var=1:num_vars, copy=1:num_hidden_cats]
end
function categorical_leaves(num_vars, num_cats, num_hidden_cats, input_type::Type{Binomial})
[PlainInputNode(var, Binomial(UInt32(num_cats)))
for var=1:num_vars, copy=1:num_hidden_cats]
end
function bottom_up_order(g::MetaDiGraph)
num_nodes = length(vertices(g))
node_seq = Array{UInt32, 1}(undef, num_nodes)
idx = 1
function dfs(node_idx)
out_neighbors = outneighbors(g, node_idx)
for out_neighbor in out_neighbors
dfs(out_neighbor)
end
node_seq[idx] = node_idx
idx += 1
end
root_node_idx = findall(x->x==0, indegree(g))[1]
dfs(root_node_idx)
@assert idx == num_nodes + 1
node_seq
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 11141 | export RegionGraph,
random_region_graph,
region_graph_2_pc,
RAT_InputFunc,
RAT
import Random: shuffle
import DirectedAcyclicGraphs: Tree
"Root of region graph node hierarchy"
abstract type RegionGraph <: Tree end
######
## Partition
#####
mutable struct Partition
partition::AbstractVector{T} where T <: RegionGraph
Partition(n::AbstractVector{T}) where T <: RegionGraph = begin
new(n)
end
end
import Base: getindex, size, length
@inline Base.size(n::Partition) = size(n.partition)
@inline Base.length(n::Partition) = length(n.partition)
@inline Base.getindex(n::Partition, i::Int) = n.partition[i]
mutable struct RegionGraphInnerNode <: RegionGraph
partitions::AbstractVector{Partition}
variables::BitSet
parent::Union{Nothing, RegionGraphInnerNode}
RegionGraphInnerNode(partitions::AbstractVector{Partition}) = begin
for partition in partitions
for i = 1:length(partition), j = i+1 : length(partition)
@assert isdisjoint(variables(partition[i]), variables(partition[j]))
end
end
scope = variables(partitions[1][1])
for ind = 2:size(partitions[1])[1]
scope = scope ∪ variables(partitions[1][ind])
end
this = new(partitions, scope, nothing)
for partition in partitions
for i = 1:size(partition)[1]
@assert isnothing(partition[i].parent)
partition[i].parent = this
end
end
this
end
end
mutable struct RegionGraphLeafNode <: RegionGraph
variables::BitSet
parent::Union{Nothing, RegionGraphInnerNode}
RegionGraphLeafNode(v) = new(v, nothing)
end
#####################
# Constructors
#####################
RegionGraph(v::AbstractVector) = RegionGraphLeafNode(BitSet(v))
RegionGraph(partitions::AbstractVector{Partition}) = RegionGraphInnerNode(partitions)
#####################
# Traits
#####################
@inline DirectedAcyclicGraphs.NodeType(::Type{<:RegionGraphInnerNode}) = Inner()
@inline DirectedAcyclicGraphs.NodeType(::Type{<:RegionGraphLeafNode}) = Leaf()
###################
## Methods
#################
import DirectedAcyclicGraphs: children
@inline children(n::RegionGraphInnerNode) = n.partitions
@inline variables(n::RegionGraphInnerNode) = n.variables
@inline variables(n::RegionGraphLeafNode) = n.variables
##################################################################
"""
random_region_graph(X::AbstractVector, depth::Int = 5, replicas::Int = 2, num_splits::Int = 2)
- `X`: Vector of all variables to include; for the root region
- `depth`: how many layers to do splits
- `replicas`: number of replicas or partitions (replicas are only used for the root region; every other inner region has exactly 1 partition, and leaf regions have 0)
- `num_splits`: number of splits for each partition; splits variables into random, equally sized regions
"""
function random_region_graph(X::AbstractVector;
depth::Int = 5, replicas::Int = 2, num_splits::Int = 2)::RegionGraph
if length(X) < 2 || depth == 0
# Cannot/should not split anymore
return RegionGraph(X)
end
partitions = Vector{Partition}()
for repeat = 1 : replicas
cur_rg = split_rg(X, depth; num_splits=num_splits)
push!(partitions, cur_rg)
end
# Validation: Each Partition should include the same set of variables
prev_scope = nothing
for cur_partition in partitions
cur_scope = variables(cur_partition[1])
for i = 2:length(cur_partition)
cur_scope = cur_scope ∪ variables(cur_partition[i])
end
if !isnothing(prev_scope)
@assert prev_scope == cur_scope "All partitions should include the same set of variables."
else
prev_scope = cur_scope
end
end
RegionGraphInnerNode(partitions)
end
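# Usage sketch: a random region graph over 20 variables, split to depth 3 with
# 2 replicas at the root:
#
#     rg = random_region_graph(collect(1:20); depth = 3, replicas = 2, num_splits = 2)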
function split_rg(variables::AbstractVector, depth::Int; num_splits::Int = 2)::Partition
partition(x, n) = begin
## TODO; might not work if num_splits > 2
sz = ceil(Int, (length(x) / n))
[ x[i:min(i+sz-1, length(x))] for i = 1:sz:length(x) ]
end
shuffle_variables = shuffle(variables)
splits = partition(shuffle_variables, num_splits)
cur_partition_regions = Vector{RegionGraph}()
for split in splits
# only 1 replicas for non-root
rg_node = random_region_graph(split; depth=depth-1, replicas=1, num_splits)
push!(cur_partition_regions, rg_node)
end
Partition(cur_partition_regions)
end
########################################
### Rat-SPNs in Juice PC data structure
########################################
"""
Makes sure a sum node does not have too many children. Builds balanced sums of sums to reduce the children count.
"""
function balance_sum(children::Vector{ProbCircuit}, balance_childs_parents)::PlainSumNode
if balance_childs_parents
if length(children) <= 4
PlainSumNode(children)
else
# split children into two disjoint halves
ls = 1:(length(children) ÷ 2)
rs = (length(children) ÷ 2 + 1):length(children)
PlainSumNode([balance_sum(children[ls], balance_childs_parents), balance_sum(children[rs], balance_childs_parents)])
end
else
PlainSumNode(children)
end
end
"""
Makes sure input nodes don't have too many parents.
Makes a dummy sum node for each input per partition. Then nodes corresponding to the partition use
the dummy node as their children instead of the input node.
This way, instead of num_nodes_root * num_nodes_leaf parent nodes, we only have num_nodes_root parent nodes.
"""
function balanced_fully_factorized_leaves(variables::AbstractVector; input_func::Function, num_nodes_leaf)::Vector{ProbCircuit}
var_2_dummy_inputs(var) = begin
input_func(var)
end
balanced_recurse(variables::AbstractVector)::Vector{ProbCircuit} = begin
# Leaf
if length(variables) == 1
[PlainSumNode([var_2_dummy_inputs(variables[1])]) for node=1:num_nodes_leaf]
else
mid = length(variables) ÷ 2
lefts = balanced_recurse(variables[1:mid])
rights = balanced_recurse(variables[mid+1:end])
[PlainSumNode([PlainMulNode([left, right])]) for (left, right) in zip(lefts, rights)]
end
end
balanced_recurse(variables)
end
"""
region_graph_2_pc(node::RegionGraph; input_func, num_nodes_root, num_nodes_region, num_nodes_leaf, balance_childs_parents)
- `input_func`: function to generate a new input node for a variable when calling `input_func(var)`
- `num_nodes_root`: number of sum nodes in the root region
- `num_nodes_leaf`: number of sum nodes per leaf region
- `num_nodes_region`: number of sum nodes in each region except the root and leaves
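# Examples
An illustrative call (argument values chosen arbitrarily):
```julia
rg = random_region_graph([Var(i) for i = 1:8]; depth = 2, replicas = 2)
roots = region_graph_2_pc(rg; input_func = RAT_InputFunc(Literal),
    num_nodes_root = 1, num_nodes_region = 3, num_nodes_leaf = 2)
```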
"""
function region_graph_2_pc(node::RegionGraph; input_func::Function,
num_nodes_root::Int = 4, num_nodes_region::Int = 3, num_nodes_leaf::Int = 2, balance_childs_parents=true)::Vector{ProbCircuit}
sum_nodes = Vector{ProbCircuit}()
if isleaf(node)
vars = Vector(collect(variables(node)))
sum_nodes = balanced_fully_factorized_leaves(vars; num_nodes_leaf, input_func)
else
root_children = Vector{ProbCircuit}()
# For each replication; usually only > 1 at the root
for partition in node.partitions
partition_mul_nodes = Vector{ProbCircuit}()
@assert length(partition) == 2 "Only supporting partitions of size 2 at the moment"
lefts = region_graph_2_pc(partition[1]; input_func, num_nodes_root, num_nodes_region, num_nodes_leaf, balance_childs_parents)
rights = region_graph_2_pc(partition[2]; input_func, num_nodes_root, num_nodes_region, num_nodes_leaf, balance_childs_parents)
@assert all([issum(l) for l in lefts])
@assert all([issum(r) for r in rights])
for l in lefts, r in rights
mul_node = PlainMulNode([l, r])
push!(partition_mul_nodes, mul_node)
end
dummy_sum_node = balance_sum(partition_mul_nodes, balance_childs_parents)
push!(root_children, dummy_sum_node)
end
# Repeat sum nodes based on the position in the region graph
if isnothing(node.parent)
# Root region
for i = 1:num_nodes_root
sum_node = balance_sum(root_children, balance_childs_parents)
push!(sum_nodes, sum_node)
end
else
# Inner region
for i = 1:num_nodes_region
sum_node = balance_sum(root_children, balance_childs_parents)
push!(sum_nodes, sum_node)
end
end
end
sum_nodes
end
"""
RAT(num_features; input_func::Function = RAT_InputFunc(Literal), num_nodes_region, num_nodes_leaf, rg_depth, rg_replicas, num_nodes_root = 1, balance_childs_parents = false)
Generate a RAT-SPN structure. First, it generates a random region graph with depth `rg_depth` and `rg_replicas` replicas.
Then it uses the random region graph to generate a ProbCircuit conforming to that region graph.
- `num_features`: number of features in the dataset, assuming variables x_1, ..., x_n
- `input_func`: function to generate a new input node for a variable when calling `input_func(var)`.
The list of hyperparameters is:
- `rg_depth`: number of layers of splits to perform in the region graph
- `rg_replicas`: number of replicas or partitions (replicas are only used for the root region; other inner regions have exactly 1 partition, and leaf regions have 0 partitions)
- `num_nodes_root`: number of sum nodes in the root region
- `num_nodes_leaf`: number of sum nodes per leaf region
- `num_nodes_region`: number of sum nodes in each region except the root and leaves
- `num_splits`: number of splits for each partition; splits variables into random, equally sized regions (currently fixed to the region-graph default of 2, as `RAT` does not forward it)
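# Examples
An illustrative call (hyperparameter values chosen arbitrarily):
```julia
pc = RAT(20; input_func = RAT_InputFunc(Categorical, 5),
    num_nodes_root = 1, num_nodes_region = 3, num_nodes_leaf = 2,
    rg_depth = 2, rg_replicas = 2)
```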
"""
function RAT(num_features; input_func::Function = RAT_InputFunc(Literal), num_nodes_region, num_nodes_leaf, rg_depth, rg_replicas, num_nodes_root = 1, balance_childs_parents = false)
region_graph = random_region_graph([Var(i) for i=1: num_features]; depth=rg_depth, replicas=rg_replicas);
circuit = region_graph_2_pc(region_graph; input_func, num_nodes_root, num_nodes_region, num_nodes_leaf, balance_childs_parents)[1];
init_parameters(circuit; perturbation = 0.4)
return circuit
end
"""
Default `input_func` for different types. This function returns another function `input_func`.
Then `input_func(var)` generates a new input node for variable `var` with the desired distribution.
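# Examples
For instance, for categorical inputs with 5 categories (an arbitrary choice):
```julia
input_func = RAT_InputFunc(Categorical, 5)
node = input_func(Var(1))   # equivalent to InputNode(1, Categorical(5))
```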
"""
function RAT_InputFunc(input_type::Type, args...)
if input_type == Literal
function lit_func(var)
PlainSumNode([
InputNode(var, Literal(true)),
InputNode(var, Literal(false))])
end
return lit_func
elseif input_type == Categorical
function cat_func(var)
InputNode(var, Categorical(args...))
end
return cat_func
elseif input_type == Binomial
function bin_func(var)
InputNode(var, Binomial(args...))
end
return bin_func
else
@assert false "No default `input_func` for Input Type $(input_type)."
end
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 165 | using Aqua
using ProbabilisticCircuits
using Test
@testset "Aqua tests" begin
Aqua.test_all(ProbabilisticCircuits,
ambiguities = false)
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 3140 | using Test, ProbabilisticCircuits, CUDA
using ProbabilisticCircuits: BitsProbCircuit, CuBitsProbCircuit,
update_parameters, SumEdge, BitsInput
using DirectedAcyclicGraphs: left_most_descendent
include("helper/plain_dummy_circuits.jl")
@testset "BitsPC tests" begin
# Indicators
pc = little_3var()
bpc = BitsProbCircuit(pc)
@test bpc isa BitsProbCircuit
@test length(bpc.input_node_ids) == 6
@test length(bpc.nodes) == 11
@test length(bpc.heap) == 0
x,y = bpc.edge_layers_up.vectors[1:2]
newparams = log.([0.99, 0.01])
bpc.edge_layers_up.vectors[1] =
SumEdge(x.parent_id, x.prime_id, x.sub_id, newparams[1], x.tag)
bpc.edge_layers_up.vectors[2] =
SumEdge(y.parent_id, y.prime_id, y.sub_id, newparams[2], y.tag)
update_parameters(bpc)
@test all(params(sumnodes(pc)[1]) .≈ newparams)
CUDA.@allowscalar if CUDA.functional()
cbpc = cu(bpc)
@test length(cbpc.input_node_ids) == 6
@test length(cbpc.nodes) == 11
@test length(cbpc.heap) == 0
x,y = cbpc.edge_layers_up.vectors[1:2]
newparams = log.([0.29, 0.71])
cbpc.edge_layers_up.vectors[1] =
SumEdge(x.parent_id, x.prime_id, x.sub_id, newparams[1], x.tag)
cbpc.edge_layers_up.vectors[2] =
SumEdge(y.parent_id, y.prime_id, y.sub_id, newparams[2], y.tag)
update_parameters(cbpc)
@test all(params(sumnodes(pc)[1]) .≈ newparams)
end
# Bernoullis
pc = little_3var_bernoulli()
bpc = BitsProbCircuit(pc)
@test bpc isa BitsProbCircuit
@test length(bpc.input_node_ids) == 3
@test length(bpc.nodes) == 5
@test length(bpc.heap) == 15
start = dist(bpc.nodes[1]).heap_start
bpc.heap[start:start+1] .= Float32[log(0.88), log(0.12)]
update_parameters(bpc)
@test dist(left_most_descendent(pc)).logps[2] ≈ log(0.12)
CUDA.@allowscalar if CUDA.functional()
cbpc = cu(bpc)
@test length(cbpc.input_node_ids) == 3
@test length(cbpc.nodes) == 5
@test length(cbpc.heap) == 15
start = dist(cbpc.nodes[1]).heap_start
cbpc.heap[start:start+1] .= CuVector(Float32[log(0.78), log(0.22)])
update_parameters(cbpc)
@test dist(left_most_descendent(pc)).logps[2] ≈ log(0.22)
end
# Categoricals
pc = little_3var_categorical(; num_cats = 5)
bpc = BitsProbCircuit(pc)
@test bpc isa BitsProbCircuit
@test length(bpc.input_node_ids) == 3
@test length(bpc.nodes) == 5
@test length(bpc.heap) == (5*2+1)*3
newparams = log.([0.1,0.1,0.1,0.3,0.4])
bpc.heap[1:5] .= newparams
update_parameters(bpc)
@test all(dist(left_most_descendent(pc)).logps .≈ newparams)
CUDA.@allowscalar if CUDA.functional()
cbpc = cu(bpc)
@test length(cbpc.input_node_ids) == 3
@test length(cbpc.nodes) == 5
@test length(cbpc.heap) == (5*2+1)*3
newparams = log.([0.1,0.1,0.3,0.4,0.1])
cbpc.heap[1:5] .= CuVector(newparams)
update_parameters(cbpc)
@test all(dist(left_most_descendent(pc)).logps .≈ newparams)
end
end
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 1499 | using Test, ProbabilisticCircuits
using ProbabilisticCircuits: bits, PlainInputNode
@testset "input distributions" begin
n = PlainInputNode(1, Literal(true))
@test issetequal(randvars(n), [1])
n = PlainInputNode(1, Bernoulli(log(0.5)))
@test issetequal(randvars(n), [1])
@test n.dist.logps[2] ≈ log(0.5)
n = PlainInputNode(1, Categorical(4))
@test issetequal(randvars(n), [1])
@test all(n.dist.logps .≈ [log(0.25), log(0.25), log(0.25), log(0.25)])
end
@testset "bit input nodes" begin
heap = Float32[]
bit_lit = nothing
for sign in [true, false]
lit = PlainInputNode(42, Literal(sign))
bit_lit = bits(lit, heap)
@test isbits(bit_lit)
@test loglikelihood(dist(bit_lit), false) ≈ log(!sign)
@test loglikelihood(dist(bit_lit), true) ≈ log(sign)
end
bern = PlainInputNode(42, Bernoulli(log(0.1)))
bit_bern = bits(bern, heap)
@test isbits(bit_bern)
@test loglikelihood(dist(bit_bern), 1, heap) ≈ log(0.1)
@test loglikelihood(dist(bit_bern), 0, heap) ≈ log(1-0.1)
heap = Float32[]
cat = PlainInputNode(42, Categorical(6))
bit_cat = bits(cat, heap)
@test isbits(bit_cat)
@test length(heap) == 2*6+1
for i = 0:6-1
@test loglikelihood(dist(bit_cat), i, heap) ≈ log(1/6)
end
T = Union{typeof(bit_bern), typeof(bit_cat), typeof(bit_lit)}
@test Base.isbitsunion(T)
@test Base.isbitsunion(eltype(T[bit_bern, bit_cat, bit_lit]))
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 1714 | using Test, DirectedAcyclicGraphs, ProbabilisticCircuits
using ProbabilisticCircuits: PlainSumNode, PlainMulNode, PlainProbCircuit, value
import ProbabilisticCircuits as PCs
include("helper/plain_dummy_circuits.jl")
@testset "probabilistic circuit nodes" begin
s1 = little_3var()
m1 = inputs(s1)[1]
# traits
@test s1 isa ProbCircuit
@test s1 isa PlainSumNode
@test m1 isa PlainMulNode
@test issum(s1)
@test ismul(m1)
@test PCs.NodeType(s1) isa PCs.SumNode
@test PCs.NodeType(m1) isa PCs.MulNode
@test length(mulnodes(s1)) == 4
@test length(inputnodes(s1)) == 6
@test length(sumnodes(s1)) == 5
@test num_nodes(s1) == 15
@test num_edges(s1) == 18
s1_copy = PlainProbCircuit(s1)
@test all(isinput, intersect(linearize(s1), linearize(s1_copy)))
@test isinput(left_most_descendent(s1))
@test isinput(right_most_descendent(s1))
@test num_parameters_node(s1, true) == 1
@test num_parameters_node(s1, false) == 2
@test num_parameters(s1) == 5
@test randvar(left_most_descendent(s1)) == randvar(left_most_descendent(s1_copy))
@test randvar(left_most_descendent(s1)) == PCs.Var(3)
@test value(dist(left_most_descendent(s1))) == true
@test value(dist(right_most_descendent(s1))) == false
lt = [PlainInputNode(i,Literal(true)) for i=1:3]
lf = [PlainInputNode(i,Literal(false)) for i=1:3]
r = lt[1] * 0.3 + 0.7 * lf[1]
@test r isa PlainSumNode
@test all(randvar.(inputs(r)) .== PCs.Var(1))
@test all(params(r) .≈ log.([0.3, 0.7]))
@test r * lt[2] isa PlainMulNode
@test num_inputs(lt[1] * lt[2] * lt[3]) == 3
@test num_inputs(lt[1] + lt[2] + lt[3]) == 3
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 99 | # Driver script for all unit tests
using Jive
runtests(@__DIR__, skip=["runtests.jl", "helper"])
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 359 | """
Generates all possible binary configurations of size N
"""
function generate_data_all(N::Int)
data_all = transpose(parse.(Bool, split(bitstring(0)[end-N+1:end], "")));
for mask = 1: (1<<N) - 1
data_all = vcat(data_all,
transpose(parse.(Bool, split(bitstring(mask)[end-N+1:end], "")))
);
end
Matrix(data_all)
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 260 | using CUDA: CUDA
function cpu_gpu_agree(f, data; atol=1e-7)
CUDA.functional() && @test f(data) == to_cpu(f(to_gpu(data)))
end
function cpu_gpu_agree_approx(f, data; atol=1e-7)
CUDA.functional() && @test f(data) ≈ to_cpu(f(to_gpu(data))) atol=atol
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 569 | using ProbabilisticCircuits
using DirectedAcyclicGraphs
function test_pc_equals(c1, c2)
@test num_nodes(c1) == num_nodes(c2)
@test num_edges(c1) == num_edges(c2)
for (n1, n2) in zip(linearize(c1), linearize(c2))
if issum(n1)
@test issum(n2)
@test all(params(n1) ≈ params(n2))
elseif ismul(n1)
@test ismul(n2)
else
@test isinput(n1)
@test isinput(n2)
# TODO: might need to fix for non-literal dists
@test dist(n1) ≈ dist(n2)
end
end
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 2575 | using ProbabilisticCircuits
using ProbabilisticCircuits: PlainInputNode
function little_2var()
pos = PlainInputNode(1, Literal(true))
neg = PlainInputNode(1, Literal(false))
sum1 = pos + neg
sum2 = pos + neg
pos = PlainInputNode(2, Literal(true))
neg = PlainInputNode(2, Literal(false))
mul1 = pos * sum1
mul2 = neg * sum2
mul1 + mul2
end
function little_3var()
sum1 = little_2var()
pos = PlainInputNode(3, Literal(true))
neg = PlainInputNode(3, Literal(false))
sum2 = summate(inputs(sum1))
mul1 = pos * sum1
mul2 = neg * sum2
mul1 + mul2
end
function little_3var_bernoulli(firstvar=1; p = 0.5)
n1 = PlainInputNode(firstvar, Bernoulli(log(p)))
n2 = PlainInputNode(firstvar+1, Bernoulli(log(p)))
n3 = PlainInputNode(firstvar+2, Bernoulli(log(p)))
summate(multiply(n1, n2, n3))
end
function little_3var_categorical(firstvar=1; num_cats = 3)
n1 = PlainInputNode(firstvar, Categorical(num_cats))
n2 = PlainInputNode(firstvar+1, Categorical(num_cats))
n3 = PlainInputNode(firstvar+2, Categorical(num_cats))
summate(multiply(n1, n2, n3))
end
function little_3var_binomial(firstvar=1; n = 10)
n1 = PlainInputNode(firstvar, Binomial(n, 0.1))
n2 = PlainInputNode(firstvar+1, Binomial(n, 0.5))
n3 = PlainInputNode(firstvar+2, Binomial(n, 0.9))
summate(multiply(n1, n2, n3))
end
function little_4var()
circuit = IOBuffer(b"""psdd 19
L 0 0 1
L 2 1 2
L 4 2 3
L 6 3 4
L 1 0 -1
L 3 1 -2
L 5 2 -3
L 7 3 -4
D 9 5 4 4 6 -1.6094379124341003 4 7 -1.2039728043259361 5 6 -0.916290731874155 5 7 -2.3025850929940455
D 8 4 4 0 2 -2.3025850929940455 0 3 -2.3025850929940455 1 2 -2.3025850929940455 1 3 -0.35667494393873245
D 10 6 1 8 9 0.0
""")
prob_circuit = read(circuit, ProbCircuit, ProbabilisticCircuits.PsddFormat());
end
function little_2var_indicator(firstvar=0)
v1 = PlainInputNode(firstvar, Indicator(0))
v2 = PlainInputNode(firstvar, Indicator(1))
v3 = PlainInputNode(firstvar, Indicator(2))
sum1 = v1 + v2 + v3
sum2 = v1 + v2 + v3
sum3 = v1 + v2 + v3
v1 = PlainInputNode(firstvar+1, Indicator(0))
v2 = PlainInputNode(firstvar+1, Indicator(1))
v3 = PlainInputNode(firstvar+1, Indicator(2))
mul1 = v1 * sum1
mul2 = v2 * sum2
mul3 = v3 * sum3
mul1 + mul2 + mul3
end
function little_hybrid_circuit()
x = little_3var()
y = little_2var_indicator(4)
z1 = little_3var_bernoulli(7)
z2 = little_3var_categorical(7)
(x * y * z1) + (x * y * z2)
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 2482 | using Test, ProbabilisticCircuits
using ProbabilisticCircuits: JpcFormat
include("../helper/plain_dummy_circuits.jl")
include("../helper/pc_equals.jl")
@testset "Jpc IO tests Literal" begin
# Indicators
pc = little_3var()
mktempdir() do tmp
file = "$tmp/example.jpc"
write(file, pc)
pc2 = read(file, ProbCircuit)
test_pc_equals(pc, pc2)
pc2 = read(file, ProbCircuit, JpcFormat(), true)
test_pc_equals(pc, pc2)
pc2 = read(file, ProbCircuit, JpcFormat(), false)
test_pc_equals(pc, pc2)
file = "$tmp/example.jpc.gz"
write(file, pc)
pc2 = read(file, ProbCircuit)
test_pc_equals(pc, pc2)
end
end
@testset "Jpc IO tests categorical" begin
pc = little_3var_categorical()
mktempdir() do tmp
file = "$tmp/example.jpc"
write(file, pc)
pc2 = read(file, ProbCircuit)
test_pc_equals(pc, pc2)
pc2 = read(file, ProbCircuit, JpcFormat(), true)
test_pc_equals(pc, pc2)
pc2 = read(file, ProbCircuit, JpcFormat(), false)
test_pc_equals(pc, pc2)
file = "$tmp/example.jpc.gz"
write(file, pc)
pc2 = read(file, ProbCircuit)
test_pc_equals(pc, pc2)
end
end
@testset "JPC IO tests Binomial" begin
pc = little_3var_binomial()
mktempdir() do tmp
file = "$tmp/example_binomial.jpc"
write(file, pc)
pc2 = read(file, ProbCircuit)
test_pc_equals(pc, pc2)
pc2 = read(file, ProbCircuit, JpcFormat(), true)
test_pc_equals(pc, pc2)
pc2 = read(file, ProbCircuit, JpcFormat(), false)
test_pc_equals(pc, pc2)
# Compressed
file = "$tmp/example_binomial.jpc.gz"
write(file, pc)
pc2 = read(file, ProbCircuit)
test_pc_equals(pc, pc2)
end
end
@testset "Jpc IO tests hybrid" begin
pc = little_hybrid_circuit()
mktempdir() do tmp
file = "$tmp/example.jpc"
write(file, pc)
pc2 = read(file, ProbCircuit)
test_pc_equals(pc, pc2)
pc2 = read(file, ProbCircuit, JpcFormat(), true)
test_pc_equals(pc, pc2)
pc2 = read(file, ProbCircuit, JpcFormat(), false)
test_pc_equals(pc, pc2)
file = "$tmp/example.jpc.gz"
write(file, pc)
pc2 = read(file, ProbCircuit)
test_pc_equals(pc, pc2)
end
end
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 625 | using Test, ProbabilisticCircuits
# using TikzPictures
include("../helper/plain_dummy_circuits.jl")
@testset "PC plotting" begin
mktempdir() do tmp
# Note: omitting rendering tests to speed up CI
pc = little_3var()
p = @test_nowarn plot(pc)
# @test_nowarn save(SVG("$tmp/example1.svg"), p)
pc = little_3var_categorical()
p = @test_nowarn plot(pc)
# @test_nowarn save(SVG("$tmp/example2.svg"), p)
pc = little_hybrid_circuit()
p = @test_nowarn plot(pc)
# @test_nowarn save(SVG("$tmp/example3.svg"), p)
end
end
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 666 | using Test, ProbabilisticCircuits
using ProbabilisticCircuits: PsddFormat
include("../helper/plain_dummy_circuits.jl")
include("../helper/pc_equals.jl")
@testset "Psdd IO tests" begin
# Indicators
pc = little_3var()
mktempdir() do tmp
file = "$tmp/example.psdd"
write(file, pc)
# note: number of nodes can change because of "true" leaves
pc2 = read(file, ProbCircuit)
write(file, pc2)
pc3 = read(file, ProbCircuit)
test_pc_equals(pc2, pc3)
file = "$tmp/example.psdd.gz"
write(file, pc2)
pc3 = read(file, ProbCircuit)
test_pc_equals(pc2, pc3)
end
end
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 529 | using Test, ProbabilisticCircuits
using ProbabilisticCircuits: PsddFormat
include("../helper/plain_dummy_circuits.jl")
include("../helper/pc_equals.jl")
@testset "Spn IO tests" begin
# Indicators
pc = little_3var()
mktempdir() do tmp
file = "$tmp/example.spn"
write(file, pc)
pc2 = read(file, ProbCircuit)
test_pc_equals(pc, pc2)
file = "$tmp/example.spn.gz"
write(file, pc)
pc2 = read(file, ProbCircuit)
test_pc_equals(pc, pc2)
end
end
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 3519 | using Test, DirectedAcyclicGraphs, ProbabilisticCircuits
using ProbabilisticCircuits: PlainSumNode, PlainMulNode, PlainProbCircuit
using CUDA
import ProbabilisticCircuits as PCs
include("../helper/plain_dummy_circuits.jl")
@testset "init params" begin
pc = little_3var()
@test_nowarn init_parameters(pc; perturbation = 0.2)
pc = little_3var_bernoulli()
@test_nowarn init_parameters(pc; perturbation = 0.2)
pc = little_3var_categorical()
@test_nowarn init_parameters(pc; perturbation = 0.2)
end
@testset "mini-batch em" begin
if CUDA.functional()
# Literal
pc = little_3var()
bpc = PCs.CuBitsProbCircuit(pc)
data = cu([true true false; false true false; false false false])
lls = mini_batch_em(bpc, data, 2; batch_size = 3, pseudocount = 0.1, param_inertia = 0.2, verbose = false)
@test lls[2] > lls[1]
# Bernoulli
pc = little_3var_bernoulli()
bpc = PCs.CuBitsProbCircuit(pc)
data = cu([true true false; false true false; false false false])
lls = mini_batch_em(bpc, data, 2; batch_size = 3, pseudocount = 0.1, param_inertia = 0.2, verbose = false)
@test lls[2] > lls[1]
# Categorical
pc = little_3var_categorical(; num_cats = UInt32(5))
bpc = PCs.CuBitsProbCircuit(pc)
data = cu(UInt32.([2 3 4; 5 1 2; 3 4 5]))
lls = mini_batch_em(bpc, data, 2; batch_size = 3, pseudocount = 0.1, param_inertia = 0.2, verbose = false)
@test lls[2] > lls[1]
end
end
@testset "full-batch em" begin
if CUDA.functional()
# Literal
pc = little_3var()
bpc = PCs.CuBitsProbCircuit(pc)
data = cu([true true false; false true false; false false false])
lls = full_batch_em(bpc, data, 2; batch_size = 32, pseudocount = 0.1, verbose = false)
@test lls[2] > lls[1]
# Bernoulli
pc = little_3var_bernoulli()
bpc = PCs.CuBitsProbCircuit(pc)
data = cu([true true false; false true false; false false false])
lls = full_batch_em(bpc, data, 2; batch_size = 32, pseudocount = 0.1, verbose = false)
@test lls[2] > lls[1]
# Categorical
pc = little_3var_categorical(; num_cats = UInt32(5))
bpc = PCs.CuBitsProbCircuit(pc)
data = cu(UInt32.([2 3 4; 5 1 2; 3 4 5]))
lls = full_batch_em(bpc, data, 2; batch_size = 32, pseudocount = 0.1, verbose = false)
@test lls[2] > lls[1]
end
end
@testset "em with missing" begin
if CUDA.functional()
# Literal
pc = little_3var()
bpc = PCs.CuBitsProbCircuit(pc)
data = cu([true true missing; false missing false; false false false])
lls = full_batch_em(bpc, data, 2; batch_size = 32, pseudocount = 0.1, verbose = false)
@test lls[2] > lls[1]
# Bernoulli
pc = little_3var_bernoulli()
bpc = PCs.CuBitsProbCircuit(pc)
data = cu([true missing false; missing true false; false false false])
lls = full_batch_em(bpc, data, 2; batch_size = 32, pseudocount = 0.1, verbose = false)
@test lls[2] > lls[1]
# Categorical
pc = little_3var_categorical(; num_cats = UInt32(5))
bpc = PCs.CuBitsProbCircuit(pc)
data = cu([missing 3 4; 5 1 missing; 3 4 5])
lls = full_batch_em(bpc, data, 2; batch_size = 32, pseudocount = 0.1, verbose = false)
@test lls[2] > lls[1]
end
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 5344 | using Test, DirectedAcyclicGraphs, ProbabilisticCircuits
using ProbabilisticCircuits: PlainSumNode, PlainMulNode, PlainProbCircuit
using CUDA
import ProbabilisticCircuits as PCs
include("../helper/plain_dummy_circuits.jl")
@testset "flow" begin
if CUDA.functional()
# Literal
pc = little_3var()
bpc = PCs.CuBitsProbCircuit(pc)
data = cu([true true false; false true false; false false false])
num_nodes = length(bpc.nodes)
num_edges = length(bpc.edge_layers_down.vectors)
mars = PCs.prep_memory(nothing, (3, num_nodes), (false, true))
flows = PCs.prep_memory(nothing, (3, num_nodes), (false, true))
edge_aggr = PCs.prep_memory(nothing, (num_edges,))
edge_aggr .= 0
example_ids = cu(Int32.([1, 2, 3]))
PCs.probs_flows_circuit(flows, mars, edge_aggr, bpc, data, example_ids; mine = 2, maxe = 32)
edge_aggr_cpu = Array(edge_aggr)
@test edge_aggr_cpu[2] ≈ Float32(3.0)
@test edge_aggr_cpu[4] ≈ Float32(2.0)
@test edge_aggr_cpu[6] ≈ Float32(1.0)
# Bernoulli
pc = little_3var_bernoulli()
bpc = PCs.CuBitsProbCircuit(pc)
data = cu([true true false; false true false; false false false])
num_nodes = length(bpc.nodes)
num_edges = length(bpc.edge_layers_down.vectors)
mars = PCs.prep_memory(nothing, (3, num_nodes), (false, true))
flows = PCs.prep_memory(nothing, (3, num_nodes), (false, true))
edge_aggr = PCs.prep_memory(nothing, (num_edges,))
example_ids = cu(Int32.([1, 2, 3]))
PCs.probs_flows_circuit(flows, mars, edge_aggr, bpc, data, example_ids; mine = 2, maxe = 32)
heap_cpu = Array(bpc.heap)
@test all(heap_cpu .≈ Float32[log1p(-exp(-0.6931471805599453)), -0.6931471805599453, 2.0, 1.0, 0.0, log1p(-exp(-0.6931471805599453)), -0.6931471805599453, 1.0, 2.0, 0.0, log1p(-exp(-0.6931471805599453)), -0.6931471805599453, 3.0, 0.0, 0.0])
# Categorical
pc = little_3var_categorical(; num_cats = UInt32(5))
bpc = PCs.CuBitsProbCircuit(pc)
data = cu(UInt32.([1 2 3; 4 0 1; 2 3 4]))
num_nodes = length(bpc.nodes)
num_edges = length(bpc.edge_layers_down.vectors)
mars = PCs.prep_memory(nothing, (3, num_nodes), (false, true))
flows = PCs.prep_memory(nothing, (3, num_nodes), (false, true))
edge_aggr = PCs.prep_memory(nothing, (num_edges,))
example_ids = cu(Int32.([1, 2, 3]))
PCs.probs_flows_circuit(flows, mars, edge_aggr, bpc, data, example_ids; mine = 2, maxe = 32)
heap_cpu = Array(bpc.heap)
nodes = Array(bpc.nodes)
node1_idx = dist(nodes[1]).heap_start
@test all(heap_cpu[node1_idx+5:node1_idx+9] .≈ Float32[0.0, 1.0, 1.0, 0.0, 1.0])
node2_idx = dist(nodes[2]).heap_start
@test all(heap_cpu[node2_idx+5:node2_idx+9] .≈ Float32[1.0, 0.0, 1.0, 1.0, 0.0])
node3_idx = dist(nodes[3]).heap_start
@test all(heap_cpu[node3_idx+5:node3_idx+9] .≈ Float32[0.0, 1.0, 0.0, 1.0, 1.0])
end
end
@testset "flow + missing values" begin
if CUDA.functional()
# Bernoulli
pc = little_3var_bernoulli()
bpc = PCs.CuBitsProbCircuit(pc)
data = cu([true true missing; false missing false; false false false])
num_nodes = length(bpc.nodes)
num_edges = length(bpc.edge_layers_down.vectors)
mars = PCs.prep_memory(nothing, (3, num_nodes), (false, true))
flows = PCs.prep_memory(nothing, (3, num_nodes), (false, true))
edge_aggr = PCs.prep_memory(nothing, (num_edges,))
example_ids = cu(Int32.([1, 2, 3]))
PCs.probs_flows_circuit(flows, mars, edge_aggr, bpc, data, example_ids; mine = 2, maxe = 32)
heap_cpu = Array(bpc.heap)
@test all(heap_cpu .≈ Float32[log1p(-exp(-0.6931471805599453)), -0.6931471805599453, 2.0, 1.0, 0.0, log1p(-exp(-0.6931471805599453)), -0.6931471805599453, 1.0, 1.0, 1.0, log1p(-exp(-0.6931471805599453)), -0.6931471805599453, 2.0, 0.0, 1.0])
# Categorical
pc = little_3var_categorical(; num_cats = UInt32(5))
bpc = PCs.CuBitsProbCircuit(pc)
data = cu([1 2 missing; 4 missing 1; missing missing 4])
num_nodes = length(bpc.nodes)
num_edges = length(bpc.edge_layers_down.vectors)
mars = PCs.prep_memory(nothing, (3, num_nodes), (false, true))
flows = PCs.prep_memory(nothing, (3, num_nodes), (false, true))
edge_aggr = PCs.prep_memory(nothing, (num_edges,))
example_ids = cu(Int32.([1, 2, 3]))
PCs.probs_flows_circuit(flows, mars, edge_aggr, bpc, data, example_ids; mine = 2, maxe = 32)
heap_cpu = Array(bpc.heap)
nodes = Array(bpc.nodes)
node1_idx = dist(nodes[1]).heap_start
@test all(heap_cpu[node1_idx+5:node1_idx+9] .≈ Float32[0.0, 1.0, 0.0, 0.0, 1.0])
@test heap_cpu[node1_idx+10] ≈ 1.0
node2_idx = dist(nodes[2]).heap_start
@test all(heap_cpu[node2_idx+5:node2_idx+9] .≈ Float32[0.0, 0.0, 1.0, 0.0, 0.0])
@test heap_cpu[node2_idx+10] ≈ 2.0
node3_idx = dist(nodes[3]).heap_start
@test all(heap_cpu[node3_idx+5:node3_idx+9] .≈ Float32[0.0, 1.0, 0.0, 0.0, 1.0])
@test heap_cpu[node3_idx+10] ≈ 1.0
end
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 2300 | using Test, DirectedAcyclicGraphs, ProbabilisticCircuits, CUDA
using ProbabilisticCircuits: CuBitsProbCircuit
include("../helper/plain_dummy_circuits.jl")
include("../helper/data.jl")
@testset "likelihood" begin
EPS = 1e-6
little4var = little_4var();
@test little4var isa ProbCircuit
# Step 1.
data = Matrix{Bool}([0 0 0 0; 0 1 1 0; 0 0 1 1])
true_probs = [0.07; 0.03; 0.13999999999999999]
# Bigger Batch size
probs = exp.(loglikelihoods(little4var, data; batch_size = 32))
@test true_probs ≈ probs atol=EPS
# Smaller Batch size
lls = exp.(loglikelihoods(little4var, data; batch_size = 2))
@test true_probs ≈ lls atol=EPS
# Step 2. Add up all probabilities
@test num_randvars(little4var) == 4
data_all = generate_data_all(num_randvars(little4var))
lls_all = loglikelihoods(little4var, data_all; batch_size=16)
probs_all = exp.(lls_all)
@test 1.00 ≈ sum(probs_all) atol=EPS
if CUDA.functional()
little4var_bpc = CuBitsProbCircuit(little4var)
data_all_gpu = cu(data_all)
lls_all_gpu = loglikelihoods(little4var_bpc, data_all_gpu; batch_size=16)
@test Array(lls_all_gpu) ≈ lls_all atol=EPS
end
# GPU Tests Part 2
if CUDA.functional()
pc = little_3var()
bpc = CuBitsProbCircuit(pc)
data = cu([true true false; false true false; false false false])
lls = Array(loglikelihoods(bpc, data; batch_size = 32))
avg_ll = loglikelihood(bpc, data; batch_size = 32)
@test lls[1] ≈ log(Float32(0.125))
@test lls[2] ≈ log(Float32(0.125))
@test lls[3] ≈ log(Float32(0.125))
@test avg_ll ≈ log(Float32(0.125))
pc = little_3var_bernoulli(; p = Float32(0.6))
bpc = CuBitsProbCircuit(pc)
lls = Array(loglikelihoods(bpc, data; batch_size = 32))
@test lls[1] ≈ log(Float32(0.6 * 0.6 * 0.4))
@test lls[2] ≈ log(Float32(0.4 * 0.6 * 0.4))
@test lls[3] ≈ log(Float32(0.4 * 0.4 * 0.4))
data = cu(UInt32.([2 3 4; 5 1 2; 3 4 5]))
pc = little_3var_categorical(; num_cats = UInt32(5))
bpc = CuBitsProbCircuit(pc)
lls = Array(loglikelihoods(bpc, data; batch_size = 32))
@test lls[1] ≈ log(Float32(0.2^3))
end
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 4221 | using Test
using ProbabilisticCircuits
using ProbabilisticCircuits: CuBitsProbCircuit
using CUDA
import Distributions
include("../helper/data.jl")
include("../helper/plain_dummy_circuits.jl")
@testset "MAP regression test" begin
a,b = [ProbabilisticCircuits.PlainInputNode(i, Indicator(true)) for i=1:2]
a_, b_ = [ProbabilisticCircuits.PlainInputNode(i, Indicator(false)) for i=1:2]
circuit = 0.6 * (a * (.5 * b + .5 * b_)) + .4 * (a_ * (0.9 * b + .1 * b_))
no_data = Matrix{Union{Bool,Missing}}([missing missing])
maps, mappr = MAP(circuit, no_data; batch_size = 1, return_map_prob=true)
@test mappr[1] ≈ log(0.4 * 0.9)
@test maps[1,1] == false && maps[1,2] == true
complete_states = Matrix([true true; true false; false true; false false])
mar = loglikelihoods(circuit, complete_states; batch_size = 3)
@test all(mappr .> mar .- 1e-6)
if CUDA.functional()
bpc = CuBitsProbCircuit(circuit)
no_data_gpu = cu(no_data)
maps_gpu = MAP(bpc, no_data_gpu; batch_size=1)
@test Matrix(maps_gpu) == maps
end
end
@testset "MAP" begin
prob_circuit = little_4var()
# A. Full Data
data_full = generate_data_all(num_randvars(prob_circuit))
if CUDA.functional()
bpc = CuBitsProbCircuit(prob_circuit)
data_full_gpu = cu(data_full)
end
map, mappr = MAP(prob_circuit, data_full; batch_size = 1, return_map_prob=true)
@test map == data_full
evipr = loglikelihoods(prob_circuit, data_full; batch_size = 16)
@test mappr ≈ evipr atol=1e-6
if CUDA.functional()
maps_gpu = MAP(bpc, data_full_gpu; batch_size=1)
@test Matrix(maps_gpu) == map
end
# B. Partial Data; test if non-missing MAP values are same as data (as they should be)
data_marg = Matrix([false false false false;
false true true false;
false false true true;
false false false missing;
missing true false missing;
missing missing missing missing;
false missing missing missing])
map, mappr = MAP(prob_circuit, data_marg; batch_size = 1, return_map_prob=true)
@test all(ismissing.(data_marg) .| (data_marg .== map))
mar = loglikelihoods(prob_circuit, data_marg; batch_size = 16)
@test all(mar .> mappr .- 1e-6)
if CUDA.functional()
data_marg_gpu = cu(data_marg)
# bigger batch size
maps_gpu = MAP(bpc, data_marg_gpu; batch_size=16)
@test Matrix(maps_gpu) == map
# smaller batch size
maps_gpu = MAP(bpc, data_marg_gpu; batch_size=2)
@test Matrix(maps_gpu) == map
end
# C. Check specific MAP queries with known result
data_c = Matrix([false false false missing])
true_map = Matrix([false false false true])
map, mappr = MAP(prob_circuit, data_c; batch_size = 1, return_map_prob=true)
@test map == true_map
@test mappr[1] ≈ -1.2729657
if CUDA.functional()
data_c_gpu = cu(data_c)
maps_gpu = MAP(bpc, data_c_gpu; batch_size=1)
@test Matrix(maps_gpu) == true_map
end
# D. TODO. Add tests with different input types for map
# Generate all possible missing patches and compute map on cpu vs gpu
end
@testset "Binomial MAP Test" begin
EPS = 1e-6
EPS2 = 1e-3
# p = 0.0
pc = InputNode(1, Binomial(5, 0.0));
data = Matrix(transpose([missing;; UInt32(3)]))
true_map = [UInt32(0), UInt32(3)]
our_map = MAP(pc, data; batch_size=2);
@test all( true_map .== our_map )
# p = 1.0
pc = InputNode(1, Binomial(5, 1.0));
true_map = [UInt32(5), UInt32(3)];
our_map = MAP(pc, data; batch_size=2);
@test all( true_map .== our_map )
# p = 0.4
N = 10
p = 0.4
pc = InputNode(1, Binomial(N, p));
true_map = [UInt32(4), UInt32(3)];
our_map = MAP(pc, data; batch_size=2);
@test all( true_map .== our_map )
if CUDA.functional()
pc2 = summate([pc])
bpc = CuBitsProbCircuit(pc2)
cu_data = cu(data)
our_map = Array(MAP(bpc, cu_data; batch_size=2))
@test all( true_map .== our_map )
end
end
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 4533 | using Test
using ProbabilisticCircuits
using ProbabilisticCircuits: CuBitsProbCircuit
using Random: MersenneTwister
using CUDA
import Distributions
include("../helper/data.jl")
include("../helper/plain_dummy_circuits.jl")
function histogram_matches_likelihood(samples::Matrix{Bool}, worlds, loglikelihoods; EPS=1e-2)
hist = Dict{BitVector,Int}()
for i = 1:size(samples,1)
sample = BitVector(samples[i,:])
hist[sample] = get(hist, sample, 0) + 1
end
for i = 1:size(worlds,1)
exact_prob = exp(loglikelihoods[i])
ex = BitVector(worlds[i,:])
estim_prob = get(hist, ex, 0) / size(samples,1)
@test exact_prob ≈ estim_prob atol=EPS;
end
end
@testset "Unconditional Sampling Test" begin
rng = MersenneTwister(42)
pc = little_4var();
worlds = generate_data_all(num_randvars(pc));
lls = loglikelihoods(pc, worlds; batch_size=32)
Nsamples = 50_000
samples = Array{Bool}(sample(pc, Nsamples, [Bool]; rng)[:,1,:])
histogram_matches_likelihood(samples, worlds, lls)
if CUDA.functional()
bpc = CuBitsProbCircuit(pc)
samples = sample(bpc, Nsamples, num_randvars(pc), [Bool]; rng)
samples_cpu = Array{Bool}(samples[:,1,:]) # to_cpu
histogram_matches_likelihood(samples_cpu, worlds, lls)
end
end
@testset "Conditional Sampling Test" begin
rng = MersenneTwister(42)
num_samples = 10
pc = little_4var();
data_all = generate_data_all(num_randvars(pc));
if CUDA.functional()
bpc = CuBitsProbCircuit(pc)
data_all_gpu = cu(data_all)
end
# sampling given complete data should return same data with its log likelihood
lls = loglikelihoods(pc, data_all; batch_size=16)
sample_states = sample(pc, num_samples, data_all; batch_size=16, rng)
for i in 1:num_samples
@test sample_states[i,:,:] == data_all
end
if CUDA.functional()
samples_gpu = sample(bpc, num_samples, data_all_gpu; rng)
@test Array(samples_gpu) == sample_states
end
# sampling given partial data invariants
data_marg = Matrix([false false false false;
false true true false;
false false true true;
false false false missing;
missing true false missing;
missing missing missing missing;
false missing missing missing])
if CUDA.functional()
data_marg_gpu = cu(data_marg)
end
# Test that samples keep the partial evidence values intact
function test_sample_match_evidence(sample_states, data_marg)
for i in 1:num_samples
pairs = collect(zip(sample_states[i,:,:], data_marg))
@test all(pairs) do (f,m)
ismissing(m) || f == m
end
end
end
# CPU
sample_states = sample(pc, num_samples, data_marg; batch_size=8, rng)
test_sample_match_evidence(sample_states, data_marg)
if CUDA.functional()
samples_gpu = sample(bpc, num_samples, data_marg_gpu; rng)
samples_cpu = Array(samples_gpu)
test_sample_match_evidence(samples_cpu, data_marg)
end
# TODO
# Add similar test `histogram_matches_likelihood` with conditional likelihoods
# Add sampling for different input types
end
@testset "Binomial Sample Test" begin
EPS = 1e-6
EPS2 = 1e-3
pc = InputNode(1, Binomial(5, 0.0));
sms = sample(pc, 100, [UInt32])[:, 1, 1];
@test all( sms .== zero(UInt32) )
pc = InputNode(1, Binomial(5, 1.0));
sms = sample(pc, 100, [UInt32])[:, 1, 1];
@test all( sms .== UInt32(5) )
N = 10
p = Float32(0.4)
num_samples = 10*1000 * 1000
pc = InputNode(1, Binomial(N, p));
sms = sample(pc, num_samples, [UInt32])[:, 1, 1];
bp = Distributions.Binomial(N, p)
true_prob = [Distributions.pdf(bp, i) for i=0:N]
@test sum(true_prob) ≈ 1.0 atol=EPS;
our_prob = exp.([loglikelihood(pc.dist, i) for i = 0: N])
@test sum(our_prob) ≈ 1.0 atol=EPS;
@test our_prob ≈ true_prob atol=EPS
p_samples = [ sum(sms .== i) for i = 0: N] ./ num_samples
@test p_samples ≈ true_prob atol=EPS2
if CUDA.functional()
pc2 = summate([pc])
bpc = CuBitsProbCircuit(pc2)
# CUDA Samples
sms = Array(sample(bpc, num_samples,1, [UInt32])[:, 1, 1]);
p_samples = [ sum(sms .== i) for i = 0: N] ./ num_samples
@test p_samples ≈ true_prob atol=EPS2
end
end
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | code | 1896 | using Test, DirectedAcyclicGraphs, ProbabilisticCircuits
using ProbabilisticCircuits: PlainSumNode, PlainMulNode, PlainProbCircuit
using CUDA
using Graphs
import ProbabilisticCircuits as PCs
@testset "hclt" begin
num_vars = 3
num_cats = 2
data = [true true false; false true false; false false false]
if CUDA.functional()
data = cu(data)
end
pc = hclt(data, 4; input_type = Literal)
_, layer = feedforward_layers(pc)
@test pc isa ProbCircuit
@test layer == 6
@test num_inputs(pc) == 4
@test randvar(pc.inputs[1].inputs[2].inputs[1]) == UInt32(1)
@test dist(pc.inputs[1].inputs[2].inputs[1]).value == true
@test randvar(pc.inputs[1].inputs[2].inputs[2]) == UInt32(1)
@test dist(pc.inputs[1].inputs[2].inputs[2]).value == false
pc = hclt(data, 4; shape=:balanced, input_type = Literal)
_, layer = feedforward_layers(pc)
@test layer == 6
# TODO FIX
# pc = hclt(data, 4; input_type = Bernoulli)
# @test randvar(pc.inputs[1].inputs[2].inputs[1]) == UInt32(1)
# @test pc.inputs[1].inputs[2].inputs[1].dist.logp ≈ log(0.9)
# @test randvar(pc.inputs[1].inputs[2].inputs[2]) == UInt32(1)
# @test pc.inputs[1].inputs[2].inputs[2].dist.logp ≈ log(0.1)
pc = hclt(data, 4; input_type = Categorical)
@test randvar(pc.inputs[1].inputs[2]) == UInt32(1)
@test pc.inputs[1].inputs[2].dist.logps[2] ≈ log(0.5)
end
@testset "Balanced HCLT test" begin
edgespair = [(1,2),(2,3),(3,4)]
clt = PCs.clt_edges2graphs(edgespair; shape=:balanced)
for edge in [(2,3),(2,1),(3,4)]
@test Graphs.has_edge(clt, edge...)
end
edgespair = [(4,2),(2,5),(5,1),(1,6),(6,3),(3,7)]
clt = PCs.clt_edges2graphs(edgespair; shape=:balanced)
for edge in [(1,2),(1,3),(2,4),(2,5),(3,6),(3,7)]
@test Graphs.has_edge(clt, edge...)
end
end | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | docs | 1357 | <img align="right" width="180px" src="https://avatars.githubusercontent.com/u/58918144?s=200&v=4">
<!-- DO NOT EDIT README.md directly, instead edit docs/README.jl and generate the markdown-->
# Probabilistic<wbr>Circuits<wbr>.jl
[](https://github.com/Tractables/ProbabilisticCircuits.jl/actions?query=workflow%3A%22Unit+Tests%22+branch%3Amaster) [](https://codecov.io/gh/Tractables/ProbabilisticCircuits.jl) [](https://Tractables.github.io/ProbabilisticCircuits.jl/stable) [](https://Tractables.github.io/ProbabilisticCircuits.jl/dev)
This package provides functionalities for learning/constructing probabilistic circuits and using them to compute various probabilistic queries. It is part of the [Juice package](https://github.com/Tractables) (Julia Circuit Empanada).
## Testing
To make sure everything is working correctly, you can run our test suite as follows. Running the tests for the first time will trigger a few slow downloads of various test resources.
```bash
julia --color=yes -e 'using Pkg; Pkg.test("ProbabilisticCircuits")'
```
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | docs | 4193 | # ProbabilisticCircuits.jl for Developers
Follow these instructions to install and use ProbabilisticCircuits.jl as a developer of the package.
## Installation
Install the Julia package in development mode by running
julia -e 'using Pkg; Pkg.develop(PackageSpec(url="https://github.com/Tractables/ProbabilisticCircuits.jl.git"))'
By default this will install the package at `~/.julia/dev` and allow you to change the code there. See the [Pkg manual](https://julialang.github.io/Pkg.jl/v1/managing-packages/#Developing-packages-1) for more details. One can adjust the development directory using environment variables or simply create a symbolic link to/from your favorite development directory.
Depending on your usecase you might also want to have `LogicCircuits.jl` in develop mode, in that case run the following to do both:
julia -e 'using Pkg; Pkg.develop([PackageSpec(url="https://github.com/Tractables/LogicCircuits.jl.git"),PackageSpec(url="https://github.com/Tractables/ProbabilisticCircuits.jl.git")])'
## Testing
### Prerequisite
Set the following environment variable to automatically download data artifacts needed during tests without user input. Otherwise, the tests will fail if the artifacts are not already downloaded.
export DATADEPS_ALWAYS_ACCEPT=1
Additionally, if you want the tests to run faster, you can use more cores by setting the following variable. The default value is 1.
export JIVE_PROCS=8
### Running the tests
Make sure to run the tests before committing new code.
To run all the tests:
JIVE_PROCS=8 julia --project=test --color=yes test/runtests.jl
You can also run a specific test:
julia --project=test --color=yes test/parameters_tests.jl
## Releasing New Versions
Only do this when the repo is in a stable position and there is a decent amount of changes since the previous version.
1. As much as possible, make sure to first release a new version for `LogicCircuits.jl`.
2. Bump up the version in `Project.toml`
3. Use [Julia Registrator](https://github.com/JuliaRegistries/Registrator.jl) to submit a pull request to Julia's public registry.
- The web interface seems to be the easiest. Follow the instructions in the generated pull request and make sure there is no errors. For example [this pull request](https://github.com/JuliaRegistries/General/pull/15350).
4. GitHub Release. TagBot is enabled for this repo, so after the registrator merges the pull request, TagBot automatically does a GitHub release in sync with the registry's new version.
- Note: TagBot automatically includes all the closed PRs and issues since the previous version in the release notes; if you want to exclude some of them, refer to the [Julia TagBot docs](https://github.com/JuliaRegistries/TagBot).
## Updating Artifacts
The example is for Circuit Model Zoo, but should work for others:
1. Push new updates to [UCLA-StarAI/Circuit-Model-Zoo](https://github.com/UCLA-StarAI/Circuit-Model-Zoo)
2. Do a [new zoo release](https://github.com/UCLA-StarAI/Circuit-Model-Zoo/releases).
3. Update the `LogicCircuits.jl`'s `Artifact.toml` file with new git tag and hash. Example commit can be found [here](https://github.com/Tractables/LogicCircuits.jl/commit/1cd3fda02fa7bd82d1fa02898ee404edce0d7b14).
4. Do the same for `ProbabilisticCircuits.jl`'s `Artifact.toml` file. Example commit [here](https://github.com/Tractables/ProbabilisticCircuits.jl/commit/da7d3678b5f2254e60229632f74cc619505e2b2d).
5. Note that for each Artifact.toml, 2 things need to change: `git-tree-sha1` and `sha256`.
6. Update the `const zoo_version = "/Circuit-Model-Zoo-0.1.4"` inside LogicCircuits.jl to the new zoo version. No changes needed in ProbabilisticCircuits since it uses the same constant.
### How to get the hashes
Download the new Zoo release from Github. Now you can use the following code snippet to get the hashes (check the [julia Artifact page](https://julialang.github.io/Pkg.jl/dev/artifacts/) for latest instructions):
```
using Tar, Inflate, SHA
filename = "socrates.tar.gz"
println("sha256: ", bytes2hex(open(sha256, filename)))
println("git-tree-sha1: ", Tar.tree_hash(IOBuffer(inflate_gzip(filename))))
```
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | docs | 1458 | ## Building Docs
To locally build the docs, run the following commands from root of the repository to Instantiate the docs environment and build the docs.
```bash
julia -e 'using Pkg; Pkg.activate("./docs"); Pkg.instantiate(); include("./docs/make.jl");'
```
The build results will be stored under `docs/build`.
Alternatively, if you have `ProbabilisticCircuits` in development mode and have already instantiated the docs environment, you can simply run the following.
```bash
julia --project=docs docs/make.jl
```
Note that if you do not have the package in development mode, the docs build will most likely ignore uncommitted changes.
#### Note about Pretty URLs
For local builds, it is easier to navigate with pretty URLs disabled. To disable pretty URLs, run the following instead:
```bash
julia --project=docs docs/make.jl local
```
For more information about pretty URLs, check out [the documentation](https://juliadocs.github.io/Documenter.jl/stable/man/guide/) for `Documenter.jl`.
## Setting Up LaTeX
Some of the plotting tools we use need LaTeX to be installed. Follow the instructions from [TikzPictures.jl](https://github.com/JuliaTeX/TikzPictures.jl) to see what packages are needed. Or you can check the [ci.yml](https://github.com/Tractables/ProbabilisticCircuits.jl/blob/master/.github/workflows/ci.yml) which builds our documentation on GitHub Actions. If you run into any issues, they might be due to an old version of TeX Live.
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | docs | 1697 | # ProbabilisticCircuits.jl
This module provides a Julia implementation of Probabilistic Circuits (PCs), tools to learn structure and parameters of PCs from data, and tools to do tractable exact inference with them.
### What are Probabilistic Circuits?
Probabilistic Circuits provide a unifying framework for several families of tractable probabilistic models. PCs are represented as computational graphs that define a joint probability distribution as recursive mixtures
(sum units) and factorizations (product units) of simpler distributions (input units).
Given certain structural properties, PCs enable a range of tractable exact probabilistic queries, such as computing marginals, conditionals, maximum a posteriori (MAP) states, and more advanced probabilistic queries.
In addition to parameters, the structure of PCs can also be learned from data. There are several approaches to learning PCs while keeping the needed structural constraints intact. Currently, this module includes implementations of a few of these approaches, with plans to add more over time.
Additionally, parallelism (on both CPU and GPU) is leveraged to provide faster implementations of learning and inference.
### Where to learn more about them?
For an overview of the motivation and theory behind PCs, you can start by watching the ECML-PKDD tutorial on Probabilistic Circuits.
- Probabilistic Circuits: Representations, Inference, Learning and Theory ([Video](https://www.youtube.com/watch?v=2RAG5-L9R70))
For more details and additional references, you can refer to:
- Probabilistic Circuits: A Unifying Framework for Tractable Probabilistic Models ([PDF](http://starai.cs.ucla.edu/papers/ProbCirc20.pdf)) | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | docs | 1657 | # Installation
### Prerequisites
Julia 1.6 or greater. For installation, please refer to [the official Julia Website](https://julialang.org/downloads/).
### Installing ProbabilisticCircuits
You can use Julia's package manager, Pkg, to install this module and its dependencies. There are different options for how to do that, for example through the command line or the Julia REPL. For more information and options on how to use the Julia package manager, please refer to [Pkg's Documentation](https://docs.julialang.org/en/v1/stdlib/Pkg/index.html).
#### From Command Line
To install the latest stable release, run:
```bash
julia -e 'using Pkg; Pkg.add("ProbabilisticCircuits")'
```
You can also install the package with the latest commits on master branch.
```bash
julia -e 'using Pkg; Pkg.add([PackageSpec(url="https://github.com/Tractables/ProbabilisticCircuits.jl.git")])'
```
#### From Package mode
!!! note
To get to Pkg mode, you need to run `julia`, then to press `]`. Press backspace or ^C to get back to normal REPL mode.
While in Pkg mode, run the following to install the latest release:
```julia
add ProbabilisticCircuits
```
Similarly, to install from the latest commits on master branch, run:
```
add ProbabilisticCircuits#master
```
### Testing
If you are installing the latest commit, we recommend running the test suite to make sure everything is in order, to do that run:
```bash
julia --color=yes -e 'using Pkg; Pkg.test("ProbabilisticCircuits")'
```
**Note**: If you want the tests to run faster, you can use multiple cores. To do that set the following environment variable (default = 1 core):
```bash
export JIVE_PROCS=8
```
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | docs | 468 | # [Common APIs](@id api-common)
This page lists documentation for the most commonly used functions of `ProbabilisticCircuits.jl`. Visit the internals section for auto-generated documentation of all APIs.
```@contents
Pages = ["common.md"]
```
## Circuit IO
```@docs
read
write
```
## Circuit Structures
```@docs
hclt
RAT
```
## Learning Circuit Parameters
```@docs
mini_batch_em
full_batch_em
```
## Circuit Queries
```@docs
loglikelihoods
MAP
sample
```
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | docs | 873 | # [Input Distributions](@id new-input-dist)
Currently we support `Indicator{T}`, `Categorical`, and `Bernoulli` (a special case of Categorical) distributions in the input nodes.
#### Support new InputDist
To support a new type of input distribution, you need to implement
the following functions (a rough sketch follows the list):
```julia
num_parameters
params
init_params
loglikelihood
```
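As a rough sketch, an implementation of these core functions for a hypothetical `Geometric` input distribution might look as follows. All signatures below are assumptions modeled on the built-in distributions (e.g. `Bernoulli`); consult their source for the exact forms.
```julia
using ProbabilisticCircuits
import ProbabilisticCircuits: num_parameters, params, init_params, loglikelihood

# Hypothetical geometric distribution over {0, 1, 2, ...}, storing the
# success probability in log space. Signatures are assumptions.
struct Geometric <: InputDist
    logp::Float32
end

num_parameters(d::Geometric, independent = true) = 1
params(d::Geometric) = d.logp

# re-initialize to a random success probability
init_params(d::Geometric, perturbation) = Geometric(log(rand(Float32)))

# log p(value) = value * log(1 - p) + log(p)
loglikelihood(d::Geometric, value) = value * log1p(-exp(d.logp)) + d.logp
```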
#### Support movement between CPU/GPU for InputDist
To support moving between CPU/GPU you need to implement the following:
```julia
bits
unbits
```
#### Learning support for InputDist
To support learning you need to implement the following:
```julia
flow
update_params
clear_memory
```
#### Query support for InputDist
To support certain queries such as sampling and MAP you need to implement the following:
```julia
sample_state
init_heap_map_state!
init_heap_map_loglikelihood!
map_state
map_loglikelihood
```
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | docs | 192 |
# [Probabilistic Circuits](@id api-internal-probabilistic)
This page lists all the API documentation for the `ProbabilisticCircuits` package.
```@autodocs
Modules = [ProbabilisticCircuits]
```
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | docs | 471 | # [Types ](@id api-types)
The following code snippet provides an easy way to print the type tree of probabilistic circuits.
```@example types
using InteractiveUtils;
using ProbabilisticCircuits;
using AbstractTrees;
AbstractTrees.children(x::Type) = subtypes(x);
```
For example, we can see [`ProbabilisticCircuits.ProbCircuit`](@ref)'s type tree.
```@example types
AbstractTrees.print_tree(ProbCircuit)
```
```@example types
AbstractTrees.print_tree(InputDist)
```
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | docs | 1960 | # [Quick Demo](@id man-demo)
In this section, we provide quick code snippets to get started with ProbabilisticCircuits and provide basic understanding of them. PCs are represented as a computational graphs that define a joint probability distribution as recursive mixtures (sum units) and factorizations (product units) of simpler distributions (input units).
Generally, we learn structure and parameters of circuit from data. Alternatively, we can also specify circuits in code. For example, the following snippet defines a circuit depending on 3 random variables. The `literals` function returns the input units of the circuit, in this case we get 6 different units (3 for positive literals, and 3 for negative literlas). You can use `*` and `+` operators to build a circuits.
```@example demo
using ProbabilisticCircuits;
X1, X2, X3 = [InputNode(i, Indicator(true)) for i=1:3]
X1_, X2_, X3_ = [InputNode(i, Indicator(false)) for i=1:3]
pc = 0.3 * (X1_ *
(0.2 * X2_ + 0.8 * X3)) +
0.7 * (X1 *
(0.4 * X2 + 0.6 * X3_));
nothing # hide
```
You can ask basic questions about PCs, such as (1) how many variables they depend on, (2) how many nodes, (3) how many edges, or (4) how many parameters they have.
```@example demo
num_randvars(pc)
```
```@example demo
num_nodes(pc)
```
```@example demo
num_edges(pc)
```
```@example demo
num_parameters(pc)
```
We can also plot circuits using `plot(pc)` to see the computation graph (structure and parameters). The output of `plot(pc)` has the type `TikzPictures.TikzPicture`. Generally, notebooks automatically render it and you see the figure in the notebook.
```@example demo
plot(pc)
```
However, if you are not using a notebook or want to save to a file, you can use the following commands to save the plot in various formats.
```julia
using TikzPictures;
z = plot(pc);
save(PDF("plot"), z);
save(SVG("plot"), z);
save(TEX("plot"), z);
save(TIKZ("plot"), z);
``` | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | docs | 1385 | # [GPU Support](@id man-gpu)
Most queries and learning APIs support both CPU and GPU implementations. To use the GPU implementations you need to move the
circuit and the dataset to the GPU, then call the corresponding API.
### Moving to GPU
#### Moving Data to GPU
Currently, the APIs support the `CuArray` type for GPU arrays. One simple way to move data to the GPU is using the `cu` function from `CUDA.jl`.
```julia
using CUDA
train_x_gpu, test_x_gpu = cu.(train_x, test_x)
```
In case of missing values we use the `Missing` type; for example, if you have categorical features with some missing values, the data type on the GPU would be `CuArray{Union{Missing, UInt32}}`.
#### Moving ProbCircuits to GPU
`ProbCircuit`s are stored in a DAG structure and are not GPU friendly by default. So, we convert them into `BitsProbCircuit`s (or bit circuits) as a lower-level representation that is GPU friendly. The GPU version of bit circuits has the type `CuBitsProbCircuit`, so to move your `circuit` to the GPU you can simply do:
```julia
bpc = CuBitsProbCircuit(circuit);
```
### GPU APIs
The GPU-supported APIs generally have the same names as their CPU counterparts; for a comprehensive list of supported functions see the API documentation. For example, we support the following on GPU (a short end-to-end sketch follows the list):
- [`sample`](@ref)
- [`MAP`](@ref)
- [`loglikelihoods`](@ref)
- [`mini_batch_em`](@ref)
- [`full_batch_em`](@ref) | ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | docs | 640 | # [Learning](@id man-learning)
In this section we provide a few learning scenarios for circuits. In general, learning tasks for PCs can be separated into two categories: parameter learning and structure learning.
## Structures
Currently we support the following structures:
- HiddenChowLiuTrees: See [`hclt`](@ref).
- RAT-SPNs: See [`RAT`](@ref).
## Parameter Learning
Currently we support the following parameter learning APIs:
- [`mini_batch_em`](@ref)
- [`full_batch_em`](@ref)
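As a rough sketch of how structure and parameter learning fit together (keyword names and values below are illustrative, and `train_x` is a placeholder for your training data; see the API reference for the exact signatures):
```julia
using ProbabilisticCircuits, CUDA

# Structure: a hidden Chow-Liu tree circuit learned from data
# (32 hidden categories is an arbitrary choice).
circuit = hclt(train_x, 32; input_type = Literal);

# Parameters: EM on the GPU.
bpc = CuBitsProbCircuit(circuit);
train_x_gpu = cu(train_x);
mini_batch_em(bpc, train_x_gpu, 100; batch_size = 512, pseudocount = 0.1,
              param_inertia = 0.2, param_inertia_end = 0.9);
full_batch_em(bpc, train_x_gpu, 10; batch_size = 512, pseudocount = 0.1);
```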
## Future Docs
- Learning a circuit from missing data
- Learn a mixture of circuits
- Learn a circuit from logical constraints and data
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"Apache-2.0"
] | 0.5.0 | 5a9fc8534ae1d1fe3009b6838160a099895f121d | docs | 3875 | # [Queries](@id man-queries)
In this section, we go over the most common probabilistic reasoning tasks and provide code snippets to compute those queries.
### Setup
First, we load a pretrained PC and the corresponding data.
```@setup queries
# This is needed to hide output from downloading artifacts
using CircuitModelZoo; #hide
using ProbabilisticCircuits; #hide
using DensityEstimationDatasets; #hide
pc = read(zoo_psdd_file("plants.psdd"), ProbCircuit);
data, _, _ = twenty_datasets("plants");
```
```@example queries
using CircuitModelZoo: zoo_psdd_file
using DensityEstimationDatasets: twenty_datasets
using ProbabilisticCircuits
using Tables
pc = read(zoo_psdd_file("plants.psdd"), ProbCircuit);
data, _, _ = twenty_datasets("plants");
data = Tables.matrix(data);
println("circuit with $(num_nodes(pc)) nodes and $(num_parameters(pc)) parameters.")
println("dataset with $(size(data, 2)) features and $(size(data, 1)) examples.")
```
## Full Evidence (EVI)
EVI refers to computing the probability when full evidence is given, i.e. when ``x`` is fully observed, the output is ``p(x)``. We can use [`loglikelihoods`](@ref) method to compute ``\log{p(x)}``:
```@example queries
probs = loglikelihoods(pc, data[1:100, :]; batch_size=64);
probs[1:3]
```
## Partial Evidence (MAR)
In this case we have some missing values. Let ``x^o`` denote the observed features, and ``x^m`` the missing features. We would like to compute ``p(x^o)``, which is defined as ``p(x^o) = \sum_{x^m} p(x^o, x^m)``. Of course, computing this directly by summing over all possible ways to fill in the missing values is not tractable.
The good news is that given a **smooth** and **decomposable** PC, the marginal can be computed exactly and in time linear in the size of the PC.
First, we randomly make some features go `missing`.
```@example queries
using DataFrames
using Tables
function make_missing(d; keep_prob=0.8)
    m = missings(Bool, size(d)...)          # start with everything missing
    flag = rand(size(d)...) .<= keep_prob   # cells that stay observed
    m[flag] .= d[flag]
    return m
end;
data_miss = make_missing(data[1:1000,:]);
nothing #hide
```
Now, we can use [`loglikelihoods`](@ref) to compute the marginal queries.
```@example queries
probs = loglikelihoods(pc, data_miss; batch_size=64);
probs[1:3]
```
Note that [`loglikelihoods`](@ref) can also be used to compute probabilities when all data is observed, as we saw in the previous section.
## Conditionals (CON)
In this case, given observed features ``x^o``, we would like to compute ``p(Q \mid x^o)``, where ``Q`` is a subset of features disjoint with ``x^o``.
We can use Bayes rule to compute conditionals as two separate MAR queries as follows:
```math
p(q \mid x^o) = \cfrac{p(q, x^o)}{p(x^o)}
```
Currently, this has to be done manually by the user, as in the sketch below. We plan to add a simple API for this case in the future.
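For example, a hedged sketch in which both marginals are computed with [`loglikelihoods`](@ref); `joint` and `evi` are placeholder datasets you would construct yourself:
```julia
# `joint`: rows with both the query variables and the evidence observed
#          (all other variables set to `missing`).
# `evi`:   the same rows with only the evidence observed.
log_joint = loglikelihoods(pc, joint; batch_size = 64);
log_evi   = loglikelihoods(pc, evi;   batch_size = 64);
log_cond  = log_joint .- log_evi;     # log p(q | xᵒ)
```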
## Maximum a posteriori (MAP, MPE)
In this case, given the observed features ``x^o``, the goal is to fill in the missing features ``x^m`` such that ``p(x^m, x^o)`` is maximized.
We can use the [`MAP`](@ref) method, which outputs the states that maximize the probability and the log-likelihoods of those states.
```@example queries
data_miss = make_missing(data[1:1000,:], keep_prob=0.5);
states = MAP(pc, data_miss; batch_size = 64);
size(states)
```
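A quick, hedged sanity check: the observed entries of the input should be unchanged, with only the `missing` cells filled in.
```julia
obs = .!ismissing.(data_miss)            # mask of observed cells
all(states[obs] .== data_miss[obs])      # observed evidence is preserved
```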
## Sampling
We can also sample from the distribution ``p(x)`` defined by a Probabilistic Circuit. You can use [`sample`](@ref) to achieve this task.
```@example queries
samples = sample(pc, 100, [Bool]);
size(samples)
```
Additionally, we can draw conditional samples ``x \sim p(x \mid x^o)``, where ``x^o`` are the observed features (``x^o \subseteq x``) and can be any arbitrary subset of features.
```@example queries
# 2 random partial-evidence rows (each cell is missing, true, or false)
evidence = rand((missing, true, false), (2, num_randvars(pc)));
samples = sample(pc, 3, evidence; batch_size = 2);
size(samples)
```
| ProbabilisticCircuits | https://github.com/Tractables/ProbabilisticCircuits.jl.git |
|
[
"MIT"
] | 0.3.0 | bd00a6c0f899a0f8e7f8cac53e78c6f24cbd8080 | code | 1884 | import ProximalBase
import CoordinateDescent
using Random, Statistics

# Simulate a sparse linear regression problem:
# n observations, p features, s active coefficients.
n = 3000
p = 5000
s = 100
Random.seed!(123)
X = randn(n, p)
Y = X[:,1:s] * (randn(s) .* (1. .+ rand(s))) + 6. * randn(n)
stdX = std(X, dims=1)[:]  # per-column standard deviations
# Scaled lasso with default σ initialization.
options = CoordinateDescent.ScaledLassoOptions(;optTol=1e-3, maxIter=50)
x = ProximalBase.SparseIterate(p)
λ = sqrt(2. * log(p) / n)
@time CoordinateDescent.scaledLasso!(x, X, Y, λ, stdX, options)
# Scaled lasso warm-started from a crude initial estimate of σ.
@show σinit = CoordinateDescent.findInitSigma(X, Y, 30)
options = CoordinateDescent.ScaledLassoOptions(;optTol=1e-2, maxIter=10, σinit=σinit)
x = ProximalBase.SparseIterate(p)
λ = sqrt(2. * log(p) / n)
@time CoordinateDescent.scaledLasso!(x, X, Y, λ, stdX, options)
# Plain lasso via coordinate descent: warm start vs. cold start.
λ = 0.001
options = CoordinateDescent.CDOptions(;warmStart=true)
x = ProximalBase.SparseIterate(p)
f = CoordinateDescent.CDLeastSquaresLoss(Y,X)
g = ProximalBase.ProxL1(λ)
@time CoordinateDescent.coordinateDescent!(x, f, g, options)
options = CoordinateDescent.CDOptions(;warmStart=false)
x = ProximalBase.SparseIterate(p)
f = CoordinateDescent.CDLeastSquaresLoss(Y,X)
g = ProximalBase.ProxL1(λ)
@time CoordinateDescent.coordinateDescent!(x, f, g, options)
# Weighted L1 penalty using the per-column standard deviations.
options = CoordinateDescent.CDOptions(;warmStart=true)
x = ProximalBase.SparseIterate(p)
f = CoordinateDescent.CDLeastSquaresLoss(Y,X)
g = ProximalBase.ProxL1(λ, stdX)
@time CoordinateDescent.coordinateDescent!(x, f, g, options)
# Cold start with a fixed number of steps; the block is repeated below so the
# second timing excludes compilation overhead.
options = CoordinateDescent.CDOptions(;warmStart=false, numSteps=100)
x = ProximalBase.SparseIterate(p)
f = CoordinateDescent.CDLeastSquaresLoss(Y,X)
g = ProximalBase.ProxL1(λ, stdX)
@time CoordinateDescent.coordinateDescent!(x, f, g, options)
options = CoordinateDescent.CDOptions(;warmStart=false, numSteps=100)
x = ProximalBase.SparseIterate(p)
f = CoordinateDescent.CDLeastSquaresLoss(Y,X)
g = ProximalBase.ProxL1(λ, stdX)
@time CoordinateDescent.coordinateDescent!(x, f, g, options)
| CoordinateDescent | https://github.com/mlakolar/CoordinateDescent.jl.git |