# Scruff.jl source code (BSD-3-Clause, version 0.9.0)
# Repository: https://github.com/charles-river-analytics/Scruff.jl.git
using Dates, Random, UUIDs
using Logging
import Base: convert
export
Model,
Network,
InstantNetwork,
DynamicNetwork,
Placeholder,
Node,
SFunc,
Dist,
Score,
ValueTyped,
Variable,
VariableGraph,
VariableParentTimeOffset,
get_initial_graph,
get_transition_graph,
get_children,
get_initial_children,
get_transition_children,
get_parents,
get_initial_parents,
get_transition_parents,
get_sfunc,
get_node,
get_variables,
get_placeholders,
get_nodes,
has_timeoffset,
input_type,
make_initial,
make_transition,
output_type
"""
abstract type ValueTyped{O}
Supertype for typing all Scruff Variables; `O` is the
actual type of the variable
"""
abstract type ValueTyped{O} end
"""
value_type(v::ValueTyped{O}) where {O}
return the actual type (i.e. `O`) of the `ValueTyped`
"""
value_type(v::ValueTyped{O}) where {O} = O
"""
abstract type SFunc{I<:Tuple, O}
A Stochastic Function type with input variables defined by `I` and output type `O`.
This is an abstract representation for a collection of operators with the same
input and output types.
All sfuncs have the following operators defined:
- `compute_lambda`
- `compute_bel`
- `send_pi`
- `outgoing_pis`
- `outgoing_lambdas`
`SFunc` _also_ has default implementations of `cpdf` and `logcpdf`, each defined in terms of the other.
Every sfunc should directly implement at least one of these two operators.
"""
abstract type SFunc{I<:Tuple, O} end
Base.print_without_params(x::Type{<:SFunc}) = false
"""
Dist{T} = SFunc{Tuple{}, T}
# Additional supported operators
- `make_factors`
- `send_lambda`
"""
Dist{T} = SFunc{Tuple{}, T}
"""
Score{I} = SFunc{Tuple{I}, Nothing}
`Score` supports two operators: `get_score` and `get_log_score`. By default, `get_log_score`
is defined in terms of `get_score`. Every subtype of `Score` _must_ implement
`get_score`.
"""
Score{I} = SFunc{Tuple{I}, Nothing}
"""
input_type(::Type{<:SFunc{I,O}}) where {I,O}
input_type(::SFunc{I,O}) where {I,O}
Return the input type (i.e. `I`) of the `SFunc`
"""
input_type(::Type{<:SFunc{I,O}}) where {I,O} = I
input_type(::SFunc{I,O}) where {I,O} = I
"""
output_type(::Type{<:SFunc{I,O}}) where {I,O}
output_type(::SFunc{I,O}) where {I,O}
Return the output type (i.e. `O`) of the `SFunc`
"""
output_type(::Type{<:SFunc{I,O}}) where {I,O} = O
output_type(::SFunc{I,O}) where {I,O} = O
"""
Placeholder{O} <: ValueTyped{O}
A type for typing Scruff variables that do not reference models
"""
struct Placeholder{O} <: ValueTyped{O}
name::Symbol
end
"""
Model{I, J, O} <: ValueTyped{O}
Supertype for all Scruff models.
The model represents a variable that varies over time and has output type `O`.
The model may return an initial sfunc with input type `I` using `make_initial`,
which takes the current time as argument,
and a transition sfunc with input type `J` using `make_transition`,
which takes both the parent times (a tuple of times of the same length as `J`)
and the current time as arguments.
These two functions need to be defined for every model.
# Type parameters
- `I`: the input type to the `SFunc` returned by the model's `make_initial` function
- `J`: the input type to the `SFunc` used during the `make_transition` function call
- `O`: the actual type of the variables represented by this model
"""
abstract type Model{I, J, O} <: ValueTyped{O} end
# is_fixed(m) = false
# Define these interfaces:
function make_initial(m, t) end
function make_transition(m, parenttimes, time) end
(model::Model)(symbol::Symbol) = instantiate(model, symbol)
"""
instantiate(model::Model, name::Symbol)
Create a new `Variable` with the given name for the given model. Every
`Model` instance is also a function that, given a `Symbol`, will call
this method.
"""
function instantiate(model::Model, name::Symbol)
return Variable(name, model)
end
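# A minimal usage sketch (the model name `MyModel` is hypothetical and stands
# in for any concrete `Model` subtype); both lines below produce the same `Variable`:
#
#     m = MyModel()
#     v = instantiate(m, :v)
#     v = m(:v)    # models are callable with a Symbol, as defined above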
"""
mutable struct Variable{I,J,O} <: ValueTyped{O}
A `Variable` describes a time series of random values.
It must be named and described by a model, and references to the model's inputs must be defined.
For the type variables, see [`Model`](@ref)
"""
mutable struct Variable{I,J,O} <: ValueTyped{O}
name::Symbol
model::Model{I,J,O}
end
"""`Node{O} = Union{Placeholder{O}, Variable{I,J,O} where {I,J}}`"""
Node{O} = Union{Placeholder{O}, Variable{I,J,O} where {I,J}}
"""
output_type(::Variable{I,J,O})
output_type(::Placeholder{O})
Return the output type (i.e. `O`) of the given node.
"""
output_type(::Variable{I,J,O}) where {I,J,O} = O
output_type(::Placeholder{O}) where O = O
get_name(node::Node) = node.name
"""VariableGraph = Dict{Node, Vector{Node}}"""
VariableGraph = Dict{Node, Vector{Node}}
"""
VariableParentTimeOffset = Set{Pair{Node, Node}}
Represents whether a child-parent pair of nodes is time-offset;
pairs that are time-offset are collected in a set of this type.
"""
VariableParentTimeOffset = Set{Pair{Node, Node}}
"""
abstract type Network{I,J,O}
Collects variables, with placeholders for inputs and defined outputs,
along with up to two directed graphs (initial and transition).
Subtypes must implement `get_nodes`, `get_outputs`,
`get_initial_graph`, and `get_transition_graph` (which returns a `VariableGraph`).
For the type parameters, see the underlying [`Model`](@ref) type for the mapped
[`Variable`](@ref)s.
"""
abstract type Network{I,J,O} end
"""
get_initial_parents(n::Network, node::Node)::Vector{Node}
Returns a list of the parent nodes in the network's
initial graph (i.e. the graph returned by `get_initial_graph(network)`).
"""
function get_initial_parents(n::Network, node::Node)::Vector{Node}
g = get_initial_graph(n)
get(g, node, Node[])
end
"""
get_transition_parents(n::Network, node::Node)::Vector{Node}
Returns a list of the parent nodes in the network's
transition graph (i.e. the graph returned by `get_transition_graph(network)`).
"""
function get_transition_parents(n::Network, node::Node)::Vector{Node}
g = get_transition_graph(n)
get(g, node, Node[])
end
"""
    get_node(n::Network, name::Symbol; throw_missing=false)::Union{Node, Nothing}
Returns the node with the given name, `nothing` if no such node exists, or throws a
`KeyError` if the node is missing and `throw_missing` is `true`.
"""
function get_node(n::Network, name::Symbol; throw_missing=false)::Union{Node, Nothing}
vs = get_nodes(n)
for v in vs
if v.name == name
return v
end
end
if throw_missing
throw(KeyError(name))
end
return nothing
end
"""
get_initial_children(n::Network, var::Node)::Vector{Node}
Returns a list of the child nodes in the network's
initial graph (i.e. the graph returned by `get_initial_graph(network)`).
"""
function get_initial_children(n::Network, var::Node)::Vector{Node}
[c for c in get_nodes(n) if var in get_initial_parents(n, c)]
end
"""
get_transition_children(n::Network, var::Node)::Vector{Node}
Returns a list of the child nodes in the network's
transition graph (i.e. the graph returned by `get_transition_graph(network)`).
"""
function get_transition_children(n::Network, var::Node)::Vector{Node}
[c for c in get_nodes(n) if var in get_transition_parents(n, c)]
end
"""
complete_graph!(variables::Vector{<:Variable}, graph::VariableGraph)
Populate the `graph` with `v=>Node[]` for all `variables` not in the graph.
"""
function complete_graph!(variables::Vector{<:Variable}, graph::VariableGraph)
ks = keys(graph)
for v in variables
if !(v in ks)
graph[v] = Node[]
end
end
return graph
end
"""
    struct InstantNetwork{I,O} <: Network{I,Nothing,O}
A network that only supports initial models. The `Nothing` in the supertype
indicates that there is no transition to another time.
See [`Network`](@ref) for the type parameters
"""
struct InstantNetwork{I,O} <: Network{I,Nothing,O}
variables::Vector{<:Variable}
placeholders::Vector{<:Placeholder}
outputs::Vector{<:Variable} # outputs is a subset of variables
parents::VariableGraph
function InstantNetwork(
variables::Vector{<:Variable},
parents::VariableGraph,
placeholders::Vector{<:Placeholder} = Placeholder[],
outputs::Vector{<:Variable} = Variable[])
I = Tuple{[value_type(p) for p in placeholders]...}
O = Tuple{[output_type(v) for v in outputs]...}
return new{I,O}(variables, placeholders, outputs, complete_graph!(variables, parents))
end
end
get_variables(n::InstantNetwork) = n.variables
get_placeholders(n::InstantNetwork) = n.placeholders
get_nodes(n::InstantNetwork) = union(Set(n.variables), Set(n.placeholders))
get_initial_placeholders(n::InstantNetwork) = n.placeholders
get_transition_placeholders(::InstantNetwork) = error("InstantNetwork does not have a transition graph")
get_outputs(n::InstantNetwork) = n.outputs
get_graph(n::InstantNetwork) = n.parents
get_initial_graph(n::InstantNetwork) = n.parents
get_transition_graph(::InstantNetwork) = error("InstantNetwork does not have a transition graph")
get_parents(n::InstantNetwork, v) = get_initial_parents(n,v)
get_children(n::InstantNetwork, v) = get_initial_children(n,v)
has_timeoffset(n::InstantNetwork, child::Node, parent::Node) = false
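# Construction sketch, assuming `mx` and `my` are concrete models (the model
# names here are hypothetical). `y` has `x` as its only parent:
#
#     x = mx(:x)
#     y = my(:y)
#     net = InstantNetwork(Variable[x, y], VariableGraph(y => Node[x]))
#     get_parents(net, y)    # == Node[x]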
"""
DynamicNetwork{I,J,O} <: Network{I,J,O}
A network that can transition over time.
See [`Network`](@ref) for the type parameters.
"""
struct DynamicNetwork{I,J,O} <: Network{I,J,O}
variables::Vector{<:Variable}
initial_placeholders::Vector{<:Placeholder}
transition_placeholders::Vector{<:Placeholder}
outputs::Vector{<:Variable} # outputs is a subset of variables
initial_parents::VariableGraph
transition_parents::VariableGraph
parents_timeoffset::VariableParentTimeOffset
function DynamicNetwork(
variables::Vector{<:Variable},
initial_parents::VariableGraph,
transition_parents::VariableGraph,
parents_timeoffset::VariableParentTimeOffset = Set{Pair{Node, Node}}(),
init_placeholders::Vector{<:Placeholder} = Placeholder[],
trans_placeholders::Vector{<:Placeholder} = Placeholder[],
outputs::Vector{<:Variable} = Variable[])
I = Tuple{[value_type(p) for p in init_placeholders]...}
J = Tuple{[value_type(p) for p in trans_placeholders]...}
O = Tuple{[output_type(v) for v in outputs]...}
return new{I,J,O}(variables, init_placeholders, trans_placeholders, outputs,
complete_graph!(variables, initial_parents), complete_graph!(variables, transition_parents), parents_timeoffset)
end
end
get_variables(n::DynamicNetwork) = n.variables
get_initial_placeholders(n::DynamicNetwork) = n.initial_placeholders
get_transition_placeholders(n::DynamicNetwork) = n.transition_placeholders
get_nodes(n::DynamicNetwork) =
union(Set(n.variables), Set(n.initial_placeholders), Set(n.transition_placeholders))
get_outputs(n::DynamicNetwork) = n.outputs
get_initial_graph(n::DynamicNetwork) = n.initial_parents
get_transition_graph(n::DynamicNetwork) = n.transition_parents
has_timeoffset(n::DynamicNetwork, child::Node, parent::Node) = child == parent || Pair(child, parent) in n.parents_timeoffset
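# Construction sketch for a temporal network, assuming a hypothetical model `mx`
# whose variable `x` depends on its own previous value in the transition graph:
#
#     x = mx(:x)
#     net = DynamicNetwork(Variable[x], VariableGraph(x => Node[]),
#                          VariableGraph(x => Node[x]))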
function assign_times(parents::Dict{Variable, Vector{Node}},
        timed_varplaces::Dict{Node,Time})::Union{Dict{Node,Time}, Nothing}
    # TODO be smarter. For now this is exhaustive at least, but exponentially slower than needed.
    # Unclear what the exact assumptions on model constraints should be
    # Easy improvement - select immediately identifiable vars with preference (i.e. FixedModel)
    untimed_vars = [var for var in keys(parents) if !(var in keys(timed_varplaces))]
    if isempty(untimed_vars)
        return timed_varplaces
    end
    for untimed_var in untimed_vars
        new_time = assign_var(parents, timed_varplaces, untimed_var)
        if isnothing(new_time) # Can't assign
            return nothing
        end
        sub_timed_varplaces = copy(timed_varplaces)
        sub_timed_varplaces[untimed_var] = new_time
        recursed = assign_times(parents, sub_timed_varplaces)
        if !isnothing(recursed)
            return recursed
        end # Else try assigning another variable
    end
    return nothing
end
# ------------------------------------------------------------------
module Models
using ..Scruff
using ..Scruff.Operators
using ..Scruff.SFuncs
import ..Scruff: make_initial, make_transition
include("models/instantmodel.jl")
include("models/timelessinstantmodel.jl")
include("models/simplemodel.jl")
include("models/fixedtimemodel.jl")
include("models/timelessfixedtimemodel.jl")
include("models/homogeneousmodel.jl")
include("models/variabletimemodel.jl")
include("models/staticmodel.jl")
include("models/configurable/configurablemodel.jl")
include("models/configurable/parameterized.jl")
include("models/configurable/simplenumeric.jl")
end
# ------------------------------------------------------------------
"""
The `Operators` module defines interfaces for the following operators:
- `is_deterministic(sf::SFunc)::Bool`
- `sample(sf::SFunc{I,O}, i::I)::O where {I,O}`
- `sample_logcpdf(sf::SFunc{I,O}, i::I)::Tuple{O, AbstractFloat} where {I,O}`
- `invert(sf::SFunc{I,O}, o::O)::I where {I,O}`
- `lambda_msg(sf::SFunc{I,O}, i::SFunc{<:Option{Tuple{}}, O})::SFunc{<:Option{Tuple{}}, I} where {I,O}`
- `marginalize(sf::SFunc{I,O}, i::SFunc{<:Option{Tuple{}}, I})::SFunc{<:Option{Tuple{}}, O} where {I,O}`
- `logcpdf(sf::SFunc{I,O}, i::I, o::O)::AbstractFloat where {I,O}`
- `cpdf(sf::SFunc{I,O}, i::I, o::O)::AbstractFloat where {I,O}`
- `log_cond_prob_plus_c(sf::SFunc{I,O}, i::I, o::O)::AbstractFloat where {I,O}`
- `f_expectation(sf::SFunc{I,O}, i::I, fn::Function) where {I,O}`
- `expectation(sf::SFunc{I,O}, i::I)::O where {I,O}`
- `variance(sf::SFunc{I,O}, i::I)::O where {I,O}`
- `get_config_spec(sf::SFunc{I,O,P})::P where {I,O,P}`
- `set_config_spec!(sf::SFunc{I,O,P}, p::P)::SFunc{I,O,P} where {I,O,P}`
- `get_score(sf::SFunc{Tuple{I},O}, i::I)::AbstractFloat where {I,O}`
- `get_log_score(sf::SFunc{Tuple{I},O}, i::I)::AbstractFloat where {I,O}`
- ```support(sf::SFunc{I,O},
parranges::NTuple{N,Vector},
size::Integer,
curr::Vector{<:O}) where {I,O,N}```
- `support_quality(sf::SFunc, parranges)`
- ```bounded_probs(sf::SFunc{I,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector})::Tuple{Vector{<:AbstractFloat}, Vector{<:AbstractFloat}} where {I,O,N}```
- ```make_factors(sf::SFunc{I,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
id,
parids::Tuple)::Tuple{Vector{<:Scruff.Utils.Factor}, Vector{<:Scruff.Utils.Factor}} where {I,O,N}```
- `initial_stats(sf::SFunc)`
- ```expected_stats(sf::SFunc{I,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
pis::NTuple{M,Dist},
child_lambda::Score{<:O}) where {I,O,N,M}```
- `accumulate_stats(sf::SFunc, existing_stats, new_stats)`
- `maximize_stats(sf::SFunc, stats)`
- ```compute_bel(sf::SFunc{I,O},
range::VectorOption{<:O},
pi::Dist{<:O},
lambda::Score{<:O})::Dist{<:O} where {I,O}```
- `compute_lambda(sf::SFunc, range::VectorOption, lambda_msgs::Vector{<:Score})::Score`
- ```send_pi(sf::SFunc{I,O},
range::VectorOption{O},
bel::Dist{O},
lambda_msg::Score{O})::Dist{<:O} where {I,O}```
- ```outgoing_pis(sf::SFunc,
range::VectorOption,
bel::Dist,
incoming_lambdas::VectorOption{<:Score})::Vector{<:Dist}```
- ```outgoing_lambdas(sf::SFunc{I,O},
lambda::Score{O},
range::VectorOption{O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple)::Vector{<:Score} where {N,I,O}```
- ```compute_pi(sf::SFunc{I,O},
range::VectorOption{O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple)::Dist{<:O} where {N,I,O}```
- ```send_lambda(sf::SFunc{I,O},
lambda::Score{O},
range::VectorOption{O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple,
parent_idx::Integer)::Score where {N,I,O}```
"""
module Operators
using ..Scruff
export
VectorOption,
Option
"""VectorOption{T} = Union{Vector{Union{}}, Vector{T}}"""
VectorOption{T} = Union{Vector{Union{}}, Vector{T}}
"""Option{T} = Union{Nothing, T}"""
Option{T} = Union{Nothing, T}
include("operators/op_performance.jl")
include("operators/op_defs.jl")
include("operators/op_impls.jl")
end
# ------------------------------------------------------------------
export
Instance,
VariableInstance,
PlaceholderInstance,
Env,
Runtime,
InstantRuntime,
DynamicRuntime,
collect_messages,
current_instance,
current_time,
get_all_instances,
get_belief,
get_placeholder_beliefs,
get_definition,
get_env,
get_evidence,
get_instance,
get_intervention,
get_message,
get_name,
get_network,
get_sfunc,
get_state,
get_time,
get_value,
get_node,
has_belief,
has_evidence,
has_instance,
has_intervention,
has_previous_instance,
has_state,
has_value,
previous_instance,
rng,
clear_state!,
concat!,
delete_evidence!,
delete_state!,
delete_value!,
distribute_messages!,
instantiate!,
ensure_all!,
ensure_instance!,
post_belief!,
post_evidence!,
post_intervention!,
latest_instance_before,
remove_messages!,
set_message!,
set_state!,
set_time!,
set_value!
using DataStructures
using Dates
using Random
using .Models
import Base.isless
"""
Instance{O}
An abstract base type for variable and placeholder instances.
An instance can have values associated with it in the runtime.
`O` is the output type of the node.
"""
abstract type Instance{O} end
"""
VariableInstance
An instance of a variable node at `time`. `sf` is an sfunc generated from
the variable's model.
"""
struct VariableInstance{O} <: Instance{O}
node :: Variable{I,J,O} where {I,J}
sf :: Union{SFunc{I,O}, SFunc{J,O}} where {I,J}
time :: Any
end
"""
PlaceholderInstance
An instance of placeholder `node` at `time`. This instance has no sfunc, but can still take
values in the runtime.
"""
struct PlaceholderInstance{O} <: Instance{O}
node :: Placeholder{O}
time :: Any
end
"""
get_sfunc(i::VariableInstance)::SFunc
Get the instance's sfunc.
"""
get_sfunc(i::VariableInstance)::SFunc = i.sf
"""
get_node(i::Instance)::Node
Get the instance's node, whether it is a placeholder or a variable.
"""
get_node(i::Instance)::Node = i.node
"""
get_time(i::Instance)
Get the instance's time.
"""
get_time(i::Instance) = i.time
"""
get_name(i::Instance)::Symbol
Get the name of the instance's node.
"""
get_name(i::Instance)::Symbol = get_node(i).name
"""
get_definition(i::VariableInstance)::D where {D<:ValueTyped}
Get the instance's variable's underlying model.
"""
get_definition(i::VariableInstance)::D where {D<:ValueTyped} = get_node(i).model
"""
get_model(i::VariableInstance)::D where {D<:ValueTyped}
Get the instance's variable's underlying model.
"""
get_model(i::VariableInstance)::D where {D<:ValueTyped} = get_definition(i)
output_type(i::Instance) = output_type(get_node(i))
"""
struct Env
Holds all external state of a Runtime. The `Env` supports the following methods:
```
get_state(env::Env) :: Dict{Symbol, Any}
has_state(env::Env, key::Symbol) :: Bool
get_state(env::Env, key::Symbol)
set_state!(env::Env, key::Symbol, value)
delete_state!(env::Env, key::Symbol)
clear_state!(env::Env)
clone(env::Env)
```
"""
struct Env
state::Dict{Symbol, Any}
Env() = new(Dict{Symbol, Any}())
end
get_state(env::Env) :: Dict{Symbol, Any} = env.state
has_state(env::Env, key::Symbol) :: Bool = haskey(get_state(env), key)
function get_state(env::Env, key::Symbol)
has_state(env, key) || throw(ArgumentError("symbol :$key does not exist in the environment"))
get_state(env)[key]
end
set_state!(env::Env, key::Symbol, value) = get_state(env)[key] = value
delete_state!(env::Env, key::Symbol) = pop!(get_state(env), key, nothing)
clear_state!(env::Env) = empty!(get_state(env))
clone(env::Env) :: Env = deepcopy(env)
# TODO what should happen if global and local keys conflict?
function concat!(local_env::Env, global_env::Env) :: Env
for (k,v) in get_state(clone(global_env))
set_state!(local_env, k, v)
end
local_env
end
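# Example of the `Env` interface defined above:
#
#     env = Env()
#     set_state!(env, :count, 1)
#     has_state(env, :count)       # true
#     get_state(env, :count)       # 1
#     delete_state!(env, :count)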
"""
abstract type Runtime
The supertype of runtimes, which hold the state of the compute graph. This code assumes
that values are associated with instances but messages are passed between
variables and applied to the relevant instances later. It has to be this way
because the receiving instance might not exist at the time the message is sent.
"""
abstract type Runtime end
"""
struct InstantRuntime <: Runtime
A runtime that represents a network whose variables take on a single instance. As
a convenience, the following methods create an `InstantRuntime`:
```
Runtime()
Runtime(net :: InstantNetwork)
Runtime(net :: DynamicNetwork)
```
"""
struct InstantRuntime <: Runtime
env :: Env
name :: Symbol
network :: InstantNetwork
instances :: Dict{Node, Instance}
values :: Dict{Tuple{Instance, Symbol}, Any}
messages :: Dict{Tuple{Node, Symbol}, Dict{Node, Any}}
end
"""
struct DynamicRuntime <: Runtime
A runtime that represents a network whose variables take on many instances at different times of type `T`.
"""
struct DynamicRuntime{T} <: Runtime
env :: Env
name :: Symbol
network :: DynamicNetwork
instances :: Dict{Node,
SortedDict{T, Instance, Base.Order.ReverseOrdering}}
values :: Dict{Tuple{Instance, Symbol}, Any}
messages :: Dict{Tuple{Node, Symbol}, Dict{Node, Any}}
end
Runtime() = Runtime(InstantNetwork(Variable[], VariableGraph()))
Runtime(net :: InstantNetwork) = InstantRuntime(Env(), gensym(), net,
Dict(), Dict(), Dict())
Runtime(net :: DynamicNetwork) = Runtime(net, 0)
function Runtime(net :: DynamicNetwork, time::T) where {T}
rt = DynamicRuntime{T}(Env(), gensym(), net, Dict(), Dict(), Dict())
set_time!(rt, time)
return rt
end
function rng(r::Runtime)
return Random.GLOBAL_RNG
end
get_env(runtime::Runtime) :: Env = runtime.env
get_name(runtime::Runtime) :: Symbol = runtime.name
get_network(runtime::Runtime) :: Network = runtime.network
get_nodes(runtime::Runtime) :: Set{Node} = get_nodes(get_network(runtime))
function get_node(runtime::Runtime, name::Symbol) :: Union{Placeholder, Variable, Nothing}
get_node(get_network(runtime), name)
end
#=
state functions
=#
get_state(runtime::Runtime) :: Dict{Symbol, Any} = get_state(get_env(runtime))
has_state(runtime::Runtime, key::Symbol) :: Bool = has_state(get_env(runtime), key)
get_state(runtime::Runtime, key::Symbol) = get_state(get_env(runtime), key)
set_state!(runtime::Runtime, key::Symbol, value) = set_state!(get_env(runtime), key, value)
delete_state!(runtime::Runtime, key::Symbol) = delete_state!(get_env(runtime), key)
clear_state!(runtime::Runtime) = empty!(get_state(runtime))
#=
Managing Runtime time
=#
const TIME = :__simulated_time__
"""
current_time(runtime::Runtime) -> T
Returns the currently set time of the given `Runtime`
"""
function current_time(runtime::DynamicRuntime{T}) :: T where {T}
return get_state(runtime, TIME)
end
"""
set_time!(runtime::DynamicRuntime{T}, newtime::T)
Sets the current time for the given runtime.
"""
set_time!(runtime::DynamicRuntime{T}, newtime::T) where {T} = set_state!(runtime, TIME, newtime)
#=
Instantiating instances
=#
isless(::Nothing, ::Nothing) = false
"""
instantiate!(runtime::InstantRuntime,variable::Variable,time = 0)
instantiate!(runtime::InstantRuntime,placeholder::Placeholder,time = 0)
instantiate!(runtime::DynamicRuntime{T}, node::Node,time::T = current_time(runtime))::Instance where {T}
Instantiate and return an instance for the given runtime at the given time; the default
time is the current time of the runtime in unix time (an `Int64`). For an `InstantRuntime`,
there is only a single instance for each variable.
"""
function instantiate!(runtime::InstantRuntime,variable::Variable,time = 0)
# time argument is provided for uniformity but ignored
return get!(runtime.instances, variable,
VariableInstance(variable, make_initial(variable.model, time), time))
end
function instantiate!(runtime::InstantRuntime,placeholder::Placeholder,time = 0)
return get!(runtime.instances, placeholder, PlaceholderInstance(placeholder, time))
end
function instantiate!(runtime::DynamicRuntime{T}, node::Node,time::T = current_time(runtime))::Instance where {T}
if haskey(runtime.instances, node)
curr = (runtime.instances[node])
@assert !in(time, keys(curr)) "variable $(node.name) at time $(time) is already instantiated"
parents = get_transition_parents(runtime.network, node)
parenttimes = Vector{T}()
for p in parents
time_offset = has_timeoffset(runtime.network, node, p)
parinst = latest_instance_before(runtime, p, time, !time_offset)
if isnothing(parinst)
error("In instantiate! for ", variable.name, ": parent ", p.name, " not instantiated at time ", time)
end
push!(parenttimes, get_time(parinst))
end
parenttimes = tuple(parenttimes...)
if isa(node, Variable)
sf = make_transition(node.model, parenttimes, time)
inst = VariableInstance(node, sf, time)
else
inst = PlaceholderInstance(node, time)
end
curr[time] = inst
return inst
else
local inst::Instance
if isa(node, Variable)
sf = make_initial(node.model, time)
inst = VariableInstance(node, sf, time)
else
inst = PlaceholderInstance(node, time)
end
instvec = Pair{T, Instance}[time => inst]
runtime.instances[node] =
SortedDict{T, Instance, Base.Order.ReverseOrdering}(Base.Order.ReverseOrdering(), instvec)
return inst
end
end
"""
ensure_all!(runtime::InstantRuntime, time=0) :: Dict{Symbol, Instance}
ensure_all!(runtime::DynamicRuntime, time = current_time(runtime)) :: Dict{Symbol, Instance}
Instantiate all the nodes in the network at the given time and return them as a
dict from node names to their corresponding instances; the default time is the
current time of the runtime.
"""
function ensure_all!(runtime::InstantRuntime, time=0) :: Dict{Symbol, Instance}
insts = Dict{Symbol, Instance}()
for node in get_nodes(runtime)
insts[node.name] = instantiate!(runtime, node)
end
return insts
end
function ensure_all!(runtime::DynamicRuntime, time = current_time(runtime)) :: Dict{Symbol, Instance}
insts = Dict{Symbol, Instance}()
for node in get_nodes(runtime)
try
insts[node.name] = ensure_instance!(runtime, node, time)
catch ex
@error("Error $ex on $(node.name), $node")
rethrow(ex)
end
end
return insts
end
"""
ensure_instance!(runtime::Runtime, node::Node{O}, time = current_time(runtime))::Instance{O} where O
Returns an instance for the given variable at the given time, either by using an existing one or creating a new one.
"""
function ensure_instance!(runtime::Runtime, node::Node{O}, time = current_time(runtime))::Instance{O} where O
if has_instance(runtime, node, time)
return get_instance(runtime, node, time)
else
return instantiate!(runtime, node, time)
end
end
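# Typical instantiation flow, assuming `net` is an `InstantNetwork` built elsewhere
# and `:x` is a hypothetical node name:
#
#     rt = Runtime(net)
#     insts = ensure_all!(rt)                          # Dict mapping node names to instances
#     inst = current_instance(rt, get_node(rt, :x))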
"""
current_instance(runtime::InstantRuntime, node::Node)
current_instance(runtime::DynamicRuntime, node::Node)
Returns the current (last) instance for the given runtime and node; this method will
throw an exception if there is no current instance for the given node
"""
current_instance(runtime::InstantRuntime, node::Node) = runtime.instances[node]
function current_instance(runtime::DynamicRuntime, node::Node) :: Instance
first(runtime.instances[node]).second
end
"""
previous_instance(runtime::DynamicRuntime, node::Node)
Returns the previous instance for the given runtime and node. This will throw
an exception if there is no previous instance.
"""
function previous_instance(runtime::DynamicRuntime, node::Node)
insts = runtime.instances[node]
key = collect(keys(insts))[2]
return insts[key]
end
"""
get_all_instances(runtime::DynamicRuntime, variable::Variable)
get_all_instances(runtime::InstantRuntime, variable::Variable)
Returns all instances in the `runtime` for the given variable, in order, as a
`Vector{Instance}`.
"""
function get_all_instances(runtime::DynamicRuntime, variable::Variable)
has_instance(runtime, variable) ? collect(values(runtime.instances[variable])) : Instance[]
end
function get_all_instances(runtime::InstantRuntime, variable::Variable)
has_instance(runtime, variable) ? [runtime.instances[variable]] : Instance[]
end
"""
function get_instance(runtime::DynamicRuntime{T}, node::Node, t::T)::Instance
Returns the instance for the given node at time `t`; throws an error if no
such instance exists.
"""
function get_instance(runtime::DynamicRuntime{T}, node::Node, t::T) :: Instance where {T}
return runtime.instances[node][t]
end
"""
latest_instance_before(runtime::DynamicRuntime{T}, node::Node, t::T, allow_equal::Bool) :: Union{Instance, Nothing} where T
Return the latest instance of the node before the given time. The `allow_equal` flag indicates
whether instances at a time equal to `t` are allowed.
If there is no such instance, returns `nothing`.
"""
function latest_instance_before(runtime::DynamicRuntime{T}, node::Node, t::T, allow_equal::Bool) :: Union{Instance, Nothing} where T
if !(node in keys(runtime.instances))
return nothing
end
smd = runtime.instances[node]
for u in keys(smd)
if allow_equal ? u <= t : u < t
return smd[u]
end
end
return nothing
end
"""
has_instance(runtime::DynamicRuntime, node::Node, time = current_time(runtime))
has_instance(runtime::InstantRuntime, node::Node)
Returns true if the given node has an instance in the given runtime at the given time.
"""
function has_instance(runtime::DynamicRuntime, node::Node, time = current_time(runtime))
haskey(runtime.instances, node) && time in keys(runtime.instances[node])
end
has_instance(runtime::InstantRuntime, node::Node) = haskey(runtime.instances, node)
"""
has_previous_instance(runtime::DynamicRuntime, node::Node)
Checks if the specified node has an instance prior to the current one.
"""
function has_previous_instance(runtime::DynamicRuntime, node::Node)
haskey(runtime.instances, node) && length(keys(runtime.instances, node)) > 1
end
#=
Setting and getting values associated with instances
=#
"""
set_value!(runtime::Runtime, instance::Instance, key::Symbol, value)
Set the value on an instance for the given key
"""
function set_value!(runtime::Runtime, instance::Instance, key::Symbol, value)
runtime.values[(instance, key)] = value
end
"""
get_value(runtime::Runtime, instance::Instance, key::Symbol)
Get the value on an instance for the given key; this will throw an exception
if the instance does not contain the given key
"""
function get_value(runtime::Runtime, instance::Instance, key::Symbol)
result = runtime.values[(instance, key)]
return result
end
"""
delete_value!(runtime::Runtime{T}, instance::Instance, key::Symbol) where {T}
Deletes the mapping for the given instance and key in the runtime and returns it
"""
function delete_value!(runtime::Runtime, instance::Instance, key::Symbol)
return pop!(runtime.values, (instance, key), nothing)
end
function has_value(runtime::Runtime, instance::Instance, key::Symbol)
(instance, key) in keys(runtime.values)
end
#=
Sending and receiving messages between variables
=#
function set_message!(runtime::Runtime, sender::Node, recipient::Node, key::Symbol, value)
msgs = get(runtime.messages, (sender, key), Dict())
msgs[recipient] = value
runtime.messages[(sender, key)] = msgs
end
function get_message(runtime::Runtime, sender::Node, recipient::Node, key::Symbol)
runtime.messages[(sender, key)][recipient]
end
function distribute_messages!(runtime::Runtime, sender::Node, recipients::Vector{Node},
key::Symbol, values)
msgs = get(runtime.messages, (sender, key), Dict())
for (rcp, val) in zip(recipients, values)
msgs[rcp] = val
end
runtime.messages[(sender, key)] = msgs
end
function collect_messages(runtime::Runtime, senders::Vector{Node}, recipient::Node,
key::Symbol)
[get_message(runtime, sdr, recipient, key) for sdr in senders]
end
function remove_messages!(runtime::Runtime, sender::Node, key::Symbol)
delete!(runtime.messages, (sender, key))
end
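# Message-passing sketch, assuming `a` and `b` are nodes in the runtime's network
# and `msg` is whatever value the algorithm passes (e.g. an sfunc):
#
#     set_message!(rt, a, b, :pi_message, msg)
#     get_message(rt, a, b, :pi_message)              # == msg
#     collect_messages(rt, Node[a], b, :pi_message)   # == [msg]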
#=
Invoking an operation on an instance
=#
function _unwrap(p)
if p isa Tuple && length(p) == 1
p[1]
else
p
end
end
"""
BELIEF
The constant key used to store belief for a specific variable instance
"""
const BELIEF = :__belief__
"""
post_belief!(runtime::Runtime, inst::Instance, belief)
Posts the given belief for the given instance.
"""
function post_belief!(runtime::Runtime, inst::Instance, belief)
set_value!(runtime, inst, BELIEF, belief)
end
"""
get_belief(runtime::Runtime, inst::Instance)
Returns the last posted belief for the given instance; this will return
`nothing` if no belief has been posted
"""
function get_belief(runtime::Runtime, inst::Instance)
has_value(runtime, inst, BELIEF) || return nothing
return get_value(runtime, inst, BELIEF)
end
has_belief(runtime::Runtime, inst::Instance) = has_value(runtime, inst, BELIEF)
function get_placeholder_beliefs(runtime::Runtime)::Dict{Symbol,Dist}
result = Dict{Symbol,Dist}()
for ph in get_placeholders(get_network(runtime))
i = current_instance(runtime, ph)
if has_belief(runtime, i)
result[ph.name] = get_belief(runtime, i)
end
end
return result
end
"""
EVIDENCE
The constant key used to store evidence for a specific variable instance
"""
const EVIDENCE = :__evidence__
"""
post_evidence!(runtime::Runtime, inst::Instance, evidence)
Posts the given evidence for the given instance.
"""
function post_evidence!(runtime::Runtime, inst::Instance, evidence::Score)
set_value!(runtime, inst, EVIDENCE, evidence)
end
"""
get_evidence(runtime::Runtime, inst::Instance)
Returns the last posted evidence for the given instance; this will return
`nothing` if no evidence has been posted
"""
function get_evidence(runtime::Runtime, inst::Instance)
has_value(runtime, inst, EVIDENCE) || return nothing
return get_value(runtime, inst, EVIDENCE)
end
function delete_evidence!(runtime::Runtime, inst::Instance)
if has_value(runtime, inst, EVIDENCE)
return delete_value!(runtime, inst, EVIDENCE)
end
end
has_evidence(runtime::Runtime, inst::Instance) = has_value(runtime, inst, EVIDENCE)
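# Evidence sketch, assuming a `Score` such as `HardScore` from `Scruff.SFuncs`
# (which concentrates all weight on a single value):
#
#     inst = current_instance(rt, x)
#     post_evidence!(rt, inst, HardScore(true))
#     has_evidence(rt, inst)    # true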
"""
INTERVENTION
The constant key used to store interventions for a specific variable instance
"""
const INTERVENTION = :__intervention__
"""
post_intervention!(runtime::Runtime, inst::Instance, intervention::Dist)
Posts the given intervention for the given instance.
"""
function post_intervention!(runtime::Runtime, inst::Instance, intervention::Dist)
set_value!(runtime, inst, INTERVENTION, intervention)
end
"""
get_intervention(runtime::Runtime, inst::Instance)
Returns the last posted intervention for the given instance; this will return
`nothing` if no intervention has been posted
"""
function get_intervention(runtime::Runtime, inst::Instance)
has_value(runtime, inst, INTERVENTION) || return nothing
return get_value(runtime, inst, INTERVENTION)
end
has_intervention(runtime::Runtime, inst::Instance) = has_value(runtime, inst, INTERVENTION)
#=
Define the interface for Algorithms
=#
"""
initialize_algorithm(alg_fun::Function, runtime::Runtime)
Initializes the given algorithm's [`Runtime`](@ref). By default, this does nothing.
"""
initialize_algorithm(alg_fun::Function, runtime::Runtime) = nothing
"""
start_algorithm(alg_fun::Function, runtime::Runtime, args...)
Starts the algorithm in the current thread
"""
start_algorithm(alg_fun::Function, runtime::Runtime, args...) = nothing
# TODO implement ; should take a list of instance/evidence pairs
# (or rather, variable-name/time/evidence tuples)
function post_evidence!(alg_fun::Function, runtime::Runtime, inst::Instance, evidence)
end
# ------------------------------------------------------------------
module RTUtils
using ..Scruff
using ..Utils
using ..SFuncs
import ..Operators: Support
using ..MultiInterface
include("utils/range_utils.jl")
include("utils/expansions.jl")
include("utils/initializer.jl")
end
# ------------------------------------------------------------------
module SFuncs
using Base: reinit_stdio
using ..MultiInterface
using ..Scruff
using ..Utils
using ..Operators
import ..Operators
import ..Operators: VectorOption, Support, SupportQuality
import ..Operators: InitialStats, AccumulateStats, ExpectedStats, MaximizeStats
macro impl(expr)
return esc(MultiInterface.impl(__module__, __source__, expr, Operators))
end
include("sfuncs/dist/dist.jl")
include("sfuncs/score/score.jl")
include("sfuncs/util/extend.jl")
include("sfuncs/conddist/conddist.jl")
include("sfuncs/compound/compound.jl")
include("sfuncs/op_impls/bp_ops.jl")
include("sfuncs/op_impls/basic_ops.jl")
end
# ------------------------------------------------------------------
module Utils
include("utils/factor.jl")
include("utils/cutils.jl")
include("utils/simple_graph.jl")
# Helper functions for normalize; may be useful elsewhere
function _entries(xs::Array)
ys = map(_entries, xs)
result = []
for y in ys
append!(result, y)
end
return result
end
_entries(d::Dict) = _entries(collect(map(_entries, values(d))))
_entries(xs::Tuple) = _entries([y for y in xs])
_entries(x::Number) = [x]
_entries(::Nothing) = []
_transform(f::Function, xs::Array) = [_transform(f,x) for x in xs]
_transform(f::Function, d::Dict) = Dict([(k,_transform(f,x)) for (k,x) in d])
_transform(f::Function, xs::Tuple) = tuple([_transform(f,x) for x in xs]...)
_transform(f::Function, x::Number) = f(x)
_transform(::Function, ::Nothing) = nothing
"""
normalize(xs)
Normalize an array of non-negative reals to sum to 1
"""
function normalize(xs)
tot = 0.0
ys = _entries(xs)
for y in ys
if y < 0
error("Negative probability in $(ys)")
end
tot += y
end
# In the case of learning, it is legitimately possible that a case never
# happens, so all statistics are zero. Therefore we accept this case.
if tot == 0.0
f = x -> 1.0 / length(ys)
else
f = x -> x / tot
end
return _transform(f, xs)
end
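# Worked examples of `normalize` on flat and nested containers:
#
#     normalize([1.0, 3.0])                        # [0.25, 0.75]
#     normalize(Dict(:a => [1.0, 1.0], :b => 2.0)) # Dict(:a => [0.25, 0.25], :b => 0.5)
#     normalize([0.0, 0.0])                        # [0.5, 0.5]; all-zero statistics are accepted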
end
# ------------------------------------------------------------------
export
Algorithm,
marginal,
joint,
probability,
mean
"""
Algorithm
The supertype of all algorithms.
A standard set of queries is defined for algorithms. Any given subtype of `Algorithm`
will implement a subset of these queries.
"""
abstract type Algorithm end
# ------------------------------------------------------------------
export
Query,
Queryable,
Marginal,
Joint,
ProbValue,
ProbFunction,
ProbabilityBounds,
Expectation,
Mean,
Variance,
answer,
marginal,
joint,
probability,
probability_bounds,
mean
"""
abstract type Query end
General type of query that can be answered after running an algorithm.
"""
abstract type Query end
"""
A query target is either a variable instance or a variable.
Allowing queries to be defined in terms of instances rather than variables makes it possible
to ask queries across multiple instances of a variable at different times.
However, in many cases the current instance of the variable(s) is required and then it is easier
to use variables.
"""
Queryable{O} = Union{VariableInstance{O}, Variable{I,J,O}} where {I,J}
function _resolve(r::Runtime, q::Queryable{O})::VariableInstance{O} where O
if q isa Instance
return q
else
return current_instance(r, q)
end
end
"""
answer(::Query, ::Algorithm, ::Runtime, ::VariableInstance)
answer(::Query, ::Algorithm, ::Runtime, ::Vector{VariableInstance})
answer(::Query, ::Algorithm, ::Runtime, ::Queryable)
answer(::Query, ::Algorithm, ::Runtime, ::Vector{Queryable})
Answer the query.
An implementation of an algorithm should implement an `answer` method for any queries
it can handle. The type hierarchies of `Query` and `Algorithm` will enable
query answering methods to be used wherever appropriate with the right specialization.
The implementations of `answer` are differentiated along two dimensions:
- single or multiple items
- queryable items in general or specifically instances
It is expected that an algorithm will implement one of the first two methods for queries it
can handle. I.e., an algorithm is expected to handle a single instance or a vector of instances.
If it can handle multiple instances, it should implement the second method; a single-instance implementation
is derived by default using a singleton vector. An algorithm can still override this default
method if it handles single instances differently from multiple ones.
Algorithms will generally not implement the latter two methods, which are provided for convenience.
Default implementations are provided that delegate to the instance-specific methods.
Defining a very high-level default implementation that throws an error enables implementations
to go through sequences of preferences.
"""
function answer(q::Query, a::Algorithm, r::Runtime, i::VariableInstance)
is = VariableInstance[i]
answer(q, a, r, is)
end
function answer(q::Query, a::Algorithm, r::Runtime, item::Queryable)
answer(q, a, r, _resolve(r,item))
end
function answer(q::Query, a::Algorithm, r::Runtime, items::Vector{Queryable})
insts = VariableInstance[]
for i in items
inst::VariableInstance = _resolve(r,i)
push!(insts, inst)
end
answer(q, a, r, insts)
end
answer(::Any, ::Any, ::Any, ::Any) = error("_answer_")
""
struct Marginal <: Query end
"""
marginal(alg::Algorithm, runtime::Runtime, item::Queryable{O})::Union{Dist{O}, Tuple{Dist{O}, Dist{O}}} where O
Return the marginal distribution over `item`, or lower and upper marginals,
depending on the algorithm.
The returned `Dist` assigns a probability to each value of `item`.
"""
function marginal(alg::Algorithm, run::Runtime, item::Queryable{O})::Union{Dist{O}, Tuple{Dist{O}, Dist{O}}} where O
mg = answer(Marginal(), alg, run, item)
return mg
end
struct Joint <: Query end
"""
    joint(alg::Algorithm, run::Runtime, items::Vector{Queryable})::Union{Score, Tuple{Score, Score}}
Return the joint distribution over `items`, or lower and upper distributions,
depending on the algorithm.
The returned `Score` assigns a score to each vector of values of the items.
"""
function joint(alg::Algorithm, run::Runtime, items::Vector{Queryable})::Union{Score, Tuple{Score, Score}}
answer(Joint(), alg, run, items)
end
struct ProbValue{O} <: Query
value :: O
end
struct ProbFunction <: Query
fn :: Function
end
"""
probability(alg::Algorithm, run::Runtime, items::Vector{Queryable}, predicate::Function)::Union{Float64, Tuple{Float64, Float64}}
Return the probability that `items` satisfy `predicate`, or lower and upper probabilities.
`predicate` is a function from Vector{Any} to `Bool`.
"""
function probability(alg::Algorithm, run::Runtime, items::Vector{Queryable}, predicate::Function)::Union{Float64, Tuple{Float64, Float64}}
answer(ProbFunction(predicate), alg, run, items)
end
function probability(alg::Algorithm, run::Runtime, item::Queryable, predicate::Function)::Union{Float64, Tuple{Float64, Float64}}
f(vec) = predicate(vec[1])
insts = VariableInstance[_resolve(run,item)]
answer(ProbFunction(f), alg, run, insts)
end
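# Query sketch, assuming an algorithm `alg` has already been run on `runtime`
# and `x` is a queryable Boolean variable:
#
#     probability(alg, runtime, x, v -> v)    # P(x == true)
#     marginal(alg, runtime, x)               # a Dist over the values of x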
"""
probability(alg::Algorithm, run::Runtime, item::Queryable{O}, value::O)::Union{Float64, Tuple{Float64, Float64}} where O
Return the probability that `item` has `value` or lower and upper probabilities.
The default implementation tries to use the more general probability of a query.
If that fails, it uses the `cpdf` operation on the marginal of `item`.
"""
function probability(alg::Algorithm, run::Runtime, item::Queryable{O}, value::O)::Union{Float64, Tuple{Float64, Float64}} where O
inst::Instance = _resolve(run, item)
try
answer(ProbValue(value), alg, run, inst)
catch e
if e == ErrorException("_answer_")
try
probability(alg, run, item, x -> x == value)
catch e
if e == ErrorException("_answer_")
try
m = marginal(alg, run, inst)
return cpdf(m, (), value)
catch e
if e == ErrorException("_answer_")
error("None of the methods to compute the probability of a value are implemented")
else
rethrow(e)
end
end
else
rethrow(e)
end
end
else
rethrow(e)
end
end
end
struct ProbabilityBounds{O} <: Query
range :: Vector{O}
end
"""
probability_bounds(alg::Algorithm, run::Runtime, item::Queryable, range::Vector)::Tuple{Vector{Float64}, Vector{Float64}}
For an algorithm that produces lower and upper bounds, return vectors of lower and upper bounds on probabilities for values in the range.
The range is important for computing the bounds, because it is assumed that values outside the range have probability zero.
"""
function probability_bounds(alg::Algorithm, run::Runtime, item::Queryable, range::Vector)::Tuple{Vector{Float64}, Vector{Float64}}
return answer(ProbabilityBounds(range), alg, run, item)
end
struct Expectation <: Query
fn :: Function
end
struct Mean <: Query end
"""
expectation(alg::Algorithm, runtime::Runtime, item::Queryable, f::Function)::Float64
Return the expectation of the function `f` over the marginal distribution of `item`.
The default implementation uses the expectation operation on the SFunc representing the
marginal distribution.
"""
function expectation(alg::Algorithm, run::Runtime, item::Queryable, fn::Function)::Float64
try
return answer(Expectation(fn), alg, run, item)
catch e
if e == ErrorException("_answer_")
m = marginal(alg, run, item)
if m isa Tuple
# marginal produced bounds; support is same for lower and upper bounds
m = m[1]
end
tot = 0.0
O = output_type(item)
sup = support(m, (), 1000, O[])
for x in unique(sup)
tot += cpdf(m, (), x) * fn(x)
end
return tot
# return Operators.f_expectation(marginal(alg, run, item), (), fn)
else
rethrow(e)
end
end
end
"""
mean(alg::Algorithm, runtime::Runtime, item::Queryable)
Return the mean of `item`.
"""
function mean(alg::Algorithm, runtime::Runtime, item::Queryable{O})::Float64 where {O <: Number}
try
        return answer(Mean(), alg, runtime, item)
catch e
if e == ErrorException("_answer_")
return expectation(alg, runtime, item, x -> x)
else
rethrow(e)
end
end
end
struct Variance <: Query end
"""
variance(alg::Algorithm, runtime::Runtime, item::Queryable)::Float64
Return the variance of `item`.
"""
function variance(alg::Algorithm, run::Runtime, item::Queryable)::Float64
#answer(Variance(), alg, run, item)
try
return answer(Variance(), alg, run, item)
catch e
if e == ErrorException("_answer_")
exp_x = expectation(alg, run, item, x->x)
exp_xsq = expectation(alg, run, item, x->x^2)
variance_x = exp_xsq - exp_x^2
#return Operators.f_expectation(marginal(alg, run, item), (), fn)
return variance_x
else
rethrow(e)
end
end
end
# ------------------------------------------------------------------
import Base.length
using Distributions # provides Categorical, used by resample below
using StatsFuns: logsumexp
using ..SFuncs: Cat
export
Particles,
effective_sample_size,
normalize_weights,
log_prob_evidence,
resample,
probability,
marginal
"Sample = Dict{Symbol, Any}"
Sample = Dict{Symbol, Any}
"""
struct Particles
A structure of particles containing a vector of `Sample`s and a parallel vector of log-weights.
"""
struct Particles
samples::Vector{Sample}
log_weights::Vector{Float64}
end
"""
    probability(parts::Particles, predicate::Function)::Float64
Returns the probability that `predicate` (a function from `Sample` to `Bool`) is satisfied.
"""
function probability(parts::Particles, predicate::Function)::Float64
sum = 0.0
tot = 0.0
for i in 1:length(parts.samples)
w = exp(parts.log_weights[i])
s = parts.samples[i]
if predicate(s)
sum += w
end
tot += w
end
return sum / tot
end
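# Worked example: three samples with log-weights [0, 0, log(2)], i.e. raw
# weights [1, 1, 2]:
#
#     parts = Particles([Dict(:x => 1), Dict(:x => 2), Dict(:x => 1)],
#                       [0.0, 0.0, log(2.0)])
#     probability(parts, s -> s[:x] == 1)    # (1 + 2) / 4 = 0.75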
"""
marginal(parts::Particles, x::Symbol)::Cat
Returns a `Cat` representing the marginal distribution over the given symbol according to `parts`.
"""
function marginal(parts::Particles, x::Symbol)::Cat
d = Dict{Any, Float64}()
xs = [s[x] for s in parts.samples]
lws = normalize_weights(parts.log_weights)
for i in 1:length(parts.samples)
d[xs[i]] = get(d, xs[i], 0.0) + exp(lws[i])
end
ks = Vector{Any}()
ps = Vector{Float64}()
for (k,p) in d
push!(ks, k)
push!(ps, p)
end
return Cat(ks, ps)
end
"""
    effective_sample_size(log_weights::Vector{Float64})
Compute the effective sample size from a vector of log-weights.
"""
function effective_sample_size(lws::Vector{Float64})
log_ess = (2 * logsumexp(lws)) - logsumexp(2 .* lws)
return exp(log_ess)
end
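# Worked examples: uniform log-weights give an effective sample size equal to
# the number of samples, while concentrating all weight on one sample gives 1:
#
#     effective_sample_size(zeros(4))       # 4.0
#     effective_sample_size([0.0, -Inf])    # 1.0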
"""
normalize_weights(log_weights::Vector{Float64})
Normalize log-weights so that the corresponding weights sum to 1.
"""
function normalize_weights(lws::Vector{Float64})
return lws .- logsumexp(lws)
end
function log_prob_evidence(lws::Vector{Float64})
return logsumexp(lws) - log(length(lws))
end
function resample(ps::Particles, target_num_particles::Int = length(ps.samples))
lnws = normalize_weights(ps.log_weights)
weights = exp.(lnws)
selections = rand(Distributions.Categorical(weights/sum(weights)), target_num_particles)
samples = map(selections) do ind
ps.samples[ind]
end
return Particles(samples, zeros(target_num_particles))
end
# ------------------------------------------------------------------
using Base.Threads
export
em,
bp_info_provider
using Scruff
using Scruff.Models
#=========================================
The EM algorithm is written generically.
It takes a runtime and a source of data.
To make the algorithm concrete, it requires an inference algorithm,
as well as a data_batcher to convert the source of data into training examples
for each iteration.
The format of the data is not specified.
A data_batcher is used to produce the required data to use at each iteration.
The result of the data_batcher should be a one-dimensional array of examples,
where each example is a Dict mapping variable names to evidence.
The evidence format is specified in evidence_ops.
It can either be hard evidence, consisting of a value of the variable,
or soft evidence, consisting of a distribution over values.
The default data_batcher assumes the data already has the right format and
simply returns the data as a batch.
This implementation allows for both batch and online versions of EM.
For online EM, use a non-zero discount_factor (see below).
Furthermore, the data_batcher takes the runtime as an argument.
This enables it, for example, to change the network as new objects are
encountered, enabling EM with flexible structures.
This also supports dynamic and asynchronous operation.
The algorithm can be any algorithm that computes beliefs over its variables
that can be used to compute statistics. The algorithm takes only the runtime
as argument and uses the evidence associated with instances in the runtime.
Parameters associated with the variables are generalized to any configuration specification
that can change an sfunc.
The configuration specifications used by the algorithm are found
in the runtime, associated with instances using the :config_spec key.
The configuration specifications are used by ConfigurableModels.
In EM, ConfigurableModels and other kinds of models are treated differently.
Only ConfigurableModels are learned and have statistics computed.
The models defining the variables in the network must support the operations:
initial_stats, which produces initial statistics as the basis for
accumulation
expected_stats, which produces the expected sufficient statistics for a
single training example, using information computed by the algorithm
accumulate_stats, which adds statistics across examples
maximize_stats, which produces new parameter values from the statistics
The default algorithm is three pass BP.
Along with the algorithm, an initializer is required that does whatever work
is necessary for the algorithm before the first iteration.
At the minimum, the initializer should instantiate all variables in the
initial network and extract initial spec from the configurable variables into
the runtime under the :config_spec key.
At the minimum, get_config should be called on the configurable model to initialize
the value of :config_spec.
A default initializer is provided in initializer.jl, which does only this.
The EM algorithm will take care of maintaining the specs in the runtime
through the iterations.
In order to accommodate different algorithms, which may use different data
structures to compute statistics, em uses an info_provider, which takes
a runtime and an instance and returns the information to pass to the
expected_stats operation. Therefore, for all models, the expected_stats
operation takes a single argument, the information produced by the
info_provider.
em returns a flag indicating whether or not the parameters have
converged at termination.
In addition to the previous arguments, em takes the following configuration
parameters:
max_iterations The maximum number of iterations to run before terminating.
If this number is negative (the default), there is no maximum.
epsilon The maximum difference allowed between parameter values for
convergence to be considered achieved
# discount_factor The degree to which old statistics should be discounted
# when considering new examples. For batch mode, this will be zero.
# For online mode, this will be close to 1. discount_factor is actually
# a function that takes the runtime as an argument. In many cases it will
# be constant, but this allows it to change with runtime factors.
===================#
function bp_info_provider(runtime, inst)
net = get_network(runtime)
var = get_node(inst)
pars = get_parents(net, var)
parentpis :: Array{SFunc} =
collect_messages(runtime, pars, var, :pi_message)
childlam = get_value(runtime, inst, :lambda)
# cpd = get_params(inst.sf)
# cpd = get_value(runtime, inst, :params)
parranges = [get_range(runtime, current_instance(runtime, p)) for p in pars]
parranges = Tuple(parranges)
range = get_range(runtime, inst)
# sts = operate(runtime, inst, expected_stats, parranges, parentpis, childlam)
sts = expected_stats(inst.sf, range, parranges, Tuple(parentpis), childlam)
sts = normalize(sts) # We normalize here so we can apply normalization uniformly,
# whatever the sfunc of the variable.
# This is a very important point in the design!
# We don't have to normalize the individual sfuncs' expected_stats.
return sts
# return (parent_πs, λ, cpd)
end
function init_config_spec(network)
result = Dict{Symbol, Any}()
for var in get_variables(network)
if var.model isa ConfigurableModel
result[var.name] = get_config_spec(var.model)
end
end
return result
end
function em(network, data ;
data_batcher = (n,x) -> x,
algorithm = three_pass_BP,
initializer = default_initializer,
info_provider = bp_info_provider,
showprogress = false,
max_iterations = -1,
min_iterations = 1)
iteration = 0
new_config_specs = init_config_spec(network)
conv = false
stats = nothing
while (max_iterations < 0 || iteration < max_iterations) && !(conv && iteration >= min_iterations)
# We have to deepcopy the config specs since their values are complex
# and are updated, in many cases, in place.
if showprogress
println("Iteration ", iteration)
end
old_config_specs = deepcopy(new_config_specs)
batch = data_batcher(network, data)
iteration_result = em_iteration(network, batch, algorithm, initializer,
info_provider,
old_config_specs, showprogress)
(stats, new_config_specs) = iteration_result
conv = converged_numeric(old_config_specs, new_config_specs)
if conv
new_config_specs = old_config_specs # roll back
end
iteration += 1
end
# At the end of the algorithm, the runtime will store the most recent
# parameter values.
# We return a flag indicating whether the algorithm converged
# within the given iterations
if showprogress
if conv
println("EM converged after ", iteration, " iterations")
else
println("EM did not converge; terminating after ", iteration, " iterations")
end
end
return ((conv, iteration), new_config_specs)
end
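# Sketch of a typical call, assuming `net` contains ConfigurableModels and
# `examples` is a Vector of Dicts mapping variable names to evidence:
#
#     (converged, iters), specs = em(net, examples; max_iterations = 20, showprogress = true)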
function em_iteration(network, batch, algorithm, initializer,
info_provider, old_config_specs, showprogress)
vars = get_variables(network)
config_vars = filter(v -> v.model isa ConfigurableModel, vars)
# 1: Initialize the statistics
if showprogress
println("Initializing statistics")
end
new_stats = Dict{Symbol, Any}()
for var in config_vars
new_stats[get_name(var)] = initial_stats(var.model)
end
new_config_specs = Dict{Symbol, Any}()
newruntime = Runtime(network)
# 2: For each example, accumulate statistics
alock = SpinLock()
Threads.@threads for i = 1:length(batch)
runtime = deepcopy(newruntime)
initializer(runtime)
# Need to get the variables again because we did a deep copy
runvars = get_variables(get_network(runtime))
config_vars_batch = filter(v -> v.model isa ConfigurableModel, runvars)
example = batch[i]
if showprogress
println("Accumulating statistics for example ", i)
end
# 2.1: Prepare the evidence
for var in runvars
inst = current_instance(runtime, var)
delete_evidence!(runtime, inst)
if var.name in keys(example)
post_evidence!(runtime, inst, example[get_name(var)])
end
end
# 2.2: Run the algorithm
if showprogress
println("Running the algorithm for example ", i)
end
algorithm(runtime)
# 2.3: Use the algorithm results to compute statistics for this example
if showprogress
println("Computing statistics for example ", i)
end
for var in config_vars_batch
inst = current_instance(runtime, var)
sf = get_sfunc(inst)
info = info_provider(runtime, inst)
lock(alock)
sts = accumulate_stats(sf, new_stats[get_name(var)], info)
new_stats[get_name(var)] = sts
unlock(alock)
#=
(parentpis, childlam, _) = info_provider(runtime, inst)
pars = get_parents(get_network(runtime), var)
parranges = [get_range(runtime, current_instance(runtime, p)) for p in pars]
parranges = Tuple(parranges)
range = get_range(runtime, inst)
# sts = operate(runtime, inst, expected_stats, parranges, parentpis, childlam)
sts = expected_stats(sf, range, parranges, Tuple(parentpis), childlam)
sts = normalize(sts) # We normalize here so we can apply normalization uniformly,
# whatever the sfunc of the variable.
# This is a very important point in the design!
# We don't have to normalize the individual sfuncs' expected_stats.
newstats[]
if var.name in keys(newstats)
newstats[var.name] = accumulate_stats(sf, newstats[var.name], sts)
else
newstats[var.name] = sts
end
unlock(alock)
=#
end
lock(alock)
config_vars = config_vars_batch
unlock(alock)
end
#= This doesn't make sense in the generalized algorithm
# 3 blend in the old statistics
if showprogress
println("Blending in old statistics")
end
if !isnothing(oldstats) # will be nothing on first iteration
d = discount_factor(newruntime)
for var in vars
if var.name in keys(oldstats)
if !isnothing(oldstats[var.name])
ss = mult_through(oldstats[var.name], d)
tt = add_through(newstats[var.name], ss)
newstats[var.name] = tt
end
end
end
end
=#
# 4 choose the maximizing parameter values and store them in the runtime
# To implement parameter sharing, we invoke the maximize_stats once per model
# for all the variables defined by the model.
# Currently, parameter sharing doesn't work
if showprogress
println("Choosing maximizing parameters")
end
for var in config_vars
maximize_stats(var.model, new_stats[get_name(var)])
new_config_specs[get_name(var)] = get_config_spec(var.model)
end
#=
modelvars = Dict{Model, Array{Variable, 1}}()
for var in vars
m = var.model
if false && isa(m, FixedModel)
vs = get(modelvars, m, [])
push!(vs, var)
modelvars[m] = vs
else
# no parameter sharing
sf = make_sfunc(var, newruntime)
newparams[var.name] = maximize_stats(sf, newstats[var.name])
end
end
for (m, vs) in modelvars
stats = initial_stats(m.sf)
for v in vs
stats = accumulate_stats(m.sf, stats, newstats[v.name])
end
modelparams = maximize_stats(m.sf, stats)
for v in vs
newparams[v.name] = modelparams
end
end
=#
return (new_stats, new_config_specs)
end
# function close(x::Float64, y::Float64, eps::Float64)
# return abs(x-y) < eps
# end
# function close(xs::Array, ys::Array, eps::Float64)
# if length(xs) != length(ys) return false end
# for i = 1:length(xs)
# if !close(xs[i], ys[i], eps) return false end
# end
# return true
# end
# function close(xs::Tuple, ys::Tuple, eps::Float64)
# if length(xs) != length(ys) return false end
# for i = 1:length(xs)
# if !close(xs[i], ys[i], eps) return false end
# end
# return true
# end
# function close(xs::Dict, ys::Dict, eps::Float64)
# println("close: xs = ", xs, ", ys = ", ys)
# ks = keys(xs)
# ls = keys(ys)
# if length(ks) != length(ls)
# println("Lengths unequal: returning false")
# return false
# end
# for k in ks
# if !k in ls
# println("Value not present: returning false")
# return false
# end
# if !close(xs[k], ys[k], eps)
# println("Values not close: returning false")
# return false
# end
# end
# println("Returning true")
# return true
# end
#
# Score the new parameters on the given validation set
# function score(network, params, validationset, algorithm, initializer)
# result = 0.0
# alock = SpinLock()
# Threads.@threads for example in validationset
# runtime = Runtime(network)
# tvars = get_variables(runtime)
# for var in tvars
# set_params!(make_sfunc(var.model), params[var.name])
# end
# # we have to call the initializer after we set the params, since we create instances
# # with the underlying parameter values
# initializer(runtime)
# # Don't set the evidence. Run the algorithm and check the marginal probability of the evidence.
# algorithm(runtime)
# localscore = 0
# for var in tvars
# inst = current_instance(runtime, var)
# bel = get_belief(runtime, inst)
# if var.name in keys(example)
# prob = bel[example[var.name]]
# localscore += log(prob)
# end
# end
# lock(alock)
# result += localscore
# unlock(alock)
# end
# return result
# end
export
SyncBP,
AsyncBP,
CoherentBP
"""
SyncBP(range_size = 10)
A window filter that uses a synchronous window with ThreePassBP with the given range size.
"""
SyncBP(range_size = 10) = WindowFilter(SyncWindow(), ThreePassBP(range_size))
"""
AsyncBP(range_size = 10, T = Float64)
A window filter that uses an asynchronous window with ThreePassBP with the given range size.
`T` represents the time type and must be the same as used in creation of the runtime.
"""
AsyncBP(range_size = 10, T = Float64) = WindowFilter(AsyncWindow{T}(), ThreePassBP(range_size))
"""
CoherentBP(range_size = 10, T = Float64)
A window filter that uses a coherent window with ThreePassBP with the given range size.
`T` represents the time type and must be the same as used in creation of the runtime.
"""
CoherentBP(range_size = 10, T = Float64) = WindowFilter(CoherentWindow{T}(), ThreePassBP(range_size))
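# A minimal sketch of how these wrappers are used (hypothetical network and evidence;
# `net` is a DynamicNetwork and `Runtime(net)` yields a dynamic runtime):
#
#     alg = SyncBP(20)
#     runtime = Runtime(net)
#     init_filter(alg, runtime)
#     filter_step(alg, runtime, get_variables(net), 1, Dict{Symbol, Score}(:obs => HardScore(true)))
#     answer(Marginal(), alg, runtime, current_instance(runtime, get_node(net, :state)))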
export
Filter,
init_filter,
filter_step
"""
abstract type Filter <: Algorithm end
General type of filtering algorithms.
Must implement `init_filter` and `filter_step` methods.
"""
abstract type Filter <: Algorithm end
"""
    init_filter(::Filter, ::DynamicRuntime)
An interface for initializing the filter for a dynamic runtime.
"""
function init_filter(::Filter, ::DynamicRuntime) end
"""
    filter_step(filter::Filter, runtime::DynamicRuntime{T}, variables::Vector{<:Variable}, time::T, evidence::Dict{Symbol, Score})
Run one step of the filter by instantiating the given variables at the given time and passing in the given evidence.
"""
function filter_step(::Filter, ::DynamicRuntime{T}, ::Vector{<:Variable}, ::T, ::Dict{Symbol, Score}) where T end
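# A sketch of the intended lifecycle for any Filter implementation (hypothetical names;
# `runtime` is a DynamicRuntime and `evidence_stream` yields Dict{Symbol, Score}):
#
#     f = SyncPF(1000) # any Filter
#     init_filter(f, runtime)
#     for (t, ev) in enumerate(evidence_stream)
#         filter_step(f, runtime, get_variables(get_network(runtime)), t, ev)
#     end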
export
SyncLoopy,
AsyncLoopy,
CoherentLoopy
"""
SyncLoopy(range_size = 10)
A window filter that uses a synchronous window with LoopyBP with the given range size.
"""
SyncLoopy(range_size = 10) = WindowFilter(SyncWindow(), LoopyBP(range_size))
"""
AsyncLoopy(range_size = 10, T = Float64)
A window filter that uses an asynchronous window with LoopyBP with the given range size.
`T` represents the time type and must be the same as used in creation of the runtime.
"""
AsyncLoopy(range_size = 10, T = Float64) = WindowFilter(AsyncWindow{T}(), LoopyBP(range_size))
"""
CoherentLoopy(range_size = 10, T = Float64)
A window filter that uses a coherent window with LoopyBP with the given range size.
`T` represents the time type and must be the same as used in creation of the runtime.
"""
CoherentLoopy(range_size = 10, T = Float64) = WindowFilter(CoherentWindow{T}(), LoopyBP(range_size))
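# For example (hypothetical usage, mirroring the BP-based wrappers above):
#
#     alg = SyncLoopy(20) # like SyncBP(20), but iterates BP to convergence on each window
#     init_filter(alg, runtime)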
export
ParticleFilter,
SyncPF,
AsyncPF,
CoherentPF
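"""
    ParticleFilter(window_creator::WindowCreator, num_particles::Int, resampling_size::Int = num_particles)
A window filter that runs likelihood weighting with `num_particles` particles on each
window and then resamples the stored particles down to `resampling_size`.
"""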
function ParticleFilter(window_creator::WindowCreator, num_particles::Int, resampling_size::Int = num_particles)
function pp!(run)
particles = get_state(run, :particles)
newparticles = resample(particles, resampling_size)
set_state!(run, :particles, newparticles)
end
return WindowFilter(window_creator, LW(num_particles), pp!)
end
"""
SyncPF(num_particles::Int, resampling_size::Int = num_particles)
A particle filter that uses a synchronous window with the given number of particles and resampling buffer size.
"""
SyncPF(num_particles::Int, resampling_size::Int = num_particles) = ParticleFilter(SyncWindow(), num_particles, resampling_size)
"""
AsyncPF(num_particles::Int, resampling_size::Int = num_particles, T = Float64)
A particle filter that uses an asynchronous window with the given number of particles and resampling buffer size.
`T` represents the time type and must be the same as used in creation of the runtime.
"""
AsyncPF(num_particles::Int, resampling_size::Int = num_particles, T = Float64) = ParticleFilter(AsyncWindow{T}(), num_particles, resampling_size)
"""
CoherentPF(num_particles::Int, resampling_size::Int = num_particles, T = Float64)
A particle filter that uses a coherent window with the given number of particles and resampling buffer size.
`T` represents the time type and must be the same as used in creation of the runtime.
"""
CoherentPF(num_particles::Int, resampling_size::Int = num_particles, T = Float64) = ParticleFilter(CoherentWindow{T}(), num_particles, resampling_size)
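# A minimal usage sketch (hypothetical network and evidence; the runtime's time type
# must match the filter's):
#
#     pf = SyncPF(1000)
#     runtime = Runtime(net)
#     init_filter(pf, runtime)
#     filter_step(pf, runtime, get_variables(net), 1, Dict{Symbol, Score}(:obs => HardScore(0.7)))
#     answer(Marginal(), pf, runtime, current_instance(runtime, get_node(net, :state)))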
#=
"""
General type of particle filters. Implementations must have fields runtime, num_particles, and resampling_size.
Some implementations will also provide APIs that organize the filtering in a specific way.
"""
abstract type ParticleFilter end
"""
current_particles(pf::ParticleFilter)
Returns a Particles data structure consisting of the current view of the most recent samples
of all variables in the network, along with the current log weights.
"""
# Samples are stored in a distributed manner across variables, possibly at different time points.
# Coherence of global samples is maintained by consistent indexing into arrays of variable samples.
function current_particles(pf::ParticleFilter)
vars = get_variables(get_network(pf.runtime))
# Need to make sure samples is initialized with distinct empty dictionaries
samples = Sample[]
for i in 1:pf.num_particles
push!(samples, Sample())
end
for var in vars
inst = current_instance(pf.runtime, var)
varsample = get_value(pf.runtime, inst, :samples)
for i in 1:pf.num_particles
samples[i][var.name] = varsample[i]
end
end
lws = get_state(pf.runtime, :log_weights)
return Particles(samples, lws)
end
"""
store_particles!(pf::ParticleFilter, ps::Particles)
Stores the information in ps in a form pf can use.
"""
function store_particles!(pf::ParticleFilter, ps::Particles)
vars = get_variables(get_network(pf.runtime))
# Reset the samples and log weights in the runtime
for var in vars
inst = current_instance(pf.runtime, var)
varsample = Vector{output_type(get_sfunc(inst))}()
for i in 1:pf.num_particles
# correction for 1-indexing
push!(varsample, ps.samples[(i-1) % length(ps.samples) + 1][var.name])
end
set_value!(pf.runtime, inst, :samples, varsample)
end
set_state!(pf.runtime, :log_weights, ps.log_weights)
end
function resample!(pf::ParticleFilter)
current_ps = current_particles(pf)
new_ps = resample(current_ps, pf.resampling_size)
store_particles!(pf, new_ps)
end
function init_filter(pf::ParticleFilter)
ensure_all!(pf.runtime, 0)
likelihood_weighting(pf.runtime, pf.num_particles)
ps = get_state(pf.runtime, :particles)
store_particles!(pf, ps)
end
# TODO: Handle evidence at different points in time
function filter_step(pf::ParticleFilter, variables_to_sample::Vector{<:Variable}, time::Int, evidence::Dict{Symbol, Score})
@assert time > current_time(pf.runtime)
set_time!(pf.runtime, time)
# The log weights of the current evidence are added to the existing log weights for each sample.
scores = get_state(pf.runtime, :log_weights)
for var in variables_to_sample
varsamples = Vector{output_type(var)}()
inst = instantiate!(pf.runtime, var, time)
sf = get_sfunc(inst)
varev = var.name in keys(evidence) ? evidence[var.name] : nothing
# Get the sample sets of each of the parents of the current variable in the transition model.
# These parents may have been previously instantiated at any time.
# instantiate! will make sure that the sfunc correctly takes into account the time lags,
# as long as the model of the variable is defined appropriately.
parents = get_transition_parents(get_network(pf.runtime), var)
n = length(parents)
parsamples = map(parents) do p
if p == var # self edge from previous
parinst = previous_instance(pf.runtime, var)
else
parinst = current_instance(pf.runtime, p)
end
get_value(pf.runtime, parinst, :samples)
end
for i in 1:pf.num_particles
parvals = tuple([parsamples[j][i] for j in 1:n]...)
if isa(varev, HardScore)
# Since the value is known, force it and score it instead of sampling, LW style
evval = varev.value
push!(varsamples, evval)
scores[i] += logcpdf(sf, parvals, evval)
else
# Otherwise, we sample it, but if there is evidence we still score it
sampval = sample(sf, parvals)
push!(varsamples, sampval)
if !isnothing(varev)
scores[i] += get_log_score(varev, sampval)
end
end
end
set_value!(pf.runtime, inst, :samples, varsamples)
end
set_state!(pf.runtime, :log_weights, scores)
end
=#
export
WindowCreator,
create_window,
SyncWindow,
AsyncWindow,
CoherentWindow
"""
    abstract type WindowCreator{T} end
A type that identifies how to create windows of instances for filtering algorithms.
`T` represents the time type.
Must implement `create_window`.
"""
abstract type WindowCreator{T} end
struct SyncWindow <: WindowCreator{Int} end
"""
create_window(::SyncWindow, runtime::Runtime, variables::Vector{<:Variable}, time::Int)::Vector{Instance}
Creates a window containing placeholder instances of every node at the earliest time of any
parent instance of the given variables, and fresh instances of every node at each later time
step up to the given time.
"""
function create_window(::SyncWindow, runtime::Runtime, variables_to_sample::Vector{<:Variable}, time::Int)::Vector{Instance}
prevtime = time
net = get_network(runtime)
ord = topsort(get_transition_graph(net))
for v in variables_to_sample
pars = get_transition_parents(net, v)
for p in pars
time_offset = has_timeoffset(net, v, p)
t = get_time(latest_instance_before(runtime, p, time, !time_offset))
prevtime = min(prevtime, t)
end
end
insts = Instance[]
for n in ord
ph = Placeholder{output_type(n)}(n.name)
push!(insts, PlaceholderInstance(ph, prevtime))
end
for t in prevtime+1:time
for n in ord
push!(insts, ensure_instance!(runtime, n, t))
end
end
return insts
end
struct AsyncWindow{T <: Number} <: WindowCreator{T} end
"""
    create_window(::AsyncWindow{T}, runtime::Runtime, variables::Vector{<:Variable}, time::T)::Vector{Instance}
Creates a window by instantiating only the given variables at the given time.
"""
function create_window(::AsyncWindow{T}, runtime::Runtime, variables::Vector{<:Variable}, time::T)::Vector{Instance} where T
insts = Instance[]
done = Set{Variable}()
for v in variables
for p in get_transition_parents(get_network(runtime), v)
if !(p in done)
parinst = latest_instance_before(runtime, p, time, true) # allow parent instances at the current time
partime = get_time(parinst)
ph = Placeholder{output_type(p)}(p.name)
phinst = PlaceholderInstance(ph, partime)
push!(insts, phinst)
push!(done, p)
end
end
push!(insts, ensure_instance!(runtime, v, time))
push!(done, v)
end
return insts
end
"""
    struct CoherentWindow{T <: Number} <: WindowCreator{T} end
A variant of `AsyncWindow` that ensures that parent values are never stale for any variable
updated in a filter step. In other words, if any parent of a direct parent has been updated
more recently than a variable to be updated, the direct parent is added to the variables to
be updated; this condition then recurses through the direct parents.
"""
struct CoherentWindow{T <: Number} <: WindowCreator{T} end
function create_window(::CoherentWindow{T}, runtime::Runtime, variables::Vector{<:Variable}, time::T)::Vector{Instance} where T
# Note: This method does not allow placeholder parents of dynamic variables
net = get_network(runtime)
parents = get_transition_graph(net)
fullvars = Set{Variable}()
order = topsort(parents)
times = Dict([(n, get_time(current_instance(runtime, n))) for n in order])
function ensure(v)
if !(v in fullvars)
push!(fullvars, v)
for anc in ancestors(parents, v, Set{Node}())
for grandanc in get(parents, anc, [])
if times[grandanc] > times[anc]
ensure(anc)
break
end
end
end
times[v] = time # Need to do this to ensure instantiation through a chain of dependencies
end
end
for var in variables
ensure(var)
end
# We must make sure variables get sampled in the correct order to maintain coherence
orderedvars = Variable[]
for v in order
if v in fullvars
push!(orderedvars, v)
end
end
create_window(AsyncWindow{T}(), runtime, orderedvars, time)
end
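# A sketch contrasting the three window creators (hypothetical variables; `runtime` is a
# DynamicRuntime whose latest instances are at time 5, and :x has parent :y):
#
#     create_window(SyncWindow(), runtime, [x], 6) # every node at each step up to 6
#     create_window(AsyncWindow{Int}(), runtime, [x], 6) # only :x at 6, with :y as a placeholder
#     create_window(CoherentWindow{Int}(), runtime, [x], 6) # also refreshes :y first if it is stale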
export
WindowFilter,
SyncPF,
AsyncPF,
CoherentPF
"""
    WindowFilter(window_creator, inference_algorithm, postprocess! = run -> nothing)
General construction for a filter based on a flexible windowing scheme.
# Arguments
- `window_creator`: Defines the method used to create windows.
- `inference_algorithm`: Defines the instant algorithm to use on a window.
- `postprocess!`: A postprocessing function that takes the runtime and does any additional processing needed to carry to the next iteration. Defaults to doing nothing.
"""
mutable struct WindowFilter <: Filter
window_creator :: WindowCreator
inference_algorithm :: InstantAlgorithm
postprocess! :: Function
latest_window :: Union{InstantRuntime, Nothing}
WindowFilter(wc, ia, pp) = new(wc, ia, pp, nothing)
WindowFilter(wc, ia) = new(wc, ia, run -> nothing, nothing)
end
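# A sketch of composing a custom filter from these pieces (hypothetical choices;
# any WindowCreator and InstantAlgorithm can be combined):
#
#     wf = WindowFilter(AsyncWindow{Float64}(), ThreePassBP(25)) # equivalent to AsyncBP(25, Float64)
#     wf2 = WindowFilter(SyncWindow(), LW(500), run -> nothing) # with an explicit no-op postprocess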
function init_filter(wf::WindowFilter, dynrun::DynamicRuntime)
ensure_all!(dynrun, current_time(dynrun))
instrun = initial_instant_runtime(dynrun)
# We assume no evidence or interventions at time 0
# TODO: Handle placeholder beliefs
infer(wf.inference_algorithm, instrun)
wf.latest_window = instrun
retrieve_values_from_instant_runtime!(dynrun, instrun)
_store_beliefs(wf, dynrun, instrun)
end
function _store_beliefs(wf::WindowFilter, dynrun::DynamicRuntime{T}, instrun::InstantRuntime) where T
dynnet = get_network(dynrun)
for instvar in get_variables(get_network(instrun))
instinst = current_instance(instrun, instvar)
belief = marginal(wf.inference_algorithm, instrun, instinst)
(dynname, t) = dynamic_name_and_time(instvar, T)
dyninst = get_instance(dynrun, get_node(dynnet, dynname), t)
set_value!(dynrun, dyninst, :belief, belief)
end
end
function filter_step(wf::WindowFilter, dynrun::DynamicRuntime{T}, variables::Vector{<:Variable}, time::T, evidence::Dict{Symbol, Score}) where T
dynnet = get_network(dynrun)
insts = create_window(wf.window_creator, dynrun, variables, time)
instrun = instant_runtime_from_instances(dynrun, insts)
# Apply the dynamic evidence to the instant runtime
instev = Dict{Symbol, Score}()
for (name, sc) in evidence
instev[instant_name(name, time)] = sc
end
# Apply beliefs in the dynamic network as placeholder beliefs in the instant network.
placeholder_beliefs = Dict{Symbol,Dist}()
inst_phs = get_placeholders(get_network(instrun))
for instnode in inst_phs
(dynname, t) = dynamic_name_and_time(instnode, T)
dynnode = get_node(dynnet, dynname)
dyninst = get_instance(dynrun, dynnode, t)
belief = get_value(dynrun, dyninst, :belief)
placeholder_beliefs[get_name(instnode)] = belief
end
# TODO: Handle interventions
instinterv = Dict{Symbol,Dist}()
infer(wf.inference_algorithm, instrun, instev, instinterv, placeholder_beliefs)
wf.latest_window = instrun
retrieve_values_from_instant_runtime!(dynrun, instrun)
set_time!(dynrun, time)
_store_beliefs(wf, dynrun, instrun)
end
function answer(::Marginal, ::WindowFilter, dynrun::Runtime, target::VariableInstance)
return get_value(dynrun, target, :belief)
end
# function answer(query::Query, wf::WindowFilter, dynrun::Runtime, targets::Vector{VariableInstance})
# # TODO: This code assumes that targets are in the latest_window, which might not be true
# # for an asynchronous filter. We need to construct an instant window for the targets.
# # For that, we need to put information from the dynamic window into the instant window,
# # which is not done yet.
# instrun = wf.latest_window
# insttargets = VariableInstance[]
# for target in targets
# instname = instant_name(get_name(target), current_time(dynrun))
# instnode = get_node(get_network(instrun), instname)
# insttarget = current_instance(instrun, instnode)
# push!(insttargets, insttarget)
# end
# answer(query, wf.inference_algorithm, wf.latest_window, insttargets)
# end
export
instant_runtime_from_instances,
retrieve_values_from_instant_runtime!,
initial_instant_runtime,
instant_name,
dynamic_name_and_time,
instant_node
"""
Create a name in an instant network corresponding to the given dynamic name and time.
"""
function instant_name(dynamic_name::Symbol, time::Number)::Symbol
return Symbol(dynamic_name, "_", time)
end
"""
Create a dynamic name and time from an instant node. T is the time type.
"""
function dynamic_name_and_time(instant_node::Node, T = Int)::Tuple{Symbol, T}
instant_name = collect(repr(instant_node.name))
# Need to handle two different representations of symbols
if length(instant_name) >= 6 && instant_name[1:6] == ['S', 'y', 'm', 'b', 'o', 'l']
instant_name = instant_name[9:length(instant_name)-2] # strip parens and quotes
else
instant_name = instant_name[2:length(instant_name)] # strip leading colon
end
len = length(instant_name)
i = findlast(x -> x == '_', instant_name)
dynamic_name = Symbol(String(instant_name[1:i-1]))
time = parse(T, String(instant_name[i+1:len]))
return (dynamic_name, time)
end
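# For example (hypothetical values), the two functions invert each other:
#
#     instant_name(:x, 3) # returns :x_3
#     v = Variable(:x_3, SimpleModel(Cat([1, 2], [0.5, 0.5])))
#     dynamic_name_and_time(v) # returns (:x, 3)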
"""
Create an instant node from a dynamic variable instance.
"""
function instant_node(dyninst::VariableInstance)
name = instant_name(get_name(dyninst), get_time(dyninst))
model = SimpleModel(get_sfunc(dyninst))
var = Variable(name, model)
return var
end
"""
Create an instant node from a dynamic placeholder instance.
"""
function instant_node(dyninst::PlaceholderInstance{O}) where O
name = instant_name(get_name(dyninst), get_time(dyninst))
ph = Placeholder{O}(name)
return ph
end
"""
instant_runtime_from_instances(runtime::DynamicRuntime, instances::Vector{Instance})
Create an instant runtime from the given instances in the given dynamic runtime.
This runtime has an instant network that contains a node for each variable instance in
`instances`, tagged with the time of the instance, and a placeholder for each placeholder
instance, as well as for any parent instance that is not itself among `instances`.
The function also instantiates the variables in the instant runtime and stores any runtime
values from the dynamic runtime with the corresponding instances in the instant runtime.
This function is useful for running instant algorithms on a time window
for dynamic reasoning.
"""
function instant_runtime_from_instances(dynrun::DynamicRuntime, dyninsts::Vector{Instance})
dynnet = get_network(dynrun)
forward_index = Dict{Instance, Node}()
back_index = Dict{Node, Instance}()
placeholders = Placeholder[]
variables = Variable[]
nodes = Node[]
for dyninst in dyninsts
node = instant_node(dyninst)
if get_node(dyninst) isa Variable
push!(variables, node)
else
push!(placeholders, node)
end
forward_index[dyninst] = node
back_index[node] = dyninst
push!(nodes, node)
end
instgraph = VariableGraph()
for node in nodes
nodepars = Node[]
dyninst = back_index[node]
insttime = get_time(dyninst)
dynnode = get_node(dyninst)
dynpars = get_transition_parents(dynnet, dynnode)
for dynpar in dynpars
# Find the most recent parent instance equal or before this variable's instance
time_offset = has_timeoffset(dynnet, dynnode, dynpar)
parinst = latest_instance_before(dynrun, dynpar, insttime, !time_offset)
if isnothing(parinst)
error("Variable does not have parent in instances")
elseif !(parinst in dyninsts)
# should be a placeholder
ph = Placeholder{output_type(dynpar)}(get_name(dynpar))
parinst = PlaceholderInstance(ph, get_time(parinst))
end
push!(nodepars, forward_index[parinst])
end
instgraph[node] = nodepars
end
instnet = InstantNetwork(variables, instgraph, placeholders)
instrun = Runtime(instnet)
ensure_all!(instrun)
for ((dyninst, valuename), value) in dynrun.values
if dyninst in dyninsts
instinst = current_instance(instrun, forward_index[dyninst])
set_value!(instrun, instinst, valuename, value)
end
end
for (k,v) in get_state(dynrun)
set_state!(instrun, k, v)
end
return instrun
end
"""
Creates an instant runtime for the first time step.
"""
function initial_instant_runtime(dynrun::DynamicRuntime)
variables = Variable[]
placeholders = Placeholder[]
dynnet = get_network(dynrun)
node_index = Dict{Symbol, Node}() # Maps names of dynamic nodes to instant instances
instgraph = VariableGraph()
for dynnode in topsort(get_initial_graph(dynnet))
nodename = get_name(dynnode)
dyninst = current_instance(dynrun, dynnode)
instvar = instant_node(dyninst)
if instvar isa Variable
push!(variables, instvar)
else
push!(placeholders, instvar)
end
node_index[nodename] = instvar
dynpars = get_initial_parents(dynnet, dynnode)
instpars = Node[]
for dynpar in dynpars
instpar = node_index[get_name(dynpar)]
push!(instpars, instpar)
end
instgraph[instvar] = instpars
end
instnet = InstantNetwork(variables, instgraph, placeholders)
instrun = Runtime(instnet)
for node in values(node_index)
instantiate!(instrun, node, 0)
end
return instrun
end
"""
retrieve_values_from_instant_runtime!(dynrun::DynamicRuntime, instrun::InstantRuntime)
Retrieve values in a dynamic runtime from an instant runtime constructed
using `instant_runtime_from_instances`.
"""
function retrieve_values_from_instant_runtime!(dynrun::DynamicRuntime{T},
instrun::InstantRuntime) where T
index = Dict{Instance, Instance}()
for node in get_nodes(get_network(instrun))
# If instrun has been constructed correctly from dynrun,
# there should be no runtime errors in this code.
instinst = current_instance(instrun, node)
(dynname, dyntime) = dynamic_name_and_time(node, T)
dynnode = get_node(get_network(dynrun), dynname)
dyninst = get_instance(dynrun, dynnode, dyntime)
index[instinst] = dyninst
end
# TODO: This assumes that the values in the dynamic runtime take the same value as in the instant runtime.
# This is not necessarily the case, e.g. for :particles, which contains samples that are dictionaries from node name to
# node value. The node names in the instant runtime and dynamic runtime are different.
# For this, importance stores beliefs with nodes in the instant runtime that can get translated.
# A better general method is needed.
for ((instinst, valuename), value) in instrun.values
set_value!(dynrun, index[instinst], valuename, value)
end
for (k, v) in get_state(instrun)
set_state!(dynrun, k, v)
end
end
export
BP,
infer
"""
abstract type BP <: InstantAlgorithm
Belief Propagation algorithm
"""
abstract type BP <: InstantAlgorithm end
function answer(::Marginal, ::BP, runtime::Runtime, instance::VariableInstance, bounds = false)
if bounds
error("BP cannot provide bounded answers")
end
return get_belief(runtime, instance)
end
function infer(algorithm::BP, runtime::InstantRuntime,
evidence::Dict{Symbol, <:Score} = Dict{Symbol, Score}(),
interventions::Dict{Symbol, <:Dist} = Dict{Symbol, Dist}(),
placeholder_beliefs = get_placeholder_beliefs(runtime))
net = get_network(runtime)
ensure_all!(runtime)
order = topsort(get_initial_graph(net))
set_ranges!(runtime, evidence, algorithm.default_range_size, 1, order, placeholder_beliefs)
for ph in get_placeholders(net)
if !(ph.name in keys(placeholder_beliefs))
error("Placeholder ", ph.name, " does not have a belief")
end
pi = placeholder_beliefs[ph.name]
for ch in get_children(net, ph)
set_message!(runtime, ph, ch, :pi_message, pi)
end
end
for (n,e) in evidence
v = get_node(net, n)
inst = current_instance(runtime, v)
post_evidence!(runtime, inst, e)
end
for (n,i) in interventions
v = get_node(net, n)
inst = current_instance(runtime, v)
post_intervention!(runtime, inst, i)
end
run_bp(algorithm, runtime)
end
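# A minimal sketch of running BP on an instant network (hypothetical two-variable
# network `net` with discrete nodes :x and :y):
#
#     runtime = Runtime(net)
#     alg = ThreePassBP(20)
#     infer(alg, runtime, Dict{Symbol, Score}(:y => HardScore(2)))
#     posterior = answer(Marginal(), alg, runtime, current_instance(runtime, get_node(net, :x)))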
# Run a backward step of BP for a single variable.
function _backstep(runtime, var, ranges)
network = get_network(runtime)
inst = current_instance(runtime, var)
evidence = get_evidence(runtime, inst)
sf = get_sfunc(inst)
chs = get_children(get_network(runtime), var)
msgs = collect_messages(runtime, chs, var, :lambda_message)
O = output_type(sf)
range::Vector{<:O} = [convert(output_type(sf), r) for r in ranges[var.name]]
if isempty(msgs)
ilams = Score{output_type(sf)}[]
if !isnothing(evidence)
push!(ilams, evidence)
end
lam = compute_lambda(sf, range, ilams)
else
incoming_lams = Score[]
for m in msgs
m1::Score = m
push!(incoming_lams, m1)
end
if !isnothing(evidence)
push!(incoming_lams, evidence)
end
lam = compute_lambda(sf, range, incoming_lams)
end
set_value!(runtime, inst, :lambda, lam)
pars = get_parents(network, var)
incoming_pis::Vector{Dist} = collect_messages(runtime, pars, var, :pi_message)
parranges = [ranges[p.name] for p in pars]
outgoing_lams = outgoing_lambdas(sf, lam, range, tuple(parranges...), tuple(incoming_pis...))
distribute_messages!(runtime, var, pars, :lambda_message, outgoing_lams)
end
# Run a forward step of BP for a single variable
# The procedure is slightly different for the first pass, so use the firstpass flag
function _forwardstep(runtime, var, ranges, firstpass)
network = get_network(runtime)
inst = current_instance(runtime, var)
sf = get_sfunc(inst)
pars = get_parents(network, var)
O = output_type(sf)
range = convert(Vector{O}, ranges[var.name])
parranges = [ranges[p.name] for p in pars]
incoming_pis = tuple(collect_messages(runtime, pars, var, :pi_message)...)
if has_intervention(runtime, inst)
intervention = get_intervention(runtime, inst)
pi = intervention
elseif firstpass && has_evidence(runtime, inst)
# In the first pass, evidence is treated like lambda messages from elsewhere
evidence = get_evidence(runtime, inst)
pi = Cat(range, [get_score(evidence, x) for x in range])
else
pi = compute_pi(sf, range, tuple(parranges...), incoming_pis)
end
set_value!(runtime, inst, :pi, pi)
chs = get_children(network, var)
if firstpass
outpis = fill(pi, length(chs))
else
lam = get_value(runtime, inst, :lambda)
bel = compute_bel(sf, range, pi, lam)
post_belief!(runtime, inst, bel)
incoming_lams::Vector{Score} = collect_messages(runtime, chs, var, :lambda_message)
outpis = outgoing_pis(sf, range, bel, incoming_lams)
end
distribute_messages!(runtime, var, chs, :pi_message, outpis)
end
export
Importance,
Rejection,
LW,
rejection_proposal,
lw_proposal,
make_custom_proposal
"""
Importance <: InstantAlgorithm
Representation of an importance sampling algorithm.
# arguments
- proposal_function Specifies how the algorithm should make proposals. This is a function
that takes a runtime and an instance and returns a proposer.
The proposer takes parent values and proposes a value for the instance along with a log
score.
- num_particles The number of completed particles to use. This is not necessarily the
number attempted. If there are rejections, the algorithm will continue to create particles
until `num_particles` have been completed. Warning: With impossible evidence, the process
will not terminate.
"""
struct Importance <: InstantAlgorithm
proposal_function :: Function
num_particles :: Int
end
function answer(pf::ProbFunction, ::Importance, runtime::Runtime, instances::Vector{VariableInstance})
particles = get_state(runtime, :particles)
score = 0.0
total = 0.0
for (s, lw) in zip(particles.samples, particles.log_weights)
x = [s[get_name(inst)] for inst in instances]
w = exp(lw)
if pf.fn(x)
score += w
end
total += w
end
return score / total
end
function answer(::Marginal, ::Importance, runtime::Runtime, instance::VariableInstance)
O = output_type(get_node(instance))
dict = Dict{O, Float64}()
particles = get_state(runtime, :particles)
for (s, lw) in zip(particles.samples, particles.log_weights)
x = s[get_name(instance)]
w = exp(lw)
dict[x] = get(dict, x, 0.0) + w
end
z = sum(values(dict))
vs = O[]
ps = Float64[]
for (v, p) in dict
push!(vs, v)
push!(ps, p/z)
end
return Cat(vs, ps)
end
"""
    rejection_proposal(runtime::Runtime, instance::VariableInstance)
Return a proposer that implements standard rejection sampling from the prior.
The proposer samples a value for the `instance` from its sfunc with a log score of zero;
the algorithm then scores the proposed value against the evidence, if any.
"""
function rejection_proposal(runtime::Runtime, instance::VariableInstance)
sfunc = get_sfunc(instance)
proposer(parent_values) = (sample(sfunc, parent_values), 0.0)
proposer
end
function _get_hard_evidence(runtime, instance)::Union{HardScore, Nothing}
if has_evidence(runtime, instance)
ev = get_evidence(runtime, instance)
if ev isa HardScore
return ev
end
end
return nothing
end
"""
    lw_proposal(runtime::Runtime, instance::VariableInstance)
Return a proposer that implements likelihood weighting.
This proposal scheme is the same as the prior proposal unless a variable has hard evidence.
In the case of hard evidence, the proposer sets the value of the variable to the evidence
value and scores it by the log conditional probability of the evidence given the parent
values.
"""
function lw_proposal(runtime::Runtime, instance::VariableInstance)
evidence = _get_hard_evidence(runtime, instance)
if !isnothing(evidence)
return _hard_evidence_proposal(evidence, instance)
else
return rejection_proposal(runtime, instance)
end
end
function _hard_evidence_proposal(evidence, instance)
sf = get_sfunc(instance)
x = evidence.value
proposer(parent_values) = (x, logcpdf(sf, parent_values, x))
return proposer
end
"""
make_custom_proposal(custom_sfs::Dict{Symbol, SFunc})
Create a proposal function for a custom proposal scheme.
Returns a proposal function that can be provided to the Importance constructor.
Evidence is handled similarly to `lw_proposal`, except that the custom proposal is used for soft
evidence.
# Arguments
- custom_sfs A dictionary mapping variable names to custom sfuncs used for their proposal.
Need not be complete; if a variable is not in this dictionary, its standard sfunc will be
used.
"""
function make_custom_proposal(custom_sfs::Dict{Symbol, SFunc})
function proposal(runtime, instance)
evidence = _get_hard_evidence(runtime, instance)
if !isnothing(evidence)
return _hard_evidence_proposal(evidence, instance)
else
name = get_name(instance)
if name in keys(custom_sfs)
prior_sf = get_sfunc(instance)
proposal_sf = custom_sfs[name]
function proposer(parent_values)
x = sample(proposal_sf, parent_values)
l = logcpdf(prior_sf, parent_values, x) -
logcpdf(proposal_sf, parent_values, x)
return (x, l)
end
return proposer
else
return rejection_proposal(runtime, instance)
end
end
end
return proposal
end
"""
    Rejection(num_particles)
An importance sampling algorithm that proposes from the prior; samples that are
impossible under the evidence are rejected and retried.
"""
Rejection(num_particles) = Importance(rejection_proposal, num_particles)
"""
    LW(num_particles)
An importance sampling algorithm that implements likelihood weighting: hard evidence
values are set directly and weighted by their conditional probability given the parents.
"""
LW(num_particles) = Importance(lw_proposal, num_particles)
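# A minimal sketch (hypothetical names; `net` is an InstantNetwork with nodes :x and :y):
#
#     alg = LW(10000) # or Rejection(10000), or with a custom proposal for a parentless :x:
#     # alg = Importance(make_custom_proposal(Dict{Symbol, SFunc}(:x => Cat([1, 2], [0.9, 0.1]))), 10000)
#     runtime = Runtime(net)
#     infer(alg, runtime, Dict{Symbol, Score}(:y => HardScore(2)))
#     answer(Marginal(), alg, runtime, current_instance(runtime, get_node(net, :x)))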
function _importance(runtime::Runtime, num_samples::Int, proposal_function::Function,
samples::Vector{Dict{Symbol, Any}}, lws)
net = runtime.network
nodes = topsort(get_initial_graph(net))
proposers = Function[]
evidences = Score[]
interventions = Union{Dist,Nothing}[]
for v in nodes
if v isa Variable
inst = current_instance(runtime, v)
push!(proposers, proposal_function(runtime, inst))
if has_evidence(runtime, inst)
push!(evidences, get_evidence(runtime, inst))
else
push!(evidences, FunctionalScore{output_type(v)}(x -> 1.0))
end
if has_intervention(runtime, inst)
push!(interventions, get_intervention(runtime, inst))
else
push!(interventions, nothing)
end
end
end
function handle_sample(s)
vnum = 1
for v in nodes
if v isa Variable
try
inst = current_instance(runtime, v)
if !isnothing(interventions[vnum])
iv = interventions[vnum]
samples[s][v.name] = sample(iv, ())
else
proposer = proposers[vnum]
pars = get_initial_parents(net, v)
parvals = tuple([samples[s][p.name] for p in pars]...)
(x, lw) = proposer(parvals)
pe = get_log_score(evidences[vnum], x)
if !isfinite(pe)
# Try again
return s
end
samples[s][v.name] = x
lws[s] += lw + pe
end
vnum += 1
catch ex
@error("Error $ex on variable $v")
rethrow(ex)
end
end
end
# Success
return s + 1
end
s = 1
while s <= num_samples
s = handle_sample(s)
end
for v in nodes
if v isa Variable
ps = Dict{output_type(v), Float64}()
for i in 1:length(samples)
x = samples[i][v.name]
ps[x] = get(ps, x, 0.0) + exp(lws[i])
end
i = current_instance(runtime, v)
set_value!(runtime, i, :belief, Cat(ps))
end
end
log_prob_evidence = logsumexp(lws) - log(num_samples)
set_state!(runtime, :log_prob_evidence, log_prob_evidence)
set_state!(runtime, :particles, Particles(samples, lws))
end
function infer(algorithm::Importance, runtime::InstantRuntime,
evidence::Dict{Symbol, <:Score} = Dict{Symbol, Score}(),
interventions::Dict{Symbol, <:Dist} = Dict{Symbol, Dist}(),
placeholder_beliefs::Dict{Symbol, Dist} = get_placeholder_beliefs(runtime))
net = get_network(runtime)
nodes = get_nodes(net)
ensure_all!(runtime)
# See comment in instantalgorithm.jl
# During particle filtering, importance needs joint samples of placeholders, not marginals
# So we first check to see if particles already exists, and only add placeholder beliefs if
# they're not found in existing particles
samples = Dict{Symbol, Any}[]
lws = Float64[]
if has_state(runtime, :particles)
particles = get_state(runtime, :particles)
# Copy over only values of nodes in the current runtime
psamples = particles.samples
plog_weights = particles.log_weights
for i in 1:algorithm.num_particles
if length(particles.samples) > 0
index_into_particles = (i-1) % length(particles.samples) + 1
newsample = Dict{Symbol, Any}()
oldsample = psamples[index_into_particles]
for n in nodes
if n.name in keys(oldsample)
newsample[n.name] = oldsample[n.name]
end
end
push!(samples, newsample)
push!(lws, plog_weights[index_into_particles])
else
push!(samples, Dict{Symbol, Any}())
push!(lws, 0.0)
end
end
else
for i in 1:algorithm.num_particles
push!(samples, Dict{Symbol, Any}())
end
lws = zeros(Float64, algorithm.num_particles)
end
placeholders = get_placeholders(net)
for ph in placeholders
pi = placeholder_beliefs[ph.name]
for i in 1:algorithm.num_particles
if !(ph.name in keys(samples[i]))
samples[i][ph.name] = sample(pi, ())
end
end
end
for (n, e) in evidence
v = get_node(net, n; throw_missing=true)
inst = current_instance(runtime, v)
post_evidence!(runtime, inst, e)
end
for (n, i) in interventions
v = get_node(net, n; throw_missing=true)
inst = current_instance(runtime, v)
post_intervention!(runtime, inst, i)
end
_importance(runtime, algorithm.num_particles, algorithm.proposal_function, samples, lws)
end
export
InstantAlgorithm,
infer
"""
InstantAlgorithm
Algorithm that runs once on an `InstantNetwork`.
"""
abstract type InstantAlgorithm <: Algorithm end
# FIXME:
# placeholder_beliefs is intended to provide information about the distribution of placeholders to an algorithm.
# The current interface allows marginal distributions over placeholders to be passed, which is fine for an
# algorithm like BP, but if an algorithm needs joint information it has to get it in a different way.
# In particular, during particle filtering, the importance sampling algorithm needs joint samples of the previous state.
# Fortunately, it can get it out of the :particles state in the runtime, but this is clunky and not in line with the
# intentions of the general infer method.
# We should introduce sfuncs that define distributions over dictionaries, and provide operators to produce joint samples
# or compute marginals over individual variables, as they are able to support them. placeholder_beliefs should be
# such an sfunc.
"""
infer(algorithm::InstantAlgorithm, runtime::InstantRuntime,
evidence::Dict{Symbol, <:Score},
interventions::Dict{Symbol, <:Dist},
placeholder_beliefs::Dict{Symbol, <:Dist})
Run the inference algorithm.
Stores the results in `runtime`. The format of these results is up to
`algorithm`, but they should be usable by queries with this algorithm.
# Arguments
- `algorithm`: The instant algorithm to run.
- `runtime`: The runtime in which to run the algorithm.
- `evidence`: The supplied evidence, which defaults to `Dict()`.
- `interventions`: The supplied interventions, which defaults to `Dict()`.
- `placeholder_beliefs`: The beliefs associated with the placeholders in the
network, which default to `Dict()`. Instant algorithms might require that a belief be
supplied for every placeholder in `network`.
"""
export
LoopyBP,
loopy_BP
"""
LoopyBP
An instant algorithm that runs loopy belief propagation.
# Arguments
- default_range_size: The size to use as default when calling `support` on a node.
- epsilon: The allowable difference between beliefs on successive iterations
for termination.
- maxiterations: The maximum number of iterations to run. `infer` will terminate if
this number of iterations is reached, even if it has not converged.
"""
struct LoopyBP <: BP
default_range_size::Int
epsilon::Float64
maxiterations::Int
LoopyBP(drs = 10, epsilon = 0.0001, maxiterations = 10) = new(drs, epsilon, maxiterations)
end
function loopy_BP(runtime::Runtime; default_range_size = 10, epsilon = 0.0001, maxiterations = 10)
network = get_network(runtime)
for node in topsort(get_initial_graph(network))
remove_messages!(runtime, node, :pi_message)
remove_messages!(runtime, node, :lambda_message)
end
run_bp(LoopyBP(default_range_size, epsilon, maxiterations), runtime)
end
function run_bp(algorithm::LoopyBP, runtime)
network = get_network(runtime)
ranges = Dict()
nodes = get_nodes(network)
for node in nodes
inst = current_instance(runtime, node)
rng = get_range(runtime, inst)
ranges[node.name] = rng
end
for node in nodes
for par in get_parents(network, node)
if par isa Variable
rng = ranges[par.name]
# Initialize with a uniform Cat so the message is a Dist, as collect_messages expects
set_message!(runtime, par, node, :pi_message, Cat(rng, ones(length(rng))))
end
end
end
newbeliefs = initial_pass(runtime, network, ranges)
conv = false
iteration = 0
while !conv && iteration < algorithm.maxiterations
oldbeliefs = copy(newbeliefs)
backward_pass(runtime, network, ranges)
forward_pass(runtime, network, ranges, newbeliefs)
conv = converged_loopy(get_variables(network), ranges, newbeliefs, oldbeliefs, algorithm.epsilon)
iteration += 1
end
end
function initialize(runtime, network, ranges)
end
function initial_pass(runtime, network, ranges)
newbeliefs = Dict{Variable, Dist}()
variables = [v for v in topsort(get_initial_graph(network)) if v isa Variable]
for var in variables
inst = current_instance(runtime, var)
sf = get_sfunc(inst)
pars = get_parents(network, var)
incoming_pis :: Vector{Dist} =
collect_messages(runtime, pars, var, :pi_message)
range = ranges[var.name]
parranges = [ranges[p.name] for p in pars]
pi = compute_pi(sf, range, tuple(parranges...), tuple(incoming_pis...))
# on the first pass, we interpret evidence as lambda message
# coming from elsewhere, so include it in the pi
if has_evidence(runtime, inst)
evidence = get_evidence(runtime, inst)
pi = Cat(range, [get_score(evidence, x) for x in range])
end
if has_intervention(runtime, inst)
intervention = get_intervention(runtime, inst)
pi = intervention
end
set_value!(runtime, inst, :pi, pi)
newbeliefs[var] = pi
for ch in get_children(network, var)
set_message!(runtime, var, ch, :pi_message, pi)
end
end
return newbeliefs
end
function backward_pass(runtime, network, ranges)
variables = [v for v in topsort(get_initial_graph(network)) if v isa Variable]
for var in reverse(variables)
_backstep(runtime, var, ranges)
end
end
function forward_pass(runtime, network, ranges, newbeliefs)
variables = [v for v in topsort(get_initial_graph(network)) if v isa Variable]
for var in variables
_forwardstep(runtime, var, ranges, false)
inst = current_instance(runtime, var)
newbeliefs[var] = get_belief(runtime, inst)
end
end
function converged_loopy(variables, ranges, new_beliefs, old_beliefs, epsilon::Float64)
total_diff = 0.0
total_len = 0
for var in variables
range = ranges[var.name]
diffs = [abs(cpdf(new_beliefs[var], (), x) - cpdf(old_beliefs[var], (), x)) for x in range]
total_diff += sum(diffs)
total_len += length(range)
end
return total_diff / total_len < epsilon
end
export
ThreePassBP,
# The export of three_pass_BP is kept for backward compatibility,
# but new implementations should use ThreePassBP.
three_pass_BP
"""
    ThreePassBP(default_range_size = 10)
An instant algorithm that runs three passes of belief propagation:
forward, backward, and forward again.
# Arguments
- default_range_size: The size to use as default when calling `support` on a node.
"""
struct ThreePassBP <: BP
default_range_size::Int
ThreePassBP(drs = 10) = new(drs)
end
function debugparams(sf, range, prob, varname, fname, name)
Dict(:type=>(output_type(sf) <: Real ? :cont : :discrete),
:numBins=>min(10, length(range)),
:range=>range,
:prob=>prob,
:varname=>varname,
:fname=>fname,
:name=>name)
end
function three_pass_BP(runtime::InstantRuntime)
for node in get_nodes(get_network(runtime))
remove_messages!(runtime, node, :pi_message)
remove_messages!(runtime, node, :lambda_message)
end
run_bp(ThreePassBP(), runtime)
end
function run_bp(::ThreePassBP, runtime::InstantRuntime)
network = get_network(runtime)
variables = [v for v in topsort(get_initial_graph(network)) if v isa Variable]
ranges = Dict{Symbol, Array{Any, 1}}()
for node in get_nodes(network)
inst = current_instance(runtime, node)
ranges[node.name] = get_range(runtime, inst)
end
for var in variables
for par in get_parents(network, var)
if par isa Variable
rng = ranges[par.name]
initial_message = Cat(rng, ones(length(rng)))
set_message!(runtime, par, var, :pi_message, initial_message)
end
end
end
for var in variables
_forwardstep(runtime, var, ranges, true)
end
for var in reverse(variables)
_backstep(runtime, var, ranges)
end
for var in variables
_forwardstep(runtime, var, ranges, false)
end
end
export
VE,
ve
import ..Operators.make_factors
import ..SFuncs: Invertible, Serial
using Folds
"""
    VE(query_vars::Vector{<:Variable}; depth = 1, bounds = false, range_size = 1000)
An instant algorithm that runs variable elimination.
# Arguments
- `query_vars`: The variables to query, which are not eliminated.
- `depth`: A depth of 1 means not to expand expanders; otherwise, expanders are expanded recursively to the given depth.
- `bounds`: If true, compute lower and upper bounds factors; otherwise, compute a single factor.
- `range_size`: The default size to use when computing ranges of instances.
"""
struct VE <: InstantAlgorithm
query_vars::Vector{<:Variable}
depth::Int
bounds::Bool
default_range_size::Int
VE(q; depth = 1, bounds = false, range_size = 1000) = new(q,depth,bounds,range_size)
end
function answer(::Marginal, alg::VE, runtime::Runtime, inst::VariableInstance{O}) where O
inv = Invertible{Tuple{O},O}(x -> x[1], x -> (x,))
jnt = joint(alg, runtime, [inst])
if !alg.bounds
s = Serial(Tuple{}, O, (jnt, inv))
support(s, (), 10000, O[]) # need to precompute
return s
else
(lower, upper) = jnt
sl = Serial(Tuple{}, O, (lower, inv))
su = Serial(Tuple{}, O, (upper, inv))
support(sl, (), 10000, O[]) # need to precompute
support(su, (), 10000, O[]) # need to precompute
return (sl, su)
end
end
function answer(query::ProbValue{O}, alg::VE, runtime::Runtime, inst::VariableInstance{O}) where O
x = query.value
if !alg.bounds
m = answer(Marginal(), alg, runtime, inst)
return cpdf(m, (), x)
else
(lm,um) = answer(Marginal(), alg, runtime, inst)
l = cpdf(lm, (), x)
u = cpdf(um, (), x)
if isapprox(l,u)
return(l)
else
error("Lower and upper bounds are not equal - use ProbabilityBounds")
end
end
end
function answer(query::ProbabilityBounds{O}, alg::VE, runtime::Runtime, inst::VariableInstance{O}) where O
(lm,um) = answer(Marginal(), alg, runtime, inst)
r = query.range
n = length(r)
ls = zeros(Float64, n)
us = zeros(Float64, n)
# Typically, when a value is not in the support of an sfunc, we interpret its probability as zero
# However, for upper bounds, if it is in range, we want the upper bound to be 1
usup = support(um, (), 1000, O[])
for i in 1:n
x = r[i]
ls[i] = cpdf(lm, (), x)
us[i] = x in usup ? cpdf(um, (), x) : 1.0
end
lsum = sum(ls)
usum = sum(us)
resultls = zeros(Float64, n)
resultus = zeros(Float64, n)
for i in 1:n
# The probability of a value cannot be less than 1 - the upper bounds of the other values
# or more than 1 - the lower bounds of the other values
resultls[i] = max(ls[i], 1 - (usum - us[i]))
resultus[i] = min(us[i], 1 - (lsum - ls[i]))
end
return (resultls, resultus)
end
function joint(alg::VE, runtime::Runtime, insts::Vector{<:Instance})
vars::Vector{Variable} = [i.node for i in insts]
if any(v -> !(v in alg.query_vars), vars)
error("Cannot query eliminated variables")
end
(jnt, ids) = get_state(runtime, :joint_belief)
if !alg.bounds
# no bounds, just a single result
return marginalize(runtime, normalize(jnt), ids, alg.query_vars, vars, alg.depth)
else
# jnt has lower and upper bounds to probabilities
lower = marginalize(runtime, normalize(jnt[1]), ids, alg.query_vars, vars, alg.depth)
upper = marginalize(runtime, normalize(jnt[2]), ids, alg.query_vars, vars, alg.depth)
return (lower, upper)
end
end
function marginalize(runtime, factor::Factor, keys::Dict{<:Node,Int}, query_vars::Vector{<:Variable}, vars_to_remain::Vector{<:Variable}, depth::Int)
function get_var(k)
for v in query_vars
if keys[v] == k
return v
end
end
end
index = [get_var(k) for k in factor.keys]
ranges = [get_range(runtime, current_instance(runtime, v), depth) for v in index]
selector = [v in vars_to_remain for v in index]
combos = cartesian_product(ranges)
result = Dict{Tuple, Float64}()
for (combo, entry) in zip(combos, factor.entries)
vals = []
for i in 1:length(index)
if selector[i]
push!(vals, combo[i])
end
end
tup = tuple(vals...)
result[tup] = get(result, tup, 0.0) + entry
end
return Cat(result)
end
function infer(alg::VE, runtime::InstantRuntime,
evidence::Dict{Symbol, <:Score} = Dict{Symbol, Score}(),
interventions::Dict{Symbol, <:Dist} = Dict{Symbol, Dist}(),
placeholder_beliefs = get_placeholder_beliefs(runtime))
network = get_network(runtime)
if !(isempty(interventions))
error("VE cannot handle interventions")
end
order = topsort(get_initial_graph(network))
ensure_all!(runtime)
for pname in keys(placeholder_beliefs)
ph = get_node(network, pname)
inst = current_instance(runtime, ph)
pi = placeholder_beliefs[pname]
set_range!(runtime, inst, support(pi, NTuple{0,Vector}(),
alg.default_range_size, output_type(ph)[]), alg.depth)
end
set_ranges!(runtime, evidence, alg.default_range_size, 1, order)
for (n,e) in evidence
node = get_node(network, n)
inst = current_instance(runtime, node)
post_evidence!(runtime, inst, e)
end
jnt = ve(runtime, order, alg.query_vars; depth = alg.depth, placeholder_beliefs = placeholder_beliefs, bounds = alg.bounds)
set_state!(runtime, :joint_belief, jnt)
end
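# A minimal sketch (hypothetical names; `net` is an InstantNetwork with variables :x and :y):
#
#     alg = VE([get_node(net, :x)])
#     runtime = Runtime(net)
#     infer(alg, runtime, Dict{Symbol, Score}(:y => HardScore(2)))
#     answer(Marginal(), alg, runtime, current_instance(runtime, get_node(net, :x)))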
#############################################
# #
# Helper functions for variable elimination #
# #
#############################################
# Create a map from variable names to factor keys
function factorkeys(order) :: Dict{Node, Int}
result = Dict{Node, Int}()
for var in order
result[var] = nextkey()
end
return result
end
# Create lower and upper bound factors for the given variable.
# Uses predetermined ranges for the variable and its parents.
#
# TODO: Change this to use an operator that returns a set of factors rather than probabilities.
# This will enable local factor decompositions for sfuncs.
function lower_and_upper_factors(runtime::Runtime, fn::Function,
ids::Dict{Node, Int}, var::Variable{I,J,O}, depth) where {I,J,O}
range = get_range(runtime, var, depth)
pars = get_parents(get_network(runtime), var)
parranges = []
for v in pars
push!(parranges, get_range(runtime, v, depth))
end
prtype = isempty(parranges) ? Vector{O} : typejoin([typeof(x) for x in parranges]...)
parranges = convert(Vector{prtype}, parranges)
inst = current_instance(runtime, var)
sf = get_sfunc(inst)
# @debug "lower_and_upper_factors" var=var.name sf=typeof(sf)
parids = tuple([ids[p] for p in pars]...)
# Since Expander needs a runtime to create factors, it has special purpose code
# if is_fixed(var.model) && isa(make_sfunc(var, runtime), Expander)
if isa(make_initial(var.model), Expander)
(lowers, uppers) = expander_probs(runtime, fn, var, depth)
keys = map(s -> ids[s], pars)
dims = isempty(pars) ? Int[] : map(length, parranges)
push!(keys, ids[var])
push!(dims, length(range))
kds = Tuple(dims)
kts = Tuple(keys)
return ([Factor(kds, kts, lowers)], [Factor(kds, kts, uppers)])
else
facts = make_factors(sf, range, Tuple(parranges), ids[var], parids)
return facts
end
end
function evidence_factor(runtime, var::Variable, ids::Dict)
inst = current_instance(runtime, var)
evidence = get_evidence(runtime, inst)
range = get_range(runtime, inst)
dims = Tuple(length(range))
keys = Tuple(ids[var])
entries = Array{Float64, 1}(undef, length(range))
for i = 1:length(range)
entries[i] = get_score(evidence, range[i])
end
return Factor(dims, keys, entries)
end
# Make all the initial factors for the given network
function produce_factors(runtime::Runtime, fn::Function,
order::Vector{Node}, ids::Dict, placeholder_beliefs, depth)
lowers = []
uppers = []
for node in order
if node isa Variable
(lower, upper) =
lower_and_upper_factors(runtime, fn, ids, node, depth)
append!(lowers, lower)
append!(uppers, upper)
inst = current_instance(runtime, node)
if has_evidence(runtime, inst)
evfact = evidence_factor(runtime, node, ids)
push!(lowers, evfact)
push!(uppers, evfact)
end
else
bel = placeholder_beliefs[node.name]
range = get_range(runtime, node, depth)
(phlower, phupper) = make_factors(bel, range, (), ids[node], ())
append!(lowers, phlower)
append!(uppers, phupper)
end
end
return (lowers, uppers)
end
function make_graph(factors)
result = Graph()
for fact in factors
ids = fact.keys
for (i,id) in enumerate(ids)
size = fact.dims[i]
add_node!(result, id, size)
for other in ids
if other != id
add_undirected!(result, id, other)
end
end
end
end
return result
end
# Eliminate the variable with the given id by multiplying all factors
# mentioning the variable and summing the variable out of the result
function eliminate(var_id, factors)
relevant = filter(f -> var_id in f.keys, factors)
remaining = filter(f -> !(var_id in f.keys), factors)
if !isempty(relevant)
prodfact = relevant[1]
for i in 2:length(relevant)
prodfact = product(prodfact, relevant[i])
end
sumfact = sum_over(prodfact, var_id)
# If the node being eliminated is completely isolated from the rest of the network,
# sumfact will be empty and shouldn't be added
if length(sumfact.dims) > 0
push!(remaining, sumfact)
end
end
return remaining
end
########################################################
# #
# Run the variable elimination algorithm #
# #
# Works with both discrete and continuous variables, #
# using previously determined ranges. #
# #
# The second argument is a topologically ordered #
# list of variables to include in the computation. #
# The code assumes that for any variable, its parents #
# are present and precede it in the order. #
# The third argument is a list of variables to query #
# i.e. not to eliminate. This must be nonempty. #
# #
# Returns lower and upper bound factors, #
# as well as a key to variable names from factor keys. #
# #
########################################################
function ve(runtime::Runtime, order::Vector{<:Node},
query_vars::Vector{<:Variable}; depth = 1, placeholder_beliefs = Dict{Symbol,Dist}(), bounds = false)
@assert !isempty(query_vars)
ids :: Dict{Node, Int} = factorkeys(order)
# Making values for an expander takes a function to solve subnetworks.
# Here we create a function that passes the bounds flag for ve.
f(runtime, order, query_vars, depth) = ve(runtime, order, query_vars; depth = depth, placeholder_beliefs = placeholder_beliefs, bounds = bounds)
(lowers, uppers) = produce_factors(runtime, f, order, ids, placeholder_beliefs, depth)
ve_graph = make_graph(lowers) # Assumes that lower and upper factors have same structure
elim_order = greedy_order(ve_graph, map(v -> ids[v], query_vars))
for var_id in elim_order
lowers = eliminate(var_id, lowers)
if bounds
uppers = eliminate(var_id, uppers)
end
end
lowerprod = lowers[1]
for i in 2:length(lowers)
lowerprod = product(lowerprod, lowers[i])
end
if bounds
# if (length(lowers[1].entries) > 45 && length(uppers[1].entries) > 45)
# @info("$(typeof(lowers[1])) && $(typeof(uppers[1]))",
# lowers=lowers[1].entries[40:1:45],
# uppers=uppers[1].entries[40:1:45])
# end
upperprod = uppers[1]
for i in 2:length(uppers)
upperprod = product(upperprod, uppers[i])
end
return ((lowerprod, upperprod), ids)
else
return (lowerprod, ids)
end
end
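# Illustrative usage sketch (not part of the library; `runtime`, `net`, and the
# query variables `x` and `y` are hypothetical). `order` must contain the
# parents of every variable it includes, in topological order:
#
#     order = topsort(get_initial_graph(net))
#     ((lower, upper), ids) = ve(runtime, order, [x, y]; bounds = true)
#     (factor, ids) = ve(runtime, order, [x, y])  # single factor, no bounds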
function copy_graph(g :: Graph)
ns = copy(g.nodes)
es = Dict()
ss = Dict()
for n in ns
es[n] = copy(g.edges[n])
ss[n] = g.sizes[n]
end
return Graph(ns, es, ss)
end
##########################
# #
# Eliminating a variable #
# #
##########################
function unconnected_neighbors(g :: Graph, n :: Int)
neighbors = g.edges[n]
m = length(neighbors)
result = []
for i = 1:m
n1 = neighbors[i]
for j = i+1:m
n2 = neighbors[j]
if !(n1 in g.edges[n2]) || !(n2 in g.edges[n1])
push!(result, (n1, n2))
end
end
end
return result
end
function eliminate(g :: Graph, n :: Int)
ns = unconnected_neighbors(g, n)
deleteat!(g.nodes, findfirst(m -> m == n, g.nodes))
delete!(g.edges, n)
delete!(g.sizes, n)
for (m,ms) in g.edges
if n in ms
deleteat!(ms, findfirst(m -> m == n, ms))
end
end
for (i, j) in ns
add_undirected!(g, i, j)
end
end
#############################################
# #
# Greedily determining an elimination order #
# to minimize the number of edges added #
# #
#############################################
function cost(g :: Graph, n :: Int)
return length(unconnected_neighbors(g, n))
end
function greedy_order(g :: Graph, to_leave :: Array{Int})
candidates = filter(n -> !(n in to_leave), g.nodes)
result = []
h = copy_graph(g)
while !isempty(candidates)
costs = map(c -> cost(h, c), candidates)
best = candidates[argmin(costs)]
push!(result, best)
eliminate(h, best)
deleteat!(candidates, findfirst(n -> n == best, candidates))
end
return result
end
function greedy_order(g :: Graph)
a :: Array{Int, 1} = []
return greedy_order(g, a)
end
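# Illustrative sketch (hypothetical node ids and sizes): a 1-2-3 chain where
# node 3 is queried, so only nodes 1 and 2 are eliminated, cheapest first.
#
#     g = Graph()
#     add_node!(g, 1, 2); add_node!(g, 2, 2); add_node!(g, 3, 2)
#     add_undirected!(g, 1, 2)
#     add_undirected!(g, 2, 3)
#     greedy_order(g, [3])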
export
IterativeAlgorithm,
prepare,
refine
"""
abstract type IterativeAlgorithm <: InstantAlgorithm
Algorithm that runs iteratively on an `InstantNetwork`.
The algorithm should support two methods: `prepare` and `refine`.
An `IterativeAlgorithm` is also trivially an `InstantAlgorithm`, where
`infer` is implemented by calling `prepare` and then `refine` once.
"""
abstract type IterativeAlgorithm <: InstantAlgorithm end
"""
prepare(algorithm::IterativeAlgorithm, runtime::InstantRuntime,
evidence::Dict{Symbol, <:Score},
interventions::Dict{Symbol, <:Dist},
placeholder_beliefs::Dict{Symbol, <:Dist})
Prepare the inference algorithm for iteration.
Stores the algorithm state in `runtime`.
# Arguments
- `algorithm`: The iterative algorithm to run.
- `runtime`: The runtime in which to run the algorithm.
- `evidence`: The supplied evidence, which defaults to `Dict()`.
- `interventions`: The supplied interventions, which defaults to `Dict()`.
- `placeholder_beliefs`: The beliefs associated with the placeholders in the
network, which default to `Dict()`.
"""
function prepare(algorithm::IterativeAlgorithm, runtime::InstantRuntime,
evidence::Dict{Symbol, <:Score}, interventions::Dict{Symbol, <:Dist},
placeholder_beliefs::Dict{Symbol, <:Dist})
end
"""
refine(algorithm::IterativeAlgorithm, runtime::InstantRuntime)
Perform the next iteration of the algorithm.
Uses the algorithm state stored in `runtime` and stores the next state in `runtime`.
"""
function refine(algorithm::IterativeAlgorithm, runtime::InstantRuntime) end
function infer(algorithm::IterativeAlgorithm, runtime::InstantRuntime,
evidence::Dict{Symbol, <:Score}, interventions::Dict{Symbol, <:Dist}, placeholder_beliefs::Dict{Symbol, <:Dist})
prepare(algorithm, runtime, evidence, interventions, placeholder_beliefs)
refine(algorithm, runtime)
end
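# Illustrative driver loop for any IterativeAlgorithm (hypothetical `alg`,
# `runtime`, argument dicts, and iteration budget):
#
#     prepare(alg, runtime, evidence, interventions, placeholder_beliefs)
#     for _ in 1:10
#         refine(alg, runtime)
#     end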
export
IterativeSampler
"""
struct IterativeSampler <: IterativeAlgorithm
An iterative algorithm that uses a sampler to accumulate more samples on each refinement.
"""
struct IterativeSampler <: IterativeAlgorithm
base_algorithm :: InstantAlgorithm
end
function prepare(alg::IterativeSampler, runtime::InstantRuntime,
evidence::Dict{Symbol, <:Score} = Dict{Symbol, Score}(),
interventions::Dict{Symbol, <:Dist} = Dict{Symbol, Dist}(),
placeholder_beliefs = get_placeholder_beliefs(runtime))
net = get_network(runtime)
for (n,e) in evidence
v = get_node(net, n)
inst = current_instance(runtime, v)
post_evidence!(runtime, inst, e)
end
for (n,i) in interventions
v = get_node(net, n)
inst = current_instance(runtime, v)
post_intervention!(runtime, inst, i)
end
for (n,b) in placeholder_beliefs
p = get_node(net, n)
inst = current_instance(runtime, p)
post_belief!(runtime, inst, b)
end
set_state!(runtime, :particles, Particles(Sample[], Float64[]))
end
function refine(alg::IterativeSampler, runtime::InstantRuntime)
current_particles = get_state(runtime, :particles)
infer(alg.base_algorithm, runtime)
new_particles = get_state(runtime, :particles)
all_samples = copy(current_particles.samples)
append!(all_samples, new_particles.samples)
all_log_weights = copy(current_particles.log_weights)
append!(all_log_weights, new_particles.log_weights)
set_state!(runtime, :particles, Particles(all_samples, all_log_weights))
end
answer(q::Query, alg::IterativeSampler, run::Runtime, inst::VariableInstance) = answer(q, alg.base_algorithm, run, inst)
answer(q::Query, alg::IterativeSampler, run::Runtime, insts::Vector{VariableInstance}) = answer(q, alg.base_algorithm, run, insts)
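# Illustrative sketch (assumes an importance sampling InstantAlgorithm such as
# likelihood weighting; the constructor name `LW` is an assumption):
#
#     alg = IterativeSampler(LW(1000))
#     prepare(alg, runtime)
#     refine(alg, runtime)  # each call appends another batch of particles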
export
LazyInference,
LazyState
"""
mutable struct LazyState
Maintains the state of a lazy algorithm
# Fields
- `previous_algorithm`: The last instant algorithm used, if any
- `evidence`: The evidence supplied in `prepare`
- `interventions`: The interventions supplied in `prepare`
- `placeholder_beliefs`: The placeholder beliefs supplied in `prepare`
- `next_size`: The range size to use in the next call to `refine`
- `next_depth`: The depth to use in the next call to `refine`
- `next_iteration`: The number of the next iteration
- `is_complete`: A flag indicating whether the network has been fully expanded
- `order`: The order of nodes used in computations
"""
mutable struct LazyState
previous_algorithm :: Union{InstantAlgorithm, Nothing}
evidence :: Dict{Symbol, Score}
interventions :: Dict{Symbol, Dist}
placeholder_beliefs :: Dict{Symbol, Dist}
next_size :: Int
next_depth :: Int
next_iteration :: Int
is_complete :: Bool
order :: Vector{Node}
end
"""
LazyState(ns, nd, ni, ic)
Instantiate `LazyState` with `next_size`, `next_depth`, `next_iteration`, and `is_complete`.
"""
LazyState(ns, nd, ni, ic) = LazyState(nothing, Dict{Symbol,Score}(), Dict{Symbol,Dist}(), Dict{Symbol,Dist}(), ns, nd, ni, ic, Node[])
"""
struct LazyInference <: IterativeAlgorithm
An iterative algorithm that expands recursively and increases the ranges of instances on every iteration.
"""
struct LazyInference <: IterativeAlgorithm
algorithm_maker :: Function # A function that takes the current range size and depth and returns the instant algorithm to use
increment :: Int
start_size :: Int
max_iterations :: Int
start_depth :: Int
state :: LazyState
"""
function LazyInference(maker::Function; increment = 10, start_size = increment, max_iterations = 100, start_depth = 1)
# Arguments
- `maker`: A function that takes a range size and expansion depth and returns an `InstantAlgorithm`
- `increment`: The increment to range size on every iteration
- `start_size`: The starting range size
- `max_iterations`: The maximum number of refinement steps
- `start_depth`: The depth of recursive expansion in the first iteration
"""
function LazyInference(maker::Function;
increment = 10, start_size = increment, max_iterations = 100, start_depth = 1)
new(maker, increment, start_size, max_iterations, start_depth,
LazyState(start_size, start_depth, 1, false))
end
end
function answer(query::Query, lazyalg::LazyInference, runtime::InstantRuntime, target::VariableInstance)
state = lazyalg.state
return answer(query, state.previous_algorithm, runtime, target)
end
function prepare(alg::LazyInference, runtime::InstantRuntime,
evidence::Dict{Symbol, <:Score} = Dict{Symbol, Score}(),
interventions::Dict{Symbol, <:Dist} = Dict{Symbol, Dist}(),
placeholder_beliefs = get_placeholder_beliefs(runtime))
ensure_all!(runtime)
net = get_network(runtime)
# The evidence, interventions, and placeholder_beliefs are punted to refine to pass to the underlying algorithm
state = alg.state
state.evidence = evidence
state.interventions = interventions
state.placeholder_beliefs = placeholder_beliefs
state.order = topsort(get_initial_graph(net))
clear_analysis!()
end
function refine(lazyalg::LazyInference, runtime::InstantRuntime)
state = lazyalg.state
if !(state.is_complete || state.next_iteration > lazyalg.max_iterations)
set_ranges!(runtime, state.evidence, state.next_size, state.next_depth, state.order)
inferencealg = lazyalg.algorithm_maker(state.next_size, state.next_depth)
state.previous_algorithm = inferencealg
infer(inferencealg, runtime,
state.evidence, state.interventions, state.placeholder_beliefs)
state.is_complete = _complete(runtime)
state.next_iteration += 1
state.next_depth += 1
state.next_size += lazyalg.increment
end
end
export
LSFI
"""
function LSFI(query_vars;
increment = 10, start_size = increment, max_iterations = 100, start_depth = 1)
A lazy inference algorithm that uses variable elimination at every step.
# Arguments
- `query_vars`: Variables that can be queried after each `refine` step
- `increment`: The increment to range size on every iteration
- `start_size`: The starting range size
- `max_iterations`: The maximum number of refinement steps
- `start_depth`: The depth of recursive expansion in the first iteration
"""
function LSFI(query_vars;
increment = 10, start_size = increment, max_iterations = 100, start_depth = 1)
# query_vars = [get_node(net, :out)]
maker(size, depth) = VE(query_vars; depth = depth, bounds = true, range_size = size)
return LazyInference(maker; increment = increment, start_size = start_size,
max_iterations = max_iterations, start_depth = start_depth)
end
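# Illustrative sketch (hypothetical network `net` and runtime): refine until
# the expansion is complete or the iteration budget is exhausted.
#
#     alg = LSFI([get_node(net, :out)]; start_size = 5, increment = 5)
#     prepare(alg, runtime)
#     refine(alg, runtime)  # widens ranges and deepens expansion each call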
export
FixedTimeModel
"""
abstract type FixedTimeModel{I,J,O} <: Model{I,J,O}
A dynamic model defined only for a fixed time delta. Must implement
`get_initial`, `get_transition`, and `get_dt`.
These can depend on the current time.
"""
abstract type FixedTimeModel{I,J,O} <: Model{I,J,O} end
# get_initial(::FixedTimeModel) = error("Not implemented")
# get_transition(::FixedTimeModel) = error("Not implemented")
# get_dt(::FixedTimeModel) = error("Not implemented")
make_initial(m::FixedTimeModel, t=0) = get_initial(m, t)
function make_transition(m::FixedTimeModel, parenttimes, time)
dt = get_dt(m)
# Allow edges from the same time step, or intertemporal edges from the previous time step at distance dt
if all(t -> time - t == dt || time == t, parenttimes)
return get_transition(m, time)
else
error("make_transition called on FixedTimeModel with incorrect dt")
end
end
# is_fixed(::FixedTimeModel) = true
export HomogeneousModel
"""
HomogeneousModel{I,J,O} <: TimelessFixedTimeModel{I,J,O}
A dynamic model with a fixed time step and the same transition model at every time point.
The constructor takes the initial and transition sfuncs, along with an optional `dt` (defaults to 1).
"""
struct HomogeneousModel{I,J,O} <: TimelessFixedTimeModel{I,J,O}
initial :: SFunc{I,O}
transition :: SFunc{J,O}
dt :: Number
end
function HomogeneousModel(init::SFunc{I,O}, trans::SFunc{J,O}, dt = 1) where {I,J,O}
HomogeneousModel{I,J,O}(init, trans, dt)
end
get_initial(m::HomogeneousModel) = m.initial
get_transition(m::HomogeneousModel) = m.transition
get_dt(m::HomogeneousModel) = m.dt
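# Illustrative sketch (assumes the `Cat` and `DiscreteCPT` sfuncs available
# elsewhere in this codebase, with the constructor shapes shown; the
# probabilities are made up):
#
#     init = Cat([false, true], [0.9, 0.1])
#     trans = DiscreteCPT([false, true], Dict((false,) => [0.95, 0.05],
#                                             (true,) => [0.2, 0.8]))
#     m = HomogeneousModel(init, trans)  # dt defaults to 1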
export
InstantModel
"""
abstract type InstantModel{I,O} <: Model{I,Nothing,O}
A model for a variable with no time dependencies.
Since this model has no transitions, it can only be called with
`make_initial` and not `make_transition`
(i.e. `make_transition` falls back to `make_initial`).
"""
abstract type InstantModel{I,O} <: Model{I,Nothing,O} end
# Instant models can be freely used dynamically, with no time dependencies
make_transition(m::InstantModel, parenttimes, time) = make_initial(m)
export SimpleModel
"""
struct SimpleModel{I,O} <: TimelessInstantModel{I,O}
A model that always produces the same SFunc.
This is a TimelessInstantModel, so it must always be called when the parents are at the same time point.
The constructor takes the sfunc as an argument, which is stored.
There is a convenience method to create a SimpleModel for any sfunc by applying the sfunc to zero arguments.
"""
struct SimpleModel{I, O} <: TimelessInstantModel{I, O}
sf::SFunc{I, O}
end
make_initial(m::SimpleModel) = m.sf
# Convenience for making constant models, a very common case
(sf::SFunc{I, O})() where {I, O} = SimpleModel{I, O}(sf)
export StaticModel
"""
struct StaticModel{I,O} <: VariableTimeModel{I,Tuple{O},O}
A static model represents a variable that never changes its value.
The value is set up through an sfunc created by `make_initial`.
At any later time point, it simply copies its previous value.
Because it can be used flexibly, we make it a subtype of VariableTimeModel.
"""
struct StaticModel{I,O} <: VariableTimeModel{I,Tuple{O},O}
sf::SFunc{I,O}
end
function make_initial(m::StaticModel, time)
return m.sf
end
function make_transition(::StaticModel{I,O}, parenttimes, time) where {I,O}
return Det(Tuple{O}, O, x -> x)
end
export TimelessFixedTimeModel
"""
abstract type TimelessFixedTimeModel{I,J,O} <: FixedTimeModel{I,J,O}
A FixedTimeModel in which the initial and transition models do not depend on the current time.
In addition to `get_dt`, must implement a version of `get_initial` and `get_transition` that
do not take the current time as an argument.
"""
abstract type TimelessFixedTimeModel{I,J,O} <: FixedTimeModel{I,J,O} end
get_initial(m::TimelessFixedTimeModel, t) = get_initial(m)
get_transition(m::TimelessFixedTimeModel, t) = get_transition(m)
export TimelessInstantModel
"""
abstract type TimelessInstantModel{I,O} <: InstantModel{I,O}
An InstantModel in which the sfunc made does not depend on time.
Must implement a version of `make_initial` that does not take the current time as argument.
Note that `make_initial` can be defined to take keyword arguments, so the sfunc created
need not be exactly the same every time.
"""
abstract type TimelessInstantModel{I,O} <: InstantModel{I,O} end
make_initial(m::TimelessInstantModel,t;keys...) = make_initial(m;keys...)
export VariableTimeModel
"""
abstract type VariableTimeModel{I,J,O} <: Model{I,J,O}
A model that creates sfuncs based on the time delta between the parents and the current instance.
In general, the deltas can be different for different parents.
This type does not introduce any new functionality over Model.
Its purpose is to make explicit the fact that for this type of model, separate time deltas are possible.
Must implement `make_initial`, which takes the current time, and `make_transition`, which takes the current time and parent times.
"""
abstract type VariableTimeModel{I,J,O} <: Model{I,J,O} end
export
ConfigurableModel,
set_config_spec!,
get_config_spec,
converged
#=
A ConfigurableModel contains an underlying model, and allows the SFuncs returned by the underlying model
to be configured using a configuration specification. Those SFuncs should support the following operators:
configure(sf, config_spec), which returns an sfunc (could be fresh or a mutation of sf)
converged(sf, old_config_spec, new_config_spec) :: Bool (this is only needed by algorithms like EM)
The type parameters of ConfigurableModel are as follows:
I : Parents of initial sfunc
J : Parents of transition sfunc
O : Outputs of both initial and transition sfuncs
C : Configuration specification
S : Stats
An instance of ConfigurableModel must be provided with the following methods:
base_model(m)
get_config_spec(m)
initial_stats(m) :: S
accumulate_stats(m, current_stats :: S, new_stats) :: S
which uses the current stats to produce updated stats
maximize_stats!(m, stats :: S) :: C
Typical usage will start by first setting current_stats to initial_stats, and then repeatedly
- computing update_info
- calling accumulate_stats using current_stats and update_info
- calling maximize_stats! after all the stats have been added to set config_spec
=#
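# Illustrative sketch of the workflow described above (hypothetical model `m`
# and sequence of update_infos):
#
#     stats = initial_stats(m)
#     for info in update_infos
#         stats = accumulate_stats(m, stats, info)
#     end
#     maximize_stats!(m, stats)  # sets the new config_spec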
abstract type ConfigurableModel{I,J,O,C,S} <: Model{I,J,O} end
make_initial(m :: ConfigurableModel, t) = configure(make_initial(m.base_model, t), m.config_spec)
make_transition(m :: ConfigurableModel, parent_times, time) = configure(make_transition(m.base_model, parent_times, time), m.config_spec)
function set_config_spec!(m :: ConfigurableModel, spec :: C) where C
m.config_spec = spec
end
get_config_spec(m :: ConfigurableModel) = m.config_spec
# Eventually we want to create a dynamic version, but for now it's just static
# function converged(m :: ConfigurableModel{I, J, O, C, S}, old_spec :: C, new_spec :: C,
# parent_times, time) :: Bool where {I, J, O, C, S}
# init = make_initial(m.base_model, time)
# trans = make_transition(m.base_model, parent_times, time)
# converged(init, old_spec, new_spec) && converged(trans, old_spec, new_spec)
# end
function converged(m :: ConfigurableModel{I, J, O, C, S}, old_spec :: C, new_spec :: C) :: Bool where {I, J, C, O, S}
init = make_initial(m.base_model, 0) # static version; initial time 0 is assumed
converged(init, old_spec, new_spec)
end
export
Parameterized
import Scruff: make_initial, make_transition
import Scruff.Operators: initial_stats, accumulate_stats, maximize_stats
# Parameterized is a ConfigurableModel in which the base model is a SimpleModel over an sfunc, and the config_spec has the same datatype as explicitly defined
# parameters of the sfunc. The sfunc must have get_params, set_params!, initial_stats, accumulate_stats,
# and maximize_stats methods defined.
# This very common case is made easy with this code.
# A Parameterized must have an method defined
#
# base_model(m) :: SimpleModel{I, O}
#
# All the other methods of a ConfigurableModel will then be defined automatically.
abstract type Parameterized{I,O,C,S} <: ConfigurableModel{I,I,O,C,S} end
make_initial(m :: Parameterized, t) = set_params!(make_initial(base_model(m), t),
get_config_spec(m))
make_transition(m :: Parameterized, parent_times, time) = set_params!(make_transition(base_model(m), parent_times, time),
get_config_spec(m))
initial_stats(m :: Parameterized) = initial_stats(get_sf(m))
accumulate_stats(m :: Parameterized, s1, s2) = accumulate_stats(get_sf(m), s1, s2)
function maximize_stats(m :: Parameterized, stats)
conf_sp = maximize_stats(get_sf(m), stats)
set_config_spec!(m, conf_sp)
end
export
SimpleNumeric
mutable struct SimpleNumeric{I,O,S} <: Parameterized{I,O,S,S}
base :: SimpleModel{I, O}
config_spec :: S
epsilon :: Float64
SimpleNumeric{I,O,S}(sf) where {I,O,S} = new(SimpleModel(sf), get_params(sf), 0.01)
SimpleNumeric{I,O,S}(sf, eps) where {I,O,S} = new(SimpleModel(sf), get_params(sf), eps)
end
get_sf(m :: SimpleNumeric) = m.base.sf
base_model(m :: SimpleNumeric) = m.base
get_config_spec(m :: SimpleNumeric) = m.config_spec
function set_config_spec!(m :: SimpleNumeric{I, O, S}, cs :: S) where {I,O,S}
m.config_spec = cs
m
end
converged(m :: SimpleNumeric, old_spec, new_spec) = converged_numeric(old_spec, new_spec, m.epsilon)
using ..MultiInterface
# Fallback so that operator calls with no implementation return nothing.
MultiInterface.get_imp(::Nothing, args...) = nothing
# Being specific here has a big perf impact due to type-stability.
const FloatType = Float64
@interface forward(sf::SFunc{I, O}, i::I)::Dist{O} where {I, O}
# Provide a set of weighted values that characterize the distribution
# (e.g. such that expectations can be well approximated E[f(x)] \approx \sum w_i f(x_i) / \sum w_i
# It is up to the implementation / policy how many to return
# So e.g. discretely supported distributions can return a precise value
# while continuous distributions can return some approximate set of samples
# controlled by an op_impl hyper parameter
@interface weighted_values(d::Dist{O})::Tuple{Vector{<:O}, Vector{FloatType}} where {O}
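# Illustrative use of weighted_values (a sketch; `d` is any Dist with an
# implementation and `f` any function on its outputs):
#
#     xs, ws = weighted_values(d)
#     approx = sum(w * f(x) for (x, w) in zip(xs, ws)) / sum(ws)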
@interface inverse(sf::SFunc{I,O}, o::O)::Score{I} where {I,O}
@interface is_deterministic(sf::SFunc)::Bool
@interface sample(sf::SFunc{I,O}, i::I)::O where {I,O}
@interface sample_logcpdf(sf::SFunc{I,O}, i::I)::Tuple{O, <:AbstractFloat} where {I,O}
# @interface invert(sf::SFunc{I,O}, o::O)::I where {I,O}
@interface lambda_msg(sf::SFunc{I,O}, i::SFunc{<:Option{Tuple{}}, O})::SFunc{<:Option{Tuple{}}, I} where {I,O}
@interface marginalize(sfb::SFunc{X, Y}, sfa::SFunc{Y, Z})::SFunc{X, Z} where {X, Y, Z}
@interface logcpdf(sf::SFunc{I,O}, i::I, o::O)::FloatType where {I,O}
@interface cpdf(sf::SFunc{I,O}, i::I, o::O)::FloatType where {I,O}
@interface log_cond_prob_plus_c(sf::SFunc{I,O}, i::I, o::O)::AbstractFloat where {I,O}
@interface f_expectation(sf::SFunc{I,O}, i::I, fn::Function) where {I,O}
# Expectation (and others) should either return some continuous relaxation of O (e.g. Ints -> Float) or there should be another op that does
@interface expectation(sf::SFunc{I,O}, i::I) where {I,O}
@interface variance(sf::SFunc{I,O}, i::I)::O where {I,O}
@interface get_score(sf::SFunc{Tuple{I},O}, i::I)::AbstractFloat where {I,O}
@interface get_log_score(sf::SFunc{Tuple{I},O}, i::I)::AbstractFloat where {I,O}
# Return a new SFunc that is the result of summing samples from each constituent SFunc
@interface sumsfs(fs::NTuple{N, <:SFunc{I, O}})::SFunc{I, O} where {N, I, O}
# Stuff inspired by Distributions.jl interfaces (not super consistent in support though - may require some patching / care in declarations)
@interface fit_mle(t::Type{S}, dat::Dist{O})::S where {O, S <: Dist{O}}
@interface fit_mle_joint(t::Type{S}, dat::Dist{Tuple{I, O}})::S where {I, O, S <: SFunc{I, O}}
@interface support_minimum(sf::SFunc{I, O}, i::I)::O where {I, O}
@interface support_maximum(sf::SFunc{I, O}, i::I)::O where {I, O}
# One output SFunc for each entry in O (and vice-versa). TODO more specific type signatures
@interface make_marginals(sf::SFunc{I, <:Tuple})::NTuple{<:Any, <:SFunc{I}} where {I}
@interface join_marginals(sfs::NTuple{<:Any, <:SFunc{I}})::SFunc{I, <:Tuple} where {I}
@interface support(sf::SFunc{I,O},
parranges::NTuple{N,Vector},
size::Integer,
curr::Vector{<:O}) where {I,O,N}
@interface support_quality(sf::SFunc, parranges)
@interface bounded_probs(sf::SFunc{I,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector})::Tuple{Vector{<:AbstractFloat}, Vector{<:AbstractFloat}} where {I,O,N}
@interface make_factors(sf::SFunc{I,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
id,
parids::Tuple)::Tuple{Vector{<:Scruff.Utils.Factor}, Vector{<:Scruff.Utils.Factor}} where {I,O,N}
@interface get_params(sf::SFunc)
@interface set_params!(sf :: SFunc, params)
# TODO vvvvvv Statistics computation not finished - not using anymore, defined for ConfigurableModel
@interface initial_stats(sf::SFunc)
# TODO create an abstract type Stats{I,O}
# (range, parranges, pi's, lambda's)
@interface expected_stats(sf::SFunc{I,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
pis::NTuple{M,Dist},
child_lambda::Score{<:O}) where {I,O,N,M}
@interface accumulate_stats(sf::SFunc, existing_stats, new_stats)
@interface maximize_stats(sf::SFunc, stats)
@interface configure(sf::SFunc, config_spec) :: SFunc
# ^^^^ Not finished
@interface compute_bel(sf::SFunc{I,O},
range::VectorOption{<:O},
pi::Dist{<:O},
lambda::Score)::Dist{<:O} where {I,O}
@interface compute_lambda(sf::SFunc,
range::VectorOption,
lambda_msgs::Vector{<:Score})::Score
@interface send_pi(sf::SFunc{I,O},
range::VectorOption{O},
bel::Dist{O},
lambda_msg::Score)::Dist{<:O} where {I,O}
@interface outgoing_pis(sf::SFunc,
range::VectorOption,
bel::Dist,
incoming_lambdas::VectorOption{<:Score})::Vector{<:Dist}
@interface outgoing_lambdas(sf::SFunc{I,O},
lambda::Score,
range::VectorOption,
parranges::NTuple{N,Vector},
incoming_pis::Tuple)::Vector{<:Score} where {N,I,O}
@interface compute_pi(sf::SFunc{I,O},
range::VectorOption{O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple)::Dist{<:O} where {N,I,O}
@interface send_lambda(sf::SFunc{I,O},
lambda::Score,
range::VectorOption,
parranges::NTuple{N,Vector},
incoming_pis::Tuple,
parent_idx::Integer)::Score where {N,I,O}
export
support_quality_rank,
support_quality_from_rank
@impl begin
struct SFuncExpectation end
function expectation(sf::SFunc{I,O}, i::I) where {I,O}
return f_expectation(sf, i, x -> x)
end
end
"""
support_quality_rank(sq::Symbol)
Convert the support quality symbol into an integer for comparison.
"""
function support_quality_rank(sq::Symbol)
if sq == :CompleteSupport return 3
elseif sq == :IncrementalSupport return 2
else return 1 end
end
"""
support_quality_from_rank(rank::Int)
Convert the rank back into the support quality.
"""
function support_quality_from_rank(rank::Int)
if rank == 3 return :CompleteSupport
elseif rank == 2 return :IncrementalSupport
else return :BestEffortSupport end
end
@impl begin
struct SFuncSupportQuality end
function support_quality(s::SFunc, parranges)
:BestEffortSupport
end
end
@impl begin
struct DefaultWeightedValues
num_samples::Int
end
function weighted_values(s::Dist)
samples = [sample(s, ()) for _ in 1:num_samples]
return (samples, ones(num_samples))
end
end
@impl begin
struct DefaultFitMLE end
function fit_mle(s::Type{D}, ref::D) where {D <: Dist}
return ref
end
end
@impl begin
struct SampledFitMLEJoint end
function fit_mle_joint(t::Type{D}, dat::Dist{Tuple{Tuple{}, O}})::D where {O, D <: Dist{O}}
samples, weights = weighted_values(dat)
# Just get the output component (rest should be empty tuple)
samples = [s[2] for s in samples]
cat = Discrete(samples, weights / sum(weights))
return fit_mle(t, cat)
end
end
export
@op_perf,
@make_c,
runtime
# All TODO
macro op_perf(op_perf_body)
return esc(op_perf_macro(op_perf_body))
end
function op_perf_macro(op_perf_body::Expr)
return quote
$op_perf_body
export $(op_perf_body.args[1].args[1])
end
end
macro make_c()
return (:1)
end
function runtime end
#=
unconditional.jl : General representation of sfuncs that don't depend on anything.
=#
export Unconditional
"""
abstract type Unconditional{T} <: SFunc{Tuple{}, T}
`Unconditional` is a general representation of an *sfunc* that does not depend
on anything. It has no input.
# Type parameters
- `T`: the output type of the `Unconditional`
"""
abstract type Unconditional{T} <: SFunc{Tuple{}, T} end
@impl begin
struct UnconditionalSample end
function sample(sf::Unconditional{T}, i::Tuple{})::T where {T}
sample(sf, i) # should never be called
end
end
@impl begin
struct UnconditionalLogcpdf end
function logcpdf(sf::Unconditional{T}, i::Tuple{}, o::T) where {T}
logcpdf(sf, i, o) # should never be called
end
end
@impl begin
struct UnconditionalMakeFactors
numpartitions::Int64 = 100
end
function make_factors(sf::Unconditional{T},
range::Vector{T},
parranges::NTuple{N,Vector},
id,
parids::Tuple)::Tuple{Vector{<:Scruff.Utils.Factor}, Vector{<:Scruff.Utils.Factor}} where {T,N}
(lowers, uppers) = bounded_probs(sf, range, ())
keys = (id,)
dims = (length(range),)
return ([Factor(dims, keys, lowers)], [Factor(dims, keys, uppers)])
end
end
# compute_pi(u::Unconditional, range, parranges, incoming_pis) = compute_pi(u, range)
# unconditional sfuncs do not have parents, so send_lambda trivially sends an empty message
@impl begin
struct UnconditionalSendLambda end
function send_lambda(sf::Unconditional{T},
lambda::Score{T},
range::Vector{T},
parranges::NTuple{N,Vector},
incoming_pis::Tuple,
parent_idx::Integer)::Score where {T,N}
SoftScore(Float64[], Float64[])
end
end
export Apply
"""
Apply{J, O} <: SFunc{Tuple{SFunc{J, O}, J}, O}
Apply represents an sfunc that takes two groups of arguments. The first group is a single
argument, which is an sfunc to apply to the second group of arguments.
# Additional supported operators
- `support`
- `support_quality`
- `sample`
- `logcpdf`
- `compute_pi`
- `send_lambda`
# Type parameters
- `J`: the input type of the *sfunc* that may be applied; the applied *sfunc* together with an argument of type `J` forms the input of the `Apply`
- `O`: the output type of the *sfunc* that may be applied, which is also the output type of the `Apply`
"""
struct Apply{J <: Tuple, O} <: SFunc{Tuple{SFunc{J, O}, J}, O}
end
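# Illustrative sketch (assumes the `LinearGaussian` sfunc used elsewhere in
# this codebase, constructed from coefficients, bias, and variance):
#
#     lg = LinearGaussian((1.0,), 0.0, 1.0)
#     app = Apply{Tuple{Float64}, Float64}()
#     sample(app, (lg, (0.5,)))  # applies lg to the argument 0.5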
@impl begin
struct ApplySupport end
function support(::Apply{J,O},
parranges::NTuple{N,Vector},
size::Integer,
curr::Vector{<:O}) where {J<:Tuple,O,N}
result = Vector{O}()
for sf in parranges[1]
append!(result, support(sf, (parranges[2],), size, curr))
end
return unique(result)
end
end
@impl begin
struct ApplySupportQuality end
function support_quality(::Apply{J,O}, parranges) where {J,O}
q = support_quality_rank(:CompleteSupport)
for sf in parranges[1]
imp = get_imp(MultiInterface.get_policy(), Support, sf, parranges[2], 0, O[])
q = min(q, support_quality_rank(support_quality(imp, sf, parranges[2])))
end
return support_quality_from_rank(q)
end
end
@impl begin
struct ApplySample end
function sample(::Apply{J,O}, input::Tuple{SFunc{J,O}, J})::O where {J<:Tuple,O}
return sample(input[1], input[2])
end
end
@impl begin
struct ApplyLogcpdf end
function logcpdf(::Apply{J,O}, i::Tuple{SFunc{J,O}, J}, o::O)::AbstractFloat where {J<:Tuple,O}
return logcpdf(i[1], i[2], o)
end
end
# WARNING: THIS LOGIC DOES NOT WORK WITH MORE THAN ONE PARENT
@impl begin
struct ApplyComputePi end
function compute_pi(::Apply{J,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple)::Dist{<:O} where {N,J<:Tuple,O}
sfrange = parranges[1]
argsrange = parranges[2]
sfpi = incoming_pis[1]
argspi = incoming_pis[2]
result = zeros(Float64, length(range))
for sf in sfrange
p1 = cpdf(sfpi, (), sf)
p2 = compute_pi(sf, range, (argsrange,), (argspi,))
p3 = [p1 * cpdf(p2, (), x) for x in range]
result .+= p3
end
return Cat(range, result)
end
end
# WARNING: THIS LOGIC DOES NOT WORK WITH MORE THAN ONE PARENT
@impl begin
struct ApplySendLambda end
function send_lambda(::Apply{J,O},
lambda::Score{<:O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple,
parent_idx::Integer)::Score where {N,J<:Tuple,O}
@assert parent_idx == 1 || parent_idx == 2
sfrange = parranges[1]
argsrange = parranges[2]
sfpi = incoming_pis[1]
argspi = incoming_pis[2]
if parent_idx == 2
# For each args value, sum over the possible sfuncs and outputs y, weighting P(y|args) by the sfunc's probability and the lambda
result = Vector{Float64}()
for args in argsrange
resultpieces = Vector{Float64}()
for sf in sfrange
sp = logcpdf(sfpi, (), sf)
for y in range
a = isa(args, Tuple) ? args : tuple(args)
push!(resultpieces, sp + logcpdf(sf, a, y) + get_log_score(lambda, y))
end
end
push!(result, logsumexp(resultpieces))
end
return LogScore(argsrange, result)
else # parent_idx == 1
# This is simpler; we must sum over the arguments, which is achieved by the embedded compute_pi
result = Vector{Float64}()
for sf in sfrange
resultpieces = Vector{Float64}()
ypi = compute_pi(sf, range, (argsrange,), (argspi,))
for y in range
push!(resultpieces, logcpdf(ypi, (), y) + get_log_score(lambda, y))
end
push!(result, logsumexp(resultpieces))
end
return LogScore(sfrange, result)
end
end
end
export Chain
"""
struct Chain{I, J, K, O} <: Conditional{I, J, K, O, SFunc{J, O}}
A `Conditional` that chains its input `I` through a given function that returns an
`SFunc{J,O}`.
"""
struct Chain{I, J, K, O} <: Conditional{I, J, K, O, SFunc{J, O}}
fn::Function
"""
function Chain(I, J, O, fn)
Chain an input through `fn`.
The chain is an `SFunc{K,O}`, where `K` is the concatenation of tuples `I` and `J`.
`fn` is a function that takes an argument of type `I` and returns an `SFunc{J,O}`.
The `Chain` defines a generative conditional distribution as follows:
- Given inputs `i` and `j`
- Let `s::SFunc{J,O} = fn(i)`
- Use `j` to generate a value from `s`
For the common case, `Chain` has a special constructor where J is empty.
"""
function Chain(I, J, O, fn)
K = extend_tuple_type(I,J)
new{I, J, K, O}(fn)
end
Chain(I, O, fn) = Chain(I, Tuple{}, O, fn)
end
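# Illustrative sketch (assumes a `Normal(mean, variance)` Dist constructor as
# used elsewhere in this codebase, and the generic Conditional operators):
# choose a Normal based on the input.
#
#     ch = Chain(Tuple{Bool}, Float64,
#                i -> i[1] ? Normal(1.0, 1.0) : Normal(-1.0, 1.0))
#     sample(ch, (true,))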
function gensf(ch::Chain{I, J, K, O}, i::I) where {I, J, K, O}
ch.fn(i)
end
include("generate.jl")
include("apply.jl")
include("chain.jl")
include("mixture.jl")
include("network.jl")
include("serial.jl")
include("expander.jl")
include("sum.jl")
export
Expander,
apply
"""
mutable struct Expander{I,O} <: SFunc{I,O}
An Expander represents a model defined by a function that returns a
network. For a given value of an input, the conditional probability
distribution is provided by the network produced by the function
on that input.
For each such network, the expander manages a runtime to reason about it.
Expanders are lazy and do not evaluate the function until they have to.
As a result, there is state associated with Expanders. This is analysis
state rather than world state, i.e., it is the state of Scruff's
reasoning about the Expander. In keeping with Scruff design, Expanders
are immutable and all state associated with reasoning is stored in the
runtime that contains the expander. To support this, a runtime has three
fields of global state:
- `:subnets`: the expansions of all Expanders managed by the runtime
- `:subruntimes`: all the subruntimes recursively managed by this
runtime through Expanders, keyed by the networks
- `:depth`: the depth to which Expanders in this runtime should be expanded
# Type parameters
- `I`: the input type(s) of the `Expander`
- `O`: the output type(s) of the `Expander`
"""
mutable struct Expander{I,O} <: SFunc{I,O}
# TODO (MRH): SFunc params type parameter
fn :: Function
Expander(fn, I, O) = new{I,O}(memo(fn))
end
apply(expander::Expander, args...) = expander.fn(args...)
export
Generate
"""
Generate{O} <: SFunc{Tuple{Dist{O}}, O}
Generate a value from its `Dist` argument.
This helps in higher-order programming. A typical pattern will be to create an sfunc that produces a `Dist`,
and then generate many observations from the `Dist` using `Generate`.
# Additional supported operators
- `support`
- `support_quality`
- `sample`
- `logcpdf`
- `compute_pi`
- `send_lambda`
- `make_factors`
"""
struct Generate{O} <: SFunc{Tuple{Dist{O}}, O}
end
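# Illustrative sketch (`Cat` as used elsewhere in this codebase): draw a value
# from a Dist-valued parent.
#
#     sample(Generate{Int}(), (Cat([1, 2], [0.4, 0.6]),))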
@impl begin
struct GenerateSupport end
function support(::Generate{O},
parranges::NTuple{N,Vector},
size::Integer,
curr::Vector{<:O}) where {O,N}
result = Vector{O}()
for sf in parranges[1]
append!(result, support(sf, (), size, curr))
end
return unique(result)
end
end
@impl begin
struct GenerateSupportQuality end
function support_quality(::Generate{O}, parranges) where O
q = support_quality_rank(:CompleteSupport)
for sf in parranges[1]
imp = get_imp(MultiInterface.get_policy(), Support, sf, (), 0, O[])
q = min(q, support_quality_rank(support_quality(imp, sf, ())))
end
return support_quality_from_rank(q)
end
end
@impl begin
struct GenerateSample end
function sample(::Generate{O}, input::Tuple{<:Dist{O}})::O where O
return sample(input[1], ())
end
end
@impl begin
struct GenerateLogcpdf end
function logcpdf(::Generate{O}, i::Tuple{<:Dist{O}}, o::O)::AbstractFloat where O
return logcpdf(i[1], (), o)
end
end
# WARNING: THIS LOGIC DOES NOT WORK WITH MORE THAN ONE PARENT
@impl begin
struct GenerateComputePi end
function compute_pi(::Generate{O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple)::Dist{<:O} where {N,O}
sfrange = parranges[1]
sfpi = incoming_pis[1]
result = zeros(Float64, length(range))
for sf in sfrange
p1 = cpdf(sfpi, (), sf)
p2 = compute_pi(sf, range, (), ())
p3 = [p1 * cpdf(p2, (), x) for x in range]
result .+= p3
end
return Cat(range, result)
end
end
# WARNING: THIS LOGIC DOES NOT WORK WITH MORE THAN ONE PARENT
@impl begin
struct GenerateSendLambda end
function send_lambda(::Generate{O},
lambda::Score{<:O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple,
parent_idx::Integer)::Score where {N,O}
@assert parent_idx == 1
sfrange::Vector{typeof(parranges[1][1])} = parranges[1]
sfpi = incoming_pis[1]
resultprobs = Vector{Float64}()
for sf in sfrange
resultpieces = Vector{Float64}()
ypi = compute_pi(sf, range, (), ())
for y in range
push!(resultpieces, logcpdf(ypi, (), y) + get_log_score(lambda, y))
end
push!(resultprobs, logsumexp(resultpieces))
end
result :: LogScore{typeof(sfrange[1])} = LogScore(sfrange, resultprobs)
return result
end
end
@impl begin
struct GenerateMakeFactors end
function make_factors(::Generate{O}, range::Vector{<:O}, parranges::Tuple{<:Vector{<:Dist{O}}}, id::Int, parids::Tuple{Int}) where O
dims = (length(parranges[1]), length(range))
keys = (parids[1], id)
entries = Float64[]
for u in parranges[1]
for x in range
p = cpdf(u, (), x)
push!(entries, p)
end
end
fact = Factor(dims, keys, entries)
return ([fact], [fact])
end
end
export Mixture
"""
mutable struct Mixture{I,O} <: SFunc{I,O}
`Mixture` defines an *sfunc* representing mixtures of other *sfuncs*. It contains a
vector of *sfuncs* and a vector of probabilities that those *sfuncs* are selected,
associated with each other by index. The output type of a `Mixture` is defined by
the output type of its internal components. The parameters of a `Mixture` are
its probabilities followed by the parameters of all its internal components, in order.
# Additional supported operators
- `support`
- `support_quality`
- `sample`
- `cpdf`
- `expectation`
- `compute_pi`
- `send_lambda`
# Type parameters
- `I`: the input type(s) of the `Mixture`
- `O`: the shared output type(s) of its internal components and the output type(s) of the `Mixture`
"""
mutable struct Mixture{I,O} <: SFunc{I,O}
components::Vector{<:SFunc{I,O}}
probabilities::Vector{Float64}
end
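# Illustrative sketch (assumes a `Normal(mean, variance)` Dist constructor as
# used elsewhere in this codebase): a two-component mixture over Float64.
#
#     m = Mixture([Normal(-1.0, 1.0), Normal(1.0, 1.0)], [0.3, 0.7])
#     sample(m, ())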
@impl begin
struct MixtureSupport end
function support(sf::Mixture{I,O},
parranges::NTuple{N,Vector},
size::Integer,
curr::Vector{<:O}) where {I,O,N}
subsize = Int(ceil(size / length(sf.components)))
ranges = [support(comp, parranges, subsize, curr) for comp in sf.components]
result = []
for range in ranges
append!(result, range)
end
result = unique(result)
tresult = convert(Vector{output_type(sf)}, result)
# tresult = Vector{output_type(sf)}(undef, length(result))
# copyto!(tresult, result)
sort!(tresult)
return tresult
end
end
# STATS
@impl begin
struct MixtureInitialStats end
initial_stats(sf::Mixture) = [initial_stats(c) for c in sf.components]
end
@impl begin
struct MixtureAccumulateStats end
function accumulate_stats(sf::Mixture, existing_stats, new_stats)
[accumulate_stats(sf.components[i], existing_stats[i], new_stats[i]) for i in 1:length(sf.components)]
end
end
@impl begin
struct MixtureExpectedStats end
function expected_stats(sf::Mixture{I,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
pis::NTuple{M,Dist},
child_lambda::Score{<:O}) where {I,O,N,M}
# The statistics organize mutually exclusive cases.
# Each case consists of a component i, a parent value p, and a child value c,
# and represents the #(i selected, c | p, evidence).
# This is equal to P(i selected | p, evidence) #(c | i selected, p, evidence).
# P(i selected | p, evidence) is proportional to m.probabilities[i] * \sum_c #(c | i selected, p, evidence).
# P(C | i selected, p, evidence) is equal to expected_stats(m.components[i], same arguments...)
compstats = [expected_stats(comp, range, parranges, pis, child_lambda) for comp in sf.components]
# This is bad. It assumes stats is a Dict, which is okay for tables
# summed = [Dict((k,sum(v)) for (k,v) in stats) for stats in compstats]
summed = [sum(sum(v) for v in values(stats)) for stats in compstats]
pselected = sf.probabilities .* summed
# return [mult_through(compstats[i], pselected[i]) for i in 1:length(m.components)]
return [mult_through(compstats[i], sf.probabilities[i]) for i in 1:length(sf.components)]
end
end
@impl begin
struct MixtureMaximizeStats end
function maximize_stats(sf::Mixture, stats)
probparams = normalize([sum(sum(values(st))) for st in stats])
compparams = [maximize_stats(sf.components[i], stats[i]) for i in 1:length(sf.components)]
return (probparams, compparams...)
end
end
# END STATS
@impl begin
struct MixtureSupportQuality end
function support_quality(sf::Mixture{I,O}, parranges) where {I,O}
q = support_quality_rank(:CompleteSupport)
for comp in sf.components
imp = get_imp(MultiInterface.get_policy(), Support, sf, parranges, 0, O[])
q = min(q, support_quality_rank(support_quality(imp, comp, parranges)))
end
return support_quality_from_rank(q)
end
end
@impl begin
mutable struct MixtureMakeFactors
numpartitions::Dict{SFunc, Int64} = Dict{SFunc, Int64}()
end
function make_factors(sf::Mixture{I,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
id,
parids::Tuple)::Tuple{Vector{<:Scruff.Utils.Factor}, Vector{<:Scruff.Utils.Factor}} where {I,O,N}
lfactors = Vector{Scruff.Utils.Factor}()
ufactors = Vector{Scruff.Utils.Factor}()
numcomps = length(sf.components)
mixkey = nextkey()
for (i,comp) in enumerate(sf.components)
(lcompfactors, ucompfactors) = make_factors(comp, range, parranges, id, parids)
function process(factors, target)
for fact in factors
dims = [d for d in fact.dims]
push!(dims, numcomps)
dims = Tuple(dims)
keys = [k for k in fact.keys]
push!(keys, mixkey)
keys = Tuple(keys)
entries = Float64[]
for e in fact.entries
for j = 1:numcomps
push!(entries, i == j ? e : 1.0) # 1.0 means irrelevant
end
end
relevantfact = Factor(dims, keys, entries)
push!(target, relevantfact)
end
end
process(lcompfactors, lfactors)
process(ucompfactors, ufactors)
end
mixdims = (numcomps,)
mixkeys = (mixkey,)
mixentries = sf.probabilities
mixfact = Factor(mixdims, mixkeys, mixentries)
push!(lfactors, mixfact)
push!(ufactors, mixfact)
return (lfactors, ufactors)
end
end
@impl begin
struct MixtureComputePi end
function compute_pi(sf::Mixture{I,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple)::Dist{<:O} where {N,I,O}
function f(i)
cp = compute_pi(sf.components[i], range, parranges, incoming_pis)
sf.probabilities[i] .* [cpdf(cp, (), x) for x in range]
end
scaled = [f(i) for i in 1:length(sf.components)]
result = sum(scaled)
return Cat(range, normalize(result))
end
end
@impl begin
struct MixtureSendLambda end
function send_lambda(sf::Mixture{I,O},
lambda::Score{<:O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple,
parent_ix::Integer)::Score where {N,I,O}
# Need to make sure the target parent range is a Vector{T} rather than a Vector{Any}
T = typejoin([typeof(x) for x in parranges[parent_ix]]...)
target_parrange :: Vector{T} = parranges[parent_ix]
lams = [send_lambda(comp, lambda, range, parranges, incoming_pis, parent_ix) for comp in sf.components]
scores = [[get_score(lams[j], target_parrange[i]) for i in 1:length(target_parrange)] for j in 1:length(sf.components)]
scaled = sf.probabilities .* scores
result = zeros(Float64, length(target_parrange))
for sc in scaled
result .+= sc
end
return SoftScore(target_parrange, result)
end
end
# This does not seem to fit
@impl begin
struct MixtureSample end
function sample(sf::Mixture{I,O}, x::I)::O where {I,O}
probs = sf.probabilities/sum(sf.probabilities)
cat = Distributions.Categorical(probs)
which_component = rand(cat)
component = sf.components[which_component]
return sample(component, x)
end
end
@impl begin
struct MixtureCpdf end
function cpdf(sf::Mixture{I,O}, i::I, o::O)::AbstractFloat where {I,O}
complpdf = [cpdf(comp, i, o) for comp in sf.components]
probs = sf.probabilities/sum(sf.probabilities)
return sum(probs .* complpdf)
end
end
@impl begin
struct MixtureExpectation end
function expectation(sf::Mixture{I,O}, x::I)::O where {I,O}
# The expectation of a mixture is the probability-weighted sum of the
# component expectations; sampling a single component would only give a
# stochastic estimate.
probs = sf.probabilities/sum(sf.probabilities)
return sum(probs .* [expectation(comp, x) for comp in sf.components])
end
end
export NetworkInput,
NetworkSFunc
struct NetworkInput{T} end
tupler(x::Array) = length(x) == 1 ? x[1] : tuple(x...)
tupler(x::Tuple) = length(x) == 1 ? x[1] : x
tupler(x) = x
get_place_type(p::NetworkInput{T}) where T = T
"""
struct NetworkSFunc{I,O} <: SFunc{I,O}
An sfunc that combines multiple sfuncs in a network structure.
# Arguments
input_placeholders A vector of placeholders indicating the types of network inputs. The type parameter `I` is computed from these.
sfuncs The sfuncs to combine.
parents A `Dict` that maps sfuncs to their parent sfuncs. Note that this parallels networks,
except that we are mapping sfuncs to lists of sfuncs directly rather than variables to vectors of variables.
output A vector of output sfuncs, determining the `O` type parameter.
# Additional supported operators
- `sample`
- `sample_logcpdf`
- `logcpdf`
"""
struct NetworkSFunc{I,O} <: SFunc{I,O}
input_placeholders::NTuple{N,NetworkInput} where N
sfuncs::NTuple{N,SFunc} where N
parents::Dict{SFunc,Vector}
outputs::Tuple
"""
function NetworkSFunc(input_placeholders, sfuncs, parents, outputs)
TODO
"""
function NetworkSFunc(input_placeholders,
sfuncs,
parents,
outputs)
in_types = [get_place_type(placeholder) for placeholder in input_placeholders]
# I = length(in_types) == 1 ? in_types[1] : Tuple{in_types...}
I = Tuple{in_types...}
out_types = [output_type(sf) for sf in outputs]
O = length(out_types) == 1 ? out_types[1] : Tuple{out_types...}
# TODO (MRH): Validate types of parents
# TODO (MRH): Propagate param types of constituent SFuncs to type parameter P
return new{I,O}(input_placeholders,
sfuncs,
parents,
outputs)
end
end
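# Illustrative sketch (assumes the `LinearGaussian` sfunc used elsewhere in
# this codebase): one input feeding two chained sfuncs, with the last one
# exposed as the output.
#
#     inp = NetworkInput{Float64}()
#     f = LinearGaussian((1.0,), 0.0, 1.0)
#     g = LinearGaussian((2.0,), 1.0, 1.0)
#     parents = Dict{SFunc,Vector}(f => [inp], g => [f])
#     net = NetworkSFunc((inp,), (f, g), parents, (g,))
#     sample(net, (0.0,))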
@impl begin
struct NetworkSFuncSample end
function sample(sf::NetworkSFunc{I,O}, input::I)::O where {I,O}
network = sf
sample_cache = Dict{Union{SFunc,NetworkInput},Any}(s_inp => walk_inp for (s_inp, walk_inp) in zip(network.input_placeholders, input))
# Assume network.funcs is topologically sorted. TODO?
for sfunc in network.sfuncs
sample_cache[sfunc] = sample(sfunc, Tuple([sample_cache[sinp] for sinp in network.parents[sfunc]]))
end
s = [sample_cache[o] for o in network.outputs]
return tupler(s)
end
end
@impl begin
struct NetworkSFuncSampleLogcpdf end
function sample_logcpdf(sf::NetworkSFunc{I,O}, input::I) where {I,O}
network = sf
sample_cache = Dict{Union{SFunc,NetworkInput},Any}(s_inp => (walk_inp, 0.0) for (s_inp, walk_inp) in zip(network.input_placeholders, input))
# Assume network.funcs is topologically sorted. TODO?
for sfunc in network.sfuncs
par_samples = [sample_cache[sinp][1] for sinp in network.parents[sfunc]]
this_sample, logcpdf = sample_logcpdf(sfunc, Tuple(par_samples))
logcpdf = logcpdf + sum([sample_cache[sinp][2] for sinp in network.parents[sfunc]])
sample_cache[sfunc] = (this_sample, logcpdf)
end
joint_sample = tuple([sample_cache[o][1] for o in network.outputs]...)
joint_logcpdf = sum([sample_cache[o][2] for o in network.outputs])
return (tupler(joint_sample), joint_logcpdf)
end
end
@impl begin
struct NetworkSFuncLogcpdf end
function logcpdf(sf::NetworkSFunc{I, O}, input::I, output::O) where {I, O}
# I think this returns a sample x of a distribution s.t. log(Expectation(exp(x))) gives the logcpdf
# In the special case where all NetworkSFunc nodes are in outputs then the calculation is deterministic
sample_cache = Dict{Union{SFunc, NetworkInput}, Any}((s_inp => (walk_inp, 0.0) for (s_inp, walk_inp) in zip(sf.input_placeholders, input))...,
(s_out => (walk_out, nothing) for (s_out, walk_out) in zip(sf.outputs, output))...)
# Assume network.funcs is topologically sorted. TODO?
# Is this even right? Seems neat
for sfunc in sf.sfuncs
if haskey(sample_cache, sfunc)
par_samples = [sample_cache[sinp][1] for sinp in sf.parents[sfunc]]
cumlogcpdf = logcpdf(sfunc, Tuple(par_samples), sample_cache[sfunc][1]) +
sum([sample_cache[sinp][2] for sinp in sf.parents[sfunc]])
sample_cache[sfunc] = (sample_cache[sfunc][1], cumlogcpdf)
else
par_samples = [sample_cache[sinp][1] for sinp in sf.parents[sfunc]]
this_sample = sample(sfunc, Tuple(par_samples))
cumlogcpdf = sum([sample_cache[sinp][2] for sinp in sf.parents[sfunc]])
sample_cache[sfunc] = (this_sample, cumlogcpdf)
end
end
return sum([sample_cache[o][2] for o in sf.outputs])
end
end
export
Serial
"""
struct Serial{I,O} <: SFunc{I,O}
A sequence of sfuncs in series.
Although this could be implemented as a special case of NetworkSFunc,
the serial composition allows an easier and more efficient implementation of operations.
All but the first sfunc in the sequence will have a single input; the output of each
sfunc feeds into the input of the next sfunc.
To work properly, most of the operations on `Serial` need the support of the intermediate
sfuncs, given an input range. Rather than compute this each time, and to avoid having
the non-support operations take a size argument, support is memoized, and must be called
before other operations like logcpdf are called. The `support_memo` is a dictionary whose
keys are tuples of parent ranges and whose values are the support computed for those
parent ranges, along with the target size for which they were computed.
Storing the target size enables refinement algorithms that increase the size and improve
the support.
# Additional supported operators
- `support`
- `support_quality`
- `sample`
- `cpdf`
- `bounded_probs`
- `make_factors`
- `compute_pi`
- `send_lambda`
# Type parameters
- I the input type of the first sfunc
- O the output type of the last sfunc
"""
struct Serial{I,O} <: SFunc{I,O}
components :: NTuple{N,SFunc} where N
support_memo :: Dict
Serial(I,O,sfuncs) = new{I,O}(sfuncs, Dict())
end
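# Illustrative sketch (assumes the `LinearGaussian` sfunc used elsewhere in
# this codebase): compose two sfuncs so the first's output feeds the second.
#
#     s = Serial(Tuple{Float64}, Float64,
#                (LinearGaussian((1.0,), 0.0, 1.0), LinearGaussian((2.0,), 1.0, 1.0)))
#     sample(s, (0.5,))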
@impl begin
struct SerialSupport end
function support(sf::Serial{I,O}, parranges, size, curr) where {I,O}
if parranges in keys(sf.support_memo)
(sup,sz) = sf.support_memo[parranges]
if sz >= size
return last(sup)
end
end
# Adequate support has not been found - compute it now
prs = parranges
compsups = Vector[]
for component in sf.components
sup = support(component, prs, size, output_type(component)[])
push!(compsups, sup)
prs = (sup,)
end
for c in curr
if !(c in sup)
push!(sup, c)
end
end
sf.support_memo[parranges] = (tuple(compsups...), size)
return sup
end
end
@impl begin
struct SerialSupportQuality end
function support_quality(sf::Serial{I,O}, parranges) where {I,O}
if !(parranges in keys(sf.support_memo))
error("Support must be called before support_quality for Serial")
end
compsups = sf.support_memo[parranges][1]
rank = support_quality_rank(:CompleteSupport)
prs = parranges
for (comp,sup) in zip(sf.components, compsups)
q = support_quality_rank(support_quality(comp, prs))
rank = min(rank, q)
prs = (sup,)
end
return support_quality_from_rank(rank)
end
end
@impl begin
struct SerialSample end
function sample(sf::Serial{I,O}, i::I) where {I,O}
x = i
for component in sf.components
x = (sample(component, x),)
end
return x[1]
end
end
function _checksup(sf, i)
for k in keys(sf.support_memo)
if all([i[j] in k[j] for j in 1:length(k)])
return sf.support_memo[k][1]
end
end
return nothing
end
@impl begin
struct SerialCpdf end
function cpdf(sf::Serial{I,O}, i::I, o::O) where {I,O}
compsups = _checksup(sf, i)
if isnothing(compsups)
error("No support found for parent values in logcpdf for Serial")
end
us = [i]
pis = [1.0]
n = length(sf.components)
for j in 1:n-1
comp = sf.components[j]
sup = compsups[j]
newpis = zeros(Float64, length(sup))
for (u,pi) in zip(us,pis)
for l in 1:length(sup)
x = sup[l]
newpis[l] += cpdf(comp, u, x) * pi
end
end
us = [(x,) for x in sup]
pis = newpis
end
result = 0.0
finalcomp = sf.components[n]
for (u, pi) in zip(us,pis)
result += cpdf(finalcomp, u, o) * pi
end
return result
end
end
@impl begin
struct SerialBoundedProbs end
function bounded_probs(sf::Serial{I,O}, range::Vector{<:O},
parranges) where {I,O}
probs = Float64[]
combos = cartesian_product(parranges)
for i in combos
for o in range
push!(probs, cpdf(sf, tuple(i...), o))
end
end
return (probs, probs)
end
end
@impl begin
struct SerialMakeFactors end
function make_factors(sf::Serial{I,O}, range::Vector{<:O}, parranges,
id, parids) where {I,O}
if !(parranges in keys(sf.support_memo))
error("Support must be called before make_factors for Serial")
end
compsups = sf.support_memo[parranges][1]
inids = parids
outid = nextkey()
prs = parranges
lowers = Factor[]
uppers = Factor[]
for i in 1:length(sf.components)
outid = i == length(sf.components) ? id : nextkey()
(ls, us) = make_factors(sf.components[i], compsups[i], prs, outid, inids)
inids = (outid,)
prs = (compsups[i],)
append!(lowers, ls)
append!(uppers, us)
end
return(lowers, uppers)
end
end
@impl begin
struct SerialComputePi end
function compute_pi(sf::Serial{I,O}, range::Vector{<:O}, parranges,
incoming_pis) where {I,O}
ps = zeros(Float64, length(range))
combos = cartesian_product(parranges)
m = length(parranges)
for combo in combos
parpis = [cpdf(incoming_pis[i], (), combo[i]) for i in 1:m]
ipi = reduce(*, parpis)
for (j,o) in enumerate(range)
ps[j] += cpdf(sf, tuple(combo...), o) * ipi
end
end
return Cat(range, ps)
end
end
function _incpis(components, supports, parranges, incoming_pis)
incpis = Tuple[incoming_pis]
prs = parranges
for i in 1:length(components)-1
comp = components[i]
rng = supports[i]
pi = compute_pi(comp, rng, prs, incpis[i])
push!(incpis, (pi,))
prs = (rng,)
end
return incpis
end
function _lambdas(components, supports, lambda, incpis)
lambdas = Score[lambda]
lam = lambda
for i = length(components):-1:2
comp = components[i]
rng = supports[i]
prs = (supports[i-1],)
ipis = incpis[i]
lam = send_lambda(comp, lam, rng, prs, ipis, 1)
pushfirst!(lambdas, lam)
end
return lambdas
end
@impl begin
struct SerialSendLambda end
function send_lambda(sf::Serial{I,O}, lambda, range, parranges, incoming_pis,
parent_idx) where {I,O}
if !(parranges in keys(sf.support_memo))
error("Support must be called before send_lambda for Serial")
end
compsups = sf.support_memo[parranges][1]
incpis = _incpis(sf.components, compsups, parranges, incoming_pis)
lambdas = _lambdas(sf.components, compsups, lambda, incpis)
return send_lambda(sf.components[1], lambdas[1], compsups[1],
parranges, incoming_pis, parent_idx)
end
end
# STATS
@impl begin
struct SerialInitialStats end
initial_stats(sf::Serial) = map(initial_stats, sf.components)
end
@impl begin
struct SerialAccumulateStats end
accumulate_stats(sf::Serial, existing_stats, new_stats) =
[accumulate_stats(sf.components[i], existing_stats[i], new_stats[i])
for i in 1:length(sf.components)]
end
@impl begin
struct SerialExpectedStats end
function expected_stats(sf::Serial, range, parranges, incoming_pis, child_lambda)
if !(parranges in keys(sf.support_memo))
error("Support must be called before expected_stats for Serial")
end
compsups = sf.support_memo[parranges][1]
incpis = _incpis(sf.components, compsups, parranges, incoming_pis)
lambdas = _lambdas(sf.components, compsups, child_lambda, incpis)
stats = Any[]
prs = parranges
for i in 1:length(sf.components)
comp = sf.components[i]
sup = compsups[i]
ipi = incpis[i]
lam = lambdas[i]
push!(stats, expected_stats(comp, sup, prs, ipi, lam))
prs = (sup,)
end
return tuple(stats...)
end
end
@impl begin
struct SerialMaximizeStats end
function maximize_stats(sf::Serial, stats)
ps = [maximize_stats(sf.components[i], stats[i]) for i in 1:length(sf.components)]
return tuple(ps...)
end
end
# STATS END
struct SumSF{I, O, SFs <: NTuple{<:Number, <: SFunc{I, O}}} <: SFunc{I, O}
sfs::SFs
end
@impl begin
function sumsfs(fs::NTuple{N, <:SFunc}) where {N}
# Return an SFunc representing g(x) = f1(x) + f2(x) + ...
# I.e. convolution of the respective densities
return SumSF(fs)
end
end
@impl begin
function sample(sf::SumSF, x)
return sum(sample(sub_sf, x) for sub_sf in sf.sfs)
end
end
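# Example (a minimal sketch): `sumsfs` on generic sfuncs returns a `SumSF`,
# whose sample is the sum of one draw from each component. Using the Constant
# sfunc defined elsewhere in this package:
#   s = sumsfs((Constant(2), Constant(3)))
#   sample(s, ())  # == 5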
export
CLG
"""
CLG(paramdict::Dict)
Constructs an sfunc representing a conditional linear Gaussian (CLG). These sfuncs may have
both discrete and continuous parents. For each combination of discrete parents, there is
a `LinearGaussian` that depends on the continuous parents.
`CLG`s are implemented as a `Table` with a `LinearGaussian`.
The `paramdict` parameter defines the discrete and continuous parents, and the linear
gaussean values where the length of a key is the count of the discrete inputs, the
length of the tuple in a value is the count of continuous inputs, and the rest of the
values are used to build the parameters for `CLG` itself. For example,
```
Dict((:x,1) => ((-1.0, 1.0, 2.0), 3.0, 1.0),
(:x,2) => ((-2.0, 4.0, 2.0), 3.0, 1.0),
(:x,3) => ((-3.0, 2.0, 2.0), 3.0, 1.0),
(:y,1) => ((-4.0, 5.0, 2.0), 3.0, 1.0),
(:y,2) => ((-5.0, 3.0, 2.0), 3.0, 1.0),
(:y,3) => ((-6.0, 6.0, 2.0), 3.0, 1.0))
```
- the keys define two(2) discrete parents, with values `[:x,:y]` and `[1,2,3]`
- in the values, the first tuple defines three(3) continuous parents for each
underlying `LinearGaussian`, with values `-1.0:-6.0`, `1.0:6.0`, and `2.0`
- the values `3.0` and `1.0` are the bias and standard deviation of the underlying `LinearGaussian`
See also: [`Table`](@ref), [`LinearGaussian`](@ref)
"""
function CLG(paramdict::Dict)
numdiscreteinputs = length(collect(keys(paramdict))[1])
numcontinuousinputs = length(collect(values(paramdict))[1][1])
sfmaker(weights) = LinearGaussian(weights[1], weights[2], weights[3])
return Table(NTuple{numcontinuousinputs, Float64}, Float64, numdiscreteinputs, paramdict, sfmaker)
end
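# Example (a sketch): one discrete parent over [:x, :y] and one continuous parent;
# each value is ((weights...,), bias, sd) for the underlying LinearGaussian.
#   clg = CLG(Dict((:x,) => ((1.0,), 0.0, 1.0),
#                  (:y,) => ((2.0,), 0.5, 1.0)))
#   sample(clg, (:y, 3.0))  # draws from Normal(2.0 * 3.0 + 0.5, 1.0)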
#=
This representation is deprecated, but there may be useful implementation details.
# TODO (MRH): Convert CLG constructor into a call to a method that builds a Tables as in make_CLG above
struct CLG{NumDiscreteInputs, NumContinuousInputs, O} <:
SFunc{Tuple{NTuple{NumDiscreteInputs, O},
NTuple{NumContinuousInputs, Float64}},
Float64,
Tuple{}} # TODO (MRH): Params type
# each segment corresponds to one setting of the discrete parents
# and includes a linear weight for each continuous parent and a bias term
segments :: Dict{<: Array{O, N} where N, <: Tuple{Array{Float64, N} where N, Float64}}
variance :: Float64
function CLG(num_discrete_inputs, num_continous_inputs, seg, var)
T = typeof(seg).parameters[1].parameters[1]
new{num_discrete_inputs, num_continous_inputs, T}(seg, var)
end
end
#############################
# #
# Helper functions for CLGs #
# #
#############################
# Return all 2-tuples of discrete parent combinations and continuous
# parent combinations
function get_parent_combos(:: CLG{M,N}, parent_ranges) where {M,N}
discrete_ranges = Array{Array{Any, 1}, 1}(parent_ranges[1:M])
continuous_ranges = Array{Array{Any, 1}, 1}(parent_ranges[M+1:M+N])
if isempty(discrete_ranges)
discrete_combos = [[]]
else
discrete_combos = cartesian_product(discrete_ranges)
end
if isempty(continuous_ranges)
continuous_combos = [[]]
else
continuous_combos = cartesian_product(continuous_ranges)
end
l = Array{Array{Any,1},1}([discrete_combos, continuous_combos])
return map(a -> Tuple(a), cartesian_product(l))
end
function overlaps(int1, int2) :: Bool
(l1, u1) = int1
(l2, u2) = int2
return l1 <= l2 && u1 >= l2 || l1 <= u2 && u1 >= u2
end
# Return the minimum and maximum probabilities of a normal distribution
# over an interval, when the mean is bounded and the variance is given.
function minmaxprob(lower, upper, lower_mean, upper_mean, variance)
if lower == -Inf || upper == Inf return (0,1) end
# The point can be anywhere between lower and upper
# The mean can be anywhere between lower_mean and upper_mean
# We need to find the minimum and maximum possible density
# To find this, we compute the minimum and maximum possible distances
# from the point to the mean.
# If the point interval overlaps the mean interval, the minimum possible
# distance is zero.
# Otherwise the minimum possible distance is the min distance between
# mean endpoints and interval endpoints.
# The maximum possible distance is always the max distance between mean
# endpoints and interval endpoints.
d1 = abs(lower_mean - lower)
d2 = abs(upper_mean - lower)
d3 = abs(lower_mean - upper)
d4 = abs(upper_mean - upper)
dmin = overlaps((lower, upper), (lower_mean, upper_mean)) ? 0 :
max(min(d1, d2, d3, d4), 0)
dmax = max(d1, d2, d3, d4)
densmin = normal_density(dmax, 0, variance)
densmax = normal_density(dmin, 0, variance)
diff = upper - lower
pmin = min(max(densmin * diff, 0), 1)
pmax = min(max(densmax * diff, 0), 1)
return (pmin, pmax)
end
# Compute numerical lower and upper bounds on the density by partitioning
# the interval into num_partitions and computing bounds in each partition
function numerical_bounds(lower, upper, lmean, umean,
num_partitions, variance)
if lower == -Inf || upper == Inf
return (0,1)
end
start = lower
step = (upper - lower) / num_partitions
l = 0.0
u = 0.0
for i = 1:num_partitions
(x,y) = minmaxprob(start, start + step, lmean, umean, variance)
l += x
u += y
start += step
end
return (l,u)
end
#= ============================================================
Operators
=============================================================== =#
function make_range(sf :: CLG, parranges, size :: Int)
sd = sqrt(sf.variance)
parent_combos = get_parent_combos(sf, parranges)
# Prepare for accumulating the list of candidates by determining
# whether the mean should be included for each segment and the number
# of candidates around the mean. We choose the number of values per
# segment so that the total is approximately the number of values desired.
values_per_segment = max(div(size, length(parent_combos)), 1)
if mod(values_per_segment, 2) == 1
use_mean = true
values_per_segment -= 1
else
use_mean = false
end
pairs_per_segment = values_per_segment / 2
# Get candidates for including in the range
# For each combinatiuon of parent values, we get the segment according
# to the discrete values and the mean according to the continuous values.
# We then spread candidates around the mean according to the standard
# deviation.
candidates = []
for (discrete_combo, continuous_combo) in parent_combos
(weights, bias) = sf.segments[discrete_combo]
mean = linear_value(weights, bias, continuous_combo)
if use_mean
push!(candidates, mean)
for i = 1:pairs_per_segment
push!(candidates, mean - i * sd)
push!(candidates, mean + i * sd)
end
else
for i = 1:pairs_per_segment
push!(candidates, mean - (i - 0.5) * sd)
push!(candidates, mean + (i - 0.5) * sd)
end
end
end
sort!(candidates)
# Select size of the candidates evenly spread
skip = div(length(candidates), size)
position = div(mod(length(candidates), size), 2) + 1
result = []
for i = 1:size
push!(result, candidates[position])
position += skip
end
return result
end
@op_impl begin
struct CLGSupport{NumDiscreteInputs, NumContinuousInputs, O} <: Support{CLG{NumDiscreteInputs, NumContinuousInputs, O}} end
function support((parranges, size, curr))
if isempty(parranges)
return nothing
end
old_size = length(curr)
if old_size >= size
return curr
end
result = make_range(sf, parranges, size)
# We must make sure that any value in the current range is kept
# so we replace the closest element in the new range with an element
# from the current range
i = 1
j = 1
is_current = fill(false, size)
while i <= old_size && j <= size
if curr[i] < result[j]
if j == 1 || is_current[j-1] ||
result[j] - curr[i] < curr[i] - result[j-1]
result[j] = curr[i]
is_current[j] = true
i += 1
j += 1
else
result[j-1] = curr[i]
is_current[j-1] = true
i += 1
end
else
j += 1
end
end
if i <= old_size
tail = old_size - i
result[size-tail:new_size] = curr[old_size-tail:old_size]
end
return result
end
end
# We bound the integral under the density by dividing each interval of the
# range into num_partitions partitions and computing the lower and upper
# bound in each partition. However, this is complicated by the fact that
# we don't know the density function because we only have ranges for the
# parents.
@op_impl begin
mutable struct CLGBoundedProbs <: BoundedProbs{CLG}
numpartitions::Int64 = 100
end
function bounded_probs((range, parrange))
vec = typeof(sf).parameters
M = vec[1]
N = vec[2]
intervals = make_intervals(range)
prs = Array{Array{Union{Symbol, Tuple{Float64, Float64}}, 1}, 1}(undef, M+N)
for i = 1:M
prs[i] = parrange[i]
end
for i = M+1:M+N
prs[i] = make_intervals(parrange[i])
end
parent_combos = get_parent_combos(sf, prs)
# Normally, the length of the range and the number of intervals is the same
# However, for an empty range, there is still one unbounded interval
if isempty(range)
lower = Array{Float64}(undef, length(parent_combos))
upper = Array{Float64}(undef, length(parent_combos))
else
lower = Array{Float64}(undef, length(parent_combos) * length(range))
upper = Array{Float64}(undef, length(parent_combos) * length(range))
end
pos = 1
for (discrete_combo, continuous_combo) in parent_combos
(weights, bias) = sf.segments[discrete_combo]
(lmean, umean) = bounded_linear_value(weights, bias, continuous_combo)
ls = []
us = []
for interval in intervals
(il, iu) = interval
(l,u) = numerical_bounds(il, iu, lmean, umean,
op_impl.numpartitions, sf.variance)
push!(ls, l)
push!(us, u)
end
# We get better bounds by considering the bounds on other intervals.
# This is especially important for intervals with -Inf or Inf as an
# endpoint.
for i = 1:length(intervals)
otherls = 0.0
otherus = 0.0
for j = 1:i-1
otherls += ls[j]
otherus += us[j]
end
for j = i+1:length(intervals)
otherls += ls[j]
otherus += us[j]
end
l = max(ls[i], 1 - otherus)
u = min(us[i], 1 - otherls)
lower[pos] = l
upper[pos] = u
pos += 1
end
end
return (lower, upper)
end
end
=#
include("conditional.jl")
include("det.jl")
include("invertible.jl")
include("table.jl")
include("discretecpt.jl")
include("lineargaussian.jl")
include("CLG.jl")
include("separable.jl")
include("switch.jl")
export
Conditional,
extend_tuple_type
using Folds
using StatsFuns
@inline tuplejoin(x, y, z...) = (x..., tuplejoin(y, z...)...)
tuplejoin(t::Tuple) = t
"""
extend_tuple_type(T1, T2)
Given two types `T1` and `T2`, concatenate the types into a single
tuple type.
# Arguments
- `T1`: Any type
- `T2`: A tuple type
# Returns
- If `T1` is a tuple type, a tuple with the concatenation of the types in `T1` and `T2`
- If `T1` is not a tuple type, a tuple with `T1` prepended to the types in `T2`
# Examples
```
julia> extend_tuple_type(Int64, Tuple{Float64})
Tuple{Int64, Float64}
julia> extend_tuple_type(Tuple{Int64}, Tuple{Float64})
Tuple{Int64, Float64}
julia> extend_tuple_type(Tuple{Vector{Float64}}, Tuple{Symbol,Symbol})
Tuple{Vector{Float64}, Symbol, Symbol}
```
"""
function extend_tuple_type(T1, T2)
if T1 <: Tuple
Tuple{tuplejoin(fieldtypes(T1), fieldtypes(T2))...}
else
Tuple{T1, fieldtypes(T2)...}
end
end
"""
abstract type Conditional{I, J <: Tuple, K <: Tuple, O, S <: SFunc{J, O}} <: SFunc{K, O}
`Conditional` *sfuncs* represent the generation of an sfunc depending on the values of parents. A
subtype of `Conditional` must provide a `gensf` method that takes an `I` and returns an
`SFunc{J,O}` (**important**: the generated SFunc must not appear outside the Conditional;
it should not be a parent).
# Additional supported operators
- `support`
- `support_quality`
- `sample`
- `logcpdf`
- `make_factors`
- `compute_pi`
- `send_lambda`
# Type parameters
- `I`: the type of data used to generate the `Conditional`'s *sfunc*
- `J`: a tuple that represents the input types (the `I`) of the `Conditional`'s generated *sfunc*
- `K`: the input types of the `Conditional`; this is a tuple of types constructed from `I`,
and `J` using `extend_tuple_type`
- `O`: the output type(s) of both the `Conditional` and the `Conditional`'s generated *sfunc*
- `S`: the type of the `Conditional`'s generated *sfunc*
"""
abstract type Conditional{I, J <: Tuple, K <: Tuple, O, S <: SFunc{J, O}} <: SFunc{K, O}
end
function split_pars(::Conditional{I}, pars) where {I}
if I <: Tuple
n1 = length(fieldnames(I))
(Tuple(pars[1:n1]), Tuple(pars[n1+1:length(pars)]))
else
(pars[1], Tuple(pars[2:length(pars)]))
end
end
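# Example (a minimal sketch of a Conditional subtype; `NoisyCopy` is hypothetical,
# and `Normal` is the sfunc defined elsewhere in this package). Only `gensf` is
# required; here the child is normally distributed around its single parent:
#   struct NoisyCopy <: Conditional{Tuple{Float64}, Tuple{}, Tuple{Float64}, Float64, Normal{Float64}}
#       sd::Float64
#   end
#   gensf(c::NoisyCopy, parent::Tuple{Float64}) = Normal(parent[1], c.sd)
#   sample(NoisyCopy(0.1), (5.0,))  # draws from Normal(5.0, 0.1)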
@impl begin
struct ConditionalSupport end
function support(sf::Conditional{I,J,K,O},
parranges::NTuple{N,Vector},
size::Integer,
curr::Vector{<:O}) where {I,J,K,O,N}
(iranges, jranges) = split_pars(sf, parranges)
irgs::Array{Array{Any, 1}, 1} = [r for r in iranges]
isrange = cartesian_product(irgs)
rng::Vector = isempty(curr) ? Vector{O}() : copy(curr)
# inc = Int(ceil(size / length(isrange)))
allcombranges= Dict{Any, Vector{O}}()
# create complete range regardless of the size
for is in isrange
gsf = gensf(sf,tuple(is...))
newrng = copy(support(gsf, jranges, size, curr))
allcombranges[is] = unique(newrng)
end
# create range by mixing values from ranges created by each combination of parents values up to size
allrng = collect(Iterators.flatten(values(allcombranges)))
max_size = length(unique(allrng))
while length(rng) < min(size, max_size)
for is in isrange
if(length(allcombranges[is]) > 0)
val = popfirst!(allcombranges[is])
push!(rng, val)
end
end
rng = unique(rng)
end
return isempty(rng) ? O[] : rng
end
end
# STATS
@impl begin
struct ConditionalInitialStats end
function initial_stats(::Conditional{I}) where {I}
Dict{I,Any}()
end
end
@impl begin
struct ConditionalAccumulateStats end
function accumulate_stats(cond::Conditional, existing_stats, new_stats)
if isnothing(new_stats) return existing_stats end
result = copy(existing_stats)
for (i,stat) in new_stats
sf = gensf(cond, i)
exist = get(result, i, initial_stats(sf))
next = accumulate_stats(sf, exist, stat)
result[i] = next
end
return result
end
end
@impl begin
struct ConditionalExpectedStats end
function expected_stats(cond::Conditional,
range::Vector{O},
parranges::NTuple{N,Vector},
parent_pis::NTuple{M,Dist},
lambda::Score{<:O}) where {O,N,M}
(iranges, jranges) = split_pars(cond, parranges)
iinds = cartesian_product(collect(map(r -> collect(1:length(r)), iranges)))
(ipis, jpis) = split_pars(cond, parent_pis)
result = Dict()
for iind in iinds
ival = [iranges[r][iind[r]] for r in 1:length(iranges)]
sf = gensf(cond, tuple(ival...))
ipi = reduce(*, (bounded_probs(ipis[r], [ival[r]], ())[1][1] for r in 1:length(iranges)))  # score the parent values, not their indices
innerstat = expected_stats(sf, range, jranges, jpis, lambda)
result[tuple(ival...)] = mult_through(innerstat, ipi)
end
return result
end
end
@impl begin
struct ConditionalMaximizeStats end
function maximize_stats(cond::Conditional, stats)
qs = Dict()
if !isnothing(stats)
for (i,st) in stats
sf = gensf(cond, i)
q = maximize_stats(sf, st)
qs[i] = q
end
end
result = do_maximize_stats(cond, qs)
return result # Must be written separately for each kind of Conditional
end
end
# STATS END
@impl begin
struct ConditionalSample end
function sample(sf::Conditional{I,J,K,O}, i::K)::O where {I,J,K,O}
(ivals, jvals) = split_pars(sf, i)
sfg = gensf(sf,ivals)
return sample(sfg, jvals)
end
end
@impl begin
struct ConditionalLogcpdf end
function logcpdf(sf::Conditional{I,J,K,O}, i::K, o::O) where {I,J,K,O}
(ivals, jvals) = split_pars(sf, i)
sfgen = gensf(sf,ivals)
return logcpdf(sfgen, jvals, o)
end
end
@impl begin
struct ConditionalSupportQuality end
function support_quality(sf::Conditional{I,J,K,O,S}, parranges) where {I,J,K,O,S}
q = support_quality_rank(:CompleteSupport)
(iranges, jranges) = split_pars(sf, parranges)
isrange = cartesian_product(iranges)
for is in isrange
gsf = gensf(sf,tuple(is...))
imp = get_imp(MultiInterface.get_policy(), Support, gsf, jranges, 0, O[])
q = min(q, support_quality_rank(support_quality(imp, gsf, jranges)))
end
return support_quality_from_rank(q)
end
end
@impl begin
mutable struct ConditionalMakeFactors
numpartitions::Int64 = 1
end
function make_factors(sf::Conditional{I,J,K,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
id,
parids::Tuple)::Tuple{Vector{<:Scruff.Utils.Factor}, Vector{<:Scruff.Utils.Factor}} where {I,J,K,O,N}
if any(isempty, parranges) # This is possible in lazy inference
# Just return an empty factor so as not to break
keys = (id,)
dims = (0,)
entries = Float64[]
fs = [Factor(dims, keys, entries)]
return (fs, fs)
end
# Here is a very general method for constructing factors for conditional sfuncs.
# We introduce a switch variable.
# For each of the generated sfuncs, we make its factors and extend them by saying
# they are only relevant when the switch value corresponds to that choice.
# We also add a factor saying that the switch variable takes on the appropriate value
# for each value of the i inputs.
# For this switch factor, we use another decomposition, relating the switch variable
# to each of the i inputs separately. This avoids a quadratic blowup and brings the
# complexity analogous to that of less general methods.
# This can be improved by making a special case where there is only one i parent,
# and returning a single factor.
(iranges, jranges) = split_pars(sf, parranges)
(iids, jids) = split_pars(sf, parids)
icombos = cartesian_product(iranges)
lfs = Factor[]
ufs = Factor[]
switchkey = nextkey()
switchsize = length(icombos)
for (switchval, ivals) in enumerate(icombos)
function extend(factor)
keys = [k for k in factor.keys]
push!(keys, switchkey)
keys = Tuple(keys)
dims = [d for d in factor.dims]
push!(dims, switchsize)
dims = Tuple(dims)
entries = Float64[]
for e in factor.entries
for k in 1:switchsize
# 1.0 is the irrelevant value when factors are multiplied
push!(entries, k == switchval ? e : 1.0)
end
end
result = Factor(dims, keys, entries)
return result
end
subsf = gensf(sf,tuple(ivals...))
(sublfs, subufs) = make_factors(subsf, range, jranges, id, jids)
append!(lfs, [extend(f) for f in sublfs])
append!(ufs, [extend(f) for f in subufs])
end
for i in 1:length(iranges)
switchfactorkeys = (parids[i], switchkey)
parsize = length(parranges[i])
switchfactordims = (parsize, switchsize)
switchfactorentries = Float64[]
for j in 1:parsize
for ivals in icombos
# The effect of this is that the product will only be 1 for all the corresponding
# i values.
push!(switchfactorentries, j == indexin([ivals[i]], parranges[i])[1] ? 1.0 : 0.0)
end
end
switchfact = Factor(switchfactordims, switchfactorkeys, switchfactorentries)
push!(lfs, switchfact)
push!(ufs, switchfact)
end
return (lfs, ufs)
end
end
@impl begin
struct ConditionalComputePi end
function compute_pi(sf::Conditional{I,J,K,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple)::Dist{<:O} where {N,I,J,K,O}
(iranges, jranges) = split_pars(sf, parranges)
# We need to correctly handle the case with duplicate parent values
# logcpdf below will get the full incoming pi for all duplicate values
irgs::Vector{Set{Any}} = [Set(r) for r in iranges]
icombos = cartesian_product(irgs)
(ipis, jpis) = split_pars(sf, incoming_pis)
result_pieces = [Vector{Float64}() for x in range]
for is in icombos
gsf = gensf(sf,tuple(is...))
ps = compute_pi(gsf, range, Tuple(jranges), jpis)
if (!isempty(iranges))
# ipi = sum([logcpdf(ipis[i], (), irgs[i][ind[i]]) for i in 1:length(iranges)])
ipi = sum([logcpdf(ipis[j], (), is[j]) for j in 1:length(iranges)])
else
ipi = 0.0 # log(1): with no discrete parents, the log-prior contribution is zero
end
for i in 1:length(range)
push!(result_pieces[i], ipi + logcpdf(ps, (), range[i]))
end
end
result = normalize([exp(StatsFuns.logsumexp(rps)) for rps in result_pieces])
return Cat(range, result)
end
end
@impl begin
struct ConditionalSendLambda end
function send_lambda(sf::Conditional{I,J,K,O},
lambda::Score{<:O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple,
parent_ix::Integer)::Score where {N,I,J,K,O}
(iranges, jranges) = split_pars(sf, parranges)
(ipis, jpis) = split_pars(sf, incoming_pis)
iar :: Array{Array{Int, 1}} = collect(map(r -> collect(1:length(r)), iranges))
iinds = cartesian_product(iar)
jar :: Array{Array{Int, 1}} = collect(map(r -> collect(1:length(r)), jranges))
jinds = cartesian_product(jar)
result = zeros(Float64, length(parranges[parent_ix]))
# Need to make sure the target parent range is a Vector{T} rather than a Vector{Any}
T = typejoin([typeof(x) for x in parranges[parent_ix]]...)
target_parrange :: Vector{T} = parranges[parent_ix]
if parent_ix <= length(iranges)
rs::Array{Array{Int, 1}, 1} = [collect(1:length(rg)) for rg in iranges]
deleteat!(rs, parent_ix)
restranges = cartesian_product(rs)
# We want to send a lambda message to the specific I input.
# We can ignore the pi message from this input.
# For each value of this input, we want to compute \sum_{i',j,o} \pi_J(j) P_ps.sf(o | j ; gen_params(ps, (i, i'))) \lambda(o).
for i = 1:length(iranges[parent_ix])
ival = iranges[parent_ix][i]
ls = Vector{Float64}()
for rest in restranges
ipi = 0
fullvals = []
for r = 1:parent_ix-1
ipi += logcpdf(ipis[r], (), parranges[r][rest[r]])
push!(fullvals, iranges[r][rest[r]])
end
push!(fullvals, ival)
for r = parent_ix+1:length(iranges)
ipi += logcpdf(ipis[r], (), parranges[r][rest[r-1]])
push!(fullvals, iranges[r][rest[r-1]])
end
gsf = gensf(sf,tuple(fullvals...))
for jind in jinds
jpi = 0
jval = []
for r = 1:length(jpis)
jpi += logcpdf(jpis[r], (), jranges[r][jind[r]])
push!(jval, jranges[r][jind[r]])
end
jvaltypes = [typeof(j) for j in jval]
tjval = convert(Vector{typejoin(jvaltypes...)}, jval)
pi = ipi + jpi
for o in range
push!(ls, pi + logcpdf(gsf, Tuple(tjval), o) + get_log_score(lambda, o))
end
end
end
result[i] = StatsFuns.logsumexp(ls)
end
elseif parent_ix <= length(parranges)
n = length(iranges)
jix = parent_ix - n
# We weight the lambda messages sent to the specific J parent by the pi message for the I parents
result_pieces = [Vector{Float64}() for x in target_parrange]
for iind in iinds
ival = [iranges[r][iind[r]] for r in 1:n]
gsf = gensf(sf,tuple(ival...))
ipi = sum(logcpdf(ipis[r], (), ival[r]) for r in 1:n)
lam = send_lambda(gsf, lambda, range, jranges, jpis, jix)
for i in 1:length(target_parrange)
push!(result_pieces[i], ipi + get_log_score(lam, parranges[parent_ix][i]))
end
end
result = [StatsFuns.logsumexp(rp) for rp in result_pieces]
end
return LogScore(target_parrange, result)
end
end
export
Det,
ExplicitDet
import StatsBase
import StatsFuns
"""
abstract type Det{I, O} <: SFunc{I, O}
`Det` defines an *sfunc* that represents a deterministic function `I -> O`. When
a `Det` is subtyped, a function `apply(d::Det, i::I)::O` or `apply(d::Det, i::I...)::O`
must also be implemented. It has no parameters.
# Additional supported operators
- `support`
- `support_quality`
- `sample`
- `logcpdf`
- `bounded_probs`
- `make_factors`
- `compute_pi`
- `send_lambda`
# Type parameters
- `I`: the input type(s) of the `Det`
- `O`: the output type(s) of the `Det`
"""
abstract type Det{I, O} <: SFunc{I, O} end
"""
struct ExplicitDet{I, O} <: Det{I, O}
`ExplicitDet` is a `Det` that contains a field `f::Function`. It also has an `apply` method
that simply delegates to the `ExplicitDet`'s function:
```
apply(d::ExplicitDet, i...) = d.f(i...)
```
```
julia> d = ExplicitDet{Tuple{Vararg{Real}},Real}(sum)
ExplicitDet{Tuple{Vararg{Real, N} where N}, Real}(sum)
```
"""
struct ExplicitDet{I, O} <: Det{I, O}
f :: Function
end
apply(d::ExplicitDet, i::Tuple) = d.f(i...)
apply(d::ExplicitDet, i...) = d.f(i...)
function Det(I, O, f)
return ExplicitDet{I,O}(f)
end
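# Example: a deterministic sfunc computing the sum of its two parents.
#   d = Det(Tuple{Int, Int}, Int, +)
#   sample(d, (2, 3))   # == 5
#   cpdf(d, (2, 3), 5)  # == 1.0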
@impl begin
struct DetSupport end
function support(sf::Det{I,O},
parranges::NTuple{N,Vector},
size::Integer,
curr::Vector{<:O}) where {I,O,N}
ps = cartesian_product(parranges)
result = unique([apply(sf, p...) for p in ps])
if (length(result) > size) # do downsampling but ensure that curr is included in the range
# include curr in the result, it is OK if it > size
if (!isempty(curr)) # curr is provided
curr = unique(curr)
curr_size= length(curr)
if(curr_size >= size)
result = curr
else
sample_size = size - curr_size # number of samples
result_without_curr = setdiff(result, curr) # what is being sampled
result = StatsBase.sample(result_without_curr, sample_size, replace = false) # samples
result = vcat(curr, result) # concatenate curr and samples
end
else # curr is not provided
result = StatsBase.sample(result, size, replace = false) # samples
end
end
sort!(result)
return result
end
end
@impl begin
struct DetSupportQuality end
function support_quality(::Det, parranges)
:CompleteSupport
end
end
@impl begin
struct DetSample end
function sample(sf::Det{I,O}, x::I)::O where {I,O}
if isa(x, Tuple)
apply(sf, x...)
else
apply(sf, x)
end
end
end
@impl begin
struct DetLogcpdf end
function logcpdf(sf::Det{I,O}, inp::I, o::O)::AbstractFloat where {I,O}
y = apply(sf, inp...)
return y == o ? 0.0 : -Inf
end
end
# We cannot define complete for Det in an abstract way, because it depends on whether the ranges of the parents
# is complete.
@impl begin
struct DetBoundedProbs end
function bounded_probs(sf::Det{I,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector})::
Tuple{Vector{<:AbstractFloat}, Vector{<:AbstractFloat}} where {I,O,N}
ps = cartesian_product(parranges)
result = Float64[]
for p in ps
x = apply(sf, p...)
for r in range
push!(result, x == r ? 1.0 : 0.0)
end
end
return (result, result)
end
end
@impl begin
struct DetMakeFactors end
function make_factors(sf::Det{I,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
id,
parids::Tuple)::Tuple{Vector{<:Scruff.Utils.Factor}, Vector{<:Scruff.Utils.Factor}} where {I,O,N}
entries = bounded_probs(sf, range, parranges)
keys = [i for i in parids]
push!(keys, id)
keys = Tuple(keys)
dims = [length(r) for r in parranges]
push!(dims, length(range))
dims = Tuple(dims)
factors = [Factor(dims, keys, entries[1])]
return (factors, factors)
end
end
@impl begin
struct DetComputePi end
function compute_pi(sf::Det{I,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple)::Dist{<:O} where {N,I,O}
pinds = cartesian_product([collect(1:length(r)) for r in parranges])
result = zeros(Float64, length(range))
result_pieces = [Vector{Float64}() for i in range]
prng = 1:length(parranges)
for pind in pinds
xs = [parranges[i][pind[i]] for i in prng]
x = apply(sf, xs...)
idx = indexin([x], range)[1]
if !isnothing(idx)
pi = sum([logcpdf(incoming_pis[i], (), xs[i]) for i in prng])
push!(result_pieces[idx], pi)
end
end
result = [exp(StatsFuns.logsumexp(pieces)) for pieces in result_pieces]
return Cat(range, normalize(result))
end
end
@impl begin
struct DetSendLambda end
function send_lambda(sf::Det{I,O},
lambda::Score{<:O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple,
parent_ix::Integer)::Score where {N,I,O}
# For a particular parent, we consider all possible values of the other parents.
# We make a joint argument p, and compute pi(other parents) * lambda(f(p)).
# Need to make sure the target parent range is a Vector{T} rather than a Vector{Any}
T = typejoin([typeof(x) for x in parranges[parent_ix]]...)
target_parrange :: Vector{T} = parranges[parent_ix]
otherranges = [r for r in parranges]
deleteat!(otherranges, parent_ix)
otherinds = cartesian_product([collect(1:length(r)) for r in otherranges])
result_pieces = [Vector{Float64}() for u in target_parrange]
for i in 1:length(target_parrange)
parval = target_parrange[i]
for otherind in otherinds
fullval = []
pi = 0.0
for j = 1:parent_ix - 1
u = parranges[j][otherind[j]]
push!(fullval, u)
pi += logcpdf(incoming_pis[j], (), u)
end
push!(fullval, parval)
for j = parent_ix + 1:length(parranges)
u = parranges[j][otherind[j-1]]
push!(fullval, u)
pi += logcpdf(incoming_pis[j], (), u)
end
x = apply(sf, fullval...)
idx = indexin([x], range)[1]
if !isnothing(idx)
push!(result_pieces[i], pi + get_log_score(lambda, range[idx]))
end
end
end
result = [StatsFuns.logsumexp(pieces) for pieces in result_pieces]
return LogScore(target_parrange, result)
end
end
# STATS
@impl begin
struct DetInitialStats end
initial_stats(::Det) = nothing
end
@impl begin
struct DetAccumulateStats end
accumulate_stats(::Det, existing_stats, new_stats) = nothing
end
@impl begin
struct DetExpectedStats end
function expected_stats(sf::Det{I,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
pis::NTuple{M,Dist},
child_lambda::Score{<:O}) where {I,O,N,M}
nothing
end
end
@impl begin
struct DetMaximizeStats end
maximize_stats(::Det, stats) = nothing
end
# STATS END
export
DiscreteCPT
"""
function DiscreteCPT(range::Vector{O}, paramdict::Dict{I, Vector{Float64}}) where {I <: Tuple, O}
Constructs an sfunc that represents a Discrete Conditional Probability table.
`DiscreteCPT`s are implemented as a `Table` with a `Cat`.
The `range` parameter defines all the possible outputs of the `DiscreteCPT`. The `paramdict`
parameter defines the input(s) and the actual CPT. For example,
```
range = [1, 2]
paramdict = Dict((1,1) => [0.3, 0.7], (1,2) => [0.6, 0.4], (2,1) =>[0.4, 0.6],
(2,2) => [0.7, 0.3], (3,1) => [0.5, 0.5], (3,2) => [0.8, 0.2])
```
can create a `DiscreteCPT` which has two(2) inputs (the length of the key) and, given each input
as defined by the key, selects either `1` or `2` (the range) with the given probability. So, if
the input is `(2,1)`, `1` is selected with probability `0.4` and `2` is selected with probability
`0.6`.
See also: [`Table`](@ref), [`Cat`](@ref)
"""
function DiscreteCPT(range::Vector{O}, paramdict::Dict{I, Vector{Float64}}) where {I <: Tuple, O}
NumInputs = length(collect(keys(paramdict))[1])
sfmaker(probs) = Cat(range, probs)
return Table(Tuple{}, O, NumInputs, paramdict, sfmaker)
end
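# Example: one parent with values 1 and 2; each row gives the child distribution.
#   cpt = DiscreteCPT([1, 2], Dict((1,) => [0.9, 0.1], (2,) => [0.2, 0.8]))
#   sample(cpt, (2,))   # 1 with probability 0.2, 2 with probability 0.8
#   cpdf(cpt, (2,), 1)  # == 0.2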
#=
# This form is provided for backward compatibility
# The ranges of the parents and child are all integer ranges from 1 to the number of values
function DiscreteCPT(params :: Array{Vector{Float64}, N}) where N
rangesizes = size(params)
parranges = [collect(1:r) for r in rangesizes]
parcombos = cartesian_product(parranges)
childrange = collect(1:length(params[1]))
I = NTuple{N, Int}
paramdict = Dict{I, Vector{Float64}}()
for (i, combo) in enumerate(parcombos)
paramdict[tuple(combo...)] = params[i]
end
return DiscreteCPT(childrange, paramdict)
end
=#
export
Invertible
"""
struct Invertible{I,O} <: SFunc{Tuple{I},O}
An invertible sfunc, with both a `forward` and a `inverse` function provided.
# Additional supported operators
- `support`
- `support_quality`
- `sample`
- `logcpdf`
- `bounded_probs`
- `make_factors`
- `compute_pi`
- `send_lambda`
"""
struct Invertible{I,O} <: SFunc{Tuple{I},O}
forward :: Function
inverse :: Function
end
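# Example: an exact, invertible transform of a single parent.
#   inv = Invertible{Float64, Float64}(exp, log)
#   sample(inv, (0.0,))          # == 1.0
#   cpdf(inv, (2.0,), exp(2.0))  # == 1.0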
@impl begin
struct InvertibleSupport end
function support(sf::Invertible{I,O}, parranges, size, curr) where {I,O}
invsup = map(sf.forward, parranges[1])
return invsup
end
end
@impl begin
struct InvertibleSupportQuality end
function support_quality(::Invertible{I,O}, parranges) where {I,O}
return :CompleteSupport
end
end
@impl begin
struct InvertibleSample end
function sample(sf::Invertible{I,O}, i::Tuple{I}) where {I,O}
return sf.forward(i[1])
end
end
@impl begin
struct InvertibleLogcpdf end
function logcpdf(sf::Invertible{I,O}, i::Tuple{I}, o::O) where {I,O}
return sf.forward(i[1]) == o ? 0.0 : -Inf
end
end
@impl begin
struct InvertibleBoundedProbs end
function bounded_probs(sf::Invertible{I,O}, range::Vector{<:O},
parranges) where {I,O}
result = Float64[]
for i in parranges[1]
for o in range
p = sf.forward(i) == o ? 1.0 : 0.0
push!(result, p)
end
end
return (result, result)
end
end
@impl begin
struct InvertibleMakeFactors end
function make_factors(sf::Invertible{I,O}, range::Vector{<:O}, parranges,
id, parids) where {I,O}
dims = (length(parranges[1]), length(range))
keys = (parids[1], id)
entries = bounded_probs(sf, range, parranges)[1]
facts = [Factor(dims, keys, entries)]
return (facts, facts)
end
end
@impl begin
struct InvertibleComputePi end
function compute_pi(sf::Invertible{I,O}, range::Vector{<:O}, parranges,
incoming_pis) where {I,O}
ps = [cpdf(incoming_pis[1], (), sf.inverse(o)) for o in range]
return Cat(range, ps)
end
end
@impl begin
struct InvertibleSendLambda end
function send_lambda(sf::Invertible{I,O}, lambda, range, parranges, incoming_pis,
parent_idx) where {I,O}
@assert parent_idx == 1
ls = [get_log_score(lambda, sf.forward(x)) for x in parranges[1]]
return LogScore(parranges[1], ls)
end
end
#=
@impl begin
struct InvertibleInitialStats end
initial_stats(::Invertible) = nothing
end
@impl begin
struct InvertibleAccumulateStats end
accumulate_stats(::Invertible, existing_stats, new_stats) = nothing
end
@impl begin
struct InvertibleExpectedStats end
expected_stats(::Invertible, range, parranges, incoming_pis, child_lambda) =
nothing
end
@impl begin
struct InvertibleMaximizeStats end
maximize_stats(::Invertible, stats) = nothing
end
=#
export LinearGaussian
"""
mutable struct LinearGaussian{I <: Tuple{Vararg{Float64}}} <:
Conditional{I, Tuple{}, I, Float64, Normal}
`LinearGaussian` defines an sfunc whose mean is a linear function of its parents. A
`LinearGaussian`'s output type is `Float64`, its parameter type is
`Tuple{I, Float64, Float64}`, and its contained *sfunc* is a `Normal` with mean `0.0`.
# Type parameters
- `I`: the input type(s) of the `LinearGaussian`
See also: [`Conditional`](@ref), [`Normal`](@ref)
"""
mutable struct LinearGaussian{I <: Tuple{Vararg{Float64}}} <: Conditional{I, Tuple{}, I, Float64, Normal{Float64}}
sf :: Normal{Float64}
params :: Tuple{Tuple{Vararg{Float64}}, Float64, Float64}
"""
function LinearGaussian(weights :: Tuple{Vararg{Float64}}, bias :: Float64, sd :: Float64)
`LinearGaussian` constructor
# Arguments
- `weights::Tuple{Vararg{Float64}}`: the weights of each parent
- `bias::Float64`: the bias of the mean of the internal `Normal` *sfunc*
- `sd::Float64`: the standard deviation of the internal `Normal` *sfunc*
"""
function LinearGaussian(weights :: Tuple{Vararg{Float64}}, bias :: Float64, sd :: Float64)
params = (weights, bias, sd)
sf = Normal(0.0, sd)
N = length(weights)
new{NTuple{N, Float64}}(sf, params)
end
end
# STATS
@impl begin
struct LinearGaussianInitialStats end
function initial_stats(sf::LinearGaussian)
(weights, _, _) = sf.params
initweights = Tuple(zeros(length(weights)))
Dict(initweights=>(0.0, 0.0))
end
end
# END STATS
function gensf(lg::LinearGaussian, inputs::Tuple{Vararg{Float64}})::Normal{Float64}
(weights, bias, sd) = lg.params
to_sum = inputs .* weights
mean = isempty(to_sum) ? bias : sum(to_sum) + bias
return Normal(mean, sd)
end
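# Example: mean = 2.0 * x1 - 1.0 * x2 + 0.5 with standard deviation 1.0.
#   lg = LinearGaussian((2.0, -1.0), 0.5, 1.0)
#   sample(lg, (1.0, 3.0))  # draws from Normal(2.0 * 1.0 - 1.0 * 3.0 + 0.5, 1.0)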
export
Separable,
SepCPTs
"SepCPTs = Array{Dict{I, Array{Float64, 1}} where I}"
SepCPTs = Array{Dict{I, Array{Float64, 1}} where I}
# TODO: Generalize this to separable additive decompositions in general, not just CPTs
"""
function Separable(range::Vector{O}, probabilities :: Vector{Float64}, compparams :: SepCPTs) where O
Constructs an sfunc representing separable models, defined by additive decomposition of a conditional probability distribution into
separate distributions depending on each of the parents.
`Separable`s are implemented as a `Mixture` of `Extend` sfuncs that extend `DiscreteCPT`s.
To construct a `Separable`, this method is passed the `range` of output values, the `probabilities`
of each of the underlying `DiscreteCPT` (which are the internal sfuncs of the `Mixture`), and
the parameters for each of the `DiscreteCPT`s. For example,
```
alphas = [0.2, 0.3, 0.5]
cpd1 = Dict((1,) => [0.1, 0.9], (2,) => [0.2, 0.8])
cpd2 = Dict((1,) => [0.3, 0.7], (2,) => [0.4, 0.6], (3,) => [0.5, 0.5])
cpd3 = Dict((1,) => [0.6, 0.4], (2,) => [0.7, 0.3])
cpds :: Array{Dict{I,Array{Float64,1}} where I,1} = [cpd1, cpd2, cpd3]
s = Separable([1, 2], alphas, cpds)
```
See also: [`Mixture`](@ref), [`Extend`](@ref), [`DiscreteCPT`](@ref), [`Table`](@ref)
"""
function Separable(range::Vector{O}, probabilities :: Vector{Float64}, compparams :: SepCPTs) where O
N = length(compparams)
@assert length(probabilities) == N
IS = []
for i = 1:N
ks = collect(keys(compparams[i]))
push!(IS, typeof(ks[1][1]))
end
J = Tuple{IS...}
# Explicit typing is necessary to ensure that the cpts passed to Mixture all have the same type.
function make_cpt(i, I)::SFunc{J,O}
cpt::Table{1, I, Tuple{}, I, O} = DiscreteCPT(range, compparams[i])
extended::SFunc{J,O} = Extend(J, cpt, i)
return extended
end
cpts::Vector{SFunc{J,O}} =
[make_cpt(i,Tuple{IS[i]}) for i in 1:N]
return Mixture(cpts, probabilities)
end
export
Switch,
LinearSwitch,
If,
choose
"""
abstract type Switch{N, I, K, O} <: Det{K, O}
`Switch` defines an sfunc that represents choosing between multiple incoming (parent) Sfuncs
based on a test. A subtype of `Switch` must provide a `choose` function that takes the switch
and an `i` and returns an integer between `1` and `N`. This is an index into a 'parent array'.
`K` must be a flat tuple type consisting of `I` and `N` occurrences of `O`: for example,
if I is `Int`
```
K = extend_tuple_type(Tuple{Int}, NTuple{N, O})
```
If the subtyped sfunc is not in the Scruff.SFuncs module, the system must
`import Scruff.SFuncs: choose`.
# Additional supported operators
- `support`
- `support_quality`
- `compute_pi`
- `send_lambda`
# Type parameters
- `N`: the count of *sfuncs* from which to choose
- `I`: the type of the second argument of the `choose` function defined for the `Switch`
- `K`: the input type of the `Switch`; see above
- `O`: the output type of the `Switch`
See also: [`choose`](@ref), [`extend_tuple_type`](@ref)
"""
abstract type Switch{N, I, K, O} <: Det{K, O} end
"""
`choose` interface. For every subtype of `Switch`, an implementation of this method must
be created, whose first parameter is the subtype, and the second parameter is of type `I`
for the parameter type in `Switch`.
For example, the definition of the `If` *sfunc* is as follows, where `choose` returns
either index `1` or index `2`.
```
struct If{O} <: Switch{2, Bool, Tuple{Bool, O, O}, O} end
choose(::If, b::Bool) = b ? 1 : 2
```
"""
function choose end
apply(sw::Switch, i, hs...) = hs[choose(sw,i)] # implemented because subtype of `Det`
struct LinearSwitch{N, K, O} <: Switch{N, Int, K, O}
n :: Int
function LinearSwitch(N, O)
K = extend_tuple_type(Tuple{Int}, NTuple{N, O})
new{N, K, O}(N)
end
end
choose(::LinearSwitch, i) = i
struct If{O} <: Switch{2, Bool, Tuple{Bool, O, O}, O} end
choose(::If, b) = b ? 1 : 2
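# Example: If selects its second or third input based on the Boolean first input.
#   f = If{Symbol}()
#   sample(f, (true, :heads, :tails))   # == :heads
#   sample(f, (false, :heads, :tails))  # == :tails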
# switch overrides the functions for det that use the Cartesian product of the parents.
# Since only one parent is ever active at a time, we don't need to compute the Cartesian product.
# This leads to exponential time savings in the number of parents.
@impl begin
struct SwitchSupport end
function support(sf::Switch{N,I,K,O},
parranges::NTuple{M,Vector},
size::Integer,
curr::Vector{<:O}) where {I,K,O,N,M}
result = Vector{output_type(sf)}()
for i in parranges[1]
h = choose(sf, i)
append!(result, parranges[h + 1])
end
return unique(result)
end
end
@impl begin
struct SwitchSupportQuality end
function support_quality(sf::LinearSwitch, parranges)
ivals = map(i -> choose(sf, i), parranges[1])
if all(j -> j in ivals, 1:sf.n)
return :CompleteSupport
else
return :BestEffortSupport
end
end
function support_quality(::Union{<:Support,Nothing}, sf::If, parranges)
ivals = map(i -> choose(sf, i), parranges[1])
if all(j -> j in ivals, 1:2)
return :CompleteSupport
else
return :BestEffortSupport
end
end
end
# We currently don't overwrite bounded_probs.
# bounded_probs will be replaced with an operation that produces factors.
@impl begin
struct SwitchComputePi end
function compute_pi(sf::Switch{N,I,K,O},
range::VectorOption{<:O},
parranges::NTuple{M,Vector},
incoming_pis::Tuple)::Dist{<:O} where {M,N,I,K,O}
result = zeros(Float64, length(range))
ipis = incoming_pis[1]
for (i,ival) in enumerate(parranges[1])
ipi = cpdf(ipis, (), parranges[1][i])
h = choose(sf, ival)
hpis = incoming_pis[h+1]
hrange = parranges[h+1]
for (j,jval) in enumerate(range)
k = indexin([jval], hrange)[1]
# the range of the parent can be a subset of the range of the switch
hpi = isnothing(k) ? 0.0 : cpdf(hpis, (), parranges[h+1][k])
result[j] += ipi * hpi
end
end
return Cat(range, result)
end
end
@impl begin
struct SwitchSendLambda end
function send_lambda(sf::Switch{N,I,K,O},
lambda::Score{<:O},
range::VectorOption{<:O},
parranges::NTuple{M,Vector},
incoming_pis::Tuple,
parent_ix::Integer)::Score where {M,N,I,K,O}
# This helper function computes, for a particular choice, the sum of
# pi times lambda values.
function compute1(h)
hrange = parranges[h+1]
hpis = incoming_pis[h+1]
tot = 0.0
for (j,jval) in enumerate(hrange)
# We need to keep track of indices properly.
# We cannot assume that the range of the switch and the range passed into
# send_lambda are the same, even though they are of the same type O.
# In particular, choices might have different ranges from each other,
# perhaps restricted or in a different order.
k = indexin([jval], range)[1]
# the range of this choice can be a subset of the child range, so skip
# values that do not appear in it
if !isnothing(k)
    tot += cpdf(hpis, (), parranges[h+1][j]) * get_score(lambda, range[k])
end
end
return tot
end
# We need to make sure the message is correctly typed to the output type of the appropriate parent
# Need to make sure the target parent range is a Vector{T} rather than a Vector{Any}
T = typejoin([typeof(x) for x in parranges[parent_ix]]...)
target_parrange :: Vector{T} = parranges[parent_ix]
if parent_ix == 1
# We send a lambda message to the selector parent
# For a given value of the selector, the lambda value is the
# probability of the child lambda for the corresponding choice.
# This is equal to the sum, over all values in the range of the
# choice, of the incoming_pi * lambda for that value.
return SoftScore(target_parrange,
map(i -> compute1(choose(sf, i)), target_parrange))
else
# The lambda message to a choice consists of two components:
# A choice-specific component equal to the pi of the I value that leads to that choice, times lambda
# A constant from all the other choices, equal to the sum, over all choices, of the pi of the I value for the choice
# times the sum of pi * lambda values for that choice.
con = 0.0
spec = nothing
ipis = incoming_pis[1]
for (i,ival) in enumerate(parranges[1])
h = choose(sf, ival)
if h+1 != parent_ix
con += cpdf(ipis, (), ival) * compute1(h)
else
# lambda is in the order of range, but hrange might be a subset and in a different order
hrange = parranges[h+1]
qs = []
for (j,jval) in enumerate(hrange)
# k = indexin([jval], range)[1]
# push!(qs, lambda[k])
push!(qs, get_score(lambda, jval))
end
spec = cpdf(ipis, (), ival) .* qs
end
end
return SoftScore(target_parrange, con .+ spec)
end
end
end
export
Table
"""
mutable struct Table{NumInputs, I <: NTuple{NumInputs, Any}, J, K, O, Q, S <: SFunc{J,O}} <: Conditional{I, J, K, O, S}
`Table` defines a `Conditional` whose generated sfuncs are stored in a multi-dimensional table of
type `Array{S, NumInputs}`, where each entry is built from parameters of type `Q` and `NumInputs`
is the count of incoming parent values.
See also: [`Conditional`](@ref), [`DiscreteCPT`](@ref), [`CLG`](@ref)
"""
mutable struct Table{NumInputs, I <: NTuple{NumInputs, Any}, J, K, O, Q, S <: SFunc{J,O}} <: Conditional{I, J, K, O, S}
params :: Dict{I, Q}
sf_maker :: Function
__icombos :: Vector{I}
__iranges :: NTuple{NumInputs, Array{Any, 1}}
__isizes :: NTuple{NumInputs, Int}
__imults :: NTuple{NumInputs, Int}
__inversemaps :: NTuple{NumInputs, Dict{Any, Int}}
__sfs :: Array{S, NumInputs}
"""
function Table(J, O, NumInputs::Int, paramdict::Dict{I, Q}, sf_maker::Function) where {I, Q}
`Table` constructor.
# Arguments
- `J` the type of inputs to the sfuncs in the table
- `O` the type of outputs from the sfuncs in the table
- `NumInputs::Int` the count of incoming parent values
- `paramdict` see [`DiscreteCPT`](@ref) and [`CLG`](@ref) for examples
- `sfmaker` a function from Q to S
"""
function Table(J, O, NumInputs::Int, paramdict::Dict{I, Q}, sf_maker::Function) where {I, Q}
K = extend_tuple_type(I,J)
icombos = keys(paramdict)
iranges :: Array{Array{Any,1}} = [unique(collect([combo[k] for combo in icombos])) for k in 1:NumInputs]
isizes = tuple([length(irange) for irange in iranges]...)
m = 1
ims = zeros(Int, NumInputs)
for k in NumInputs:-1:1
ims[k] = m
m *= isizes[k]
end
imults = tuple(ims...)
# TODO: Fix ordering of Dict
inversemaps = tuple([Dict([x => i for (i,x) in enumerate(irange)]) for irange in iranges]...)
sortedcombos = [tuple(p...) for p in cartesian_product(iranges)]
S = SFunc{J,O}
sfs = Array{S, NumInputs}(undef, isizes)
for k in 1:length(sortedcombos)
is = sortedcombos[k]
q = paramdict[is]
sfs[k] = sf_maker(q)
end
new{NumInputs,I,J,K,O,Q,S}(paramdict, sf_maker, sortedcombos, tuple(iranges...), isizes, imults, inversemaps, sfs)
end
end
@impl begin
struct TableGetParams end
get_params(t :: Table) = t.params
end
@impl begin
struct TableSetParams! end
function set_params!(t :: Table{NumInputs,I,J,K,O}, new_params) where {NumInputs,I,J,K,O}
Table(J, O, NumInputs, new_params, t.sf_maker)
end
end
#=
function dict2tableparams(sf::Table{NumInputs,I,J,K,O,S}, p::Dict{I,Q}) where {NumInputs,I,J,K,O,Q,S}
k1 = collect(keys(p))[1]
@assert NumInputs == length(k1)
icombos = keys(p)
iranges :: Array{Array{Any,1}} = [unique(collect([combo[k] for combo in icombos])) for k in 1:NumInputs]
sortedcombos = [tuple(p...) for p in cartesian_product(iranges)]
isizes = tuple([length(irange) for irange in iranges]...)
array = Array{Q, NumInputs}(undef, isizes)
for k in 1:length(sortedcombos)
is = sortedcombos[k]
array[k] = get(p, is, sf.default) # The input dict does not have to have all combos
end
array
end
=#
function gensf(t::Table{N}, parvals::NTuple{N,Any}) where {N}
inds = tuple([t.__inversemaps[k][parvals[k]] for k in 1:length(parvals)]...)
i = 1
for k in 1:N
i += (inds[k] - 1) * t.__imults[k]
end
return t.__sfs[i]# Change this to just index on the tuple of indices rather than do the calculation ourselves
end
# STATS
function do_maximize_stats(t::Table{N,I,J,K,O,Q,S}, sfmaximizers) where {N,I,J,K,O,Q,S}
# result = Dict{I, Q}()
# for k in keys(t.params)
# result[k] = sfmaximizers[k]
# end
# return result
sfmaximizers
end
# STATS END
export
Cat,
Categorical,
Discrete
import ..Utils.normalize
import Distributions
const Categorical{P, Ps} = DistributionsSF{Distributions.Categorical{P, Ps}, Int}
Categorical(p::Ps) where {P, Ps <: AbstractVector{P}} = Categorical{P, Ps}(p)
const Discrete{T, P, Ts, Ps} = DistributionsSF{Distributions.DiscreteNonParametric{T, P, Ts, Ps}, T}
function Discrete(xs::Xs, ps::Ps) where {X, Xs <: AbstractVector{X}, P, Ps <: AbstractVector{P}}
# Handle duplicates
sort_order = sortperm(xs)
xs = xs[sort_order]
ps = ps[sort_order]
for i=1:(length(xs) - 1)
if xs[i] == xs[i + 1]
ps[i] += ps[i + 1]
ps[i + 1] = 0
end
end
keep = ps .> 0
xs = xs[keep]
ps = ps[keep]
return Discrete{X, P, Xs, Ps}(xs, ps)
end
@doc """
mutable struct Cat{O, T<:Real} <: Dist{O}
`Cat` defines an sfunc that represents a set of categorical output values that are not conditioned
on any input. Its parameters are of type `Vector{T}` with `T<:Real`, giving the probability of each
output value.
# Supported operations
- `support`
- `support_quality`
- `sample`
- `cpdf`
- `bounded_probs`
- `compute_pi`
- `f_expectation`
# Type parameters
- `O`: the output type of the `Cat`
"""
mutable struct Cat{O, T<:Real} <: Dist{O}
original_range :: Vector{O}
params :: Vector{T}
__inversemap :: Dict{O, Int}
__compiled_range :: Vector{O}
"""
Cat(r::Vector{O}, ps::Vector{<:Real}) where O
`Cat`'s constructor
# Arguments
- `r::Vector{O}`: the set of possible output values
- `ps::Vector{<:Real}`: the set of probabilities for each value in `r`
(will be normalized on call to `sample`)
"""
function Cat(original_range::Vector{O}, params::Vector{T}) where {O, T<:Real}
@assert length(original_range) == length(params)
# Handle repeated values correctly
d = Dict{O, Float64}()
for (x, p) in zip(original_range, params)
d[x] = get(d, x, 0) + p
end
r = collect(keys(d))
ps = [d[x] for x in r]
inversemap = Dict([x => i for (i,x) in enumerate(r)])
return new{O, T}(original_range, ps, inversemap, r)
end
function Cat(d::Dict{O, <:Real}) where O
return Cat(collect(keys(d)), collect(values(d)))
end
"""
Cat(rps::Vector{Pair{O,<:Real}}) where O
`Cat` constructor
# Arguments
- rps::Vector{Pair{O,Float64}}: a set of `Pair`s `output_value=>probability_of_value`
"""
function Cat(rps::Vector{<:Pair{O,<:Real}}) where O
range = first.(rps)
probs = last.(rps)
# This should be moved up into the normal constructor, but there's a bunch of tests
# using Cat to represent densities at finite sets of points...?
probs = normalize(probs)
return Cat(range, probs)
end
end
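# Example:
#   c = Cat([:a, :b, :c], [0.2, 0.3, 0.5])
#   sample(c, ())    # one of :a, :b, :c
#   cpdf(c, (), :b)  # == 0.3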
@impl begin
struct CatGetParams end
get_params(c :: Cat) = c.params
end
@impl begin
struct CatSetParams! end
function set_params!(c :: Cat, p)
c.params = p
c
end
end
@impl begin
struct CatConfigure end
function configure(sf::Cat{O}, rps::Vector{<:Pair{O, <:Real}}) where O
Cat(rps)
end
end
@impl begin
struct CatSupport end
function support(sf::Cat{O},
parranges::NTuple{N,Vector},
size::Integer,
curr::Vector{<:O}) where {O,N}
sf.original_range
end
end
@impl begin
struct CatSupportQuality end
function support_quality(::Cat, parranges)
:CompleteSupport
end
end
@impl begin
struct CatSample end
function sample(sf::Cat{O}, ::Tuple{})::O where {O}
# normalize the parameters here, as the constructor docstring promises
idx = rand(Distributions.Categorical(normalize(sf.params)))
return sf.__compiled_range[idx]
end
end
@impl begin
struct CatCpdf end
function cpdf(sf::Cat{O}, i::Tuple{}, o::O) where {O}
ind = get(sf.__inversemap, o, 0)
if ind == 0
return 0.0
else
return sf.params[ind]
end
end
end
@impl begin
struct CatBoundedProbs end
function bounded_probs(sf::Cat{O},
range::Vector{O},
::NTuple{N,Vector})::Tuple{Vector{<:AbstractFloat},
Vector{<:AbstractFloat}} where {O,N}
ps = [x in keys(sf.__inversemap) ? sf.params[sf.__inversemap[x]] : 0.0 for x in range]
(ps, ps)
end
end
@impl begin
struct CatComputePi end
function compute_pi(sf::Cat{O},
range::Vector{O},
::NTuple{N,Vector},
::Tuple)::Cat{O} where {N,O}
ps = bounded_probs(sf, range, ())[1]
Cat(range, ps)
end
end
# STATS
@impl begin
struct CatInitialStats end
function initial_stats(sf::Cat{T}) where T
# d = Dict{T, Float64}()
# for k in sf.range
# d[k] = 0.0
# end
# d
zeros(Float64, length(sf.__compiled_range))
end
end
@impl begin
struct CatAccumulateStats end
function accumulate_stats(sf::Cat{T}, existing_stats, new_stats) where T
existing_stats .+ new_stats
end
end
@impl begin
struct CatExpectedStats end
function expected_stats(sf::Cat{O},
range::Vector{O},
::NTuple{N,Vector},
::NTuple{M,Dist},
lambda::Score{<:O}) where {O,N,M}
orig = sf.__compiled_range
ps = zeros(Float64, length(orig))
for (i,x) in enumerate(range)
if x in orig
ps[i] = sf.params[sf.__inversemap[x]]
end
end
ls = [get_score(lambda, r) for r in range]
return ps .* ls
end
end
@impl begin
struct CatMaximizeStats end
function maximize_stats(sf::Cat, stats)
normalize(stats)
end
end
# END STATS
@impl begin
struct CatFExpectation end
function f_expectation(sf::Cat, ::Tuple{}, fn::Function)
sum = 0.0
total = 0.0
for (i, x) in enumerate(sf.__compiled_range)
sum += fn(x) * sf.params[i]
total += sf.params[i]
end
return sum / total
end
end
@impl begin
function weighted_values(sf::Union{Discrete, Categorical})
d = sf.dist
return (d.support, d.p)
end
end
@impl begin
function weighted_values(sf::Cat)
return (sf.__compiled_range, sf.params)  # Cat stores its deduplicated range in __compiled_range
end
end
export
Constant
"""
mutable struct Constant{O} <: Dist{O,Nothing}
`Constant` represents an sfunc that always produces the same value. It has no
inputs and no parameters.
# Additional supported operators
- `support`
- `support_quality`
- `sample`
- `logcpdf`
- `bounded_probs`
- `compute_pi`
# Type parameters
- `O`: the output type(s) of the `Constant`
"""
mutable struct Constant{O} <: Dist{O}
"the constant value to be returned"
x :: O
end
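# Example:
#   c = Constant(:ok)
#   sample(c, ())     # == :ok
#   cpdf(c, (), :ok)  # == 1.0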
@impl begin
struct ConstantSupport end
function support(sf::Constant{O},
parranges::NTuple{N,Vector},
size::Integer,
curr::Vector{<:O}) where {O,N}
[sf.x]
end
end
@impl begin
struct ConstantSupportQuality end
function support_quality(::Constant, parranges)
:CompleteSupport
end
end
@impl begin
struct ConstantSample end
function sample(sf::Constant{O}, i::Tuple{})::O where {O}
sf.x
end
end
@impl begin
struct ConstantLogcpdf end
function logcpdf(sf::Constant{O}, i::Tuple{}, o::O)::AbstractFloat where {O}
return o == sf.x ? 0.0 : -Inf
end
end
@impl begin
struct ConstantBoundedProbs end
function bounded_probs(sf::Constant{O},
range::Vector{<:O},
parranges::NTuple{N,Vector})::Tuple{Vector{<:AbstractFloat}, Vector{<:AbstractFloat}} where {O,N}
p = sf.x in range ? [1.0] : [0.0]
return (p, p)
end
end
#=
@impl begin
struct ConstantInitialStats end
initial_stats(sf::Constant) = nothing
end
=#
@impl begin
struct ConstantComputePi end
function compute_pi(sf::Constant{O},
range::Vector{<:O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple)::Dist{<:O} where {O,N}
ps = [x == sf.x ? 1.0 : 0.0 for x in range]
Cat(range, ps)
end
end
# STATS
@impl begin
struct ConstantAccumulateStats end
function accumulate_stats(::Constant, existing_stats, new_stats)
nothing
end
end
@impl begin
struct ConstantExpectedStats end
function expected_stats(::Constant{O},
range::Vector{},
parranges::NTuple{N,Vector},
pis::NTuple{M,Dist},
lambda::Score{<:O}) where {O,N,M}
nothing
end
end
@impl begin
struct ConstantMaximizeStats end
function maximize_stats(::Constant, stats)
nothing
end
end
# STATS END

@impl begin
struct DistMakeFactors
numpartitions::Int64 = 100
end
function make_factors(sf::Dist{T},
range::Vector{T},
parranges::NTuple{N,Vector},
id,
parids::Tuple)::Tuple{Vector{<:Scruff.Utils.Factor}, Vector{<:Scruff.Utils.Factor}} where {T,N}
(lowers, uppers) = bounded_probs(sf, range, ())
keys = (id,)
dims = (length(range),)
return ([Factor(dims, keys, lowers)], [Factor(dims, keys, uppers)])
end
end
@impl begin
struct DistSendLambda end
function send_lambda(sf::Dist{T},
lambda::Score{<:T},
range::VectorOption{<:T},
parranges::NTuple{N,Vector},
incoming_pis::Tuple,
parent_idx::Integer)::Score where {N,T}
SoftScore(Float64[], Float64[])
end
end
include("distributions.jl")
include("cat.jl")
include("constant.jl")
include("flip.jl")
include("normal.jl")
include("uniform.jl")
import Distributions
export DistributionsSF
struct DistributionsSF{D <: Distributions.Distribution, O} <: Dist{O}
dist::D
function DistributionsSF(dist::D) where {D <: Distributions.Distribution}
O = eltype(D)
return new{D, O}(dist)
end
function DistributionsSF{D}(params...) where {D <: Distributions.Distribution}
d = D(params...)
return DistributionsSF(d)
end
function DistributionsSF{D, O}(params...) where {D <: Distributions.Distribution, O}
d = D(params...)
return DistributionsSF(d)
end
end
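# Usage sketch (not part of the original source): any Distributions.jl
# distribution can be wrapped directly, e.g.
#     sf = DistributionsSF(Distributions.Poisson(3.0))   # a Dist{Int}
# after which the generic operators below apply: sample(sf, ()),
# expectation(sf, ()), logcpdf(sf, (), 2), and so on.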
@impl begin
function expectation(sf::DistributionsSF, i::Tuple{})
return Distributions.mean(sf.dist)
end
end
@impl begin
function sample(sf::DistributionsSF, i::Tuple{})
return Distributions.rand(sf.dist)
end
end
@impl begin
function variance(sf::DistributionsSF{D}, i::Tuple{}) where {D <: Distributions.ContinuousDistribution}
return Distributions.std(sf.dist)^2
end
end
@impl begin
function logcpdf(sf::DistributionsSF{D, T}, i::Tuple{}, o::T) where {D, T}
return Distributions.logpdf(sf.dist, o)
end
end
@impl begin
function support_minimum(sf::DistributionsSF, i::Tuple{})
return Distributions.minimum(sf.dist)
end
end
@impl begin
function support_maximum(sf::DistributionsSF, i::Tuple{})
return Distributions.maximum(sf.dist)
end
end
@impl begin
function support(sf::DistributionsSF{<:Distributions.DiscreteDistribution, O},
parranges::NTuple{N, Vector},
size::Integer,
curr::Vector{<:O}) where {O, N}
return Distributions.support(sf.dist)
end
end
@impl begin
function support_quality(::DistributionsSF{<:Distributions.DiscreteNonParametric}, parranges)
:CompleteSupport
end
end
# See https://juliastats.org/Distributions.jl/stable/convolution/
ConvSupported = Union{Distributions.Bernoulli,
Distributions.Binomial,
Distributions.NegativeBinomial,
Distributions.Geometric,
Distributions.Poisson,
Distributions.Normal,
Distributions.Cauchy,
Distributions.Chisq,
Distributions.Exponential,
Distributions.Gamma,
Distributions.MvNormal}
@impl begin
function sumsfs(fs::NTuple{N, DistributionsSF{SubSF}}) where {N, SubSF <: ConvSupported}
# Return an SFunc representing g(x) = f1(x) + f2(x) + ...
# I.e. convolution of the respective densities
dists = tuple((f.dist for f in fs)...)
return DistributionsSF(reduce(Distributions.convolve, dists))
end
end
# This won't work for all Distributions
@impl begin
struct GeneralFitDistributions end
function fit_mle(::Type{DistributionsSF{T, O}}, ref::Dist{O}) where {O, T}
return DistributionsSF(Distributions.fit_mle(T, weighted_values(ref)...))
end
end
# Some implementations are iterative and you can control iters - lets expose that via hyperparams
IterFitDists = Union{Distributions.Beta,
Distributions.Dirichlet,
Distributions.DirichletMultinomial,
Distributions.Gamma}
@impl begin
# These defaults are a fair bit looser than Distributions.jl
struct IterFitDistributions
maxiter::Int = 100
tol::Float64 = 1e-6
end
function fit_mle(::Type{DistributionsSF{T, O}}, ref::Dist{O}) where {O, T <: IterFitDists}
return DistributionsSF(Distributions.fit_mle(T, weighted_values(ref)...;
maxiter=maxiter, tol=tol))
end
end
export Flip
"""
Flip(p)
Constructs a very simple *sfunc* corresponding to a Bernoulli distribution,
represented by a `Cat`. The output is `true` with probability `p`, and `false`
with probability `1-p`.
See also: [`Cat`](@ref)
"""
Flip(p) = Cat([false, true], [1-p, p])
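# Usage sketch (not part of the original source): Flip(0.1) is just
# Cat([false, true], [0.9, 0.1]), so the generic Cat operators apply, e.g.
# cpdf(Flip(0.1), (), true) should return 0.1.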
export Normal
import Distributions
"""
    Normal{T} = DistributionsSF{Distributions.Normal{T}, T}
`Normal` defines an *sfunc* representing unconditional Gaussian distributions.
# Additional supported operators
- `support`
- `support_quality`
- `sample`
- `logcpdf`
- `bounded_probs`
- `compute_pi`
"""
const Normal{T} = DistributionsSF{Distributions.Normal{T}, T}
Normal(mean, sd) = Normal{typeof(mean)}(mean, sd)
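# Usage sketch (not part of the original source): Normal(0.0, 1.0) wraps
# Distributions.Normal(0.0, 1.0), so expectation(sf, ()) returns 0.0 and
# variance(sf, ()) returns 1.0 via the generic DistributionsSF operators.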
@impl begin
struct NormalSupport end
function support(
sf::Normal,
parranges::NTuple{N,Vector},
size::Integer,
curr::Vector{Float64}) where N
mu = expectation(sf, ())
sd = variance(sf, ())^0.5
if isempty(curr)
oldl = mu
oldu = mu
else
oldl = minimum(filter(x -> x > -Inf, curr))
oldu = maximum(filter(x -> x < Inf, curr))
end
if oldl == oldu
if size <= 1
result = [oldl]
return result
else
l = oldl - sd
u = oldl + sd
result = Vector{Float64}()
gap = (u-l) / (size-1)
push!(result, l)
for i = 1:(size-1)
push!(result, l + i*gap)
end
return result
end
else
len = length(curr)
if size <= len
return curr
else # we need to extend and thicken the current range while keeping existing points
olddiff = oldu - oldl
oldgap = olddiff / (len-1)
newl = oldl - olddiff
newu = oldu + olddiff
result = copy(curr)
i = newl
while i < oldl
push!(result, i)
i += oldgap
end
i = newu
while i > oldu
push!(result, i)
i -= oldgap
end
# Now we have 3 * len points covering twice the range
# If size is greater, we will add points in between
numperinterval = ceil(size / (3*len))
if numperinterval > 1
newgap = oldgap / numperinterval
for x in copy(result)
for j = 1:(numperinterval-1)
push!(result, x + newgap * j)
end
end
end
sort!(result)
return result
end
end
end
end
@impl begin
struct NormalSupportQuality end
function support_quality(::Normal, parranges)
:IncrementalSupport
end
end
@impl begin
    struct NormalBoundedProbs
numpartitions::Int64 = 10
end
function bounded_probs(
sf::Normal,
range::VectorOption{Float64},
parranges::NTuple{N,Vector})::Tuple{Vector{<:AbstractFloat}, Vector{<:AbstractFloat}} where {N}
intervals = make_intervals(range)
lower = Array{Float64}(undef, length(range))
upper = Array{Float64}(undef, length(range))
d = sf.dist
ls = []
us = []
for interval in intervals
(l, u) = interval
if l == -Inf || u == Inf
push!(ls, 0.0)
push!(us, 1.0)
else
diff = (u - l) / numpartitions
mn = 0.0
mx = 0.0
for i = 1:numpartitions
il = l + (i-1) * diff
iu = l + i * diff
p1 = Distributions.pdf(d, il) * diff
p2 = Distributions.pdf(d, iu) * diff
mn += min(p1,p2)
mx += max(p1,p2)
end
push!(ls, mn)
push!(us, mx)
end
end
# We get better bounds by considering the bounds on other intervals.
# For any point, the lower bound can be normalized using the upper bounds of every other point, and vice versa.
# This is especially important for intervals with -Inf or Inf as an
# endpoint.
for i = 1:length(intervals)
otherls = 0.0
otherus = 0.0
for j = 1:i-1
otherls += ls[j]
otherus += us[j]
end
for j = i+1:length(intervals)
otherls += ls[j]
otherus += us[j]
end
l = max(ls[i], 1 - otherus)
u = min(us[i], 1 - otherls)
lower[i] = l
upper[i] = u
end
return(lower, upper)
end
end
# TODO: Replace this with a lazy implicit representation that doesn't require enumerating until the last minute
@impl begin
struct NormalComputePi end
function compute_pi(sf::Normal, range::VectorOption{Float64}, parranges::NTuple{N,Vector},
incoming_pis::Tuple)::Dist{Float64} where {N}
Cat(range, collect(map(x -> Distributions.pdf(sf.dist, x), range)))
end
end
# STATS
@impl begin
struct NormalExpectedStats end
function expected_stats(sf::Normal, range::VectorOption{Float64}, parranges::NTuple{N,Vector},
pis::NTuple{M,Dist},
child_lambda::Score{Float64}) where {N,M}
        # use a fresh name rather than shadowing the (unused) `pis` parameter
        ps = [Distributions.pdf(sf.dist, x) for x in range]
        ls = [get_score(child_lambda, r) for r in range]
        prob = ps .* ls
let totalX = 0.0, totalX2 = 0.0, count = 0.0
for (i, x) in enumerate(range)
totalX += x * prob[i]
totalX2 += x^2 * prob[i]
count += prob[i]
end
return (count,totalX,totalX2)
end
end
end
@impl begin
struct NormalAccumulateStats end
function accumulate_stats(sf::Normal, existing_stats, new_stats)
existing_stats .+ new_stats
end
end
@impl begin
struct NormalInitialStats end
function initial_stats(sf::Normal)
return (0.0,0.0,0.0)
end
end
@impl begin
struct NormalMaximizeStats end
function maximize_stats(sf::Normal, stats)
(count, totalX, totalX2) = stats
mean = totalX/count
std = sqrt(totalX2/count - mean^2)
return (mean, std)
end
end
# STATS
Base.hash(n::Normal, h::UInt) = hash(n.dist.σ, hash(n.dist.μ, hash(:Normal, h)))
Base.isequal(a::Normal, b::Normal) = Base.isequal(hash(a), hash(b))
Base.:(==)(a::Normal, b::Normal) = Base.isequal(hash(a), hash(b))
Base.isless(a::Normal, b::Normal) = a.dist.μ < b.dist.μ
Base.:<(a::Normal, b::Normal) = Base.isless(a, b)
# Continuous uniform sfunc
export Uniform
import Distributions
const Uniform{T} = DistributionsSF{Distributions.Uniform{T}, T}
Uniform(lb, ub) = Uniform{typeof(lb)}(lb, ub)
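# Usage sketch (not part of the original source): Uniform(0.0, 2.0) wraps
# Distributions.Uniform(0.0, 2.0); support_minimum(sf, ()) returns 0.0 and
# support_maximum(sf, ()) returns 2.0.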
@impl begin
struct UniformSupport end
function support(
sf::Uniform,
::NTuple,
size::Integer,
curr::Vector{Float64}
)
newsize = size - length(curr)
result = curr
sfmin = support_minimum(sf, ())
sfmax = support_maximum(sf, ())
if newsize > 0
x = sfmin
push!(result, x)
numsteps = newsize - 1
step = (sfmax - sfmin) / numsteps
for i in 1:numsteps
x += step
push!(result, x)
end
end
unique(result)
end
end
@impl begin
struct UniformSupportQuality end
function support_quality(::Uniform, parranges)
:IncrementalSupport
end
end
@impl begin
struct UniformBoundedProbs end
# assumes range is sorted
function bounded_probs(
sf::Uniform,
range::Vector{Float64},
::NTuple
)
l = support_minimum(sf, ())
u = support_maximum(sf, ())
d = u - l
n = length(range)
# Each element in the range is associated with the interval between the midpoint
# of it and the point below and the midpoint between it and the point above,
# except for the end intervals which go to negative or positive infinity.
points = [-Inf64]
for i in 2:n
push!(points, (range[i-1] + range[i]) / 2)
end
push!(points, Inf64)
# Each interval might be completely, partially, or not contained in the bounds
# of the uniform distribution. The following code determines the length of each
# interval that is in the bounds.
lengthsinbound = Float64[]
for i in 1:n
a = max(points[i], l)
b = min(points[i+1], u)
push!(lengthsinbound, max(b-a, 0.0))
end
ps = [lengthsinbound[i] / d for i in 1:n]
return (ps, ps)
end
end
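# Worked example (illustrative, not part of the original source): for
# Uniform(0.0, 2.0) and range [0.5, 1.5], the interval boundaries are
# [-Inf, 1.0, Inf]; the in-bound lengths are 1.0 and 1.0, so bounded_probs
# returns ([0.5, 0.5], [0.5, 0.5]).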
@impl begin
struct UniformComputePi end
function compute_pi(sf::Uniform,
range::Vector{Float64},
::NTuple,
::Tuple)::Cat{Float64}
ps = bounded_probs(sf, range, ())[1]
Cat(range, ps)
end
end
#=
# Default implementations of operators. These are defined using other operators.
# They will always be called if a more specific implementation is not provided.
# If the operators they rely on are not implemented, they will produce a runtime error.
# Writers of default implementations should avoid infinite loops.
=#
# if forward is defined, we get a default implementation of sample
@impl begin
struct DefaultSample end
# This should not produce an infinite loop, because a dist should not implement forward,
    # since forward maps a parent value to a dist, but here the parent is empty.
function sample(sf::SFunc{I,O}, i::I)::O where {I,O}
d = forward(sf, i)
return sample(d, tuple())
end
end
@impl begin
function cpdf(sf::SFunc{I,O}, i::I, o::O) where {I,O}
exp(logcpdf(sf, i, o))
end
end
@impl begin
function logcpdf(sf::SFunc{I,O}, i::I, o::O) where {I,O}
log(cpdf(sf, i, o))
end
end
# TODO: Create default implementations of compute_pi and send_lambda
# TODO: Create weighted_sample operator with default implementation using inverse
#=
bp_ops: operators used in BP that apply to all sfuncs
=#
using ..Utils
using Folds
@impl begin
struct SFuncComputeLambda end
function compute_lambda(sf::SFunc, range::VectorOption, lambda_msgs::Vector{<:Score})::Score
if isempty(lambda_msgs)
ps = zeros(Float64, length(range))
else
lams = [[get_log_score(l, o) for l in lambda_msgs] for o in range]
ps = [sum(lams[i]) for i in 1:length(range)]
end
# avoid underflow
m = maximum(ps)
qs = ps .- m
return LogScore(range, qs)
end
end
@impl begin
struct SFuncComputeBel end
function compute_bel(sf::SFunc{I,O}, range::VectorOption{O}, pi::Dist{O}, lambda::Score)::Dist{<:O} where {I,O}
ps = [cpdf(pi, (), x) * get_score(lambda, x) for x in range]
return Cat(range, normalize(ps))
end
end
@impl begin
struct SFuncSendPi end
function send_pi(sf::SFunc{I,O}, range::VectorOption{O}, bel::Dist{O}, lambda_msg::Score)::Dist{<:O} where {I,O}
# pi_msg = [get_score(bel, x) / max.(1e-8, get_score(lambda_msg, x)) for x in range]
f(x,y) = y == -Inf ? -Inf : x - y
ps = [f(logcpdf(bel, (), x), get_log_score(lambda_msg, x)) for x in range]
# delay exponentiation until after avoiding underflow
m = maximum(ps)
qs = ps .- m
exped = exp.(qs)
return Cat(range, normalize(exped))
end
end
@impl begin
struct SFuncOutgoingPis end
function outgoing_pis(sf::SFunc, range::VectorOption, bel::Dist,
incoming_lambdas::VectorOption{<:Score})::Vector{<:Dist}
if length(incoming_lambdas) == 0
return Vector{Dist}()
else
return [send_pi(sf, range, bel, l) for l in incoming_lambdas]
end
end
end
@impl begin
struct SFuncOutgoingLambdas end
function outgoing_lambdas(sf::SFunc{I,O},
lambda::Score,
range::VectorOption,
parranges::NTuple{N,Vector},
incoming_pis::Tuple)::Vector{<:Score} where {N,I,O}
lambdas = Score[]
for i = 1:length(incoming_pis)
msg = send_lambda(sf, lambda, range, parranges, incoming_pis, i)
push!(lambdas, msg)
end
return lambdas
end
end
#=
OBSOLETE - SUBSUMED BY SCORE
evidence_ops.jl : Support for operators that process evidence and interventions
=#
export
evidence_message,
evidence_entry,
intervention_message,
intervention_entry,
NoiseEvidence
import LinearAlgebra
function evidence_entry(hard_evidence::T, value::T) where {T}
return hard_evidence == value ? 1.0 : 0.0
end
function evidence_entry(soft_evidence::Dict{T, Float64}, value::T) where {T}
return get(soft_evidence, value, 0.0)
end
struct NoiseEvidence
mean ::Union{Array{Float64,1}, Float64}
std ::Union{Array{Float64,2}, Float64}
end
function evidence_entry(noise_evid::NoiseEvidence, value::Float64)
val = 1.0/(noise_evid.std * sqrt(2*pi)) * exp(-0.5 * ((noise_evid.mean - value)/noise_evid.std)^2)
return val
end
function evidence_entry(noise_evid::NoiseEvidence, value::Array{Float64,1})
k = length(value)
val = exp.(-0.5 * transpose(value - noise_evid.mean) * LinearAlgebra.inv(noise_evid.std) * (value - noise_evid.mean))/sqrt((2*pi)^k * LinearAlgebra.det(noise_evid.std))
return val
end
function evidence_entry(f::Function, value)::Float64 # returns f(value), which must be a Float64
return f(value)
end
function intervention_entry(hard_evidence::T, value::T) where {T}
return hard_evidence == value ? 1.0 : 0.0
end
function evidence_message(::SFunc{I,O}, range::Vector{O}, evidence::Score{O})::Score{O} where {I,O}
n = length(range)
ps = Array{Float64, 1}(undef, n)
for i = 1:n
ps[i] = get_score(evidence, range[i])
end
    return SoftScore(range, ps)
end
function intervention_message(SFunc, range, intervention)
n = length(range)
ps = Array{Float64, 1}(undef, n)
for i = 1:n
ps[i] = intervention_entry(intervention, range[i])
end
return Cat(range, ps)
end
export FunctionalScore
"""
struct FunctionalScore{I} <: Score{I}
A score defined by a function.
"""
struct FunctionalScore{I} <: Score{I}
fn :: Function # Function I => Double
end
@impl begin
struct FunctionalScoreGetScore end
function get_score(sf::FunctionalScore{I}, i::I)::AbstractFloat where {I}
return sf.fn(i)
end
end
export HardScore
"""
HardScore{I} <: Score{I}
A fixed score.
"""
struct HardScore{I} <: Score{I}
value :: I
end
@impl begin
struct HardScoreGetScore end
function get_score(sf::HardScore{I}, i::I)::AbstractFloat where {I}
i == sf.value ? 1.0 : 0.0
end
end
export LogScore
"""
struct LogScore{I} <: Score{I}
A Log score.
"""
struct LogScore{I} <: Score{I}
logscores :: Dict{I, Float64}
function LogScore(ls::Dict{I, Float64}) where I
new{I}(ls)
end
function LogScore(vs::Vector{I}, ss::Vector{Float64}) where I
# must handle non-unique values correctly
d = Dict{I, Float64}()
for (v,s) in zip(vs,ss)
d[v] = logsumexp([get(d, v, -Inf), s])
end
new{I}(d)
end
end
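# Worked example (illustrative, not part of the original source): duplicate
# values are combined with logsumexp, so LogScore([:a, :a], [log(0.2), log(0.3)])
# stores log(0.5) for :a, and get_score(ls, :a) returns 0.5.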
@impl begin
struct LogScoreGetScore end
function get_score(sf::LogScore{I}, i::I)::AbstractFloat where {I}
return exp(get_log_score(sf, i))
end
end
@impl begin
struct LogScoreGetLogScore end
function get_log_score(sf::LogScore{I}, x::I)::AbstractFloat where {I}
return x in keys(sf.logscores) ? sf.logscores[x] : -Inf
end
end
# MultipleScore lets you assert multiple evidence on the same variable
export MultipleScore
struct MultipleScore{I} <: Score{I}
components :: Vector{<:Score{I}}
end
@impl begin
function get_log_score(ms::MultipleScore{I}, i::I) where I
tot = 0.0
for m in ms.components
tot += get_log_score(m, i)
end
tot
end
end
export NormalScore
"""
struct NormalScore <: Score{Float64}
A score defined by a normal density given the mean and sd of the score.
"""
struct NormalScore <: Score{Float64}
mean :: Float64
sd :: Float64
end
@impl begin
struct NormalScoreGetScore end
function get_score(sf::NormalScore, i::Float64)::AbstractFloat
return normal_density(i, sf.mean, sf.sd)
end
end
export Parzen
"""
struct Parzen <: Score{Float64}
A parzen score.
"""
struct Parzen <: Score{Float64}
means :: Vector{Float64}
sd :: Float64
end
@impl begin
struct ParzenGetScore end
function get_score(sf::Parzen, i::Float64)::AbstractFloat
t = 0.0
for m in sf.means
t += normal_density(i, m, sf.sd)
end
return t / length(sf.means)
end
end
# default implementation
@impl begin
struct ScoreGetLogScore end
function get_log_score(sf::Score{I}, i::I)::AbstractFloat where {I}
return log(get_score(sf, i))
end
end
include("hardscore.jl")
include("softscore.jl")
include("multiplescore.jl")
include("logscore.jl")
include("functionalscore.jl")
include("normalscore.jl")
include("parzen.jl")
export SoftScore
"""
SoftScore(vs::Vector{I}, ss::Vector{Float64})
Return a `LogScore` of the log values in `ss` vector for
the associated keys in `vs`.
"""
function SoftScore(vs::Vector{I}, ss::Vector{Float64}) where I
return LogScore(vs, [log(s) for s in ss])
end
"""
SoftScore(scores::Dict{I,Float64})
Return a `LogScore` of the keys and log values in `score`.
"""
function SoftScore(scores::Dict{I,Float64}) where I
d = Dict([(k,log(v)) for (k,v) in scores])
return LogScore(d)
end
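# Usage sketch (not part of the original source): SoftScore([:a, :b], [0.2, 0.8])
# stores the scores in log space; get_score(s, :a) returns ≈ 0.2 and
# get_log_score(s, :a) returns ≈ log(0.2).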
export Extend
"""
struct Extend{I<:Tuple{Any},J,O} <: SFunc{J,O}
`Extend` defines an sfunc that extend the input of another sfunc.
Useful for defining Separable SFuncs.
# Additional supported operators
- `support`
- `support_quality`
- `sample`
- `logcpdf`
- `bounded_probs`
- `make_factors`
- `compute_pi`
- `send_lambda`
# Type parameters
- `I`: the input type(s) of the extended *sfunc*; it must be a tuple of length 1
- `J`: the input type(s) of the `Extend`
- `O`: the output type(s) of both the `Extend` and the extended *sfunc*
"""
struct Extend{I<:Tuple{Any},J,O} <: SFunc{J,O}
given :: SFunc{I,O} # I must be a Tuple type of length 1
position :: Int
"""
function Extend(J::DataType, given::S, position::Int) where {I<:Tuple{Any},O,S <: SFunc{I,O}}
`Extend`'s constructor
# Arguments
- `J::DataType`: the input type of the `Extend`
- `given::S`: the *sfunc* to extend
    - `position::Int`: the index of the `Extend`'s input that is passed through to `given`
"""
function Extend(J::DataType, given::S, position::Int) where {I<:Tuple{Any},O,S <: SFunc{I,O}}
new{I,J,O}(given, position)
end
end
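# Usage sketch (hypothetical names, not part of the original source): given a
# one-parent sfunc inner::SFunc{Tuple{Int}, Bool}, Extend(Tuple{Int, String}, inner, 1)
# builds an sfunc over (Int, String) inputs that forwards only the first input
# to inner; the operator implementations below ignore the other parent.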
@impl begin
struct ExtendSupport end
function support(sf::Extend{I,J,O},
parranges::NTuple{N,Vector},
size::Integer,
curr::Vector{<:O}) where {I<:Tuple{Any},J,O,N}
parrange = parranges[sf.position]
return support(sf.given, Tuple([[p] for p in parrange]), size, curr)
end
end
@impl begin
struct ExtendSample end
function sample(sf::Extend{I,J,O}, i::J)::O where {I<:Tuple{Any},J,O}
parval = i[sf.position]
return sample(sf.given, tuple(parval))
end
end
@impl begin
struct ExtendLogcpdf end
function logcpdf(sf::Extend{I,J,O}, i::J, o::O)::AbstractFloat where {I<:Tuple{Any},J,O}
parval = i[sf.position]
return logcpdf(sf.given, tuple(parval), o)
end
end
@impl begin
struct ExtendSupportQuality end
function support_quality(sf::Extend{I,J,O}, fullparranges) where {I,J,O}
parrange = fullparranges[sf.position]
parranges = Tuple([[p] for p in parrange])
imp = get_imp(MultiInterface.get_policy(), Support, sf.given, parranges, 0, O[])
return support_quality(imp, sf.given, parranges)
end
end
@impl begin
mutable struct ExtendMakeFactors
numpartitions::Int64 = 10
end
function make_factors(sf::Extend{I,J,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
id,
parids::Tuple)::Tuple{Vector{<:Scruff.Utils.Factor}, Vector{<:Scruff.Utils.Factor}} where {I<:Tuple{Any},J,O,N}
parrange = parranges[sf.position]
parid = parids[sf.position]
return make_factors(sf.given, range, (parrange,), id, (parid,))
end
end
@impl begin
struct ExtendComputePi end
function compute_pi(sf::Extend{I,J,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple)::Dist{<:O} where {N,J,I<:Tuple{Any},O}
thisparrange = parranges[sf.position]
thisincoming_pis = incoming_pis[sf.position]
return compute_pi(sf.given, range, (thisparrange,), (thisincoming_pis,))
end
end
@impl begin
struct ExtendSendLambda end
function send_lambda(sf::Extend{I,J,O},
lambda::Score{<:O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
incoming_pis::Tuple,
parent_idx::Integer)::Score where {I<:Tuple{Any},N,J,O}
thisparrange = parranges[sf.position]
thisincoming_pis = incoming_pis[sf.position]
if parent_idx != sf.position
# This parent is not relevant.
# We need to return a constant: \sum_i \sum_x \pi_i P(x|i) \lambda(x)
# where i ranges over fullparranges[e.position]
cpieces = Vector{Float64}()
for i in 1:length(thisparrange)
pi = logcpdf(thisincoming_pis, (), thisparrange[i])
for j in 1:length(range)
px = logcpdf(sf.given, Tuple(thisparrange[i]), range[j])
push!(cpieces, pi + px + get_log_score(lambda, range[j]))
end
end
c = exp(StatsFuns.logsumexp(cpieces))
l = SoftScore(parranges[parent_idx], fill(c, length(parranges[parent_idx])))
return l
else
# We use parent_idx 1 since e.given has only one parent
l = send_lambda(sf.given, lambda, range, (thisparrange,), (thisincoming_pis,), 1)
return l
end
end
end
# STATS
@impl begin
struct ExtendInitialStat end
initial_stats(sf::Extend) = initial_stats(sf.given)
end
@impl begin
struct ExtendAccumulateStats end
function accumulate_stats(sf::Extend, existing_stats, new_stats)
accumulate_stats(sf.given, existing_stats, new_stats)
end
end
@impl begin
struct ExtendExpectedStats end
function expected_stats(sf::Extend{I,J,O},
range::VectorOption{<:O},
parranges::NTuple{N,Vector},
pis::NTuple{M,Dist},
child_lambda::Score{<:O}) where {I<:Tuple{Any},J,O,N,M}
parrange = parranges[sf.position]
parent_pis = pis[sf.position]
return expected_stats(sf.given, range, tuple(parrange), tuple(parent_pis), child_lambda)
end
end
@impl begin
struct ExtendMaximizeStats end
maximize_stats(sf::Extend, stats) = maximize_stats(sf.given, stats)
end
# STATS END
export
cartesian_product,
normalize,
normalized_product,
make_intervals,
linear_value,
bounded_linear_value,
normal_density,
memo,
doop,
mult_through,
add_through,
ancestors,
topsort,
converged_numeric
###############################################
# #
# Cartesian product of arrays #
# Result contains every combination of inputs #
# #
###############################################
"""
cartesian_product(xs::Tuple)
cartesian_product(xs::Array)
Given an array of arrays, returns the cartesian product of those arrays.
# Examples
```julia-repl
julia> cartesian_product([[1,2],[3,4]])
4-element Array{Array{Int64,1},1}:
[1, 3]
[1, 4]
[2, 3]
[2, 4]
julia> cartesian_product([[1,2],[3,4],[5,6]])
8-element Array{Array{Int64,1},1}:
[1, 3, 5]
[1, 3, 6]
[1, 4, 5]
[1, 4, 6]
[2, 3, 5]
[2, 3, 6]
[2, 4, 5]
[2, 4, 6]
```
"""
cartesian_product(xs::Tuple) = cartesian_product([x for x in xs])
function cartesian_product(xs :: Array)
if isempty(xs)
result = Array{Any, 1}[[]]
else
yss = cartesian_product(xs[2:end])
result = Array{Any, 1}[]
for x in xs[1]
for ys in yss
zs = copy(ys)
pushfirst!(zs, x)
push!(result, zs)
end
end
end
return result
end
########################################################
# #
# Normalize an array of non-negative reals to sum to 1 #
# #
########################################################
"""
normalized_product(xss, n)
Compute the product of the given arrays of length n and normalize the result.
Uses log computations to avoid underflow.
"""
function normalized_product(xss, n)
rs = zeros(Float64, n)
for xs in xss
rs .+= [log(x) for x in xs]
end
z = max(rs...)
rs .-= z
ps = [exp(x) for x in rs]
return normalize(ps)
end
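# Worked example (illustrative, not part of the original source):
# normalized_product([[1.0, 2.0], [3.0, 4.0]], 2) forms the pointwise product
# [3.0, 8.0] and normalizes it to [3/11, 8/11].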
#################################
# #
# Continuous variable utilities #
# #
#################################
"""
make_intervals(range)
Given a range of values of a continuous variable, create interval bins
surrounding the values
"""
function make_intervals(range)
srng = copy(range)
sort!(srng)
last = -Inf
result = Tuple{Float64, Float64}[]
for i = 2:length(srng)
next = (srng[i-1] + srng[i]) / 2
push!(result, (last, next))
last = next
end
push!(result, (last, Inf))
return result
end
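# Worked example (illustrative, not part of the original source):
# make_intervals([1.0, 2.0, 4.0]) returns [(-Inf, 1.5), (1.5, 3.0), (3.0, Inf)];
# each value owns the interval between the midpoints to its neighbors, with the
# extreme intervals extending to -Inf and Inf.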
"""
linear_value(weights, bias, continuous_combo)
Weight and sum the `continuous_combo` with the given bias
"""
function linear_value(weights, bias, continuous_combo)
result = bias
for (weight, parent_value) in zip(weights, continuous_combo)
result += weight * parent_value
end
return result
end
"""
bounded_linear_value(weights, bias, interval_combo)
Weight and sum the upper and lower bounds in `interval_combo` with the
given bias
"""
function bounded_linear_value(weights, bias, interval_combo)
lower = bias
upper = bias
# This code assumes that the intervals are correctly ordered
for (weight, (lower_parent, upper_parent)) in zip(weights, interval_combo)
if weight < 0
lower += weight * upper_parent
upper += weight * lower_parent
else
lower += weight * lower_parent
upper += weight * upper_parent
end
end
return (lower, upper)
end
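# Worked example (illustrative, not part of the original source): with
# weights [2.0, -1.0], bias 0.5, and intervals [(0.0, 1.0), (2.0, 3.0)],
# the negative weight swaps its interval's endpoints, giving
# (0.5 + 2*0 - 1*3, 0.5 + 2*1 - 1*2) = (-2.5, 0.5).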
"""
normal_density(x, mean, variance)
Get the normal density of `x`
"""
function normal_density(x, mean, variance)
d = sqrt(2 * pi * variance)
e = -0.5 * (x-mean) * (x-mean) / variance
return exp(e) / d
end
"""
memo(f::Function)
returns a memoized one argument function
"""
function memo(f::Function)
cache = Dict()
function apply(arg)
if arg in keys(cache)
return cache[arg]
else
result = f(arg)
cache[arg] = result
return result
end
end
return apply
end
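# Usage sketch (not part of the original source):
#     cached = memo(x -> (println("computing"); x + 1))
#     cached(1)   # prints "computing", returns 2
#     cached(1)   # returns 2 from the cache without recomputing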
####################################################################################
# #
# Functions for performing an arithmetic operation recursively on a data structure #
# #
####################################################################################
# This avoids having to write both forms of the function.
# Writing both forms could cause ambiguity.
# Will cause stack overflow if neither form is defined.
# Assumes op is commutative.
doop(x::Any, y::Any, op) = doop(y,x,op)
function doop(x::Float64, y::Float64, op::Function)
return op(x,y)
end
function doop(x::Dict, y::Any, op)
result = Dict()
xf = floatize(x)
for (k,v) in xf
result[k] = doop(v, floatize(y), op)
end
return result
end
function doop(x::Dict, y::Dict, op)
result = Dict()
xf = floatize(x)
yf = floatize(y)
for k in keys(xf)
result[k] = doop(xf[k], yf[k], op)
end
return result
end
function doop(xs::Array, y::Any, op)
result = floatize(xs)
for i = 1:length(xs)
result[i] = doop(xs[i], floatize(y), op)
end
return result
end
function doop(xs::Array, ys::Array, op)
xf = floatize(xs)
yf = floatize(ys)
return [doop(xf[i], yf[i], op) for i in 1:length(xs)]
end
function doop(xs::Tuple, y::Any, op)
xf = floatize(xs)
return ntuple(i -> doop(xf[i], floatize(y), op), length(xf))
end
function doop(xs::Tuple, ys::Tuple, op)
xf = floatize(xs)
yf = floatize(ys)
return ntuple(i -> doop(xf[i], yf[i], op), length(xs))
end
mult_through(x,y) = doop(x, y, (x,y) -> x*y)
add_through(x,y) = doop(x, y, (x,y) -> x+y)
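# Worked examples (illustrative, not part of the original source):
# mult_through([1.0, 2.0], 3.0) == [3.0, 6.0]
# add_through(Dict(:a => 1.0), 2.0) == Dict(:a => 3.0)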
floatize(x) = _transform(y -> Float64(y), x)
#=
Topological sort
=#
"""
ancestors(graph :: Dict{U, Vector{U}}, var :: U, found:: Set{U}) :: Vector{U} where U
Find the ancestors of the given value x in the graph. Found is a set of previously
found ancestors, to handle cyclic graphs and avoid infinite loops
"""
# Need to handle cyclic graphs
function ancestors(graph :: Dict{U, Vector{U}}, var :: U, found:: Set{U}) :: Vector{U} where U
result = Vector{U}()
for par in get(graph, var, [])
if isa(par, U) && !(par in found)
push!(found, par) # Prevent infinite recursion
append!(result, ancestors(graph, par, found))
push!(result, par) # Guarantee that the ancestors appear before par
end
end
return result
end
"""
topsort(graph::Dict{T, Vector{T}}) :: Vector{T} where T
Performs a topological sort on the given graph. In a cyclic graph, the order of variables in the cycle
is arbitrary, but they will be correctly sorted relative to other variables.
"""
function topsort(graph::Dict{U, Vector{U}}) :: Vector{U} where U
result = Vector{U}()
found = Set{U}()
for var in keys(graph)
if !(var in found)
push!(found, var)
s = Set{U}([var])
ancs = ancestors(graph, var, s) # guaranteed to be in topological order
for anc in ancs
if !(anc in found)
push!(found, anc)
push!(result, anc)
end
end
push!(result, var) # after the ancestors
end
end
return result
end
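# Worked example (illustrative, not part of the original source):
# topsort(Dict(:b => [:a], :c => [:b])) returns [:a, :b, :c]; :a appears even
# though it is not a key, because it is discovered as an ancestor.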
diff(x :: Number, y :: Number) = abs(x-y)
function diff(xs :: Dict, ys :: Dict)
total = 0.0
for k in keys(xs)
total += diff(xs[k], ys[k])
end
return total
end
function diff(xs :: Vector, ys :: Vector)
total = 0.0
for i in eachindex(xs)
total += diff(xs[i], ys[i])
end
return total
end
function same_structure_and_num_params(:: Number, :: Number)
return (true, 1)
end
function same_structure_and_num_params(xs :: Dict, ys :: Dict)
if Set(keys(xs)) != Set(keys(ys))
return (false, 0)
end
np = 0
for k in keys(xs)
(b, n) = same_structure_and_num_params(xs[k], ys[k])
if !b
return (false, np)
else
np += n
end
end
return (true, np)
end
function same_structure_and_num_params(xs :: Vector, ys :: Vector)
if length(xs) != length(ys)
return (false, 0)
end
np = 0
for i in eachindex(xs)
(b, n) = same_structure_and_num_params(xs[i], ys[i])
if !b
return(false, np)
else
np += n
end
end
return(true, np)
end
function converged_numeric(oldp, newp, eps::Float64 = 0.01)
(same_structure, num_params) = same_structure_and_num_params(oldp, newp)
if !same_structure
return false
end
return diff(newp, oldp) / num_params < eps
end
export
_complete,
depth,
expander_complete,
expansions,
expanded,
expansion,
subruntime,
clear_analysis!,
ensure_expansion_state!,
expand!,
expander_probs
function expander_complete(runtime :: Runtime, v :: Variable, parent_ranges)
for p in parent_ranges[1]
if !expanded(runtime, v, p) return false end
subnet = expansion(runtime, v, p)
subrun = subruntime(runtime, v, subnet)
if !_complete(subrun) return false end
end
return true
end
# This method _only_ works if the model being operated upon contains an
# operator called 'support_quality'
function _complete(runtime :: Runtime)
net = runtime.network
order = topsort(get_initial_graph(net))
for var in order
if !has_instance(runtime, var)
return false
end
inst = current_instance(runtime, var)
sf = get_sfunc(inst)
if !has_range(runtime, inst)
return false
end
parranges = parent_ranges(runtime, var)
imp = get_imp(MultiInterface.get_policy(), Support, sf, parranges, 0, output_type(sf)[])
# if is_fixed(var.model) && isa(sf, Expander)
if isa(sf, Expander)
if !expander_complete(runtime, var, Tuple(parranges))
return false
end
elseif !(support_quality(imp, sf, parranges) == :CompleteSupport)
return false
end
end
return true
end
function expansions(runtime::InstantRuntime, var::Variable) :: Dict
exp :: Expander = make_initial(var.model)
mod = var.model
has_state(runtime, :subnets) || return Dict()
exps = get_state(runtime, :subnets)
mod in keys(exps) || return Dict()
return exps[mod]
end
function expanded(runtime::Runtime, var::Variable, arg)
return arg in keys(expansions(runtime, var))
end
function expansion(runtime :: Runtime, var :: Variable, arg) :: Network
return expansions(runtime, var)[arg]
end
function subruntime(runtime::Runtime, var::Variable, net)
subruns = get_state(runtime, :subruntimes)
return subruns[net]
end
function depth(runtime::Runtime) :: Int
if has_state(runtime, :depth)
get_state(runtime, :depth)
else
0
end
end
function ensure_expansion_state!(runtime::Runtime)
has_state(runtime, :subnets) || set_state!(runtime, :subnets, Dict())
has_state(runtime, :subruntimes) || set_state!(runtime, :subruntimes, Dict())
end
# Returns the expanded subnet and a flag indicating whether it was newly expanded
function expand!(runtime::InstantRuntime, var::Variable, arg)
ensure_expansion_state!(runtime)
exp :: Expander = make_initial(var.model)
mod = var.model
exps = get_state(runtime, :subnets)
subruns = get_state(runtime, :subruntimes)
if !(mod in keys(exps))
exps[mod] = Dict()
elseif arg in keys(exps[mod])
return (exps[mod][arg], false)
end
subnet = apply(exp, arg)
subrun = Runtime(subnet)
# push all parent variables to subruntime
set_state!(subrun, :parent_env, get_env(runtime))
# we associate a subrun with the network, in case the same network is
# produced for multiple arguments, so we avoid repeating computation for all
# the arguments that produce the same network
subruns[subnet] = subrun
ensure_all!(subrun)
exps[mod][arg] = subnet
return (subnet, true)
end
# analysis is 'per network', so we have to make sure we track per-network
network_analysis = Dict{Network, Dict{Symbol, Any}}()
clear_analysis!() = empty!(network_analysis)
function has_analysis(net,sym)
haskey(network_analysis, net) && haskey(network_analysis[net], sym)
end
get_analysis(net,sym) = network_analysis[net][sym]
function add_analysis!(net,sym,val)
get!(network_analysis, net, Dict{Symbol, Any}())[sym] = val
end
function expander_probs(runtime::Runtime, fn::Function, v::Variable, depth::Int)
net = get_network(runtime)
par = get_parents(net, v)[1]
parinst = current_instance(runtime, par)
inst = current_instance(runtime, v)
if has_range(runtime, parinst)
parrange = get_range(runtime, parinst, depth)
else
parrange = []
end
if has_range(runtime, inst)
range = get_range(runtime, inst, depth)
else
range = []
end
lowers = Float64[]
uppers = Float64[]
function make_unexpanded_row()
for i in 1:length(range)
push!(lowers, 0)
push!(uppers, 1)
end
end
for p in parrange
if depth < 2 || !expanded(runtime, v, p)
make_unexpanded_row()
else
subnet = expansion(runtime, v, p)
subrun = subruntime(runtime, v, subnet)
output = get_node(subnet, :out)
need_to_compute = true
if has_analysis(subnet, :depthsolution)
(saved_depth, saved_range, solution) =
get_analysis(subnet, :depthsolution)
# To be able to reuse the solution to define this node's CPD,
# it must have the required depth and range
if saved_depth >= depth && saved_range == range
need_to_compute = false
(lfact, ufact) = solution
end
end
if need_to_compute
order = topsort(get_initial_graph(subnet))
((lfact, ufact), _) = fn(subrun, order, [output], depth - 1)
add_analysis!(subnet, :depthsolution,
(depth, range, (lfact, ufact)))
end
lsum = sum(lfact.entries)
output_range = get_range(subrun, output, depth - 1)
inds = indexin(range, output_range)
for i in 1:length(range)
if isempty(inds) || isnothing(inds[i])
# If this value is not in the output range, then it
# might not be in the range of the output, so has
# probability 0, or it might be in the range but not
                    # expanded yet. But its probability cannot be more than
# 1 - lsum
push!(lowers, 0)
push!(uppers, 1 - lsum)
else
if isempty(lfact.entries)
push!(lowers, 0)
else
push!(lowers, lfact.entries[inds[i]])
end
if isempty(ufact.entries)
push!(uppers, 0)
else
push!(uppers, ufact.entries[inds[i]])
end
end
end
end
end
return (lowers, uppers)
end
| Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | code | 8791 | import Base.show
import Base.isapprox
using Folds
export
Factor,
show,
product,
sum_over,
nextkey,
normalize
"""
struct Factor{N}
Representation of a factor over `N` instances.
# arguments
dims a tuple of dimensions of the instances
keys ids of the instances
entries a vector of factor values
"""
struct Factor{N, T <: Real}
dims :: NTuple{N, Int}
keys :: NTuple{N, Int}
entries :: Vector{T}
function Factor(dims::Nothing, keys::Nothing, entries::Vector{T}) where T
return new{0, T}((), (), entries)
end
function Factor(dims::NTuple{N, Int}, keys, entries::Vector{T}) where {N, T}
return new{N, T}(dims, keys, entries)
end
end
"""
function normalize(f::Factor)
Return a new factor equal to the given factor except that entries sum to 1
"""
function normalize(f::Factor)
z = sum(f.entries)
new_entries = [e / z for e in f.entries]
return Factor(f.dims, f.keys, new_entries)
end
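# Worked example (illustrative, not part of the original source):
# normalize(Factor((2,), (1,), [1.0, 3.0])) has entries [0.25, 0.75].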
"""
nextkey()::Int
Produce a fresh instance id that does not conflict with an existing id.
"""
nextkey = begin
count = 0
f() = begin
global count
count += 1
return count
end
f
end
function Base.isapprox(fact1 :: Factor, fact2 :: Factor, epsilon = 0.0001)
if fact1.dims != fact2.dims || fact1.keys != fact2.keys
return false
end
len = length(fact1.entries)
return all(map(i -> isapprox(fact2.entries[i], fact1.entries[i]; atol = epsilon), 1:len))
end
###########################
# #
# Pretty printing factors #
# #
###########################
function show_boundary(N, space)
total = (space + 1) * (N + 1) + 1
for i in 1:total
print("#")
end
println()
end
get_content(content :: String) = content
get_content(content) = sprint(show, content)
function show_spaced(space, content)
c = get_content(content)
init_gap = div(space - length(c), 2)
for i in 1:init_gap
print(' ')
end
print(c)
for i in 1:space - length(c) - init_gap
print(' ')
end
end
function show_content(N, space, content)
for i in 1:N
print('#')
show_spaced(space, content[i])
end
print('#')
for i in 1:space
print(' ')
end
print('#')
println()
end
"""
function show(f::Factor)
Print the factor in an easy to read format.
"""
function show(f :: Factor{0})
# This factor should have been produced by summing out all the variables
# and should have one row
space = length(string(f.entries[1])) + 2
show_boundary(0, space)
print('#')
show_spaced(space, f.entries[1])
println('#')
show_boundary(0, space)
end
function show(f :: Factor{N}) where N
max1 = max(map(k -> length(string(k)), f.keys)...)
max2 = max(map(k -> length(string(k)), f.entries)...)
space = max(max1, max2) + 2
show_boundary(N, space)
show_content(N, space, f.keys)
show_boundary(N, space)
mults = Array{Int64}(undef, N)
mults[N] = 1
for k in N-1:-1:1
mults[k] = mults[k+1] * f.dims[k+1]
end
for i in eachindex(f.entries)
vs = []
j = i-1
for k in 1:N
(r, j) = divrem(j, mults[k])
print("#")
show_spaced(space, r + 1)
end
print("#")
show_spaced(space, f.entries[i])
println("#")
end
show_boundary(N, space)
end
##################
# #
# Factor product #
# #
##################
# The product computation is written so that it can be compiled and optimized
# once the sizes of the input factors (N1 and N2),
# dimensions of the variables (D1 and D2),
# and join indices (J) are known.
struct ProdOp{N1, N2, D1, D2, J} end
function mults(dims)
n = length(dims)
result = Array{Int64}(undef, n)
result[n] = 1
for k in (n - 1):-1:1
result[k] = result[k+1] * dims[k+1]
end
return result
end
function do_prod(N1, N2, J, f1, f2)
# The result factor is constructed so that the variables in the second
# input come first, in order, and then any variables in the first factor
# that are not in the join come in order.
# We construct DR, the dimensions of variables in the result factor,
# as well as the keys of the result factor.
# We work out the instructions,
# which will be used to indicate which rows of the two input factors
# any row of the result factor will be composed from.
D1 = f1.dims
D2 = f2.dims
instructions = Vector{Vector{Tuple{Int, Int}}}(undef, N2)
NR = N2
rda = Vector{Int}([d for d in D2])
rk = Vector{Int}([k for k in f2.keys])
for j = 1:N2
instructions[j] = [(2, j)]
end
for (i, j) in J
if j == 0
NR += 1
push!(instructions, [(1, i)])
push!(rda, D1[i])
push!(rk, f1.keys[i])
else
push!(instructions[j], (1, i))
end
end
DR = NTuple{NR, Int}(rda)
mults1 = mults(f1.dims)
mults2 = mults(f2.dims)
multsr = mults(DR)
rkeys = NTuple{NR, Int}(rk)
result = Array{Float64}(undef, Folds.reduce(*, DR))
for i in eachindex(result)
# Cartesian indices don't work here because you can't get inside them
# So we have to build up the index ourselves
idx1 = Array{Int16}(undef, N1)
idx2 = Array{Int16}(undef, N2)
j = i-1
for k = 1:NR
instr = instructions[k]
(r, j) = divrem(j, multsr[k])
e = r + 1
if length(instr) == 2
idx2[instr[1][2]] = e
idx1[instr[2][2]] = e
elseif instr[1][1] == 1
idx1[instr[1][2]] = e
else
idx2[instr[1][2]] = e
end
end
x1 = sum(map(q -> (idx1[q] - 1) * mults1[q], 1:N1)) + 1
x2 = sum(map(q -> (idx2[q] - 1) * mults2[q], 1:N2)) + 1
result[i] = f1.entries[x1] * f2.entries[x2]
end
return Factor(DR, rkeys, result)
end
function product(f1 :: Factor{N1}, f2 :: Factor{N2}) where {N1, N2}
js = map(k -> findfirst(x -> x == k, f2.keys), f1.keys)
ijs :: Array{Tuple{Int16, Int16}, 1} = Array{Tuple{Int16, Int16}, 1}(undef, length(f1.keys))
for i = 1:length(f1.keys)
j = length(f1.keys) - i + 1
y = isnothing(js[j]) ? 0 : js[j]
ijs[i] = (j, y)
end
matches = NTuple{N1, Tuple{Int16, Int16}}(ijs)
return do_prod(N1, N2, matches, f1, f2)
end
###########################
# #
# Summing over a variable #
# #
###########################
struct SumOp{N, D, I} end
# Special case: Summing out the only variable in a factor
function do_sum(op :: SumOp{1, D, 1}, f :: Factor) where D
x = sum(f.entries)
dims = nothing
keys = nothing
return Factor(dims, keys, [x])
end
function do_sum(op :: SumOp{N, D, I}, f :: Factor) where {N, D, I}
rdims = []
rkeys = []
for i in 1:I-1
push!(rdims, f.dims[i])
push!(rkeys, f.keys[i])
end
for i in I+1:N
push!(rdims, f.dims[i])
push!(rkeys, f.keys[i])
end
rdims = Tuple(rdims)
rkeys = Tuple(rkeys)
    # The summation is efficiently implemented by computing a pattern of
# moves through the input factor's entries as we accumulate the result
# factor's entries.
# For any given entry of the result, we add a number of entries of the
# input. inner_skip is the gap between those entries.
# We also do inner_skip parallel sums per block, before skipping to the next
# block. outer_skip is the amount to skip to the next block.
inner_skip = 1
for k = I+1:N
inner_skip *= D[k]
end
outer_skip = inner_skip * D[I]
result_size = inner_skip
for k = 1:I-1
result_size *= D[k]
end
result = Array{Float64}(undef, result_size)
if result_size == 0 # This can legitimately happen for unexpanded Expander
return Factor(rdims, rkeys, result)
end
section_start = 1
result_index = 1
for j = 1 : div(result_size, inner_skip)
start = section_start
for k = 1 : inner_skip
total = 0.0
orig_index = start
for l = 1 : D[I]
total += f.entries[orig_index]
orig_index += inner_skip
end
result[result_index] = total
result_index += 1
start += 1
end
section_start += outer_skip
end
return Factor(rdims, rkeys, result)
end
function sum_over(f :: Factor{N}, key :: Int) where N
for i = 1:N
if f.keys[i] == key
return do_sum(SumOp{N, f.dims, i}(), f)
end
end
end
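# Worked example (illustrative, not part of the original source): for
# f = Factor((2, 2), (1, 2), [0.1, 0.2, 0.3, 0.4]) (key 1 varies slowest),
# sum_over(f, 2) sums out key 2, giving Factor((2,), (1,), [0.3, 0.7]).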
| Scruff | https://github.com/charles-river-analytics/Scruff.jl.git |
|
[
"BSD-3-Clause"
] | 0.9.0 | bfd0515d3e2361c639b104b8f4c919c80ee5c91b | code | 1755 | export
default_initializer
using ..Operators
"""
Deprecated
"""
function default_initializer(runtime::InstantRuntime, default_range_size::Int=10,
placeholder_beliefs :: Dict = Dict())
ensure_all!(runtime)
for (pname, bel) in placeholder_beliefs
node = get_node(runtime, pname)
inst = current_instance(runtime, node)
post_belief!(runtime, inst, bel)
end
order = topsort(get_initial_graph(get_network(runtime)))
set_ranges!(runtime, Dict{Symbol, Score}(), default_range_size, 1, order, placeholder_beliefs)
end
#=
function default_initializer(runtime::DynamicRuntime{T}, default_time::T, default_range_size::Int=10) where {T}
ensure_all!(runtime, default_time)
net = get_network(runtime)
ord = topsort(get_transition_graph(net))
ranges = Dict{Symbol, Array{Any, 1}}()
for var in ord
inst = current_instance(runtime, var)
sf = get_sfunc(inst)
parranges = collect(map(p -> ranges[p.name], get_parents(net, var)))
rng = support(sf, Tuple(parranges), default_range_size, Vector{output_type(sf)}())
set_range!(runtime, inst, rng)
ranges[var.name] = rng
end
end
function default_initializer(runtime::DynamicRuntime{Int})
default_initializer(runtime, 0)
end
=#
function preserve_evidence(evid_i, typeOfEvidence)
preserve_values = Vector{typeOfEvidence}()
if isa(evid_i, Dict) # soft evidence
        append!(preserve_values, collect(keys(evid_i)))
elseif isa(evid_i, NoiseEvidence) # noise evidence
push!(preserve_values, evid_i.mean)
elseif isa(evid_i, Function)# hard evidence
# do nothing
else # hard evidence
push!(preserve_values, evid_i)
end
return preserve_values
end
"""
logplots.jl contains code to help gain insight into belief propagation.
It creates a Logger called `BPLogger` that plots the results, for
a single variable, of the following methods in the three pass BP algorithm:
* compute_pi
* compute_lambda
* compute_bel
* incoming_pi
* outgoing_pis
To use `BPLogger`, first install `Plots` and use `pyplot()`. Then,
```
julia> include("src/utils/logplots.jl")
histogram_plot_discrete (generic function with 1 method)
julia> logger = BPLogger(:x1) # :x1 is the name of the variable being plotted
BPLogger(:x1)
julia> with_logger(logger) do
include("mybpcall.jl") # mybpcall.jl contains a call to threepassbp()
end
```
"""
export BPLogger
import Logging: handle_message, shouldlog, min_enabled_level
using Plots
using Logging
struct BPLogger <: AbstractLogger
varname::Vector{Symbol}
end
BPLogger(varname::Symbol...) = BPLogger(collect(varname))
function handle_message(
logger::BPLogger,
level,
message,
_module,
group,
id,
filepath,
line; kwargs...)
if isempty(kwargs) || !in(:type, keys(kwargs))
with_logger(global_logger()) do
@logmsg level message kwargs...
end
return
end
d = Dict(kwargs...)
type = get(d, :type, :cont)
if (type != :cont && type != :discrete)
write_missing("Value of :type must be either :cont or :discrete, not $(type)",
level, _module, filepath, line; kwargs...)
return
end
if (type == :cont && !in(:numBins, keys(kwargs)))
write_missing("Continuous missing a required parameter [:numBins]",
level, _module, filepath, line; kwargs...)
return
end
if !issubset([:range,:prob,:varname,:fname,:name], keys(kwargs))
write_missing("Missing a required parameter [:range,:prob,:varname,:fname,:name]",
level, _module, filepath, line; kwargs...)
return
end
if (d[:varname] in logger.varname)
if (type == :cont)
histogram_plot(
d[:range],
d[:prob],
d[:numBins],
d[:varname],
d[:fname],
d[:name])
else
histogram_plot_discrete(
d[:range],
d[:prob],
d[:varname],
d[:fname],
d[:name])
end
end
end
function write_missing(msg, level, _module, filepath, line; kwargs...)
buf = IOBuffer()
iob = IOContext(buf, stderr)
levelstr = level == Logging.Warn ? "Warning" : string(level)
msglines = split(chomp(string(msg)::String), '\n')
println(iob, "┌ ", levelstr, ": ", msglines[1])
for i in 2:length(msglines)
println(iob, "│ ", msglines[i])
end
for (key, val) in kwargs
println(iob, "│ ", key, " = ", val)
end
println(iob, "└ @ ", _module, " ", filepath, ":", line)
write(stderr, take!(buf))
nothing
end
function shouldlog(logger::BPLogger, level, _module, group, id)
true
# group == :threepassbp
end
function min_enabled_level(logger::BPLogger)
Logging.Debug
end
"""
histogram_plot(range, prob, numBins::Int64, name::String)
A utility function that, if the Plots module is loaded, returns a histogram
bar graph; otherwise it returns a string with the values that would have
been a histogram
"""
function histogram_plot(range, prob, numBins::Int64, varname::Symbol, fname, name::String)
@debug("histogram_plot", range=range,
prob=prob,numBins=numBins,varname=varname,
fname=fname,name=name)
if isempty(prob)
prob = zeros(length(range))
end
range_min = minimum(range)
range_max = maximum(range)
if(range_min==range_max)
range_max = range_min+1
end
numBins = max(min(length(range), numBins),1)
stepsize = (range_max - range_min)/ numBins
samplespace = range_min : stepsize : range_max
bins = zeros(numBins)
for (i,lb) in enumerate(samplespace)
curr_bin_min = samplespace[i]
curr_bin_max = samplespace[i] + stepsize
idx = findall(x -> curr_bin_min <= range[x] < curr_bin_max, 1:length(range))
if(!isempty(idx))
if(i==length(samplespace)) # last elements
bins[i-1] += sum(prob[idx])
else
bins[i] = sum(prob[idx])
end
end
end
x_min = samplespace[1]
x_max = samplespace[end] + stepsize
@debug "bin_centers=$(collect(samplespace)[1:numBins] .+ (stepsize/2)), bins=$bins"
gui(bar(samplespace[1:numBins].+(stepsize/2), bins; reuse=false,title="$(varname).$(fname)", label=name, xlims=(x_min-2, x_max+2), hover = samplespace[1:numBins].+(stepsize/2), legend=true))
end
function histogram_plot_discrete(range, prob:: Array{Float64,1}, varname, fname, name::String)
gui(bar(string.(range),prob; reuse=false,title="$(varname).$(fname)",label=name, hover = string.(range), legend=true))
end
export
parent_ranges,
get_range,
has_range,
set_range!,
set_ranges!,
expander_range
#####################################################################
# #
# Algorithm building blocks for setting and expanding the ranges of #
# all the current instances in a runtime. #
# #
# order is a topological sort of the network in the runtime. #
# size is a uniform target size for each instance. #
# #
#####################################################################
"""
parent_ranges(runtime::Runtime, var::Variable{I,J,O}, depth = typemax(Int)) where {I,J,O}
Returns the ranges of the parents of the given variable.
See [`Scruff.get_range`](@ref)
"""
function parent_ranges(runtime::Runtime, var::Variable{I,J,O}, depth = typemax(Int)) where {I,J,O}
net = get_network(runtime)
pars = get_parents(net, var)
result = []
for p in pars
pinst = current_instance(runtime, p)
push!(result, get_range(runtime, pinst, depth))
end
T = isempty(result) ? Vector{O} : typejoin([typeof(x) for x in result]...)
return convert(Vector{T}, result)
end
"""
RANGE
The constant key used to store the range of a specific variable instance
"""
const RANGE = :__range__
"""
set_range!(runtime::Runtime, inst::Instance{O}, range::Vector{<:O}, depth::Int = 1) where O
Sets the range value for the given instance. Defaults to depth of 1.
"""
function set_range!(runtime::Runtime, inst::Instance{O}, range::Vector{<:O}, depth::Int = 1) where O
if has_value(runtime, inst, RANGE)
curr = get_value(runtime, inst, RANGE)
s = Tuple{Int, Vector{O}}[]
i = 1
while i <= length(curr)
pair = curr[i]
d = pair[1]
            if d > depth
                push!(s, pair)
                i += 1
            else
                # insert (depth, range) here so entries stay sorted by decreasing depth
                push!(s, (depth, range))
                i = d == depth ? i+1 : i
                for j = i:length(curr)
                    push!(s, curr[j])
                end
                set_value!(runtime, inst, RANGE, s)
                return
            end
        end
push!(s, (depth, range))
set_value!(runtime, inst, RANGE, s)
else
set_value!(runtime, inst, RANGE, [(depth, range)])
end
end
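# Worked example (illustrative, not part of the original source): after
# set_range!(rt, inst, [1, 2], 3) and then set_range!(rt, inst, [1], 1), the
# stored value is [(3, [1, 2]), (1, [1])] -- entries sorted by decreasing depth;
# get_range(rt, inst, 2) returns [1], the first entry with depth <= 2.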
"""
    get_range(runtime::Runtime, inst::Instance, depth = typemax(Int))
Returns the range value for the given instance; this will return
`nothing` if no range has been set.
The depth specifies the maximum depth of range desired.
"""
function get_range(runtime::Runtime, inst::Instance, depth = typemax(Int))
has_range(runtime, inst, depth) || return nothing
rng = get_value(runtime, inst, RANGE)
for i in 1:length(rng)
(d,r) = rng[i]
if d <= depth
return r
end
end
return nothing
end
function has_range(runtime::Runtime, inst::Instance, depth::Int = typemax(Int))
has_value(runtime, inst, RANGE) || return false
r = get_value(runtime, inst, RANGE)
(d,_) = r[length(r)]
return d <= depth
end
"""
get_range(runtime::DynamicRuntime, v::Variable{I,J,O}, depth = 1) where {I,J,O}
Returns the range of the most recent instance of the given variable.
"""
function get_range(runtime::DynamicRuntime, v::Variable{I,J,O}, depth = 1) where {I,J,O}
inst = current_instance(runtime, v)
range = get_range(runtime, inst, depth)
if range !== nothing
return range
elseif has_previous_instance(runtime, v)
prev = previous_instance(runtime, v)
if has_range(runtime, prev)
return get_range(runtime, prev, depth)
else
return O[]
end
else
return O[]
end
end
"""
get_range(runtime::InstantRuntime, v::Node{O}, depth = 1) where O
Returns the range of the given node.
"""
function get_range(runtime::InstantRuntime, v::Node{O}, depth = 1) where O
inst = runtime.instances[v]
range = get_range(runtime, inst, depth)
if range !== nothing
return range
else
return O[]
end
end
"""
set_ranges!(runtime::InstantRuntime, evidence = Dict{Symbol, Score}(),
size = 10, depth = 1,
order = topsort(get_initial_graph(get_network(runtime))),
placeholder_beliefs = get_placeholder_beliefs(runtime))
Set the ranges of all current instances in the runtime.
This method first checks whether ranges exist for the runtime at the desired depth,
with the desired range size, and with the same evidence. If so, it doesn't do anything.
If the depth is less than 1, it doesn't do anything.
Otherwise, it uses the support operator to compute ranges of variables in order.
Placeholders should have ranges set already in `placeholder_beliefs`.
For expanders, it recursively sets the ranges of the subnetworks at depth - 1.
Returns a flag indicating whether any instance has a changed range.
"""
function set_ranges!(runtime::InstantRuntime,
evidence::Dict{Symbol, <:Score} = Dict{Symbol, Score}(),
size :: Int = 10, depth :: Int = 1,
order = topsort(get_initial_graph(get_network(runtime))),
placeholder_beliefs = get_placeholder_beliefs(runtime))
if depth < 1
return false
end
nodes = get_nodes(get_network(runtime))
# If we require greater depth or greater size or different evidence, we have to do it again
if has_state(runtime, :nodes) && has_state(runtime, :range_size) && has_state(runtime, :range_depth) && has_state(runtime, :range_evidence) &&
get_state(runtime, :range_size) >= size && get_state(runtime, :range_depth) >= depth && get_state(runtime, :range_evidence) == evidence &&
get_state(runtime, :nodes) == nodes
return false
else
set_state!(runtime, :range_size, size)
set_state!(runtime, :range_depth, depth)
set_state!(runtime, :range_evidence, evidence)
set_state!(runtime, :nodes, nodes)
end
changed = false
for v in order
O = output_type(v)
rng::Vector{O} = O[]
curr = get_range(runtime, v, depth)
inst = current_instance(runtime, v)
if v isa Placeholder
pi = placeholder_beliefs[v.name]
rng = support(pi, (), size, output_type(v)[])
chng = rng != curr
changed = changed || chng
else
sf = get_sfunc(inst)
if length(curr) < size
parranges = parent_ranges(runtime, v, depth)
if isa(sf, Expander)
(rng, chng) = expander_range(runtime, v, size, depth)
else
rng = support(sf, tuple(parranges...), size, curr)
chng = rng != curr
end
if v.name in keys(evidence)
ev = evidence[v.name]
if ev isa HardScore
if !(ev.value in rng)
push!(rng, ev.value)
chng = true
end
elseif ev isa LogScore
for k in keys(ev.logscores)
if !(k in rng)
push!(rng, k)
chng = true
end
end
end
end
changed = changed || chng
else
rng = curr
end
end
set_range!(runtime, inst, rng, depth)
end
if changed
delete_state!(runtime, :solution)
end
return changed
end
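# Example usage (a sketch; assumes `net` is an InstantNetwork with a node
# named :x2 whose output type is Int):
#
#     rt = Runtime(net)
#     default_initializer(rt)
#     changed = set_ranges!(rt, Dict{Symbol, Score}(:x2 => HardScore(2)), 10, 1)
#     # evidence values are added to a range even if `support` missed them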
"""
function expander_range(runtime :: Runtime, v :: Variable,
target_size :: Int, depth :: Int)
Recursively compute the range of the expander and subnetworks up to the given depth.
Computing the range of the expander expands enough of the parent
range to reach the desired target size, or expands all the parents fully.
"""
function expander_range(runtime :: Runtime, v :: Variable,
target_size :: Int, depth :: Int)
changed = false
net = runtime.network
parents = get_parents(net, v)
parent = parents[1]
parrange = get_range(runtime, parent, depth)
rangeset = Set()
subranges = Dict()
for p in parrange
if expanded(runtime, v, p)
subnet = expansion(runtime, v, p)
subrun = subruntime(runtime, v, subnet)
subout = current_instance(subrun, get_node(subnet, :out))
if has_range(subrun, subout, depth - 1)
subrange = get_range(subrun, subout, depth - 1)
subranges[p] = subrange
union!(rangeset, subrange)
else
subranges[p] = []
end
else
subranges[p] = []
end
end
todo = Dict()
for p in parrange
todo[p] = length(subranges[p])
end
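    # Greedily expand the parent value whose expansion currently contributes
    # the fewest output values, until the combined range reaches target_size
    # or every parent value has been expanded.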
while length(rangeset) < target_size && !isempty(todo)
p = argmin(todo)
delete!(todo, p)
(subnet, chng) = expand!(runtime, v, p)
changed = changed || chng
subrun = subruntime(runtime, v, subnet)
if depth > 1
order = topsort(get_initial_graph(subnet))
outvar = get_node(subrun, :out)
# Cannot have evidence on subnet
if set_ranges!(subrun, Dict{Symbol, Score}(), target_size - length(rangeset), depth - 1, order)
changed = true
end
inst = current_instance(subrun, outvar)
rng = get_range(subrun, inst, depth - 1)
union!(rangeset, rng)
end
end
result = output_type(v)[]
for x in rangeset
push!(result, x)
end
sort!(result)
return (result, changed)
end
import Base.copy
export
Graph,
add_node!,
add_undirected!
"""
struct Graph
A simple graph with nodes (Int), edges (outgoing), and a size
property for each node
"""
struct Graph
nodes :: Array{Int, 1}
edges :: Dict{Int, Array{Int, 1}}
sizes :: Dict{Int, Int}
Graph() = new([], Dict(), Dict())
Graph(ns, es, ss) = new(ns, es, ss)
end
"""
    add_node!(g::Graph, n::Int, size::Int)

Add node `n` with the given size to the graph; if `n` already exists, only
its size is updated.
"""
function add_node!(g :: Graph, n :: Int, size :: Int)
if !(n in g.nodes)
push!(g.nodes, n)
g.edges[n] = []
end
g.sizes[n] = size
end
"""
    add_undirected!(g::Graph, n1::Int, n2::Int)

Add an undirected edge between `n1` and `n2`. Both nodes must already be in
the graph; duplicate edges are not added.
"""
function add_undirected!(g :: Graph, n1 :: Int, n2 :: Int)
if n1 in g.nodes && n2 in g.nodes
if !(n2 in g.edges[n1])
push!(g.edges[n1], n2)
end
if !(n1 in g.edges[n2])
push!(g.edges[n2], n1)
end
end
end
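# Example (sketch): build a triangle over nodes 1, 2, 3, each with size 2:
#
#     g = Graph()
#     for n in 1:3
#         add_node!(g, n, 2)
#     end
#     add_undirected!(g, 1, 2)
#     add_undirected!(g, 2, 3)
#     add_undirected!(g, 3, 1)
#     g.edges[1]  # == [2, 3]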
using Profile
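# Run the test suite once so everything is compiled, then profile a second
# run; this keeps compilation cost out of the reported profile.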
include("runtests.jl")
@profile include("runtests.jl")
open("output/profile_tree.txt", "w") do s
Profile.print(IOContext(s, :displaysize => (24, 500));
format=:tree,
maxdepth=100,
noisefloor=2,
mincount=2)
end
open("output/profile_flat.txt", "w") do s
Profile.print(IOContext(s, :displaysize => (24, 500));
format=:flat,
sortedby=:count,
noisefloor=1,
mincount=1)
end
# File test/runtests.jl
using Test
@testset "ScruffTests" begin
include("test_core.jl")
include("test_sfuncs.jl")
include("test_score.jl")
include("test_utils.jl")
include("test_ops.jl")
include("test_ve.jl")
include("test_lsfi.jl")
include("test_bp.jl")
include("test_importance.jl")
include("test_net.jl")
include("test_filter.jl")
include("test_em.jl")
# These don't test anything, but we want to make sure any changes haven't broken the examples
redirect_stdout(devnull) do
include("../docs/examples/novelty_example.jl")
include("../docs/examples/novelty_lazy.jl")
include("../docs/examples/novelty_filtering.jl")
include("../docs/examples/rembrandt_example.jl")
include("../docs/examples/soccer_example.jl")
end
end
import Base.timedwait
import Base.isapprox
import PrettyPrint
import Scruff.Operators.bounded_probs
import Scruff.Operators.range
using Test
using Scruff
using Scruff.Utils
using Scruff.RTUtils
using Scruff.SFuncs
using Scruff.Models
using Scruff.Operators
import Scruff.Algorithms: three_pass_BP, loopy_BP, ThreePassBP, LoopyBP, infer, probability
@testset "BP" begin
@testset "Cat operations" begin
x = Cat([1,2,3], [0.2, 0.3, 0.5])
@testset "compute_pi" begin
p = compute_pi(x, [1,2,3], (), ())
i1 = indexin(1, p.__compiled_range)[1]
i2 = indexin(2, p.__compiled_range)[1]
i3 = indexin(3, p.__compiled_range)[1]
@test p.params[i1] == 0.2
@test p.params[i2] == 0.3
@test p.params[i3] == 0.5
end
end
@testset "DiscreteCPT operations" begin
cpd1 = Dict((1,) => [0.1, 0.9], (2,) => [0.2, 0.8], (3,) => [0.3, 0.7])
cpd2 = Dict((1,1) => [0.3, 0.7], (1,2) => [0.6, 0.4], (2,1) =>[0.4, 0.6],
(2,2) => [0.7, 0.3], (3,1) => [0.5, 0.5], (3,2) => [0.8, 0.2])
cpd3entries = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6]
cpd3 = Dict{Tuple{Int, Int, Int}, Vector{Float64}}()
for i1 = 1:2
for i2 = 1:3
for i3 = 1:2
ix = (i1-1)*6 + (i2-1)*2 + i3
p = cpd3entries[ix]
cpd3[(i1,i2,i3)] = [p, 1-p]
end
end
end
cpd4entries =
[0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6,
0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]
cpd4 = Dict{Tuple{Int, Int, Int, Int}, Vector{Float64}}()
for i1 = 1:2
for i2 = 1:3
for i3 = 1:2
for i4 = 1:2
ix = (i1-1)*12 + (i2-1)*4 + (i3-1)*2 + i4
p = cpd4entries[ix]
cpd4[(i1,i2,i3,i4)] = [p, 1-p]
end
end
end
end
x1 = DiscreteCPT([1, 2], cpd1)
x2 = DiscreteCPT([1, 2], cpd2)
x3 = DiscreteCPT([1, 2], cpd3)
x4 = DiscreteCPT([1, 2], cpd4)
i1 = indexin(1, x1.__sfs[1].__compiled_range)[1]
i2 = indexin(2, x1.__sfs[1].__compiled_range)[1]
@testset "compute_pi" begin
@testset "with one parent" begin
pa = 0.2 * 0.1 + 0.3 * 0.2 + 0.5 * 0.3
pb = 1 - pa
pi = compute_pi(x1, [1,2], ([1,2,3],), (Cat([1,2,3], [0.2, 0.3, 0.5]),))
@test isapprox(pi.params[i1], pa)
@test isapprox(pi.params[i2], pb)
end
@testset "with two parents" begin
# First parent is outermost (range 2).
# Second parent is innermost (range 3).
# If first parent probabilities are [0.1, 0.9]
# and second parent probablity are [0.2, 0.3, 0.5]
# we get the following probabilities over x:
pa = 0.2 * (0.1 * 0.3 + 0.9 * 0.6) +
0.3 * (0.1 * 0.4 + 0.9 * 0.7) +
0.5 * (0.1 * 0.5 + 0.9 * 0.8)
pb = 1 - pa
pi = compute_pi(x2, [1,2], ([1,2,3], [1,2]), (Cat([1,2,3], [0.2, 0.3, 0.5]), Cat([1,2],[0.1, 0.9])))
@test isapprox(pi.params[i1], pa)
@test isapprox(pi.params[i2], pb)
end
@testset "with three parents" begin
pa = 0.0
pi1 = [0.3, 0.7]
pi2 = [0.2, 0.3, 0.5]
pi3 = [0.6, 0.4]
for i = 1:12
ix1 = div(i-1,6) + 1
ix2 = div(mod(i-1,6), 2) + 1
ix3 = mod(i-1,2) + 1
parp = pi1[ix1] * pi2[ix2] * pi3[ix3]
pa += parp * cpd3entries[i]
end
pb = 1 - pa
pi = compute_pi(x3, [1,2], ([1,2], [1,2,3], [1,2]), (Cat([1,2], pi1), Cat([1,2,3], pi2), Cat([1,2], pi3)))
@test isapprox(pi.params[i1], pa)
@test isapprox(pi.params[i2], pb)
end
@testset "with four parents" begin
pa = 0.0
pi1 = [0.3, 0.7]
pi2 = [0.2, 0.3, 0.5]
pi3 = [0.6, 0.4]
pi4 = [0.8, 0.2]
for i = 1:24
ix1 = div(i-1,12) + 1
ix2 = div(mod(i-1,12), 4) + 1
ix3 = div(mod(i-1,4), 2) + 1
ix4 = mod(i-1, 2) + 1
parp = pi1[ix1] * pi2[ix2] * pi3[ix3] * pi4[ix4]
pa += parp * cpd4entries[i]
end
pb = 1 - pa
pi = compute_pi(x4, [1,2], ([1,2], [1,2,3], [1,2], [1,2]), (Cat([1,2], pi1), Cat([1,2,3], pi2), Cat([1,2], pi3), Cat([1,2], pi4)))
@test isapprox(pi.params[i1], pa)
@test isapprox(pi.params[i2], pb)
end
end
@testset "send_lambda" begin
@testset "with one parent" begin
p1 = 0.3 * 0.1 + 0.7 * 0.9
p2 = 0.3 * 0.2 + 0.7 * 0.8
p3 = 0.3 * 0.3 + 0.7 * 0.7
# l1 = send_lambda(x1, [0.3, 0.7], [1,2], ([1,2,3], [1,2]), [[0.2, 0.3, 0.5], [0.1, 0.9]], 1)
# @test isapprox(l1, [p1, p2, p3])
# FIXME is this a correct test?
l1 = send_lambda(x1, SoftScore([1,2], [0.3, 0.7]), [1,2], ([1,2,3],), (Cat([1,2,3], [0.2, 0.3, 0.5]),), 1)
@test isapprox(get_score(l1, 1), p1)
@test isapprox(get_score(l1, 2), p2)
@test isapprox(get_score(l1, 3), p3)
end
@testset "with two parents" begin
# If the lambda for x is [0.3, 0.7], the lambda messages to the parents are:
p11 = 0.3 * (0.1 * 0.3 + 0.9 * 0.6) +
0.7 * (0.1 * 0.7 + 0.9 * 0.4)
p12 = 0.3 * (0.1 * 0.4 + 0.9 * 0.7) +
0.7 * (0.1 * 0.6 + 0.9 * 0.3)
p13 = 0.3 * (0.1 * 0.5 + 0.9 * 0.8) +
0.7 * (0.1 * 0.5 + 0.9 * 0.2)
p21 = 0.3 * (0.2 * 0.3 + 0.3 * 0.4 + 0.5 * 0.5) +
0.7 * (0.2 * 0.7 + 0.3 * 0.6 + 0.5 * 0.5)
p22 = 0.3 * (0.2 * 0.6 + 0.3 * 0.7 + 0.5 * 0.8) +
0.7 * (0.2 * 0.4 + 0.3 * 0.3 + 0.5 * 0.2)
chlam = SoftScore([1,2], [0.3,0.7])
range = [1,2]
parranges = ([1,2,3], [1,2])
parpis = (Cat([1,2,3], [0.2, 0.3, 0.5]), Cat([1,2], [0.1, 0.9]))
l1 = send_lambda(x2, chlam, range, parranges, parpis, 1)
l2 = send_lambda(x2, chlam, range, parranges, parpis, 2)
@test isapprox([get_score(l1, i) for i in 1:3], [p11, p12, p13])
@test isapprox([get_score(l2, i) for i in 1:2], [p21, p22])
end
@testset "with three parents" begin
pi1 = [0.3, 0.7]
pi2 = [0.2, 0.3, 0.5]
pi3 = [0.6, 0.4]
p11 = 0.0
p12 = 0.0
p21 = 0.0
p22 = 0.0
p23 = 0.0
p31 = 0.0
p32 = 0.0
for i = 1:12
ix1 = div(i-1,6) + 1
ix2 = div(mod(i-1,6), 2) + 1
ix3 = mod(i-1,2) + 1
p1mod = (0.3 * cpd3entries[i] + 0.7 * (1 - cpd3entries[i])) * pi2[ix2] * pi3[ix3]
p2mod = (0.3 * cpd3entries[i] + 0.7 * (1 - cpd3entries[i])) * pi1[ix1] * pi3[ix3]
p3mod = (0.3 * cpd3entries[i] + 0.7 * (1 - cpd3entries[i])) * pi1[ix1] * pi2[ix2]
if ix1 == 1
p11 += p1mod
else
p12 += p1mod
end
if ix2 == 1
p21 += p2mod
elseif ix2 == 2
p22 += p2mod
else
p23 += p2mod
end
if ix3 == 1
p31 += p3mod
else
p32 += p3mod
end
end
chlam = SoftScore([1,2], [0.3, 0.7])
range = [1,2]
parranges = ([1,2], [1,2,3], [1,2])
parpis = (Cat([1,2], pi1), Cat([1,2,3], pi2), Cat([1,2], pi3))
l1 = send_lambda(x3, chlam, range, parranges, parpis, 1)
l2 = send_lambda(x3, chlam, range, parranges, parpis, 2)
l3 = send_lambda(x3, chlam, range, parranges, parpis, 3)
@test isapprox([get_score(l1, i) for i in 1:2], [p11, p12])
@test isapprox([get_score(l2, i) for i in 1:3], [p21, p22, p23])
@test isapprox([get_score(l3, i) for i in 1:2], [p31, p32])
end
@testset "with four parents" begin
pi1 = [0.3, 0.7]
pi2 = [0.2, 0.3, 0.5]
pi3 = [0.6, 0.4]
pi4 = [0.8, 0.2]
p11 = 0.0
p12 = 0.0
p21 = 0.0
p22 = 0.0
p23 = 0.0
p31 = 0.0
p32 = 0.0
p41 = 0.0
p42 = 0.0
for i = 1:24
ix1 = div(i-1,12) + 1
ix2 = div(mod(i-1,12), 4) + 1
ix3 = div(mod(i-1,4), 2) + 1
ix4 = mod(i-1, 2) + 1
p1mod = (0.3 * cpd4entries[i] + 0.7 * (1 - cpd4entries[i])) * pi2[ix2] * pi3[ix3] * pi4[ix4]
p2mod = (0.3 * cpd4entries[i] + 0.7 * (1 - cpd4entries[i])) * pi1[ix1] * pi3[ix3] * pi4[ix4]
p3mod = (0.3 * cpd4entries[i] + 0.7 * (1 - cpd4entries[i])) * pi1[ix1] * pi2[ix2] * pi4[ix4]
p4mod = (0.3 * cpd4entries[i] + 0.7 * (1 - cpd4entries[i])) * pi1[ix1] * pi2[ix2] * pi3[ix3]
if ix1 == 1
p11 += p1mod
else
p12 += p1mod
end
if ix2 == 1
p21 += p2mod
elseif ix2 == 2
p22 += p2mod
else
p23 += p2mod
end
if ix3 == 1
p31 += p3mod
else
p32 += p3mod
end
if ix4 == 1
p41 += p4mod
else
p42 += p4mod
end
end
chlam = SoftScore([1,2], [0.3, 0.7])
range = [1,2]
parranges = ([1,2], [1,2,3], [1,2], [1,2])
parpis = (Cat([1,2], pi1), Cat([1,2,3], pi2), Cat([1,2], pi3), Cat([1,2], pi4))
l1 = send_lambda(x4, chlam, range, parranges, parpis, 1)
l2 = send_lambda(x4, chlam, range, parranges, parpis, 2)
l3 = send_lambda(x4, chlam, range, parranges, parpis, 3)
l4 = send_lambda(x4, chlam, range, parranges, parpis, 4)
@test isapprox([get_score(l1, i) for i in 1:2], [p11, p12])
@test isapprox([get_score(l2, i) for i in 1:3], [p21, p22, p23])
@test isapprox([get_score(l3, i) for i in 1:2], [p31, p32])
@test isapprox([get_score(l4, i) for i in 1:2], [p41, p42])
end
end
end
@testset "Separable operations" begin
alphas = [0.2, 0.3, 0.5]
cpd1 = Dict((1,) => [0.1, 0.9], (2,) => [0.2, 0.8])
cpd2 = Dict((1,) => [0.3, 0.7], (2,) => [0.4, 0.6], (3,) => [0.5, 0.5])
cpd3 = Dict((1,) => [0.6, 0.4], (2,) => [0.7, 0.3])
cpds :: Array{Dict{I,Array{Float64,1}} where I,1} = [cpd1, cpd2, cpd3]
range = [1, 2]
parranges = ([1, 2], [1, 2, 3], [1, 2])
parpis = (Cat([1,2], [0.8, 0.2]), Cat([1,2,3], [0.5, 0.3, 0.2]), Cat([1,2], [0.9, 0.1]))
x = Separable([1, 2], alphas, cpds)
@testset "compute_pi" begin
# If the parent probabilities are [0.8, 0.2], [0.5, 0.3, 0.2], and
# [0.9, 0.1], x probabilities are:
pa = 0.2 * (0.8 * 0.1 + 0.2 * 0.2) +
0.3 * (0.5 * 0.3 + 0.3 * 0.4 + 0.2 * 0.5) +
0.5 * (0.9 * 0.6 + 0.1 * 0.7)
pb = 1 - pa
ps = compute_pi(x, range, parranges, parpis)
i1 = indexin(1, ps.__compiled_range)[1]
i2 = indexin(2, ps.__compiled_range)[1]
@test isapprox(ps.params[i1], pa)
@test isapprox(ps.params[i2], pb)
end
@testset "send_lambda" begin
# If the x probabilities are [0.3, 0.7], the lambda messages are computed by:
p1othera = 0.3 * (0.5 * 0.3 + 0.3 * 0.4 + 0.2 * 0.5) +
0.5 * (0.9 * 0.6 + 0.1 * 0.7)
p11a = 0.2 * 0.1 + p1othera
p11 = 0.3 * p11a + 0.7 * (1-p11a) # = 0.3 * 0.2 * 0.1 + 0.3 * p1othera + 0.7 * (1 - 0.2 * 0.1) + 0.7 * p1otherb
p12a = 0.2 * 0.2 + p1othera
p12 = 0.3 * p12a + 0.7 * (1-p12a)
p2othera = 0.2 * (0.8 * 0.1 + 0.2 * 0.2) +
0.5 * (0.9 * 0.6 + 0.1 * 0.7)
p21a = 0.3 * 0.3 + p2othera
p21 = 0.3 * p21a + 0.7 * (1-p21a)
p22a = 0.3 * 0.4 + p2othera
p22 = 0.3 * p22a + 0.7 * (1-p22a)
p23a = 0.3 * 0.5 + p2othera
p23 = 0.3 * p23a + 0.7 * (1-p23a)
q21a = 0.3 * 0.3
q21b = 0.3 * q21a + 0.7 * (1-q21a-p2othera)
p3othera = 0.2 * (0.8 * 0.1 + 0.2 * 0.2) +
0.3 * (0.5 * 0.3 + 0.3 * 0.4 + 0.2 * 0.5)
p31a = 0.5 * 0.6 + p3othera
p31 = 0.3 * p31a + 0.7 * (1-p31a)
p32a = 0.5 * 0.7 + p3othera
p32 = 0.3 * p32a + 0.7 * (1-p32a)
chlam = SoftScore([1,2], [0.3, 0.7])
l1 = send_lambda(x, chlam, range, parranges, parpis, 1)
l2 = send_lambda(x, chlam, range, parranges, parpis, 2)
l3 = send_lambda(x, chlam, range, parranges, parpis, 3)
@test isapprox([get_score(l1, i) for i in 1:2], [p11, p12])
@test isapprox([get_score(l2, i) for i in 1:3], [p21, p22, p23])
@test isapprox([get_score(l3, i) for i in 1:2], [p31, p32])
end
end
@testset "BP operations in general" begin
cpd = Dict((1,1) => [0.3, 0.7], (1,2) => [0.6, 0.4], (2,1) =>[0.4, 0.6],
(2,2) => [0.7, 0.3], (3,1) => [0.5, 0.5], (3,2) => [0.8, 0.2])
x = DiscreteCPT([1,2], cpd)
@testset "compute_lambda" begin
l1 = SoftScore([1,2], [0.1, 0.2])
l2 = SoftScore([1,2], [0.3, 0.4])
l3 = SoftScore([1,2], [0.5, 0.6])
lam1 = compute_lambda(x, [1,2], [l1, l2, l3])
lam2 = compute_lambda(x, [1,2], Score{output_type(x)}[])
@test isapprox(normalize([get_score(lam1, i) for i in 1:2]), normalize([0.1 * 0.3 * 0.5, 0.2 * 0.4 * 0.6]))
@test isapprox(normalize([get_score(lam2, i) for i in 1:2]), normalize([1.0, 1.0]))
end
@testset "compute_lambda avoids underflow" begin
ls = fill(SoftScore([1,2], [0.000000001, 0.000000001]), 500)
lam = compute_lambda(x, [1,2], ls)
@test isapprox(normalize([get_score(lam, i) for i in 1:2]), [0.5, 0.5])
end
@testset "compute_bel" begin
p1 = Cat([1, 2], [0.1, 0.2])
l1 = SoftScore([1, 2], [0.3, 0.4])
b = compute_bel(x, [2, 1], p1, l1)
@test isapprox([cpdf(b, (), i) for i in [2,1]], normalize([0.2 * 0.4, 0.1 * 0.3]))
end
@testset "send_pi" begin
b1 = Cat([1,2], [0.1, 0.2])
l1 = SoftScore([1,2], [0.3, 0.4])
l2 = SoftScore([1,2], [0.0, 0.0])
p1 = send_pi(x, [2, 1], b1, l1)
p2 = send_pi(x, [2, 1], b1, l2)
@test isapprox([cpdf(p1, (), i) for i in [2,1]], normalize([0.2 / 0.4, 0.1 / 0.3]))
@test all(y -> !isinf(y), [cpdf(p2, (), i) for i in [2,1]])
end
@testset "outgoing_pis" begin
b1 = Cat([1,2], [0.1, 0.2])
l1 = SoftScore([1,2], [0.3, 0.4])
l2 = SoftScore([1,2], [0.5, 0.6])
p1 = send_pi(x, [2,1], b1, l1)
p2 = send_pi(x, [2,1], b1, l2)
op = outgoing_pis(x, [2,1], b1, [l1, l2])
@test all(i -> cpdf(op[1], (), i) == cpdf(p1, (), i), [2,1])
@test all(i -> cpdf(op[2], (), i) == cpdf(p2, (), i), [2,1])
end
@testset "outgoing_lambdas" begin
lam = SoftScore([1,2], [0.3, 0.7])
incoming_pis = (Cat([1,2,3], [0.2, 0.3, 0.5]), Cat([1,2],[0.1, 0.9]))
l1 = send_lambda(x, lam, [1,2], ([1,2,3], [1,2]), incoming_pis, 1)
l2 = send_lambda(x, lam, [1,2], ([1,2,3], [1,2]), incoming_pis, 2)
ols = outgoing_lambdas(x, lam, [1,2], ([1,2,3], [1,2]), incoming_pis)
@test length(ols) == 2
@test all(i -> get_score(ols[1], i) == get_score(l1, i), [1,2])
@test all(i -> get_score(ols[2], i) == get_score(l2, i), [1,2])
end
end
@testset "Three pass BP" begin
x1m = Cat([1,2], [0.1, 0.9])()
x2m = Cat([1,2,3], [0.2, 0.3, 0.5])()
cpd2 = Dict((1,1) => [0.3, 0.7], (1,2) => [0.6, 0.4], (2,1) =>[0.4, 0.6],
(2,2) => [0.7, 0.3], (3,1) => [0.5, 0.5], (3,2) => [0.8, 0.2])
x3m = DiscreteCPT([1,2], cpd2)()
x4m = DiscreteCPT([1,2], Dict((1,) => [0.15, 0.85], (2,) => [0.25, 0.75]))()
x5m = DiscreteCPT([1,2], Dict((1,) => [0.35, 0.65], (2,) => [0.45, 0.55]))()
x6m = DiscreteCPT([1,2], Dict((1,) => [0.65, 0.35], (2,) => [0.75, 0.25]))()
x1 = x1m(:x1)
x2 = x2m(:x2)
x3 = x3m(:x3)
x4 = x4m(:x4)
x5 = x5m(:x5)
x6 = x6m(:x6)
singlenet = InstantNetwork(Variable[x1], VariableGraph())
@testset "single node network, no evidence" begin
run = Runtime(singlenet)
default_initializer(run)
inst = current_instance(run, x1)
three_pass_BP(run)
bel = get_belief(run, inst)
@test isapprox(cpdf(bel, (), 1), 0.1)
@test isapprox(cpdf(bel, (), 2), 0.9)
end
@testset "single node network, with evidence" begin
run = Runtime(singlenet)
default_initializer(run)
inst = current_instance(run, x1)
post_evidence!(run, inst, HardScore(2))
three_pass_BP(run)
bel = get_belief(run, inst)
@test isapprox(cpdf(bel, (), 1), 0.0)
@test isapprox(cpdf(bel, (), 2), 1.0)
end
twoindepnet = InstantNetwork(Variable[x1, x2], VariableGraph())
@testset "two independent nodes, no evidence" begin
run = Runtime(twoindepnet)
default_initializer(run)
inst = current_instance(run, x1)
three_pass_BP(run)
bel = get_belief(run, inst)
@test isapprox(cpdf(bel, (), 1), 0.1)
@test isapprox(cpdf(bel, (), 2), 0.9)
end
@testset "two independent nodes, with evidence on other variable" begin
run = Runtime(twoindepnet)
default_initializer(run)
inst1 = current_instance(run, x1)
inst2 = current_instance(run, x2)
post_evidence!(run, inst2, HardScore(2))
three_pass_BP(run)
bel = get_belief(run, inst1)
@test isapprox(cpdf(bel, (), 1), 0.1)
@test isapprox(cpdf(bel, (), 2), 0.9)
end
parchildnet = InstantNetwork(Variable[x1, x6], VariableGraph(x6=>[x1]))
@testset "parent and child, no evidence" begin
run = Runtime(parchildnet)
default_initializer(run)
inst1 = current_instance(run, x1)
inst6 = current_instance(run, x6)
three_pass_BP(run)
bel1 = get_belief(run, inst1)
@test isapprox(cpdf(bel1, (), 1), 0.1)
@test isapprox(cpdf(bel1, (), 2), 0.9)
bel6 = get_belief(run, inst6)
@test isapprox(cpdf(bel6, (), 1), 0.1 * 0.65 + 0.9 * 0.75)
@test isapprox(cpdf(bel6, (), 2), 0.1 * 0.35 + 0.9 * 0.25)
end
@testset "parent and child, evidence on parent" begin
run = Runtime(parchildnet)
default_initializer(run)
inst1 = current_instance(run, x1)
inst6 = current_instance(run, x6)
post_evidence!(run, inst1, HardScore(2))
three_pass_BP(run)
bel1 = get_belief(run, inst1)
@test isapprox(cpdf(bel1, (), 1), 0.0)
@test isapprox(cpdf(bel1, (), 2), 1.0)
bel6 = get_belief(run, inst6)
@test isapprox(cpdf(bel6, (), 1), 0.75)
@test isapprox(cpdf(bel6, (), 2), 0.25)
end
@testset "parent and child, evidence on child" begin
run = Runtime(parchildnet)
default_initializer(run)
inst1 = current_instance(run, x1)
inst6 = current_instance(run, x6)
post_evidence!(run, inst6, HardScore(2))
three_pass_BP(run)
p1 = 0.1 * 0.35
p2 = 0.9 * 0.25
z = p1 + p2
bel1 = get_belief(run, inst1)
@test isapprox(cpdf(bel1, (), 1), p1 / z)
@test isapprox(cpdf(bel1, (), 2), p2 / z)
bel6 = get_belief(run, inst6)
@test isapprox(cpdf(bel6, (), 1), 0.0)
@test isapprox(cpdf(bel6, (), 2), 1.0)
end
@testset "parent and child, intervention on parent" begin
run = Runtime(parchildnet)
default_initializer(run)
inst1 = current_instance(run, x1)
inst6 = current_instance(run, x6)
post_intervention!(run, inst1, Constant(2))
three_pass_BP(run)
bel1 = get_belief(run, inst1)
@test isapprox(cpdf(bel1, (), 1), 0.0)
@test isapprox(cpdf(bel1, (), 2), 1.0)
bel6 = get_belief(run, inst6)
@test isapprox(cpdf(bel6, (), 1), 0.75)
@test isapprox(cpdf(bel6, (), 2), 0.25)
end
@testset "parent and child, intervention on child" begin
run = Runtime(parchildnet)
default_initializer(run)
inst1 = current_instance(run, x1)
inst6 = current_instance(run, x6)
post_intervention!(run, inst6, Constant(2))
three_pass_BP(run)
p1 = 0.1 * 0.35
p2 = 0.9 * 0.25
z = p1 + p2
bel1 = get_belief(run, inst1)
@test isapprox(cpdf(bel1, (), 1), 0.1)
@test isapprox(cpdf(bel1, (), 2), 0.9)
bel6 = get_belief(run, inst6)
@test isapprox(cpdf(bel6, (), 1), 0.0)
@test isapprox(cpdf(bel6, (), 2), 1.0)
end
@testset "running bp twice with different evidence" begin
run = Runtime(parchildnet)
default_initializer(run)
inst1 = current_instance(run, x1)
inst6 = current_instance(run, x6)
post_evidence!(run, inst1, HardScore(2))
three_pass_BP(run)
delete_evidence!(run, inst1)
post_evidence!(run, inst6, HardScore(2))
three_pass_BP(run)
p1 = 0.1 * 0.35
p2 = 0.9 * 0.25
z = p1 + p2
bel1 = get_belief(run, inst1)
@test isapprox(cpdf(bel1, (), 1), p1 / z)
@test isapprox(cpdf(bel1, (), 2), p2 / z)
bel6 = get_belief(run, inst6)
@test isapprox(cpdf(bel6, (), 1), 0.0)
@test isapprox(cpdf(bel6, (), 2), 1.0)
end
@testset "with soft evidence on root and evidence on child" begin
run = Runtime(parchildnet)
default_initializer(run)
inst1 = current_instance(run, x1)
inst6 = current_instance(run, x6)
post_evidence!(run, inst1, SoftScore([1,2], [0.3, 0.7]))
post_evidence!(run, inst6, HardScore(2))
three_pass_BP(run)
# soft evidence is interpreted as an additional lambda message
p1 = 0.1 * 0.3 * 0.35
p2 = 0.9 * 0.7 * 0.25
z = p1 + p2
bel1 = get_belief(run, inst1)
@test isapprox(cpdf(bel1, (), 1), p1 / z)
@test isapprox(cpdf(bel1, (), 2), p2 / z)
bel6 = get_belief(run, inst6)
@test isapprox(cpdf(bel6, (), 1), 0.0)
@test isapprox(cpdf(bel6, (), 2), 1.0)
end
fivecpdnet = InstantNetwork(Variable[x1,x2,x3,x4,x5], VariableGraph(x3=>[x2,x1], x4=>[x3], x5=>[x3]))
acfhj = 0.1 * 0.2 * 0.3 * 0.15 * 0.35
acfhk = 0.1 * 0.2 * 0.3 * 0.15 * 0.65
acfij = 0.1 * 0.2 * 0.3 * 0.85 * 0.35
acfik = 0.1 * 0.2 * 0.3 * 0.85 * 0.65
acghj = 0.1 * 0.2 * 0.7 * 0.25 * 0.45
acghk = 0.1 * 0.2 * 0.7 * 0.25 * 0.55
acgij = 0.1 * 0.2 * 0.7 * 0.75 * 0.45
acgik = 0.1 * 0.2 * 0.7 * 0.75 * 0.55
adfhj = 0.1 * 0.3 * 0.4 * 0.15 * 0.35
adfhk = 0.1 * 0.3 * 0.4 * 0.15 * 0.65
adfij = 0.1 * 0.3 * 0.4 * 0.85 * 0.35
adfik = 0.1 * 0.3 * 0.4 * 0.85 * 0.65
adghj = 0.1 * 0.3 * 0.6 * 0.25 * 0.45
adghk = 0.1 * 0.3 * 0.6 * 0.25 * 0.55
adgij = 0.1 * 0.3 * 0.6 * 0.75 * 0.45
adgik = 0.1 * 0.3 * 0.6 * 0.75 * 0.55
aefhj = 0.1 * 0.5 * 0.5 * 0.15 * 0.35
aefhk = 0.1 * 0.5 * 0.5 * 0.15 * 0.65
aefij = 0.1 * 0.5 * 0.5 * 0.85 * 0.35
aefik = 0.1 * 0.5 * 0.5 * 0.85 * 0.65
aeghj = 0.1 * 0.5 * 0.5 * 0.25 * 0.45
aeghk = 0.1 * 0.5 * 0.5 * 0.25 * 0.55
aegij = 0.1 * 0.5 * 0.5 * 0.75 * 0.45
aegik = 0.1 * 0.5 * 0.5 * 0.75 * 0.55
bcfhj = 0.9 * 0.2 * 0.6 * 0.15 * 0.35
bcfhk = 0.9 * 0.2 * 0.6 * 0.15 * 0.65
bcfij = 0.9 * 0.2 * 0.6 * 0.85 * 0.35
bcfik = 0.9 * 0.2 * 0.6 * 0.85 * 0.65
bcghj = 0.9 * 0.2 * 0.4 * 0.25 * 0.45
bcghk = 0.9 * 0.2 * 0.4 * 0.25 * 0.55
bcgij = 0.9 * 0.2 * 0.4 * 0.75 * 0.45
bcgik = 0.9 * 0.2 * 0.4 * 0.75 * 0.55
bdfhj = 0.9 * 0.3 * 0.7 * 0.15 * 0.35
bdfhk = 0.9 * 0.3 * 0.7 * 0.15 * 0.65
bdfij = 0.9 * 0.3 * 0.7 * 0.85 * 0.35
bdfik = 0.9 * 0.3 * 0.7 * 0.85 * 0.65
bdghj = 0.9 * 0.3 * 0.3 * 0.25 * 0.45
bdghk = 0.9 * 0.3 * 0.3 * 0.25 * 0.55
bdgij = 0.9 * 0.3 * 0.3 * 0.75 * 0.45
bdgik = 0.9 * 0.3 * 0.3 * 0.75 * 0.55
befhj = 0.9 * 0.5 * 0.8 * 0.15 * 0.35
befhk = 0.9 * 0.5 * 0.8 * 0.15 * 0.65
befij = 0.9 * 0.5 * 0.8 * 0.85 * 0.35
befik = 0.9 * 0.5 * 0.8 * 0.85 * 0.65
beghj = 0.9 * 0.5 * 0.2 * 0.25 * 0.45
beghk = 0.9 * 0.5 * 0.2 * 0.25 * 0.55
begij = 0.9 * 0.5 * 0.2 * 0.75 * 0.45
begik = 0.9 * 0.5 * 0.2 * 0.75 * 0.55
@testset "five node non-loopy network with discrete CPD nodes and no evidence" begin
run = Runtime(fivecpdnet)
default_initializer(run)
inst1 = current_instance(run, x1)
inst3 = current_instance(run, x3)
inst5 = current_instance(run, x5)
three_pass_BP(run)
a = acfhj + acfhk + acfij + acfik + acghj + acghk + acgij + acgik +
adfhj + adfhk + adfij + adfik + adghj + adghk + adgij + adgik +
aefhj + aefhk + aefij + aefik + aeghj + aeghk + aegij + aegik
b = 1-a
f = acfhj + acfhk + acfij + acfik + adfhj + adfhk + adfij + adfik + aefhj + aefhk + aefij + aefik +
bcfhj + bcfhk + bcfij + bcfik + bdfhj + bdfhk + bdfij + bdfik + befhj + befhk + befij + befik
g = 1-f
j = acfhj + acfij + acghj + acgij + adfhj + adfij + adghj + adgij + aefhj + aefij + aeghj + aegij +
bcfhj + bcfij + bcghj + bcgij + bdfhj + bdfij + bdghj + bdgij + befhj + befij + beghj + begij
k = 1-j
bel1 = get_belief(run, inst1)
@test isapprox(cpdf(bel1, (), 1), a)
@test isapprox(cpdf(bel1, (), 2), b)
bel3 = get_belief(run, inst3)
@test isapprox(cpdf(bel3, (), 1), f)
@test isapprox(cpdf(bel3, (), 2), g)
bel5 = get_belief(run, inst5)
@test isapprox(cpdf(bel5, (), 1), j)
@test isapprox(cpdf(bel5, (), 2), k)
end
@testset "five node non-loopy network with discrete CPD nodes and evidence at root" begin
run = Runtime(fivecpdnet)
default_initializer(run)
inst1 = current_instance(run, x1)
inst5 = current_instance(run, x5)
post_evidence!(run, inst1, HardScore(2))
three_pass_BP(run)
bj = bcfhj + bcfij + bcghj + bcgij + bdfhj + bdfij + bdghj + bdgij + befhj + befij + beghj + begij
bk = bcfhk + bcfik + bcghk + bcgik + bdfhk + bdfik + bdghk + bdgik + befhk + befik + beghk + begik
bel5 = get_belief(run, inst5)
@test isapprox(cpdf(bel5, (), 1), bj / (bj + bk))
@test isapprox(cpdf(bel5, (), 2), bk / (bj + bk))
end
@testset "five node non-loopy network with discrete CPD nodes and evidence at leaves" begin
run = Runtime(fivecpdnet)
default_initializer(run)
inst1 = current_instance(run, x1)
inst4 = current_instance(run, x4)
inst5 = current_instance(run, x5)
post_evidence!(run, inst4, HardScore(1))
post_evidence!(run, inst5, HardScore(2))
three_pass_BP(run)
ahk = acfhk + acghk + adfhk + adghk + aefhk + aeghk
bhk = bcfhk + bcghk + bdfhk + bdghk + befhk + beghk
bel1 = get_belief(run, inst1)
@test isapprox(cpdf(bel1, (), 1), ahk / (ahk + bhk))
@test isapprox(cpdf(bel1, (), 2), bhk / (ahk + bhk))
end
@testset "five node non-loopy network with discrete CPD nodes and evidence at root and leaf" begin
run = Runtime(fivecpdnet)
default_initializer(run)
inst1 = current_instance(run, x1)
inst2 = current_instance(run, x2)
inst3 = current_instance(run, x3)
inst4 = current_instance(run, x4)
inst5 = current_instance(run, x5)
post_evidence!(run, inst1, HardScore(1))
post_evidence!(run, inst5, HardScore(2))
three_pass_BP(run)
ack = acfhk + acfik + acghk + acgik
adk = adfhk + adfik + adghk + adgik
aek = aefhk + aefik + aeghk + aegik
afj = acfhj + acfij + adfhj + adfij + aefhj + aefij
afk = acfhk + acfik + adfhk + adfik + aefhk + aefik
agj = acghj + acgij + adghj + adgij + aeghj + aegij
agk = acghk + acgik + adghk + adgik + aeghk + aegik
ahk = acfhk + acghk + adfhk + adghk + aefhk + aeghk
aik = acfik + acgik + adfik + adgik + aefik + aegik
af = afj + afk
ag = agj + agk
bel2 = get_belief(run, inst2)
@test isapprox(cpdf(bel2, (), 1), ack / (ack + adk + aek))
@test isapprox(cpdf(bel2, (), 2), adk / (ack + adk + aek))
@test isapprox(cpdf(bel2, (), 3), aek / (ack + adk + aek))
bel3 = get_belief(run, inst3)
@test isapprox(cpdf(bel3, (), 1), afk / (afk + agk))
@test isapprox(cpdf(bel3, (), 2), agk / (afk + agk))
bel4 = get_belief(run, inst4)
@test isapprox(cpdf(bel4, (), 1), ahk / (ahk + aik))
@test isapprox(cpdf(bel4, (), 2), aik / (ahk + aik))
end
y1 = Cat([1,2], [0.1, 0.9])()(:y1)
y2 = Cat([1,2,3], [0.2, 0.3, 0.5])()(:y2)
y3 = Cat([1,2], [0.8, 0.2])()(:y3)
cpt1 = Dict((1,) => [0.1, 0.9], (2,) => [0.2, 0.8])
cpt2 = Dict((1,) => [0.3, 0.7], (2,) => [0.4, 0.6], (3,) => [0.5, 0.5])
cpt3 = Dict((1,) => [0.6, 0.4], (2,) => [0.7, 0.3])
cpts::SepCPTs = [cpt1, cpt2, cpt3]
y4 = Separable([1,2], [0.5, 0.2, 0.3], cpts)()(:y4)
y5 = DiscreteCPT([1,2], Dict((1,) => [0.35, 0.65], (2,) => [0.45, 0.55]))()(:y5)
fivesepnet = InstantNetwork(Variable[y1,y2,y3,y4,y5], VariableGraph(y4=>[y1,y2,y3], y5=>[y4]))
acfhj = 0.1 * 0.2 * 0.8 * (0.5 * 0.1 + 0.2 * 0.3 + 0.3 * 0.6) * 0.35
acfhk = 0.1 * 0.2 * 0.8 * (0.5 * 0.1 + 0.2 * 0.3 + 0.3 * 0.6) * 0.65
acfij = 0.1 * 0.2 * 0.8 * (0.5 * 0.9 + 0.2 * 0.7 + 0.3 * 0.4) * 0.45
acfik = 0.1 * 0.2 * 0.8 * (0.5 * 0.9 + 0.2 * 0.7 + 0.3 * 0.4) * 0.55
acghj = 0.1 * 0.2 * 0.2 * (0.5 * 0.1 + 0.2 * 0.3 + 0.3 * 0.7) * 0.35
acghk = 0.1 * 0.2 * 0.2 * (0.5 * 0.1 + 0.2 * 0.3 + 0.3 * 0.7) * 0.65
acgij = 0.1 * 0.2 * 0.2 * (0.5 * 0.9 + 0.2 * 0.7 + 0.3 * 0.3) * 0.45
acgik = 0.1 * 0.2 * 0.2 * (0.5 * 0.9 + 0.2 * 0.7 + 0.3 * 0.3) * 0.55
adfhj = 0.1 * 0.3 * 0.8 * (0.5 * 0.1 + 0.2 * 0.4 + 0.3 * 0.6) * 0.35
adfhk = 0.1 * 0.3 * 0.8 * (0.5 * 0.1 + 0.2 * 0.4 + 0.3 * 0.6) * 0.65
adfij = 0.1 * 0.3 * 0.8 * (0.5 * 0.9 + 0.2 * 0.6 + 0.3 * 0.4) * 0.45
adfik = 0.1 * 0.3 * 0.8 * (0.5 * 0.9 + 0.2 * 0.6 + 0.3 * 0.4) * 0.55
adghj = 0.1 * 0.3 * 0.2 * (0.5 * 0.1 + 0.2 * 0.4 + 0.3 * 0.7) * 0.35
adghk = 0.1 * 0.3 * 0.2 * (0.5 * 0.1 + 0.2 * 0.4 + 0.3 * 0.7) * 0.65
adgij = 0.1 * 0.3 * 0.2 * (0.5 * 0.9 + 0.2 * 0.6 + 0.3 * 0.3) * 0.45
adgik = 0.1 * 0.3 * 0.2 * (0.5 * 0.9 + 0.2 * 0.6 + 0.3 * 0.3) * 0.55
aefhj = 0.1 * 0.5 * 0.8 * (0.5 * 0.1 + 0.2 * 0.5 + 0.3 * 0.6) * 0.35
aefhk = 0.1 * 0.5 * 0.8 * (0.5 * 0.1 + 0.2 * 0.5 + 0.3 * 0.6) * 0.65
aefij = 0.1 * 0.5 * 0.8 * (0.5 * 0.9 + 0.2 * 0.5 + 0.3 * 0.4) * 0.45
aefik = 0.1 * 0.5 * 0.8 * (0.5 * 0.9 + 0.2 * 0.5 + 0.3 * 0.4) * 0.55
aeghj = 0.1 * 0.5 * 0.2 * (0.5 * 0.1 + 0.2 * 0.5 + 0.3 * 0.7) * 0.35
aeghk = 0.1 * 0.5 * 0.2 * (0.5 * 0.1 + 0.2 * 0.5 + 0.3 * 0.7) * 0.65
aegij = 0.1 * 0.5 * 0.2 * (0.5 * 0.9 + 0.2 * 0.5 + 0.3 * 0.3) * 0.45
aegik = 0.1 * 0.5 * 0.2 * (0.5 * 0.9 + 0.2 * 0.5 + 0.3 * 0.3) * 0.55
bcfhj = 0.9 * 0.2 * 0.8 * (0.5 * 0.2 + 0.2 * 0.3 + 0.3 * 0.6) * 0.35
bcfhk = 0.9 * 0.2 * 0.8 * (0.5 * 0.2 + 0.2 * 0.3 + 0.3 * 0.6) * 0.65
bcfij = 0.9 * 0.2 * 0.8 * (0.5 * 0.8 + 0.2 * 0.7 + 0.3 * 0.4) * 0.45
bcfik = 0.9 * 0.2 * 0.8 * (0.5 * 0.8 + 0.2 * 0.7 + 0.3 * 0.4) * 0.55
bcghj = 0.9 * 0.2 * 0.2 * (0.5 * 0.2 + 0.2 * 0.3 + 0.3 * 0.7) * 0.35
bcghk = 0.9 * 0.2 * 0.2 * (0.5 * 0.2 + 0.2 * 0.3 + 0.3 * 0.7) * 0.65
bcgij = 0.9 * 0.2 * 0.2 * (0.5 * 0.8 + 0.2 * 0.7 + 0.3 * 0.3) * 0.45
bcgik = 0.9 * 0.2 * 0.2 * (0.5 * 0.8 + 0.2 * 0.7 + 0.3 * 0.3) * 0.55
bdfhj = 0.9 * 0.3 * 0.8 * (0.5 * 0.2 + 0.2 * 0.4 + 0.3 * 0.6) * 0.35
bdfhk = 0.9 * 0.3 * 0.8 * (0.5 * 0.2 + 0.2 * 0.4 + 0.3 * 0.6) * 0.65
bdfij = 0.9 * 0.3 * 0.8 * (0.5 * 0.8 + 0.2 * 0.6 + 0.3 * 0.4) * 0.45
bdfik = 0.9 * 0.3 * 0.8 * (0.5 * 0.8 + 0.2 * 0.6 + 0.3 * 0.4) * 0.55
bdghj = 0.9 * 0.3 * 0.2 * (0.5 * 0.2 + 0.2 * 0.4 + 0.3 * 0.7) * 0.35
bdghk = 0.9 * 0.3 * 0.2 * (0.5 * 0.2 + 0.2 * 0.4 + 0.3 * 0.7) * 0.65
bdgij = 0.9 * 0.3 * 0.2 * (0.5 * 0.8 + 0.2 * 0.6 + 0.3 * 0.3) * 0.45
bdgik = 0.9 * 0.3 * 0.2 * (0.5 * 0.8 + 0.2 * 0.6 + 0.3 * 0.3) * 0.55
befhj = 0.9 * 0.5 * 0.8 * (0.5 * 0.2 + 0.2 * 0.5 + 0.3 * 0.6) * 0.35
befhk = 0.9 * 0.5 * 0.8 * (0.5 * 0.2 + 0.2 * 0.5 + 0.3 * 0.6) * 0.65
befij = 0.9 * 0.5 * 0.8 * (0.5 * 0.8 + 0.2 * 0.5 + 0.3 * 0.4) * 0.45
befik = 0.9 * 0.5 * 0.8 * (0.5 * 0.8 + 0.2 * 0.5 + 0.3 * 0.4) * 0.55
beghj = 0.9 * 0.5 * 0.2 * (0.5 * 0.2 + 0.2 * 0.5 + 0.3 * 0.7) * 0.35
beghk = 0.9 * 0.5 * 0.2 * (0.5 * 0.2 + 0.2 * 0.5 + 0.3 * 0.7) * 0.65
begij = 0.9 * 0.5 * 0.2 * (0.5 * 0.8 + 0.2 * 0.5 + 0.3 * 0.3) * 0.45
begik = 0.9 * 0.5 * 0.2 * (0.5 * 0.8 + 0.2 * 0.5 + 0.3 * 0.3) * 0.55
@testset "five node non-loopy network with separable CPD nodes and no evidence" begin
run = Runtime(fivesepnet)
default_initializer(run)
inst4 = current_instance(run, y4)
three_pass_BP(run)
h = acfhj + acfhk + acghj + acghk + adfhj + adfhk + adghj + adghk + aefhj + aefhk + aeghj + aeghk +
bcfhj + bcfhk + bcghj + bcghk + bdfhj + bdfhk + bdghj + bdghk + befhj + befhk + beghj + beghk
i = acfij + acfik + acgij + acgik + adfij + adfik + adgij + adgik + aefij + aefik + aegij + aegik +
bcfij + bcfik + bcgij + bcgik + bdfij + bdfik + bdgij + bdgik + befij + befik + begij + begik
bel4 = get_belief(run, inst4)
@test isapprox(cpdf(bel4, (), 1), h)
@test isapprox(cpdf(bel4, (), 2), i)
end
@testset "five node non-loopy network with separable CPD nodes and evidence at root" begin
run = Runtime(fivesepnet)
default_initializer(run)
inst1 = current_instance(run, y1)
inst5 = current_instance(run, y5)
post_evidence!(run, inst1, HardScore(2))
three_pass_BP(run)
bj = bcfhj + bcfij + bcghj + bcgij + bdfhj + bdfij + bdghj + bdgij + befhj + befij + beghj + begij
bk = bcfhk + bcfik + bcghk + bcgik + bdfhk + bdfik + bdghk + bdgik + befhk + befik + beghk + begik
bel5 = get_belief(run, inst5)
@test isapprox(cpdf(bel5, (), 1), bj / (bj + bk))
@test isapprox(cpdf(bel5, (), 2), bk / (bj + bk))
end
@testset "five node non-loopy network with separable CPD nodes and evidence at leaves" begin
run = Runtime(fivesepnet)
default_initializer(run)
inst1 = current_instance(run, y1)
inst5 = current_instance(run, y5)
post_evidence!(run, inst5, HardScore(2))
three_pass_BP(run)
ak = acfhk + acfik + acghk + acgik + adfhk + adfik + adghk + adgik + aefhk + aefik + aeghk + aegik
bk = bcfhk + bcfik + bcghk + bcgik + bdfhk + bdfik + bdghk + bdgik + befhk + befik + beghk + begik
bel1 = get_belief(run, inst1)
@test isapprox(cpdf(bel1, (), 1), ak / (ak + bk))
@test isapprox(cpdf(bel1, (), 2), bk / (ak + bk))
end
@testset "five node non-loopy network with separable CPD nodes and evidence at root and leaf" begin
run = Runtime(fivesepnet)
default_initializer(run)
inst1 = current_instance(run, y1)
inst4 = current_instance(run, y4)
inst5 = current_instance(run, y5)
post_evidence!(run, inst1, HardScore(2))
post_evidence!(run, inst5, HardScore(2))
three_pass_BP(run)
bhk = bcfhk + bcghk + bdfhk + bdghk + befhk + beghk
bik = bcfik + bcgik + bdfik + bdgik + befik + begik
bel4 = get_belief(run, inst4)
@test isapprox(cpdf(bel4, (), 1), bhk / (bhk + bik))
@test isapprox(cpdf(bel4, (), 2), bik / (bhk + bik))
end
@testset "five node non-loopy network with separable CPD nodes and evidence on separable node" begin
run = Runtime(fivesepnet)
default_initializer(run)
inst1 = current_instance(run, y1)
inst4 = current_instance(run, y4)
post_evidence!(run, inst4, HardScore(2))
three_pass_BP(run)
ai = acfij + acgij + adfij + adgij + aefij + aegij +
acfik + acgik + adfik + adgik + aefik + aegik
bi = bcfij + bcgij + bdfij + bdgij + befij + begij +
bcfik + bcgik + bdfik + bdgik + befik + begik
bel1 = get_belief(run, inst1)
@test isapprox(cpdf(bel1, (), 1), ai / (ai + bi))
@test isapprox(cpdf(bel1, (), 2), bi / (ai + bi))
end
z1 = Cat([1,2], [0.1, 0.9])()(:z1)
z2 = DiscreteCPT([1,2], Dict((1,) => [0.2, 0.8], (2,) => [0.3, 0.7]))()(:z2)
z3 = DiscreteCPT([1,2], Dict((1,) => [0.4, 0.6], (2,) => [0.5, 0.5]))()(:z3)
cpt1 = Dict((1,) => [0.6, 0.4], (2,) => [0.7, 0.3])
cpt2 = Dict((1,) => [0.8, 0.2], (2,) => [0.9, 0.1])
cpts = [cpt1, cpt2]
z4 = Separable([1,2], [0.75, 0.25], cpts)()(:z4)
loopynet = InstantNetwork(Variable[z1,z2,z3,z4], VariableGraph(z2=>[z1], z3=>[z1], z4=>[z2,z3]))
aceg = 0.1 * 0.2 * 0.4 * (0.75 * 0.6 + 0.25 * 0.8)
aceh = 0.1 * 0.2 * 0.4 * (0.75 * 0.4 + 0.25 * 0.2)
acfg = 0.1 * 0.2 * 0.6 * (0.75 * 0.6 + 0.25 * 0.9)
acfh = 0.1 * 0.2 * 0.6 * (0.75 * 0.4 + 0.25 * 0.1)
adeg = 0.1 * 0.8 * 0.4 * (0.75 * 0.7 + 0.25 * 0.8)
adeh = 0.1 * 0.8 * 0.4 * (0.75 * 0.3 + 0.25 * 0.2)
adfg = 0.1 * 0.8 * 0.6 * (0.75 * 0.7 + 0.25 * 0.9)
adfh = 0.1 * 0.8 * 0.6 * (0.75 * 0.3 + 0.25 * 0.1)
bceg = 0.9 * 0.3 * 0.5 * (0.75 * 0.6 + 0.25 * 0.8)
bceh = 0.9 * 0.3 * 0.5 * (0.75 * 0.4 + 0.25 * 0.2)
bcfg = 0.9 * 0.3 * 0.5 * (0.75 * 0.6 + 0.25 * 0.9)
bcfh = 0.9 * 0.3 * 0.5 * (0.75 * 0.4 + 0.25 * 0.1)
bdeg = 0.9 * 0.7 * 0.5 * (0.75 * 0.7 + 0.25 * 0.8)
bdeh = 0.9 * 0.7 * 0.5 * (0.75 * 0.3 + 0.25 * 0.2)
bdfg = 0.9 * 0.7 * 0.5 * (0.75 * 0.7 + 0.25 * 0.9)
bdfh = 0.9 * 0.7 * 0.5 * (0.75 * 0.3 + 0.25 * 0.1)
@testset "four node loopy network with separable nodes and no evidence" begin
run = Runtime(loopynet)
default_initializer(run)
inst4 = current_instance(run, z4)
three_pass_BP(run)
g = aceg + acfg + adeg + adfg + bceg + bcfg + bdeg + bdfg
h = aceh + acfh + adeh + adfh + bceh + bcfh + bdeh + bdfh
bel4 = get_belief(run, inst4)
@test isapprox(cpdf(bel4, (), 1), g)
@test isapprox(cpdf(bel4, (), 2), h)
end
@testset "four node loopy network with separable nodes and evidence at root (exact)" begin
run = Runtime(loopynet)
default_initializer(run)
inst1 = current_instance(run, z1)
inst4 = current_instance(run, z4)
post_evidence!(run, inst1, HardScore(2))
three_pass_BP(run)
bg = bceg + bcfg + bdeg + bdfg
bh = bceh + bcfh + bdeh + bdfh
bel4 = get_belief(run, inst4)
@test isapprox(cpdf(bel4, (), 1), bg / (bg + bh))
@test isapprox(cpdf(bel4, (), 2), bh / (bg + bh))
end
@testset "four nodeloopy network with separable nodes and evidence at leaves (approximate)" begin
run = Runtime(loopynet)
default_initializer(run)
inst1 = current_instance(run, z1)
inst4 = current_instance(run, z4)
post_evidence!(run, inst4, HardScore(2))
three_pass_BP(run)
ah = aceh + acfh + adeh + adfh
bh = bceh + bcfh + bdeh + bdfh
bel1 = get_belief(run, inst1)
@test isapprox(cpdf(bel1, (), 1), ah / (ah + bh); atol = 0.0001)
@test isapprox(cpdf(bel1, (), 2), bh / (ah + bh); atol = 0.0001)
end
end
@testset "loopy BP" begin
# Loopy BP is hard to test directly.
# Even on loopy networks, it is not guaranteed to have less error than three pass BP.
# We can only test that it behaves like three pass BP where expected, which is without evidence,
# and differently where not.
x1 = Cat([1,2], [0.1, 0.9])()(:x1)
x2 = DiscreteCPT([1,2], Dict((1,) => [0.2, 0.8], (2,) => [0.8, 0.2]))()(:x2)
x3 = DiscreteCPT([1,2], Dict((1,) => [0.3, 0.7], (2,) => [0.7, 0.3]))()(:x3)
x4 = DiscreteCPT([1,2], Dict((1,1) => [0.1, 0.9], (1,2) => [0.1, 0.9], (2,1) => [0.1, 0.9], (2,2) => [0.95, 0.05]))()(:x4)
loopynet = InstantNetwork(Variable[x1,x2,x3,x4], VariableGraph(x2=>[x1], x3=>[x1], x4=>[x3,x2]))
@testset "marginal query" begin
# On marginal queries, there are no lambda messages, so loopy BP converges in one pass
# Therefore it should have the same result as three pass BP.
runloopy = Runtime(loopynet)
default_initializer(runloopy)
loopy_BP(runloopy; epsilon = 0.000001)
runthree = Runtime(loopynet)
default_initializer(runthree)
three_pass_BP(runthree)
# runve = Runtime(loopynet)
# order = topsort(get_initial_graph(loopynet))
# ensure_all!(runve)
# set_ranges!(runve, 2, 1, order)
# exact = ve(runve, order, [x4])
loopyinst4 = current_instance(runloopy, x4)
loopybel = get_belief(runloopy, loopyinst4)
threeinst4 = current_instance(runthree, x4)
threebel = get_belief(runthree, threeinst4)
# exactbel = exact.entries
for i in 1:2
# @test abs(loopybel[i] - exactbel[i]) <= abs(threebel[i] - exactbel[i])
@test cpdf(loopybel, (), i) == cpdf(threebel, (), i)
end
end
@testset "marginal query" begin
# On conditional queries with evidence, loopy BP could run more iterations.
# We design the network to make sure this happens.
# Therefore it should have different results from three pass BP.
runloopy = Runtime(loopynet)
default_initializer(runloopy)
loopyinst4 = current_instance(runloopy, x4)
post_evidence!(runloopy, loopyinst4, HardScore(2))
loopy_BP(runloopy; epsilon = 0.000001)
runthree = Runtime(loopynet)
default_initializer(runthree)
threeinst4 = current_instance(runthree, x4)
post_evidence!(runthree, threeinst4, HardScore(2))
three_pass_BP(runthree)
loopyinst1 = current_instance(runloopy, x1)
loopybel = get_belief(runloopy, loopyinst1)
threeinst1 = current_instance(runthree, x1)
threebel = get_belief(runthree, threeinst1)
for x in [1,2]
@test cpdf(loopybel, (), x) != cpdf(threebel, (), x)
end
end
@testset "Correctly handle ranges not in alphabetical order" begin
# create network
p = Cat([:N, :AN],[1.0, 0.0])()(:p)
o = DiscreteCPT([:B, :I], Dict((:N,) => [1.0, 0.0], (:AN,) => [0.2, 0.8]))()(:o)
a = DiscreteCPT([:SK, :SH], Dict((:B,) => [1.0, 0.0], (:I,) => [0.0,1.0]))()(:a)
m = DiscreteCPT([:P, :M1, :M2], Dict((:SK,) => [0.9, 0.099, 0.001], (:SH,) => [0.2, 0.05, 0.75]))()(:m)
network = InstantNetwork(Variable[p,o,a,m], VariableGraph(o=>[p], a=>[o], m=>[a]))
# create runtime
runtime = Runtime(network)
default_initializer(runtime)
# run algorithm
loopy_BP(runtime; epsilon = 0.000001)
# get beliefs
p_i = current_instance(runtime,get_node(network,:p))
belief = get_belief(runtime, p_i)
@test isapprox([cpdf(belief, (), x) for x in [:N, :AN]], [1.0, 0.0];atol=0.1)
o_i = current_instance(runtime, get_node(network,:o))
belief = get_belief(runtime, o_i)
@test isapprox([cpdf(belief, (), x) for x in [:B, :I]], [1.0, 0.0];atol=0.1)
a_i = current_instance(runtime, get_node(network,:a))
belief = get_belief(runtime, a_i)
@test isapprox([cpdf(belief, (), x) for x in [:SK, :SH]], [1.0, 0.0];atol=0.1)
m_i = current_instance(runtime, get_node(network,:m))
belief = get_belief(runtime, m_i)
@test isapprox([cpdf(belief, (), x) for x in [:P, :M1,:M2]], [0.9, 0.099, 0.001];atol=0.1)
end
end
@testset "BP with determinisitic variables" begin
@testset "Det" begin
c1 = Cat([1.1, 2.2], [0.4, 0.6])
c2 = Cat([3.3, 4.4, 5.5], [0.2, 0.3, 0.5])
f(i,j) = Int(floor(i + j))
d = Det(Tuple{Float64, Float64}, Int, f)
vc1 = c1()(:c1)
vc2 = c2()(:c2)
vd = d()(:d)
net = InstantNetwork(Variable[vc1,vc2,vd], VariableGraph(vd=>[vc1,vc2]))
@testset "Prior probabilities" begin
run = Runtime(net)
default_initializer(run)
# post_evidence!(run, current_instance(run, vd), SoftScore([5,6], [0.9, 0.1]))
three_pass_BP(run)
bc1 = get_belief(run, current_instance(run, vc1))
bc2 = get_belief(run, current_instance(run, vc2))
bd = get_belief(run, current_instance(run, vd))
@test isapprox(cpdf(bc1, (), 1.1), 0.4)
@test isapprox(cpdf(bc1, (), 2.2), 0.6)
@test isapprox(cpdf(bc2, (), 3.3), 0.2)
@test isapprox(cpdf(bc2, (), 4.4), 0.3)
@test isapprox(cpdf(bc2, (), 5.5), 0.5)
@test isapprox(cpdf(bd, (), 4), 0.4 * 0.2)
@test isapprox(cpdf(bd, (), 5), 0.4 * 0.3 + 0.6 * 0.2)
@test isapprox(cpdf(bd, (), 6), 0.4 * 0.5 + 0.6 * 0.3)
@test isapprox(cpdf(bd, (), 7), 0.6 * 0.5)
end
@testset "Posterior probabilities" begin
run = Runtime(net)
default_initializer(run)
post_evidence!(run, current_instance(run, vd), SoftScore([5,6], [0.9, 0.1]))
three_pass_BP(run)
bc1 = get_belief(run, current_instance(run, vc1))
bc2 = get_belief(run, current_instance(run, vc2))
bd = get_belief(run, current_instance(run, vd))
p14 = 0.4 * 0.3 * 0.9
p15 = 0.4 * 0.5 * 0.1
p23 = 0.6 * 0.2 * 0.9
p24 = 0.6 * 0.3 * 0.1
p1 = p14 + p15
p2 = p23 + p24
p3 = p23
p4 = p14 + p24
p5 = p15
pd5 = p14 + p23
pd6 = p15 + p24
z1 = p1 + p2
z2 = p3 + p4 + p5
zd = pd5 + pd6
@test isapprox([cpdf(bc1, (), x) for x in [1.1, 2.2]], [p1, p2] ./ z1)
@test isapprox([cpdf(bc2, (), x) for x in [3.3, 4.4, 5.5]], [p3, p4, p5] ./ z2)
@test isapprox([cpdf(bd, (), x) for x in [4, 5, 6, 7]], [0, pd5, pd6, 0] ./ zd)
end
# @testset "Downsampling in Det" begin
# function f(x_tmin1::Float64, func::Symbol)
# delta_t = 1
# if (func==:F2) # square: x_t = t^2 <=> x_t = x_tmin1 + 2*sqrt(x_tmin1)*delta_t + delta_t^2
# x_t = floor(x_tmin1 + 2 * sqrt(x_tmin1) * delta_t + delta_t^2)
# elseif (func==:F3) # linear: x_t = at + k <=> x_t = x_tmin1 + a * delta_t
# a = 5 # slope
# x_t = floor(x_tmin1 + a * delta_t)
# else # include propagation constant x_t = x_tmin1 # :F1
# x_t = floor(x_tmin1)
# end
# return x_t
# end
# p = Det(Tuple{Float64, Symbol}, Float64, f)
# parranges = ([100.0], [:F1, :F2, :F3])
# pis = ((1.0,), (0.2,0.3,0.5))
# @test support(p, parranges, 100, Float64[])==[100.0, 105.0, 121.0]
# # test downsampling
# samples = support(p, parranges, 2, Float64[])
# @test length(samples)==2
# @test setdiff(samples, [100.0, 105.0, 121.0]) |> isempty == true # check if samples is contained in the original range
# #create network
# a0 = Cat([:F1,:F2,:F3], [1.0,0.0,0.0])()(:a0)
# b0 = DiscreteCPT([100.0], Dict((:F1,) => [1.0], (:F2,) => [1.0], (:F3,) => [1.0]))()(:b0)
# aCPD = Dict((:F1, ) => [0.95, 0.045, 0.005],
# (:F2, ) => [0.95, 0.045, 0.005],
# (:F3, ) => [0.45, 0.1, 0.45])
# a1 = DiscreteCPT([:F1,:F2,:F3], aCPD)()(:a1)
# b1 = Det(Tuple{Float64, Symbol}, Float64, f)()(:b1)
# a2 = DiscreteCPT([:F1,:F2,:F3], aCPD)()(:a2)
# b2 = Det(Tuple{Float64, Symbol}, Float64, f)()(:b2)
# network = InstantNetwork(Variable[a0,b0,a1,b1,a2,b2], VariableGraph(b0=>[a0], a1=>[a0], b1=>[b0,a1], a2=>[a1], b2=>[b1,a2]))
# runtime = Runtime(network)
# Scruff.RTUtils.default_initializer(runtime, 1, 3) # limit to 3 possible values of Det
# @test length(get_range(runtime, current_instance(runtime,b0))) == 1
# @test length(get_range(runtime, current_instance(runtime,b1))) == 3
# @test length(get_range(runtime, current_instance(runtime,b2))) == 3
# post_evidence!(runtime, current_instance(runtime,a1), HardScore(:F2))
# three_pass_BP(runtime)
# a0_i = current_instance(runtime,a0)
# belief_a0 = get_belief(runtime, a0_i)
# #println("belief a0=$belief_a0")
# b0_i = current_instance(runtime,b0)
# belief_b0 = get_belief(runtime, b0_i)
# #println("belief b0=$belief_b0 and range = $(get_value(runtime, current_instance(runtime,b0), :range))")
# a1_i = current_instance(runtime,a1)
# belief_a1 = get_belief(runtime, a1_i)
# #println("belief a1=$belief_a1")
# b1_i = current_instance(runtime,b1)
# belief_b1 = get_belief(runtime, b1_i)
# #println("belief b1=$belief_b1 and range = $(get_value(runtime, current_instance(runtime,b1), :range))")
# a2_i = current_instance(runtime,a2)
# belief_a2 = get_belief(runtime, a2_i)
# #println("belief a2=$belief_a2")
# b2_i = current_instance(runtime,b2)
# belief_b2 = get_belief(runtime, b2_i)
# #println("belief b2=$belief_b2 and range = $(get_value(runtime, current_instance(runtime,b2), :range))")
# end
end
@testset "If" begin
c1 = Cat([1, 2], [0.1, 0.9])
c2 = Cat([1, 2, 3], [0.2, 0.3, 0.5])
f = Flip(0.4)
i = If{Int}()
vc1 = c1()(:c1)
vc2 = c2()(:c2)
vf = f()(:f)
vi = i()(:i)
net = InstantNetwork(Variable[vc1,vc2,vf,vi], VariableGraph(vi=>[vf, vc1, vc2]))
@testset "Marginal probabilities without evidence" begin
run = Runtime(net)
default_initializer(run)
three_pass_BP(run)
bc1 = get_belief(run, current_instance(run, vc1))
bc2 = get_belief(run, current_instance(run, vc2))
bf = get_belief(run, current_instance(run, vf))
bi = get_belief(run, current_instance(run, vi))
@test isapprox([cpdf(bc1, (), x) for x in [1,2]], [0.1, 0.9])
@test isapprox([cpdf(bc2, (), x) for x in [1,2,3]], [0.2, 0.3, 0.5])
@test isapprox([cpdf(bf, (), x) for x in [false, true]], [0.6, 0.4])
@test isapprox([cpdf(bi, (), x) for x in [1,2,3]], [0.4 * 0.1 + 0.6 * 0.2, 0.4 * 0.9 + 0.6 * 0.3, 0.6 * 0.5])
end
@testset "Posterior probabilities with evidence" begin
run = Runtime(net)
default_initializer(run)
post_evidence!(run, current_instance(run, vi), HardScore(1))
three_pass_BP(run)
bc1 = get_belief(run, current_instance(run, vc1))
bc2 = get_belief(run, current_instance(run, vc2))
bf = get_belief(run, current_instance(run, vf))
bi = get_belief(run, current_instance(run, vi))
# The following are the probabilities of joint states consistent with vi = 1
p11t = 0.1 * 0.2 * 0.4
p12t = 0.1 * 0.3 * 0.4
p13t = 0.1 * 0.5 * 0.4
p11f = 0.1 * 0.2 * 0.6
p21f = 0.9 * 0.2 * 0.6
z = p11t + p12t + p13t + p11f + p21f
@test isapprox([cpdf(bc1, (), x) for x in [1,2]], [(p11t + p12t + p13t + p11f) / z, p21f / z])
@test isapprox([cpdf(bc2, (), x) for x in [1,2,3]], [(p11t + p11f + p21f) / z, p12t / z, p13t / z])
@test isapprox([cpdf(bf, (), x) for x in [false, true]], [(p11f + p21f) / z, (p11t + p12t + p13t) / z])
@test isapprox([cpdf(bi, (), x) for x in [1,2,3]], [1.0, 0.0, 0.0])
end
end
end
@testset "BP with Apply" begin
ff1((x,)) = x + x
ff2((x,)) = x
f1 = Det(Tuple{Int}, Int, ff1)
f2 = Det(Tuple{Int}, Int, ff2)
gg1(x, y) = x + y
gg2(x, y) = x
g1 = Det(Tuple{Int,Int}, Int, gg1)
g2 = Det(Tuple{Int,Int}, Int, gg2)
h1 = f2
h2params = Dict((1,) => [0.4,0.6], (2,) => [0.7, 0.3])
h2 = DiscreteCPT([1,2], h2params)
xrange = [1,2]
yrange = [1,2,3]
xyrange = [(1,1), (1,2), (1,3), (2,1), (2,2), (2,3)]
frange = [f1, f2]
grange = [g1, g2]
hrange = [h1, h2]
xpi = [0.1, 0.9]
ypi = [0.2, 0.3, 0.5]
xypi = [0.1 * 0.2, 0.1 * 0.3, 0.1 * 0.5, 0.9 * 0.2, 0.9 * 0.3, 0.9 * 0.5]
fpi = [0.2, 0.8]
gpi = [0.3, 0.7]
hpi = [0.7, 0.3]
x = Cat(xrange, xpi)
y = Cat(yrange, ypi)
xy = Cat(xyrange, xypi)
f = Cat(frange, fpi)
g = Cat(grange, gpi)
h = Cat(hrange, hpi)
appf = Apply{Tuple{Int}, Int}()
appg = Apply{Tuple{Int, Int}, Int}()
apph = appf
appfrange = [1,2,4]
appgrange = [1,2,3,4,5]
apphrange = [1,2]
lf = [0.1, 0.2, 0.7]
lg = [0.1, 0.2, 0.25, 0.3, 0.15]
lh = [0.75, 0.25]
lamappf = SoftScore(appfrange, lf)
lamappg = SoftScore(appgrange, lg)
lamapph = SoftScore(apphrange, lh)
@testset "compute_pi" begin
@testset "with one arg parent and deterministic sfuncs" begin
pi = compute_pi(appf, appfrange, (frange, xrange), (f, x))
p1 = cpdf(pi, (), 1)
p2 = cpdf(pi, (), 2)
p4 = cpdf(pi, (), 4)
@test isapprox(p1, 0.8 * 0.1)
@test isapprox(p2, 0.8 * 0.9 + 0.2 * 0.1)
@test isapprox(p4, 0.2 * 0.9)
end
@testset "with two arg parents and deterministic sfuncs" begin
pi = compute_pi(appg, appgrange, (grange, xyrange), (g, xy))
p1 = cpdf(pi, (), 1)
p2 = cpdf(pi, (), 2)
p3 = cpdf(pi, (), 3)
p4 = cpdf(pi, (), 4)
p5 = cpdf(pi, (), 5)
@test isapprox(p1, gpi[2] * xpi[1]) # Only 1
@test isapprox(p2, gpi[2] * xpi[2] + gpi[1] * xpi[1] * ypi[1]) # Only 2 or 1+1
@test isapprox(p3, gpi[1] * (xpi[1] * ypi[2] + xpi[2] * ypi[1])) # 1+2 or 2+1
@test isapprox(p4, gpi[1] * (xpi[1] * ypi[3] + xpi[2] * ypi[2])) # 1+3 or 2+2
@test isapprox(p5, gpi[1] * xpi[2] * ypi[3]) # 2+3
end
@testset "with stochastic functions" begin
pi = compute_pi(apph, apphrange, (hrange, xrange), (h, x))
q1 = cpdf(pi, (), 1)
q2 = cpdf(pi, (), 2)
p1given1 = hpi[1] + hpi[2] * h2params[(1,)][1]
p1given2 = hpi[2] * h2params[(2,)][1]
p2given1 = hpi[2] * h2params[(1,)][2]
p2given2 = hpi[1] + hpi[2] * h2params[(2,)][2]
p1 = xpi[1] * p1given1 + xpi[2] * p1given2
p2 = xpi[1] * p2given1 + xpi[2] * p2given2
@test isapprox(q1, p1)
@test isapprox(q2, p2)
end
end
@testset "compute_lambda" begin
@testset "on sfunc parent" begin
@testset "with one arg parent and deterministic functions" begin
lam = send_lambda(appf, lamappf, appfrange, (frange, xrange), (f, x), 1)
# Possibilities for f1 = x -> x + x:
# 1: impossible
# 2: x == 1
# 4: x == 2
l1 = cpdf(x, (), 1) * get_score(lamappf, 2) + cpdf(x, (), 2) * get_score(lamappf, 4)
# Possibilities for f2 = x -> x
# 1: x == 1
# 2: x == 2
# 4: impossible
l2 = cpdf(x, (), 1) * get_score(lamappf, 1) + cpdf(x, (), 2) * get_score(lamappf, 2)
@test isapprox(get_score(lam, f1), l1)
@test isapprox(get_score(lam, f2), l2)
end
@testset "with two arg parents and deterministic functions" begin
xyrange = [(xy[1], xy[2]) for xy in Utils.cartesian_product([xrange, yrange])]
xy = Cat(xyrange, [p[1] * p[2] for p in Utils.cartesian_product([xpi, ypi])])
pi = compute_pi(appg, appgrange, (grange, xyrange), (g, xy))
lam = send_lambda(appg, lamappg, appgrange, (grange, xyrange), (g, xy), 1)
# Possibilities for g1 = (x,y) -> x + y
# 1: impossible
# 2: x == 1, y == 1
# 3: x == 1, y == 2 or x == 2, y == 1
# 4: x == 1, y == 3 or x == 2, y == 2
# 5: x == 2, y == 3
l1 =
cpdf(x, (), 1) * cpdf(y, (), 1) * get_score(lamappg, 2) +
(cpdf(x, (), 1) * cpdf(y, (), 2) + cpdf(x, (), 2) * cpdf(y, (), 1)) * get_score(lamappg, 3) +
(cpdf(x, (), 1) * cpdf(y, (), 3) + cpdf(x, (), 2) * cpdf(y, (), 2)) * get_score(lamappg, 4) +
cpdf(x, (), 2) * cpdf(y, (), 3) * get_score(lamappg, 5)
# Possibilities for g2 = (x,y) -> x
# 1: x == 1, y == anything
# 2: x == 2, y == anything
# 3-5: impossible
l2 = cpdf(x, (), 1) * get_score(lamappg, 1) + cpdf(x, (), 2) * get_score(lamappg, 2)
@test isapprox(get_score(lam, g1), l1)
@test isapprox(get_score(lam, g2), l2)
end
@testset "with stochastic functions" begin
lam = send_lambda(apph, lamapph, apphrange, (hrange, xrange), (h, x), 1)
# possibilities for h1 = x -> x
# 1: x == 1
# 2: x == 2
l1 = cpdf(x, (), 1) * get_score(lamapph, 1) + cpdf(x, (), 2) * get_score(lamapph, 2)
# possibilities for h2 = DiscreteCpt([1,2], h2params)
# 1: x == 1 (prob h2params[(1,)][1]) or x == 2 (prob h2params[(2,)][1])
# 2: x == 1 (prob h2params[(1,)][2]) or x == 2 (prob h2params[(2,)][2])
l2 =
(cpdf(x, (), 1) * h2params[(1,)][1] + cpdf(x, (), 2) * h2params[(2,)][1]) * get_score(lamapph, 1) +
(cpdf(x, (), 1) * h2params[(1,)][2] + cpdf(x, (), 2) * h2params[(2,)][2]) * get_score(lamapph, 2)
@test isapprox(get_score(lam, h1), l1)
@test isapprox(get_score(lam, h2), l2)
end
end
@testset "on args parent" begin
@testset "with one arg parent and deterministic functions" begin
lam = send_lambda(appf, lamappf, appfrange, (frange, xrange), (f, x), 2)
# Possibilities for x == 1:
# 1: f == f2
# 2: f == f1
# 4: impossible
l1 = cpdf(f, (), f2) * get_score(lamappf, 1) + cpdf(f, (), f1) * get_score(lamappf, 2)
# Possibilities for x == 2
# 1: impossible
# 2: f == f2
# 4: f == f1
l2 = cpdf(f, (), f2) * get_score(lamappf, 2) + cpdf(f, (), f1) * get_score(lamappf, 4)
@test isapprox(get_score(lam, 1), l1)
@test isapprox(get_score(lam, 2), l2)
end
@testset "with two arg parents and deterministic functions" begin
lam = send_lambda(appg, lamappg, appgrange, (grange, xyrange), (g, xy), 2)
# Possibilities for (1,1)
# 1: g == g2
# 2: g == g1
l11 = cpdf(g, (), g2) * get_score(lamappg, 1) + cpdf(g, (), g1) * get_score(lamappg, 2)
# Possibilities for (1,2)
# 1: g == g2
# 3: g == g1
l12 = cpdf(g, (), g2) * get_score(lamappg, 1) + cpdf(g, (), g1) * get_score(lamappg, 3)
# Possibilities for (1,3)
# 1: g == g2
# 4: g == g1
l13 = cpdf(g, (), g2) * get_score(lamappg, 1) + cpdf(g, (), g1) * get_score(lamappg, 4)
# Possibilities for (2,1)
# 2: g == g2
# 3: g == g1
l21 = cpdf(g, (), g2) * get_score(lamappg, 2) + cpdf(g, (), g1) * get_score(lamappg, 3)
# Possibilities for (2,2)
# 2: g == g2
# 4: g == g1
l22 = cpdf(g, (), g2) * get_score(lamappg, 2) + cpdf(g, (), g1) * get_score(lamappg, 4)
# Possibilities for (2,3)
# 2: g == g2
# 5: g == g1
l23 = cpdf(g, (), g2) * get_score(lamappg, 2) + cpdf(g, (), g1) * get_score(lamappg, 5)
@test isapprox(get_score(lam, (1,1)), l11)
@test isapprox(get_score(lam, (1,2)), l12)
@test isapprox(get_score(lam, (1,3)), l13)
@test isapprox(get_score(lam, (2,1)), l21)
@test isapprox(get_score(lam, (2,2)), l22)
@test isapprox(get_score(lam, (2,3)), l23)
end
@testset "with stochastic functions" begin
lam = send_lambda(apph, lamapph, apphrange, (hrange, xrange), (h, x), 2)
# possibilities for 1
# 1: h == h1 or h == h2 (prob h2params[(1,)][1])
# 2: h == h2 (prob h2params[(1,)][2])
l1 =
(cpdf(h, (), h1) + cpdf(h, (), h2) * h2params[(1,)][1]) * get_score(lamapph, 1) +
cpdf(h, (), h2) * h2params[(1,)][2] * get_score(lamapph, 2)
# possibilities for 2
# 1: h == h2 (prob h2params[(2,)][1])
# 2: h == h1 or h == h2 (prob h2params[(2,)][2])
l2 =
cpdf(h, (), h2) * h2params[(2,)][1] * get_score(lamapph, 1) +
(cpdf(h, (), h1) + cpdf(h, (), h2) * h2params[(2,)][2]) * get_score(lamapph, 2)
@test isapprox(get_score(lam, 1), l1)
@test isapprox(get_score(lam, 2), l2)
end
end
end
end
@testset "Using the ThreePassBP instant algorithm" begin
@testset "Basic" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = ThreePassBP()
infer(alg, runtime)
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i1, :a), 0.1)
@test isapprox(probability(alg, runtime, i1, :b), 0.9)
@test isapprox(probability(alg, runtime, i2, 1), 0.1 * 0.2 + 0.9 * 0.3)
@test isapprox(probability(alg, runtime, i2, 2), 0.1 * 0.8 + 0.9 * 0.7)
end
@testset "Mean and Variance" begin
v1 = Cat([4, 34, 18, 12, 2, 26], [1/6, 1/6, 1/6, 1/6, 1/6, 1/6])()(:v1)
net = InstantNetwork(Variable[v1], VariableGraph())
runtime = Runtime(net)
alg = ThreePassBP()
infer(alg, runtime, Dict{Symbol, Score}())
i1 = current_instance(runtime, v1)
@test isapprox(Scruff.Algorithms.mean(alg, runtime, i1), (4 + 34 + 18 + 12 + 2 + 26)/6)
@test isapprox(Scruff.Algorithms.variance(alg, runtime, i1), ((4-16)^2 + (34-16)^2 + (18-16)^2 + (12-16)^2 + (2-16)^2 + (26-16)^2)/6)
end
@testset "With placeholder" begin
p1 = Placeholder{Symbol}(:p1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v2], VariableGraph(v2 => [p1]), Placeholder[p1])
runtime = Runtime(net)
default_initializer(runtime, 10, Dict(p1.name => Cat([:a,:b], [0.1, 0.9])))
alg = ThreePassBP()
infer(alg, runtime)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i2, 1), 0.1 * 0.2 + 0.9 * 0.3)
@test isapprox(probability(alg, runtime, i2, 2), 0.1 * 0.8 + 0.9 * 0.7)
end
@testset "With evidence" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = ThreePassBP()
infer(alg, runtime, Dict{Symbol, Score}(:v2 => HardScore(2)))
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
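# Posterior over v1 given the hard evidence v2 == 2, computed by Bayes' rule below.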
p1 = 0.1 * 0.8
p2 = 0.9 * 0.7
z = p1 + p2
@test isapprox(probability(alg, runtime, i1, :a), p1 / z)
@test isapprox(probability(alg, runtime, i1, :b), p2 / z)
@test isapprox(probability(alg, runtime, i2, 1), 0.0)
@test isapprox(probability(alg, runtime, i2, 2), 1.0)
end
@testset "With intervention" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = ThreePassBP()
infer(alg, runtime, Dict{Symbol, Score}(), Dict{Symbol, Dist}(:v2 => Constant(2)))
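# An intervention on v2 severs its dependence on v1, so v1 keeps its prior
# while v2 becomes deterministic.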
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i1, :a), 0.1)
@test isapprox(probability(alg, runtime, i1, :b), 0.9)
@test isapprox(probability(alg, runtime, i2, 1), 0.0)
@test isapprox(probability(alg, runtime, i2, 2), 1.0)
end
end
@testset "Using the LoopyBP instant algorithm" begin
@testset "Basic" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = LoopyBP()
infer(alg, runtime)
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i1, :a), 0.1)
@test isapprox(probability(alg, runtime, i1, :b), 0.9)
@test isapprox(probability(alg, runtime, i2, 1), 0.1 * 0.2 + 0.9 * 0.3)
@test isapprox(probability(alg, runtime, i2, 2), 0.1 * 0.8 + 0.9 * 0.7)
end
@testset "With placeholder" begin
p1 = Placeholder{Symbol}(:p1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v2], VariableGraph(v2 => [p1]), Placeholder[p1])
runtime = Runtime(net)
default_initializer(runtime, 10, Dict(p1.name => Cat([:a,:b], [0.1, 0.9])))
alg = LoopyBP()
infer(alg, runtime)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i2, 1), 0.1 * 0.2 + 0.9 * 0.3)
@test isapprox(probability(alg, runtime, i2, 2), 0.1 * 0.8 + 0.9 * 0.7)
end
@testset "With evidence" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = LoopyBP()
infer(alg, runtime, Dict{Symbol, Score}(:v2 => HardScore(2)))
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
p1 = 0.1 * 0.8
p2 = 0.9 * 0.7
z = p1 + p2
@test isapprox(probability(alg, runtime, i1, :a), p1 / z)
@test isapprox(probability(alg, runtime, i1, :b), p2 / z)
@test isapprox(probability(alg, runtime, i2, 1), 0.0)
@test isapprox(probability(alg, runtime, i2, 2), 1.0)
end
@testset "With intervention" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = LoopyBP()
infer(alg, runtime, Dict{Symbol, Score}(), Dict{Symbol, Dist}(:v2 => Constant(2)))
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i1, :a), 0.1)
@test isapprox(probability(alg, runtime, i1, :b), 0.9)
@test isapprox(probability(alg, runtime, i2, 1), 0.0)
@test isapprox(probability(alg, runtime, i2, 2), 1.0)
end
end
end
include("..//src//utils//logplots.jl")
using Scruff
using Scruff.SFuncs
using Scruff.Utils
using Scruff.RTUtils
using Scruff.Models
import Scruff.Algorithms
pyplot()
x1m = Cat([1,2], [0.1, 0.9])()
x2m = Cat([1,2,3], [0.2, 0.3, 0.5])()
cpd2 = Dict((1,1) => [0.3, 0.7], (1,2) => [0.6, 0.4], (2,1) =>[0.4, 0.6],
(2,2) => [0.7, 0.3], (3,1) => [0.5, 0.5], (3,2) => [0.8, 0.2])
x3m = DiscreteCPT([1,2], cpd2)()
x4m = DiscreteCPT([1,2], Dict((1,) => [0.15, 0.85], (2,) => [0.25, 0.75]))()
x5m = DiscreteCPT([1,2], Dict((1,) => [0.35, 0.65], (2,) => [0.45, 0.55]))()
x6m = DiscreteCPT([1,2], Dict((1,) => [0.65, 0.35], (2,) => [0.75, 0.25]))()
x1 = x1m(:x1)
x2 = x2m(:x2)
x3 = x3m(:x3)
x4 = x4m(:x4)
x5 = x5m(:x5)
x6 = x6m(:x6)
fivecpdnet = InstantNetwork(Variable[x1,x2,x3,x4,x5], VariableGraph(x3=>[x2,x1], x4=>[x3], x5=>[x3]))
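# Diamond-shaped network: x3 depends on x2 and x1, and x4 and x5 each depend on x3.
# (x6 is defined above but not included in the network.)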
run = Runtime(fivecpdnet)
# default_initializer(run)
logger = BPLogger([:x1,:x2])
@info "logger=$logger"
# run em
with_logger(logger) do
Scruff.Algorithms.three_pass_BP(run)
end
module CoreTest
using Test
using Plots
import Distributions
using Scruff
using Scruff.Operators
using Scruff.MultiInterface
using Scruff.Models
using Scruff.SFuncs
import Scruff: make_initial, make_transition
import Scruff.Models: get_dt
import Scruff.Operators: Sample, sample, Logcpdf, logcpdf, Marginalize, marginalize, Expectation, expectation, Variance, variance
struct MyModel <: Model{Tuple{}, Tuple{Int}, Tuple{Int, Int}} end
function make_transition(m::MyModel, parenttimes, time)
return Constant((parenttimes[1],time))
end
function make_initial(m::MyModel, time)
return Cat([(1,0)], [1.0])
end
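# MyModel's sfuncs encode timing information in their values: the initial sfunc
# always yields (1,0), and the transition sfunc yields (parent time, current time),
# which lets the tests below check exactly which times instantiate! passed in.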
struct MyNormal <: SFunc{Tuple{}, Float32}
mean::Float32
var::Float32
end
@impl begin
struct MyNormalExpectation end
function expectation(sf::MyNormal, i::Tuple{})::Float32
# Has access to the sfunc as `sf`, as well as to `op_imp`
return sf.mean
end
end
@impl begin
struct MyNormalVariance end
function variance(sf::MyNormal, i::Tuple{})::Float32
return sf.var
end
end
@impl begin
struct MyNormalSample end
function sample(sf::MyNormal, i::Tuple{})::Float32
return rand(Distributions.Normal(sf.mean, sqrt(sf.var)))
end
end
@impl begin
struct MyNormalLogcpdf end
function logcpdf(sf::MyNormal, i::Tuple{}, o::Float32)::AbstractFloat
return Distributions.logpdf(Distributions.Normal(sf.mean, sqrt(sf.var)), o)
end
end
struct MyCondMuNormal <: SFunc{Tuple{Float32}, Float32}
# A conditional distribution for a Normal conditioned on mu with fixed var.
# CondMuNormal_var(mu) = N(mu,var), essentially.
var::Float32
end
@impl begin
struct MyCondMuNormalSample end
function sample(sf::MyCondMuNormal, x::Tuple{Float32})::Float32
return rand(Distributions.Normal(x[1], sqrt(sf.var)))
end
end
@impl begin
struct MyCondMuNormalLogcpdf end
function logcpdf(sf::MyCondMuNormal, i::Tuple{Float32}, o::Float32)::AbstractFloat
return Distributions.logpdf(Distributions.Normal(i[1], sqrt(sf.var)), o)
end
end
@impl begin
struct MyCondMuNormalMarginalize end
function marginalize(x::MyNormal, sf::MyCondMuNormal)::MyNormal
mu = expectation(x, tuple())
var = variance(x, tuple()) + sf.var
return MyNormal(mu, var)
end
end
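# Marginalizing N(mu0, var0) through MyCondMuNormal(var) gives N(mu0, var0 + var):
# the mean passes through and the independent variances add.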
@impl begin
struct MyCondMuNormalExpectation end
function expectation(sf::MyCondMuNormal, x::Tuple{Float32, Tuple{}})::Float32
return x[1]
end
end
@impl begin
struct MyCondMuNormalVariance end
function variance(sf::MyCondMuNormal, i::Tuple{Float32, Tuple{}})::Float32
return sf.var
end
end
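# A Gaussian random walk: initial state ~ N(0,1), each step adds N(0,1) noise.
# The trailing 2.0 is presumably the model's fixed time step (cf. get_dt above).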
randomwalk = HomogeneousModel(MyNormal(0.0, 1.0), MyCondMuNormal(1.0), 2.0)
struct WienerProcess <: VariableTimeModel{Tuple{}, Tuple{Float32}, Float64}
# A continuous limit of Gaussian random walk
k::Float32 # "Rate" of random walk. Units of var/time
end
function make_transition(m::WienerProcess,parenttimes,time)
var = m.k*abs(time-parenttimes[1])
return MyCondMuNormal(var)
end
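# The transition variance grows linearly with the elapsed time between instances,
# the defining property of a Wiener process.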
function make_initial(m::WienerProcess)
return MyNormal(0,1)
end
wienerprocess = WienerProcess(0.1)
@testset "Core" begin
@testset "instantiate!" begin
@testset "instant network" begin
@testset "correctly instantiates variables and placeholders" begin
v = MyModel()(:v)
p = Placeholder{Tuple{Int,Int}}(:p)
net = InstantNetwork(Variable[v], VariableGraph(v => [p]), Placeholder[p])
run = Runtime(net)
ensure_all!(run)
inst1 = current_instance(run, p)
inst2 = current_instance(run, v)
@test inst1 isa PlaceholderInstance
@test inst2 isa VariableInstance
@test get_sfunc(inst2) isa Cat
# ensure has_timeoffset returns false
@test !has_timeoffset(net, v, v)
end
end
@testset "dynamic network" begin
@testset "correctly calls make_initial or make_transition" begin
v = MyModel()(:v)
u = Constant(1)()(:u)
net = DynamicNetwork(Variable[u,v], VariableGraph(), VariableGraph(v => [u]))
run = Runtime(net)
instantiate!(run, u, 1)
instantiate!(run, v, 2)
instantiate!(run, v, 4)
inst2 = get_instance(run, v, 2)
inst4 = get_instance(run, v, 4)
@test get_sfunc(inst2) isa Cat
@test get_sfunc(inst4) isa Constant
end
@testset "correctly instantiates variables and placeholders" begin
v = MyModel()(:v)
p = Placeholder{Int}(:p)
net = DynamicNetwork(Variable[v], VariableGraph(),
VariableGraph(v => [p]), VariableParentTimeOffset(), Placeholder[p])
run = Runtime(net)
inst1 = instantiate!(run, p, 1)
inst2 = instantiate!(run, v, 2)
@test isa(inst1, PlaceholderInstance)
@test isa(inst2, VariableInstance)
end
@testset "passes the correct times to the model" begin
v = MyModel()(:v)
net = DynamicNetwork(Variable[], VariableGraph(), VariableGraph(v => [v]))
run = Runtime(net)
inst2 = instantiate!(run, v, 2)
inst4 = instantiate!(run, v, 4)
inst3 = instantiate!(run, v, 3)
inst5 = instantiate!(run, v, 5)
# Each instance should have a previous time of the previously existing latest instance, if any
@test get_sfunc(inst4).x == (2,4)
@test get_sfunc(inst3).x == (2,3)
@test get_sfunc(inst5).x == (4,5)
end
@testset "passes the correct offset times to the model" begin
u = MyModel()(:u)
v = MyModel()(:v)
net = DynamicNetwork(Variable[u,v], VariableGraph(), VariableGraph(v => [u], u => [u]), VariableParentTimeOffset([Pair(v, u)]))
run = Runtime(net)
uinst1 = instantiate!(run, u, 0)
inst1 = instantiate!(run, v, 1)
inst3 = instantiate!(run, v, 3)
uinst4 = instantiate!(run, u, 4)
uinst5 = instantiate!(run, u, 5)
inst5 = instantiate!(run, v, 5)
# Each instance should have a previous time of the previously existing latest instance, if any
@test get_sfunc(inst3).x == (0,3)
@test get_sfunc(inst5).x == (4,5)
end
end
end
@testset "models" begin
@testset "HomogeneousModel" begin
@testset "Initial model" begin
sf = make_initial(randomwalk)
@test sf isa MyNormal
# Test some ops
_sample = sample(sf, ())
_logcpdf = logcpdf(sf, (), _sample)
end
@testset "Transition model" begin
sf = make_transition(randomwalk,0.,2.)
@test sf isa MyCondMuNormal
# Test some ops
_sample = sample(sf, (0.0f0,))
_logcpdf = logcpdf(sf, (0.0f0,), _sample)
end
end
@testset "VariableTimeModel" begin
@testset "Initial model" begin
sf = make_initial(wienerprocess)
@test sf isa MyNormal
# Test some ops
_sample = sample(sf, ())
_logcpdf = logcpdf(sf, (), _sample)
end
@testset "Transition model" begin
sf = make_transition(wienerprocess,(0.,),2.)
@test sf isa MyCondMuNormal
# Test some ops
_sample = sample(sf, (0.0f0,))
_logcpdf = logcpdf(sf, (0.0f0,), _sample)
end
end
end
end
end
using Test
using Scruff
using Scruff.Utils
using Scruff.RTUtils
using Scruff.SFuncs
using Scruff.Operators
using Scruff.Algorithms
import Scruff.Algorithms: em, bp_info_provider
using Scruff.Models
using Base.Filesystem
using Random
@testset "EM" begin
@testset "Cat operations" begin
range = [1, 2, 3]
x = Cat(range, [0.2, 0.3, 0.5])
@testset "initial_stats" begin
@test initial_stats(x) == [0, 0, 0]
end
@testset "expected_stats" begin
@test expected_stats(x, range, (), (), FunctionalScore{Int}(i->[0,1,0][i])) == [0.0, 0.3, 0.0]
@test expected_stats(x, range, (), (), FunctionalScore{Int}(i->[0.5, 0.6, 0][i])) == [0.1, 0.18, 0.0]
end
@testset "accumulate_stats" begin
@test accumulate_stats(x, [0.2, 0.3, 0.5], [0.5, 0.5, 0.0]) == [0.7, 0.8, 0.5]
end
@testset "maximize_stats" begin
@test maximize_stats(x, [0.7, 0.8, 0.5]) == [0.35, 0.4, 0.25]
end
@testset "full loop" begin
s = initial_stats(x)
es1 = expected_stats(x, range, (), (), FunctionalScore{Int}(i->[0,1,0][i]))
es2 = expected_stats(x, range, (), (), FunctionalScore{Int}(i->[0.5, 0.6, 0][i]))
s = accumulate_stats(x, s, normalize(es1))
s = accumulate_stats(x, s, normalize(es2))
@test isapprox(maximize_stats(x, s), [0.1 / 0.28 / 2, (1 + 0.18 / 0.28) / 2, 0])
end
end
@testset "DiscreteCPT operations" begin
cpd = Dict((1,1) => [0.3, 0.7], (1,2) => [0.6, 0.4], (2,1) => [0.4, 0.6], (2,2) => [0.7, 0.3], (3,1) => [0.5, 0.5], (3,2) => [0.8, 0.2])
x = DiscreteCPT([1,2], cpd)
@testset "initial_stats" begin
@test initial_stats(x) == Dict()
end
@testset "expected_stats " begin
parpi1 = [0.2, 0.3, 0.5]
parpi2 = [0.9, 0.1]
lam = FunctionalScore{Int}(i->[0.1, 0.4][i])
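# For each parent configuration, the expected stats should be
# P(parent values) * (CPT row .* child lambda), as computed by hand below.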
px11 = [0.3 * 0.1, 0.7 * 0.4] * 0.9 * 0.2
px12 = [0.6 * 0.1, 0.4 * 0.4] * 0.1 * 0.2
px21 = [0.4 * 0.1, 0.6 * 0.4] * 0.9 * 0.3
px22 = [0.7 * 0.1, 0.3 * 0.4] * 0.1 * 0.3
px31 = [0.5 * 0.1, 0.5 * 0.4] * 0.9 * 0.5
px32 = [0.8 * 0.1, 0.2 * 0.4] * 0.1 * 0.5
d = Dict((1,1) => px11, (1,2) => px12, (2,1) => px21, (2,2) => px22, (3,1) => px31, (3,2) => px32)
stats = expected_stats(x, [1,2], ([1,2,3], [1,2]), (Cat([1,2,3], parpi1), Cat([1,2], parpi2)), lam)
@test length(stats) == length(d)
ans = [isapprox(stats[k], d[k]) for k in keys(stats)]
@test all(ans)
# for k in keys(stats)
# @test isapprox(stats[k], d[k])
# end
end
@testset "accumulate_stats" begin
d1 = Dict((1,1) => [0.4, 0.2], (1,2) => [0.5, 0.6])
d2 = Dict((1,1) => [0.7, 0.8], (2,1) => [1.0, 0.0])
d3 = Dict((1,1) => [1.1, 1.0], (1,2) => [0.5, 0.6], (2,1) => [1.0, 0.0])
stats = accumulate_stats(x, d1, d2)
# This test is written assuming the stats are not normalized while accumulating
@test length(stats) == length(d3)
for k in keys(stats)
@test isapprox(stats[k], d3[k])
end
end
@testset "maximize_stats" begin
stats = Dict((1,1) => [0.55, 0.95], (1,2) => [0.7, 1.3], (2,1) => [0.6, 1.4],
(2,2) => [1.0, 1.0], (3,1) => [0.9, 1.1], (3,2) => [1.3, 0.7])
m = maximize_stats(x, stats)
for k in keys(stats)
@test isapprox(m[k], normalize(stats[k]))
end
end
end
#=
@testset "Separable operations" begin
alphas = [0.2, 0.3, 0.5]
cpt1 = Dict((1,) => [0.1, 0.9], (2,) => [0.2, 0.8])
cpt2 = Dict((1,) => [0.3, 0.7], (2,) => [0.4, 0.6], (3,) => [0.5, 0.5])
cpt3 = Dict((1,) => [0.6, 0.4], (2,) => [0.7, 0.3])
cpts::SepCPTs = [cpt1, cpt2, cpt3]
x = Separable([1,2], alphas, cpts)
insts = [Dict(), Dict(), Dict()]
range = [1,2]
parranges = ([1,2], [1,2,3], [1,2])
parent_πs = ([0.8, 0.2], [0.5, 0.3, 0.2], [0.9, 0.1])
child_λ = FunctionalScore{Int}(i->[0.1, 0.3][i])
(p111, p112) = [0.1 * 0.1, 0.9 * 0.3] * 0.8
(p121, p122) = [0.2 * 0.1, 0.8 * 0.3] * 0.2
(p211, p212) = [0.3 * 0.1, 0.7 * 0.3] * 0.5
(p221, p222) = [0.4 * 0.1, 0.6 * 0.3] * 0.3
(p231, p232) = [0.5 * 0.1, 0.5 * 0.3] * 0.2
(p311, p312) = [0.6 * 0.1, 0.4 * 0.3] * 0.9
(p321, p322) = [0.7 * 0.1, 0.3 * 0.3] * 0.1
es1 = Dict((1,) => [p111, p112] * alphas[1], (2,) => [p121, p122] * alphas[1])
es2 = Dict((1,) => [p211, p212] * alphas[2], (2,) => [p221, p222] * alphas[2], (3,) => [p231, p232] * alphas[2])
es3 = Dict((1,) => [p311, p312] * alphas[3], (2,) => [p321, p322] * alphas[3])
exps = [es1, es2, es3]
@testset "initial_stats" begin
stats = initial_stats(x)
@test stats == insts
end
# This test checks whether the code correctly computes according to the theory in the comment
# prior to the function. It does not test whether this theory is correct.
@testset "expected_stats" begin
compparams = expected_stats(x, range, parranges, (Cat(parranges[1], parent_πs[1]), Cat(parranges[2], parent_πs[2]), Cat(parranges[3], parent_πs[3])), child_λ)
for i in 1:3
@test length(keys(compparams[i])) == length(keys(exps[i]))
for j in keys(compparams[i])
@test isapprox(compparams[i][j], exps[i][j])
end
end
end
@testset "accumulate_stats" begin
@test accumulate_stats(x, insts, exps) == exps
end
@testset "maximize_stats" begin
ms = maximize_stats(x, exps)
alphas = ms[1]
ps = tuple(ms[2:end]...)
a1 = sum(exps[1][(1,)]) + sum(exps[1][(2,)])
a2 = sum(exps[2][(1,)]) + sum(exps[2][(2,)]) + sum(exps[2][(3,)])
a3 = sum(exps[3][(1,)]) + sum(exps[3][(2,)])
as = normalize([a1, a2, a3])
@test length(alphas) == 3
@test isapprox(alphas[1], as[1])
@test isapprox(alphas[2], as[2])
@test isapprox(alphas[3], as[3])
@test length(cpts) == 3
c11 = normalize(exps[1][(1,)])
c12 = normalize(exps[1][(2,)])
c21 = normalize(exps[2][(1,)])
c22 = normalize(exps[2][(2,)])
c23 = normalize(exps[2][(3,)])
c31 = normalize(exps[3][(1,)])
c32 = normalize(exps[3][(2,)])
k1 = [x.components[1].given.inversemaps[1][i] for i in 1:2]
k2 = [x.components[2].given.inversemaps[1][i] for i in 1:3]
k3 = [x.components[3].given.inversemaps[1][i] for i in 1:2]
@test isapprox(ps[1][k1[1][1]], c11)
@test isapprox(ps[1][k1[2][1]], c12)
@test isapprox(ps[2][k2[1][1]], c21)
@test isapprox(ps[2][k2[2][1]], c22)
@test isapprox(ps[2][k2[3][1]], c23)
@test isapprox(ps[3][k3[1][1]], c31)
@test isapprox(ps[3][k3[2][1]], c32)
end
end
=#
ConfigurableCatModel(sf) = SimpleNumeric{Tuple{}, Int, Vector{Float64}}(sf)
ConfigurableDiscreteCPTModel(I, sf) = SimpleNumeric{I, Int, Dict{I, Vector{Float64}}}(sf)
sf1 = Cat([1,2], [0.1, 0.9])
mod1 = ConfigurableCatModel(sf1)
x1 = Variable(:x1, mod1)
sf2 = Cat([1,2,3], [0.2,0.3,0.5])
mod2 = ConfigurableCatModel(sf2)
x2 = Variable(:x2, mod2)
sf3 = DiscreteCPT([1,2], Dict((1,1) => [0.3, 0.7], (1,2) => [0.6, 0.4], (2,1) => [0.4, 0.6],
(2,2) => [0.7, 0.3], (3,1) => [0.5, 0.5], (3,2) => [0.8, 0.2]))
mod3 = ConfigurableDiscreteCPTModel(Tuple{Int, Int}, sf3)
x3 = Variable(:x3, mod3)
sf4 = DiscreteCPT([1,2], Dict((1,) => [0.15, 0.85], (2,) => [0.25, 0.75]))
mod4 = ConfigurableDiscreteCPTModel(Tuple{Int}, sf4)
x4 = Variable(:x4, mod4)
sf5 = DiscreteCPT([1,2], Dict((1,) => [0.35, 0.65], (2,) => [0.45, 0.55]))
mod5 = ConfigurableDiscreteCPTModel(Tuple{Int}, sf5)
x5 = Variable(:x5, mod5)
fivecpdnet = InstantNetwork([x1,x2,x3,x4,x5], VariableGraph(x3=>[x2,x1], x4=>[x3], x5=>[x3]))
@testset "support functions" begin
@testset "bp_info_provider" begin
run = Runtime(fivecpdnet)
default_initializer(run)
three_pass_BP(run)
# v_x3 = get_node(run, :x3)
# inst = current_instance(run, v_x3)
inst = current_instance(run, x3)
sf = get_sfunc(inst)
# m1 = get_message(run, get_node(run, :x1), v_x3, :pi_message)
# m2 = get_message(run, get_node(run, :x2), v_x3, :pi_message)
m1 = get_message(run, x1, x3, :pi_message)
m2 = get_message(run, x2, x3, :pi_message)
l = get_value(run, inst, :lambda)
info = bp_info_provider(run, inst)
expected_stats(sf, [1,2], ([1, 2, 3], [1, 2]), (m2, m1), l)
end
@testset "convergence" begin
p1 = Dict(:a => 0.1, :b => [0.2, 0.3])
p2 = Dict(:a => 0.1, :b => [0.2, 0.3])
p3 = Dict(:a => 0.10001, :b => [0.20001, 0.30001])
p4 = Dict(:a => 0.11, :b => [0.20001, 0.30001])
p5 = Dict(:a => 0.1, :b => [0.2, 0.3], :c => 0.4)
@test converged_numeric(p1, p2, 0.001)
@test converged_numeric(p1, p3, 0.001)
@test !converged_numeric(p1, p4, 0.001)
@test !converged_numeric(p1, p5, 0.001)
@test !converged_numeric(p5, p1, 0.001)
end
end
@testset "EM" begin
@testset "termination" begin
@testset "should terminate immediately with false flag with 0 max_iterations" begin
function err(runtime)
error()
end
@test em(fivecpdnet, nothing ; algorithm = err, max_iterations = 0)[1] == (false, 0)
end
@testset "should converge right away if parameters don't change" begin
# if there's no evidence, the parameters shouldn't change
data = [Dict()]
@test em(fivecpdnet, data ; max_iterations = 2, showprogress=false)[1] == (true, 2)
end
@testset "should converge in few iterations with fully observed data" begin
# Ordinarily, this would be 2 iterations.
# However, with noise added to the evidence, it may take a few more.
data = [Dict(:x1 => HardScore(1), :x2 => HardScore(1), :x3 => HardScore(1), :x4 => HardScore(1), :x5 => HardScore(2))]
@test !em(fivecpdnet, data ; max_iterations = 1)[1][1]
# @test em(fivecpdnet, data ; max_iterations = 5)[1][1]
end
@testset "does not use less than min_iterations" begin
data = [Dict(:x1 => HardScore(1), :x2 => HardScore(1), :x3 => HardScore(1), :x4 => HardScore(1), :x5 => HardScore(2))]
@test em(fivecpdnet, data ; min_iterations = 7)[1] == (true, 7)
end
end
@testset "learning with Cat" begin
@testset "on single node" begin
data = [
Dict(:x1 => HardScore(1)),
Dict(:x1 => HardScore(2)),
Dict(:x1 => HardScore(1)),
Dict(:x1 => HardScore(1))
]
onenet = InstantNetwork([x1], VariableGraph())
newparams = em(onenet, data)[2]
@test isapprox(newparams[:x1], normalize([3, 1]))
end
@testset "with two independent nodes, each observed sometimes" begin
data = [
Dict(:x1 => HardScore(1)),
Dict(:x2 => HardScore(1)),
Dict(:x1 => HardScore(2), :x2 => HardScore(2))]
twonet = InstantNetwork([x1, x2], VariableGraph())
# We should converge to ignoring the unobserved cases since the variables are independent
# and there is no other evidence affecting them
newparams = em(twonet, data)[2]
# When :x1 is not observed, its stats are 0.1, 0.9
# When :x2 is not observed, its stats are 0.2, 0.3, 0.5
@test isapprox(newparams[:x1], normalize([1 + 0.1, 1 + 0.9]))
@test isapprox(newparams[:x2], normalize([1 + 0.2, 1 + 0.3, 0.5]))
end
@testset "with soft score, should consider prior" begin
data = [Dict(:x1 => SoftScore(Dict(1 => 0.8, 2 => 0.2)))]
onenet = InstantNetwork([x1], VariableGraph())
newparams = em(onenet, data)[2]
@test isapprox(newparams[:x1], normalize([0.1 * 0.8, 0.9 * 0.2]))
end
end
@testset "learning with DiscreteCPT" begin
@testset "fully observed" begin
data = [
Dict(:x1 => HardScore(1), :x2 => HardScore(1), :x3 => HardScore(1)),
Dict(:x1 => HardScore(1), :x2 => HardScore(2), :x3 => HardScore(2)),
Dict(:x1 => HardScore(2), :x2 => HardScore(3), :x3 => HardScore(2))]
newparams = em(fivecpdnet, data)[2]
# p3 = [[[1.0, 0.0], [0.5, 0.5]] [[0.0, 1.0], [0.5, 0.5]] [[0.5, 0.5], [0.0, 1.0]]]
@test isapprox(newparams[:x1], [2.0 / 3, 1.0 / 3], atol = 0.0001)
@test isapprox(newparams[:x2], [1.0 / 3, 1.0 / 3, 1.0 / 3], atol = 0.0001)
p3 = newparams[:x3]
@test isapprox(p3[(1,1)], [1.0, 0.0])
@test isapprox(p3[(1,2)], [0.5, 0.5])
@test isapprox(p3[(2,1)], [0.0, 1.0])
@test isapprox(p3[(2,2)], [0.5, 0.5])
@test isapprox(p3[(3,1)], [0.5, 0.5])
@test isapprox(p3[(3,2)], [0.0, 1.0])
end
@testset "with children observed" begin
# sf3 = Cat([1,2], [0.1, 0.9])
# mod3 = SimpleNumeric(sf3)
# x3 = Variable(:x3, mod3)
# sf4 = DiscreteCPT([1,2], Dict((1,) => [0.9, 0.1], (2,) => [0.1, 0.9]))
# mod4 = SimpleNumeric(sf4)
# x4 = Variable(:x4, mod4)
# sf5 = DiscreteCPT([1,2], Dict((1,) => [0.9, 0.1], (2,) => [0.1, 0.9]))
# mod5 = SimpleNumeric(sf5)
# x5 = Variable(:x5, mod5)
# fivecpdnet = Network([x3,x4,x5], Placeholder[], Placeholder[], Dict(x4=>[x3], x5=>[x3]))
data = [
Dict(:x4 => HardScore(1), :x5 => HardScore(1)),
Dict(:x4 => HardScore(2), :x5 => HardScore(2))
]
((converged, numiters), newparams) = em(fivecpdnet, data)
p1s = newparams[:x1]
p2s = newparams[:x2]
p3s = newparams[:x3]
# belief on :x3 calculated from learned network
learned_belief3 =
p2s[1] * p1s[1] * p3s[(1,1)] .+
p2s[1] * p1s[2] * p3s[(1,2)] .+
p2s[2] * p1s[1] * p3s[(2,1)] .+
p2s[2] * p1s[2] * p3s[(2,2)] .+
p2s[3] * p1s[1] * p3s[(3,1)] .+
p2s[3] * p1s[2] * p3s[(3,2)]
# belief on :x3 computed analytically
# prior distribution over node 3
prior31 = 0.2 * 0.1 * 0.3 + 0.2 * 0.9 * 0.6 + 0.3 * 0.1 * 0.4 + 0.3 * 0.9 * 0.7 + 0.5 * 0.1 * 0.5 + 0.5 * 0.9 * 0.8
prior32 = 1 - prior31
# Posteriors for each data case are computed by combining the appropriate prior with the lambda messages.
# These posteriors are then summed over the data cases. (Note: each case is individually normalized, so this makes sense.)
case1p1 = prior31 * 0.15 * 0.35
case1p2 = prior32 * 0.25 * 0.45
case2p1 = prior31 * 0.85 * 0.65
case2p2 = prior32 * 0.75 * 0.55
case1post3 = normalize([case1p1, case1p2])
case2post3 = normalize([case2p1, case2p2])
post3 = normalize(case1post3 .+ case2post3)
@test isapprox(learned_belief3, post3; atol = 0.01)
end
end
#=
@testset "learning with Separable" begin
# Testing separable models is challenging, because it can be hard to calculate what the maximizing alphas should be.
# Therefore, we use extreme cases to test.
@testset "with one component" begin
alphas = [1.0]
cpd1 = Dict((1,) => [0.2, 0.8])
cpds::SepCPTs = [cpd1]
sf1 = Cat([1], [1.0])
mod1 = SimpleNumeric(sf1)
x1 = Variable(:x1, mod1)
sf2 = Separable([1,2], alphas, cpds)
mod2 = SimpleNumeric(sf2)
x2 = Variable(:x2, mod2)
net = Network([x1,x2], Placeholder[], Placeholder[], Dict(x2=>[x1]))
data = [Dict(:x2 => 1), Dict(:x2 => 1), Dict(:x2 => 2), Dict(:x2 => 1)]
cs = em(net, data)[2]
@test isapprox(cs[(:x2)][2], [[0.75, 0.25]]; atol = 0.01)
end
@testset "when each parent predicts a different child value" begin
alphas = [0.2, 0.3, 0.5]
cpd1 = Dict((1,) => [1.0, 0.0, 0.0])
cpd2 = Dict((1,) => [0.0, 1.0, 0.0])
cpd3 = Dict((1,) => [0.0, 0.0, 1.0])
cpds::SepCPTs = [cpd1, cpd2, cpd3]
sf1 = Cat([1], [1.0])
mod1 = SimpleNumeric(sf1)
x1 = Variable(:x1, mod1)
sf2 = Cat([1], [1.0])
mod2 = SimpleNumeric(sf2)
x2 = Variable(:x2, mod2)
sf3 = Cat([1], [1.0])
mod3 = SimpleNumeric(sf3)
x3 = Variable(:x3, mod3)
sf4 = Separable([1,2,3], alphas, cpds)
mod4 = SimpleNumeric(sf4)
x4 = Variable(:x4, mod4)
net = Network([x1,x2,x3,x4], Placeholder[], Placeholder[], Dict(x4=>[x1,x2,x3]))
data = [Dict(:x4 => 1), Dict(:x4 => 2), Dict(:x4 => 2), Dict(:x4 => 3)]
newparams = em(net, data)[2]
(as, cs1, cs2, cs3) = newparams[:x4]
@test as == [0.25, 0.5, 0.25]
@test cs1 == [[1.0, 0.0, 0.0]]
@test cs2 == [[0.0, 1.0, 0.0]]
@test cs3 == [[0.0, 0.0, 1.0]]
end
end
=#
#=
@testset "Learning with If" begin
sfc1 = Cat([1, 2], [0.1, 0.9])
modc1 = SimpleNumeric(sfc1)
vc1 = Variable(:c1, modc1)
sfc2 = Cat([1, 2, 3], [0.2, 0.3, 0.5])
modc2 = SimpleNumeric(sfc2)
vc2 = Variable(:c2, modc2)
sff = Flip(0.4)
modf = SimpleNumeric(sff)
vf = Variable(:f, modf)
sfi = If{Int}
modi = SimpleModel(sfi)
vi = Variable(:i, modi)
net = Network([vc1,vc2,vf,vi], Placeholder[], Placeholder[], Dict(vi=>[vf,vc1,vc2]))
data = [Dict(:i => 1), Dict(:i => 2)]
newparams = em(net, data)[2]
pc1 = newparams[:c1]
pc2 = newparams[:c2]
pf = newparams[:f]
pi = newparams[:i]
@test pi === nothing
# The problem is underconstrained, but we can check that we have learned not to predict 3
# We can also check that 1 and 2 have similar probability.
# To avoid predicting 3, we need either f to always be true (second value) or c2 to always be 1 or 2.
@test isapprox(pf[1], 0.0; atol = 0.05) || isapprox(pc2[3], 0.0; atol = 0.05)
p1 = pf[1] * pc2[1] + pf[2] * pc1[1]
p2 = pf[1] * pc2[2] + pf[2] * pc1[2]
@test isapprox(p1, p2; atol = 0.05)
end
# Parameter sharing currently doesn't work
@testset "parameter sharing" begin
sf = Cat([1,2], [0.5, 0.5])
m = sf()
x1 = m(:x1)
x2 = m(:x2)
net = Network(Tuple{}, Nothing)
add_variable!(net, x1)
add_variable!(net, x2)
data = [Dict(:x1 => 1, :x2 => 1), Dict(:x1 => 2, :x2 => 1)]
newparams = em(net, data)[2]
@test newparams[:x1] == [0.75, 0.25]
@test newparams[:x2] == [0.75, 0.25]
end
=#
#= we do not currently have validation set code
@testset "with validation set" begin
@testset "retains the old parameters when learning makes validation set worse" begin
m = Cat([1,2], [0.5, 0.5])
x = m()(:x)
net = Network([x], Placeholder[], Placeholder[], Dict())
data = [Dict(:x => 1)]
validation = [Dict(:x => 2)]
((conv, iteration), newparams) = em(net, data; validationset = validation)
@test conv
@test iteration == 1
@test newparams[:x] == [0.5, 0.5]
end
@testset "when training set and validation set are the same, behaves like before" begin
m = Cat([1,2], [0.5, 0.5])
x = m()(:x)
net = Network([x], Placeholder[], Placeholder[], Dict())
data = [Dict(:x => 1), Dict(:x => 1), Dict(:x => 1), Dict(:x => 2)]
validation = [Dict(:x => 1), Dict(:x => 1), Dict(:x => 1), Dict(:x => 2)]
((conv, iteration), newparams) = em(net, data; validationset = validation)
@test conv
@test iteration == 2
@test newparams[:x] == [0.75, 0.25]
end
=#
end
end
using Test
using Scruff
using Scruff.Utils
using Scruff.RTUtils
using Scruff.Models
using Scruff.SFuncs
using Scruff.Algorithms
import Scruff: make_initial, make_transition
@testset "Filtering" begin
struct Tree
x :: Int
left :: Union{Nothing, Tree}
right :: Union{Nothing, Tree}
Tree(x) = new(x, nothing, nothing)
Tree(x,l,r) = new(x, l, r)
end
struct Model1 <: VariableTimeModel{Tuple{}, Tuple{}, Tree} end
global make_initial(::Model1, t) = Constant(Tree(0))
global make_transition(::Model1, parts, t) = Constant(Tree(t))
struct Model2 <: VariableTimeModel{Tuple{}, Tuple{Tree, Tree}, Tree} end
global make_initial(::Model2, t) = Constant(Tree(0))
global make_transition(::Model2, parts, t) = Det(Tuple{Tree, Tree}, Tree, (l,r) -> Tree(t, l, r))
@testset "Window utilities" begin
@testset "construct an instant network from instances" begin
usf = Constant(1)
umodel = HomogeneousModel(usf, usf)
uvar = umodel(:u)
vsf = DiscreteCPT([:a,:b], Dict((1,) => [0.5, 0.5]))
vmodel = HomogeneousModel(vsf, vsf)
vvar = vmodel(:v)
ph = Placeholder{Int}(:p)
dynnet = DynamicNetwork(Variable[uvar,vvar], VariableGraph(vvar => [uvar]),
VariableGraph(vvar => [uvar]), VariableParentTimeOffset(), Placeholder[ph], Placeholder[ph])
dynrun = Runtime(dynnet)
inst1 = instantiate!(dynrun, ph, 1)
# this instance won't be used to create the instant network
instantiate!(dynrun, uvar, 1)
inst2 = instantiate!(dynrun, uvar, 2)
inst3 = instantiate!(dynrun, vvar, 3)
inst4 = instantiate!(dynrun, uvar, 4)
set_value!(dynrun, inst1, :x, 1)
set_value!(dynrun, inst2, :x, 2)
set_value!(dynrun, inst3, :x, 3)
set_value!(dynrun, inst4, :x, 4)
run = instant_runtime_from_instances(dynrun, [inst3, inst1, inst2, inst4])
net = get_network(run)
phs = get_placeholders(net)
vars = get_variables(net)
p1 = Placeholder{Int}(:p_1)
@test phs == [p1]
@test length(vars) == 3
varnames = [var.name for var in vars]
i2 = findfirst(x -> x == :u_2, varnames)
i3 = findfirst(x -> x == :v_3, varnames)
i4 = findfirst(x -> x == :u_4, varnames)
@test !isnothing(i2)
@test !isnothing(i3)
@test !isnothing(i4)
v2 = vars[i2]
v3 = vars[i3]
v4 = vars[i4]
@test v2.model isa SimpleModel
@test v3.model isa SimpleModel
@test v4.model isa SimpleModel
@test v2.model.sf == usf
@test v3.model.sf == vsf
@test v4.model.sf == usf
@test get_parents(net, p1) == Node[]
@test get_parents(net, v2) == Node[]
@test get_parents(net, v3) == Node[v2]
@test get_parents(net, v4) == Node[]
@test has_instance(run, p1)
@test has_instance(run, v2)
@test has_instance(run, v3)
@test has_instance(run, v4)
i1 = current_instance(run, p1)
i2 = current_instance(run, v2)
i3 = current_instance(run, v3)
i4 = current_instance(run, v4)
@test get_value(run, i1, :x) == 1
@test get_value(run, i2, :x) == 2
@test get_value(run, i3, :x) == 3
@test get_value(run, i4, :x) == 4
end
@testset "restore values in the dynamic network" begin
usf = Constant(1)
umodel = HomogeneousModel(usf, usf)
uvar = umodel(:u)
vsf = DiscreteCPT([:a,:b], Dict((1,) => [0.5, 0.5]))
vmodel = HomogeneousModel(vsf, vsf)
vvar = vmodel(:v)
ph = Placeholder{Int}(:p)
dynnet = DynamicNetwork(Variable[uvar,vvar], VariableGraph(vvar => [uvar]),
VariableGraph(vvar => [uvar]), VariableParentTimeOffset(), Placeholder[ph], Placeholder[ph])
dynrun = Runtime(dynnet)
dyninst1 = instantiate!(dynrun, ph, 1)
# this instance won't be used to create the instant network
instantiate!(dynrun, uvar, 1)
dyninst2 = instantiate!(dynrun, uvar, 2)
dyninst3 = instantiate!(dynrun, vvar, 3)
dyninst4 = instantiate!(dynrun, uvar, 4)
set_value!(dynrun, dyninst1, :x, 1)
set_value!(dynrun, dyninst2, :x, 2)
set_value!(dynrun, dyninst3, :x, 3)
set_value!(dynrun, dyninst4, :x, 4)
instrun = instant_runtime_from_instances(dynrun,
[dyninst3, dyninst1, dyninst2, dyninst4])
instnet = get_network(instrun)
phs = get_placeholders(instnet)
instvars = get_variables(instnet)
instp1 = Placeholder{Int}(:p_1)
varnames = [var.name for var in instvars]
i2 = findfirst(x -> x == :u_2, varnames)
i3 = findfirst(x -> x == :v_3, varnames)
i4 = findfirst(x -> x == :u_4, varnames)
instv2 = instvars[i2]
instv3 = instvars[i3]
instv4 = instvars[i4]
instinst1 = current_instance(instrun, instp1)
instinst2 = current_instance(instrun, instv2)
instinst3 = current_instance(instrun, instv3)
instinst4 = current_instance(instrun, instv4)
set_value!(instrun, instinst1, :x, -1)
set_value!(instrun, instinst2, :x, -2)
set_value!(instrun, instinst3, :x, -3)
set_value!(instrun, instinst4, :x, -4)
retrieve_values_from_instant_runtime!(dynrun, instrun)
@test get_value(dynrun, dyninst1, :x) == -1
@test get_value(dynrun, dyninst2, :x) == -2
@test get_value(dynrun, dyninst3, :x) == -3
@test get_value(dynrun, dyninst4, :x) == -4
end
@testset "make an initial instant network" begin
ph = Placeholder{Int}(:p)
usf = DiscreteCPT([1], Dict((0,) => [1.0]))
umodel = HomogeneousModel(usf, usf)
uvar = umodel(:u)
vsf = DiscreteCPT([:a,:b], Dict((1,) => [0.5, 0.5]))
vmodel = HomogeneousModel(vsf, vsf)
vvar = vmodel(:v)
dynnet = DynamicNetwork(Variable[uvar,vvar], VariableGraph(uvar => [ph], vvar => [uvar]),
VariableGraph(uvar => [ph], vvar => [uvar]), VariableParentTimeOffset(), Placeholder[ph], Placeholder[ph])
dynrun = Runtime(dynnet)
ensure_all!(dynrun)
instrun = initial_instant_runtime(dynrun)
instnet = get_network(instrun)
phnames = map(get_name, get_placeholders(instnet))
@test length(phnames) == 1
@test :p_0 in phnames
varnames = map(get_name, get_variables(instnet))
@test length(varnames) == 2
@test :u_0 in varnames
@test :v_0 in varnames
p_0 = get_node(instnet, :p_0)
u_0 = get_node(instnet, :u_0)
v_0 = get_node(instnet, :v_0)
@test p_0 isa Placeholder
@test u_0 isa Variable
@test v_0 isa Variable
@test has_instance(instrun, p_0)
@test has_instance(instrun, u_0)
@test has_instance(instrun, v_0)
@test get_time(current_instance(instrun, p_0)) == 0
@test get_time(current_instance(instrun, u_0)) == 0
@test get_time(current_instance(instrun, v_0)) == 0
end
end
@testset "Particle filter" begin
@testset "Synchronous" begin
@testset "Initialization step" begin
c = Cat([1,2], [0.1, 0.9])
d = DiscreteCPT([:a, :b], Dict((1,) => [0.2, 0.8], (2,) => [0.3, 0.7]))
m1 = HomogeneousModel(c, c)
m2 = HomogeneousModel(d, d)
v1 = m1(:v1)
v2 = m2(:v2)
net = DynamicNetwork(Variable[v1, v2], VariableGraph(v2 => [v1]), VariableGraph(v2 => [v1]))
pf = SyncPF(1000)
runtime = Runtime(net)
init_filter(pf, runtime)
@test isapprox(probability(pf, runtime, v1, 1), 0.1; atol = 0.05)
# Can't currently answer joint probability queries
# @test isapprox(probability(pf, runtime, Queryable[v1,v2], x -> x[1] == 1 && x[2] == :a), 0.1 * 0.2; atol = 0.05)
end
@testset "Filter step" begin
p101 = 0.1
p102 = 0.9
p20a = 0.5
p20b = 0.5
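# Hand-computed filtering: propagate the posterior through the transition CPT,
# weight by the observation probabilities, and renormalize at each step.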
# Observe v21 = :a
prior111 = p101 * 0.2 + p102 * 0.3
prior112 = p101 * 0.8 + p102 * 0.7
q111 = prior111 * 0.4
q112 = prior112 * 0.9
post111 = q111 / (q111 + q112)
post112 = q112 / (q111 + q112)
# Observe v22 = :b
prior121 = post111 * 0.2 + post112 * 0.3
prior122 = post111 * 0.8 + post112 * 0.7
q121 = prior121 * 0.6
q122 = prior122 * 0.1
post121 = q121 / (q121 + q122)
post122 = q122 / (q121 + q122)
c1 = Cat([1,2], [0.1, 0.9])
d1 = DiscreteCPT([1, 2], Dict((1,) => [0.2, 0.8], (2,) => [0.3, 0.7]))
c2 = Cat([:a,:b], [0.5, 0.5])
d2 = DiscreteCPT([:a, :b], Dict((1,) => [0.4, 0.6], (2,) => [0.9, 0.1]))
m1 = HomogeneousModel(c1, d1)
m2 = HomogeneousModel(c2, d2)
v1 = m1(:v1)
v2 = m2(:v2)
vars = Variable[v1, v2]
net = DynamicNetwork(vars, VariableGraph(), VariableGraph(v1 => [v1], v2 => [v1]))
pf = SyncPF(1000)
runtime = Runtime(net)
init_filter(pf, runtime)
@test isapprox(probability(pf, runtime, v1, 1), p101; atol = 0.05)
@test isapprox(probability(pf, runtime, v1, 2), p102; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :a), p20a; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :b), p20b; atol = 0.05)
filter_step(pf, runtime, vars, 1, Dict{Symbol, Score}(:v2 => HardScore(:a)))
@test isapprox(probability(pf, runtime, v1, 1), post111; atol = 0.05)
@test isapprox(probability(pf, runtime, v1, 2), post112; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :a), 1.0; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :b), 0.0; atol = 0.05)
filter_step(pf, runtime, vars, 2, Dict{Symbol, Score}(:v2 => HardScore(:b)))
@test isapprox(probability(pf, runtime, v1, 1), post121; atol = 0.05)
@test isapprox(probability(pf, runtime, v1, 2), post122; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :a), 0.0; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :b), 1.0; atol = 0.05)
end
end
@testset "Asynchronous PF" begin
# These tests are designed to test whether variables are instantiated correctly
# We use a four-node network v1 -> v2 -> v3 -> v4. Each variable also depends on its own previous state.
# To keep track of instantiation, each sfunc is a constant whose value represents the instantiation pattern as a tree.
# Instantiations happen at times 2, 3, and 5
# We consider three orders of instantiation:
# 1) v2, v3, v4 - no extra instances should be created
# 2) v3, v2, v4 - when v4 is instantiated, the coherent PF should also instantiate v3
# 3) v3, v1, v4 - when v4 is instantiated, the coherent PF should also instantiate v2 and v3 - difficult because it has to recognize an ancestor
v1 = Model1()(:v1)
v2 = Model2()(:v2)
v3 = Model2()(:v3)
v4 = Model2()(:v4)
net = DynamicNetwork(Variable[v1,v2,v3,v4], VariableGraph(), VariableGraph(v2 => [v2, v1], v3 => [v3, v2], v4 => [v4, v3]))
noev = Dict{Symbol, Score}()
@testset "Non-coherent PF" begin
@testset "Order 2-3-4" begin
pf = AsyncPF(10, 10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v2], 2, noev)
filter_step(pf, runtime, Variable[v3], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t22 = Tree(2, t0, t0)
t33 = Tree(3, t0, t22)
t45 = Tree(5, t0, t33)
@test probability(pf, runtime, v1, t0) == 1.0
@test probability(pf, runtime, v2, t22) == 1.0
@test probability(pf, runtime, v3, t33) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
@testset "Order 3-2-4" begin
pf = AsyncPF(10, 10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v3], 2, noev)
filter_step(pf, runtime, Variable[v2], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t32 = Tree(2, t0, t0)
t23 = Tree(3, t0, t0)
t45 = Tree(5, t0, t32)
@test probability(pf, runtime, v1, t0) == 1.0
@test probability(pf, runtime, v2, t23) == 1.0
@test probability(pf, runtime, v3, t32) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
@testset "Order 3-1-4" begin
pf = AsyncPF(10, 10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v3], 2, noev)
filter_step(pf, runtime, Variable[v1], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t32 = Tree(2, t0, t0)
t13 = Tree(3, nothing, nothing)
t45 = Tree(5, t0, t32)
@test probability(pf, runtime, v1, t13) == 1.0
@test probability(pf, runtime, v2, t0) == 1.0
@test probability(pf, runtime, v3, t32) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
end
@testset "Coherent PF" begin
@testset "Order 2-3-4" begin
pf = CoherentPF(10, 10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v2], 2, noev)
filter_step(pf, runtime, Variable[v3], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t22 = Tree(2, t0, t0)
t33 = Tree(3, t0, t22)
t45 = Tree(5, t0, t33)
@test probability(pf, runtime, v1, t0) == 1.0
@test probability(pf, runtime, v2, t22) == 1.0
@test probability(pf, runtime, v3, t33) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
@testset "Order 3-2-4" begin
pf = CoherentPF(10, 10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v3], 2, noev)
filter_step(pf, runtime, Variable[v2], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t32 = Tree(2, t0, t0)
t23 = Tree(3, t0, t0)
t35 = Tree(5, t32, t23) # extra instance added
t45 = Tree(5, t0, t35)
@test probability(pf, runtime, v1, t0) == 1.0
@test probability(pf, runtime, v2, t23) == 1.0
@test probability(pf, runtime, v3, t35) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
@testset "Order 3-1-4" begin
pf = CoherentPF(10, 10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v3], 2, noev)
filter_step(pf, runtime, Variable[v1], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t32 = Tree(2, t0, t0)
t13 = Tree(3, nothing, nothing)
t25 = Tree(5, t0, t13) # added
t35 = Tree(5, t32, t25) # added
t45 = Tree(5, t0, t35)
@test probability(pf, runtime, v1, t13) == 1.0
@test probability(pf, runtime, v2, t25) == 1.0
@test probability(pf, runtime, v3, t35) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
end
end
end
@testset "BP filter" begin
@testset "Synchronous" begin
@testset "Initialization step" begin
c = Cat([1,2], [0.1, 0.9])
d = DiscreteCPT([:a, :b], Dict((1,) => [0.2, 0.8], (2,) => [0.3, 0.7]))
m1 = HomogeneousModel(c, c)
m2 = HomogeneousModel(d, d)
v1 = m1(:v1)
v2 = m2(:v2)
net = DynamicNetwork(Variable[v1, v2], VariableGraph(v2 => [v1]), VariableGraph(v2 => [v1]))
pf = SyncBP()
runtime = Runtime(net)
init_filter(pf, runtime)
@test isapprox(probability(pf, runtime, v1, 1), 0.1; atol = 0.05)
# Can't currently answer joint probability queries
# @test isapprox(probability(pf, runtime, Queryable[v1,v2], x -> x[1] == 1 && x[2] == :a), 0.1 * 0.2; atol = 0.05)
end
@testset "Filter step" begin
p101 = 0.1
p102 = 0.9
p20a = 0.5
p20b = 0.5
# Observe v21 = :a
prior111 = p101 * 0.2 + p102 * 0.3
prior112 = p101 * 0.8 + p102 * 0.7
q111 = prior111 * 0.4
q112 = prior112 * 0.9
post111 = q111 / (q111 + q112)
post112 = q112 / (q111 + q112)
# Observe v22 = :b
prior121 = post111 * 0.2 + post112 * 0.3
prior122 = post111 * 0.8 + post112 * 0.7
q121 = prior121 * 0.6
q122 = prior122 * 0.1
post121 = q121 / (q121 + q122)
post122 = q122 / (q121 + q122)
c1 = Cat([1,2], [0.1, 0.9])
d1 = DiscreteCPT([1, 2], Dict((1,) => [0.2, 0.8], (2,) => [0.3, 0.7]))
c2 = Cat([:a,:b], [0.5, 0.5])
d2 = DiscreteCPT([:a, :b], Dict((1,) => [0.4, 0.6], (2,) => [0.9, 0.1]))
m1 = HomogeneousModel(c1, d1)
m2 = HomogeneousModel(c2, d2)
v1 = m1(:v1)
v2 = m2(:v2)
vars = Variable[v1, v2]
net = DynamicNetwork(vars, VariableGraph(), VariableGraph(v1 => [v1], v2 => [v1]))
pf = SyncBP()
runtime = Runtime(net)
init_filter(pf, runtime)
@test isapprox(probability(pf, runtime, v1, 1), p101; atol = 0.05)
@test isapprox(probability(pf, runtime, v1, 2), p102; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :a), p20a; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :b), p20b; atol = 0.05)
filter_step(pf, runtime, vars, 1, Dict{Symbol, Score}(:v2 => HardScore(:a)))
@test isapprox(probability(pf, runtime, v1, 1), post111; atol = 0.05)
@test isapprox(probability(pf, runtime, v1, 2), post112; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :a), 1.0; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :b), 0.0; atol = 0.05)
filter_step(pf, runtime, vars, 2, Dict{Symbol, Score}(:v2 => HardScore(:b)))
@test isapprox(probability(pf, runtime, v1, 1), post121; atol = 0.05)
@test isapprox(probability(pf, runtime, v1, 2), post122; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :a), 0.0; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :b), 1.0; atol = 0.05)
end
end
@testset "Asynchronous BP" begin
# These tests are designed to test whether variables are instantiated correctly
# We use a four-node network v1 -> v2 -> v3 -> v4. Each variable also depends on its own previous state.
# To keep track of instantiation, each sfunc is a constant whose value represents the instantiation pattern as a tree.
# Instantiations happen at times 2, 3, and 5
# We consider three orders of instantiation:
# 1) v2, v3, v4 - no extra instances should be created
# 2) v3, v2, v4 - when v4 is instantiated, the coherent BP should also instantiate v3
# 3) v3, v1, v4 - when v4 is instantiated, the coherent BP should also instantiate v2 and v3 - difficult because it has to recognize an ancestor
v1 = Model1()(:v1)
v2 = Model2()(:v2)
v3 = Model2()(:v3)
v4 = Model2()(:v4)
net = DynamicNetwork(Variable[v1,v2,v3,v4], VariableGraph(), VariableGraph(v2 => [v2, v1], v3 => [v3, v2], v4 => [v4, v3]))
noev = Dict{Symbol, Score}()
@testset "Non-coherent PF" begin
@testset "Order 2-3-4" begin
pf = AsyncBP(10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v2], 2, noev)
filter_step(pf, runtime, Variable[v3], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t22 = Tree(2, t0, t0)
t33 = Tree(3, t0, t22)
t45 = Tree(5, t0, t33)
@test probability(pf, runtime, v1, t0) == 1.0
@test probability(pf, runtime, v2, t22) == 1.0
@test probability(pf, runtime, v3, t33) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
@testset "Order 3-2-4" begin
pf = AsyncBP(10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v3], 2, noev)
filter_step(pf, runtime, Variable[v2], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t32 = Tree(2, t0, t0)
t23 = Tree(3, t0, t0)
t45 = Tree(5, t0, t32)
@test probability(pf, runtime, v1, t0) == 1.0
@test probability(pf, runtime, v2, t23) == 1.0
@test probability(pf, runtime, v3, t32) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
@testset "Order 3-1-4" begin
pf = AsyncBP(10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v3], 2, noev)
filter_step(pf, runtime, Variable[v1], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t32 = Tree(2, t0, t0)
t13 = Tree(3, nothing, nothing)
t45 = Tree(5, t0, t32)
@test probability(pf, runtime, v1, t13) == 1.0
@test probability(pf, runtime, v2, t0) == 1.0
@test probability(pf, runtime, v3, t32) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
end
@testset "Coherent BP" begin
@testset "Order 2-3-4" begin
pf = CoherentBP(10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v2], 2, noev)
filter_step(pf, runtime, Variable[v3], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t22 = Tree(2, t0, t0)
t33 = Tree(3, t0, t22)
t45 = Tree(5, t0, t33)
@test probability(pf, runtime, v1, t0) == 1.0
@test probability(pf, runtime, v2, t22) == 1.0
@test probability(pf, runtime, v3, t33) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
@testset "Order 3-2-4" begin
pf = CoherentBP(10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v3], 2, noev)
filter_step(pf, runtime, Variable[v2], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t32 = Tree(2, t0, t0)
t23 = Tree(3, t0, t0)
t35 = Tree(5, t32, t23) # extra instance added
t45 = Tree(5, t0, t35)
@test probability(pf, runtime, v1, t0) == 1.0
@test probability(pf, runtime, v2, t23) == 1.0
@test probability(pf, runtime, v3, t35) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
@testset "Order 3-1-4" begin
pf = CoherentBP(10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v3], 2, noev)
filter_step(pf, runtime, Variable[v1], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t32 = Tree(2, t0, t0)
t13 = Tree(3, nothing, nothing)
t25 = Tree(5, t0, t13) # added
t35 = Tree(5, t32, t25) # added
t45 = Tree(5, t0, t35)
@test probability(pf, runtime, v1, t13) == 1.0
@test probability(pf, runtime, v2, t25) == 1.0
@test probability(pf, runtime, v3, t35) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
end
end
end
@testset "Loopy filter" begin
@testset "Synchronous" begin
@testset "Initialization step" begin
c = Cat([1,2], [0.1, 0.9])
d = DiscreteCPT([:a, :b], Dict((1,) => [0.2, 0.8], (2,) => [0.3, 0.7]))
m1 = HomogeneousModel(c, c)
m2 = HomogeneousModel(d, d)
v1 = m1(:v1)
v2 = m2(:v2)
net = DynamicNetwork(Variable[v1, v2], VariableGraph(v2 => [v1]), VariableGraph(v2 => [v1]))
pf = SyncLoopy()
runtime = Runtime(net)
init_filter(pf, runtime)
@test isapprox(probability(pf, runtime, v1, 1), 0.1; atol = 0.05)
# Can't currently answer joint probability queries
# @test isapprox(probability(pf, runtime, Queryable[v1,v2], x -> x[1] == 1 && x[2] == :a), 0.1 * 0.2; atol = 0.05)
end
@testset "Filter step" begin
@testset "Without evidence" begin
p101 = 0.1
p102 = 0.9
p20a = 0.5
p20b = 0.5
# Observe v21 = :a
prior111 = p101 * 0.2 + p102 * 0.3
prior112 = p101 * 0.8 + p102 * 0.7
q111 = prior111 * 0.4
q112 = prior112 * 0.9
post111 = q111 / (q111 + q112)
post112 = q112 / (q111 + q112)
# Observe v22 = :b
prior121 = post111 * 0.2 + post112 * 0.3
prior122 = post111 * 0.8 + post112 * 0.7
q121 = prior121 * 0.6
q122 = prior122 * 0.1
post121 = q121 / (q121 + q122)
post122 = q122 / (q121 + q122)
c1 = Cat([1,2], [0.1, 0.9])
d1 = DiscreteCPT([1, 2], Dict((1,) => [0.2, 0.8], (2,) => [0.3, 0.7]))
c2 = Cat([:a,:b], [0.5, 0.5])
d2 = DiscreteCPT([:a, :b], Dict((1,) => [0.4, 0.6], (2,) => [0.9, 0.1]))
m1 = HomogeneousModel(c1, d1)
m2 = HomogeneousModel(c2, d2)
v1 = m1(:v1)
v2 = m2(:v2)
vars = Variable[v1, v2]
net = DynamicNetwork(vars, VariableGraph(), VariableGraph(v1 => [v1], v2 => [v1]))
pf = SyncLoopy()
runtime = Runtime(net)
init_filter(pf, runtime)
@test isapprox(probability(pf, runtime, v1, 1), p101; atol = 0.05)
@test isapprox(probability(pf, runtime, v1, 2), p102; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :a), p20a; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :b), p20b; atol = 0.05)
filter_step(pf, runtime, vars, 1, Dict{Symbol, Score}(:v2 => HardScore(:a)))
@test isapprox(probability(pf, runtime, v1, 1), post111; atol = 0.05)
@test isapprox(probability(pf, runtime, v1, 2), post112; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :a), 1.0; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :b), 0.0; atol = 0.05)
filter_step(pf, runtime, vars, 2, Dict{Symbol, Score}(:v2 => HardScore(:b)))
@test isapprox(probability(pf, runtime, v1, 1), post121; atol = 0.05)
@test isapprox(probability(pf, runtime, v1, 2), post122; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :a), 0.0; atol = 0.05)
@test isapprox(probability(pf, runtime, v2, :b), 1.0; atol = 0.05)
end
end
end
@testset "Asynchronous Loopy" begin
# These tests are designed to test whether variables are instantiated correctly
# We use a four-node network v1 -> v2 -> v3 -> v4. Each variable also depends on its own previous state.
# To keep track of instantiation, each sfunc is a constant whose value represents the instantiation pattern as a tree.
# Instantiations happen at times 2, 3, and 5
# We consider three orders of instantiation:
# 1) v2, v3, v4 - no extra instances should be created
# 2) v3, v2, v4 - when v4 is instantiated, the coherent Loopy filter should also instantiate v3
# 3) v3, v1, v4 - when v4 is instantiated, the coherent Loopy filter should also instantiate v2 and v3 - difficult because it has to recognize an ancestor
v1 = Model1()(:v1)
v2 = Model2()(:v2)
v3 = Model2()(:v3)
v4 = Model2()(:v4)
net = DynamicNetwork(Variable[v1,v2,v3,v4], VariableGraph(), VariableGraph(v2 => [v2, v1], v3 => [v3, v2], v4 => [v4, v3]))
noev = Dict{Symbol, Score}()
@testset "Non-coherent Loopy" begin
@testset "Order 2-3-4" begin
pf = AsyncLoopy(10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v2], 2, noev)
filter_step(pf, runtime, Variable[v3], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t22 = Tree(2, t0, t0)
t33 = Tree(3, t0, t22)
t45 = Tree(5, t0, t33)
@test probability(pf, runtime, v1, t0) == 1.0
@test probability(pf, runtime, v2, t22) == 1.0
@test probability(pf, runtime, v3, t33) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
@testset "Order 3-2-4" begin
pf = AsyncLoopy(10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v3], 2, noev)
filter_step(pf, runtime, Variable[v2], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t32 = Tree(2, t0, t0)
t23 = Tree(3, t0, t0)
t45 = Tree(5, t0, t32)
@test probability(pf, runtime, v1, t0) == 1.0
@test probability(pf, runtime, v2, t23) == 1.0
@test probability(pf, runtime, v3, t32) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
@testset "Order 3-1-4" begin
pf = AsyncLoopy(10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v3], 2, noev)
filter_step(pf, runtime, Variable[v1], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t32 = Tree(2, t0, t0)
t13 = Tree(3, nothing, nothing)
t45 = Tree(5, t0, t32)
@test probability(pf, runtime, v1, t13) == 1.0
@test probability(pf, runtime, v2, t0) == 1.0
@test probability(pf, runtime, v3, t32) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
end
@testset "Coherent Loopy" begin
@testset "Order 2-3-4" begin
pf = CoherentLoopy(10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v2], 2, noev)
filter_step(pf, runtime, Variable[v3], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t22 = Tree(2, t0, t0)
t33 = Tree(3, t0, t22)
t45 = Tree(5, t0, t33)
@test probability(pf, runtime, v1, t0) == 1.0
@test probability(pf, runtime, v2, t22) == 1.0
@test probability(pf, runtime, v3, t33) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
@testset "Order 3-2-4" begin
pf = CoherentLoopy(10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v3], 2, noev)
filter_step(pf, runtime, Variable[v2], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t32 = Tree(2, t0, t0)
t23 = Tree(3, t0, t0)
t35 = Tree(5, t32, t23) # extra instance added
t45 = Tree(5, t0, t35)
@test probability(pf, runtime, v1, t0) == 1.0
@test probability(pf, runtime, v2, t23) == 1.0
@test probability(pf, runtime, v3, t35) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
@testset "Order 3-1-4" begin
pf = CoherentLoopy(10, Int)
runtime = Runtime(net)
init_filter(pf, runtime)
filter_step(pf, runtime, Variable[v3], 2, noev)
filter_step(pf, runtime, Variable[v1], 3, noev)
filter_step(pf, runtime, Variable[v4], 5, noev)
t0 = Tree(0, nothing, nothing)
t32 = Tree(2, t0, t0)
t13 = Tree(3, nothing, nothing)
t25 = Tree(5, t0, t13) # added
t35 = Tree(5, t32, t25) # added
t45 = Tree(5, t0, t35)
@test probability(pf, runtime, v1, t13) == 1.0
@test probability(pf, runtime, v2, t25) == 1.0
@test probability(pf, runtime, v3, t35) == 1.0
@test probability(pf, runtime, v4, t45) == 1.0
end
end
end
end
end
import Base.timedwait
import Base.isapprox
import PrettyPrint
using Test
using Scruff
using Scruff.Utils
using Scruff.RTUtils
using Scruff.SFuncs
using Scruff.Algorithms
import Scruff.Operators: cpdf
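# Tests for the importance-sampling family: the Particles utilities,
# Rejection, LW (likelihood weighting), Importance with a custom proposal,
# and IterativeSampler, each exercised with placeholders, hard and soft
# evidence, and interventions.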
@testset "Importance" begin
@testset "Sampling utilities" begin
@testset "Probability" begin
sample1 = Dict(:a => 1, :b => 2)
sample2 = Dict(:a => 1, :b => 3)
sample3 = Dict(:a => 2, :b => 4)
lws = [-0.1, -0.2, -0.3]
parts = Particles([sample1, sample2, sample3], lws)
tot = sum([exp(x) for x in lws])
p1 = exp(-0.1) + exp(-0.2)
@test isapprox(probability(parts, s -> s[:a] == 1), p1 / tot)
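# Only sample3 satisfies :b == 4, so under the same normalization its
# weight alone forms the numerator:
@test isapprox(probability(parts, s -> s[:b] == 4), exp(-0.3) / tot)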
end
@testset "Marginal" begin
sample1 = Dict(:a => 1, :b => 2)
sample2 = Dict(:a => 1, :b => 3)
sample3 = Dict(:a => 2, :b => 4)
lws = [-0.1, -0.2, -0.3]
parts = Particles([sample1, sample2, sample3], lws)
p1 = exp(-0.1) + exp(-0.2)
p2 = exp(-0.3)
tot = p1 + p2
marg = marginal(parts, :a)
@test isapprox(cpdf(marg, (), 1), p1 / tot)
@test isapprox(cpdf(marg, (), 2), p2 / tot)
end
@testset "Effective sample size" begin
@test isapprox(effective_sample_size([log(0.4)]), 1.0)
@test isapprox(effective_sample_size([log(0.4), log(0.2)]), (0.6 * 0.6 / (0.4 * 0.4 + 0.2 * 0.2)))
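# ESS = (sum of weights)^2 / (sum of squared weights), with weights
# exp(log_weight); a single sample therefore always has ESS 1.0.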
end
@testset "Normalizing weights" begin
@test isapprox(normalize_weights([log(0.1), log(0.3)]), [log(0.25), log(0.75)])
end
@testset "Probability of evidence" begin
@test isapprox(log_prob_evidence([log(0.1), log(0.3)]), log((0.1+0.3)/2))
end
@testset "Resampling" begin
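# resample draws an equally weighted particle set, selecting samples with
# probability proportional to their weights: afterwards all log-weights are
# zero and about 90% of the samples should have :a == true.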
samples = fill(Dict(:a => false), 1000)
append!(samples, fill(Dict(:a => true), 1000))
weights = fill(log(0.1), 1000)
append!(weights, fill(log(0.9), 1000))
ps = resample(Particles(samples, weights))
@test all(x -> x == 0.0, ps.log_weights)
@test isapprox(count(x -> x[:a], ps.samples) / 2000, 0.9, atol = 0.05)
end
end
@testset "Rejection" begin
@testset "Basic" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = Rejection(1000)
infer(alg, runtime)
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i1, x -> x == :a), 0.1; atol = 0.05)
@test isapprox(probability(alg, runtime, i1, x -> x == :b), 0.9; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, x -> x == 1),
0.1 * 0.2 + 0.9 * 0.3; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, x -> x == 2),
0.1 * 0.8 + 0.9 * 0.7; atol = 0.05)
m1 = marginal(alg, runtime, i1)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m1, (), :a), 0.1; atol = 0.05)
@test isapprox(cpdf(m1, (), :b), 0.9; atol = 0.05)
@test isapprox(cpdf(m2, (), 1), 0.1 * 0.2 + 0.9 * 0.3; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), 0.1 * 0.8 + 0.9 * 0.7; atol = 0.05)
end
@testset "With placeholder" begin
p1 = Placeholder{Symbol}(:p1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v2], VariableGraph(v2 => [p1]), Placeholder[p1])
runtime = Runtime(net)
default_initializer(runtime, 10, Dict(p1.name => Cat([:a,:b], [0.1, 0.9])))
alg = Rejection(1000)
infer(alg, runtime)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i2, 1),
0.1 * 0.2 + 0.9 * 0.3; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 2),
0.1 * 0.8 + 0.9 * 0.7; atol = 0.05)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m2, (), 1), 0.1 * 0.2 + 0.9 * 0.3; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), 0.1 * 0.8 + 0.9 * 0.7; atol = 0.05)
end
@testset "With hard evidence" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = Rejection(1000)
infer(alg, runtime, Dict{Symbol, Score}(:v2 => HardScore(2)))
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
p1 = 0.1 * 0.8
p2 = 0.9 * 0.7
z = p1 + p2
@test isapprox(probability(alg, runtime, i1, :a), p1 / z; atol = 0.05)
@test isapprox(probability(alg, runtime, i1, :b), p2 / z; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 1), 0.0; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 2), 1.0; atol = 0.05)
m1 = marginal(alg, runtime, i1)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m1, (), :a), p1 / z; atol = 0.05)
@test isapprox(cpdf(m1, (), :b), p2 / z; atol = 0.05)
@test isapprox(cpdf(m2, (), 1), 0.0; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), 1.0; atol = 0.05)
end
@testset "With soft evidence" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = Rejection(1000)
infer(alg, runtime, Dict{Symbol, Score}(:v2 => SoftScore(Dict(1 => 0.6, 2 => 0.4))))
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
pa = 0.1 * (0.2 * 0.6 + 0.8 * 0.4)
pb = 0.9 * (0.3 * 0.6 + 0.7 * 0.4)
z1 = pa + pb
# soft evidence is treated like a lambda message on a variable,
# so the prior also comes into play, unlike an intervention
p1 = (0.1 * 0.2 + 0.9 * 0.3) * 0.6
p2 = (0.1 * 0.8 + 0.9 * 0.7) * 0.4
z2 = p1 + p2
@assert isapprox(z1, z2)
@test isapprox(probability(alg, runtime, i1, :a), pa / z1; atol = 0.05)
@test isapprox(probability(alg, runtime, i1, :b), pb / z1; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 1), p1 / z2; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 2), p2 / z2; atol = 0.05)
m1 = marginal(alg, runtime, i1)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m1, (), :a), pa / z1; atol = 0.05)
@test isapprox(cpdf(m1, (), :b), pb / z1; atol = 0.05)
@test isapprox(cpdf(m2, (), 1), p1 / z2; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), p2 / z2; atol = 0.05)
end
@testset "With intervention" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = Rejection(1000)
infer(alg, runtime, Dict{Symbol, Score}(), Dict{Symbol, Dist}(:v2 => Constant(2)))
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i1, :a), 0.1; atol = 0.05)
@test isapprox(probability(alg, runtime, i1, :b), 0.9; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 1), 0.0; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 2), 1.0; atol = 0.05)
m1 = marginal(alg, runtime, i1)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m1, (), :a), 0.1; atol = 0.05)
@test isapprox(cpdf(m1, (), :b), 0.9; atol = 0.05)
@test isapprox(cpdf(m2, (), 1), 0.0; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), 1.0; atol = 0.05)
end
end
@testset "LW" begin
@testset "Basic" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = LW(1000)
infer(alg, runtime)
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i1, x -> x == :a), 0.1; atol = 0.05)
@test isapprox(probability(alg, runtime, i1, x -> x == :b), 0.9; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, x -> x == 1),
0.1 * 0.2 + 0.9 * 0.3; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, x -> x == 2),
0.1 * 0.8 + 0.9 * 0.7; atol = 0.05)
m1 = marginal(alg, runtime, i1)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m1, (), :a), 0.1; atol = 0.05)
@test isapprox(cpdf(m1, (), :b), 0.9; atol = 0.05)
@test isapprox(cpdf(m2, (), 1), 0.1 * 0.2 + 0.9 * 0.3; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), 0.1 * 0.8 + 0.9 * 0.7; atol = 0.05)
end
@testset "With placeholder" begin
p1 = Placeholder{Symbol}(:p1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v2], VariableGraph(v2 => [p1]), Placeholder[p1])
runtime = Runtime(net)
default_initializer(runtime, 10, Dict(p1.name => Cat([:a,:b], [0.1, 0.9])))
alg = LW(1000)
infer(alg, runtime)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i2, 1),
0.1 * 0.2 + 0.9 * 0.3; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 2),
0.1 * 0.8 + 0.9 * 0.7; atol = 0.05)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m2, (), 1), 0.1 * 0.2 + 0.9 * 0.3; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), 0.1 * 0.8 + 0.9 * 0.7; atol = 0.05)
end
@testset "With hard evidence" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = LW(1000)
infer(alg, runtime, Dict{Symbol, Score}(:v2 => HardScore(2)))
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
p1 = 0.1 * 0.8
p2 = 0.9 * 0.7
z = p1 + p2
@test isapprox(probability(alg, runtime, i1, :a), p1 / z; atol = 0.05)
@test isapprox(probability(alg, runtime, i1, :b), p2 / z; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 1), 0.0; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 2), 1.0; atol = 0.05)
m1 = marginal(alg, runtime, i1)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m1, (), :a), p1 / z; atol = 0.05)
@test isapprox(cpdf(m1, (), :b), p2 / z; atol = 0.05)
@test isapprox(cpdf(m2, (), 1), 0.0; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), 1.0; atol = 0.05)
end
@testset "With soft evidence" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = LW(1000)
infer(alg, runtime, Dict{Symbol, Score}(:v2 => SoftScore(Dict(1 => 0.6, 2 => 0.4))))
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
pa = 0.1 * (0.2 * 0.6 + 0.8 * 0.4)
pb = 0.9 * (0.3 * 0.6 + 0.7 * 0.4)
z1 = pa + pb
# soft evidence is treated like a lambda message on a variable,
# so the prior also comes into play, unlike an intervention
p1 = (0.1 * 0.2 + 0.9 * 0.3) * 0.6
p2 = (0.1 * 0.8 + 0.9 * 0.7) * 0.4
z2 = p1 + p2
@assert isapprox(z1, z2)
@test isapprox(probability(alg, runtime, i1, :a), pa / z1; atol = 0.05)
@test isapprox(probability(alg, runtime, i1, :b), pb / z1; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 1), p1 / z2; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 2), p2 / z2; atol = 0.05)
m1 = marginal(alg, runtime, i1)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m1, (), :a), pa / z1; atol = 0.05)
@test isapprox(cpdf(m1, (), :b), pb / z1; atol = 0.05)
@test isapprox(cpdf(m2, (), 1), p1 / z2; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), p2 / z2; atol = 0.05)
end
@testset "With intervention" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = LW(1000)
infer(alg, runtime, Dict{Symbol, Score}(), Dict{Symbol, Dist}(:v2 => Constant(2)))
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i1, :a), 0.1; atol = 0.05)
@test isapprox(probability(alg, runtime, i1, :b), 0.9; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 1), 0.0; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 2), 1.0; atol = 0.05)
m1 = marginal(alg, runtime, i1)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m1, (), :a), 0.1; atol = 0.05)
@test isapprox(cpdf(m1, (), :b), 0.9; atol = 0.05)
@test isapprox(cpdf(m2, (), 1), 0.0; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), 1.0; atol = 0.05)
end
end
@testset "Custom proposal" begin
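# The custom proposal for :v2 is identical to the model's CPT, so posterior
# estimates should match plain likelihood weighting; this exercises the
# proposal machinery without changing the target distribution.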
prop::Dict{Symbol,SFunc} = Dict(:v2 =>
DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7])))
alg = Importance(make_custom_proposal(prop), 1000)
@testset "Basic" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
infer(alg, runtime)
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i1, x -> x == :a), 0.1; atol = 0.05)
@test isapprox(probability(alg, runtime, i1, x -> x == :b), 0.9; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, x -> x == 1),
0.1 * 0.2 + 0.9 * 0.3; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, x -> x == 2),
0.1 * 0.8 + 0.9 * 0.7; atol = 0.05)
m1 = marginal(alg, runtime, i1)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m1, (), :a), 0.1; atol = 0.05)
@test isapprox(cpdf(m1, (), :b), 0.9; atol = 0.05)
@test isapprox(cpdf(m2, (), 1), 0.1 * 0.2 + 0.9 * 0.3; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), 0.1 * 0.8 + 0.9 * 0.7; atol = 0.05)
end
@testset "With placeholder" begin
p1 = Placeholder{Symbol}(:p1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v2], VariableGraph(v2 => [p1]), Placeholder[p1])
runtime = Runtime(net)
default_initializer(runtime, 10, Dict(p1.name => Cat([:a,:b], [0.1, 0.9])))
infer(alg, runtime)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i2, 1),
0.1 * 0.2 + 0.9 * 0.3; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 2),
0.1 * 0.8 + 0.9 * 0.7; atol = 0.05)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m2, (), 1), 0.1 * 0.2 + 0.9 * 0.3; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), 0.1 * 0.8 + 0.9 * 0.7; atol = 0.05)
end
@testset "With hard evidence" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
infer(alg, runtime, Dict{Symbol, Score}(:v2 => HardScore(2)))
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
p1 = 0.1 * 0.8
p2 = 0.9 * 0.7
z = p1 + p2
@test isapprox(probability(alg, runtime, i1, :a), p1 / z; atol = 0.05)
@test isapprox(probability(alg, runtime, i1, :b), p2 / z; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 1), 0.0; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 2), 1.0; atol = 0.05)
m1 = marginal(alg, runtime, i1)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m1, (), :a), p1 / z; atol = 0.05)
@test isapprox(cpdf(m1, (), :b), p2 / z; atol = 0.05)
@test isapprox(cpdf(m2, (), 1), 0.0; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), 1.0; atol = 0.05)
end
@testset "With soft evidence" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
infer(alg, runtime, Dict{Symbol, Score}(:v2 => SoftScore(Dict(1 => 0.6, 2 => 0.4))))
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
pa = 0.1 * (0.2 * 0.6 + 0.8 * 0.4)
pb = 0.9 * (0.3 * 0.6 + 0.7 * 0.4)
z1 = pa + pb
# soft evidence is treated like a lambda message on a variable,
# so the prior also comes into play, unlike an intervention
p1 = (0.1 * 0.2 + 0.9 * 0.3) * 0.6
p2 = (0.1 * 0.8 + 0.9 * 0.7) * 0.4
z2 = p1 + p2
@assert isapprox(z1, z2)
@test isapprox(probability(alg, runtime, i1, :a), pa / z1; atol = 0.05)
@test isapprox(probability(alg, runtime, i1, :b), pb / z1; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 1), p1 / z2; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 2), p2 / z2; atol = 0.05)
m1 = marginal(alg, runtime, i1)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m1, (), :a), pa / z1; atol = 0.05)
@test isapprox(cpdf(m1, (), :b), pb / z1; atol = 0.05)
@test isapprox(cpdf(m2, (), 1), p1 / z2; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), p2 / z2; atol = 0.05)
end
@testset "With intervention" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
infer(alg, runtime, Dict{Symbol, Score}(), Dict{Symbol, Dist}(:v2 => Constant(2)))
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i1, :a), 0.1; atol = 0.05)
@test isapprox(probability(alg, runtime, i1, :b), 0.9; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 1), 0.0; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 2), 1.0; atol = 0.05)
m1 = marginal(alg, runtime, i1)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m1, (), :a), 0.1; atol = 0.05)
@test isapprox(cpdf(m1, (), :b), 0.9; atol = 0.05)
@test isapprox(cpdf(m2, (), 1), 0.0; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), 1.0; atol = 0.05)
end
end
@testset "Iterative sampling" begin
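# IterativeSampler accumulates particles across refine calls: each refine
# adds another batch from the wrapped LW(1000) sampler, growing the
# :particles state from 0 to 1000 to 2000 samples.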
@testset "Basic" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = IterativeSampler(LW(1000))
prepare(alg, runtime)
particles = get_state(runtime, :particles)
@test length(particles.samples) == 0
refine(alg, runtime)
particles = get_state(runtime, :particles)
@test length(particles.samples) == 1000
refine(alg, runtime)
particles = get_state(runtime, :particles)
@test length(particles.samples) == 2000
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i1, x -> x == :a), 0.1; atol = 0.05)
@test isapprox(probability(alg, runtime, i1, x -> x == :b), 0.9; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, x -> x == 1),
0.1 * 0.2 + 0.9 * 0.3; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, x -> x == 2),
0.1 * 0.8 + 0.9 * 0.7; atol = 0.05)
m1 = marginal(alg, runtime, i1)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m1, (), :a), 0.1; atol = 0.05)
@test isapprox(cpdf(m1, (), :b), 0.9; atol = 0.05)
@test isapprox(cpdf(m2, (), 1), 0.1 * 0.2 + 0.9 * 0.3; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), 0.1 * 0.8 + 0.9 * 0.7; atol = 0.05)
end
@testset "With placeholder" begin
p1 = Placeholder{Symbol}(:p1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v2], VariableGraph(v2 => [p1]), Placeholder[p1])
runtime = Runtime(net)
default_initializer(runtime, 10, Dict(p1.name => Cat([:a,:b], [0.1, 0.9])))
alg = IterativeSampler(LW(1000))
prepare(alg, runtime)
particles = get_state(runtime, :particles)
@test length(particles.samples) == 0
refine(alg, runtime)
particles = get_state(runtime, :particles)
@test length(particles.samples) == 1000
refine(alg, runtime)
particles = get_state(runtime, :particles)
@test length(particles.samples) == 2000
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i2, 1),
0.1 * 0.2 + 0.9 * 0.3; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 2),
0.1 * 0.8 + 0.9 * 0.7; atol = 0.05)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m2, (), 1), 0.1 * 0.2 + 0.9 * 0.3; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), 0.1 * 0.8 + 0.9 * 0.7; atol = 0.05)
end
@testset "With hard evidence" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = IterativeSampler(LW(1000))
prepare(alg, runtime, Dict(:v2 => HardScore(2)))
particles = get_state(runtime, :particles)
@test length(particles.samples) == 0
refine(alg, runtime)
particles = get_state(runtime, :particles)
@test length(particles.samples) == 1000
refine(alg, runtime)
particles = get_state(runtime, :particles)
@test length(particles.samples) == 2000
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
p1 = 0.1 * 0.8
p2 = 0.9 * 0.7
z = p1 + p2
@test isapprox(probability(alg, runtime, i1, :a), p1 / z; atol = 0.05)
@test isapprox(probability(alg, runtime, i1, :b), p2 / z; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 1), 0.0; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 2), 1.0; atol = 0.05)
m1 = marginal(alg, runtime, i1)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m1, (), :a), p1 / z; atol = 0.05)
@test isapprox(cpdf(m1, (), :b), p2 / z; atol = 0.05)
@test isapprox(cpdf(m2, (), 1), 0.0; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), 1.0; atol = 0.05)
end
@testset "With soft evidence" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = IterativeSampler(LW(1000))
prepare(alg, runtime, Dict(:v2 => SoftScore(Dict(1 => 0.6, 2 => 0.4))))
particles = get_state(runtime, :particles)
@test length(particles.samples) == 0
refine(alg, runtime)
particles = get_state(runtime, :particles)
@test length(particles.samples) == 1000
refine(alg, runtime)
particles = get_state(runtime, :particles)
@test length(particles.samples) == 2000
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
pa = 0.1 * (0.2 * 0.6 + 0.8 * 0.4)
pb = 0.9 * (0.3 * 0.6 + 0.7 * 0.4)
z1 = pa + pb
# soft evidence is treated like a lambda message on a variable,
# so the prior also comes into play, unlike an intervention
p1 = (0.1 * 0.2 + 0.9 * 0.3) * 0.6
p2 = (0.1 * 0.8 + 0.9 * 0.7) * 0.4
z2 = p1 + p2
@assert isapprox(z1, z2)
@test isapprox(probability(alg, runtime, i1, :a), pa / z1; atol = 0.05)
@test isapprox(probability(alg, runtime, i1, :b), pb / z1; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 1), p1 / z2; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 2), p2 / z2; atol = 0.05)
m1 = marginal(alg, runtime, i1)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m1, (), :a), pa / z1; atol = 0.05)
@test isapprox(cpdf(m1, (), :b), pb / z1; atol = 0.05)
@test isapprox(cpdf(m2, (), 1), p1 / z2; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), p2 / z2; atol = 0.05)
end
@testset "With intervention" begin
v1 = Cat([:a,:b], [0.1, 0.9])()(:v1)
v2 = DiscreteCPT([1,2], Dict((:a,) => [0.2, 0.8], (:b,) => [0.3, 0.7]))()(:v2)
net = InstantNetwork(Variable[v1,v2], VariableGraph(v2 => [v1]))
runtime = Runtime(net)
default_initializer(runtime)
alg = IterativeSampler(LW(1000))
prepare(alg, runtime, Dict{Symbol, Score}(), Dict(:v2 => Constant(2)))
particles = get_state(runtime, :particles)
@test length(particles.samples) == 0
refine(alg, runtime)
particles = get_state(runtime, :particles)
@test length(particles.samples) == 1000
refine(alg, runtime)
particles = get_state(runtime, :particles)
@test length(particles.samples) == 2000
i1 = current_instance(runtime, v1)
i2 = current_instance(runtime, v2)
@test isapprox(probability(alg, runtime, i1, :a), 0.1; atol = 0.05)
@test isapprox(probability(alg, runtime, i1, :b), 0.9; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 1), 0.0; atol = 0.05)
@test isapprox(probability(alg, runtime, i2, 2), 1.0; atol = 0.05)
m1 = marginal(alg, runtime, i1)
m2 = marginal(alg, runtime, i2)
@test isapprox(cpdf(m1, (), :a), 0.1; atol = 0.05)
@test isapprox(cpdf(m1, (), :b), 0.9; atol = 0.05)
@test isapprox(cpdf(m2, (), 1), 0.0; atol = 0.05)
@test isapprox(cpdf(m2, (), 2), 1.0; atol = 0.05)
end
end
end
using Test
using Logging
using Scruff
using Scruff.Models
using Scruff.Utils
using Scruff.RTUtils
using Scruff.SFuncs
using Scruff.Operators
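# These tests build a small discrete network and check that trace_runtime
# and time_algorithm emit the expected :info log records while running
# three-pass BP.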
@testset "test loggers" begin
x1 = Cat([1,2], [0.1, 0.9])()(:x1)
x2 = Cat([1,2,3], [0.2, 0.3, 0.5])()(:x2)
cpd2 = Dict((1,1) => [0.3, 0.7], (1,2) => [0.6, 0.4], (2,1) =>[0.4, 0.6],
(2,2) => [0.7, 0.3], (3,1) => [0.5, 0.5], (3,2) => [0.8, 0.2])
x3 = DiscreteCPT([1,2], cpd2)()(:x3)
x4 = DiscreteCPT([1,2], Dict((1,) => [0.15, 0.85], (2,) => [0.25, 0.75]))()(:x4)
x5 = DiscreteCPT([1,2], Dict((1,) => [0.35, 0.65], (2,) => [0.45, 0.55]))()(:x5)
x6 = DiscreteCPT([1,2], Dict((1,) => [0.65, 0.35], (2,) => [0.75, 0.25]))()(:x6)
fivecpdnet = InstantNetwork(Variable[x1,x2,x3,x4,x5], VariableGraph(x3=>[x2,x1], x4=>[x3], x5=>[x3]))
@testset "runtime logger" begin
run = Runtime(fivecpdnet)
default_initializer(run)
inst1 = current_instance(run, x1)
inst3 = current_instance(run, x3)
inst5 = current_instance(run, x5)
@test_logs(
(:info, r"distribute_messages!"),
(:info, r"get_node"),
(:info, r"collect_messages"),
(:info, r"get_variables"),
(:info, r"set_value!"),
(:info, r"current_instance"),
match_mode=:any,
trace_runtime(Scruff.Algorithms.three_pass_BP, run))
@test_logs(
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
(:info, r"collect_messages"),
trace_runtime(Scruff.Algorithms.three_pass_BP, run; fnamefilter=x->x == "collect_messages"))
end
#= This test takes too long
@testset "log all" begin
run = Runtime(fivecpdnet)
default_initializer(run)
inst1 = current_instance(run, x1)
inst3 = current_instance(run, x3)
inst5 = current_instance(run, x5)
@test_logs(
(:info, r"distribute_messages!"),
(:info, r"has_state"),
(:info, r"get_network"),
(:info, r"get_state"),
(:info, r"collect_messages"),
(:info, r"current_time"),
(:info, r"get_variables"),
(:info, r"set_value!"),
(:info, r"outgoing_pis"),
(:info, r"get_parents"),
(:info, r"get_children"),
(:info, r"compute_bel"),
(:info, r"current_instance"),
match_mode=:any,
trace_algorithm(Scruff.Algorithms.three_pass_BP, run))
end
=#
@testset "timing logger" begin
run = Runtime(fivecpdnet)
default_initializer(run)
inst1 = current_instance(run, x1)
inst3 = current_instance(run, x3)
inst5 = current_instance(run, x5)
@test_logs(
(:info, "three_pass_BP"),
(:info, "outgoing_pis"),
(:info, "get_sfunc"),
match_mode=:any,
time_algorithm(Scruff.Algorithms.three_pass_BP, run))
end
end
import Base.timedwait
import Base.isapprox
import PrettyPrint
using Test
using Scruff
using Scruff.Utils
using Scruff.RTUtils
using Scruff.Models
using Scruff.SFuncs
using Scruff.Operators
import Scruff.Algorithms: LSFI, probability_bounds, prepare, refine, ve
using Scruff.MultiInterface
import Scruff.Operators: Support, bounded_probs, support_quality, support
import Scruff.SFuncs: Dist
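# MyFlip is a minimal custom Dist{Bool}; its bounded_probs, support, and
# support_quality operators are supplied by the @impl blocks below
# (e.g. support(MyFlip(), (), 2, Bool[]) == [false, true]).
# bounded_probs increments the global solve_count on every call so tests
# can observe how often the sfunc is solved.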
struct MyFlip <: Dist{Bool} end
solve_count = 0
@impl begin
struct MyFlipBoundedProbs end
function bounded_probs(sf::MyFlip,
range::VectorOption{Bool},
parranges::NTuple{N,Vector})::Tuple{Vector{<:AbstractFloat}, Vector{<:AbstractFloat}} where {N}
global solve_count += 1
return ([0.1, 0.9], [0.1, 0.9])
end
end
@impl begin
struct MyFlipSupport end
function support(sf::MyFlip,
parranges::NTuple{N,Vector},
size::Integer,
curr::Vector{Bool}) where {N}
[false, true]
end
end
@impl begin
struct MyFlipSupportQuality end
function support_quality(::MyFlip, parranges)
:CompleteSupport
end
end
@testset "lsfi" begin
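# Tests for lazy structured factored inference: Expander construction,
# expansion bookkeeping, range computation (including recursive networks),
# probability bounds under partial expansion, and iterative refinement.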
@testset "expander" begin
@testset "Construction" begin
x1 = Cat([1,2], [0.1, 0.9])()
net1 = InstantNetwork(Variable[x1(:out)], VariableGraph())
x2 = Cat([1,2], [0.2, 0.8])()
net2 = InstantNetwork(Variable[x2(:out)], VariableGraph())
f(b) = b ? net1 : net2
b3v = Flip(0.5)()(:b3)
e3v = Expander(f, Tuple{Bool}, Int)()(:out)
net3 = InstantNetwork(Variable[e3v,b3v], VariableGraph(e3v=>[b3v]))
run3 = Runtime(net3)
ensure_expansion_state!(run3)
@test isempty(get_state(run3, :subnets))
@test isempty(get_state(run3, :subruntimes))
end
@testset "Expand" begin
x1 = Cat([1,2], [0.1, 0.9])()
net1 = InstantNetwork(Variable[x1(:out)], VariableGraph())
x2 = Cat([1,2], [0.2, 0.8])()
net2 = InstantNetwork(Variable[x2(:out)], VariableGraph())
f(b) = b ? net1 : net2
vb3 = Flip(0.5)()(:b3)
ve3 = Expander(f, Tuple{Bool}, Int)()(:out)
net3 = InstantNetwork(Variable[ve3,vb3], VariableGraph(ve3=>[vb3]))
run3 = Runtime(net3)
@testset "The first time" begin
expand!(run3, ve3, true)
@test expanded(run3, ve3, true)
@test expansion(run3, ve3, true) == net1
run1 = subruntime(run3, ve3, net1)
@test run1.network == net1
@test !expanded(run3, ve3, false)
end
@testset "The second time" begin
expand!(run3, ve3, true)
@test expanded(run3, ve3, true)
@test !expanded(run3, ve3, false)
end
end
@testset "Getting range" begin
x1 = Cat([1,2], [0.1, 0.9])()
net1 = InstantNetwork(Variable[x1(:out)], VariableGraph())
x2 = Cat([1,2], [0.2, 0.8])()
net2 = InstantNetwork(Variable[x2(:out)], VariableGraph())
f₀(b) = b ? net1 : net2
vb3 = Flip(0.5)()(:b3)
ve3 = Expander(f₀, Tuple{Bool}, Int)()(:out)
net3 = InstantNetwork(Variable[ve3,vb3], VariableGraph(ve3=>[vb3]))
@testset "With size requiring all expansions" begin
run3 = Runtime(net3)
ensure_all!(run3)
instb3 = current_instance(run3, vb3)
set_range!(run3, instb3, [false, true], 2)
(rng, _) = expander_range(run3, ve3, 4, 2)
@test expanded(run3, ve3, false)
@test expanded(run3, ve3, true)
@test rng == [1, 2]
end
@testset "With size not requiring all expansions" begin
run3 = Runtime(net3)
ensure_all!(run3)
instb3 = current_instance(run3, vb3)
set_range!(run3, instb3, [false, true], 2)
(rng, _) = expander_range(run3, ve3, 1, 2)
@test expanded(run3, ve3, false)
@test !expanded(run3, ve3, true)
@test rng == [1, 2]
end
@testset "With recursion" begin
b1 = Cat([1,2], [0.1, 0.9])()
net1 = InstantNetwork(Variable[b1(:out)], VariableGraph())
b2 = Cat([1,2], [0.2, 0.8])()
net2 = InstantNetwork(Variable[b2(:out)], VariableGraph())
f₂(b) = b ? net1 : net2
vb3 = Flip(0.7)()(:b3)
ve3 = Expander(f₂, Tuple{Bool}, Int)()(:out)
net3 = InstantNetwork(Variable[vb3,ve3], VariableGraph(ve3=>[vb3]))
g(b) = net3
vb4 = Flip(0.6)()(:b4)
ve4 = Expander(g, Tuple{Bool}, Int)()(:out)
net4 = InstantNetwork(Variable[vb4,ve4], VariableGraph(ve4=>[vb4]))
run4 = Runtime(net4)
ensure_all!(run4)
expand!(run4, ve4, false)
expand!(run4, ve4, true)
run3 = subruntime(run4, ve4, net3)
expand!(run3, ve3, false)
expand!(run3, ve3, true)
instb4 = current_instance(run4, vb4)
set_range!(run4, instb4, [false, true], 2)
(rng, _) = expander_range(run4, ve4, 4, 2)
@test rng == [] # recursion depth too shallow
(rng, _) = expander_range(run4, ve4, 4, 3)
@test Set(rng) == Set([1, 2])
end
end
@testset "Expanding range" begin
vx1 = Normal(-0.1, 1.0)()(:out)
net1 = InstantNetwork(Variable[vx1], VariableGraph())
vx2 = Normal(0.4, 1.0)()(:x2)
vy2 = LinearGaussian((0.7,), 0.0, 1.0)()(:out)
net2 = InstantNetwork(Variable[vx2,vy2], VariableGraph(vy2=>[vx2]))
g(b) = b ? net1 : net2
vb3 = Flip(0.5)()(:b3)
ve3 = Expander(g, Tuple{Bool}, Float64)()(:out)
net3 = InstantNetwork(Variable[vb3,ve3], VariableGraph(ve3=>[vb3]))
run3 = Runtime(net3)
instb3 = instantiate!(run3, vb3, 0)
set_range!(run3, instb3, [false, true], 2)
(r1,_) = expander_range(run3, ve3, 5, 2)
(r2,_) = expander_range(run3, ve3, 10, 2)
@test length(r2) >= 10
@test issorted(r2)
@test length(Set(r2)) == length(r2)
@test issubset(Set(r1), Set(r2))
end
@testset "Computing probability bounds" begin
vx1 = Cat([1,2], [0.1, 0.9])()(:out)
net1 = InstantNetwork(Variable[vx1], VariableGraph())
vx2 = Cat([1,2], [0.2, 0.8])()(:out)
net2 = InstantNetwork(Variable[vx2], VariableGraph())
f(b) = b ? net1 : net2
vb3 = Flip(0.5)()(:b3)
ve3 = Expander(f, Tuple{Bool}, Int)()(:out)
net3 = InstantNetwork(Variable[vb3,ve3], VariableGraph(ve3=>[vb3]))
@testset "With a fully expanded model" begin
run3 = Runtime(net3)
ensure_all!(run3)
instb3 = current_instance(run3, vb3)
set_range!(run3, instb3, [false, true])
ord = topsort(get_initial_graph(net3))
set_ranges!(run3, Dict{Symbol, Score}(), 4, 2, ord)
f₃(runtime, order, query_vars, depth) = ve(runtime, order, query_vars; depth = depth, bounds = true)
(l, u) = expander_probs(run3, f₃, ve3, 2)
@test l == [0.2, 0.8, 0.1, 0.9]
@test u == [0.2, 0.8, 0.1, 0.9]
end
@testset "With a partially expanded model" begin
run3 = Runtime(net3)
ensure_all!(run3)
instb3 = current_instance(run3, vb3)
set_range!(run3, instb3, [false, true])
ord = topsort(get_initial_graph(net3))
set_ranges!(run3, Dict{Symbol, Score}(), 1, 2, ord)
f₄(runtime, order, query_vars, depth) = ve(runtime, order, query_vars; depth = depth, bounds = true)
(l, u) = expander_probs(run3, f₄, ve3, 2)
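# The expanded (false) branch contributes exact probabilities, while the
# unexpanded (true) branch contributes vacuous [0, 1] bounds.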
@test l == [0.2, 0.8, 0, 0]
@test u == [0.2, 0.8, 1, 1]
end
end
end
@testset "inside lsfi" begin
@testset "Completeness" begin
vx1 = Cat([1,2], [0.1, 0.9])()(:out)
net1 = InstantNetwork(Variable[vx1], VariableGraph())
vx2 = Cat([1,2], [0.2, 0.8])()(:out)
net2 = InstantNetwork(Variable[vx2], VariableGraph())
vx3 = Normal(-0.1, 1.0)()(:out)
net3 = InstantNetwork(Variable[vx3], VariableGraph())
vx4 = Normal(0.4, 1.0)()(:x4)
vy4 = LinearGaussian((0.7,), 0.0, 1.0)()(:out)
net4 = InstantNetwork(Variable[vx4,vy4], VariableGraph(vy4=>[vx4]))
f5(b) = b ? net1 : net2
vb5 = Flip(0.5)()(:b5)
ve5 = Expander(f5, Tuple{Bool}, Int)()(:out)
net5 = InstantNetwork(Variable[vb5,ve5], VariableGraph(ve5=>[vb5]))
f6(b) = b ? net3 : net4
vb6 = Flip(0.6)()(:b6)
ve6 = Expander(f6, Tuple{Bool}, Float64)()(:out)
net6 = InstantNetwork(Variable[vb6,ve6], VariableGraph(ve6=>[vb6]))
run5 = Runtime(net5)
ord5 = topsort(get_initial_graph(net5))
run6 = Runtime(net6)
ord6 = topsort(get_initial_graph(net6))
ensure_all!(run5, 0)
ensure_all!(run6, 0)
expand!(run5, ve5, false)
expand!(run5, ve5, true)
expand!(run6, ve6, false)
expand!(run6, ve6, true)
@test !RTUtils._complete(run5)
set_ranges!(run5, Dict{Symbol, Score}(), 1, 3, ord5) # range is too small to be complete
@test !RTUtils._complete(run5) # because subnetwork not expanded
set_ranges!(run5, Dict{Symbol, Score}(), 4, 3, ord5) # range is large enough
@test RTUtils._complete(run5)
set_ranges!(run6, Dict{Symbol, Score}(), 10, 3, ord6)
@test !RTUtils._complete(run6)
set_ranges!(run6, Dict{Symbol, Score}(), 20, 3, ord6) # continuous range not complete
@test !RTUtils._complete(run6)
end
@testset "Completeness with recursion" begin
f(b) = net1
vb1 = Flip(0.1)()(:b1)
ve1 = Expander(f, Tuple{Bool}, Bool)()(:out)
net1 = InstantNetwork(Variable[vb1,ve1], VariableGraph(ve1=>[vb1]))
run1 = Runtime(net1)
expand!(run1, ve1, false)
expand!(run1, ve1, true)
@test !RTUtils._complete(run1)
end
@testset "Run to completion" begin
v1 = Cat([1,2], [0.1, 0.9])()(:out)
net1 = InstantNetwork(Variable[v1], VariableGraph())
x2 = Cat([1,2], [0.2, 0.8])()(:out)
net2 = InstantNetwork(Variable[x2], VariableGraph())
vb5 = Flip(0.4)()(:b5)
f5(b) = b ? net2 : net1
ve5 = Expander(f5, Tuple{Bool}, Int)()(:out)
net5 = InstantNetwork(Variable[vb5,ve5], VariableGraph(ve5=>[vb5]))
run5 = Runtime(net5)
ord5 = topsort(get_initial_graph(net5))
ensure_all!(run5, 0)
alg = LSFI([ve5]; start_depth = 2)
prepare(alg, run5)
refine(alg, run5)
inst = current_instance(run5, ve5)
range = get_range(run5, inst)
(ls,us) = probability_bounds(alg, run5, inst, range)
pa = 0.4 * 0.2 + 0.6 * 0.1
pb = 0.4 * 0.8 + 0.6 * 0.9
@test isapprox(ls, [pa, pb])
@test isapprox(us, [pa, pb])
end
@testset "With dynamic programming" begin
expand_count = 0
global function PrettyPrint.pp_impl(io, frame::StackTraces.StackFrame, indent::Int)
line = frame.inlined ? "[inlined]" : "$(frame.line)"
print(io, "$(frame.func) at $(frame.file):$(line)")
indent
end
b1 = MyFlip()()(:out)
net1 = InstantNetwork(Variable[b1], VariableGraph())
function f1(x)
if x expand_count += 1 end
net1
end
vb2 = Flip(0.2)()(:b2)
ve2 = Expander(f1, Tuple{Bool}, Bool)()(:out)
net2 = InstantNetwork(Variable[vb2,ve2], VariableGraph(ve2=>[vb2]))
vb3 = Flip(0.3)()(:b3)
ve3 = Expander(x -> net2, Tuple{Bool}, Bool)()(:out)
net3 = InstantNetwork(Variable[vb3,ve3], VariableGraph(ve3=>[vb3]))
vb4 = Flip(0.4)()(:b4)
ve4 = Expander(x -> net2, Tuple{Bool}, Bool)()(:out)
net4 = InstantNetwork(Variable[vb4,ve4], VariableGraph(ve4=>[vb4]))
vb5 = Flip(0.5)()(:b5)
ve5 = Expander(b -> b ? net3 : net4, Tuple{Bool}, Bool)()(:out)
net5 = InstantNetwork(Variable[vb5,ve5], VariableGraph(ve5=>[vb5]))
run5 = Runtime(net5)
ensure_all!(run5, 0)
order = topsort(get_initial_graph(net5))
alg = LSFI([ve5]; start_depth = 10)
prepare(alg, run5)
refine(alg, run5)
inst = current_instance(run5, ve5)
range = get_range(run5, inst)
(lower, upper) = probability_bounds(alg, run5, inst, range)
@test isapprox(lower, [0.1, 0.9], atol = 0.0000001)
@test isapprox(upper, lower)
end
@testset "With recursion" begin
bx1 = Flip(1.0)()(:out)
netx1 = InstantNetwork(Variable[bx1], VariableGraph())
vx2 = Flip(0.9)()(:b2)
fx(b) = b ? netx2 : netx1
vex2 = Expander(fx, Tuple{Bool}, Bool)()(:out)
netx2 = InstantNetwork(Variable[vx2,vex2], VariableGraph(vex2=>[vx2]))
runtime = Runtime(netx2)
ensure_all!(runtime, 0)
alg = LSFI([vex2]; start_depth = 11)
prepare(alg, runtime)
refine(alg, runtime)
@test true # This test succeeds if the above call terminates
end
end
@testset "Using the LazyInference interface" begin
@testset "LSFI" begin
m1 = Cat([1,2], [0.1, 0.9])()
v1 = m1(:out)
net1 = InstantNetwork(Variable[v1], VariableGraph())
m2 = Cat([1,2], [0.2, 0.8])()
v2 = m2(:out)
net2 = InstantNetwork(Variable[v2], VariableGraph())
f(b) = b ? net1 : net2
m3 = Flip(0.9)()
m4 = Expander(f, Tuple{Bool}, Int)()
m5 = DiscreteCPT([1,2], Dict((1,) => [0.3, 0.7], (2,) => [0.4, 0.6]))()
v3 = m3(:v3)
v4 = m4(:v4)
v5 = m5(:out)
net3 = InstantNetwork(Variable[v3,v4,v5], VariableGraph(v4 => [v3], v5 => [v4]))
alg = LSFI([get_node(net3, :out)]; increment = 3, max_iterations = 20, start_size=3)
runtime = Runtime(net3)
ensure_all!(runtime, 0)
prepare(alg, runtime)
refine(alg, runtime)
@test has_instance(runtime, v5)
i5 = current_instance(runtime, v5)
state = alg.state
@test state.next_size == 6
@test state.next_iteration == 2
@test state.next_depth == 2
@test !state.is_complete
# At first expansion, the bounds for i5 are (0,1)
(ls1, us1) = probability_bounds(alg, runtime, i5, [1,2])
@test isapprox(ls1[1], 0.0)
@test isapprox(us1[1], 1.0)
q2 = 0.9 * (0.1 * 0.3 + 0.9 * 0.4) + 0.1 * (0.2 * 0.3 + 0.8 * 0.4)
refine(alg, runtime)
@test has_instance(runtime, v5)
i5 = current_instance(runtime, v5)
state = alg.state
@test state.next_size == 9
@test state.next_iteration == 3
@test state.next_depth == 3
@test state.is_complete
# At second expansion, lower bounds = upper bounds = q2
(ls2, us2) = probability_bounds(alg, runtime, i5, [1,2])
@test isapprox(ls2[1], q2)
@test isapprox(us2[1], q2)
end
end
end
using Test
using Scruff
using Scruff.SFuncs
using Scruff.Operators
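# Tests for NetworkSFunc: constructing a network of sfuncs over a
# NetworkInput, then checking sample and logcpdf against hand-computed
# chain probabilities.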
# Julia only allows struct definitions at top level, so the test sfuncs are
# declared outside the @testset blocks.
struct AddC <: SFunc{Tuple{Int},Int}
c::Int
end
struct MulC <: SFunc{Tuple{Int},Int}
c::Int
end
struct Add <: SFunc{Tuple{Int,Int},Int}
end
@testset "net" begin
@testset "Build NetworkSFunc" begin
x = NetworkInput{Int}()
addc = AddC(2)
mulc = MulC(-1)
add = Add()
sfuncs = (addc, mulc, add)
parents = Dict{SFunc,Any}(addc=>[x], mulc=>[x], add=>[addc, mulc])
outputs = (add,)
net = NetworkSFunc((x,),
sfuncs,
parents,
outputs)
# constructing the NetworkSFunc without error is the point of this test
@test net isa NetworkSFunc
end
@testset "Network Ops" begin
x = NetworkInput{Int}()
x_y = DiscreteCPT([1,2], Dict((1,) => [0.9, 0.1], (2,) => [0.1, 0.9]))
y_z = DiscreteCPT([1,2], Dict((1,) => [0.1, 0.9], (2,) => [0.9, 0.1]))
sfuncs = (x_y, y_z)
parents = Dict{SFunc,Any}(x_y=>[x],y_z=>[x_y])
outputs = (y_z,)
net = NetworkSFunc((x,),
sfuncs,
parents,
outputs)
y1 = sample(net, (1,))
y2 = sample(net, (1,))
cpdf2 = logcpdf(net, (1,), y2)
# cpdf2, y2 = sample_logcpdf(net, 1)
N = 100000
correct = [0.18 0.82
0.82 0.18]
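# e.g. P(z=1|x=1) = 0.9*0.1 + 0.1*0.9 = 0.18, marginalizing over y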
for x in 1:2
for z in 1:2
cpdf3 = 0.0
for i in 1:N
cpdf3 += exp(logcpdf(net, (x,), z))
end
cpdf3 = cpdf3/N
# println("P(z=$z|x=$x) = $cpdf3")
@test isapprox(correct[x,z], cpdf3, rtol=5e-2)
end
end
end
end
using Test
import Logging
using Scruff
using Scruff.Operators
using Scruff.MultiInterface: @impl
import Scruff.Operators: sample, forward, cpdf, logcpdf
import Scruff.SFuncs: Constant
# Test default implementation of operators
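# Each SF struct below implements a different subset of {forward, sample,
# cpdf, logcpdf}; the testsets check when an explicit implementation wins
# and when the default fallbacks apply (sample via forward; cpdf and
# logcpdf defined in terms of each other).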
logger = Logging.SimpleLogger(stderr, Logging.Error+1)
struct SF1 <: SFunc{Tuple{Int},Int} end
@impl begin
struct SF1Forward end
function forward(sf::SF1, i::Tuple{Int})::Dist{Int}
Constant(1)
end
end
@impl begin
struct SF1Sample end
function sample(sf::SF1, i::Tuple{Int})::Int
0
end
end
struct SF2 <: SFunc{Tuple{Int},Int} end
@impl begin
struct SF2Forward end
function forward(sf::SF2, i::Tuple{Int})::Dist{Int}
Constant(1)
end
end
struct SF3 <: SFunc{Tuple{Int}, Int} end
struct SF4 <: SFunc{Tuple{Int}, Int} end
@impl begin
struct SF4Cpdf end
function cpdf(sf::SF4, i::Tuple{Int}, o::Int)
0.0
end
end
@impl begin
struct SF4Logcpdf end
function logcpdf(sf::SF4, i::Tuple{Int}, o::Int)
0.0 # so cpdf is 1.0
end
end
struct SF5 <: SFunc{Tuple{Int}, Int} end
@impl begin
struct SF5Cpdf end
function cpdf(sf::SF5, i::Tuple{Int}, o::Int)
0.0
end
end
struct SF6 <: SFunc{Tuple{Int}, Int} end
@impl begin
struct SF6Logcpdf end
function logcpdf(sf::SF6, i::Tuple{Int}, o::Int)
0.0
end
end
struct SF7 <: SFunc{Tuple{Int}, Int} end
Logging.with_logger(logger) do
@testset "Implementation of sample using forward" begin
@testset "When explicit sample defined that is different" begin
# To test the calls, we implement forward and sample in contradictory ways
# The explicit sample should be used
@test sample(SF1(), (2,)) == 0
end
@testset "When forward is implemented explicitly but not sample" begin
# sample should use forward
@test sample(SF2(), (2,)) == 1
end
@testset "When neither sample nor forward are implemented explicitly" begin
# Should throw a MethodError because forward is not found when using the default implementation of sample
@test_throws MethodError sample(SF3(), (2,))
end
end
@testset "Default implementations of cpdf and logcpdf in terms of each other" begin
@testset "When both cpdf and logcpdf are implemented explicitly" begin
# To test the calls, we implement cpdf and logcpdf in contradictory ways
@test cpdf(SF4(), (2,), 1) == 0.0
@test logcpdf(SF4(), (2,), 1) == 0.0
end
@testset "When only cpdf is implemented explicitly" begin
@test cpdf(SF5(), (2,), 1) == 0.0
@test logcpdf(SF5(), (2,), 1) == -Inf64
end
@testset "When only logcpdf is implemented explicitly" begin
@test cpdf(SF6(), (2,), 1) == 1.0
@test logcpdf(SF6(), (2,), 1) == 0.0
end
# @testset "When neither cpdf nor logcpdf are implemented explicitly" begin
# # should detect infinite loop and throw error
# @test_throws ErrorException cpdf(SF7(), (2,), 1)
# @test_throws ErrorException logcpdf(SF7(), (2,), 1)
# end
end
end