licenses (sequence, lengths 1-3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, lengths 2-8) | text (string, lengths 25-67.1M) | package_name (string, lengths 2-41) | repo (string, lengths 33-86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 2248 |
export LinearRelative, PackedLinearRelative
"""
$(TYPEDEF)
Default linear offset between two scalar variables.
```math
X_2 = X_1 + η_Z
```
"""
struct LinearRelative{N, T <: SamplableBelief} <: AbstractManifoldMinimize # AbstractRelativeMinimize
Z::T
end
# need several helper constructors since the dimension over which LinearRelative will be used is unknown at this point
function LinearRelative{N}(
z0::T = MvNormal(zeros(N), diagm(ones(N))),
) where {N, T <: SamplableBelief}
#
return LinearRelative{N, T}(z0)
end
function LinearRelative(::UniformScaling = LinearAlgebra.I)
return LinearRelative{1}(MvNormal(zeros(1), diagm(ones(1))))
end
function LinearRelative(nm::Distributions.ContinuousUnivariateDistribution)
return LinearRelative{1, typeof(nm)}(nm)
end
LinearRelative(nm::MvNormal) = LinearRelative{length(nm.μ), typeof(nm)}(nm)
function LinearRelative(nm::Union{<:BallTreeDensity, <:ManifoldKernelDensity})
return LinearRelative{Ndim(nm), typeof(nm)}(nm)
end
getManifold(::InstanceType{LinearRelative{N}}) where {N} = getManifold(ContinuousEuclid{N})
# TODO standardize
getDimension(::InstanceType{LinearRelative{N}}) where {N} = N
# new and simplified interface for both nonparametric and parametric
function (s::CalcFactor{<:LinearRelative})(z, x1, x2)
# TODO convert to distance(distance(x2,x1),z) # or use dispatch on `-` -- what to do about `.-`
# if s._sampleIdx < 5
# @info "LinearRelative" s._sampleIdx "$z" "$x1" "$x2" s.solvefor getLabel.(s.fullvariables)
# @info "in variables" pointer(getVal(s.fullvariables[s.solvefor])) getVal(s.fullvariables[s.solvefor])[1]
# end
return z .- (x2 .- x1)
end
function Base.convert(
::Type{<:MB.AbstractManifold},
::InstanceType{LinearRelative{N}},
) where {N}
return Manifolds.TranslationGroup(N)
end
"""
$(TYPEDEF)
Serialization type for `LinearRelative` binary factor.
"""
Base.@kwdef mutable struct PackedLinearRelative <: AbstractPackedFactor
Z::PackedSamplableBelief
end
function convert(::Type{PackedLinearRelative}, d::LinearRelative)
return PackedLinearRelative(convert(PackedSamplableBelief, d.Z))
end
function convert(::Type{LinearRelative}, d::PackedLinearRelative)
return LinearRelative(convert(SamplableBelief, d.Z))
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 363 | # A meta prior carries additional, not necessarily numerical, prior information
Base.@kwdef struct MetaPrior{T} <: AbstractPrior
data::T
partial::Vector{Int} = Int[]
end
MetaPrior(data) = MetaPrior(;data)
getManifold(::MetaPrior) = TranslationGroup(0)
getMeasurementParametric(::MetaPrior) = MvNormal(zeros(0))
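# Minimal usage sketch (assumes IncrementalInference is loaded): attach
# arbitrary non-numerical data to a variable. The NamedTuple payload here is
# only an illustrative assumption; any data type T works.
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addFactor!(fg, [:x0], MetaPrior((source = "wheel_odometry", calibrated = true)))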
getSample(cf::CalcFactor{<:MetaPrior}) = SVector() | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 5832 |
_defaultNamesMixtures(N::Int) = ((Symbol[Symbol("c$i") for i = 1:N])...,)
"""
$(TYPEDEF)
A `Mixture` object for use with either a `<: AbstractPrior` or `<: AbstractRelative`.
Notes
- The internal data representation is a `::NamedTuple`, which allows total type-stability for all component types.
- Various construction helpers can accept a variety of inputs, including `<: AbstractArray` and `Tuple`.
- `N` is the number of components used to make the mixture, so two bumps from two Normal components means `N=2`.
DevNotes
- FIXME swap API order so a Mixture of distributions works like a distribution, see Caesar.jl #808
- Should not have field mechanics.
- TODO on sampling see #1099 and #1094 and #1069
Example
```julia
# prior factor
msp = Mixture(Prior,
[Normal(0,0.1), Uniform(-pi/1,pi/2)],
[0.5;0.5])
addFactor!(fg, [:head], msp, tags=[:MAGNETOMETER;])
# Or relative
mlr = Mixture(LinearRelative,
(correlator=AliasingScalarSampler(...), naive=Normal(0.5,5), lucky=Uniform(0,10)),
[0.5;0.4;0.1])
addFactor!(fg, [:x0;:x1], mlr)
```
"""
struct Mixture{N, F <: AbstractFactor, S, T <: Tuple} <: AbstractFactor
""" factor mechanics """
mechanics::F
components::NamedTuple{S, T}
diversity::Distributions.Categorical
""" dimension of factor, so range measurement would be dims=1 """
dims::Int
labels::Vector{Int}
end
function Mixture(
f::Type{F},
z::NamedTuple{S, T},
c::Distributions.DiscreteNonParametric,
) where {F <: AbstractFactor, S, T}
return Mixture{length(z), F, S, T}(
f(LinearAlgebra.I),
z,
c,
size(rand(z[1], 1), 1),
zeros(Int, 0),
)
end
function Mixture(
f::F,
z::NamedTuple{S, T},
c::Distributions.DiscreteNonParametric,
) where {F <: AbstractFactor, S, T}
return Mixture{length(z), F, S, T}(f, z, c, size(rand(z[1], 1), 1), zeros(Int, 0))
end
function Mixture(
f::Union{F, Type{F}},
z::NamedTuple{S, T},
c::AbstractVector{<:Real},
) where {F <: AbstractFactor, S, T}
return Mixture(f, z, Categorical([c...]))
end
function Mixture(
f::Union{F, Type{F}},
z::NamedTuple{S, T},
c::NTuple{N, <:Real},
) where {N, F <: AbstractFactor, S, T}
return Mixture(f, z, [c...])
end
function Mixture(
f::Union{F, Type{F}},
z::Tuple,
c::Union{
<:Distributions.DiscreteNonParametric,
<:AbstractVector{<:Real},
<:NTuple{N, <:Real},
},
) where {F <: AbstractFactor, N}
return Mixture(f, NamedTuple{_defaultNamesMixtures(length(z))}(z), c)
end
function Mixture(
f::Union{F, Type{F}},
z::AbstractVector{<:SamplableBelief},
c::Union{
<:Distributions.DiscreteNonParametric,
<:AbstractVector{<:Real},
<:NTuple{N, <:Real},
},
) where {F <: AbstractFactor, N}
return Mixture(f, (z...,), c)
end
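# Minimal sketch of the vector pathway above: component names default to
# :c1, :c2, ... via _defaultNamesMixtures when a plain vector is supplied.
mix = Mixture(LinearRelative, [Normal(0.0, 0.1); Uniform(-1.0, 1.0)], [0.7; 0.3])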
function Base.resize!(mp::Mixture, s::Int)
return resize!(mp.labels, s)
end
_lengthOrNothing(val) = length(val)
_lengthOrNothing(val::Nothing) = 0
getManifold(m::Mixture) = getManifold(m.mechanics)
# TODO make in-place memory version
function sampleFactor(cf::CalcFactor{<:Mixture}, N::Int = 1)
#
# TODO consolidate #927, case if mechanics has a special sampler
# TODO slight bit of waste in computation, but easiest way to ensure special tricks in s.mechanics::F are included
## example case is old FluxModelsPose2Pose2 requiring velocity
# FIXME better consolidation of when to pass down .mechanics, also see #1099 and #1094 and #1069
cf_ = CalcFactorNormSq(
cf.factor.mechanics,
0,
cf._legacyParams,
cf._allowThreads,
cf.cache,
cf.fullvariables,
cf.solvefor,
cf.manifold,
cf.measurement,
nothing,
)
smpls = [getSample(cf_) for _ = 1:N]
# smpls = Array{Float64,2}(undef,s.dims,N)
#out memory should be right size first
length(cf.factor.labels) != N ? resize!(cf.factor.labels, N) : nothing
cf.factor.labels .= rand(cf.factor.diversity, N)
M = cf.manifold
# mixture needs to be refactored so let's make it worse :-)
if cf.factor.mechanics isa AbstractPrior
samplef = samplePoint
elseif cf.factor.mechanics isa AbstractRelative
samplef = sampleTangent
end
for i = 1:N
mixComponent = cf.factor.components[cf.factor.labels[i]]
# measurements relate to the factor's manifold (either tangent vector or manifold point)
setPointsMani!(smpls, samplef(M, mixComponent), i)
end
# TODO only does first element of meas::Tuple at this stage, see #1099
return smpls
end
function DistributedFactorGraphs.isPrior(::Mixture{N, F, S, T}) where {N, F, S, T}
return F <: AbstractPrior
end
"""
$(TYPEDEF)
Serialization type for `Mixture`.
"""
Base.@kwdef mutable struct PackedMixture <: AbstractPackedFactor
N::Int
# store the packed type for later unpacking
F_::String
S::Vector{String}
components::Vector{PackedSamplableBelief}
diversity::PackedSamplableBelief
end
function convert(::Type{<:PackedMixture}, obj::Mixture{N, F, S, T}) where {N, F, S, T}
allcomp = PackedSamplableBelief[]
for val in obj.components
dtr_ = convert(PackedSamplableBelief, val)
# FIXME ON FIRE, likely to be difficult for non-standard "Samplable" types -- e.g. Flux models in RoME
push!(allcomp, dtr_)
end
pm = DFG.convertPackedType(obj.mechanics)
pm_ = convert(pm, obj.mechanics)
sT = string(typeof(pm_))
dvst = convert(PackedSamplableBelief, obj.diversity)
return PackedMixture(N, sT, string.(collect(S)), allcomp, dvst)
end
function convert(::Type{<:Mixture}, obj::PackedMixture)
N = obj.N
F1 = getfield(Main, Symbol(obj.F_))
S = (Symbol.(obj.S)...,)
F2 = DFG.convertStructType(F1)
components = map(c -> convert(SamplableBelief, c), obj.components)
diversity = convert(SamplableBelief, obj.diversity)
# tupcomp = (components...,)
ntup = NamedTuple{S}(components) # ,typeof(tupcomp)
return Mixture(F2, ntup, diversity)
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1386 |
"""
$(TYPEDEF)
Message prior on all dimensions of a variable node in the factor graph.
Notes
- Only exists temporarily during CSM operations.
"""
struct MsgPrior{T <: SamplableBelief} <: AbstractPrior
Z::T
infoPerCoord::Vector{Float64}
M::Any
end
# MsgPrior{T}() where {T} = new{T}()
# MsgPrior{T}(z::T, infd::R) where {T <: SamplableBelief, R <: Real} = new{T}(z, infd)
# function MsgPrior(z::T, infd::R) where {T <: SamplableBelief, R <: Real}
# MsgPrior{T}(z, infd)
# end
function getSample(cf::CalcFactor{<:MsgPrior})
return rand(cf.factor.Z, 1)
end
#TODO check these for manifolds, may need updating to samplePoint
# MKD already returns a vector of points
function getSample(cf::CalcFactor{<:MsgPrior{<:ManifoldKernelDensity}})
mkd = cf.factor.Z
return samplePoint(mkd.manifold, mkd)
end
getManifold(mp::MsgPrior{<:ManifoldKernelDensity}) = mp.Z.manifold
getManifold(mp::MsgPrior) = mp.M
#FIXME this will not work on manifolds
(cfo::CalcFactor{<:MsgPrior})(z, x1) = z .- x1
Base.@kwdef struct PackedMsgPrior <: AbstractPackedFactor
Z::PackedSamplableBelief
infoPerCoord::Vector{Float64}
end
function convert(::Type{PackedMsgPrior}, d::MsgPrior)
return PackedMsgPrior(convert(PackedSamplableBelief, d.Z), d.infoPerCoord)
end
function convert(::Type{<:MsgPrior}, d::PackedMsgPrior)
return MsgPrior(convert(SamplableBelief, d.Z), d.infoPerCoord)
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1705 |
"""
$(TYPEDEF)
Partial prior belief (absolute data) on any variable, given `<:SamplableBelief` and which dimensions of the intended variable.
Notes
- If using [`AMP.ManifoldKernelDensity`](@ref), don't double partial. Only define the partial in this `PartialPrior` container.
- Future TBD, consider using `AMP.getManifoldPartial` for more general abstraction.
"""
struct PartialPrior{T <: SamplableBelief, P <: Tuple} <: AbstractPrior
varType::Type{<:InferenceVariable}
Z::T
partial::P
end
# TODO, standardize, but shows error on testPartialNH.jl
getSample(cf::CalcFactor{<:PartialPrior}) = samplePoint(cf.factor.Z) # remove in favor of ManifoldSampling.jl
# getManifold(pp::PartialPrior) = TranslationGroup(length(pp.partial)) # uncomment
getManifold(pp::PartialPrior) = getManifoldPartial(getManifold(pp.varType), [pp.partial...])[1]
# getManifold(pp::PartialPrior) = getManifold(pp.varType)
# getManifold(pp::PartialPrior{<:PackedManifoldKernelDensity}) = pp.Z.manifold
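# Minimal sketch, assuming a Position{2} variable type: constrain only the
# first coordinate with a 1D belief; the belief dimension matches the partial.
ppr = PartialPrior(Position{2}, Normal(0.0, 0.1), (1,))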
"""
$(TYPEDEF)
Serialization type for `PartialPrior`.
"""
Base.@kwdef struct PackedPartialPrior <: AbstractPackedFactor
varType::String
Z::PackedSamplableBelief
partials::Vector{Int}
end
function convert(::Type{PackedPartialPrior}, d::PartialPrior)
return PackedPartialPrior(DFG.typeModuleName(d.varType), convert(PackedSamplableBelief, d.Z), [d.partial...;])
# return PackedPartialPrior(convert(PackedSamplableBelief, d.Z), [d.partial...;])
end
function convert(::Type{PartialPrior}, d::PackedPartialPrior)
return PartialPrior(DFG.getTypeFromSerializationModule(d.varType), convert(SamplableBelief, d.Z),(d.partials...,))
# return PartialPrior(convert(SamplableBelief, d.Z), (d.partials...,))
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1791 | # prior factor that passes a density belief straight through to inference without resampling
export PartialPriorPassThrough, PackedPartialPriorPassThrough
struct PartialPriorPassThrough{
B <: Union{<:HeatmapGridDensity, <:LevelSetGridNormal},
T <: Tuple,
} <: AbstractPrior
Z::B
partial::T
end
getManifold(pppt::PartialPriorPassThrough) = getManifold(pppt.Z)
# this step is skipped during main inference process
function getSample(cf::CalcFactor{<:PartialPriorPassThrough})
# TODO should be samplePoint for priors?
return sampleTangent(cf.manifold, cf.factor.Z)
end
## ====================================================================================================
## Serialize PartialPriorPassThrough
## ====================================================================================================
"""
$TYPEDEF
Packed representation of an internal density; required so the stored density records its own type.
"""
Base.@kwdef mutable struct PackedPartialPriorPassThrough <: AbstractPackedFactor
Z::PackedSamplableBelief # PackedHeatmapGridDensity
partial::Vector{Int}
end
# StructTypes.StructType(::Type{PackedPartialPriorPassThrough}) = StructTypes.UnorderedStruct()
# StructTypes.idproperty(::Type{PackedPartialPriorPassThrough}) = :id
# StructTypes.omitempties(::Type{PackedPartialPriorPassThrough}) = (:id,)
function convert(
::Union{Type{<:AbstractPackedFactor}, Type{<:PackedPartialPriorPassThrough}},
obj::PartialPriorPassThrough,
)
#
po = convert(PackedSamplableBelief, obj.Z)
return PackedPartialPriorPassThrough(po, Int[obj.partial...])
end
function convert(
::Union{Type{<:AbstractFactor}, Type{<:PartialPriorPassThrough}},
obj::PackedPartialPriorPassThrough,
)
#
dens = convert(SamplableBelief, obj.Z)
return PartialPriorPassThrough(dens, tuple(obj.partial...))
end
# | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 539 |
Base.@kwdef struct PackedManifoldKernelDensity <: PackedSamplableBelief
_type::String = "IncrementalInference.PackedManifoldKernelDensity"
varType::String
pts::Vector{Vector{Float64}}
bw::Vector{Float64} = Float64[]
partial::Vector{Int} = Int[]
infoPerCoord::Vector{Float64} = zeros(length(pts[1]))
end
Base.@kwdef struct PackedAliasingScalarSampler <: PackedSamplableBelief
_type::String = "IncrementalInference.PackedAliasingScalarSampler"
domain::Vector{Float64} = [0; 1.0]
weights::Vector{Float64} = [0.5; 0.5]
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1966 |
# TODO, add `<:` for concrete dispatch when using StringThemSamplableBeliefs
StringThemSamplableBeliefs = Union{
<:Uniform,
<:Normal,
<:MvNormal,
<:ZeroMeanDiagNormal,
<:Categorical,
<:DiscreteNonParametric,
<:Rayleigh,
<:BallTreeDensity,
<:ManifoldKernelDensity,
<:AliasingScalarSampler,
<:HeatmapGridDensity,
<:LevelSetGridNormal,
}
## TODO, TBD
# Base.@kwdef struct PackedDiscreteNonParametric <: PackedSamplableBelief
# _type::String = "IncrementalInference.PackedDiscreteNonParametric"
# end
Base.@kwdef struct PackedCategorical <: PackedSamplableBelief
_type::String = "IncrementalInference.PackedCategorical"
p::Vector{Float64} = [1.0;]
end
Base.@kwdef mutable struct PackedUniform <: PackedSamplableBelief
_type::String = "IncrementalInference.PackedUniform"
a::Float64 = 0.0
b::Float64 = 1.0
PackedSamplableTypeJSON::String = "IncrementalInference.PackedUniform"
end
Base.@kwdef struct PackedNormal <: PackedSamplableBelief
_type::String = "IncrementalInference.PackedNormal"
mu::Float64 = 0.0
sigma::Float64 = 1.0
end
Base.@kwdef struct PackedZeroMeanDiagNormal <: PackedSamplableBelief
_type::String = "IncrementalInference.PackedZeroMeanDiagNormal"
diag::Vector{Float64} = ones(1)
end
Base.@kwdef struct PackedZeroMeanFullNormal <: PackedSamplableBelief
_type::String = "IncrementalInference.PackedZeroMeanFullNormal"
cov::Vector{Float64} = ones(1)
end
Base.@kwdef mutable struct PackedDiagNormal <: PackedSamplableBelief
_type::String = "IncrementalInference.PackedDiagNormal"
mu::Vector{Float64} = zeros(1)
diag::Vector{Float64} = ones(1)
end
Base.@kwdef struct PackedFullNormal <: PackedSamplableBelief
_type::String = "IncrementalInference.PackedFullNormal"
mu::Vector{Float64} = zeros(1)
cov::Vector{Float64} = ones(1)
end
Base.@kwdef struct PackedRayleigh <: PackedSamplableBelief
_type::String = "IncrementalInference.PackedRayleigh"
sigma::Float64 = 1.0
end
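# Round-trip sketch for the packed types above, using the pack/unpack helpers
# defined elsewhere in this package:
pd = packDistribution(Normal(1.0, 2.0))   # PackedNormal with mu = 1.0, sigma = 2.0
d  = unpackDistribution(pd)               # Normal(1.0, 2.0)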
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1552 |
Base.@kwdef mutable struct PackedHeatmapGridDensity <: PackedSamplableBelief
_type::String = "IncrementalInference.PackedHeatmapGridDensity"
data::Vector{Vector{Float64}}
domain::Tuple{Vector{Float64}, Vector{Float64}}
hint_callback::String
bw_factor::Float64
N::Int
# _densityFnc::String = "" # only use if storing parched belief data entry label/id
end
Base.@kwdef mutable struct PackedLevelSetGridNormal <: PackedSamplableBelief
_type::String = "IncrementalInference.PackedLevelSetGridNormal"
level::Float64
sigma::Float64
sigma_scale::Float64
# make sure the JSON nested packing works with the serialization overlords
heatmap::PackedHeatmapGridDensity
end
Base.@kwdef mutable struct PackedFluxModelsDistribution <: PackedSamplableBelief
# standardized _type field
_type::String
# shape of the input data
inputDim::Vector{Int}
# shape of the output data
outputDim::Vector{Int}
# actual Flux models (Base64 encoded binary)
mimeTypeModel::String
models::Vector{String}
# the data used for prediction, must be <: AbstractArray
mimeTypeData::String
data::String
# shuffle model predictions relative to particle index at each sampling
shuffle::Bool
# false for default serialization with model info, set true for separate storage of models
serializeHollow::Bool
# TODO remove requirement and standardize sampler API
# specialSampler::Symbol
# TODO, only use ._type. Legacy, field name usage to direct the IIF serialization towards JSON method
PackedSamplableTypeJSON::String
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 4891 |
## packing converters-----------------------------------------------------------
# heavy use of multiple dispatch for converting between packed and original data types during DB usage
function convert(
::Type{PackedFunctionNodeData{P}},
d::FunctionNodeData{T},
) where {P <: AbstractPackedFactor, T <: FactorOperationalMemory}
return PackedFunctionNodeData(
d.eliminated,
d.potentialused,
d.edgeIDs,
convert(P, _getCCW(d).usrfnc!),
d.multihypo,
_getCCW(d).hyporecipe.certainhypo,
d.nullhypo,
d.solveInProgress,
d.inflation,
) # extract two values from ccw for storage -- ccw thrown away
end
## unpack converters------------------------------------------------------------
# see #1424
function reconstFactorData(
dfg::AbstractDFG,
varOrder::AbstractVector{Symbol},
::Type{<:GenericFunctionNodeData{<:CommonConvWrapper{F}}},
packed::GenericFunctionNodeData{<:AbstractPackedFactor},
) where {F <: AbstractFactor}
#
# TODO store threadmodel=MutliThreaded,SingleThreaded in persistence layer
usrfnc = convert(F, packed.fnc)
multihypo, nullhypo = parseusermultihypo(packed.multihypo, packed.nullhypo)
# IIF #1424
vars = map(f -> getVariable(dfg, f), varOrder)
userCache = preambleCache(dfg, vars, usrfnc)
# TODO -- improve _createCCW for hypotheses and certainhypo field recovery when deserializing
# reconstitute from stored data
# FIXME, add threadmodel=threadmodel
# FIXME https://github.com/JuliaRobotics/DistributedFactorGraphs.jl/issues/590#issuecomment-776838053
# FIXME dont know what manifolds to use in ccw
ccw = _createCCW(
vars,
usrfnc;
multihypo,
nullhypo,
certainhypo = packed.certainhypo,
inflation = packed.inflation,
userCache,
attemptGradients = getSolverParams(dfg).attemptGradients,
# Block recursion if NoSolverParams or if set to not attempt gradients.
_blockRecursion=
getSolverParams(dfg) isa NoSolverParams ||
!getSolverParams(dfg).attemptGradients,
)
#
# CommonConvWrapper{typeof(usrfnc)}
ret = FunctionNodeData{typeof(ccw)}(
packed.eliminated,
packed.potentialused,
packed.edgeIDs,
ccw,
packed.multihypo,
packed.certainhypo,
packed.nullhypo,
packed.solveInProgress,
packed.inflation,
)
#
return ret
end
##
"""
$(SIGNATURES)
After deserializing a factor using decodePackedType, use this to
completely rebuild the factor's CCW and user data.
Notes:
- This function is likely to be used for cache heavy factors, e.g. `ObjectAffordanceSubcloud`.
Dev Notes:
- TODO: We should only really do this in-memory if we can't get by without it (review this).
- TODO: needs testing
"""
function rebuildFactorMetadata!(
dfg::AbstractDFG{SolverParams},
factor::DFGFactor,
neighbors = map(vId -> getVariable(dfg, vId), listNeighbors(dfg, factor));
_blockRecursionGradients::Bool=false
)
#
# Set up the neighbor data
# Rebuilding the CCW
fsd = getSolverData(factor)
fnd_new = getDefaultFactorData(
dfg,
neighbors,
getFactorType(factor);
multihypo = fsd.multihypo,
nullhypo = fsd.nullhypo,
# special inflation override
inflation = fsd.inflation,
eliminated = fsd.eliminated,
potentialused = fsd.potentialused,
edgeIDs = fsd.edgeIDs,
solveInProgress = fsd.solveInProgress,
_blockRecursion=_blockRecursionGradients
)
#
factor_ = if typeof(fnd_new) != typeof(getSolverData(factor))
# must change the type of factor solver data FND{CCW{...}}
# create a new factor
factor__ = DFGFactor(
getLabel(factor),
getTimestamp(factor),
factor.nstime,
getTags(factor),
fnd_new,
getSolvable(factor),
Tuple(getVariableOrder(factor)),
)
#
# replace old factor in dfg with a new one
deleteFactor!(dfg, factor; suppressGetFactor = true)
addFactor!(dfg, factor__)
factor__
else
setSolverData!(factor, fnd_new)
# We're not updating here because we don't want
# to solve cloud in loop, we want to make sure this flow works:
# Pull big cloud graph into local -> solve local -> push back into cloud.
# updateFactor!(dfg, factor)
factor
end
#... Copying neighbor data into the factor?
# JT TODO it looks like this is already updated in getDefaultFactorData -> _createCCW
# factormetadata.variableuserdata is deprecated, remove when removing deprecation
# for i in 1:Threads.nthreads()
# ccw_new.fnc.cpt[i].factormetadata.variableuserdata = deepcopy(neighborUserData)
# end
return factor_
end
## =================================================================
## TODO Can code below be deprecated?
## =================================================================
function convert(::Type{Tuple{ManifoldKernelDensity, Float64}}, p::TreeBelief)
#
return (convert(ManifoldKernelDensity, p), p.infoPerCoord)
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 3717 |
# NOTE, user variables and manifolds will require the same definitions, TODO perhaps add into `@defVariable`
# unusual definitions, but all they need to do is pack and unpack as one-to-one
# this step actually occurs separate from the actual variables or factors (with their own manifolds)
# relies on later use of getManifold to give back the same <:AbstractManifold
# NOTE added to DFG.@defVariable
getVariableType(M::Euclidean{TypeParameter{Tuple{N}}}) where {N} = ContinuousEuclid(N)
getVariableType(M::TranslationGroup{TypeParameter{Tuple{N}}}) where {N} = ContinuousEuclid(N)
# getVariableType(M::RealCircleGroup) = Circular()
# getVariableType(M::Circle) = error("Circle manifold is deprecated use RealCircleGroup, will come back when we generalize to non-group Riemannian")
# Type converters for MKD
function Base.convert(::Type{<:SamplableBelief}, ::Type{<:PackedManifoldKernelDensity})
return ManifoldKernelDensity
end
function Base.convert(::Type{<:PackedSamplableBelief}, ::Type{<:ManifoldKernelDensity})
return PackedManifoldKernelDensity
end
"""
$SIGNATURES
Parching refers to drying/hollowing out the object to reduce its memory size to the bare minimum.
Notes
- Likely to be used in combination with [stashing](@ref section_stash_unstash) where large data blobs are independently stored.
- For example, a point cloud stored as a MKD is very large and likely to duplicate the already stored point cloud object values,
- When storing the MKD object, it might make sense to parch the MKD first and persist just the context of the data.
- Reconstituting a full MKD object would then require the inverse, where the parched shell is sized and filled from a separate large data blob.
"""
function parchDistribution(mkd::ManifoldKernelDensity)
pts = getPoints(mkd)
bw = getBW(mkd)[:, 1]
return manikde!(
mkd.manifold,
pts[1:1],
mkd._u0;
bw,
partial = mkd._partial,
infoPerCoord = mkd.infoPerCoord,
)
end
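# Minimal parch sketch (assumes Manifolds.jl and ApproxManifoldProducts):
M = TranslationGroup(2)
mkd = manikde!(M, [randn(2) for _ = 1:100])
small = parchDistribution(mkd)   # retains only pts[1:1] plus bandwidth metadata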
# Data converters for MKD
function packDistribution(mkd::ManifoldKernelDensity)
#
pts = getPoints(mkd)
return PackedManifoldKernelDensity(
"IncrementalInference.PackedManifoldKernelDensity",
# piggy back on InferenceVariable serialization rather than try serialize anything Manifolds.jl
DFG.typeModuleName(getVariableType(mkd.manifold)),
[AMP.makeCoordsFromPoint(mkd.manifold, pt) for pt in pts],
getBW(mkd.belief)[:, 1],
mkd._partial isa Nothing ? collect(1:manifold_dimension(mkd.manifold)) : mkd._partial,
mkd.infoPerCoord,
)
end
function unpackDistribution(dtr::PackedManifoldKernelDensity)
# find InferenceVariable type from string (anything Manifolds.jl?)
M = DFG.getTypeFromSerializationModule(dtr.varType) |> getManifold
vecP = [AMP.makePointFromCoords(M, pt) for pt in dtr.pts]
bw = length(dtr.bw) === 0 ? nothing : dtr.bw
partial = if length(dtr.partial) == manifold_dimension(M) || length(dtr.partial) === 0
nothing
else
dtr.partial
end
return manikde!(M, vecP; bw, partial, infoPerCoord = dtr.infoPerCoord)
end
function Base.convert(::Type{String}, mkd::ManifoldKernelDensity)
#
packedMKD = packDistribution(mkd)
return JSON3.write(packedMKD)
end
# Use general dispatch
# Base.convert(::Type{<:PackedSamplableBelief}, mkd::ManifoldKernelDensity) = convert(String, mkd)
# make module specific
# good references:
# https://discourse.julialang.org/t/converting-string-to-datatype-with-meta-parse/33024/2
# https://discourse.julialang.org/t/is-there-a-way-to-import-modules-with-a-string/15723/6
function Base.convert(::Type{<:ManifoldKernelDensity}, str::AbstractString)
dtr = JSON3.read(str, PackedManifoldKernelDensity)
return unpackDistribution(dtr)
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 3750 |
## Distributions to JSON/Packed types
packDistribution(dtr::Categorical) = PackedCategorical(; p = dtr.p)
packDistribution(dtr::Uniform) = PackedUniform(; a = dtr.a, b = dtr.b)
packDistribution(dtr::Normal) = PackedNormal(; mu = dtr.μ, sigma = dtr.σ)
packDistribution(dtr::ZeroMeanDiagNormal) = PackedZeroMeanDiagNormal(; diag = dtr.Σ.diag)
packDistribution(dtr::ZeroMeanFullNormal) = PackedZeroMeanFullNormal(; cov = dtr.Σ.mat[:])
packDistribution(dtr::DiagNormal) = PackedDiagNormal(; mu = dtr.μ, diag = dtr.Σ.diag)
packDistribution(dtr::FullNormal) = PackedFullNormal(; mu = dtr.μ, cov = dtr.Σ.mat[:])
packDistribution(dtr::Rayleigh) = PackedRayleigh(; sigma = dtr.σ)
function packDistribution(dtr::AliasingScalarSampler)
return PackedAliasingScalarSampler(; domain = dtr.domain, weights = dtr.weights.values)
end
## Unpack JSON/Packed to Distribution types
unpackDistribution(dtr::PackedCategorical) = Categorical(dtr.p ./ sum(dtr.p))
unpackDistribution(dtr::PackedUniform) = Uniform(dtr.a, dtr.b)
unpackDistribution(dtr::PackedNormal) = Normal(dtr.mu, dtr.sigma)
function unpackDistribution(dtr::PackedZeroMeanDiagNormal)
return MvNormal(LinearAlgebra.Diagonal(map(abs2, sqrt.(dtr.diag))))
end # sqrt.(dtr.diag)
function unpackDistribution(dtr::PackedZeroMeanFullNormal)
return MvNormal(reshape(dtr.cov, length(dtr.mu), :))
end
unpackDistribution(dtr::PackedDiagNormal) = MvNormal(dtr.mu, sqrt.(dtr.diag))
function unpackDistribution(dtr::PackedFullNormal)
return MvNormal(dtr.mu, reshape(dtr.cov, length(dtr.mu), :))
end
unpackDistribution(dtr::PackedRayleigh) = Rayleigh(dtr.sigma)
function unpackDistribution(dtr::PackedAliasingScalarSampler)
return AliasingScalarSampler(dtr.domain, dtr.weights ./ sum(dtr.weights))
end
# ## strip field from NamedTuple
# function _delete( nt::Union{<:NamedTuple, <:Dict{K,T}},
# key::K=:_type ) where {K,T}
# #
# kys = keys(nt)
# # rm index
# ridx = findfirst(k->k==key, kys)
# # keep indices
# idxs = setdiff(1:length(nt), ridx)
# # to Dict
# dict = OrderedDict{K,Any}()
# for id in idxs
# ky = kys[id]
# dict[ky] = nt[ky]
# end
# #
# NamedTuple{Tuple(keys(dict))}(values(dict))
# end
## ===========================================================================================
## FIXME, should be obsolete and must be removed
## ===========================================================================================
# NOTE part of new effort to overhaul the SamplableBelief serialization approach
function convert(::Type{<:PackedSamplableBelief}, obj::StringThemSamplableBeliefs)
return packDistribution(obj)
end
convert(::Type{<:SamplableBelief}, obj::PackedSamplableBelief) = unpackDistribution(obj)
function convert(::Type{<:PackedSamplableBelief}, nt::Union{NamedTuple, JSON3.Object})
distrType = DFG.getTypeFromSerializationModule(nt._type)
return distrType(; nt...)
end
##===================================================================================
# FIXME ON FIRE, must deprecate nested JSON written fields in all serialization
# TODO is string necessary, because unpacking templated e.g. PackedType{T} has problems, see DFG #668
function convert(::Type{String}, dtr::StringThemSamplableBeliefs)
return JSON3.write(packDistribution(dtr))
end
function convert(::Type{<:SamplableBelief}, str_obj::AbstractString)
#
# go from stringified to generic packed (no type info)
_pck = JSON3.read(str_obj)
  # NOTE, get the packed type from the strong assumption that a field `_type` exists in the JSON text
T = DFG.getTypeFromSerializationModule(_pck._type)
# unpack again to described packedType
pckT = JSON3.read(str_obj, T)
# unpack to regular <:SamplableBelief
return unpackDistribution(pckT)
end
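# Sketch of the string round trip above; the JSON text carries the `_type`
# field used to recover the packed type before unpacking:
s = convert(String, Normal(0.0, 1.0))   # JSON3 text of a PackedNormal
d = convert(SamplableBelief, s)         # back to Normal(0.0, 1.0)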
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 2667 |
# A more specialized constructor to help serialization processes reading Any[Any[1,2,3,..]] rather than floats.
function PackedHeatmapGridDensity(
_type::String,
data::AbstractVector, # {Any}
domain::AbstractVector, # {Any}
hint_callback::String,
bw_factor::Float64,
N::Int64,
)
#
# TODO data might not be type Float64, should store and recover as performance enhancement (if user specified different element type)
data_ = Vector{Vector{Float64}}(undef, length(data))
for (i, dat) in enumerate(data)
dat_ = replace(dat, nothing => 0)
data_[i] = float.(dat_)
end
domain_ = tuple(float.(domain[1]), float.(domain[2]))
return PackedHeatmapGridDensity(_type, data_, domain_, hint_callback, bw_factor, N)
end
function packDistribution(obj::HeatmapGridDensity)
#
data_ = obj.data
@cast data[j][i] := data_[i, j]
# str = convert(SamplableBelief, obj.densityFnc)
N = Npts(obj.densityFnc)
# TODO misses the hint...
return PackedHeatmapGridDensity(
"IncrementalInference.PackedHeatmapGridDensity",
data,
obj.domain,
"",
obj.bw_factor,
N,
)
end
function packDistribution(dtr::LevelSetGridNormal)
return PackedLevelSetGridNormal(
"IncrementalInference.PackedLevelSetGridNormal",
dtr.level,
dtr.sigma,
dtr.sigma_scale,
convert(PackedHeatmapGridDensity, dtr.heatmap),
)
end
#
function parchDistribution(hgd::HeatmapGridDensity)
@assert 2 <= size(hgd.data, 1) "parchDistribution of HeatmapGridDensity can only be done when `.data` is larger than 2x1"
data = Matrix{eltype(hgd.data)}(undef, 2, 2)
data[1, 1] = hgd.data[1, 1]
# data[2,2] = hgd.data[2,2] # disable since data might be a single column in unusual cases
data[2, 1] = size(hgd.data, 1)
data[1, 2] = size(hgd.data, 2)
domain = hgd.domain
hint_callback = hgd.hint_callback
bw_factor = hgd.bw_factor
densityFnc = parchDistribution(hgd.densityFnc)
return HeatmapGridDensity(data, domain, hint_callback, bw_factor, densityFnc)
end
function unpackDistribution(obj::PackedHeatmapGridDensity)
#
# do intermediate conversions
data_ = obj.data
data__ = map(x -> collect(x), data_)
@cast data[i, j] := data__[j][i]
_data__ = collect(data)
# densFnc = convert(SamplableBelief, obj.densityFnc)
# build the final object, misses the hint...
return HeatmapGridDensity(
_data__,
obj.domain,
    obj.hint_callback == "" ? nothing : nothing, # hint callback is not serialized, so nothing can be recovered here
obj.bw_factor;
N = obj.N,
)
end
function unpackDistribution(dtr::PackedLevelSetGridNormal)
return LevelSetGridNormal(
dtr.level,
dtr.sigma,
dtr.sigma_scale,
convert(HeatmapGridDensity, dtr.heatmap),
)
end
# | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 287 |
# export Circular, Circle
# """
# $(TYPEDEF)
# Circular is a `Manifolds.Circle{ℝ}` mechanization of one rotation, with `theta in [-pi,pi)`.
# """
# @defVariable Circular Circle() [0.0;]
# Base.convert(::Type{<:MB.AbstractManifold}, ::InstanceType{Circular}) = Manifolds.Circle()
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1434 |
## Euclid 1
"""
$TYPEDEF
Continuous Euclidean variable of dimension `N` representing a Position in cartesian space.
"""
struct Position{N} <: InferenceVariable end
Position(N::Int) = Position{N}()
# not sure if these overloads are necessary since DFG 775?
DFG.getManifold(::InstanceType{Position{N}}) where {N} = TranslationGroup(N)
function DFG.getDimension(val::InstanceType{Position{N}}) where {N}
return manifold_dimension(getManifold(val))
end
DFG.getPointType(::Type{Position{N}}) where {N} = SVector{N, Float64}
DFG.getPointIdentity(M_::Type{Position{N}}) where {N} = @SVector(zeros(N)) # identity_element(getManifold(M_), zeros(N))
function Base.convert(
::Type{<:ManifoldsBase.AbstractManifold},
::InstanceType{Position{N}},
) where {N}
return TranslationGroup(N)
end
#
"""
$(TYPEDEF)
Most basic continuous scalar variable in a `::DFG.AbstractDFG` object.
Alias of `Position{1}`
"""
const ContinuousScalar = Position{1}
const ContinuousEuclid{N} = Position{N}
const Position1 = Position{1}
const Position2 = Position{2}
const Position3 = Position{3}
const Position4 = Position{4}
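# Minimal sketch: the aliases above are interchangeable with Position{N}.
fg = initfg()
addVariable!(fg, :p, Position2)                       # same as Position{2}
@assert getManifold(Position2) == TranslationGroup(2)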
## Circular
"""
$(TYPEDEF)
Circular is a `Manifolds.Circle{ℝ}` mechanization of one rotation, with `theta in [-pi,pi)`.
"""
@defVariable Circular RealCircleGroup() [0.0;]
#TODO This is an example of what we want working, possible issue upstream in Manifolds.jl
# @defVariable Circular RealCircleGroup() Scalar(0.0)
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 3451 | # sample from weights
# import IncrementalInference: AliasingScalarSampler
"""
$(TYPEDEF)
Sampler from intensity map given Euclidean domain `x` and probability weights `p_x`.
Example
`AliasingScalarSampler(x::Vector{<:Real}, p_x::Vector{<:Real}; SNRfloor::Float64=0.0)`
"""
struct AliasingScalarSampler
domain::Vector{Float64}
weights::StatsBase.ProbabilityWeights
# inner constructor
function AliasingScalarSampler(
x::Vector{<:Real},
p_x::Vector{<:Real};
SNRfloor::Float64 = 0.0,
)
#
# pxf = Float64.(p_x)
# pxf .-= quantile(pxf, SNRfloor)
# pxf[pxf .< 0.0] = 0.0
# pxf ./= norm(pxf)
# new implementation : pxf should be an empirical pmf before use in statsbase
pxf = Float64.(p_x)
pxf[pxf .< 0.0] .= 0.0 # no negative values!
pxf ./= sum(pxf) # must sum to 1
pxf2 = pxf .- quantile(pxf, SNRfloor) # remove lowest quantile
pxf2[pxf2 .< 0.0] .= 0.0
pxf2s = sum(pxf2)
pxf[:] = 1e-10 < pxf2s ? pxf2 : pxf
pxf ./= sum(pxf)
if sum(isnan.(pxf)) == 0
nothing
else
error("AliasingScalarSampler got NaN because of particular values in p_x")
end
# pxf .-= quantile(pxf,SNRfloor) # remove lowest quantile
# pxf[pxf.<0.0] .= 0.0
# pxf ./=sum(pxf)
wim = StatsBase.ProbabilityWeights(pxf)
return new(x, wim)
end
end
function sampleTangent(
M::AbstractDecoratorManifold, # stand-in type to restrict to just group manifolds
z::AliasingScalarSampler,
p = getPointIdentity(M),
)
return hat(M, p, SVector{manifold_dimension(M)}(rand(z)))
end
function rand!(ass::AliasingScalarSampler, smpls::Array{Float64})
StatsBase.alias_sample!(ass.domain, ass.weights, smpls)
return nothing
end
function rand(ass::AliasingScalarSampler, N::Int = 1)
smpls = zeros(N)
rand!(ass, smpls)
return smpls
end
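# 1D sampling sketch: weights need not be normalized; negative weights are
# clipped and the SNRfloor quantile subtracted inside the constructor.
ass = AliasingScalarSampler(collect(1.0:5.0), [0.1, 0.0, 0.4, 0.4, 0.1])
smpls = rand(ass, 100)   # 100 draws from the weighted discrete domain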
function convert(::Type{AliasingScalarSampler}, str::AS) where {AS <: AbstractString}
sstr = split(split(str, "AliasingScalarSampler")[end], '[')
sstr = sstr[length.(sstr) .> 2]
ssstr = split.(sstr, ']')
domain = parse.(Float64, strip.(split(ssstr[1][1], ',')))
weight = parse.(Float64, strip.(split(ssstr[2][1], ',')))
return AliasingScalarSampler(domain, weight)
end
# str = "IncrementalInference.AliasingScalarSampler([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0,24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0], [0.156102, 0.117163, 0.221049, 0.275905, 0.0488494, 0.0731541, 0.107584,0.313848, 0.0309002, 0.0, 0.0384554, 0.155308, 0.276917, 0.0271168, 0.293263, 0.171316, 0.27459, 0.175323, 0.0535772, 0.181663, 0.295042, 0.104593, 0.0472137, 0.326016, 0.055283, 0.0737767, 0.302647, 0.0291257, 0.0206642, 0.223375])"
##===================================================================================
## Notes for beyond 1D sampling
##===================================================================================
# grid = reshape(1:10000, 100,100)
# intensity = zeros(100,100)
# mn = MvNormal([50;50],[20;20.0])
# for i in 1:100, j in 1:100
# intensity[i,j] = pdf(mn, [i+0.0;j])
# end
# bss = AliasingScalarSampler(grid[:], intensity[:])
# pts1d = rand(bss, 1000) .|> Int
# # 1000-element Vector{Int64}:
# # 7171
# # 6983
# # 3632
# # ...(y->findall(x->x==y,grid)).(pts1d)
# # 1000-element Vector{Vector{CartesianIndex{2}}}:
# # [CartesianIndex(71, 72)]
# # [CartesianIndex(83, 70)]
# # [CartesianIndex(32, 37)]
# # ...
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 3308 |
import DistributedFactorGraphs: getVariableType
"""
CliqStatus
Clique status message enumerated type with status.
"""
@enum CliqStatus NULL NO_INIT INITIALIZED UPSOLVED MARGINALIZED DOWNSOLVED UPRECYCLED ERROR_STATUS
# Used for UPWARD_DIFFERENTIAL, UPWARD_COMMON, DOWNWARD_COMMON marginalized types
abstract type MessagePassDirection end
struct UpwardPass <: MessagePassDirection end
struct DownwardPass <: MessagePassDirection end
abstract type MessageType end
struct NonparametricMessage <: MessageType end
struct ParametricMessage <: MessageType end
abstract type PackedSamplableBelief end
StructTypes.StructType(::Type{<:PackedSamplableBelief}) = StructTypes.UnorderedStruct()
const SamplableBelief = Union{
<:Distributions.Distribution,
<:KDE.BallTreeDensity, # FIXME deprecate
<:AMP.ManifoldKernelDensity,
<:AliasingScalarSampler,
<:FluxModelsDistribution,
<:HeatmapGridDensity,
<:LevelSetGridNormal,
}
#Supported types for parametric
const ParametricTypes = Union{Normal, MvNormal}
"""
$TYPEDEF
INTERMEDIATE DATA STRUCTURE DURING REFACTORING.
Representation of the belief of a single variable.
Notes:
- we want to send the joint, this is just to resolve consolidation #459 first.
- Long term objective is single joint definition, likely called `LikelihoodMessage`.
"""
struct TreeBelief{T <: InferenceVariable, P, M <: MB.AbstractManifold}
val::Vector{P}
bw::Array{Float64, 2}
infoPerCoord::Vector{Float64}
# see DFG #603, variableType defines the domain and manifold as well as group operations for a variable in the factor graph
variableType::T
# TODO -- DEPRECATE
manifold::M # Tuple{Vararg{Symbol}} # NOTE added during #459 effort
# only populated during up as solvableDims for each variable in clique, #910
solvableDim::Float64
end
function TreeBelief(
p::ManifoldKernelDensity,
ipc::AbstractVector{<:Real} = [0.0;],
variableType::T = ContinuousScalar(),
manifold = getManifold(variableType),
solvableDim::Real = 0,
) where {T <: InferenceVariable}
return TreeBelief(getPoints(p), getBW(p), ipc, variableType, manifold, solvableDim)
end
function TreeBelief(
val::AbstractVector{P},
bw::Array{Float64, 2},
ipc::AbstractVector{<:Real} = [0.0;],
variableType::T = ContinuousScalar(),
manifold::M = getManifold(variableType),
solvableDim::Real = 0,
) where {P, T <: InferenceVariable, M <: MB.AbstractManifold}
return TreeBelief{T, P, M}(val, bw, ipc, variableType, manifold, solvableDim)
end
function TreeBelief(vnd::VariableNodeData{T}, solvDim::Real = 0) where {T}
return TreeBelief(
vnd.val,
vnd.bw,
vnd.infoPerCoord,
getVariableType(vnd),
getManifold(T),
solvDim,
)
end
function TreeBelief(vari::DFGVariable, solveKey::Symbol = :default; solvableDim::Real = 0)
return TreeBelief(getSolverData(vari, solveKey), solvableDim)
end
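# Sketch, assuming a graph `fg` with a solved variable :x0: snapshot its
# current belief for tree message passing.
tb = TreeBelief(getVariable(fg, :x0))   # uses the :default solveKey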
#
getVariableType(tb::TreeBelief) = tb.variableType
getManifold(treeb::TreeBelief) = getManifold(treeb.variableType)
function compare(t1::TreeBelief, t2::TreeBelief)
TP = true
TP = TP && norm(t1.val - t2.val) < 1e-5
TP = TP && norm(t1.bw - t2.bw) < 1e-5
TP = TP && isapprox(t1.infoPerCoord, t2.infoPerCoord; atol = 1e-4)
TP = TP && t1.variableType == t2.variableType
TP = TP && abs(t1.solvableDim - t2.solvableDim) < 1e-5
return TP
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 3312 |
abstract type AbstractMaxMixtureSolver end
abstract type CalcFactor{T<:AbstractFactor} end
"""
$TYPEDEF
User factor interface method for computing the residual values of factors.
Notes
- Also see #467 on API consolidation
```julia
function (cf::CalcFactor{<:LinearRelative})(res::AbstractVector{<:Real}, z, xi, xj)
cf.variablelist
cf.cache
# generic on-manifold residual function
return distance(z, distance(xj, xi))
end
```
DevNotes
- Follow the Github project in IIF to better consolidate CCW FMD CPT CF CFM
- TODO CalcFactorNormSq is a step towards having a dedicated structure for non-parametric solve.
CalcFactorNormSq will calculate the Norm Squared of the factor.
Related
[`CalcFactorMahalanobis`](@ref), [`CommonConvWrapper`](@ref)
"""
struct CalcFactorNormSq{
FT <: AbstractFactor,
X,
C,
VT <: Tuple,
M <: AbstractManifold,
MEAS,
S
} <: CalcFactor{FT}
""" the interface compliant user object functor containing the data and logic """
factor::FT
""" what is the sample (particle) id for which the residual is being calculated """
_sampleIdx::Int
""" legacy support for variable values old functor residual functions.
TBD, this is still being used by DERelative factors. """
  _legacyParams::X #TODO rename to varValsHypo for consistent naming? and not legacy any more
""" allow threading for either sampling or residual calculations (workaround for thread yield issue) """
_allowThreads::Bool
""" user cache of arbitrary type, overload the [`preambleCache`](@ref) function. NOT YET THREADSAFE """
cache::C
## TODO Consolidation WIP with FactorMetadata
# full list of variables connected to the factor
# TODO make sure this list is of the active hypo only
fullvariables::VT # Vector{<:DFGVariable} # FIXME change to tuple for better type stability
# which index is being solved for?
solvefor::Int
manifold::M
measurement::MEAS #TBD make measurement only one sample per calc factor
slack::S
end
#TODO deprecate after CalcFactor is updated to CalcFactorNormSq
function CalcFactor(args...; kwargs...)
Base.depwarn(
"`CalcFactor` changed to an abstract type, use CalcFactorNormSq, CalcFactorMahalanobis, or CalcFactorResidual",
:CalcFactor
)
CalcFactorNormSq(args...; kwargs...)
end
"""
$TYPEDEF
Internal parametric extension to [`CalcFactor`](@ref) used for buffering measurement and calculating Mahalanobis distance
Related
[`CalcFactor`](@ref)
"""
struct CalcFactorMahalanobis{
FT,
N,
C,
MEAS<:AbstractArray,
D,
L,
S <: Union{Nothing, AbstractMaxMixtureSolver}
} <: CalcFactor{FT}
faclbl::Symbol
factor::FT
cache::C
varOrder::Vector{Symbol}
meas::NTuple{N, MEAS}
iΣ::NTuple{N, SMatrix{D, D, Float64, L}}
specialAlg::S
end
struct CalcFactorResidual{
FT <: AbstractFactor,
N,
D,
MEAS <: AbstractArray,
L,
C,
} <: CalcFactor{FT}
faclbl::Symbol
factor::FT
varOrder::NTuple{N, Symbol}
varOrderIdxs::NTuple{N, Int}
meas::MEAS
sqrt_iΣ::SMatrix{D, D, Float64, L}
cache::C
end
_nvars(::CalcFactorResidual{FT, N, D, MEAS, L, C}) where {FT, N, D, MEAS, L, C} = N
# _typeof_meas(::CalcFactorManopt{FT, C, D, L, MEAS, N}) where {FT, C, D, L, MEAS, N} = MEAS
DFG.getDimension(::CalcFactorResidual{FT, N, D, MEAS, L, C}) where {FT, N, D, MEAS, L, C} = D
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 5020 | # Clique types
# this is a developmental type, will be standardized after conclusion of #1010
# TODO resolve type instability
const MsgRelativeType = Vector{
NamedTuple{(:variables, :likelihood), Tuple{Vector{Symbol}, <:DFG.AbstractRelative}},
}
const MsgPriorType = Dict{Symbol, MsgPrior{<:ManifoldKernelDensity}}
"""
$TYPEDEF
Internal development type used during consolidation. Stores relative and prior information making up a joint likelihood
message passed upward on the Bayes tree.
"""
mutable struct _MsgJointLikelihood
relatives::IIF.MsgRelativeType
priors::IIF.MsgPriorType
end
"""
$(TYPEDEF)
Belief message for message passing on the tree. This should be considered an incomplete joint probability.
Notes:
- belief -> Dictionary of [`TreeBelief`](@ref)
- variableOrder -> Ordered variable id list of the separators in cliqueLikelihood
- cliqueLikelihood -> marginal distribution (<: `SamplableBelief`) over clique separators.
- Older names include: productFactor, Fnew, MsgPrior, LikelihoodMessage
DevNotes:
- Used by both nonparametric and parametric.
- Objective for parametric case: `MvNormal(μ=[:x0;:x2;:l5], Σ=[+ * *; * + *; * * +])`.
- Part of the consolidation effort, see #459.
- Better conditioning for joint structure in the works using deconvolution, see #579, #635.
- TODO confirm why <: Singleton.
$(TYPEDFIELDS)
"""
mutable struct LikelihoodMessage{T <: MessageType} <: AbstractPrior
sender::NamedTuple{(:id, :step), Tuple{Int, Int}}
status::CliqStatus
belief::Dict{Symbol, TreeBelief} # will eventually be deprecated
variableOrder::Vector{Symbol}
cliqueLikelihood::Union{Nothing, SamplableBelief} # TODO drop the Union
msgType::T
hasPriors::Bool
# this is different from belief[].inferdim, as the total available infer dims remaining during down msgs -- see #910
childSolvDims::Dict{Int, Float64}
# calc differential factors for joint in the child clique
jointmsg::_MsgJointLikelihood
# diffJoints::Vector{NamedTuple{(:variables, :likelihood), Tuple{Vector{Symbol},DFG.AbstractRelative}}}
end
"""
$TYPEDEF
Cache messages being passed on the tree, one container per clique.
Notes
- See model 2 (?) on IIF #674
"""
mutable struct MessageBuffer
# up receive message buffer (multiple children, multiple messages)
upRx::Dict{Int, LikelihoodMessage}
# down receive message buffer (one parent)
downRx::Union{Nothing, LikelihoodMessage}
# RESERVED up outgoing message buffer (one parent)
upTx::Union{Nothing, LikelihoodMessage}
# RESERVED down outgoing message buffer (multiple children but one message)
downTx::Union{Nothing, LikelihoodMessage}
end
MessageBuffer() = MessageBuffer(Dict{Int, LikelihoodMessage}(), nothing, nothing, nothing)
##==============================================================================
## BayesTreeNodeData
##==============================================================================
"""
$(TYPEDEF)
Data structure for each clique in the Bayes (Junction) tree.
"""
mutable struct BayesTreeNodeData
status::CliqStatus
frontalIDs::Vector{Symbol}
separatorIDs::Vector{Symbol}
inmsgIDs::Vector{Symbol} # Int
potIDs::Vector{Symbol} # Int # this is likely redundant TODO -- remove
potentials::Vector{Symbol}
partialpotential::Vector{Bool}
dwnPotentials::Vector{Symbol}
dwnPartialPotential::Vector{Bool}
cliqAssocMat::Array{Bool, 2}
cliqMsgMat::Array{Bool, 2}
directvarIDs::Vector{Symbol}
directFrtlMsgIDs::Vector{Symbol}
msgskipIDs::Vector{Symbol}
itervarIDs::Vector{Symbol}
directPriorMsgIDs::Vector{Symbol}
debug::Any
debugDwn::Any
allmarginalized::Bool
initialized::Symbol
upsolved::Bool
downsolved::Bool
isCliqReused::Bool # holdover
# JT Local messages saved for cache and debugging, see IIF #675
messages::MessageBuffer
end
## Packed types for serialization
mutable struct PackedBayesTreeNodeData
frontalIDs::Vector{Symbol}
separatorIDs::Vector{Symbol}
inmsgIDs::Vector{Symbol} # Int
potIDs::Vector{Symbol} # Int # this is likely redundant TODO -- remove
potentials::Vector{Symbol}
partialpotential::Vector{Bool}
dwnPotentials::Vector{Symbol}
dwnPartialPotential::Vector{Bool}
cliqAssocMat::Array{Bool, 2}
cliqMsgMat::Array{Bool, 2}
directvarIDs::Vector{Symbol} # Int
directFrtlMsgIDs::Vector{Symbol} # Int
msgskipIDs::Vector{Symbol} # Int
itervarIDs::Vector{Symbol} # Int
directPriorMsgIDs::Vector{Symbol} # Int
end
## Full Clique Types
struct CliqueId{T}
value::T
end
"""
$(TYPEDEF)
Structure to store clique data
DEV NOTES: To replace TreeClique completely
$(FIELDS)
"""
mutable struct TreeClique
"Interger id unique within a tree with userId, robotId, sessionId"
id::CliqueId{Int64} # not to be confused with the underlying index used by LightGraphs.jl, see issue #540
"Data as `BayesTreeNodeData`"
data::BayesTreeNodeData
"Drawing attributes"
attributes::Dict{String, Any}
#solveInProgress #on a clique level a "solve in progress" might be very handy
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 4063 | # entities immediately available as private members in IIF, but requires other packages for actual use
# only export once the convenience constructors are available along with conditional Interpolations dependency
"""
$TYPEDEF
Generate a `<:SamplableBelief` from a heatmap, e.g. a digital elevation model.
Notes
- Give in heatmap and grid, and object becomes a density function that can also be sampled.
- Sampling can be more nuanced by injecting a hint, or location of interest:
- Mostly aimed at limiting compute when faced with massive heatmaps, e.g. nav units are 10's but map is ~1e6.
- Density approximation is constructed on a Gaussian measurement assumption of level set and sigma variation.
- Assume data is on a regular grid on TranslationGroup(2)
- Assume in early implementations that `x_grid, y_grid = domain`
- Serialization currently does not store the hint callback.
- To save space, serialization does not store the internal density, but rather reconstructs at unpacking.
DevNotes:
- Generalize to scalar fields on any Manifold.
- Generalize to vector fields if interpolation is sensible.
- TODO standardize with AliasingScalarSampler see IIF #1341
- TODO store the hint function (at least any easy cases)
"""
struct HeatmapGridDensity{
T <: Real,
H <: Union{<:Function, Nothing},
B <: ManifoldKernelDensity,
}
"""intensity data, on regular grid"""
data::Matrix{T}
"""domain as grid or locations at which scalar intensity elements exist"""
domain::Tuple{<:AbstractVector{T}, <:AbstractVector{T}}
"""use location hint to focus sampling to specific area of data, requires additional info at `getSample`
assumed the callback will return _____ NOT ACTIVE YET"""
hint_callback::H
"""general rule for kernel bandwidths used in construction of grid density, e.g. bw is 0.7 of domain grid spacing"""
bw_factor::T
"""density function as samplable representation of the data over the domain"""
densityFnc::B
end
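# Construction sketch on a regular grid; the positional constructor
# HeatmapGridDensity(data, domain, hint_callback, bw_factor; N) is the one
# used by unpackDistribution elsewhere in this package.
x = collect(-10.0:0.5:10.0)
y = collect(-10.0:0.5:10.0)
img = [exp(-(xi^2 + yi^2) / 25) for xi in x, yi in y]
hgd = HeatmapGridDensity(img, (x, y), nothing, 0.7)   # bw_factor of grid spacing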
##
"""
$TYPEDEF
Generate a `<:SamplableBelief` by selecing normal (Gaussian) deviation from a Level Set of a heatmap, e.g. known altitude on a digital elevation model (DEM).
Notes
- Give in heatmap and grid, a level set gets generated, and the object becomes a density function for sampling.
- Sampling can be more nuanced by injecting a hint, or location of interest:
- Mostly aimed at limiting compute when faced with massive heatmaps, e.g. nav units are 10's but map is ~1e6.
- Density approximation is constructed on a Gaussian measurement assumption of level set and sigma variation.
- Assume data is on a regular grid on TranslationGroup(2)
DevNotes:
- Generalize to scalar fields on any Manifold.
- Generalize to vector fields if interpolation is sensible.
See also: [`HeatmapGridDensity`](@ref), [`ManifoldKernelDensity`](@ref)
"""
struct LevelSetGridNormal{T <: Real, H <: HeatmapGridDensity}
level::T
"""one sigma value associated with measurement noise of `level` against `data`"""
sigma::T
"""make samplible region of interest from data be `sigma_scale` from `level`, e.g. 3*sigma."""
sigma_scale::T
"""HeatmapDensityGrid is used to sample the LevelSet regions of interest"""
heatmap::H
end
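# Sketch: sample near a known level (e.g. altitude 3.0 +- 0.2) of the heatmap
# constructed above; positional fields are (level, sigma, sigma_scale, heatmap).
lsgn = LevelSetGridNormal(3.0, 0.2, 3.0, hgd)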
##
"""
$SIGNATURES
Distribution made from neural networks.
Notes:
- Weak dependency, extension functionality loads when using Flux.jl.
"""
struct FluxModelsDistribution{ID, OD, P, D <: AbstractArray}
""" shape of the input data """
inputDim::NTuple{ID, Int}
""" shape of the output data """
outputDim::NTuple{OD, Int}
""" actual Flux models """
models::Vector{P}
""" the data used for prediction, must be <: AbstractArray """
data::D
""" shuffle model predictions relative to particle index at each sampling """
shuffle::Base.RefValue{Bool}
""" EXPL: false for default serialization with model info, set true for separate storage of models. TODO rename as to useStashing, see [docs](@ref section_stash_unstash) """
serializeHollow::Base.RefValue{Bool}
# # TODO remove requirement and standardize sampler API
# specialSampler::Function
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1272 |
"""
$TYPEDEF
Build a full ODE solution into a relative factor to condense possible sensor data into a relative transformation,
but keeping the parameter estimation process fluid. Assumes first and second variable in order
are of same dimension and compatible manifolds, such that ODE runs from Xi to Xi+1 on all
dimensions. The internal state vector can be decoupled onto a different domain as needed.
Notes
- Based on DifferentialEquations.jl
- `getSample` step does the `solve(ODEProblem)` step.
- `tspan` is taken from variables only once at object construction -- i.e. won't detect changed timestamps.
- Regular factor evaluation is done as full dimension `AbstractRelativeRoots`, and is a basic linear difference.
DevNotes
- FIXME see 1025, `multihypo=` will not yet work.
- FIXME Lots of consolidation and standardization to do, see RoME.jl #244 regarding Manifolds.jl.
- TODO does not yet handle case where a factor spans across two timezones.
"""
struct DERelative{T <: InferenceVariable, P, D} <: AbstractManifoldMinimize # AbstractRelativeMinimize
domain::Type{T}
forwardProblem::P
backwardProblem::P
""" second element of this data tuple is additional variables that will be passed down as a parameter """
data::D
# specialSampler::Function
end | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 2236 | # entities used or calculating gradients on factors
export FactorGradientsCached!
"""
$TYPEDEF
Container for calculating gradients on a factor. See helper constructor functions.
Take a graph with one factor function ``f`` between two variables, based on a measurement ``z``
```
(:x1)-[:x1x2f1]-(:x2)
```
The gradient (a matrix operating on coordinates) is defined as ``(▽f - I)`` according to
````math
0 = (▽ f - I) dC
````
where the augmented vector of coordinates is ``dC = [dX_1, dX_2]'``. The idea is that ``dC`` should be
in the null space of ``(▽f - I)`` as per normal partial derivative (vector) decomposition;
````math
dX_1 = {∂f}/{∂X_2'} dX_2 + ...
````
modeled in coordinates.
Notes
- Memory is cached in the object.
- Gradients are updated as a functor call to the object, see example.
- The factor is assumed to be in "tension or compression" which manifests as `_slack` in the calculated factor residual which is also cached.
DevNotes
- TODO type stability
- TODO in-place memory operations
- TODO relax hard coordinate assumptions and instead allow direct operator definitions with the manifold tangent ``T_f M``.
Example
```julia
# problem set
pp = LinearRelative(MvNormal([10;0],[1 0; 0 1]))
measurement = ([10.0;0.0],)
varTypes = (ContinuousEuclid{2}, ContinuousEuclid{2})
pts = ([0;0.0], [9.5;0])
# build and evaluate the functor object
grad = FactorGradientsCached!(pp, varTypes, measurement, pts);
J = grad(measurement..., pts...)
# cached value stored
J_ = grad.cached_gradients
# double check things are working
@assert norm(J_ - J) < 1e-6
```
Related
[`calcFactorResidualTemporary`](@ref), [`_buildGraphByFactorAndTypes`](@ref)
"""
mutable struct FactorGradientsCached!{F <: AbstractRelative, S, M, P, G, L}
dfgfct::DFGFactor{<:CommonConvWrapper{F}}
# cached jacobian matrix of gradients
cached_gradients::Matrix{Float64}
# likely <:AbstractVector, while CalcFactor residuals are vectors in Rn but could change to Tangent vectors
slack_residual::S
measurement::M
currentPoints::P
# factor evaluations are performed in-situ to a AbstractDFG object
_tfg::G
# nested-tuple of gradient lambda functions
_λ_fncs::L
_coord_sizes::Vector{Int}
# gradient delta
_h::Float64
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 3580 |
"""
$(TYPEDEF)
Main factor memory container used during inference operations -- i.e. values specific to one complete convolution operation
Notes
- CCW does not get serialized / persisted
- At writing, the assumption is there is just one CCW per factor
- Any multithreaded design needs to happen as sub-containers inside CCW or otherwise, to carry separate memory.
- Since #467, `CalcFactor` is the only type 'seen by user' during `getSample` or function residual calculations `(cf::CalcFactor{<:MyFactor})`, s.t. `MyFactor <: AbstractRelative___`
- There also exists a `CalcFactorMahalanobis` for parameteric computations using as much of the same mechanics as possible.
- CCW is a consolidated object of other previous types: FMD, CPT, CF, CFM.
Related
[`CalcFactor`](@ref), [`CalcFactorMahalanobis`](@ref)
"""
Base.@kwdef struct CommonConvWrapper{
T <: AbstractFactor,
VT <: Tuple,
TP <: Base.RefValue{<:Tuple},
CT,
AM <: AbstractManifold,
HR <: HypoRecipeCompute,
MT,
G
} <: FactorOperationalMemory
# Basic factor topological info
""" Values consistent across all threads during approx convolution """
usrfnc!::T # user factor / function
""" Ordered tuple of all variables connected to this factor """
fullvariables::VT
# shortcuts to numerical containers
""" Numerical containers for all connected variables. Hypo selection needs to be passed
to each hypothesis evaluation event on user function via CalcFactor, #1321.
Points directly at the variable VND.val (not a deepcopy). """
varValsAll::TP
""" dummy cache value to be deep copied later for each of the CalcFactor instances """
dummyCache::CT = nothing
# derived config parameters for this factor
""" Factor manifold definition for frequent use (not the variables manifolds) """
manifold::AM = getManifold(usrfnc!)
""" Which dimensions does this factor influence. Sensitive (mutable) to both which 'solvefor index' variable and whether the factor is partial dimension """
partialDims::Vector{<:Integer} = collect(1:manifold_dimension(manifold))
""" is this a partial constraint as defined by the existance of factor field `.partial::Tuple` """
partial::Bool = false
""" probability that this factor is wholly incorrect and should be ignored during solving """
nullhypo::Float64 = 0.0
""" inflationSpread particular to this factor (by how much to dispurse the belief initial values before numerical optimization is run). Analogous to stochastic search """
inflation::Float64 = SolverParams().inflation
""" multihypo specific field containers for recipe of hypotheses to compute """
hyporecipe::HR = HypoRecipeCompute(;activehypo=collect(1:length(varValsAll)))
# buffers and indices to point numerical computations to specific memory locations
""" user defined measurement values for each approxConv operation
FIXME make type stable, JT should now be type stable if rest works.
SUPER IMPORTANT, if prior=>point or relative=>tangent, see #1661
can be a Vector{<:Tuple} or more direct Vector{<: pointortangenttype} """
measurement::Vector{MT} = Vector(Vector{Float64}())
""" which index is being solved for in params? """
varidx::Base.RefValue{Int} = Ref(1)
""" Consolidation from CPT, the actual particle being solved at this moment """
particleidx::Base.RefValue{Int} = Ref(1)
""" working memory to store residual for optimization routines """
res::Vector{Float64} = zeros(manifold_dimension(manifold))
""" experimental feature to embed gradient calcs with ccw """
_gradients::G = nothing
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 746 |
Base.@kwdef struct HypoRecipe
certainidx::Vector{Int}
allelements::Vector{Vector{Int}}
activehypo::Vector{Tuple{Int,Vector{Int}}}
mhidx::Vector{Int}
end
Base.@kwdef struct HypoRecipeCompute{
HP <: Union{Nothing, <:Distributions.Categorical{Float64, <:AbstractVector{Float64}}},
CH <: Union{Nothing, <:AbstractVector{<:Integer}},
}
""" multi hypothesis settings #NOTE no need for a parameter as type is known from `parseusermultihypo` """
hypotheses::HP = nothing
""" categorical to select which hypothesis is being considered during convolution operation """
certainhypo::CH = nothing
""" subsection indices to select which params should be used for this hypothesis evaluation """
activehypo::Vector{Int} = Int[]
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1631 |
## ========================================================================================================================
## Bayes Trees
## ========================================================================================================================
abstract type AbstractBayesTree end
# TODO DEV MetaGraphs bayes tree, will potentially also make a LightBayesTree, CloudBayesTree,
"""
$(TYPEDEF)
Data structure for the Bayes (Junction) tree, which is used for inference and constructed from a given `::AbstractDFG`.
"""
mutable struct MetaBayesTree <: AbstractBayesTree
bt::MetaDiGraph{Int, Float64}
btid::Int
frontals::Dict{Symbol, CliqueId{Int}}
eliminationOrder::Vector{Symbol}
buildTime::Float64
end
const BayesTree = MetaBayesTree
"""
$TYPEDEF
Container for upward tree solve / initialization.
DevNotes
- TODO more direct clique access (cliq, parent, children), for multi-process solves
"""
mutable struct CliqStateMachineContainer{
BTND,
G <: AbstractDFG,
InMemG <: InMemoryDFGTypes,
BT <: AbstractBayesTree,
}
dfg::G
cliqSubFg::InMemG
tree::BT
cliq::TreeClique
incremental::Bool
drawtree::Bool
dodownsolve::Bool
delay::Bool
opts::SolverParams
refactoring::Dict{Symbol, String}
oldcliqdata::BTND
logger::SimpleLogger
cliqId::CliqueId
algorithm::Symbol
init_iter::Int
enableLogging::Bool
solveKey::Symbol
_csm_iter::Int
end
#TODO use @NamedTuple if julia compat > 1.5
const CSMHistoryTuple = NamedTuple{
(:timestamp, :id, :f, :csmc),
Tuple{DateTime, Int, Function, CliqStateMachineContainer},
}
const CSMHistory = Vector{CSMHistoryTuple}
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 3455 |
"""
$(TYPEDEF)
Solver parameters for the DistributedFactorGraph.
Dev Notes
- FIXME change to using kwargs from Parameters.jl
- TODO remove NothingUnion
- TODO Upgrade to common @kwargs struct approach
"""
Base.@kwdef mutable struct SolverParams <: DFG.AbstractParams
dimID::Int = 0
reference::NothingUnion{Dict{Symbol, Tuple{Symbol, Vector{Float64}}}} = nothing
stateless::Bool = false
""" Quasi fixed length """
qfl::Int = (2^(Sys.WORD_SIZE - 1) - 1)
""" true when adhering to qfl window size for solves """
isfixedlag::Bool = false
""" if true, then fixed lag will not update marginalized during down pass on tree """
limitfixeddown::Bool = false
""" use incremental tree updates, TODO consolidate with recycling """
incremental::Bool = true
""" Experimental, insert differential factors from upward joints """
useMsgLikelihoods::Bool = false
""" do tree upsolve """
upsolve::Bool = true
""" do tree downsolve """
downsolve::Bool = true
""" draw tree during solve """
drawtree::Bool = false
""" show CSM iteration count on tree visualization """
drawCSMIters::Bool = true
showtree::Bool = false
""" how fast should the tree vis file be redrawn """
drawtreerate::Float64 = 0.5
""" Experimental, enable additional tier debug features """
dbg::Bool = false
""" do not block on CSM tasks """
async::Bool = false
""" limit number of steps CSMs can take """
limititers::Int = 500
""" default number of particles """
N::Int = 100
""" should Distributed.jl tree solve compute features be used """
multiproc::Bool = 1 < nprocs()
""" "/tmp/caesar/logs/$(now())" # unique temporary file storage location for a solve """
logpath::String = joinpath(tempdir(),"caesar","logs","$(now(UTC))")
""" default to graph-based initialization of variables """
graphinit::Bool = true
""" init variables on the tree """
treeinit::Bool = false
limittreeinit_iters::Int = 10
""" list of algorithms to run [:default] is mmisam """
algorithms::Vector{Symbol} = [:default, :parametric]
""" entropy spread adjustment used for both null hypo cases. """
spreadNH::Float64 = 3.0
""" how much to disperse particles before convolution solves, #1051 """
inflation::Float64 = 5.0
""" minimum nullhypo for relative factors sibling to multihypo factors onto a specific variable. """
nullSurplusAdd::Float64 = 0.3
""" repeat convolutions for inflation to occur """
inflateCycles::Int = 3
""" number of Gibbs cycles to take per clique iteration variables """
gibbsIters::Int = 3
""" maximum incidence to a variable in an effort to enhance sparsity """
maxincidence::Int = 500
""" Development feature on whether new samples should be sampled at each Gibbs cycle convolution """
alwaysFreshMeasurements::Bool = true
""" should factor gradients be calculated or attempted (UNDER DEVELOPMENT, 21Q3) """
attemptGradients::Bool = false
""" empty container for new features, allowing workaround for breaking changes and legacy """
devParams::Dict{Symbol, String} = Dict{Symbol, String}()
#
end
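# Example (minimal sketch): SolverParams is a Base.@kwdef struct, so any subset
# of fields can be overridden by keyword, e.g.
#   params = SolverParams(; N = 200, graphinit = true, multiproc = false)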
StructTypes.omitempties(::Type{SolverParams}) = (:reference,)
convert(::Type{SolverParams}, ::NoSolverParams) = SolverParams()
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 4869 |
"""
$SIGNATURES
Return a random sample as a tangent vector from a belief represented by coordinates on a manifold at point p.
"""
function sampleTangent end
# Sampling MKD
function sampleTangent(M::AbstractDecoratorManifold, x::ManifoldKernelDensity, p = mean(x))
# get legacy matrix of coordinates and selected labels
#TODO make sure that when `sample` is replaced in MKD, coordinates is a vector
coords, lbls = sample(x.belief, 1)
X = hat(x.manifold, p, coords[:])
return X
end
function sampleTangent(x::ManifoldKernelDensity, p = mean(x))
return sampleTangent(x.manifold, x, p)
end
# Sampling Distributions
# assumes M is a group and will break for Riemannian, but leaving that enhancement as TODO
function sampleTangent(
M::AbstractManifold,
z::Distribution,
p = getPointIdentity(M),
basis::AbstractBasis = DefaultOrthogonalBasis()
)
return get_vector(M, p, rand(z), basis)
end
function sampleTangent(
M::AbstractDecoratorManifold,
z::Distribution,
p = getPointIdentity(M),
)
return hat(M, p, SVector{length(z)}(rand(z))) #TODO make sure all Distribution has length,
# if this errors, maybe fall back to the next line
# return convert(typeof(p), hat(M, p, rand(z, 1)[:])) #TODO find something better than (z,1)[:]
end
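# Hedged example (assumes Manifolds.jl and Distributions.jl are loaded):
# draw a tangent vector at the identity of a group manifold,
#   M = TranslationGroup(2)
#   X = sampleTangent(M, MvNormal(zeros(2), diagm(ones(2))))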
"""
$SIGNATURES
Return a random sample point on a manifold from a belief represented by coordinates at point p.
"""
function samplePoint(
M::AbstractManifold,
sbelief,
p,
basis::AbstractBasis,
retraction_method::AbstractRetractionMethod = ExponentialRetraction(),
)
X = sampleTangent(M, sbelief, p, basis)
return retract(M, p, X, retraction_method)
end
function samplePoint(
M::AbstractDecoratorManifold,
sbelief,
p = getPointIdentity(M),
retraction_method::AbstractRetractionMethod = ExponentialRetraction(),
)
X = sampleTangent(M, sbelief, p)
return retract(M, p, X, retraction_method)
end
function samplePoint(
M::AbstractDecoratorManifold,
sbelief::ManifoldKernelDensity,
# p = identity_element(M, mean(sbelief)), # 8.671254 seconds (82.64 M allocations: 3.668 GiB, 7.50% gc time)
p = getPointIdentity(M), #6.713209 seconds (66.42 M allocations: 3.141 GiB, 7.52% gc time)
retraction_method::AbstractRetractionMethod = ExponentialRetraction(),
)
X = sampleTangent(M, sbelief, p)
return retract(M, p, X, retraction_method)
end
function samplePoint(x::ManifoldKernelDensity, p = mean(x))
return samplePoint(x.manifold, x, p)
end
# FIXME: rather use manifolds
function samplePoint(distr::SamplableBelief)
Base.depwarn(
"samplePoint(distr::SamplableBelief) should be replaced by samplePoint(M<:AbstractManifold, distr::SamplableBelief, ...)",
:samplePoint,
)
return rand(distr, 1)
end
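# Hedged example: sample a point on a manifold by retracting a sampled tangent
# vector from the identity (mirrors the methods above),
#   M = TranslationGroup(2)
#   p = samplePoint(M, MvNormal(zeros(2), diagm(ones(2))))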
## default getSample
"""
$SIGNATURES
Sample the factor in `CalcFactor`. A default `getSample` method is provided that should cover most use cases;
if more advanced sampling is required, the `getSample` function should be extended.
The default behavior for `getSample` is as follows:
- The `SamplableBelief` shall be in the field `Z` and that shall be enough to fully define the factor, i.e. `Z<:SamplableBelief` should be the only field.
- Sampling on `<:AbstractManifoldMinimize` factors defined on Group Manifolds:
- `getSample` normally returns a tangent vector at the identity element, however it should just match the custom factor definition.
- Sampling on prior (`<:AbstractPrior`) factors :
- `getSample` must return a point on the manifold that matches the point representation of the variable.
Notes
- Users should overload this method should their factor not only use field `Z` for the `SamplableBelief`.
- See the Custom Factors section in the Caesar.jl documentation for more examples and details.
- Also see issue https://github.com/JuliaRobotics/IncrementalInference.jl/issues/1441
See also: [`getMeasurementParametric`](@ref)
"""
function getSample end
function getSample(cf::CalcFactor{<:AbstractPrior})
M = getManifold(cf)
if hasfield(typeof(cf.factor), :Z)
X = samplePoint(M, cf.factor.Z)
else
error(
"""Factor $(typeof(cf.factor)) does not have a field `Z`, to use the default `getSample` method, use `Z` for the measurement.
Alternatively, provide a `getSample` method. See IIF issue #1441 and Custom Factors in the Caesar documentation.""",
)
end
return X
end
function getSample(cf::CalcFactor{<:AbstractRelative})
M = getManifold(cf)
if hasfield(typeof(cf.factor), :Z)
X = sampleTangent(M, cf.factor.Z)
else
error(
"""Factor $(typeof(cf.factor)) does not have a field `Z`, to use the default `getSample` method, use `Z` for the measurement.
Alternatively, provide a `getSample` method. See IIF issue #1441 and Custom Factors in the Caesar documentation.""",
)
end
return X
end
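# Hedged sketch of a factor that can rely on the default `getSample` because
# its only field is the measurement `Z` (illustrative type, not package API):
#   struct MyOffset{T <: SamplableBelief} <: AbstractManifoldMinimize
#     Z::T
#   end
#   getManifold(::MyOffset) = TranslationGroup(1)
# Sampling then falls through to `sampleTangent(getManifold(cf), cf.factor.Z)`.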
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 4643 |
## ================================================================================================
## Manifold and ManifoldDiff use with Optim
## ================================================================================================
# Modified from: https://gist.github.com/mateuszbaran/0354c0edfb9cdf25e084a2b915816a09
"""
ManifoldWrapper{TM<:AbstractManifold} <: Optim.Manifold
Adapts Manifolds.jl manifolds for use in Optim.jl
"""
struct ManifoldWrapper{TM<:AbstractManifold} <: Optim.Manifold
M::TM
end
function Optim.retract!(M::ManifoldWrapper, x)
ManifoldsBase.embed_project!(M.M, x, x)
return x
end
function Optim.project_tangent!(M::ManifoldWrapper, g, x)
ManifoldsBase.embed_project!(M.M, g, x, g)
return g
end
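# Hedged usage sketch: pass the wrapper to an Optim.jl first order method so
# iterates stay on the manifold (cost `f` and start `x0` are assumptions here),
#   alg = Optim.ConjugateGradient(; manifold = ManifoldWrapper(Sphere(2)))
#   Optim.optimize(f, x0, alg)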
## ================================================================================================
## AbstractPowerManifold with N as field to avoid excessive compiling time.
## ================================================================================================
struct NPowerManifold{𝔽, M <: AbstractManifold{𝔽}} <:
AbstractPowerManifold{𝔽, M, NestedReplacingPowerRepresentation}
manifold::M
N::Int
end
Manifolds.get_iterator(M::NPowerManifold) = Base.OneTo(M.N)
function Manifolds.manifold_dimension(M::NPowerManifold)
return manifold_dimension(M.manifold) * M.N
end
function Manifolds.get_vector!(M::NPowerManifold, Y, p, c, B::AbstractBasis)
dim = manifold_dimension(M.manifold)
rep_size = representation_size(M.manifold)
v_iter = 1
for i in Manifolds.get_iterator(M)
Y[i] = get_vector(
M.manifold,
Manifolds._read(M, rep_size, p, i),
# view(c, v_iter:(v_iter + dim - 1)),
SVector{dim}(view(c, v_iter:(v_iter + dim - 1))),
B,
)
v_iter += dim
end
return Y
end
function Manifolds.exp!(M::NPowerManifold, q, p, X)
rep_size = representation_size(M.manifold)
for i in Manifolds.get_iterator(M)
q[i] = exp(
M.manifold,
Manifolds._read(M, rep_size, p, i),
Manifolds._read(M, rep_size, X, i),
)
end
return q
end
function Manifolds.compose!(M::NPowerManifold, x, p, q)
rep_size = representation_size(M.manifold)
for i in Manifolds.get_iterator(M)
x[i] = compose(
M.manifold,
Manifolds._read(M, rep_size, p, i),
Manifolds._read(M, rep_size, q, i),
)
end
return x
end
function Manifolds.allocate_result(M::NPowerManifold, f, x...)
if length(x) == 0
return [Manifolds.allocate_result(M.manifold, f) for _ in Manifolds.get_iterator(M)]
else
return copy(x[1])
end
end
function Manifolds.allocate_result(::NPowerManifold, ::typeof(get_vector), p, X)
return copy(p)
end
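# Example: a power of three TranslationGroup(2) elements has dimension 6, and
# since N is a runtime field no recompilation is triggered per power size,
#   M = NPowerManifold(TranslationGroup(2), 3)
#   manifold_dimension(M) == 6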
## ================================================================================================
## ArrayPartition getPointIdentity (identity_element)
## ================================================================================================
# NOTE This will be removed once moved upstream to Manifolds.jl
import DistributedFactorGraphs: getPointIdentity
function DFG.getPointIdentity(G::ProductGroup, ::Type{T} = Float64) where {T <: Real}
M = G.manifold
return ArrayPartition(map(x -> getPointIdentity(x, T), M.manifolds))
end
# fallback
function DFG.getPointIdentity(G::GroupManifold, ::Type{T} = Float64) where {T <: Real}
return error("getPointIdentity not implemented on $G")
end
function DFG.getPointIdentity(
@nospecialize(G::ProductManifold),
::Type{T} = Float64,
) where {T <: Real}
return ArrayPartition(map(x -> getPointIdentity(x, T), G.manifolds))
end
function DFG.getPointIdentity(
@nospecialize(M::PowerManifold),
::Type{T} = Float64,
) where {T <: Real}
N = Manifolds.get_iterator(M).stop
return fill(getPointIdentity(M.manifold, T), N)
end
function DFG.getPointIdentity(M::NPowerManifold, ::Type{T} = Float64) where {T <: Real}
return fill(getPointIdentity(M.manifold, T), M.N)
end
function DFG.getPointIdentity(G::SemidirectProductGroup, ::Type{T} = Float64) where {T <: Real}
M = base_manifold(G)
N, H = M.manifolds
np = getPointIdentity(N, T)
hp = getPointIdentity(H, T)
return ArrayPartition(np, hp)
end
function DFG.getPointIdentity(
G::SpecialOrthogonal{TypeParameter{Tuple{N}}},
::Type{T} = Float64
) where {N, T <: Real}
return SMatrix{N, N, T}(I)
end
function DFG.getPointIdentity(
G::TranslationGroup{TypeParameter{Tuple{N}}},
::Type{T} = Float64,
) where {N, T <: Real}
return zeros(SVector{N,T})
end
function DFG.getPointIdentity(G::RealCircleGroup, ::Type{T} = Float64) where {T <: Real}
return [zero(T)] #FIXME we cannot support scalars yet
end
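# Hedged examples of the identity points produced by the methods above:
#   getPointIdentity(TranslationGroup(3))  # static zero vector SVector{3}(0,0,0)
#   getPointIdentity(SpecialOrthogonal(3)) # 3x3 static identity matrix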
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 2372 | # functions relating to parametric solutions of a single factor that is likely in need of consolidation
"""
$SIGNATURES
Helper function to propagate a parametric estimate along a factor chain.
This function takes and returns variable values as coordinates.
Notes
- Not used during MM-iSAM inference.
- Expected uses are for user analysis of factors and estimates.
- Real-time dead reckoning chain prediction.
- Parametric binary factor utility function, used by DRT.
DevNotes
- TODO ensure type stability, likely returning types `Any` at this time.
- TODO MeanMaxPPE currently stored as coordinates, complicating fast calculation.
Related: [`getMeasurementParametric`](@ref), [`approxConvBelief`](@ref), [`MutablePose2Pose2Gaussian`](@ref)
"""
function solveFactorParametric(
dfg::AbstractDFG,
fct::DFGFactor,
# currval::P1,
srcsym_vals::AbstractVector{Pair{Symbol, P}},
trgsym::Symbol;
solveKey::Symbol = :default,
evaltmpkw...,
) where {P}
#
varLbls = getVariableOrder(fct)
varTypes = tuple((getVariableType.(dfg, varLbls))...)
sfidx = findfirst(varLbls .== trgsym)
# get the measurement point
fctTyp = getFactorType(fct)
# this is definitely in coordinates, see JuliaRobotics/RoME.jl#465
mea, _ = getMeasurementParametric(fctTyp)
# must change measT to be a tangent vector
M = getManifold(fctTyp)
e0 = getPointIdentity(M)
mea_ = hat(M, e0, mea)
measT = [mea_]
# get variable points
function _getParametric(vari::DFGVariable, key = :default)
# hasp = haskey(getPPEDict(vari), key)
# FIXME use PPE via Manifold points currently in coordinates
# hasp ? getPPE(vari, key).suggested : calcMean(getBelief(vari, key))
pt = calcMean(getBelief(vari, key))
return collect(getCoordinates(getVariableType(vari), pt))
end
# overwrite specific src values from user
coordVals = _getParametric.(getVariable.(dfg, varLbls), solveKey)
for (srcsym, currval) in srcsym_vals
coordVals[findfirst(varLbls .== srcsym)] = currval
end
crds = tuple(coordVals...)
pts = tuple(map(t -> getPoint(t...), zip(varTypes, crds))...)
# do the calculation to find solvefor index using the factor, as manifold point
pt = _evalFactorTemporary!(
fctTyp,
varTypes,
sfidx,
measT,
pts;
solveKey,
evaltmpkw...,
)[1]
return getCoordinates(getVariableType(dfg, trgsym), pt)
end
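# Hedged usage sketch (graph, factor, and variable labels are assumptions):
#   fct = getFactor(fg, :x1x2f1)
#   x2_coords = solveFactorParametric(fg, fct, [:x1 => [0.0]], :x2)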
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 7354 |
"""
$SIGNATURES
Notes
- Parametric state machine function nr. 3
"""
function solveUp_ParametricStateMachine(csmc::CliqStateMachineContainer)
infocsm(csmc, "Par-3, Solving Up")
setCliqueDrawColor!(csmc.cliq, "red")
# csmc.drawtree ? drawTree(csmc.tree, show=false, filepath=joinpath(getSolverParams(csmc.dfg).logpath,"bt.pdf")) : nothing
#TODO maybe change to symbols
msgfcts = DFGFactor[]
# LITTLE WEIRD get previously set up msgs (stored in this clique)
# FIXME, fetch message buffered in channels
# see #855
for (idx, upmsg) in getMessageBuffer(csmc.cliq).upRx #get cached messages taken from children saved in this clique
#TODO remove temp msgfcts container
append!(msgfcts, addMsgFactors!(csmc.cliqSubFg, upmsg, UpwardPass)) # addMsgFactors_Parametric!
end
logCSM(csmc, "length mgsfcts=$(length(msgfcts))")
infocsm(csmc, "length mgsfcts=$(length(msgfcts))")
# store the cliqSubFg for later debugging
_dbgCSMSaveSubFG(csmc, "fg_beforeupsolve")
vardict, result, varIds, Σ = solveGraphParametricOptim(csmc.cliqSubFg)
logCSM(csmc, "$(csmc.cliq.id) vars $(keys(varIds))")
# @info "$(csmc.cliq.id) Σ $(Σ)"
# Pack all results in variables
# FIXME test f_converged, ls_success, confirm convergence check
if result.f_converged || result.g_converged
logCSM(csmc, "$(csmc.cliq.id): subfg optim converged updating variables")
for (v, val) in vardict
vnd = getSolverData(getVariable(csmc.cliqSubFg, v), :parametric)
# fill in the variable node data value
logCSM(csmc, "$(csmc.cliq.id) up: updating $v : $val")
vnd.val[1] = val.val
#calculate and fill in covariance
#TODO rather broadcast than make new memory
vnd.bw = val.cov
end
# elseif length(lsfPriors(csmc.cliqSubFg)) == 0 #FIXME
# @error "Par-3, clique $(csmc.cliq.id) failed to converge in upsolve, but ignoring since no priors" result
else
@error "Par-3, clique $(csmc.cliq.id) failed to converge in upsolve" result
# propagate error to cleanly exit all cliques
putErrorUp(csmc)
if length(getParent(csmc.tree, csmc.cliq)) == 0
putErrorDown(csmc)
return IncrementalInference.exitStateMachine
end
return waitForDown_StateMachine
end
# Done with solve delete factors
#TODO confirm, maybe don't delete message factors on subgraph; maybe delete priors, but not conditionals
deleteMsgFactors!(csmc.cliqSubFg)
# store the cliqSubFg for later debugging
_dbgCSMSaveSubFG(csmc, "fg_afterupsolve")
#fill in belief
#TODO createBeliefMessageParametric(csmc.cliqSubFg, csmc.cliq, solvekey=opts.solvekey)
cliqSeparatorVarIds = getCliqSeparatorVarIds(csmc.cliq)
#Fill in CliqueLikelihood
cliqlikelihood =
calculateMarginalCliqueLikelihood(vardict, Σ, varIds, cliqSeparatorVarIds)
# @info "$(csmc.cliq.id) clique likelihood message $(cliqlikelihood)"
beliefMsg = LikelihoodMessage(;
sender = (; id = csmc.cliq.id.value, step = csmc._csm_iter),
status = UPSOLVED,
variableOrder = cliqSeparatorVarIds,
cliqueLikelihood = cliqlikelihood,
msgType = ParametricMessage(),
)
#FIXME bit of a hack, only fill in variable beliefs if there are priors or, for now, more than one separator
if length(lsfPriors(csmc.cliqSubFg)) > 0 || length(cliqSeparatorVarIds) > 1
for si in cliqSeparatorVarIds
vnd = getSolverData(getVariable(csmc.cliqSubFg, si), :parametric)
beliefMsg.belief[si] = TreeBelief(deepcopy(vnd))
end
end
for e in getEdgesParent(csmc.tree, csmc.cliq)
logCSM(csmc, "$(csmc.cliq.id): put! on edge $(e)")
getMessageBuffer(csmc.cliq).upTx = deepcopy(beliefMsg)
putBeliefMessageUp!(csmc.tree, e, beliefMsg)
end
return waitForDown_StateMachine
end
"""
$SIGNATURES
Notes
- Parametric state machine function nr. 5
"""
function solveDown_ParametricStateMachine(csmc::CliqStateMachineContainer)
infocsm(csmc, "Par-5, Solving down")
setCliqueDrawColor!(csmc.cliq, "red")
# csmc.drawtree ? drawTree(csmc.tree, show=false, filepath=joinpath(getSolverParams(csmc.dfg).logpath,"bt.pdf")) : nothing
# TODO create function:
# updateMsgSeparators!(csmc.cliqSubFg, downmsg)
downmsg = getMessageBuffer(csmc.cliq).downRx #see #855
svars = getCliqSeparatorVarIds(csmc.cliq)
if !isnothing(downmsg)
for (msym, belief) in downmsg.belief
if msym in svars
#TODO maybe combine variable and factor in new prior?
vnd = getSolverData(getVariable(csmc.cliqSubFg, msym), :parametric)
logCSM(csmc, "$(csmc.cliq.id): Updating separator $msym from message $(belief.val)")
vnd.val .= belief.val
vnd.bw .= belief.bw
end
end
end
# store the cliqSubFg for later debugging
# NOTE ITS not changed for now but keep here for possible future use
# _dbgCSMSaveSubFG(csmc, "fg_beforedownsolve")
# DownSolve cliqSubFg
#only down solve if its not a root
if length(getParent(csmc.tree, csmc.cliq)) != 0
frontals = getCliqFrontalVarIds(csmc.cliq)
vardict, result, flatvars, Σ = solveConditionalsParametric(csmc.cliqSubFg, frontals)
#TEMP testing difference
# vardict, result = solveGraphParametric(csmc.cliqSubFg)
# Pack all results in variables
if result.g_converged || result.f_converged
logCSM(
csmc,
"$(csmc.cliq.id): subfg optim converged updating variables";
loglevel = Logging.Info,
)
for (v, val) in vardict
logCSM(csmc, "$(csmc.cliq.id) down: updating $v : $val"; loglevel = Logging.Info)
vnd = getSolverData(getVariable(csmc.cliqSubFg, v), :parametric)
#Update subfg variables
vnd.val[1] = val.val
vnd.bw .= val.cov
end
else
@error "Par-5, clique $(csmc.cliq.id) failed to converge in down solve" result
#propagate error to cleanly exit all cliques
putErrorDown(csmc)
return IncrementalInference.exitStateMachine
end
end
#TODO fill in belief
cliqFrontalVarIds = getCliqFrontalVarIds(csmc.cliq)
#TODO createBeliefMessageParametric
# beliefMsg = createBeliefMessageParametric(csmc.cliqSubFg, cliqFrontalVarIds, solvekey=opts.solvekey)
beliefMsg = LikelihoodMessage(;
sender = (; id = csmc.cliq.id.value, step = csmc._csm_iter),
status = DOWNSOLVED,
msgType = ParametricMessage(),
)
for fi in cliqFrontalVarIds
vnd = getSolverData(getVariable(csmc.cliqSubFg, fi), :parametric)
beliefMsg.belief[fi] = TreeBelief(vnd)
logCSM(csmc, "$(csmc.cliq.id): down message $fi : $beliefMsg"; loglevel = Logging.Info)
end
# pass through the frontal variables that were sent from above
if !isnothing(downmsg)
pass_through_separators = intersect(svars, keys(downmsg.belief))
for si in pass_through_separators
beliefMsg.belief[si] = downmsg.belief[si]
logCSM(csmc, "adding parent message"; sym = si, msg = downmsg.belief[si])
end
end
#TODO sendBeliefMessageParametric(csmc, beliefMsg)
#TODO maybe send a specific message to only the child that needs it
@sync for e in getEdgesChildren(csmc.tree, csmc.cliq)
logCSM(csmc, "$(csmc.cliq.id): put! on edge $(e)")
@async putBeliefMessageDown!(csmc.tree, e, beliefMsg)#put!(csmc.tree.messageChannels[e.index].downMsg, beliefMsg)
end
logCSM(csmc, "$(csmc.cliq.id): Solve completed")
return updateFromSubgraph_StateMachine
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 18167 | using Manopt
using FiniteDiff
using SparseDiffTools
using SparseArrays
# using ForwardDiff
# using Zygote
##
function getVarIntLabelMap(
vartypeslist::OrderedDict{DataType, Vector{Symbol}}
)
varlist_tuple = (values(vartypeslist)...,)
varlabelsAP = ArrayPartition{Symbol, typeof(varlist_tuple)}(varlist_tuple)
varIntLabel = OrderedDict(zip(varlabelsAP, collect(1:length(varlabelsAP))))
return varIntLabel, varlabelsAP
end
function CalcFactorResidual(
fg,
fct::DFGFactor,
varIntLabel
)
fac_func = getFactorType(fct)
varOrder = getVariableOrder(fct)
varOrderIdxs = getindex.(Ref(varIntLabel), varOrder)
M = getManifold(getFactorType(fct))
dims = manifold_dimension(M)
meas, iΣ = getFactorMeasurementParametric(fct)
sqrt_iΣ = convert(SMatrix{dims, dims}, sqrt(iΣ))
cache = preambleCache(fg, getVariable.(fg, varOrder), getFactorType(fct))
return CalcFactorResidual(
fct.label,
getFactorMechanics(fac_func),
tuple(varOrder...),
tuple(varOrderIdxs...),
meas,
sqrt_iΣ,
cache,
)
end
"""
CalcFactorResidualAP
Create an `ArrayPartition` of `CalcFactorResidual`s.
"""
function CalcFactorResidualAP(
fg::GraphsDFG,
factorLabels::Vector{Symbol},
varIntLabel::OrderedDict{Symbol, Int64}
)
factypes, typedict, alltypes = getFactorTypesCount(getFactor.(fg, factorLabels))
# skip non-numeric prior (MetaPrior)
#TODO test... remove MetaPrior{T} something like this
metaPriorKeys = filter(k->contains(string(k), "MetaPrior"), collect(keys(alltypes)))
delete!.(Ref(alltypes), metaPriorKeys)
parts = map(values(alltypes)) do labels
map(getFactor.(fg, labels)) do fct
CalcFactorResidual(fg, fct, varIntLabel)
end
end
parts_tuple = (parts...,)
return ArrayPartition{CalcFactorResidual, typeof(parts_tuple)}(parts_tuple)
end
function (cfm::CalcFactorResidual)(p)
meas = cfm.meas
points = map(idx->p[idx], cfm.varOrderIdxs)
return cfm.sqrt_iΣ * cfm(meas, points...)
end
# cost function f: M->ℝᵈ for Riemannian Levenberg-Marquardt
struct CostFres_cond!{PT, CFT}
points::PT
costfuns::ArrayPartition{CalcFactorResidual, CFT}
varLabels::Vector{Symbol}
end
function (costf::CostFres_cond!)(M::AbstractManifold, x::Vector, p::AbstractVector)
costf.points[1:length(p)] .= p
st = 1
for cfm_part in costf.costfuns.x
st = calcFactorResVec!(x, cfm_part, costf.points, st)
end
return x
end
struct CostFres!{CFT}
# points::PT #TODO RENAME - don't update this in functor, separator static points only!
costfuns::ArrayPartition{CalcFactorResidual, CFT}
varLabels::Vector{Symbol} # vector for performance above ArrayPartition{Symbol}?
# varPoints::VPT
# sepLabels::Vector{Symbol}
# sepPoints::SPT
# facLabels::Vector{Symbol}
# add return_ranges to allow MultiThreaded
end
function calcFactorResVec!(
x::Vector{T},
cfm_part::Vector{<:CalcFactorResidual{FT, N, D}},
p::AbstractArray{T},
st::Int
) where {T, FT, N, D}
for cfm in cfm_part
x[st:st + D - 1] = cfm(p) #NOTE looks like do not broadcast here
st += D
end
return st
end
function calcFactorResVec_threaded!(x::Vector{T}, cfm_part::Vector{<:CalcFactorResidual}, p::AbstractArray{T}, st::Int) where T
l = getDimension(cfm_part[1]) # all should be the same
N = length(cfm_part)
chunkies = Iterators.partition(1:N, N ÷ Threads.nthreads())
Threads.@threads for chunki in collect(chunkies)
for i in chunki
r = range(st + l*(i - 1); length = l)
cfm = cfm_part[i]
x[r] = cfm(p) #NOTE looks like do not broadcast here
end
end
return st + l*N
end
function (costf::CostFres!{CFT})(M::AbstractManifold, x::Vector{T}, p::AbstractVector{T}) where {CFT,T}
st = 1
for cfm_part in costf.costfuns.x
# if length(cfm_part) > Threads.nthreads() * 10
# st = calcFactorResVec_threaded!(x, cfm_part, p, st)
# else
st = calcFactorResVec!(x, cfm_part, p, st)
# end
end
return x
end
## --------------------------------------------------------------------------------------------------------------
## jacobian of function for Riemannian Levenberg-Marquardt
## --------------------------------------------------------------------------------------------------------------
struct JacF_RLM!{CF, T, JC}
costF!::CF
X0::Vector{Float64}
X::T
q::T
res::Vector{Float64}
Jcache::JC
end
# function JacF_RLM!(M, costF!; basis_domain::AbstractBasis = DefaultOrthonormalBasis())
function JacF_RLM!(M, costF!, p, fg=nothing;
all_points=p,
basis_domain::AbstractBasis = DefaultOrthogonalBasis(),
is_sparse=!isnothing(fg)
)
res = reduce(vcat, map(f -> f(all_points), Vector(costF!.costfuns)))
X0 = zeros(manifold_dimension(M))
X = get_vector(M, p, X0, basis_domain)
q = exp(M, p, X)
if is_sparse
factLabels = collect(getproperty.(costF!.costfuns, :faclbl))
sparsity = eltype(res).(getSparsityPattern(fg, costF!.varLabels, factLabels))
colorvec = matrix_colors(sparsity)
else
sparsity = nothing
colorvec = 1:length(X0)
end
cache = FiniteDiff.JacobianCache(X0, res; colorvec, sparsity)
return JacF_RLM!(costF!, X0, X, q, res, cache)
end
# TODO add M to JacF_RLM! and test this in place of the closure
# function (jacF!::JacF_RLM!)(res, Xc)
# X = jacF!.X
# q = jacF!.q
# get_vector!(M, X, p, Xc, basis_domain)
# exp!(M, q, p, X)
# return jacF!.costF!(M, res, q)
# end
function (jacF!::JacF_RLM!)(
M::AbstractManifold,
J,
p::T;
# basis_domain::AbstractBasis = DefaultOrthonormalBasis(),
basis_domain::AbstractBasis = DefaultOrthogonalBasis(),
) where T
X0 = jacF!.X0
X = jacF!.X
q = jacF!.q
cache = jacF!.Jcache
fill!(X0, 0)
# TODO make sure closure performs (let, ::, or (jacF!::JacF_RLM!)(res, Xc))
function costf!(res, Xc)
get_vector!(M, X, p, Xc, basis_domain)
exp!(M, q, p, X)
jacF!.costF!(M, res, q)
end
FiniteDiff.finite_difference_jacobian!(
J,
costf!,
X0,
cache;
)
return J
end
# ϵ = getPointIdentity(M)
# function jaccost(res, Xc)
# exp!(M, q, ϵ, get_vector!(M, X, p, Xc, basis_domain))
# compose!(M, q, p, q)
# jacF!.costF!(M, res, q)
# end
# ManifoldDiff._jacobian!(
# J,
# (Xc)->jacF!.costF!(M, jacF!.res, exp!(M, q, p, get_vector!(M, X, p, Xc, basis_domain))),
# X0,
# ManifoldDiff.default_differential_backend()
# )
struct FactorGradient{A <: AbstractMatrix}
manifold::AbstractManifold
JacF!::JacF_RLM!
J::A
end
# TODO this function is not the sparsity pattern yet, it just fills in all entries from the biadjacency matrix
# TODO allow getting sparsity pattern for a subfg
# OLD 0.424040 seconds (940.11 k allocations: 45.512 MiB)
# NEW 0.001552 seconds (2.04 k allocations: 1.816 MiB)
function getSparsityPattern(fg, varLabels, factLabels)
biadj = getBiadjacencyMatrix(fg; varLabels, factLabels)
vdims = getDimension.(getVariable.(fg, biadj.varLabels))
fdims = getDimension.(getFactor.(fg, biadj.facLabels))
c_end = cumsum(vdims)
r_end = cumsum(fdims)
C_range = range.(c_end - vdims .+1, c_end)
R_range = range.(r_end - fdims .+1, r_end)
ROWS, COLS, _ = findnz(biadj.B)
iter = reduce(vcat, map(zip(ROWS, COLS)) do (R,C)
vec(CartesianIndices((R_range[R], C_range[C])))
end)
return sparse(getindex.(iter,1), getindex.(iter,2), ones(Bool, length(iter)))
end
# TODO only calculate marginal covariances
function covarianceFiniteDiff(M, jacF!::JacF_RLM!, p0)
# Jcache
X0 = fill!(deepcopy(jacF!.X0), 0)
function costf(Xc)
let res = jacF!.res, X = jacF!.X, q = jacF!.q, p0=p0
get_vector!(M, X, p0, Xc, DefaultOrthogonalBasis())
exp!(M, q, p0, X)
1/2*norm(jacF!.costF!(M, res, q))^2
end
end
H = FiniteDiff.finite_difference_hessian(costf, X0)
# inv(H)
Σ = try
Matrix(H) \ Matrix{eltype(H)}(I, size(H)...)
catch ex #TODO only catch correct exception and try with pinv as fallback in certain cases.
@warn "Hessian inverse failed" ex
# Σ = pinv(H)
nothing
end
return Σ
end
function solve_RLM(
fg,
varlabels = ls(fg),
faclabels = lsf(fg);
is_sparse = true,
finiteDiffCovariance = false,
solveKey::Symbol = :parametric,
kwargs...
)
# get the manifold and variable types
vars = getVariable.(fg, varlabels)
M, varTypes, vartypeslist = buildGraphSolveManifold(vars)
varIntLabel, varlabelsAP = getVarIntLabelMap(vartypeslist)
# Can use varIntLabel (because it's an OrderedDict), but varlabelsAP makes the ArrayPartition.
p0 = map(varlabelsAP) do label
getVal(fg, label; solveKey)[1]
end
# create an ArrayPartition{CalcFactorResidual} for faclabels
calcfacs = CalcFactorResidualAP(fg, faclabels, varIntLabel)
#cost and jacobian functions
# cost function f: M->ℝᵈ for Riemannian Levenberg-Marquardt
costF! = CostFres!(calcfacs, collect(varlabelsAP))
# jacobian of function for Riemannian Levenberg-Marquardt
jacF! = JacF_RLM!(M, costF!, p0, fg; is_sparse)
num_components = length(jacF!.res)
initial_residual_values = zeros(num_components)
# initial_jacobian_f not type stable, but function barrier so should be ok.
initial_jacobian_f = is_sparse ?
jacF!.Jcache.sparsity :
zeros(num_components, manifold_dimension(M))
lm_r = Manopt.LevenbergMarquardt!(
M,
costF!,
jacF!,
p0,
num_components;
evaluation=InplaceEvaluation(),
jacobian_tangent_basis = DefaultOrthogonalBasis(),
initial_residual_values,
initial_jacobian_f,
kwargs...
)
if length(initial_residual_values) < 1000
if finiteDiffCovariance
# TODO this seems to be correct, but way to slow
Σ = covarianceFiniteDiff(M, jacF!, lm_r)
else
# TODO make sure J initial_jacobian_f is updated, otherwise recalc jacF!(M, J, lm_r) # lm_r === p0
J = initial_jacobian_f
H = J'J # approx
Σ = H \ Matrix{eltype(H)}(I, size(H)...)
# Σ = pinv(H)
end
else
@warn "Not estimating a Dense Covariance $(size(initial_jacobian_f))"
Σ = nothing
end
return M, varlabelsAP, lm_r, Σ
end
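# Hedged usage sketch: batch solve over all variables and factors of `fg`,
#   M, varlabelsAP, p, Σ = solve_RLM(fg; is_sparse = true)
# where `p` is an ArrayPartition of solved points ordered as in `varlabelsAP`.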
# nlso = NonlinearLeastSquaresObjective(
# costF!,
# jacF!,
# num_components;
# evaluation = InplaceEvaluation(),
# jacobian_tangent_basis = DefaultOrthogonalBasis(),
# )
# @debug "starting solver"
# lm_r = LevenbergMarquardt!(
# M, nlso, p0;
# evaluation = InplaceEvaluation(),
# jacobian_tangent_basis = DefaultOrthogonalBasis(),
# initial_residual_values,
# initial_jacobian_f,
# kwargs...
# )
function solve_RLM_conditional(
fg,
frontals::Vector{Symbol} = ls(fg),
separators::Vector{Symbol} = setdiff(ls(fg), frontals);
is_sparse=false,
finiteDiffCovariance=true,
solveKey::Symbol = :parametric,
kwargs...
)
is_sparse && error("Sparse solve_RLM_conditional not supported yet")
# get the subgraph formed by all frontals, separators and fully connected factors
varlabels = union(frontals, separators)
faclabels = sortDFG(setdiff(getNeighborhood(fg, varlabels, 1), varlabels))
filter!(faclabels) do fl
return issubset(getVariableOrder(fg, fl), varlabels)
end
frontal_vars = getVariable.(fg, frontals)
separator_vars = getVariable.(fg, separators)
# so the subgraph consists of varlabels(frontals + separators) and faclabels
_, _, frontal_vartypeslist = getVariableTypesCount(getVariable.(fg,frontals))
frontal_varIntLabel, frontal_varlabelsAP = getVarIntLabelMap(frontal_vartypeslist)
if isempty(separators)
separator_vartypeslist = OrderedDict{DataType, Vector{Symbol}}()
separator_varlabelsAP = ArrayPartition{Symbol,Tuple}(())
else
_, _, separator_vartypeslist = getVariableTypesCount(getVariable.(fg,separators))
separator_varIntLabel, separator_varlabelsAP = getVarIntLabelMap(separator_vartypeslist)
end
all_varlabelsAP = ArrayPartition((frontal_varlabelsAP.x..., separator_varlabelsAP.x...))
all_points = map(all_varlabelsAP) do label
getVal(fg, label; solveKey)[1]
end
p0 = ArrayPartition(all_points.x[1:length(frontal_varlabelsAP.x)])
all_varIntLabel = OrderedDict{Symbol,Int}(
map(enumerate(all_varlabelsAP)) do (i,l)
l=>i
end
)
# varIntLabel_frontals = filter(p->first(p) in frontals, varIntLabel)
# varIntLabel_separators = filter(p->first(p) in separators, varIntLabel)
calcfacs = CalcFactorResidualAP(fg, faclabels, all_varIntLabel)
# get the manifold and variable types
M, varTypes, vartypeslist = buildGraphSolveManifold(frontal_vars)
#cost and jacobian functions
# cost function f: M->ℝᵈ for Riemannian Levenberg-Marquardt
costF! = CostFres_cond!(all_points, calcfacs, Vector{Symbol}(collect(all_varlabelsAP)))
# jacobian of function for Riemannian Levenberg-Marquardt
jacF! = JacF_RLM!(M, costF!, p0, fg; all_points, is_sparse)
num_components = length(jacF!.res)
initial_residual_values = zeros(num_components)
initial_jacobian_f = is_sparse ?
jacF!.Jcache.sparsity :
zeros(num_components, manifold_dimension(M))
lm_r = LevenbergMarquardt(
M,
costF!,
jacF!,
p0,
num_components;
evaluation=InplaceEvaluation(),
initial_residual_values,
initial_jacobian_f,
kwargs...
)
if finiteDiffCovariance
Σ = covarianceFiniteDiff(M, jacF!, lm_r)
else
J = initial_jacobian_f
Σ = pinv(J'J)
end
return M, all_varlabelsAP, lm_r, Σ
end
#HEX solve
# sparse J 0.025235 seconds (133.65 k allocations: 9.964 MiB
# new1 0.013486 seconds (36.16 k allocations: 2.593 MiB)
# new2 0.010764 seconds (34.61 k allocations: 3.111 MiB)
# dense J 0.022079 seconds (283.54 k allocations: 18.146 MiB)
function autoinitParametric!(
fg,
varorderIds = getInitOrderParametric(fg);
reinit = false,
kwargs...
)
init_labels = @showprogress map(varorderIds) do vIdx
autoinitParametric!(fg, vIdx; reinit, kwargs...)
end
filter!(!isnothing, init_labels)
return init_labels
end
function autoinitParametric!(dfg::AbstractDFG, initme::Symbol; kwargs...)
return autoinitParametric!(dfg, getVariable(dfg, initme); kwargs...)
end
function autoinitParametric!(
dfg::AbstractDFG,
xi::DFGVariable;
solveKey = :parametric,
reinit::Bool = false,
perturb_point::Bool=false,
kwargs...,
)
#
initme = getLabel(xi)
vnd = getSolverData(xi, solveKey)
# don't initialize a variable more than once
if reinit || !isInitialized(xi, solveKey)
# frontals - initme
# separators - initfrom
initfrom = ls2(dfg, initme)
filter!(initfrom) do vl
return isInitialized(dfg, vl, solveKey)
end
# nothing to initialize if no initialized neighbors or priors
if isempty(initfrom) && !any(isPrior.(dfg, listNeighbors(dfg, initme)))
return false
end
vnd::VariableNodeData = getSolverData(xi, solveKey)
if perturb_point
_M = getManifold(xi)
p = vnd.val[1]
vnd.val[1] = exp(
_M,
p,
get_vector(
_M,
p,
randn(manifold_dimension(_M))*10^-6,
DefaultOrthogonalBasis()
)
)
end
M, vartypeslist, lm_r, Σ = solve_RLM_conditional(dfg, [initme], initfrom; solveKey, kwargs...)
val = lm_r[1]
vnd.val[1] = val
!isnothing(Σ) && (vnd.bw .= Σ)
# updateSolverDataParametric!(vnd, val, Σ)
vnd.initialized = true
#fill in ppe as mean
Xc::Vector{Float64} = collect(getCoordinates(getVariableType(xi), val))
ppe = MeanMaxPPE(solveKey, Xc, Xc, Xc)
getPPEDict(xi)[solveKey] = ppe
result = true
else
result = false
end
return result#isInitialized(xi, solveKey)
end
"""
$SIGNATURES
Batch parametric graph solve using Riemannian Levenberg Marquardt.
"""
solveGraphParametric(args...; kwargs...) = solve_RLM(args...; kwargs...)
function DFG.solveGraphParametric!(
fg::AbstractDFG,
args...;
init::Bool = false,
solveKey::Symbol = :parametric,
is_sparse = true,
# debug, stopping_criterion, damping_term_min=1e-2,
# expect_zero_residual=true,
kwargs...
)
# make sure variables has solverData, see #1637
makeSolverData!(fg; solveKey)
if !(:parametric in fg.solverParams.algorithms)
addParametricSolver!(fg; init = init)
elseif init
error("TODO: not implemented")
end
M, v, r, Σ = solve_RLM(fg, args...; is_sparse, kwargs...)
updateParametricSolution!(fg, M, v, r, Σ)
return M, v, r, Σ
end
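# Hedged usage sketch: solve in place and read a result back from the graph,
#   M, v, r, Σ = solveGraphParametric!(fg)
#   getVal(fg, first(v); solveKey = :parametric)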
## Check when there is time and delete if it can't be improved; currently ArrayPartition works best
#=
using FunctionWrappers: FunctionWrapper
# call with
calcfacs = CalcFactorResidualWrapper(fg, faclabels, varIntLabel, all_points)
costF! = CostF_RLM_WRAP2!(all_points, calcfacs, map(cfm->size(cfm.obj.x.iΣ,1), calcfacs))
function CalcFactorResidualWrapper(fg, factorLabels::Vector{Symbol}, varIntLabel::OrderedDict{Symbol, Int64}, points::ArrayPartition)
factypes, typedict, alltypes = getFactorTypesCount(getFactor.(fg, factorLabels))
# skip non-numeric prior (MetaPrior)
#TODO test... remove MetaPrior{T} something like this
metaPriorKeys = filter(k->contains(string(k), "MetaPrior"), collect(keys(alltypes)))
delete!.(Ref(alltypes), metaPriorKeys)
calcfacs = map(factorLabels) do labels
fct = getFactor(fg, labels)
# should I make a view into p0 so that only p0 is updated and the CFM view follows automatically? test this...
cfm = IIF.CalcFactorResidual(fg, fct, varIntLabel, points)
# return FunctionWrapper{Vector{Float64}, Tuple{typeof(points)}}(cfm)
return FunctionWrapper{Vector{Float64}, Tuple{}}(cfm)
end
return calcfacs
end
struct CostF_RLM_WRAP2!{PT, CFW}
points::PT
costfuns::Vector{CFW}
retdims::Vector{Int}
end
function (cost::CostF_RLM_WRAP2!)(M::AbstractManifold, x::Vector{T}, p::AbstractVector{T}) where T
# x .= reduce(vcat, map(f -> f(p), cost.costfuns))
# x .= reduce(vcat, map(f -> f(), cost.costfuns))
st = 1
for (d, f) in zip(cost.retdims, cost.costfuns)
x[st:st + d - 1] .= f(p)
# x[st:st + d - 1] .= f()
# fx = f.obj.x
# x[st:st + d - 1] = fx.sqrt_iΣ * fx(fx.meas, fx.points...)
st += d
end
return x
end
=#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 489 |
# experimental
function optimizeManifold_FD(
M::AbstractManifold,
cost::Function,
x0::AbstractArray;
algorithm = Optim.ConjugateGradient(; manifold=ManifoldWrapper(M))
)
# finitediff setup
r_backend = ManifoldDiff.TangentDiffBackend(
ManifoldDiff.FiniteDifferencesBackend()
)
## finitediff gradient (non-manual)
function costgrad_FD!(X,p)
X .= ManifoldDiff.gradient(M, cost, p, r_backend)
X
end
Optim.optimize(cost, costgrad_FD!, x0, algorithm)
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 30321 | # ================================================================================================
## FlatVariables - used for packing variables for optimization
## ================================================================================================
struct FlatVariables{T <: Real}
X::Vector{T}
idx::OrderedDict{Symbol, UnitRange{Int}}
end
function FlatVariables(fg::AbstractDFG, varIds::Vector{Symbol})
index = 1
idx = OrderedDict{Symbol, UnitRange{Int}}()
for vid in varIds
v = getVariable(fg, vid)
dims = getDimension(v)
idx[vid] = index:(index + dims - 1)
index += dims
end
return FlatVariables(Vector{Float64}(undef, index - 1), idx)
end
function Base.setindex!(
flatVar::FlatVariables{T},
val::AbstractVector{T},
vId::Symbol,
) where {T <: Real}
if length(val) == length(flatVar.idx[vId])
flatVar.X[flatVar.idx[vId]] .= val
else
error("array could not be broadcast to match destination")
end
end
function Base.getindex(flatVar::FlatVariables{T}, vId::Symbol) where {T <: Real}
return flatVar.X[flatVar.idx[vId]]
end
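# Hedged example using the direct constructor (labels are illustrative):
#   fv = FlatVariables(zeros(3), OrderedDict(:x1 => 1:2, :x2 => 3:3))
#   fv[:x1] = [1.0, 2.0]  # writes into fv.X[1:2]
#   fv[:x2]               # reads fv.X[3:3]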
## ================================================================================================
## Parametric Factors
## ================================================================================================
"""
$SIGNATURES
Returns the parametric measurement for a factor as a tuple (measurement, inverse covariance) for parametric inference (assuming Gaussian).
Defaults to find the parametric measurement at field `Z`.
Notes
- Users should overload this method should their factor not default to `.Z<:ParametricType`.
- First design choice was to restrict this function to returning coordinates
- See https://github.com/JuliaRobotics/RoME.jl/issues/465
- Pay attention to which tangent space point is used for converting points on a manifold to coordinates,
- Originally written just for Lie Groups to support legacy, but future needs may well alter the design.
- Original design driven by parametric solve and dead reckon tethering.
See also: [`accumulateFactorMeans`](@ref), [`solveFactorParametric`](@ref)
"""
function getMeasurementParametric end
function getMeasurementParametric(Z)
return error(
"$(typeof(Z)) is not supported, please use non-parametric or open an issue if it should be",
)
end
function getMeasurementParametric(Z::Normal)
meas = mean(Z)
iσ = 1 / std(Z)^2
return [meas], reshape([iσ], 1, 1)
end
function getMeasurementParametric(Z::MvNormal)
meas = mean(Z)
iΣ = invcov(Z)
return meas, iΣ
end
# the point `p` on the manifold is the mean
function getMeasurementParametric(s::ManifoldPrior)
meas = s.p
iΣ = invcov(s.Z)
return meas, iΣ
end
function getMeasurementParametric(s::AbstractFactor)
if hasfield(typeof(s), :Z)
Z = s.Z
else
error(
"getMeasurementParametric(::$(typeof(s))) not defined, please add it, or use non-parametric, or open an issue for help.",
)
end
return getMeasurementParametric(Z)
end
getMeasurementParametric(fct::DFGFactor) = getMeasurementParametric(getFactorType(fct))
getMeasurementParametric(dfg::AbstractDFG, flb::Symbol) = getMeasurementParametric(getFactor(dfg, flb))
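# Example: for an MvNormal the parametric measurement is (mean, inverse covariance),
#   Z = MvNormal([1.0, 2.0], diagm([0.25, 0.25]))
#   μ, iΣ = getMeasurementParametric(Z)  # μ == [1.0, 2.0], iΣ == diagm([4.0, 4.0])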
# maybe rename getMeasurementParametric to something like getNormalDistributionParams or getMeanCov
# default to point on manifold
function getFactorMeasurementParametric(fac::AbstractPrior)
M = getManifold(fac)
ϵ = getPointIdentity(M)
dims = manifold_dimension(M)
Xc, iΣ = getMeasurementParametric(fac)
X = get_vector(M, ϵ, Xc, DefaultOrthogonalBasis())
meas = convert(typeof(ϵ), exp(M, ϵ, X))
iΣ = convert(SMatrix{dims, dims}, iΣ)
meas, iΣ
end
# default to tangent vector at the identity
function getFactorMeasurementParametric(fac::AbstractRelative)
M = getManifold(fac)
ϵ = getPointIdentity(M)
dims = manifold_dimension(M)
Xc, iΣ = getMeasurementParametric(fac)
measX = convert(typeof(ϵ), get_vector(M, ϵ, Xc, DefaultOrthogonalBasis()))
iΣ = convert(SMatrix{dims, dims}, iΣ)
measX, iΣ
end
getFactorMeasurementParametric(fct::DFGFactor) = getFactorMeasurementParametric(getFactorType(fct))
getFactorMeasurementParametric(dfg::AbstractDFG, flb::Symbol) = getFactorMeasurementParametric(getFactor(dfg, flb))
## ================================================================================================
## Parametric solve with Mahalanobis distance - CalcFactor
## ================================================================================================
#TODO maybe remove with Mixture rework see #1504
getFactorMechanics(f::AbstractFactor) = f
getFactorMechanics(f::Mixture) = f.mechanics
function CalcFactorMahalanobis(fg, fct::DFGFactor)
fac_func = getFactorType(fct)
varOrder = getVariableOrder(fct)
# NOTE, use getMeasurementParametric on DFGFactor{<:CCW} to allow special cases like OAS factors
_meas, _iΣ = getFactorMeasurementParametric(fct) # fac_func
# make sure it's a tuple TODO Fix with mixture rework #1504
meas = typeof(_meas) <: Tuple ? _meas : (_meas,)
iΣ = typeof(_iΣ) <: Tuple ? _iΣ : (_iΣ,)
cache = preambleCache(fg, getVariable.(fg, varOrder), getFactorType(fct))
multihypo = getSolverData(fct).multihypo
nullhypo = getSolverData(fct).nullhypo
# FIXME, type instability
if length(multihypo) > 0
special = MaxMultihypo(multihypo)
elseif nullhypo > 0
special = MaxNullhypo(nullhypo)
elseif fac_func isa Mixture
special = MaxMixture(fac_func.diversity.p, Ref(0))
else
special = nothing
end
return CalcFactorMahalanobis(fct.label, getFactorMechanics(fac_func), cache, varOrder, meas, iΣ, special)
end
# This is where the actual parametric calculation happens, CalcFactor equivalent for parametric
# function (cfp::CalcFactorMahalanobis{FT, 1, C, MEAS, D, L, Nothing})(variables...) where {FT, C, MEAS, D, L, Nothing}# AbstractArray{T} where T <: Real
# # call the user function
# res = cfp.calcfactor!(cfp.meas..., variables...)
# # 1/2*log(1/( sqrt(det(Σ)*(2pi)^k) )) ## k = dim(μ)
# return res' * cfp.iΣ[1] * res
# end
# function (cfm::CalcFactorMahalanobis)(variables...)
# meas = cfm.meas
# points = map(idx->p[idx], cfm.varOrderIdxs)
# return cfm.sqrt_iΣ * cfm(meas, points...)
# end
function calcFactorMahalanobisDict(fg)
calcFactors = OrderedDict{Symbol, CalcFactorMahalanobis}()
for fct in getFactors(fg)
# skip non-numeric prior
getFactorType(fct) isa MetaPrior ? continue : nothing
calcFactors[fct.label] = CalcFactorMahalanobis(fg, fct)
end
return calcFactors
end
function getFactorTypesCount(facs::Vector{<:DFGFactor})
typedict = OrderedDict{DataType, Int}()
alltypes = OrderedDict{DataType, Vector{Symbol}}()
for f in facs
facType = typeof(getFactorType(f))
cnt = get!(typedict, facType, 0)
typedict[facType] = cnt + 1
dt = get!(alltypes, facType, Symbol[])
push!(dt, f.label)
end
#TODO tuple or vector?
# vartypes = tuple(keys(typedict)...)
factypes::Vector{DataType} = collect(keys(typedict))
return factypes, typedict, alltypes
end
function calcFactorMahalanobisVec(fg)
factypes, typedict, alltypes = getFactorTypesCount(getFactors(fg))
# skip non-numeric prior (MetaPrior)
#TODO test... remove MetaPrior{T} something like this
metaPriorKeys = filter(k->contains(string(k), "MetaPrior"), collect(keys(alltypes)))
delete!.(Ref(alltypes), metaPriorKeys)
parts = map(values(alltypes)) do labels
map(getFactor.(fg, labels)) do fct
CalcFactorMahalanobis(fg, fct)
end
end
parts_tuple = (parts...,)
return ArrayPartition{CalcFactorMahalanobis, typeof(parts_tuple)}(parts_tuple)
end
## ================================================================================================
## ================================================================================================
## New Parametric refactor WIP
## ================================================================================================
## ================================================================================================
## ================================================================================================
## LazyCase based on LazyBufferCache from PreallocationTools.jl
## ================================================================================================
"""
$SIGNATURES
A lazily allocated cache object.
"""
struct LazyCache{F <: Function}
dict::Dict{Tuple{DataType, Symbol}, Any}
fnc::F
end
function LazyCache(f::F = allocate) where {F <: Function}
return LazyCache(Dict{Tuple{DataType, Symbol}, Any}(), f)
end
# override the [] method
function Base.getindex(cache::LazyCache, u::T, varname::Symbol) where {T}
val = get!(cache.dict, (T, varname)) do
return cache.fnc(u)
end::T
return val
end
function getCoordCache!(cache::LazyCache, M, T::DataType, varname::Symbol)
val = get!(cache.dict, (T, varname)) do
return Vector{T}(undef, manifold_dimension(M))
end::Vector{T}
return val
end
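# Hedged example: buffers are allocated on first use and reused afterwards,
#   cache = LazyCache()
#   buf = cache[zeros(3), :residual]    # allocates via `allocate` on first call
#   buf === cache[zeros(3), :residual]  # true, the same buffer is reused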
## ================================================================================================
## GraphSolveStructures
## ================================================================================================
getVariableTypesCount(fg::AbstractDFG) = getVariableTypesCount(getVariables(fg))
function getVariableTypesCount(vars::Vector{<:DFGVariable})
typedict = OrderedDict{DataType, Int}()
alltypes = OrderedDict{DataType, Vector{Symbol}}()
for v in vars
varType = typeof(getVariableType(v))
cnt = get!(typedict, varType, 0)
typedict[varType] = cnt + 1
dt = get!(alltypes, varType, Symbol[])
push!(dt, v.label)
end
#TODO tuple or vector?
# vartypes = tuple(keys(typedict)...)
vartypes::Vector{DataType} = collect(keys(typedict))
return vartypes, typedict, alltypes
end
buildGraphSolveManifold(fg::AbstractDFG) = buildGraphSolveManifold(getVariables(fg))
function buildGraphSolveManifold(vars::Vector{<:DFGVariable})
vartypes, vartypecount, vartypeslist = getVariableTypesCount(vars)
PMs = map(vartypes) do vartype
N = vartypecount[vartype]
G = getManifold(vartype)
return NPowerManifold(G, N)
# PowerManifold(G, NestedReplacingPowerRepresentation(), N)
# PowerManifold(G, NestedPowerRepresentation(), N) #TODO investigate as it does not converge
end
M = ProductManifold(PMs...)
return M, vartypes, vartypeslist
end
struct GraphSolveBuffers{T <: Real, U}
ϵ::U
p::U
X::U
Xc::Vector{T}
end
function GraphSolveBuffers(@nospecialize(M), ::Type{T}) where {T}
ϵ = getPointIdentity(M, T)
p = deepcopy(ϵ)# allocate_result(M, getPointIdentity)
  X = deepcopy(ϵ) # allocate(p)
Xc = get_coordinates(M, ϵ, X, DefaultOrthogonalBasis())
return GraphSolveBuffers(ϵ, p, X, Xc)
end
struct GraphSolveContainer{CFT}
M::AbstractManifold # ProductManifold or ProductGroup
buffers::OrderedDict{DataType, GraphSolveBuffers}
varTypes::Vector{DataType}
varTypesIds::OrderedDict{DataType, Vector{Symbol}}
varOrderDict::OrderedDict{Symbol, Tuple{Int, Vararg{Int}}}
cfv::ArrayPartition{CalcFactorMahalanobis, CFT}
end
function GraphSolveContainer(fg)
M, varTypes, varTypesIds = buildGraphSolveManifold(fg)
varTypesIndexes = ArrayPartition(values(varTypesIds)...)
buffs = OrderedDict{DataType, GraphSolveBuffers}()
cfvec = calcFactorMahalanobisVec(fg)
varOrderDict = OrderedDict{Symbol, Tuple{Int, Vararg{Int}}}()
for cfp in cfvec
fid = cfp.faclbl
varOrder = cfp.varOrder
var_idx = map(varOrder) do v
return findfirst(==(v), varTypesIndexes)
end
varOrderDict[fid] = tuple(var_idx...)
end
return GraphSolveContainer(M, buffs, varTypes, varTypesIds, varOrderDict, cfvec)
end
function getGraphSolveCache!(gsc::GraphSolveContainer, ::Type{T}) where {T <: Real}
cache = gsc.buffers
M = gsc.M
val = get!(cache, T) do
@debug "cache miss, cacheing" T
return GraphSolveBuffers(M, T)
end
return val
end
function _toPoints2!(
M::AbstractManifold,
buffs::GraphSolveBuffers{T, U},
Xc::Vector{T},
) where {T, U}
ϵ = buffs.ϵ
p = buffs.p
X = buffs.X
get_vector!(M, X, ϵ, Xc, DefaultOrthogonalBasis())
exp!(M, p, ϵ, X)
return p::U
end
function cost_cfp(
cfp::CalcFactorMahalanobis,
p::AbstractArray{T},
vi::NTuple{N, Int},
) where {T,N}
# cfp(map(v->p[v],vi)...)
res = cfp(cfp.meas..., map(v->p[v],vi)...)
# 1/2*log(1/( sqrt(det(Σ)*(2pi)^k) )) ## k = dim(μ)
return res' * cfp.iΣ[1] * res
end
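# Minimal numeric sketch of the Mahalanobis cost above (illustrative values only):
# res = [0.1, -0.2]
# iΣ = [4.0 0.0; 0.0 1.0]
# res' * iΣ * res  # == 0.08, the squared Mahalanobis distance of the residual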
# function cost_cfp(
# @nospecialize(cfp::CalcFactorMahalanobis),
# @nospecialize(p::AbstractArray),
# vi::NTuple{1, Int},
# )
# return cfp(p[vi[1]])
# end
# function cost_cfp(
# @nospecialize(cfp::CalcFactorMahalanobis),
# @nospecialize(p::AbstractArray),
# vi::NTuple{2, Int},
# )
# return cfp(p[vi[1]], p[vi[2]])
# end
# function cost_cfp(
# @nospecialize(cfp::CalcFactorMahalanobis),
# @nospecialize(p::AbstractArray),
# vi::NTuple{3, Int},
# )
# return cfp(p[vi[1]], p[vi[2]], p[vi[3]])
# end
# function (gsc::GraphSolveContainer)(f::Vector{T}, Xc::Vector{T}, ::Val{true}) where T <: Real
# #
# buffs = getGraphSolveCache!(gsc, T)
# cfdict = gsc.cfdict
# varOrderDict = gsc.varOrderDict
# M = gsc.M
# p = _toPoints2!(M, buffs, Xc)
# for (i,(fid, cfp)) in enumerate(cfdict)
# varOrder_idx = varOrderDict[fid]
# # call the user function
# f[i] = cost_cfp(cfp, p, varOrder_idx)/2
# end
# return f
# end
# the cost function
function (gsc::GraphSolveContainer)(Xc::Vector{T}) where {T <: Real}
#
buffs = getGraphSolveCache!(gsc, T)
varOrderDict = gsc.varOrderDict
M = gsc.M
p = _toPoints2!(M, buffs, Xc)
obj = mapreduce(+, eachindex(gsc.cfv)) do i
cfp = gsc.cfv[i]
varOrder_idx = varOrderDict[cfp.faclbl]
# # call the user function
cost::T = cost_cfp(cfp, p, varOrder_idx)
return cost
end
return obj / 2
end
# FIXME, deprecate and improve legacy use of `MultiThreaded` type
struct MultiThreaded end
function (gsc::GraphSolveContainer)(Xc::Vector{T}, ::MultiThreaded) where {T <: Real}
  #
  buffs = getGraphSolveCache!(gsc, T)
  varOrderDict = gsc.varOrderDict
  M = gsc.M
  p = _toPoints2!(M, buffs, Xc)
  # NOTE multithreaded variant of the single threaded cost functor above,
  # accumulating per-thread partial sums to avoid a shared-write race on the objective
  obj = zeros(T, Threads.nthreads())
  Threads.@threads for i in eachindex(gsc.cfv)
    cfp = gsc.cfv[i]
    varOrder_idx = varOrderDict[cfp.faclbl]
    # call the user function
    obj[Threads.threadid()] += cost_cfp(cfp, p, varOrder_idx)
  end
  # 1/2*log(1/( sqrt(det(Σ)*(2pi)^k) )) ## k = dim(μ)
  return sum(obj) / 2
end
#fg = generateCanonicalFG_Honeycomb!()
# copy variables from graph
function initPoints!(p, gsc, fg::AbstractDFG, solveKey = :parametric)
for (i, vartype) in enumerate(gsc.varTypes)
varIds = gsc.varTypesIds[vartype]
for (j, vId) in enumerate(varIds)
p[gsc.M, i][j] = getVariableSolverData(fg, vId, solveKey).val[1]
end
end
end
function _get_dim_ranges(dims::NTuple{N,Any}) where {N}
dims_acc = accumulate(+, vcat(1, SVector(dims)))
return ntuple(i -> (dims_acc[i]:(dims_acc[i] + dims[i] - 1)), Val(N))
end
#NOTE this only works with a product of power manifolds
function getComponentsCovar(@nospecialize(PM::ProductManifold), Σ::AbstractMatrix)
dims = manifold_dimension.(PM.manifolds)
dim_ranges = _get_dim_ranges(dims)
subsigmas = map(zip(dim_ranges, PM.manifolds)) do v
r = v[1]
M = v[2]
return _getComponentsCovar(M, view(Σ, r, r))
end
return ArrayPartition(subsigmas...)
end
function _getComponentsCovar(@nospecialize(PM::PowerManifold), Σ::AbstractMatrix)
M = PM.manifold
dim = manifold_dimension(M)
subsigmas = map(Manifolds.get_iterator(PM)) do i
r = ((i - 1) * dim + 1):(i * dim)
return Σ[r, r]
end
return subsigmas
end
function _getComponentsCovar(@nospecialize(PM::NPowerManifold), Σ::AbstractMatrix)
M = PM.manifold
dim = manifold_dimension(M)
subsigmas = map(Manifolds.get_iterator(PM)) do i
r = ((i - 1) * dim + 1):(i * dim)
return Σ[r, r]
end
return subsigmas
end
function solveGraphParametricOptim(
fg::AbstractDFG;
verbose::Bool = false,
computeCovariance::Bool = true,
solveKey::Symbol = :parametric,
autodiff = :forward,
algorithm = Optim.BFGS,
algorithmkwargs = (), # add manifold to overwrite computed one
# algorithmkwargs = (linesearch=Optim.BackTracking(),), # add manifold to overwrite computed one
options = Optim.Options(;
allow_f_increases = true,
time_limit = 100,
# show_trace = true,
# show_every = 1,
),
)
#
# Build the container
gsc = GraphSolveContainer(fg)
buffs = getGraphSolveCache!(gsc, Float64)
M = gsc.M
ϵ = buffs.ϵ
p = buffs.p
X = buffs.X
Xc = buffs.Xc
#initialize points in buffer from fg, TODO maybe do in constructor
initPoints!(p, gsc, fg, solveKey)
# log!(M, X, Identity(ProductOperation), p)
# calculate initial coordinates vector for Optim
log!(M, X, ϵ, p)
get_coordinates!(M, Xc, ϵ, X, DefaultOrthogonalBasis())
initValues = Xc
  #FIXME, for some reason we get NaNs; adding a small random perturbation works around it
initValues .+= randn(length(Xc)) * 0.0001
#optim setup and solve
alg = algorithm(; algorithmkwargs...)
tdtotalCost = Optim.TwiceDifferentiable(gsc, initValues; autodiff = autodiff)
result = Optim.optimize(tdtotalCost, initValues, alg, options)
!verbose ? nothing : @show(result)
rv = Optim.minimizer(result)
# optionally compute hessian for covariance
Σ = if computeCovariance
H = Optim.hessian!(tdtotalCost, rv)
pinv(H)
else
N = length(initValues)
zeros(N, N)
end
#TODO better return
  #get point (p) values from results
get_vector!(M, X, ϵ, rv, DefaultOrthogonalBasis())
exp!(M, p, ϵ, X)
#extract covariances from result
# sigmas = getComponentsCovar(M, Σ)
# d = OrderedDict{Symbol,NamedTuple{(:val, :cov),Tuple{Vector{Float64},Matrix{Float64}}}}()
d = OrderedDict{Symbol, NamedTuple{(:val, :cov), Tuple{AbstractArray, Matrix{Float64}}}}()
varIds = vcat(values(gsc.varTypesIds)...)
varIdDict = FlatVariables(fg, varIds).idx
for (i, key) in enumerate(varIds)
r = varIdDict[key]
push!(d, key => (val = p[i], cov = Σ[r, r]))
# push!(d,key=>(val=p[i], cov=sigmas[i]))
end
return (opti = d, stat = result, varIds = varIdDict, Σ = Σ)
end
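# Usage sketch (assumes `fg` already has an initialized :parametric solveKey):
# vardict, result, varIds, Σ = solveGraphParametricOptim(fg; verbose = true)
# vardict[:x0].val  # optimized point for variable :x0
# vardict[:x0].cov  # marginal covariance block for :x0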
## Original
# ==============================
function _totalCost(fg, cfdict::OrderedDict{Symbol, <:CalcFactorMahalanobis}, flatvar, Xc)
#
obj = zero(eltype(Xc))
for (fid, cfp) in cfdict
varOrder = cfp.varOrder
Xparams = [
getPoint(getVariableType(fg, varId), view(Xc, flatvar.idx[varId])) for
varId in varOrder
]
# call the user function
# retval = cfp(Xparams...)
res = cfp(cfp.meas..., Xparams...)
# 1/2*log(1/( sqrt(det(Σ)*(2pi)^k) )) ## k = dim(μ)
obj += 1 / 2 * res' * cfp.iΣ[1] * res
end
return obj
end
"""
$SIGNATURES
Solve for frontal values only, with values in separators fixed.
DevNotes
- WIP
- Relates to: https://github.com/JuliaRobotics/IncrementalInference.jl/issues/466#issuecomment-562556953
- Consolidation
- Definitely with [`solveFactorParametric`](@ref)
- Maybe with [`solveGraphParametric`](@ref)
- https://github.com/JuliaRobotics/IncrementalInference.jl/pull/1588#issuecomment-1210406683
"""
function solveConditionalsParametric(
fg::AbstractDFG,
frontals::Vector{Symbol},
separators::Vector{Symbol} = setdiff(listVariables(fg), frontals);
solvekey::Symbol = :parametric,
autodiff = :forward,
algorithm = Optim.BFGS,
algorithmkwargs = (), # add manifold to overwrite computed one
options = Optim.Options(;
allow_f_increases = true,
time_limit = 100,
# show_trace = true,
# show_every = 1,
),
)
varIds = [frontals; separators]
sfg = issetequal(varIds, listVariables(fg)) ? fg : buildSubgraph(fg, varIds, 1)
flatvar = FlatVariables(fg, varIds)
for vId in varIds
p = getVariableSolverData(fg, vId, solvekey).val[1]
flatvar[vId] = getCoordinates(getVariableType(fg, vId), p)
end
initValues = flatvar.X
frontalsLength = sum(map(v -> getDimension(getVariable(fg, v)), frontals))
  # build variables for frontals and separators
# fX = view(initValues, 1:frontalsLength)
fX = initValues[1:frontalsLength]
# sX = view(initValues, (frontalsLength+1):length(initValues))
sX = initValues[(frontalsLength + 1):end]
alg = algorithm(; algorithmkwargs...)
cfd = calcFactorMahalanobisDict(sfg)
tdtotalCost = Optim.TwiceDifferentiable(
(x) -> _totalCost(fg, cfd, flatvar, [x; sX]),
fX;
autodiff = autodiff,
)
# result = Optim.optimize((x)->_totalCost(fg, flatvar, [x;sX]), fX, alg, options)
result = Optim.optimize(tdtotalCost, fX, alg, options)
if !Optim.converged(result)
@warn "Optim did not converge:" result maxlog=10
end
rv = Optim.minimizer(result)
H = Optim.hessian!(tdtotalCost, rv)
Σ = pinv(H)
d = OrderedDict{Symbol, NamedTuple{(:val, :cov), Tuple{AbstractArray, Matrix{Float64}}}}()
for key in frontals
r = flatvar.idx[key]
p = getPoint(getVariableType(fg, key), rv[r])
push!(d, key => (val = p, cov = Σ[r, r]))
end
return (opti = d, stat = result, varIds = flatvar.idx, Σ = Σ)
end
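# Usage sketch, solving only the frontal :x2 while its separators stay fixed
# (assumes :x2 and its neighbors are initialized under :parametric):
# d, result, varIds, Σ = solveConditionalsParametric(fg, [:x2])
# d[:x2].val, d[:x2].cov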
## ================================================================================================
## UNDER DEVELOPMENT Parametric solveTree utils
## ================================================================================================
"""
$SIGNATURES
Get the indexes for labels in FlatVariables
"""
function collectIdx(varinds, labels)
idx = Int[]
for lbl in labels
append!(idx, varinds[lbl])
end
return idx
end
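# Sketch with illustrative indices: given varinds = Dict(:x0 => 1:2, :x1 => 3:4),
# collectIdx(varinds, [:x1, :x0]) returns [3, 4, 1, 2]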
"""
$SIGNATURES
Calculate the marginal distribution for a clique over subsetVarIds.
#FIXME update to support manifolds
"""
function calculateMarginalCliqueLikelihood(vardict, Σ, varindxs, subsetVarIds)
μₘ = Float64[]
for lbl in subsetVarIds
append!(μₘ, vardict[lbl].val)
end
Aidx = collectIdx(varindxs, subsetVarIds)
Σₘ = Σ[Aidx, Aidx]
return createMvNormal(μₘ, Σₘ)
end
"""
$SIGNATURES
"""
function calculateCoBeliefMessage(soldict, Σ, flatvars, separators, frontals)
Aidx = IIF.collectIdx(flatvars, separators)
Cidx = IIF.collectIdx(flatvars, frontals)
#marginalize separators
A = Σ[Aidx, Aidx]
#marginalize frontals
C = Σ[Cidx, Cidx]
# cross
B = Σ[Aidx, Cidx]
Σₘ = deepcopy(A)
if length(separators) == 0
return (varlbl = Symbol[], μ = Float64[], Σ = Matrix{Float64}(undef, 0, 0))
elseif length(separators) == 1
# create messages
return (varlbl = deepcopy(separators), μ = soldict[separators[1]].val, Σ = A)
elseif length(separators) == 2
A = Σₘ[1, 1]
C = Σₘ[2, 2]
B = Σₘ[1, 2]
#calculate covariance between separators
ΣA_B = A - B * inv(C) * B'
# create messages
m2lbl = deepcopy(separators)
m2cov = isa(ΣA_B, Matrix) ? ΣA_B : fill(ΣA_B, 1, 1)
m2val = soldict[m2lbl[2]].val - soldict[m2lbl[1]].val
return (varlbl = m2lbl, μ = m2val, Σ = m2cov)
else
error("Messages with more than 2 seperators are not supported yet")
end
end
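# The two-separator branch above computes the conditional (Schur complement) covariance
#   Σ_{A|C} = A - B C⁻¹ Bᵀ
# a scalar sketch with illustrative values:
# Σₘ = [1.0 0.3; 0.3 0.5]
# Σₘ[1, 1] - Σₘ[1, 2] * inv(Σₘ[2, 2]) * Σₘ[1, 2]  # == 0.82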
## ================================================================================================
## Parametric utils
## ================================================================================================
## SANDBOX of useful development functions to be cleaned up
"""
$SIGNATURES
Update the parametric solver data value and covariance.
"""
function updateSolverDataParametric! end
function updateSolverDataParametric!(
vnd::VariableNodeData,
val::AbstractArray,
cov::AbstractMatrix,
)
# fill in the variable node data value
vnd.val[1] = val
#calculate and fill in covariance
vnd.bw .= cov
return vnd
end
function updateSolverDataParametric!(
v::DFGVariable,
val::AbstractArray,
cov::AbstractMatrix;
solveKey::Symbol = :parametric,
)
vnd = getSolverData(v, solveKey)
return updateSolverDataParametric!(vnd, val, cov)
end
"""
$SIGNATURES
Add parametric solver to fg, batch solve using [`solveGraphParametricOptim`](@ref) and update fg.
"""
function solveGraphParametricOptim!(
fg::AbstractDFG;
init::Bool = true,
solveKey::Symbol = :parametric, # FIXME, moot since only :parametric used for parametric solves
initSolveKey::Symbol = :default,
verbose = false,
kwargs...
)
# make sure variables has solverData, see #1637
makeSolverData!(fg; solveKey)
if !(:parametric in fg.solverParams.algorithms)
addParametricSolver!(fg; init = init)
elseif init
initParametricFrom!(fg, initSolveKey; parkey=solveKey)
end
vardict, result, varIds, Σ = solveGraphParametricOptim(fg; verbose, kwargs...)
updateParametricSolution!(fg, vardict)
return vardict, result, varIds, Σ
end
"""
$SIGNATURES
Initialize the parametric solver data from a different solution in `fromkey`.
DevNotes
- TODO, keyword `force` not wired up yet.
"""
function initParametricFrom!(
fg::AbstractDFG,
fromkey::Symbol = :default;
parkey::Symbol = :parametric,
onepoint = false,
force::Bool = false,
)
#
if onepoint
for v in getVariables(fg)
fromvnd = getSolverData(v, fromkey)
dims = getDimension(v)
getSolverData(v, parkey).val[1] = fromvnd.val[1]
getSolverData(v, parkey).bw[1:dims, 1:dims] = LinearAlgebra.I(dims)
end
else
for var in getVariables(fg)
dims = getDimension(var)
μ, Σ = calcMeanCovar(var, fromkey)
getSolverData(var, parkey).val[1] = μ
getSolverData(var, parkey).bw[1:dims, 1:dims] = Σ
end
end
end
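# Usage sketch:
# initParametricFrom!(fg, :default)                  # fit mean and covariance from :default
# initParametricFrom!(fg, :default; onepoint = true) # copy one point, identity covariance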
"""
$SIGNATURES
Add the parametric solveKey to all the variables in fg if it doesn't exist.
"""
function addParametricSolver!(fg; init = true)
if !(:parametric in fg.solverParams.algorithms)
push!(fg.solverParams.algorithms, :parametric)
foreach(
v -> IIF.setDefaultNodeDataParametric!(v, getVariableType(v); initialized = false),
getVariables(fg),
)
if init
initParametricFrom!(fg)
end
else
error("parametric solvekey already exists")
end
return nothing
end
"""
$SIGNATURES
Update the fg from the solution in vardict and add MeanMaxPPE (all fields just the mean). Useful for plotting.
"""
function updateParametricSolution!(sfg, vardict::AbstractDict; solveKey::Symbol = :parametric)
for (v, val) in vardict
vnd = getSolverData(getVariable(sfg, v), solveKey)
# Update the variable node data value and covariance
updateSolverDataParametric!(vnd, val.val, val.cov)
#fill in ppe as mean
Xc = collect(getCoordinates(getVariableType(sfg, v), val.val))
ppe = MeanMaxPPE(solveKey, Xc, Xc, Xc)
getPPEDict(getVariable(sfg, v))[solveKey] = ppe
end
end
function updateParametricSolution!(fg, M, labels::AbstractArray{Symbol}, vals, Σ; solveKey::Symbol = :parametric)
if !isnothing(Σ)
covars = getComponentsCovar(M, Σ)
end
for (i, (v, val)) in enumerate(zip(labels, vals))
vnd = getSolverData(getVariable(fg, v), solveKey)
covar = isnothing(Σ) ? vnd.bw : covars[i]
# Update the variable node data value and covariance
updateSolverDataParametric!(vnd, val, covar)#FIXME add cov
#fill in ppe as mean
Xc = collect(getCoordinates(getVariableType(fg, v), val))
ppe = MeanMaxPPE(solveKey, Xc, Xc, Xc)
getPPEDict(getVariable(fg, v))[solveKey] = ppe
end
end
function createMvNormal(val, cov)
#TODO do something better for properly formed covariance, but for now just a hack...FIXME
if all(diag(cov) .> 0.001) && isapprox(cov, transpose(cov); rtol = 1e-4)
return MvNormal(val, Symmetric(cov))
else
@error("Covariance matrix error", cov)
# return nothing # FIXME, blanking nothing during #459 consolidation
return MvNormal(val, ones(length(val)))
end
end
function createMvNormal(v::DFGVariable, key = :parametric)
if key == :parametric
vnd = getSolverData(v, :parametric)
dims = vnd.dims
return createMvNormal(vnd.val[1:dims, 1], vnd.bw[1:dims, 1:dims])
else
@warn "Trying MvNormal Fit, replace with PPE fits in future"
return fit(MvNormal, getSolverData(v, key).val)
end
end
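# Sketch of both branches with illustrative values:
# createMvNormal([0.0, 1.0], [0.04 0.0; 0.0 0.09]) # well formed, returns the MvNormal
# createMvNormal([0.0, 1.0], [0.0 0.0; 0.0 0.09])  # degenerate, logs an error and falls back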
#TODO this is still experimental and a POC
function getInitOrderParametric(fg; startIdx::Symbol = lsfPriors(fg)[1])
order = DFG.traverseGraphTopologicalSort(fg, startIdx)
filter!(order) do l
return isVariable(fg, l)
end
return order
end
function autoinitParametricOptim!(
fg,
varorderIds = getInitOrderParametric(fg);
reinit = false,
algorithm = Optim.NelderMead,
algorithmkwargs = (initial_simplex = Optim.AffineSimplexer(0.025, 0.1),),
kwargs...
)
@showprogress for vIdx in varorderIds
autoinitParametricOptim!(fg, vIdx; reinit, algorithm, algorithmkwargs, kwargs...)
end
return nothing
end
function autoinitParametricOptim!(dfg::AbstractDFG, initme::Symbol; kwargs...)
return autoinitParametricOptim!(dfg, getVariable(dfg, initme); kwargs...)
end
function autoinitParametricOptim!(
dfg::AbstractDFG,
xi::DFGVariable;
solveKey = :parametric,
reinit::Bool = false,
kwargs...,
)
#
initme = getLabel(xi)
vnd = getSolverData(xi, solveKey)
# don't initialize a variable more than once
if reinit || !isInitialized(xi, solveKey)
# frontals - initme
    # separators - initfrom
initfrom = ls2(dfg, initme)
filter!(initfrom) do vl
return isInitialized(dfg, vl, solveKey)
end
vardict, result, flatvars, Σ =
solveConditionalsParametric(dfg, [initme], initfrom; kwargs...)
val, cov = vardict[initme]
updateSolverDataParametric!(vnd, val, cov)
vnd.initialized = true
#fill in ppe as mean
Xc = collect(getCoordinates(getVariableType(xi), val))
ppe = MeanMaxPPE(:parametric, Xc, Xc, Xc)
getPPEDict(xi)[:parametric] = ppe
# updateVariableSolverData!(dfg, xi, solveKey, true; warn_if_absent=false)
# updateVariableSolverData!(dfg, xi.label, getSolverData(xi, solveKey), :graphinit, true, Symbol[]; warn_if_absent=false)
else
result = nothing
end
return result#isInitialized(xi, solveKey)
end
#
"""
$SIGNATURES
Construct (new) subgraph and draw the subgraph associated with clique `frontalSym::Symbol`.
Notes
- See `drawGraphCliq`/`writeGraphPdf` for details on keyword options.
Related
drawGraphCliq, spyCliqMat, drawTree, buildCliqSubgraphUp, buildSubgraphFromLabels!
"""
function drawCliqSubgraphUpMocking(
fgl::G,
treel::AbstractBayesTree,
frontalSym::Symbol;
show::Bool = true,
filepath::String = "/tmp/caesar/random/cliq_sfg.dot",
engine::AS1 = "sfdp",
viewerapp::AS2 = "xdot",
) where {G <: AbstractDFG, AS1 <: AbstractString, AS2 <: AbstractString}
#
sfg = buildCliqSubgraphUp(fgl, treel, frontalSym)
drawGraph(sfg; show = show, viewerapp = viewerapp, engine = engine, filepath = filepath)
return nothing
end
"""
$SIGNATURES
Draw and show the factor graph `<:AbstractDFG` via system graphviz and xdot app.
Notes
- Requires system install on Linux of `sudo apt-get install xdot`
- Should not be calling outside programs.
- Needs a long term solution.
- DFG's `toDotFile` is a better solution -- view with the `xdot` application.
- also try `engine={"sfdp","fdp","dot","twopi","circo","neato"}`
Notes:
- Calls external system application `xdot` to read the `.dot` file format
- ```toDot(fg,file=...); @async run(`xdot file.dot`)```
Related
drawGraphCliq, [`drawTree`](@ref), printCliqSummary, spyCliqMat
"""
function drawGraph(
fgl::AbstractDFG;
viewerapp::AbstractString = "xdot",
filepath::AbstractString = "/tmp/caesar/random/fg.dot",
engine::AbstractString = "neato", #sfdp
show::Bool = true,
)
#
mkpath(dirname(filepath))
# mkpath(joinpath( "/", (split(filepath, '/')[1:(end-1)])...) )
@debug "Writing factor graph file"
fext = split(filepath, '.')[end]
fpwoext = filepath[1:(end - length(fext) - 1)] # split(filepath, '.')[end-1]
dotfile = fpwoext * ".dot"
# create the dot file
DFG.toDotFile(fgl, dotfile)
try
# run(`$(engine) $(dotfile) -T$(fext) -o $(filepath)`)
show ? (@async run(`$(viewerapp) $(dotfile)`)) : nothing
catch e
@warn "not able to show $(filepath) with viewerapp=$(viewerapp). Exception e=$(e)"
end
return nothing
end
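# Usage sketch (requires graphviz and xdot installed on the system):
# fg = generateGraph_Kaess()
# drawGraph(fg; show = true, engine = "sfdp", filepath = "/tmp/caesar/random/fg.dot")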
"""
$SIGNATURES
Draw the factor graph from a clique state machine history at a particular step as pdf and show.
Related
drawCliqSubgraphUpMocking, drawGraph, drawTree
"""
function drawGraphCliq(
hists::Dict{Int, <:Tuple},
step::Int,
tree::AbstractBayesTree,
frontal::Symbol;
show::Bool = true,
)
#
cid = getId(getClique(tree, frontal))
cfg = hists[cid][step][4].cliqSubFg
return drawGraph(cfg; show = show)
end
"""
$SIGNATURES
Print basic statistics about a clique variables and factors.
Related
printCliqHistorySummary
"""
function printCliqSummary(
dfg::G,
cliq::TreeClique,
logger = ConsoleLogger(),
) where {G <: AbstractDFG}
#
frtl = getCliqFrontalVarIds(cliq)
seps = getCliqSeparatorVarIds(cliq)
fcts = getCliqFactorIdsAll(cliq)
isinit = map(x -> isInitialized(dfg, x), [frtl; seps])
# infdim = map(x->getVariableInferredDim(dfg, x), [frtl;seps])
with_logger(logger) do
@info "Clique $(getId(cliq)) summary:"
@info " num frontals: $(length(frtl))"
@info " num separators: $(length(seps))"
@info " num factors: $(length(fcts))"
@info " num initialized: $(sum(isinit)) of $(length(isinit))"
@info ""
@info " frontals: $(frtl)"
@info " separator: $(seps)"
@info " factors: $(fcts)"
@info " init'ed: $(Int.(isinit))"
# @info " infr'dims: $(infdim)"
end
return nothing
end
#
export shrinkFactorGraph,
getAllTrees,
nnzFrontals,
nnzClique,
nnzTree,
nnzSqrtInfoMatrix,
getTreeCost_01,
getTreeCost_02
"""
$SIGNATURES
Prunes factor graph to keep up to `upto` number of variables.
Warning: uses functions that are outside of IncrementalInference (e.g.,
`isSolvable()`), so will probably need to place this elsewhere.
"""
function shrinkFactorGraph(fg; upto::Int = 6)
fgs = deepcopy(fg)
delVars = filter(x -> isSolvable(getVariable(fgs, x)) == 0, ls(fgs))
todel = setdiff(lsf(fgs; solvable = 0), lsf(fgs; solvable = 1))
delFcts = intersect(lsf(fgs), todel)
allMags = filter(x -> :MAGNETOMETER in getTags(getFactor(fgs, x)), lsfPriors(fgs))
union!(delFcts, filter(x -> length(ls(fgs, x)) == 0, allMags))
union!(delVars, (ls(fgs, r"x\d") |> sortDFG)[upto:end])
union!(delFcts, map(x -> ls(fgs, x), delVars)...)
map(x -> deleteFactor!(fgs, x), delFcts)
map(x -> deleteVariable!(fgs, x), delVars)
return fgs
end
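# Usage sketch:
# fgSmall = shrinkFactorGraph(fg; upto = 6) # prune unsolvables and later r"x\d" variables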
"""
$SIGNATURES
Deterministically get all trees associated with all possible variable orderings
of `dfg` factor graph. Returns a dictionary with (tree, ordering, nnz) tuples.
Warning: factorial number of possibilities, so use carefully!
"""
function getAllTrees(fg::AbstractDFG)
# dfg = generateCanonicalFG_Kaess(graphinit=false)
variables = ls(fg)
orderings = permutations(variables) |> collect
  # Dimensionality check to make sure we do not break your computer.
  max_dimension = 11 # something reasonable (11! ~ 40M orderings).
  if length(variables) > max_dimension
    throw(ArgumentError("dfg is too big: the number of variable orderings grows factorially."))
  end
end
# Produce a tree for each ordering, and store in dictionary.
all_trees = Dict{Int, Tuple{BayesTree, Vector{Symbol}, Float64}}()
for i = 1:length(orderings)
tree = buildTreeReset!(fg, orderings[i])
nnz = nnzTree(tree)
all_trees[i] = (tree, orderings[i], nnz)
end
return all_trees
end
"""
$SIGNATURES
Get number of non-zero entries for clique's frontal components. Num of non-zero
matrix entries is just the fully dense upper triangular part of square matrix.
"""
function nnzFrontals(dimension)
if dimension == 1
return 1
else
# Solved recurrence for n + (n-1) + ... + 2 + 1.
return (dimension * (dimension + 1)) / 2
end
end
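# Sketch: a clique with 4 frontal dimensions has a dense upper triangle of
# 4 * (4 + 1) / 2 entries:
# nnzFrontals(4) # == 10.0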
"""
$SIGNATURES
Get total number of non-zero entries for a clique. Num of non-zero matrix
entries is the fully dense upper triangular part (frontals) plus the
(frontal x separator)-size rectangle.
"""
function nnzClique(clique)
frontal_dim = length(getCliqFrontalVarIds(clique))
separator_dim = length(getCliqSeparatorVarIds(clique))
return nnzFrontals(frontal_dim) + (frontal_dim * separator_dim)
end
"""
$SIGNATURES
Get total number of non-zero entries for a Bayes tree. Num of non-zero matrix
entries is the sum of all non-zero entries for each individual clique.
"""
function nnzTree(tree::AbstractBayesTree)
nnzTot = 0
for (cliqid, cliq) in tree.cliques
nnzTot += nnzClique(cliq)
end
return nnzTot
end
"""
$SIGNATURES
Get total number of non-zero entries for a factor graph's upper triangular
square root information matrix, i.e., R matrix in A = Q[R 0]^T, using the QR's
factorization algorithm variable ordering.
"""
function nnzSqrtInfoMatrix(A::Matrix)
  q, r, p = qr(A, (v"1.7" <= VERSION ? ColumnNorm() : Val(true)))
r .= abs.(r)
nz = 1e-5 .< r
r[nz] .= 1
return sum(nz)
end
"""
$SIGNATURES
Simple cost function for ranking the structure of a Bayes tree. Weighting:
cost = (max tree depth) * (max clique dimension)^alpha
"""
function getTreeCost_01(tree::AbstractBayesTree; alpha::Float64 = 1.0)
cliqs = tree.cliques |> values |> collect
maxdepth = map(x -> getCliqDepth(tree, x) + 1, cliqs) |> maximum
maxdim = length.(map(x -> getCliqVarIdsAll(x), cliqs)) |> maximum
return maxdepth * (maxdim^alpha)
end
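# Usage sketch, comparing the cost of trees built from different variable orderings:
# tree = buildTreeReset!(fg)
# getTreeCost_01(tree)              # (max depth) * (max clique dimension)
# getTreeCost_01(tree; alpha = 2.0) # penalize wide cliques more strongly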
"""
$SIGNATURES
Cost function for ranking the structure of a Bayes tree, putting an emphasis on
wider but shallower trees by penalizing the average number of siblings.
Weighting:
cost = 1/(total num of children / num of parents) *
        (max tree depth) * (max clique dimension)^alpha
"""
function getTreeCost_02(tree::AbstractBayesTree; alpha::Float64 = 1.0)
# Frontal and number of children.
ARR = Tuple{Symbol, Int}[]
for (cliqid, vex) in tree.cliques
afrtl = getCliqFrontalVarIds(getClique(tree, cliqid))[1]
numch = length(getChildren(tree, afrtl))
push!(ARR, (afrtl, numch))
end
numParents = filter(x -> 0 < x[2], ARR) |> length
totalNumChildren = (x -> x[2]).(ARR) |> sum
return getTreeCost_01(tree; alpha = alpha) / (totalNumChildren / numParents)
end
## ============================================================================
# Tools for checking the numerical performance of the solve
function mmdSolveKey(
vari::DFGVariable,
refKey::Symbol,
tstKey::Symbol;
bw::AbstractVector{<:Real} = [0.001;],
)
#
refVal = getBelief(vari, refKey)
tstVal = getBelief(vari, tstKey)
# calc mmd distance
return mmd(refVal, tstVal, getVariableType(vari); bw = bw)
end
# vari = getVariable(fg, :x1)
# kys = filter(x->!(x in [:graphinit;:default]), listSolveKeys(fg) |> collect |>sortDFG)
# X1_dist_0 = kys .|> x->mmdSolveKey(vari, :default_0, x)
# kyD = [(kys[i],kys[i+1]) for i in 1:length(kys)-1]
# X1_dist_D = kyD .|> x->mmdSolveKey(vari, x[1], x[2])
#
export calcFactorResidual
function approxConvBelief(
dfg::AbstractDFG,
fc::DFGFactor,
target::Symbol,
measurement::AbstractVector = Tuple[];
solveKey::Symbol = :default,
N::Int = length(measurement),
nullSurplus::Real = 0,
skipSolve::Bool = false,
)
#
v_trg = getVariable(dfg, target)
N = N == 0 ? getNumPts(v_trg; solveKey) : N
# approxConv should push its result into duplicate memory destination, NOT the variable.VND.val itself. ccw.varValsAll always points directly to variable.VND.val
# points and infoPerCoord
pts, ipc = evalFactor(
dfg,
fc,
v_trg.label,
measurement;
solveKey,
N,
skipSolve,
nullSurplus
)
len = length(ipc)
mask = 1e-14 .< abs.(ipc)
partl = collect(1:len)[mask]
# is the convolution infoPerCoord full or partial
res = if sum(mask) == len
# not partial
manikde!(getManifold(getVariable(dfg, target)), pts; partial = nothing)
else
# is partial
manikde!(getManifold(getVariable(dfg, target)), pts; partial = partl)
end
return res
end
approxConv(w...; kw...) = getPoints(approxConvBelief(w...; kw...), false)
"""
$SIGNATURES
Calculate the sequential series of convolutions in the order listed by `fctLabels`, starting from the
value already contained in the first variable.
Notes
- `target` must be a variable.
- The ultimate `target` variable must be given to allow path discovery through n-ary factors.
- Fresh starting point will be used if first element in `fctLabels` is a unary `<:AbstractPrior`.
- This function will not change any values in `dfg`, and may be slightly slower in order to meet this requirement.
- pass in `tfg` to get a recoverable result of all convolutions in the chain.
- `setPPE` and `setPPEmethod` can be used to store PPE information in temporary `tfg`
DevNotes
- TODO strong requirement that this function is super efficient on single factor/variable case!
- FIXME must consolidate with `accumulateFactorMeans`
- TODO `solveKey` not fully wired up everywhere yet
- tfg gets all the solveKeys inside the source `dfg` variables
- TODO add a approxConv on PPE option
- Consolidate with [`accumulateFactorMeans`](@ref), `approxConvBinary`
Related
[`approxDeconv`](@ref), `findShortestPathDijkstra`
"""
function approxConvBelief(
dfg::AbstractDFG,
from::Symbol,
target::Symbol,
measurement::AbstractVector = Tuple[];
solveKey::Symbol = :default,
N::Int = length(measurement),
tfg::AbstractDFG = LocalDFG(;solverParams=getSolverParams(dfg)),
setPPEmethod::Union{Nothing, Type{<:AbstractPointParametricEst}} = nothing,
setPPE::Bool = setPPEmethod !== nothing,
path::AbstractVector{Symbol} = Symbol[],
skipSolve::Bool = false,
nullSurplus::Real = 0,
)
#
# @assert isVariable(dfg, target) "approxConv(dfg, from, target,...) where `target`=$target must be a variable in `dfg`"
if from in ls(dfg, target)
# direct request
# TODO avoid this allocation for direct cases ( dfg, :x1x2f1, :x2[/:x1] )
path = Symbol[from; target]
varLbls = Symbol[target;]
else
# must first discover shortest factor path in dfg
# TODO DFG only supports LocalDFG.findShortestPathDijkstra at the time of writing (DFG v0.10.9)
path = 0 == length(path) ? findShortestPathDijkstra(dfg, from, target) : path
@assert path[1] == from "sanity check failing for shortest path function"
# list of variables
fctMsk = isFactor.(dfg, path)
# which factors in the path
fctLbls = path[fctMsk]
    # all variables adjacent to the factors along the path
varLbls = union(lsf.(dfg, fctLbls)...)
neMsk = exists.(tfg, varLbls) .|> x -> xor(x, true)
# put the non-existing variables into the temporary graph `tfg`
# bring all the solveKeys too
for v in getVariable.(dfg, varLbls[neMsk])
addVariable!(tfg, v.label, getVariableType(v))
end
# variables adjacent to the shortest path should be initialized from dfg
setdiff(varLbls, path[xor.(fctMsk, true)]) .|>
x -> initVariable!(tfg, x, getBelief(dfg, x))
end
# find/set the starting point
idxS = 1
pts = if varLbls[1] == from
# starting from a variable
getBelief(dfg, varLbls[1]) |> getPoints
else
# chain would start one later
idxS += 1
# get the factor
fct0 = getFactor(dfg, from)
# get the Matrix{<:Real} of projected points
pts1Bel = approxConvBelief(
dfg,
fct0,
path[2],
measurement;
solveKey,
N,
skipSolve,
nullSurplus,
)
if length(path) == 2
return pts1Bel
end
getPoints(pts1Bel)
end
  # did not return early, so continue the convolution chain on the temporary graph `tfg`
initVariable!(tfg, varLbls[1], pts)
# use in combination with setPPE and setPPEmethod keyword arguments
ppemethod = setPPEmethod === nothing ? MeanMaxPPE : setPPEmethod
!setPPE ? nothing : setPPE!(tfg, varLbls[1], solveKey, ppemethod)
# do chain of convolutions
for idx = idxS:length(path)
if fctMsk[idx]
# this is a factor path[idx]
fct = getFactor(dfg, path[idx])
addFactor!(tfg, fct)
ptsBel = approxConvBelief(tfg, fct, path[idx + 1]; solveKey, N, skipSolve)
initVariable!(tfg, path[idx + 1], ptsBel)
!setPPE ? nothing : setPPE!(tfg, path[idx + 1], solveKey, ppemethod)
end
end
# return target variable values
return getBelief(tfg, target)
end
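# Usage sketch, convolving the belief from :x1 through connecting factors to :x3
# (the variable labels are illustrative):
# bel = approxConvBelief(fg, :x1, :x3)
# pts = approxConv(fg, :x1, :x3) # same chain, returning the raw points instead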
"""
$(SIGNATURES)
Compute the proposal belief on `target` through `fct`, representing some constraint in the factor graph.
Always returns a belief over the full dimension of the variable node -- partial constraints will only influence a subset of the variable dimensions.
The remaining dimensions will keep pre-existing variable values.
Notes
- fulldim is true when "rank-deficient" -- TODO swap to false (or even float)
"""
function calcProposalBelief(
dfg::AbstractDFG,
fct::DFGFactor,
target::Symbol,
measurement::AbstractVector = Tuple[];
N::Int = length(measurement),
solveKey::Symbol = :default,
nullSurplus::Real = 0,
dbg::Bool = false,
)
#
# assuming it is properly initialized TODO
proposal = approxConvBelief(dfg, fct, target, measurement; solveKey, N, nullSurplus)
# return the proposal belief and inferdim, NOTE likely to be changed
return proposal
end
# specifically the PartialPriorPassThrough dispatch
function calcProposalBelief(
dfg::AbstractDFG,
fct::DFGFactor{<:CommonConvWrapper{<:PartialPriorPassThrough}},
target::Symbol,
measurement::AbstractVector = Tuple[];
N::Int = length(measurement),
solveKey::Symbol = :default,
nullSurplus::Real = 0,
dbg::Bool = false,
)
#
# density passed through directly from PartialPriorPassThrough.Z
fctFnc = getFactorType(fct)
proposal = fctFnc.Z.heatmap.densityFnc
# in case of partial, place the proposal into larger marginal/partial MKD
proposal_ = if isPartial(fctFnc)
# oldbel = getBelief(dfg, target, solveKey)
varType = getVariableType(dfg, target)
M = getManifold(varType)
u0 = getPointIdentity(varType)
# replace(oldbel, proposal)
antimarginal(M, u0, proposal, Int[fctFnc.partial...])
else
proposal
end
# return the proposal belief and inferdim, NOTE likely to be changed
return proposal_
end
"""
$SIGNATURES
Compute the proposals of a destination vertex for each of `factors` and place the result
as belief estimates in both `dens` and `partials` respectively.
Notes
- TODO: also return if proposals were "dimension-deficient" (aka ~rank-deficient).
"""
function proposalbeliefs!(
dfg::AbstractDFG,
destlbl::Symbol,
factors::AbstractVector, #{<:DFGFactor},
dens::AbstractVector{<:ManifoldKernelDensity},
measurement::AbstractVector = Tuple[];
solveKey::Symbol = :default,
N::Int = getSolverParams(dfg).N, #maximum([length(getPoints(getBelief(dfg, destlbl, solveKey))); getSolverParams(dfg).N]),
# how much nullSurplus should be added, see #1517
nullSurplusAdd::Real = getSolverParams(dfg).nullSurplusAdd,
dbg::Bool = false,
)
#
# populate the full and partial dim containers
ipcs = Vector{Vector{Float64}}(undef, length(factors))
# workaround for IIF #1517, additional entropy for sibling factors to target variable if one has multihypo
nullSrp = zeros(length(factors))
if any(isMultihypo.(factors))
# relative sibling factors get nullSurplus
for (i, f) in enumerate(factors)
# don't add additional nullSurplus, since its already being done in ExplicitDiscreteMarg!!! FIXME refactor to common solution
if isa(getFactorType(f), AbstractRelative) && !isMultihypo(f)
nullSrp[i] = nullSurplusAdd
end
end
end
vardim = getDimension(getVariable(dfg, destlbl))
# get a proposal belief from each factor connected to destlbl
for (count, fct) in enumerate(factors)
ccwl = _getCCW(fct)
# need way to convey partial information
# determine if evaluation is "dimension-deficient" solvable dimension
# FIXME, update to infoPerCoord
fct_ipc = ones(vardim) # getFactorSolvableDim(dfg, fct, destlbl, solveKey)
# convolve or passthrough to get a new proposal
propBel_ = calcProposalBelief(
dfg,
fct,
destlbl,
measurement;
N,
dbg,
solveKey,
nullSurplus = nullSrp[count],
)
# partial density
propBel = if isPartial(ccwl)
pardims = _getDimensionsPartial(ccwl)
@assert [getFactorType(fct).partial...] == [pardims...] "partial dims error $(getFactorType(fct).partial) vs $pardims"
AMP.marginal(propBel_, Int[pardims...])
else
propBel_
end
push!(dens, propBel)
ipcs[count] = fct_ipc
end
# len = maximum(length.(ipcs))
ipc = zeros(vardim)
for _ipc in ipcs
ipc .+= _ipc
end
return ipc
end
# group partial dimension factors by selected dimensions -- i.e. [(1,)], [(1,2),(1,2)], [(2,);(2;)]
# WIP, see `_buildGraphByFactorAndTypes!` where pts are full MKD Beliefs, following #1351
# Legacy use in RoMEPlotting: plotFactor
# function approxConvBelief(fct::AbstractFactorRelative,
# varTypes::Union{<:Tuple,<:AbstractVector{<:InstanceType{T}}},
# mkds::Union{<:Tuple,<:AbstractVector{<:InstanceType{T}}};
# tfg::AbstractDFG=_buildGraphByFactorAndTypes!(fct,)
# ) where {T <: InferenceVariable}
# #
# end
#
"""
$SIGNATURES
Determine the variable ordering used to construct both the Bayes Net and Bayes/Junction/Elimination tree.
Notes
- Heuristic method -- equivalent to QR or Cholesky.
- Uses the BLAS `qr` function to extract the variable ordering.
- **NOT USING SUITE SPARSE** -- which would require a commercial license.
- For now `A::Array{<:Number,2}` as a dense matrix.
- Columns of `A` are system variables, rows are factors (without differentiating between partial or full factor).
- default is to use `solvable=1` and ignore factors and variables that might be used for dead reckoning or similar.
Future
- TODO: `A` should be sparse data structure (when we exceed 10'000 var dims)
- TODO: the incidence matrix is rectangular and the adjacency matrix is square.
"""
function getEliminationOrder(
dfg::AbstractDFG;
ordering::Symbol = :qr,
solvable::Int = 1,
constraints::Vector{Symbol} = Symbol[],
)
#
@assert 0 == length(constraints) || ordering == :ccolamd "Must use ordering=:ccolamd when trying to use constraints"
# Get the sparse adjacency matrix, variable, and factor labels
adjMat, permuteds, permutedsf = DFG.getBiadjacencyMatrix(dfg; solvable = solvable)
# adjMat, permuteds, permutedsf = DFG.getAdjacencyMatrixSparse(dfg, solvable=solvable)
# Create dense adjacency matrix
p = Int[]
if ordering == :chol
# hack for dense matrix....
A = adjMat
p = cholesky(Matrix(A'A), Val(true)).piv
@warn "check that cholesky ordering is not reversed -- basically how much fill in (separator size) are you seeing??? Long skinny chains in tree is bad."
elseif ordering == :qr
# hack for dense matrix....
A = Array(adjMat)
# this is the default
q, r, p = qr(A, (v"1.7" <= VERSION ? ColumnNorm() : Val(true)))
p .= p |> reverse
elseif ordering == :ccolamd
cons = zeros(Int, length(adjMat.colptr) - 1)
cons[findall(x -> x in constraints, permuteds)] .= 1
p = _ccolamd(adjMat, cons)
# cons = zeros(SuiteSparse_long, length(adjMat.colptr) - 1)
# cons[findall(x -> x in constraints, permuteds)] .= 1
# p = Ccolamd.ccolamd(adjMat, cons)
@warn "Integration via AMD.ccolamd under development and replaces pre-Julia 1.9 direct ccall approach." maxlog=5
else
@error("getEliminationOrder -- cannot do the requested ordering $(ordering)")
end
# Return the variable ordering that we should use for the Bayes map
# reverse order checked in #475 and #499
return permuteds[p]
end
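# Usage sketch (constraints require ordering=:ccolamd; the label :x3 is illustrative):
# vo = getEliminationOrder(fg; ordering = :ccolamd, constraints = [:x3])
# tree = buildTreeReset!(fg, vo)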
# lets create all the vertices first and then deal with the elimination variables thereafter
function addBayesNetVerts!(dfg::AbstractDFG, elimOrder::Array{Symbol, 1})
#
for pId in elimOrder
vert = DFG.getVariable(dfg, pId)
if getSolverData(vert).BayesNetVertID == nothing ||
getSolverData(vert).BayesNetVertID == :_null # Special serialization case of nothing
@debug "[AddBayesNetVerts] Assigning $pId.data.BayesNetVertID = $pId"
getSolverData(vert).BayesNetVertID = pId
else
@warn "addBayesNetVerts -- Something is wrong, variable '$pId' should not have an existing Bayes net reference to '$(getSolverData(vert).BayesNetVertID)'"
end
end
end
function addConditional!(dfg::AbstractDFG, vertId::Symbol, Si::Vector{Symbol})
#
bnv = DFG.getVariable(dfg, vertId)
bnvd = getSolverData(bnv)
bnvd.separator = Si
for s in Si
push!(bnvd.BayesNetOutVertIDs, s)
end
return nothing
end
function addChainRuleMarginal!(dfg::AbstractDFG, Si::Vector{Symbol})
#
  genmarg = GenericMarginal()
Xi = map(v -> DFG.getVariable(dfg, v), Si)
# @info "adding marginal to"
# for x in Xi
# @info "x.index=",x.index
# end
addFactor!(dfg, Xi, genmarg; graphinit = false, suppressChecks = true)
return nothing
end
function rmVarFromMarg(dfg::AbstractDFG, fromvert::DFGVariable, gm::Vector{DFGFactor})
#
@debug " - Removing $(fromvert.label)"
for m in gm
@debug "Looking at $(m.label)"
for n in listNeighbors(dfg, m) #x1, x2
if n == getLabel(fromvert) # n.label ==? x1
@debug " - Breaking link $(m.label)->$(fromvert.label)..."
@debug " - Original links: $(DFG.ls(dfg, m))"
remvars = setdiff(DFG.ls(dfg, m), [fromvert.label])
@debug " - New links: $remvars"
DFG.deleteFactor!(dfg, m) # Remove it
if length(remvars) > 0
@debug "$(m.label) still has links to other variables, readding it back..."
addFactor!(
dfg,
remvars,
_getCCW(m).usrfnc!;
graphinit = false,
suppressChecks = true,
)
else
@debug "$(m.label) doesn't have any other links, not adding it back..."
end
end
end
# Added back in chain rule.
if DFG.exists(dfg, m) && length(listNeighbors(dfg, m)) <= 1
@warn "removing vertex id=$(m.label)"
DFG.deleteFactor!(dfg, m)
end
end
return nothing
end
function buildBayesNet!(dfg::AbstractDFG, elimorder::Vector{Symbol}; solvable::Int = 1)
#
# addBayesNetVerts!(dfg, elimorder)
for v in elimorder
@debug """
Eliminating $(v)
===============
"""
# which variable are we eliminating
# all factors adjacent to this variable
fi = Symbol[]
Si = Symbol[]
gm = DFGFactor[]
vert = DFG.getVariable(dfg, v)
for fctId in listNeighbors(dfg, vert; solvable = solvable)
fct = DFG.getFactor(dfg, fctId)
if (getSolverData(fct).eliminated != true)
push!(fi, fctId)
for sepNode in listNeighbors(dfg, fct; solvable = solvable)
# TODO -- validate !(sepNode.index in Si) vs. older !(sepNode in Si)
if sepNode != v && !(sepNode in Si) # Symbol comparison!
push!(Si, sepNode)
end
end
getSolverData(fct).eliminated = true
end
if typeof(_getCCW(fct)) == CommonConvWrapper{GenericMarginal}
push!(gm, fct)
end
end
if v != elimorder[end]
addConditional!(dfg, v, Si)
# not yet inserting the new prior p(Si) back into the factor graph
end
# mark variable
getSolverData(vert).eliminated = true
# TODO -- remove links from current vertex to any marginals
rmVarFromMarg(dfg, vert, gm)
#add marginal on remaining variables... ? f(xyz) = f(x | yz) f(yz)
# new function between all Si (round the outside, right the outside)
length(Si) > 0 && addChainRuleMarginal!(dfg, Si)
end
return nothing
end
# New factor interface, something perhaps like this
export calcFactorResidualTemporary
# NOTE, the full concrete type is recovered in reconstFactorData
getFactorOperationalMemoryType(dfg::SolverParams) = CommonConvWrapper
# difficult type piracy case needing both types NoSolverParams and CommonConvWrapper.
getFactorOperationalMemoryType(dfg::NoSolverParams) = CommonConvWrapper
getManifold(fct::DFGFactor{<:CommonConvWrapper}) = getManifold(_getCCW(fct))
function _getDimensionsPartial(ccw::CommonConvWrapper)
# @warn "_getDimensionsPartial not ready for use yet"
return ccw.partialDims
end
function _getDimensionsPartial(data::GenericFunctionNodeData)
return _getCCW(data) |> _getDimensionsPartial
end
_getDimensionsPartial(fct::DFGFactor) = _getDimensionsPartial(_getCCW(fct))
function _getDimensionsPartial(fg::AbstractDFG, lbl::Symbol)
return _getDimensionsPartial(getFactor(fg, lbl))
end
# Helper function to construct CF from a CCW
function CalcFactorNormSq(
ccwl::CommonConvWrapper;
factor = ccwl.usrfnc!,
_sampleIdx = ccwl.particleidx[],
_legacyParams = ccwl.varValsAll[],
_allowThreads = true,
cache = ccwl.dummyCache,
fullvariables = ccwl.fullvariables,
solvefor = ccwl.varidx[],
manifold = getManifold(ccwl),
slack=nothing,
)
#
# FIXME using ccwl.dummyCache is not thread-safe
return CalcFactorNormSq(
factor,
_sampleIdx,
_legacyParams,
_allowThreads,
cache,
tuple(fullvariables...),
solvefor,
manifold,
ccwl.measurement,
slack
)
end
"""
$SIGNATURES
Sample the factor stochastic model `N::Int` times and store the samples in the preallocated `ccw.measurement` container.
DevNotes
- Use in place operations where possible and remember `measurement` is a `::Tuple`.
- TODO only works on `.threadid()==1` at present, see #1094
- Also see, JuliaRobotics/RoME.jl#465
"""
sampleFactor(cf::CalcFactor{<:AbstractFactor}, N::Int = 1) = [getSample(cf) for _ = 1:N]
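# Usage sketch (assumes `cf` is a CalcFactor built around a factor in the graph):
# meas = sampleFactor(cf, 5) # Vector of 5 measurement samples drawn via getSample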
function Base.show(io::IO, x::CalcFactor)
println(io)
printstyled(io, " CalcFactor:\n"; color = :blue)
return println(io, " .factor: ", typeof(x.factor))
end
Base.show(io::IO, ::MIME"text/plain", x::CalcFactor) = show(io, x)
"""
$SIGNATURES
Function to calculate measurement dimension from factor sampling.
Notes
- Will not work in all situations, but good enough so far.
- # TODO standardize via domain or manifold definition...??
"""
function calcZDim(cf::CalcFactor{T}) where {T <: AbstractFactor}
#
M = getManifold(cf) # getManifold(T)
try
return manifold_dimension(M)
catch
@warn "no method getManifold(::$(string(T))), calcZDim will attempt legacy length(sample) method instead"
end
# NOTE try to make sure we get matrix back (not a vector)
smpls = sampleFactor(cf, 2)[1]
return length(smpls)
end
calcZDim(ccw::CommonConvWrapper) = calcZDim(CalcFactorNormSq(ccw))
calcZDim(cf::CalcFactor{<:GenericMarginal}) = 0
calcZDim(cf::CalcFactor{<:ManifoldPrior}) = manifold_dimension(cf.manifold)
"""
$SIGNATURES
Helper function for evaluating factor residual functions, by adding necessary `CalcFactor` wrapper.
Notes
- Factor must already be in a factor graph to work
- Will not yet properly support all multihypo nuances, more a function for testing
- Useful for debugging a factor.
Example
```julia
fg = generateGraph_Kaess()
residual = calcFactorResidual(fg, :x1x2f1, [1.0], [0.0], [0.0])
```
Related
[`calcFactorResidualTemporary`](@ref), [`_evalFactorTemporary!`](@ref), [`approxConvBelief`](@ref)
"""
function calcFactorResidual(
dfgfct::DFGFactor,
args...;
ccw::CommonConvWrapper = IIF._getCCW(dfgfct),
)
return CalcFactorNormSq(ccw)(args...)
end
function calcFactorResidual(dfg::AbstractDFG, fctsym::Symbol, args...)
return calcFactorResidual(getFactor(dfg, fctsym), args...)
end
"""
$SIGNATURES
Evaluate the residual function for a single sample.
Notes
- Binary factors only at this stage, and `multihypo` does not have to be considered in this test
- Assumes calculation is for a single particle, so `meas::Tuple{Z,other}` is only a single particles value.
Example
```julia
residual = calcFactorResidualTemporary(Pose2Pose2(...), (RoME.Pose2,RoME.Pose2), (z_i,), (x1, x2))
```
Related
[`calcFactorResidual`](@ref), [`CalcResidual`](@ref), [`_evalFactorTemporary!`](@ref), [`approxConvBelief`](@ref), [`_buildGraphByFactorAndTypes!`](@ref)
"""
function calcFactorResidualTemporary(
fct::AbstractRelative,
varTypes::Tuple,
measurement,
pts::Tuple;
tfg::AbstractDFG = initfg(),
_blockRecursion::Bool = false,
doTime::Bool = false,
)
#
# build a new temporary graph
_, _dfgfct = _buildGraphByFactorAndTypes!(
fct,
varTypes,
pts;
dfg = tfg,
_blockRecursion = _blockRecursion,
)
# get a fresh measurement if needed
_measurement = if measurement != [] #length(measurement) != 0
measurement
else
# now use the CommonConvWrapper object in `_dfgfct`
cfo = CalcFactorNormSq(_getCCW(_dfgfct))
sampleFactor(cfo, 1)[1]
end
# assume a single sample point is being run
res = if doTime
@time res = calcFactorResidual(_dfgfct, _measurement, pts...)
res
else
calcFactorResidual(_dfgfct, _measurement, pts...)
end
return res
end
## =============================================================================================
## FactorOperationalMemory helper constructors
## =============================================================================================
# the same as legacy, getManifold(ccwl.usrfnc!)
getManifold(ccwl::CommonConvWrapper) = ccwl.manifold
getManifold(cf::CalcFactor) = getManifold(cf.factor)
function _resizePointsVector!(
vecP::AbstractVector{P},
mkd::ManifoldKernelDensity,
N::Int,
) where {P}
#
pN = length(vecP)
resize!(vecP, N)
for j = pN:N
smp = AMP.sample(mkd, 1)[1]
# @show j, smp, typeof(smp), typeof(vecP[j])
vecP[j] = smp[1]
end
return vecP
end
function _checkVarValPointers(dfg::AbstractDFG, fclb::Symbol)
vars = getVariable.(dfg, getVariableOrder(dfg,fclb))
ptrsV = pointer.(getVal.(vars))
ccw = _getCCW(dfg, fclb)
ptrsC = pointer.(ccw.varValsAll[])
ptrsV, ptrsC
end
"""
$(SIGNATURES)
Prepare the particle arrays `ARR` to be used for approximate convolution.
This function ensures that ARR has te same dimensions among all the parameters.
Function returns with ARR[sfidx] pointing at newly allocated deepcopy of the
existing values in getVal(Xi[.label==solvefor]).
Notes
- 2023Q2 intended use, only create VarValsAll the first time a factor added/reconstructed
- Return values `sfidx` is the element in ARR where `Xi.label==solvefor` and
- `maxlen` is length of all (possibly resampled) `ARR` contained particles.
- `Xi` is order sensitive.
- for initialization, solveFor = Nothing.
- `P = getPointType(<:InferenceVariable)`
"""
function _createVarValsAll(
variables::AbstractVector{<:DFGVariable};
solveKey::Symbol = :default,
)
#
# Note, NamedTuple once upon a time created way too much recompile load on repeat solves, #1564
# FIXME ON FIRE issue on deserialization
valsAll = []
# when deserializing a factor, a new ccw gets created but the variables may not yet have VND entries
for var_i in variables
push!(
valsAll,
if haskey(getSolverDataDict(var_i), solveKey)
getVal(var_i; solveKey)
else
Vector{typeof(getPointDefault(getVariableType(var_i)))}()
end
)
end
varValsAll = tuple(valsAll...)
# how many points
LEN = length.(varValsAll)
maxlen = maximum(LEN)
# NOTE, forcing maxlen to N results in errors (see test/testVariousNSolveSize.jl) see #105
# maxlen = N == 0 ? maxlen : N
# NOTE resize! moves the pointer!!!!!!
# # allow each variable to have a different number of points, which is resized during compute here
# # resample variables with too few kernels (manifolds points)
# SAMP = LEN .< maxlen
# for i = 1:length(variables)
# if SAMP[i]
# Pr = getBelief(variables[i], solveKey)
# _resizePointsVector!(varValsAll[i], Pr, maxlen)
# end
# end
# TODO --rather define reusable memory for the proposal
# we are generating a proposal distribution, not direct replacement for existing memory and hence the deepcopy.
# POSSIBLE SOURCE OF HUGE MEMORY CONSUMPTION ALLOCATION
return varValsAll
end
"""
$SIGNATURES
Internal method to set which dimensions should be used as the decision variables for later numerical optimization.
"""
function _setCCWDecisionDimsConv!(
ccwl::Union{CommonConvWrapper{F}, CommonConvWrapper{Mixture{N_, F, S, T}}},
xDim::Int
) where {
N_,
F <: Union{
AbstractManifoldMinimize,
AbstractRelativeMinimize,
AbstractPrior,
},
S,
T,
}
#
# NOTE should only be done in the constructor
newval = if ccwl.partial
Int[ccwl.usrfnc!.partial...]
else
# NOTE this is the target variable dimension (not factor manifold dimension)
Int[1:xDim...] # ccwl.xDim
end
resize!(ccwl.partialDims, length(newval))
ccwl.partialDims[:] = newval
return nothing
end
function attemptGradientPrep(
varTypes,
usrfnc,
varParamsAll,
multihypo,
meas_single,
_blockRecursion,
)
# prepare new cached gradient lambdas (attempt)
try
# https://github.com/JuliaRobotics/IncrementalInference.jl/blob/db7ff84225cc848c325e57b5fb9d0d85cb6c79b8/src/DispatchPackedConversions.jl#L46
# also https://github.com/JuliaRobotics/DistributedFactorGraphs.jl/issues/590#issuecomment-891450762
# FIXME, suppressing nested gradient propagation on GenericMarginals for the time being, see #1010
if (!_blockRecursion) && usrfnc isa AbstractRelative && !(usrfnc isa GenericMarginal)
# take first value from each measurement-tuple-element
measurement_ = meas_single
# compensate if no info available during deserialization
# take the first value from each variable param
pts_ = map(x -> x[1], varParamsAll)
# FIXME, only using first meas and params values at this time...
# NOTE, must block recurions here, since FGC uses this function to calculate numerical gradients on a temp fg.
# assume for now fractional-var in multihypo have same varType
hypoidxs = _selectHypoVariables(pts_, multihypo)
gradients = FactorGradientsCached!(
usrfnc,
tuple(varTypes[hypoidxs]...),
measurement_,
tuple(pts_[hypoidxs]...);
_blockRecursion = true,
)
return gradients
end
catch e
@warn "Unable to create measurements and gradients for $usrfnc during prep of CCW, falling back on no-partial information assumption. Enable ENV[\"JULIA_DEBUG\"] = \"IncrementalInference\" for @debug printing to see the error."
# rethrow(e)
@debug(e)
end
return nothing
end
"""
$SIGNATURES
Notes
- _createCCW is likely only used when adding or reconstructing a new factor in the graph,
- else use _updateCCW
- Can be called with `length(Xi)==0`
"""
function _createCCW(
Xi::AbstractVector{<:DFGVariable},
usrfnc::T;
multihypo::Union{Nothing, <:Distributions.Categorical} = nothing,
nullhypo::Real = 0.0,
certainhypo = if multihypo !== nothing
collect(1:length(multihypo.p))[multihypo.p .== 0.0]
else
collect(1:length(Xi))
end,
inflation::Real = 0.0,
solveKey::Symbol = :default,
_blockRecursion::Bool = false,
attemptGradients::Bool = true,
userCache::CT = nothing,
) where {T <: AbstractFactor, CT}
#
if length(Xi) !== 0
nothing
else
@debug("cannot prep ccw.param list with length(Xi)==0, see DFG #590")
end
# TODO check no Anys, see #1321
# NOTE, _varValsAll is only a reference to the actual VND.val memory of each variable
_varValsAll = _createVarValsAll(Xi; solveKey)
manifold = getManifold(usrfnc)
# standard factor metadata
solvefor = length(Xi)
fullvariables = tuple(Xi...) # convert(Vector{DFGVariable}, Xi)
# create a temporary CalcFactor object for extracting the first sample
_cf = CalcFactorNormSq(
usrfnc,
1,
_varValsAll,
false,
userCache,
fullvariables,
solvefor,
manifold,
nothing,
nothing,
)
# get a measurement sample
meas_single = sampleFactor(_cf, 1)[1]
elT = typeof(meas_single)
#TODO preallocate measurement?
measurement = Vector{elT}()
#FIXME chicken and egg problem for getting measurement type, so creating twice.
_cf = CalcFactorNormSq(
usrfnc,
1,
_varValsAll,
false,
userCache,
fullvariables,
solvefor,
manifold,
measurement,
nothing,
)
# partialDims are sensitive to both which solvefor variable index and whether the factor is partial
partial = hasfield(T, :partial) # FIXME, use isPartial function instead
partialDims = if partial
Int[usrfnc.partial...]
else
Int[]
end
# FIXME, should incorporate multihypo selection
varTypes = getVariableType.(fullvariables)
# as per struct CommonConvWrapper
_gradients = if attemptGradients
attemptGradientPrep(
varTypes,
usrfnc,
_varValsAll,
multihypo,
meas_single,
_blockRecursion,
)
else
nothing
end
# variable Types
pttypes = getVariableType.(Xi) .|> getPointType
PointType = 0 < length(pttypes) ? pttypes[1] : Vector{Float64}
if !isconcretetype(PointType)
@warn "_createCCW PointType is not concrete $PointType" maxlog=50
end
# PointType[],
return CommonConvWrapper(;
usrfnc! = usrfnc,
fullvariables,
varValsAll = Ref(_varValsAll),
dummyCache = userCache,
manifold,
partialDims,
partial,
nullhypo = float(nullhypo),
inflation = float(inflation),
hyporecipe = HypoRecipeCompute(;
hypotheses = multihypo,
certainhypo,
),
measurement,
_gradients,
)
end
function updateMeasurement!(
ccwl::CommonConvWrapper,
N::Int=1;
measurement::AbstractVector = Vector{Tuple{}}(),
needFreshMeasurements::Bool=true,
_allowThreads::Bool = true
)
# FIXME do not divert Mixture for sampling
# option to disable fresh samples or user provided
if needFreshMeasurements
# TODO this is only one thread, make this a for loop for multithreaded sampling
sampleFactor!(ccwl, N; _allowThreads)
elseif 0 < length(measurement)
resize!(ccwl.measurement, length(measurement))
ccwl.measurement[:] = measurement
end
nothing
end
"""
$(SIGNATURES)
Prepare the common functor computation object `CommonConvWrapper{T}`, which carries
the user factor functor along with additional variables and information used during
approximate convolution computations.
DevNotes
- TODO consolidate with others, see https://github.com/JuliaRobotics/IncrementalInference.jl/projects/6
"""
function _beforeSolveCCW!(
F_::Type{<:AbstractRelative},
ccwl::CommonConvWrapper{F},
variables::AbstractVector{<:DFGVariable},
sfidx::Int,
N::Integer;
measurement = Vector{Tuple{}}(),
needFreshMeasurements::Bool = true,
solveKey::Symbol = :default,
) where {F <: AbstractFactor} # F might be Mixture
#
  if length(variables) == 0
    @debug("cannot prep ccw.param list with length(variables)==0, see DFG #590")
  end
# in forward solve case, important to set which variable is being solved early in this sequence
# set the 'solvefor' variable index -- i.e. which connected variable of the factor is being computed in this convolution.
ccwl.varidx[] = sfidx
# ccwl.varidx[] = findfirst(==(solvefor), getLabel.(variables))
# splice, type stable
# make deepcopy of destination variable since multiple approxConv type computations should happen from different factors to the same variable
tvarv = tuple(
map(s->getVal(s; solveKey), variables[1:ccwl.varidx[]-1])...,
deepcopy(getVal(variables[ccwl.varidx[]]; solveKey)), # deepcopy(ccwl.varValsAll[][sfidx]),
map(s->getVal(s; solveKey), variables[ccwl.varidx[]+1:end])...,
)
ccwl.varValsAll[] = tvarv
# TODO, maxlen should parrot N (barring multi-/nullhypo issues)
# everybody use maxlen number of points in belief function estimation
maxlen = maximum((N, length.(ccwl.varValsAll[])...,))
# if solving for more or less points in destination
if N != length(ccwl.varValsAll[][ccwl.varidx[]])
varT = getVariableType(variables[ccwl.varidx[]])
# make vector right length
resize!(ccwl.varValsAll[][ccwl.varidx[]], N)
# define any new memory that might have been allocated
for i in 1:N
if !isdefined(ccwl.varValsAll[][ccwl.varidx[]], i)
ccwl.varValsAll[][ccwl.varidx[]][i] = getPointDefault(varT)
end
end
end
# FIXME, confirm what happens when this is a partial dimension factor? See #1246
# indexing over all possible hypotheses
xDim = getDimension(getVariableType(variables[ccwl.varidx[]]))
# TODO maybe refactor different type or api call?
# setup the partial or complete decision variable dimensions for this ccwl object
# NOTE perhaps deconv has changed the decision variable list, so placed here during consolidation phase
# TODO, should this not be part of `prepareCommonConvWrapper` -- only here do we look for .partial
_setCCWDecisionDimsConv!(ccwl, xDim)
# FIXME do not divert Mixture for sampling
updateMeasurement!(ccwl, maxlen; needFreshMeasurements, measurement, _allowThreads=true)
# used in ccw functor for AbstractRelativeMinimize
resize!(ccwl.res, _getZDim(ccwl))
fill!(ccwl.res, 0.0)
# calculate new gradients
# J = ccwl.gradients(measurement..., pts...)
return maxlen
end
function _beforeSolveCCW!(
F_::Type{<:AbstractPrior},
ccwl::CommonConvWrapper{F},
variables::AbstractVector{<:DFGVariable},
sfidx::Int,
N::Integer;
measurement = Vector{Tuple{}}(),
needFreshMeasurements::Bool = true,
solveKey::Symbol = :default,
) where {F <: AbstractFactor} # F might be Mixture
# FIXME, NEEDS TO BE CLEANED UP AND WORK ON MANIFOLDS PROPER
ccwl.varidx[] = sfidx
@assert ccwl.varidx[] == 1 "Solving on Prior with CCW should have sfidx=1, priors are unary factors."
# setup the partial or complete decision variable dimensions for this ccwl object
# NOTE perhaps deconv has changed the decision variable list, so placed here during consolidation phase
_setCCWDecisionDimsConv!(ccwl, getDimension(getVariableType(variables[ccwl.varidx[]])))
solveForPts = getVal(variables[ccwl.varidx[]]; solveKey)
maxlen = maximum([N; length(solveForPts); length(ccwl.varValsAll[][ccwl.varidx[]])]) # calcZDim(ccwl); length(measurement[1])
# FIXME do not divert Mixture for sampling
# update ccwl.measurement values
updateMeasurement!(ccwl, maxlen; needFreshMeasurements, measurement, _allowThreads=true)
return maxlen
end
# TODO, can likely deprecate this
function _beforeSolveCCW!(
ccwl::Union{CommonConvWrapper{F}, CommonConvWrapper{Mixture{N_, F, S, T}}},
Xi::AbstractVector{<:DFGVariable},
# destVarVals::AbstractVector,
sfidx::Int,
N::Integer;
kw...,
) where {N_, F <: AbstractRelative, S, T}
#
return _beforeSolveCCW!(F, ccwl, Xi, sfidx, N; kw...)
end
function _beforeSolveCCW!(
ccwl::Union{CommonConvWrapper{F}, CommonConvWrapper{Mixture{N_, F, S, T}}},
Xi::AbstractVector{<:DFGVariable},
# destVarVals::AbstractVector,
sfidx::Int,
N::Integer;
kw...,
) where {N_, F <: AbstractPrior, S, T}
#
return _beforeSolveCCW!(F, ccwl, Xi, sfidx, N; kw...)
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
["MIT"] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 8251 |
export generateGraph_Kaess,
generateGraph_TestSymbolic, generateGraph_CaesarRing1D, generateGraph_LineStep
export calcHelix_T
export generateGraph_EuclidDistance
"""
$SIGNATURES
Canonical example from literature, Kaess, et al.: ISAM2, IJRR, 2011.
Notes
- Paper variable ordering: p = [:l1;:l2;:x1;:x2;:x3]
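Example
A minimal usage sketch (`solveTree!` is this package's batch solver):
```julia
fg = generateGraph_Kaess()
tree = solveTree!(fg)
```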
"""
function generateGraph_Kaess(; graphinit::Bool = false)
fg = initfg()
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x1;], Prior(Normal()); graphinit = graphinit)
addVariable!(fg, :x2, ContinuousScalar)
addFactor!(fg, [:x1, :x2], LinearRelative(Normal()); graphinit = graphinit)
addVariable!(fg, :x3, ContinuousScalar)
addFactor!(fg, [:x2, :x3], LinearRelative(Normal()); graphinit = graphinit)
addVariable!(fg, :l1, ContinuousScalar)
addFactor!(fg, [:x1, :l1], LinearRelative(Normal()); graphinit = graphinit)
addFactor!(fg, [:x2, :l1], LinearRelative(Normal()); graphinit = graphinit)
addVariable!(fg, :l2, ContinuousScalar)
addFactor!(fg, [:x3, :l2], LinearRelative(Normal()); graphinit = graphinit)
return fg
end
"""
$SIGNATURES
Canonical example introduced by Borglab.
Notes
- Known variable ordering: p = [:x1; :l3; :l1; :x5; :x2; :l2; :x4; :x3]
"""
function generateGraph_TestSymbolic(; graphinit::Bool = false)
fg = initfg()
addVariable!(fg, :x1, ContinuousScalar)
addVariable!(fg, :x2, ContinuousScalar)
addVariable!(fg, :x3, ContinuousScalar)
addVariable!(fg, :x4, ContinuousScalar)
addVariable!(fg, :x5, ContinuousScalar)
addVariable!(fg, :l1, ContinuousScalar)
addVariable!(fg, :l2, ContinuousScalar)
addVariable!(fg, :l3, ContinuousScalar)
addFactor!(fg, [:x1; :l1], LinearRelative(Normal()); graphinit = graphinit)
addFactor!(fg, [:x1; :x2], LinearRelative(Normal()); graphinit = graphinit)
addFactor!(fg, [:x2; :l1], LinearRelative(Normal()); graphinit = graphinit)
addFactor!(fg, [:x2; :x3], LinearRelative(Normal()); graphinit = graphinit)
addFactor!(fg, [:x3; :x4], LinearRelative(Normal()); graphinit = graphinit)
addFactor!(fg, [:x4; :l2], LinearRelative(Normal()); graphinit = graphinit)
addFactor!(fg, [:x4; :x5], LinearRelative(Normal()); graphinit = graphinit)
addFactor!(fg, [:l2; :x5], LinearRelative(Normal()); graphinit = graphinit)
addFactor!(fg, [:x4; :l3], LinearRelative(Normal()); graphinit = graphinit)
addFactor!(fg, [:x5; :l3], LinearRelative(Normal()); graphinit = graphinit)
return fg
end
"""
$SIGNATURES
Generate generalized helix parameterized by a curve along "t-axis" (i.e. z-axis, assuming z(t)=t).
Notes
- Returns vectors for (`t`, `x,y`, and `yaw` angle).
- Offset to t_start at origin and facing direction along +y-axis.
- Use callbacks `xr_t(t)` and `yr_t(t)` to skew the helix with any desired curve, examples include
- `xr_t = (t) -> (1/3)t` to generate helix pattern along x-axis,
- or make spiral along t using xr_t, yr_t to generate a rose pattern on xy,
- use `spine_t(t)=xr_t(t) + im*yr_t(t)` as shortcut for more complicated patterns,
- note `xr_t` and `yr_t` are scaled by a factor `radius`, unscale the input by division if desired.
- Use the function twice for simulated and noisy trajectories (i.e. easier Gauss-Markov processes)
- Gradient (i.e. angle) calculations use a finite difference step `h` on the order of 1e-8.
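Example
A small usage sketch; the keyword values below are illustrative only:
```julia
# two turns at 20 points per turn, with the spine skewed along the x-axis
T, xy, yaw = calcHelix_T(0, 2, 20; radius = 0.5, spine_t = (t) -> t/3 + im*0)
```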
Related
[`RoME.generateGraph_Helix2D!`](@ref)
"""
function calcHelix_T(
t_start::Real = 0,
t_stop::Real = 1,
pointsperturn = 20;
direction::Int = -1,
T::AbstractVector{<:Real} = (t_start:(t_stop * pointsperturn)) ./ pointsperturn,
radius::Real = 0.5,
spine_t = (t) -> 0 + im * 0,
xr_t::Function = (t) -> real(spine_t(t)),
yr_t::Function = (t) -> imag(spine_t(t)),
h::Real = 1e-8,
)
#
# calc the position
f(t, x = xr_t(t), y = yr_t(t)) = radius * (cis(pi + direction * 2pi * t) + 1 + x + im * y)
vals = f.(T)
# calc the gradient
g(t) = (f(t + h) - f(t)) / h
grad = g.(T)
return T, hcat(real.(vals), imag.(vals)), angle.(grad)
end
"""
$SIGNATURES
Canonical example introduced originally as Caesar Hex Example.
Notes
- Paper variable ordering: p = [:x0;:x2;:x4;:x6;:x1;:l1;:x5;:x3;]
"""
function generateGraph_CaesarRing1D(; graphinit::Bool = false)
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
addVariable!(fg, :x2, ContinuousScalar)
addVariable!(fg, :x3, ContinuousScalar)
addVariable!(fg, :x4, ContinuousScalar)
addVariable!(fg, :x5, ContinuousScalar)
addVariable!(fg, :x6, ContinuousScalar)
addFactor!(fg, [:x0], Prior(Normal()); graphinit = graphinit)
addFactor!(fg, [:x0; :x1], LinearRelative(Normal()); graphinit = graphinit)
addFactor!(fg, [:x1; :x2], LinearRelative(Normal()); graphinit = graphinit)
addFactor!(fg, [:x2; :x3], LinearRelative(Normal()); graphinit = graphinit)
addFactor!(fg, [:x3; :x4], LinearRelative(Normal()); graphinit = graphinit)
addFactor!(fg, [:x4; :x5], LinearRelative(Normal()); graphinit = graphinit)
addFactor!(fg, [:x5; :x6], LinearRelative(Normal()); graphinit = graphinit)
addVariable!(fg, :l1, ContinuousScalar)
addFactor!(fg, [:x0; :l1], LinearRelative(Normal()); graphinit = graphinit)
addFactor!(fg, [:x6; :l1], LinearRelative(Normal()); graphinit = graphinit)
return fg
end
"""
$SIGNATURES
Continuous, linear scalar and multivariate test graph generation. Poses follow a
line, with each pose id equal to its ground truth position. See the sketch below.
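Example
A minimal sketch using only keywords from the function signature:
```julia
# 10 unit line, poses every 2 units, landmarks every 4, prior on x0
fg = generateGraph_LineStep(10; poseEvery = 2, landmarkEvery = 4, posePriorsAt = [0])
```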
"""
function generateGraph_LineStep(
lineLength::Int;
poseEvery::Int = 2,
landmarkEvery::Int = 4,
posePriorsAt = Int[0],
landmarkPriorsAt = Int[],
sightDistance::Int = 4,
vardims = 1,
noisy = false,
graphinit = false,
σ_pose_prior = 0.1,
σ_lm_prior = 0.1,
σ_pose_pose = 0.1,
σ_pose_lm = 0.1,
solverParams = SolverParams(),
)
# solverParams=SolverParams(algorithms=[:default, :parametric]))
vtype = (vardims == 1) ? ContinuousScalar() : ContinuousEuclid(vardims)
fg = LocalDFG{SolverParams}(; solverParams = solverParams)
function xNoise(i::Int, σ::Float64 = 1.0)
if (vardims == 1)
return noisy ? Normal(σ * randn() + i, σ) : Normal(0.0 * randn() + i, σ)
else
return if noisy
MvNormal(σ * randn(vardims) .+ i, σ)
else
MvNormal(0.0 * randn(vardims) .+ i, σ)
end
end
end
x = Int[]
lm = Int[]
for i = 0:lineLength
if mod(i, poseEvery) == 0
push!(x, i)
addVariable!(fg, Symbol("x", i), vtype) #, autoinit = graphinit)
(i in posePriorsAt) && addFactor!(
fg,
[Symbol("x", i)],
Prior(xNoise(i, σ_pose_prior));
graphinit = graphinit,
)
# "odo" type
(i > 0) && addFactor!(
fg,
[Symbol("x", i - poseEvery); Symbol("x", i)],
LinearRelative(xNoise(poseEvery, σ_pose_pose));
graphinit = graphinit,
)
end
if landmarkEvery != 0 && mod(i, landmarkEvery) == 0
push!(lm, i)
addVariable!(fg, Symbol("lm", i), vtype) #, autoinit = graphinit)
(i in landmarkPriorsAt) && addFactor!(
fg,
[Symbol("lm", i)],
Prior(xNoise(i, σ_lm_prior));
graphinit = graphinit,
)
end
end
  # add landmark sightings
for xi in x, lmi in lm
dist = lmi - xi
if abs(dist) < sightDistance
# @info "adding landmark lm$lmi to x$xi with dist $dist"
addFactor!(
fg,
[Symbol("x", xi); Symbol("lm", lmi)],
LinearRelative(xNoise(dist, σ_pose_lm));
graphinit = graphinit,
)
end
end
return fg
end
"""
$SIGNATURES
Generate an EuclidDistance test graph where one landmark position is unknown.
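Example
A minimal sketch with two anchor positions:
```julia
# two known anchors, each at distance 100 from the unknown landmark :l1
fg = generateGraph_EuclidDistance([[100.0, 0.0], [0.0, 100.0]]; dist = 100.0)
```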
"""
function generateGraph_EuclidDistance(
points::Vector{Vector{Float64}} = [[100.0, 0], [0.0, 100]];
dist = 100.0,
σ_prior = 1.0,
σ_dist = 1.0,
N = 100,
graphinit = false,
)
#
dims = length(points[1])
fg = initfg()
fg.solverParams.N = N
fg.solverParams.graphinit = graphinit
for (i, p) in enumerate(points)
xlbl = Symbol("x", i)
addVariable!(fg, xlbl, ContinuousEuclid{dims})
addFactor!(fg, [xlbl], Prior(MvNormal(p, σ_prior * ones(dims))))
end
addVariable!(fg, :l1, ContinuousEuclid{dims})
for i = 1:length(points)
xlbl = Symbol("x", i)
addFactor!(fg, [xlbl; :l1], EuclidDistance(Normal(dist, σ_dist)))
end
return fg
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
["MIT"] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 6373 |
function _MsgJointLikelihood(;
relatives::MsgRelativeType = MsgRelativeType(),
priors::MsgPriorType = MsgPriorType(),
)
return _MsgJointLikelihood(relatives, priors)
end
function Base.show(io::IO, x::_MsgJointLikelihood)
println(io)
printstyled(io, " _MsgJointLikelihood:\n"; color = :blue)
print(io, " .relatives: ")
for tp in x.relatives
print(io, tp.variables, "::", typeof(tp.likelihood).name)
print(io, "; ")
end
println(io)
print(io, " .priors: ")
for k in keys(x.priors)
print(io, k, ", ")
end
return println(io)
end
Base.show(io::IO, ::MIME"text/plain", x::_MsgJointLikelihood) = show(io, x)
function LikelihoodMessage(;
sender::NamedTuple{(:id, :step), Tuple{Int, Int}} = (; id = 0, step = 0),
status::CliqStatus = NULL,
beliefDict::Dict = Dict{Symbol, TreeBelief}(),
variableOrder::Vector{Symbol} = Symbol[],
cliqueLikelihood = nothing,
msgType::T = NonparametricMessage(),
hasPriors::Bool = true,
childSolvDims::Dict{Int, Float64} = Dict{Int, Float64}(),
jointmsg::_MsgJointLikelihood = _MsgJointLikelihood(),
) where {T <: MessageType}
return LikelihoodMessage{T}(
sender,
status,
beliefDict,
variableOrder,
cliqueLikelihood,
msgType,
hasPriors,
childSolvDims,
jointmsg,
)
end
#
function Base.show(io::IO, msg::LikelihoodMessage)
t = typeof(msg)
fields = fieldnames(t)
nf = nfields(msg)
println(io, "LikelihoodMessage:")
for f in fields
printstyled(io, f, ": "; color = :blue)
show(io, getproperty(msg, f))
println(io)
end
end
Base.show(io::IO, ::MIME"text/plain", msg::LikelihoodMessage) = show(io, msg)
function compare(
l1::LikelihoodMessage,
l2::LikelihoodMessage;
skip::Vector{Symbol} = Symbol[],
)
#
TP = true
TP = TP && l1.status == l2.status
TP = TP && l1.variableOrder == l2.variableOrder
TP = TP && l1.msgType == l2.msgType
TP = TP && l1.cliqueLikelihood |> typeof == l2.cliqueLikelihood |> typeof
for (k, v) in l1.belief
TP = TP && haskey(l2.belief, k)
TP = TP && compare(v, l2.belief[k])
end
return TP
end
# overload
==(l1::LikelihoodMessage, l2::LikelihoodMessage) = compare(l1, l2)
function BayesTreeNodeData(;
status::CliqStatus = NULL,
frontalIDs = Symbol[],
separatorIDs = Symbol[],
inmsgIDs = Symbol[],
potIDs = Symbol[],
potentials = Symbol[],
partialpotential = Bool[],
dwnPotentials = Symbol[],
dwnPartialPotential = Bool[],
cliqAssocMat = Array{Bool}(undef, 0, 0),
cliqMsgMat = Array{Bool}(undef, 0, 0),
directvarIDs = Int[],
directFrtlMsgIDs = Int[],
msgskipIDs = Int[],
itervarIDs = Int[],
directPriorMsgIDs = Int[],
debug = nothing,
debugDwn = nothing,
allmarginalized = false,
initialized = :NULL,
upsolved = false,
downsolved = false,
isCliqReused = false,
messages = MessageBuffer(),
)
btnd = BayesTreeNodeData(
status,
frontalIDs,
separatorIDs,
inmsgIDs,
potIDs,
potentials,
partialpotential,
dwnPotentials,
dwnPartialPotential,
cliqAssocMat,
cliqMsgMat,
directvarIDs,
directFrtlMsgIDs,
msgskipIDs,
itervarIDs,
directPriorMsgIDs,
debug,
debugDwn,
allmarginalized,
initialized,
upsolved,
downsolved,
isCliqReused,
messages,
)
#
return btnd
end
#
function compare(c1::BayesTreeNodeData, c2::BayesTreeNodeData; skip::Vector{Symbol} = [])
#
TP = true
TP = TP && c1.frontalIDs == c2.frontalIDs
TP = TP && c1.separatorIDs == c2.separatorIDs
TP = TP && c1.inmsgIDs == c2.inmsgIDs
TP = TP && c1.potIDs == c2.potIDs
TP = TP && c1.potentials == c2.potentials
TP = TP && c1.partialpotential == c2.partialpotential
TP = TP && c1.dwnPotentials == c2.dwnPotentials
TP = TP && c1.dwnPartialPotential == c2.dwnPartialPotential
TP = TP && c1.cliqAssocMat == c2.cliqAssocMat
TP = TP && c1.cliqMsgMat == c2.cliqMsgMat
TP = TP && c1.directvarIDs == c2.directvarIDs
TP = TP && c1.directFrtlMsgIDs == c2.directFrtlMsgIDs
TP = TP && c1.msgskipIDs == c2.msgskipIDs
TP = TP && c1.itervarIDs == c2.itervarIDs
TP = TP && c1.directPriorMsgIDs == c2.directPriorMsgIDs
TP = TP && c1.debug == c2.debug
TP = TP && c1.debugDwn == c2.debugDwn
TP = TP && c1.allmarginalized == c2.allmarginalized
TP = TP && c1.initialized == c2.initialized
TP = TP && c1.upsolved == c2.upsolved
TP = TP && c1.downsolved == c2.downsolved
TP = TP && c1.isCliqReused == c2.isCliqReused
return TP
end
function convert(::Type{PackedBayesTreeNodeData}, btnd::BayesTreeNodeData)
return PackedBayesTreeNodeData(
btnd.frontalIDs,
btnd.separatorIDs,
btnd.inmsgIDs,
btnd.potIDs,
btnd.potentials,
btnd.partialpotential,
btnd.dwnPotentials,
btnd.dwnPartialPotential,
btnd.cliqAssocMat,
btnd.cliqMsgMat,
btnd.directvarIDs,
btnd.directFrtlMsgIDs,
btnd.msgskipIDs,
btnd.itervarIDs,
btnd.directPriorMsgIDs,
)
end
function convert(::Type{BayesTreeNodeData}, pbtnd::PackedBayesTreeNodeData)
btnd = BayesTreeNodeData()
btnd.frontalIDs = pbtnd.frontalIDs
btnd.separatorIDs = pbtnd.separatorIDs
btnd.inmsgIDs = pbtnd.inmsgIDs
btnd.potIDs = pbtnd.potIDs
btnd.potentials = pbtnd.potentials
btnd.partialpotential = pbtnd.partialpotential
btnd.dwnPotentials = pbtnd.dwnPotentials
btnd.dwnPartialPotential = pbtnd.dwnPartialPotential
btnd.cliqAssocMat = pbtnd.cliqAssocMat
btnd.cliqMsgMat = pbtnd.cliqMsgMat
btnd.directvarIDs = pbtnd.directvarIDs
btnd.directFrtlMsgIDs = pbtnd.directFrtlMsgIDs
btnd.msgskipIDs = pbtnd.msgskipIDs
btnd.itervarIDs = pbtnd.itervarIDs
btnd.directPriorMsgIDs = pbtnd.directPriorMsgIDs
return btnd
end
##==============================================================================
## Cliques
## TreeClique
##==============================================================================
Base.getindex(cId::CliqueId) = cId.value
Base.show(io::IO, ::MIME"text/plain", x::CliqueId) = print(io, x.value)
Base.show(io::IO, x::CliqueId) = print(io, x.value)
getId(c::TreeClique) = c.id
TreeClique(i::Int) = TreeClique(CliqueId(i), BayesTreeNodeData(), Dict{String, Any}())
TreeClique(id::CliqueId) = TreeClique(id, BayesTreeNodeData(), Dict{String, Any}())
DFG.getLabel(cliq::TreeClique) = cliq.attributes["label"]
function setLabel!(cliq::TreeClique, lbl::String)
cliq.attributes["label"] = lbl
return lbl
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
["MIT"] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1848 |
# TODO: KEEP
# function compareAll(Al::T1, Bl::T2; show::Bool=true, skip::Vector{Symbol}=Symbol[])::Bool where {T1 <: Union{SingleThreaded, MultiThreaded}, T2 <: Union{SingleThreaded, MultiThreaded}}
# return T1 == T2
# end
import DistributedFactorGraphs: compare, compareAllSpecial
# These are now moved to DistributedFactorGraphs, with the exceptions of
# the functions with IIF-specific parameters.
# To extend these, import the relevant DFG compareX function and overload it.
function Base.isapprox(
p1::Union{<:BallTreeDensity, <:ManifoldKernelDensity},
p2::Union{<:BallTreeDensity, <:ManifoldKernelDensity};
atol::Real = 1e-6,
)
#
return mmd(p1, p2) < atol
end
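# Usage sketch (hypothetical labels and tolerance): two beliefs compare approximately
# equal when their maximum mean discrepancy (mmd) falls below `atol`, e.g.
#   isapprox(getBelief(fg, :x1), getBelief(fg, :x1, :graphinit); atol = 0.1)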
## FIXME, FIGURE OUT HOW TO DEPRECATE BELOW ==============================================
function compareAllSpecial(
A::T1,
B::T2;
skip = Symbol[],
show::Bool = true,
) where {T1 <: CommonConvWrapper, T2 <: CommonConvWrapper}
#
if T1 != T2
@warn "CCW types T1 and T2 not equal=>" T1 T2
# return false
end
# FIXME still issues with compare, skipping :vartypes https://github.com/JuliaRobotics/DistributedFactorGraphs.jl/issues/434
return compareAll(A, B; skip = union(skip, [:vartypes]), show = show)
end
function compare(
p1::Union{<:BallTreeDensity, <:ManifoldKernelDensity},
p2::Union{<:BallTreeDensity, <:ManifoldKernelDensity},
)
#
return compareAll(p1.bt, p2.bt; skip = [:calcStatsHandle; :data]) &&
compareAll(p1, p2; skip = [:calcStatsHandle; :bt])
end
function compare(c1::TreeClique, c2::TreeClique)
#
TP = true
TP = TP && c1.id == c2.id
# data
@warn "skipping ::TreeClique compare of data"
# TP = TP && compare(c1.data, c2.data)
# attributes
@warn "only comparing keys of TreeClique attributes"
TP = TP && collect(keys(c1.attributes)) == collect(keys(c2.attributes))
return TP
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
["MIT"] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 9637 |
# series of deconvolution tools
## Initial version of selecting the dimension of a factor -- will be consolidated with existing infrastructure later
"""
$SIGNATURES
Inverse solve for the predicted noise values; returns a tuple of (newly calculated-predicted, known measurement) values.
Notes
- Only works for first value in measurement::Tuple at this stage.
- "measured" is used as starting point for the "calculated-predicted" values solve.
- Not all factor evaluation cases are supported yet.
- NOTE only works on `.threadid()==1` at present, see #1094
- This function is still part of the initial implementation and needs a lot of generalization improvements.
DevNotes
- TODO Test for various cases with multiple variables.
- TODO make multithread-safe, and able, see #1094
- TODO Test for cases with `nullhypo`
- FIXME FactorMetadata object for all use-cases, not just empty object.
- TODO resolve #1096 (multihypo)
- TODO Test cases for `multihypo`.
- TODO figure out if there is a way to consolidate with evalFactor and approxConv?
- basically how to do deconv for just one sample with unique values (wrt TAF)
- TODO N should not be hardcoded to 100
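Example
A hedged sketch; the factor label follows the auto-naming convention (e.g. `:x1x2f1`) and may differ in your graph:
```julia
fg = generateGraph_Kaess(graphinit = true)
pred, meas = approxDeconv(getFactor(fg, :x1x2f1))
```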
Related
[`approxDeconv`](@ref), [`_solveCCWNumeric!`](@ref)
"""
function approxDeconv(
fcto::DFGFactor,
ccw::CommonConvWrapper = _getCCW(fcto);
N::Int = 100,
measurement::AbstractVector = sampleFactor(ccw, N),
retries::Int = 3,
)
#
# FIXME needs xDim for all variables at once? xDim = 0 likely to break?
# but what if this is a partial factor -- is that important for general cases in deconv?
_setCCWDecisionDimsConv!(ccw, 0) # ccwl.xDim used to hold the last forward solve getDimension(getVariableType(Xi[sfidx]))
# FIXME This does not incorporate multihypo??
varsyms = getVariableOrder(fcto)
# vars = getPoints.(getBelief.(dfg, varsyms, solveKey) )
fcttype = getFactorType(fcto)
# get measurement dimension
zDim = _getZDim(fcto)
# TODO consider using ccw.cpt[thrid].res # likely needs resizing
res_ = zeros(zDim)
# TODO, consolidate fmd with getSample/sampleFactor and _buildLambda
fctSmpls = deepcopy(measurement)
# TODO assuming vector on only first container in measurement::Tuple
makeTarget = (smpidx) -> measurement[smpidx] # TODO does not support copy-primitive types like Float64, only Ref()
# makeTarget = (i) -> view(measurement[1][i],:)
# makeTarget = (i) -> view(measurement[1], :, i)
# NOTE
# build a lambda that incorporates the multihypo selections
# set these first
# ccw.cpt[].activehypo / .p / .params # params should already be set from construction
hyporecipe = _prepareHypoRecipe!(nothing, N, 0, length(varsyms))
# Juila 1.7 allows destructure assign `(;a,b) = namedtype`
# certainidx, allelements, activehypo, mhidx =
# only doing the current active hypo
@assert hyporecipe.activehypo[2][1] == 1 "deconv was expecting hypothesis nr == (1, 1:d)"
islen1 = zDim == 1
for idx = 1:N
# towards each particle in their own thread (not 100% ready yet, factors should be separate memory)
target_smpl = makeTarget(idx)
# TODO must first resolve hypothesis selection before unrolling them -- deferred #1096
resize!(ccw.hyporecipe.activehypo, length(hyporecipe.activehypo[2][2]))
ccw.hyporecipe.activehypo[:] = hyporecipe.activehypo[2][2]
onehypo! = _buildCalcFactorLambdaSample(ccw, idx, measurement)
#
# lambda with which to find best measurement values
function hypoObj(tgt)
# copyto!(target_smpl, tgt)
measurement[idx] = tgt
return onehypo!()
end
# hypoObj = (tgt) -> (target_smpl .= tgt; onehypo!())
# find solution via SubArray view pointing to original memory location
if fcttype isa AbstractManifoldMinimize
error("Fix dispatch on AbstractManifoldMinimize")
else
ts = _solveLambdaNumeric(fcttype, hypoObj, res_, measurement[idx], islen1)
measurement[idx] = ts
end
end
# return (deconv-prediction-result, independent-measurement)
# r_meas = map(m->m[1], measurement)
# r_fctSmpls = map(m->m[1], fctSmpls)
return measurement, fctSmpls
end
# TBD deprecate use of xDim
function approxDeconv(
fcto::DFGFactor{<:CommonConvWrapper{<:AbstractManifoldMinimize}},
ccw::CommonConvWrapper = _getCCW(fcto);
N::Int = 100,
measurement::AbstractVector = sampleFactor(ccw, N),
retries=nothing,
)
if !isnothing(retries)
Base.depwarn(
"approxDeconv kwarg retries is not used",
:approxDeconv,
)
end
# but what if this is a partial factor -- is that important for general cases in deconv?
_setCCWDecisionDimsConv!(ccw, 0)
varsyms = getVariableOrder(fcto)
# TODO assuming vector on only first container in measurement::Tuple # TBD How should user dispatch fancy tuple measurements on deconv.
# NOTE
# build a lambda that incorporates the multihypo selections
# deconv has to solve for the best matching for particles
# FIXME This does not incorporate multihypo, Apply hyporecipe to full variable order list. But remember hyporecipe assignment must be found (NPhard)
hyporecipe = _prepareHypoRecipe!(nothing, N, 0, length(varsyms))
# only doing the current active hypo
@assert hyporecipe.activehypo[2][1] == 1 "deconv was expecting hypothesis nr == (1, 1:d)"
# get measurement dimension
zDim = _getZDim(fcto)
islen1 = zDim == 1
#make a copy of the original measurement before mutating it
sampled_meas = deepcopy(measurement)
fcttype = getFactorType(fcto)
for idx = 1:N
# TODO must first resolve hypothesis selection before unrolling them -- deferred #1096
resize!(ccw.hyporecipe.activehypo, length(hyporecipe.activehypo[2][2]))
ccw.hyporecipe.activehypo[:] = hyporecipe.activehypo[2][2]
#TODO why is this resize in the loop?
# Create a CalcFactor functor of the correct hypo.
_hypoCalcFactor = _buildHypoCalcFactor(ccw, idx)
ts = _solveLambdaNumericMeas(fcttype, _hypoCalcFactor, measurement[idx], islen1)
measurement[idx] = ts
end
return measurement, sampled_meas
end
"""
$SIGNATURES
Generalized deconvolution to find the predicted measurement values of the factor `fctsym` in `dfg`.
Inverse solve for the predicted noise values; returns a tuple of (newly predicted, known "measured" noise) values.
Notes
- Opposite operation contained in `approxConvBelief`.
- For more notes see [`solveFactorMeasurements`](@ref).
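Example
A short sketch, assuming `fg` holds an initialized graph with a factor labeled `:x1x2f1`:
```julia
pred, meas = approxDeconv(fg, :x1x2f1)
```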
Related
[`approxConvBelief`](@ref), `deconvSolveKey`
"""
function approxDeconv(
dfg::AbstractDFG,
fctsym::Symbol,
solveKey::Symbol = :default;
retries::Int = 3,
)
#
# which factor
fct = getFactor(dfg, fctsym)
pts = getPoints(getBelief(dfg, getVariableOrder(fct)[1], solveKey))
N = length(pts)
pts = approxDeconv(fct; N = N, retries = retries)
return pts
end
function approxDeconv(
dfg::AbstractDFG,
fctlbl::Symbol,
factorType::AbstractRelative,
solveKey::Symbol = :default;
tfg::AbstractDFG = initfg(),
retries::Int = 3,
)
#
# build a local temporary graph copy containing the same values but user requested factor type.
fct = getFactor(dfg, fctlbl)
fctT = getFactorType(fct)
lbls = getVariableOrder(fct)
for lb in lbls
exists(tfg, lb) ? nothing : addVariable!(tfg, lb, getVariableType(dfg, lb))
initVariable!(tfg, lb, getBelief(dfg, lb, solveKey))
end
# add factor type requested by user
f_ = addFactor!(tfg, lbls, factorType; graphinit = false)
  # perform the deconvolution operation on the temporary graph with the user desired factor instead.
return approxDeconv(tfg, getLabel(f_); retries = retries)
end
# try default constructor
function approxDeconv(
dfg::AbstractDFG,
fctlbl::Symbol,
factorType::Type{<:AbstractRelative},
w...;
kw...,
)
return approxDeconv(dfg, fctlbl, factorType(), w...; kw...)
end
#
function approxDeconvBelief(dfg::AbstractDFG, lb::Symbol, w...; kw...)
return manikde!(
getManifold(getFactorType(dfg, lb)),
approxDeconv(dfg, lb, w...; kw...)[1],
)
end
"""
$SIGNATURES
Calculate the relative difference between two variables and across solveKeys.
Example
```julia
fg = generateGraph_Kaess()
solveTree!(fg, storeOld=true)
# calculate the relative motion induced by the solver from init to solve.
pts = deconvSolveKey(fg, :x1, :default, :x1, :graphinit)
```
Notes
- Can pass user `tfg::AbstractDFG` for better in-place operations.
DevNotes
- TODO use dfg, rather than building new tfg internally.
Related
[`approxDeconv`](@ref), [`mmd`](@ref)
"""
function deconvSolveKey(
dfg::AbstractDFG,
refSym::Symbol,
refKey::Symbol,
tstSym::Symbol,
tstKey::Symbol;
tfg = initfg(),
)
#
# create a new temporary factor graph for calculations
# add the first "reference" variable
Xref = getBelief(dfg, refSym, refKey)
refSym_ = Symbol(refSym, "_ref")
refVarType = getVariableType(dfg, refSym)
if !exists(tfg, refSym_)
addVariable!(tfg, refSym_, refVarType)
end
initVariable!(tfg, refSym_, Xref)
# add the second "test" variable
tstVarType = getVariableType(dfg, tstSym)
Xtst = getBelief(dfg, tstSym, tstKey)
tstSym_ = Symbol(tstSym, "_tst")
if !exists(tfg, tstSym_)
addVariable!(tfg, tstSym_, tstVarType)
end
initVariable!(tfg, tstSym_, Xtst)
# add the new dummy factor with default manifold for computations
fctType = selectFactorType(refVarType, tstVarType)
nf = addFactor!(tfg, [refSym_; tstSym_], fctType())
# TODO connect from dfg all other data that might form part of FactorMetadata in tfg
pts = approxDeconv(tfg, nf.label)
# assuming tfg was passed in by the user
deleteFactor!(tfg, nf.label)
# return result
return pts, fctType
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
["MIT"] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1007 |
## TBD, will be redone together with fixes for #1010
"""
$SIGNATURES
Return a default factor type between two variables of types T1 and T2.
Notes
- Most likely used with deconvolution between target variables
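Example
A small sketch of the dispatch, assuming `ContinuousScalar` is this package's 1D position type:
```julia
selectFactorType(ContinuousScalar, ContinuousScalar) # LinearRelative{1}
```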
"""
function selectFactorType(
Modl::Module,
T1::Type{<:InferenceVariable},
T2::Type{<:InferenceVariable},
)
return getfield(Modl, Symbol(T1, T2))
end
function selectFactorType(T1::Type{<:InferenceVariable}, T2::Type{<:InferenceVariable})
return selectFactorType(typeof(T1()).name.module, T1, T2)
end
selectFactorType(T1::Type{<:Position1}, T2::Type{<:Position1}) = LinearRelative{1}
function selectFactorType(T1::Type{<:Position{N}}, T2::Type{<:Position{N}}) where {N}
return LinearRelative{N}
end
function selectFactorType(T1::InferenceVariable, T2::InferenceVariable)
return selectFactorType(typeof(T1), typeof(T2))
end
function selectFactorType(dfg::AbstractDFG, s1::Symbol, s2::Symbol)
return selectFactorType(getVariableType(dfg, s1), getVariableType(dfg, s2))
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
["MIT"] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 24257 |
"""
$(SIGNATURES)
Perform the nonlinear numerical operations to approximate the convolution with a particular user defined likelihood function (conditional), which has been prepared in the `frl` object. This function uses root finding to enforce a non-linear function constraint.
Notes:
- remember this operates on a deepcopy of the original sfidx values, since we are generating a proposal distribution and not directly replacing the existing variable belief estimate
Future work:
- once Threads.@threads have been optimized JuliaLang/julia#19967, also see area4 branch
- improve handling of n and particleidx, especially considering future multithreading support
"""
function approxConvOnElements!(
destVarVals::AbstractArray,
ccwl::Union{CommonConvWrapper{F}, CommonConvWrapper{Mixture{N_, F, S, T}}},
elements::Union{Vector{Int}, UnitRange{Int}},
# ::Type{<:SingleThreaded},
_slack = nothing,
) where {N_, F <: AbstractRelative, S, T}
#
for n in elements
ccwl.particleidx[] = n
_solveCCWNumeric!(ccwl, _slack)
end
return nothing
end
"""
$SIGNATURES
Control the amount of entropy to add to null-hypothesis in multihypo case.
Notes:
- Basically calculating the covariance (with a bunch of assumptions TODO, fix)
- FIXME, Currently only supports Euclidean domains.
- FIXME, allow particle subpopulations instead of just all of a variable
"""
function calcVariableDistanceExpectedFractional(
ccwl::CommonConvWrapper,
sfidx::Integer,
certainidx::AbstractVector{<:Integer};
kappa::Real = 3.0,
# readonlyVarVals = ccwl.varValsAll[][sfidx]
)
#
@assert sfidx == ccwl.varidx[] "ccwl.varidx[] is expected to be the same as sfidx"
varTypes = getVariableType.(ccwl.fullvariables)
# @info "WHAT" isdefined(ccwl.varValsAll[][sfidx], 101)
if sfidx in certainidx
# on change of destination variable count N, only use the defined values before a solve
msst_ = calcStdBasicSpread(varTypes[sfidx], ccwl.varValsAll[][sfidx])
return kappa * msst_
end
# @assert !(sfidx in certainidx) "null hypo distance does not work for sfidx in certainidx"
# get mean of all fractional variables
# ccwl.params::Vector{Vector{P}}
uncertainidx = setdiff(1:length(ccwl.varValsAll[]), certainidx)
dists = zeros(length(uncertainidx) + length(certainidx))
dims = manifold_dimension(getManifold(varTypes[sfidx]))
uncMeans = zeros(dims, length(uncertainidx))
for (count, i) in enumerate(uncertainidx)
u = mean(getManifold(varTypes[i]), ccwl.varValsAll[][i])
uncMeans[:, count] .= getCoordinates(varTypes[i], u)
end
count = 0
refMean = getCoordinates(
varTypes[sfidx],
mean(getManifold(varTypes[sfidx]), ccwl.varValsAll[][sfidx]),
)
# calc for uncertain and certain
for i in uncertainidx
count += 1
dists[count] = norm(refMean - uncMeans[:, count])
end
# also check distance to certainidx for general scale reference (workaround heuristic)
for cidx in certainidx
count += 1
cerMeanPnt = mean(getManifold(varTypes[cidx]), ccwl.varValsAll[][cidx], GeodesicInterpolation())
cerMean = getCoordinates(varTypes[cidx], cerMeanPnt)
dists[count] = norm(refMean[1:dims] - cerMean[1:dims])
end
push!(dists, 1e-2)
return kappa * maximum(dists)
end
# Add entropy to each point in `points` on manifold `M`, only along dimensions `dimIdx` that are also in `p`
function addEntropyOnManifold!(
M::ManifoldsBase.AbstractManifold,
points::Union{<:AbstractVector{<:Real}, SubArray},
dimIdx::AbstractVector,
spreadDist::Real,
p::Union{Colon, <:AbstractVector} = :,
)
#
if length(points) == 0
return nothing
end
# preallocate
T = number_eltype(points[1])
Xc = zeros(T, manifold_dimension(M))
#allocate to change SMatrix to MMatrix
X = allocate(get_vector(M, points[1], Xc, DefaultOrthogonalBasis()))
for idx in 1:length(points)
# build tangent coordinate random
for dim in dimIdx
if (p === :) || dim in p
Xc[dim] = spreadDist * (rand() - 0.5)
end
end
# update tangent vector X
get_vector!(M, X, points[idx], Xc, DefaultOrthogonalBasis())
#update point
# exp!(M, points[idx], points[idx], X)
# retract!(M, points[idx], points[idx], X)
    # FIXME: in-place retract! disabled while testing a memory pointer (aliasing) problem -- remember to restore the in-place update
points[idx] = retract(M, points[idx], X)
end
#
return nothing
end
"""
$(SIGNATURES)
Common function to compute across a single user defined multi-hypothesis ambiguity per factor.
This function dispatches both `AbstractRelativeRoots` and `AbstractRelativeMinimize` factors.
The computation result is stored in `destinationVarVals` and NOT in `ccwl.varValsAll[sfidx]` -- duplicate memory is needed when doing approxConv.
DevNotes
- Future combo with `_calcIPCRelative`
"""
function computeAcrossHypothesis!(
ccwl::Union{<:CommonConvWrapper{F}, <:CommonConvWrapper{Mixture{N_, F, S, T}}},
hyporecipe::HypoRecipe, #NamedTuple,
sfidx::Int,
maxlen::Int,
mani::ManifoldsBase.AbstractManifold; # maniAddOps::Tuple;
# destinationVarVals = ccwl.varValsAll[][sfidx], # deepcopy
spreadNH::Real = 5.0,
inflateCycles::Int = 3,
skipSolve::Bool = false,
testshuffle::Bool = false,
_slack = nothing,
) where {N_, F <: AbstractRelative, S, T}
#
count = 0
# transition to new hyporecipe approach
allelements = hyporecipe.allelements
activehypo = hyporecipe.activehypo
certainidx = hyporecipe.certainidx
@assert ccwl.varidx[] == sfidx "duplicate registers for solve for index should be the same in ccw.varidx"
@assert ccwl.hyporecipe.certainhypo == hyporecipe.certainidx "expected hyporecipe.certainidx to be the same as cached in ccw"
for (hypoidx, vars) in activehypo
count += 1
# now do hypothesis specific
if sfidx in certainidx && hypoidx != 0 || hypoidx in certainidx || hypoidx == sfidx
# hypo case hypoidx, sfidx = $hypoidx, $sfidx
# for i = 1:Threads.nthreads()
resize!(ccwl.hyporecipe.activehypo, length(vars))
ccwl.hyporecipe.activehypo[:] = vars
# end
# ccwl.varValsAll[][ccwl.varidx[]] should be an alternate/duplicate memory from getVal(variable; solveKey)
addEntr = view(ccwl.varValsAll[][ccwl.varidx[]], allelements[count]) # destinationVarVals
# do proposal inflation step, see #1051
# consider duplicate convolution approximations for inflation off-zero
# ultimately set by dfg.params.inflateCycles
for iflc = 1:inflateCycles
      # dynamic estimate with user requested spreadNH of how much noise to inject (inflation or nullhypo)
spreadDist = calcVariableDistanceExpectedFractional(
ccwl,
sfidx,
certainidx;
kappa = ccwl.inflation,
# readonlyVarVals = ccwl.varValsAll[][ccwl.varidx[]],
)
addEntropyOnManifold!(
mani,
addEntr,
1:getDimension(mani),
spreadDist,
ccwl.partialDims,
)
      # now calculate new proposal belief on kernels `allelements[count]`
_checkErrorCCWNumerics(ccwl, testshuffle)
if skipSolve
@warn("skipping numerical solve operation")
else
approxConvOnElements!(ccwl.varValsAll[][ccwl.varidx[]], ccwl, allelements[count], _slack)
end
end
elseif hypoidx != sfidx && hypoidx != 0
# snap together case
# multihypo, take other value case
# sfidx=2, hypoidx=3: 2 should take a value from 3
# sfidx=3, hypoidx=2: 3 should take a value from 2
# DEBUG sfidx=2, hypoidx=1 -- bad when do something like multihypo=[0.5;0.5] -- issue 424
# ccwl.varValsAll[][ccwl.varidx[]][:,allelements[count]] = view(ccwl.varValsAll[hypoidx],:,allelements[count])
# NOTE make alternative case only operate as null hypo
addEntr = view(ccwl.varValsAll[][ccwl.varidx[]], allelements[count])
      # dynamic estimate with user requested spreadNH of how much noise to inject (inflation or nullhypo)
spreadDist =
calcVariableDistanceExpectedFractional(ccwl, sfidx, certainidx; kappa = spreadNH) #,readonlyVarVals = ccwl.varValsAll[][ccwl.varidx[]])
addEntropyOnManifold!(mani, addEntr, 1:getDimension(mani), spreadDist)
elseif hypoidx == 0
# basically do nothing since the factor is not active for these allelements[count]
# inject more entropy in nullhypo case
# add noise (entropy) to spread out search in convolution proposals
addEntr = view(ccwl.varValsAll[][ccwl.varidx[]], allelements[count])
      # dynamic estimate with user requested spreadNH of how much noise to inject (inflation or nullhypo)
spreadDist =
calcVariableDistanceExpectedFractional(ccwl, sfidx, certainidx; kappa = spreadNH) #, readonlyVarVals = ccwl.varValsAll[][ccwl.varidx[]])
# # make spread (1σ) equal to mean distance of other fractionals
addEntropyOnManifold!(mani, addEntr, 1:getDimension(mani), spreadDist)
else
error("computeAcrossHypothesis -- not dealing with multi-hypothesis case correctly")
end
end
return nothing
end
# elseif hypoidx == sfidx
# # multihypo, do conv case, hypoidx == sfidx
# ah = sort(union([sfidx;], certainidx))
# @assert norm(ah - vars) < 1e-10
# for i in 1:Threads.nthreads() ccwl.cpt[i].activehypo = ah; end
# approxConvOnElements!(ccwl, allelements[count])
# TODO what about nullhypo in recipe (when .mhidx[smpid]==0)?
# TODO figure out how best to combine with computeAcrossHypothesis!
function _calcIPCRelative(
Xi::AbstractVector{<:DFGVariable},
ccwl::CommonConvWrapper,
hyporecipe::HypoRecipe, #NamedTuple,
sfidx::Integer,
smpid::Integer = findfirst(x -> x != 0, hyporecipe.mhidx),
)
#
@assert hyporecipe.activehypo[1][1] === 0 "expected 0-hypo case in hyporecipe.activehypo, to get variable hypo mask for relative partial propagation calculations."
@assert hyporecipe.mhidx[smpid] !== 0 "_calcIPCRelative does not yet handle nullhypo gradients, try alternative hypo (smpid=$smpid), available hypos are hyporecipe.mhidx=$(hyporecipe.mhidx)"
# select only the active variables in case of multihypo
# @show smpid
# @show hyporecipe.mhidx
# @show hyporecipe.activehypo
# NOTE +1 bc first element in .activehypo is nullhypo case, e.g. `(0,[1;])`
_selhypo = hyporecipe.mhidx[smpid] + 1
activehypo = hyporecipe.activehypo[_selhypo]
activeids = activehypo[2]
# solvefor index without the fractional variables
active_mask = (x -> x in activeids).(1:length(Xi))
sfidx_active = sum(active_mask[1:sfidx])
# build a view to the decision variable memory
activeParams = view(ccwl.varValsAll[], activeids)
activeVars = Xi[active_mask]
# assume gradients are just done for the first sample values
# error("Possible issue, a factor has one manifold and attached variables have different manifolds. Make sure the plumbing respects that.")
@show typeof(ccwl.usrfnc!)
@show sfidx
# @show getLabel.(Xi)
@show getLabel.(activeVars)
@show getVariableType.(activeVars)
# @show _getindextuple(ccwl.measurement, smpid)
meas_pts =
tuple((_getindextuple(ccwl.measurement, smpid))..., (getindex.(activeParams, smpid))...)
# @show meas_pts
#
ipc = if ccwl._gradients === nothing
ones(getDimension(activeVars[sfidx_active]))
else
ipc_ = Pair[]
# get infoPerCoord from all variables
for (vid, var) in enumerate(activeVars)
# set all other variables infoPerCoord values
getLabel(var) != getLabel(activeVars[sfidx_active]) ? nothing : continue
push!(ipc_, vid => ones(getDimension(var)))
end
# update the gradients at current point estimates
# meas_pts =
ccwl._gradients(meas_pts...)
# do perturbation check
# @show ipc_
allipc = calcPerturbationFromVariable(ccwl._gradients, ipc_)
allipc[sfidx_active]
end
@show ipc
# FIXME REMOVE, overwrite with defauls during dev
# fill!(ipc, 1.0)
return ipc
end
"""
$(SIGNATURES)
Multiple dispatch wrapper for `<:AbstractRelative` types, to prepare and execute the general approximate convolution with user defined factor residual functions. This method also supports multihypothesis operations as one mechanism to introduce new modality into the proposal beliefs.
Planned changes will fold the null hypothesis in as a standard feature so that it no longer appears as a separate `InferenceVariable`.
"""
function evalPotentialSpecific(
variables::AbstractVector{<:DFGVariable},
ccwl::CommonConvWrapper{T},
solvefor::Symbol,
T_::Type{<:AbstractRelative}, # NOTE Relative
measurement::AbstractVector = Tuple[]; # TODO make this a concrete type
  needFreshMeasurements::Bool = true, # supersedes `measurement`
solveKey::Symbol = :default,
sfidx::Integer = findfirst(==(solvefor), getLabel.(variables)),
# destinationVarVals = deepcopy(ccwl.varValsAll[][sfidx]),
N::Int = 0 < length(measurement) ? length(measurement) : maximum(Npts.(getBelief.(variables, solveKey))),
spreadNH::Real = 3.0,
inflateCycles::Int = 3,
nullSurplus::Real = 0,
dbg::Bool = false,
skipSolve::Bool = false,
_slack = nothing,
) where {T <: AbstractFactor}
#
# Prep computation variables
# add user desired measurement values if 0 < length
# 2023Q2, ccwl.varValsAll always points at the variable.VND.val memory locations
# remember when doing approxConv to make a deepcopy of the destination memory first.
maxlen = _beforeSolveCCW!(ccwl, variables, sfidx, N; solveKey, needFreshMeasurements, measurement)
# Check which variables have been initialized
isinit = map(x -> isInitialized(x), variables)
# assemble how hypotheses should be computed
# nullSurplus see #1517
runnullhypo = maximum((ccwl.nullhypo, nullSurplus))
hyporecipe =
_prepareHypoRecipe!(ccwl.hyporecipe.hypotheses, maxlen, sfidx, length(variables), isinit, runnullhypo)
# get manifold add operations
# TODO, make better use of dispatch, see JuliaRobotics/RoME.jl#244
# addOps, d1, d2, d3 = buildHybridManifoldCallbacks(manis)
mani = getManifold(variables[sfidx])
# @assert destinationVarVals !== ccwl.varValsAll[][ccwl.varidx[]] "destination of evalPotential for AbstractRelative not be ccwl.varValsAll[sfidx]"
# NOTE disabled getVal part of this assert because solveKey may not yet exist in different use cases, new graph or loadDFG etc.
# @assert destinationVarVals !== getVal(variables[ccwl.varidx[]]) "destination of evalPotential for AbstractRelative not be variable.VND.val"
# perform the numeric solutions on the indicated elements
# FIXME consider repeat solve as workaround for inflation off-zero
# NOTE alternate use of ccwl.certainidx to hyporecipe, certainidx = ccwl.hyporecipe.certainhypo
computeAcrossHypothesis!(
ccwl,
hyporecipe,
sfidx,
maxlen,
mani;
spreadNH,
inflateCycles,
skipSolve,
_slack,
)
#
# FIXME do info per coord
# ipc_ = _calcIPCRelative(variables, ccwl, hyporecipe, sfidx)
ipc = ones(getDimension(variables[sfidx]))
if isPartial(ccwl)
# FIXME this is a workaround until better _calcIPCRelative can be used
# TODO consolidate to common usage e.g. getPartialDims(ccwl)
msk_ = setdiff(1:length(ipc), ccwl.usrfnc!.partial)
for _i in msk_
ipc[_i] = 0.0
end
end
# return the found points, and info per coord
return ccwl.varValsAll[][sfidx], ipc
end
# TODO `measurement` might not be properly wired up yet
# TODO consider 1051 here to inflate proposals as general behaviour
function evalPotentialSpecific(
variables::AbstractVector{<:DFGVariable},
ccwl::CommonConvWrapper{T},
solvefor::Symbol,
T_::Type{<:AbstractPrior}, # NOTE Prior
measurement::AbstractVector = Tuple[];
needFreshMeasurements::Bool = true,
solveKey::Symbol = :default,
sfidx::Integer=findfirst(==(solvefor), getLabel.(variables)),
# destinationVarVals = deepcopy(ccwl.varValsAll[][sfidx]),
N::Int = 0 < length(measurement) ? length(measurement) : maximum(Npts.(getBelief.(variables, solveKey))),
spreadNH::Real = 3.0,
inflateCycles::Int = 3,
nullSurplus::Real = 0,
dbg::Bool = false,
skipSolve::Bool = false,
_slack = nothing,
) where {T <: AbstractFactor}
#
# Prep computation variables
maxlen = _beforeSolveCCW!(ccwl, variables, sfidx, N; solveKey, needFreshMeasurements, measurement)
# # FIXME, NEEDS TO BE CLEANED UP AND WORK ON MANIFOLDS PROPER
fnc = ccwl.usrfnc!
solveForPts = getVal(variables[sfidx]; solveKey)
# Check which variables have been initialized
# TODO not sure why forcing to Bool vs BitVector
isinit::Vector{Bool} = variables .|> isInitialized .|> Bool
# nullSurplus see #1517
runnullhypo = maximum((ccwl.nullhypo, nullSurplus))
hyporecipe =
_prepareHypoRecipe!(ccwl.hyporecipe.hypotheses, maxlen, sfidx, length(variables), isinit, runnullhypo)
# get solvefor manifolds, FIXME ON FIRE, upgrade to new Manifolds.jl
mani = getManifold(variables[sfidx])
# two cases on how to use the measurement
nhmask = hyporecipe.mhidx .== 0
ahmask = hyporecipe.mhidx .== 1
# generate nullhypo samples
# inject lots of entropy in nullhypo case
# make spread (1σ) equal to mean distance of other fractionals
# FIXME better standardize in-place operations (considering solveKey)
addEntr = if length(solveForPts) == maxlen
deepcopy(solveForPts)
else
ret = typeof(solveForPts)(undef, maxlen)
for i = 1:length(solveForPts)
ret[i] = solveForPts[i]
end
for i = (length(solveForPts) + 1):maxlen
ret[i] = getPointIdentity(getVariableType(variables[sfidx]))
end
ret
end
# TODO consider improving isPartial(ccwl<:AbstractPrior) to also check dimensions since we know pretty well what varDim is.
# TODO workaround until partial manifold approach is standardized, see #1492
Msrc = getManifold(fnc)
asPartial = isPartial(ccwl) || manifold_dimension(Msrc) < manifold_dimension(mani)
# view on elements marked for nullhypo
addEntrNH = view(addEntr, nhmask)
spreadDist = spreadNH * calcStdBasicSpread(getVariableType(variables[sfidx]), addEntr)
# partials are treated differently
ipc = if !asPartial # isPartial(ccwl) #ccwl.partial
# TODO for now require measurements to be coordinates too
# @show typeof(ccwl.measurement[1])
for m in (1:length(addEntr))[ahmask]
# FIXME, selection for all measurement::Tuple elements
# @info "check broadcast" ccwl.usrfnc! addEntr[m] ccwl.measurement[1][m]
setPointsMani!(addEntr, ccwl.measurement, m)
# addEntr[m] = ccwl.measurement[m][1]
end
# ongoing part of RoME.jl #244
addEntropyOnManifold!(mani, addEntrNH, 1:getDimension(mani), spreadDist)
# do info per coords
ones(getDimension(variables[sfidx]))
else
    # FIXME but how to add partial factor info only on affected dimensions for general manifold points?
# pvec
partialCoords = if hasfield(typeof(fnc), :partial)
ccwl.partialDims # [fnc.partial...]
else
collect(1:manifold_dimension(Msrc))
end
if !hasmethod(getManifold, (typeof(fnc),))
@debug "No method getManifold for $(typeof(fnc)), using getManifoldPartial"
end
# active hypo that receives the regular measurement information
for m in (1:length(addEntr))[ahmask]
# addEntr is no longer in coordinates, these are now general manifold points!!
# for (i,dimnum) in enumerate(fnc.partial)
# FIXME, need ability to replace partial points
# partialCoords = ccwl.partialDims
#FIXME check if getManifold is defined otherwise fall back to getManifoldPartial, JT: I would like to standardize to getManifold
if hasmethod(getManifold, (typeof(fnc),))
# Msrc = getManifold(fnc)
# # TODO workaround until partial manifold approach is standardized, see #1492
# asPartial = isPartial(fnc) || manifold_dimension(Msrc) < manifold_dimension(mani)
setPointPartial!(
mani,
addEntr,
Msrc,
ccwl.measurement, # FIXME, measurements are tangents=>relative or points=>priors
partialCoords,
m,
m,
asPartial,
)
else
# this case should be less prevalent following PR #1662
@warn "could not find definition for getManifold(::$(typeof(fnc)))" maxlog=10
Msrc, = getManifoldPartial(mani, partialCoords)
setPointPartial!(
mani,
addEntr[m],
Msrc,
ccwl.measurement[m],
partialCoords
)
end
# addEntr[m][dimnum] = ccwl.measurement[1][m][i]
# end
end
# null hypo mask that needs to be perturbed by "noise"
addEntrNHp = view(addEntr, nhmask)
# ongoing part of RoME.jl #244
addEntropyOnManifold!(mani, addEntrNHp, 1:getDimension(mani), spreadDist, partialCoords) # pvec
# do info per coords
ipc_ = zeros(getDimension(variables[sfidx]))
ipc_[partialCoords] .= 1.0 # pvec
ipc_
end
# check partial is easy as this is a prior
return addEntr, ipc
end
function evalPotentialSpecific(
Xi::AbstractVector{<:DFGVariable},
ccwl::CommonConvWrapper{Mixture{N_, F, S, T}},
solvefor::Symbol,
measurement::AbstractVector = Tuple[];
kw...,
) where {N_, F <: AbstractFactor, S, T}
#
return evalPotentialSpecific(Xi, ccwl, solvefor, F, measurement; kw...)
end
function evalPotentialSpecific(
Xi::AbstractVector{<:DFGVariable},
ccwl::CommonConvWrapper{F},
solvefor::Symbol,
measurement::AbstractVector = Tuple[];
kw...,
) where {F <: AbstractFactor}
#
return evalPotentialSpecific(Xi, ccwl, solvefor, F, measurement; kw...)
end
"""
$(SIGNATURES)
Single entry point for evaluating factors from factor graph, using multiple dispatch to locate the correct `evalPotentialSpecific` function.
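Example
A minimal sketch, assuming `fg` contains an initialized binary factor labeled `:x1x2f1`:
```julia
pts, ipc = evalFactor(fg, getFactor(fg, :x1x2f1), :x2)
```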
"""
function evalFactor(
dfg::AbstractDFG,
fct::DFGFactor,
solvefor::Symbol,
measurement::AbstractVector = Tuple[]; # FIXME ensure type stable in all cases
needFreshMeasurements::Bool = true,
solveKey::Symbol = :default,
variables = getVariable.(dfg, getVariableOrder(fct)), # FIXME use tuple instead for type stability
N::Int = length(measurement),
inflateCycles::Int = getSolverParams(dfg).inflateCycles,
nullSurplus::Real = 0,
dbg::Bool = false,
skipSolve::Bool = false,
_slack = nothing,
)
#
return evalPotentialSpecific(
variables,
_getCCW(fct),
solvefor,
measurement;
needFreshMeasurements,
solveKey,
N,
dbg,
spreadNH = getSolverParams(dfg).spreadNH,
inflateCycles,
nullSurplus,
skipSolve,
_slack,
)
#
end
"""
$SIGNATURES
Perform factor evaluation to resolve the "solve for" variable of a factor.
This temporary function can be run without passing a factor graph object, but will internally allocate a new temporary one.
Alternatively, the factor graph used for calculations can be passed in via the keyword `tfg`, hence the function name bang.
Notes
- `TypeParams_args::Vector{Tuple{InferenceVariable, P}}`
- the idea is to find the best e.g. `b`, given `f(z,a,b,c)`, either by root finding or minimization (depends on the definition of `f`)
- `sfidx::Int` is the solve for index, assuming `getVariableOrder(fct)`.
Example
```julia
B = _evalFactorTemporary!(EuclidDistance, (ContinuousScalar, ContinuousScalar), 2, ([10;],), ([0.],[9.5]) )
# should return `B = 10`
```
See also: [`calcFactorResidual`](@ref), [`testFactorResidualBinary`](@ref), [`solveFactorParametric`](@ref), [`approxConvBelief`](@ref)
"""
function _evalFactorTemporary!(
fct::AbstractFactor,
varTypes::Tuple,
sfidx::Int, # solve for index, assuming variable order for fct
measurement::AbstractVector,
pts::Tuple;
tfg::AbstractDFG = initfg(),
solveKey::Symbol = :default,
newFactor::Bool = true,
_slack = nothing,
buildgraphkw...,
)
#
# build up a temporary graph in dfg
_, _dfgfct = IIF._buildGraphByFactorAndTypes!(
fct,
varTypes,
pts;
dfg = tfg,
solveKey,
newFactor,
buildgraphkw...,
)
# get label convention for which variable to solve for
solvefor = getVariableOrder(_dfgfct)[sfidx]
# do the factor evaluation
sfPts, _ = evalFactor(
tfg,
_dfgfct,
solvefor,
measurement;
needFreshMeasurements = false,
solveKey,
inflateCycles = 1,
_slack,
)
return sfPts
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
["MIT"] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 10727 |
# Second iteration of explicitly exploring the marginalization of discrete variables onto the continuous state space.
# Although this code is still excessive and messy, this is a significant feature expansion; future versions will
# generalize the marginalization process to allow for implicit hypothesis exploration. The messy explicit version of the code is
# intended to help develop the required general unit tests. The unit tests will then be validated, frozen, and used to
# confirm that future "algebraic" marginalization (implicit) versions operate correctly.
# FYI, the complexity of general multihypothesis convolutions can be deceiving; however, note that the coding
# complexity is contained to each individual factor at a time. Global Bayes tree inference then creates the symphony
# of non-Gaussian (multimodal) posterior beliefs from the entire factor graph.
#
# 2018/6/01 @dehann
"""
$SIGNATURES
Return common vectors `(allmhp, certainidx, uncertnidx)` used for dealing with multihypo cases.
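Example
A small sketch; by convention, zero-weight entries in the internal `Categorical` mark the certain (non-fractional) variables:
```julia
getHypothesesVectors([0.0, 0.5, 0.5]) # ([1, 2, 3], [1], [2, 3])
```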
"""
function getHypothesesVectors(
mhp::Vector{Float64},
)::Tuple{Vector{Int}, Vector{Int}, Vector{Int}}
allmhp = 1:length(mhp)
certainidx = allmhp[mhp .== 0.0] # TODO remove after gwp removed
uncertnidx = allmhp[0.0 .< mhp]
return (allmhp, certainidx, uncertnidx)
end
"""
$(SIGNATURES)
This function explicitly encodes the marginalization of a discrete categorical selection variable
for ambiguous data association situations. This function populates `allelements` with particle
indices associated with particular multihypothesis selection while `activehypo` simultaneously
contains the hypothesis index and factor graph variables associated with that hypothesis selection. The return value `certainidx` lists the hypotheses that are not in question.
This function does not consider whether an unroll hypo lambda can actually exist; it just generates
a recipe of which options should be considered. Whoever solves the recipe is responsible for
building the right lambda or doing something else.
Input:
- `maxlen` is the max number of samples across all variables
Output:
- `certainidx`: non fractional variables
- `allelements`: list of which particles go with which hypothesis selection
- `activehypo`: list of which hypotheses go with which certain + fractional variables
- `mhidx`: multihypothesis selection per particle idx
- `sfidx`: Solve for idx
Example:
```julia
idx=(1,2,3)
multihypo=[1.0;0.5;0.5] # X,La,Lb
# specific example
# this is important -- e.g. `pts = approxConv(fg, :XLaLbf1, :La)`
sfidx=2
certainidx=
1-element Array{Int64,1}:
1 # X
allelements=
3-element Array{Any,1}:
Int64[]
[1, 2, 11, ...]
[3, 4, 5, ...]
activehypo=
4-element Array{Any,1}:
 (0, [2]) # nullhypo -- forced afterward, might be deprecated if better solution is found
 (1, [1, 2]) # unroll hypo lambda for X,La
 (2, [1, 2]) # unroll hypo lambda for X,La
 (3, [2, 3]) # would be (but cannot) unroll hypo La,Lb # almost our nullhypo # won't build a lambda
# now select which lambdas to build based on converted `activehypo` rand( Categorical( [0; 0.5;0.5] ) )
mhidx=
100-element Array{Int64,1}:
2, 2, 3, 3, 3,...
```
Another example of what `certainidx` contains:
```julia
multihypo=[1;1;0.5;0.5]
# results in: `certainidx = [1;2]`
```
Notes:
- Issue 427, race condition during initialization since n-ary variables not resolvable without other init.
DevNotes
- FIXME convert into some kind of `HypoRecipe` struct, to improve readability and code maintainability
- TODO add nullhypo cases to returning result
- FIXME make type-stable `activehypo` and others
- Improved implementations should implicitly induce the same behaviour through summation (integration) when marginalizing any number of discrete variables.
```
# `allelements` example BearingRange [:x1, 0.5:l1a, 0.5:l1b]
# sfidx = (1=:x1,2=:l1a,3=:l1b)
if solvefor :x1, then allelem = [mhidx.==:l1a; mhidx.==l1b]
if solvefor :l1a, then allelem = [mhidx.==:l1a] and ARR[solvefor][:,mhidx.==:l1b]=ARR[:l1b][:,mhidx.==:l1b]
if solvefor :l1b, then allelem = [mhidx.==:l1b] and ARR[solvefor][:,mhidx.==:l1a]=ARR[:l1a][:,mhidx.==:l1a]
if solvefor 1, then allelem = [mhidx.==2; mhidx.==3]
if solvefor 2, then allelem = [mhidx.==2] and ARR[solvefor][:,mhidx.==3]=ARR[3][:,mhidx.==3]
if solvefor 3, then allelem = [mhidx.==3] and ARR[solvefor][:,mhidx.==2]=ARR[2][:,mhidx.==2]
# `activehypo` in example mh=[1.0;0.5;0.5]
sfidx=1, mhidx=2: ah = [1;2]
sfidx=1, mhidx=3: ah = [1;3]
sfidx=2, mhidx=2: ah = [1;2]
sfidx=2, mhidx=3: 2 should take a value from 3 or nullhypo
sfidx=3, mhidx=2: 3 should take a value from 2 or nullhypo
sfidx=3, mhidx=3: ah = [1;3]
# `activehypo` in example mh=[1.0;0.33;0.33;0.34]
sfidx=1, mhidx=2: ah = [1;2]
sfidx=1, mhidx=3: ah = [1;3]
sfidx=1, mhidx=4: ah = [1;4]
sfidx=2, mhidx=2: ah = [1;2]
sfidx=2, mhidx=3: 2 should take a value from 3 or nullhypo
sfidx=2, mhidx=4: 2 should take a value from 4 or nullhypo
sfidx=3, mhidx=2: 3 should take a value from 2 or nullhypo
sfidx=3, mhidx=3: ah = [1;3]
sfidx=3, mhidx=4: 3 should take a value from 4 or nullhypo
sfidx=4, mhidx=2: 4 should take a value from 2 or nullhypo
sfidx=4, mhidx=3: 4 should take a value from 3 or nullhypo
sfidx=4, mhidx=4: ah = [1;4]
```
Also keeping the default case documented:
```
# the default case where mh==nothing
# equivalent to mh=[1;1;1] # assuming 3 variables
sfidx=1, allelements=allidx[nhidx.==0], activehypo=(0,[1;])
sfidx=2, allelements=allidx[nhidx.==0], activehypo=(0,[2;])
sfidx=3, allelements=allidx[nhidx.==0], activehypo=(0,[3;])
```
TODO still need to compensate multihypo case for user nullhypo addition.
"""
function _prepareHypoRecipe!(
mh::Categorical,
maxlen::Int,
sfidx::Int,
lenXi::Int,
isinit::Vector{Bool} = ones(Bool, lenXi),
nullhypo::Real = 0,
)
#
allelements = Vector{Vector{Int}}()
activehypo = Vector{Tuple{Int, Vector{Int}}}()
mhidx = Vector{Int}()
allidx = 1:maxlen
allmhp, certainidx, uncertnidx = getHypothesesVectors(mh.p)
# select only hypotheses that can be used (ie variables have been initialized)
  @assert !(sum(isinit) == 0 && sfidx in certainidx) # cannot init from nothing for any hypothesis
mhh = if sum(isinit) < lenXi - 1
@assert isLeastOneHypoAvailable(sfidx, certainidx, uncertnidx, isinit)
@info "not all hypotheses initialized, but at least one available -- see #427"
mhp = deepcopy(mh.p)
suppressmask = isinit .== false
suppressmask[sfidx] = false
mhp[suppressmask] .= 0.0
mhp ./= sum(mhp)
Categorical(mhp)
else
mh
end
# FIXME consolidate with addEntropyOnManifolds approach in `computeAcrossHypothesis!`
# prepend for the mhidx=0, bad-init-null-hypothesis case (if solving a fractional variable)
mhh = if sfidx in uncertnidx
nhw = (length(uncertnidx) + 1)
nmhw = [1 / nhw; length(uncertnidx) / nhw * mhh.p]
nmhw ./= sum(nmhw) # renormalize (should not be necessary)
Categorical(nmhw)
else
mhh
end
  # prep multihypothesis selection values
mhidx = rand(mhh, maxlen) # selection of which hypothesis is correct
pidx = 0
if sfidx in uncertnidx
# shift down to get mhidx=0 case
mhidx .-= 1
pidx = -1
end
sfincer = sfidx in certainidx
for pval in mhh.p
pidx += 1
pidxincer = pidx in certainidx # ??
# permutation vectors for later computation
iterarr = allidx[mhidx .== pidx]
iterah = Int[]
if !pidxincer && sfincer && pidx != 0 # 1e-15 <= pval && mh.p[sfidx] < 1e-10 # proxy for sfidx in certainidx
# solve for one of the certain variables containing uncertain hypotheses in others
iterah = sort(union(certainidx, pidx)) # sort([sfidx;pidx])
# DONE -- supports n-ary factors in multihypo mode
elseif (pidxincer && !sfincer || sfidx == pidx) && pidx != 0 # pval < 1e-15 && mh.p[sfidx] >= 1e-10
# solve for one of the uncertain variables
iterah = sort(union(certainidx, sfidx)) # sort([sfidx;pidx])
# EXPERIMENTAL -- support more than binary factors in multihypo mode
elseif pidxincer && sfincer && pidx != 0 # pval < 1e-15 && mh.p[sfidx] < 1e-10
iterarr = Int[]
iterah = Int[] # may be moot anyway, but double check first
elseif !pidxincer && !sfincer && pidx != 0 # pval >= 1e-15 && mh.p[sfidx] >= 1e-10
iterah = uncertnidx #allmhp[mh.p .> 1e-15]
elseif pidx == 0
# nullhypo only take values from self sfidx, might add entropy later (for bad init case)
iterah = [sfidx;]
else
error("Unknown hypothesis case, got sfidx=$(sfidx) with mh.p=$(mh.p), pidx=$(pidx)")
end
push!(allelements, iterarr)
push!(activehypo, (pidx, iterah))
end
# # retroactively add nullhypo case (the 0 case)
# if sfidx in uncertnidx
# #
# end
return HypoRecipe(; certainidx, allelements, activehypo, mhidx)
# hyporecipe::NamedTuple
# return (; certainidx, allelements, activehypo, mhidx)
end
function _prepareHypoRecipe!(
mh::Nothing,
maxlen::Int,
sfidx::Int,
lenXi::Int,
isinit::Vector{Bool} = ones(Bool, lenXi),
nullhypo::Real = 0,
)
#
# FIXME, consolidate with the general multihypo case
# the default case where mh==nothing
# equivalent to mh=[1;1;1] # assuming 3 variables
# sfidx=1, allelements=allidx[nhidx.==0], activehypo=(0,[1;])
#
allelements = Vector{Vector{Int}}()
activehypo = Vector{Tuple{Int,Vector{Int}}}()
# TODO add cases where nullhypo occurs, see DFG #536, and IIF #237
nmhw = [nullhypo; (1 - nullhypo)]
nhh = Categorical(nmhw)
  # prep multihypothesis selection values
# mhidx = Int[]
# NOTE, must do something special to get around Categorical([0;10]) error
# selection of which hypothesis is correct
mhidx = nullhypo == 0 ? ones(Int, maxlen) : (rand(nhh, maxlen) .- 1)
allidx = 1:maxlen
certainidx = 1:lenXi
# zero is nullhypo case, 1 is first sfidx variable
nullarr = allidx[mhidx .== 0]
# mhidx == 1 case is regular -- this will be all elements if nullhypo=0.0
reguarr = allidx[mhidx .!= 0]
pidxAll = [0; certainidx]
for pidx in pidxAll
if pidx == 0
# elements that occur during nullhypo active
push!(allelements, nullarr)
push!(activehypo, (pidx, [sfidx;]))
elseif pidx == 1
# elements that occur during regular hypothesis true
push!(allelements, reguarr)
push!(activehypo, (pidx, certainidx))
else
# all remaining collections are empty (part of multihypo support)
push!(allelements, Int[])
push!(activehypo, (pidx, Int[]))
end
end
return HypoRecipe(; certainidx, allelements, activehypo, mhidx)
# return hyporecipe::NamedTuple
# return (; certainidx, allelements, activehypo, mhidx)
end
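# Example (sketch of the default `mh == nothing`, `nullhypo == 0` case):
#   recipe = _prepareHypoRecipe!(nothing, 100, 1, 3)
#   # recipe.mhidx == ones(Int, 100)
#   # recipe.activehypo == [(0, [1]), (1, [1, 2, 3]), (2, Int[]), (3, Int[])]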
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
["MIT"] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 15614 | # Factor Graph OS type utilities
# IIF methods should directly detect extended types from user import
# of convert in their namespace
# FIXME, upgrade to AMP instead
KDE.getPoints(dfg::AbstractDFG, lbl::Symbol) = getBelief(dfg, lbl) |> getPoints
clampStringLength(st::AbstractString, len::Int = 5) = st[1:minimum([len; length(st)])]
function clampBufferString(
st::AbstractString,
max::Int,
len::Int = minimum([max, length(st)]),
)
  @assert 0 <= max "max must be greater than or equal to zero"
st = clampStringLength(st, len)
for i = len:(max - 1)
st *= " "
end
return st
end
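# Examples (sketch):
#   clampBufferString("abcdef", 4) == "abcd"
#   clampBufferString("ab", 5) == "ab   "  # padded with trailing spaces up to max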
"""
$SIGNATURES
Extract contiguous string of numbers at end of a label`::Symbol` -- e.g. `:x45_4` --> "4".
Returns `(string, suffix_substring)`
Related
[`incrSuffix`](@ref)
"""
function _getSuffix(lbl::Symbol; pattern::Regex = r"\d+")
slbl = string(lbl)
phrase_ = slbl |> reverse |> x -> match(pattern, x).match
return slbl, reverse(phrase_)
end
"""
$SIGNATURES
Utility for incrementing or decrementing suffix numbers in DFG variable labels, e.g.
```julia
incrSuffix(:x45_4)
# returns :x45_5
incrSuffix(:x45, +3)
# returns :x48
incrSuffix(:x45_4, -1)
# returns :x45_3
```
Notes
- Change `pattern::Regex=r"\\d+"` for alternative behaviour.
"""
function incrSuffix(lbl::Symbol, val::Integer = +1; pattern::Regex = r"\d+")
slbl, phrase = _getSuffix(lbl; pattern = pattern)
nint = phrase |> x -> (parse(Int, x) + val)
prefix = slbl[1:(end - length(phrase))]
return Symbol(prefix, nint)
end
"""
$SIGNATURES
Get the CommonConvWrapper for this factor.
"""
_getCCW(gfnd::GenericFunctionNodeData) = gfnd.fnc
_getCCW(fct::DFGFactor) = getSolverData(fct) |> _getCCW
_getCCW(dfg::AbstractDFG, lbl::Symbol) = getFactor(dfg, lbl) |> _getCCW
DFG.getFactorType(ccw::CommonConvWrapper) = ccw.usrfnc!
_getZDim(ccw::CommonConvWrapper) = getManifold(ccw) |> manifold_dimension # ccw.zDim
# TODO is MsgPrior piggy backing zdim on inferdim???
_getZDim(ccw::CommonConvWrapper{<:MsgPrior}) = length(ccw.usrfnc!.infoPerCoord) # ccw.usrfnc!.inferdim
_getZDim(fcd::GenericFunctionNodeData) = _getCCW(fcd) |> _getZDim
_getZDim(fct::DFGFactor) = _getCCW(fct) |> _getZDim
DFG.getDimension(fct::GenericFunctionNodeData) = _getZDim(fct)
DFG.getDimension(fct::DFGFactor) = _getZDim(fct)
"""
$SIGNATURES
Return the manifold on which this ManifoldKernelDensity is defined.
DevNotes
- TODO currently ignores the .partial aspect (captured in parameter `L`)
"""
function getManifold(
mkd::ManifoldKernelDensity{M, B, Nothing},
asPartial::Bool = false,
) where {M, B}
return mkd.manifold
end
function getManifold(
mkd::ManifoldKernelDensity{M, B, L},
asPartial::Bool = false,
) where {M, B, L <: AbstractVector}
return asPartial ? mkd.manifold : getManifoldPartial(mkd.manifold, mkd._partial)
end
"""
$TYPEDSIGNATURES
Return the number of dimensions this factor vertex `fc` influences.
DevNotes
- TODO document how this function handles partial dimensions
- Currently a factor manifold is just what the measurement provides (i.e. bearing only would be dimension 1)
"""
getFactorDim(w...) = getDimension(w...)
getFactorDim(fg::AbstractDFG, fctid::Symbol) = getFactorDim(getFactor(fg, fctid))
# extend convenience function (Matrix or Vector{P})
function manikde!(
variableType::Union{InstanceType{<:InferenceVariable}, InstanceType{<:AbstractFactor}},
pts::AbstractVector{P};
kw...,
) where {P <: Union{<:AbstractArray, <:Number, <:Manifolds.ArrayPartition}}
#
M = getManifold(variableType)
# @info "pts" P typeof(pts[1]) pts[1]
infoPerCoord = ones(AMP.getNumberCoords(M, pts[1]))
return AMP.manikde!(M, pts; infoPerCoord, kw...)
end
function manikde!(
varT::InstanceType{<:InferenceVariable},
pts::AbstractVector{<:Tuple};
kw...,
)
#
return manikde!(varT, (t -> ArrayPartition(t...)).(pts); kw...)
end
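# Example (sketch, assuming a 2D Euclidean variable type):
#   pts = [randn(2) for _ = 1:100]
#   P = manikde!(ContinuousEuclid{2}, pts)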
"""
$SIGNATURES
Return params.N measurement samples for a factor in `<:AbstractDFG`.
"""
function getMeasurements(dfg::AbstractDFG, fsym::Symbol, N::Int = getSolverParams(dfg).N)
return sampleFactor(dfg, fsym, N)
end
"""
$SIGNATURES
Get the folder location where debug and solver information is recorded for a particular factor graph.
"""
getLogPath(opt::SolverParams) = opt.logpath
getLogPath(dfg::AbstractDFG) = getSolverParams(dfg) |> getLogPath
"""
$SIGNATURES
Append `str` onto factor graph log path as convenience function.
"""
joinLogPath(opt::SolverParams, str...) = joinpath(getLogPath(opt), str...)
joinLogPath(dfg::AbstractDFG, str...) = joinLogPath(getSolverParams(dfg), str...)
"""
$(SIGNATURES)
Set variable(s) `sym` of factor graph to be marginalized -- i.e. not be updated by inference computation.
"""
function setfreeze!(dfg::AbstractDFG, sym::Symbol)
if !isInitialized(dfg, sym)
@warn "Vertex $(sym) is not initialized, and won't be frozen at this time."
return nothing
end
vert = DFG.getVariable(dfg, sym)
data = getSolverData(vert)
data.ismargin = true
return nothing
end
function setfreeze!(dfg::AbstractDFG, syms::Vector{Symbol})
for sym in syms
setfreeze!(dfg, sym)
end
end
"""
$(SIGNATURES)
Freeze nodes that are older than the quasi fixed-lag length defined by `fg.qfl`, according to `fg.fifo` ordering.
Future:
- Allow different freezing strategies beyond fifo.
"""
function fifoFreeze!(dfg::AbstractDFG)
if DFG.getSolverParams(dfg).qfl == 0
@warn "Quasi fixed-lag is enabled but QFL horizon is zero. Please set a valid window with FactoGraph.qfl"
end
# the fifo history
tofreeze = DFG.getAddHistory(dfg)[1:(end - DFG.getSolverParams(dfg).qfl)]
  # check that the variables to freeze exist, fix for issue #966
filter!(v -> exists(dfg, v), tofreeze)
if length(tofreeze) == 0
@info "[fifoFreeze] QFL - no nodes to freeze."
return nothing
end
@info "[fifoFreeze] QFL - Freezing nodes $(tofreeze[1]) -> $(tofreeze[end])."
setfreeze!(dfg, tofreeze)
return nothing
end
DFG.getPoint(typ::InferenceVariable, w...; kw...) = getPoint(typeof(typ), w...; kw...)
function DFG.getCoordinates(typ::InferenceVariable, w...; kw...)
return getCoordinates(typeof(typ), w...; kw...)
end
# WIP
# _getMeasurementRepresentation(::AbstractPrior, coord::AbstractVector{<:Number}) =
"""
$SIGNATURES
Calculate new Parametric Point Estimates (PPE)---based on the full marginal belief estimate---for a given variable in the distributed factor graph.
DevNotes
- TODO update for manifold subgroups.
- TODO standardize after AMP3D
Related
[`getPPE`](@ref), [`setPPE!`](@ref), [`getVariablePPE`](@ref)
"""
function calcPPE(
var::DFGVariable,
varType::InferenceVariable = getVariableType(var);
ppeType::Type{<:MeanMaxPPE} = MeanMaxPPE,
solveKey::Symbol = :default,
ppeKey::Symbol = solveKey
)
#
P = getBelief(var, solveKey)
maniDef = convert(MB.AbstractManifold, varType)
manis = convert(Tuple, maniDef) # LEGACY, TODO REMOVE
ops = buildHybridManifoldCallbacks(manis)
Pme = calcMean(P) # getKDEMean(P) #, addop=ops[1], diffop=ops[2]
  # returns coordinates at identity
Pma = getKDEMax(P; addop = ops[1], diffop = ops[2])
# calculate point
## TODO make PPE only use getCoordinates for now (IIF v0.25)
Pme_ = getCoordinates(varType, Pme)
# Pma_ = getCoordinates(M,Pme)
ppes = getPPEDict(var)
id = if haskey(ppes, ppeKey)
ppes[ppeKey].id
else
nothing
end
# suggested, max, mean, current time
# TODO, poor constructor argument assumptions on `ppeType`
return ppeType(;
id,
solveKey=ppeKey,
suggested=Pme_,
max=Pma,
mean=Pme_,
)
end
# calcPPE(var::DFGVariable; method::Type{<:AbstractPointParametricEst}=MeanMaxPPE, solveKey::Symbol=:default) = calcPPE(var, getVariableType(var), method=method, solveKey=solveKey)
function calcPPE(
dfg::AbstractDFG,
label::Symbol;
solveKey::Symbol = :default,
ppeType::Type{<:AbstractPointParametricEst} = MeanMaxPPE,
)
#
var = getVariable(dfg, label)
return calcPPE(var, getVariableType(var); ppeType = ppeType, solveKey = solveKey)
end
const calcVariablePPE = calcPPE
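# Example (sketch):
#   ppe = calcPPE(fg, :x0; solveKey = :default)
#   ppe.suggested # coordinate-space point estimate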
"""
$SIGNATURES
Return bool on whether a certain factor has user defined multihypothesis.
Related
[`getMultihypoDistribution`](@ref)
"""
isMultihypo(fct::DFGFactor) = isa(_getCCW(fct).hyporecipe.hypotheses, Distribution)
"""
$SIGNATURES
Return the categorical distributed used for multihypothesis selection in a factor.
Related
isMultihypo
"""
getMultihypoDistribution(fct::DFGFactor) = _getCCW(fct).hyporecipe.hypotheses
"""
$SIGNATURES
Free all variables from marginalization.
"""
function dontMarginalizeVariablesAll!(fgl::AbstractDFG)
fgl.solverParams.isfixedlag = false
fgl.solverParams.qfl = (2^(Sys.WORD_SIZE - 1) - 1)
fgl.solverParams.limitfixeddown = false
for sym in ls(fgl)
setMarginalized!(fgl, sym, false)
end
return nothing
end
"""
$SIGNATURES
Free all variables from marginalization.
Related
dontMarginalizeVariablesAll!
"""
function unfreezeVariablesAll!(fgl::AbstractDFG)
return dontMarginalizeVariablesAll!(fgl)
end
# WIP
# function resetSolvableAllExcept!(dfg::AbstractDFG,
# fltr::NothingUnion{Regex}=nothing)
# #
# unfreezeVariablesAll!(dfg)
# end
"""
$SIGNATURES
Reset initialization flag on all variables in `::AbstractDFG`.
Notes
- Numerical values remain, but inference will overwrite since init flags are now `false`.
"""
function resetVariableAllInitializations!(fgl::AbstractDFG)
vsyms = ls(fgl)
for sym in vsyms
    setVariableInitialized!(getVariable(fgl, sym), false)
end
return nothing
end
"""
$SIGNATURES
Enable defaults for fixed-lag-like operation by using smart message passing on the tree.
Notes:
- These are only default settings, and can be modified in each use case scenario.
- Default does not update downsolve through to leaves of the tree.
"""
function defaultFixedLagOnTree!(
dfg::AbstractDFG,
len::Int = 30;
limitfixeddown::Bool = true,
)
#
getSolverParams(dfg).isfixedlag = true
getSolverParams(dfg).qfl = len
getSolverParams(dfg).limitfixeddown = limitfixeddown
return getSolverParams(dfg)
end
"""
$SIGNATURES
Return `::Tuple` with matching variable ID symbols and `Suggested` PPE values.
Related
getVariablePPE
"""
function getPPESuggestedAll(dfg::AbstractDFG, regexFilter::Union{Nothing, Regex} = nothing)
#
# get values
vsyms = listVariables(dfg, regexFilter) |> sortDFG
slamPPE = map(x -> getVariablePPE(dfg, x).suggested, vsyms)
# sizes to convert to matrix
rumax = zeros(Int, 2)
for ppe in slamPPE
rumax[2] = length(ppe)
rumax[1] = maximum(rumax)
end
# populate with values
XYT = zeros(length(slamPPE), rumax[1])
for i = 1:length(slamPPE)
XYT[i, 1:length(slamPPE[i])] = slamPPE[i]
end
return (vsyms, XYT)
end
"""
$SIGNATURES
Find and return a `::Tuple` of variables and distances to `loc::Vector{<:Real}`.
Related
findVariablesNearTimestamp
"""
function findVariablesNear(
dfg::AbstractDFG,
loc::Vector{<:Real},
regexFilter::Union{Nothing, Regex} = nothing;
number::Int = 3,
)
#
xy = getPPESuggestedAll(dfg, regexFilter)
dist = sum((xy[2][:, 1:length(loc)] .- loc') .^ 2; dims = 2) |> vec
prm = (dist |> sortperm)[1:number]
return (xy[1][prm], sqrt.(dist[prm]))
end
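# Example (sketch): the three variables nearest the origin
#   syms, dists = findVariablesNear(fg, [0.0; 0.0]; number = 3)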
"""
$SIGNATURES
Find all factors that go `from` variable to any other complete variable set within `between`.
Notes
- Developed for downsolve in CSM, expanding the cliqSubFg to include all frontal factors.
"""
function findFactorsBetweenFrom(
dfg::G,
between::Vector{Symbol},
from::Symbol,
) where {G <: AbstractDFG}
# get all associated factors
allfcts = ls(dfg, from)
# remove candidates with neighbors outside between with mask
mask = ones(Bool, length(allfcts))
i = 0
for fct in allfcts
i += 1
# check if immediate neighbors are all in the `between` list
immnei = ls(dfg, fct)
if length(immnei) != length(intersect(immnei, between))
mask[i] = false
end
end
# return only masked factors
return allfcts[mask]
end
"""
$SIGNATURES
Return list of factors which depend only on variables in variable list in factor
graph -- i.e. among variables.
Notes
-----
* `unused::Bool=true` will disregard factors already used -- i.e. disregard where `potentialused=true`
"""
function getFactorsAmongVariablesOnly(
dfg::G,
varlist::Vector{Symbol};
unused::Bool = true,
) where {G <: AbstractDFG}
# collect all factors attached to variables
prefcts = Symbol[]
for var in varlist
union!(prefcts, DFG.ls(dfg, var))
end
almostfcts = Symbol[]
if unused
# now check if those factors have already been added
for fct in prefcts
vert = DFG.getFactor(dfg, fct)
if !getSolverData(vert).potentialused
push!(almostfcts, fct)
end
end
else
almostfcts = prefcts
end
# Select factors that have all variables in this clique var list
usefcts = Symbol[]
for fct in almostfcts
if length(setdiff(listNeighbors(dfg, fct), varlist)) == 0
push!(usefcts, fct)
end
end
return usefcts
end
"""
$SIGNATURES
Calculate new and then set PPE estimates for variable from some distributed factor graph.
DevNotes
- TODO solve key might be needed if one only wants to update one
- TODO consider a more fitting name.
- guess it would make sense that :default=>variableNodeData, goes with :default=>MeanMaxPPE
Aliases
- `setVariablePosteriorEstimates!`
DevNotes:
JT - TODO if subfg is in the cloud or from another fg it has to be updated;
it feels like a waste to update the whole variable for one field.
Currently only mergeUpdateVariableSolverData() could be found for this.
It might be handy to use a setter such as updatePointParametricEst(dfg, variable, solverkey).
This might also not be the correct place; if it is, uncomment:
```julia
if (subfg <: InMemoryDFGTypes)
  updateVariable!(subfg, var)
end
```
Related
[`calcPPE`](@ref), getVariablePPE, (updatePPE! ?)
"""
function setPPE!(
variable::DFGVariable,
solveKey::Symbol = :default,
ppeType::Type{T} = MeanMaxPPE,
newPPEVal::T = calcPPE(variable; ppeType = ppeType, solveKey = solveKey),
) where {T <: AbstractPointParametricEst}
#
# vnd = getSolverData(variable, solveKey)
#TODO in the future one can perhaps populate other solver data types here by looking at the typeof ppeDict entries
getPPEDict(variable)[solveKey] = newPPEVal
return variable
end
function setPPE!(
subfg::AbstractDFG,
label::Symbol,
solveKey::Symbol = :default,
ppeType::Type{T} = MeanMaxPPE,
newPPEVal::NothingUnion{T} = nothing,
) where {T <: AbstractPointParametricEst}
#
variable = getVariable(subfg, label)
# slight optimization to avoid double variable lookup (should be optimized out during code lowering)
newppe = if newPPEVal !== nothing
newPPEVal
else
calcPPE(variable; solveKey = solveKey, ppeType = ppeType)
end
return setPPE!(variable, solveKey, ppeType, newppe)
end
const setVariablePosteriorEstimates! = setPPE!
## ============================================================================
# Starting integration with Manifolds.jl, via ApproxManifoldProducts.jl first
## ============================================================================
"""
$SIGNATURES
Fetch and unpack JSON dictionary stored as a data blob.
"""
function fetchDataJSON(dfg::AbstractDFG, varsym::Symbol, lbl::Symbol)
gde, rawData = getData(dfg, varsym, lbl)
if gde.mimeType == "application/json/octet-stream"
JSON3.read(IOBuffer(rawData))
else
error("Unknown JSON Blob format $(gde.mimeType)")
end
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
["MIT"] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 12175 | # utilities for calculating the gradient over factors
function factorJacobian(
fg,
faclabel::Symbol,
p0 = ArrayPartition(first.(getVal.(fg, getVariableOrder(fg, faclabel), solveKey = :parametric))...),
backend = ManifoldDiff.TangentDiffBackend(ManifoldDiff.FiniteDiffBackend()),
)
fac = getFactor(fg, faclabel)
varlabels = getVariableOrder(fac)
varIntLabel = OrderedDict(zip(varlabels, eachindex(varlabels)))
cfm = CalcFactorResidual(fg, fac, varIntLabel)
function costf(p)
points = map(idx->p.x[idx], cfm.varOrderIdxs)
return cfm.sqrt_iΣ * cfm(cfm.meas, points...)
end
M_dom = ProductManifold(getManifold.(fg, varlabels)...)
#TODO verify M_codom
M_codom = Euclidean(manifold_dimension(getManifold(fac)))
# Jx(M, p) = ManifoldDiff.jacobian(M, M_codom, calcfac, p, backend)
return ManifoldDiff.jacobian(M_dom, M_codom, costf, p0, backend)
end
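# Example (sketch, assumes the graph has solved :parametric values):
#   J = factorJacobian(fg, :x0x1f1)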
export getCoordSizes
export checkGradientsToleranceMask, calcPerturbationFromVariable
# T_pt_args[:] = [(T1::Type{<:InferenceVariable}, point1); ...]
# FORCED TO START AT EITHER :x1
function _prepFactorGradientLambdas(
fct::Union{
<:AbstractRelativeMinimize,
# <:AbstractRelativeRoots,
<:AbstractManifoldMinimize,
},
measurement,
varTypes::Tuple,
pts::Tuple;
tfg::AbstractDFG = initfg(),
_blockRecursion::Bool = true,
# gradients relative to coords requires
slack_resid = calcFactorResidualTemporary(
fct,
varTypes,
measurement,
pts;
tfg = tfg,
_blockRecursion = _blockRecursion,
),
# numerical diff perturbation size
h::Real = 1e-4,
)
#
# get manifolds for all variables
M = getManifold.(varTypes)
# use the temporary factor graph throughout
# TODO, replace with retract operations instead -- i.e. closer to Manifolds.jl tangent representations
coord_ = (s) -> AMP.makeCoordsFromPoint(M[s], pts[s]) # vee(M,..., log(M,...) )
# perturb the coords of one variable on the factor
coord_h = (s, i, crd = coord_(s)) -> (crd[i] += h; crd)
# reassemble TypePoint vector with perturbation at (s,i)
T_pth_s_i = (s, i) -> makePointFromCoords(M[s], coord_h(s, i), pts[s]) # exp(M,..., hat(M,...) )
tup_pt_s_i_h = (s, i) -> tuple(pts[1:(s - 1)]..., T_pth_s_i(s, i), pts[(s + 1):end]...)
# build a residual calculation specifically considering graph factor selections `s`, e.g. for binary `s ∈ {1,2}`.
f_dsi_h =
(d, s, i) -> IIF._evalFactorTemporary!(
fct,
varTypes,
d,
[measurement],
tup_pt_s_i_h(s, i);
tfg = tfg,
newFactor = false,
currNumber = 0,
_slack = slack_resid,
)
# standard calculus derivative definition (in coordinate space)
Δf_dsi =
(d, s, i, crd = coord_(d)) ->
(makeCoordsFromPoint(M[s], f_dsi_h(d, s, i)[1]) - crd) ./ h
# jacobian block per s, for each i
▽f_ds = (d, s, crd = coord_(d)) -> ((i) -> Δf_dsi(d, s, i, crd)).(1:length(crd))
# jacobian stored in user provided matrix
▽f_ds_J! =
(J::AbstractMatrix{<:Real}, d, s) -> (J_ = ▽f_ds(d, s); (@cast J[_d, _s] = J_[_s][_d]))
# TODO generalize beyond binary
λ_fncs = () # by factor's variable order
# number of blocks
nblks = length(varTypes)
# length of all coordinate dimensions together
λ_sizes = length.(coord_.(1:nblks)) # by factor's variable order
len = sum(λ_sizes)
# full jacobian matrix
J_f = zeros(len, len)
# build final lambdas which are mapped to the blocks of the full jacobian gradients matrix J_f
Σd = 0
# each variable T, go down to coords, add eps to a coord, back to point and look at the change in residual (assumed in coords for AbstractRelative[Minimize/Roots])
# TODO change `a_` to `s_` as variable selection by factor order
for (d_, T_d) in enumerate(varTypes)
λ_row = ()
len_d = λ_sizes[d_]
Σs = 0
for (s_, T_s) in enumerate(varTypes)
len_s = λ_sizes[s_]
# create a view into the full jacobian matrix at (d,s)
_J_ds = view(J_f, (1 + Σd):(Σd + len_d), (1 + Σs):(Σs + len_s))
# function is ready for calculation but actual jacobian values must still be done
λ_row = tuple(λ_row..., () -> ▽f_ds_J!(_J_ds, d_, s_))
Σs += len_s
end
λ_fncs = tuple(λ_fncs..., λ_row)
Σd += len_d
end
# full gradients jacobian matrix, nested-tuple of lambdas to update, and sizes of blocks
return J_f, λ_fncs, λ_sizes, slack_resid
end
function FactorGradientsCached!(
fct::Union{
<:AbstractRelativeMinimize,
<:AbstractManifoldMinimize,
},
varTypes::Tuple,
meas_single,
pts::Tuple;
h::Real = 1e-4,
_blockRecursion::Bool = true,
)
#
# working memory location for computations
tfg = initfg()
# permanent location for points and later reference
# generate the necessary lambdas
J__, λ_fncs, λ_sizes, slack_resid = _prepFactorGradientLambdas(
fct,
meas_single,
varTypes,
pts;
tfg = tfg,
_blockRecursion = _blockRecursion,
    h = h, # use the caller-provided perturbation size (was hardcoded to 1e-4)
)
# get the one factor in tfg
fctsyms = lsf(tfg)
@assert length(fctsyms) == 1 "Expecting only a single factor in tfg"
  # generate an object containing all the machinery necessary for more rapid factor gradient updates, see DevNotes for future improvements
return FactorGradientsCached!(
getFactor(tfg, fctsyms[1]),
J__,
slack_resid,
meas_single,
pts,
tfg,
λ_fncs,
λ_sizes,
h,
)
end
# Gradient matrix has individual blocks
getCoordSizes(fgc::FactorGradientsCached!) = fgc._coord_sizes
function _setFGCSlack!(fgc::FactorGradientsCached!{F}, slack) where {F}
return setPointsMani!(fgc.slack_residual, slack)
end
function _setFGCSlack!(
fgc::FactorGradientsCached!{F, S},
slack::Number,
) where {F, S <: Number}
return fgc.slack_residual = slack
end
function (fgc::FactorGradientsCached!)(meas_pts...)
  # separate the measurements (first) from the variable points (rest)
  lenm = 1 # fgc.measurement is a single measurement so length is always 1
@assert (length(fgc.currentPoints) + lenm) == length(meas_pts) "Unexpected number of arguments, got $(length(meas_pts)) but expected $(length(fgc.currentPoints)+lenm) instead. Retry call with args (meas..., pts...), got meas_pts=$meas_pts"
# update in-place the new measurement value in preparation for new gradient calculation
# TODO should outside measurement be used or only that stored in FGC object?
# for (m, tup_m) in enumerate(fgc.measurement)
# setPointsMani!(tup_m, meas_pts[m])
# end
fgc.measurement = meas_pts[1] # why not 1:1 since ccwl.measurement::Vector{typeof(z)}
# update the residual _slack in preparation for new gradient calculation
fct = getFactorType(fgc.dfgfct)
measurement = meas_pts[1]
pts = meas_pts[2:end]
varTypes =
tuple(getVariableType.(getVariable.(fgc._tfg, getVariableOrder(fgc.dfgfct)))...)
new_slack = calcFactorResidualTemporary(fct, varTypes, measurement, pts; tfg = fgc._tfg)
# TODO make sure slack_residual is properly wired up with all the lambda functions as expected
_setFGCSlack!(fgc, new_slack)
# setPointsMani!(fgc.slack_residual, new_slack)
# set new points in preparation for new gradient calculation
for (s, pt) in enumerate(meas_pts[2:end])
# update the local memory in fgc to take the values of incoming `pts`
setPointsMani!(fgc.currentPoints[s], pt)
end
  # println.(fgc.currentPoints) # NOTE stray debug print disabled
# update the gradients at new values contained in fgc
st = 0
for (s, λ_tup) in enumerate(fgc._λ_fncs), (k, λ) in enumerate(λ_tup)
    # updating of the cached gradients assumes that the diagonal is zero for eigen value=1 -- i.e. (dA/dA - I)=0
if s == k
# coord length/size of this particular block
szi = fgc._coord_sizes[s]
_blk = view(fgc.cached_gradients, (st + 1):(st + szi), (st + 1):(st + szi))
fill!(_blk, 0.0)
# move on to next diagonal block
st += szi
continue
end
# recalculate the off diagonals
λ()
end
# return newly calculated gradients
return fgc.cached_gradients
end
# convenience function to update the gradients based on current measurement and point information stored in the fgc object
(fgc::FactorGradientsCached!)() = fgc(fgc.measurement, fgc.currentPoints...)
"""
$SIGNATURES
Return a mask of same size as gradients matrix `J`, indicating which elements are above the expected sensitivity threshold `tol`.
Notes
- Threshold accuracy depends on two parts,
- Numerical gradient perturbation size `fgc._h`,
- Accuracy tolerance to which the factor residual is computed (not controlled here)
"""
function checkGradientsToleranceMask(
fgc::FactorGradientsCached!,
J::AbstractArray = fgc.cached_gradients;
tol::Real = 0.02 * fgc._h,
)
#
# ignore anything 10 times smaller than numerical gradient delta used
# NOTE this ignores the factor residual solve accuracy
return tol * fgc._h .< abs.(J)
end
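# Example (sketch):
#   mask = checkGradientsToleranceMask(fgc) # true where |gradient| exceeds the threshold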
"""
$SIGNATURES
Return a tuple of infoPerCoord vectors that result from input variables as vector of `::Pair`, i.e. `fromVar::Int => infoPerCoord`.
For example, a binary `LinearRelative` factor has a one-to-one influence from the input to the one other variable.
Notes
- Assumes the gradients in `fgc` are up to date -- if not, first run `fgc(measurement..., pts...)`.
- `tol` does not recalculate the gradients to a new tolerance, instead uses the cached value in `fgc` to predict accuracy.
Example
```julia
# setup
fct = LinearRelative(MvNormal([10;0],[1 0; 0 1]))
measurement = ([10.0;0.0],)
varTypes = (ContinuousEuclid{2}, ContinuousEuclid{2})
pts = ([0;0.0], [9.5;0])
# create the gradients functor object
fgc = FactorGradientsCached!(fct, varTypes, measurement, pts);
# must first update the cached gradients
fgc(measurement..., pts...)
# check the perturbation influence through gradients on factor
ret = calcPerturbationFromVariable(fgc, [1=>[1;1]])
@assert isapprox(ret[2], [1;1])
```
DevNotes
- FIXME Support n-ary source factors by extending `fromVar` to more than just one.
Related
[`FactorGradientsCached!`](@ref), [`checkGradientsToleranceMask`](@ref)
"""
function calcPerturbationFromVariable(
fgc::FactorGradientsCached!,
from_var_ipc::AbstractVector{<:Pair};
tol::Real = 0.02 * fgc._h,
)
#
blkszs = getCoordSizes(fgc)
# assume projection through pp-factor from first to second variable
# ipc values from first variable belief, and zero for second
ipcAll = zeros(sum(blkszs))
# set any incoming infoPerCoord values
for (fromVar, infoPC) in from_var_ipc
# check on sizes with print warning
if (blkszs[fromVar] == length(infoPC))
nothing
else
@warn(
"Expecting incoming length(infoPerCoord) to equal the block size for variable $fromVar, as per factor used to construct the FactorGradientsCached!: $(getFactorType(fgc.dfgfct))"
)
end
# get range of interest
curr_b = sum(blkszs[1:(fromVar - 1)]) + 1
curr_e = sum(blkszs[1:fromVar])
ipcAll[curr_b:curr_e] .= infoPC
end
# clamp gradients below numerical solver resolution
mask = checkGradientsToleranceMask(fgc; tol = tol)
J = fgc.cached_gradients
_J = zeros(size(J)...)
_J[mask] .= J[mask]
# calculate the gradient influence on other variables
ipc_pert = _J * ipcAll
# round up over numerical solution tolerance
dig = floor(Int, log10(1 / tol))
ipc_pert .= round.(ipc_pert, digits = dig)
# slice the result
ipcBlk = []
for (i, sz) in enumerate(blkszs)
curr_b = sum(blkszs[1:(i - 1)]) + 1
curr_e = sum(blkszs[1:i])
blk_ = view(ipc_pert, curr_b:curr_e)
push!(ipcBlk, blk_)
end
return tuple(ipcBlk...)
end
function calcPerturbationFromVariable(
ccwl::CommonConvWrapper,
sfidx::Int,
smpid::Int = 1;
tol::Real = 0.02 * ccwl.gradients_cached._h,
)
#
# collapse the hypo associated with smpid
# get the variables associated with this hypo
# assemble the leave-one-out of varidx=>infoPerCoords -- e.g. sfidx=1, `var_ipcs::Vector{<:Pair}=[2=>ipc2;]`
# NOTE varidx as per the factor args, i.e. after fractional associations (hypos) are collapsed
# calcPerturbationFromVariable(ccwl.gradients_cached, var_ipcs; tol=tol)
return error("UNDER CONSTRUCTION")
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
["MIT"] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 23838 |
"""
$SIGNATURES
Initialize an empty in-memory `AbstractDFG` factor graph object (a `LocalDFG` with `SolverParams` by default).
"""
function initfg(
dfg::T = LocalDFG(; solverParams = SolverParams());
sessionname = "NA",
robotname = "",
username = "",
cloudgraph = nothing,
) where {T <: AbstractDFG}
#
#
return dfg
end
# init an empty fg with a provided type and SolverParams
function initfg(
::Type{T};
solverParams = SolverParams(),
sessionname = "NA",
robotname = "",
username = "",
cloudgraph = nothing,
) where {T <: AbstractDFG}
#
return T(; solverParams = solverParams)
end
function initfg(
::Type{T},
solverParams::S;
sessionname = "NA",
robotname = "",
username = "",
cloudgraph = nothing,
) where {T <: AbstractDFG, S <: SolverParams}
#
return T{S}(; solverParams = solverParams)
end
# Should deprecate in favor of TensorCast.jl
reshapeVec2Mat(vec::Vector, rows::Int) = reshape(vec, rows, round(Int, length(vec) / rows))
## ==============================================================================================
## MOVE TO / CONSOLIDATE WITH DFG
## ==============================================================================================
"""
$(SIGNATURES)
Fetch the variable marginal joint sampled points. Use [`getBelief`](@ref) to retrieve the full Belief object.
"""
getVal(v::DFGVariable; solveKey::Symbol = :default) = v.solverDataDict[solveKey].val
function getVal(v::DFGVariable, idx::Int; solveKey::Symbol = :default)
return v.solverDataDict[solveKey].val[:, idx]
end
getVal(vnd::VariableNodeData) = vnd.val
getVal(vnd::VariableNodeData, idx::Int) = vnd.val[:, idx]
function getVal(dfg::AbstractDFG, lbl::Symbol; solveKey::Symbol = :default)
return getVariable(dfg, lbl).solverDataDict[solveKey].val
end
"""
$(SIGNATURES)
Get the number of points used for the current marginal belief estimate representation for a particular variable in the factor graph.
"""
function getNumPts(v::DFGVariable; solveKey::Symbol = :default)::Int
return length(getVal(getSolverData(v, solveKey)))
end
function AMP.getBW(vnd::VariableNodeData)
return vnd.bw
end
# setVal! assumes you will update values to the database separately; this is used for local graph mods only
function getBWVal(v::DFGVariable; solveKey::Symbol = :default)
return getSolverData(v, solveKey).bw
end
function setBW!(vd::VariableNodeData, bw::Array{Float64, 2}; solveKey::Symbol = :default)
vd.bw = bw
return nothing
end
function setBW!(v::DFGVariable, bw::Array{Float64, 2}; solveKey::Symbol = :default)
setBW!(getSolverData(v, solveKey), bw)
return nothing
end
function setVal!(vd::VariableNodeData, val::AbstractVector{P}) where {P}
vd.val = val
return nothing
end
function setVal!(
v::DFGVariable,
val::AbstractVector{P};
solveKey::Symbol = :default,
) where {P}
setVal!(getSolverData(v, solveKey), val)
return nothing
end
function setVal!(
vd::VariableNodeData,
val::AbstractVector{P},
bw::Array{Float64, 2},
) where {P}
setVal!(vd, val)
setBW!(vd, bw)
return nothing
end
function setVal!(
v::DFGVariable,
val::AbstractVector{P},
bw::Array{Float64, 2};
solveKey::Symbol = :default,
) where {P}
setVal!(v, val; solveKey = solveKey)
setBW!(v, bw; solveKey = solveKey)
return nothing
end
function setVal!(
vd::VariableNodeData,
val::AbstractVector{P},
bw::Vector{Float64},
) where {P}
setVal!(vd, val, reshape(bw, length(bw), 1))
return nothing
end
function setVal!(
v::DFGVariable,
val::AbstractVector{P},
bw::Vector{Float64};
solveKey::Symbol = :default,
) where {P}
setVal!(getSolverData(v, solveKey), val, bw)
return nothing
end
function setVal!(
dfg::AbstractDFG,
sym::Symbol,
val::AbstractVector{P};
solveKey::Symbol = :default,
) where {P}
return setVal!(getVariable(dfg, sym), val; solveKey = solveKey)
end
"""
$SIGNATURES
Set the point centers and bandwidth parameters of a variable node, also set `isInitialized=true` if `setinit::Bool=true` (as per default).
Notes
- `initialized` is used for initial solve of factor graph where variables are not yet initialized.
- `inferdim` is used to identify if the initialization was only partial.
"""
function setValKDE!(
vd::VariableNodeData,
pts::AbstractVector{P},
bws::Vector{Float64},
setinit::Bool = true,
ipc::AbstractVector{<:Real} = [0.0;],
) where {P}
#
setVal!(vd, pts, bws) # BUG ...al!(., val, . ) ## TODO -- this can be a little faster
setinit ? (vd.initialized = true) : nothing
vd.infoPerCoord = ipc
return nothing
end
function setValKDE!(
vd::VariableNodeData,
val::AbstractVector{P},
setinit::Bool = true,
ipc::AbstractVector{<:Real} = [0.0;],
) where {P}
# recover variableType information
varType = getVariableType(vd)
p = AMP.manikde!(varType, val)
setValKDE!(vd, p, setinit, ipc)
return nothing
end
function setValKDE!(
v::DFGVariable,
val::AbstractVector{P},
bws::Array{<:Real, 2},
setinit::Bool = true,
ipc::AbstractVector{<:Real} = [0.0;];
solveKey::Symbol = :default,
) where {P}
# recover variableType information
setValKDE!(getSolverData(v, solveKey), val, bws[:, 1], setinit, ipc)
return nothing
end
function setValKDE!(
v::DFGVariable,
val::AbstractVector{P},
setinit::Bool = true,
ipc::AbstractVector{<:Real} = [0.0;];
solveKey::Symbol = :default,
ppeType::Type{T} = MeanMaxPPE,
) where {P, T}
vnd = getSolverData(v, solveKey)
# recover variableType information
setValKDE!(vnd, val, setinit, ipc)
setPPE!(v; solveKey, ppeType)
return nothing
end
function setValKDE!(
v::DFGVariable,
em::TreeBelief,
setinit::Bool = true;
# inferdim::Union{Float32, Float64, Int32, Int64}=0;
solveKey::Symbol = :default,
)
#
setValKDE!(v, em.val, em.bw, setinit, em.infoPerCoord; solveKey = solveKey)
return nothing
end
function setValKDE!(
v::DFGVariable,
mkd::ManifoldKernelDensity,
setinit::Bool = true,
ipc::AbstractVector{<:Real} = [0.0;];
solveKey::Symbol = :default,
)
#
# @error("TESTING setValKDE! ", solveKey, string(listSolveKeys(v)))
setValKDE!(getSolverData(v, solveKey), mkd, setinit, Float64.(ipc))
return nothing
end
function setValKDE!(
dfg::AbstractDFG,
sym::Symbol,
mkd::ManifoldKernelDensity,
setinit::Bool = true,
ipc::AbstractVector{<:Real} = [0.0;];
solveKey::Symbol = :default,
)
#
setValKDE!(getVariable(dfg, sym), mkd, setinit, ipc; solveKey = solveKey)
return nothing
end
function setValKDE!(
vnd::VariableNodeData,
mkd::ManifoldKernelDensity{M, B, Nothing}, # TBD dispatch without partial?
setinit::Bool = true,
ipc::AbstractVector{<:Real} = [0.0;],
) where {M, B}
#
# L==Nothing means no partials
ptsArr = AMP.getPoints(mkd) # , false) # for not partial
# also set the bandwidth
bws = getBW(mkd)[:, 1]
setValKDE!(vnd, ptsArr, bws, setinit, ipc)
return nothing
end
function setValKDE!(
vnd::VariableNodeData,
mkd::ManifoldKernelDensity{M, B, L},
setinit::Bool = true,
ipc::AbstractVector{<:Real} = [0.0;],
) where {M, B, L <: AbstractVector}
#
oldBel = getBelief(vnd)
# New infomation might be partial
newBel = replace(oldBel, mkd)
# Set partial dims as Manifold points
ptsArr = AMP.getPoints(newBel, false)
# also get the bandwidth
bws = getBandwidth(newBel, false)
# update values in graph
setValKDE!(vnd, ptsArr, bws, setinit, ipc)
return nothing
end
function setBelief!(
vari::DFGVariable,
bel::ManifoldKernelDensity,
setinit::Bool=true,
ipc::AbstractVector{<:Real}=[0.0;];
solveKey::Symbol = :default
)
setValKDE!(vari, bel, setinit, ipc; solveKey)
# setValKDE!(vari,getPoints(bel, false), setinit, ipc)
end
"""
$SIGNATURES
Set variable initialized status.
"""
function setVariableInitialized!(varid::VariableNodeData, status::Bool)
#
return varid.initialized = status
end
function setVariableInitialized!(vari::DFGVariable, status::Bool)
return setVariableInitialized!(getSolverData(vari), status)
end
"""
$SIGNATURES
Set method for the inferred dimension value in a variable.
"""
setIPC!(varid::VariableNodeData, val::AbstractVector{<:Real}) = varid.infoPerCoord = val
function setIPC!(
vari::DFGVariable,
val::AbstractVector{<:Real},
solveKey::Symbol = :default,
)
  return setIPC!(getSolverData(vari, solveKey), val) # NOTE was setVariableIPC!, which is not defined
end
## ==============================================================================================
## ==============================================================================================
"""
$(SIGNATURES)
Get a ManifoldKernelDensity estimate from variable node data.
"""
function getBelief(vnd::VariableNodeData)
return manikde!(getManifold(getVariableType(vnd)), getVal(vnd); bw = getBW(vnd)[:, 1])
end
function getBelief(v::DFGVariable, solvekey::Symbol = :default)
return getBelief(getSolverData(v, solvekey))
end
function getBelief(dfg::AbstractDFG, lbl::Symbol, solvekey::Symbol = :default)
return getBelief(getVariable(dfg, lbl), solvekey)
end
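# Example (sketch):
#   P = getBelief(fg, :x0) # ManifoldKernelDensity for :x0's :default solveKey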
"""
$SIGNATURES
Reset the solve state of a variable to uninitialized/unsolved state.
"""
function resetVariable!(varid::VariableNodeData; solveKey::Symbol = :default)::Nothing
#
val = getBelief(varid)
pts = AMP.getPoints(val)
# TODO not all manifolds will initialize to zero
for pt in pts
fill!(pt, 0.0)
end
pn = manikde!(getManifold(varid), pts; bw = zeros(Ndim(val)))
setValKDE!(varid, pn, false, [0.0;])
# setVariableInferDim!(varid, 0)
# setVariableInitialized!(vari, false)
return nothing
end
function resetVariable!(vari::DFGVariable; solveKey::Symbol = :default)
return resetVariable!(getSolverData(vari); solveKey = solveKey)
end
function resetVariable!(
dfg::G,
sym::Symbol;
solveKey::Symbol = :default,
)::Nothing where {G <: AbstractDFG}
#
return resetVariable!(getVariable(dfg, sym); solveKey = solveKey)
end
# return VariableNodeData
function DefaultNodeDataParametric(
dodims::Int,
dims::Int,
variableType::InferenceVariable;
initialized::Bool = true,
dontmargin::Bool = false,
solveKey::Symbol = :parametric
)
# this should be the only function allocating memory for the node points
if false && initialized
error("not implemented yet")
# pN = AMP.manikde!(variableType.manifold, randn(dims, N));
#
# sp = Int[0;] #round.(Int,range(dodims,stop=dodims+dims-1,length=dims))
# gbw = getBW(pN)[:,1]
# gbw2 = Array{Float64}(undef, length(gbw),1)
# gbw2[:,1] = gbw[:]
# pNpts = getPoints(pN)
# #initval, stdev
# return VariableNodeData(pNpts,
# gbw2, Symbol[], sp,
# dims, false, :_null, Symbol[], variableType, true, 0.0, false, dontmargin)
else
# dimIDs = round.(Int, range(dodims; stop = dodims + dims - 1, length = dims))
ϵ = getPointIdentity(variableType)
return VariableNodeData(variableType;
id=nothing,
val=[ϵ],
bw=zeros(dims, dims),
# Symbol[],
# dimIDs,
dims,
# false,
# :_null,
# Symbol[],
initialized=false,
infoPerCoord=zeros(dims),
ismargin=false,
dontmargin,
# 0,
# 0,
solveKey,
)
end
end
"""
$SIGNATURES
Makes and sets a parametric `VariableNodeData` object (`.solverData`).
DevNotes
- TODO assumes parametric solves will always just be under the `solveKey=:parametric`, should be generalized.
"""
function setDefaultNodeDataParametric!(
v::DFGVariable,
variableType::InferenceVariable;
solveKey::Symbol = :parametric,
kwargs...,
)
vnd = DefaultNodeDataParametric(0, variableType |> getDimension, variableType; solveKey, kwargs...)
setSolverData!(v, vnd, solveKey)
nothing
end
"""
$SIGNATURES
Create new solverData.
Notes
- Used during creation of new variable, as well as in CSM unique `solveKey`.
"""
function setDefaultNodeData!(
v::DFGVariable,
dodims::Int,
N::Int,
dims::Int=getDimension(v);
solveKey::Symbol = :default,
gt = Dict(),
initialized::Bool = true,
dontmargin::Bool = false,
varType = nothing,
)
#
# TODO review and refactor this function, exists as legacy from pre-v0.3.0
# this should be the only function allocating memory for the node points (unless number of points are changed)
data = nothing
isinit = false
sp = Int[0;]
(val, bw) = if initialized
pN = resample(getBelief(v))
bw = getBW(pN)[:, 1:1]
pNpts = getPoints(pN)
isinit = true
(pNpts, bw)
else
sp = round.(Int, range(dodims; stop = dodims + dims - 1, length = dims))
@assert getPointType(varType) != DataType "cannot add manifold point type $(getPointType(varType)), make sure the identity element argument in @defVariable $varType arguments is correct"
val = Vector{getPointType(varType)}(undef, N)
for i = 1:length(val)
val[i] = getPointIdentity(varType)
end
bw = zeros(dims, 1)
#
(val, bw)
end
# make and set the new solverData
setSolverData!(
v,
VariableNodeData(varType;
id=nothing,
val,
bw,
# Symbol[],
# sp,
dims,
# false,
# :_null,
# Symbol[],
initialized=isinit,
infoPerCoord=zeros(getDimension(v)),
ismargin=false,
dontmargin,
# 0,
# 0,
solveKey,
),
solveKey,
)
return nothing
end
# if size(initval,2) < N && size(initval, 1) == dims
# @warn "setDefaultNodeData! -- deprecated use of stdev."
# p = manikde!(varType.manifold, initval,diag(stdev));
# pN = resample(p,N)
# if size(initval,2) < N && size(initval, 1) != dims
# @info "Node value memory allocated but not initialized"
# else
# pN = manikde!(varType.manifold, initval)
# end
# dims = size(initval,1) # rows indicate dimensions
"""
$SIGNATURES
Reference data can be stored in the factor graph as a super-solve.
Notes
- Intended as a mechanism to store reference data alongside the numerical computations.
"""
function setVariableRefence!(
dfg::AbstractDFG,
sym::Symbol,
val::AbstractVector;
refKey::Symbol = :reference,
)
#
# which variable to update
var = getVariable(dfg, sym)
# Construct an empty VND object
vnd = VariableNodeData(
val,
zeros(getDimension(var), 1),
Symbol[],
Int[0;],
getDimension(var),
false,
:_null,
Symbol[],
getVariableType(var),
true,
zeros(getDimension(var)),
false,
true,
)
#
# set the value in the DFGVariable
return setSolverData!(var, vnd, refKey)
end
# get instance from variableType
_variableType(varType::InferenceVariable) = varType
_variableType(varType::Type{<:InferenceVariable}) = varType()
## ==================================================================================================
## DFG Overloads on addVariable! and addFactor!
## ==================================================================================================
"""
$(SIGNATURES)
Add a variable node `label::Symbol` to `dfg::AbstractDFG`, as `varType<:InferenceVariable`.
Notes
-----
- keyword `nanosecondtime` is experimental and intended to carry the sub-second time portion -- i.e. accurateTime = (timestamp MOD second) + Nanosecond
Example
-------
```julia
fg = initfg()
addVariable!(fg, :x0, Pose2)
```
"""
function addVariable!(
dfg::AbstractDFG,
label::Symbol,
varTypeU::Union{T, Type{T}};
N::Int = getSolverParams(dfg).N,
solvable::Int = 1,
timestamp::Union{DateTime, ZonedDateTime} = now(localzone()),
nanosecondtime::Union{Nanosecond, Int64, Nothing} = Nanosecond(0),
dontmargin::Bool = false,
tags::Vector{Symbol} = Symbol[],
smalldata = Dict{Symbol, DFG.SmallDataTypes}(),
checkduplicates::Bool = true,
initsolvekeys::Vector{Symbol} = getSolverParams(dfg).algorithms,
) where {T <: InferenceVariable}
#
varType = _variableType(varTypeU)
_zonedtime(s::DateTime) = ZonedDateTime(s, localzone())
_zonedtime(s::ZonedDateTime) = s
union!(tags, [:VARIABLE])
v = DFGVariable(
label,
varType;
tags = Set(tags),
smallData = smalldata,
solvable = solvable,
timestamp = _zonedtime(timestamp),
nstime = Nanosecond(nanosecondtime),
)
(:default in initsolvekeys) && setDefaultNodeData!(
v,
0,
N,
getDimension(varType);
initialized = false,
varType = varType,
dontmargin = dontmargin,
) # dodims
(:parametric in initsolvekeys) &&
setDefaultNodeDataParametric!(v, varType; initialized = false, dontmargin = dontmargin)
return DFG.addVariable!(dfg, v)
end
function parseusermultihypo(multihypo::Nothing, nullhypo::Float64)
verts = Symbol[]
mh = nothing
return mh, nullhypo
end
function parseusermultihypo(multihypo::Vector{Float64}, nullhypo::Float64)
mh = nothing
if 0 < length(multihypo)
    multihypo2 = multihypo # NOTE aliases the caller's vector; certain (≈1.0) entries are zeroed in place
multihypo2[1 - 1e-10 .< multihypo] .= 0.0
# check that terms sum to full probability
    @assert abs(sum(multihypo2) % 1) < 1e-10 || 1 - 1e-10 < sum(multihypo2) % 1 "ensure multihypo sums to an integer (to within 1e-10), see #1086"
# check that only one variable broken into fractions
@assert sum(multihypo2[1e-10 .< multihypo2]) ≈ 1
# force normalize something that is now known to be close
multihypo2 ./= sum(multihypo2)
mh = Categorical(Float64[multihypo2...])
end
return mh, nullhypo
end
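# Example (sketch):
#   mh, nh = parseusermultihypo([1.0; 0.5; 0.5], 0.0)
#   # mh isa Categorical with mh.p == [0.0, 0.5, 0.5]; nh == 0.0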
# return a BitVector masking the fractional portion, assuming converted 0's on 100% confident variables
function _getFractionalVars(varList::Union{<:Tuple, <:AbstractVector}, mh::Nothing)
return zeros(length(varList)) .== 1
end
_getFractionalVars(varList::Union{<:Tuple, <:AbstractVector}, mh::Categorical) = 0 .< mh.p
function _selectHypoVariables(
allVars::Union{<:Tuple, <:AbstractVector},
mh::Categorical,
sel::Integer = rand(mh),
)
#
mask = mh.p .≈ 0.0
mask[sel] = true
return (1:length(allVars))[mask]
end
function _selectHypoVariables(
allVars::Union{<:Tuple, <:AbstractVector},
mh::Nothing,
sel::Integer = 0,
)
return collect(1:length(allVars))
end
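# Example (sketch): keep the certain variables plus the selected hypothesis
#   _selectHypoVariables([:x1; :l1a; :l1b], Categorical([0.0; 0.5; 0.5]), 2) == [1, 2]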
"""
$SIGNATURES
Overload for specific factor preamble usage.
Notes:
- See https://github.com/JuliaRobotics/IncrementalInference.jl/issues/1462
DevNotes
- Integrate into CalcFactor
- Add threading
Example:
```julia
import IncrementalInference: preambleCache
preambleCache(dfg::AbstractDFG, vars::AbstractVector{<:DFGVariable}, usrfnc::MyFactor) = MyFactorCache(randn(10))
# continue regular use, e.g.
mfc = MyFactor(...)
addFactor!(fg, [:a;:b], mfc)
# ...
```
"""
function preambleCache(
dfg::AbstractDFG,
vars::AbstractVector{<:DFGVariable},
usrfnc::AbstractFactor,
)
return nothing
end
# TODO perhaps consolidate with constructor?
"""
$SIGNATURES
Generate the default factor data for a new DFGFactor.
"""
function getDefaultFactorData(
dfg::AbstractDFG,
Xi::Vector{<:DFGVariable},
usrfnc::T;
multihypo::Vector{<:Real} = Float64[],
nullhypo::Float64 = 0.0,
# threadmodel = SingleThreaded,
eliminated::Bool = false,
potentialused::Bool = false,
edgeIDs = Int[],
solveInProgress = 0,
inflation::Real = getSolverParams(dfg).inflation,
_blockRecursion::Bool = false,
) where {T <: AbstractFactor}
#
# prepare multihypo particulars
# storeMH::Vector{Float64} = multihypo == nothing ? Float64[] : [multihypo...]
mhcat, nh = parseusermultihypo(multihypo, nullhypo)
# allocate temporary state for convolutional operations (not stored)
userCache = preambleCache(dfg, Xi, usrfnc)
ccwl = _createCCW(
Xi,
usrfnc;
multihypo = mhcat,
nullhypo = nh,
inflation,
attemptGradients = getSolverParams(dfg).attemptGradients,
_blockRecursion,
userCache,
)
# and the factor data itself
return FunctionNodeData{typeof(ccwl)}(
eliminated,
potentialused,
edgeIDs,
ccwl,
multihypo,
ccwl.hyporecipe.certainhypo,
nullhypo,
solveInProgress,
inflation,
)
end
"""
$SIGNATURES
Return `::Bool` on whether at least one hypothesis is available for intended computations (assuming direction `sfidx`).
"""
function isLeastOneHypoAvailable(
sfidx::Int,
certainidx::Vector{Int},
uncertnidx::Vector{Int},
isinit::Vector{Bool},
)
#
# @show isinit
# @show sfidx in certainidx, sum(isinit[uncertnidx])
# @show sfidx in uncertnidx, sum(isinit[certainidx])
return sfidx in certainidx && 0 < sum(isinit[uncertnidx]) ||
sfidx in uncertnidx && sum(isinit[certainidx]) == length(certainidx)
end
function assembleFactorName(dfg::AbstractDFG, Xi::Vector{<:DFGVariable})
#
existingFactorLabels = listFactors(dfg)
existingFactorLabelDict = Dict(existingFactorLabels .=> existingFactorLabels)
namestring = ""
for vert in Xi #f.Xi
namestring = string(namestring, vert.label)
end
opt = getSolverParams(dfg)
for i = 1:(opt.maxincidence)
tempnm = string(namestring, "f$i")
if !haskey(existingFactorLabelDict, Symbol(tempnm))
namestring = tempnm
break
end
    if i == opt.maxincidence
      error(
        "Artificial restriction to not connect more than $(opt.maxincidence) factors to a variable (bad for sparsity), try setting getSolverParams(fg).maxincidence=1000 to adjust this restriction.",
      )
    end
end
return Symbol(namestring)
end
"""
$(SIGNATURES)
Add factor with user defined type `<:AbstractFactor`` to the factor graph
object. Define whether the automatic initialization of variables should be
performed. Use order sensitive `multihypo` keyword argument to define if any
variables are related to data association uncertainty.
Experimental
- `inflation`, to better disperse kernels before convolution solve, see IIF #1051.
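Example
```julia
# a minimal sketch (assumes the ContinuousScalar variable and LinearRelative factor types from this package)
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x0; :x1], LinearRelative(Normal(10.0, 1.0)))
```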
"""
function DFG.addFactor!(
dfg::AbstractDFG,
Xi::AbstractVector{<:DFGVariable},
usrfnc::AbstractFactor;
multihypo::Vector{Float64} = Float64[],
nullhypo::Float64 = 0.0,
solvable::Int = 1,
tags::Vector{Symbol} = Symbol[],
timestamp::Union{DateTime, ZonedDateTime} = now(localzone()),
graphinit::Bool = getSolverParams(dfg).graphinit,
# threadmodel = SingleThreaded,
suppressChecks::Bool = false,
inflation::Real = getSolverParams(dfg).inflation,
namestring::Symbol = assembleFactorName(dfg, Xi),
_blockRecursion::Bool = !getSolverParams(dfg).attemptGradients,
)
#
  @assert (suppressChecks || length(multihypo) === 0 || length(multihypo) == length(Xi)) "When using multihypo=[...], the number of variables and multihypo probabilities must match. See documentation on how to include fractional data-association uncertainty."
_zonedtime(s::ZonedDateTime) = s
_zonedtime(s::DateTime) = ZonedDateTime(s, localzone())
varOrderLabels = Symbol[v.label for v in Xi]
solverData = getDefaultFactorData(
dfg,
Xi,
deepcopy(usrfnc);
multihypo,
nullhypo,
# threadmodel,
inflation,
_blockRecursion,
)
#
newFactor = DFGFactor(
Symbol(namestring),
varOrderLabels,
solverData;
tags = Set(union(tags, [:FACTOR])),
solvable,
timestamp = _zonedtime(timestamp),
)
#
factor = addFactor!(dfg, newFactor)
# TODO: change this operation to update a conditioning variable
graphinit && doautoinit!(dfg, Xi; singles = false)
return factor
end
function _checkFactorAdd(usrfnc, xisyms)
if length(xisyms) == 1 && !(usrfnc isa AbstractPrior) && !(usrfnc isa Mixture)
@warn("Listing only one variable $xisyms for non-unary factor type $(typeof(usrfnc))")
end
return nothing
end
function DFG.addFactor!(
dfg::AbstractDFG,
vlbs::AbstractVector{Symbol},
usrfnc::AbstractFactor;
suppressChecks::Bool = false,
kw...,
)
#
# basic sanity check for unary vs n-ary
if !suppressChecks
_checkFactorAdd(usrfnc, vlbs)
@assert length(vlbs) == length(unique(vlbs)) "List of variables should be unique and ordered."
end
# variables = getVariable.(dfg, vlbs)
variables = map(vid -> getVariable(dfg, vid), vlbs)
return addFactor!(dfg, variables, usrfnc; suppressChecks, kw...)
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
["MIT"] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 16125 |
"""
$SIGNATURES
For variables in `varList` check and if necessary make solverData objects for both `:default` and `:parametric` solveKeys.
Example
```julia
num_made = makeSolverData!(fg; solveKey=:parametric)
```
Notes
- Part of solving JuliaRobotics/IncrementalInference.jl issue 1637
DevNotes
- TODO, assumes parametric solves will always just be in solveKey `:parametric`.
See also: [`doautoinit!`](@ref), [`initAll!`](@ref)
"""
function makeSolverData!(
dfg::AbstractDFG;
solvable = 1,
varList::AbstractVector{Symbol} = ls(dfg; solvable),
solveKey::Symbol=:default
)
count = 0
for vl in varList
v = getVariable(dfg,vl)
varType = getVariableType(v) |> IIF._variableType
vsolveKeys = listSolveKeys(dfg,vl)
if solveKey != :parametric && !(solveKey in vsolveKeys)
IIF.setDefaultNodeData!(v, 0, getSolverParams(dfg).N, getDimension(varType); initialized=false, varType, solveKey) # dodims
count += 1
elseif solveKey == :parametric && !(:parametric in vsolveKeys)
# global doinit = true
IIF.setDefaultNodeDataParametric!(v, varType; initialized=false, solveKey)
count += 1
end
end
return count
end
"""
$SIGNATURES
Return `(::Bool, ::Vector{Symbol}, ::Vector{Symbol})` on whether all other variables (besides `loovar::Symbol`)
attached to factor `fct::Symbol` are all initialized -- i.e. `fct` is usable.
Notes:
- Special carve out for multihypo cases, see issue 427, where at least one hypothesis should be available, but not all required at first.
Development Notes
* TODO get faster version of isInitialized for database version
Related
doautoinit!, initVariable!, isInitialized, isMultihypo
"""
function factorCanInitFromOtherVars(
dfg::AbstractDFG,
fct::Symbol,
loovar::Symbol;
solveKey::Symbol = :default,
)
#
# all variables attached to this factor
varsyms = listNeighbors(dfg, fct)
# which element is being solved for
sfidx = (1:length(varsyms))[varsyms .== loovar][1]
# list of factors to use in init operation
fctlist = Symbol[]
# list of variables that cannot be used
faillist = Symbol[]
isinit = Bool[]
for vsym in varsyms
# check each variable one by one
xi = DFG.getVariable(dfg, vsym)
isi = isInitialized(xi, solveKey)
push!(isinit, isi)
if !isi
push!(faillist, vsym)
end
end
## determine if this factor can be used
# priors and general n-ary cases
canuse = length(varsyms) == 1 || (length(faillist) == 1 && loovar in faillist)
## special multihypo case (at least one hypothesis is available or initializing first hypo)
fctnode = getFactor(dfg, fct)
# @show canuse, isMultihypo(fctnode), isinit
if !canuse && isMultihypo(fctnode)
# multihypo=[1;0.5;0.5] : sfidx=1, isinit=[0,1,0] -- true
# multihypo=[1;0.5;0.5] : sfidx=1, isinit=[0,0,1] -- true
# multihypo=[1;0.5;0.5] : sfidx=2|3, isinit=[1,0,0] -- true
mhp = getMultihypoDistribution(fctnode).p
allmhp, certainidx, uncertnidx = getHypothesesVectors(mhp)
if isLeastOneHypoAvailable(sfidx, certainidx, uncertnidx, isinit)
# special case works
@info "allowing init from incomplete set of previously initialized hypotheses, fct=$fct"
canuse = true
end
end
# should add the factor for use?
if canuse
push!(fctlist, fct)
end
# return if can use, the factor in an array, and the non-initialized variables attached to the factor
return (canuse, fctlist, faillist)::Tuple{Bool, Vector{Symbol}, Vector{Symbol}}
end
"""
$(SIGNATURES)
EXPERIMENTAL: initialize target variable `xi` based on connected factors in the
factor graph `fgl`. Possibly called from `addFactor!`, or `doCliqAutoInitUp!` (?).
Notes:
- Special carve out for multihypo cases, see issue 427.
Development Notes:
- Target factor is first (singletons) or second (dim 2 pairwise) variable vertex in `xi`.
- TODO use DFG properly with local operations and DB update at end.
- TODO get faster version of `isInitialized` for database version.
- TODO: Persist this back if we want to here.
- TODO: init from just partials
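Example
A hedged sketch; assumes `fg` contains a variable `:x2` whose neighbors are already initialized:
```julia
didinit = doautoinit!(fg, :x2)  # returns true if :x2 was initialized by this call
```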
"""
function doautoinit!(
dfg::AbstractDFG,
xi::DFGVariable;
solveKey::Symbol = :default,
singles::Bool = true,
N::Int = getSolverParams(dfg).N, #maximum([length(getPoints(getBelief(xi, solveKey))); getSolverParams(dfg).N]),
logger = ConsoleLogger(),
)
#
didinit = false
# don't initialize a variable more than once
if !isInitialized(xi, solveKey)
with_logger(logger) do
@info "try doautoinit! of $(xi.label)"
end
# get factors attached to this variable xi
vsym = xi.label
neinodes = listNeighbors(dfg, vsym)
# proceed if has more than one neighbor OR even if single factor
if (singles || length(neinodes) > 1)
# Which of the factors can be used for initialization
useinitfct = Symbol[]
# Consider factors connected to $vsym...
for xifct in neinodes
canuse, usefct, notusevars =
factorCanInitFromOtherVars(dfg, xifct, vsym; solveKey = solveKey)
if canuse
union!(useinitfct, usefct)
end
end
with_logger(logger) do
@info "init with useinitfct $useinitfct"
end
# println("Consider all singleton (unary) factors to $vsym...")
# calculate the predicted belief over $vsym
if length(useinitfct) > 0
with_logger(logger) do
@info "do init of $vsym"
end
# FIXME ensure a product of only partial densities and returned pts are put to proper dimensions
fcts = map(fx -> getFactor(dfg, fx), useinitfct)
bel, ipc = propagateBelief(dfg, getVariable(dfg, vsym), fcts; solveKey, logger, N)
# while the propagate step might allow large point counts, the graph should stay restricted to N
bel_ =
Npts(bel) == getSolverParams(dfg).N ? bel : resample(bel, getSolverParams(dfg).N)
# @info "MANIFOLD IS" bel.manifold isPartial(bel) string(bel._partial) string(getPoints(bel, false)[1])
setValKDE!(xi, bel_, true, ipc; solveKey) # getPoints(bel, false)
# Update the estimates (longer DFG function used so cloud is also updated)
setVariablePosteriorEstimates!(dfg, xi.label, solveKey)
# Update the data in the event that it's not local
# TODO perhaps usecopy=false
updateVariableSolverData!(dfg, xi, solveKey, true; warn_if_absent = false)
# deepcopy graphinit value, see IIF #612
updateVariableSolverData!(
dfg,
xi.label,
getSolverData(xi, solveKey),
:graphinit,
true,
Symbol[];
warn_if_absent = false,
)
didinit = true
end
end
end
return didinit
end
function doautoinit!(
dfg::AbstractDFG,
Xi::Vector{<:DFGVariable};
solveKey::Symbol = :default,
singles::Bool = true,
N::Int = getSolverParams(dfg).N,
logger = ConsoleLogger(),
)
#
#
# Mighty inefficient function, since we only need very select fields nearby from a few neighboring nodes
# do double depth search for variable nodes
didinit = true
# loop over all requested variables that must be initialized
for xi in Xi
didinit &=
doautoinit!(dfg, xi; solveKey = solveKey, singles = singles, N = N, logger = logger)
end
return didinit
end
function doautoinit!(
dfg::AbstractDFG,
xsyms::Vector{Symbol};
solveKey::Symbol = :default,
singles::Bool = true,
N::Int = getSolverParams(dfg).N,
logger = ConsoleLogger(),
)
#
verts = getVariable.(dfg, xsyms)
return doautoinit!(
dfg,
verts;
solveKey = solveKey,
singles = singles,
N = N,
logger = logger,
)
end
function doautoinit!(
dfg::AbstractDFG,
xsym::Symbol;
solveKey::Symbol = :default,
singles::Bool = true,
N::Int = getSolverParams(dfg).N,
logger = ConsoleLogger(),
)
#
return doautoinit!(
dfg,
[getVariable(dfg, xsym);];
solveKey = solveKey,
singles = singles,
N = N,
logger = logger,
)
end
"""
$(TYPEDSIGNATURES)
Method to manually initialize a variable using a set of points.
Notes
- Disable automated graphinit on `addFactor!(fg, ...; graphinit=false)`
- any un-initialized variables will automatically be initialized by `solveTree!`
Example:
```julia
# some variable is added to fg
addVariable!(fg, :somepoint3, ContinuousEuclid{2})
# data is organized as a vector of points (here 100 samples of 2D points)
pts = [randn(2) for _ = 1:100]
initVariable!(fg, :somepoint3, pts)
# manifold management should be done automatically.
# note upgrades are coming to consolidate with Manifolds.jl, see RoME #244
## it is also possible to initVariable! by using existing factors, e.g.
initVariable!(fg, :x3, [:x2x3f1])
```
DevNotes
- TODO better document graphinit and treeinit.
"""
function initVariable!(
variable::DFGVariable,
ptsArr::ManifoldKernelDensity,
solveKey::Symbol = :default;
dontmargin::Bool = false,
N::Int = length(getPoints(ptsArr)),
)
#
@debug "initVariable! $(getLabel(variable))"
if !(solveKey in listSolveKeys(variable))
@debug "$(getLabel(variable)) needs new VND solveKey=$(solveKey)"
varType = getVariableType(variable)
setDefaultNodeData!(
variable,
0,
N,
getDimension(varType);
solveKey = solveKey,
initialized = false,
varType = varType,
dontmargin = dontmargin,
)
end
setValKDE!(variable, ptsArr, true; solveKey = solveKey)
return nothing
end
function initVariable!(
dfg::AbstractDFG,
label::Symbol,
belief::ManifoldKernelDensity,
solveKey::Symbol = :default;
dontmargin::Bool = false,
N::Int = getSolverParams(dfg).N,
)
#
variable = getVariable(dfg, label)
initVariable!(variable, belief, solveKey; dontmargin = dontmargin, N = N)
return nothing
end
function initVariable!(
dfg::AbstractDFG,
label::Symbol,
samplable_belief::SamplableBelief,
solveKey::Symbol = :default;
N::Int = getSolverParams(dfg).N,
)
#
variable = getVariable(dfg, label)
initVariable!(variable, samplable_belief, solveKey; N)
return nothing
end
function initVariable!(
variable::DFGVariable,
samplable_belief::SamplableBelief,
solveKey::Symbol = :default;
N::Int = length(getVal(variable)),
)
#
M = getManifold(variable)
if solveKey == :parametric
μ, iΣ = getMeasurementParametric(samplable_belief)
vnd = getSolverData(variable, solveKey)
vnd.val[1] = getPoint(getVariableType(variable), μ)
vnd.bw .= inv(iΣ)
vnd.initialized = true
else
points = [samplePoint(M, samplable_belief) for _ = 1:N]
initVariable!(variable, points, solveKey)
end
return nothing
end
function initVariable!(
dfg::AbstractDFG,
label::Symbol,
usefcts::AbstractVector{Symbol},
solveKey::Symbol = :default;
N::Int = getSolverParams(dfg).N,
kwargs...,
)
#
pts = propagateBelief(dfg, label, usefcts; solveKey = solveKey)[1]
# pts = predictbelief(dfg, label, usefcts; solveKey = solveKey)[1]
vert = getVariable(dfg, label)
Xpre = manikde!(getManifold(getVariableType(vert)), pts)
return initVariable!(vert, Xpre, solveKey; N, kwargs...)
# setValKDE!(vert, Xpre, true, solveKey=solveKey)
# return nothing
end
function initVariable!(
vari::DFGVariable,
pts::AbstractVector{P},
solveKey::Symbol = :default;
bw = nothing,
) where {P}
#
# specializations to support generic case of Tuple rather than ProductRepr or ArrayPartition inputs
# TODO ArrayPartition inputs
_prodrepr(pt) = pt
# _prodrepr(pt::Tuple) = Manifolds.ProductRepr(pt...)
_prodrepr(pt::Tuple) = Manifolds.ArrayPartition(pt...)
M = getManifold(vari)
pp = manikde!(M, _prodrepr.(pts); bw)
return initVariable!(vari, pp, solveKey)
end
function initVariable!(
dfg::AbstractDFG,
sym::Symbol,
pts::AbstractVector{P},
solveKey::Symbol = :default;
kwargs...,
) where {P}
#
return initVariable!(getVariable(dfg, sym), pts, solveKey; kwargs...)
end
# legacy alias
const initVariableManual! = initVariable!
"""
$SIGNATURES
Set solveKey values of `dest::AbstractDFG` according to `initKey::Symbol=:graphinit` values.
Notes
- Some flexibility for using two DFGs and different key values, see Examples and code for details.
- Can also be specific with `varList::Vector{Symbol}`.
- Returns `dest` graph.
- Uses the supersolve mechanism.
Examples
```julia
resetInitialValues!(fg)
resetInitialValues!(fg1,fg2) # into 1 from 2
resetInitialValues!(fg1,fg1,:myotherinit) # use different init value into solveKey :default
resetInitialValues!(fg1,fg1,:graphinit, :mysolver) # not into solveKey=:default but :mysolver
resetInitialValues!(fg1, varList=[:x1;:l3]) # Specific variables only
# Into `fgNew` object, leaving `fg` untouched
fgNew = deepcopy(fg)
resetInitialValues!(fgNew,fg)
```
Related
initVariable!, graphinit (keyword)
"""
function resetInitialValues!(
dest::AbstractDFG,
src::AbstractDFG = dest,
initKey::Symbol = :graphinit,
solveKey::Symbol = :default;
varList::AbstractVector{Symbol} = ls(dest),
)
#
for vs in varList
vnd = getSolverData(getVariable(src, vs), initKey)
# guess we definitely want to use copy to preserve the initKey memory
updateVariableSolverData!(dest, vs, vnd, solveKey, true; warn_if_absent = false)
end
return dest
end
const resetInitValues! = resetInitialValues!
"""
$SIGNATURES
Ensure that no variables set as `solvable=1` are floating free without any connected `solvable=1` factors. If any are found, set those 'free' variables' `solvable=solvableFallback` (default `0`).
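Example
A hedged sketch on an existing graph `fg`:
```julia
# demote any solvable=1 variables that have no solvable=1 factors attached
freeVars = ensureSolvable!(fg)  # returns the labels of the demoted variables
```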
Related
[`initAll!`](@ref)
"""
function ensureSolvable!(
dfg::AbstractDFG;
solvableTarget::Int = 1,
solvableFallback::Int = 0,
)
# workaround in case isolated variables occur
solvVars = ls(dfg; solvable = solvableTarget)
varHasNoFact = (x -> length(ls(dfg, x; solvable = solvableTarget)) == 0).(solvVars)
blankVars = solvVars[findall(varHasNoFact)]
if 0 < length(blankVars)
@warn(
"solveTree! dissallows solvable variables without any connected solvable factors -- forcing solvable=0 on $(blankVars)"
)
(x -> setSolvable!(dfg, x, solvableFallback)).(blankVars)
end
return blankVars
end
"""
$SIGNATURES
Perform `graphinit` over all variables with `solvable=1` (default).
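Example
A hedged sketch; assumes `fg` still has uninitialized variables:
```julia
initAll!(fg)               # nonparametric graphinit on solveKey :default
initAll!(fg, :parametric)  # parametric counterpart, calls autoinitParametric! internally
```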
See also: [`ensureSolvable!`](@ref), (EXPERIMENTAL 'treeinit')
"""
function initAll!(
dfg::AbstractDFG,
solveKey::Symbol = :default;
_parametricInit::Bool = solveKey === :parametric,
solvable::Int = 1,
N::Int = _parametricInit ? 1 : getSolverParams(dfg).N,
)
#
# allvarnodes = getVariables(dfg)
syms = intersect(getAddHistory(dfg), ls(dfg; solvable = solvable))
# syms = ls(dfg, solvable=solvable) # |> sortDFG
# May have to first add the solveKey VNDs if they are not yet available
for sym in syms
vari = getVariable(dfg, sym)
varType = getVariableType(vari) |> _variableType
# does SolverData exist for this solveKey?
vsolveKeys = listSolveKeys(vari)
# FIXME, likely some consolidation needed with #1637
if !_parametricInit && !(solveKey in vsolveKeys)
# accept complete defaults for a novel solveKey
setDefaultNodeData!(
vari,
0,
N,
getDimension(varType);
solveKey,
initialized = false,
varType,
)
end
if _parametricInit && !(:parametric in vsolveKeys)
setDefaultNodeDataParametric!(vari, varType; initialized = false)
end
end
# do the init
repeatCount = 0
repeatFlag = true
while repeatFlag
repeatFlag = false
repeatCount += 1
if 10 < repeatCount
@info "not able to initialize all variables via the factor graph, abort autoinit."
break
end
for sym in syms
var = getVariable(dfg, sym)
# is this SolverData initialized?
if !isInitialized(var, solveKey)
@info "$(var.label) is not initialized, and will do so now..."
if _parametricInit
autoinitParametric!(dfg, var; solveKey)
else
doautoinit!(dfg, [var;]; solveKey, singles = true)
end
!isInitialized(var, solveKey) ? (repeatFlag = true) : nothing
end
end
end
return nothing
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 4001 |
"""
$SIGNATURES
Calculate the proposals and products on `destvert` using `factors` in factor graph `dfg`.
Notes
- Returns tuple of product and whether full dimensional (=true) or partial (=false).
- `N` determines the number of samples to draw from the marginal.
- `dens` can contain mixed full and partial dimension `ManifoldKernelDensity` beliefs
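Example
A hedged sketch; assumes `fg` contains `:x1` with connected factors labeled `:x1f1` and `:x1x2f1`:
```julia
bel, ipc = propagateBelief(fg, :x1, [:x1f1; :x1x2f1])
# or convolve through all factors connected to :x1
bel, ipc = propagateBelief(fg, :x1, :)
```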
Related
[`approxConvBelief`](@ref), [`proposalbeliefs!`](@ref), [`AMP.manifoldProduct`](@ref)
"""
function propagateBelief(
dfg::AbstractDFG,
destvar::DFGVariable,
factors::AbstractVector; #{<:DFGFactor};
solveKey::Symbol = :default,
dens::AbstractVector{<:ManifoldKernelDensity} = Vector{ManifoldKernelDensity}(), # TODO, abstract requires dynamic dispatch (slow)
N::Integer = getSolverParams(dfg).N,
needFreshMeasurements::Bool = true,
dbg::Bool = false,
logger = ConsoleLogger(),
asPartial::Bool=false,
)
#
# get proposal beliefs
destlbl = getLabel(destvar)
ipc = proposalbeliefs!(dfg, destlbl, factors, dens; solveKey, N, dbg)
# @show dens[1].manifold
# make sure oldPoints vector has right length
oldBel = getBelief(dfg, destlbl, solveKey)
_pts = getPoints(oldBel, false)
oldPoints = if Npts(oldBel) < N
nn = N - length(_pts) # should be larger than 0
_pts_, = sample(oldBel, nn)
vcat(_pts, _pts_)
else
_pts[1:N]
end
# few more data requirements
varType = getVariableType(destvar)
M = getManifold(varType)
# @info "BUILDING MKD" varType M isPartial.(dens)
# take the product
mkd = AMP.manifoldProduct(
dens,
M;
Niter = 1,
oldPoints,
N,
u0 = getPointDefault(varType),
)
# @info "GOT" mkd.manifold
return mkd, ipc
end
function propagateBelief(
dfg::AbstractDFG,
destlbl::Symbol,
fctlbls::AbstractVector{Symbol};
kw...,
)
return propagateBelief(
dfg,
getVariable(dfg, destlbl),
map(x -> getFactor(dfg, x), fctlbls);
kw...,
)
end
#
propagateBelief(dfg::AbstractDFG, destlbl::Symbol, ::Colon; kw...) = propagateBelief(dfg, destlbl, listNeighbors(dfg, destlbl); kw...)
"""
$(SIGNATURES)
Using factor graph object `dfg`, project belief through connected factors
(convolution with likelihood) to variable `sym` followed by a approximate functional product.
Return: product belief, full proposals, partial dimension proposals, labels
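Example
A hedged sketch; assumes variable `:x1` exists in `fg`:
```julia
mkd, dens, lbls, ipc = localProduct(fg, :x1)
```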
"""
function localProduct(
dfg::AbstractDFG,
sym::Symbol;
solveKey::Symbol = :default,
N::Int = getSolverParams(dfg).N, #maximum([length(getPoints(getBelief(dfg, sym, solveKey))); getSolverParams(dfg).N]),
dbg::Bool = false,
logger = ConsoleLogger(),
)
#
# vector of all neighbors as Symbols
lb = listNeighbors(dfg, sym)
# store proposal beliefs, TODO replace Abstract with concrete type
dens = Vector{ManifoldKernelDensity}()
fcts = map(x -> getFactor(dfg, x), lb)
mkd, sinfd = propagateBelief(
dfg,
getVariable(dfg, sym),
fcts;
solveKey = solveKey,
logger = logger,
dens = dens,
N = N,
)
return mkd, dens, lb, sinfd
end
function localProduct(dfg::AbstractDFG, lbl::AbstractString; kw...)
return localProduct(dfg, Symbol(lbl); kw...)
end
"""
$SIGNATURES
Basic wrapper to take local product and then set the value of `sym` in `dfg`.
Notes
- returns `::Tuple{ManifoldKernelDensity, Float64, Vector{Symbol}}`
DevNotes:
- Unknown issue first occurred here near IIF v0.8.4 tag, recorded case at 2020-01-17T15:26:17.673
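Example
A hedged sketch; assumes `fg` contains `:x1`:
```julia
newBel, ipc, fctlbls = localProductAndUpdate!(fg, :x1)  # also sets the new belief on :x1
```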
"""
function localProductAndUpdate!(
dfg::AbstractDFG,
sym::Symbol,
setkde::Bool = true,
logger = ConsoleLogger();
solveKey::Symbol = :default,
)
#
# calculate new points for sym using existing structure around sym in dfg
newPts, dens, lbl, ipc =
localProduct(dfg, sym; solveKey = solveKey, N = getSolverParams(dfg).N, logger = logger)
# maybe update dfg sym with newly calculated points
if setkde && 0 < length(getPoints(newPts))
setValKDE!(dfg, sym, newPts, false, ipc; solveKey = solveKey)
else
nothing
end
return newPts, ipc, lbl
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1665 |
function Base.isapprox(
a::HypoRecipe,
b::HypoRecipe;
iakws...
)
if !(
isapprox(a.certainidx, b.certainidx; iakws...) &&
isapprox(a.mhidx, b.mhidx; iakws...)
)
@debug "HypoRecipe a vs b not the same on either .certainidx or .mhidx"
return false
end
if length(a.allelements) != length(b.allelements)
@debug "HypoRecipe different lengths on a vs b .allelements"
return false
end
for (i,el) in enumerate(a.allelements)
if !isapprox(el, b.allelements[i]; iakws...)
@debug "HypoRecipe a vs b different on .allelements"
return false
end
end
if length(a.activehypo) != length(b.activehypo)
@debug "HypoRecipe different lengths on a vs b .activehypo"
return false
end
for (i,el) in enumerate(a.activehypo)
if el[1] != b.activehypo[i][1] || !isapprox(el[2], b.activehypo[i][2]; iakws...)
@debug "HypoRecipe a vs b different on .activehypo"
return false
end
end
return true
end
Base.:(==)(
a::HypoRecipe,
b::HypoRecipe
) = isapprox(a,b)
function Base.isapprox(
a::HypoRecipeCompute,
b::HypoRecipeCompute;
iakws...
)
if !(isnothing(a.hypotheses) && isnothing(b.hypotheses))
# guard: if only one side carries hypotheses they cannot be approximately equal
(isnothing(a.hypotheses) || isnothing(b.hypotheses)) && return false
return isapprox(a.hypotheses.p, b.hypotheses.p; iakws...)
end
if !(isnothing(a.certainhypo) && isnothing(b.certainhypo))
(isnothing(a.certainhypo) || isnothing(b.certainhypo)) && return false
return isapprox(a.certainhypo, b.certainhypo; iakws...)
end
if 0 < length(a.activehypo)
if length(a.activehypo) == length(b.activehypo)
return isapprox(a.activehypo, b.activehypo; iakws...)
else
return false
end
end
return true
end
Base.:(==)(
a::HypoRecipeCompute,
b::HypoRecipeCompute
) = isapprox(a,b)
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 4411 | function MetaBayesTree()
return MetaBayesTree(
MetaDiGraph{Int, Float64}(),
0,
Dict{AbstractString, Int}(),
Symbol[],
0.0,
)
end
function Base.propertynames(x::MetaBayesTree, private::Bool = false)
return (:bt, :btid, :cliques, :frontals, :eliminationOrder, :buildTime)
end
function Base.getproperty(x::MetaBayesTree, f::Symbol)
begin
if f == :cliques
@warn "Don't use cliques field directly, use eg. getClique(tree, cliqId)" maxlog = 1
d = Dict{Int, Any}()
for (k, v) in x.bt.vprops
d[k] = v[:clique]
end
return d
else
getfield(x, f)
end
end
end
function Base.setproperty!(x::MetaBayesTree, f::Symbol, val)
if f == :cliques
@warn "`setproperty!(clique)` Don't use cliques field directly, use eg. addClique(tree, cliqId)" maxlog =
1
for (k, v) in val
set_prop!(x.bt, k, :clique, v)
end
else
setfield!(x, f, val)
end
end
function getMessageChannels(tree::MetaBayesTree)
d = Dict{
Int,
NamedTuple{
(:upMsg, :downMsg),
Tuple{Channel{LikelihoodMessage}, Channel{LikelihoodMessage}},
},
}()
for (k, e) in tree.bt.eprops
d[k.dst] = (upMsg = e[:upMsg], downMsg = e[:downMsg])
end
return d
end
function Base.show(io::IO, mbt::MetaBayesTree)
printstyled(io, "MetaBayesTree\n"; color = :blue)
println(io, " Nr cliques: ", length(mbt.cliques))
# TODO add more stats: max depth, widest point, longest chain, max clique size, average nr children
return nothing
end
Base.show(io::IO, ::MIME"text/plain", mbt::MetaBayesTree) = show(io, mbt)
Base.show(io::IO, o::CSMHistoryTuple) = print(io, "$(o[1]), $(o[2]), $(o[3])")
function CliqStateMachineContainer(
dfg::G,
cliqSubFg::M,
tree::T,
cliq::TreeClique,
incremental::Bool,
drawtree::Bool,
dodownsolve::Bool,
delay::Bool,
opts::SolverParams,
refactoring::Dict{Symbol, String} = Dict{Symbol, String}(),
oldcliqdata::BTND = BayesTreeNodeData(),
logger::SimpleLogger = SimpleLogger(Base.stdout);
cliqId::CliqueId = cliq.id,
algorithm::Symbol = :default,
init_iter::Int = 0,
enableLogging::Bool = true,
solveKey::Symbol = :default,
_csm_iter::Int = 0,
) where {BTND, G <: AbstractDFG, M <: InMemoryDFGTypes, T <: AbstractBayesTree}
#
return CliqStateMachineContainer{BTND, G, M, T}(
dfg,
cliqSubFg,
tree,
cliq,
incremental,
drawtree,
dodownsolve,
delay,
opts,
refactoring,
oldcliqdata,
logger,
cliqId,
algorithm,
init_iter,
enableLogging,
solveKey,
_csm_iter,
)
#
end
# TODO resolve name conflict
function DFG.compare(
cs1::CliqStateMachineContainer{BTND1, T1, InMemG1, BT1},
cs2::CliqStateMachineContainer{BTND2, T2, InMemG2, BT2};
skip::Vector{Symbol} = Symbol[],
) where {
BTND1,
T1 <: AbstractDFG,
InMemG1 <: InMemoryDFGTypes,
BT1 <: AbstractBayesTree,
BTND2,
T2 <: AbstractDFG,
InMemG2 <: InMemoryDFGTypes,
BT2 <: AbstractBayesTree,
}
#
BTND1 == BTND2 ? nothing : @warn("oldcliqdata::$BTND1 != oldcliqdata::$BTND2")
T1 == T2 ? nothing : @warn("dfg::$T1 != dfg::$T2")
InMemG1 == InMemG2 ? nothing : @warn("cliqSubFg::$InMemG1 != cliqSubFg::$InMemG2")
BT1 == BT2 ? nothing : @warn("tree::$BT1 != tree::$BT2")
TP = true
@warn "Skipping compare of CSMC.dfg and .cliqSubFg"
# TP = TP && compare(cs1.dfg, cs2.dfg)
# TP = TP && compare(cs1.cliqSubFg, cs2.cliqSubFg)
@warn "Skipping compare of CSMC.tree"
# TP = TP && compare(cs1.tree, cs2.tree)
TP = TP && compare(cs1.cliq, cs2.cliq)
TP = TP && compare(cs1.cliqId, cs2.cliqId)
TP = TP && length(cs1.parentCliq) == length(cs2.parentCliq)
for i = 1:length(cs1.parentCliq)
TP = TP && compare(cs1.parentCliq[i], cs2.parentCliq[i])
end
TP = TP && length(cs1.childCliqs) == length(cs2.childCliqs)
for i = 1:length(cs1.childCliqs)
TP = TP && compare(cs1.childCliqs[i], cs2.childCliqs[i])
end
TP = TP && compare(cs1.incremental, cs2.incremental)
TP = TP && compare(cs1.drawtree, cs2.drawtree)
TP = TP && compare(cs1.dodownsolve, cs2.dodownsolve)
TP = TP && compare(cs1.delay, cs2.delay)
@warn "skipping compare on csmc.opts::SolverParams"
# TP = TP && compare(cs1.opts, cs2.opts)
TP = TP && compare(cs1.refactoring, cs2.refactoring)
# TP = TP && compare(cs1.oldcliqdata, cs2.oldcliqdata)
# TP = TP && compare(cs1.logger, cs2.logger)
return TP
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 49773 |
"""
$SIGNATURES
Set the color of a cliq in the Bayes (Junction) tree.
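Example
A hedged sketch; assumes `tree` is an existing Bayes tree with at least one clique:
```julia
setCliqueDrawColor!(getClique(tree, 1), "lightblue")
```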
"""
function setCliqueDrawColor!(cliq::TreeClique, fillcolor::String)::Nothing
cliq.attributes["fillcolor"] = fillcolor
cliq.attributes["style"] = "filled"
return nothing
end
function getCliqueDrawColor(cliq::TreeClique)
return haskey(cliq.attributes, "fillcolor") ? cliq.attributes["fillcolor"] : nothing
end
"""
$SIGNATURES
Get the frontal variable IDs `::Symbol` for a given clique in a Bayes (Junction) tree.
"""
getCliqFrontalVarIds(
cliqdata::Union{BayesTreeNodeData, PackedBayesTreeNodeData},
)::Vector{Symbol} = cliqdata.frontalIDs
getCliqFrontalVarIds(cliq::TreeClique)::Vector{Symbol} =
getCliqFrontalVarIds(getCliqueData(cliq))
"""
$SIGNATURES
Get the frontal variable IDs `::Symbol` for a given clique in a Bayes (Junction) tree.
"""
getFrontals(cliqd::Union{TreeClique, BayesTreeNodeData})::Vector{Symbol} =
getCliqFrontalVarIds(cliqd)
"""
$SIGNATURES
Create a new clique.
"""
function addClique!(
bt::MetaBayesTree,
dfg::AbstractDFG,
varID::Symbol,
condIDs::Array{Symbol} = Symbol[],
)
bt.btid += 1 #increment cliqueID counter
cId = CliqueId(bt.btid)
clq = TreeClique(cId)
setLabel!(clq, "")
#TODO addClique!(bt, clq), can't we already have the parent here
# if isa(bt.bt,GenericIncidenceList)
# Graphs.add_vertex!(bt.bt, clq)
# bt.cliques[bt.btid] = clq
# clId = bt.btid
# if isa(bt.bt, MetaDiGraph)
# @assert MetaGraphs.add_vertex!(bt.bt, :clique, clq) "add_vertex! failed"
# clId = MetaGraphs.nv(bt.bt)
# MetaGraphs.set_indexing_prop!(bt.bt, clId, :cliqId, bt.btid)
# else
# error("Oops, something went wrong when adding a clique to the tree")
# end
@assert MetaGraphs.add_vertex!(bt.bt, :clique, clq) "Error trying to addClique! - add_vertex! failed"
MetaGraphs.set_indexing_prop!(bt.bt, MetaGraphs.nv(bt.bt), :cliqId, cId)
appendClique!(bt, cId, dfg, varID, condIDs)
return clq
end
"""
$(SIGNATURES)
Return the TreeClique node object that represents a clique in the Bayes
(Junction) tree, as defined by one of the frontal variables `frt<:AbstractString`.
Notes
- Frontal variables only occur once in a clique per tree, and therefore serve as unique identifiers.
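Example
A hedged sketch; assumes `:x1` is a frontal variable somewhere in `tree`:
```julia
cliq = getClique(tree, :x1)  # lookup by frontal variable
cliq = tree[:x1]             # equivalent getindex shorthand
```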
Related:
getCliq, getTreeAllFrontalSyms
"""
getClique(tree::AbstractBayesTree, cId::Int) = getClique(tree, CliqueId(cId))
getClique(bt::AbstractBayesTree, frt::Symbol) = getClique(bt, bt.frontals[frt])
function getClique(tree::MetaBayesTree, cId::CliqueId)
return MetaGraphs.get_prop(tree.bt, tree.bt[:cliqId][cId], :clique)
end
getClique(tree::MetaBayesTree, cIndex::Int) = MetaGraphs.get_prop(tree.bt, cIndex, :clique)
function Base.getindex(tr::AbstractBayesTree, afrontal::Union{Symbol, Int, CliqueId})
return getClique(tr, afrontal)
end
function Base.show(io::IO, cliq::TreeClique)
printstyled(io, "TreeClique (id=$(cliq.id))\n"; color = :blue)
println(io, " frontals: ", getFrontals(cliq))
println(io, " separator: ", getCliqSeparatorVarIds(cliq))
println(io, " status: ", cliq.data.status)
println(io, " allmarginld: ", cliq.data.allmarginalized)
println(io, " potentials: ", cliq.data.potentials)
printstyled(io, " messages\n"; bold = true, )
if cliq.data.messages.upTx isa Nothing
nothing
else
println(io, " .upTx: ", cliq.data.messages.upTx.belief |> keys)
end
if 0 != length(cliq.data.messages.upRx)
print(io, " .upRx: ")
for (id, msg) in cliq.data.messages.upRx
print(io, id, "=>", msg.belief |> keys)
end
println(io)
end
if cliq.data.messages.downTx isa Nothing
nothing
else
println(io, " .downTx: ", cliq.data.messages.downTx.belief |> keys)
end
if cliq.data.messages.downRx isa Nothing
nothing
else
println(io, " .downRx: ", cliq.data.messages.downRx.belief |> keys)
end
return nothing
end
Base.show(io::IO, ::MIME"text/plain", cliq::TreeClique) = show(io, cliq)
function DFG.ls(tr::AbstractBayesTree)
ids = keys(tr.cliques) |> collect |> sort
ret = Vector{Pair{Int, Vector{Symbol}}}(undef, length(ids))
for (idx, id) in enumerate(ids)
ret[idx] = (id => getFrontals(getClique(tr,id)))
end
return ret
end
function DFG.ls(tr::AbstractBayesTree, id::Union{Symbol, Int, CliqueId})
cliq = getClique(tr, id)
prnt = getParent(tr, cliq)
chld = getChildren(tr, cliq)
# build children list
chll = Vector{Pair{Int, Vector{Symbol}}}()
cp = (x -> x.id.value).(chld) |> sortperm
for ch in chld[cp]
push!(chll, ch.id.value => getFrontals(ch))
end
# NOTE prnt is Vector{TreeClique}
prll = Vector{Pair{Int, Vector{Symbol}}}()
for pr in prnt
push!(prll, pr.id.value => getFrontals(pr))
end
# experimental, return a NamedTuple for tree around specific cliques
return (; parent = prll, children = chll)
end
function Base.show(
io::IO,
ntl::NamedTuple{
(:parent, :children),
Tuple{Vector{Pair{Int64, Vector{Symbol}}}, Vector{Pair{Int64, Vector{Symbol}}}},
},
)
printstyled(io, "IIF.show(::NamedTuple{..}) for Bayes tree\n"; color = :blue)
println(io, " (parent = ", ntl.parent)
println(io, " children = ", ntl.children, ")")
return nothing
end
function Base.show(
io::IO,
::MIME"text/plain",
ntl::NamedTuple{
(:parent, :children),
Tuple{Vector{Pair{Int64, Vector{Symbol}}}, Vector{Pair{Int64, Vector{Symbol}}}},
},
)
return show(io, ntl)
end
"""
$(SIGNATURES)
Delete a clique from the Bayes tree, also removing its frontal variables from the tree's frontal lookup.
"""
function deleteClique!(tree::MetaBayesTree, clique::TreeClique)
cId = clique.id
@assert MetaGraphs.rem_vertex!(tree.bt, tree.bt[:cliqId][cId]) "rem_vertex! failed"
foreach(frt -> delete!(tree.frontals, frt), getFrontals(clique))
return clique
end
function deleteClique!(tree::MetaBayesTree, cId::CliqueId)
return deleteClique!(tree, getClique(tree, cId))
end
isRoot(tree::MetaBayesTree, cliq::TreeClique) = isRoot(tree, cliq.id)
function isRoot(tree::MetaBayesTree, cliqId::CliqueId)
return length(MetaGraphs.inneighbors(tree.bt, tree.bt[:cliqId][cliqId])) == 0
end
"""
$SIGNATURES
Return boolean on whether the frontal variable `frt::Symbol` exists somewhere in the `::BayesTree`.
"""
hasClique(bt::AbstractBayesTree, frt::Symbol) = haskey(bt.frontals, frt)
"""
$SIGNATURES
Return depth in tree as `::Int`, with root as depth=0.
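Example
A hedged sketch; assumes frontal `:x1` exists in `tree`:
```julia
d = getCliqDepth(tree, :x1)  # 0 when :x1 lives in the root clique
```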
Related
getCliq
"""
function getCliqDepth(tree, cliq)::Int
prnt = getParent(tree, cliq)
if length(prnt) == 0
return 0
end
return getCliqDepth(tree, prnt[1]) + 1
end
getCliqDepth(tree::AbstractBayesTree, sym::Symbol)::Int =
getCliqDepth(tree, getClique(tree, sym))
getCliques(tree::AbstractBayesTree) = tree.cliques
function getCliques(tree::MetaBayesTree)
d = Dict{Int, Any}()
for (k, v) in tree.bt.vprops
d[k] = v[:clique]
end
return d
end
getCliqueIds(tree::AbstractBayesTree) = keys(getCliques(tree))
function getCliqueIds(tree::MetaBayesTree)
# MetaGraphs.vertices(tree.bt)
return keys(tree.bt.metaindex[:cliqId])
end
"""
$SIGNATURES
Return reference to the clique data container.
"""
getCliqueData(cliq::TreeClique) = cliq.data
function getCliqueData(tree::AbstractBayesTree, cId::Union{CliqueId, Int})
return getClique(tree, cId) |> getCliqueData
end
"""
$SIGNATURES
Set the clique data container to a new object `data`.
"""
function setCliqueData!(
cliq::TreeClique,
data::Union{PackedBayesTreeNodeData, BayesTreeNodeData},
)
return cliq.data = data
end
function setCliqueData!(
tree::AbstractBayesTree,
cId::Int,
data::Union{PackedBayesTreeNodeData, BayesTreeNodeData},
)
return setCliqueData!(getClique(tree, cId), data)
end
# #TODO
# addClique!(tree::AbstractBayesTree, parentCliqId::Int, cliq::TreeClique) = error("addClique!(tree::AbstractBayesTree, parentCliqId::Int, cliq::TreeClique) not implemented")
# updateClique!(tree::AbstractBayesTree, cliq::TreeClique) = error("updateClique!(tree::AbstractBayesTree, cliq::TreeClique)::Bool not implemented")
# deleteClique!(tree::AbstractBayesTree, cId::Int) = error("deleteClique!(tree::AbstractBayesTree, cId::Int)::TreeClique not implemented")
"""
$SIGNATURES
Generate the label for particular clique (used by graphviz for visualization).
"""
function makeCliqueLabel(
dfg::G,
bt::AbstractBayesTree,
clqID::CliqueId,
)::String where {G <: AbstractDFG}
clq = getClique(bt, clqID)
flbl = ""
clbl = ""
for fr in getCliqueData(clq).frontalIDs
flbl = string(flbl, DFG.getVariable(dfg, fr).label, ",")
end
for sepr in getCliqueData(clq).separatorIDs
clbl = string(clbl, DFG.getVariable(dfg, sepr).label, ",")
end
return setLabel!(clq, string(clqID, "| ", flbl, ": ", clbl))
end
"""
$SIGNATURES
Add the separator for the newly created clique.
"""
function appendSeparatorToClique!(
bt::AbstractBayesTree,
clqID::CliqueId,
seprIDs::Array{Symbol, 1},
)
#
union!(getCliqueData(bt, clqID).separatorIDs, seprIDs)
return nothing
end
"""
$SIGNATURES
Add a new frontal variable to clique.
DevNotes
- TODO, define what "conditionals" are CLEARLY!!
"""
function appendClique!(
bt::AbstractBayesTree,
clqID::CliqueId,
dfg::AbstractDFG,
varID::Symbol,
seprIDs::Array{Symbol, 1} = Symbol[],
)
#
clq = getClique(bt, clqID)
var = DFG.getVariable(dfg, varID)
# add frontal variable
push!(getCliqueData(clq).frontalIDs, varID)
# total dictionary of frontals for easy access
bt.frontals[varID] = clqID
# TODO - confirm this, append to cliq separator ??
# @info "going for: appendSeparatorToClique on (clqID, seprIDs)=($clqID, $seprIDs)"
appendSeparatorToClique!(bt, clqID, seprIDs)
makeCliqueLabel(dfg, bt, clqID)
return nothing
end
"""
$SIGNATURES
Instantiate a new child clique in the tree.
"""
function newChildClique!(
bt::AbstractBayesTree,
dfg::AbstractDFG,
CpID::CliqueId,
varID::Symbol,
Sepj::Array{Symbol, 1},
)
#
# physically create the new clique
chclq = addClique!(bt, dfg, varID, Sepj)
parent = getClique(bt, CpID)
if isa(bt.bt, MetaDiGraph)
# TODO EDGE properties here
@assert MetaGraphs.add_edge!(bt.bt, bt.bt[:cliqId][CpID], bt.bt[:cliqId][chclq.id]) "Add edge failed"
else
error("Oops, something went wrong when adding a new child clique to the tree")
end
return chclq
end
"""
$SIGNATURES
Return the clique in the tree that lists `frtlID` among its frontal variables.
"""
function findCliqueFromFrontal(bt::AbstractBayesTree, frtlID::Int)
for cliqPair in getCliques(bt)
id = cliqPair[1]
cliq = cliqPair[2]
for frtl in getFrontals(cliq)
if frtl == frtlID
return cliq
end
end
end
return error("Clique with desired frontal ID not found")
end
"""
$SIGNATURES
Find the parent clique Cp that contains the first eliminated variable of Sj as a frontal.
"""
function identifyFirstEliminatedSeparator(
dfg::AbstractDFG,
elimorder::Vector{Symbol},
firvert::DFGVariable,
Sj = getSolverData(firvert).separator,
)::DFGVariable
#
firstelim = (2^(Sys.WORD_SIZE - 1) - 1)
for s in Sj
temp = something(findfirst(isequal(s), elimorder), 0) # findfirst(p, s)
if (temp < firstelim)
firstelim = temp
end
end
return DFG.getVariable(dfg, elimorder[firstelim])
end
"""
$SIGNATURES
Eliminate a variable and add to tree cliques accordingly.
Dev Notes
- `p` should be elimination order.
- `var` is next variable to be added to the tree.
- TODO, make sure this works for disjoint graphs
- Check laplacian, check eigen == 1 is disjoint sets
References
Kaess et al.: Bayes Tree, WAFR, 2010, [Alg. 2]
Kaess et al.: iSAM2, IJRR, 2011, [Alg. 3]
Fourie, D.: mmisam, PhD thesis, 2017. [Chpt. 5]
"""
function newPotential(
tree::AbstractBayesTree,
dfg::G,
var::Symbol,
elimorder::Array{Symbol, 1},
) where {G <: AbstractDFG}
firvert = DFG.getVariable(dfg, var)
# no parent
if (length(getSolverData(firvert).separator) == 0)
# if (length(getCliques(tree)) == 0)
# create new root
addClique!(tree, dfg, var)
# else
# # add to root
# @warn "root append clique is happening"
# appendClique!(tree, 1, dfg, var)
# end
else
# find the parent clique Cp that contains the first eliminated variable of Sj as a frontal
Sj = getSolverData(firvert).separator
felbl = identifyFirstEliminatedSeparator(dfg, elimorder, firvert, Sj).label
# get clique id of first eliminated frontal
CpID = tree.frontals[felbl]
# look to add this conditional to the tree
cliq = getClique(tree, CpID)
# clique of the first eliminated frontal
unFC = union(getCliqFrontalVarIds(cliq), getCliqSeparatorVarIds(cliq))
# if the separator of this new variable is identical to the (entire) clique of the firstly eliminated frontal.
if (sort(unFC) == sort(Sj))
# just add new variable as frontal to this clique
# insert conditional (p(var|sepr)) into clique CpID -- i.e. just adding a frontal
# @info "adding new frontal $var to existing clique $CpID"
appendClique!(tree, CpID, dfg, var)
else
# a new child clique is required here (this becomes parent)
# @info "adding new child clique with parent separator."
newChildClique!(tree, dfg, CpID, var, Sj)
end
end
end
"""
$SIGNATURES
Build the whole tree in batch format.
"""
function buildTree!(
tree::AbstractBayesTree,
dfg::AbstractDFG,
elimorder::AbstractVector{Symbol},
)
#
revorder = reverse(elimorder; dims = 1) # fixing #499
for var in revorder
@debug "Adding $var to tree..."
newPotential(tree, dfg, var, elimorder)
prevVar = var
end
return tree
end
"""
$SIGNATURES
Open view to see the graphviz exported Bayes tree, assuming default location and
viewer app. See keyword arguments for more details.
"""
function showTree(; filepath::String = "/tmp/caesar/bt.dot", viewerapp::String = "xdot")
#
try
@async run(`$(viewerapp) $(filepath)`)
catch ex
@warn "not able to show via $(viewerapp) $(filepath)"
@show ex
@show stacktrace()
end
end
# A replacement for _to_dot that saves only plotting attributes
function savedot_attributes(io::IO, g::MetaDiGraph)
write(io, "digraph G {\n")
for p in props(g)
write(io, "$(p[1])=$(p[2]);\n")
end
for v in MetaGraphs.vertices(g)
write(io, "$v")
if length(props(g, v)) > 0
write(io, " [ ")
end
for p in props(g, v)
# key = p[1]
# write(io, "$key=\"$(p[2])\",")
for (k, v) in p[2]
write(io, "\"$k\"=\"$v\",")
end
end
if length(props(g, v)) > 0
write(io, "];")
end
write(io, "\n")
end
for e in MetaGraphs.edges(g)
write(io, "$(MetaGraphs.src(e)) -> $(MetaGraphs.dst(e)) [ ")
if MetaGraphs.has_prop(g, e, :downMsg) && MetaGraphs.has_prop(g, e, :upMsg)
if isready(MetaGraphs.get_prop(g, e, :downMsg))
write(io, "color=red")
elseif isready(MetaGraphs.get_prop(g, e, :upMsg))
write(io, "color=orange")
else
write(io, "color=black")
end
end
write(io, "]\n")
end
return write(io, "}\n")
end
function _to_dot(mdigraph::MetaDiGraph)
g = deepcopy(mdigraph)
for (i, val) in g.vprops
push!(g.vprops[i], :attributes => val[:clique].attributes)
delete!(g.vprops[i], :clique)
delete!(g.vprops[i], :cliqId)
end
m = PipeBuffer()
savedot_attributes(m, g)
data = take!(m)
close(m)
return String(data)
end
"""
$SIGNATURES
Draw the Bayes (Junction) tree by means of graphviz `.dot` files. Ensure the required Linux packages
are installed, e.g. `sudo apt-get install graphviz xdot`.
Notes
- `xlabels` is optional `cliqid=>xlabel`.
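Example
A hedged sketch, assuming graphviz and xdot are installed:
```julia
drawTree(tree; show=true)  # writes the .dot file, then opens it in xdot
```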
"""
function drawTree(
treel::AbstractBayesTree;
show::Bool = false, # must remain false for stability and automated use in solver
suffix::AbstractString = "_" * (split(string(uuid1()), '-')[1]),
filepath::String = "/tmp/caesar/random/bt$(suffix).dot",
xlabels::Dict{Int, String} = Dict{Int, String}(),
dpi::Int = 200,
viewerapp::String = "xdot",
imgs::Bool = false,
)
#
fext = filepath[(end - 2):end] #split(filepath, '.')[end]
fpwoext = filepath[1:(end - 4)]# split(filepath, '.')[end-1]
path = mkpath(dirname(fpwoext))
# modify a deepcopy
btc = deepcopy(treel)
for (cid, cliq) in getCliques(btc)
if imgs
firstlabel = split(getLabel(cliq), ',')[1]
spyCliqMat(cliq; suppressprint = true) |> exportimg(path * "/$firstlabel$suffix.png")
cliq.attributes["image"] = path * "/$firstlabel$suffix.png"
setLabel!(cliq, "")
end
delete!(cliq.attributes, "data")
end
# add any xlabel info
for (cliqid, xlabel) in xlabels
btc.bt.vertices[cliqid].attributes["xlabel"] = xlabel
end
fid = IOStream("")
try
fid = open("$(fpwoext).dot", "w+")
write(fid, _to_dot(btc.bt))
close(fid)
if string(fext) == "png"
run(`dot $(fpwoext).dot -T $(fext) -Gdpi=$dpi -o $(fpwoext).$(fext)`)
else
run(`dot $(fpwoext).dot -T $(fext) -o $(fpwoext).$(fext)`)
end
catch ex
@warn ex
@show stacktrace()
finally
close(fid)
end
return show ? showTree(; viewerapp = viewerapp, filepath = filepath) : nothing
end
"""
$SIGNATURES
If opt.drawtree then start an async task to draw tree in a loop according to opt.drawtreerate.
Notes
- won't draw if opt.drawtree=false, just skips back to caller.
- Currently @async
- use `opt.showtree::Bool`
- Does not work too well when opt.async during solveTree! call, but user can use this function separately.
DevNotes
- TODO, use Threads.@spawn instead.
Related
drawTree, drawGraph
"""
function drawTreeAsyncLoop(
tree::AbstractBayesTree,
opt::SolverParams;
filepath = joinLogPath(opt, "bt.dot"),
dotreedraw = Int[1;],
)
#
# single drawtreerate
treetask = if opt.drawtree
@async begin
xlabels = Dict{Int, String}()
@info("Solve is drawing the Bayes tree")
while dotreedraw[1] == 1 && 0 < opt.drawtreerate
# actually draw the tree
drawTree(tree; show = false, filepath = filepath)
sleep(1 / opt.drawtreerate)
end
drawTree(tree; show = opt.showtree, filepath = filepath)
end
end
return treetask, dotreedraw
end
"""
$SIGNATURES
Draw the Bayes (junction) tree with LaTeX labels by means of `.dot` and `.tex`
files.
Notes
- Uses system install of graphviz.org.
- Uses external python `dot2tex` tool (`pip install dot2tex`).
Related:
drawTree
"""
function generateTexTree(
treel::AbstractBayesTree;
filepath::String = "/tmp/caesar/bayes/bt",
)
#
btc = deepcopy(treel)
for (cid, cliq) in getCliques(btc)
label = getLabel(cliq)
# Get frontals and separator, and split into elements.
frt, sep = split(label, ':')
efrt = split(frt, ',')
esep = split(sep, ',')
# Transform frontals into latex.
newfrontals = ""
for l in efrt
# Split into symbol and subindex (letter and number).
parts = split(l, r"[^a-z0-9]+|(?<=[a-z])(?=[0-9])|(?<=[0-9])(?=[a-z])")
if size(parts)[1] == 2
newfrontals = string(newfrontals, "\\bm{", parts[1], "}_{", parts[2], "}, ")
elseif size(parts)[1] == 3
newfrontals = string(newfrontals, "\\bm{", parts[2], "}_{", parts[3], "}, ")
end
end
# Remove the trailing comma.
newfrontals = newfrontals[1:(end - 2)]
# Transform separator into latex.
newseparator = ""
if length(sep) > 1
for l in esep
# Split into symbol and subindex.
parts = split(l, r"[^a-z0-9]+|(?<=[a-z])(?=[0-9])|(?<=[0-9])(?=[a-z])")
if size(parts)[1] == 2
newseparator = string(newseparator, "\\bm{", parts[1], "}_{", parts[2], "}, ")
elseif size(parts)[1] == 3
newseparator = string(newseparator, "\\bm{", parts[2], "}_{", parts[3], "}, ")
end
end
end
# Remove the trailing comma.
newseparator = newseparator[1:(end - 2)]
# Create full label and replace the old one.
newlabel = string(newfrontals, ":", newseparator)
setLabel!(cliq, newlabel)
end
# Use new labels to produce `.dot` and `.tex` files.
fid = IOStream("")
try
mkpath(joinpath((split(filepath, '/')[1:(end - 1)])...))
fid = open("$(filepath).dot", "w+")
write(fid, _to_dot(btc.bt))
close(fid)
# All in one command.
run(`dot2tex -tmath --preproc $(filepath).dot -o $(filepath)proc.dot`)
run(`dot2tex $(filepath)proc.dot -o $(filepath).tex`)
catch ex
@warn ex
@show stacktrace()
finally
close(fid)
end
return btc
end
"""
$SIGNATURES
Build Bayes/Junction/Elimination tree from a given variable ordering.
DevNotes
- TODO use `solvable` filter during local graph copy step
- TODO review `buildCliquePotentials` and rather incorporate into CSM, see #1083
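Example
A hedged sketch on an existing graph `fg`:
```julia
elimOrder = getEliminationOrder(fg; ordering=:qr)
tree = buildTreeFromOrdering!(fg, elimOrder)
```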
Related
[`buildTreeReset!`](@ref)
"""
function buildTreeFromOrdering!(
dfg::DFG.AbstractDFG,
elimOrder::Vector{Symbol};
drawbayesnet::Bool = false,
solvable::Int = 1,
)
#
@debug "Building Bayes tree with local DFG copy"
t0 = time_ns()
fge = LocalDFG(; solverParams = getSolverParams(dfg))
#TODO JT - I think an optional solvable filter is needed in buildTreeFromOrdering!
# copy required for both remote and local graphs
DFG.deepcopyGraph!(fge, dfg)
println("Building Bayes net...")
buildBayesNet!(fge, elimOrder; solvable = solvable)
tree = BayesTree()
tree.eliminationOrder = elimOrder
buildTree!(tree, fge, elimOrder)
if drawbayesnet
println("Bayes Net")
sleep(0.1)
fid = open("bn.dot", "w+")
write(fid, _to_dot(fge.bn))
close(fid)
end
println("Find potential functions for each clique")
for cliqIds in getCliqueIds(tree)
# start at the root, of which there could be multiple disconnected trees
if isRoot(tree, cliqIds)
cliq = getClique(tree, cliqIds)
# fg does not have the marginals as fge does
buildCliquePotentials(dfg, tree, cliq; solvable = solvable)
end
end
# also store the build time
tree.buildTime = (time_ns() - t0) * 1e-9
return tree
end
"""
$SIGNATURES
Build Bayes/Junction/Elimination tree.
Notes
- Default to free qr factorization for variable elimination order.
DevNotes
- TODO deprecate and update to better name than `drawpdf`
"""
function prepBatchTreeOLD!(
dfg::AbstractDFG;
eliminationOrder::Union{Nothing, Vector{Symbol}} = nothing,
eliminationConstraints::Vector{Symbol} = Symbol[],
ordering::Symbol = 0 == length(eliminationConstraints) ? :qr : :ccolamd,
drawpdf::Bool = false,
show::Bool = false,
filepath::String = "/tmp/caesar/random/bt.dot",
viewerapp::String = "xdot",
imgs::Bool = false,
)
# drawbayesnet::Bool=false )
#
p = if eliminationOrder !== nothing
eliminationOrder
else
getEliminationOrder(dfg; ordering = ordering, constraints = eliminationConstraints)
end
# for debuggin , its useful to have the elimination ordering
if drawpdf
ispath(getLogPath(dfg)) ? nothing : Base.mkpath(getLogPath(dfg))
open(joinLogPath(dfg, "eliminationOrder.txt"), "a") do io
return writedlm(io, string.(reshape(p, 1, :)), ',')
end
end
tree = buildTreeFromOrdering!(dfg, Symbol.(p); drawbayesnet = false) # drawbayesnet
@info "Bayes Tree Complete"
if drawpdf
drawTree(tree; show = show, filepath = filepath, viewerapp = viewerapp, imgs = imgs)
end
return tree
end
"""
$SIGNATURES
Partial reset of basic data fields in `::VariableNodeData` and `::FunctionNodeData` structures.
"""
function resetData!(vdata::VariableNodeData)
vdata.eliminated = false
vdata.BayesNetOutVertIDs = Symbol[]
# vdata.BayesNetVertID = :_null # TODO dont use nothing, see DFG issue #16
vdata.separator = Symbol[]
return nothing
end
function resetData!(vdata::FunctionNodeData)
vdata.eliminated = false
vdata.potentialused = false
return nothing
end
"""
$SIGNATURES
Wipe data from `dfg` object so that a completely fresh Bayes/Junction/Elimination tree
can be constructed.
"""
function resetFactorGraphNewTree!(dfg::AbstractDFG)
for v in DFG.getVariables(dfg)
resetData!(getSolverData(v))
end
for f in DFG.getFactors(dfg)
resetData!(getSolverData(f))
end
return nothing
end
"""
$(SIGNATURES)
Build a completely new Bayes (Junction) tree, after first wiping clean all
temporary state in fg from a possibly pre-existing tree.
DevNotes
- replaces `resetBuildTreeFromOrder!`
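Example
A hedged sketch on an existing graph `fg`:
```julia
tree = buildTreeReset!(fg)  # wipes previous elimination state, then builds a fresh tree
```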
Related:
buildTreeFromOrdering!,
"""
function buildTreeReset!(
dfg::AbstractDFG,
eliminationOrder::Union{Nothing, <:AbstractVector{Symbol}} = nothing;
ordering::Symbol = :qr,
drawpdf::Bool = false,
show::Bool = false,
filepath::String = "/tmp/caesar/random/bt.dot",
viewerapp::String = "xdot",
imgs::Bool = false,
ensureSolvable::Bool = true,
eliminationConstraints::AbstractVector{Symbol} = Symbol[],
)
#
if ensureSolvable
ensureSolvable!(dfg)
end
resetFactorGraphNewTree!(dfg)
return prepBatchTreeOLD!(
dfg;
eliminationOrder = eliminationOrder,
ordering = ordering,
drawpdf = drawpdf,
show = show,
filepath = filepath,
viewerapp = viewerapp,
imgs = imgs,
eliminationConstraints = eliminationConstraints,
)
end
"""
$(SIGNATURES)
Experimental create and initialize tree message channels
"""
function initTreeMessageChannels!(tree::MetaBayesTree)
for e in MetaGraphs.edges(tree.bt)
set_props!(
tree.bt,
e,
Dict{Symbol, Any}(
:upMsg => Channel{LikelihoodMessage}(0),
:downMsg => Channel{LikelihoodMessage}(0),
),
)
# push!(tree.messageChannels, e=>(upMsg=Channel{LikelihoodMessage}(0),downMsg=Channel{LikelihoodMessage}(0)))
end
return nothing
end
"""
$SIGNATURES
Returns state of Bayes tree clique `.initialized` flag.
Notes:
- used by Bayes tree clique logic.
- similar method in DFG
"""
isInitialized(cliq::TreeClique) = getSolverData(cliq).initialized
function appendUseFcts!(usefcts, lblid::Symbol, fct::DFGFactor)
# fid::Symbol )
#
union!(usefcts, Symbol[fct.label;])
return nothing
end
"""
$SIGNATURES
Get all factors connected to frontal variables
Dev Notes
- Why not just do this, `ffcs = union( map(x->ls(fgl, x), frtl) )`?
"""
function getCliqFactorsFromFrontals(
fgl::G,
cliq::TreeClique,
varlist::Vector{Symbol};
inseparator::Bool = true,
unused::Bool = true,
solvable::Int = 1,
) where {G <: AbstractDFG}
#
frtls = getCliqueData(cliq).frontalIDs
seprs = getCliqueData(cliq).separatorIDs
allids = [frtls; seprs]
usefcts = Symbol[]
for frsym in frtls
# usefcts = Int[]
for fctid in ls(fgl, frsym)
fct = getFactor(fgl, fctid)
if !unused || !getSolverData(fct).potentialused
loutn = ls(fgl, fctid; solvable = solvable)
# deal with unary factors
if length(loutn) == 1
union!(usefcts, Symbol[Symbol(fct.label);])
# appendUseFcts!(usefcts, loutn, fct) # , frsym)
getSolverData(fct).potentialused = true
end
# deal with n-ary factors
for sep in loutn
if sep == frsym
continue # skip the frsym itself
end
insep = sep in allids
if !inseparator || insep
union!(usefcts, Symbol[Symbol(fct.label);])
getSolverData(fct).potentialused = true
if !insep
@debug "cliq=$(cliq.id) adding factor that is not in separator, $sep"
end
end
end
end
end
end
return usefcts
end
"""
$SIGNATURES
Return `::Bool` on whether factor is a partial constraint.
"""
isPartial(fcf::T) where {T <: AbstractFactor} = :partial in fieldnames(T)
isPartial(ccw::CommonConvWrapper) = ccw.usrfnc! |> isPartial
isPartial(fct::DFGFactor) = _getCCW(fct) |> isPartial
"""
$SIGNATURES
Determine and set the potentials for a particular `cliq` in the Bayes (Junction) tree.
"""
function setCliqPotentials!(
dfg::G,
bt::AbstractBayesTree,
cliq::TreeClique;
solvable::Int = 1,
) where {G <: AbstractDFG}
#
varlist = getCliqVarIdsAll(cliq)
@debug "using all factors connected to frontals and attached to separator"
fctsyms = getFactorsAmongVariablesOnly(dfg, varlist; unused = true)
# filter only factors connected to frontals (for upward)
frtfcts = union(map(x -> ls(dfg, x), getCliqFrontalVarIds(cliq))...)
fctsyms = intersect(fctsyms, frtfcts)
getCliqueData(cliq).potentials = fctsyms
getCliqueData(cliq).partialpotential = Vector{Bool}(undef, length(fctsyms))
fcts = map(x -> getFactor(dfg, x), fctsyms)
getCliqueData(cliq).partialpotential = map(x -> isPartial(x), fcts)
for fct in fcts
getSolverData(fct).potentialused = true
end
@debug "finding all frontals for down WIP"
ffctsyms = getCliqFactorsFromFrontals(
dfg,
cliq,
Symbol[];
inseparator = false,
unused = false,
solvable = solvable,
)
# fnsyms = getCliqVarsWithFrontalNeighbors(csmc.dfg, csmc.cliq)
getCliqueData(cliq).dwnPotentials = ffctsyms
getCliqueData(cliq).dwnPartialPotential = map(x -> isPartial(getFactor(dfg, x)), ffctsyms)
return nothing
end
getCliquePotentials(cliq::TreeClique) = getCliqueData(cliq).potentials
function cliqPotentialIDs(cliq::TreeClique)
potIDs = Symbol[]
for idfct in getCliqueData(cliq).potentials
push!(potIDs, idfct)
end
return potIDs
end
"""
$SIGNATURES
Collect and return all child clique separator variables.
"""
function collectSeparators(bt::AbstractBayesTree, cliq::TreeClique)::Vector{Symbol}
allseps = Symbol[]
for child in childCliqs(bt, cliq)#tree
allseps = [allseps; getCliqueData(child).separatorIDs]
end
return allseps
end
"""
$SIGNATURES
Return boolean matrix of factor by variable (row by column) associations within
clique, corresponds to order presented by `getCliqFactorIds` and `getCliqAllVarIds`.
"""
function getCliqAssocMat(cliq::TreeClique)
return getCliqueData(cliq).cliqAssocMat
end
"""
$SIGNATURES
Return boolean matrix of upward message singletons (i.e. marginal priors) from
child cliques. Variable order corresponds to `getCliqAllVarIds`.
"""
getCliqMsgMat(cliq::TreeClique) = getCliqueData(cliq).cliqMsgMat
"""
$SIGNATURES
Return boolean matrix of factor variable associations for a clique, optionally
including (`showmsg::Bool=true`) the upward message singletons. Variable order
corresponds to `getCliqAllVarIds`.
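Example
A hedged sketch; assumes `cliq` is a clique taken from an existing tree:
```julia
mat = getCliqMat(cliq)                 # factor and message-singleton associations
mat = getCliqMat(cliq; showmsg=false)  # factor associations only
```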
"""
function getCliqMat(cliq::TreeClique; showmsg::Bool = true)
assocMat = getCliqAssocMat(cliq)
msgMat = getCliqMsgMat(cliq)
mat = showmsg ? [assocMat; msgMat] : assocMat
return mat
end
"""
$SIGNATURES
Get `cliq` separator (a.k.a. conditional) variable ids`::Symbol`.
"""
getCliqSeparatorVarIds(
cliqdata::Union{BayesTreeNodeData, PackedBayesTreeNodeData},
)::Vector{Symbol} = cliqdata.separatorIDs
getCliqSeparatorVarIds(cliq::TreeClique)::Vector{Symbol} =
getCliqSeparatorVarIds(getCliqueData(cliq))
"""
$SIGNATURES
Get `cliq` potentials (factors) ids`::Symbol`.
"""
getCliqFactorIds(cliqdata::BayesTreeNodeData)::Vector{Symbol} = cliqdata.potentials
getCliqFactorIds(cliq::TreeClique)::Vector{Symbol} = getCliqFactorIds(getCliqueData(cliq))
"""
$SIGNATURES
Get all `cliq` variable ids`::Symbol`.
Related
getCliqVarIdsAll, getCliqFactorIdsAll, getCliqVarsWithFrontalNeighbors
"""
function getCliqAllVarIds(cliq::TreeClique)::Vector{Symbol}
frtl = getCliqFrontalVarIds(cliq)
cond = getCliqSeparatorVarIds(cliq)
return union(frtl, cond)
end
"""
$SIGNATURES
Return all variables (including frontal factor neighbors).
Dev Notes
- TODO needs to be refactored and optimized.
Related
getCliqAllVarIds
"""
function getCliqVarsWithFrontalNeighbors(
fgl::G,
cliq::TreeClique;
solvable::Int = 1,
) where {G <: AbstractDFG}
#
frtl = getCliqFrontalVarIds(cliq)
cond = getCliqSeparatorVarIds(cliq)
syms = Symbol[]
union!(syms, Symbol.(frtl))
union!(syms, Symbol.(cond))
# TODO Can we trust factors are frontal connected?
ffcs = union(map(x -> ls(fgl, x; solvable = solvable), frtl)...)
# @show ffcs = getCliqueData(cliq).potentials
neig = union(map(x -> ls(fgl, x; solvable = solvable), ffcs)...)
union!(syms, Symbol.(neig))
return syms
end
"""
$SIGNATURES
Get all `cliq` variable ids`::Symbol`.
Related
getCliqAllVarIds, getCliqFactorIdsAll
"""
getCliqVarIdsAll(cliq::TreeClique)::Vector{Symbol} = getCliqAllVarIds(cliq::TreeClique)
"""
$SIGNATURES
Get all `cliq` factor ids`::Symbol`.
(The `getCliqFactors` alias is DEPRECATED, use `getCliqFactorIdsAll` instead.)
Related
getCliqVarIdsAll, getCliqFactors
"""
getCliqFactorIdsAll(cliqd::BayesTreeNodeData) = cliqd.potentials
getCliqFactorIdsAll(cliq::TreeClique) = getCliqFactorIdsAll(getCliqueData(cliq))
function getCliqFactorIdsAll(treel::AbstractBayesTree, frtl::Symbol)
return getCliqFactorIdsAll(getClique(treel, frtl))
end
const getCliqFactors = getCliqFactorIdsAll
"""
$SIGNATURES
Return the number of factors associated with each variable in `cliq`.
"""
getCliqNumAssocFactorsPerVar(cliq::TreeClique)::Vector{Int} =
sum(getCliqAssocMat(cliq); dims = 1)[:]
"""
$SIGNATURES
Get variable ids`::Symbol` with prior factors associated with this `cliq`.
Notes:
- does not include any singleton messages from upward or downward message passing.
"""
function getCliqVarIdsPriors(
cliq::TreeClique,
allids::Vector{Symbol} = getCliqAllVarIds(cliq),
partials::Bool = true,
)
# get ids with prior factors associated with this cliq
amat = getCliqAssocMat(cliq)
prfcts = sum(amat; dims = 2) .== 1
# when `partials=true`, restrict the prior mask to partial potential factors
!partials ? nothing : (prfcts .&= getCliqueData(cliq).partialpotential)
# return variable ids in `mask`
mask = sum(amat[prfcts[:], :]; dims = 1)[:] .> 0
return allids[mask]::Vector{Symbol}
end
"""
$SIGNATURES
Get `cliq` variable IDs with singleton factors -- i.e. both in clique priors and up messages.
"""
function getCliqVarSingletons(
cliq::TreeClique,
allids::Vector{Symbol} = getCliqAllVarIds(cliq),
partials::Bool = true,
)
# get incoming upward messages (known singletons)
mask = sum(getCliqMsgMat(cliq); dims = 1)[:] .>= 1
upmsgids = allids[mask]
# get ids with prior factors associated with this cliq
prids = getCliqVarIdsPriors(cliq, getCliqAllVarIds(cliq), partials)
# return union of both lists
return union(upmsgids, prids)::Vector{Symbol}
end
"""
$SIGNATURES
Compute and store each clique's subgraph association matrices.
"""
function compCliqAssocMatrices!(
dfg::G,
bt::AbstractBayesTree,
cliq::TreeClique,
) where {G <: AbstractDFG}
frtl = getCliqFrontalVarIds(cliq)
cond = getCliqSeparatorVarIds(cliq)
inmsgIDs = collectSeparators(bt, cliq)
potIDs = cliqPotentialIDs(cliq)
# Construct associations matrix here
# matrix has variables are columns, and messages/constraints as rows
cols = [frtl; cond]
getCliqueData(cliq).inmsgIDs = inmsgIDs
getCliqueData(cliq).potIDs = potIDs
@debug "Building cliqAssocMat" cliq
@debug "Building cliqAssocMat" cliq.id string(inmsgIDs) string(potIDs)
cliqAssocMat = Array{Bool, 2}(undef, length(potIDs), length(cols))
cliqMsgMat = Array{Bool, 2}(undef, length(inmsgIDs), length(cols))
fill!(cliqAssocMat, false)
fill!(cliqMsgMat, false)
for j = 1:length(cols)
for i = 1:length(inmsgIDs)
if cols[j] == inmsgIDs[i]
cliqMsgMat[i, j] = true
end
end
for i = 1:length(potIDs)
idfct = getCliqueData(cliq).potentials[i]
if idfct == potIDs[i] # sanity check on clique potentials ordering
# TODO int and symbol compare is no good
for vertidx in getVariableOrder(DFG.getFactor(dfg, idfct))
if vertidx == cols[j]
cliqAssocMat[i, j] = true
end
end
else
@error("compCliqAssocMatrices! -- potential ID ordering was lost")
end
end
end
@debug "Final cliqAssocMat" cliq.id cliqAssocMat
getCliqueData(cliq).cliqAssocMat = cliqAssocMat
getCliqueData(cliq).cliqMsgMat = cliqMsgMat
return nothing
end
function countSkips(bt::AbstractBayesTree)
skps = 0
for cliq in getCliques(bt)
m = getCliqMat(cliq[2])
mi = map(Int, m)
skps += sum(map(Int, sum(mi; dims = 1) .== 1))
end
return skps
end
function skipThroughMsgsIDs(cliq::TreeClique)
cliqdata = getCliqueData(cliq)
numfrtl1 = length(cliqdata.frontalIDs) + 1
condAssocMat = cliqdata.cliqAssocMat[:, numfrtl1:end]
condMsgMat = cliqdata.cliqMsgMat[:, numfrtl1:end]
mat = [condAssocMat; condMsgMat]
mab = sum(map(Int, mat); dims = 1) .== 1
mabM = sum(map(Int, condMsgMat); dims = 1) .== 1
mab = mab .& mabM
# rang = 1:size(condMsgMat,2)
msgidx = cliqdata.separatorIDs[vec(collect(mab))]
return msgidx
end
function directPriorMsgIDs(cliq::TreeClique)
frtl = getCliqueData(cliq).frontalIDs
sepr = getCliqueData(cliq).separatorIDs
cols = [frtl; sepr]
mat = getCliqMat(cliq; showmsg = true)
singr = sum(map(Int, mat); dims = 2) .== 1
rerows = collect(1:length(singr))
b = vec(collect(singr))
rerows2 = rerows[b]
sumsrAc = sum(map(Int, mat[rerows2, :]); dims = 1)
sumc = sum(map(Int, mat); dims = 1)
pmSkipCols = (sumsrAc - sumc) .== 0
return cols[vec(collect(pmSkipCols))]
end
function directFrtlMsgIDs(cliq::TreeClique)
numfrtl = length(getCliqueData(cliq).frontalIDs)
frntAssocMat = getCliqueData(cliq).cliqAssocMat[:, 1:numfrtl]
frtlMsgMat = getCliqueData(cliq).cliqMsgMat[:, 1:numfrtl]
mat = [frntAssocMat; frtlMsgMat]
mab = sum(map(Int, mat); dims = 1) .== 1
mabM = sum(map(Int, frtlMsgMat); dims = 1) .== 1
mab = mab .& mabM
return getCliqueData(cliq).frontalIDs[vec(collect(mab))]
end
function directAssignmentIDs(cliq::TreeClique)
# NOTE -- old version been included in iterated variable stack
assocMat = getCliqueData(cliq).cliqAssocMat
msgMat = getCliqueData(cliq).cliqMsgMat
mat = [assocMat; msgMat]
mab = sum(map(Int, mat); dims = 1) .== 1
mabA = sum(map(Int, assocMat); dims = 1) .== 1
mab = mab .& mabA
# TODO -- use proper accessor methods
frtl = getCliqueData(cliq).frontalIDs
sepr = getCliqueData(cliq).separatorIDs
cols = [frtl; sepr]
return cols[vec(collect(mab))]
# also calculate which are conditionals
end
function mcmcIterationIDs(cliq::TreeClique)
@debug "mcmcIterationIDs\n" cliq.id getCliqFrontalVarIds(cliq) getCliqSeparatorVarIds(
cliq,
)
mat = getCliqMat(cliq)
@debug "getCliqMat" mat
# assocMat = getCliqueData(cliq).cliqAssocMat
# msgMat = getCliqueData(cliq).cliqMsgMat
# mat = [assocMat;msgMat];
if sum(sum(map(Int, mat); dims = 1)) == 0
error("mcmcIterationIDs -- unaccounted variables")
else
nothing
end
mab = 1 .< sum(map(Int, mat); dims = 1)
cols = getCliqAllVarIds(cliq)
# must also include "direct variables" connected through projection only
directvars = directAssignmentIDs(cliq)
usset = union(directvars, cols[vec(collect(mab))])
# NOTE -- fix direct vs itervar issue, DirectVarIDs against Iters should also Iter
# NOTE -- using direct then mcmcIter ordering to prioritize non-msg vars first
return setdiff(usset, getCliqueData(cliq).directPriorMsgIDs)
end
function getCliqMatVarIdx(cliq::TreeClique, varid::Symbol, allids = getCliqAllVarIds(cliq))
len = length(allids)
return [1:len;][allids .== varid][1]
end
"""
$SIGNATURES
Determine and return order list of variable ids required for minibatch Gibbs iteration inside `cliq`.
Notes
- Singleton factors (priors and up messages) back of the list
- least number of associated factor variables earlier in list
- Same as getCliqVarSolveOrderUp
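Example (hypothetical usage sketch; assumes an existing `tree` built from some `fg`)
```julia
cliq = getClique(tree, 1)
iterOrder = mcmcIterationIdsOrdered(cliq)  # non-singleton variables first, singletons last
```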
"""
function mcmcIterationIdsOrdered(cliq::TreeClique)
# get unordered iter list
alliter = mcmcIterationIDs(cliq)
# get all singletons
allsings = getCliqVarSingletons(cliq)
singletonvars = intersect(alliter, allsings)
# get all non-singleton iters
nonsinglvars = setdiff(alliter, singletonvars)
# sort nonsingletons ascending number of factors
mat = getCliqMat(cliq)
lenfcts = sum(mat; dims = 1)
nonslen = zeros(length(nonsinglvars))
for i = 1:length(nonsinglvars)
varid = nonsinglvars[i]
varidx = getCliqMatVarIdx(cliq, varid)
nonslen[i] = lenfcts[varidx]
end
p = sortperm(nonslen)
ascnons = nonsinglvars[p]
# sort singleton vars ascending number of factors
singslen = zeros(length(singletonvars))
for i = 1:length(singletonvars)
varid = singletonvars[i]
varidx = getCliqMatVarIdx(cliq, varid)
singslen[i] = lenfcts[varidx]
end
p = sortperm(singslen)
ascsing = singletonvars[p]
return [ascnons; ascsing]
end
"""
$SIGNATURES
Determine and return order list of variable ids required for minibatch Gibbs iteration inside `cliq`.
Notes
- Singleton factors (priors and up messages) back of the list
- least number of associated factor variables earlier in list
- Same as mcmcIterationIdsOrdered
"""
function getCliqVarSolveOrderUp(cliq::TreeClique)
return mcmcIterationIdsOrdered(cliq)
end
"""
$(SIGNATURES)
Prepare the variable IDs for nested clique Gibbs mini-batch calculations, by assembling these clique data fields:
- `directPriorMsgIDs`
- `directvarIDs`
- `itervarIDs`
- `msgskipIDs`
- `directFrtlMsgIDs`
"""
function setCliqMCIDs!(cliq::TreeClique)
getCliqueData(cliq).directPriorMsgIDs = directPriorMsgIDs(cliq)
# NOTE -- directvarIDs are combined into itervarIDs
getCliqueData(cliq).directvarIDs = directAssignmentIDs(cliq)
# TODO find itervarIDs that have upward child singleton messages and update them last in iter list
getCliqueData(cliq).itervarIDs = mcmcIterationIdsOrdered(cliq)
getCliqueData(cliq).msgskipIDs = skipThroughMsgsIDs(cliq)
getCliqueData(cliq).directFrtlMsgIDs = directFrtlMsgIDs(cliq)
# TODO add initialization sequence var id list too
return nothing
end
# post order tree traversal and build potential functions
function buildCliquePotentials(
dfg::G,
bt::AbstractBayesTree,
cliq::TreeClique;
solvable::Int = 1,
) where {G <: AbstractDFG}
for child in childCliqs(bt, cliq)#tree
buildCliquePotentials(dfg, bt, child)
end
@debug "Get potentials $(getLabel(cliq))"
setCliqPotentials!(dfg, bt, cliq; solvable = solvable)
compCliqAssocMatrices!(dfg, bt, cliq)
setCliqMCIDs!(cliq)
return nothing
end
"""
$(SIGNATURES)
Return a vector of child cliques to `cliq`.
"""
function childCliqs(treel::MetaBayesTree, cliq::TreeClique)
cliqKey = treel.bt[:cliqId][cliq.id]
childcliqs = TreeClique[]
for cIdx in MetaGraphs.outneighbors(treel.bt, cliqKey)
push!(childcliqs, get_prop(treel.bt, cIdx, :clique))
end
return childcliqs
end
"""
$(SIGNATURES)
Return a vector of child cliques to `cliq`.
"""
getChildren(treel::AbstractBayesTree, frtsym::Symbol) = childCliqs(treel, frtsym)
getChildren(treel::AbstractBayesTree, cliq::TreeClique) = childCliqs(treel, cliq)
"""
$SIGNATURES
Get edges to children cliques
"""
function getEdgesChildren(tree::MetaBayesTree, cliqkey::Int)
return [
MetaGraphs.Edge(cliqkey, chkey) for chkey in MetaGraphs.outneighbors(tree.bt, cliqkey)
]
end
function getEdgesChildren(tree::MetaBayesTree, cliq::TreeClique)
return getEdgesChildren(tree, tree.bt[:cliqId][cliq.id])
end
"""
$SIGNATURES
Get edges to parent clique
"""
function getEdgesParent(tree::MetaBayesTree, cliqkey::Int)
return [
MetaGraphs.Edge(pkey, cliqkey) for pkey in MetaGraphs.inneighbors(tree.bt, cliqkey)
]
end
function getEdgesParent(tree::MetaBayesTree, cliq::TreeClique)
return getEdgesParent(tree, tree.bt[:cliqId][cliq.id])
end
"""
$SIGNATURES
Return a vector of all siblings of a clique; by default (`inclusive=false`) the calling `cliq` itself is excluded.
"""
function getCliqSiblings(
treel::AbstractBayesTree,
cliq::TreeClique,
inclusive::Bool = false,
)::Vector{TreeClique}
prnt = getParent(treel, cliq)
if length(prnt) > 0
allch = getChildren(treel, prnt[1])
end
if inclusive
return allch
end
sibs = TreeClique[]
for ch in allch
if ch.id != cliq.id
push!(sibs, ch)
end
end
return sibs
end
"""
$(SIGNATURES)
Return `cliq`'s parent clique.
"""
function parentCliq(treel::MetaBayesTree, cliq::TreeClique)
cliqKey = treel.bt[:cliqId][cliq.id]
parentcliqs = TreeClique[]
for pIdx in MetaGraphs.inneighbors(treel.bt, cliqKey)
push!(parentcliqs, get_prop(treel.bt, pIdx, :clique))
end
return parentcliqs
end
"""
$(SIGNATURES)
Return number of cliques in a tree.
"""
getNumCliqs(tree::MetaBayesTree) = MetaGraphs.nv(tree.bt)
"""
$(SIGNATURES)
Return `cliq`'s parent clique.
"""
function getParent(treel::AbstractBayesTree, afrontal::Union{Symbol, TreeClique})
return parentCliq(treel, afrontal)
end
"""
$SIGNATURES
Return one symbol (a frontal variable) from each clique in the `::BayesTree`.
Notes
- Frontal variables only occur once in a clique per tree, and therefore serve as unique identifiers.
Related:
whichCliq, printCliqHistorySummary
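Example (hypothetical usage sketch; assumes an existing `fg` and `tree`)
```julia
frontalSyms = getTreeAllFrontalSyms(fg, tree)  # one frontal variable Symbol per clique
```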
"""
function getTreeAllFrontalSyms(::AbstractDFG, tree::AbstractBayesTree)
cliqs = getCliques(tree)
syms = Vector{Symbol}(undef, length(cliqs))
for (id, cliq) in cliqs
syms[id] = getCliqFrontalVarIds(cliq)[1]
end
return syms
end
"""
$SIGNATURES
Return the variable elimination order stored in a tree object.
"""
getEliminationOrder(treel::AbstractBayesTree) = treel.eliminationOrder
"""
$SIGNATURES
EXPERIMENTAL, Save a Bayes (Junction) tree object to file.
Notes
- Converts and saves to BSON format a set of `PackedBayesTreeNodeData` objects.
- IIF issue #481
Related
[`loadTree`](@ref), [`saveDFG`](@ref), [`loadDFG`](@ref), `BSON.@save`, `BSON.@load`
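Example (hypothetical usage sketch; assumes an existing `tree::AbstractBayesTree`)
```julia
filepath = saveTree(tree, joinpath("/tmp", "caesar", "savetree.bson"))
```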
"""
function saveTree(
treel::AbstractBayesTree,
filepath = joinpath("/tmp", "caesar", "savetree.bson"),
)
#
savetree = deepcopy(treel)
for i = 1:length(getCliques(savetree))
if getCliqueData(savetree, i) isa BayesTreeNodeData
setCliqueData!(
getClique(savetree, i),
convert(PackedBayesTreeNodeData, getCliqueData(savetree, i)),
)
end
end
BSON.@save(filepath, savetree)
return filepath
end
function saveTree(
treeArr::Vector{T},
filepath = joinpath("/tmp", "caesar", "savetrees.bson"),
) where {T <: AbstractBayesTree}
#
savetree = deepcopy(treeArr)
for savtre in savetree, i = 1:length(getCliques(savtre))
if getCliqueData(savtre, i) isa BayesTreeNodeData
setCliqueData!(
getClique(savtre, i),
convert(PackedBayesTreeNodeData, getCliqueData(savtre, i)),
)
end
end
BSON.@save(filepath, savetree)
return filepath
end
"""
$SIGNATURES
EXPERIMENTAL, Load a Bayes (Junction) tree object from file.
Notes
- Loads and converts from BSON format a set of `PackedBayesTreeNodeData` objects.
- IIF issue #481
Related
[`saveTree`](@ref), [`saveDFG`](@ref), [`loadDFG`](@ref), `BSON.@save`, `BSON.@load`
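Example (hypothetical usage sketch; loads a tree previously stored with `saveTree`)
```julia
tree = loadTree(joinpath("/tmp", "caesar", "savetree.bson"))
```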
"""
function loadTree(filepath = joinpath("/tmp", "caesar", "savetree.bson"))
data = BSON.@load(filepath, savetree)
# convert back to the original type, which could not be serialized by BSON
if savetree isa Vector
for savtre in savetree, i = 1:length(getCliques(savtre))
if getCliqueData(savtre, i) isa PackedBayesTreeNodeData
setCliqueData!(
getClique(savtre, i),
convert(BayesTreeNodeData, getCliqueData(savtre, i)),
)
end
end
else
for i = 1:length(getCliques(savetree))
if getCliqueData(savetree, i) isa PackedBayesTreeNodeData
setCliqueData!(
getClique(savetree, i),
convert(BayesTreeNodeData, getCliqueData(savetree, i)),
)
end
end
end
# return loaded and converted tree
return savetree
end
"""
$SIGNATURES
Return Tuple of number cliques (Marginalized, Reused).
"""
function calcCliquesRecycled(tree::AbstractBayesTree)
numMarg = 0
numReused = 0
numBoth = 0
for (key, cliq) in tree.cliques
numReused += getCliqueData(cliq).isCliqReused ? 1 : 0
numMarg += getCliqueData(cliq).allmarginalized ? 1 : 0
numBoth +=
getCliqueData(cliq).allmarginalized && getCliqueData(cliq).isCliqReused ? 1 : 0
end
return length(tree.cliques), numMarg, numReused, numBoth
end
## Tree Reuse
"""
$SIGNATURES
Special internal function that tries to return the matching clique if successfully identified in `othertree::AbstractBayesTree`,
based on contents of `seeksSimilar::BayesTreeNodeData`.
Notes
- Used to identify and skip similar cliques (i.e. recycle computations)
"""
function attemptTreeSimilarClique(
othertree::AbstractBayesTree,
seeksSimilar::BayesTreeNodeData,
)
#
# inner convenience function for returning empty clique
function EMPTYCLIQ()
clq = TreeClique(-1)
setLabel!(clq, "")
setCliqueData!(clq, BayesTreeNodeData())
return clq
end
# does the other clique even exist?
seekFrontals = getCliqFrontalVarIds(seeksSimilar)
if !hasClique(othertree, seekFrontals[1])
return EMPTYCLIQ()
end
# do the cliques share the same frontals?
otherCliq = getClique(othertree, seekFrontals[1])
otherFrontals = getCliqFrontalVarIds(otherCliq)
commonFrontals = intersect(seekFrontals, otherFrontals)
if length(commonFrontals) != length(seekFrontals) ||
length(commonFrontals) != length(otherFrontals)
return EMPTYCLIQ()
end
# do the cliques share the same separator variables?
seekSeparator = getCliqSeparatorVarIds(seeksSimilar)
otherSeparator = getCliqSeparatorVarIds(otherCliq)
commonSep = intersect(seekSeparator, otherSeparator)
if length(commonSep) != length(seekSeparator) ||
length(commonSep) != length(otherSeparator)
return EMPTYCLIQ()
end
# do the cliques use the same factors (potentials)
seekPotentials = getCliqFactorIds(seeksSimilar)
otherFactors = getCliqFactorIds(otherCliq)
commonFactors = intersect(seekPotentials, otherFactors)
if length(commonFactors) != length(seekPotentials) ||
length(commonFactors) != length(otherFactors)
return EMPTYCLIQ()
end
# lets assume they are the same
return otherCliq::TreeClique
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 4037 |
## ================================================================================================
## Experimental specialized dispatch for Mixture
## ================================================================================================
# To sort out how to dispatch on specialized functions.
# related to #931 and #1069
struct MaxMixture <: AbstractMaxMixtureSolver
p::Vector{Float64}
# the chosen component to be used for the optimization
choice::Base.RefValue{Int}
end
function getMeasurementParametric(s::Mixture{N, F, S, T}) where {N, F, S, T}
meas = map(c -> getMeasurementParametric(c)[1], values(s.components))
iΣ = map(c -> getMeasurementParametric(c)[2], values(s.components))
return meas, iΣ
end
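# Example (hedged sketch; a hypothetical two-component mixture -- constructor details may differ):
# mix = Mixture(LinearRelative, (Normal(0.0, 0.1), Normal(1.0, 1.0)), [0.5; 0.5])
# meas, iΣ = getMeasurementParametric(mix)  # tuples of component means and info matrices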
function _calcFactorMahalanobis(cfp, meas, iΣ, variables...)
res = cfp.calcfactor!(meas, variables...)
r = res' * iΣ * res
return r
end
# DEV NOTE: function with other options including select once and use
# function (cfp::CalcFactorMahalanobis{<:CalcFactor, MaxMixture})(variables...)
# if cfp.specialAlg.choice[] == 0
# #calculate all mixture options
# r = [_calcFactorMahalanobis(cfp, cfp.meas[i], cfp.iΣ[i], variables...) for i = 1:length(cfp.meas)]
# p = cfp.specialAlg.p
# k = size(cfp.iΣ[1], 2)
# # α = 1 ./ sqrt.(2pi .* k .* det.(inv.(cfp.iΣ)))
# α = sqrt.(det.(cfp.iΣ) ./ ((2pi)^k))
# # mm, at = findmax(α .* p .* exp.(-0.5 .* r))
# # mm = sum(α .* p .* exp.(-0.5 .* r) )
# mm, at = findmin( 0.5 .* r .- log.(α .* p))
# # mm = -log(sum(α .* p .* exp.(-0.5 .* r) ))
# # return mm + maximum(log.(α .* p))
# cfp.specialAlg.choice[] = at
# return r[at]
# else
# at = cfp.specialAlg.choice[]
# return _calcFactorMahalanobis(cfp, cfp.meas[at], cfp.iΣ[at], variables...)
# end
# end
# function (cfp::CalcFactorMahalanobis{FT, N, C, MEAS, D, L, MaxMixture})(variables...) where {FT, N, C, MEAS, D, L}
# r = [
# _calcFactorMahalanobis(cfp, cfp.meas[i], cfp.iΣ[i], variables...) for
# i = 1:length(cfp.meas)
# ]
# p = cfp.specialAlg.p
# k = size(cfp.iΣ[1], 2)
# # α = 1 ./ sqrt.(2pi .* k .* det.(inv.(cfp.iΣ)))
# α = sqrt.(det.(cfp.iΣ) ./ ((2pi)^k))
# mm, at = findmin(r .- log.(α .* p))
# # mm = -log(sum(α .* p .* exp.(-0.5 .* r) ))
# return mm + maximum(log.(α .* p))
# end
## ================================================================================================
## Experimental specialised dispatch for multihypo and nullhypo
## ================================================================================================
#TODO better dispatch
struct MaxMultihypo <: AbstractMaxMixtureSolver
multihypo::Vector{Float64}
end
struct MaxNullhypo <: AbstractMaxMixtureSolver
nullhypo::Float64
end
# function (cfp::CalcFactorMahalanobis{FT, N, C, MEAS, D, L, Nothing})(X1, L1, L2) where {FT, N, C, MEAS, D, L}
# mh = cfp.specialAlg.multihypo
# @assert length(mh) == 3 "multihypo $mh not supported with parametric, length should be 3"
# @assert mh[1] == 0 "multihypo $mh not supported with parametric, first should be 0"
# #calculate both multihypo options
# r1 = cfp(X1, L1)
# r2 = cfp(X1, L2)
# r = [r1, r2]
# # hacky multihypo to start of with
# mm, at = findmin(r .* (1 .- mh[2:end]))
# nat = at == 1 ? 1 : 2
# k = length(X1) * one(r1) * 1e-3
# return r[at] + r[nat] * k
# end
# function (cfp::CalcFactorMahalanobis{FT, N, C, MEAS, D, L, MaxNullhypo})(X1, X2) where {FT, N, C, MEAS, D, L}
# nh = cfp.specialAlg.nullhypo
# @assert nh > 0 "nullhypo $nh not as expected"
# #calculate factor residual
# res = cfp(cfp.meas[1], X1, X2)
# r1 = res' * cfp.iΣ * res
# # compare to uniform nullhypo
# r2 = length(res) * one(r1)
# r = [r1, r2]
# mm, at = findmin(r .* [nh, (1 - nh)])
# residual = at == 1 ? r1 : r1 * 1e-3
# return residual
# # rand residual option
# # idx = rand(Categorical([(1-nh), nh]))
# # nh == 0.05 && cfp.varOrder==[:x1,:l1] && println("$idx -> $(r1.value), $r2")
# # return r[idx]
# end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 15475 |
# TODO deprecate testshuffle
function _checkErrorCCWNumerics(
ccwl::Union{CommonConvWrapper{F}, CommonConvWrapper{Mixture{N_, F, S, T}}},
testshuffle::Bool = false,
) where {N_, F <: AbstractRelativeMinimize, S, T}
return nothing
end
function _checkErrorCCWNumerics(
ccwl::Union{CommonConvWrapper{F}, CommonConvWrapper{Mixture{N_, F, S, T}}},
testshuffle::Bool = false,
) where {N_, F <: AbstractManifoldMinimize, S, T}
return nothing
end
function _perturbIfNecessary(
fcttype::Union{F, <:Mixture{N_, F, S, T}},
len::Int = 1,
perturbation::Real = 1e-10,
) where {N_, F <: AbstractRelativeMinimize, S, T}
return 0
end
function _perturbIfNecessary(
fcttype::Union{F, <:Mixture{N_, F, S, T}},
len::Int = 1,
perturbation::Real = 1e-10,
) where {N_, F <: AbstractManifoldMinimize, S, T}
return 0
end
#
# internal use only, and selected out from approxDeconv functions
function _solveLambdaNumeric(
fcttype::AbstractPrior,
objResX::Function,
residual::AbstractVector{<:Real},
u0::AbstractVector{<:Real},
islen1::Bool = false;
perturb::Real = 1e-10,
)
return u0
end
#
function _solveLambdaNumeric(
fcttype::Union{F, <:Mixture{N_, F, S, T}},
objResX::Function,
residual::AbstractVector{<:Real},
u0::AbstractVector{<:Real},
islen1::Bool = false,
) where {N_, F <: AbstractRelativeMinimize, S, T}
# retries::Int=3 )
#
# wrt #467 allow residual to be standardized for Roots and Minimize and Parametric cases.
r = if islen1
Optim.optimize((x) -> (residual .= objResX(x); sum(residual .^ 2)), u0, Optim.BFGS())
else
Optim.optimize((x) -> (residual .= objResX(x); sum(residual .^ 2)), u0, Optim.Options(;iterations=1000))
end
if !Optim.converged(r)
@warn "Optim did not converge:" r maxlog=10
end
#
return r.minimizer
end
# struct OptimCalcConv end
# CalcFactorNormSq cost function for an input in coordinates as used by Optim.jl
function (hypoCalcFactor::CalcFactorNormSq)(M::AbstractManifold, Xc::AbstractVector)
# hypoCalcFactor.manifold is the factor's manifold, not the variable's manifold that is needed here
ϵ = getPointIdentity(M)
X = get_vector(M, ϵ, SVector(Xc), DefaultOrthogonalBasis())
p = exp(M, ϵ, X)
return hypoCalcFactor(CalcConv, p)
end
struct ManoptCalcConv end
function (hypoCalcFactor::CalcFactorNormSq)(::Type{ManoptCalcConv}, M::AbstractManifold, p)
return hypoCalcFactor(CalcConv, p)
end
function _solveLambdaNumeric(
fcttype::Union{F, <:Mixture{N_, F, S, T}},
hypoCalcFactor,
residual::AbstractVector{<:Real},
u0,#::AbstractVector{<:Real},
variableType::InferenceVariable,
islen1::Bool = false,
) where {N_, F <: AbstractManifoldMinimize, S, T}
#
M = getManifold(variableType)
# the variable is a manifold point, we are working on the tangent plane in optim for now.
#
# TODO this is not general to all manifolds, should work for Lie groups.
ϵ = getPointIdentity(variableType)
X0c = zero(MVector{getDimension(M),Float64})
X0c .= vee(M, u0, log(M, ϵ, u0))
alg = islen1 ? Optim.BFGS() : Optim.NelderMead()
# WIP extremely slow, but runs; `mean` in Manopt is the bottleneck
# just to show how we can now swap to Manopt
if false
r = Manopt.NelderMead(
M,
(M, x)->hypoCalcFactor(ManoptCalcConv, M, x),
NelderMeadSimplex(M, u0, DefaultOrthogonalBasis());
retraction_method = ExponentialRetraction()
)
return r
end
r = Optim.optimize(
x->hypoCalcFactor(M, x),
X0c,
alg
)
if !Optim.converged(r)
# TODO find good way for a solve to store diagnostics about number of failed converges etc.
@warn "Optim did not converge (maxlog=10):" r maxlog=10
end
return exp(M, ϵ, hat(M, ϵ, r.minimizer))
end
## deconvolution with calcfactor wip
struct CalcDeconv end
function (cf::CalcFactorNormSq)(::Type{CalcDeconv}, meas)
res = cf(meas, map(vvh -> _getindex_anyn(vvh, cf._sampleIdx), cf._legacyParams)...)
return sum(x->x^2, res)
end
# for deconv with the measurement a tangent vector, can dispatch for other measurement types.
function (hypoCalcFactor::CalcFactorNormSq)(::Type{CalcDeconv}, M::AbstractManifold, Xc::AbstractVector)
ϵ = getPointIdentity(M)
X = get_vector(M, ϵ, Xc, DefaultOrthogonalBasis())
return hypoCalcFactor(CalcDeconv, X)
end
# NOTE Optim.jl version that assumes measurement is on the tangent
# TODO test / dev for n-ary factor deconv
# TODO Consolidate with _solveLambdaNumeric, see #1374
function _solveLambdaNumericMeas(
fcttype::Union{F, <:Mixture{N_, F, S, T}},
hypoCalcFactor,
X0,#::AbstractVector{<:Real},
islen1::Bool = false,
) where {N_, F <: AbstractManifoldMinimize, S, T}
#
M = getManifold(fcttype)
ϵ = getPointIdentity(M)
X0c = zeros(manifold_dimension(M))
X0c .= vee(M, ϵ, X0)
alg = islen1 ? Optim.BFGS() : Optim.NelderMead()
r = Optim.optimize(
x->hypoCalcFactor(CalcDeconv, M, x),
X0c,
alg
)
if !Optim.converged(r)
@debug "Optim did not converge:" r
end
return hat(M, ϵ, r.minimizer)
end
## ================================================================================================
## Heavy dispatch for all AbstractFactor / Mixture cases below
## ================================================================================================
# internal function to dispatch view on either vector or matrix, rows are dims and samples are columns
_getindextuple(tup::Tuple, ind1::Int) = [getindex(t, ind1) for t in tup]
_getusrfnc(ccwl::CommonConvWrapper) = ccwl.usrfnc!
_getusrfnc(ccwl::CommonConvWrapper{<:Mixture}) = ccwl.usrfnc!.mechanics
function _buildCalcFactor(
ccwl::CommonConvWrapper,
smpid,
varParams,
activehypo,
_slack = nothing,
)
#
# FIXME, make thread safe (cache)
# activevariables = view(ccwl.fullvariables, activehypo)
activevariables = ccwl.fullvariables[activehypo]
solveforidx = findfirst(==(ccwl.varidx[]), activehypo)
return CalcFactorNormSq(
_getusrfnc(ccwl), #factor
smpid, #_sampleIdx
varParams, #_legacyParams
true, #_allowThreads
ccwl.dummyCache, #_cache
tuple(activevariables...), #fullvariables
solveforidx, #solvefor
getManifold(ccwl), #manifold
ccwl.measurement,
_slack,
)
end
"""
$SIGNATURES
Internal function to build lambda pre-objective function for finding factor residuals.
DevNotes
- TODO refactor relationship and common fields between (CCW, FMd, CPT, CalcFactor)
"""
function _buildCalcFactorLambdaSample(
ccwl::CommonConvWrapper,
smpid::Integer,
measurement_, # since [email protected], don't use default ccwl.measurement here, must pass from caller
_slack = nothing,
)
#
# TODO from obsolete _view:
# Should be replaced with ccw.hypoParams::Tuple(hypo1, hypo2,...), made at construction and allows direct hypo lookup
# DevNotes, also see new `hyporecipe` approach (towards consolidation CCW CPT FMd CF...)
# build a view to the decision variable memory
varValsHypo = ccwl.varValsAll[][ccwl.hyporecipe.activehypo]
# get the operational CalcFactor object
cf = _buildCalcFactor(ccwl, smpid, varValsHypo, ccwl.hyporecipe.activehypo)
# reset the residual vector
fill!(ccwl.res, 0.0) # Roots->xDim | Minimize->zDim
# build static lambda
unrollHypo! = if _slack === nothing
# DESIGN DECISION WAS MADE THAT CALCFACTOR CALLS DO NOT DO INPLACE CHANGES TO ARGUMENTS, INSTEAD USING ISBITSTYPEs!!!!!!!!!
# 5.366727 seconds (17.48 M allocations: 893.768 MiB, 8.76% gc time)
# () -> (cf::CalcFactorNormSq)(measurement_, smpid, varValsHypo)
# 6.075632 seconds (19.73 M allocations: 919.118 MiB, 9.14% gc time)
() -> cf(measurement_[smpid], map(vvh -> _getindex_anyn(vvh, smpid), varValsHypo)...)
else
# slack is used to shift the residual away from the natural "zero" tension position of a factor,
# this is useful when calculating factor gradients at a variety of param locations resulting in "non-zero slack" of the residual.
# see `IIF.calcFactorResidualTemporary`
# NOTE this minus operation assumes _slack is either coordinate or tangent vector element (not a manifold or group element)
() ->
cf(measurement_[smpid], map(vvh -> _getindex_anyn(vvh, smpid), varValsHypo)...) .- _slack
end
return unrollHypo!
end
"""
$(SIGNATURES)
Solve free variable x by root finding residual function `fgr.usrfnc(res, x)`. This is the
penultimate step before calling numerical operations to move actual estimates, which is
done by an internally created lambda function.
Notes
- Assumes `cpt_.p` is already set to desired X decision variable dimensions and size.
- Assumes only `ccw.particleidx` will be solved for
- small random (off-manifold) perturbation used to prevent trivial solver cases, div by 0 etc.
- perturb is necessary for NLsolve (obsolete) cases, and smaller than 1e-10 will result in test failure
- Also incorporates the active hypo lookup
DevNotes
- TODO testshuffle is now obsolete, should be removed
- TODO perhaps consolidate perturbation with inflation or nullhypo
"""
function _solveCCWNumeric!(
ccwl::Union{<:CommonConvWrapper{F}, <:CommonConvWrapper{<:Mixture{N_, F, S, T}}},
_slack = nothing;
perturb::Real = 1e-10,
) where {N_, F <: AbstractRelative, S, T}
#
#
# thrid = Threads.threadid()
smpid = ccwl.particleidx[]
# cannot Nelder-Mead on 1dim, partial can be 1dim or more but being conservative.
islen1 = length(ccwl.partialDims) == 1 || ccwl.partial
# islen1 = length(cpt_.X[:, smpid]) == 1 || ccwl.partial
# NOTE the factor residual function will receive as input args a slice from ccwl.varValsAll, hence
# ccwl.varValsAll[][ccwl.varidx[]] and target should point to the same memory; BUT
# remember that during approxConv the graph variable cannot be directly updated and
# a separate deepcopy of the destination (aka target) memory is necessary.
# Chosen solution is to splice together ccwl.varValsAll each time, with destination as
# deepcopy but other input variables are just point to the source variable values directly.
target = if ccwl.partial # FIXME likely type-instability on `typeof(target)`
# view(ccwl.varValsAll[][ccwl.varidx[]][smpid], ccwl.partialDims)
ccwl.varValsAll[][ccwl.varidx[]][smpid][ccwl.partialDims]
else
ccwl.varValsAll[][ccwl.varidx[]][smpid]
end
# build the pre-objective function for this sample's hypothesis selection
unrollHypo! = _buildCalcFactorLambdaSample(
# destVarVals,
ccwl,
smpid,
ccwl.measurement,
_slack,
)
# broadcast updates original view memory location
## using CalcFactor legacy path inside (::CalcFactor)
# _hypoObj = (x) -> (target[] = x; unrollHypo!())
function _hypoObj(x)
copyto!(target, x)
return unrollHypo!()
end
# TODO small off-manifold perturbation is a numerical workaround only, make on-manifold requires RoME.jl #244
# use all element dimensions : ==> 1:ccwl.xDim
# target .+= _perturbIfNecessary(getFactorType(ccwl), length(target), perturb)
sfidx = ccwl.varidx[]
# do the parameter search over defined decision variables using Minimization
X = ccwl.varValsAll[][sfidx][smpid][ccwl.partialDims]
# X = if ccwl.partial # TODO check for type-instability on `X`
# collect(view(ccwl.varValsAll[][sfidx][smpid], ccwl.partialDims))
# else
# ccwl.varValsAll[][sfidx][smpid][ccwl.partialDims]
# end
# # X = destVarVals[smpid]#[ccwl.partialDims]
retval = _solveLambdaNumeric(
getFactorType(ccwl),
_hypoObj,
ccwl.res,
X,
islen1
)
# Check for NaNs
if sum(isnan.(retval)) != 0
@error "$(ccwl.usrfnc!), got NaN, smpid = $(smpid), r=$(retval)\n"
return nothing
end
# insert result back at the correct variable element location
if ccwl.partial
# NOTE use workaround of TranslationGroup for coordinates on partial assignment
# FIXME consolidate to Manopt and upgrade to Riemannian (i.e. incl non-groups)
M = getManifold(ccwl) # TranslationGroup(length(ccwl.varValsAll[][sfidx][smpid]))
src = Vector{typeof(retval)}()
push!(src, retval)
setPointPartial!(M, ccwl.varValsAll[][sfidx], M, src, ccwl.partialDims, smpid, 1, true )
# ccwl.varValsAll[][sfidx][smpid][ccwl.partialDims] .= retval
else
# copyto!(ccwl.varValsAll[sfidx][smpid], retval)
copyto!(ccwl.varValsAll[][sfidx][smpid][ccwl.partialDims], retval)
end
return nothing
end
# brainstorming
# should only be calling a new arg list according to activehypo at start of particle
# Try calling an existing lambda
# sensitive to which hypo of course , see #1024
#
struct CalcConv end
_getindex_anyn(vec, n) = begin
len = length(vec)
# 1:len or any random element in that range
getindex(vec, n <= len ? n : rand(1:len) )
end
# NOTE to future self, this will likely become the cost function for Manopt as:
# function (cf::CalcFactorNormSq)(M::AbstractManifold, x)
# CalcConv is likeley needed for conv vs deconv
function (cf::CalcFactorNormSq)(::Type{CalcConv}, x)
sampleIdx = cf._sampleIdx
varValsHypo = cf._legacyParams
# set the target hypo on the correct sample to free variable x, was target object
varValsHypo[cf.solvefor][sampleIdx] = x
res = cf(cf.measurement[sampleIdx], map(vvh -> _getindex_anyn(vvh, sampleIdx), varValsHypo)...)
res = isnothing(cf.slack) ? res : res .- cf.slack
return sum(x->x^2, res)
end
function _buildHypoCalcFactor(ccwl::CommonConvWrapper, smpid::Integer, _slack=nothing)
# build a view to the decision variable memory
varValsHypo = ccwl.varValsAll[][ccwl.hyporecipe.activehypo]
# create calc factor selected hypo and samples
#TODO lots of allocations, can we refactor to reuse?
cf = _buildCalcFactor(
ccwl, #
smpid, # ends in _sampleIdx
varValsHypo, # ends in _legacyParams
ccwl.hyporecipe.activehypo, # ends in solvefor::Int
_slack,
)
return cf
end
function _solveCCWNumeric!(
ccwl::Union{<:CommonConvWrapper{F}, <:CommonConvWrapper{<:Mixture{N_, F, S, T}}},
_slack = nothing;
perturb::Real = 1e-10,
) where {N_, F <: AbstractManifoldMinimize, S, T}
#
# # FIXME, move this check higher and out of smpid loop
# _checkErrorCCWNumerics(ccwl, testshuffle)
smpid = ccwl.particleidx[]
# cannot Nelder-Mead on 1dim, partial can be 1dim or more but being conservative.
islen1 = length(ccwl.partialDims) == 1 || ccwl.partial
# build the pre-objective function for this sample's hypothesis selection
# SUPER IMPORTANT, this `target` is mem pointer that will be updated by optim library
# target = view(ccwl.varValsAll[][ccwl.varidx[]], smpid)
# SUPER IMPORTANT ON PARTIALS, RESIDUAL FUNCTION MUST DEAL WITH PARTIAL AND WILL GET FULL VARIABLE POINTS REGARDLESS
_hypoCalcFactor = _buildHypoCalcFactor(ccwl, smpid, _slack)
# do the parameter search over defined decision variables using Minimization
sfidx = ccwl.varidx[]
u0 = ccwl.varValsAll[][ccwl.varidx[]][smpid] # starting point for optimization
retval = _solveLambdaNumeric(
getFactorType(ccwl),
_hypoCalcFactor,
ccwl.res,
u0,
getVariableType(ccwl.fullvariables[sfidx]), # only used for getting variable manifold and identity_element
islen1,
)
# TBD Check for NaNs
# NOTE insert result back at the correct variable element location
ccwl.varValsAll[][ccwl.varidx[]][smpid] = retval
return nothing
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 7582 |
# starting to add exports here
global WORKERPOOL = WorkerPool()
"""
$SIGNATURES
For use with `multiproc`; nominal use is a worker pool of all processes numbered 2 and up, but will return the single process `[1;]` if only the first process is available.
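Example (hypothetical usage sketch; assumes worker processes were added beforehand)
```julia
using Distributed
addprocs(2)
setWorkerPool!()  # defaults to all worker processes, i.e. procs 2 and up
getWorkerPool()   # returns the global WorkerPool
```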
"""
function setWorkerPool!(pool::Vector{Int} = 1 < nprocs() ? setdiff(procs(), [1;]) : [1;])
global WORKERPOOL
return WORKERPOOL = WorkerPool(pool)
end
function getWorkerPool()
global WORKERPOOL
return WORKERPOOL
end
## =============================================================================================
# Iterate over variables in clique
## =============================================================================================
"""
$SIGNATURES
Dev Notes
- part of refactoring fmcmc.
- function seems excessive
"""
function compileFMCMessages(
fgl::AbstractDFG,
lbls::Vector{Symbol},
solveKey::Symbol,
logger = ConsoleLogger(),
)
#
d = Dict{Symbol, TreeBelief}()
for vsym in lbls
vari = DFG.getVariable(fgl, vsym)
d[vsym] = TreeBelief(vari, solveKey)
end
return d
end
function doFMCIteration(
fgl::AbstractDFG,
vsym::Symbol,
solveKey::Symbol,
cliq::TreeClique,
fmsgs,
N::Int,
dbg::Bool,
needFreshMeasurements::Bool = true,
logger = ConsoleLogger(),
)
#
vert = DFG.getVariable(fgl, vsym)
if !getSolverData(vert, solveKey).ismargin
# potprod = nothing
dens, ipc = propagateBelief(
fgl,
vsym,
:; # Colon() means use all factors connected to vsym
needFreshMeasurements,
N,
dbg,
logger,
)
if 0 < Npts(dens)
setBelief!(vert, dens, true, ipc)
# setValKDE!(vert, densPts, true, ipc)
# TODO perhaps more debugging inside `propagateBelief`?
end
end
return nothing
end
"""
$(SIGNATURES)
Iterate successive approximations of clique marginal beliefs by means
of the stipulated proposal convolutions and products of the functional objects
for tree clique `cliq`.
"""
function fmcmc!(
fgl::AbstractDFG,
cliq::TreeClique,
fmsgs::Vector{<:LikelihoodMessage},
lbls::Vector{Symbol},
solveKey::Symbol,
N::Int,
MCMCIter::Int,
dbg::Bool = false,
logger = ConsoleLogger(),
multithreaded::Bool = false,
)
#
with_logger(logger) do
@info "---------- successive fnc approx ------------$(getLabel(cliq))"
end
# repeat several iterations of functional Gibbs sampling for fixed point convergence
if length(lbls) == 1
MCMCIter = 1
end
# mcmcdbg = Array{CliqGibbsMC,1}()
# burn-in loop for outer Gibbs
for iter = 1:MCMCIter
# iterate through each of the variables, KL-divergence tolerence would be nice test here
with_logger(logger) do
@info "#$(iter)\t -- "
end
# dbgvals = !dbg ? nothing : CliqGibbsMC([], Symbol[])
needFreshMeasurements = iter == 1 || getSolverParams(fgl).alwaysFreshMeasurements
# outer Gibbs cycle
for vsym in lbls
doFMCIteration(
fgl,
vsym,
solveKey,
cliq,
fmsgs,
N,
dbg,
needFreshMeasurements,
logger,
)
end
# !dbg ? nothing : push!(mcmcdbg, dbgvals)
end
# populate dictionary for return NBPMessage in multiple dispatch
msgdict = compileFMCMessages(fgl, lbls, solveKey, logger)
return msgdict
end
## =============================================================================================
# Up solve
## =============================================================================================
"""
$(SIGNATURES)
Perform computations required for the upward message passing during belief propation on the Bayes (Junction) tree.
This function is usually called as via remote_call for multiprocess dispatch.
Notes
- `fg` factor graph,
- `tree` Bayes tree,
- `cliq` which cliq to perform the computation on,
- `parent` the parent clique to where the upward message will be sent,
- `childmsgs` is for any incoming messages from child cliques.
DevNotes
- FIXME total rewrite with AMP #41 and RoME #244 in mind
"""
function upGibbsCliqueDensity(
dfg::AbstractDFG,
cliq::TreeClique,
solveKey::Symbol,
inmsgs,
N::Int = getSolverParams(dfg).N,
dbg::Bool = false,
iters::Int = 3,
logger = ConsoleLogger(),
) # where {T, T2}
#
with_logger(logger) do
@info "up w $(length(inmsgs)) msgs"
end
# TODO -- some weirdness with: d,. = d = ., nothing
# mcmcdbg = Array{CliqGibbsMC,1}()
d = Dict{Symbol, TreeBelief}()
# priorprods = Vector{CliqGibbsMC}()
cliqdata = getCliqueData(cliq)
# use nested structure for more efficient Chapman-Kolmogorov solution approximation
if false
IDS = [cliqdata.frontalIDs; cliqdata.separatorIDs] #inp.cliq.attributes["frontalIDs"]
d = fmcmc!(dfg, cliq, inmsgs, IDS, solveKey, N, iters, dbg, logger)
else
# NOTE -- previous mistake, must iterate over directsvarIDs also (or incorporate once at the right time)
# NOTE -- double up on directs to allow inflation to take proper affect, see #1051
d = fmcmc!(
dfg,
cliq,
inmsgs,
cliqdata.directFrtlMsgIDs,
solveKey,
N,
1,
dbg,
logger,
true,
)
if length(cliqdata.msgskipIDs) > 0
dd = fmcmc!(dfg, cliq, inmsgs, cliqdata.msgskipIDs, solveKey, N, 1, dbg, logger, true)
for md in dd
d[md[1]] = md[2]
end
end
if length(cliqdata.itervarIDs) > 0
ddd = fmcmc!(
dfg,
cliq,
inmsgs,
cliqdata.itervarIDs,
solveKey,
N,
iters,
dbg,
logger,
false,
)
for md in ddd
d[md[1]] = md[2]
end
end
if length(cliqdata.directPriorMsgIDs) > 0
doids = setdiff(cliqdata.directPriorMsgIDs, cliqdata.msgskipIDs)
dddd = fmcmc!(dfg, cliq, inmsgs, doids, solveKey, N, 1, dbg, logger, true)
for md in dddd
d[md[1]] = md[2]
end
end
end
return d
end
## =============================================================================================
# Down solve
## =============================================================================================
## ============================================================================
# Initialization is slightly different and likely to be consolidated
## ============================================================================
"""
$SIGNATURES
Cycle through var order and initialize variables as possible in `subfg::AbstractDFG`.
Return true if something was updated.
Notes:
- assumed `subfg` is a subgraph containing only the factors that can be used.
- including the required up or down messages
- intended for both up and down initialization operations.
Dev Notes
- Should monitor updates based on the number of inferred & solvable dimensions
"""
function cycleInitByVarOrder!(
subfg::AbstractDFG,
varorder::Vector{Symbol};
solveKey::Symbol = :default,
logger = ConsoleLogger(),
)
#
with_logger(logger) do
@info "cycleInitByVarOrder! -- varorder=$(varorder)"
end
retval = false
count = 1
while count > 0
count = 0
for vsym in varorder
var = DFG.getVariable(subfg, vsym)
isinit = isInitialized(var, solveKey)
with_logger(logger) do
@info "var.label=$(var.label) is initialized=$(isinit)"
end
doautoinit!(subfg, [var;]; solveKey = solveKey, logger = logger)
if isinit != isInitialized(var, solveKey)
count += 1
retval = true
end
end
end
with_logger(logger) do
@info "cycleInitByVarOrder!, retval=$(retval)"
end
flush(logger.stream)
return retval
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 21844 | ## Various solver API's used in the past. These functions are due to be standardized, and obsolete code / functions removed.
export solveTree!, solveGraph!
export fetchCliqHistoryAll!
## ==============================================================================================
## Launch the tasks/treads for cliques
## ==============================================================================================
"""
$SIGNATURES
Start tasks (`@async`, or `Threads.@spawn` if `multithread=true`) to solve the factor graph on the tree.
"""
function taskSolveTree!(
dfg::AbstractDFG,
treel::AbstractBayesTree,
timeout::Union{Nothing, <:Real} = nothing;
oldtree::AbstractBayesTree = BayesTree(),
drawtree::Bool = false,
verbose::Bool = false,
verbosefid = stdout,
limititers::Int = -1,
limititercliqs::Vector{Pair{Symbol, Int}} = Pair{Symbol, Int}[],
downsolve::Bool = false,
incremental::Bool = false,
multithread::Bool = false,
skipcliqids::Vector{Symbol} = Symbol[],
recordcliqs::Vector{Symbol} = Symbol[],
delaycliqs::Vector{Symbol} = Symbol[],
smtasks = Task[],
algorithm::Symbol = :default,
solveKey::Symbol = algorithm,
)
#
# revert DOWNSOLVED status to INITIALIZED in preparation for new upsolve
resetTreeCliquesForUpSolve!(treel)
drawtree ? drawTree(treel; show = false, filepath = joinLogPath(dfg, "bt.dot")) : nothing
cliqHistories = Dict{Int, Vector{CSMHistoryTuple}}()
resize!(smtasks, getNumCliqs(treel))
approx_iters = getNumCliqs(treel) * 24
solve_progressbar =
verbose ? nothing : ProgressUnknown("Solve Progress: approx max $approx_iters, at iter")
# queue all the tasks/threads
if !isTreeSolved(treel; skipinitialized = true)
@sync begin
monitortask = monitorCSMs(treel, smtasks)
# duplicate int i into async (important for concurrency)
for i = 1:getNumCliqs(treel) # TODO, this might not always work?
scsym = getCliqFrontalVarIds(getClique(treel, i))
if length(intersect(scsym, skipcliqids)) == 0
limthiscsm = filter(x -> (x[1] in scsym), limititercliqs)
limiter = 0 < length(limthiscsm) ? limthiscsm[1][2] : limititers
if multithread
smtasks[i] = Threads.@spawn tryCliqStateMachineSolve!(
dfg,
treel,
i,
timeout;
solveKey = solveKey,
algorithm = algorithm,
oldtree = oldtree,
verbose = verbose,
verbosefid = verbosefid,
drawtree = drawtree,
limititers = limiter, # per-clique iteration limit, matching the @async branch below
downsolve = downsolve,
incremental = incremental,
delaycliqs = delaycliqs,
recordcliqs = recordcliqs,
solve_progressbar = solve_progressbar,
)
else
smtasks[i] = @async tryCliqStateMachineSolve!(
dfg,
treel,
i,
timeout;
solveKey = solveKey,
algorithm = algorithm,
oldtree = oldtree,
verbose = verbose,
verbosefid = verbosefid,
drawtree = drawtree,
limititers = limiter,
downsolve = downsolve,
incremental = incremental,
delaycliqs = delaycliqs,
recordcliqs = recordcliqs,
solve_progressbar = solve_progressbar,
)
end
end # if
end # for
end # sync
end # if
# if record cliques is in use, else skip computational delay
0 == length(recordcliqs) ? nothing : fetchCliqHistoryAll!(smtasks, cliqHistories)
!isnothing(solve_progressbar) && finish!(solve_progressbar)
return smtasks, cliqHistories
end
function tryCliqStateMachineSolve!(
dfg::G,
treel::AbstractBayesTree,
cliqKey::Union{Int, CliqueId},
timeout::Union{Nothing, <:Real} = nothing;
oldtree::AbstractBayesTree = BayesTree(),
verbose::Bool = false,
verbosefid = stdout,
drawtree::Bool = false,
limititers::Int = -1,
downsolve::Bool = false,
incremental::Bool = false,
delaycliqs::Vector{Symbol} = Symbol[],
recordcliqs::Vector{Symbol} = Symbol[],
solve_progressbar = nothing,
algorithm::Symbol = :default,
solveKey::Symbol = algorithm,
) where {G <: AbstractDFG}
#
clst = :na
cliq = getClique(treel, cliqKey)
syms = getCliqFrontalVarIds(cliq)
oldcliq = attemptTreeSimilarClique(oldtree, getCliqueData(cliq))
oldcliqdata = getCliqueData(oldcliq)
opts = getSolverParams(dfg)
# Base.rm(joinpath(opts.logpath,"logs/cliq$i"), recursive=true, force=true)
mkpath(joinpath(opts.logpath, "logs/cliq$(cliq.id)/"))
logger = SimpleLogger(open(joinpath(opts.logpath, "logs/cliq$(cliq.id)/log.txt"), "w+")) # NullLogger()
# global_logger(logger)
history = Vector{CSMHistoryTuple}()
recordthiscliq = length(intersect(recordcliqs, syms)) > 0
delaythiscliq = length(intersect(delaycliqs, syms)) > 0
try
history = initStartCliqStateMachine!(
dfg,
treel,
cliq,
timeout;
oldcliqdata = oldcliqdata,
drawtree = drawtree,
verbose = verbose,
verbosefid = verbosefid,
limititers = limititers,
downsolve = downsolve,
recordhistory = recordthiscliq,
incremental = incremental,
delay = delaythiscliq,
logger = logger,
solve_progressbar = solve_progressbar,
algorithm = algorithm,
solveKey = solveKey,
)
#
# cliqHistories[cliqKey] = history
if length(history) >= limititers && limititers != -1
# @warn "writing logs/cliq$(cliq.id)/csm.txt"
# @save "/tmp/cliqHistories/cliq$(cliq.id).jld2" history
fid = open(joinpath(opts.logpath, "logs/cliq$(cliq.id)/csm.txt"), "w")
printCliqHistorySummary(fid, history)
close(fid)
end
flush(logger.stream)
close(logger.stream)
# clst = getCliqueStatus(cliq)
# clst = cliqInitSolveUp!(dfg, treel, cliq, drawtree=drawtree, limititers=limititers )
catch err
bt = catch_backtrace()
println()
showerror(stderr, err, bt)
# @warn "writing /tmp/caesar/logs/cliq$(cliq.id)/*.txt"
fid = open(joinpath(opts.logpath, "logs/cliq$(cliq.id)/stacktrace.txt"), "w")
showerror(fid, err, bt)
close(fid)
fid = open(joinpath(opts.logpath, "logs/cliq$(cliq.id)_stacktrace.txt"), "w")
showerror(fid, err, bt)
close(fid)
# @save "/tmp/cliqHistories/$(cliq.label).jld2" history
fid = open(joinpath(opts.logpath, "logs/cliq$(cliq.id)/csm.txt"), "w")
printCliqHistorySummary(fid, history)
close(fid)
fid = open(joinpath(opts.logpath, "logs/cliq$(cliq.id)_csm.txt"), "w")
printCliqHistorySummary(fid, history)
close(fid)
flush(logger.stream)
close(logger.stream)
rethrow()
end
# if !(clst in [UPSOLVED; DOWNSOLVED; MARGINALIZED])
# error("Clique $(cliq.id), initInferTreeUp! -- cliqInitSolveUp! did not arrive at the desired solution statu: $clst")
# end
return history
end
"""
$SIGNATURES
Standalone state machine solution for a single clique.
Related:
initInferTreeUp!
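Example (hypothetical usage sketch; assumes `fg` contains a frontal variable `:x1`)
```julia
tree = buildTreeReset!(fg)
statemachine, csmc = solveCliqWithStateMachine!(fg, tree, :x1; recordhistory = true)
```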
"""
function solveCliqWithStateMachine!(
dfg::G,
tree::AbstractBayesTree,
frontal::Symbol;
iters::Int = 200,
downsolve::Bool = true,
recordhistory::Bool = false,
verbose::Bool = false,
nextfnc::Function = canCliqMargRecycle_StateMachine,
prevcsmc::Union{Nothing, CliqStateMachineContainer} = nothing,
) where {G <: AbstractDFG}
#
cliq = getClique(tree, frontal)
children = getChildren(tree, cliq)#Graphs.out_neighbors(cliq, tree.bt)
prnt = getParent(tree, cliq)
destType = (G <: InMemoryDFGTypes) ? G : LocalDFG
csmc = if isa(prevcsmc, Nothing)
CliqStateMachineContainer(
dfg,
initfg(destType; solverParams = getSolverParams(dfg)),
tree,
cliq,
prnt,
children,
false,
true,
true,
downsolve,
false,
getSolverParams(dfg),
)
else
prevcsmc
end
statemachine =
StateMachine{CliqStateMachineContainer}(; next = nextfnc, name = "cliq$(cliq.id)")
while statemachine(
csmc;
verbose = verbose,
iterlimit = iters,
recordhistory = recordhistory,
)
end
return statemachine, csmc
end
## ==============================================================================================
# Prepare CSM (based on FSM) entry points
## ==============================================================================================
"""
$SIGNATURES
Fetch solver history from clique state machines that have completed their async Tasks and store in the `hist::Dict{Int,Tuple}` dictionary.
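Example (hypothetical usage sketch; `smtasks` is populated by `solveTree!`)
```julia
smtasks = Task[]
tree = solveTree!(fg; smtasks, recordcliqs = ls(fg))
hists = fetchCliqHistoryAll!(smtasks)
```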
"""
function fetchCliqHistoryAll!(
smt::Vector{Task},
hist::Dict{Int, Vector{CSMHistoryTuple}} = Dict{Int, Vector{CSMHistoryTuple}}(),
)
#
for i = 1:length(smt)
sm = smt[i]
# only fetch states that have completed processing
if sm.state == :done
haskey(hist, i) ? @warn("overwriting existing history key $i") : nothing
hist[i] = fetch(sm)
elseif !isnothing(sm.storage) && haskey(sm.storage, :statemachine)
hist[i] = CSMHistoryTuple.(sm.storage[:statemachine].history)
end
end
return hist
end
## ==============================================================================================
# Nominal user interface to the solver
## ==============================================================================================
"""
$SIGNATURES
Perform inference over the Bayes tree according to `opt::SolverParams` and keyword arguments.
Notes
- Aliased with `solveGraph!`
- Variety of options, including fixed-lag solving -- see `getSolverParams(fg)` for details.
- See online Documentation for more details: https://juliarobotics.org/Caesar.jl/latest/
- Latest result always stored in `solvekey=:default`.
- Experimental `storeOld::Bool=true` will duplicate the current result as supersolve `:default_k`.
- Based on `solvable==1` assumption.
- `limititercliqs` allows user to limit the number of iterations a specific CSM does.
- keywords `verbose` and `verbosefid::IOStream` can be used together to send output to file or default `stdout`.
- keyword `recordcliqs=[:x0; :x7...]` identifies by frontals which cliques to record CSM steps.
- See [`repeatCSMStep!`](@ref), [`printCSMHistoryLogical`](@ref), [`printCSMHistorySequential`](@ref)
DevNotes
- TODO Change keyword arguments to new @parameter `SolverOptions` type.
Example
```julia
# pass in old `tree` to enable compute recycling -- see online Documentation for more details
tree = solveTree!(fg [,tree])
```
Related
`solveGraph!`, [`solveCliqUp!`](@ref), [`solveCliqDown!`](@ref), [`buildTreeReset!`](@ref), [`repeatCSMStep`](@ref), [`printCSMHistoryLogical`](@ref)
"""
function solveTree!(
dfgl::AbstractDFG,
oldtree::AbstractBayesTree = BayesTree();
timeout::Union{Nothing, <:Real} = nothing,
storeOld::Bool = false,
verbose::Bool = false,
verbosefid = stdout,
delaycliqs::Vector{Symbol} = Symbol[],
recordcliqs::Vector{Symbol} = Symbol[],
limititercliqs::Vector{Pair{Symbol, Int}} = Pair{Symbol, Int}[],
injectDelayBefore::Union{Nothing, Vector{<:Pair{Int, <:Pair{<:Function, <:Real}}}} = nothing,
skipcliqids::Vector{Symbol} = Symbol[],
eliminationOrder::Union{Nothing, Vector{Symbol}} = nothing,
eliminationConstraints::Vector{Symbol} = Symbol[],
smtasks::Vector{Task} = Task[],
dotreedraw = Int[1;],
runtaskmonitor::Bool = true,
algorithm::Symbol = :default,
solveKey::Symbol = algorithm,
multithread::Bool = false,
)
#
# workaround in case isolated variables occur
ensureSolvable!(dfgl)
opt = getSolverParams(dfgl)
# showtree should force drawtree
if opt.showtree && !opt.drawtree
@info("Since .showtree=true, also bumping .drawtree=true")
else
nothing
end
opt.drawtree |= opt.showtree
# depcrecation
# update worker pool incase there are more or less
setWorkerPool!()
if opt.multiproc && nprocs() == 1
@info "Setting `.multiproc=false` since `Distributed.nprocs() == 1`"
opt.multiproc = false
end
if opt.graphinit
@info "Ensure variables are all initialized (graphinit)"
if algorithm == :parametric
@warn "Parametric is using default graphinit (and ignoring solveKey)"
initAll!(dfgl)
initParametricFrom!(dfgl)
else
initAll!(dfgl, solveKey)
end
end
# construct tree
@info "Solving over the Bayes (Junction) tree."
hist = Dict{Int, Vector{CSMHistoryTuple}}()
if opt.isfixedlag
@info "Quasi fixed-lag is enabled (a feature currently in testing, and ignoring solveKey)!"
fifoFreeze!(dfgl)
end
# perhaps duplicate current value
if storeOld || opt.dbg
ss = listSupersolves(dfgl) .|> string
ss_ = ss[occursin.(r"default_", ss)] .|> x -> x[9:end]
filter!(x -> occursin(r"^\d+$", x), ss_) # ss_ = ss_[occursin.(r"^\d$",ss_)]
allk = parse.(Int, ss_)
nextk = length(allk) == 0 ? 0 : maximum(allk) + 1
newKey = Symbol(:default_, nextk)
cloneSolveKey!(dfgl, newKey, :default; solvable = 1)
# foreach(x->updateVariableSolverData!(dfgl, x, getSolverData(getVariable(dfgl,x), :default), newKey, true, Symbol[]), ls(dfgl, solvable=1))
@info "storeOld=true, previous :default deepcopied into $newKey for solvable==1 variables."
end
orderMethod = 0 < length(eliminationConstraints) ? :ccolamd : :qr
# current incremental solver builds a new tree and matches against old tree for recycling.
tree = buildTreeReset!(
dfgl,
eliminationOrder;
drawpdf = false,
show = opt.showtree,
ensureSolvable = false,
filepath = joinpath(opt.logpath, "bt.pdf"),
eliminationConstraints = eliminationConstraints,
ordering = orderMethod,
)
# setAllSolveFlags!(tree, false)
initTreeMessageChannels!(tree)
# if desired, drawtree in a loop
treetask, _dotreedraw = drawTreeAsyncLoop(tree, opt; dotreedraw = dotreedraw)
@info "Do tree based init-ference"
algorithm != :parametric ? nothing : @error("Under development, do not use, see #539")
!storeOld ? nothing : @error("parametric storeOld keyword not wired up yet.")
if opt.async
@async smtasks, hist = taskSolveTree!(
dfgl,
tree,
timeout;
solveKey = solveKey,
algorithm = algorithm,
multithread = multithread,
smtasks = smtasks,
oldtree = oldtree,
verbose = verbose,
verbosefid = verbosefid,
drawtree = opt.drawtree,
recordcliqs = recordcliqs,
limititers = opt.limititers,
downsolve = opt.downsolve,
incremental = opt.incremental,
skipcliqids = skipcliqids,
delaycliqs = delaycliqs,
limititercliqs = limititercliqs,
)
else
smtasks, hist = taskSolveTree!(
dfgl,
tree,
timeout;
solveKey = solveKey,
algorithm = algorithm,
multithread = multithread,
smtasks = smtasks,
oldtree = oldtree,
verbose = verbose,
verbosefid = verbosefid,
drawtree = opt.drawtree,
recordcliqs = recordcliqs,
limititers = opt.limititers,
downsolve = opt.downsolve,
incremental = opt.incremental,
skipcliqids = skipcliqids,
delaycliqs = delaycliqs,
limititercliqs = limititercliqs,
)
@info "Finished tree based init-ference"
end
# NOTE copy data from the new tree into the caller's `oldtree`
oldtree.bt = tree.bt
oldtree.btid = tree.btid
oldtree.cliques = tree.cliques
oldtree.frontals = tree.frontals
oldtree.eliminationOrder = tree.eliminationOrder
oldtree.buildTime = tree.buildTime
if opt.drawtree && opt.async
@warn "due to async=true, only keeping task pointer, not stopping the drawtreerate task! Consider not using .async together with .drawtreerate != 0"
push!(smtasks, treetask)
else
dotreedraw[1] = 0
end
# if debugging and not async then also print the CSMHistory
if opt.dbg && !opt.async
hists = !opt.async ? fetchCliqHistoryAll!(smtasks) : hist
printCSMHistorySequential(hists, joinLogPath(dfgl, "HistoryCSMAll.txt"))
end
return oldtree
end
"""
solveGraph!
Just an alias, see documentation for `solveTree!`.
"""
DFG.solveGraph!(dfg::AbstractDFG, w...;kw...) = solveTree!(dfg, w...;kw...)
"""
$SIGNATURES
Internal function used for solveCliqUp! to build the incoming upward message (Rx)
"""
function _buildMessagesUp(
fg::AbstractDFG,
tree::AbstractBayesTree,
cliqid,
solveKey::Symbol;
status = UPSOLVED,
)
#
cliq = getClique(tree, cliqid)
beliefMessages = Dict{Int, LikelihoodMessage}()
for child in getChildren(tree, cliq)
msg = prepCliqueMsgUp(fg, child, solveKey, status)
push!(beliefMessages, child.id[] => msg)
end
return beliefMessages
end
"""
$SIGNATURES
Perform inference in the upward direction over one clique in the Bayes tree according to `opt::SolverParams`.
Example
```julia
tree = buildTreeReset!(fg)
hist, upMessageOut = solveCliqUp!(fg, tree, 2)
```
Notes
- Modifies fg with new values
- Calculates up messages from fg if not provided
DevNotes
- Test isfixedlag
- Test recordcliq
Related
[`solveTree!`](@ref), [`buildTreeReset!`](@ref), [`printCliqHistorySummary`](@ref), [`repeatCSMStep!`](@ref), `sandboxStateMachineStep`
"""
function solveCliqUp!(
fg::AbstractDFG,
tree::AbstractBayesTree,
cliqid::Union{CliqueId, Int, Symbol},
solveKey::Symbol = :default,
beliefMessages::Dict{Int, LikelihoodMessage} = _buildMessagesUp(
fg,
tree,
cliqid,
solveKey,
); # create belief message from fg if needed
verbose::Bool = false,
recordcliq::Bool = false,
)
# cliqHistories = Dict{Int,Vector{CSMHistoryTuple}}(),
#
# hist = Vector{CSMHistoryTuple}()
opt = DFG.getSolverParams(fg)
olddown = opt.downsolve
opt.downsolve = false
#TODO test
if opt.isfixedlag
@info "Quasi fixed-lag is enabled (a feature currently in testing)!"
fifoFreeze!(fg)
end
cliq = getClique(tree, cliqid)
# TODO improve, perhaps add to constructor; simply add all channels here regardless.
initTreeMessageChannels!(tree)
@debug "putting messages on up channels from $(keys(beliefMessages))"
# put the up messages (beliefMessages) that will be used to solve this clique on the channel, the input
for (id, msg) in pairs(beliefMessages)
child = getClique(tree, id)
for e in getEdgesParent(tree, child)
@async putBeliefMessageUp!(tree, e, msg)
end
end
#
@debug "taking belief message that will be sent up"
# take! the message that is sent up by this clique, the output
takeUpTask = @async takeBeliefMessageUp!(tree, getEdgesParent(tree, cliq)[1])
recordcliqs = recordcliq ? [getFrontals(cliq)[1]] : Symbol[]
hist = tryCliqStateMachineSolve!(
fg,
tree,
cliq.id;
solveKey = solveKey,
verbose = verbose,
drawtree = opt.drawtree,
limititers = opt.limititers,
downsolve = false,
recordcliqs = recordcliqs,
incremental = opt.incremental,
)
#
# post-hoc store possible state machine history in clique (without recursively saving earlier history inside state history)
# assignTreeHistory!(tree, cliqHistories)
beliefMessageOut = fetch(takeUpTask)
#restore downsolve
opt.downsolve = olddown
return hist, beliefMessageOut
end
"""
$SIGNATURES
Internal function used for solveCliqDown! to build the incoming downward message (Rx)
"""
function _buildMessageDown(
fg::AbstractDFG,
tree::AbstractBayesTree,
cliqid,
solveKey::Symbol;
status::CliqStatus = DOWNSOLVED,
)
#
cliq = getClique(tree, cliqid)
parent = getParent(tree, cliq)[1]
return getCliqDownMsgsAfterDownSolve(fg, parent, solveKey; status = status)
end
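"""
$SIGNATURES
Perform inference in the downward direction over one clique in the Bayes tree according to `opt::SolverParams`.
Example
```julia
tree = buildTreeReset!(fg)
hist, downMessagesOut = solveCliqDown!(fg, tree, 2)
```
Notes
- Modifies `fg` with new values.
- Builds the required up and down messages from `fg` if not provided.
Related
[`solveCliqUp!`](@ref), [`solveTree!`](@ref), [`buildTreeReset!`](@ref)
"""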
function solveCliqDown!(
fg::AbstractDFG,
tree::AbstractBayesTree,
cliqid::Union{CliqueId, Int, Symbol},
solveKey::Symbol = :default,
beliefMessage::LikelihoodMessage = _buildMessageDown(fg, tree, cliqid, solveKey); # create belief message from fg if needed
verbose::Bool = false,
recordcliq::Bool = false,
)
#
# hist = Vector{CSMHistoryTuple}()
opt = DFG.getSolverParams(fg)
upsolve = opt.upsolve
opt.upsolve = false
cliq = getClique(tree, cliqid)
# TODO improve, perhaps add to constructor; simply add all channels here regardless.
initTreeMessageChannels!(tree)
# Build the cliq up message to populate message factors that is needed for down
@debug "Putting message on up channel from children"
for (id, msg) in _buildMessagesUp(fg, tree, cliqid, solveKey)
child = getClique(tree, id)
for e in getEdgesParent(tree, child)
@async putBeliefMessageUp!(tree, e, msg)
end
end
# put the down message (beliefMessage) that will be used to solve this clique on the channel, the input
@debug "putting message on down channel from parent, used by this clique"
for e in getEdgesParent(tree, cliq)
@async putBeliefMessageDown!(tree, e, beliefMessage)
end
# take! and discard the up message sent in the skipped-up part of the solve
@debug "taking belief message that will be sent up"
@async takeBeliefMessageUp!(tree, getEdgesParent(tree, cliq)[1])
#
@debug "taking belief message that will be sent down"
# take! the message that is sent down by this clique, the output
takeDownTask = @async begin
messages = Dict{Int, LikelihoodMessage}()
for e in getEdgesChildren(tree, cliq)
messages[e.dst] = takeBeliefMessageDown!(tree, e)
end
messages
end
recordcliqs = recordcliq ? [getFrontals(cliq)[1]] : Symbol[]
hist = tryCliqStateMachineSolve!(
fg,
tree,
cliq.id;
solveKey = solveKey,
verbose = verbose,
drawtree = opt.drawtree,
limititers = opt.limititers,
recordcliqs = recordcliqs,
incremental = opt.incremental,
)
# fetch on down
beliefMessageOut = fetch(takeDownTask)
#restore
opt.upsolve = upsolve
return hist, beliefMessageOut
end
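# Example (sketch, assuming `fg::AbstractDFG` and Bayes tree `tree` exist from an
# earlier `solveTree!` or `buildTreeReset!`, with :x1 a clique frontal variable):
#   histUp, upMsg = solveCliqUp!(fg, tree, :x1)
#   histDwn, dwnMsg = solveCliqDown!(fg, tree, :x1)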
function fastnorm(u)
# dest[1] = ...
n = length(u)
T = eltype(u)
s = zero(T)
@fastmath @inbounds @simd for i = 1:n
s += u[i]^2
end
@fastmath @inbounds return sqrt(s)
end
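# Example (sketch): same result as LinearAlgebra.norm, but with @simd/@inbounds
# for speed on hot paths:
#   fastnorm([3.0; 4.0])  # ≈ 5.0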
# """
# $TYPEDSIGNATURES
# Calculate the Kernel Embedding MMD 'distance' between sample points (or kernel density estimates).
# Notes
# - `bw::Vector=[0.001;]` controls the mmd kernel bandwidths.
# - Overloading from ApproxManifoldProducts
# Related
# `AMP.kld`
# """
function mmd(
p1::AbstractVector{P1},
p2::AbstractVector{P2},
varType::Union{InstanceType{<:InferenceVariable}, InstanceType{<:AbstractFactor}},
threads::Bool = true;
bw::AbstractVector{<:Real} = SA[0.001;],
) where {P1 <: AbstractVector, P2 <: AbstractVector}
#
mani = getManifold(varType)
return mmd(mani, p1, p2, length(p1), length(p2), threads; bw)
end
function mmd(
p1::ManifoldKernelDensity,
p2::ManifoldKernelDensity,
nodeType::Union{InstanceType{<:InferenceVariable}, InstanceType{<:AbstractFactor}},
threads::Bool = true;
bw::AbstractVector{<:Real} = SA[0.001;],
asPartial::Bool = true
)
#
return mmd(getPoints(p1, asPartial), getPoints(p2, asPartial), nodeType, threads; bw)
end
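# Example (sketch, assuming 1D Euclidean samples and the IIF `ContinuousScalar` variable type):
#   p1 = [randn(1) for _ = 1:100]
#   p2 = [randn(1) .+ 1.0 for _ = 1:100]
#   d = mmd(p1, p2, ContinuousScalar)  # larger value implies more dissimilar sample sets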
# part of consolidation, see #927
function sampleFactor!(
ccwl::CommonConvWrapper,
N::Int;
_allowThreads::Bool=true
)
#
# FIXME get allocations here down to 0
# TODO make this an in-place operation as far possible
# TODO make this a multithreaded sampling function
# build a CalcFactor object and get fresh samples.
# cf = CalcFactor(ccwl; _allowThreads)
resize!(ccwl.measurement, N)
ccwl.measurement[:] = sampleFactor(ccwl, N; _allowThreads)
return ccwl.measurement
end
function sampleFactor(
ccwl::CommonConvWrapper,
N::Int;
_allowThreads::Bool=true
)
#
cf = CalcFactorNormSq(ccwl; _allowThreads)
return sampleFactor(cf, N)
end
sampleFactor(
fct::DFGFactor,
N::Int = 1;
_allowThreads::Bool=true
) = sampleFactor(
_getCCW(fct),
N;
_allowThreads
)
function sampleFactor(
dfg::AbstractDFG,
sym::Symbol,
N::Int = 1;
_allowThreads::Bool=true
)
#
return sampleFactor(getFactor(dfg, sym), N; _allowThreads)
end
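# Example (sketch, assuming `fg` contains a factor labeled :x0x1f1):
#   meas = sampleFactor(fg, :x0x1f1, 5)  # 5 fresh measurement samples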
"""
$(SIGNATURES)
Update clique `cliq` in the Bayes (Junction) tree according to the contents of `IDvals`.
Intended use is to update the main clique after an upward belief propagation computation
has been completed per clique.
"""
function updateFGBT!(
fg::AbstractDFG,
cliq::TreeClique,
IDvals::Dict{Symbol, TreeBelief};
dbg::Bool = false,
fillcolor::String = "",
logger = ConsoleLogger(),
)
#
# if dbg
# # TODO find better location for the debug information (this is old code)
# cliq.attributes["debug"] = deepcopy(urt.dbgUp)
# end
if fillcolor != ""
setCliqueDrawColor!(cliq, fillcolor)
end
for (id, dat) in IDvals
with_logger(logger) do
@info "updateFGBT! up -- update $id, infoPerCoord=$(dat.infoPerCoord)"
end
updvert = DFG.getVariable(fg, id)
setValKDE!(updvert, deepcopy(dat), true) ## TODO -- not sure if deepcopy is required
end
with_logger(logger) do
@info "updateFGBT! up -- updated $(getLabel(cliq))"
end
return nothing
end
"""
$SIGNATURES
Build a graph given one factor and an ordered tuple of variable types. Initial point values can optionally be passed via `pts`; use `nothing` entries to skip initializing a particular variable.
Notes
- Often used to quickly generate temporary graphs for a variety of local calculations.
- does not yet support split `_` characters in auto-find `lastVar` from `varPattern`.
- Will always add a factor, but will skip adding variable labels that already exist in `dfg`.
DevNotes
- TODO allow pts to be full MKD beliefs, part of replacing old `approxConvCircular`, see #1351
"""
function _buildGraphByFactorAndTypes!(
fct::AbstractFactor,
varTypes::Tuple,
pts::Tuple = ();
dfg::AbstractDFG = initfg(),
solveKey::Symbol = :default,
newFactor::Bool = true,
destPattern::Regex = r"x\d+",
destPrefix::Symbol = match(r"[a-zA-Z_]+", destPattern.pattern).match |> Symbol,
_allVars::AbstractVector{Symbol} = sortDFG(ls(dfg, destPattern)),
currLabel::Symbol = 0 < length(_allVars) ? _allVars[end] : Symbol(destPrefix, 0),
currNumber::Integer = reverse(match(r"\d+", reverse(string(currLabel))).match) |>
x -> parse(Int, x),
graphinit::Bool = false,
_blockRecursion::Bool = false,
)
#
# TODO generalize beyond binary
len = length(varTypes)
vars = Symbol[Symbol(destPrefix, s_) for s_ in (currNumber .+ (1:len))]
for (s_, vTyp) in enumerate(varTypes)
# add the necessary variables
exists(dfg, vars[s_]) ? nothing : addVariable!(dfg, vars[s_], vTyp)
# set the numerical values if available
# TODO allow pts to come in as full MKD beliefs, not just one point
    # only set values when points were provided for this variable (guards against empty `pts`)
    if (length(pts) < s_) || (pts[s_] isa Nothing)
nothing
else
initVariable!(dfg, vars[s_], [pts[s_]], solveKey; bw = ones(getDimension(vTyp)))
end
end
# if newFactor then add the factor on vars, else assume only one existing factor between vars
_dfgfct = if newFactor
addFactor!(dfg, vars, fct; graphinit, _blockRecursion)
else
getFactor(dfg, intersect((ls.(dfg, vars))...)[1])
end
return dfg, _dfgfct
end
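# Example (sketch with hypothetical values; `LinearRelative` and `ContinuousScalar` are IIF types):
#   tfg, fct = _buildGraphByFactorAndTypes!(LinearRelative(Normal(10.0, 1.0)),
#                                           (ContinuousScalar, ContinuousScalar),
#                                           ([0.0;], [10.0;]))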
"""
$SIGNATURES
Check if a variable might already be located at the test location, by means of a (default) `refKey=:simulated` PPE stored in the existing variables.
Notes
- Checks, using provided `factor` from `srcLabel` in `fg` to an assumed `dest` variable which may or may not yet exist.
- This function was written to aid in building simulation code,
- it's use in real world usage may have unexpected behaviour -- hence not exported.
- Return `::Tuple{Bool, Vector{Float64}, Symbol}`, e.g. already exists `(true, [refVal], :l17)`, or if a reference variable does not yet exist `(false, [refVal], :l28)`.
- Vector contains the PPE reference location of the new variable as calculated.
- Auto `destPrefix` is trying to parse `destRegex` labels like `l\\d+` or `tag\\d+`, won't work with weirder labels e.g. `:l_4_23`.
- User can overcome weird names by self defining `destPrefix` and `srcNumber`.
- User can also ignore and replace the generated new label `Symbol(destPrefix, srcNumber)`.
- This function does not add new variables or factors to `fg`, user must do that themselves after.
- Useful to use in combination with `setPPE!` on new variable.
- At time of writing `accumulateFactorMeans` could only incorporate priors or binary relative factors.
- internal info, see [`solveFactorParametric`](@ref),
- This means at time of writing `factor` must be a binary factor.
- Tip, if simulations are inducing odometry bias, think of using two factors from caller (e.g. simPerfect and simBias).
Example
```julia
# fg has :x5 and :l2 and PPEs :simulated exists in all variables
# user wants to add a factor from :x5 to potential new :l5, but maybe a (simulated) variable, say :l2, is already there.
newFactor = RoME.Pose2Point2BearingRange(Normal(), Normal(20,0.5))
isAlready, simPPE, genLabel = IIF._checkVariableByReference(fg, :x5, r"l\\d+", Point2, newFactor)
# maybe add new variable
if !isAlready
@info "New variable with simPPE" genLabel simPPE
newVar = addVariable!(fg, genLabel, Point2)
addFactor!(fg, [:x5; genLabel], newFactor)
# also set :simulated PPE for similar future usage
newPPE = DFG.MeanMaxPPE(:simulated, simPPE, simPPE, simPPE)
setPPE!(newVar, :simulated, typeof(newPPE), newPPE) # TODO this API can be improved
else
@info "Adding simulated loop closure with perfect data association" :x5 genLabel
addFactor!(fg, [:x5; genLabel], newFactor)
end
# the point is that only the (0,20) values in newFactor are needed, all calculations are abstracted away.
```
See also: [`RoME.generateGraph_Honeycomb!`](@ref), [`accumulateFactorMeans`](@ref), [`getPPE`](@ref)
"""
function _checkVariableByReference(
fg::AbstractDFG,
srcLabel::Symbol,
destRegex::Regex,
destType::Type{<:InferenceVariable},
factor::AbstractRelative;
srcType::Type{<:InferenceVariable} = getVariableType(fg, srcLabel) |> typeof,
doRef::Bool = true,
refKey::Symbol = :simulated,
prior = if !doRef
nothing
else
DFG._getPriorType(srcType)(
MvNormal(getPPE(fg[srcLabel], refKey).suggested, diagm(ones(getDimension(srcType)))),
)
end,
atol::Real = 1e-2,
destPrefix::Symbol = match(r"[a-zA-Z_]+", destRegex.pattern).match |> Symbol,
srcNumber = match(r"\d+", string(srcLabel)).match |> x -> parse(Int, x),
overridePPE = doRef ? nothing : zeros(getDimension(destType)),
)
#
refVal = if overridePPE !== nothing
overridePPE
else
# calculate and add the reference value
# TODO refactor consolidation to use `_buildGraphByFactorAndTypes!`
tfg = initfg()
addVariable!(tfg, :x0, srcType)
addFactor!(tfg, [:x0], prior)
addVariable!(tfg, :l0, destType)
addFactor!(tfg, [:x0; :l0], factor; graphinit = false)
# calculate where the landmark reference position is
accumulateFactorMeans(tfg, [:x0f1; :x0l0f1])
end
ppe = DFG.MeanMaxPPE(refKey, refVal, refVal, refVal)
# now check if we already have a landmark at this location
varLms = ls(fg, destRegex) |> sortDFG
already = if doRef
ppeLms = getPPE.(getVariable.(fg, varLms), refKey) .|> x -> x.suggested
errmask = ppeLms .|> (x -> isapprox(x, refVal; atol = atol))
any(errmask)
else
false
end
if already
# does exist, ppe, variableLabel
alrLm = varLms[findfirst(errmask)]
# @info "Variable on :$refKey does exists at" srcLabel alrLm
return true, ppe, alrLm
end
# Nope does not exist, ppe, generated new variable label only
return false, ppe, Symbol(destPrefix, srcNumber)
end
function _checkVariableByReference(
fg::AbstractDFG,
srcLabel::Symbol,
destRegex::Regex,
destType::Type{<:InferenceVariable},
factor::AbstractPrior;
srcType::Type{<:InferenceVariable} = getVariableType(fg, srcLabel) |> typeof,
doRef::Bool = true,
refKey::Symbol = :simulated,
prior = typeof(factor)(MvNormal(getMeasurementParametric(factor)...)),
atol::Real = 1e-3,
destPrefix::Symbol = match(r"[a-zA-Z_]+", destRegex.pattern).match |> Symbol,
srcNumber = match(r"\d+", string(srcLabel)).match |> x -> parse(Int, x),
overridePPE = doRef ? nothing : zeros(getDimension(destType)),
)
#
refVal = if overridePPE !== nothing
overridePPE
else
getMeasurementParametric(factor)[1]
end
ppe = DFG.MeanMaxPPE(refKey, refVal, refVal, refVal)
# Nope does not exist, ppe, generated new variable label only
return false, ppe, Symbol(destPrefix, srcNumber)
end
#
"""
$(SIGNATURES)
Specialized subgraph function for cliques to build a deep subgraph copy from the DFG given a list of frontals and separators.
Dev notes:
- TODO Since a clique should already have a list of frontals, separators, and potentials (factors), this function should just be a light wrapper around copyGraph or buildSubgraph
- TODO Send in clique and then extract frontals, separators and factors
- TODO ability to limit which solveKeys to copy.
"""
function buildCliqSubgraph!(
cliqSubFg::AbstractDFG,
dfg::AbstractDFG,
frontals::Vector{Symbol},
separators::Vector{Symbol};
solvable::Int = 0,
verbose::Bool = false,
solveKey::Symbol = :NOTUSEDYET,
)
allvars = union(frontals, separators)
lenbefore = length(allvars)
# filter variables by solvable
solvable != 0 && filter!(fid -> (getSolvable(dfg, fid) >= solvable), allvars)
# Potential problem... what are variables doing in the clique if they are not solvable?
solvable != 0 &&
lenbefore != length(allvars) &&
@info("Not all variables are included in subgraph due to solvable $solvable")
#get list of factors to possibly add, ie. frontal neighbors
#todo replace with the factor list (potentials) from the clique
addfac = Symbol[]
for sym in frontals
union!(addfac, listNeighbors(dfg, sym))
end
allfacs = Symbol[]
for sym in addfac
vos = getVariableOrder(dfg, sym)
if vos ⊆ allvars #only add if not orphaned
union!(allfacs, [sym])
end
end
# filter factors by solvable
solvable != 0 && filter!(fid -> (getSolvable(dfg, fid) >= solvable), allfacs)
# add all the factors and variables to the new subgraph
DFG.deepcopyGraph!(cliqSubFg, dfg, allvars, allfacs; verbose = verbose)
return cliqSubFg
end
function buildCliqSubgraph!(
cliqSubFg::AbstractDFG,
dfg::AbstractDFG,
cliq::TreeClique;
solvable::Int = 0,
verbose::Bool = false,
solveKey::Symbol = :NOTUSEDYET,
)
vars = getCliqVarIdsAll(cliq)
facs = getCliqFactorIdsAll(cliq)
# Potential problem... what are variables/factors doing in the clique if they are not solvable?
solvable != 0 && filter!(fid -> (getSolvable(dfg, fid) >= solvable), vars)
solvable != 0 && filter!(fid -> (getSolvable(dfg, fid) >= solvable), facs)
# fix for issue #681
# @show ls(cliqSubFg), vars
if length(intersect(ls(cliqSubFg), vars)) != length(vars)
DFG.deepcopyGraph!(cliqSubFg, dfg, vars, facs; verbose = verbose)
end
return cliqSubFg
end
"""
$SIGNATURES
Build a new subgraph from `fgl<:AbstractDFG` containing all variables and factors
associated with `cliq`. Additionally add the upward message prior factors as
needed for belief propagation (inference).
Notes
- `cliqsym::Symbol` defines the cliq where variable appears as a frontal variable.
- `varsym::Symbol` defaults to the cliq frontal variable definition, but a
  separator variable can be used instead if required.
DevNotes
- TODO review, are all updates atomic?? Then perhaps in-memory only can be reduced to references back to csmc.dfg.
"""
function buildCliqSubgraph(
dfg::AbstractDFG,
cliq::TreeClique,
subfg::InMemoryDFGTypes = LocalDFG(; solverParams = getSolverParams(dfg));
solvable::Int = 1,
verbose::Bool = false,
)
#TODO why was solvable hardcoded to 1?
buildCliqSubgraph!(subfg, dfg, cliq; solvable = solvable, verbose = verbose)
return subfg
end
function buildCliqSubgraph(
fgl::AbstractDFG,
treel::AbstractBayesTree,
cliqsym::Symbol,
subfg::InMemoryDFGTypes = LocalDFG(; solverParams = getSolverParams(fgl));
solvable::Int = 1,
verbose::Bool = false,
)
#
buildCliqSubgraph!(
subfg,
fgl,
getClique(treel, cliqsym);
solvable = solvable,
verbose = verbose,
)
return subfg
end
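# Example (sketch, assuming :x3 is a frontal variable of some clique in `tree`):
#   sfg = buildCliqSubgraph(fg, tree, :x3)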
#
"""
$SIGNATURES
Transfer contents of `src::AbstractDFG` variables `syms::Vector{Symbol}` to `dest::AbstractDFG`.
Notes
- Reads, `dest` := `src`, for all `syms`
"""
function transferUpdateSubGraph!(
dest::AbstractDFG,
src::AbstractDFG,
syms::Vector{Symbol} = union(ls(src)...),
logger = ConsoleLogger();
updatePPE::Bool = true,
solveKey::Symbol = :default,
)
#
with_logger(logger) do
@info "transferUpdateSubGraph! -- syms=$syms"
end
# transfer specific fields into dest from src
for var in (x -> getVariable(src, x)).(syms)
# copy not required since a broadcast is used internally
updateVariableSolverData!(
dest,
var,
solveKey,
false,
[:val; :bw; :infoPerCoord; :solvedCount; :initialized];
warn_if_absent = false,
)
if updatePPE
# create ppe on new key using defaults, TODO improve
if haskey(getPPEDict(var), solveKey)
DFG.updatePPE!(dest, var, solveKey; warn_if_absent = false)
else
ppe = calcPPE(var; ppeKey = solveKey)
addPPE!(dest, var.label, ppe)
end
end
end
return nothing
end
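# Example (sketch, assuming `cliqSubFg` was solved separately and shares :x1, :x2 with `fg`):
#   transferUpdateSubGraph!(fg, cliqSubFg, [:x1; :x2])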
#
# tether utils
"""
$SIGNATURES
Standard mean and covariance propagation for linear systems. Directly equivalent to Kalman filtering methods.
Notes
- Does the proper continuous (Qc) to discrete process noise (Qd) calculation -- as per Farrell, 2008.
- Used downstream for in-time Gaussian mean and covariance propagation.
"""
function cont2disc(
F::Matrix{Float64},
G::Matrix{Float64},
Qc::Matrix{Float64},
dt::Float64,
Phik::Matrix{Float64} = Matrix{Float64}(LinearAlgebra.I, 0, 0),
)
#
fr, fc = size(F)
gr, gc = size(G)
# two bits of new memory allocated
M1 = zeros(fc + gc, fc + gc)
M2 = zeros(fr + fc, fr + fc)
M1[1:fr, 1:fc] = F
M1[1:gr, (fc + 1):end] = G #1:gr,(fc+1):(fc+gc)
# must convert to propagateLinSystem call, use trapezoidal
Md1 = exp(M1 * dt) # heavy lifting here
Phi = size(Phik, 1) == 0 ? Md1[1:fr, 1:fc] : Phik
Gamma = Md1[1:fr, (fc + 1):end]
#M2 = [[-F';(G*Qc*G')']';[zeros(9,9);F]'] # easy concat
GQG = (G * Qc * G')
gqgr, gqgc = size(GQG)
M2[1:fc, 1:fr] = -F
M2[1:fr, (fc + 1):end] = GQG
M2[(fr + 1):end, (fc + 1):end] = F'
Md2 = exp(M2 * dt) # heavy lifting here
Qd = Phi * Md2[1:fr, (fc + 1):end] #Qd = Phi*(Md2[1:fr,(fc+1):end])
# Qd = GQG*dt;
return Phi, Gamma, Qd
end
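# Example (sketch): discretize a 1D constant-velocity model over dt = 0.1s,
# with a hypothetical continuous noise intensity Qc:
#   F = [0.0 1.0; 0.0 0.0]          # state: [position; velocity]
#   G = reshape([0.0, 1.0], 2, 1)   # process noise enters via acceleration
#   Qc = 0.01*ones(1, 1)
#   Phi, Gamma, Qd = cont2disc(F, G, Qc, 0.1)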
"""
$SIGNATURES
Helper function to modify factor connectivity to variables.
Notes
- Developed for updating a dead reckoning odometry factor.
- Arguments are order sensitive.
"""
function rebaseFactorVariable!(
dfg::AbstractDFG,
fctsym::Symbol,
newvars::Vector{Symbol};
rmDisconnected::Bool = true,
autoinit::Bool = false,
)::Nothing
#
# check that all new variables are available
@assert sum(map(x -> exists(dfg, x), newvars)) == length(newvars)
# get existing factor details
fct = getFactor(dfg, fctsym)
fcttype = getFactorType(fct)
mh = getMultihypoDistribution(fct)
mh = isnothing(mh) ? Float64[] : mh
# get old vars
oldvars = getVariableOrder(fct)
# delete old factor from graph
deleteFactor!(dfg, fctsym)
# add the factor back into graph against new variables
addFactor!(dfg, newvars, fcttype; graphinit = autoinit, multihypo = mh)
# clean up disconnected variables if requested
if rmDisconnected
for ov in oldvars
# find variables that are not connected to anything
if length(ls(dfg, ov)) == 0
deleteVariable!(dfg, ov)
end
end
end
return nothing
end
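# Example (sketch, assuming factor :x9x10f1 and variables :x9, :x11 exist in `fg`):
#   rebaseFactorVariable!(fg, :x9x10f1, [:x9; :x11])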
"""
$SIGNATURES
Accumulate chains of binary factors---potentially starting from a prior---as a parametric mean value only.
Notes
- Not used during tree inference.
- Expected uses are for user analysis of factors and estimates.
- real-time dead reckoning chain prediction.
- Returns mean value as coordinates
DevNotes
- TODO consolidate with similar [`approxConvBelief`](@ref)
- TODO compare consolidate with [`solveParametricConditionals`](@ref)
- TODO compare consolidate with [`solveFactorParametric`](@ref)
Related:
[`approxConvBelief`](@ref), [`solveFactorParametric`](@ref), `RoME.MutablePose2Pose2Gaussian`
"""
function accumulateFactorMeans(
dfg::AbstractDFG,
fctsyms::AbstractVector{Symbol};
solveKey::Symbol=:default
)
## get the starting estimate
nextidx = 1
onePrior = false
currsym = :__nothing__
val = if isPrior(dfg, fctsyms[nextidx])
# if first factor is prior
# @assert !onePrior
onePrior = true
val, = getMeasurementParametric(getFactorType(dfg, fctsyms[nextidx]))
# val = getFactorMean(dfg, fctsyms[nextidx])
currsym = ls(dfg, fctsyms[nextidx])[1] # prior connected to only one variable
nextidx += 1
val
else
# get first value from current variable estimate
vars = getVariableOrder(dfg, fctsyms[nextidx])
nextsym =
1 < length(fctsyms) ? intersect(vars, ls(dfg, fctsyms[nextidx + 1])) : vars[end]
currsym = 1 < length(fctsyms) ? setdiff(vars, nextsym)[1] : vars[1]
calcPPE(dfg, currsym; solveKey).suggested
end
srcsym = currsym
# Propagate the parametric value along the factor chain
for fct in map(x -> getFactor(dfg, x), fctsyms[nextidx:end])
# first find direction of solve
vars = getVariableOrder(fct)
trgsym = setdiff(vars, [srcsym])[1]
val = solveFactorParametric(dfg, fct, [srcsym => val;], trgsym; solveKey)
srcsym = trgsym
end
return val
end
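# Example (sketch, assuming a prior :x0f1 followed by binary factors :x0x1f1 and :x1x2f1):
#   val = accumulateFactorMeans(fg, [:x0f1; :x0x1f1; :x1x2f1])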
function isCliqInitialized(cliq::TreeClique)::Bool
return getCliqueData(cliq).initialized in [INITIALIZED; UPSOLVED]
end
function isCliqUpSolved(cliq::TreeClique)::Bool
return getCliqueData(cliq).initialized == UPSOLVED
end
"""
$SIGNATURES
Return the most likely variable ordering for initialization (assuming an up-solve
sequence).
Notes:
- sorts ids (labels) by increasing number of connected factors, using the clique subfg with messages already included.
"""
function getCliqVarInitOrderUp(subfg::AbstractDFG)
# rules to explore dimension from one to the other?
# get all variable ids and number of associated factors
B, varLabels, facLabels = getBiadjacencyMatrix(subfg)
nfcts = sum(B; dims = 1)[:]
# variables with priors
varswithpriors = listNeighbors.(subfg, lsfPriors(subfg))
singids = union(Symbol[], varswithpriors...)
# sort permutation order for increasing number of factor association
nfctsp = sortperm(nfcts)
sortedids = varLabels[nfctsp]
  # organize the prior variables separately with ascending factor count
initorder = intersect(sortedids, singids)
# in ascending order of number of factors
union!(initorder, sortedids)
return initorder
end
"""
$SIGNATURES
Special function to do initialization in downward direction, assuming that not all
variables can be initialized. Relies on outside down messages.
Notes:
- assumed this `cliq` is being initialized from a previous `:needdownmsg` status.
- will use all possible local factors of the clique in the initialization process
- similar to upward initialization, but uses different message structure
- first draft assumes upward messages will not be used,
- full up solve still required which explicitly depends on upward messages.
Dev Notes
- Streamline add/delete msg priors from calling function and csm.
- TODO replace with nested 'minimum degree' type variable ordering.
"""
function getCliqInitVarOrderDown(
dfg::AbstractDFG,
cliq::TreeClique,
dwnkeys::Vector{Symbol},
) # downmsgs
#
allsyms = getCliqAllVarIds(cliq)
# convert input downmsg var symbols to integers (also assumed as prior beliefs)
# make sure ids are in the clique set, since parent may have more variables.
dwnmsgsym = intersect(dwnkeys, DFG.listVariables(dfg))
dwnvarids = intersect(allsyms, dwnmsgsym)
# find any other prior factors (might have partials)
prvarids = getCliqVarIdsPriors(cliq, allsyms, true)
hassinglids = union(dwnvarids, prvarids)
# Get all other variable factor counts
nfcts = getCliqNumAssocFactorsPerVar(cliq)
# add msg marginal prior (singletons) to number of factors
for msid in dwnmsgsym
nfcts[msid .== allsyms] .+= 1
end
# sort permutation order for increasing number of factor association
nfctsp = sortperm(nfcts)
sortedids = allsyms[nfctsp]
# all singleton variables
singids = union(prvarids, dwnvarids)
  # organize the prior variables separately with ascending factor count
initorder = Symbol[] #zeros(Int, 0)
for id in sortedids
if id in singids
push!(initorder, id)
end
end
# sort remaining variables for increasing associated factors
for id in sortedids
if !(id in initorder)
push!(initorder, id)
end
end
# return variable order
return initorder::Vector{Symbol}
end
function _isInitializedOrInitSolveKey(
var::DFGVariable,
solveKey::Symbol = :default;
N::Int = 100,
)
# TODO, this solveKey existence test should probably be removed?
if !(solveKey in listSolveKeys(var))
varType = getVariableType(var)
setDefaultNodeData!(
var,
0,
N,
getDimension(varType);
solveKey = solveKey,
initialized = false,
varType = varType,
dontmargin = false,
)
#
# data = getSolverData(var, solveKey)
# if data === nothing
# end
return false
end
# regular is initialized check, this is fine
isinit = isInitialized(var, solveKey)
return isinit
end
"""
$SIGNATURES
Return true if clique has completed the local upward direction inference procedure.
"""
isUpInferenceComplete(cliq::TreeClique) = getCliqueData(cliq).upsolved
function areCliqVariablesAllInitialized(
dfg::AbstractDFG,
cliq::TreeClique,
solveKey::Symbol = :default;
N::Int = getSolverParams(dfg).N,
)
#
allids = getCliqAllVarIds(cliq)
isallinit = true
for vid in allids
var = DFG.getVariable(dfg, vid)
isallinit &= _isInitializedOrInitSolveKey(var, solveKey; N = N)
# isallinit &= isInitialized(var, solveKey)
end
return isallinit
end
"""
$SIGNATURES
Return true if all variables in clique are considered marginalized (and initialized).
"""
function areCliqVariablesAllMarginalized(subfg::AbstractDFG, cliq::TreeClique)
for vsym in getCliqAllVarIds(cliq)
vert = getVariable(subfg, vsym)
if !isMarginalized(vert) || !isInitialized(vert)
return false
end
end
return true
end
function printCliqInitPartialInfo(
subfg,
cliq,
solveKey::Symbol = :default,
logger = ConsoleLogger(),
)
varids = getCliqAllVarIds(cliq)
initstatus = Vector{Bool}(undef, length(varids))
initpartial = Vector{Float64}(undef, length(varids))
for i = 1:length(varids)
initstatus[i] = isInitialized(subfg, varids[i], solveKey) # getSolverData(getVariable(subfg, varids[i]), solveKey).initialized
initpartial[i] = -1 # getSolverData(getVariable(subfg, varids[i]), solveKey).inferdim
end
with_logger(logger) do
tt = split(string(now()), 'T')[end]
@info "$tt, cliq $(cliq.id), PARINIT: $varids | $initstatus | $initpartial"
end
end
#
#
"""
$SIGNATURES
Get the main factor graph stored in a history object from a particular step in CSM.
Related
getCliqSubgraphFromHistory, printCliqHistorySummary, printCliqSummary
"""
getGraphFromHistory(hist::Vector{<:Tuple}, step::Int) = hist[step][4].dfg
"""
$SIGNATURES
Get the cliq sub graph fragment stored in a history object from a particular step in CSM.
Related
getGraphFromHistory, printCliqHistorySummary, printCliqSummary
"""
getCliqSubgraphFromHistory(hist::Vector{<:Tuple}, step::Int) = hist[step][4].cliqSubFg
function getCliqSubgraphFromHistory(
tree::AbstractBayesTree,
hists::Dict{Symbol, Tuple},
frnt::Symbol,
step::Int,
)
return getCliqSubgraphFromHistory(hists[frnt], step)
end
function printCliqSummary(
dfg::G,
tree::AbstractBayesTree,
frs::Symbol,
logger = ConsoleLogger(),
) where {G <: AbstractDFG}
#
return printCliqSummary(dfg, getClique(tree, frs), logger)
end
"""
$(SIGNATURES)
Calculate a fresh (single step) approximation to the variable `sym` in clique `cliq` as though during the upward message passing. The full inference algorithm may repeatedly calculate successive approximations to the variables based on the structure of the clique, factors, and incoming messages.
Which clique is used is defined by the frontal variable symbol (`cliq` in this case) -- see `getClique(...)` for more details. The `sym` symbol indicates which variable of this clique is to be calculated. **Note** that the `sym` variable must appear in the clique where `cliq` is a frontal variable.
"""
function treeProductUp(
fg::AbstractDFG,
tree::AbstractBayesTree,
cliq::Symbol,
sym::Symbol;
N::Int = 100,
dbg::Bool = false,
)
#
cliq = getClique(tree, cliq)
cliqdata = getCliqueData(cliq)
# perform the actual computation
pGM, fulldim = predictbelief(fg, sym, :; N = N, dbg = dbg)
return pGM, nothing
end
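# Example (sketch, assuming :x2 is a frontal variable of its clique in `tree`):
#   pts, _ = treeProductUp(fg, tree, :x2, :x2)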
"""
$(SIGNATURES)
Calculate a fresh---single step---approximation to the variable `sym` in clique `cliq` as though during the downward message passing. The full inference algorithm may repeatedly calculate successive approximations to the variable based on the structure of variables, factors, and incoming messages to this clique.
Which clique is used is defined by the frontal variable symbol (`cliq` in this case) -- see `getClique(...)` for more details. The `sym` symbol indicates which variable of this clique is to be calculated. **Note** that the `sym` variable must appear in the clique where `cliq` is a frontal variable.
"""
function treeProductDwn(
fg::G,
tree::AbstractBayesTree,
cliq::Symbol,
sym::Symbol;
N::Int = 100,
dbg::Bool = false,
) where {G <: AbstractDFG}
#
@warn "treeProductDwn might not be working properly at this time. (post DFG v0.6 upgrade maintenance required)"
cliq = getClique(tree, cliq)
cliqdata = getCliqueData(cliq)
# get the local variable id::Int identifier
# vertid = fg.labelDict[sym]
# get all the incoming (upward) messages from the tree cliques
# convert incoming messages to Int indexed format (semi-legacy format)
cl = parentCliq(tree, cliq)
msgdict = getDwnMsgs(cl[1])
dict = Dict{Int, TreeBelief}()
for (dsy, btd) in msgdict
dict[fg.IDs[dsy]] =
TreeBelief(btd.val, btd.bw, btd.infoPerCoord, getVariableType(getVariable(fg, sym)))
end
dwnmsgssym = LikelihoodMessage[LikelihoodMessage(dict);]
# perform the actual computation
pGM, fulldim = predictbelief(fg, sym, :; N = N, dbg = dbg)
return pGM, nothing, sym, dwnmsgssym
end
"""
$SIGNATURES
Print one specific line of a clique state machine history.
Related:
[`printCliqHistorySequential`](@ref), [`printCliqHistorySummary`](@ref)
"""
function printHistoryLine(
fid,
hi::CSMHistoryTuple,
cliqid::AbstractString = "",
seq::Int = 0,
)
#
# global sequence number
first = clampBufferString("$seq", 5)
# 5.13
first *= clampBufferString("$cliqid.$(string(hi[2]))", 6)
# time
first *= clampBufferString(string(split(string(hi[1]), 'T')[end]), 14)
# next function
nextfn = split(split(string(hi[3]), '.')[end], '_')[1]
first *= clampBufferString(nextfn * " ", 20, 18)
first *= " | "
first *= clampBufferString(string(getCliqueStatus(hi[4].cliq)) * " ", 9, 7)
# parent status
first *= " P "
downRxMessage = getMessageBuffer(hi[4].cliq).downRx
toadd = if !isnothing(downRxMessage)
#TODO maybe don't use tree here
"$(getParent(hi[4].tree, hi[4].cliq)[1].id):$(downRxMessage.status)"
else
" ----"
end
first *= clampBufferString(toadd * " ", 9, 7)
# children status
first = first * "C "
upRxMessages = getMessageBuffer(hi[4].cliq).upRx
# all_child_status = map((k,msg) -> (k,msg.status), pairs(upRxMessages))
if length(upRxMessages) > 0
for (k, msg) in upRxMessages
toadd = string(k) * ":" * string(msg.status) * " "
first *= clampBufferString(toadd * " ", 8, 7)
end
else
first *= clampBufferString(" ----", 8)
end
# sibling status # TODO JT removed but kept for future if needed
# first *= "|S| "
# if 0 < length(hi[4].parentCliq)
# frt = (hi[4].parentCliq[1] |> getFrontals)[1]
# childs = getChildren(hi[4].tree, frt)
# # remove current clique to leave only siblings
# filter!(x->x.index!=hi[4].cliq.id.value, childs)
# for ch in childs
# first = first*"$(ch.index)"*string(getCliqueStatus(ch))*" "
# end
# end
return println(fid, first)
end
"""
$SIGNATURES
Print a short summary of state machine history for a clique solve.
Related:
getTreeAllFrontalSyms, animateCliqStateMachines, printHistoryLine, printCliqHistorySequential
"""
function printCliqHistorySummary(
fid,
hist::Vector{CSMHistoryTuple},
cliqid::AbstractString = "",
)
if length(hist) == 0
@warn "printCliqHistorySummary -- No CSM history found."
end
for hi in hist
printHistoryLine(fid, hi, cliqid)
end
return nothing
end
function printCliqHistorySummary(hist::Vector{CSMHistoryTuple}, cliqid::AbstractString = "")
#
return printCliqHistorySummary(stdout, hist, cliqid)
end
function printCliqHistorySummary(
hists::Dict{Int, Vector{CSMHistoryTuple}},
tree::AbstractBayesTree,
sym::Symbol,
)
#
hist = hists[getClique(tree, sym).id]
return printCliqHistorySummary(stdout, hist, string(getClique(tree, sym).id))
end
# TODO maybe Base. already has something like this Union{UnitRange, AbstractVector, etc.}
const CSMRangesT{T} = Union{T, UnitRange{T}, <:AbstractVector{T}}
const CSMRanges = CSMRangesT{Int}
# old
# const CSMTupleRangesT{T} = Union{Tuple{T,T},Tuple{T,UnitRange{T}},Tuple{T,AbstractVector{T}},Tuple{UnitRange{T},T},Tuple{UnitRange{T},UnitRange{T}},Tuple{AbstractVector{T},T},Tuple{AbstractVector{T},AbstractVector{T}},Tuple{AbstractVector{T},UnitRange{T}} }
"""
$SIGNATURES
Print a sequential summary lines of clique state machine histories in hists::Dict.
Notes
- Slices are allowed, see examples.
Example
```julia
printCSMHistorySequential(hists)
printCSMHistorySequential(hists, 2=>46)
printCSMHistorySequential(hists, 1=>11:15)
printCSMHistorySequential(hists, [1,4,6]=>11:15)
printCSMHistorySequential(hists, [2=>45:52; 1=>10:15])
```
DevNotes
- TODO perhaps move some of this functionality upstream to FSM
- TODO upgrade to default `whichstep = :=>:` -- i.e.
- add dispatch for `(:) |> typeof == Colon`,
- `5:6=>:`.
- TODO also add a elements between `Tuple{<:CSMRanges, Pair{<:CSMRanges,<:CSMRanges}}` option
- ([1;3], 1=>5:7) which will print all steps from CSM 1 and 3, which occur between 1=>5 and 1=>7.
- TODO maybe also `Dict(5=>5:8, 8=>20:25)`, or `Dict(2:5=>[1;3], 10=>1:5)`.
Related:
printHistoryLine, printCliqHistory
"""
function printCSMHistorySequential(
hists::Dict{Int, Vector{CSMHistoryTuple}},
whichsteps::Union{Nothing, Vector{<:Pair{<:CSMRanges, <:CSMRanges}}} = nothing,
fid = stdout,
)
#
# vectorize all histories in single Array
allhists = Vector{CSMHistoryTuple}()
alltimes = Vector{DateTime}()
allcliqids = Vector{Int}()
for (cid, hist) in hists, hi in hist
push!(allhists, hi)
push!(alltimes, hi[1])
push!(allcliqids, cid)
end
# sort array by timestamp element
pm = sortperm(alltimes)
allhists_ = allhists[pm]
alltimes_ = alltimes[pm]
allcliqids_ = allcliqids[pm]
# print each line of the sorted array with correct cliqid marker
for idx = 1:length(alltimes)
hiln = allhists_[idx]
# show only one line if whichstep is not nothing
inSliceList = whichsteps === nothing
if !inSliceList
for whichstep in whichsteps
inSliceList && break
inSliceList =
inSliceList || (allcliqids_[idx] in whichstep[1] && hiln[2] in whichstep[2])
end
end
if inSliceList
printHistoryLine(fid, hiln, string(allcliqids_[idx]), idx)
end
end
return nothing
end
function printCSMHistorySequential(
hists::Dict{Int, Vector{CSMHistoryTuple}},
whichstep::Pair{<:CSMRanges, <:CSMRanges},
fid = stdout,
)
#
return printCSMHistorySequential(hists, [whichstep;], fid)
end
function printCSMHistorySequential(
hists::Dict{Int, Vector{CSMHistoryTuple}},
fid::AbstractString,
)
#
@info "printCliqHistorySequential -- assuming file request, writing history to $fid"
file = open(fid, "w")
printCSMHistorySequential(hists, nothing, file)
close(file)
return nothing
end
"""
$SIGNATURES
Print one line of lanes summarizing all clique state machine histories.
Notes
- hiVec is vector of all cliques (i.e. lanes) to print as one LINE into `fid`
- contains `::Tuple{Int,..}` with global counter (not the default CSM counter)
- Vector of `CSMHistoryTuple`
Related:
printCliqHistoryLogical, printCliqHistoryLine
"""
function printHistoryLane(
fid,
linecounter::Union{Int, String},
hiVec::Vector{<:Union{NamedTuple, Tuple}},
seqLookup::NothingUnion{Dict{Pair{Int, Int}, Int}} = nothing,
)
#
## build a string
line = clampBufferString("$linecounter", 4)
for counter = 1:length(hiVec)
# lane marker
line *= "| "
if !isassigned(hiVec, counter)
line *= clampBufferString("", 19)
continue
end
hi = hiVec[counter]
# global counter
useCount = seqLookup !== nothing ? seqLookup[(hi[4].cliq.id.value => hi[2])] : hi[2]
line *= clampBufferString("$(useCount)", 4)
# next function
nextfn = split(string(hi[3]), '.')[end]
line *= clampBufferString(nextfn, 10, 9)
# clique status
st = hi[4] isa String ? hi[4] : string(getCliqueStatus(hi[4].cliq))
line *= clampBufferString(st, 5, 4)
end
## print the string
return println(fid, line)
end
"""
$SIGNATURES
Print history in swimming lanes, side by side with global sequence counter.
Examples
```julia
printCSMHistoryLogical(hist)
printCSMHistoryLogical(hists, order=[4;3], printLines=2:5)
# or to a IOStream, file, network, etc
fid = open(joinLogPath(fg, "CSMHistCustom.txt"),"w")
printCSMHistoryLogical(hist, fid)
close(fid)
```
DevNotes
- `order` should be flexible like `Sequential` and `<:CSMRanges`.
"""
function printCSMHistoryLogical(
hists::Dict{Int, Vector{CSMHistoryTuple}},
fid = stdout;
order::AbstractVector{Int} = sort(collect(keys(hists))),
printLines = 1:99999999,
)
#
# vectorize all histories in single Array
allhists = Vector{CSMHistoryTuple}()
alltimes = Vector{DateTime}()
allcliqids = Vector{Int}()
# "lanes" (i.e. individual cliques)
numLanes = length(order)
# "lines" (i.e. CSM steps)
maxLines = [0; 0]
for (cid, hist) in hists
# find max number of lines to print later
maxLines[2] = length(hist)
maxLines[1] = maximum(maxLines)
for hi in hist
push!(allhists, hi)
push!(alltimes, hi[1])
push!(allcliqids, cid)
end
end
maxLines[1] = minimum([maxLines[1]; printLines[end]])
# sort array by timestamp element
pm = sortperm(alltimes)
allhists_ = allhists[pm]
alltimes_ = alltimes[pm]
allcliqids_ = allcliqids[pm]
# first get the global sequence (invert order dict as bridge table)
seqLookup = Dict{Pair{Int, Int}, Int}()
for idx = 1:length(alltimes)
hiln = allhists_[idx]
seqLookup[(hiln[4].cliq.id.value => hiln[2])] = idx
end
# print the column titles
titles = Vector{Tuple{String, Int, String, String}}()
for ord in order
csym = 0 < length(hists[ord]) ? getFrontals(hists[ord][1][4].cliq)[1] |> string : ""
csym = clampBufferString("$csym", 9)
push!(titles, ("", ord, csym, clampBufferString("", 10)))
end
printHistoryLane(fid, "", titles)
print(fid, "----")
for i = 1:numLanes
print(fid, "+--------------------")
end
println(fid, "")
glbSeqCt = 0 # Ref{Int}(0)
## repeat for the maximum number of "lines" (i.e. CSM steps)
for idx = printLines[1]:maxLines[1]
## build each line as vector of "lanes" (i.e. individual cliques)
allLanes = Vector{CSMHistoryTuple}(undef, numLanes)
laIdx = 0
for laId in order
laIdx += 1
# if history data exists for this line (idx) and lane (laId), then build a new lane Tuple
if idx <= length(hists[laId])
# FIXME, swat first element with global counter (not local as stored in hists)
# @show hists[laId][idx][3]
allLanes[laIdx] = hists[laId][idx]
end
end
#$cliqid.$(string(hi[2]))
# glbSeqCt += 1
printHistoryLane(fid, idx, allLanes, seqLookup)
end
end
"""
$SIGNATURES
Repeat a solver state machine step -- useful for debugging.
Notes
- use in combination with `solveTree!(fg, recordcliqs=[:x0; :x7; ...])` -- i.e. by clique frontals as identifier
- to record everything, one can do: `recordcliqs=ls(fg)`.
- `duplicate` avoids changing history or prime data in `hists`.
- Replaces old API `sandboxCliqResolveStep`
- Consider using this in combination with tools like [Revise.jl](https://github.com/timholy/Revise.jl)
- On by default in VSCode.
- Internally sets `csmc.enableLogging=false`
Example
```julia
using IncrementalInference
# generate a factor graph
fg = generateGraph_Kaess()
# solve and record everything
smtasks = Task[]
tree, _, = solveTree!(fg, smtasks=smtasks, recordcliqs=ls(fg));
# draw Bayes tree with graphviz and xdot installed
drawTree(tree, show=true)
# fetch histories
hists = fetchCliqHistoryAll!(smtasks);
# check a new csmc before step 2
csmc_ = repeatCSMStep!(hists, 1, 1)
# For use with VSCode debugging
@enter repeatCSMStep!(hists, 1, 1)
# or perhaps test a longer chain of changes
hists_ = deepcopy(hists)
repeatCSMStep!(hists_, 1, 4, duplicate=false)
repeatCSMStep!(hists_, 1, 5, duplicate=false)
repeatCSMStep!(hists_, 1, 6, duplicate=false)
```
DevNotes
- TODO consolidate upstream with `FSM.sandboxStateMachineStep`
Related
[`solveTree!`](@ref), [`solveCliqUp!`](@ref), [`fetchCliqHistoryAll`](@ref), [`printCSMHistoryLogical`](@ref), [`printCSMHistorySequential`](@ref), cliqHistFilterTransitions
"""
function repeatCSMStep!(
hist::AbstractVector{<:CSMHistoryTuple},
step::Int;
duplicate::Bool = true,
enableLogging::Bool = false,
)
#
  # the function at step
fnc_ = hist[step].f
# the data before step
csmc_ = (duplicate ? x -> deepcopy(x) : x -> x)(hist[step].csmc)
csmc_.enableLogging = enableLogging
csmc_.logger = enableLogging ? SimpleLogger() : SimpleLogger(Base.devnull)
# run the step
newfnc_ = fnc_(csmc_)
return newfnc_, csmc_
end
function repeatCSMStep!(
hists::Dict{Int, <:AbstractVector{CSMHistoryTuple}},
csmid::Int,
step::Int;
duplicate::Bool = true,
enableLogging::Bool = false,
)
#
return repeatCSMStep!(
hists[csmid],
step;
duplicate = duplicate,
enableLogging = enableLogging,
)
end
"""
$SIGNATURES
Reattach a CSM's data container after the deepcopy used from recordcliq.
MIGHT BE OBSOLETE
"""
function attachCSM!(
csmc::CliqStateMachineContainer,
dfg::AbstractDFG,
tree::MetaBayesTree;
logger = SimpleLogger(),
)
#
# csmc = csmc__
@error("attachCSM! has been updated without testing and might not work as you intended.")
csmc.dfg = dfg
csmc.tree = tree
csmc.logger = logger # TODO option to reopen and append to previous logger file
@info "attaching csmc and dropping any contents from csmc's previously held (copied) message channels."
cid = csmc.cliq.id.value
# pids = csmc.parentCliq .|> x->x.id
# cids = csmc.childCliqs .|> x->x.id
csmc.cliq = getClique(tree, cid)
# csmc.parentCliq = pids .|> x->getindex(tree.cliques, x)
# csmc.childCliqs = cids .|> x->getindex(tree.cliques, x)
return csmc
end
"""
$SIGNATURES
Draw many images in '/tmp/?/csm_%d.png' representing time synchronized state machine
events for cliques `cliqsyms::Vector{Symbol}`.
Notes
- State history must have previously been recorded (stored in tree cliques).
Related
printCliqHistorySummary
"""
function animateCliqStateMachines(
tree::AbstractBayesTree,
cliqsyms::Vector{Symbol},
hists::Dict{Symbol, Tuple};
frames::Int = 100,
)
#
error("`animateCliqStateMachines` is outdated")
startT = Dates.now()
stopT = Dates.now()
# get start and stop times across all cliques
first = true
for sym in cliqsyms
hist = hists[sym] #getCliqSolveHistory(tree, sym)
if hist[1][1] < startT
startT = hist[1][1]
end
if first
stopT = hist[end][1]
first = false
end
if stopT < hist[end][1]
stopT = hist[end][1]
end
end
# export all figures
folders = String[]
for sym in cliqsyms
hist = hists[sym] #getCliqSolveHistory(tree, sym)
# hist = getCliqSolveHistory(tree, sym)
retval = animateStateMachineHistoryByTime(
hist;
frames = frames,
folder = "caesar/animatecsm/cliq$sym",
title = "$sym",
startT = startT,
stopT = stopT,
rmfirst = false,
)
push!(folders, "cliq$sym")
end
return folders
end
"""
$SIGNATURES
Return state machine transition steps from history that match the given `nextfnc::Function`.
Related:
printCliqHistorySummary, filterHistAllToArray, sandboxCliqResolveStep
"""
function cliqHistFilterTransitions(hist::Vector{CSMHistoryTuple}, nextfnc::Function)
ret = Vector{CSMHistoryTuple}()
for hi in hist
if hi[3] == nextfnc
push!(ret, hi)
end
end
return ret
end
"""
$SIGNATURES
Return state machine transition steps from all cliq histories with transition `nextfnc::Function`.
Related:
printCliqHistorySummary, cliqHistFilterTransitions, sandboxCliqResolveStep
"""
function filterHistAllToArray(
tree::AbstractBayesTree,
hists::Dict{Symbol, Tuple},
frontals::Vector{Symbol},
nextfnc::Function,
)
error("filterHistAllToArray needs to be updated for new CSM")
ret = Vector{CSMHistoryTuple}()
for sym in frontals
hist = hists[sym] # getCliqSolveHistory(tree, sym)
fih = cliqHistFilterTransitions(hist, nextfnc)
for fi in fih
push!(ret, fi)
end
end
return ret
end
"""
$SIGNATURES
Animate multiple clique state machines on the same graphviz visualization. Renders according to
linear time for all provided histories.
Example:
```julia
using Caesar
# build a factor graph
fg = initfg()
# addVariable!(...)
# addFactor!(...)
# ...
fsy = getTreeAllFrontalSyms(fg, tree) # for later use
# perform inference to find the factor graph marginal posterior estimates
tree = solveTree!(fg, recordcliqs=fsy)
# generate frames in standard location /tmp/caesar/csmCompound/
# requires: sudo apt-get install graphviz
csmAnimate(fg, tree, fsy, frames=500)
# to render and show from default location (might require)
# sudo apt-get install ffmpeg vlc
# .ogv [Totem Ubuntu default]
Base.rm("/tmp/caesar/csmCompound/out.ogv")
run(`ffmpeg -r 10 -i /tmp/caesar/csmCompound/csm_%d.png -c:v libtheora -vf fps=25 -pix_fmt yuv420p -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" -q 10 /tmp/caesar/csmCompound/out.ogv`)
run(`totem /tmp/caesar/csmCompound/out.ogv`)
# h.264 [VLC not default]
Base.rm("/tmp/caesar/csmCompound/out.mp4")
run(`ffmpeg -r 10 -i /tmp/caesar/csmCompound/csm_%d.png -c:v libx264 -vf fps=25 -pix_fmt yuv420p -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" /tmp/caesar/csmCompound/out.mp4`)
run(`vlc /tmp/caesar/csmCompound/out.mp4`)
```
"""
function animateCSM(
tree::AbstractBayesTree,
autohist::Dict{Int, T};
frames::Int = 100,
interval::Int = 2,
dpi::Int = 100,
rmfirst::Bool = true,
folderpath::AbstractString = "/tmp/caesar/csmCompound/",
fsmColors::Dict{Symbol, String} = Dict{Symbol, String}(),
defaultColor::AbstractString = "red",
) where {T <: AbstractVector}
#
easyNames = Dict{Symbol, Int}()
hists =
Dict{Symbol, Vector{Tuple{DateTime, Int64, Function, CliqStateMachineContainer}}}()
for (id, hist) in autohist
frtl = getFrontals(getClique(tree, id))
hists[frtl[1]] = Vector{Tuple{DateTime, Int64, Function, CliqStateMachineContainer}}()
for hi in hist
push!(hists[frtl[1]], (hi.timestamp, hi.id, hi.f, hi.csmc)) # Tuple.(hist)
end
easyNames[frtl[1]] = id
end
startT = Dates.now()
stopT = Dates.now()
# get start and stop times across all cliques
first = true
for (csym, hist) in hists
# global startT, stopT
@show csym
if hist[1][1] < startT
startT = hist[1][1]
end
if first
stopT = hist[end][1]
first = false
end
if stopT < hist[end][1]
stopT = hist[end][1]
end
end
# export all figures
if rmfirst
@warn "Removing $folderpath in preparation for new frames."
Base.rm("$folderpath"; recursive = true, force = true)
end
function csmTreeAni(hl::Tuple, frame::Int, folderpath::AbstractString)
drawTree(
hl[4].tree;
show = false,
filepath = joinpath(folderpath, "tree_$frame.png"),
dpi = dpi,
)
return nothing
end
function autocolor_cb(hi::Tuple, csym::Symbol, aniT::DateTime)
retc = getCliqueDrawColor(hi[4].cliq)
return (retc === nothing ? "gray" : retc)
end
# animateStateMachineHistoryByTimeCompound(hists, startT, stopT, folder="caesar/csmCompound", frames=frames)
return FSM.animateStateMachineHistoryIntervalCompound(
hists;
easyNames = easyNames,
folderpath = folderpath,
interval = interval,
dpi = dpi,
draw_more_cb = csmTreeAni,
fsmColors = fsmColors,
defaultColor = defaultColor,
autocolor_cb = autocolor_cb,
)
end
"""
$SIGNATURES
Convenience function to assign and make video of CSM state machine for `cliqs`.
Notes
- Probably several teething issues still (lower priority).
- Use `assignhist` if solver params async was true, or errored.
Related
csmAnimate, printCliqHistorySummary
"""
function makeCsmMovie(
fg::AbstractDFG,
tree::AbstractBayesTree,
cliqs = ls(fg);
assignhist = nothing,
show::Bool = true,
filename::AbstractString = "/tmp/caesar/csmCompound/out.ogv",
frames::Int = 1000,
)
#
if assignhist != nothing
assignTreeHistory!(tree, assignhist)
end
csmAnimate(fg, tree, cliqs; frames = frames)
# Base.rm("/tmp/caesar/csmCompound/out.ogv")
run(
`ffmpeg -r 10 -i /tmp/caesar/csmCompound/csm_%d.png -c:v libtheora -vf fps=25 -pix_fmt yuv420p -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" -q 10 $filename`,
)
if show
@async run(`totem $filename`)
end
return filename
end
#
# likely to be deleted at some point
## =============================================================================
## Clique status accessors
## =============================================================================
"""
$SIGNATURES
Return `::Symbol` status a particular clique is in, with specific regard to solution
or numerical initialization status:
- :needdownmsg
- UPSOLVED
- DOWNSOLVED
- INITIALIZED
- MARGINALIZED
- NULL
Notes:
- `NULL` represents the first uninitialized state of a cliq.
"""
getCliqueStatus(cliqdata::BayesTreeNodeData) = cliqdata.status
getCliqueStatus(cliq::TreeClique) = getCliqueStatus(getCliqueData(cliq))
"""
$SIGNATURES
Set the initialization or solve status of this `cliq`.
"""
function setCliqueStatus!(cdat::BayesTreeNodeData, status::CliqStatus)
return cdat.status = status
end
function setCliqueStatus!(cliq::TreeClique, status::CliqStatus)
return setCliqueStatus!(getCliqueData(cliq), status)
end
## =============================================================================
## Regular up and down Message Registers/Channels, getters and setters
## =============================================================================
## =============================================================================
## Message channel put/take! + buffer message accessors
## =============================================================================
## -----------------------------------------------------------------------------
## UP
## -----------------------------------------------------------------------------
"""
$SIGNATURES
Get the message channel
"""
getMsgUpChannel(tree::MetaBayesTree, edge) = MetaGraphs.get_prop(tree.bt, edge, :upMsg)
"""
$SIGNATURES
Put a belief message on the up tree message channel `edge`. Blocks until a take! is performed by a different task.
"""
function putBeliefMessageUp!(tree::AbstractBayesTree, edge, beliefMsg::LikelihoodMessage)
# Blocks until data is available.
put!(getMsgUpChannel(tree, edge), beliefMsg)
return beliefMsg
end
"""
$SIGNATURES
Remove and return belief message from the up tree message channel edge. Blocks until data is available.
"""
function takeBeliefMessageUp!(tree::AbstractBayesTree, edge)
# Blocks until data is available.
beliefMsg = take!(getMsgUpChannel(tree, edge))
return beliefMsg
end
## -----------------------------------------------------------------------------
## DOWN
## -----------------------------------------------------------------------------
"""
$SIGNATURES
Get the message channel
"""
getMsgDwnChannel(tree::MetaBayesTree, edge) = MetaGraphs.get_prop(tree.bt, edge, :downMsg)
"""
$SIGNATURES
Put a belief message on the down tree message channel edge. Blocks until a take! is performed by a different task.
"""
function putBeliefMessageDown!(tree::AbstractBayesTree, edge, beliefMsg::LikelihoodMessage)
# Blocks until data is available.
put!(getMsgDwnChannel(tree, edge), beliefMsg)
return beliefMsg
end
"""
$SIGNATURES
Remove and return a belief message from the down tree message channel edge. Blocks until data is available.
"""
function takeBeliefMessageDown!(tree::AbstractBayesTree, edge)
# Blocks until data is available.
beliefMsg = take!(getMsgDwnChannel(tree, edge))
return beliefMsg
end
##==============================================================================
## Clique Message Buffers
##==============================================================================
"""
$SIGNATURES
Get the message buffer that is used to store messages in the clique between state machine steps.
Tx messages are currently used for debugging only and messages are recalculated on each pass.
"""
function getMessageBuffer(btnd::BayesTreeNodeData)
return btnd.messages
end
getMessageBuffer(clique::TreeClique) = getCliqueData(clique).messages
"""
$SIGNATURES
The up message received on the clique.
"""
getMessageUpRx(clique::TreeClique) = getMessageBuffer(clique).upRx
"""
$SIGNATURES
The down message received on the clique.
"""
getMessageDownRx(clique::TreeClique) = getMessageBuffer(clique).downRx
"""
$SIGNATURES
Used for debugging only, the up message that was sent by the clique
"""
getMessageUpTx(clique::TreeClique) = getMessageBuffer(clique).upTx
"""
$SIGNATURES
Used for debugging only, the down message that was sent by the clique
"""
getMessageDownTx(clique::TreeClique) = getMessageBuffer(clique).downTx
# init utils for tree based inference
## =============================================================================
# short preamble funcions
## =============================================================================
function convert(::Type{<:ManifoldKernelDensity}, src::TreeBelief)
return manikde!(getManifold(src.variableType), src.val; bw = src.bw[:, 1])
end
manikde!(em::TreeBelief) = convert(ManifoldKernelDensity, em)
## =============================================================================
# helper functions for tree message channels
## =============================================================================
"""
$SIGNATURES
Reset the state of all variables in a clique to not initialized.
Notes
- resets numerical values to zeros.
Dev Notes
- TODO not all kde manifolds will initialize to zero.
- FIXME channels need to be consolidated
"""
function resetCliqSolve!(
dfg::AbstractDFG,
treel::AbstractBayesTree,
cliq::TreeClique;
solveKey::Symbol = :default,
)
#
cda = getCliqueData(cliq)
vars = getCliqVarIdsAll(cliq)
for varis in vars
resetVariable!(dfg, varis; solveKey = solveKey)
end
# TODO remove once consolidation with upMsgs is done
putCliqueMsgUp!(cda, LikelihoodMessage())
# cda.dwnMsg = LikelihoodMessage()
putCliqueInitMsgDown!(cda, LikelihoodMessage())
setCliqueStatus!(cliq, NULL)
setCliqueDrawColor!(cliq, "")
return nothing
end
function resetCliqSolve!(
dfg::AbstractDFG,
treel::AbstractBayesTree,
frt::Symbol;
solveKey::Symbol = :default,
)
#
return resetCliqSolve!(dfg, treel, getClique(treel, frt); solveKey = solveKey)
end
## =============================================================================
# helper functions to add tree messages to subgraphs
## =============================================================================
function updateSubFgFromDownMsgs!(
sfg::G,
dwnmsgs::LikelihoodMessage,
seps::Vector{Symbol},
) where {G <: AbstractDFG}
#
# sanity check basic Bayes (Junction) tree property
# length(setdiff(keys(dwnmsgs), seps)) == 0 ? nothing : error("updateSubFgFromDownMsgs! -- separators and dwnmsgs not consistent")
# update specific variables in sfg from msgs
for (key, beldim) in dwnmsgs.belief
if key in seps
newBel = manikde!(getManifold(beldim.variableType), beldim.val; bw = beldim.bw[:, 1])
setValKDE!(sfg, key, newBel, false, beldim.infoPerCoord)
end
end
return nothing
end
function generateMsgPrior(belief_::TreeBelief, ::NonparametricMessage)
kdePr = manikde!(getManifold(belief_.variableType), belief_.val; bw = belief_.bw[:, 1])
return MsgPrior(kdePr, belief_.infoPerCoord, getManifold(belief_))
end
function generateMsgPrior(belief_::TreeBelief, ::ParametricMessage)
msgPrior = if length(belief_.val[1]) == 1 #FIXME ? && length(belief_.val) == 1
MsgPrior(
Normal(belief_.val[1][1], sqrt(belief_.bw[1])),
belief_.infoPerCoord,
getManifold(belief_),
)
elseif length(belief_.val[1]) > 1 #FIXME ? length(belief_.val) == 1
mvnorm = createMvNormal(belief_.val[1], belief_.bw)
mvnorm !== nothing ? nothing : (return DFGFactor[])
MsgPrior(mvnorm, belief_.infoPerCoord, getManifold(belief_))
end
return msgPrior
end
"""
$SIGNATURES
Return `Dict{Int, Vector{Symbol}}` where each `Int` is a new subgraph and the vector contains all variables
connected to that subgraph. Subgraphs connectivity is defined by factors of the [`selectFactorType`](@ref)
type -- e.g. `Pose2` variables connected by a chain of `Pose2Pose2` factors is connected, but not if a link
is `Pose2Pose2Range`. This function is specifically intended for use with `MessageRelativeLikelihoods` in mind
to determine which relative and prior factors should be included in an upward belief propagation (joint) message.
Each returned subgraph should receive a `MsgPrior` on the dominant variable.
Notes
- Disconnected subgraphs in the separator variables of a clique should instead be connected by a
`TangentAtlasFactor` approximation -- i.e. when analytical `selectFactorType`s cannot be used.
- Internally does `getfield(Main, Symbol(factorname::Core.TypeName))`, which may behave unexpectedly with user-defined factor types
- As well as a possible speed penalty -- TODO, investigate
Related
[`_calcCandidatePriorBest`](@ref)
"""
function _findSubgraphsFactorType(
dfg_::AbstractDFG,
jointrelatives::MsgRelativeType,
separators::Vector{Symbol},
)
#
commonJoints = []
subClassify = Dict{Symbol, Int}()
newClass = 0
  # 1. count separator connectivity in UPWARD_DIFFERENTIAL
sepsCount = Dict{Symbol, Int}()
map(x -> (sepsCount[x] = 0), separators)
# tagsFilter = [:__LIKELIHOODMESSAGE__;]
# tflsf = lsf(fg, tags=tagsFilter)
for likl in jointrelatives
for vari in likl.variables
sepsCount[vari] += 1
end
end
# 2. start with 0's as subgraphs
for (id, count) in sepsCount
if count == 0
# also keep second list just to be sure based on labels
newClass += 1
subClassify[id] = newClass
end
end
# 3. then < 0 and search all paths, adding each hit to subgraph classifications
for key1 in setdiff(keys(sepsCount), keys(subClassify))
if !(key1 in keys(subClassify))
newClass += 1
subClassify[key1] = newClass
end
# if sepsCount[key1] == 1
# search connectivity throughout remaining variables, some duplicate computation occurring
for key2 in setdiff(keys(sepsCount), keys(subClassify))
defaultFct = selectFactorType(dfg_, key1, key2)
# @show key1, key2, defaultFct
# TODO validate getfield Main here
# resname = defaultFct isa UnionAll ? getfield(Main, defaultFct.body.name |> Symbol) : defaultFct
resname =
defaultFct isa UnionAll ? getfield(Main, nameof(defaultFct.body)) : defaultFct
pth = findShortestPathDijkstra(
dfg_,
key1,
key2;
typeFactors = [resname;],
initialized = true,
)
# check if connected to existing subClass
if 0 == length(pth)
# not connected, so need new class
newClass += 1
subClassify[key2] = newClass
else
# is connected, so add existing class of key1
subClassify[key2] = subClassify[key1]
end
end
# end
end
# 4. inverse classification dictionary
allClasses = Dict{Int, Vector{Symbol}}()
for (key, cls) in subClassify
if isInitialized(dfg_, key)
if !haskey(allClasses, cls)
(allClasses[cls] = Symbol[key;])
else
union!(allClasses[cls], [key;])
end
end
end
#
return allClasses
end
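# Minimal usage sketch (hypothetical clique and labels), mirroring the call pattern
# used in `_generateMsgJointRelativesPriors` further below:
#   seps = getCliqSeparatorVarIds(cliq)
#   rels = addLikelihoodsDifferentialCHILD!(cfg, seps)
#   classes = _findSubgraphsFactorType(cfg, rels, seps)
#   # e.g. Dict(1 => [:x1, :x2], 2 => [:l1]) -- one entry per connected subgraph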
"""
$SIGNATURES
Build from a `LikelihoodMessage` a temporary distributed factor graph object containing differential
information likelihood factors based on values in the messages.
Notes
- Modifies tfg argument by adding `:__UPWARD_DIFFERENTIAL__` factors.
DevNotes
- Initial version which only works for Pose2 and Point2 at this stage.
"""
function addLikelihoodsDifferential!(
msgs::LikelihoodMessage,
cliqSubFG::AbstractDFG,
tfg::AbstractDFG = initfg(),
)
# create new local dfg and add all the variables with data
for difflikl in msgs.jointmsg.relatives
addFactor!(
cliqSubFG,
difflikl.variables,
difflikl.likelihood;
graphinit = false,
tags = [:__LIKELIHOODMESSAGE__; :__UPWARD_DIFFERENTIAL__],
)
end
# listVarByDim = Symbol[]
# for (label, val) in msgs.belief
# push!(listVarByDim, label)
# if !exists(tfg, label)
# addVariable!(tfg, label, val.variableType)
# @debug "New variable added to subfg" _group=:check_addLHDiff #TODO JT remove debug.
# end
# initVariable!(tfg, label, manikde!(val))
# end
# # list all variables in order of dimension size
# alreadylist = Symbol[]
# listDims = getDimension.(getVariable.(tfg,listVarByDim))
# per = sortperm(listDims, rev=true)
# listVarDec = listVarByDim[per]
# listVarAcc = reverse(listVarDec)
# # add all differential factors (without deconvolution values)
# for sym1_ in listVarDec
# push!(alreadylist, sym1_)
# for sym2_ in setdiff(listVarAcc, alreadylist)
# nfactype = selectFactorType(tfg, sym1_, sym2_)
# # assume default helper function # buildFactorDefault(nfactype)
# nfct = nfactype()
# afc = addFactor!(tfg, [sym1_;sym2_], nfct, graphinit=false, tags=[:DUMMY;])
# # calculate the general deconvolution between variables
# pts = solveFactorMeasurements(tfg, afc.label)
# newBel = manikde!(getManifold(nfactype), pts[1])
# # replace dummy factor with real deconv factor using manikde approx belief measurement
# fullFct = nfactype(newBel)
# deleteFactor!(tfg, afc.label)
# addFactor!( cliqSubFG, [sym1_;sym2_], fullFct, graphinit=false, tags=[:__LIKELIHOODMESSAGE__; :__UPWARD_DIFFERENTIAL__] )
# end
# end
return tfg
end
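# Sketch (hedged): copy the relative likelihoods of an upward joint message into a
# clique subgraph, tagged for later removal by `deleteMsgFactors!`:
#   addLikelihoodsDifferential!(msg, cliqSubFG)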
# default verbNoun API spec (dest, src)
function addLikelihoodsDifferential!(subfg::AbstractDFG, msgs::LikelihoodMessage)
return addLikelihoodsDifferential!(msgs, subfg)
end
# child CSM calculates the differential factors that should be sent up
# FIXME, must be renamed and standardized
function addLikelihoodsDifferentialCHILD!(
cliqSubFG::AbstractDFG,
seps::Vector{Symbol},
tfg::AbstractDFG = initfg(
LocalDFG(; solverParams = SolverParams(; N = getSolverParams(cliqSubFG).N)),
);
solveKey::Symbol = :default,
)
#
# return list of differential factors the parent should add as part upward partial joint posterior
retlist = MsgRelativeType()
# create new local dfg and add all the variables with data
for label in seps
if !exists(tfg, label)
addVariable!(tfg, label, getVariableType(cliqSubFG, label))
@debug "New variable added to subfg" _group = :check_addLHDiff #TODO JT remove debug.
end
initVariable!(tfg, label, getBelief(cliqSubFG, label, solveKey), solveKey)
end
# list all variables in order of dimension size
alreadylist = Symbol[]
listDims = getDimension.(getVariable.(tfg, seps))
per = sortperm(listDims; rev = true)
listVarDec = seps[per]
listVarAcc = reverse(listVarDec)
# add all differential factors (without deconvolution values)
for sym1_ in listVarDec
push!(alreadylist, sym1_)
for sym2_ in setdiff(listVarAcc, alreadylist)
isHom, ftyps = isPathFactorsHomogeneous(cliqSubFG, sym1_, sym2_)
# chain of user factors are of the same type
if isHom
_sft = selectFactorType(tfg, sym1_, sym2_)
sft = _sft()
# only take factors that are homogeneous with the generic relative
if typeof(sft).name == ftyps[1]
# assume default helper function # buildFactorDefault(nfactype)
afc = addFactor!(tfg, [sym1_; sym2_], sft; graphinit = false, tags = [:DUMMY;])
# calculate the general deconvolution between variables
pred_X, = approxDeconv(tfg, afc.label, solveKey) # solveFactorMeasurements
M = getManifold(_sft)
e0 = getPointIdentity(M)
pts = exp.(Ref(M), Ref(e0), pred_X)
newBel = manikde!(sft, pts)
# replace dummy factor with real deconv factor using manikde approx belief measurement
fullFct = _sft(newBel)
deleteFactor!(tfg, afc.label)
push!(retlist, (; variables = [sym1_; sym2_], likelihood = fullFct))
end
end
end
end
return retlist
end
# use variableList to select a sub-subgraph -- useful for disconnected segments of graph
# NOTE expect msgbeliefs to contain all required keys passed in via special variableList
function _calcCandidatePriorBest(
subfg::AbstractDFG,
msgbeliefs::Dict,
# msgs::LikelihoodMessage,
variableList::Vector{Symbol} = collect(keys(msgbeliefs)),
)
#
## TODO repackage as new function for wider use
len = length(variableList)
dims = Vector{Int}(undef, len)
syms = Vector{Symbol}(undef, len)
biAdj = Vector{Int}(undef, len)
# TODO, not considering existing priors for MsgPrior placement at this time
# priors = Vector{Int}(undef, len)
i = 0
for (label, val) in msgbeliefs
# skip elements not in variableList
(label in variableList) ? nothing : continue
# do calculations based on dimension
i += 1
dims[i] = getDimension(val.variableType)
syms[i] = label
biAdj[i] = ls(subfg, label) |> length
end
# work only with highest dimension variable
maxDim = maximum(dims)
dimMask = dims .== maxDim
mdAdj = biAdj[dimMask]
pe = sortperm(mdAdj; rev = true) # descending
# @show variableList, keys(msgbeliefs)
# @show syms
return (syms[dimMask])[pe][1]
end
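# Selection rule sketch: restrict `msgbeliefs` to `variableList`, keep only the
# highest-dimension variables, and return the one with the most factor connections
# in `subfg` (hypothetical labels):
#   best = _calcCandidatePriorBest(subfg, msgbeliefs, [:x1, :x2])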
"""
$SIGNATURES
Generate the `MsgPrior`s required for the upward joint message, following on from which
relative factors ("differentials") have already been added.
Notes
- Might skip some priors based on `msg.hasPriors`
- This might still be hard to work with, will be clear once engaged in codebase
- TODO obviously much consolidation to do here
Related
[`_findSubgraphsFactorType`](@ref), [`_calcCandidatePriorBest`](@ref)
"""
function _generateSubgraphMsgPriors(
subfg::AbstractDFG,
solveKey::Symbol,
allClasses::Dict{Int, Vector{Symbol}},
msgbeliefs::Dict,
msgHasPriors::Bool,
msgType::MessageType,
)
#
priorsJoint = MsgPriorType()
# 5. find best variable of each of allClasses to place MsgPrior
for (id, syms) in allClasses
# if any `jointmsg per variable && !msg.hasPriors`, then don't add a prior
if (1 == length(syms) || msgHasPriors) && 0 < length(msgbeliefs)
whichVar = IIF._calcCandidatePriorBest(subfg, msgbeliefs, syms)
priorsJoint[whichVar] =
IIF.generateMsgPrior(TreeBelief(getVariable(subfg, whichVar), solveKey), msgType)
end
end
# return the required priors
return priorsJoint
end
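# Usage sketch, one `MsgPrior` per disconnected subgraph class (mirrors the call in
# `_generateMsgJointRelativesPriors` below):
#   priors = _generateSubgraphMsgPriors(subfg, :default, allClasses, msgbeliefs,
#                                       true, NonparametricMessage())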
"""
$SIGNATURES
Generate relative and prior factors that make up the joint msg likelihood.
DevNotes
- Non-standard relative likelihoods will be populated by TAF factors, removing priors assumption.
"""
function _generateMsgJointRelativesPriors(
cfg::AbstractDFG,
solveKey::Symbol,
cliq::TreeClique,
)
#
separators = getCliqSeparatorVarIds(cliq)
jointrelatives = addLikelihoodsDifferentialCHILD!(cfg, separators; solveKey = solveKey)
allClasses = IIF._findSubgraphsFactorType(cfg, jointrelatives, separators)
hasPriors = 0 < length(intersect(getCliquePotentials(cliq), lsfPriors(cfg)))
msgbeliefs = Dict{Symbol, TreeBelief}()
IIF._buildTreeBeliefDict!(msgbeliefs, cfg, cliq)
# @show cliq.id, ls(cfg), keys(msgbeliefs), allClasses
upmsgpriors = IIF._generateSubgraphMsgPriors(
cfg,
solveKey,
allClasses,
msgbeliefs,
hasPriors,
IIF.NonparametricMessage(),
)
return _MsgJointLikelihood(; relatives = jointrelatives, priors = upmsgpriors)
end
"""
$SIGNATURES
Place a single message likelihood prior on the highest-dimension variable with the highest connectivity in the existing subfg.
"""
function addLikelihoodPriorCommon!(
subfg::AbstractDFG,
msg::LikelihoodMessage;
tags::Vector{Symbol} = Symbol[],
)
#
tags__ = union(Symbol[:__LIKELIHOODMESSAGE__; :__UPWARD_COMMON__], tags)
# find if any orphaned variables exist
for (lbl, msgpr) in msg.jointmsg.priors
# don't add numerical gauge reference unless absolutely necessary
if msg.hasPriors || 0 == length(ls(subfg, lbl))
# finally add the single AbstractPrior from LikelihoodMessage
addFactor!(subfg, [lbl], msgpr; graphinit = false, tags = tags__)
end
end
# # find max dimension variable, which also has highest biadjacency
# topCandidate = _calcCandidatePriorBest(subfg, msg.belief)
# # get prior for top candidate
# msgPrior = generateMsgPrior(msg.belief[topCandidate], msg.msgType)
# # get ready
# tags__ = union(Symbol[:__LIKELIHOODMESSAGE__;:__UPWARD_COMMON__], tags)
# # finally add the single AbstractPrior from LikelihoodMessage
# addFactor!(subfg, [topCandidate], msgPrior, graphinit=false, tags=tags__)
end
"""
$SIGNATURES
Special function to add a few variables and factors to the clique subgraph required for downward solve in CSM.
Dev Notes
- There is still some disparity on whether up and down solves of tree should use exactly the same subgraph... 'between for up and frontal connected for down'
"""
function addDownVariableFactors!(
dfg::AbstractDFG,
subfg::InMemoryDFGTypes,
cliq::TreeClique,
logger = ConsoleLogger();
solvable::Int = 1,
)
#
# determine which variables and factors needs to be added
currsyms = ls(subfg)
allclsyms = getCliqVarsWithFrontalNeighbors(dfg, cliq; solvable = solvable)
newsyms = setdiff(allclsyms, currsyms)
with_logger(logger) do
@info "addDownVariableFactors!, cliq=$(cliq.id), newsyms=$newsyms"
end
frtls = getCliqFrontalVarIds(cliq)
with_logger(logger) do
@info "addDownVariableFactors!, cliq=$(cliq.id), frtls=$frtls"
end
allnewfcts =
union(map(x -> findFactorsBetweenFrom(dfg, union(currsyms, newsyms), x), frtls)...)
newfcts = setdiff(allnewfcts, lsf(subfg))
with_logger(logger) do
@info "addDownVariableFactors!, cliq=$(cliq.id), newfcts=$newfcts, allnewfcts=$allnewfcts"
end
#TODO solvable?
DFG.mergeGraph!(subfg, dfg, newsyms, newfcts)
return newsyms, newfcts
end
"""
$SIGNATURES
Modify the `subfg::AbstractDFG` to include `msgs` as priors that are used
during clique inference.
Notes
- May be used during initialization or inference, in both upward and downward directions.
- `msgs` are identified by variable label `::Symbol`, and may consist of multiple beliefs.
- Message sets from different cliques are identified by clique id `::Int`.
- assume lower limit on number of particles is 5.
- messages from children stored in vector or dict.
DevNotes
- TODO Split dispatch on `dir`, rather than internal `if` statement.
Related
`deleteMsgFactors!`
"""
function addMsgFactors!(
subfg::AbstractDFG,
msg::LikelihoodMessage,
dir::Type{<:MessagePassDirection};
tags::Vector{Symbol} = Symbol[],
attemptPriors::Bool = true,
)
#
# add messages as priors to this sub factor graph
msgfcts = DFGFactor[]
# TODO, expand -- this deconv approach only works for NonparametricMessage at this time.
if getSolverParams(subfg).useMsgLikelihoods &&
dir == UpwardPass &&
msg.msgType isa NonparametricMessage
#
if 0 < length(msg.belief)
# currently only works for nonparametric
addLikelihoodsDifferential!(subfg, msg) # :__UPWARD_DIFFERENTIAL__
if attemptPriors
# will only be added based on internal tests
prFcts = addLikelihoodPriorCommon!(subfg, msg) # :__UPWARD_COMMON__
end
end
else
svars = DFG.listVariables(subfg)
tags__ = union(Symbol[:__LIKELIHOODMESSAGE__;], tags)
dir == DownwardPass ? push!(tags__, :__DOWNWARD_COMMON__) : nothing
for (msym, belief_) in msg.belief
if msym in svars
msgPrior = generateMsgPrior(belief_, msg.msgType)
fc = addFactor!(subfg, [msym], msgPrior; graphinit = false, tags = tags__)
push!(msgfcts, fc)
end
end
end
return msgfcts
end
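# Typical lifecycle sketch (hedged): message factors only live for the duration of a
# clique solve and are removed again afterwards,
#   fcts = addMsgFactors!(subfg, msg, UpwardPass)
#   # ... run clique inference on subfg ...
#   deleteMsgFactors!(subfg) # drops all factors tagged :__LIKELIHOODMESSAGE__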
function addMsgFactors!(
subfg::AbstractDFG,
allmsgs::Dict{Int, LikelihoodMessage},
dir::Type{<:MessagePassDirection};
tags::Vector{Symbol} = Symbol[],
)
#
allfcts = DFGFactor[]
for (cliqid, msgs) in allmsgs
# do each dict in array separately
newfcts = addMsgFactors!(subfg, msgs, dir; tags = tags)
union!(allfcts, newfcts)
end
return allfcts
end
"""
$SIGNATURES
Delete from the subgraph `::AbstractDFG` the prior belief `msgs` that could/would be used
during clique inference.
DevNotes
- TODO make `::Vector{Symbol}` version.
- TODO the function taking fcts::Vector{DFGFactor} is unused and replaced by the tags version, perhaps we can remove it.
Related
`addMsgFactors!`
"""
function deleteMsgFactors!(subfg::AbstractDFG, fcts::Vector{DFGFactor})
#
for fc in fcts
deleteFactor!(subfg, fc.label)
end
end
function deleteMsgFactors!(
subfg::AbstractDFG,
tags::Vector{Symbol} = [:__LIKELIHOODMESSAGE__],
)
# remove msg factors that were added to the subfg
facs = lsf(subfg; tags = tags)
deleteFactor!.(subfg, facs)
return facs
end
## =============================================================================
## Prepare Clique Up or Down Msgs
## =============================================================================
function _buildTreeBeliefDict!(
msgdict::Dict{Symbol, TreeBelief},
subfg::AbstractDFG,
cliq::TreeClique,
solveKey::Symbol = :default,
sdims = nothing; #getCliqVariableMoreInitDims(subfg, cliq, solveKey);
duplicate::Bool = true,
)
#
# TODO better logging
# with_logger(logger) do
# @info "$(now()), prepCliqInitMsgsUp, seps=$seps, sdims=$sdims"
# end
seps = getCliqSeparatorVarIds(cliq)
for vid in seps
var = DFG.getVariable(subfg, vid)
var = duplicate ? deepcopy(var) : var
if isInitialized(var)
msgdict[var.label] = TreeBelief(var; solvableDim = 1.0) #sdims[var.label])
end
end
return nothing
end
"""
$SIGNATURES
Prepare the upward inference messages from clique to parent and return as `Dict{Symbol}`.
Notes
- Does not require tree message likelihood factors in subfg.
- Also see #579 regarding eliminated likelihoods and priors.
DevNotes
- set `msgs.hasPriors=true` only if a prior occurred here or lower down in tree branch.
"""
function prepCliqueMsgUp(
subfg::AbstractDFG,
cliq::TreeClique,
solveKey::Symbol,
status::CliqStatus = getCliqueStatus(cliq);
logger = ConsoleLogger(),
duplicate::Bool = true,
sender = (; id = 0, step = 0),
)
#
# get the current clique status
# sdims = getCliqVariableMoreInitDims(subfg, cliq, solveKey)
# construct init's up msg to place in parent from initialized separator variables
hasPriors = 0 < (lsfPriors(subfg) |> length)
msg = LikelihoodMessage(; sender = sender, status = status, hasPriors = hasPriors)
_buildTreeBeliefDict!(msg.belief, subfg, cliq, solveKey; duplicate = duplicate)
# seps = getCliqSeparatorVarIds(cliq)
# for vid in seps
# var = DFG.getVariable(subfg, vid)
# var = duplicate ? deepcopy(var) : var
# if isInitialized(var)
# msg.belief[var.label] = TreeBelief(var, solvableDim=sdims[var.label])
# end
# end
if getSolverParams(subfg).useMsgLikelihoods
msg.jointmsg = IIF._generateMsgJointRelativesPriors(subfg, solveKey, cliq)
end
# FIXME calculate the new DIFFERENTIAL factors
# retval = addLikelihoodsDifferentialCHILD!(subfg, getCliqSeparatorVarIds(cliq))
# msg.jointmsg.relatives = retval
return msg
end
"""
$SIGNATURES
Calculate new and then set the down messages for a clique in Bayes (Junction) tree.
"""
function prepCliqueMsgDown(
subfg::AbstractDFG,
cliq::TreeClique,
solveKey::Symbol,
prntDwnMsgs::LikelihoodMessage,
logger = ConsoleLogger();
status::CliqStatus = getCliqueStatus(cliq),
sender = (; id = cliq.id.value, step = 0),
)
#
allvars = getCliqVarIdsAll(cliq)
allprntkeys = collect(keys(prntDwnMsgs.belief))
passkeys = intersect(allvars, setdiff(allprntkeys, ls(subfg)))
remainkeys = setdiff(allvars, passkeys)
newDwnMsgs = LikelihoodMessage(; sender = sender, status = status)
# some msgs are just pass through from parent
for pk in passkeys
newDwnMsgs.belief[pk] = prntDwnMsgs.belief[pk]
end
# set solvable dimensions
# sdims = getCliqVariableMoreInitDims(subfg, cliq)
# other messages must be extracted from subfg
for mk in remainkeys
setVari = getVariable(subfg, mk)
if isInitialized(setVari)
newDwnMsgs.belief[mk] = TreeBelief(setVari, solveKey) #, solvableDim=sdims[mk] )
end
end
# set the downward keys
with_logger(logger) do
@info "cliq $(cliq.id), getSetDownMessagesComplete!, allkeys=$(allvars), passkeys=$(passkeys), msgkeys=$(collect(keys(newDwnMsgs.belief)))"
end
return newDwnMsgs
end
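# Sketch of the up/down message preparation pair (hedged; `parentDwnMsgs` assumed
# fetched from the parent clique):
#   upmsg  = prepCliqueMsgUp(subfg, cliq, :default)
#   dwnmsg = prepCliqueMsgDown(subfg, cliq, :default, parentDwnMsgs)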
## =============================================================================
## Multimessage assemblies from multiple cliques
## =============================================================================
"""
$SIGNATURES
Return dictionary of all up belief messages currently in a Bayes `tree`.
"""
function getTreeCliqUpMsgsAll(tree::AbstractBayesTree)
allUpMsgs = Dict{Int, LikelihoodMessage}()
for (idx, cliq) in getCliques(tree)
msgs = getMessageBuffer(cliq).upRx
merge!(allUpMsgs, msgs)
end
return allUpMsgs
end
# TODO @NamedTuple{cliqId::CliqueId{Int}, depth::Int, belief::TreeBelief}
const UpMsgPlotting =
NamedTuple{(:cliqId, :depth, :belief), Tuple{CliqueId{Int}, Int, TreeBelief}}
"""
$SIGNATURES
Convert the tree up-message dictionary to a new dictionary keyed by variable, collecting each variable's messages and their depth in the tree.
Notes
- Used in RoMEPlotting
- Return data in `UpMsgPlotting` format.
"""
function stackCliqUpMsgsByVariable(
tree::AbstractBayesTree,
tmpmsgs::Dict{Int, LikelihoodMessage},
)
#
# start of the return data structure
stack = Dict{Symbol, Vector{UpMsgPlotting}}()
# look at all the clique level data
for (cidx, tmpmsg) in tmpmsgs
# look at all variables up msg from each clique
for (sym, belief) in tmpmsg.belief
# create a new entry for a particular variable if it hasn't been seen before
if !haskey(stack, sym)
# FIXME this is an old message type
stack[sym] = Vector{UpMsgPlotting}()
end
# assemble metadata
cliq = getClique(tree, cidx)
#TODO why was the first frontal used? I changed to clique id (unique)
# frt = getCliqFrontalVarIds(cliq)[1]
# add this belief msg and meta data to vector of variable entry
push!(stack[sym], IIF.UpMsgPlotting((cliq.id, getCliqDepth(tree, cliq), belief)))
end
end
return stack
end
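# Usage sketch (hedged):
#   allmsgs = getTreeCliqUpMsgsAll(tree)
#   stack = stackCliqUpMsgsByVariable(tree, allmsgs)
#   # stack[:x1] :: Vector{UpMsgPlotting}, one entry per clique that sent :x1 upward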
"""
$SIGNATURES
Return dictionary of down messages consisting of all frontal and separator beliefs of this clique.
Notes:
- Fetches numerical results from `subdfg` as dictated in `cliq`.
- return LikelihoodMessage
"""
function getCliqDownMsgsAfterDownSolve(
subdfg::AbstractDFG,
cliq::TreeClique,
solveKey::Symbol;
status::CliqStatus = NULL,
sender = (; id = cliq.id.value, step = 0),
)
#
# Dict{Symbol, MKD}
# where the return msgs are contained
container = LikelihoodMessage(; sender = sender, status = status)
# go through all msgs one by one
for sym in getCliqAllVarIds(cliq)
container.belief[sym] = TreeBelief(getVariable(subdfg, sym), solveKey)
end
# return the result
return container
end
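# Sketch (hedged): after a downward pass completes,
#   dwnlikl = getCliqDownMsgsAfterDownSolve(subdfg, cliq, :default)
#   # dwnlikl.belief then holds a TreeBelief for every frontal and separator of cliq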
function Statistics.mean(vartype::InferenceVariable, args...; kwargs...)
return mean(getManifold(vartype), args...; kwargs...)
end
function Statistics.std(vartype::InferenceVariable, args...; kwargs...)
return std(getManifold(vartype), args...; kwargs...)
end
function Statistics.var(vartype::InferenceVariable, args...; kwargs...)
return var(getManifold(vartype), args...; kwargs...)
end
function Statistics.cov(
vartype::InferenceVariable,
ptsArr::AbstractVector;
basis::Manifolds.AbstractBasis = Manifolds.DefaultOrthogonalBasis(),
kwargs...,
)
return cov(getManifold(vartype), ptsArr; basis, kwargs...)
end
#TODO check performance; FIXME the makemutable workaround might not be needed any more
function calcStdBasicSpread(vartype::InferenceVariable, ptsArr::AbstractVector) # {P}) where {P}
# _makemutable(s) = s
# _makemutable(s::StaticArray{Tuple{S},T,N}) where {S,T,N} = MArray{Tuple{S},T,N,S}(s)
# _makemutable(s::SMatrix{N,N,T,D}) where {N,T,D} = MMatrix{N,N,T,D}(s)
# FIXME, silly conversion since Manifolds.std internally replicates eltype ptsArr which doesn't work on StaticArrays
# σ = std(vartype, _makemutable.(ptsArr))
μ = mean(vartype, ptsArr, GeodesicInterpolation())
σ = std(vartype, ptsArr, μ)
#if no std yet, set to 1
msst = 1e-10 < σ ? σ : 1.0
return msst
end
#TODO consolidate
function calcMeanCovar(vari::DFGVariable, solvekey = :default)
pts = getSolverData(vari, solvekey).val
μ = mean(getManifold(vari), pts)
Σ = cov(getVariableType(vari), pts)
return μ, Σ
end
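# Usage sketch (hedged; `getVal` assumed to fetch the solver points of a variable):
#   μ, Σ = calcMeanCovar(getVariable(fg, :x0))
#   σ = calcStdBasicSpread(getVariableType(fg, :x0), getVal(fg, :x0))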
# using Revise
using DistributedFactorGraphs
using IncrementalInference
using Test
##
@testset "test CSM runaway on upsolve, (issue 427)" begin
## parameters
lm_prior_noise = 0.1
meas_noise = 0.25
odom_noise = 0.1
n_samples = 100
# initialize mean landmark locations
l1 = 50.0
l2 = -50.0
l3 = 80.0
# "Ground-truth" robot poses
x1 = 0.0
x2 = 0.0
x3 = 0.0
## Initialize empty factor graph
fg = initfg()
addVariable!(fg, Symbol("l1"), ContinuousScalar, N=n_samples)
addFactor!(fg, [:l1], Prior(Normal(l1, lm_prior_noise)))
addVariable!(fg, Symbol("l2"), ContinuousScalar, N=n_samples)
addFactor!(fg, [:l2], Prior(Normal(l2, lm_prior_noise)))
addVariable!(fg, Symbol("l1_0"), ContinuousScalar, N=n_samples)
addVariable!(fg, Symbol("l2_0"), ContinuousScalar, N=n_samples)
# Add first pose
addVariable!(fg, :x1, ContinuousScalar, N=n_samples)
addFactor!(fg, [:x1; :l1; :l1_0], LinearRelative(Normal(40., meas_noise)), multihypo=[1.0; 1.0/2.0; 1.0/2.0])
# Add second pose
addVariable!(fg, :x2, ContinuousScalar, N=n_samples)
# Gaussian transition model
addFactor!(fg, [:x1; :x2], LinearRelative(Normal(0., odom_noise)))
# Make second "door" measurement
# addFactor!(fg, [:x1; :l1], LinearRelative(Normal(0, meas_noise)) )
addFactor!(fg, [:x2; :l2; :l2_0], LinearRelative(Normal(-40., meas_noise)), multihypo=[1.0; 1.0/2.0; 1.0/2.0])
# drawGraph(fg)
# initAll!(fg)
approxConv(fg, :x1l1l1_0f1, :l1_0)
approxConv(fg, :x1l1l1_0f1, :x1)
# doautoinit!(fg, :l1_0)
# doautoinit!(fg, :x1)
## Run solver
getSolverParams(fg).limititers = 30 # previous runaway CSM issue due to excessive limits on autoinit.
# getSolverParams(fg).dbg = false
# getSolverParams(fg).async = false
# getSolverParams(fg).drawtree = false
# getSolverParams(fg).showtree = false
tree = solveTree!(fg, recordcliqs=ls(fg))
# drawGraph(fg)
# fetchAssignTaskHistoryAll!(tree, smt)
# printCliqHistorySummary(tree, :l1)
# getTreeCliqsSolverHistories(fg, tree)
#
# csmAnimate(fg, tree, [:l1;:l2])
#
# # Base.rm("/tmp/caesar/csmCompound/out.ogv")
# run(`ffmpeg -r 10 -i /tmp/caesar/csmCompound/csm_%d.png -c:v libtheora -vf fps=25 -pix_fmt yuv420p -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" -q 10 /tmp/caesar/csmCompound/out.ogv`)
# @async run(`totem /tmp/caesar/csmCompound/out.ogv`)
# using RoMEPlotting
# plotKDE(fg, ls(fg))
##
end
#
# test module wrapping
# addprocs(1) # TODO reinsert once Julia 0.7 tests pass
using Test
using IncrementalInference
module First
export solve, AnotherInferenceType, Container
abstract type AnotherInferenceType end
mutable struct Container
col::Dict{Symbol, Function}
Container(::Nothing) = new()
Container(;col=Dict{Symbol, Function}()) = new(col)
end
function registerCallback!(col::Container, fnc::Function)
# get module
# Symbol(string(m))
m = Symbol(typeof(fnc).name.module)
col.col[m] = fnc
end
function solve(ctl::Container, val::AnotherInferenceType) # m::Symbol,
m = Symbol(typeof(val).name.module)
if false
# doesn't work
evalPotential(val)
else
evalPotential = ctl.col[m]
evalPotential(val)
end
end
end
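# Usage sketch: callbacks are stored keyed by the module that defines them, so
# `solve` can look up the matching `evalPotential` from the value's own module
# (`SomeModule` is hypothetical):
#   col = First.Container()
#   First.registerCallback!(col, SomeModule.evalPotential)
#   First.solve(col, SomeModule.SomeType(1))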
module SecondModule
using Main.First
export SecondType, SecondAgain, evalPotential, solve, registerCallback!, Container
struct SecondType <: Main.First.AnotherInferenceType
x::Int
end
struct SecondAgain <: Main.First.AnotherInferenceType
x::Int
end
function evalPotential(x::SecondType)
println("evalPotential sees $(x)")
return x.x
end
function evalPotential(x::SecondAgain)
println("evalPotential also sees $(x)")
return x.x
end
end
# get module
# typeof(f).name.mt.name
# Okay, let's see
using Main.SecondModule
@testset "out of module evalPotential..." begin
Col = First.Container()
# running on first process only
First.registerCallback!(Col, SecondModule.evalPotential)
CCol = deepcopy(Col)
stA = 1.0
saA = 3.0
st = SecondModule.SecondType(stA)
sa = SecondModule.SecondAgain(saA)
# Symbol(typeof(st).name.module)
@test First.solve(CCol, st) == stA
@test First.solve(CCol, sa) == saA
@elapsed SecondModule.evalPotential(st)
t1 = @elapsed SecondModule.evalPotential(st)
t2 = @elapsed solve(Col, sa)
println("Check the speed is reasonable")
@test t2 < 15.0*t1 # should actually be about equal, slack for threading uncertainty
end
# expand tests to include multiprocessor
# println("Tesing call of function on separate process...")
# fr = remotecall(evalPotential,procs()[2], st)
# @test fetch(fr) == stA
#
# fr = remotecall(solve,procs()[2], CCol,sa)
# @test fetch(fr) == saA
#
#
#
# println("Stopping all but first process...")
# rmprocs(procs()[2:end])
# @show procs()
#
using IncrementalInference
using InteractiveUtils
using Test
##
@testset "test the basics" begin
##
fg = initfg()
addVariable!(fg, :x1, ContinuousScalar)
addVariable!(fg, :x2, ContinuousScalar)
addFactor!(fg, [:x1;:x2], LinearRelative(Normal()), graphinit=false)
addFactor!(fg, [:x2], Prior(Normal()), graphinit=false)
@test exists(fg, :x1)
@test !exists(fg, :l13)
##
end
@testset "test manikde! constructions on variableType" begin
##
pts = [randn(1) for _ in 1:100]
varT = LinearRelative{1}
manikde!(varT, pts)
DFG.@defVariable _TestManiKde IIF.Manifolds.SpecialEuclidean(2) ArrayPartition([0;0.], [1 0; 0 1.])
# construct directly with ArrayPartition
pts = [ArrayPartition(randn(2), [1 0; 0 1.]) for _ in 1:100]
varT = _TestManiKde
manikde!(varT, pts)
# construct indirectly via tuple (expert users only, not meant for general use)
pts = [(randn(2), [1 0; 0 1.]) for _ in 1:100]
varT = _TestManiKde
manikde!(varT, pts)
##
end
@testset "test InteractiveUtilsExt" begin
##
IIF.listTypeTree(AbstractManifoldMinimize)
IIF.getCurrentWorkspaceFactors()
IIF.getCurrentWorkspaceVariables()
##
end
#
using IncrementalInference
using Test
##
@testset "test fourdoor early example" begin
## example parameters
# Number of kernels representing each marginal belief
N=100
# prior knowledge of four possible door locations
cv = 3.0
doorPrior = Mixture(Prior,
[Normal(-100,cv);Normal(0,cv);Normal(100,cv);Normal(300,cv)],
[1/4;1/4;1/4;1/4] )
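# doorPrior: an equally weighted four-component Gaussian mixture over the possible
# door locations, each component sharing standard deviation cv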
## Build the factor graph object
fg = initfg()
getSolverParams(fg).useMsgLikelihoods = true
# first pose location
v1 = addVariable!(fg,:x1,ContinuousScalar,N=N)
# see a door for the first time
addFactor!(fg,[:x1], doorPrior)
# first solution with only one variable and factor (may take a few moments on first JIT compiling)
solveTree!(fg)
## drive to second pose location
addVariable!(fg,:x2, ContinuousScalar, N=N)
addFactor!(fg,[:x1;:x2],LinearRelative(Normal(50.0,2.0)))
# drive to third pose location
v3=addVariable!(fg,:x3,ContinuousScalar, N=N)
addFactor!(fg,[:x2;:x3], LinearRelative( Normal(50.0,4.0)))
# see a door for the second time
addFactor!(fg,[:x3], doorPrior)
# second solution should be much quicker
solveTree!(fg)
# drive to fourth and final pose location
addVariable!(fg,:x4,ContinuousScalar, N=N)
addFactor!(fg,[:x3;:x4], LinearRelative( Normal(200.0,4.0)))
## make a third door sighting
addFactor!(fg,[:x4], doorPrior)
# solve over all data
tree = solveTree!(fg)
##
end
# # HMM computed ground truth, extended for 7 poses with landmark
# global gt = Dict{Symbol, Array{Float64,2}}()
# gt[:x0]=reshape(Float64[0.0;1.97304 ],2,1) # -0.0342366
# gt[:x2]=reshape(Float64[50.0; 2.83153 ],2,1) # 49.8797
# gt[:x3]=reshape(Float64[100.0; 1.65557 ],2,1) # 99.8351
# gt[:x4]=reshape(Float64[150.0; 1.64945 ],2,1) # 148.637
# gt[:x5]=reshape(Float64[200.0; 1.77992 ],2,1) # 198.62
# gt[:x6]=reshape(Float64[240.0; 2.20466 ],2,1) # 238.492
# gt[:x7]=reshape(Float64[300.0; 2.14353 ],2,1) # 298.467
# gt[:l1]=reshape(Float64[165.0; 1.17284 ],2,1) # 164.102
#
# using Revise
using IncrementalInference
using Test
##
@testset "Test priors" begin
##
N=100
graphinits = [false, true]
# TEMP value when not using for loop
graphinit = true
for graphinit = graphinits
fg = initfg()
fg.solverParams.N = N
fg.solverParams.graphinit = graphinit
fg.solverParams.treeinit = !graphinit
addVariable!(fg, :x0, ContinuousScalar, N=N)
addFactor!(fg, [:x0], Prior(Normal(-1.0, 1.0)))
addVariable!(fg, :x1, ContinuousScalar, N=N)
addVariable!(fg, :x2, ContinuousScalar, N=N)
addFactor!(fg, [:x2], Prior(Normal(+1.0, 1.0)))
addFactor!(fg, [:x0; :x1], LinearRelative(Normal(0.0, 0.01)))
addFactor!(fg, [:x1; :x2], LinearRelative(Normal(0.0, 0.01)))
#solve
tree = solveTree!(fg)
x0_m = getKDEMean(getBelief(getVariable(fg, :x0)))[1]
x1_m = getKDEMean(getBelief(getVariable(fg, :x1)))[1]
x2_m = getKDEMean(getBelief(getVariable(fg, :x2)))[1]
@info ("Testing means = 0 with 2 priors:\ngraphinit=$graphinit\nMeans: x0: $(x0_m), x1: $x1_m, x2: $x2_m")
@test_skip isapprox(x0_m, 0.0, atol = 0.1)
@test_skip isapprox(x1_m, 0.0, atol = 0.1)
@test_skip isapprox(x2_m, 0.0, atol = 0.1)
@warn "priorusetest.jl is testing with large tolerances"
@test isapprox(x0_m, 0.0, atol = 1.0)
@test isapprox(x1_m, 0.0, atol = 1.0)
@test isapprox(x2_m, 0.0, atol = 1.0)
#testing if values are close to one another
testvals = [x0_m, x1_m, x2_m]
meanval = mean(testvals)
@test all(isapprox.(testvals, meanval, atol=0.4))
end
##
for graphinit = graphinits
fg = initfg()
fg.solverParams.N = N
fg.solverParams.graphinit = graphinit
fg.solverParams.treeinit = !graphinit
addVariable!(fg, :x0, ContinuousScalar, N=N)
addFactor!(fg, [:x0], Prior(Normal(-1.0, 1.0)))
addVariable!(fg, :l0, ContinuousScalar, N=N)
addFactor!(fg, [:l0], Prior(Normal(+1.0, 1.0)))
addVariable!(fg, :l1, ContinuousScalar, N=N)
addFactor!(fg, [:x0; :l0], LinearRelative(Normal(0, 0.01)))
addFactor!(fg, [:x0; :l1], LinearRelative(Normal(0, 0.01)))
addVariable!(fg, :x1, ContinuousScalar, N=N)
addFactor!(fg, [:x0; :x1], LinearRelative(Normal(0, 0.01)))
addVariable!(fg, :x2, ContinuousScalar, N=N)
addFactor!(fg, [:x1; :x2], LinearRelative(Normal(0, 0.01)))
addFactor!(fg, [:x2; :l0], LinearRelative(Normal(0, 0.01)))
addFactor!(fg, [:x2; :l1], LinearRelative(Normal(0, 0.01)))
#solve
tree = solveTree!(fg)
x0_m = getKDEMean(getBelief(getVariable(fg, :x0)))[1]
x1_m = getKDEMean(getBelief(getVariable(fg, :x1)))[1]
x2_m = getKDEMean(getBelief(getVariable(fg, :x2)))[1]
l0_m = getKDEMean(getBelief(getVariable(fg, :l0)))[1]
l1_m = getKDEMean(getBelief(getVariable(fg, :l1)))[1]
@info ("Testing means = 0 with 2 priors:\ngraphinit=$graphinit\nMeans: x0: $(x0_m), x1: $x1_m, x2: $x2_m, l0: $l0_m, l1: $l1_m")
@test_skip isapprox(x0_m, 0.0, atol = 0.1)
@test_skip isapprox(x1_m, 0.0, atol = 0.1)
@test_skip isapprox(x2_m, 0.0, atol = 0.1)
@test_skip isapprox(l0_m, 0.0, atol = 0.1)
@test_skip isapprox(l1_m, 0.0, atol = 0.1)
@warn "priorusetest.jl is testing with large tolerances"
@test isapprox(x0_m, 0.0, atol = 1.0)
@test isapprox(x1_m, 0.0, atol = 1.0)
@test isapprox(x2_m, 0.0, atol = 1.0)
@test isapprox(l0_m, 0.0, atol = 1.2)
@test isapprox(l1_m, 0.0, atol = 1.2)
#testing if values are close to one another
@show testvals = [x0_m, x1_m, x2_m, l0_m, l1_m]
@show meanval = mean(testvals)
@test all(isapprox.(testvals, meanval, atol=0.3))
end
##
end
using Test
TEST_GROUP = get(ENV, "IIF_TEST_GROUP", "all")
# temporarily moved to start (for debugging)
#...
if TEST_GROUP in ["all", "tmp_debug_group"]
include("testSpecialOrthogonalMani.jl")
include("testMultiHypo3Door.jl")
include("priorusetest.jl")
end
if TEST_GROUP in ["all", "basic_functional_group"]
# more frequent stochastic failures from numerics
include("testSpecialEuclidean2Mani.jl")
include("testEuclidDistance.jl")
# gradient / jacobian tests
include("manifolds/manifolddiff.jl")
include("manifolds/factordiff.jl")
@error "Gradient tests must be updated and restored for new ccw.varValsAll[]"
#include("testGradientUtils.jl")
#include("testFactorGradients.jl")
# start as basic as possible and build from there
include("typeReturnMemRef.jl")
include("testDistributionsGeneric.jl")
include("testCliqSolveDbgUtils.jl")
include("basicGraphsOperations.jl")
# regular testing
include("testSphereMani.jl")
include("testBasicManifolds.jl")
include("testDERelative.jl")
include("testHeatmapGridDensity.jl")
# include("TestModuleFunctions.jl")
include("testCompareVariablesFactors.jl")
include("saveconvertertypes.jl")
include("testgraphpackingconverters.jl")
include("testSaveLoadDFG.jl")
include("testPackingMixtures.jl")
include("testJunctionTreeConstruction.jl")
include("testBayesTreeiSAM2Example.jl")
include("testTreeFunctions.jl")
#FIXME fails on MetaBayesTree
include("testTreeSaveLoad.jl")
include("testSpecialSampler.jl") # TODO, rename, refine
include("testCommonConvWrapper.jl")
include("testApproxConv.jl")
include("testBasicForwardConvolve.jl")
include("testUseMsgLikelihoods.jl")
include("testDefaultDeconv.jl")
include("testPartialFactors.jl")
include("testPartialPrior.jl")
include("testpartialconstraint.jl")
include("testPartialNH.jl")
include("testMixturePrior.jl")
include("testStateMachine.jl")
include("testBasicCSM.jl")
include("testCliqueFactors.jl")
include("testCcolamdOrdering.jl")
include("testBasicGraphs.jl")
include("testJointEnforcement.jl")
include("testHasPriors913.jl")
include("testInitVariableOrder.jl")
include("testTreeMessageUtils.jl")
include("testCSMMonitor.jl")
include("testExpXstroke.jl")
include("testBasicRecycling.jl")
include("testSkipUpDown.jl")
include("testlocalconstraintexamples.jl")
include("testManualInit.jl")
include("testBasicTreeInit.jl")
include("testSolveOrphanedFG.jl")
include("testSolveSetPPE.jl")
include("testSolveKey.jl")
end
if TEST_GROUP in ["all", "test_cases_group"]
include("testnullhypothesis.jl")
include("testVariousNSolveSize.jl")
include("testExplicitMultihypo.jl")
include("TestCSMMultihypo.jl")
include("testCalcFactorHypos.jl")
include("testMultimodal1D.jl")
include("testMultihypoAndChain.jl")
include("testMultithreaded.jl")
include("testmultihypothesisapi.jl")
include("fourdoortest.jl")
include("testCircular.jl")
include("testMixtureLinearConditional.jl")
include("testFluxModelsDistribution.jl")
include("testAnalysisTools.jl")
include("testBasicParametric.jl")
# include("testMixtureParametric.jl") #FIXME parametric mixtures #1787
# don't run test on ARM, as per issue #527
if Base.Sys.ARCH in [:x86_64;]
include("testTexTreeIllustration.jl")
end
# include("testMultiprocess.jl")
include("testDeadReckoningTether.jl")
end
#
using Test
module Dependency
import Base: convert
export abst, pabst, convert, convertsave
abstract type abst end
abstract type pabst end
convert(::Type{P}, ::T) where {P <: pabst, T <: abst} =
getfield(T.name.module, Symbol("Packed$(T.name.name)"))
convertsave(t) = convert(pabst, t)
end
module Extend
using Main.Dependency
import Main.Dependency: convert
export T1, PackedT1, convertsave
mutable struct T1 <: abst end
mutable struct PackedT1 <: pabst end
end
using Main.Extend
@testset "Ensure converter types can be run from extending namespaces..." begin
@test convertsave(T1()) == Extend.PackedT1
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1288 | # Test for tree-based analysis tools found in `AnalysisTools.jl`.
using Test
using IncrementalInference
@testset "Number of non-zero calculation for frontals." begin
# Alternative way of calculating number of upper triangular matrix elements.
nnzFrontalsRecursive(dim) = dim==1 ? 1 : dim + nnzFrontalsRecursive(dim-1)
# Both must agree for any integer number dimension.
for dim in 1:100
@test nnzFrontalsRecursive(dim) == nnzFrontals(dim)
end
end
@testset "Number of non-zero calculation for full cliques." begin
fg = generateGraph_Kaess()
vo = [:l1, :l2, :x1, :x2, :x3]
tree = buildTreeReset!(fg, vo)
# Must agree with hand-calculated values, iSAM2 paper.
@test nnzClique(getClique(tree, 1)) == 3
@test nnzClique(getClique(tree, 2)) == 5
@test nnzClique(getClique(tree, 3)) == 2
end
@testset "Number of non-zero calculation for full trees." begin
fg = generateGraph_Kaess()
vo = [:l1, :l2, :x1, :x2, :x3]
tree = buildTreeReset!(fg, vo)
# Must agree with hand-calculated values, iSAM2 paper.
@test nnzTree(tree) == 10
end
@testset "Test drawTree" begin
fg = generateGraph_Kaess(graphinit=false)
vo = [:l1, :l2, :x1, :x2, :x3]
tree = buildTreeReset!(fg, vo)
drawTree(tree, show=false)
end
##
# lets test long approxConv chains
using Test
using IncrementalInference
using Statistics
using TensorCast
##
@testset "zdim size of factor is correct" begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousEuclid{3})
addVariable!(fg, :x1, ContinuousEuclid{3})
lr3 = LinearRelative(MvNormal([1;0;0.0],diagm(ones(3))))
addFactor!(fg, [:x0; :x1], lr3, graphinit=false)
ccw = IIF._getCCW(fg, :x0x1f1)
@test IIF._getZDim(ccw) == 3
res = calcFactorResidualTemporary(lr3,
(ContinuousEuclid{3},ContinuousEuclid{3}),
[0;0;0.5],
(zeros(3), [0;0;1.0]) )
#
@test (sum(abs.(res)) - 0.5) < 1e-10
##
end
@testset "approxConv basic and chains" begin
##
fg = generateGraph_Kaess()
# from a prior to neighbor
pts_ = approxConv(fg, :x1f1, :x1)
# lazy
@cast pts[i,j] := pts_[j][i]
@test Statistics.mean(pts) |> abs < 0.4
@test 0.5 < Statistics.std(pts) < 1.5
# set a value in graph to start things off
initVariable!(fg, :x1, pts_)
# legacy case where relative to neighbor
pts_ = approxConv(fg, :x1x2f1, :x2)
@cast pts[i,j] := pts_[j][i]
@test Statistics.mean(pts) |> abs < 0.7
@test 0.7 < Statistics.std(pts) < 2
# along a chain of variables
pts_ = approxConv(fg, :x1, :x3)
@cast pts[i,j] := pts_[j][i]
@test Statistics.mean(pts) |> abs < 1.5
@test 1.3 < Statistics.std(pts) < 3.0
# from a prior down the chain of variables
pts_ = approxConv(fg, :x1f1, :l2)
@cast pts[i,j] := pts_[j][i]
@test Statistics.mean(pts) |> abs < 1.5
@test 1.6 < Statistics.std(pts) < 4.0
##
end
@testset "test approxConvBelief with partial prior" begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousEuclid{2})
pp = PartialPrior(ContinuousEuclid{2}, Normal(),(2,))
addFactor!(fg, [:x0], pp, graphinit=false)
approxConvBelief(fg, :x0f1, :x0)
@info "second test with more complicated manifolds in testSpecialEuclidean2Mani.jl"
##
end
@testset "Test all approxConv versions have same parameter behavior" begin
##
@warn("TODO .inflateCycles ignored in chain version of approxConv(fg, :x0, :x1)!")
@test_broken false
##
end
#
# IIF #485 --
# using Revise
using Test
using Logging
using Statistics
using DistributedFactorGraphs
using IncrementalInference
using TensorCast
@testset "test basic three variable graph with prior" begin
VAR1 = :a
VAR2 = :b
VAR3 = :c
# logger = SimpleLogger(stdout, Logging.Debug)
# global_logger(logger)
dfg = initfg() #LocalDFG{SolverParams}(solverParams=SolverParams())
# Add some nodes.
v1 = addVariable!(dfg, VAR1, ContinuousScalar, tags = [:POSE])
v2 = addVariable!(dfg, VAR2, ContinuousScalar, tags = [:POSE])
v3 = addVariable!(dfg, VAR3, ContinuousScalar, tags = [:LANDMARK])
f1 = addFactor!(dfg, [VAR1; VAR2], LinearRelative(Normal(50.0,2.0)) )
f2 = addFactor!(dfg, [VAR2; VAR3], LinearRelative(Normal(50.0,2.0)) )
addFactor!(dfg, [VAR1], Prior(Normal()))
# drawGraph(dfg, show=true)
# tree = buildTreeReset!(dfg)
# # drawTree(tree, show=true)
#
# getCliqFactors(tree, VAR3)
# getCliqFactors(tree, VAR1)
initAll!(dfg)
# cliq= getClique(tree, VAR3)
# getCliqueData(cliq)
#
# cliq= getClique(tree, VAR1)
# getCliqueData(cliq)
getSolverParams(dfg).limititers = 50
# getSolverParams(dfg).drawtree = true
# getSolverParams(dfg).showtree = true
# getSolverParams(dfg).dbg = true
## getSolverParams(dfg).async = true
tree = solveTree!(dfg) #, recordcliqs=ls(dfg))
pts_ = getBelief(dfg, :c) |> getPoints
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 70 < Statistics.mean(pts) < 130
# #
# using Gadfly, Cairo, Fontconfig
# drawTree(tree, show=true, imgs=true)
end
#
# test basic forward convolve, see IIF issue #477
# using Revise
using Test
using IncrementalInference
using Statistics
using TensorCast
##
@testset "Test basic convolution result (#477)..." begin
##
function forwardConvolve(X0::AbstractVector{P}, model) where P
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
initVariable!(fg, :x0, X0)
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x0;:x1], model)
## TODO -- don't use name here, add API to just use z2 here
return approxConv(fg, :x0x1f1, :x1)
end
## Start
# first numerical values -- samples from the marginal of X0
z1 = Normal(0,0.1)
X0 = [rand(z1, 1) for _ in 1:100]
## predict -- project / conv
# 0 -> 1 seconds
# make approx function
z2 = Normal(11,1.0) # odo
statemodel = LinearRelative( z2 )
X1_ = forwardConvolve(X0, statemodel)
## measure -- product of beliefs, using `ApproxManifoldProducts.jl`
predX1 = manikde!(ContinuousScalar, X1_)
z3 = Normal(9.5,0.75)
measX1 = manikde!(ContinuousScalar, [rand(z3,1) for _ in 1:100])
# do actual product
posterioriX1 = predX1 * measX1
X1 = getPoints(posterioriX1)
## predict, 1->2 seconds
z4 = Normal(8,2.0) # odo
statemodel = LinearRelative( z4 )
X2__ = forwardConvolve(X1, statemodel)
@cast X2_[i,j] := X2__[j][i]
@test size(X2_) == (1,100)
@test 15 < Statistics.mean(X2_) < 25
##
end
@error "Add a test to ensure that approxConv does NOT change target variable VND.val values!" | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 10186 | # basic graph tests for trivial cases
using Test
using Statistics
using IncrementalInference
##
@testset "test basic utility functions" begin
@test incrSuffix(:x45_4) == :x45_5
@test incrSuffix(:x45, +3) == :x48
@test incrSuffix(:x45_4, -1) == :x45_3
end
@testset "Test basic single variable graph with one prior..." begin
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addFactor!(fg, [:x0;], Prior(Normal(0.0,1.0)))
# test solved flag
@test getSolvedCount(fg, :x0) == 0
@test !isSolved(getVariable(fg, :x0))
makeSolverData!(fg)
# run solver once
tree = solveTree!(fg)
@test getSolvedCount(fg, :x0) == 1
@test isSolved(fg, :x0)
tree = solveTree!(fg)
@test getSolvedCount(fg, :x0) == 2
@test isSolved(fg, :x0)
# check mean and covariance
@test (getBelief(fg, :x0) |> getKDEMean .|> abs)[1] < 0.5
pts_ = getPoints(getBelief(fg, :x0))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.3 < Statistics.cov( pts[1,:] ) < 1.9
# test free solvable variables (occurs in fixed-/ clique recycling)
addVariable!(fg, :x1, ContinuousScalar, solvable=1)
solveTree!(fg, storeOld=true)
@test getSolvable(fg, :x1) == 0
end
@testset "Test basic single variable graph with one prior offset by 1000..." begin
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addFactor!(fg, [:x0;], Prior(Normal(1000.0,1.0)))
tree = solveTree!(fg)
# check mean and covariance
@test abs((getBelief(fg, :x0) |> getKDEMean)[1]-1000) < 0.5
pts_ = getPoints(getBelief(fg, :x0))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.4 < Statistics.cov( pts[1,:] ) < 1.8
end
@testset "Test basic single variable graph with two identical priors..." begin
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addFactor!(fg, [:x0;], Prior(Normal(0.0,1.0)))
addFactor!(fg, [:x0;], Prior(Normal(0.0,1.0)))
tree = solveTree!(fg)
# check mean and covariance
@test (getBelief(fg, :x0) |> getKDEMean .|> abs)[1] < 0.4
# should be sqrt(0.5) = 0.7, but lands near 0.6 instead -- computation is too confident.
pts_ = getPoints(getBelief(fg, :x0))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.3 < Statistics.cov( pts[1,:] ) < 1.0
end
@testset "Test basic single variable graph with three identical priors..." begin
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addFactor!(fg, [:x0;], Prior(Normal(0.0,1.0)))
addFactor!(fg, [:x0;], Prior(Normal(0.0,1.0)))
addFactor!(fg, [:x0;], Prior(Normal(0.0,1.0)))
tree = solveTree!(fg)
# check mean and covariance
@test (getBelief(fg, :x0) |> getKDEMean .|> abs)[1] < 0.4
# should be sqrt(1/3) = 0.577, but lands near 0.35 instead -- computation is too confident.
pts_ = getPoints(getBelief(fg, :x0))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.1 < Statistics.cov( pts[1,:] ) < 0.75
end
@testset "Test basic single variable graph with two priors at + and - 1..." begin
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addFactor!(fg, [:x0;], Prior(Normal(-1.0,1.0)))
addFactor!(fg, [:x0;], Prior(Normal(+1.0,1.0)))
tree = solveTree!(fg)
# check mean and covariance -- should be zero
@test (getBelief(fg, :x0) |> getKDEMean .|> abs)[1] < 0.8
# should be sqrt(1/2) = 0.707 -- computation results nearer 0.7.
pts_ = getPoints(getBelief(fg, :x0))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.2 < Statistics.cov( pts[1,:] ) < 1.5
end
@testset "Test basic single variable graph with two priors at + and - 1, offset by -1000..." begin
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addFactor!(fg, [:x0;], Prior(Normal(-1.0-1000,1.0)))
addFactor!(fg, [:x0;], Prior(Normal(+1.0-1000,1.0)))
tree = solveTree!(fg)
# check mean and covariance -- should be zero
@test abs((getBelief(fg, :x0) |> getKDEMean)[1] + 1000) < 0.6
# should be sqrt(1/2) = 0.707 -- computation results nearer 0.7.
pts_ = getPoints(getBelief(fg, :x0))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.2 < Statistics.cov( pts[1,:] ) < 1.1
end
@testset "Test basic two variable graph with two identical priors and weak connection..." begin
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x0;], Prior(Normal(0.0,1.0)))
addFactor!(fg, [:x1;], Prior(Normal(0.0,1.0)))
addFactor!(fg, [:x0;:x1;], LinearRelative(Normal(0.0,10.0)))
tree = solveTree!(fg)
# check mean and covariance -- should be zero
@test (getBelief(fg, :x0) |> getKDEMean .|> abs)[1] < 0.6
@test (getBelief(fg, :x1) |> getKDEMean .|> abs)[1] < 0.6
pts_ = getPoints(getBelief(fg, :x0))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.4 < Statistics.cov( pts[1,:] ) < 2.3
pts_ = getPoints(getBelief(fg, :x1))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.4 < Statistics.cov( pts[1,:] ) < 2.4
end
@testset "Test basic two variable graph with two separated priors and weak connection..." begin
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x0;], Prior(Normal(-1.0,1.0)))
addFactor!(fg, [:x1;], Prior(Normal(+1.0,1.0)))
addFactor!(fg, [:x0;:x1;], LinearRelative(Normal(0.0,10.0)))
tree = solveTree!(fg)
# check mean and covariance -- should be near each prior
@test abs((getBelief(fg, :x0) |> getKDEMean)[1]+1) < 0.75
@test abs((getBelief(fg, :x1) |> getKDEMean)[1]-1) < 0.75
pts_ = getPoints(getBelief(fg, :x0))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.3 < Statistics.cov( pts[1,:] ) < 2.5
pts_ = getPoints(getBelief(fg, :x1))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.3 < Statistics.cov( pts[1,:] ) < 2.5
end
@testset "Test basic two variable graph with two separated priors and strong connection..." begin
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
addVariable!(fg, :x2, ContinuousScalar)
addFactor!(fg, [:x0;], Prior(Normal(-1.0,1.0)))
addFactor!(fg, [:x2;], Prior(Normal(+1.0,1.0)))
addFactor!(fg, [:x0;:x1;], LinearRelative(Normal(0.0,1.0)))
addFactor!(fg, [:x1;:x2;], LinearRelative(Normal(0.0,1.0)))
tree = solveTree!(fg)
# check mean and covariance -- should between two priors somewhere
@test abs((getBelief(fg, :x0) |> getKDEMean)[1] + 1) < 0.9
@test abs((getBelief(fg, :x1) |> getKDEMean)[1]) < 0.9
@test abs((getBelief(fg, :x2) |> getKDEMean)[1] - 1) < 0.9
pts_ = getPoints(getBelief(fg, :x0))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.3 < Statistics.cov( pts[1,:] ) < 1.8
pts_ = getPoints(getBelief(fg, :x1))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.3 < Statistics.cov( pts[1,:] ) < 2.0
pts_ = getPoints(getBelief(fg, :x2))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.3 < Statistics.cov( pts[1,:] ) < 2.2
end
@testset "Test basic five variable graph with two separated priors and nominal connection..." begin
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
addVariable!(fg, :x2, ContinuousScalar)
addVariable!(fg, :x3, ContinuousScalar)
addVariable!(fg, :x4, ContinuousScalar)
addFactor!(fg, [:x0;], Prior(Normal(-3.0,1.0)))
addFactor!(fg, [:x4;], Prior(Normal(+3.0,1.0)))
addFactor!(fg, [:x0;:x1;], LinearRelative(Normal(0.0,1.0)))
addFactor!(fg, [:x1;:x2;], LinearRelative(Normal(0.0,1.0)))
addFactor!(fg, [:x2;:x3;], LinearRelative(Normal(0.0,1.0)))
addFactor!(fg, [:x3;:x4;], LinearRelative(Normal(0.0,1.0)))
# #1196
drawGraph(fg, filepath="testgraphplot/myfg.dot", show=false)
tree = solveTree!(fg, storeOld=true)
# using KernelDensityEstimatePlotting
# plotKDE((x->getBelief(fg,x)).([:x0;:x1;:x2;:x3;:x4]))
# using Gadfly, Cairo, Fontconfig
# drawTree(tree,show=true,imgs=true)
# check mean and covariance -- should be zero
X0 = (getBelief(fg, :x0) |> getKDEMean)[1]
X1 = (getBelief(fg, :x1) |> getKDEMean)[1]
X2 = (getBelief(fg, :x2) |> getKDEMean)[1]
X3 = (getBelief(fg, :x3) |> getKDEMean)[1]
X4 = (getBelief(fg, :x4) |> getKDEMean)[1]
@test X0 < X1 < X2 < X3 < X4
@test abs(X0+X4) < 2.2
@test abs(X1+X3) < 2.2
@test abs(X2) < 2.2
pts_ = getPoints(getBelief(fg, :x0))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.2 < Statistics.cov( pts[1,:] ) < 2.8
pts_ = getPoints(getBelief(fg, :x1))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.2 < Statistics.cov( pts[1,:] ) < 2.9
pts_ = getPoints(getBelief(fg, :x2))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.2 < Statistics.cov( pts[1,:] ) < 3.0
pts_ = getPoints(getBelief(fg, :x3))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.2 < Statistics.cov( pts[1,:] ) < 3.1
pts_ = getPoints(getBelief(fg, :x4))
TensorCast.@cast pts[i,j] := pts_[j][i]
@test 0.2 < Statistics.cov( pts[1,:] ) < 3.2
@testset "Test localProduct on solveKey" begin
localProduct(fg,:x2)
localProduct(fg,:x2, solveKey=:graphinit)
end
end
##
@testset "consistency check on more factors (origin is a DERelative fail case)" begin
##
fg = initfg()
addVariable!(fg, :x0, Position{1})
addFactor!(fg, [:x0], Prior(Normal(1.0, 0.01)))
# force a basic setup
initAll!(fg)
@test isapprox( 1, getPPE(fg, :x0).suggested[1]; atol=0.1)
##
addVariable!(fg, :x1, Position{1})
addFactor!(fg, [:x0;:x1], LinearRelative(Normal(1.0, 0.01)))
addVariable!(fg, :x2, Position{1})
addFactor!(fg, [:x1;:x2], LinearRelative(Normal(1.0, 0.01)))
addVariable!(fg, :x3, Position{1})
addFactor!(fg, [:x2;:x3], LinearRelative(Normal(1.0, 0.01)))
##
tree = solveGraph!(fg)
##
@test isapprox( 1, getPPE(fg, :x0).suggested[1]; atol=0.1)
@test isapprox( 4, getPPE(fg, :x3).suggested[1]; atol=0.3)
## check contents of tree messages
tree[1]
msg1 = IIF.getMessageBuffer(tree[1])
##
end
@testset "Test graph reset to init..." begin
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addFactor!(fg, [:x0;], Prior(Normal(1000.0,1.0)))
initAll!(fg)
# init values before solve
pts_ = getPoints(getBelief(fg, :x0))
TensorCast.@cast pts[i,j] := pts_[j][i]
X0 = pts |> deepcopy
tree = solveTree!(fg)
# values after solve
pts_ = getPoints(getBelief(fg, :x0))
TensorCast.@cast pts[i,j] := pts_[j][i]
X0s = pts
@test 1e-10 < norm(X0 - X0s)
resetInitialValues!(fg)
pts_ = getPoints(getBelief(fg, :x0))
TensorCast.@cast pts[i,j] := pts_[j][i]
X0reset = pts |> deepcopy
@test norm(X0 - X0reset) < 1e-10
end
@testset "Test MetaPrior" begin
fg = generateGraph_Kaess()
addFactor!(fg, [:x1], MetaPrior(nothing))
solveGraph!(fg)
IIF.solveGraphParametric!(fg)
end
#
# test Manifolds
using Manifolds
using Test
##
@testset "Basic Manifolds consistency check" begin
##
w = [-0.0;-0.78;-0.18]
M = SpecialEuclidean(3)
Mr = M.manifold[2]
pPq = ArrayPartition(zeros(3), exp(Mr, Identity(Mr), hat(Mr, Identity(Mr), w)))
rPc_ = exp(M, Identity(M), hat(M, Identity(M), [zeros(3);w]))
rPc = ArrayPartition(rPc_.x[1], rPc_.x[2])
@test isapprox(pPq.x[1], rPc.x[1])
@test isapprox(pPq.x[2], rPc.x[2])
##
end
## | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 8146 | using Test
using DistributedFactorGraphs
using IncrementalInference
##
@testset "Test consolidation of factors #467" begin
fg = generateGraph_LineStep(20, poseEvery=1, landmarkEvery=4, posePriorsAt=collect(0:7), sightDistance=2, solverParams=SolverParams(algorithms=[:default, :parametric]))
M, labels, minimizer, Σ = IIF.solveGraphParametric(fg)
d = Dict(labels.=>minimizer)
for i in 0:20
sym = Symbol("x",i)
@test isapprox(d[sym][1], i, atol=1e-6)
end
for i in 0:4:20
sym = Symbol("lm",i)
@test isapprox(d[sym][1], i, atol=1e-6)
end
end
##
@testset "Parametric Tests" begin
fg = LocalDFG(solverParams=SolverParams(algorithms=[:default, :parametric]))
addVariable!(fg, :x0, ContinuousScalar)
initVariable!(fg, :x0, Normal(0.1,1.1), :parametric)
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x0,:x1], LinearRelative(Normal(1.0, 1.2)))
vardict, result, flatvars, Σ = IIF.solveConditionalsParametric(fg, [:x1])
v1 = vardict[:x1]
@test isapprox(v1.val, [1.1], atol=1e-3)
# TODO what should the covariance be, should covariance on :x0 not influence it?
@test isapprox(v1.cov, [1.44;;], atol=1e-3)
initVariable!(fg, :x1, Normal(v1.val[1], sqrt(v1.cov[1])), :parametric)
addVariable!(fg, :x2, ContinuousScalar)
addFactor!(fg, [:x0,:x2], LinearRelative(Normal(2.0, 0.5)))
addFactor!(fg, [:x1,:x2], LinearRelative(Normal(1.1, 0.5)))
vardict, result, flatvars, Σ = IIF.solveConditionalsParametric(fg, [:x2])
v2 = vardict[:x2]
@test isapprox(v2.val, [2.15], atol=1e-3)
# TODO what should the covariance be?
@test isapprox(v2.cov, [0.125;;], atol=1e-3)
initVariable!(fg, :x2, Normal(v2.val[1], sqrt(v2.cov[1])), :parametric)
addFactor!(fg, [:x0], Prior(Normal(0.1,1.1)))
IIF.solveGraphParametric!(fg; is_sparse=false)
end
@testset "Parametric Tests" begin
##
fg = generateGraph_LineStep(7, poseEvery=1, landmarkEvery=0, posePriorsAt=collect(0:7), sightDistance=2, solverParams=SolverParams(algorithms=[:default, :parametric]))
M, labels, minimizer, Σ = IIF.solveGraphParametric(fg)
d = Dict(labels.=>minimizer)
for i in 0:7
sym = Symbol("x",i)
@test isapprox(d[sym][1], i, atol=1e-6)
end
##
fg = generateGraph_LineStep(2, graphinit=true, vardims=1, poseEvery=1, landmarkEvery=0, posePriorsAt=Int[0], sightDistance=3, solverParams=SolverParams(algorithms=[:default, :parametric]))
@test IIF.autoinitParametric!(fg, :x0)
v0 = getVariable(fg,:x0)
@test length(v0.solverDataDict[:parametric].val[1]) === 1
@test isapprox(v0.solverDataDict[:parametric].val[1][1], 0.0, atol = 1e-4)
@test IIF.autoinitParametric!(fg, :x1)
v0 = getVariable(fg,:x1)
@test length(v0.solverDataDict[:parametric].val[1]) === 1
@test isapprox(v0.solverDataDict[:parametric].val[1][1], 1.0, atol = 1e-4)
IIF.initParametricFrom!(fg)
#
v0 = getVariable(fg,:x0)
@test length(v0.solverDataDict[:parametric].val[1]) === 1
@test isapprox(v0.solverDataDict[:parametric].val[1][1], 0.0, atol = 0.1)
v1 = getVariable(fg,:x1)
@test isapprox(v1.solverDataDict[:parametric].val[1][1], 1.0, atol = 0.1)
##
fg = generateGraph_LineStep(10, vardims=2, poseEvery=1, landmarkEvery=3, posePriorsAt=Int[0,5,10], sightDistance=3, solverParams=SolverParams(algorithms=[:default, :parametric]))
# addFactor!(fg, [:x5; :x15], LinearRelative(Normal(10, 0.1)))
# addFactor!(fg, [:x15; :x25], LinearRelative(Normal(10, 0.1)))
#to manually check all factors
# foreach(fct->println(fct.label, ": ", getFactorType(fct).Z), getFactors(fg))
# @profiler d,st = IIF.solveGraphParametric(fg)
M, labels, minimizer, Σ = IIF.solveGraphParametric(fg)
d = Dict(labels.=>minimizer)
for i in 0:10
sym = Symbol("x",i)
@test isapprox(d[sym][1], i, atol=1e-6)
@test isapprox(d[sym][2], i, atol=1e-6)
end
# print results out
if false
foreach(println, d)
end
##
foreach(x->getSolverData(getVariable(fg,x.first),:parametric).val[1] = x.second, pairs(d))
# getSolverParams(fg).dbg=true
# getSolverParams(fg).drawtree=true
# getSolverParams(fg).async = true
getSolverParams(fg).graphinit = false
tree2 = IIF.solveTree!(fg; algorithm = :parametric) #, recordcliqs=ls(fg))
for i in 0:10
sym = Symbol("x",i)
var = getVariable(fg,sym)
@show val = var.solverDataDict[:parametric].val
@test isapprox(val[1][1], i, atol=1e-3)
@test isapprox(val[1][2], i, atol=1e-3)
end
##
# Print answers
if false
vsds = DFG.getSolverData.(getVariables(fg), :parametric)
foreach(v->println(v.label, ": ", DFG.getSolverData(v, :parametric).val), sort!(getVariables(fg), by=getLabel, lt=natural_lt))
end
## #################################################################
fg = LocalDFG( solverParams=SolverParams(algorithms=[:default, :parametric]))
# fg = LocalDFG{SolverParams}( solverParams=SolverParams())
N = 100
fg.solverParams.N = N
graphinit = false
addVariable!(fg, :x0, ContinuousScalar, N=N) # autoinit = graphinit
addFactor!(fg, [:x0], Prior(Normal(-1.0, 1.0)))
addVariable!(fg, :x1, ContinuousScalar, N=N) # autoinit = graphinit
addVariable!(fg, :x2, ContinuousScalar, N=N) # autoinit = graphinit
addFactor!(fg, [:x2], Prior(Normal(+1.0, 1.0)))
addFactor!(fg, [:x0; :x1], LinearRelative(Normal(0.0, 1e-1)), graphinit=graphinit)
addFactor!(fg, [:x1; :x2], LinearRelative(Normal(0.0, 1e-1)), graphinit=graphinit)
foreach(fct->println(fct.label, ": ", getFactorType(fct).Z), getFactors(fg))
M, labels, minimizer, Σ = IIF.solveGraphParametric(fg)
d = Dict(labels.=>minimizer)
foreach(println, d)
@test isapprox(d[:x0][1][1], -0.01, atol=1e-3)
@test isapprox(d[:x1][1][1], 0.0, atol=1e-3)
@test isapprox(d[:x2][1][1], 0.01, atol=1e-3)
##
foreach(x->getSolverData(getVariable(fg,x.first),:parametric).val[1] = x.second, pairs(d))
# fg.solverParams.showtree = true
# fg.solverParams.drawtree = true
# fg.solverParams.dbg = true
# fg.solverParams.graphinit = false
# task = @async begin
# global tree2
# global smt
# global hist
# force message passing with a manual variable order
tree2 = solveTree!(fg; algorithm=:parametric, eliminationOrder=[:x0, :x2, :x1])
# end
foreach(v->println(v.label, ": ", DFG.getSolverData(v, :parametric).val), getVariables(fg))
@test isapprox(getVariable(fg,:x0).solverDataDict[:parametric].val[1][1], -0.01, atol=1e-3)
@test isapprox(getVariable(fg,:x1).solverDataDict[:parametric].val[1][1], 0.0, atol=1e-3)
@test isapprox(getVariable(fg,:x2).solverDataDict[:parametric].val[1][1], 0.01, atol=1e-3)
## ##############################################################################
## multiple sections
fg = generateGraph_LineStep(10, poseEvery=1, landmarkEvery=10, posePriorsAt=Int[0,10], sightDistance=5, solverParams=SolverParams(algorithms=[:default, :parametric]))
# break fg in 2
deleteFactor!(fg, :x5x6f1)
# plotDFG(fg)
#check all factors
# foreach(fct->println(fct.label, ": ", getFactorType(fct).Z), getFactors(fg))
# @profiler d,st = IIF.solveGraphParametric(fg)
M, labels, minimizer, Σ = IIF.solveGraphParametric(fg)
d = Dict(labels.=>minimizer)
if false
foreach(println, d)
end
for i in 0:10
sym = Symbol("x",i)
@test isapprox(d[sym][1], i, atol=1e-6)
end
foreach(x->getSolverData(getVariable(fg,x.first),:parametric).val[1] = x.second, pairs(d))
# fg.solverParams.showtree = true
# fg.solverParams.drawtree = true
# fg.solverParams.dbg = false
getSolverParams(fg).graphinit = false
tree2 = IIF.solveTree!(fg; algorithm=:parametric)
# print results
if false
vsds = DFG.getSolverData.(getVariables(fg), :parametric)
foreach(v->println(v.label, ": ", DFG.getSolverData(v, :parametric).val), getVariables(fg))
end
for i in 0:10
sym = Symbol("x",i)
var = getVariable(fg,sym)
val = var.solverDataDict[:parametric].val
# TODO investigate why tolerance degraded (it's tree related and not bad enough to worry about now)
@test isapprox(val[1][1], i, atol=5e-4)
end
##
end
@testset "initAll!(fg, :parametric)" begin
##
fg = generateGraph_LineStep(7, poseEvery=1, landmarkEvery=0, posePriorsAt=collect(0:7), sightDistance=2, solverParams=SolverParams(graphinit=false), graphinit=false)
@test (l->!isInitialized(fg, l, :parametric)).(ls(fg)) |> all
initAll!(fg, :parametric)
@test (l->isInitialized(fg, l, :parametric)).(ls(fg)) |> all
##
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 9695 | using Test
using IncrementalInference
##
@testset "Testing basic marginalization" begin
## linear 6
N=6
fg = generateGraph_LineStep(N;
graphinit=false,
poseEvery=1,
landmarkEvery=N+1,
posePriorsAt=[0],
landmarkPriorsAt=[],
sightDistance=N+1)
deleteFactor!.(fg, [Symbol("x$(i)lm0f1") for i=1:(N-1)])
# tree = buildTreeReset!(fg, show=true, drawpdf=true)
tree = solveTree!(fg)
for var in sortDFG(ls(fg))
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
println("Testing ", var,": ", sppe)
@test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35)
end
defaultFixedLagOnTree!(fg, 6)
# Back up data from these variables so we can compare them once we solve again.
lm0 = deepcopy(getVal(fg, :lm0))
X0 = deepcopy(getVal(fg, :x0))
X1 = deepcopy(getVal(fg, :x1))
fifoFreeze!(fg)
##
tree = solveTree!(fg; recordcliqs=ls(fg));
for var in sortDFG(ls(fg))
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
println("Testing ", var,": ", sppe)
@test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35)
end
# although variables are marginalized, no cliques are
@test calcCliquesRecycled(tree) == (6,0,0,0)
lm0cmp = deepcopy(getVal(fg, :lm0))
X0cmp = deepcopy(getVal(fg, :x0))
X1cmp = deepcopy(getVal(fg, :x1))
@test X0 == X0cmp #Frozen
@test lm0 == lm0cmp #Frozen
@test X1 != X1cmp #Recalculated
deleteVariable!(fg, :x0)
addVariable!.(fg, [Symbol("x$i") for i = 7:9], ContinuousScalar)
addFactor!(fg, [:x6,:x7], LinearRelative(Normal(1.0, 0.1)))
addFactor!(fg, [:x7,:x8], LinearRelative(Normal(1.0, 0.1)))
addFactor!(fg, [:x8,:x9], LinearRelative(Normal(1.0, 0.1)))
addFactor!(fg, [:lm0, :x9], LinearRelative(Normal(9,0.1)))
smtasks = Task[];
eliminationOrder = [:x1, :x3, :x9, :x7, :x5, :lm0, :x8, :x4, :x2, :x6]
tree = solveTree!(fg; recordcliqs=ls(fg), smtasks, eliminationOrder);
hists = fetchCliqHistoryAll!(smtasks)
# clique 7 should be marginalized and therefore not do an up- or down-solve
@test calcCliquesRecycled(tree) == (7,1,0,0)
@test !(IIF.solveDown_StateMachine in getindex.(hists[7], 3))
@test !(IIF.solveUp_StateMachine in getindex.(hists[7], 3))
for var in sortDFG(ls(fg))
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
println("Testing ", var,": ", sppe)
@test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35)
end
@test lm0 == getVal(fg, :lm0) #Still Frozen
@test X1cmp == getVal(fg, :x1) #also now Frozen
unfreezeVariablesAll!(fg)
# freeze again
defaultFixedLagOnTree!(fg, 9)
tree = solveTree!(fg; eliminationOrder)
@test lm0 == getVal(fg, :lm0) #Still Frozen
@test X1cmp != getVal(fg, :x1) #not frozen
# freeze x6 and x8 so that clique 2 becomes fully marginalized
setfreeze!(fg, [:x6, :x8])
smtasks = Task[];
tree = solveTree!(fg; recordcliqs=ls(fg), smtasks, eliminationOrder);
hists = fetchCliqHistoryAll!(smtasks)
# clique 2 should be marginalized and therefore not do an up- or down-solve
@test calcCliquesRecycled(tree) == (7,1,0,0)
@test !(IIF.solveDown_StateMachine in getindex.(hists[2], 3))
@test !(IIF.solveUp_StateMachine in getindex.(hists[2], 3))
@test areCliqVariablesAllMarginalized(fg, getClique(tree,2))
tree = solveTree!(fg, tree; recordcliqs=ls(fg), eliminationOrder);
for var in sortDFG(ls(fg))
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
println("Testing ", var,": ", sppe)
@test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.355)
end
@test calcCliquesRecycled(tree) == (7,1,6,0)
X1 = deepcopy(getVal(fg, :x1))
# to freeze clique 2,3,4
setfreeze!(fg, [:x4, :x5, :x7])
tree = solveTree!(fg, tree; recordcliqs=ls(fg), eliminationOrder);
# csmAnimate(tree, hists, frames=1)
@test calcCliquesRecycled(tree) == (7,3,4,0)
@test lm0 == getVal(fg, :lm0) #Still Frozen
@test X1 != getVal(fg, :x1) #not frozen
for i = [2,3,4]
@test areCliqVariablesAllMarginalized(fg, getClique(tree, i))
end
end
@testset "Testing basic incremental recycle" begin
fg = generateGraph_LineStep(3;
poseEvery=1,
landmarkEvery=3,
posePriorsAt=[],
landmarkPriorsAt=[0],
sightDistance=2,
solverParams=SolverParams(algorithms=[:default, :parametric]))
getSolverParams(fg).graphinit = false
getSolverParams(fg).treeinit = true
# getSolverParams(fg).dbg = true
# tree = buildTreeReset!(fg, drawpdf=true, show=true)
eliminationOrder = [:lm3, :x0, :x3, :x1, :x2, :lm0]
tree = solveTree!(fg; recordcliqs=ls(fg), eliminationOrder); # , smtasks=smtasks
addFactor!(fg, [:lm3], Prior(Normal(3, 0.1)), graphinit=false)
smtasks = Task[]
tree = solveTree!(fg, tree; smtasks, recordcliqs=ls(fg), eliminationOrder);
hists = fetchCliqHistoryAll!(smtasks)
@test !(IIF.solveUp_StateMachine in getindex.(hists[3], 3))
for var in sortDFG(ls(fg))
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
println("Testing ", var,": ", sppe)
@test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35)
end
end
@testset "Testing incremental hex" begin
N=6
sfg = generateGraph_LineStep(N;
graphinit=false,
poseEvery=1,
landmarkEvery=N+1,
posePriorsAt=[0],
landmarkPriorsAt=[],
sightDistance=N+1,
solverParams=SolverParams(algorithms=[:default, :parametric]))
deleteFactor!.(sfg, [Symbol("x$(i)lm0f1") for i=1:(N-1)])
vsyms = sortDFG(ls(sfg))
fsyms = sortDFG(lsf(sfg))
fg = deepcopyGraph(LocalDFG, sfg, vsyms[1:3], fsyms[1:3])
getSolverParams(fg).graphinit = false
getSolverParams(fg).treeinit = true
getSolverParams(fg).dbg = true
getSolverParams(fg).useMsgLikelihoods = true
tree = buildTreeReset!(fg)#, drawpdf=true, show=true)
smtasks = Task[]
tree = solveTree!(fg, tree; smtasks=smtasks, recordcliqs=ls(fg));
for var in sortDFG(ls(fg))
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
# println("Testing ", var,": ", sppe)
@test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35)
end
# add some more
deepcopyGraph!(fg, sfg, vsyms[4:6], fsyms[4:6])
tree = solveTree!(fg, tree; smtasks=smtasks, recordcliqs=ls(fg));
for var in sortDFG(ls(fg))
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
# println("Testing ", var,": ", sppe)
@test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35)
end
# add some more
deepcopyGraph!(fg, sfg, vsyms[7:8], fsyms[7:8])
tree = solveTree!(fg, tree; smtasks=smtasks, recordcliqs=ls(fg));
for var in sortDFG(ls(fg))
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
# println("Testing ", var,": ", sppe)
@test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35)
end
# add some more, close the loop
deepcopyGraph!(fg, sfg, Symbol[], [fsyms[9]])
tree = solveTree!(fg, tree; smtasks=smtasks, recordcliqs=ls(fg));
for var in sortDFG(ls(fg))
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
println("Testing ", var,": ", sppe)
@test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35)
end
# force a reshuffle
addFactor!(fg, [:x4], Prior(Normal(4.1,0.1)))
tree = solveTree!(fg, tree; smtasks=smtasks, recordcliqs=ls(fg));
for var in sortDFG(ls(fg))
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
# println("Testing ", var,": ", sppe)
@test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35)
end
# and another reuse
addFactor!(fg, [:x4], Prior(Normal(3.9,0.1)))
tree = solveTree!(fg, tree; smtasks=smtasks, recordcliqs=ls(fg));
for var in sortDFG(ls(fg))
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
# println("Testing ", var,": ", sppe)
@test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.355)
end
# printCSMHistoryLogical(hists)
# all except clique 1 should go for UPRECYCLED directly
# | 1 x4 | 2 x2 | 3 x5 | 4 x3 | 5 lm0 | 6 x1
# ----+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------
# 1 | 1 setClique NULL | 3 setClique NULL | 4 setClique NULL | 5 setClique NULL | 6 setClique NULL | 7 setClique NULL
# 2 | 2 buildCliq NULL | 8 buildCliq UPRE | 9 buildCliq UPRE | 10 buildCliq UPRE | 11 buildCliq UPRE | 12 buildCliq UPRE
# 3 | 13 waitForUp NULL | 14 waitForUp UPRE | 15 waitForUp UPRE | 17 waitForUp UPRE | 19 waitForUp UPRE | 21 waitForUp UPRE
# 4 | 34 preUpSolv NULL | 31 preUpSolv UPRE | 16 preUpSolv UPRE | 18 preUpSolv UPRE | 20 preUpSolv UPRE | 22 preUpSolv UPRE
# 5 | 35 solveUp_S NULL | 32 postUpSol UPRE | 23 postUpSol UPRE | 24 postUpSol UPRE | 25 postUpSol UPRE | 26 postUpSol UPRE
# 6 | 36 postUpSol UPSO | 33 waitForDo UPRE | 27 waitForDo UPRE | 28 waitForDo UPRE | 29 waitForDo UPRE | 30 waitForDo UPRE
# 7 | 37 waitForDo UPSO | 39 solveDown UPRE | 40 solveDown UPRE | 43 solveDown UPRE | 41 solveDown UPRE | 44 solveDown UPRE
# 8 | 38 solveDown DOWN | 47 updateFro DOWN | 46 updateFro DOWN | 52 updateFro DOWN | 50 updateFro DOWN | 54 updateFro DOWN
# 9 | 42 updateFro DOWN | 49 exitState DOWN | 48 exitState DOWN | 53 exitState DOWN | 51 exitState DOWN | 55 exitState DOWN
# 10 | 45 exitState DOWN |
end | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 2807 |
using Test
using IncrementalInference
# using Gadfly
##
@testset "basic per clique stopping criteria" begin
##
fg = generateGraph_LineStep(1)
smtasks = Task[]
tree = solveTree!(fg, smtasks=smtasks, recordcliqs=[:x0;], limititercliqs=[(:x0=>2);])
hist = fetchCliqHistoryAll!(smtasks)
@test haskey(hist, 1)
@test hist[1] |> length == 2
# a normal solve should have 12 states; update when more are added.
fg = generateGraph_LineStep(1)
smtasks = Task[]
tree = solveTree!(fg, smtasks=smtasks, recordcliqs=[:x0;]);
hist = fetchCliqHistoryAll!(smtasks)
@test haskey(hist, 1)
@test hist[1] |> length == 12
end
@testset "test endless cycle case, issue #754" begin
fg = generateGraph_LineStep(5;
poseEvery=1,
landmarkEvery=5,
posePriorsAt=[0,2],
sightDistance=4,
solverParams=SolverParams(algorithms=[:default, :parametric]))
getSolverParams(fg).graphinit = false
getSolverParams(fg).treeinit = true
getSolverParams(fg).limititers = 50
smtasks = Task[]
tree = solveTree!(fg; smtasks=smtasks, verbose=true, timeout=50, recordcliqs=ls(fg));
end
@testset "basic test for tree initialization functionality" begin
# small canonical factor graph, without graphinit
fg = generateGraph_CaesarRing1D(graphinit=false)
getSolverParams(fg).graphinit = false
getSolverParams(fg).treeinit = true
@show getLogPath(fg)
# # debug only
# getSolverParams(fg).drawtree = true
# getSolverParams(fg).showtree = true
# getSolverParams(fg).dbg = true
# mkpath(getLogPath(fg))
# verbosefid = open(joinLogPath(fg, "csmVerbose.log"),"w")
tree = solveTree!(fg, timeout=70) # , verbose=true, verbosefid=verbosefid)
# flush(verbosefid)
# close(verbosefid)
# open(joinLogPath(fg, "csmLogicalReconstructMax.log"),"w") do io
# IIF.reconstructCSMHistoryLogical(getLogPath(fg), fid=io)
# end
end
@testset "basic tree initialization limittreeinit_iters" begin
# part of fg that can init
fg = generateGraph_LineStep(3; poseEvery=1)
good_vars = sortDFG(ls(fg))
# fg.solverParams.showtree = true
# fg.solverParams.drawtree = true
# part of fg that cannot init
addVariable!(fg, :s0, ContinuousScalar)
addVariable!(fg, :s1, ContinuousScalar)
addVariable!(fg, :s2, ContinuousScalar)
addFactor!(fg, [:s0;:s1], LinearRelative(Normal()))
addFactor!(fg, [:s1;:s2], LinearRelative(Normal()))
smtasks = Task[]
tree = solveTree!(fg; smtasks=smtasks, verbose=true)
for var in good_vars
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
println("Testing ", var,": ", sppe)
@test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.15)
end
@error "Restore test on GadflyExt.spyCliqMat"
# pl = spyCliqMat(getClique(tree,1));
##
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1826 | using IncrementalInference
using Test
@testset "basic Bayes tree construction" begin
global N=100
global fg = initfg()
# doors = [-100.0;0.0;100.0;300.0]'
# cov = [3.0]
addVariable!(fg,:x1, ContinuousScalar, N=N)
addFactor!(fg, [:x1;], Prior(Normal()))
addVariable!(fg,:x2, ContinuousScalar, N=N)
addFactor!(fg,[:x1, :x2], LinearRelative(Normal()))
addVariable!(fg, :x3, ContinuousScalar, N=N)
addFactor!(fg,[:x2,:x3],LinearRelative(Normal()))
addVariable!(fg, :l1, ContinuousScalar, N=N)
addFactor!(fg, [:x1,:l1], LinearRelative(Normal()) )
addFactor!(fg, [:x2,:l1], LinearRelative(Normal()) )
addVariable!(fg, :l2, ContinuousScalar, N=N)
addFactor!(fg, [:x3,:l2], LinearRelative(Normal()))
end
@testset "test building tree native" begin
global fg
resetFactorGraphNewTree!(fg)
# p = getEliminationOrder(fg, ordering=:qr)
p = [:l1;:l2;:x1;:x2;:x3]
println()
global fge = deepcopy(fg)
println("Building Bayes net...")
buildBayesNet!(fge, p)
global tree = BayesTree()
buildTree!(tree, fge, p)
@test getNumCliqs(tree) == 3
# Michael reference -- x2->x1, x2->x3, x2->x4, x2->l1, x4->x3, l1->x3, l1->x4
# Michael reference 3sig -- x2l1x4x3 x1|x2
# if false
# println("Bayes Tree")
# # Graphs.plot(tree.bt)
# fid = open("bt.dot","w+")
# write(fid,Graphs.to_dot(tree.bt))
# close(fid)
# run(`dot bt.dot -Tpdf -o bt.pdf`)
# end
println("Find potential functions for each clique")
cliq = tree.cliques[1] # start at the root
buildCliquePotentials(fg, tree, cliq); # fg does not have the marginals as fge does
end
# TODO -- add testing to ensure this is the correct tree!
@warn "add test tree verification"
# run(`evince bt.pdf`)
@testset "build tree from ordering" begin
global fg
resetFactorGraphNewTree!(fg)
vo = getEliminationOrder(fg)
tree = buildTreeFromOrdering!(fg, vo)
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 2924 | using IncrementalInference
using Test
## a new factor that is broken
struct BrokenFactor{T<: SamplableBelief} <: AbstractManifoldMinimize
Z::T
end
IIF.getManifold(::BrokenFactor) = TranslationGroup(1)
function IIF.getSample(cf::CalcFactor{<:BrokenFactor})
return rand(cf.factor.Z, 1)
end
function (s::CalcFactor{<:BrokenFactor})(z,
wxi,
wxj)
#
error("User factor has a bug.")
end
# # FIXME consolidate with CalcFactor according to #467
# function (s::BrokenFactor{<:IIF.ParametricTypes})(X1::AbstractArray{<:Real},
# X2::AbstractArray{<:Real};
# userdata::Union{Nothing,FactorMetadata}=nothing )
# error("User factor has a bug -- USE NEW CalcFactor API INSTEAD, v0.21.")
# end
##
@testset "Test CSM monitor/watchdog on errors" begin
##
# create a factor graph
fg = generateGraph_LineStep(10;
poseEvery=1,
landmarkEvery=5,
posePriorsAt=[0],
sightDistance=4,
solverParams=SolverParams(algorithms=[:default, :parametric]))
#
initAll!(fg)
##
#TODO test FSM watchdog
# add a broken factor - mid
addFactor!(fg, [:x9, :lm10], BrokenFactor(Normal()); graphinit=false)
smtasks = Task[]
@test_throws CompositeException tree = IIF.solveTree!(fg; smtasks=smtasks);
sleep(0.1)
## Test parametric solve also
addFactor!(fg, [:x9, :lm10], BrokenFactor(Normal()); graphinit=false)
##
# IIF.solveTree!(fg; smtasks=smtasks, algorithm = :parametric)
##
@test_throws CompositeException tree2 = IIF.solveTree!(fg; smtasks=smtasks, algorithm = :parametric)
sleep(0.1)
deleteFactor!(fg, :x9lm10f2)
## add a broken factor - leaf
addFactor!(fg, [:x10, :lm10], BrokenFactor(Normal()); graphinit=false)
@test_throws CompositeException tree2 = IIF.solveTree!(fg; smtasks=smtasks, algorithm = :parametric)
sleep(0.1)
deleteFactor!(fg, :x10lm10f2)
## add a broken factor - root
addFactor!(fg, [:x7, :lm10], BrokenFactor(Normal()); graphinit=false)
@test_throws CompositeException tree2 = IIF.solveTree!(fg; smtasks=smtasks, algorithm = :parametric)
##
end
@testset "test CSM debug options work" begin
## create a factor graph
fg = generateGraph_LineStep(10;
poseEvery=1,
landmarkEvery=5,
posePriorsAt=[0],
sightDistance=4 )
#
smtasks = Task[];
solveTree!(fg, smtasks=smtasks, recordcliqs=ls(fg));
## make sure we fetch the results
hists = fetchCliqHistoryAll!(smtasks);
printCSMHistoryLogical(hists)
printCSMHistorySequential(hists)
## test CSM resolve steps
@info "test repeatCSMStep"
csmc_ = repeatCSMStep!(hists, 1, 1);
##
end | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1773 | # test if fmd in getSample and factor eval is working right
using IncrementalInference
using Test
import IncrementalInference: getSample, getManifold
##
struct MyFactor{T <: SamplableBelief} <: IIF.AbstractManifoldMinimize
Z::T
# specialSampler approach will be deprecated
# specialSampler::Function
end
getManifold(mf::MyFactor) = TranslationGroup(getDimension(mf.Z))
function getSample( cf::CalcFactor{<:MyFactor})
#
@warn "getSample(cf::CalcFactor{<:MyFactor},::Int) does not get hypo sub-selected FMD data: $(DFG.getLabel.(cf.fullvariables))" cf.solvefor maxlog=1
# @assert length( DFG.getLabel.(fmd_[1].fullvariables) ) < 3 "this factor is only between two variables"
return rand(cf.factor.Z, 1)
end
function (cf::CalcFactor{<:MyFactor})(z, X1, X2)
@assert length(cf.fullvariables) < 3 "this factor is only between two variables. solvefor=$(cf.solvefor)"
# just a linear difference to complete the test
return X2 - (X1 + z)
end
##
@testset "test FactorMetadata is properly populated" begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1_a, ContinuousScalar)
addVariable!(fg, :x1_b, ContinuousScalar)
f0 = addFactor!(fg, [:x0], Prior(Normal()))
# create the object and add it to the graph
mf = MyFactor( Normal(10,1) )
## test #424
@test_throws AssertionError addFactor!(fg, [:x0;:x1_a;:x1_b], mf, multihypo=[1/2;1/2])
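# Hedged note on the convention being tested: `multihypo` needs one entry per
# connected variable, with the certain (non-hypothesis) variable weighted 1,
# hence the 2-element [1/2; 1/2] above throws for three variables while the
# [1; 1/2; 1/2] below is accepted.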
##
# this sampling might error
f1 = addFactor!(fg, [:x0;:x1_a;:x1_b], mf, multihypo=[1;1/2;1/2])
##
@test !isMultihypo(f0)
@test isMultihypo(f1)
##
meas = sampleFactor(fg, :x0x1_ax1_bf1, 10)
# initAll!(fg)
# pts = approxConv(fg, :x0x1_ax1_bf1, :x1_a)
# pts = approxConv(fg, :x0x1_ax1_bf1, :x1_b)
# pts = approxConv(fg, :x0x1_ax1_bf1, :x0)
##
solveTree!(fg);
##
end
# | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 652 | using AMD
using IncrementalInference
using Test
##
@testset "Test ccolamd for constrained variable ordering" begin
##
fg = generateGraph_Kaess(graphinit=false)
vo = getEliminationOrder(fg, constraints=[:x3], ordering=:ccolamd)
@test vo[end] == :x3
@test length(vo) == length(ls(fg))
vo = getEliminationOrder(fg, constraints=[:l2], ordering=:ccolamd)
@test vo[end] == :l2
vo = getEliminationOrder(fg, constraints=[:x3;:l2], ordering=:ccolamd)
@test intersect(vo[end-1:end], [:x3;:l2]) |> length == 2
# catch
# @error "IncrInfrApproxMinDegreeExt test issue, work needed for Julia 1.10 compat via AMD.jl"
# @test_broken false
# end
##
end | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 934 |
using IncrementalInference
using Test
##
@testset "test Circular" begin
##
fg = initfg()
getSolverParams(fg).useMsgLikelihoods = true
addVariable!.(fg, [Symbol("x$i") for i=0:4], Circular)
addFactor!(fg, [:x0], PriorCircular(Normal(0.0,0.1)))
map(i->addFactor!(fg, [Symbol("x$i"),Symbol("x$(i+1)")], CircularCircular(Normal(1.0, 0.1))), 0:3)
solveTree!(fg);
##
sppes = map(var->getPPE(var).suggested[1], sortDFG(getVariables(fg),by=getLabel))
gt = rem2pi.(collect(0:4), RoundNearest)
@show sppes
@show gt
@test all(isapprox.(sppes, gt, atol=0.35))
# test packing converters also
d = "/tmp/caesar/random/testfg"
saveDFG(d,fg)
lfg = loadDFG(d)
##
Base.rm(d*".tar.gz")
# check loaded fg for all variable and factors
@test issetequal(ls(fg), ls(lfg))
@test issetequal(lsf(fg), lsf(lfg))
##
end
@testset "test canonical helix generator utility" begin
##
tmp = calcHelix_T(0, 3, 25, radius=5, xr_t=t->(1/3)*t)
##
end
# | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 886 | using IncrementalInference
using Test
##
@testset "Test solveCliqueUp! and solveCliqDown!" begin
##
N=8
fg = generateGraph_LineStep(N;
graphinit=false,
poseEvery=1,
landmarkEvery=N+1,
posePriorsAt=[0],
landmarkPriorsAt=[],
sightDistance=N+1)
#
deleteFactor!.(fg, [Symbol("x$(i)lm0f1") for i=1:(N-1)])
# test the initAll! separately anyway
initAll!(fg)
tree = buildTreeReset!(fg)
# for debugging use
# ENV["JULIA_DEBUG"] = :csm_2
# solve clique up tests
a,b = solveCliqUp!(fg, tree, 2)
a,b = solveCliqUp!(fg, tree, 2; recordcliq = true)
@test length(a) > 0
# solve clique down tests
a,b = solveCliqDown!(fg, tree, 2)
a,b = solveCliqDown!(fg, tree, 2; recordcliq = true)
@test length(a) > 0
##
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 5743 | # originates from sisue #458
# using Revise
using Test
using IncrementalInference
@testset "Test clique factors, #458 Example 1" begin
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
addVariable!(fg, :x2, ContinuousScalar)
addVariable!(fg, :x3, ContinuousScalar)
addVariable!(fg, :x4, ContinuousScalar)
addVariable!(fg, :l0, ContinuousScalar)
addVariable!(fg, :l1, ContinuousScalar)
lc = LinearRelative(Normal())
lp = Prior(Normal())
addFactor!(fg, [:x0;:x1], lc, graphinit=false)
addFactor!(fg, [:x1;:x2], lc, graphinit=false)
addFactor!(fg, [:x2;:x3], lc, graphinit=false)
addFactor!(fg, [:x3;:x4], lc, graphinit=false)
addFactor!(fg, [:x0;:l0], lc, graphinit=false)
addFactor!(fg, [:x2;:l0], lc, graphinit=false)
addFactor!(fg, [:x0;:l1], lc, graphinit=false)
addFactor!(fg, [:x2;:l1], lc, graphinit=false)
addFactor!(fg, [:x0;], lp, graphinit=false)
addFactor!(fg, [:l0;], lp, graphinit=false)
# build tree
vo = Symbol[:x2, :x0, :l0, :x3, :x1, :l1, :x4]
tree = buildTreeReset!(fg, vo)
# drawTree(tree, show=true)
# check that frontal variables only show up once
frontals = getCliqFrontalVarIds.((x->getClique(tree, x)).([:x0; :l0; :x4]))
@test intersect(frontals[1], frontals[2]) |> length == 0
@test intersect(frontals[2], frontals[3]) |> length == 0
@test intersect(frontals[1], frontals[3]) |> length == 0
# check that all variables exist as frontals
lsvars = ls(fg)
@test intersect(union(frontals...), lsvars) |> length == lsvars |> length
## Now check if factors in cliques are okay
C3 = getClique(tree, :x0)
C3_fg = buildCliqSubgraph(fg, C3)
# drawGraph(C3_fg, show=true)
C3_fcts = [:x0l0f1;:x0l1f1;:x0x1f1;:x0f1]
@test intersect(ls(C3_fg), [:x0; :x1; :l0; :l1]) |> length == 4
@test intersect(lsf(C3_fg), C3_fcts) |> length == length(C3_fcts)
@test intersect(getCliqFactorIdsAll(C3), C3_fcts) |> length == length(C3_fcts)
C2 = getClique(tree, :l0)
C2_fg = buildCliqSubgraph(fg, C2)
# drawGraph(C2_fg, show=true)
C2_fcts = [:x1x2f1; :x2x3f1; :x2l0f1; :x2l1f1; :l0f1]
@test intersect(ls(C2_fg), [:x3; :x2; :x1; :l0; :l1]) |> length == 5
@test intersect(lsf(C2_fg), C2_fcts) |> length == length(C2_fcts)
@test intersect(getCliqFactorIdsAll(C2), C2_fcts) |> length == length(C2_fcts)
C1 = getClique(tree, :x4)
C1_fg = buildCliqSubgraph(fg, C1)
# drawGraph(C1_fg, show=true)
C1_fcts = [:x3x4f1;]
@test intersect(ls(C1_fg), [:x3; :x4; :x1; :l1]) |> length == 4
@test intersect(lsf(C1_fg), C1_fcts) |> length == length(C1_fcts)
@test intersect(getCliqFactorIdsAll(C1), C1_fcts) |> length == length(C1_fcts)
# check that all factors are counted
allCliqFcts = union(C1_fcts, C2_fcts, C3_fcts)
@test length(intersect(lsf(fg), allCliqFcts)) == length(allCliqFcts)
end
@testset "Test clique factors, #458 Example 2" begin
@warn "Test for Example 2 from 458 must still be coded."
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
addVariable!(fg, :x2, ContinuousScalar)
addVariable!(fg, :x3, ContinuousScalar)
addVariable!(fg, :lm0, ContinuousScalar)
addVariable!(fg, :lm3, ContinuousScalar)
lc = LinearRelative(Normal())
lp = Prior(Normal())
addFactor!(fg, [:x0;:x1], lc, graphinit=false)
addFactor!(fg, [:x1;:x2], lc, graphinit=false)
addFactor!(fg, [:x2;:x3], lc, graphinit=false)
addFactor!(fg, [:x0;:lm0], lc, graphinit=false)
addFactor!(fg, [:x1;:lm0], lc, graphinit=false)
addFactor!(fg, [:x2;:lm3], lc, graphinit=false)
addFactor!(fg, [:x3;:lm3], lc, graphinit=false)
# particular order from 458
vo = Symbol[:x0, :x2, :x1, :lm3, :lm0, :x3]
tree = buildTreeReset!(fg, vo)
# drawTree(tree, show=true)
sfg_1 = buildCliqSubgraph(fg,tree,:x3)
sfg_2 = buildCliqSubgraph(fg,tree,:x2)
sfg_3 = buildCliqSubgraph(fg,tree,:x0)
# drawGraph(sfg_1, show=true)
# drawGraph(sfg_2, show=true)
# drawGraph(sfg_3, show=true)
## WIP
C1_fcts = [:x1lm0f1; :x3lm3f1]
C2_fcts = [:x1x2f1; :x2x3f1; :x2lm3f1]
C3_fcts = [:x0x1f1; :x0lm0f1]
# check all factors are accounted for
@test union(C1_fcts, C2_fcts, C3_fcts) |> length == lsf(fg) |> length
@test intersect(C1_fcts, C2_fcts) |> length == 0
@test intersect(C1_fcts, C3_fcts) |> length == 0
@test intersect(C2_fcts, C3_fcts) |> length == 0
# clique 1
@test intersect(getCliqFactorIdsAll(tree, :x3), C1_fcts) |> length == 2
@test intersect(lsf(sfg_1), C1_fcts) |> length == 2
# clique 2
@test intersect( getCliqFactorIdsAll(tree, :x2), C2_fcts) |> length == 3
@test intersect( lsf(sfg_2), C2_fcts) |> length == 3
# clique 3
@test intersect( getCliqFactorIdsAll(tree, :x0), C3_fcts) |> length == 2
@test intersect( lsf(sfg_3), C3_fcts) |> length == 2
end
@testset "Test cliqueSubgraph frontals, seperators, potentials" begin
# clique subfg test
fg = generateGraph_LineStep(4, landmarkPriorsAt=[0,4])
# plotDFG(fg)
tree = buildTreeReset!(fg)
#check clique 1
cliq = getClique(tree, 1)
cliqfron = [:x0, :lm0, :x2]
cliqsep = Symbol[]
cliqfacs = [:lm0f1, :x0x2f1, :x0lm0f1, :x0f1, :x2lm0f1]
@test issetequal(getCliqFrontalVarIds(cliq), cliqfron)
@test issetequal(getCliqSeparatorVarIds(cliq), cliqsep)
@test issetequal(getCliqFactorIdsAll(cliq), cliqfacs)
sfg = buildCliqSubgraph(fg, cliq)
@test issetequal(ls(sfg), union(cliqsep, cliqfron))
@test issetequal(lsf(sfg), cliqfacs)
# check clique 2
cliq = getClique(tree, 2)
cliqfron = [:x4, :lm4]
cliqsep = [:x2]
cliqfacs = [:x2lm4f1, :x2x4f1, :x4lm4f1, :lm4f1]
@test issetequal(getCliqFrontalVarIds(cliq), cliqfron)
@test issetequal(getCliqSeparatorVarIds(cliq), cliqsep)
@test issetequal(getCliqFactorIdsAll(cliq), cliqfacs)
sfg = buildCliqSubgraph(fg, cliq)
@test issetequal(ls(sfg), union(cliqsep, cliqfron))
@test issetequal(lsf(sfg), cliqfacs)
sfg = buildCliqSubgraph(fg, tree, :x2)
# plotDFG(sfg)
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 3780 | # test CommonConvWrapper
using Test
# using NLsolve
using IncrementalInference
using Manifolds
using Statistics
using TensorCast
import IncrementalInference: getSample, getManifold
mutable struct FunctorWorks
a::Array{Float64,2}
end
@testset "FunctorWorks" begin
function (fw::FunctorWorks)(x)
fw.a[1,1] = -1.0
nothing
end
A = rand(2,3)
At = deepcopy(A)
At[1,1] = -1.0
fvar = FunctorWorks(A)
fvar(0.0)
@test At == A
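# The test passes because `FunctorWorks` stores `A` by reference (Julia arrays
# are not copied on construction), so mutating `fw.a` inside the call mutates
# the caller's `A` as well.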
end
mutable struct FunctorArray{T}
# This was a tuple, but an array will likely work better in the long term
fnc!::Function
a::Array{T, 1}
end
##
@testset "FunctorArray" begin
##
function testarray!(a1::Array{Float64, 2}, a2::Array{Float64,2})
a1[1,1] = -1.0
@show a1
nothing
end
function (fi::FunctorArray)(x)
fi.fnc!(fi.a...)
end
A = rand(2,3)
B = rand(2,3)
t = Array{Array{Float64,2},1}()
push!(t,A)
push!(t,B)
At = deepcopy(A)
At[1,1] = -1.0
fvar = FunctorArray(testarray!, t)
fvar([0.0])
@test At == A
##
end
##
# abstract Nonparametric <: Function
# This is what the intermediate user would be contributing
mutable struct Pose1Pose1Test{T} <: AbstractManifoldMinimize
Dx::T
end
getManifold(::Pose1Pose1Test) = TranslationGroup(1)
function getSample(cf::CalcFactor{<:Pose1Pose1Test})
return rand(cf.factor.Dx, 1)
end
# proposed standardized parameter list; does not have to be a functor
function (cf::CalcFactor{<:Pose1Pose1Test})(Dx,
p1,
p2 )
#
return Dx[1] - (p2[1] - p1[1])
end
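# Hedged note: this residual follows the usual IIF convention of returning
# measurement minus prediction, here Dx[1] - (p2[1] - p1[1]), which the solver
# drives toward zero when solving for either connected variable.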
##
@testset "Test with CommonConvWrapper for un-permuted root finding..." begin
##
N = 110
p1 = rand(1,N)
p2 = rand(1,N)
t = Array{Array{Float64,2},1}()
push!(t,p1)
push!(t,p2)
odo = Pose1Pose1Test(Normal(100.0,1.0))
fg = initfg()
X0 = addVariable!(fg, :x0, ContinuousEuclid{1})
initVariable!(fg, :x0, [zeros(1) for _ in 1:100])
X1 = addVariable!(fg, :x1, ContinuousEuclid{1})
addFactor!(fg, [:x0;:x1], odo, graphinit=false)
pts = approxConv(fg, getFactor(fg, :x0x1f1), :x1)
## Now check the contents of internal CCW
ccw = IIF._getCCW(fg, :x0x1f1)
ptr_ = ccw.varValsAll[][ccw.varidx[]]
@cast tp1[i,j] := ptr_[j][i]
@test 90.0 < Statistics.mean(tp1) < 110.0
ptr_ = ccw.varValsAll[][1]
@cast tp2[i,j] := ptr_[j][i]
@test -10.0 < Statistics.mean(tp2) < 10.0
##
println("and in the reverse direction")
initVariable!(fg, :x1, [100*ones(1) for _ in 1:100])
pts = approxConv(fg, getFactor(fg, :x0x1f1), :x0)
ptr_ = ccw.varValsAll[][1]
@cast tp1[i,j] := ptr_[j][i]
@test -10.0 < Statistics.mean(tp1) < 10.0
ptr_ = ccw.varValsAll[][2]
@cast tp2[i,j] := ptr_[j][i]
@test 90.0 < Statistics.mean(tp2) < 110.0
##
end
# use the range only example, should give a circle with nothing in the middle
@testset "Generic convolution testing in factor graph context..." begin
##
N=100
p1 = [randn(1) for _ in 1:N]
d1 = manikde!(TranslationGroup(1), p1)
p2 = [randn(1) for _ in 1:N]
t = Vector{Vector{Vector{Float64}}}()
push!(t,p1)
push!(t,p2)
fg = initfg()
v1=addVariable!(fg, :x1, ContinuousScalar, N=N)
v2=addVariable!(fg, :x2, ContinuousScalar, N=N)
bws = getBW(d1)[:,1]
f1 = addFactor!(fg, [v1], Prior(manikde!(TranslationGroup(1), p1, bw=bws)) )
odo = Pose1Pose1Test(Normal(100.0,1.0))
f2 = addFactor!(fg, [v1;v2], odo)
tree = buildTreeReset!(fg)
pts_ = getBelief(fg,:x1) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test abs(Statistics.mean(pts)-0.0) < 10.0
pts_ = getBelief(fg,:x2) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test abs(Statistics.mean(pts)-0.0) < 10.0
##
tree = solveTree!(fg)
##
pts_ = getBelief(fg,:x1) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test abs(Statistics.mean(pts)-0.0) < 10.0
pts_ = getBelief(fg,:x2) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test abs(Statistics.mean(pts)-100.0) < 10.0
##
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 3055 | # using Revise
using DistributedFactorGraphs
using IncrementalInference
using Test
##
# @testset "deprecation of old api" begin
# LinearRelative(Normal(2.0, 0.1))
# end
@testset "testing compare functions for variables and factors..." begin
##
fg = initfg()
v1 = addVariable!(fg, :x0, ContinuousScalar)
f1 = addFactor!(fg, [:x0;], Prior(Normal()))
@test compareVariable(v1,v1)
@test compareFactor(f1,f1)
v2 = addVariable!(fg, :x1, ContinuousScalar)
f2 = addFactor!(fg, [:x0;:x1], LinearRelative(Normal(2.0, 0.1)))
fg2 = deepcopy(fg)
@test !compareVariable(v1,v2)
# not testing different factors in this way
# @test !compareFactor(f1,f2)
@test compareAllVariables(fg, fg)
@test compareAllVariables(fg, fg2)
@test compareSimilarVariables(fg, fg)
@test compareSimilarVariables(fg, fg2)
@test compareSimilarFactors(fg, fg)
@test compareSimilarFactors(fg, fg2; skip=[:particleidx])
@test compareFactorGraphs(fg, fg)
@test compareFactorGraphs(fg, fg2; skip=[:particleidx; :varidx])
# easier error messages
getSolverParams(fg).multiproc = false
tree = solveTree!(fg)
x1a = getVariable(fg, :x0)
x1b = getVariable(fg2, :x0)
@test !compareVariable(x1a, x1b, skipsamples=false)
@test !compareSimilarVariables(fg, fg2, skipsamples=false)
@test !compareSimilarFactors(fg, fg2, skipsamples=false, skip=[:measurement;])
@test compareFactorGraphs(fg, fg)
@test !compareFactorGraphs(fg, fg2, skipsamples=false)
initAll!(fg2)
@test compareSimilarVariables(fg, fg2, skipsamples=true, skip=Symbol[:initialized;:infoPerCoord;:ppeDict;:solvedCount])
# fg (not fg2) has been solved, so the comparison should fail on the PPE estimate dictionary
@test !compareSimilarVariables(fg, fg2, skipsamples=true, skip=Symbol[:initialized;:infoPerCoord])
tree = buildTreeReset!(fg2)
# Expect ccw to reflect different numerics since fg and fg2 have different numeric solutions
Al = IIF._getCCW(fg, getLabel(f2))
Bl = IIF._getCCW(fg2, getLabel(f2))
field = :varValsAll
@test !compareField(Al, Bl, field)
@test compareSimilarFactors(fg, fg2, skipsamples=true, skipcompute=true, skip=[:fullvariables; :varValsAll; :particleidx])
@test !compareSimilarFactors(fg, fg2, skipsamples=true, skipcompute=false)
##
end
@testset "test subgraph functions..." begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addFactor!(fg, [:x0;], Prior(Normal()))
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x0;:x1], LinearRelative(Normal(2.0, 0.1)))
addVariable!(fg, :x2, ContinuousScalar)
addFactor!(fg, [:x1;:x2], LinearRelative(Normal(4.0, 0.1)))
addVariable!(fg, :l1, ContinuousScalar)
addFactor!(fg, [:x1;:l1], LinearRelative(Rayleigh()))
sfg = buildSubgraph(fg, [:x0;:x1], 1) # distance=1 to include factors
#FIXME JT - this doesn't make sense to pass, it is a subgraph so should it not rather be ⊂ [subset]?
# compareDFG(fg1, fg2, by=⊂, skip=...)
@test fg.sessionLabel == sfg.sessionLabel[1:length(fg.sessionLabel)]
@test compareFactorGraphs(fg, sfg, skip=[:labelDict;:addHistory;:logpath;:sessionLabel; :particleidx; :varidx])
# drawGraph(sfg)
##
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 18596 | # Basic test of DERelative
using Test
using DifferentialEquations
using IncrementalInference
using Dates
using Statistics
using TensorCast
## plotting functions
# using Plots
# using Cairo, RoMEPlotting
# Gadfly.set_default_plot_size(25cm,20cm)
##
@testset "First order DERelative" begin
##
# a user specified ODE in standard form
# inplace `xdot = f(x, u, t)`
# if linear, `xdot = F*x(t) + G*u(t)`
function firstOrder!(dstate, state, u, t)
β = -0.2
dstate[1] = β*state[1] + u(t)
nothing
end
# testing function parameter version (could also be array of data)
tstForce(t) = 0
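# Hedged closed-form check of the dynamics above: with u(t) ≡ 0 and x(0) = 1
# the solution is x(t) = exp(β t), so after one 5 s step we expect
# x ≈ exp(-0.2 * 5) = exp(-1) ≈ 0.368, consistent with the 0.3 < mean < 0.4
# bounds asserted further below.
@assert isapprox(exp(-0.2 * 5), 0.368; atol=1e-3)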
## build a representative factor graph with ODE built inside
fg = initfg()
# the starting points and "0 seconds"
# `accurate_time = trunc(getDatetime(var), Second) + (1e-9*getNstime(var) % 1)`
addVariable!(fg, :x0, Position{1}, timestamp=DateTime(2000,1,1,0,0,0))
# pin with a simple prior
addFactor!(fg, [:x0], Prior(Normal(1,0.01)))
doautoinit!(fg, :x0)
prev = :x0
for i in 1:3
nextSym = Symbol("x$i")
# another point in the trajectory 5 seconds later
addVariable!(fg, nextSym, Position{1}, timestamp=DateTime(2000,1,1,0,0,5*i))
# build factor against manifold Manifolds.TranslationGroup(1)
ode_fac = IIF.DERelative(fg, [prev; nextSym],
Position{1},
firstOrder!,
tstForce,
dt=0.05,
problemType=ODEProblem )
#
addFactor!( fg, [prev;nextSym], ode_fac, graphinit=false )
initVariable!(fg, nextSym, [0.1*randn(1) for _ in 1:100])
prev = nextSym
end
## raw test against DiffEq API directly
oder_ = DERelative( fg, [:x0; :x3],
Position{1},
firstOrder!,
tstForce,
dt=0.05,
problemType=ODEProblem )
oder_.forwardProblem.u0 .= [1.0]
sl = DifferentialEquations.solve(oder_.forwardProblem)
x0_val_ref = sl(getVariable(fg, :x0) |> getTimestamp |> DateTime |> datetime2unix)
x1_val_ref = sl(getVariable(fg, :x1) |> getTimestamp |> DateTime |> datetime2unix)
x2_val_ref = sl(getVariable(fg, :x2) |> getTimestamp |> DateTime |> datetime2unix)
x3_val_ref = sl(getVariable(fg, :x3) |> getTimestamp |> DateTime |> datetime2unix)
## one layer wrapped API test through IIFExt to DiffEq
f = getFactor(fg, intersect(ls(fg,:x0),ls(fg,:x1))[1] )
fc = getFactorType(f)
fprob = fc.forwardProblem
meas = zeros(getDimension(getVariable(fg, :x1)))
u0pts = getPoints(getBelief(fg, :x0))[1]
res = IncrementalInference._solveFactorODE!(meas, fprob, u0pts)
@test isapprox( 5, res.t[end]-res.t[1]; atol=1e-6)
@test isapprox( x0_val_ref, res.u[1]; atol=0.1)
@test isapprox( x1_val_ref, res.u[end]; atol=0.1)
## basic sample test
meas = sampleFactor(fg, :x0x1f1, 10)
@test size(meas[1][1],1) == 1
@test size(meas,1) == 10
## do all forward solutions
pts = sampleFactor(fg, :x0f1, 100)
initVariable!(fg, :x0, pts)
pts_ = approxConv(fg, :x0x1f1, :x1)
@cast pts[i,j] := pts_[j][i]
@test 0.3 < Statistics.mean(pts) < 0.4
## check that the reverse solve also works
initVariable!(fg, :x1, pts_)
pts_ = approxConv(fg, :x0x1f1, :x0)
@cast pts[i,j] := pts_[j][i]
# check the reverse solve to be relatively accurate
ref_ = (getBelief(fg, :x0) |> getPoints)
@cast ref[i,j] := ref_[j][i]
@test norm(pts - ref) < 1e-4
##
# use Makie instead
# Plots.plot(sl,linewidth=2,xaxis="unixtime [s]",layout=(1,1))
# for lb in [:x0; :x1;:x2;:x3]
# x = getTimestamp(getVariable(fg, lb)) |> DateTime |> datetime2unix
# xx = [x;x]
# yy = [0;1]
# Plots.plot!(xx, yy, show=true)
# end
## temp graph solve check
tfg = initfg()
tx3_ = approxConvBelief(fg, :x0f1, :x3; setPPE=true, tfg)
pts_ = getPoints(tx3_)
# initVariable!(tfg, :x3, pts)
@cast pts[i,j] := pts_[j][i]
@test isapprox( x0_val_ref, getPPE(tfg, :x0).suggested ; atol = 0.1)
@test isapprox( x1_val_ref, getPPE(tfg, :x1).suggested ; atol = 0.1)
@test isapprox( x2_val_ref, getPPE(tfg, :x2).suggested ; atol = 0.1)
@test isapprox( x3_val_ref, mean(tx3_); atol=0.1)
# using KernelDensityEstimatePlotting
# plotKDE(tfg, [:x0;:x1;:x2;:x3])
## check if variables are initialized (only works for graphinit)
@test isInitialized(fg, :x0)
@test isapprox( x0_val_ref, mean(getBelief(fg[:x0])); atol=0.1)
@test isInitialized(fg, :x1)
# @test isapprox( x1_val_ref, mean(getBelief(fg[:x1])); atol=0.1)
X2_ = approxConvBelief(fg, :x1x2f1, :x2)
@test isapprox( x2_val_ref, mean(X2_); atol=0.1)
# FIXME, X2 and X3 are wrongly initialized to zero above
# X2_ = approxConvBelief(fg, :x2x3f1, :x2)
# @test isapprox( x2_val_ref, mean(X2_); atol=0.1)
# @enter approxConvBelief(fg, :x2x3f1, :x2)
factors = getFactor.(fg, IIF.listNeighbors(fg, :x2))
dens = ManifoldKernelDensity[]
ipc = IIF.proposalbeliefs!(fg, :x2, factors, dens)
#
mkd = *(dens...)
@test isapprox( x2_val_ref, mean(mkd); atol=0.1)
X2_,_ = propagateBelief(fg, :x2, :)
@test isapprox( x2_val_ref, mean(X2_); atol=0.1)
# @enter propagateBelief(fg, :x2, :)
@test isInitialized(fg, :x2)
@test isInitialized(fg, :x3)
# FIXME, wrongly initialized X2 and X3 to near zero above
# @test isapprox( x2_val_ref, mean(getBelief(fg[:x2])); atol=0.1)
# @test isapprox( x3_val_ref, mean(getBelief(fg[:x3])); atol=0.1) # happens to be near zero
## Now test a full graph solve
smtasks = Task[]
tree = solveTree!(fg; smtasks, recordcliqs=ls(fg));
hists = fetchCliqHistoryAll!(smtasks)
printCSMHistoryLogical(hists)
##
# intended steps at writing are 5, 6 (upsolve)
_, csmc = repeatCSMStep!(hists[1], 5; duplicate=true)
@test isapprox( 1, getPPESuggested(csmc.cliqSubFg, :x0)[1]; atol=0.1 )
nval_x0 = mean(getBelief(csmc.cliqSubFg, :x0))
@test isapprox( x0_val_ref, nval_x0; atol=0.1 )
nval_x1 = mean(getBelief(csmc.cliqSubFg, :x1))
@test isapprox( x1_val_ref, nval_x1; atol=0.1 )
sfg = deepcopy( hists[1][6][4].cliqSubFg )
dens, ipc = propagateBelief( sfg, :x0, :;)
@test isapprox( x0_val_ref, mean(dens); atol=0.1)
@test isapprox( x0_val_ref, mean(getBelief(sfg[:x0])); atol=0.1)
# @test isapprox( x2_val_ref, mean(getBelief(sfg[:x2])); atol=0.1) # TODO DELETE THIS LINE
dens, ipc = propagateBelief( sfg, :x1, :;)
@test isapprox( x1_val_ref, mean(dens); atol=0.1)
# @enter propagateBelief(sfg, :x1, :)
_, csmc = repeatCSMStep!(hists[1], 6; duplicate=true)
# @enter repeatCSMStep!(hists[1], 6; duplicate=true)
@test isapprox( x0_val_ref, getPPESuggested(csmc.cliqSubFg, :x0); atol=0.1 )
nval_x0 = mean(getBelief(csmc.cliqSubFg, :x0))
@test isapprox( x0_val_ref, nval_x0; atol=0.1 )
nval_x0 = mean(getBelief(csmc.cliqSubFg, :x0))
@test isapprox( x0_val_ref, nval_x0; atol=0.1 )
# TODO CHECK vnd.val points istype SArray???
# intended steps at writing are 11,12 (post-root clique downsolve)
val0 = getPPESuggested( hists[1][11][4].cliqSubFg[:x0] )
@test isapprox( x0_val_ref, val0; atol=0.1)
val0 = getPPESuggested( hists[1][12][4].cliqSubFg[:x0] )
@test isapprox( x0_val_ref, val0; atol=0.1)
##
@test isapprox( getPPE(fg, :x0).suggested, x0_val_ref; atol = 0.1)
@test isapprox( getPPE(fg, :x1).suggested, x1_val_ref; atol = 0.1)
@test isapprox( getPPE(fg, :x2).suggested, x2_val_ref; atol = 0.1)
@test isapprox( getPPE(fg, :x3).suggested, x3_val_ref; atol = 0.1)
##
end
##
@testset "Damped Oscillator DERelative" begin
## setup some example dynamics
# Let's build a damped oscillator to demonstrate the process in state space
# https://en.wikipedia.org/wiki/Harmonic_oscillator
# ddx/ddt = β dx/dt - ω x + force[t]
# dx/dt = dx/dt
function dampedOscillator!(dstate, state, force, t)
ω = 0.7
β = -0.3
dstate[2] = β*state[2] - ω*state[1] + force(t)
dstate[1] = state[2]
nothing
end
# testing function parameter version (could also be array of data)
tstForce(t) = 0
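# Hedged sanity note on the dynamics: in state-space form ẋ = A x with
# A = [0 1; -ω β], the eigenvalues are λ = (β ± sqrt(β^2 - 4ω))/2, i.e.
# -0.15 ± 0.823im for ω = 0.7, β = -0.3: a lightly damped oscillation, which
# is why the checks below expect the state near [1, 0] at t = 0 and roughly
# [0, -0.6] one 2 s step later.
@assert isapprox(sqrt(4*0.7 - 0.3^2)/2, 0.823; atol=1e-3)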
## build a representative factor graph with ODE built inside
fg = initfg()
# the starting points and "0 seconds"
addVariable!(fg, :x0, Position{2}, timestamp=DateTime(2000,1,1,0,0,0))
# pin with a simple prior
addFactor!(fg, [:x0], Prior(MvNormal([1;0],0.01*diagm(ones(2)))))
##
prev = :x0
DT = 2
for i in 1:7
nextSym = Symbol("x$i")
# another point in the trajectory, DT=2 seconds later
addVariable!(fg, nextSym, Position{2}, timestamp=DateTime(2000,1,1,0,0,DT*i))
oder = DERelative( fg, [prev; nextSym],
Position{2},
dampedOscillator!,
tstForce,
# (state, var)->(state[1] = var[1]),
# (var, state)->(var[1] = state[1]),
dt=0.05,
problemType=ODEProblem )
#
addFactor!( fg, [prev;nextSym], oder; graphinit=false )
prev = nextSym
end
##
oder_ = DERelative( fg, [:x0; :x7],
Position{2},
dampedOscillator!,
tstForce,
# (state, var)->(state[1] = var[1]),
# (var, state)->(var[1] = state[1]),
dt=0.05,
problemType=ODEProblem )
oder_.forwardProblem.u0 .= [1.0;0.0]
sl = DifferentialEquations.solve(oder_.forwardProblem)
## Initialize the rest of the variables
initAll!(fg)
## check the solve values are correct
x0_val_ref = sl(getVariable(fg, :x0) |> getTimestamp |> DateTime |> datetime2unix)
x1_val_ref = sl(getVariable(fg, :x1) |> getTimestamp |> DateTime |> datetime2unix)
x2_val_ref = sl(getVariable(fg, :x2) |> getTimestamp |> DateTime |> datetime2unix)
x3_val_ref = sl(getVariable(fg, :x3) |> getTimestamp |> DateTime |> datetime2unix)
x4_val_ref = sl(getVariable(fg, :x4) |> getTimestamp |> DateTime |> datetime2unix)
x5_val_ref = sl(getVariable(fg, :x5) |> getTimestamp |> DateTime |> datetime2unix)
x6_val_ref = sl(getVariable(fg, :x6) |> getTimestamp |> DateTime |> datetime2unix)
x7_val_ref = sl(getVariable(fg, :x7) |> getTimestamp |> DateTime |> datetime2unix)
##
@test isapprox( getPPESuggested(fg, :x0), x0_val_ref; atol=0.2)
@test isapprox( getPPESuggested(fg, :x1), x1_val_ref; atol=0.2)
@test isapprox( getPPESuggested(fg, :x2), x2_val_ref; atol=0.2)
@test isapprox( getPPESuggested(fg, :x3), x3_val_ref; atol=0.2)
@test isapprox( getPPESuggested(fg, :x4), x4_val_ref; atol=0.2)
@test isapprox( getPPESuggested(fg, :x5), x5_val_ref; atol=0.2)
@test isapprox( getPPESuggested(fg, :x6), x6_val_ref; atol=0.2)
@test isapprox( getPPESuggested(fg, :x7), x7_val_ref; atol=0.2)
## check forward and backward solving
pts_ = approxConv(fg, :x0f1, :x0)
@cast pts[i,j] := pts_[j][i]
@test norm(Statistics.mean(pts, dims=2) - [1;0]) < 0.3
initVariable!(fg, :x0, pts_)
X0_ = deepcopy(pts)
pts_ = approxConv(fg, :x0x1f1, :x1)
@cast pts[i,j] := pts_[j][i]
@test norm(Statistics.mean(pts, dims=2) - [0;-0.6]) < 0.4
# now check the reverse direction solving
initVariable!(fg, :x1, pts_)
pts_ = approxConv(fg, :x0x1f1, :x0)
@cast pts[i,j] := pts_[j][i]
# check forward then backward convolves are reversible
@test isapprox(0, norm(X0_ - pts); atol=1e-2)
##
# plotKDE(tfg, ls(fg) |> sortDFG, dims=[1] )
##
tfg = initfg()
# for s in ls(fg)
# initVariable!(fg, s, [0.1.*zeros(2) for _ in 1:100])
# end
pts = approxConv(fg, :x0f1, :x7, setPPE=true, tfg=tfg)
initVariable!(tfg, :x7, pts)
##
@test isapprox( getPPESuggested(tfg, :x0), x0_val_ref; atol=0.2)
@test isapprox( getPPESuggested(tfg, :x1), x1_val_ref; atol=0.2)
@test isapprox( getPPESuggested(tfg, :x2), x2_val_ref; atol=0.2)
@test isapprox( getPPESuggested(tfg, :x3), x3_val_ref; atol=0.2)
@test isapprox( getPPESuggested(tfg, :x4), x4_val_ref; atol=0.2)
@test isapprox( getPPESuggested(tfg, :x5), x5_val_ref; atol=0.2)
@test isapprox( getPPESuggested(tfg, :x6), x6_val_ref; atol=0.2)
@test isapprox( getPPESuggested(tfg, :x7), x7_val_ref; atol=0.2)
##
@error "Disabling useMsgLikelihood for DERelative test, follow fix on #1010 as rough guide"
getSolverParams(fg).useMsgLikelihoods = false
smtasks = Task[]
tree = solveTree!(fg; recordcliqs=ls(fg), smtasks);
hists = fetchCliqHistoryAll!(smtasks)
printCSMHistoryLogical(hists)
_, csmc = repeatCSMStep!(hists[2], 6; duplicate=true);
##
# solveTree has weird problem in breaking correct init and inserting zeros???
@test isapprox( getPPESuggested(fg, :x0), x0_val_ref; atol=0.2)
@test isapprox( getPPESuggested(fg, :x1), x1_val_ref; atol=0.2)
@test isapprox( getPPESuggested(fg, :x2), x2_val_ref; atol=0.2)
@test isapprox( getPPESuggested(fg, :x3), x3_val_ref; atol=0.2)
@test isapprox( getPPESuggested(fg, :x4), x4_val_ref; atol=0.2)
@test isapprox( getPPESuggested(fg, :x5), x5_val_ref; atol=0.2)
@test isapprox( getPPESuggested(fg, :x6), x6_val_ref; atol=0.2)
@test isapprox( getPPESuggested(fg, :x7), x7_val_ref; atol=0.2)
##
# Plots.plot(sl,linewidth=2,xaxis="unixtime [s]",label=["ω [rad/s]" "θ [rad]"],layout=(2,1))
# for lb in sortDFG(ls(fg))
# x = getTimestamp(getVariable(tfg, lb)) |> DateTime |> datetime2unix
# xx = [x;x]
# yy = [-1;1]
# Plots.plot!(xx, yy, show=true)
# end
##
end
##
@testset "Parameterized Damped Oscillator DERelative (n-ary factor)" begin
## setup some example dynamics
# Let's build a damped oscillator to demonstrate the process in state space
# https://en.wikipedia.org/wiki/Harmonic_oscillator
# ddx/ddt = β dx/dt - ω x + force[t]
# dx/dt = dx/dt
# force_ωβ = (data, ωβ)
function dampedOscillatorParametrized!(dstate, state, force_ωβ, t)
# 3rd variable in this factor graph test example
force = force_ωβ[1]
ω = force_ωβ[2][1]
β = force_ωβ[2][2]
# classic ODE between first and second fg variables
dstate[2] = β*state[2] - ω*state[1] + force(t)
dstate[1] = state[2]
nothing
end
# testing function parameter version (could also be array of data)
tstForce(t) = 0
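# Hedged note on the n-ary wiring below: DERelative hands this function a
# container `force_ωβ` where [1] is the fixed forcing function supplied at
# construction and [2] carries the value of the third graph variable :ωβ, so
# (ω, β) are treated as stochastic parameters by the same solver machinery.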
## build a representative factor graph with ODE built inside
fg = initfg()
# the starting points and "0 seconds"
addVariable!(fg, :x0, Position{2}, timestamp=DateTime(2000,1,1,0,0,0))
# pin with a simple prior
addFactor!(fg, [:x0], Prior(MvNormal([1;0],0.01*diagm(ones(2)))))
doautoinit!(fg, :x0)
# and the new parameterized variable
ω = 0.7
β = -0.3
# these are the stochastic parameters
addVariable!(fg, :ωβ, Position{2}) # timestamp should not matter
# pin with a simple prior
addFactor!(fg, [:ωβ], Prior(MvNormal([ω;β],0.0001*diagm(ones(2)))))
doautoinit!(fg, :ωβ)
##
prev = :x0
DT = 2
for i in 1:7
nextSym = Symbol("x$i")
# another point in the trajectory, DT=2 seconds later
addVariable!(fg, nextSym, Position{2}, timestamp=DateTime(2000,1,1,0,0,DT*i))
oder = DERelative( fg, [prev; nextSym; :ωβ],
Position{2},
dampedOscillatorParametrized!,
tstForce, # this is passed in as `force_ωβ[1]`
# (state, var)->(state[1] = var[1]),
# (var, state)->(var[1] = state[1]),
# dt=0.05,
problemType=ODEProblem )
#
addFactor!( fg, [prev; nextSym; :ωβ], oder, graphinit=false, inflation=0.01 )
prev = nextSym
end
## check forward and backward solving
pts_ = approxConv(fg, :x0f1, :x0)
@cast pts[i,j] := pts_[j][i]
@test norm(Statistics.mean(pts, dims=2) - [1;0]) < 0.3
initVariable!(fg, :x0, pts_)
X0_ = deepcopy(pts)
pts_ = approxConv(fg, :x0x1ωβf1, :x1)
@cast pts[i,j] := pts_[j][i]
@test norm(Statistics.mean(pts, dims=2) - [0;-0.6]) < 0.4
# now check the reverse direction solving
initVariable!(fg, :x1, pts_)
# failing here
pts_ = approxConv(fg, :x0x1ωβf1, :x0)
@cast pts[i,j] := pts_[j][i]
@test (X0_ - pts) |> norm < 1e-2
##
tfg = initfg()
# for s in ls(fg)
# initVariable!(fg, s, [zeros(2) for _ in 1:100])
# end
# must initialize the parameters
pts = approxConv(fg, :ωβf1, :ωβ)
initVariable!(fg, :ωβ, pts)
# project forward
forcepath = [:x0f1;]
push!(forcepath, :x0)
for i in 0:6
  push!(forcepath, Symbol("x$(i)x$(i+1)ωβf1"))
  push!(forcepath, Symbol("x$(i+1)"))
end
pts = approxConv(fg, :x0f1, :x7, setPPE=true, tfg=tfg, path=forcepath)
##
# plotKDE(tfg, ls(tfg) |> sortDFG, dims=[1] )
##
# getBelief(fg, :ωβ) |> getPoints
# plotKDE(tfg, :ωβ)
##
oder_ = DERelative( fg, [:x0; :x7; :ωβ],
Position{2},
dampedOscillatorParametrized!,
tstForce,
# (state, var)->(state[1] = var[1]),
# (var, state)->(var[1] = state[1]),
dt=0.05,
problemType=ODEProblem )
oder_.forwardProblem.u0 .= [1.0;0.0]
oder_.data[2] .= [ω;β]
sl = DifferentialEquations.solve(oder_.forwardProblem)
## check the approxConv is working right
for sym in setdiff(ls(tfg), [:ωβ])
@test getPPE(tfg, sym).suggested - sl(getVariable(fg, sym) |> getTimestamp |> DateTime |> datetime2unix) |> norm < 0.2
end
##
# Plots.plot(sl,linewidth=2,xaxis="unixtime [s]",label=["ω [rad/s]" "θ [rad]"],layout=(2,1))
# for lb in sortDFG(ls(fg))
# x = getTimestamp(getVariable(tfg, lb)) |> DateTime |> datetime2unix
# xx = [x;x]
# yy = [-1;1]
# Plots.plot!(xx, yy, show=true)
# end
## test convolution to the parameter (third) variable
# easy test with good starting points
pts = approxConv(fg, :ωβf1, :ωβ)
initVariable!(fg, :ωβ, pts)
# make sure the other variables are in the right place
pts_ = getBelief(fg, :x0) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test Statistics.mean(pts, dims=2) - [1;0] |> norm < 0.1
pts_ = getBelief(fg, :x1) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test Statistics.mean(pts, dims=2) - [0;-0.6] |> norm < 0.2
pts_ = approxConv(fg, :x0x1ωβf1, :ωβ)
@cast pts[i,j] := pts_[j][i]
@test Statistics.mean(pts, dims=2) - [0.7;-0.3] |> norm < 0.1
##
# repeat with a more difficult starting point
initVariable!(fg, :ωβ, [zeros(2) for _ in 1:100])
pts_ = approxConv(fg, :x0x1ωβf1, :ωβ)
@cast pts[i,j] := pts_[j][i]
@test norm(Statistics.mean(pts, dims=2) - [0.7;-0.3]) < 0.1
@warn "n-ary DERelative test on :ωβ requires issue #1010 to be resolved first before being reintroduced."
# ## do a complete solve (must first resolve #1010)
# solveTree!(fg);
# ## Solve quality might not yet be good enough for this particular test case
# @test getPPE(fg, :ωβ).suggested - [0.7;-0.3] |> norm < 0.2
# for sym in setdiff(ls(tfg), [:ωβ])
# @test getPPE(fg, sym).suggested - sl(getVariable(fg, sym) |> getTimestamp |> DateTime |> datetime2unix) |> norm < 0.2
# end
##
end
@error "DERelative not tested for `multihypo=` case yet, see issue #1025"
# | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 3157 | using DistributedFactorGraphs
using IncrementalInference
using Test
##
mutable struct MutableLinearRelative{N, T <: SamplableBelief} <: AbstractManifoldMinimize
Z::T
# timestamp::DateTime
end
function MutableLinearRelative{N}() where N
newval = MvNormal(zeros(N), diagm(ones(N)))
MutableLinearRelative{N,typeof(newval)}(newval)
end
MutableLinearRelative(n::Int=1) = MutableLinearRelative{n}()
MutableLinearRelative(nm::Distributions.ContinuousUnivariateDistribution) = MutableLinearRelative{1, typeof(nm)}(nm)
MutableLinearRelative(nm::MvNormal) = MutableLinearRelative{length(nm.μ), typeof(nm)}(nm)
MutableLinearRelative(nm::ManifoldKernelDensity) = MutableLinearRelative{Ndim(nm), typeof(nm)}(nm)
DFG.getDimension(::Type{MutableLinearRelative{N,<:SamplableBelief}}) where {N} = N
DFG.getManifold(::MutableLinearRelative{N}) where N = TranslationGroup(N)
function IIF.getSample(cf::CalcFactor{<:MutableLinearRelative})
return rand(cf.factor.Z, 1)
end
function (s::CalcFactor{<:MutableLinearRelative})( meas,
X1,
X2 )
#
return meas .- (X2 .- X1)
end
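# Usage sketch (assumption based on the definition above): because the struct is
# mutable, the measurement belief Z can be swapped in-place after construction,
# which is what a dead-reckoning tether relies on to update without rebuilding
# the factor graph.
let
  drt = MutableLinearRelative(Normal(0.0, 0.1))
  drt.Z = Normal(0.5, 0.1)  # replace the measurement distribution in-place
  @assert drt.Z isa Normal
end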
##
@testset "testing dead reckoning tether" begin
# test error message and then define method for MutableLinearRelative
# @test_throws ErrorException getFactorMean(MutableLinearRelative(Normal(0.0,0.1)))
# IIF.getFactorMean(fct::MutableLinearRelative) = getFactorMean(fct.Z)
##
# start with an empty factor graph object
fg = initfg()
# Add the first pose :x0
addVariable!(fg, :x0, ContinuousScalar)
# Add a Prior factor to pin :x0 to a starting value of 0.0
addFactor!(fg, [:x0], Prior( Normal(0.0,0.1) ))
# Drive along a line
for i in 0:5
psym = Symbol("x$i")
nsym = Symbol("x$(i+1)")
addVariable!(fg, nsym, ContinuousScalar)
pp = LinearRelative(Normal(1.0,0.1))
addFactor!(fg, [psym;nsym], pp )
end
# Add a landmark with a scalar LinearRelative measurement
addVariable!(fg, :l1, ContinuousScalar, tags=[:LANDMARK])
p2br = LinearRelative(Normal(1.0,0.1))
addFactor!(fg, [:x0; :l1], p2br )
## async solving with dead-reckon branch
addVariable!(fg, :deadreckon_x0, ContinuousScalar, solvable=0)
drec = MutableLinearRelative(Normal(0.0,0.1))
addFactor!(fg, [:x0; :deadreckon_x0], drec, solvable=0)
#
@test length(map( x->x.label, getVariables(fg, solvable=1))) == 8
@test length(map( x->x.label, getVariables(fg, solvable=0))) == 9
#
# make sure the elimination order only includes solvable variables
@test length(getEliminationOrder(fg, solvable=1)) == 8
# check default
@test length(getEliminationOrder(fg)) == 8
# default check
vo = getEliminationOrder(fg)
@test length(vo) == 8
tree = buildTreeFromOrdering!(fg,vo)
tree2 = solveTree!(fg);
@test !isInitialized(fg, :deadreckon_x0)
val = accumulateFactorMeans(fg, [:x0deadreckon_x0f1])
# must fix return type stability
fval = float(val...)
@test isapprox(fval, calcVariablePPE(fg, :x0).suggested[1], atol=1e-4 )
#TODO improve test
rebaseFactorVariable!(fg, :x0deadreckon_x0f1, [:x1; :deadreckon_x0])
@test issetequal(ls2(fg, :x0), [:x1, :l1])
@test issetequal(ls2(fg, :x1), [:x0, :deadreckon_x0, :x2])
##
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 3319 | # test deconvolution functions
using Test
using IncrementalInference
using TensorCast
using Manifolds: Euclidean
##
@testset "basic deconvolution test" begin
##
fg = generateGraph_LineStep(2)
# drawGraph(fg, show=true)
## test trivial Prior
pred, meas = approxDeconv(fg, :x0f1)
@test mmd(Euclidean(1),pred, meas) < 1e-8
##
doautoinit!.(fg, [:x0; :x2])
##
pred, meas = approxDeconv(fg, :x0x2f1)
@test mmd(Euclidean(1), pred, meas) < 1e-3
##
P_ = approxDeconvBelief(fg, :x0x2f1, LinearRelative)
@test isapprox( mean(Euclidean(1), meas), mean(P_), atol=0.2 )
##
end
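# Side note, a minimal sketch: mmd is the closeness metric used in these tests;
# two identical point sets should score at numerical zero.
let
  pts_demo = [randn(1) for _ in 1:50]
  @assert mmd(Euclidean(1), pts_demo, pts_demo) < 1e-8
end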
# voodoo-lite
@testset "deconv through multihypo" begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :hypoA, ContinuousScalar)
addFactor!(fg, [:hypoA;], Prior(Normal(5,0.1)))
addVariable!(fg, :hypoB, ContinuousScalar)
addFactor!(fg, [:hypoB;], Prior(Normal(10,0.1)))
addFactor!(fg, [:x0; :hypoA; :hypoB], LinearRelative(Normal(10,0.1)), multihypo=[1;1/2;1/2])
solveTree!(fg);
##
# make sure each variable is where it should be first
@test isapprox(getPPE(fg, :hypoA).suggested[1], 5, atol=1)
@test isapprox(getPPE(fg, :hypoB).suggested[1], 10,atol=1)
X0_ = getBelief(fg, :x0)
X0 = AMP._pointsToMatrixCoords(X0_.manifold, getPoints(X0_))
# TensorCast.@cast X0[i,j] := X0_[j][i]
N = size(X0,2)
@test 0.2*N < sum( -7.5 .< X0 .< -2.5 )
@test 0.2*N < sum( -2.5 .< X0 .< 2.5 )
@test sum( 2.5 .< X0 ) < 0.05*N
@test sum( X0 .< -7.5 ) < 0.05*N
@test sum( -3.5 .< X0 .< -1.5 ) < 0.1*N
## do deconv and check
@error "approxDeconv on multihypo not fixed yet, see #467, #927"
# pred, meas = approxDeconv(fg, lsf(fg, LinearRelative)[1])
##
end
@testset "deconvolution tools via differential factors" begin
##
fg = generateGraph_CaesarRing1D()
getSolverParams(fg).useMsgLikelihoods = true
vo = [:x3,:x5,:x1,:l1,:x4,:x2,:x6,:x0]
mkpath(getLogPath(fg))
tree = solveTree!(fg, eliminationOrder=vo, verbose=true) #, timeout=5) # timeout creates interrupt exception
msg = IIF.getMessageBuffer(getClique(tree,2)).upRx
tfg = buildCliqSubgraph(fg, getClique(tree,2))
addLikelihoodsDifferential!.(tfg, values(msg))
# drawGraph(tfg, show=true)
@show ls(tfg)
@test issetequal(ls(tfg), [:x2,:x4,:x6])
@test lsf(tfg) |> length == 2
@test lsf(tfg, tags=[:__UPWARD_DIFFERENTIAL__]) |> length == 2
##
end
@testset "deconv on <:AbstractRelativeMinimize" begin
##
fg = initfg()
getSolverParams(fg).useMsgLikelihoods = true
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x0], Prior(Normal()))
doautoinit!(fg,:x0)
addFactor!(fg, [:x0;:x1], EuclidDistance(Normal(10,1)))
##
# initAll!(fg)
pts = approxConv(fg, :x0x1f1, :x1)
##
solveTree!(fg);
## make sure result is in the right place
@test abs(getPPE(fg, :x0).suggested[1]) < 1.0
X1_ = getBelief(fg, :x1) |> getPoints
TensorCast.@cast X1[i,j] := X1_[j][i]
N = size(X1,2)
@test sum(-5 .< X1 .< 5) < 0.1*N
@test sum(X1 .< -15) < 0.1*N
@test sum(15 .< X1) < 0.1*N
@test 0.2*N .< sum(-15 .< X1 .< -5)
@test 0.2*N .< sum(5 .< X1 .< 15)
## now check deconv
pred, meas = approxDeconv(fg, :x0x1f1)
@test mmd(Euclidean(1), pred, meas) < 1e-1
##
# using KernelDensityEstimatePlotting, Gadfly
# Gadfly.set_default_plot_size(25cm,20cm)
# plotKDE([kde!(pred); kde!(meas)], c=["red"; "green"])
##
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 315 |
using Test
using IncrementalInference
##
@testset "test generic functions on distributions" begin
##
@test getDimension(Uniform(0,1)) == 1
@test getDimension(Normal()) == 1
@test getDimension(MvNormal([1;1;0.1])) == 3
p = manikde!(ContinuousScalar, [randn(1) for i in 1:100])
@test getDimension(p) == 1
##
end | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 6985 | # test EuclidDistance
using IncrementalInference
using Test
using TensorCast
##
@testset "test EuclidDistance on 1 dim" begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addFactor!(fg, [:x0], Prior(Normal()))
addVariable!(fg, :x1, ContinuousScalar)
eud = EuclidDistance(Normal(10,1))
addFactor!(fg, [:x0;:x1], eud)
##
tree = solveTree!(fg)
##
@test isapprox(getPPE(fg, :x0).suggested[1], 0, atol=1)
pts_ = getBelief(fg, :x1) |> getPoints
@cast pts[i,j] := pts_[j][i]
N = size(pts, 2)
@test 0.3*N < sum( 5 .< pts )
@test 0.3*N < sum( pts .< -5 )
@test sum( -5 .< pts .< 5 ) < 0.1*N
##
end
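# Worked note (sketch): with x0 pinned near 0, a distance-only measurement of 10
# admits two 1-dim solutions, which is what the point-count tests above verify.
let
  z, x0 = 10.0, 0.0
  modes = (x0 - z, x0 + z)  # the two solutions of |x1 - x0| == z
  @assert modes == (-10.0, 10.0)
end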
@testset "test EuclidDistance on 2 dim" begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousEuclid{2})
addFactor!(fg, [:x0], Prior(MvNormal(zeros(2),diagm([1;1.0]))))
addVariable!(fg, :x1, ContinuousEuclid{2})
eud = EuclidDistance(Normal(10,1))
addFactor!(fg, [:x0;:x1], eud)
tree = solveTree!(fg)
@test isapprox(getPPE(fg, :x0).suggested[1], 0, atol=1)
@test isapprox(getPPE(fg, :x0).suggested[2], 0, atol=1)
pts_ = getBelief(fg, :x1) |> getPoints
@cast pts[i,j] := pts_[j][i]
N = size(pts, 2)
pts = collect(pts)
pts .^= 2
@test 0.5*N < sum( 7 .< sqrt.(sum(pts, dims=1)) .< 13 )
##
end
@testset "test upward clique message range density behavior" begin
## Test with one point on each of the x- and y-axes
N=100
points = [[100.0;0.0],[0.0;100.0]]
fg = IIF.generateGraph_EuclidDistance(points)
eo = [:x2; :x1; :l1]
##
fg_ = deepcopy(fg)
tree = buildTreeReset!(fg_, eo)
hist,upMessage = solveCliqUp!(fg_, tree, :x2; recordcliq=true);
sfg = hist[end].csmc.cliqSubFg
L1__ = getBelief(sfg, :l1) |> getPoints
@cast L1_[i,j] := L1__[j][i]
# check for ring density
@test 0.2*N < sum( 0 .< L1_[1,:] .< 130)
@test 0.2*N < sum( -130 .< L1_[1,:] .< 0)
@test 0.2*N < sum( 100 .< L1_[2,:] .< 230)
@test 0.2*N < sum( -230 .< L1_[2,:] .< 100)
# and must be in a ring
L1_ = collect(L1_)
L1_[2,:] .-= 100
@test 0.95*N < sum( 90 .< sqrt.(sum(L1_.^2, dims=1)) .< 110)
##
N=100
points = [[100.0;0.0],[0.0;100.0]]
fg = IIF.generateGraph_EuclidDistance(points)
# initVariable!(fg, :l1, [1000.0.*randn(2) for _ in 1:100])
# check regular full solution produces two modes
# similar test in RoME
for i in 1:1
# global TP, N
tree = solveTree!(fg, eliminationOrder=eo);
L1_ = getBelief(fg, :l1) |> getPoints
@cast L1[i,j] := L1_[j][i]
# check that two modes exist
@test (0.03*N < sum(-50 .< L1[1,:] .< 50))
@test (0.03*N < sum(-50 .< L1[2,:] .< 50))
# @error "suppressing dual mode tests, MUST restore before IIF v0.25, see #1305"
@test (0.03*N < sum(50 .< L1[1,:] .< 150)) # always this one
@test (0.03*N < sum(50 .< L1[2,:] .< 150))
end
# the solve above should already recover both modes (loop retained for optional retries)
##
#test one clique as in RoME
N=100
points = [[100.0;0.0],[0.0;100.0]]
fg = IIF.generateGraph_EuclidDistance(points)
fg.solverParams.graphinit = false
M = getManifold(fg, :l1)
TP = false
for i in 1:3
# global TP, N
tree = solveTree!(fg);
L1 = getBelief(fg, :l1) |> getPoints
# check that two modes exist
am1 = sum(isapprox.(Ref(M), L1, Ref([0.0,0.0]), atol=10))
am2 = sum(isapprox.(Ref(M), L1, Ref([100.0,100.0]), atol=10))
TP = am1 > N*0.03
TP &= am2 > N*0.03
if TP
@info "test passed in $i"
break
end
end
@test TP
##
end
@testset "Euclid Distance Tests" begin
# using Random
# Random.seed!(84)
# N=100
##
points = [[100.0],]
fg = IIF.generateGraph_EuclidDistance(points)
solveTree!(fg)
@test isapprox(getPPE(fg, :x1).suggested[1], 100, atol=1)
pts_ = getBelief(fg, :l1) |> getPoints
@cast pts[i,j] := pts_[j][i]
N = size(pts, 2)
# TODO add similar tests to the rest
@test_broken 0.3*N < sum(isapprox.(pts, 0, atol=5)) < 0.7*N
@test_broken 0.3*N < sum(isapprox.(pts,200, atol=5)) < 0.7*N
# Do it manually with a big inflation
# IIF._getCCW(fg, :x1l1f1).inflation = 100.0 # never gets there
# IIF._getCCW(fg, :x1l1f1).inflation = 150.0 # few iters gets there
fct = getFactorType(fg, :x1l1f1)
deleteFactor!(fg, :x1l1f1)
addFactor!(fg, [:x1;:l1], fct; inflation=200.0)
# IIF._getCCW(fg, :x1l1f1).inflation = 200.0 # One almost, second good
pts = approxConv(fg, :x1l1f1, :l1)
initVariable!(fg, :l1, pts)
# plotKDE(fg, ls(fg))
pts_ = approxConv(fg, :x1l1f1, :l1)
initVariable!(fg, :l1, pts_)
# plotKDE(fg, ls(fg))
@cast pts[i,j] := pts_[j][i]
@test 0.3*N < sum(isapprox.(pts, 0, atol=5)) < 0.7*N
@test 0.3*N < sum(isapprox.(pts,200, atol=5)) < 0.7*N
## Test zero with x-axis
points = [[100.0;0.0],]
fg = IIF.generateGraph_EuclidDistance(points)
solveTree!(fg)
## Test zero with y-axis
points = [[0.0;100.0],]
fg = IIF.generateGraph_EuclidDistance(points)
solveTree!(fg)
## Test zero with xy-axis 2 points
points = [[0.0;100.0],[100.0;0.0]]
fg = IIF.generateGraph_EuclidDistance(points)
solveTree!(fg)
## Test offset with xy-axis 2 points
points = [[50.0;100.0],[100.0;50.0]]
fg = IIF.generateGraph_EuclidDistance(points; dist=50.0)
solveTree!(fg)
# plotKDE(fg, ls(fg))
## Manual init
points = [[0.0;100.0],[100.0;0.0]]
fg = IIF.generateGraph_EuclidDistance(points)
getSolverParams(fg).inflation=3.0
initVariable!(fg, :x1, [rand(MvNormal([100.,0], [1.,1])) for _ in 1:N])
initVariable!(fg, :x2, [rand(MvNormal([0.,100], [1.,1])) for _ in 1:N])
# init = MixtureModel([MvNormal([100.,100], [10.,10]),
# MvNormal([0.,0], [10.,10])],
# [0.5, 0.5])
init = MvNormal([25.,25], [1.,1])
initVariable!(fg, :l1, [rand(init) for _ in 1:N])
# plotKDE(fg, ls(fg))
# single-clique elimination order
eliminationOrder = [:l1; :x2; :x1]
tree = solveTree!(fg; eliminationOrder)
##
end
## SolverPlotter debug
# Random.seed!(84)
# empty!(IIF.g_u0)
# empty!(IIF.g_r)
# Plots.scatter([getindex.(IIF.g_u0,1), getindex.(IIF.g_u0,2)], legend=nothing)
# Plots.scatter!([getindex.(IIF.g_r,1),getindex.(IIF.g_r,2)], legend=nothing)
# Plots.scatter([getindex.(IIF.g_r,1),getindex.(IIF.g_r,2)], legend=nothing)
# x = reshape(getindex.(IIF.g_r,1),100,:)
# y = reshape(getindex.(IIF.g_r,2),100,:)
# Plots.scatter(x[:,1:2:end],y[:,1:2:end], legend=nothing)
# Plots.scatter(x[:,1:2:end],y[:,1:2:end], legend=nothing)
## what would clique solution produce as up message
##
# plotKDE(fg, :l1)
##
# pts = approxConv(fg, :x2l1f1, :l1)
# plotKDE(manikde!(ContinuousEuclid{2}, pts))
# plotLocalProduct(fg, :l1, levels=3)
## what would clique solution produce as up message
# @error "continue test dev with #1168"
#solve the clique in isolation
# hist = solveCliqUp!(fg, tree, :x2; recordcliq=true);
# printCliqHistorySummary(hist)
# sfg = hist[end].csmc.cliqSubFg
# the belief that would have been sent by this clique:
# L1 = IIF.getMessageBuffer(hist[11].csmc.cliq).upTx.belief[:l1] |> manikde!
# fnc_, csmc_ = repeatCSMStep!(hist, 5);
# sfg = csmc_.cliqSubFg
##
# plotKDE(sfg, :l1)
##
# initVariable!(sfg, :l1, pts)
# pts = approxConv(sfg, :x2l1f1, :l1)
# plotKDE(manikde!(ContinuousEuclid{2}, pts)) | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 2696 |
##
using IncrementalInference
using Test
##
@testset "test endless cycle case, issue #754" begin
##
# testgraph from issue #754
fg = generateGraph_LineStep(5;
poseEvery=1,
landmarkEvery=5,
posePriorsAt=[0,2],
sightDistance=4)
#
getSolverParams(fg).graphinit = false
getSolverParams(fg).treeinit = true
getSolverParams(fg).useMsgLikelihoods = true
##
# tree = buildTreeReset!(fg)
# drawTree(tree, show=true)
##
# ENV["JULIA_DEBUG"] = :csm_4
smtasks = Task[]
hist = IIF.solveTree!(fg; smtasks=smtasks); #, recordcliqs=ls(fg));
##
# hists = fetchCliqHistoryAll!(smtasks);
# printCSMHistorySequential(hists)
# printCSMHistoryLogical(hists)
# ##
# fnc_ = repeatCSMStep!(hists, 4, 6)
##
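# each variable label's trailing digit encodes its ground-truth position on the
# line, so e.g. the PPE of :x3 should land near 3.0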
for var in sortDFG(ls(fg))
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
println("Testing ", var,": ", sppe)
@test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.2)
end
##
# linear octo
N=8
fg = generateGraph_LineStep(N;
graphinit=false,
poseEvery=1,
landmarkEvery=N+1,
posePriorsAt=[0],
landmarkPriorsAt=[],
sightDistance=N+1)
deleteFactor!.(fg, [Symbol("x$(i)lm0f1") for i=1:(N-1)])
getSolverParams(fg).graphinit = false
getSolverParams(fg).treeinit = true
getSolverParams(fg).useMsgLikelihoods = true
smtasks = Task[]
tree = IIF.solveTree!(fg; smtasks=smtasks);
##
# hists = fetchCliqHistoryAll!(smtasks);
# printCSMHistorySequential(hists, 2=>1:50)
# printCSMHistoryLogical(hists)
# ##
# csmc_ = repeatCSMStep!(hists, 2, 14)
##
for var in sortDFG(ls(fg))
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
println("Testing ", var,": ", sppe)
@test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.4)
end
##
# Larger graph
fg = generateGraph_LineStep(15;
poseEvery=1,
landmarkEvery=3,
posePriorsAt=[0,7,12],
landmarkPriorsAt=[0,3],
sightDistance=2)
getSolverParams(fg).graphinit = false
getSolverParams(fg).treeinit = true
getSolverParams(fg).useMsgLikelihoods = true
smtasks = Task[]
tree = IIF.solveTree!(fg; smtasks=smtasks);
for var in sortDFG(ls(fg))
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
println("Testing ", var,": ", sppe)
s = findfirst(r"\d", string(var))[1]
@test isapprox(sppe[1], parse(Int,string(var)[s:end]), atol=0.45)
end
##
end | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 17071 |
# Test multihypo computation assembly
using Test
using IncrementalInference
@testset "test IncrementalInference._prepareHypoRecipe! with only nullhypothesis..." begin
##
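# Assumed positional signature, inferred from the calls in this file:
#   _prepareHypoRecipe!(mh::Union{Nothing,<:Categorical}, N::Int, sfidx::Int,
#                       nvars::Int[, isinit::Vector{Bool}, nullhypo::Real])
# returning a recipe with fields certainidx, allelements, activehypo, mhidx.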
# n2_1 == (certainidx, allelements, activehypo, mhidx)
n2_1_gt1 = 1:2
n2_1_gt2_ = (3,3,0)
n2_1_gt3 = [(0,Int[1;]); (1,1:2); (2,Int[])]
n2_1_gt4_ = 20
n2_1 = IncrementalInference._prepareHypoRecipe!(nothing, 20, 1, 2, ones(Bool, 2), 0.5 )
@test sum([n2_1_gt1;] - [n2_1.certainidx;]) == 0
@test length(n2_1.allelements[1]) > n2_1_gt2_[1]
@test length(n2_1.allelements[2]) > n2_1_gt2_[2]
@test length(n2_1.allelements[3]) == n2_1_gt2_[3]
@test length(n2_1.allelements[1]) + length(n2_1.allelements[2]) == n2_1_gt4_
@test n2_1_gt3[1][1] == n2_1.activehypo[1][1]
@test n2_1_gt3[2][1] == n2_1.activehypo[2][1]
@test n2_1_gt3[3][1] == n2_1.activehypo[3][1]
@test sum(n2_1_gt3[1][2] .- n2_1.activehypo[1][2]) == 0
@test sum([n2_1_gt3[2][2];] .- [n2_1.activehypo[2][2];]) == 0
@test sum(n2_1_gt3[3][2] .- n2_1.activehypo[3][2]) == 0
@test sum(n2_1.mhidx .== 0) > n2_1_gt2_[1]
@test sum(n2_1.mhidx .== 1) > n2_1_gt2_[1]
@test sum( [1:n2_1_gt4_;][n2_1.mhidx .== 0] .== n2_1.allelements[1] ) == length(n2_1.allelements[1])
@test sum( [1:n2_1_gt4_;][n2_1.mhidx .== 1] .== n2_1.allelements[2] ) == length(n2_1.allelements[2])
@test length(n2_1.mhidx) == n2_1_gt4_
# n2_1 == (certainidx, allelements, activehypo, mhidx)
n2_1_gt1 = 1:2
n2_1_gt2_ = (3,3,0)
n2_1_gt3 = [(0,Int[2;]); (1,1:2); (2,Int[])]
n2_1_gt4_ = 20
n2_1 = IncrementalInference._prepareHypoRecipe!(nothing, 20, 2, 2, ones(Bool, 2), 0.5 )
@test sum([n2_1_gt1;] - [n2_1.certainidx;]) == 0
@test length(n2_1.allelements[1]) > n2_1_gt2_[1]
@test length(n2_1.allelements[2]) > n2_1_gt2_[2]
@test length(n2_1.allelements[3]) == n2_1_gt2_[3]
@test length(n2_1.allelements[1]) + length(n2_1.allelements[2]) == n2_1_gt4_
@test n2_1_gt3[1][1] == n2_1.activehypo[1][1]
@test n2_1_gt3[2][1] == n2_1.activehypo[2][1]
@test n2_1_gt3[3][1] == n2_1.activehypo[3][1]
@test sum(n2_1_gt3[1][2] .- n2_1.activehypo[1][2]) == 0
@test sum([n2_1_gt3[2][2];] .- [n2_1.activehypo[2][2];]) == 0
@test sum(n2_1_gt3[3][2] .- n2_1.activehypo[3][2]) == 0
@test sum(n2_1.mhidx .== 0) > n2_1_gt2_[1]
@test sum(n2_1.mhidx .== 1) > n2_1_gt2_[1]
@test sum( [1:n2_1_gt4_;][n2_1.mhidx .== 0] .== n2_1.allelements[1] ) == length(n2_1.allelements[1])
@test sum( [1:n2_1_gt4_;][n2_1.mhidx .== 1] .== n2_1.allelements[2] ) == length(n2_1.allelements[2])
@test length(n2_1.mhidx) == n2_1_gt4_
##
end
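# Minimal sketch (assumptions as in the testset above): with only a null hypothesis
# every one of the N samples is assigned either to the active hypothesis (1) or to
# the null hypothesis (0) in mhidx.
let
  recipe = IncrementalInference._prepareHypoRecipe!(nothing, 10, 1, 2, ones(Bool, 2), 0.5)
  @assert length(recipe.mhidx) == 10
  @assert all(h -> h in (0, 1), recipe.mhidx)
end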
@testset "test IncrementalInference._prepareHypoRecipe! without multihypothesis..." begin
##
# certainidx = 1 ## ??
# sfidx=1, mhidx=0: ah = [1;]
# sfidx=1, mhidx=1: ah = [1;2]
# s2_1 == (certainidx, allelements, activehypo, mhidx)
s2_1_gt1 = 1:2
s2_1_gt2 = (Int[],1:20,Int[])
s2_1_gt3 = [(0,Int[1;]); (1,1:2); (2,Int[])]
s2_1_gt4 = ones(20)
s2_1 = IncrementalInference._prepareHypoRecipe!(nothing, 20, 1, 2 )
@test sum([s2_1_gt1;] .- [s2_1.certainidx;]) == 0
@test sum( s2_1_gt2[1] .- s2_1.allelements[1]) == 0
@test sum([s2_1_gt2[2];] .- [s2_1.allelements[2];]) == 0
@test sum( s2_1_gt2[3] .- s2_1.allelements[3]) == 0
@test s2_1_gt3[1][1] == s2_1.activehypo[1][1]
@test sum(s2_1_gt3[1][2] .- s2_1.activehypo[1][2]) == 0
@test s2_1_gt3[2][1] == s2_1.activehypo[2][1]
@test sum([s2_1_gt3[2][2];] .- [s2_1.activehypo[2][2];]) == 0
@test s2_1_gt3[3][1] == s2_1.activehypo[3][1]
@test sum(s2_1_gt3[3][2] .- s2_1.activehypo[3][2]) == 0
@test sum(s2_1_gt4 .- s2_1.mhidx) == 0
s2_2_gt1 = 1:2
s2_2_gt2 = (Int[],1:20,Int[])
s2_2_gt3 = [(0,Int[2;]); (1,1:2); (2,Int[])]
s2_2_gt4 = ones(20) # Int[]
s2_2 = IncrementalInference._prepareHypoRecipe!(nothing, 20, 2, 2 )
@test sum([s2_2_gt1;] .- [s2_2.certainidx;]) == 0
@test sum([s2_2_gt2[1];] .- [s2_2.allelements[1];]) == 0
@test sum(s2_2_gt2[2] .- s2_2.allelements[2]) == 0
@test sum(s2_2_gt2[3] .- s2_2.allelements[3]) == 0
@test s2_2_gt3[1][1] == s2_2.activehypo[1][1]
@test sum([s2_2_gt3[1][2];] .- [s2_2.activehypo[1][2];]) == 0
@test s2_2_gt3[2][1] == s2_2.activehypo[2][1]
@test sum([s2_2_gt3[2][2];] .- [s2_2.activehypo[2][2];]) == 0
@test s2_2_gt3[3][1] == s2_2.activehypo[3][1]
@test sum([s2_2_gt3[3][2];] .- [s2_2.activehypo[3][2];]) == 0
@test sum(s2_2_gt4 .- s2_2.mhidx) == 0
##
end
@testset "_prepareHypoRecipe! with bi-modality (certain variable)" begin
##
# certainidx = 1
# sfidx=1, mhidx=1: ah = []
# sfidx=1, mhidx=2: ah = [1;2]
# sfidx=1, mhidx=3: ah = [1;3]
s3_1_gt1 = [1]
s3_1_gt2 = (0,3,3,40)
s3_1_gt3 = [(1,Int[]); (2,Int[1;2]); (3,Int[1;3])]
s3_1_gt4 = 40
s3_1 = IncrementalInference._prepareHypoRecipe!(Categorical([0.0;0.5;0.5]), 40, 1, 3)
@test sum([s3_1_gt1;] - [s3_1.certainidx;]) == 0
@test sum([s3_1_gt2[1];] .- [s3_1.allelements[1];]) == 0
@test length(s3_1.allelements[2]) > s3_1_gt2[2]
@test length(s3_1.allelements[3]) > s3_1_gt2[3]
@test length(s3_1.allelements[2]) + length(s3_1.allelements[3]) == s3_1_gt2[4]
@test s3_1_gt3[1][1] == s3_1.activehypo[1][1]
@test s3_1_gt3[2][1] == s3_1.activehypo[2][1]
@test s3_1_gt3[3][1] == s3_1.activehypo[3][1]
@test sum(s3_1_gt3[1][2] .- s3_1.activehypo[1][2]) == 0
@test sum(s3_1_gt3[2][2] .- s3_1.activehypo[2][2]) == 0
@test sum(s3_1_gt3[3][2] .- s3_1.activehypo[3][2]) == 0
@test sum(s3_1.mhidx .== 2) > s3_1_gt2[2]
@test sum(s3_1.mhidx .== 3) > s3_1_gt2[3]
@test sum( [1:40;][s3_1.mhidx .== 2] .== s3_1.allelements[2] ) == length(s3_1.allelements[2])
@test sum( [1:40;][s3_1.mhidx .== 3] .== s3_1.allelements[3] ) == length(s3_1.allelements[3])
@test length(s3_1.mhidx) == s3_1_gt4
##
end
@testset "_prepareHypoRecipe! with bi-modality (fractional variable 1/2)" begin
##
# certainidx = 1
# sfidx=2, mhidx=1: ah = [1;2]
# sfidx=2, mhidx=2: ah = [2;3]
# sfidx=2, mhidx=3: [2;3], 2 should take a value from 3
s3_2_gt1 = [1]
s3_2_gt2 = (0,3,3,40)
s3_2_gt3 = [(0, Int[2]); (1,Int[1;2]); (2,Int[1;2]); (3,Int[2;3])]
s3_2_gt4 = 40
s3_2 = IncrementalInference._prepareHypoRecipe!(Categorical([0.0;0.5;0.5]), 40, 2, 3 )
@test sum(s3_2_gt1 - s3_2.certainidx) == 0
@test sum(s3_2_gt2[1] .- s3_2.allelements[2]) == 0
@test length(s3_2.allelements[1]) > 0.5*s3_2_gt2[2] # reuse test reference for bad-init nullhypo case
@test length(s3_2.allelements[2]) == 0
@test length(s3_2.allelements[3]) > s3_2_gt2[2]
@test length(s3_2.allelements[4]) > s3_2_gt2[3]
@test length(s3_2.allelements[1]) + length(s3_2.allelements[3]) + length(s3_2.allelements[4]) == s3_2_gt2[4]
@test s3_2_gt3[1][1] == s3_2.activehypo[1][1]
@test s3_2_gt3[2][1] == s3_2.activehypo[2][1]
@test s3_2_gt3[3][1] == s3_2.activehypo[3][1]
@test sum(s3_2_gt3[1][2] .- s3_2.activehypo[1][2]) == 0
@test sum(s3_2_gt3[2][2] .- s3_2.activehypo[2][2]) == 0
@test sum(s3_2_gt3[3][2] .- s3_2.activehypo[3][2]) == 0
@test sum(s3_2.mhidx .== 2) > s3_2_gt2[2]
@test sum(s3_2.mhidx .== 3) > s3_2_gt2[3]
@test sum( [1:40;][s3_2.mhidx .== 0] .== s3_2.allelements[1] ) == length(s3_2.allelements[1])
@test 0 == length(s3_2.allelements[2])
@test sum( [1:40;][s3_2.mhidx .== 2] .== s3_2.allelements[3] ) == length(s3_2.allelements[3])
@test sum( [1:40;][s3_2.mhidx .== 3] .== s3_2.allelements[4] ) == length(s3_2.allelements[4])
@test length(s3_2.mhidx) == s3_2_gt4
##
end
@testset "_prepareHypoRecipe! with bi-modality (fractional variable 2/2)" begin
##
# certainidx = 1
# sfidx=3, mhidx=1: ah = [1;3]
# sfidx=3, mhidx=2: [2:3], 3 should take a value from 2
# sfidx=3, mhidx=3: ah = [1;3]
s3_3_gt1 = [1]
s3_3_gt2 = (0,3,3,40)
s3_3_gt3 = [(0, Int[3]); (1,Int[1;3]); (2,Int[2;3]); (3,Int[1;3])]
s3_3_gt4 = 40
s3_3 = IncrementalInference._prepareHypoRecipe!(Categorical([0.0;0.5;0.5]), 40, 3, 3 )
@test sum(s3_3_gt1 - s3_3.certainidx) == 0
@test sum(s3_3_gt2[1] .- s3_3.allelements[2]) == 0
@test length(s3_3.allelements[1]) > 0.5*s3_3_gt2[2]
@test length(s3_3.allelements[2]) == 0
@test length(s3_3.allelements[3]) > s3_3_gt2[2]
@test length(s3_3.allelements[4]) > s3_3_gt2[3]
@test length(s3_3.allelements[1]) + length(s3_3.allelements[3]) + length(s3_3.allelements[4]) == s3_3_gt2[4]
@test s3_3_gt3[1][1] == s3_3.activehypo[1][1]
@test s3_3_gt3[2][1] == s3_3.activehypo[2][1]
@test s3_3_gt3[3][1] == s3_3.activehypo[3][1]
@test sum(s3_3_gt3[1][2] .- s3_3.activehypo[1][2]) == 0
@test sum(s3_3_gt3[2][2] .- s3_3.activehypo[2][2]) == 0
@test sum(s3_3_gt3[3][2] .- s3_3.activehypo[3][2]) == 0
@test sum(s3_3.mhidx .== 2) > s3_3_gt2[2]
@test sum(s3_3.mhidx .== 3) > s3_3_gt2[3]
@test sum( [1:40;][s3_3.mhidx .== 0] .== s3_3.allelements[1] ) == length(s3_3.allelements[1])
@test 0 == length(s3_3.allelements[2])
@test sum( [1:40;][s3_3.mhidx .== 2] .== s3_3.allelements[3] ) == length(s3_3.allelements[3])
@test sum( [1:40;][s3_3.mhidx .== 3] .== s3_3.allelements[4] ) == length(s3_3.allelements[4])
@test length(s3_3.mhidx) == s3_3_gt4
##
end
# @testset "test IncrementalInference._prepareHypoRecipe! with bi-modality backwards permutation..." begin
# certainidx = 1
# sfidx=1, mhidx=1: ah = []
# sfidx=1, mhidx=2: ah = [1;2]
# sfidx=1, mhidx=3: ah = [1;3]
# s3_1_gt1 = [1]
# s3_1_gt2 = (0,3,3,20)
# s3_1_gt3 = [(1,Int[]); (2,Int[1;2]); (3,Int[1;3])]
# s3_1_gt4 = 20
#
# s3_1 = IncrementalInference._prepareHypoRecipe!(Categorical([0.5;0.5;0.0]), 20, 1, 3)
# @test sum(s3_1_gt1 - s3_1.certainidx) == 0
# @test sum(s3_1_gt2[1] .- s3_1.allelements[1]) == 0
# @test length(s3_1.allelements[2]) > s3_1_gt2[2]
# @test length(s3_1.allelements[3]) > s3_1_gt2[3]
# @test length(s3_1.allelements[2]) + length(s3_1.allelements[3]) == s3_1_gt2[4]
# @test s3_1_gt3[1][1] == s3_1.activehypo[1][1]
# @test s3_1_gt3[2][1] == s3_1.activehypo[2][1]
# @test s3_1_gt3[3][1] == s3_1.activehypo[3][1]
# @test sum(s3_1_gt3[1][2] .- s3_1.activehypo[1][2]) == 0
# @test sum(s3_1_gt3[2][2] .- s3_1.activehypo[2][2]) == 0
# @test sum(s3_1_gt3[3][2] .- s3_1.activehypo[3][2]) == 0
#
# @test sum(s3_1.mhidx .== 2) > s3_1_gt2[2]
# @test sum(s3_1.mhidx .== 3) > s3_1_gt2[3]
#
# @test sum( [1:20;][s3_1.mhidx .== 2] .== s3_1.allelements[2] ) == length(s3_1.allelements[2])
# @test sum( [1:20;][s3_1.mhidx .== 3] .== s3_1.allelements[3] ) == length(s3_1.allelements[3])
# @test length(s3_1.mhidx) == s3_1_gt4
#
# end
@testset "_prepareHypoRecipe! with tri-modality (certain variable)" begin
##
N = 50
s4_1_gt1 = [1]
s4_1_gt2 = (0,3,3,3,N)
s4_1_gt3 = [(1,Int[]); (2,Int[1;2]); (3,Int[1;3]); (4,Int[1;4])]
s4_1_gt4 = N
s4_1 = IncrementalInference._prepareHypoRecipe!(Categorical([0.0;0.33;0.33;0.34]), N, 1, 4 )
@test sum(s4_1_gt1 - s4_1.certainidx) == 0
@test sum(s4_1_gt2[1] .- s4_1.allelements[1]) == 0
@test length(s4_1.allelements[2]) > s4_1_gt2[2]
@test length(s4_1.allelements[3]) > s4_1_gt2[3]
@test length(s4_1.allelements[4]) > s4_1_gt2[4]
@test length(s4_1.allelements[2]) + length(s4_1.allelements[3]) + length(s4_1.allelements[4]) == s4_1_gt2[5]
@test s4_1_gt3[1][1] == s4_1.activehypo[1][1]
@test s4_1_gt3[2][1] == s4_1.activehypo[2][1]
@test s4_1_gt3[3][1] == s4_1.activehypo[3][1]
@test s4_1_gt3[4][1] == s4_1.activehypo[4][1]
@test sum(s4_1_gt3[1][2] .- s4_1.activehypo[1][2]) == 0
@test sum(s4_1_gt3[2][2] .- s4_1.activehypo[2][2]) == 0
@test sum(s4_1_gt3[3][2] .- s4_1.activehypo[3][2]) == 0
@test sum(s4_1_gt3[4][2] .- s4_1.activehypo[4][2]) == 0
@test sum(s4_1.mhidx .== 2) > s4_1_gt2[2]
@test sum(s4_1.mhidx .== 3) > s4_1_gt2[3]
@test sum(s4_1.mhidx .== 4) > s4_1_gt2[4]
@test sum( [1:N;][s4_1.mhidx .== 2] .== s4_1.allelements[2] ) == length(s4_1.allelements[2])
@test sum( [1:N;][s4_1.mhidx .== 3] .== s4_1.allelements[3] ) == length(s4_1.allelements[3])
@test sum( [1:N;][s4_1.mhidx .== 4] .== s4_1.allelements[4] ) == length(s4_1.allelements[4])
@test length(s4_1.mhidx) == s4_1_gt4
##
end
@testset "_prepareHypoRecipe! with tri-modality (fractional variable 1/3)" begin
## solve for fractional variable in trinary case
N = 70
s4_2_gt1 = [1]
s4_2_gt2 = (0,3,3,3,N)
s4_2_gt3 = [(0,Int[2]); (1,Int[1;2]); (2,Int[1;2]); (3,Int[2;3;4]); (4,Int[2;3;4])]
s4_2_gt4 = N
s4_2 = IncrementalInference._prepareHypoRecipe!(Categorical([0.0;0.33;0.33;0.34]), N, 2, 4 )
@test sum(s4_2_gt1 - s4_2.certainidx) == 0
@test length(s4_2.allelements[1]) > 0.5*s4_2_gt2[2]
@test sum(s4_2_gt2[2] .- s4_2.allelements[2]) == 0
@test length(s4_2.allelements[3]) > s4_2_gt2[2]
@test length(s4_2.allelements[4]) > s4_2_gt2[3]
@test length(s4_2.allelements[5]) > s4_2_gt2[4]
@test length(s4_2.allelements[1]) + length(s4_2.allelements[3]) + length(s4_2.allelements[4]) + length(s4_2.allelements[5]) == s4_2_gt2[5]
@test s4_2_gt3[1][1] == s4_2.activehypo[1][1]
@test s4_2_gt3[2][1] == s4_2.activehypo[2][1]
@test s4_2_gt3[3][1] == s4_2.activehypo[3][1]
@test s4_2_gt3[4][1] == s4_2.activehypo[4][1]
@test sum(s4_2_gt3[1][2] .- s4_2.activehypo[1][2]) == 0
@test sum(s4_2_gt3[2][2] .- s4_2.activehypo[2][2]) == 0
@test sum(s4_2_gt3[3][2] .- s4_2.activehypo[3][2]) == 0
@test sum(s4_2_gt3[4][2] .- s4_2.activehypo[4][2]) == 0
@test sum(s4_2.mhidx .== 0) > s4_2_gt2[2]
@test sum(s4_2.mhidx .== 2) > s4_2_gt2[2]
@test sum(s4_2.mhidx .== 3) > s4_2_gt2[3]
@test sum(s4_2.mhidx .== 4) > s4_2_gt2[4]
@test sum( [1:N;][s4_2.mhidx .== 0] .== s4_2.allelements[1] ) == length(s4_2.allelements[1])
@test 0 == length(s4_2.allelements[2])
@test sum( [1:N;][s4_2.mhidx .== 2] .== s4_2.allelements[3] ) == length(s4_2.allelements[3])
@test sum( [1:N;][s4_2.mhidx .== 3] .== s4_2.allelements[4] ) == length(s4_2.allelements[4])
@test sum( [1:N;][s4_2.mhidx .== 4] .== s4_2.allelements[5] ) == length(s4_2.allelements[5])
@test length(s4_2.mhidx) == s4_2_gt4
##
end
@testset "_prepareHypoRecipe! with tri-modality (fractional variable 2/3)" begin
##
N = 70
s4_3_gt1 = [1]
s4_3_gt2 = (0,3,3,3,N)
s4_3_gt3 = [(0,Int[3]); (1,Int[1;3]); (2,Int[2;3;4]); (3,Int[1;3]); (4,Int[2;3;4])]
s4_3_gt4 = N
s4_3 = IncrementalInference._prepareHypoRecipe!(Categorical([0.0;0.33;0.33;0.34]), N, 3, 4 )
@test sum(s4_3_gt1 - s4_3.certainidx) == 0
@test length(s4_3.allelements[1]) > 0.5*s4_3_gt2[2]
@test sum(s4_3_gt2[2] .- s4_3.allelements[2]) == 0
@test length(s4_3.allelements[3]) > s4_3_gt2[2]
@test length(s4_3.allelements[4]) > s4_3_gt2[3]
@test length(s4_3.allelements[5]) > s4_3_gt2[4]
@test length(s4_3.allelements[1]) + length(s4_3.allelements[3]) + length(s4_3.allelements[4]) + length(s4_3.allelements[5]) == s4_3_gt2[5]
@test s4_3_gt3[1][1] == s4_3.activehypo[1][1]
@test s4_3_gt3[2][1] == s4_3.activehypo[2][1]
@test s4_3_gt3[3][1] == s4_3.activehypo[3][1]
@test s4_3_gt3[4][1] == s4_3.activehypo[4][1]
@test sum(s4_3_gt3[1][2] .- s4_3.activehypo[1][2]) == 0
@test sum(s4_3_gt3[2][2] .- s4_3.activehypo[2][2]) == 0
@test sum(s4_3_gt3[3][2] .- s4_3.activehypo[3][2]) == 0
@test sum(s4_3_gt3[4][2] .- s4_3.activehypo[4][2]) == 0
@test sum(s4_3.mhidx .== 0) > s4_3_gt2[2]
@test sum(s4_3.mhidx .== 2) > s4_3_gt2[2]
@test sum(s4_3.mhidx .== 3) > s4_3_gt2[3]
@test sum(s4_3.mhidx .== 4) > s4_3_gt2[4]
@test sum( [1:N;][s4_3.mhidx .== 0] .== s4_3.allelements[1] ) == length(s4_3.allelements[1])
@test 0 == length(s4_3.allelements[2])
@test sum( [1:N;][s4_3.mhidx .== 2] .== s4_3.allelements[3] ) == length(s4_3.allelements[3])
@test sum( [1:N;][s4_3.mhidx .== 3] .== s4_3.allelements[4] ) == length(s4_3.allelements[4])
@test sum( [1:N;][s4_3.mhidx .== 4] .== s4_3.allelements[5] ) == length(s4_3.allelements[5])
@test length(s4_3.mhidx) == s4_3_gt4
##
end
@testset "_prepareHypoRecipe! with tri-modality (fractional variable 3/3)" begin
##
N = 70
s4_4_gt1 = [1]
s4_4_gt2 = (0,3,3,3,N)
s4_4_gt3 = [(0,Int[4]); (1,Int[1;4]); (2,Int[2;3;4]); (3,Int[2;3;4]); (4,Int[1;4])]
s4_4_gt4 = N
s4_4 = IncrementalInference._prepareHypoRecipe!(Categorical([0.0;0.33;0.33;0.34]), N, 4, 4 )
@test sum(s4_4_gt1 - s4_4.certainidx) == 0
@test length(s4_4.allelements[1]) > 0.5*s4_4_gt2[2]
@test sum(s4_4_gt2[2] .- s4_4.allelements[2]) == 0
@test length(s4_4.allelements[3]) > s4_4_gt2[2]
@test length(s4_4.allelements[4]) > s4_4_gt2[3]
@test length(s4_4.allelements[5]) > s4_4_gt2[4]
@test length(s4_4.allelements[1]) + length(s4_4.allelements[3]) + length(s4_4.allelements[4]) + length(s4_4.allelements[5]) == s4_4_gt2[5]
@test s4_4_gt3[1][1] == s4_4.activehypo[1][1]
@test s4_4_gt3[2][1] == s4_4.activehypo[2][1]
@test s4_4_gt3[3][1] == s4_4.activehypo[3][1]
@test s4_4_gt3[4][1] == s4_4.activehypo[4][1]
@test sum(s4_4_gt3[1][2] .- s4_4.activehypo[1][2]) == 0
@test sum(s4_4_gt3[2][2] .- s4_4.activehypo[2][2]) == 0
@test sum(s4_4_gt3[3][2] .- s4_4.activehypo[3][2]) == 0
@test sum(s4_4_gt3[4][2] .- s4_4.activehypo[4][2]) == 0
@test sum(s4_4.mhidx .== 0) > s4_4_gt2[2]
@test sum(s4_4.mhidx .== 2) > s4_4_gt2[2]
@test sum(s4_4.mhidx .== 3) > s4_4_gt2[3]
@test sum(s4_4.mhidx .== 4) > s4_4_gt2[4]
@test sum( [1:N;][s4_4.mhidx .== 0] .== s4_4.allelements[1] ) == length(s4_4.allelements[1])
@test 0 == length(s4_4.allelements[2])
@test sum( [1:N;][s4_4.mhidx .== 2] .== s4_4.allelements[3] ) == length(s4_4.allelements[3])
@test sum( [1:N;][s4_4.mhidx .== 3] .== s4_4.allelements[4] ) == length(s4_4.allelements[4])
@test sum( [1:N;][s4_4.mhidx .== 4] .== s4_4.allelements[5] ) == length(s4_4.allelements[5])
@test length(s4_4.mhidx) == s4_4_gt4
@warn "only partially testing tri-modality"
##
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 2919 |
# PoC on jacobians for a factor
using IncrementalInference
using TensorCast
using Manifolds
using Test
# overloading with new dispatch
import IncrementalInference: getSample, getManifold
##
@testset "test manual call to gradient lambda utilities" begin
##
pp = LinearRelative(MvNormal([10;0],[1 0; 0 1]))
measurement = [10.0;0.0]
varTypes = (ContinuousEuclid{2}, ContinuousEuclid{2})
pts = ([0;0.0], [9.5;0])
##
J__, λ_fncs, λ_sizes = IIF._prepFactorGradientLambdas(pp, measurement, varTypes, pts; h=1e-4);
##
λ_fncs[1][1]()
λ_fncs[1][2]()
λ_fncs[2][1]()
λ_fncs[2][2]()
##
J__
@test norm( J__ - [0 0 1 0; 0 0 0 1; 1 0 0 0; 0 1 0 0] ) < 1e-4
## build new functor container
gradFct = FactorGradientsCached!(pp, varTypes, measurement, pts);
## test grad calc for current values
J_c = gradFct()
@test norm( J_c - [0 0 1 0; 0 0 0 1; 1 0 0 0; 0 1 0 0] ) < 1e-4
##
J = gradFct(measurement, pts...)
##
@test norm( J - [0 0 1 0; 0 0 0 1; 1 0 0 0; 0 1 0 0] ) < 1e-4
## check on transmitted info per coords
ret = calcPerturbationFromVariable(gradFct, [1=>[1;1]])
# the fromVar itself should be zero
@test length(ret[1]) == 2
@test isapprox( ret[1], [0;0], atol=1e-6 )
# the other variable
@test length(ret[2]) == 2
@test isapprox( ret[2], [1;1], atol=1e-6 )
##
end
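# Minimal 1-dim sketch mirroring the 2-dim testset above (names illustrative): for
# LinearRelative the Jacobian blocks are identity-like, so a unit perturbation on
# variable 1 should transmit as a unit perturbation onto variable 2.
let
  f1d = LinearRelative(Normal(10.0, 1.0))
  g1d = FactorGradientsCached!(f1d, (ContinuousScalar, ContinuousScalar), [10.0;], ([0.0;], [10.0;]))
  g1d([10.0;], [0.0;], [10.0;])  # recompute the cached Jacobian at these coordinates
  ret1d = calcPerturbationFromVariable(g1d, [1 => [1.0;]])
  @assert isapprox(ret1d[2], [1.0;], atol=1e-3)
end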
##
struct TestPartialRelative2D{B <: SamplableBelief} <: IIF.AbstractRelativeMinimize
Z::B
partial::Tuple{Int}
end
# standard helper with partial set
TestPartialRelative2D(z::SamplableBelief) = TestPartialRelative2D(z, (2,))
# imported earlier for overload
getManifold(fnc::TestPartialRelative2D) = TranslationGroup(2)
getSample(cf::CalcFactor{<:TestPartialRelative2D}) = rand(cf.factor.Z, 1)
# currently requires residual to be returned as a tangent vector element
(cf::CalcFactor{<:TestPartialRelative2D})(z, x1, x2) = x2[2:2] - (x1[2:2] + z[1:1])
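# Note (per the definition above): partial=(2,) declares that this factor constrains
# only the second coordinate, so perturbations along dimension 1 should not transmit.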
##
@testset "test a partial, binary relative factor perturbation (a new user factor)" begin
##
tpr = TestPartialRelative2D(Normal(10,1))
measurement = [10.0;]
pts = ([0;0.0], [0;10.0])
varTypes = (ContinuousEuclid{2}, ContinuousEuclid{2})
## construct the lambdas
gradients = FactorGradientsCached!(tpr, varTypes, measurement, pts);
## calculate new gradients
J = gradients(measurement, pts...)
## check on transmitted info per coords
ret = calcPerturbationFromVariable(gradients, [1=>[1;1]])
# the fromVar itself should be zero
@test length(ret[1]) == 2
@test isapprox( ret[1], [0;0], atol=1e-6 )
# the other variable only affects the second coordinate dimension
@test length(ret[2]) == 2
@test isapprox( ret[2], [0;1], atol=1e-6 )
## check the reverse perturbation
ret = calcPerturbationFromVariable(gradients, [2=>[1;1]])
# only the second coordinate dimension is affected
@test length(ret[1]) == 2
@test isapprox( ret[1], [0;1], atol=1e-6 )
# the fromVar itself should be zero
@test length(ret[2]) == 2
@test isapprox( ret[2], [0;0], atol=1e-6 )
##
end
# | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 4314 | # test FluxModelsDistribution and serialization
using Test
using BSON, Flux, IncrementalInference
using TensorCast
##
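# Constructor shape used in this file (argument meanings assumed from these tests):
#   FluxModelsDistribution(inDim::Tuple, outDim::Tuple, models, data,
#                          shuffle::Bool, serializeHollow::Bool)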
@testset "FluxModelsDistribution serialization" begin
##
# can a model be serialized
mdls = [Chain(Dense(5,2),Dense(2,3)); Chain(Dense(5,4), Dense(4,3))]
fxd = FluxModelsDistribution((5,),(3,),mdls,rand(5), false, false)
# check sampler is working
measd = rand(fxd, 2)
@test length( measd ) == 2
# convert to packed type
fxp = convert(PackedSamplableBelief, fxd) # TODO, PackedSamplableBelief
@test fxp isa IIF.PackedFluxModelsDistribution
# convert back to hydrated object
fxu = convert(SamplableBelief, fxp)
@test fxu isa FluxModelsDistribution
measu = rand(fxu, 2)
@test measd[1] - measu[1] |> norm < 1e-6
##
end
@testset "FluxModelsDistribution serialization" begin
##
mdls = [Chain(Dense(5,2),Dense(2,3)); Chain(Dense(5,4), Dense(4,3))]
fxd = FluxModelsDistribution((5,),(3,),mdls,rand(5), false, false)
fg = initfg()
addVariable!(fg, :x0, ContinuousEuclid{3})
pr = Prior(fxd)
addFactor!(fg, [:x0;], pr)
##
smpls = sampleFactor(fg, :x0f1, 10)
@test eltype(smpls) <: Vector{<:Real}
@test smpls isa Vector #{Vector{Float64}}
@test length( smpls ) == 10
##
# check local product
localProduct(fg, :x0)
solveTree!(fg)
##
saveDFG("/tmp/fg_test_flux", fg)
fg_ = loadDFG("/tmp/fg_test_flux")
ff1 = getFactorType(fg_, :x0f1)
ff1.Z.shuffle[] = true
solveTree!(fg_);
# remove the testing file
Base.rm("/tmp/fg_test_flux.tar.gz")
##
end
@testset "FluxModelsDistribution as Mixture with relative factor" begin
##
mdls = [Chain(Dense(10,50, relu),Dense(50,20),softmax, Dense(20,1, tanh)) for i in 1:50];
fxd = FluxModelsDistribution((10,),(1,),mdls,rand(10), false, false)
fg = initfg()
addVariable!(fg, :x0, ContinuousEuclid{1})
addVariable!(fg, :x1, ContinuousEuclid{1})
# a prior
pr = Prior(Normal())
addFactor!(fg, [:x0;], pr)
# a relative mixture network
mfx = Mixture(LinearRelative, (naive=Normal(10, 10), nn=fxd), [0.5;0.5])
addFactor!(fg, [:x0;:x1], mfx)
# and test overall serialization before solving
saveDFG("/tmp/fg_test_flux", fg)
# solve existing fg
solveTree!(fg)
# prior should pin x0 pretty well
pts_ = getBelief(fg, :x0) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test 80 < sum(-3 .< (pts) .< 3)
# at least some points should land according to the naive model
pts_ = getBelief(fg, :x1) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test 5 < sum(5 .< (pts) .< 15)
# will predict from existing fg
f1 = getFactorType(fg, :x0x1f1)
predictions = map(f->f(f1.components.nn.data), f1.components.nn.models)
# unpack into new fg_
fg_ = loadDFG("/tmp/fg_test_flux")
# same predictions with deserialized object
f1_ = getFactorType(fg_, :x0x1f1)
predictions_ = map(f->f(f1_.components.nn.data), f1_.components.nn.models)
# check that all predictions line up
@show norm(predictions - predictions_)
@test norm(predictions - predictions_) < 1e-6
f1_.components.nn.shuffle[] = true
# test solving of the new object
solveTree!(fg_);
# prior should pin x0 pretty well
pts_ = getBelief(fg_, :x0) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test 80 < sum(-3 .< (pts) .< 3)
# at least some points should land according to the naive model
pts_ = getBelief(fg_, :x1) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test 5 < sum(5 .< (pts) .< 15)
# remove the testing file
Base.rm("/tmp/fg_test_flux.tar.gz")
##
end
@testset "MixtureFluxModels testing" begin
##
# some made up data
data = randn(10)
# Flux models
models = [Flux.Chain(softmax, Dense(10,5,σ), Dense(5,1, tanh)) for i in 1:20]
# mixture with user defined names (optional) -- could also just pass Vector or Tuple of components
mix = MixtureFluxModels(PriorCircular, models, (10,), data, (1,),
(naiveNorm=Normal(),naiveUnif=Uniform()),
[0.7; 0.2; 0.1],
shuffle=false )
#
# test by add to simple graph
fg = initfg()
addVariable!(fg, :testmix, Circular)
addFactor!(fg, [:testmix;], mix)
pts = approxConv(fg, :testmixf1, :testmix);
# look at proposal distribution from the only factor on :testmix
_,pts,__, = localProduct(fg, :testmix);
saveDFG("/tmp/fg_mfx", fg)
#
fg_ = loadDFG("/tmp/fg_mfx")
Base.rm("/tmp/fg_mfx.tar.gz")
solveTree!(fg_);
@test 10 < (getBelief(fg_, :testmix) |> getPoints |> length)
##
end
# | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 3595 |
using IncrementalInference
using Test
##
@testset "test _evalFactorTemporary" begin
## test utility to build a temporary graph
fct = EuclidDistance(Normal(10,1))
varTypes = (ContinuousScalar,ContinuousScalar);
varPts = ([0;],[9.5;])
##
dfg, _dfgfct = IIF._buildGraphByFactorAndTypes!(fct, varTypes, varPts)
@test length(intersect(ls(dfg), [:x1; :x2])) == 2
@test lsf(dfg) == [:x1x2f1;]
## test the evaluation of the factor without slack
B = IIF._evalFactorTemporary!(EuclidDistance(Normal(10,1)), varTypes, 2, [[10;]], varPts );
@test B isa Vector{Vector{Float64}}
@test isapprox( B[1], [10.0;], atol=1e-6)
##
end
@testset "test residual slack prerequisite for numerical factor gradients, Euclidean(1)" begin
##
fct = EuclidDistance(Normal(10,1))
measurement = [[10;]]
varTypes = (ContinuousScalar,ContinuousScalar)
pts = ([0;],[9.5;])
##
slack_resid = calcFactorResidualTemporary(fct, varTypes, measurement[1], pts)
##
coord_1 = IIF._evalFactorTemporary!(fct, varTypes, 1, measurement, pts, _slack=slack_resid )
@test length(coord_1) == 1
@test isapprox( coord_1[1], [0.0], atol=1e-6)
coord_2 = IIF._evalFactorTemporary!(fct, varTypes, 2, measurement, pts, _slack=slack_resid )
@test length(coord_2) == 1
@test isapprox( coord_2[1], [9.5], atol=1e-6)
##
coord_1 = IIF._evalFactorTemporary!(fct, varTypes, 1, measurement, pts )
@test length(coord_1) == 1
@test isapprox( coord_1[1], [-0.5], atol=1e-6)
coord_2 = IIF._evalFactorTemporary!(fct, varTypes, 2, measurement, pts )
@test length(coord_2) == 1
@test isapprox( coord_2[1], [10.0], atol=1e-6)
##
end
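# In short (per the two cases above): passing _slack re-zeros the residual at the
# given points so each variable evaluates back to its current value, while without
# slack the factor pulls coordinates toward measurement-consistent values.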
@testset "test residual slack prerequisite for numerical factor gradients, Euclidean(2)" begin
##
fct = LinearRelative(MvNormal([10;0],[1 0; 0 1]))
measurement = [[10.0;0.0]]
varTypes = (ContinuousEuclid{2},ContinuousEuclid{2})
pts = ([0;0.0] ,[9.5;0])
## test the building of factor graph to be correct
_fg,_ = IIF._buildGraphByFactorAndTypes!(fct, varTypes, pts);
@test length(getVal(_fg[:x1])) == 1
@test length(getVal(_fg[:x1])[1]) == 2
@test length(getVal(_fg[:x2])) == 1
@test length(getVal(_fg[:x2])[1]) == 2
##
_fg = initfg()
slack_resid = calcFactorResidualTemporary(fct, varTypes, measurement[1], pts, tfg=_fg)
@test length(getVal(_fg[:x1])) == 1
@test length(getVal(_fg[:x1])[1]) == 2
@test length(getVal(_fg[:x2])) == 1
@test length(getVal(_fg[:x2])[1]) == 2
## Manually provide a common temp graph and force no factor and same variables via keywords
tfg,_ = IIF._buildGraphByFactorAndTypes!(fct, varTypes, pts);
coord_1 = IIF._evalFactorTemporary!(fct, varTypes, 1, measurement, pts, _slack=slack_resid, tfg=tfg, newFactor=false, currNumber=0 )
##
@test length(coord_1) == 1
@test length(coord_1[1]) == 2
@test isapprox( coord_1[1], [0;0.0], atol=1e-6)
coord_2 = IIF._evalFactorTemporary!(fct, varTypes, 2, measurement, pts, _slack=slack_resid )
@test length(coord_2) == 1
@test length(coord_2[1]) == 2
@test isapprox( coord_2[1], [9.5; 0], atol=1e-6)
## Repeat the same test but allow _evalFactorTemporary to self construct internal temporary graph
coord_1 = IIF._evalFactorTemporary!(fct, varTypes, 1, measurement, pts )
##
@test length(coord_1) == 1
@test length(coord_1[1]) == 2
@test isapprox( coord_1[1], [-0.5;0], atol=1e-6)
coord_2 = IIF._evalFactorTemporary!(fct, varTypes, 2, measurement, pts )
@test length(coord_2) == 1
@test length(coord_2[1]) == 2
@test isapprox( coord_2[1], [10.0;0], atol=1e-6)
##
end
@testset "Enable SolverParams.attemptGradients" begin
##
fg = generateGraph_LineStep(4;
solverParams = SolverParams(;
attemptGradients=true
)
)
##
end
# | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |