licenses sequencelengths 1 3 | version stringclasses 677 values | tree_hash stringlengths 40 40 | path stringclasses 1 value | type stringclasses 2 values | size stringlengths 2 8 | text stringlengths 25 67.1M | package_name stringlengths 2 41 | repo stringlengths 33 86 |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1168 | using IncrementalInference
using Test
##
@testset "Only do UPWARD_COMMON message likelihoods if hasPriors" begin
##
fg = generateGraph_LineStep(4;
poseEvery=1,
landmarkEvery=5,
posePriorsAt=[],
landmarkPriorsAt=[],
sightDistance=5,
solverParams=SolverParams(algorithms=[:default, :parametric]))
deleteFactor!.(fg, [Symbol("x$(i)lm0f1") for i=1:3])
#force wrong init
prpo = Prior(Normal(5, 0.01))
addFactor!(fg, [:x0], prpo)
initAll!(fg)
deleteFactor!(fg, :x0f1)
# now the correct prior
prpo = Prior(Normal(0, 0.01))
addFactor!(fg, [:x0], prpo)
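# (added note: the graph was deliberately initialized against the wrong prior at
# 5 above; the solves below must pull all poses back toward the correct prior at 0)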
fg.solverParams.useMsgLikelihoods = true
smtasks = Task[]
tree = solveTree!(fg; smtasks=smtasks, verbose=true, timeout=30);
@warn("hasPriors test needs multiple solves")
tree = solveTree!(fg);
tree = solveTree!(fg);
# tree = solveTree!(fg; smtasks, verbose=true, timeout=20, recordcliqs=ls(fg));
for i = 0:4
ppe = getPPE(getVariable(fg, Symbol("x$i"))).suggested[1]
@show i ppe
@test isapprox(ppe, i; atol=0.7)
end
##
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 998 |
using Test
using Interpolations
using IncrementalInference
using Distributions
using TensorCast
##
@testset "Test HeatmapGridDensity" begin
##
x = -10:0.1:10;
y = -10:0.1:10;
img = zeros(length(x),length(y))
# lambda dispatch in this scope was REALLY slow
mv = MvNormal(Diagonal([1.0;1.0]))
g(x,y) = pdf(mv,[x;y])
for (i,x_) in enumerate(x), (j,y_) in enumerate(y)
img[i,j] = g(x_,y_)
end
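# Illustrative aside (added, not part of the original test): the same grid can be
# filled with a single comprehension; `img_alt` is a hypothetical name for the
# equivalent result, assuming `g`, `x`, `y` as defined above.
img_alt = [g(x_, y_) for x_ in x, y_ in y]
@assert isapprox(img, img_alt)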
##
println("build a HeatmapGridDensity")
hgd = IIF.HeatmapGridDensity(img, (x,y), nothing, 0.07; N=1000)
@show hgd
println("test packing converters")
# check conversions to packed types
phgd = convert(PackedSamplableBelief, hgd)
hgd_ = convert(SamplableBelief, phgd)
@test isapprox( hgd, hgd_ )
# use in graph
## Check that sampling of the HeatmapGridDensity is correct
pts_ = sample(hgd,1000)[1]
@cast pts_x[i] := pts_[i][1]
@cast pts_y[i] := pts_[i][2]
f = fit(MvNormal, hcat(pts_x,pts_y)')
@test isapprox([0;0], f.μ; atol=0.15)
@test isapprox([1 0; 0 1], f.Σ.mat; atol=0.4)
##
end | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 2145 | using Test
using IncrementalInference
@testset "Testing getCliqVarInitOrderUp" begin
fg = generateGraph_LineStep(3;
poseEvery=1,
landmarkEvery=5,
posePriorsAt=[0],
landmarkPriorsAt=[0,2],
sightDistance=3)
fg.solverParams.useMsgLikelihoods = true
# addVariable!(subfg, :x0, Con)
cliqInitOrder = getCliqVarInitOrderUp(fg)
#TODO variable order can change, but should be more stable using OrderedDict. Testing for both.
# maybe remove cliqInitOrder == [:x0, :lm0, :x3, :x2, :x1] test in v0.31
@test cliqInitOrder == [:x0, :lm0, :x3, :x2, :x1] || cliqInitOrder == [:x0, :lm0, :x3, :x1, :x2]
solveTree!(fg)
# construct a message on :x3
msg = LikelihoodMessage(status=IIF.UPSOLVED)
seps = [:x3]
for vid in seps
var = DFG.getVariable(fg, vid)
if isInitialized(var)
msg.belief[var.label] = TreeBelief(var)
end
end
addMsgFactors!(fg, msg, IIF.UpwardPass)
# less sensitive to order given msgjoint message updates
@test intersect(getCliqVarInitOrderUp(fg), [:x3, :x0, :lm0, :x2, :x1]) |> length == 5
# construct a message on :lm0,:x2
msg = LikelihoodMessage(status=IIF.UPSOLVED, hasPriors=false)
seps = [:lm0, :x2]
for vid in seps
var = DFG.getVariable(fg, vid)
if isInitialized(var)
msg.belief[var.label] = TreeBelief(var)
end
end
addMsgFactors!(fg, msg, IIF.UpwardPass)
@warn("disabled getCliqVarInitOrderUp test until #1010 is closed")
# @test getCliqVarInitOrderUp(fg) == [:x3, :x0, :lm0, :x1, :x2]
# test order with mixture prior #998
cv = 3.0
doorPrior = Mixture(Prior,
[Normal(-100,cv);Normal(0,cv);Normal(100,cv);Normal(300,cv)],
[1/4;1/4;1/4;1/4] )
fg = initfg()
fg.solverParams.graphinit = false
addVariable!(fg, :x0, ContinuousScalar)
addFactor!(fg, [:x0], doorPrior)
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg,[:x0; :x1], LinearRelative( Normal(0.0,1.0)))
@test lsfPriors(fg) == [:x0f1]
@test getCliqVarInitOrderUp(fg) == [:x0, :x1]
end | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 4790 | # test case to enforce consistency in joint gibbs
using Test
using IncrementalInference
##
@testset "test disjoint up clique message joint subgraph" begin
## test case with disjoint clique joint subgraph
fg = initfg()
addVariable!(fg, :x0, ContinuousEuclid{2})
addVariable!(fg, :x1, ContinuousEuclid{2})
addVariable!(fg, :x2, ContinuousEuclid{2})
initVariable!(fg, :x0, [randn(2) for _ in 1:100])
initVariable!(fg, :x1, [randn(2) .+ 10 for _ in 1:100])
initVariable!(fg, :x2, [randn(2) .+ 20 for _ in 1:100])
addFactor!(fg , [:x0; :x1], LinearRelative(MvNormal([10.0;10], diagm([1.0;1]))))
addFactor!(fg , [:x1; :x2], LinearRelative(MvNormal([10.0;10], diagm([1.0;1]))))
addVariable!(fg, :x3, ContinuousEuclid{2})
addFactor!( fg, [:x2; :x3], EuclidDistance(Normal(10, 1)) )
addFactor!( fg, [:x0; :x3], EuclidDistance(Normal(30, 1)), graphinit=false )
##
initAll!(fg)
# drawGraph(fg, show=true)
## test shortest path
# first path
tp1_ = [ :x0;:x0x1f1;:x1;:x1x2f1;:x2]
tp2_ = [ :x0;:x0x3f1;:x3;:x2x3f1;:x2]
pth = findShortestPathDijkstra(fg, :x0, :x2)
@test pth == tp1_ || pth == tp2_
pth = findShortestPathDijkstra(fg, :x0, :x2, typeFactors=[LinearRelative;])
@test pth == tp1_
# different path
pth = findShortestPathDijkstra(fg, :x0, :x2, typeFactors=[EuclidDistance;])
@test pth == tp2_
##
isHom, typeName = isPathFactorsHomogeneous(fg, :x0, :x2)
@test isHom
@test length(typeName) == 1
@test typeName[1].name == :LinearRelative
## use a specific solve order
vo = [:x3; :x1; :x2; :x0] # getEliminationOrder(fg)
tree = buildTreeReset!(fg, vo)
##
# drawTree(tree, show=true)
## Child clique subgraph
cliq2 = getClique(tree,:x3)
# drawGraph(cfg2, show=true)
##
cfg2 = buildCliqSubgraph(fg, cliq2)
jointmsg = IIF._generateMsgJointRelativesPriors(cfg2, :default, cliq2)
@info "update these jointmsg test after #1010"
@test intersect( keys(jointmsg.priors) , [:x0; :x2] ) |> length == 2
@test length(jointmsg.relatives) == 0
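# (added note: :x0 and :x2 connect through mixed factor types here, so no single
# relative likelihood can summarize the separator and the up message falls back
# to per-variable priors -- contrast with the homogeneous case in the next testset)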
##
# retlist = addLikelihoodsDifferentialCHILD!([:x2; :x0], cfg2)
##
getSolverParams(fg).useMsgLikelihoods = true
tree = solveTree!(fg, eliminationOrder=vo);
## get up message from child clique
msgBuf = IIF.getMessageBuffer(getClique(tree, :x3))
msg = msgBuf.upTx
# only priors for these mixed factors, before TAF
@test msg.jointmsg.priors |> length == 2
@test msg.jointmsg.relatives |> length == 0
##
end
@testset "test homogeneous up clique message joint subgraph" begin
## test case with homogeneous clique joint subgraph
fg = initfg()
addVariable!(fg, :x0, ContinuousEuclid{2})
addVariable!(fg, :x1, ContinuousEuclid{2})
addVariable!(fg, :x2, ContinuousEuclid{2})
initVariable!(fg, :x0, [randn(2) for _ in 1:100])
initVariable!(fg, :x1, [randn(2) .+ 10 for _ in 1:100])
initVariable!(fg, :x2, [randn(2) .+ 20 for _ in 1:100])
addFactor!(fg , [:x0; :x1], LinearRelative(MvNormal([10.0;10], diagm([1.0;1]))))
addFactor!(fg , [:x1; :x2], LinearRelative(MvNormal([10.0;10], diagm([1.0;1]))))
addVariable!(fg, :x3, ContinuousEuclid{2})
addFactor!( fg, [:x2; :x3], LinearRelative(MvNormal([10.0;10], diagm([1.0;1]))) )
addFactor!( fg, [:x0; :x3], LinearRelative(MvNormal([10.0;10], diagm([1.0;1]))) )
##
initAll!(fg)
# drawGraph(fg, show=true)
## test shortest path
# two candidate shortest paths
tp1_ = [ :x0;:x0x1f1;:x1;:x1x2f1;:x2]
tp2_ = [ :x0;:x0x3f1;:x3;:x2x3f1;:x2]
pth = findShortestPathDijkstra(fg, :x0, :x2)
@test pth == tp1_ || pth == tp2_
pth = findShortestPathDijkstra(fg, :x0, :x2, typeFactors=[LinearRelative;])
@test pth == tp1_ || pth == tp2_
# different path
pth = findShortestPathDijkstra(fg, :x0, :x2, typeFactors=[EuclidDistance;])
@test pth == Symbol[]
##
vo = [:x3; :x1; :x2; :x0] # getEliminationOrder(fg)
tree = buildTreeReset!(fg, vo)
cliq2 = getClique(tree,:x3)
cfg2 = buildCliqSubgraph(fg, cliq2)
# drawGraph(cfg2, show=true)
##
separators = getCliqSeparatorVarIds(cliq2)
jointrelatives = addLikelihoodsDifferentialCHILD!( cfg2, separators )
allClasses = IIF._findSubgraphsFactorType( cfg2, jointrelatives, separators )
@test length(allClasses) == 1
@test intersect(allClasses[1], separators) |> length == 2
##
jointmsg = IIF._generateMsgJointRelativesPriors(cfg2, :default, cliq2)
##
@info "update these jointmsg test after #1010"
@test intersect( jointmsg.relatives[1].variables , [:x0; :x2] ) |> length == 2
@test length(keys(jointmsg.priors)) == 0
## similar during solving
getSolverParams(fg).useMsgLikelihoods = true
tree = solveTree!(fg, eliminationOrder=vo);
## get up message from child clique
msgBuf = IIF.getMessageBuffer(getClique(tree, :x3))
msg = msgBuf.upTx
## only relatives (no priors) for these homogeneous factors, before TAF
@test intersect( msg.jointmsg.relatives[1].variables , [:x0; :x2] ) |> length == 2
@test length(keys(msg.jointmsg.priors)) == 0
##
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 2190 | # another tree test
using IncrementalInference
using Test
@testset "Variable ordering Bayes tree member check." begin
fg = generateGraph_Kaess()
# Choose specific variable ordering and perform check.
vo = [:l1, :l2, :x1, :x2, :x3]
tree = buildTreeReset!(fg, vo)
@test vo == tree.eliminationOrder
@test vo == getEliminationOrder(tree)
end
@testset "Test Caesar Ring 1D symbolic tree construction" begin
fg = generateGraph_CaesarRing1D()
# drawGraph(fg, show=true)
eo = [:x0;:x2;:x4;:x6;:x1;:l1;:x5;:x3;]
tree = buildTreeReset!(fg,eo)
# drawTree(tree, show=true)
@test length(tree.cliques) == 6
C0 = getClique(tree, :x3)
@test intersect( C0 |> getFrontals, [:x3; :x5; :l1]) |> length == 3
@test C0 |> getCliqSeparatorVarIds |> length == 0
cC0 = getChildren(tree, C0)
@test cC0 |> length == 3
C1 = getClique(tree, :x1)
@test C1 in cC0
@test C1 |> getFrontals == [:x1;]
@test intersect(C1 |> getCliqSeparatorVarIds, [:x3;:l1]) |> length == 2
cC1 = getChildren(tree, C1)
@test cC1 |> length == 2
C4 = getClique(tree, :x2)
@test C4 in cC1
@test C4 |> getFrontals == [:x2;]
@test intersect(C4 |> getCliqSeparatorVarIds, [:x3;:x1]) |> length == 2
C5 = getClique(tree, :x0)
@test C5 in cC1
@test C5 |> getFrontals == [:x0;]
@test intersect(C5 |> getCliqSeparatorVarIds, [:l1;:x1]) |> length == 2
C2 = getClique(tree, :x6)
@test C2 in cC0
@test C2 |> getFrontals == [:x6;]
@test intersect(C2 |> getCliqSeparatorVarIds, [:l1;:x5]) |> length == 2
C3 = getClique(tree, :x4)
@test C3 in cC0
@test C3 |> getFrontals == [:x4;]
@test intersect(C3 |> getCliqSeparatorVarIds, [:x3;:x5]) |> length == 2
end
@testset "Test tree formation and consistent APIs" begin
fg = generateGraph_TestSymbolic()
#writeGraphPdf(fg, show=true)
eo = [:x1; :l3; :l1; :x5; :x2; :l2; :x4; :x3]
tree = buildTreeReset!(fg,eo)
# drawTree(tree, show=true)
@warn "TODO, complete further testing on tree formation"
## test variable order APIs consistent, see issue 499
vo = getEliminationOrder(fg)
tree1 = buildTreeReset!(fg, vo)
# drawTree(tree1, show=true)
tree2 = buildTreeReset!(fg)
# drawTree(tree2, show=true)
@test getEliminationOrder(tree1) == getEliminationOrder(tree2)
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 501 | using Test
using IncrementalInference
@testset "test Manual Init - distribution" begin
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
belief = Normal(1.,0.1)
initVariable!(fg, :x0, belief)
pts = getPoints(fg, :x0)
M = getManifold(fg, :x0)
@test isapprox(mean(M, pts),[1],atol=0.1)
@test isapprox(std(M, pts),0.1,atol=0.1)
# test var api
v = getVariable(fg, :x0)
initVariable!(v, belief)
pts = getVal(v)
@test isapprox(mean(M, pts),[1],atol=0.1)
@test isapprox(std(M, pts),0.1,atol=0.1)
end | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 4347 | ##
using IncrementalInference
using Test
using TensorCast
# using Statistics
# using Manifolds # should be done within regular exports
# using Pkg
# Pkg.resolve()
##
@testset "test Mixture sampling" begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
mp = Mixture(Prior, (Normal(), Normal(10,1)),(1/2,1/2) )
addFactor!(fg, [:x0], mp)
##
pts_ = approxConv(fg, :x0f1, :x0)
@cast pts[i,j] := pts_[j][i]
N = size(pts,2)
@test 0.2*N < sum( -5 .< pts .< 5 )
@test 0.2*N < sum( 5 .< pts .< 15 )
@test sum( 15 .< pts ) < 0.1*N
@test sum( pts .< -5 ) < 0.1*N
@test sum( 3 .< pts .< 7 ) < 0.1*N
# using KernelDensityEstimatePlotting, Gadfly
# Gadfly.set_default_plot_size(25cm,20cm)
# plotKDE(kde!(pts))
##
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x0], Prior(Normal()), graphinit=false)
initVariable!(fg, :x0, [zeros(1) for _ in 1:100])
mlr = Mixture(LinearRelative, (Normal(), Normal(10,1)),(1/2,1/2) )
addFactor!(fg, [:x0;:x1], mlr, graphinit=false)
##
pts_ = approxConv(fg, :x0x1f1, :x1)
@cast pts[i,j] := pts_[j][i]
# plotKDE(kde!(pts))
##
N = size(pts,2)
@test 0.2*N < sum( -5 .< pts .< 5 )
@test 0.2*N < sum( 5 .< pts .< 15 )
@test sum( 15 .< pts ) < 0.1*N
@test sum( pts .< -5 ) < 0.1*N
@test sum( 3 .< pts .< 7 ) < 0.1*N
##
end
@testset "test packing of Mixture" begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
mp = Mixture(Prior, [Normal(); Normal(10,1)], [0.5;0.5])
f0 = addFactor!(fg, [:x0;], mp)
mr = Mixture(LinearRelative, (fancy=manikde!(ContinuousEuclid(1), [randn(1) for _ in 1:75]), naive=Normal(0,10)), [0.4;0.6])
f1 = addFactor!(fg, [:x0;:x1], mr)
##
pf0 = DFG.packFactor(f0)
pf1 = DFG.packFactor(f1)
# now test unpacking
fg_ = initfg();
addVariable!(fg_, :x0, ContinuousScalar)
addVariable!(fg_, :x1, ContinuousScalar)
##
f0_ = DFG.unpackFactor(fg_, pf0)
f1_ = DFG.unpackFactor(fg_, pf1)
##
# ENV["JULIA_DEBUG"] = "DistributedFactorGraphs"
@warn("Skipping pack/unpack compareFactor test for `timezone` and `zone`")
@show typeof(f1)
@show typeof(f1_)
@show typeof(getSolverData(f1).fnc.varValsAll[]);
@show typeof(getSolverData(f1_).fnc.varValsAll[]);
@test DFG.compareFactor(f1, f1_, skip=[:components;:labels;:timezone;:zone;:vartypes;:fullvariables;:particleidx;:varidx])
@test IIF._getCCW(f1).usrfnc!.components.naive == IIF._getCCW(f1_).usrfnc!.components.naive
# already ManifoldKernelDensity
A = IIF._getCCW(f1).usrfnc!.components.fancy
B = IIF._getCCW(f1_).usrfnc!.components.fancy
# A = ManifoldBelief(Euclid, IIF._getCCW(f1).usrfnc!.components.fancy )
# B = ManifoldBelief(Euclid, IIF._getCCW(f1_).usrfnc!.components.fancy )
@test mmd(A,B) < 1e-6
##
end
@testset "test simple Mixture" begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x0], Prior(Normal(0.0,0.1)))
##
# require default ::UnitScaling constructor from all factors
mlr = Mixture(LinearRelative(I), [Normal(-1.0, 0.1), Normal(1.0, 0.1)], Categorical([0.5; 0.5]))
# test serialization while we are here
pmlr = convert(PackedMixture, mlr)
umlr = convert(Mixture, pmlr)
@test mlr.mechanics == umlr.mechanics
@test mlr.components == umlr.components
@test mlr.diversity == umlr.diversity
mlr = Mixture(LinearRelative, [Normal(-1.0, 0.1), Normal(1.0, 0.1)], Categorical([0.5; 0.5]))
addFactor!(fg, [:x0,:x1], mlr)
# To look at your factor graph
# if false
# using GraphPlot
# using DistributedFactorGraphs
# plotDFG(fg)
# end
tree = solveTree!(fg)
##
btd = getBelief(getVariable(fg, :x0))
@test isapprox(mean(getKDEfit(btd,distribution=Normal)), 0.0; atol=0.1)
@test isapprox(std(getKDEfit(btd,distribution=Normal)), 0.1; atol=0.05)
btd = getBelief(getVariable(fg, :x1))
pts_ = getPoints(btd)
@cast pts[i,j] := pts_[j][i]
pts_p = pts[pts .>= 0]
pts_n = pts[pts .< 0]
nfit_p = fit(Normal, pts_p)
@test isapprox(mean(nfit_p), 1.0; atol=0.1)
@test isapprox(std(nfit_p), 0.14; atol=0.05) #TODO confirm the correct value and tolerance
nfit_n = fit(Normal, pts_n)
@test isapprox(mean(nfit_n), -1.0; atol=0.1)
@test isapprox(std(nfit_n), 0.14; atol=0.05) #TODO confirm the correct value and tolerance
# To look at your results
# if false
# using RoMEPlotting
# plotKDE(fg, ls(fg))
# end
##
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 6223 | using IncrementalInference
using IncrementalInference.Optim
using Test
##
@testset "test parametric mixture" begin
## Test simple mixture prior
fg = LocalDFG(;solverParams=SolverParams(algorithms=[:default, :parametric]))
addVariable!(fg, :x0, ContinuousScalar)
p = Mixture(Prior(I), [Normal(0.8, 0.4), Normal(1.0, 0.1)], Categorical([0.1; 0.9]))
f = addFactor!(fg, [:x0], p)
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x0,:x1], LinearRelative(Normal(1.0,0.1)))
options = Optim.Options(time_limit = 100,
iterations = 10000,
show_trace = true,
show_every = 10,
allow_f_increases=true,
g_tol = 1e-6,
)
algorithm = Optim.NelderMead
vardict, result, varIds, Σ = IIF.solveGraphParametric(fg; options, algorithm)
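# (added note: `vardict` maps each variable label to its parametric estimate,
# with the mean in `.val` and the covariance in `.cov`, as checked below)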
@test isapprox(vardict[:x0].val[1], 1, atol = 0.05)
@test isapprox(vardict[:x0].cov[1], 0.01, atol = 0.001)
##
fg = LocalDFG(;solverParams=SolverParams(algorithms=[:default, :parametric]))
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x0], Prior(Normal(0.0,0.1)))
mlr = Mixture(LinearRelative(I), [Normal(-1.0, 0.2), Normal(1.0, 0.1)], Categorical([0.5; 0.5]))
addFactor!(fg, [:x0,:x1], mlr)
addVariable!(fg, :l1, ContinuousScalar)
p = Mixture(Prior(I), [Normal(-1.5, 0.1), Normal(0.9, 0.2)], Categorical([0.5; 0.5]))
# p = Prior(Normal(0.9, 0.1))
addFactor!(fg, [:l1], p)
addFactor!(fg, [:x1,:l1], LinearRelative(Normal(0.0,0.1)))
vardict, result, varIds, Σ = IIF.solveGraphParametric(fg)
@test isapprox(vardict[:x0].val[1], 0, atol = 0.1)
@test isapprox(vardict[:x1].val[1], 1, atol = 0.1)
@test isapprox(vardict[:l1].val[1], 1, atol = 0.1)
## ContinuousEuclid(2) prior
fg = LocalDFG(;solverParams=SolverParams(algorithms=[:default, :parametric]))
addVariable!(fg, :x0, ContinuousEuclid(2))
p = Mixture(Prior(MvNormal(2,1.0)), [MvNormal([0.8, 0.5], [0.4, 0.4]), MvNormal([1.0, 0.5], [0.1, 0.1])], Categorical([0.1; 0.9]))
f = addFactor!(fg, [:x0], p)
vardict, result, varIds, Σ = IIF.solveGraphParametric(fg)
vardict
@test isapprox(vardict[:x0].val[1], 1.0, atol = 0.01)
@test isapprox(vardict[:x0].val[2], 0.5, atol = 0.01)
@test isapprox(vardict[:x0].cov[1,1], 0.01, atol = 0.001)
@test isapprox(vardict[:x0].cov[2,2], 0.01, atol = 0.001)
# x = collect(0.5:0.01:1.5)
# y = collect(0.0:0.01:1.0)
# cfm = IIF.CalcFactorMahalanobis(f)
# xy = Vector{Float64}[]
# x = 0.5:0.01:1.5
# y = 0:0.01:1
# for x=x,y=y
# push!(xy, [x,y])
# end
# r = cfm.(xy)
# surface(x,y,r)
##
end
if false
fg = LocalDFG(;solverParams=SolverParams(algorithms=[:default, :parametric]))
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x0], Prior(Normal(0.0,0.1)))
mlr = Mixture(LinearRelative(I), [Normal(-1.0, 0.2), Normal(1.0, 0.1)], Categorical([0.5; 0.5]))
# mlr = Mixture(LinearRelative(I), [Normal(-0.2, 0.05), Normal(0.2, 0.1)], Categorical([0.5; 0.5]))
# mlr = Mixture(LinearRelative(I), [Normal(0.0, 0.9), Normal(0.0, 0.1)], Categorical([0.5; 0.5]))
# mlr = Mixture(LinearRelative(I), [Normal(-0.2, 0.1), Normal(0.2, 0.1)], Categorical([0.9; 0.1]))
# mlr = Mixture(LinearRelative(I), [Normal(-0.2, 0.15), Normal(0.2, 0.1)], Categorical([0.5; 0.5]))
# mlr = Mixture(LinearRelative(I), [Normal(-1.0, 0.2), Normal(1.0, 0.1)], Categorical([0.5; 0.5]))
addFactor!(fg, [:x0,:x1], mlr)
f = fg[:x0x1f1]
cfm = IIF.CalcFactorMahalanobis(f)
x1 = collect(-2:0.01:2)
# x1 = reverse(collect(-2:0.01:2))
# x1 = collect(-0.5:0.01:0.5)
r1 = cfm.(Ref([0.0]), x1)
# plot!(x1,r)
plot(x1,r1)
##
addVariable!(fg, :l1, ContinuousScalar)
p = Mixture(Prior(I), [Normal(-1.5, 0.1), Normal(0.9, 0.2)], Categorical([0.5; 0.5]))
# p = Prior(Normal(0.9, 0.1))
addFactor!(fg, [:l1], p)
addFactor!(fg, [:x1,:l1], LinearRelative(Normal(0.0,0.1)))
f = fg[:l1f1]
cfm = IIF.CalcFactorMahalanobis(f)
x1 = collect(-2:0.01:2)
r2 = cfm.(x1)
plot!(x1,r2)
# plot!(x1,r1+r2)
##
initAll!(fg)
solveTree!(fg)
plotKDE(fg, :x1)
plotKDE(fg, :l1)
##
using IncrementalInference.Optim
options = Optim.Options(time_limit = 100,
iterations = 1000,
show_trace = true,
show_every = 1,
allow_f_increases=true,
g_tol = 1e-6,
)
algorithm = Optim.NelderMead
algorithm = Optim.BFGS
algorithmkwargs=(linesearch = Optim.LineSearches.HagerZhang(),)
# algorithmkwargs=(linesearch = Optim.LineSearches.Static(),)
vardict, result, varIds, Σ = IIF.solveGraphParametric(fg; algorithm, options, algorithmkwargs)
vardict
##
IIF.updateParametricSolution!(fg, vardict)
##
end
##
if false
using RoME
fg = LocalDFG(;solverParams=SolverParams(algorithms=[:default, :parametric]))
pr_noise = [0.01, 0.01, 0.001]
od_noise_1 = [0.5; 0.5; 0.2]
od_noise_2 = [0.05; 0.05; 0.02]
#x0 prior
addVariable!(fg, :x0, Pose2)
prpo = MvNormal([0,0,-pi], pr_noise)
addFactor!(fg, [:x0], PriorPose2(prpo))
# addFactor!(fg, [:x0], PriorPose2(MvNormal(rand(prpo), pr_noise)))
#x0 to x1
addVariable!(fg, :x1, Pose2)
pp1 = MvNormal([1.0,0,0], od_noise_1)
pp2 = MvNormal([1.0,0,0], od_noise_2)
mpp = Mixture(Pose2Pose2(I), [pp1, pp2], Categorical([0.5; 0.5]))
f = addFactor!(fg, [:x0,:x1], mpp)
cfm = IIF.CalcFactorMahalanobis(f)
##
xyθ = Vector{Float64}[]
for x = 0.5:0.01:1.5, y = 0, θ = 0
push!(xyθ, [x,y,θ])
end
x = 0.5:0.01:1.5
r = cfm.(Ref([0.0; 0.0; 0.0]), xyθ)
plot(x,r)
##
#x1 to x2
# addVariable!(fg, :x2, Pose2)
# pp1 = MvNormal([1.0,0,0], od_noise_1)
# pp2 = MvNormal([1.0,0,0], od_noise_2)
# mpp = Mixture(Pose2Pose2(I), [pp1, pp2], Categorical([0.5; 0.5]))
# addFactor!(fg, [:x1,:x2], mpp)
# vardict, result, varIds, Σ = solveFactorGraphParametric(fg)
# vardict
##
using IncrementalInference.Optim
options = Optim.Options(time_limit = 100,
iterations = 10000,
show_trace = true,
show_every = 1,
allow_f_increases=true,
g_tol = 1e-6,
)
algorithm = Optim.NelderMead
vardict, result, varIds, Σ = IIF.solveGraphParametric(fg; algorithm, autodiff=:finite)
vardict
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 2095 | # from #605
# using Revise
using IncrementalInference
using Test
using TensorCast
##
@testset "test mixture prior" begin
##
# init graph
fg = initfg()
N = 100
getSolverParams(fg).N = N;
# add first variable
addVariable!(fg, :x0, ContinuousScalar)
# also test AliasingScalingSampler
v = rand(50)
v[20:29] .+= 5*rand(10)
v ./= sum(v)
bss = AliasingScalarSampler(collect(1:50), v)
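# Illustrative aside (added): the sampler behaves like any SamplableBelief and,
# assuming the `rand` support the solver uses internally, can be drawn from directly:
# rand(bss, 5)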
# add bi-modal mixture prior
Prior0 = Mixture(Prior,(a=Normal(-5.0,1.0), b=Uniform(0.0,1.0)), (0.5,0.5))
Prior0 = Mixture(Prior,(Normal(-5.0,1.0), Normal(0.0,1.0)), (0.5,0.5))
Prior0 = Mixture(Prior,[Normal(-5.0,1.0), Normal(0.0,1.0)], (0.5,0.5))
Prior0 = Mixture(Prior,(Normal(-5.0,1.0), Normal(0.0,1.0)), [0.5;0.5])
Prior0 = Mixture(Prior,[Normal(-5.0,1.0), Normal(0.0,1.0)], [0.5;0.5])
Prior0 = Mixture(Prior,(Normal(-5.0,1.0), bss), Categorical([0.5;0.5]))
f1 = addFactor!(fg, [:x0], Prior0)
# also test serialization of AliasingScalarSampler
saveDFG("/tmp/test_fg_bss", fg)
# check numerics -- sampling is now handled via CalcFactor{<:Mixture}
smpls_ = approxConv(fg, :x0f1, :x0)
# smpls, = getSample(Prior0, N) # ,lb=
# lazy
@cast smpls[i,j] := smpls_[j][i]
# should be a balance of particles
# @test sum(lb .== 1) - sum(lb .== 2) |> abs < 0.3*N
@test sum(smpls .< -2.5) - sum(-2.5 .< smpls) |> abs < 0.35*N
# solve
solveTree!(fg);
marginalPts_ = getBelief(fg, :x0) |> getPoints
# lazy
@cast marginalPts[i,j] := marginalPts_[j][i]
# check solver solution consistent too
@test sum(marginalPts .< -2.5) - sum(-2.5 .< marginalPts) |> abs < 0.35*N
##
end
@testset "Serialization of Mixture(Prior,..) including a AliasingScalarSampler" begin
##
fg_ = loadDFG("/tmp/test_fg_bss")
N = getSolverParams(fg_).N
solveTree!(fg_);
marginalPts_ = getBelief(fg_, :x0) |> getPoints
# lazy
@cast marginalPts[i,j] := marginalPts_[j][i]
# check solver solution consistent too
@test sum(marginalPts .< -2.5) - sum(-2.5 .< marginalPts) |> abs < 0.35*N
# cleanup
Base.rm("/tmp/test_fg_bss.tar.gz")
##
end
# using RoMEPlotting
# Gadfly.set_default_plot_size(35cm,20cm)
# # plot the results
# plotKDE(fg, :x0)
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 5947 | # load required packages
# using Revise
using IncrementalInference
using Test
## during dev it is clear the functionality passes roughly 8 out of 10 runs (the Test API makes it difficult to write deterministic-only tests for such stochastic behavior)
## parameters
lm_prior_noise = 0.01
meas_noise = 0.25
odom_noise = 0.1
n_samples = 200
# initialize mean landmark locations
l0 = 0.0
l1 = 10.0
l2 = 20.0
l3 = 40.0
# "Ground-truth" robot poses
x0 = 0.0
x1 = 10.0
x2 = 20.0
x3 = 40.0
##
@testset "Basic 3 door, trinary multihypothesis tests..." begin
## Initialize empty factor graph
fg = initfg()
getSolverParams(fg).N = n_samples
getSolverParams(fg).gibbsIters = 5
# forcefully work with ccw.varValsAll to check the pointers are pointing to getVal.(variables)
getSolverParams(fg).graphinit = false
# Place strong prior on locations of three "doors"
addVariable!(fg, :l0, ContinuousScalar, N=n_samples)
addFactor!(fg, [:l0], Prior(Normal(l0, lm_prior_noise)))
addVariable!(fg, :l1, ContinuousScalar, N=n_samples)
addFactor!(fg, [:l1], Prior(Normal(l1, lm_prior_noise)))
addVariable!(fg, :l2, ContinuousScalar, N=n_samples)
addFactor!(fg, [:l2], Prior(Normal(l2, lm_prior_noise)))
addVariable!(fg, :l3, ContinuousScalar, N=n_samples)
addFactor!(fg, [:l3], Prior(Normal(l3, lm_prior_noise)))
# Add first pose
addVariable!(fg, :x0, ContinuousScalar, N=n_samples)
# Make first "door" measurement
f1 = addFactor!(fg, [:x0; :l0; :l1; :l2; :l3], LinearRelative(Normal(0, meas_noise)), multihypo=[1.0; (1/4 for _=1:4)...])
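# (added note: the generator splat expands to multihypo=[1.0, 0.25, 0.25, 0.25, 0.25],
# i.e. :x0 is certain while the measurement associates with one of the four
# landmarks with equal probability; the weights over the uncertain variables sum to 1)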
# check pointers before init
a,b = IIF._checkVarValPointers(fg, getLabel(f1))
for i in 1:length(a)
@test a[i] == b[i]
end
# do init (and check that the var pointers did not change)
doautoinit!(fg ,:l0)
doautoinit!(fg ,:l1)
doautoinit!(fg ,:l2)
doautoinit!(fg ,:l3)
# make sure approxConv is as expected
@test isInitialized.(fg, [:l0;:l1;:l2;:l3]) |> all
x0_beforeConv = getVal(fg, :x0) |> deepcopy
# do the computation
X0 = approxConvBelief(fg, getLabel(f1), :x0)
# smpls = sampleFactor(fg, f1.label,10)
# check that the x0 variable memory has not been changed
@test all(norm.(x0_beforeConv - getVal(fg, :x0)) .< 1e-10)
# specifically after approxConv to :x0
a_,b_ = IIF._checkVarValPointers(fg, getLabel(f1))
# deep copy on destination memory for x0, given just did approxConv to x0s
@test a_[1] != b_[1]
@test a_[2] == b_[2]
@test a_[3] == b_[3]
@test a_[4] == b_[4]
@test a_[5] == b_[5]
##
# should have four equal sized peaks at landmark locations
@test 0.1 < X0([l0])[1]
@test 0.1 < X0([l1])[1]
@test 0.1 < X0([l2])[1]
@test 0.1 < X0([l3])[1]
# Add second pose
addVariable!(fg, :x1, ContinuousScalar, N=n_samples)
# Gaussian transition model
addFactor!(fg, [:x0; :x1], LinearRelative(Normal(x1-x0, odom_noise)))
# Make second "door" measurement
addFactor!(fg, [:x1; :l0; :l1; :l2; :l3], LinearRelative(Normal(0, meas_noise)), multihypo=[1.0; (1/4 for _=1:4)...])
##
solveGraph!(fg)
##
# check there is enough likelihood in the right places
@test 0.1 < getBelief(fg, :x0)([l0])[1]
@test 0.1 < getBelief(fg, :x0)([l1])[1]
@test getBelief(fg, :x0)([l2])[1] < 0.3
@test getBelief(fg, :x1)([l0])[1] < 0.3
@test 0.1 < getBelief(fg, :x1)([l1])[1]
@test 0.1 < getBelief(fg, :x1)([l2])[1]
##
for i in 1:2
solveGraph!(fg);
end
##
# check there is enough likelihood in the right places
@test 0.1 < getBelief(fg, :x0)([l0])[1]
@test 0.1 < getBelief(fg, :x0)([l1])[1]
@test getBelief(fg, :x0)([l2])[1] < 0.03
# @test getBelief(fg, :x1)([l0])[1] < 0.03 # why add this?
@test 0.1 < getBelief(fg, :x1)([l1])[1]
@test 0.1 < getBelief(fg, :x1)([l2])[1]
## Add one more pose/odometry to invoke issue #236
# Add third pose
addVariable!(fg, :x2, ContinuousScalar, N=n_samples)
addFactor!(fg, [:x1; :x2], LinearRelative(Normal(x2-x1, odom_noise)))
## Solve graph
tree = solveTree!(fg)
# drawGraph(fg)
# drawTree(tree, show=true)
##
# check there is enough likelihood in the right places
@test 0.05 < getBelief(fg, :x0)([l0])[1]
@test 0.05 < getBelief(fg, :x0)([l1])[1]
@test 0.05 < getBelief(fg, :x1)([l1])[1]
@test 0.05 < getBelief(fg, :x1)([l2])[1]
dx = x2-x1
@test 0.05 < getBelief(fg, :x2)([l1 + dx])[1]
@test 0.05 < getBelief(fg, :x2)([l2 + dx])[1]
if false
@test getBelief(fg, :x0)([l2])[1] < 0.03
@test getBelief(fg, :x1)([l0])[1] < 0.03
@test getBelief(fg, :x2)([l0 + dx])[1] < 0.03
else
@error("Suppressing parts of multihypo tests (stochastic pass or fail results in poor test quality")
end
##
# Add fourth pose
addVariable!(fg, :x3, ContinuousScalar, N=n_samples)
addFactor!(fg, [:x2; :x3], LinearRelative(Normal(x3-x2, odom_noise)))
# Make third "door" measurement
addFactor!(fg, [:x3; :l0; :l1; :l2; :l3], LinearRelative(Normal(0, meas_noise)), multihypo=[1.0; (1/4 for _=1:4)...])
##
solveGraph!(fg)
##
@error "must restore a few multimodal tests"
if false
@test isapprox(mean(getBelief(fg, :x0))[1], x0; atol = 3.0)
@test isapprox(mean(getBelief(fg, :x1))[1], x1; atol = 3.0)
# @test isapprox(mean(getBelief(fg, :x2))[1], x2; atol = 3.0)
@test isapprox(mean(getBelief(fg, :x3))[1], x3; atol = 3.0)
@test isapprox(mean(getBelief(fg, :l0))[1], l0; atol = 3.0)
@test isapprox(mean(getBelief(fg, :l1))[1], l1; atol = 3.0)
@test isapprox(mean(getBelief(fg, :l2))[1], l2; atol = 3.0)
@test isapprox(mean(getBelief(fg, :l3))[1], l3; atol = 3.0)
##
# check the PPEs are the same
@test isapprox(getPPE(fg, :x0).suggested[1], x0; atol = 3.0)
@test isapprox(getPPE(fg, :x1).suggested[1], x1; atol = 3.0)
@test isapprox(getPPE(fg, :x2).suggested[1], x2; atol = 3.0)
@test isapprox(getPPE(fg, :x3).suggested[1], x3; atol = 3.0)
@test isapprox(getPPE(fg, :l0).suggested[1], l0; atol = 3.0)
@test isapprox(getPPE(fg, :l1).suggested[1], l1; atol = 3.0)
@test isapprox(getPPE(fg, :l2).suggested[1], l2; atol = 3.0)
@test isapprox(getPPE(fg, :l3).suggested[1], l3; atol = 3.0)
end
##
end
# using RoMEPlotting
# Gadfly.set_default_plot_size(35cm,25cm)
#
# plotBelief(fg, sortDFG(ls(fg, r"x")))
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 3733 | using Test
using IncrementalInference
using Random
##
@testset "test basic multihypo" begin
## A simple multihypo example
Random.seed!(42) # The answer to reproducible noise
fg = LocalDFG(solverParams=SolverParams(graphinit=false, gibbsIters=5, spreadNH=5.0))
pRight = 0.99
pWrong = 0.01
pr_noise = 0.01
od_noise = 0.1
lm_noise = 0.01
# true positions
# x0 at 0
# x1 at 1
# l1 at 1
# l2 at 2
#x0 prior
addVariable!(fg, :x0, ContinuousScalar)
prpo = Normal(0.0, pr_noise)
addFactor!(fg, [:x0], Prior(Normal(rand(prpo), pr_noise)))
#l1 and l2
addVariable!(fg, :l1, ContinuousScalar, tags=[:LANDMARK])
addVariable!(fg, :l2, ContinuousScalar, tags=[:LANDMARK])
#x0 to l1 or l2
p2ln = Normal(1.0, lm_noise)
p2p = LinearRelative(Normal(rand(p2ln), lm_noise))
addFactor!(fg, [:x0; :l1; :l2], p2p, multihypo = [1, pRight, pWrong])
# addFactor!(fg, [:x0; :l1], p2p) #this one used for sanity check
#x0 to x1
addVariable!(fg, :x1, ContinuousScalar)
pp = Normal(1.0, od_noise)
addFactor!(fg, [:x0,:x1], LinearRelative(Normal(rand(pp), od_noise)))
#x1 to l1 or l2
p2ln = Normal(0.0, lm_noise)
p2p = LinearRelative(Normal(rand(p2ln), lm_noise))
addFactor!(fg, [:x1; :l1; :l2], p2p, multihypo = [1, pRight, pWrong])
# addFactor!(fg, [:x1; :l1], p2p) #this one used for sanity check
#x1 to l2 or l1
p2ln = Normal(1.0, lm_noise)
p2p = LinearRelative(Normal(rand(p2ln), lm_noise))
addFactor!(fg, [:x1; :l2; :l1], p2p, multihypo = [1, pRight, pWrong])
# addFactor!(fg, [:x1; :l2], p2p) #this one used for sanity check
##
# prescribe an elimination order to get a single clique
eo = [:l2,:x1,:x0,:l1]
# fg.solverParams.graphinit=true
smtasks = Task[]
tree = solveTree!(fg, eliminationOrder=eo) #, smtasks=smtasks, recordcliqs=ls(fg));
# hists = fetchCliqHistoryAll!(smtasks)
# plotKDE(fg, ls(fg))
##
@test isapprox(DFG.getPPESuggested(fg, :x0)[], 0, atol = 0.2)
@test isapprox(DFG.getPPESuggested(fg, :x1)[], 1, atol = 0.2)
@test isapprox(DFG.getPPESuggested(fg, :l1)[], 1, atol = 0.2)
L2 = getBelief(fg, :l2)
npts = length(getPoints(L2))
pts = [2.0.+0.1*randn(1) for _ in 1:npts]
L2_ = manikde!(ContinuousScalar, pts)
# test that there is at least a mode present
@test mmd(L2_, L2, ContinuousScalar) < 1e-3
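# (added note: mmd is the maximum mean discrepancy between the two beliefs; a
# small value confirms :l2 retains a mode near 2.0)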
# @test isapprox(DFG.getPPESuggested(fg, :l2)[], 2, atol = 0.2)
##
end
@testset "test multihypo chain example (see #462)..." begin
##
l1 = -10.0
l2 = +10.0
lnoise = 1.0
x1 = 0
x2 = 0
Onoise = 0.1
fg = initfg()
addVariable!(fg, :x1, ContinuousScalar)
addVariable!(fg, :x2, ContinuousScalar)
addVariable!(fg, :l1, ContinuousScalar)
addVariable!(fg, :l1_0, ContinuousScalar)
addVariable!(fg, :l2, ContinuousScalar)
addVariable!(fg, :l2_0, ContinuousScalar)
# priors on two landmarks only
addFactor!(fg, [:l1], Prior(Normal(l1, lnoise)))
addFactor!(fg, [:l2], Prior(Normal(l2, lnoise)))
# relative constraints
addFactor!(fg, [:x1;:l1;:l1_0], LinearRelative(Normal(l1-x1, lnoise)), multihypo=[1;1/2;1/2])
addFactor!(fg, [:x2;:l2;:l2_0], LinearRelative(Normal(l2-x2, lnoise)), multihypo=[1;1/2;1/2])
addFactor!(fg, [:x1;:x2], LinearRelative(Normal(0, Onoise)))
tree = solveTree!(fg)
# drawTree(tree, show=true)
# expect x1 x2 to have at least one mode at 0
@test getPPE(fg, :x1).suggested[1] - x1 |> abs < 1.2
@test getPPE(fg, :x2).suggested[1] - x2 |> abs < 1.2
@test getPPE(fg, :l1).suggested[1] - l1 |> abs < 1.2
@test getPPE(fg, :l2).suggested[1] - l2 |> abs < 1.2
# l1_0, l2_0 should be nearby around l1 and l2, but cannot confirm 100%
@test getPPE(fg, :l1_0).suggested[1] - l1 |> abs < 10
@test getPPE(fg, :l2_0).suggested[1] - l2 |> abs < 10
##
end
# using RoMEPlotting
# Gadfly.set_default_plot_size(35cm,25cm)
#
# plotKDE(fg, [:l1;:l2])
# plotKDE(fg, [:l1_0;:l2_0])
# plotKDE(fg, [:x1;:x2])
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 5733 |
## Continue with loading packages
using DistributedFactorGraphs
using IncrementalInference
using TensorCast
using Test
##==============================================================================
## Setup
##==============================================================================
n_samples = 100
graphinit = false
# noises
lm_prior_noise = 1.0
meas_noise = 1.0
odom_noise = 1.0
# initialize mean landmark locations
l1 = -30.0
l2 = 30.0
# l3 = -40.0
p_meas = 0.4
p_map = 1 - p_meas
graphinit = false
##==============================================================================
# Initialize empty factor graph
@testset "test multihypo 1D..." begin
##
fg = initfg()
# fg.solverParams.graphinit = graphinit
fg.solverParams.N = n_samples
fg.solverParams.gibbsIters = 6
fg.solverParams.spreadNH = 0.3
# lp landmark prior information
# lm landmark measurement
addVariable!(fg, :lp1, ContinuousScalar)
addFactor!(fg, [:lp1], Prior(Normal(l1, lm_prior_noise)), graphinit=graphinit)
addVariable!(fg, :lp2, ContinuousScalar)
addFactor!(fg, [:lp2], Prior(Normal(l2, lm_prior_noise)), graphinit=graphinit)
addVariable!(fg, :x1, ContinuousScalar)
addVariable!(fg, :lm2, ContinuousScalar)
addFactor!(fg, [:x1; :lm2; :lp2], LinearRelative(Normal(20., meas_noise)), multihypo=[1.0; p_meas; p_map], graphinit=graphinit)
addVariable!(fg, :lm1, ContinuousScalar)
addFactor!(fg, [:x1; :lm1; :lp1], LinearRelative(Normal(-20., meas_noise)), multihypo=[1.0; p_meas; p_map], graphinit=graphinit)
#weak lp1 lm1 relation to nudge one of two symmetric options
# addFactor!(fg, [:lp1; :lm1], LinearRelative(Normal(0., 100.)))
# initAll!(fg)
# drawGraph(fg, show=true)
# getSolverParams(fg).drawtree = false
# getSolverParams(fg).showtree = false
# getSolverParams(fg).dbg = true
# getSolverParams(fg).multiproc = false
##
varor = [:x1, :lm1, :lm2, :lp1, :lp2]
# tree = buildTreeReset!(fg, varor)
tree = solveTree!(fg, eliminationOrder = varor);
# tree = solveTree!(fg, eliminationOrder = varor);
# plotKDE(fg, ls(fg))
##
# X should be at one of two modes
pts_ = getPoints(getBelief(fg, :x1))
@cast pts[i,j] := pts_[j][i]
@test 0.7*getSolverParams(fg).N < sum(-20 .< pts[:] .< 0) + sum(0 .< pts[:] .< 20)
pts_ = getPoints(getBelief(fg, :lp1))
@cast pts[i,j] := pts_[j][i]
@test 0.7*getSolverParams(fg).N < sum(-38 .< pts[:] .< -28)
pts_ = getPoints(getBelief(fg, :lp2))
@cast pts[i,j] := pts_[j][i]
@test 0.7*getSolverParams(fg).N < sum(28 .< pts[:] .< 38)
pts_ = getPoints(getBelief(fg, :lm1))
@cast pts[i,j] := pts_[j][i]
@test 0.1*getSolverParams(fg).N < sum(-38 .< pts[:] .< -25)
pts_ = getPoints(getBelief(fg, :lm2))
@cast pts[i,j] := pts_[j][i]
@test 0.1*getSolverParams(fg).N < sum(25 .< pts[:] .< 38)
# @test 0.2*getSolverParams(fg).N < sum(-18 .< getPoints(getBelief(fg, :lm1))[:] .< -5) ||
# 0.2*getSolverParams(fg).N < sum(5 .< getPoints(getBelief(fg, :lm2))[:] .< 15)
##
end
## Debug plotting below
#
# using RoMEPlotting
# Gadfly.set_default_plot_size(35cm, 20cm)
#
#
# # tree = solveTree!(fg)
# varIds = [ :x1, :lp1, :lp2, :lm1, :lm2]
# pkde = plotKDE(fg, varIds)
#
# 0
#
# # # using Plots
# #
# # pmm = StatsPlots.plot(pkde.layers[2].mapping[:x],pkde.layers[2].mapping[:y], label = string(varIds[1]))
# # for i = 3:6
# # StatsPlots.plot!(pmm, pkde.layers[i].mapping[:x],pkde.layers[i].mapping[:y], label=string(varIds[i-1]))
# # end
# # plot!(pmm, title = "MM-iSAM", xlims = (-50, 60), xticks = -50:10:50)
#
#
#
#
#
# p1 = plotLocalProduct(fg, :lm2)
# p2 = plotLocalProduct(fg, :lm1)
# p3 = plotLocalProduct(fg, :lp2)
# p4 = plotLocalProduct(fg, :lp1)
# p5 = plotLocalProduct(fg, :x1)
# h0 = hstack(p1,p2,p3,p4,p5)
#
# # treeProductUp(fg, tree, :x1, :x1)
#
#
#
# stuff = localProduct(fg,:lm2)
# initVariable!(fg,:lm2, stuff[1]); p1 = plotKDE(stuff[1], title="lm2")
#
# stuff = localProduct(fg,:lm1)
# initVariable!(fg,:lm1, stuff[1]); p2 = plotKDE(stuff[1], title="lm1")
#
# stuff = localProduct(fg,:lp2)
# initVariable!(fg,:lp2, stuff[1]); p3 = plotKDE(stuff[1], title="lp2")
#
# stuff = localProduct(fg,:lp1)
# initVariable!(fg,:lp1, stuff[1]); p4 = plotKDE(stuff[1], title="lp1")
#
# stuff = localProduct(fg,:x1)
# initVariable!(fg,:x1, stuff[1]); p5 = plotKDE(stuff[1], title="x1")
#
# h1 = hstack(p1,p2,p3,p4,p5)
#
# vstack(h0,h1,h2,h3,h4,h5) |> PDF("/tmp/test_new.pdf", 35cm, 40cm)
#
#
# fg1 = initfg()
# loadDFG("/tmp/fix/lm2_1.tar.gz", Main, fg1)
#
# fg2 = initfg()
# loadDFG("/tmp/fix/lm1_2.tar.gz", Main, fg2)
#
# fg3 = initfg()
# loadDFG("/tmp/fix/lp2_3.tar.gz", Main, fg3)
#
# fg4 = initfg()
# loadDFG("/tmp/fix/lp1_4.tar.gz", Main, fg4)
#
# fg5 = initfg()
# loadDFG("/tmp/fix/x1_5.tar.gz", Main, fg5)
#
#
# fg6 = initfg()
# loadDFG("/tmp/fix/lm2_6.tar.gz", Main, fg6)
#
# fg7 = initfg()
# loadDFG("/tmp/fix/lm1_7.tar.gz", Main, fg7)
#
# fg8 = initfg()
# loadDFG("/tmp/fix/lp2_8.tar.gz", Main, fg8)
#
# fg9 = initfg()
# loadDFG("/tmp/fix/lp1_9.tar.gz", Main, fg9)
#
# fg10 = initfg()
# loadDFG("/tmp/fix/x1_10.tar.gz", Main, fg10)
#
# fg11 = initfg()
# loadDFG("/tmp/fix/lm2_11.tar.gz", Main, fg11)
#
# fg12 = initfg()
# loadDFG("/tmp/fix/lm1_12.tar.gz", Main, fg12)
#
# fg13 = initfg()
# loadDFG("/tmp/fix/lp2_13.tar.gz", Main, fg13)
#
# fg14 = initfg()
# loadDFG("/tmp/fix/lp1_14.tar.gz", Main, fg14)
#
# fg15 = initfg()
# loadDFG("/tmp/fix/x1_15.tar.gz", Main, fg15)
#
#
# h1 = hstack(plotKDE(fg1,:lm2),plotKDE(fg2,:lm1),plotKDE(fg3,:lp2),plotKDE(fg4,:lp1),plotKDE(fg5,:x1))
#
# h2 = hstack(plotKDE(fg6,:lm2),plotKDE(fg7,:lm1),plotKDE(fg8,:lp2),plotKDE(fg9,:lp1),plotKDE(fg10,:x1))
#
# h3 = hstack(plotKDE(fg11,:lm2),plotKDE(fg12,:lm1),plotKDE(fg13,:lp2),plotKDE(fg14,:lp1),plotKDE(fg15,:x1))
#
#
# vstack(h1,h2,h3) |> PDF("/tmp/test_new_fail.pdf",35cm,30cm)
#
#
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 204 | using Test
using Distributed
addprocs(2)
using IncrementalInference
@everywhere using IncrementalInference
@testset "test multiprocess solveTree!" begin
fg = generateGraph_Kaess()
solveTree!(fg)
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 995 | # development testing for multithreaded convolution
using IncrementalInference
using Test
using TensorCast
##
@testset "Basic ContinuousScalar example to ensure multithreaded convolutions work..." begin
##
@show Threads.nthreads()
N = 100
# Start with an empty factor graph
fg = initfg()
# add the first node
addVariable!(fg, :x0, ContinuousScalar, N=N)
# this is unary (prior) factor and does not immediately trigger autoinit of :x0.
addFactor!(fg, [:x0], Prior(Normal(0,1)))
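# (added note: initialization stays deferred until a solve or an explicit
# doautoinit!(fg, :x0) touches the variable)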
addVariable!(fg, :x1, ContinuousScalar, N=N)
# P(Z | :x1 - :x0 ) where Z ~ Normal(10,1)
@warn "threadmodel=MultiThreaded is obsolete. Look at IIF.CalcFactor alternatives instead"
addFactor!(fg, [:x0, :x1], LinearRelative(Normal(10.0,1)) ) #, threadmodel=MultiThreaded)
@error "Factor threadmodel=MultiThreaded equivalence restoration TBD"
@test begin
pts_ = approxConv(fg, :x0x1f1, :x1, N=N)
@cast pts[i,j] := pts_[j][i]
@test 0.95*N <= sum(abs.(pts .- 10.0) .< 5.0)
true
end
##
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 4052 | # test serialization of distributions
using IncrementalInference
using Test
##
@testset "Packing Categorical" begin
##
ctg = Categorical(5)
packed = packDistribution(ctg)
@test packed isa PackedSamplableBelief
@test packed isa IncrementalInference.PackedCategorical
upck = unpackDistribution(packed)
@test upck isa Categorical
@test isapprox(ctg, upck)
##
end
@testset "Packing Normal" begin
##
nor = Normal()
packed = packDistribution(nor)
@test packed isa PackedSamplableBelief
@test packed isa PackedNormal
upck = unpackDistribution(packed)
@test upck isa Normal
@test isapprox(nor, upck)
##
end
@testset "Packing for FullNormal" begin
##
mv = MvNormal([0.0;1.0], [1 0.1; 0.1 1])
packed = packDistribution(mv)
@test packed isa PackedSamplableBelief
@test packed isa PackedFullNormal
upck = unpackDistribution(packed)
@test upck isa FullNormal
@test isapprox(mv, upck)
##
end
@testset "Packing for DiagNormal" begin
##
mv = MvNormal([0.0; 1.0], [4.0; 4.0])
packed = packDistribution(mv)
@test packed isa PackedSamplableBelief
@test packed isa PackedDiagNormal
upck = unpackDistribution(packed)
@test upck isa DiagNormal
@test isapprox(mv, upck)
##
end
@testset "Packing for ZeroMeanFullNormal" begin
##
mv = MvNormal([1 0.1; 0.1 1])
packed = packDistribution(mv)
@test packed isa PackedSamplableBelief
@test packed isa PackedZeroMeanFullNormal
upck = unpackDistribution(packed)
@test upck isa ZeroMeanFullNormal
@test isapprox(mv, upck)
##
end
@testset "Packing for ZeroMeanDiagNormal" begin
##
mv = MvNormal([4.0;4.0])
packed = packDistribution(mv)
@test packed isa PackedSamplableBelief
@test packed isa PackedZeroMeanDiagNormal
upck = unpackDistribution(packed)
@test upck isa ZeroMeanDiagNormal
@test isapprox( mv.Σ.diag, upck.Σ.diag )
# @test isapprox(mv, upck)
##
end
@testset "Packing for AliasingScalarSampler" begin
##
bss = AliasingScalarSampler([1.0;2.0], [0.6;0.4])
packed = packDistribution(bss)
@test packed isa PackedSamplableBelief
@test packed isa IncrementalInference.PackedAliasingScalarSampler
upck = unpackDistribution(packed)
@test upck isa AliasingScalarSampler
@test isapprox( bss.domain, upck.domain )
@test isapprox( bss.weights.values, upck.weights.values )
@test isapprox( bss.weights.sum, upck.weights.sum )
##
end
@testset "Packing for ManifoldKernelDensity" begin
##
T = ContinuousEuclid{2}
pts = [randn(2) for _ in 1:50]
mkd = manikde!(T, pts)
packed = packDistribution(mkd)
@test packed isa PackedSamplableBelief
@test packed isa IncrementalInference.PackedManifoldKernelDensity
upck = unpackDistribution(packed)
@test upck isa ManifoldKernelDensity
@test isapprox(mkd, upck)
##
end
@testset "Packing of Rayleigh" begin
##
r = Rayleigh(1.1)
r_ = packDistribution(r)
@test r_ isa PackedSamplableBelief
@test r_ isa PackedRayleigh
r__ = unpackDistribution(r_)
@test r__ isa Rayleigh
@test isapprox(r.σ, r__.σ)
##
end
## Legacy tests
# @testset "hard-coded test of PackedPrior to Prior" begin
# ##
# pt = PackedPrior("KDE:100:[1.5015]:[-98.8276 -101.803 -98.1296 -96.1897 -99.3076 -99.1881 -101.721 -101.332 -100.431 -97.7293 -96.7652 -99.3806 -95.5593 -104.237 -94.9318 -101.691 -102.255 -98.9559 -99.3386 -99.2361 -102.483 -102.896 -97.0244 -98.9643 -99.4457 -101.298 -103.263 -2.75251 5.14065 0.327863 3.60042 -0.604114 -0.0564047 -0.804898 3.05469 1.4974 1.34657 2.22745 4.78117 1.89485 -0.48091 6.63068 0.63013 -3.11422 1.73705 5.22904 -1.73223 2.47453 1.10584 -0.0179944 3.65585 4.50016 -1.95042 98.2664 98.9983 103.748 100.789 98.4127 101.397 104.364 102.125 96.3685 103.59 99.0581 100.913 101.461 105.211 103.513 99.3325 101.201 98.05 103.508 99.9785 104.624 100.202 100.258 101.579 96.6931 95.4181 299.02 296.804 301.322 298.127 299.578 298.36 296.339 300.156 299.641 297.731 299.822 296.941 295.857 299.482 302.531 301.875 302.192 301.999 300.634 294.084 300.44]")
# tt = convert(BallTreeDensity, pt.Z)
# @test isa(tt, BallTreeDensity)
# F = Prior
# upt = convert(F, pt)
# @test isa(upt, Prior)
# ## TODO add more tests
# end
# | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 668 |
using IncrementalInference
using DistributedFactorGraphs
using JSON3
##
@testset "Test packing of mixture of distributions, 1498" begin
##
# Start with an empty factor graph
fg = initfg()
# add the first node
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
mmo = Mixture(LinearRelative,
(hypo1=Rayleigh(3), hypo2=Uniform(30,55)),
[0.4; 0.6])
addFactor!(fg, [:x0, :x1], mmo)
##
pf = packFactor(getFactor(fg, :x0x1f1))
##
pf_ = JSON3.write(pf)
##
saveDFG("/tmp/caesar/test_mixture.tar.gz", fg)
fg_ = loadDFG("/tmp/caesar/test_mixture.tar.gz")
Base.rm("/tmp/caesar/test_mixture.tar.gz")
##
end | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 464 | # test aspects specific to partial factors
using IncrementalInference
using Test
@testset "testing basic partial factor functions..." begin
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addFactor!(fg, [:x0], Prior(Normal()) )
fc = getFactor(fg, :x0f1)
@test !isPartial(fc)
fg = initfg()
addVariable!(fg, :x1, ContinuousEuclid{2})
addFactor!(fg, [:x1], PartialPrior(ContinuousEuclid{2},Normal(), (1,)) )
@test isPartial(getFactor(fg, :x1f1))
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1909 | # test nullhypo with n-dim partial
using Test
using IncrementalInference
##
@testset "test n-dimensional partial" begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousEuclid{3})
addFactor!(fg, [:x0;], PartialPrior(ContinuousEuclid{3}, MvNormal(zeros(2), ones(2)), (2,3)) )
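# (added note: the trailing tuple lists the constrained dimensions, so this
# prior acts only on dimensions 2 and 3 of the 3-dimensional :x0)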
addVariable!(fg, :x1, ContinuousEuclid{3})
addFactor!(fg, [:x1;], PartialPrior(ContinuousEuclid{3}, Normal(10,1),(1,)))
addFactor!(fg, [:x0; :x1], LinearRelative(MvNormal([10;0;0.0], ones(3))) )
##
initAll!(fg)
##
destlbl = :x0
dens = Vector{ManifoldKernelDensity}()
factors = getFactor.(fg, ls(fg, destlbl))
inferdim = IIF.proposalbeliefs!(fg, destlbl, factors, dens )
oldBel = getBelief(fg, destlbl)
oldpts = getPoints(oldBel)
varType = getVariableType(fg, destlbl)
pGM = getPoints( AMP.manifoldProduct(dens, getManifold(varType), N=100, oldPoints=oldpts), false )
# pGM = AMP.productbelief(oldpts, getManifold(varType), dens, 100, asPartial=false )
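# (added note: the proposalbeliefs! + manifoldProduct sequence above is the
# manual counterpart of the propagateBelief convenience call used next)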
##
densPts, inferdim = propagateBelief(fg, :x0, :, needFreshMeasurements=true )
##
solveTree!(fg);
##
@warn "WIP on testPartialNH.jl during transition to Manifolds.jl"
@test isapprox( getPPE(fg, :x0).suggested, [0;0;0], atol=1)
@test isapprox( getPPE(fg, :x1).suggested, [10;0;0], atol=1)
##
end
@testset "test n-dimensional partial with nullhypo" begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousEuclid{3})
addFactor!(fg, [:x0;], PartialPrior(ContinuousEuclid{3}, MvNormal(zeros(2), ones(2)),(2,3)) , nullhypo=0.2)
addVariable!(fg, :x1, ContinuousEuclid{3})
addFactor!(fg, [:x1;], PartialPrior(ContinuousEuclid{3}, Normal(10,1),(1,)))
addFactor!(fg, [:x0; :x1], LinearRelative(MvNormal([10;0;0.0], ones(3))) , nullhypo=0.2)
##
solveTree!(fg);
##
@warn "WIP testPartialNH.jl during transition to Manifolds.jl"
@test isapprox( getPPE(fg, :x0).suggested, [0;0;0], atol=1)
@test isapprox( getPPE(fg, :x1).suggested, [10;0;0], atol=2)
##
end
# | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1611 |
using Test
using IncrementalInference
using Manifolds
using DistributedFactorGraphs
import IncrementalInference: getSample, getManifold
##
mutable struct PartialDim2{T} <: AbstractPrior
Z::T
partial::Tuple{Int}
end
PartialDim2(Z::D) where {D <: IIF.SamplableBelief} = PartialDim2(Z, (2,))
getManifold(pd::PartialDim2) = getManifoldPartial(TranslationGroup(2), [pd.partial...])[1]
getSample(cfo::CalcFactor{<:PartialDim2}) = [rand(cfo.factor.Z);]
##
@testset "Test partial dimensions on prior are correct" begin
##
fg = initfg()
v0 = addVariable!(fg, :x0, ContinuousEuclid{2})
f0 = addFactor!(fg, [:x0], PartialDim2(Normal()))
@test IIF._getDimensionsPartial(f0) == [2]
bel, infd = propagateBelief(fg, v0, [f0;])
@test isPartial(bel)
##
dens = Vector{ManifoldKernelDensity}()
IIF.proposalbeliefs!(fg, :x0, [f0], dens)
pts = getPoints(dens[1], false)
##
propagateBelief(fg, :x0, [:x0f1;])
@test true
##
end
@testset "test propagateBelief returning a partial" begin
##
fg = initfg()
v0 = addVariable!(fg, :x0, ContinuousEuclid{2})
pts = [randn(1) for _ in 1:1000];
mkd = manikde!(TranslationGroup(1), pts, bw=[0.1;])
pp = PartialPrior(ContinuousEuclid{2}, mkd, (2,))
f0 = addFactor!(fg, [:x0;], pp, graphinit=false)
##
bel, infd = propagateBelief(fg, v0, [f0;])
@test isPartial(bel)
##
doautoinit!(fg, :x0)
# check the number of points in the graph value store
@show getSolverParams(fg).N
@test length(getPoints(getBelief(fg, :x0))) == getSolverParams(fg).N
@info "PassThrough factors currently work different and will pass the full N=1000 count through to the graph."
##
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 2623 | # test for conv and product repeatability
using Test
using Statistics
using IncrementalInference
##
@testset "forward backward convolutions and products sequence" begin
fg = initfg()
addVariable!(fg, :a, ContinuousScalar)
addVariable!(fg, :b, ContinuousScalar)
addVariable!(fg, :c, ContinuousScalar)
addVariable!(fg, :d, ContinuousScalar)
addVariable!(fg, :e, ContinuousScalar)
addFactor!(fg, [:a], Prior(Normal()))
addFactor!(fg, [:a;:b], LinearRelative(Normal(10, 1)))
addFactor!(fg, [:b;:c], LinearRelative(Normal(10, 1)))
addFactor!(fg, [:c;:d], LinearRelative(Normal(10, 1)))
addFactor!(fg, [:d;:e], LinearRelative(Normal(10, 1)))
initAll!(fg)
tree = solveTree!(fg)
@test (Statistics.mean(getPoints(getBelief(fg, :a)))- 0 |> abs) < 3
@test (Statistics.mean(getPoints(getBelief(fg, :b)))-10 |> abs) < 4
@test (Statistics.mean(getPoints(getBelief(fg, :c)))-20 |> abs) < 4
@test (Statistics.mean(getPoints(getBelief(fg, :d)))-30 |> abs) < 5
@test (Statistics.mean(getPoints(getBelief(fg, :e)))-40 |> abs) < 5
@test 0.3 < Statistics.std(getPoints(getBelief(fg, :a))) < 2
@test 0.5 < Statistics.std(getPoints(getBelief(fg, :b))) < 4
@test 0.9 < Statistics.std(getPoints(getBelief(fg, :c))) < 6
@test 1.2 < Statistics.std(getPoints(getBelief(fg, :d))) < 7
@test 1.5 < Statistics.std(getPoints(getBelief(fg, :e))) < 8
# drawTree(tree, show=true)
# using RoMEPlotting
# plotKDE(fg, ls(fg))
# spyCliqMat(tree, :b)
end
@testset "Basic back and forth convolution over LinearRelative should spread" begin
fg = initfg()
addVariable!(fg, :a, ContinuousScalar)
addVariable!(fg, :b, ContinuousScalar)
addFactor!(fg, [:a;:b], LinearRelative(Normal(10, 1)), graphinit=false)
initVariable!(fg, :a, randn(1,100))
initVariable!(fg, :b, 10 .+randn(1,100))
A = getBelief(fg, :a)
B = getBelief(fg, :b)
# plotKDE(fg, [:a; :b])
# repeat many times to ensure the means stay put and covariances spread out
for i in 1:10
pts = approxConv(fg, :abf1, :b)
B_ = manikde!(ContinuousScalar, pts)
# plotKDE([B_; B])
initVariable!(fg, :b, B_)
pts = approxConv(fg, :abf1, :a)
A_ = manikde!(ContinuousScalar, pts)
# plotKDE([A_; A])
initVariable!(fg, :a, A_)
end
A_ = getBelief(fg, :a)
B_ = getBelief(fg, :b)
# plotKDE([A_; B_; A; B])
@test (Statistics.mean(getPoints(A)) |> abs) < 1
@test (Statistics.mean(getPoints(A_))|> abs) < 2
@test (Statistics.mean(getPoints(B)) -10 |> abs) < 1
@test (Statistics.mean(getPoints(B_))-10 |> abs) < 2
@test Statistics.std(getPoints(A)) < 2
@test 3 < Statistics.std(getPoints(A_))
@test Statistics.std(getPoints(B)) < 2
@test 3 < Statistics.std(getPoints(B_))
##
end
##
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 2155 |
using IncrementalInference
using Test
##
@testset "saving to and loading from FileDFG" begin
##
fg = generateGraph_Kaess()
addVariable!(fg, :x4, ContinuousScalar)
addFactor!(fg, [:x2;:x3;:x4], LinearRelative(Normal()), multihypo=[1.0;0.6;0.4])
saveFolder = "/tmp/dfg_test"
saveDFG(fg, saveFolder)
retDFG = initfg()
retDFG = loadDFG!(retDFG, saveFolder)
Base.rm(saveFolder*".tar.gz")
@test symdiff(ls(fg), ls(retDFG)) == []
@test symdiff(lsf(fg), lsf(retDFG)) == []
@show getFactor(fg, :x2x3x4f1).solverData.multihypo
@show getFactor(retDFG, :x2x3x4f1).solverData.multihypo
# check for match
@test getFactor(fg, :x2x3x4f1).solverData.multihypo - getFactor(retDFG, :x2x3x4f1).solverData.multihypo |> norm < 1e-10
@test getFactor(fg, :x2x3x4f1).solverData.certainhypo - getFactor(retDFG, :x2x3x4f1).solverData.certainhypo |> norm < 1e-10
##
end
@testset "saving to and loading from FileDFG with nullhypo, eliminated, solveInProgress" begin
##
fg = generateGraph_Kaess()
getSolverParams(fg).attemptGradients = true
addVariable!(fg, :x4, ContinuousScalar)
addFactor!(fg, [:x2;:x3;:x4], LinearRelative(Normal()), multihypo=[1.0;0.6;0.4])
addFactor!(fg, [:x1;], Prior(Normal(10,1)), nullhypo=0.5)
solveTree!(fg)
#manually change a few fields to test if they are preserved
fa = getFactor(fg, :x2x3x4f1)
fa.solverData.eliminated = true
fa.solverData.solveInProgress = 1
fa.solverData.nullhypo = 0.5
saveFolder = "/tmp/dfg_test"
saveDFG(fg, saveFolder)
retDFG = initfg()
getSolverParams(retDFG).attemptGradients = true
loadDFG!(retDFG, saveFolder)
Base.rm(saveFolder*".tar.gz")
@test issetequal(ls(fg), ls(retDFG))
@test issetequal(lsf(fg), lsf(retDFG))
@show getFactor(fg, :x2x3x4f1).solverData.multihypo
@show getFactor(retDFG, :x2x3x4f1).solverData.multihypo
# check for match
@test isapprox(getFactor(fg, :x2x3x4f1).solverData.multihypo, getFactor(retDFG, :x2x3x4f1).solverData.multihypo)
@test isapprox(getFactor(fg, :x2x3x4f1).solverData.certainhypo, getFactor(retDFG, :x2x3x4f1).solverData.certainhypo)
fb = getFactor(retDFG, :x2x3x4f1)
@test fa == fb
fa = getFactor(fg, :x1f2)
fb = getFactor(retDFG, :x1f2)
@test fa == fb
##
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1588 |
using Test
using IncrementalInference
@testset "Skip downsolve" begin
N=6
fg = generateGraph_LineStep(N;
graphinit=false,
poseEvery=1,
landmarkEvery=N+1,
posePriorsAt=[0],
landmarkPriorsAt=[],
sightDistance=N+1)
deleteFactor!.(fg, [Symbol("x$(i)lm0f1") for i=1:(N-1)])
getSolverParams(fg).useMsgLikelihoods = true
getSolverParams(fg).downsolve = false
smtasks = Task[]
tree = solveTree!(fg; smtasks=smtasks, recordcliqs=[:x4]);
hists = fetchCliqHistoryAll!(smtasks)
# check that the downsolve step was skipped
@test !(IIF.solveDown_StateMachine in getindex.(hists[2], 3))
#test if values are still correct
for var in sortDFG(ls(fg))
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
@test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.2)
end
#now downsolve only
getSolverParams(fg).upsolve = false
getSolverParams(fg).downsolve = true
smtasks = Task[]
tree = solveTree!(fg; smtasks=smtasks, recordcliqs=[:x4]);
hists = fetchCliqHistoryAll!(smtasks);
# check that the upsolve step was skipped
@test !(IIF.solveUp_StateMachine in getindex.(hists[2], 3))
#test if values are still correct
for var in sortDFG(ls(fg))
sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested
@test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.2)
end
# recycled downsolve only
tree = solveTree!(fg, tree; smtasks=smtasks, recordcliqs=[:x4]);
@test !(IIF.solveUp_StateMachine in getindex.(hists[2], 3))
end

using Test
using IncrementalInference
##
@testset "Basic test of belief prediction on alternate solveKey" begin
##
fg = initfg()
addVariable!(fg, :a, ContinuousScalar)
addVariable!(fg, :b, ContinuousScalar)
addFactor!(fg, [:a], Prior(Normal(10,1)), graphinit=false)
addFactor!(fg, [:a;:b], LinearRelative(Normal(10,1)), graphinit=false)
deleteVariableSolverData!(fg, :a, :default)
deleteVariableSolverData!(fg, :b, :default)
##
pts = sampleFactor(fg, :af1, 100)
IIF.setDefaultNodeData!(getVariable(fg, :a), 0, 100, 1, solveKey=:testSolveKey,
initialized=false, varType=ContinuousScalar(), dontmargin=false)
#
initVariable!(fg, :a, pts, :testSolveKey)
@test !isInitialized(fg, :a)
@test isInitialized(fg, :a, :testSolveKey)
##
IIF.setDefaultNodeData!(getVariable(fg, :b), 0, 100, 1, solveKey=:testSolveKey,
initialized=false, varType=ContinuousScalar(), dontmargin=false)
#
@test !(:default in listSolveKeys(getVariable(fg, :a)))
@test !(:default in listSolveKeys(getVariable(fg, :b)))
@test (:testSolveKey in listSolveKeys(getVariable(fg, :a)))
@test (:testSolveKey in listSolveKeys(getVariable(fg, :b)))
##
doautoinit!(fg, :b, solveKey=:testSolveKey)
##
@test !(:default in listSolveKeys(getVariable(fg, :a)))
@test !(:default in listSolveKeys(getVariable(fg, :b)))
@test (:testSolveKey in listSolveKeys(getVariable(fg, :a)))
@test (:testSolveKey in listSolveKeys(getVariable(fg, :b)))
##
@test isapprox( calcPPE(fg, :a, solveKey=:testSolveKey).suggested[1], 10, atol=1)
@test isapprox( calcPPE(fg, :b, solveKey=:testSolveKey).suggested[1], 20, atol=1)
##
end
@testset "test solve with unique solveKey, see #1219" begin
##
fg = initfg()
getSolverParams(fg).graphinit=false
addVariable!(fg, :a, ContinuousScalar)
addVariable!(fg, :b, ContinuousScalar)
addVariable!(fg, :c, ContinuousScalar)
addVariable!(fg, :d, ContinuousScalar)
addVariable!(fg, :e, ContinuousScalar)
addFactor!(fg, [:a], Prior(Normal()))
addFactor!(fg, [:a;:b], LinearRelative(Normal(10, 1)))
addFactor!(fg, [:b;:c], LinearRelative(Normal(10, 1)))
addFactor!(fg, [:c;:d], LinearRelative(Normal(10, 1)))
addFactor!(fg, [:d;:e], LinearRelative(Normal(10, 1)))
getSolverParams(fg).graphinit=true
##
# getSolverParams(fg).limititers=30
solveTree!(fg, solveKey=:testSolveKey )
##
@test isapprox( calcPPE(fg, :a, solveKey=:testSolveKey).suggested[1], 0, atol=2)
@test isapprox( calcPPE(fg, :b, solveKey=:testSolveKey).suggested[1], 10, atol=2)
@test isapprox( calcPPE(fg, :c, solveKey=:testSolveKey).suggested[1], 20, atol=2)
@test isapprox( calcPPE(fg, :d, solveKey=:testSolveKey).suggested[1], 30, atol=2)
@test isapprox( calcPPE(fg, :e, solveKey=:testSolveKey).suggested[1], 40, atol=2)
##
# using RoMEPlotting
# Gadfly.set_default_plot_size(35cm,25cm)
# ##
# plotKDE(fg, ls(fg), solveKey=:testSolveKey)
##
end
#

# test forest of graphs can solve with CSM, specifically #518
using IncrementalInference
using Statistics
using Test
using TensorCast
@testset "Test forest of orphaned graphs" begin
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addFactor!(fg, [:x0;], Prior(Normal(0,0.1)))
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x0;:x1], LinearRelative(Normal(10,0.1)))
addVariable!(fg, :x2, ContinuousScalar)
addFactor!(fg, [:x1;:x2], LinearRelative(Normal(10,0.1)))
addVariable!(fg, :x10, ContinuousScalar)
addFactor!(fg, [:x10;], Prior(Normal()))
addVariable!(fg, :x11, ContinuousScalar)
addFactor!(fg, [:x10;:x11], LinearRelative(Normal(-10,1.0)))
addVariable!(fg, :x12, ContinuousScalar)
addFactor!(fg, [:x11;:x12], LinearRelative(Normal(-10,1.0)))
# plotDFG(fg)
# getSolverParams(fg).drawtree = true
# getSolverParams(fg).showtree = true
# solve factor graph with two orphaned components
vo = Symbol[:x12, :x2, :x0, :x11, :x1, :x10]
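# the fixed elimination order interleaves the two disconnected chains, making
# the resulting two-root tree repeatable across runs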
tree = solveTree!(fg, eliminationOrder=vo)
# test tree will have two different root nodes
@test getEliminationOrder(tree) == vo
@test getParent(tree, getClique(tree, :x1)) |> length == 0
@test getParent(tree, getClique(tree, :x10)) |> length == 0
@test getChildren(tree, getClique(tree, :x1)) |> length == 1
@test getChildren(tree, getClique(tree, :x10)) |> length == 1
@test getChildren(tree, getClique(tree, :x2)) |> length == 0
@test getChildren(tree, getClique(tree, :x12)) |> length == 0
## Test the numerical values are correct
pts_ = getBelief(fg, :x0) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test pts |> mean |> abs < 1.0
pts_ = getBelief(fg, :x1) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test (pts |> mean) - 10 |> abs < 2.0
pts_ = getBelief(fg, :x2) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test (pts |> mean) - 20 |> abs < 3.0
pts_ = getBelief(fg, :x10) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test pts |> mean |> abs < 2.0
pts_ = getBelief(fg, :x11) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test (pts |> mean) + 10 |> abs < 4.0
pts_ = getBelief(fg, :x12) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test (pts |> mean) + 20 |> abs < 5.0
# using RoMEPlotting
# Gadfly.set_default_plot_size(35cm, 25cm)
#
# plotKDE(fg, ls(fg))
end

# test that PPE values are updated with solve, see issue #548
using Test
using IncrementalInference
using DistributedFactorGraphs
@testset "test PPE update during solve" begin
fg = generateGraph_Kaess(graphinit=true)
initAll!(fg)
# list of variables to check
vars = listVariables(fg)
# fetch values before solve
before = Dict()
for vs in vars
before[vs] = getVariablePPE(getVariable(fg, vs)) |> getPPESuggested
end
# do the solve
# getSolverParams(fg).dbg = true
# tree = buildTreeReset!(fg)
# drawTree(tree, show=true)
# solveCliqUp!(fg, tree, :l2)
# solveCliqUp!(fg, tree, :x3)
# solveCliqUp!(fg, tree, :x2)
solveTree!(fg)
after = Dict()
for vs in vars
after[vs] = getVariablePPE(getVariable(fg, vs)) |> getPPESuggested
end
# before and after should be noticeably different, because the first inferred values have been found
for vs in vars
errd = norm(before[vs] - after[vs])
# @show vs, errd
@test 1e-5 < errd
end
# force recalc and update each PPE
force = Dict()
for vs in vars
setVariablePosteriorEstimates!(fg, vs)
force[vs] = getVariablePPE(getVariable(fg, vs)) |> getPPESuggested
# these need to be close to the same as after
errd = norm(force[vs] - after[vs])
# @show vs, errd
@test errd < 0.1
end
## suspect cliqSubFg updated, but not back to main dfg object... test via load graph
end

using DistributedFactorGraphs
using IncrementalInference
using Interpolations
using Manifolds
using StaticArrays
using Test
import IncrementalInference: LevelSetGridNormal
import Rotations as _Rot
## define new local variable types for testing
@defVariable TranslationGroup2 TranslationGroup(2) @SVector[0.0, 0.0]
@defVariable SpecialEuclidean2 SpecialEuclidean(2) ArrayPartition(@SVector([0.0,0.0]), @SMatrix([1.0 0.0; 0.0 1.0]))
# @defVariable SpecialEuclidean2 SpecialEuclidean(2) ArrayPartition([0.0,0.0], [1.0 0.0; 0.0 1.0])
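# @defVariable takes the new variable type's name, the manifold it lives on,
# and a representative identity point; the identity point's concrete type
# (static arrays here) also fixes the on-manifold point type checked below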
##
@testset "Test SpecialEuclidean(2)" begin
##
M = getManifold(SpecialEuclidean2)
@test M == SpecialEuclidean(2)
pT = getPointType(SpecialEuclidean2)
# @test pT == ArrayPartition{Float64,Tuple{Vector{Float64}, Matrix{Float64}}}
# @test pT == ArrayPartition{Tuple{MVector{2, Float64}, MMatrix{2, 2, Float64, 4}}}
@test pT == ArrayPartition{Float64, Tuple{SVector{2, Float64}, SMatrix{2, 2, Float64, 4}}}
pϵ = getPointIdentity(SpecialEuclidean2)
# @test_broken pϵ == ArrayPartition(@MVector([0.0,0.0]), @MMatrix([1.0 0.0; 0.0 1.0]))
@test all(isapprox.(pϵ,ArrayPartition(SA[0.0,0.0], SA[1.0 0.0; 0.0 1.0])))
@test is_point(getManifold(SpecialEuclidean2), getPointIdentity(SpecialEuclidean2))
##
fg = initfg()
v0 = addVariable!(fg, :x0, SpecialEuclidean2)
# mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(@MVector([0.0,0.0]), @MMatrix([1.0 0.0; 0.0 1.0])), MvNormal([0.01, 0.01, 0.01]))
# mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(@MVector([0.0,0.0]), @MMatrix([1.0 0.0; 0.0 1.0])), MvNormal(Diagonal(abs2.([0.01, 0.01, 0.01]))))
mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition([0.0,0.0], [1.0 0.0; 0.0 1.]), MvNormal(Diagonal(abs2.([0.01, 0.01, 0.01]))))
mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(SA[0.0,0.0], SA[1.0 0.0; 0.0 1.]), MvNormal(Diagonal(abs2.(SA[0.01, 0.01, 0.01]))))
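# NOTE the StaticArrays-based constructor on the line above rebinds mp, so only
# this last (SA) prior is the one actually added to the graph below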
p = addFactor!(fg, [:x0], mp)
##
doautoinit!(fg, :x0)
##
vnd = getVariableSolverData(fg, :x0)
@test all(isapprox.(mean(vnd.val), ArrayPartition(SA[0.0,0.0], SA[1.0 0.0; 0.0 1.0]), atol=0.1))
@test all(is_point.(Ref(M), vnd.val))
##
v1 = addVariable!(fg, :x1, SpecialEuclidean2)
mf = ManifoldFactor(SpecialEuclidean(2), MvNormal(SA[1,2,pi/4], SA[0.01,0.01,0.01]))
f = addFactor!(fg, [:x0, :x1], mf)
doautoinit!(fg, :x1)
vnd = getVariableSolverData(fg, :x1)
@test all(isapprox(M, mean(M,vnd.val), ArrayPartition(SA[1.0,2.0], SA[0.7071 -0.7071; 0.7071 0.7071]), atol=0.1))
@test all(is_point.(Ref(M), vnd.val))
##
smtasks = Task[]
solveTree!(fg; smtasks, verbose=true) #, recordcliqs=ls(fg))
# hists = fetchCliqHistoryAll!(smtasks);
vnd = getVariableSolverData(fg, :x0)
@test all(isapprox.(mean(vnd.val), ArrayPartition(SA[0.0,0.0], SA[1.0 0.0; 0.0 1.0]), atol=0.1))
@test all(is_point.(Ref(M), vnd.val))
vnd = getVariableSolverData(fg, :x1)
@test all(isapprox.(mean(vnd.val), ArrayPartition(SA[1.0,2.0], SA[0.7071 -0.7071; 0.7071 0.7071]), atol=0.1))
@test all(is_point.(Ref(M), vnd.val))
v1 = addVariable!(fg, :x2, SpecialEuclidean2)
mf = ManifoldFactor(SpecialEuclidean(2), MvNormal(SA[1,2,pi/4], SA[0.01,0.01,0.01]))
f = addFactor!(fg, [:x1, :x2], mf)
##
#test new error from solvetree
smtasks = Task[]
result = solveTree!(fg; smtasks, verbose=true)
@test result isa AbstractBayesTree
## test partial prior issue
fg = initfg()
v0 = addVariable!(fg, :x0, SpecialEuclidean2)
mp = PartialPrior(SpecialEuclidean2,MvNormal([0.01, 0.01]), (1,2))
p = addFactor!(fg, [:x0], mp, graphinit=false)
##
pbel_ = approxConvBelief(fg, :x0f1, :x0)
@test isPartial(pbel_)
@test pbel_._partial == [1;2]
@test length(pbel_.infoPerCoord) == 3
##
end
@testset "test initVariableManual! with Vector of Tuple inputs" begin
##
fg = initfg()
pts = [(randn(2), Matrix(_Rot.RotMatrix2(randn()))) for _ in 1:50]
addVariable!(fg, :x0, SpecialEuclidean2)
initVariable!(getVariable(fg, :x0), pts)
@test isapprox( pts[1][1], getPoints(fg, :x0)[1].x[1])
@test isapprox( pts[1][2], getPoints(fg, :x0)[1].x[2])
# can delete upon full deprecation of initVariableManual! in favor of initVariable!
initVariable!(getVariable(fg, :x0), reverse(pts))
@test isapprox( pts[end][1], getPoints(fg, :x0)[1].x[1])
@test isapprox( pts[end][2], getPoints(fg, :x0)[1].x[2])
##
end
##
struct ManifoldFactorSE2{T <: SamplableBelief} <: IIF.AbstractManifoldMinimize
Z::T
end
ManifoldFactorSE2() = ManifoldFactorSE2(MvNormal(Diagonal([1,1,1])))
DFG.getManifold(::ManifoldFactorSE2) = SpecialEuclidean(2)
IIF.selectFactorType(::Type{<:SpecialEuclidean2}, ::Type{<:SpecialEuclidean2}) = ManifoldFactorSE2
function IIF.getSample(cf::CalcFactor{<:ManifoldFactorSE2})
M = cf.manifold # SpecialEuclidean(2)
ϵ = getPointIdentity(M)
X = sampleTangent(M, cf.factor.Z, ϵ)
return X
end
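# getSample draws the measurement as a tangent vector X at the group identity;
# the residual below pushes it through exp onto p and compares against q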
function (cf::CalcFactor{<:ManifoldFactorSE2})(X, p, q)
M = cf.manifold # SpecialEuclidean(2)
q̂ = Manifolds.compose(M, p, exp(M, identity_element(M, p), X)) #for groups
Xc = zeros(3)
vee!(M, Xc, q, log(M, q, q̂))
return Xc
end
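# a commented sanity sketch for the residual above (a sketch only, assuming the
# standard Manifolds.jl hat/exp/compose API): when q equals p composed with the
# measured motion, the residual coordinates should vanish
# M = SpecialEuclidean(2)
# p = ArrayPartition([0.0, 0.0], [1.0 0.0; 0.0 1.0])
# X = hat(M, identity_element(M, p), [1.0, 2.0, pi/4])
# q = Manifolds.compose(M, p, exp(M, identity_element(M, p), X))
# # a CalcFactor{<:ManifoldFactorSE2} evaluated at (X, p, q) returns ≈ zeros(3)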
##
@testset "Test Pose2 like hex as SpecialEuclidean2" begin
##
M = getManifold(SpecialEuclidean2)
fg = initfg()
v0 = addVariable!(fg, :x0, SpecialEuclidean2)
mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(Vector([10.0,10.0]), Matrix([-1.0 0.0; 0.0 -1.0])), MvNormal([0.1, 0.1, 0.01]))
p = addFactor!(fg, [:x0], mp)
##
for i in 0:5
psym = Symbol("x$i")
nsym = Symbol("x$(i+1)")
addVariable!(fg, nsym, SpecialEuclidean2)
mf = ManifoldFactor(SpecialEuclidean(2), MvNormal([10.0,0,pi/3], [0.5,0.5,0.05]))
f = addFactor!(fg, [psym;nsym], mf)
end
addVariable!(fg, :l1, SpecialEuclidean2, tags=[:LANDMARK;])
mf = ManifoldFactor(SpecialEuclidean(2), MvNormal([10.0,0,0], [0.1,0.1,0.01]))
addFactor!(fg, [:x0; :l1], mf)
mf = ManifoldFactor(SpecialEuclidean(2), MvNormal([10.0,0,0], [0.1,0.1,0.01]))
addFactor!(fg, [:x6; :l1], mf)
##
smtasks = Task[]
solveTree!(fg; smtasks);
vnd = getVariableSolverData(fg, :x0)
@test isapprox(M, mean(M, vnd.val), ArrayPartition([10.0,10.0], [-1.0 0.0; 0.0 -1.0]), atol=0.2)
vnd = getVariableSolverData(fg, :x1)
@test isapprox(M, mean(M, vnd.val), ArrayPartition([0.0,10.0], [-0.5 0.866; -0.866 -0.5]), atol=0.4)
vnd = getVariableSolverData(fg, :x6)
@test isapprox(M, mean(M, vnd.val), ArrayPartition([10.0,10.0], [-1.0 0.0; 0.0 -1.0]), atol=0.5)
## Special test for manifold based messages
#FIXME this may show some bug in propagateBelief caused by empty factors
fg.solverParams.useMsgLikelihoods = true
smtasks = Task[]
result = solveTree!(fg; smtasks); #, recordcliqs=ls(fg))
@test result isa AbstractBayesTree
##
end
@testset "test deconv on <:AbstractManifoldMinimize" begin
##
fg = initfg()
getSolverParams(fg).useMsgLikelihoods = true
addVariable!(fg, :x0, SpecialEuclidean2)
addVariable!(fg, :x1, SpecialEuclidean2)
mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(Vector([10.0,10.0]), Matrix([-1.0 0.0; 0.0 -1.0])), MvNormal(diagm([0.1, 0.1, 0.01].^2)))
p = addFactor!(fg, [:x0], mp)
doautoinit!(fg,:x0)
addFactor!(fg, [:x0;:x1], ManifoldFactorSE2(MvNormal([10.0,0,0.1], diagm([0.5,0.5,0.05].^2))))
initAll!(fg)
# now check deconv
pred, meas = approxDeconv(fg, :x0x1f1)
@test mmd(SpecialEuclidean(2), pred, meas) < 1e-1
p_t = map(x->x.x[1], pred)
m_t = map(x->x.x[1], meas)
p_θ = map(x->x.x[2][2], pred)
m_θ = map(x->x.x[2][2], meas)
@test isapprox(mean(p_θ), 0.1, atol=0.02)
@test isapprox(std(p_θ), 0.05, atol=0.02)
@test isapprox(mean(p_t), [10,0], atol=0.3)
@test isapprox(std(p_t), [0.5,0.5], atol=0.3)
@test isapprox(mean(p_θ), mean(m_θ), atol=0.03)
@test isapprox(std(p_θ), std(m_θ), atol=0.03)
@test isapprox(mean(p_t), mean(m_t), atol=0.3)
@test isapprox(std(p_t), std(m_t), atol=0.3)
end
## ======================================================================================
##
## ======================================================================================
struct ManiPose2Point2{T <: SamplableBelief} <: IIF.AbstractManifoldMinimize
Z::T
partial::Vector{Int}
end
function IIF.getSample(cf::CalcFactor{<:ManiPose2Point2})
return rand(cf.factor.Z)
end
DFG.getManifold(::ManiPose2Point2) = TranslationGroup(2)
# define the conditional probability constraint
function (cfo::CalcFactor{<:ManiPose2Point2})(measX, p, q)
#
M = SpecialEuclidean(2)
q_SE = ArrayPartition(q, identity_element(SpecialOrthogonal(2), p.x[2]))
X_se2 = log(M, identity_element(M, p), Manifolds.compose(M, inv(M, p), q_SE))
X = X_se2.x[1]
# NOTE wrong for what we want X̂ = log(M, p, q_SE)
return measX - X
end
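# NOTE ManiPose2Point2 is a partial relative factor: it constrains only the
# translation part (the `partial` field, coordinates 1 and 2) of the SE(2)
# pose against the TranslationGroup(2) point, leaving rotation unconstrained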
##
@testset "Test SpecialEuclidean(2)" begin
##
# Base.convert(::Type{<:Tuple}, M::TranslationGroup{Tuple{2},ℝ}) = (:Euclid, :Euclid)
# Base.convert(::Type{<:Tuple}, ::IIF.InstanceType{TranslationGroup{Tuple{2},ℝ}}) = (:Euclid, :Euclid)
##
fg = initfg()
v0 = addVariable!(fg, :x0, SpecialEuclidean2)
mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(Vector([0.0,0.0]), Matrix([1.0 0.0; 0.0 1.0])), MvNormal([0.01, 0.01, 0.01]))
p = addFactor!(fg, [:x0], mp)
##
v1 = addVariable!(fg, :x1, TranslationGroup2)
mf = ManiPose2Point2(MvNormal([1,2], [0.01,0.01]), [1;2])
f = addFactor!(fg, [:x0, :x1], mf)
doautoinit!(fg, :x1)
vnd = getVariableSolverData(fg, :x1)
@test all(isapprox.(mean(vnd.val), [1.0,2.0], atol=0.1))
##
smtasks = Task[]
solveTree!(fg; smtasks, verbose=true, recordcliqs=ls(fg))
# # hists = fetchCliqHistoryAll!(smtasks);
vnd = getVariableSolverData(fg, :x0)
@test isapprox(mean(getManifold(fg,:x0),vnd.val), ArrayPartition([0.0,0.0], [1.0 0.0; 0.0 1.0]), atol=0.1)
vnd = getVariableSolverData(fg, :x1)
@test all(isapprox.(mean(vnd.val), [1.0,2.0], atol=0.1))
##
end
@testset "test propagateBelief w HeatmapSampler and init for PartialPriorPassThrough w Priors" begin
##
fg = initfg()
v0 = addVariable!(fg, :x0, SpecialEuclidean2)
img_ = rand(10,10).+5.0
x_,y_ = ([-9:2.0:9;],[-9:2.0:9;])
hmd = LevelSetGridNormal(img_, (x_,y_), 5.5, 0.1, N=120)
pthru = PartialPriorPassThrough(hmd, (1,2))
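# a rough reading of the construction above: LevelSetGridNormal treats the
# heatmap level set img ≈ 5.5 (with sigma 0.1) as a sampleable belief, and
# PartialPriorPassThrough applies it to only the (1,2) translation coordinates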
@show hmd
## quick
pf = convert( AbstractPackedFactor, pthru )
upf = convert( AbstractFactor, pf )
@test pthru.partial == upf.partial
@test isapprox( pthru.Z.heatmap.data, upf.Z.heatmap.data )
@test isapprox( pthru.Z.heatmap.domain[1], upf.Z.heatmap.domain[1] )
@test isapprox( pthru.Z.heatmap.domain[2], upf.Z.heatmap.domain[2] )
@test isapprox( pthru.Z.level, upf.Z.level )
@test isapprox( pthru.Z.sigma, upf.Z.sigma )
@test isapprox( pthru.Z.sigma_scale, upf.Z.sigma_scale )
## test without nullhyp
f0 = addFactor!(fg, [:x0], pthru, graphinit=false)
## test the inference functions
bel, infd = propagateBelief(fg, v0, [f0])
@test isPartial(bel)
@test length(getPoints(bel)) == 120
## repeat test with nullhypo
fg = initfg()
v0 = addVariable!(fg, :x0, SpecialEuclidean2)
# test with nullhypo
f0 = addFactor!(fg, [:x0], pthru, graphinit=false, nullhypo=0.2)
## test the inference functions
bel, infd = propagateBelief(fg, v0, [f0])
@test isPartial(bel)
##
doautoinit!(fg, :x0)
@test length(getPoints(getBelief(fg, :x0))) == getSolverParams(fg).N # 120
# @info "PassThrough transfers the full point count to the graph, unless a product is calculated during the propagateBelief step."
##
# @test_broken begin
## check the partials magic
dens, ipc = propagateBelief(fg,:x0,:)
testv = deepcopy(getVariable(fg, :x0))
setBelief!(testv, dens, true, ipc)
##
smtasks = Task[]
solveGraph!(fg; smtasks);
# hists = fetchCliqHistoryAll!(smtasks)
# printCSMHistoryLogical(hists)
# hists_ = deepcopy(hists)
# repeatCSMStep!(hists, 1, 6)
@test_broken 120 == length(getPoints(fg, :x0))
@warn "must still check if bandwidths are recalculated on many points (not necessary), or lifted from this case single prior"
##
mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(Vector([0.0,0.0]), Matrix([1.0 0.0; 0.0 1.0])), MvNormal([0.01, 0.01, 0.01],[1 0 0;0 1 0;0 0 1.]))
f1 = addFactor!(fg, [:x0], mp, graphinit=false)
@test length(ls(fg, :x0)) == 2
##
prp, infd = propagateBelief(fg, v0, [f0;f1])
@test length(getPoints(prp)) == getSolverParams(fg).N
## check that solve corrects the point count on graph variable
@test_broken 120 == length(getPoints(fg, :x0))
solveGraph!(fg);
# this number should drop down to usual, 100 at time of writing
@test getSolverParams(fg).N == length(getPoints(fg, :x0))
## check saveDFG (check consistency of packing converters above)
@error "Whats going on in PackedManifoldPrior, skipping tests"
@test_broken begin
saveDFG(joinpath(tempdir(),"passthru"), fg)
fg_ = loadDFG(joinpath(tempdir(),"passthru.tar.gz"))
Base.rm(joinpath(tempdir(),"passthru.tar.gz"))
end
# @error "#FIXME test propagateBelief w HeatmapSampler ... broken on ci but not local"
# return true
# end
##
end
@testset "test point count on propagate, solve, init for PartialPriorPassThrough w Relative" begin
##
fg = initfg()
v0 = addVariable!(fg, :x0, SpecialEuclidean2)
img_ = rand(10,10).+5.0
x_,y_ = ([-9:2.0:9;],[-9:2.0:9;])
hmd = LevelSetGridNormal(img_, (x_,y_), 5.5, 0.1, N=120)
pthru = PartialPriorPassThrough(hmd, (1,2))
# test without nullhyp
f0 = addFactor!(fg, [:x0], pthru, graphinit=false)
## test the inference functions
addVariable!(fg, :x1, SpecialEuclidean2)
# mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(@MVector([0.0,0.0]), @MMatrix([1.0 0.0; 0.0 1.0])), MvNormal([0.01, 0.01, 0.01]))
mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(Vector([0.0,0.0]), Matrix([1.0 0.0; 0.0 1.0])), MvNormal([0.01, 0.01, 0.01]))
f1 = addFactor!(fg, [:x1], mp, graphinit=false)
doautoinit!(fg, :x1)
## connect with relative and check calculation size on x0
mf = ManifoldFactor(SpecialEuclidean(2), MvNormal([1,2,pi/4], [0.01,0.01,0.01]))
f2 = addFactor!(fg, [:x0, :x1], mf, graphinit=false)
##
bel, infd = propagateBelief(fg, v0, [f0;f2])
@test !isPartial(bel)
@test getSolverParams(fg).N == length(getPoints(bel))
## check other functions
solveTree!(fg);
@test getSolverParams(fg).N == length(getPoints(fg, :x0))
@test getSolverParams(fg).N == length(getPoints(fg, :x1))
## and check that initAll! works the same (different init sequences may change code execution path)
fg = initfg()
v0 = addVariable!(fg, :x0, SpecialEuclidean2)
img_ = rand(10,10).+5.0
x_,y_ = ([-9:2.0:9;],[-9:2.0:9;])
hmd = LevelSetGridNormal(img_, (x_,y_), 5.5, 0.1, N=120)
pthru = PartialPriorPassThrough(hmd, (1,2))
f0 = addFactor!(fg, [:x0], pthru, graphinit=false)
addVariable!(fg, :x1, SpecialEuclidean2)
# mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(@MVector([0.0,0.0]), @MMatrix([1.0 0.0; 0.0 1.0])), MvNormal([0.01, 0.01, 0.01]))
mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(Vector([0.0,0.0]), Matrix([1.0 0.0; 0.0 1.0])), MvNormal([0.01, 0.01, 0.01]))
f1 = addFactor!(fg, [:x1], mp, graphinit=false)
mf = ManifoldFactor(SpecialEuclidean(2), MvNormal([1,2,pi/4], [0.01,0.01,0.01]))
f2 = addFactor!(fg, [:x0, :x1], mf, graphinit=false)
##
bel, infd = propagateBelief(fg, v0, [f0;f2])
@test !isPartial(bel)
@test getSolverParams(fg).N == length(getPoints(bel))
##
initAll!(fg)
@test getSolverParams(fg).N == length(getPoints(fg, :x0))
@test getSolverParams(fg).N == length(getPoints(fg, :x1))
##
end
@testset "Test SpecialEuclidean(2) to TranslationGroup(2) multihypo" begin
##
fg = initfg()
# fg.solverParams.attemptGradients=false
v0 = addVariable!(fg, :x0, SpecialEuclidean2)
# mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(@MVector([0.0,0.0]), @MMatrix([1.0 0.0; 0.0 1.0])), MvNormal([0.01, 0.01, 0.01]))
mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(Vector([0.0,0.0]), Matrix([1.0 0.0; 0.0 1.0])), MvNormal([0.01, 0.01, 0.01]))
p = addFactor!(fg, [:x0], mp)
##
addVariable!(fg, :x1a, TranslationGroup2)
addVariable!(fg, :x1b, TranslationGroup2)
mf = ManiPose2Point2(MvNormal([1,2], [0.01,0.01]), [1;2])
f = addFactor!(fg, [:x0, :x1a, :x1b], mf; multihypo=[1,0.5,0.5])
solveTree!(fg)
vnd = getVariableSolverData(fg, :x0)
@test isapprox(SpecialEuclidean(2), mean(SpecialEuclidean(2), vnd.val), ArrayPartition([0.0,0.0], [1.0 0; 0 1]), atol=0.1)
#FIXME I would expect close to 50% of the particles to land in the correct place
# Currently the software is such that about 33% should land there, so testing for 15 for now
pnt = getPoints(fg, :x1a)
@test sum(isapprox.(pnt, Ref([1.0,2.0]), atol=0.1)) > 15
#FIXME I would expect close to 50% of the particles to land in the correct place
pnt = getPoints(fg, :x1b)
@test sum(isapprox.(pnt, Ref([1.0,2.0]), atol=0.1)) > 15
## other way around
fg = initfg()
fg.solverParams.attemptGradients=false
addVariable!(fg, :x0, SpecialEuclidean2)
addVariable!(fg, :x1a, TranslationGroup2)
addVariable!(fg, :x1b, TranslationGroup2)
# mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(@MVector([0.0,0.0]), @MMatrix([1.0 0.0; 0.0 1.0])), MvNormal([10, 10, 0.01]))
mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(Vector([0.0,0.0]), Matrix([1.0 0.0; 0.0 1.0])), MvNormal(zeros(3),diagm([10, 10, 0.01])))
p = addFactor!(fg, [:x0], mp)
mp = ManifoldPrior(TranslationGroup(2), [1.,0], MvNormal([0.01, 0.01]))
p = addFactor!(fg, [:x1a], mp)
mp = ManifoldPrior(TranslationGroup(2), [-1.,0], MvNormal([0.01, 0.01]))
p = addFactor!(fg, [:x1b], mp)
mf = ManiPose2Point2(MvNormal([0., 1], [0.01,0.01]), [1;2])
f = addFactor!(fg, [:x0, :x1a, :x1b], mf; multihypo=[1,0.5,0.5])
solveTree!(fg)
pnts = getPoints(fg, :x0)
# c = getCoordinates.(SpecialEuclidean2, pnts)
# @cast p[i,j] := c[i][j]
# scatter(p[:,1], p[:,2])
#FIXME
@error "Invalid multihypo test"
if false
# FIXME ManiPose2Point2 factor mean [1.,0] cannot go "backwards" from [0,0] to [-1,0] with covariance 0.01 -- wholly inconsistent test design
@test 10 < sum(isapprox.(Ref(SpecialEuclidean(2)), pnts, Ref(ArrayPartition([-1.0,0.0], [1.0 0; 0 1])), atol=0.5))
@test 10 < sum(isapprox.(Ref(SpecialEuclidean(2)), pnts, Ref(ArrayPartition([1.0,0.0], [1.0 0; 0 1])), atol=0.5))
end
##
end
@testset "Test SpecialEuclidean(2) to SpecialEuclidean(2) multihypo" begin
##
fg = initfg()
# fg.solverParams.attemptGradients=false
v0 = addVariable!(fg, :x0, SpecialEuclidean2)
# mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(@MVector([0.0,0.0]), @MMatrix([1.0 0.0; 0.0 1.0])), MvNormal([0.01, 0.01, 0.01]))
mp = ManifoldPrior(SpecialEuclidean(2), ArrayPartition(Vector([0.0,0.0]), Matrix([1.0 0.0; 0.0 1.0])), MvNormal([0.01, 0.01, 0.01]))
p = addFactor!(fg, [:x0], mp)
##
addVariable!(fg, :x1a, SpecialEuclidean2)
addVariable!(fg, :x1b, SpecialEuclidean2)
mf = ManifoldFactor(SpecialEuclidean(2), MvNormal([1,2,pi/4], [0.01,0.01,0.01]))
f = addFactor!(fg, [:x0, :x1a, :x1b], mf; multihypo=[1,0.5,0.5])
solveTree!(fg)
vnd = getVariableSolverData(fg, :x0)
@test isapprox(SpecialEuclidean(2), mean(SpecialEuclidean(2), vnd.val), ArrayPartition([0.0,0.0], [1.0 0; 0 1]), atol=0.1)
#FIXME I would expect close to 50% of the particles to land in the correct place
# Currently the software is such that about 33% should land there, so testing for 20 for now
pnt = getPoints(fg, :x1a)
@test sum(isapprox.(Ref(SpecialEuclidean(2)), pnt, Ref(ArrayPartition([1.0,2.0], [0.7071 -0.7071; 0.7071 0.7071])), atol=0.1)) > 20
#FIXME I would expect close to 50% of the particles to land in the correct place
pnt = getPoints(fg, :x1b)
@test sum(isapprox.(Ref(SpecialEuclidean(2)), pnt, Ref(ArrayPartition([1.0,2.0], [0.7071 -0.7071; 0.7071 0.7071])), atol=0.1)) > 20
##
end
#

using DistributedFactorGraphs
using IncrementalInference
using LineSearches
using Manifolds
using StaticArrays
using Test
##
@testset "Test SpecialOrthogonal(2) prior" begin
##
# see AMP v0.7.1+
# Base.convert(::Type{<:Tuple}, M::SpecialOrthogonal{2}) = (:Circular,)
# Base.convert(::Type{<:Tuple}, ::IIF.InstanceType{SpecialOrthogonal{2}}) = (:Circular,)
# Base.convert(::Type{<:Tuple}, ::Type{SpecialOrthogonal{2}}) = (:Circular,)
# @defVariable SpecialOrthogonal2 SpecialOrthogonal(2) @MMatrix([1.0 0.0; 0.0 1.0])
@defVariable SpecialOrthogonal2 SpecialOrthogonal(2) SMatrix{2,2}(1.0, 0.0, 0.0, 1.0)
M = getManifold(SpecialOrthogonal2)
@test M == SpecialOrthogonal(2)
pT = getPointType(SpecialOrthogonal2)
# @test pT == MMatrix{2, 2, Float64, 4}
@test pT == SMatrix{2,2,Float64,4}
pϵ = getPointIdentity(SpecialOrthogonal2)
@test pϵ == [1.0 0.0; 0.0 1.0]
@test is_point(getManifold(SpecialOrthogonal2), getPointIdentity(SpecialOrthogonal2))
fg = initfg()
v0 = addVariable!(fg, :x0, SpecialOrthogonal2)
mp = ManifoldPrior(SpecialOrthogonal(2), SA[1.0 0.0; 0.0 1.0], MvNormal([0.01]))
p = addFactor!(fg, [:x0], mp)
##
doautoinit!(fg, :x0)
vnd = getVariableSolverData(fg, :x0)
@test all(isapprox.(mean(vnd.val), [1 0; 0 1], atol=0.1))
@test all(is_point.(Ref(M), vnd.val))
##
v1 = addVariable!(fg, :x1, SpecialOrthogonal2)
mf = ManifoldFactor(SpecialOrthogonal(2), MvNormal([pi], [0.01]))
f = addFactor!(fg, [:x0, :x1], mf)
doautoinit!(fg, :x1)
##
# smtasks = Task[]
solveTree!(fg) #; smtasks, verbose=true, recordcliqs=ls(fg))
# hists = fetchCliqHistoryAll!(smtasks);
# SArray 0.763317 seconds (2.36 M allocations: 160.488 MiB, 4.16% gc time)
# Vector 0.786390 seconds (2.41 M allocations: 174.334 MiB, 3.97% gc time)
# Vector 0.858993 seconds (2.42 M allocations: 176.613 MiB, 3.43% gc time) sample not tuple
##
end
@testset "Test SpecialOrthogonal(3) prior" begin
##
# Base.convert(::Type{<:Tuple}, M::SpecialOrthogonal{3}) = (:Euclid, :Euclid, :Euclid)
# Base.convert(::Type{<:Tuple}, ::IIF.InstanceType{SpecialOrthogonal{3}}) = (:Euclid, :Euclid, :Euclid)
# @defVariable SO3 SpecialOrthogonal(3) @MMatrix([1.0 0.0; 0.0 1.0])
@defVariable SO3 SpecialOrthogonal(3) SMatrix{3,3}(1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0)
M = getManifold(SO3)
@test M == SpecialOrthogonal(3)
pT = getPointType(SO3)
# @test pT == MMatrix{2, 2, Float64, 4}
@test pT == SMatrix{3,3,Float64,9}
pϵ = getPointIdentity(SO3)
@test pϵ == [1.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0]
@test is_point(getManifold(SO3), getPointIdentity(SO3))
fg = initfg()
v0 = addVariable!(fg, :x0, SO3)
mp = ManifoldPrior(SpecialOrthogonal(3), SA[1.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0], MvNormal([0.01, 0.01, 0.01]))
p = addFactor!(fg, [:x0], mp)
doautoinit!(fg, :x0)
vnd = getVariableSolverData(fg, :x0)
@test all(isapprox.( mean(SpecialOrthogonal(3),vnd.val), [1 0 0; 0 1 0; 0 0 1], atol=0.01))
@test all(is_point.(Ref(M), vnd.val))
points = sampleFactor(fg, :x0f1, 100)
std(SpecialOrthogonal(3), points)
##
v1 = addVariable!(fg, :x1, SO3)
mf = ManifoldFactor(SpecialOrthogonal(3), MvNormal([0.01,0.01,0.01], [0.01,0.01,0.01]))
f = addFactor!(fg, [:x0, :x1], mf)
doautoinit!(fg, :x1)
vnd = getVariableSolverData(fg, :x1)
@test all(isapprox.( mean(SpecialOrthogonal(3),vnd.val), [0.9999 -0.00995 0.01005; 0.01005 0.9999 -0.00995; -0.00995 0.01005 0.9999], atol=0.01))
@test all(is_point.(Ref(M), vnd.val))
# smtasks = Task[]
solveTree!(fg) # ; smtasks, verbose=true, recordcliqs=ls(fg))
# test them again after solve
vnd = getVariableSolverData(fg, :x0)
@test all(isapprox.( mean(SpecialOrthogonal(3),vnd.val), [1 0 0; 0 1 0; 0 0 1], atol=0.01))
@test all(is_point.(Ref(M), vnd.val))
vnd = getVariableSolverData(fg, :x1)
@test all(isapprox.( mean(SpecialOrthogonal(3),vnd.val), [0.9999 -0.00995 0.01005; 0.01005 0.9999 -0.00995; -0.00995 0.01005 0.9999], atol=0.01))
@test all(is_point.(Ref(M), vnd.val))
# 23Q2 default HagerZhang fails with `AssertionError: isfinite(phi_c) && isfinite(dphi_c)`, using alternate LineSearch
IIF.solveGraphParametric!(
fg;
# algorithmkwargs=(;alphaguess = LineSearches.InitialStatic(), linesearch = LineSearches.MoreThuente()),
verbose=true
)
##
end

# test new sampling API
# TODO, this test might be obsolete given later changes and requirements added to getSample interface.
# The TAC is to figure out whether this functionality is still required, and to remove or explain the code and related tests like this file.
using Test
using IncrementalInference
using DistributedFactorGraphs
import IncrementalInference: getSample, getManifold
##
struct SpecialPrior{T <: SamplableBelief} <: AbstractPrior
z::T
end
getManifold(::SpecialPrior) = TranslationGroup(1)
getSample(s::CalcFactor{<:SpecialPrior}) = rand(s.factor.z,1)
struct SpecialLinearOffset{T <: SamplableBelief} <: AbstractManifoldMinimize
z::T
end
getManifold(::SpecialLinearOffset) = TranslationGroup(1)
function getSample(s::CalcFactor{<:SpecialLinearOffset})
return rand(s.factor.z,1)
end
function (s::CalcFactor{<:SpecialLinearOffset})(meas,
x1,
x2 )
#
meas .- (x2 .- x1)
end
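# minimal numeric check of the residual above: with x1 = [0.0], x2 = [10.0]
# and a sampled meas = [10.0], meas .- (x2 .- x1) == [0.0]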
##
@testset "test specialSampler functionality..." begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addFactor!(fg, [:x0], SpecialPrior(Normal()))
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x0;:x1], SpecialLinearOffset(Normal(10,1)))
tree = solveTree!(fg)
@test getPPE(fg, :x0).suggested[1] |> abs < 1.0
@test getPPE(fg, :x1).suggested[1] - 10 |> abs < 3.0
## special test for IIF #568
## Singleton (Prior)
fcm = map(x->x[1], IIF._getCCW(fg, :x0f1).measurement |> deepcopy)
pts = approxConv(fg, :x0f1, :x1)
fcm2 = map(x->x[1], IIF._getCCW(fg, :x0f1).measurement)
fcm3 = map(x->x[1], IIF._getCCW(fg, :x0f1).measurement |> deepcopy)
@test 0.1 < norm(fcm - fcm2)
@test norm(fcm2 - fcm3) < 1e-5
## Pairwise
# forward direction
fcm = map(x->x[1], IIF._getCCW(fg, :x0x1f1).measurement |> deepcopy)
pts = approxConv(fg, :x0x1f1, :x1)
fcm2 = map(x->x[1], IIF._getCCW(fg, :x0x1f1).measurement)
@test 0.1 < norm(fcm - fcm2)
# reverse direction
fcm, = map(x->x[1], IIF._getCCW(fg, :x0x1f1).measurement |> deepcopy)
pts = approxConv(fg, :x0x1f1, :x0)
fcm2, = map(x->x[1], IIF._getCCW(fg, :x0x1f1).measurement)
@test 0.04 < norm(fcm - fcm2)
##
end
#

using DistributedFactorGraphs
using IncrementalInference
using Manifolds
using StaticArrays
using Test
import Manifolds: identity_element
##
@testset "Test Sphere(2) prior and relative" begin
##
# NOTE Sphere{2} is not a Lie group so the identity element does not exist.
# this is for testing only and will be removed once upgraded to support any Riemannian Manifold.
DFG.getPointIdentity(::typeof(Sphere(2))) = SVector(1.0, 0.0, 0.0)
#FIXME REMOVE! this is type piracy and not a good idea, for testing only!!!
Manifolds.identity_element(::typeof(Sphere(2))) = SVector(1.0, 0.0, 0.0)
Manifolds.identity_element(::typeof(Sphere(2)), p::AbstractVector) = SVector(1.0, 0.0, 0.0) # Float64[1,0,0]
Base.convert(::Type{<:Tuple}, M::typeof(Sphere(2))) = (:Euclid, :Euclid)
Base.convert(::Type{<:Tuple}, ::IIF.InstanceType{typeof(Sphere(2))}) = (:Euclid, :Euclid)
@defVariable Sphere2 Sphere(2) SVector(1.0, 0.0, 0.0)
M = getManifold(Sphere2)
@test M == Sphere(2)
pT = getPointType(Sphere2)
@test pT == SVector{3,Float64}
pϵ = getPointIdentity(Sphere2)
@test pϵ == [1.0, 0.0, 0.0]
@test is_point(getManifold(Sphere2), getPointIdentity(Sphere2))
fg = initfg()
v0 = addVariable!(fg, :x0, Sphere2)
mp = ManifoldPrior(Sphere(2), SA[1., 0, 0], MvNormal(Diagonal(map(abs2, [0.01, 0.01]))), DefaultOrthonormalBasis(), ExponentialRetraction())
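# the explicit basis and retraction arguments are supplied here, presumably
# because Sphere(2) is not a Lie group (see the NOTE above)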
p = addFactor!(fg, [:x0], mp)
doautoinit!(fg, :x0)
vnd = getVariableSolverData(fg, :x0)
@test all(isapprox.(mean(M, vnd.val), [1,0,0], atol=0.1))
@test all(is_point.(Ref(M), vnd.val))
v1 = addVariable!(fg, :x1, Sphere2)
mf = ManifoldFactor(Sphere(2), MvNormal([0.1, 0.2], [0.05,0.05]))
f = addFactor!(fg, [:x0, :x1], mf)
##
smtasks = Task[]
solveTree!(fg; smtasks)
#
p = SA[1.,0,0]
X = get_vector(M, p, SA[0.1,0.2], DefaultOrthonormalBasis())
q = exp(M, p, X)
vnd = getVariableSolverData(fg, :x1)
mn_ = mean(M, vnd.val)
@info "isapprox" q mn_
@test all(isapprox.(mn_, q, atol=0.05))
@test all(is_point.(Ref(M), vnd.val))
##
end
using IncrementalInference
import IncrementalInference: emptyState
using Test
## User state functions
function bar!(usrdata)
println("do bar!")
return IncrementalInference.exitStateMachine
end
function foo!(usrdata)
println("do foo!")
return bar!
end
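# state machine protocol: each state function receives the user data and
# returns the next state function to run; returning
# IncrementalInference.exitStateMachine (as bar! does) terminates the machine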
@testset "Test IIF's generic state machine..." begin
statemachine = StateMachine{Nothing}(next=foo!)
while statemachine(nothing, verbose=true); end
@test statemachine.next == emptyState
statemachine = StateMachine{Nothing}(next=foo!)
while statemachine(nothing, verbose=false); end
@test statemachine.next == emptyState
statemachine = StateMachine{Nothing}(next=foo!)
while statemachine(nothing, breakafter=foo!); end
@test statemachine.next == bar!
statemachine = StateMachine{Nothing}(next=foo!)
while statemachine(nothing, iterlimit=1); end
@test statemachine.next == bar!
statemachine = StateMachine{Nothing}(next=bar!)
while statemachine(nothing, verbose=true); end
@test statemachine.next == emptyState
end
#

# Simple examples to illustrate how to obtain a Bayes (junction) tree
# with beautiful TeX labels in `.dot` and `.tex` format.
using Test
using IncrementalInference
# EXPERIMENTAL FEATURE, 4Q19: need `sudo apt install dot2tex`
import IncrementalInference: generateTexTree
@testset "testing generateTexTree" begin
# Create a dummy factor graph, with variables and constraints.
fg = initfg()
# Add four pose variables, with 'x' symbol.
addVariable!(fg, :x1, ContinuousScalar)
addVariable!(fg, :x2, ContinuousScalar)
addVariable!(fg, :x3, ContinuousScalar)
addVariable!(fg, :x4, ContinuousScalar)
# Add two landmark variables, with 'l' symbol.
addVariable!(fg, :l1, ContinuousScalar)
addVariable!(fg, :l2, ContinuousScalar)
# Add the pose chain constraints (odometry and priors).
addFactor!(fg, [:x1], Prior(Normal()))
addFactor!(fg, [:x1;:x2], LinearRelative(Normal()))
addFactor!(fg, [:x2;:x3], LinearRelative(Normal()))
addFactor!(fg, [:x3;:x4], LinearRelative(Normal()))
# Add the pose-landmark constraints (range measurements)
addFactor!(fg, [:x1;:l1], LinearRelative(Normal()))
addFactor!(fg, [:x2;:l1], LinearRelative(Normal()))
addFactor!(fg, [:x3;:l1], LinearRelative(Normal()))
addFactor!(fg, [:x2;:l2], LinearRelative(Normal()))
addFactor!(fg, [:x3;:l2], LinearRelative(Normal()))
addFactor!(fg, [:x4;:l2], LinearRelative(Normal()))
# Let's take a peek to see how our factor graph looks like.
# drawGraph(fg, show=true)
# As well as our tree (AMD ordering)
tree = buildTreeReset!(fg)
# drawTree(tree, show=true, imgs=false)
# Now, let's generate the corresponding `.dot` and `.tex`.
texTree = generateTexTree(tree, filepath=joinpath(@__DIR__,"tmp","bt"))
# All you have to do now is compile your newly created `.tex` file, probably
# include the `bm` package (`\usepackage{bm}`), and enjoy!
# fake a count of 1, since we are actually testing generateTexTree
@test true
end # testset

using DistributedFactorGraphs
using IncrementalInference
using Manifolds
using StaticArrays
using Test
@testset "Test TranslationGroup(2)" begin
## ======================================================================================
##
## ======================================================================================
Base.convert(::Type{<:Tuple}, M::TranslationGroup{Tuple{2},ℝ}) = (:Euclid, :Euclid)
Base.convert(::Type{<:Tuple}, ::IIF.InstanceType{TranslationGroup{Tuple{2},ℝ}}) = (:Euclid, :Euclid)
@defVariable TranslationGroup2 TranslationGroup(2) [0.0, 0.0]
getManifold(TranslationGroup2)
getPointType(TranslationGroup2)
getPointIdentity(TranslationGroup2)
fg = initfg()
v0 = addVariable!(fg, :x0, TranslationGroup2)
v1 = addVariable!(fg, :x1, TranslationGroup2)
mp = ManifoldPrior(TranslationGroup(2), SA[10., 20], MvNormal([1.0,1.0]))
p = addFactor!(fg, [:x0], mp; graphinit=true)
doautoinit!(fg, :x0)
mf = ManifoldFactor(TranslationGroup(2), MvNormal([1.0, 2.0], [0.1,0.1]))
f = addFactor!(fg, [:x0, :x1], mf)
solveGraph!(fg)
end

using IncrementalInference
using Test
@testset "Test delete tree clique" begin
fg = generateGraph_LineStep(3;
poseEvery=1,
landmarkEvery=3,
posePriorsAt=[],
landmarkPriorsAt=[0],
sightDistance=2,
solverParams=SolverParams(algorithms=[:default, :parametric]))
# getSolverParams(fg).graphinit = false
# getSolverParams(fg).treeinit = true
# getSolverParams(fg).dbg = true
# getSolverParams(fg).useMsgLikelihoods = true
# getSolverParams(fg).dbg = true
# getSolverParams(fg).drawtree = true
# getSolverParams(fg).showtree = true
smtasks = Task[]
oldtree = solveTree!(fg; smtasks=smtasks, verbose=false, recordcliqs=ls(fg));
@test IIF.isRoot(oldtree, IIF.CliqueId(1))
@test IIF.isRoot(oldtree, IIF.getClique(oldtree,1))
@test !IIF.isRoot(oldtree, IIF.CliqueId(2))
@test !IIF.isRoot(oldtree, IIF.CliqueId(3))
IIF.deleteClique!(oldtree, IIF.CliqueId(1))
@test IIF.isRoot(oldtree, IIF.CliqueId(2))
@test IIF.isRoot(oldtree, IIF.CliqueId(3))
@test IIF.getClique(oldtree, :x0) == IIF.getClique(oldtree, IIF.CliqueId(3)) == IIF.getClique(oldtree, 1)
@test IIF.getClique(oldtree, :x3) == IIF.getClique(oldtree, IIF.CliqueId(2)) == IIF.getClique(oldtree, 2)
# drawTree(oldtree, show=true)
tree = solveTree!(fg, oldtree; smtasks=smtasks, verbose=false, recordcliqs=ls(fg));
# csmAnimate(tree, hists, frames=1)
end
@testset "test tree show and list functions" begin
##
fg = generateGraph_Kaess(graphinit=false)
eo = [:l2; :l1; :x1; :x2; :x3]
tree = buildTreeReset!(fg, eo)
show(tree)
show(tree[:x3])
show(tree[:x1])
show(tree[:l2])
rtl = ls(tree)
# test the return is same structure and relatively sorted
@test 3 == length(rtl)
@test rtl[1][1] == 1
@test intersect(rtl[1][2], [:x3; :x2]) |> length == 2
@test rtl[2][1] == 2
@test intersect(rtl[2][2], [:x1; :l1]) |> length == 2
@test rtl[3][1] == 3
@test intersect(rtl[3][2], [:l2;]) |> length == 1
# test individual tree list
# the root clique with two children
rtl1 = ls(tree, 1)
rtl1 = ls(tree, :x3)
@test rtl1.parent |> length == 0
@test rtl1.children |> length == 2
@test intersect( (x->x[1]).(rtl1.children), [2,3]) |> length == 2
@test intersect(rtl1.children[1][2], [:x1; :l1]) |> length == 2
@test intersect(rtl1.children[2][2], [:l2]) |> length == 1
# first leaf clique with a parent
rtl2 = ls(tree, 2)
rtl2 = ls(tree, :x1)
rtl2 = ls(tree, :l1)
@test rtl2.parent |> length == 1
@test rtl2.children |> length == 0
@test intersect( (x->x[1]).(rtl2.parent), [1]) |> length == 1
@test length(rtl2.children) == 0
# second leaf clique with a parent
rtl3 = ls(tree, 3)
rtl3 = ls(tree, :l2)
@test rtl3.parent |> length == 1
@test rtl3.children |> length == 0
@test intersect( (x->x[1]).(rtl3.parent), [1]) |> length == 1
@test length(rtl3.children) == 0
##
end
##
using Test
using IncrementalInference
@testset "Testing tree message utils" begin
N=8
fg = generateGraph_LineStep(N;
graphinit=false,
poseEvery=1,
landmarkEvery=N+1,
posePriorsAt=[0],
landmarkPriorsAt=[],
sightDistance=N+1)
deleteFactor!.(fg, [Symbol("x$(i)lm0f1") for i=1:(N-1)])
# fixed eliminationOrder for repeatability
eliminationOrder = [:x3, :x8, :x5, :x1, :x6, :lm0, :x7, :x4, :x2, :x0]
smtasks = Task[]
tree = solveTree!(fg; smtasks=smtasks, eliminationOrder=eliminationOrder)
allmsgs = getTreeCliqUpMsgsAll(tree)
#TODO better test but for now spot check a few keys
@test issetequal(collect(2:8), keys(allmsgs))
belief2 = allmsgs[2].belief
@test issetequal([:x0, :x4], keys(belief2))
stackedmsgs = stackCliqUpMsgsByVariable(tree, allmsgs)
#everything except leaf frontals
allvars = [:lm0, :x0, :x2, :x4, :x6, :x7]
@test issetequal(allvars, keys(stackedmsgs))
@test length(stackedmsgs[:x0]) == 3
x4stack = stackedmsgs[:x4]
cliq_depths = map(v->v.cliqId[]=>v.depth, x4stack)
@test issetequal(cliq_depths, [4 => 2, 6 => 3, 2 => 1, 8 => 1])
c3 = getClique(tree, IIF.CliqueId(3))
c7 = getClique(tree, IIF.CliqueId(7))
@test IIF.compare(IIF.getMessageUpRx(c3)[7], IIF.getMessageUpTx(c7))
@test IIF.compare(IIF.getMessageDownTx(c3), IIF.getMessageDownRx(c7))
end

# test saving and loading of trees
using Test
using IncrementalInference
using MetaGraphs
using Graphs
##
@testset "Test loading and saving of Bayes (Junction) tree" begin
##
fg = generateGraph_Kaess(graphinit=false)
tree = buildTreeReset!(fg)
# save and load tree
saveTree(tree)
tree2 = loadTree()
# perform a few spot checks to see that the trees are similar
@test length(tree.cliques) == length(tree2.cliques)
@test getEliminationOrder(tree) == getEliminationOrder(tree2)
for (clid,cl) in tree.cliques
fsyms = getFrontals(cl)
cl2 = getClique(tree2, fsyms[1])
fsyms2 = getFrontals(cl2)
@test fsyms == fsyms2
@test getCliqSeparatorVarIds(cl) == getCliqSeparatorVarIds(cl2)
@test typeof(cl) == typeof(cl2)
end
##
end
@testset "Test loading and saving of Bayes (Junction) tree" begin
##
fg = generateGraph_Kaess(graphinit=false)
tree = buildTreeReset!(fg)
# save and load tree as array
filepath = saveTree([tree;deepcopy(tree)])
trees = loadTree(filepath)
# perform a few spot checks to see that the trees are similar
@test length(tree.cliques) == length(trees[1].cliques)
@test getEliminationOrder(tree) == getEliminationOrder(trees[1])
for (clid,cl) in tree.cliques
fsyms = getFrontals(cl)
cl2 = getClique(trees[1], fsyms[1])
fsyms2 = getFrontals(cl2)
@test fsyms == fsyms2
@test getCliqSeparatorVarIds(cl) == getCliqSeparatorVarIds(cl2)
@test typeof(cl) == typeof(cl2)
end
##
end

##
# using Revise
using Test
using IncrementalInference
##
@testset "Test differential factors for MKD sampling types (useMsgLikelihoods" begin
##
fg = generateGraph_CaesarRing1D()
getSolverParams(fg).useMsgLikelihoods = true
## test getSample
fct = fg[:x0x1f1]
fT = getFactorType(fct)
@test fT isa LinearRelative
@test fT.Z isa Normal
##
M = getManifold(fT)
X = sampleTangent(M, fT.Z)
@test X isa AbstractVector{<:Real}
z = sampleFactor(fct)[1]
@test z isa AbstractVector{<:Real}
##
initAll!(fg)
##
# fl = lsf(fg) |> sortDFG
X_ = approxDeconvBelief(fg,:x0f1)
X_ = approxDeconvBelief(fg,:x0x1f1)
##
eliminationOrder = [:x3,:x5,:l1,:x1,:x6,:x4,:x2,:x0]
tree = buildTreeReset!(fg, eliminationOrder)
##
# cfg = buildCliqSubgraph(fg, tree[6])
# st = IIF._generateMsgJointRelativesPriors(cfg, :default, tree[6])
cfg = buildCliqSubgraph(fg, tree[5])
st = IIF._generateMsgJointRelativesPriors(cfg, :default, tree[5])
beliefMsg5 = IIF.prepCliqueMsgUp(cfg, tree[5], :default, IIF.UPSOLVED)
cfg = buildCliqSubgraph(fg, tree[4])
st = IIF._generateMsgJointRelativesPriors(cfg, :default, tree[4])
beliefMsg4 = IIF.prepCliqueMsgUp(cfg, tree[4], :default, IIF.UPSOLVED)
# cfg = buildCliqSubgraph(fg, tree[3])
# st = IIF._generateMsgJointRelativesPriors(cfg, :default, tree[3])
cfg = buildCliqSubgraph(fg, tree[2])
@test 3 === length(ls(cfg))
@test 0 === length(lsf(cfg))
IIF.addMsgFactors!(cfg, beliefMsg4, IIF.UpwardPass)
IIF.addMsgFactors!(cfg, beliefMsg5, IIF.UpwardPass)
@test 2 === length(lsf(cfg))
##
fct = cfg[:x0x6f1]
fT = getFactorType(fct)
@test fT isa LinearRelative
@test fT.Z isa MKD
##
M = getManifold(fT.Z)
X = sampleTangent(M, fT.Z)
@test X isa Vector{<:Real}
z = sampleFactor(fct)[1]
@test z isa Vector{<:Real}
##
childmsgs = LikelihoodMessage[]
retdict = IIF.upGibbsCliqueDensity(cfg, tree[2], :default, childmsgs)
# st = IIF._generateMsgJointRelativesPriors(cfg, :default, tree[2])
# cfg = buildCliqSubgraph(fg, tree[1])
# st = IIF._generateMsgJointRelativesPriors(cfg, :default, tree[1])
##
getSolverParams(fg).downsolve = false
solveGraph!(fg)
##
end

# test for variations in DOF solving, #227 #316 #430
# using Revise
using IncrementalInference
using Test
##
@testset "Test for variations in N while solving" begin
##
fg = generateGraph_CaesarRing1D(graphinit=true)
##
try
pts_ = approxConv(fg, :x0x1f1, :x1, N=101)
@test length(pts_) == 101
catch
# allow one retry, vary rarely has consecutive optimization failure
pts_ = approxConv(fg, :x0x1f1, :x1, N=101)
@test length(pts_) == 101
end
##
@error "MUST RESTORE SOLVE WITH DIFFERENT SIZE N, see #1722"
if false
# Change to N=150 AFTER constructing the graph, so solver must update the belief sample values during inference
getSolverParams(fg).N = 150
# getSolverParams(fg).multiproc = false
# getSolverParams(fg).async = false
smtasks = Task[]
tree = solveTree!(fg; smtasks, recordcliqs=ls(fg));
##
pts_ = getBelief(fg, :x1) |> getPoints
@test length(pts_) == 150
@test length(pts_[1]) == 1
##
# test with change for second solve
getSolverParams(fg).N = 200
tree = solveTree!(fg)
pts_ = getBelief(fg, :x1) |> getPoints
@test length(pts_) == 200
@test length(pts_[1]) == 1
println("test making N smaller than current")
getSolverParams(fg).N = 99
tree = solveTree!(fg)
pts_ = getBelief(fg, :x1) |> getPoints
@warn "removing older solve N size test, likely to be reviewed and updated to new workflow in the future"
@test length(pts_) == 99
@test length(pts_[1]) == 1
end
##
end
#

# packing and unpacking of Graph related types
using IncrementalInference
using DistributedFactorGraphs
using Manifolds
using Test
##
@testset "Serialization of SamplableBelief types" begin
##
td = Uniform()
ptd = convert(String, td) # TODO, PackedSamplableBelief
utd = convert(SamplableBelief, td)
@test td.a - utd.a |> abs < 1e-10
@test td.b - utd.b |> abs < 1e-10
##
end
##
fg = initfg()
N=100
# doors = zeros(1,4)
T = ContinuousScalar
doors = [[-100.0;],[0.0;],[100.0;],[300.0;]]
pd = manikde!(T, doors; bw=[3.0;])
pd = resample(pd,N);
bws = getBW(pd)[:,1]
doors2 = getPoints(pd);
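# the resampled density supplies N door hypotheses (doors2) and their fitted
# bandwidths (bws), which seed the Prior belief packed and unpacked below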
v1 = addVariable!(fg,:x1, ContinuousScalar,N=N)
f1 = addFactor!(fg,[:x1], Prior(manikde!(T, doors2; bw=bws)))
v2 = addVariable!(fg,:x2, ContinuousScalar, N=N)
lc = LinearRelative( Normal(50.0, 2.0) )
f2 = addFactor!(fg, [:x1; :x2], lc)
##
@testset "Testing conversion to packed function node data structure and back" begin
##
topack = getSolverData(f1)
dd = convert(PackedFunctionNodeData{PackedPrior},topack)
upd = reconstFactorData(fg, [:x1;], FunctionNodeData{CommonConvWrapper{Prior}}, dd)
@test compare(topack, upd)
topack = getSolverData(f2)
dd = convert(IncrementalInference.PackedFunctionNodeData{PackedLinearRelative},topack)
upd = reconstFactorData(fg, [:x1;:x2], IncrementalInference.FunctionNodeData{CommonConvWrapper{LinearRelative}}, dd)
@test compare(topack, upd)
##
end
@testset "Testing conversion to packed variable node data structure and back" begin
##
dat = getSolverData(getVariable(fg,:x1))
# dat.BayesNetVertID
pd = packVariableNodeData(dat)
unpckd = unpackVariableNodeData(pd)
@test compareFields(dat, unpckd, skip=[:variableType])
@test compareFields(getVariableType(dat), getVariableType(unpckd))
@test isa(getVariableType(dat), ContinuousScalar)
@test isa(getVariableType(unpckd), ContinuousScalar)
##
end
@testset "test serialization of ManifoldKernelDensity" begin
##
# create a basic manifoldkerneldensity
mkd = manikde!(TranslationGroup(2), [randn(2) for _ in 1:100])
# convert up and down
st = convert(String, mkd) # TODO, PackedSamplableBelief
upk = convert(SamplableBelief, st)
# and check the basics
@test isapprox( getPoints(mkd)[1], getPoints(upk)[1])
@test isapprox( getPoints(mkd)[end], getPoints(upk)[end])
@test mkd.manifold == upk.manifold
@test mkd._partial == upk._partial
@test mkd.infoPerCoord == upk.infoPerCoord
##
end
#
using IncrementalInference
using Statistics
using Test
using TensorCast
##
N=100
fg = initfg()
doors = [[0.0;],]
pd = manikde!(ContinuousScalar, doors; bw=[3.0;])
pd = resample(pd, N);
bws = getBW(pd)[:,1]
doors2 = getPoints(pd);
##
@testset "test evaluation of pose pose constraint" begin
##
v1 = addVariable!(fg,:x1, ContinuousScalar,N=N)
f1 = addFactor!(fg,[v1], Prior( pd )) #, samplefnc=getSample
# tem = 2.0*randn(1,N)+getVal(v1)+50.0
v2 = addVariable!(fg,:x2, ContinuousScalar, N=N)
odoc = LinearRelative(Normal(50.0,2.0)) # Odo(50.0*ones(1,1),2.0*ones(1,1),[1.0])
f2 = addFactor!(fg, [:x1; :x2], odoc ) #, samplefnc=getSample
# @test isInitialized(fg, :x1)
pts_ = approxConv(fg, :x1x2f1, :x2)
@cast pts[i,j] := pts_[j][i]
# pts = evalFactor(fg, f2, v2.label)
@show Statistics.mean(pts,dims=2)
@test norm(Statistics.mean(pts,dims=2)-[50.0]) < 15.0
tree = solveTree!(fg)
pts_ = getVal(fg, :x2)
@cast pts[i,j] := pts_[j][i]
@test norm(Statistics.mean(pts,dims=2)-[50.0]) < 15.0
##
end
# using RoMEPlotting
#
# plotKDE( getBelief(fg,:x2) )
# plotKDE(kde!(pts))
#

# using Revise
using Test
using IncrementalInference, Distributions
using DistributedFactorGraphs
using Statistics
using TensorCast
# going to introduce two new constraint types
import Base: convert
import IncrementalInference: getSample, getManifold
##
mutable struct DevelopPrior{T <: SamplableBelief} <: AbstractPrior
# keeping to test user case using `.x` rather than default `.Z`
x::T
end
getManifold(dp::DevelopPrior) = TranslationGroup(getDimension(dp.x))
getSample(cf::CalcFactor{<:DevelopPrior}) = rand(cf.factor.x, 1)
mutable struct DevelopLikelihood{T <: SamplableBelief} <: AbstractManifoldMinimize
x::T
end
getManifold(dp::DevelopLikelihood) = TranslationGroup(getDimension(dp.x))
getSample(cf::CalcFactor{<:DevelopLikelihood}) = rand(cf.factor.x, 1)
(cf::CalcFactor{<:DevelopLikelihood})(meas, wXi, wXj) = meas - (wXj - wXi)
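# the residual is zero whenever wXj - wXi equals the sampled measurement, so
# DevelopLikelihood behaves like a scalar odometry/offset constraint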
##
N = 100
fg = initfg()
##
@testset "test populate factor graph with a multi-hypothesis factor..." begin
##
v1 = addVariable!(fg, :x1, ContinuousScalar, N=N)
pr = DevelopPrior(Normal(10.0,1.0))
f1 = addFactor!(fg,[:x1],pr)
initAll!(fg)
pts_, _ = evalFactor(fg, f1, v1.label, N=N)
@cast pts[i,j] := pts_[j][i]
@test sum(abs.(pts .- 1.0) .< 5) < 30
@test sum(abs.(pts .- 10.0) .< 5) > 30
v2 = addVariable!(fg, :x2, ContinuousScalar, N=N)
pp = DevelopLikelihood(Normal(100.0,1.0))
f2 = addFactor!(fg, [:x1;:x2], pp)
initAll!(fg)
pts_ = getVal(fg, :x2)
@cast pts[i,j] := pts_[j][i]
@test abs(Statistics.mean(pts)-110.0) < 10.0
v3 = addVariable!(fg, :x3, ContinuousScalar, N=N)
v4 = addVariable!(fg, :x4, ContinuousScalar, N=N)
ppMH = DevelopLikelihood(Normal(90.0,1.0))
f3 = addFactor!(fg, [:x2;:x3;:x4], ppMH, multihypo=[1.0;0.5;0.5])
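# multihypo semantics: the first listed variable (:x2) is certain (weight 1.0),
# while :x3 and :x4 are competing association hypotheses with equal 0.5 weight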
# @test IIIF._getCCW(f3).hypoverts == [:x3, :x4]
@test sum(abs.(IIF._getCCW(f3).hyporecipe.hypotheses.p[1] .- 0.0)) < 0.1 # 1.0 becomes 0.0 for computational convenience
@test sum(abs.(IIF._getCCW(f3).hyporecipe.hypotheses.p[2:3] .- 0.5)) < 0.1
initVariable!(fg, :x2, [1*ones(1) for _ in 1:N])
initVariable!(fg, :x3, [2*ones(1) for _ in 1:N])
initVariable!(fg, :x4, [3*ones(1) for _ in 1:N])
##
end
@testset "Test multi-hypothesis factor convolution exploration" begin
##
pts_ = approxConv(fg, :x2x3x4f1, :x2, N=N)
@cast pts[i,j] := pts_[j][i]
@test 99 < sum(pts .<= -70.0)
pts_ = approxConv(fg, :x2x3x4f1, :x3, N=N)
@cast pts[i,j] := pts_[j][i]
15 < sum(70 .< pts .< 110) < 75
pts_ = approxConv(fg, :x2x3x4f1, :x4, N=N)
@cast pts[i,j] := pts_[j][i]
15 < sum(70 .< pts .< 110) < 75
##
end
##
println("Packing converters")
Base.@kwdef mutable struct PackedDevelopPrior <: AbstractPackedFactor
x::PackedSamplableBelief
end
function convert(::Type{PackedDevelopPrior}, d::DevelopPrior)
PackedDevelopPrior(convert(PackedSamplableBelief, d.x))
end
function convert(::Type{DevelopPrior}, d::PackedDevelopPrior)
DevelopPrior(convert(SamplableBelief, d.x))
end
mutable struct PackedDevelopLikelihood <: AbstractPackedFactor
x::PackedSamplableBelief
end
function convert(::Type{PackedDevelopLikelihood}, d::DevelopLikelihood)
PackedDevelopLikelihood(convert(PackedSamplableBelief, d.x))
end
function convert(::Type{<:DevelopLikelihood}, d::PackedDevelopLikelihood)
DevelopLikelihood(convert(SamplableBelief, d.x))
end
##
@testset "test packing and unpacking the data structure" begin
##
topack = getSolverData(getFactor(fg,:x1f1))
dd = convert(PackedFunctionNodeData{PackedDevelopPrior},topack)
unpacked = reconstFactorData(fg, [:x1;], FunctionNodeData{CommonConvWrapper{DevelopPrior}},dd)
@test abs(IIF._getCCW(unpacked).usrfnc!.x.μ - 10.0) < 1e-10
@test abs(IIF._getCCW(unpacked).usrfnc!.x.σ - 1.0) < 1e-10
fct = getFactor(fg, :x2x3x4f1)
# @show typeof(fct)
topack = getSolverData(fct) # f3
dd = convert(PackedFunctionNodeData{PackedDevelopLikelihood},topack)
unpacked = reconstFactorData(fg, [:x2;:x3;:x4], FunctionNodeData{CommonConvWrapper{DevelopLikelihood}},dd)
# @test IIF._getCCW(unpacked).hypoverts == Symbol[:x3; :x4]
@test sum(abs.(IIF._getCCW(unpacked).hyporecipe.hypotheses.p[1] .- 0.0)) < 0.1
@test sum(abs.(IIF._getCCW(unpacked).hyporecipe.hypotheses.p[2:3] .- 0.5)) < 0.1
##
end
##
# start a new factor graph
N = 200
fg = initfg()
getSolverParams(fg).N = N
##
@testset "test tri-modal factor..." begin
##
v1 = addVariable!(fg, :x1, ContinuousScalar, N=N)
pr = DevelopPrior(Normal(10.0,1.0))
f1 = addFactor!(fg,[:x1],pr)
initAll!(fg)
@test length(getVal(fg, :x1)) == N
pts_ = approxConv(fg, Symbol(f1.label), :x1, N=N)
@cast pts[i,j] := pts_[j][i]
@test sum(abs.(pts .- 1.0) .< 5) < 30
@test sum(abs.(pts .- 10.0) .< 5) > 30
v2 = addVariable!(fg, :x2, ContinuousScalar, N=N)
pp = DevelopLikelihood(Normal(100.0,1.0))
f2 = addFactor!(fg, [:x1;:x2], pp)
initAll!(fg)
pts_ = getVal(fg, :x2)
@cast pts[i,j] := pts_[j][i]
@test abs(Statistics.mean(pts)-110.0) < 10.0
v3 = addVariable!(fg, :x3, ContinuousScalar, N=N)
v4 = addVariable!(fg, :x4, ContinuousScalar, N=N)
v5 = addVariable!(fg, :x5, ContinuousScalar, N=N)
ppMH = DevelopLikelihood(Normal(90.0,1.0))
f3 = addFactor!(fg, [:x2;:x3;:x4;:x5], ppMH, multihypo=[1.0,0.333,0.333,0.334])
# @test IIF._getCCW(f3).hypoverts == [:x3, :x4]
@test sum(abs.(IIF._getCCW(f3).hyporecipe.hypotheses.p[1] .- 0.0)) < 0.1 # 1.0 becomes 0.0 for computational convenience
@test sum(abs.(IIF._getCCW(f3).hyporecipe.hypotheses.p[2] .- 0.333)) < 0.001
@test sum(abs.(IIF._getCCW(f3).hyporecipe.hypotheses.p[3] .- 0.333)) < 0.001
@test sum(abs.(IIF._getCCW(f3).hyporecipe.hypotheses.p[4] .- 0.334)) < 0.001
initVariable!(fg, :x2 ,[1*ones(1) for _ in 1:N])
initVariable!(fg, :x3 ,[2*ones(1) for _ in 1:N])
initVariable!(fg, :x4 ,[3*ones(1) for _ in 1:N])
initVariable!(fg, :x5 ,[4*ones(1) for _ in 1:N])
# solve for certain idx
pts_ = approxConv(fg, :x2x3x4x5f1, :x2, N=N)
@cast pts[i,j] := pts_[j][i]
@test 0.95*N < sum(pts .<= -70.0)
# solve for one of uncertain variables
pts_ = approxConv(fg, :x2x3x4x5f1, :x3, N=N)
@cast pts[i,j] := pts_[j][i]
@test 0.1*N < sum(80 .< pts .< 100.0) < 0.5*N
# @test 0.1*N < sum(pts .== 3.0) < 0.5*N
# @test 0.1*N < sum(pts .== 4.0) < 0.5*N
# 0.7 to accommodate bad-init null hypo
# @test 0.5*N <= sum(70 .< pts .< 110.0) + sum(pts .== 3.0) + sum(pts .== 4.0)
# solve for one of uncertain variables
pts_ = approxConv(fg, :x2x3x4x5f1, :x4, N=N)
@cast pts[i,j] := pts_[j][i]
@test 0.1*N < sum(80 .< pts .< 100.0) < 0.5*N
# @test 0.1*N < sum(pts .== 2.0) < 0.5*N
# @test 0.1*N < sum(pts .== 4.0) < 0.5*N
# @test 0.5*N <= sum(80 .< pts .< 100.0) + sum(pts .== 2.0) + sum(pts .== 4.0)
# solve for one of uncertain variables
pts_ = approxConv(fg, :x2x3x4x5f1, :x5, N=N)
@cast pts[i,j] := pts_[j][i]
@test 0.1*N < sum(80 .< pts .< 100.0) < 0.5*N
# @test 0.1*N < sum(pts .== 2.0) < 0.5*N
# @test 0.1*N < sum(pts .== 3.0) < 0.5*N
# @test 0.5*N <= sum(80 .< pts .< 100.0) + sum(pts .== 2.0) + sum(pts .== 3.0)
##
end
@testset "test multihypo api numerical tolerance, #1086" begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousEuclid{1})
addVariable!(fg, :x1a, ContinuousEuclid{1})
addVariable!(fg, :x1b, ContinuousEuclid{1})
addFactor!(fg, [:x0;:x1a;:x1b], LinearRelative(Normal()), multihypo=[1; 0.5;0.4999999999999])
addFactor!(fg, [:x0;:x1a;:x1b], LinearRelative(Normal()), multihypo=[1; 0.5;0.5000000000001])
##
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 1429 | # test null hypothesis cases
using Test
using IncrementalInference
using TensorCast
# going to introduce two new constraint types
##
@testset "Post 237 without nullhypo" begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x0;], Prior(Normal()))
addFactor!(fg, [:x0;:x1], LinearRelative(Normal(10,1)))
solveTree!(fg)
pts_ = getBelief(fg, :x1) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test 80 < sum(2 .< pts .< 18)
##
end
@testset "Post 237 nullhypo on prior" begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addFactor!(fg, [:x0;], Prior(Normal(10,1)), nullhypo=0.5)
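# with nullhypo=0.5 roughly half the belief mass should follow the Prior at 10
# and the remainder is spread out as the null-hypothesis case; the counts
# below check that split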
solveTree!(fg)
pts_ = getBelief(fg, :x0) |> getPoints
@cast pts[i,j] := pts_[j][i]
@test 10 < sum(-15 .< pts .< 4) < 60
@test 30 < sum(4 .< pts .< 16) < 85
# @test 10 < sum(16 .< pts .< 60) < 60
##
end
@testset "Post 237 with nullhypo test" begin
##
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addVariable!(fg, :x1, ContinuousScalar)
addFactor!(fg, [:x0;], Prior(Normal()))
addFactor!(fg, [:x0;:x1], LinearRelative(Normal(10,1)), nullhypo=0.5)
pts_ = approxConv(fg, :x0x1f1, :x1)
@cast pts[i,j] := pts_[j][i]
@test 20 < sum(pts .< 5)
@test 20 < sum(5 .< pts .< 15)
solveTree!(fg)
pts2_ = getBelief(fg, :x1) |> getPoints
@cast pts2[i,j] := pts2_[j][i]
@test 30 < sum(8 .< pts2 .< 12) <= 80
@test 80 < sum(-10 .< pts2 .< 30)
##
end
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 8864 | # partial constraint development
using Test
using IncrementalInference
# going to introduce two new constraint mutable structs
using Statistics
using TensorCast
using Manifolds
import IncrementalInference: getManifold, getSample
##
mutable struct DevelopPartial{P <: Tuple} <: AbstractPrior
x::Distribution
partial::P
end
getSample(cf::CalcFactor{<:DevelopPartial}) = rand(cf.factor.x, 1)
getManifold(dp::DevelopPartial) = TranslationGroup(length(dp.partial))
#
mutable struct DevelopDim2 <: AbstractPrior
x::Distribution
end
getSample(cf::CalcFactor{<:DevelopDim2}) = rand(cf.factor.x, 1)
getManifold(dp::DevelopDim2) = TranslationGroup(getDimension(dp.x))
mutable struct DevelopPartialPairwise <: AbstractRelativeMinimize
x::Distribution
partial::Tuple
DevelopPartialPairwise(x::Distribution) = new(x, (2,))
end
getManifold(dp::IIF.InstanceType{DevelopPartialPairwise}) = TranslationGroup(length(dp.partial))
# getManifold(::IIF.InstanceType{DevelopPartialPairwise}) = TranslationGroup(1)
getSample(cf::CalcFactor{<:DevelopPartialPairwise}) = rand(cf.factor.x, 1)
function (dp::CalcFactor{<:DevelopPartialPairwise})(meas,
x1,
x2 )
#
return meas[1] - (x2[2]-x1[2])
end
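# note: partial=(2,) means this factor only constrains the second dimension of
# x1 and x2; the first dimension must be informed by other factors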
## prep
N=100
pr = DevelopDim2(MvNormal([0.0;0.0], diagm([0.01;0.01])))
dp = DevelopPartial(Normal(2.0, 1.0),(1,))
## build graph
fg = initfg()
v1 = addVariable!(fg,:x1,ContinuousEuclid{2}(),N=N)
f1 = addFactor!(fg,[:x1],pr)
f2 = addFactor!(fg,[:x1],dp, graphinit=false)
doautoinit!(fg, :x1)
##
@testset "test evaluation of full constraint prior" begin
##
pts_, _ = evalFactor(fg, f1, v1.label; N)
@cast pts[i,j] := pts_[j][i]
@test size(pts,1) == 2
@test size(pts,2) == N
@test norm(Statistics.mean(pts,dims=2)[1] .- [0.0]) < 0.3
##
end
@testset "test evaluation of partial constraint prior" begin
##
memcheck_ = getVal(v1)
@cast memcheck[i,j] := memcheck_[j][i]
X1pts_ = getVal(v1)
@cast X1pts[i,j] := X1pts_[j][i]
pts_ = approxConv(fg, getLabel(f2), :x1; N)
@cast pts[i,j] := pts_[j][i]
@test size(pts, 1) == 2
@test size(pts, 2) == N
@test norm(Statistics.mean(pts,dims=2)[1] .- [2.0]) < 0.75
# ensure the correct response from the partial prior: dim 1 is modified, dim 2 stays untouched
@test norm(X1pts[1,:] - pts[1,:]) > 2.0
@test norm(X1pts[2,:] - pts[2,:]) < 1e-10
@test norm(X1pts - memcheck) < 1e-10
##
end
@testset "check that partials are received through convolutions of prior" begin
##
# check that a partial belief is obtained
X1_ = approxConvBelief(fg, :x1f2, :x1)
@test isPartial(X1_)
##
end
@testset "test solving of factor graph" begin
##
getSolverParams(fg).N = N
tree = solveTree!(fg)
pts_ = getVal(fg, :x1)
@cast pts[i,j] := pts_[j][i]
@test norm(Statistics.mean(pts,dims=2)[1] .- [0.0]) < 0.4
@test norm(Statistics.mean(pts,dims=2)[2] .- [0.0]) < 0.4
##
end
# plotKDE(getBelief(fg, :x1),levels=3)
## partial relative gradient and graph
v2 = addVariable!(fg,:x2,ContinuousEuclid{2},N=N)
dpp = DevelopPartialPairwise(Normal(10.0, 1.0))
f3 = addFactor!(fg,[:x1;:x2],dpp)
dp2 = DevelopPartial( Normal(-20.0, 1.0), (1,) )
f4 = addFactor!(fg,[:x2;], dp2, graphinit=false)
doautoinit!(fg, :x2)
##
@testset "test partial info per coord through relative convolution (conditional)" begin
##
one_meas = [10.0;]
pts = ([0;0.0], [0;10.0])
gradients = FactorGradientsCached!(dpp, (ContinuousEuclid{2}, ContinuousEuclid{2}), one_meas, pts);
##
# check that the gradients can be calculated
J = gradients(one_meas, pts...)
@test size(J) == (4,4)
@test_broken norm(J - [0 0 0 0; 0 0 0 1; 0 0 0 0; 0 1 0 0] ) < 1e-4
## check perturbation logic
prtb = calcPerturbationFromVariable(gradients, [1=>[1;1]])
# self variation is taken as 0 at this time
@test isapprox( prtb[1], [0;0] )
# variable 1 influences 2 only through partial dimension 2 (as per DevelopPartialPairwise)
@test_broken isapprox( prtb[2], [0;1] )
## test evaluation through the convolution operation withing a factor graph
# add relative IPC calculation inside evalFactor
bel = approxConvBelief(fg, getLabel(f3), :x2)
@test isPartial(bel)
##
end
##
@testset "test evaluation of multiple simultaneous partial constraints" begin
global fg
##
initAll!(fg)
pts_ = approxConv(fg, :x1x2f1, :x2; N)
@cast pts[i,j] := pts_[j][i]
@test size(pts,1) == 2
@test_broken norm(Statistics.mean(pts,dims=2)[2] .- [10.0]) < 3.0
# not the same memory, ccw.varValsAll[][sfidx] is now a deepcopy as alternate destination memory
valx2_ = IIF._getCCW(fg[:x1x2f1]).varValsAll[][2] # getVal(fg, :x2)
@cast valx2[i,j] := valx2_[j][i]
@test norm(valx2[1,:] - pts[1,:]) < 1e-5
pts_ = approxConv(fg, :x2f1, :x2; N)
@cast pts[i,j] := pts_[j][i]
@test size(pts,1) == 2
@test norm(Statistics.mean(pts,dims=2)[1] .- [-20.0]) < 0.75
@test (Statistics.std(pts,dims=2)[1] .- 1.0) < 0.4
##
end
##
# keep previous values to ensure function evaluation is modifying correct data fields
@warn "restore calcProposalBelief as testset!"
# @testset "test calcProposalBelief..." begin
# global v2, fg, f3, f4, N
thefac = getFactor(fg, :x1x2f1)
X2lpts_ = getVal(getVariable(fg, :x2))
@cast X2lpts[i,j] := X2lpts_[j][i]
keepaside, = (calcProposalBelief(fg, thefac, :x2; N),)
@test Ndim(keepaside) == 2
lpts_ = getPoints(keepaside, false)
@cast lpts[i,j] := lpts_[j][i]
@test length(lpts_) == N
@show X2lpts[2,95:100]
@show lpts[2,95:100]
@show getPoints(keepaside)
##
# DevelopPartialPairwise must only modify the second dimension of proposal distribution on X2
@test norm(X2lpts[1,:] - lpts[1,:]) < 1e-10
# @test norm(X2lpts[2,:] - lpts[2,:]) > 1e-10 # 10*N*0.5 # why so big?
memcheck_ = getVal(v2)
@cast memcheck[i,j] := memcheck_[j][i]
@test norm(X2lpts - memcheck) < 1e-10
X2lpts_ = getVal(v2)
@cast X2lpts[i,j] := X2lpts_[j][i]
p4 = calcProposalBelief(fg, f4, v2.label; N)
@test Ndim(p4) == 2
lpts_ = getPoints(keepaside, false)
@cast lpts[i,j] := lpts_[j][i]
@test length(lpts_) == N
# DevelopPartialPairwise must only modify the second dimension of proposal distribution on X2
@test norm(X2lpts[1,:] - lpts[1,:]) < 1e-10
@test norm(X2lpts[2,:] - lpts[2,:]) > 1e-10 # 10*N*0.5 # why so big?
memcheck_ = getVal(v2)
@cast memcheck[i,j] := memcheck_[j][i]
@test norm(X2lpts - memcheck) < 1e-10
# end
##
@testset "test belief prediction with partials..." begin
##
global v2, fg
# partial prior
X2pts_ = getVal(v2)
@cast X2pts[i,j] := X2pts_[j][i]
# NOTE, SUPER IMPORTANT, predictbelief returns full dimension points (even if only partials are sent in for proposals)
valB, = propagateBelief(fg, v2, [f4]; N)
val_ = getPoints(valB, false)
@cast val[i,j] := val_[j][i]
@show X2pts_[1]';
@show val_[1]';
@test norm(X2pts[2,:] - val[2,:]) < 1e-10
@test 0.0 < norm(X2pts[1,:] - val[1,:])
@test norm(Statistics.mean(val[1,:]) .+ 20.0) < 0.75
# partial pairwise
X2pts_ = getVal(v2)
@cast X2pts[i,j] := X2pts_[j][i]
valB, = propagateBelief(fg, v2, [f3]; N)
val_ = getPoints(valB, false)
@cast val[i,j] := val_[j][i]
@test norm(X2pts[1,:] - val[1,:]) < 1e-10
@test 0.0 < norm(X2pts[2,:] - val[2,:])
val2_ = getVal(v1)
@cast val2[i,j] := val2_[j][i]
@test_broken abs(Statistics.mean(val[2,:] - val2[2,:]) .- 10.0) < 0.75
##
# combination of partials
valB, = propagateBelief(fg, v2, [f3;f4]; N)
val_ = getPoints(valB, false)
@cast val[i,j] := val_[j][i]
# plotKDE(kde!(val),levels=3)
@test norm(Statistics.mean(val,dims=2)[1] .- [-20.0]) < 1
@error "restore inconsistent test result (not always broken)"
if false
@test_broken norm(Statistics.mean(val,dims=2)[2] .- [10.0]) < 0.01
end
@test (Statistics.std(val,dims=2)[1] .- 1.0) < 3.0
@test_broken (Statistics.std(val,dims=2)[2] .- 1.0) < 3.0
##
getSolverParams(fg).N = N
tree = solveTree!(fg)
pts_ = getVal(fg, :x1)
@cast pts[i,j] := pts_[j][i]
@test norm(Statistics.mean(pts,dims=2)[1] .- [0.0]) < 0.5
@test norm(Statistics.mean(pts,dims=2)[2] .- [0.0]) < 0.5
pts_ = getVal(fg, :x2)
ppe = getPPE(fg, :x2).mean
X2 = getBelief(fg, :x2)
# check mean is close
@test_broken isapprox(mean(X2), [-20;10], atol=0.01)
# check covariance is close too
@test 0 < var(getManifold(X2), getPoints(X2))
@cast pts[i,j] := pts_[j][i]
@test (Statistics.std(pts,dims=2)[1]-1.0) < 3.0
@test_broken (Statistics.std(pts,dims=2)[2]-1.0) < 3.0
##
end
@testset "Test number of samples returned, N=75" begin
##
pr = DevelopDim2(MvNormal([0.0;0.0], diagm([0.01;0.01])))
dp = DevelopPartial(Normal(2.0, 1.0),(1,))
#
fg = initfg()
v1 = addVariable!(fg,:x1,Position{2}(),N=N)
f1 = addFactor!(fg,[:x1], pr, graphinit=false)
# force particular initialization
u0 = getPointIdentity(Position{2})
arr = push!(Vector{typeof(u0)}(), u0)
setVal!(fg, :x1, arr)
##----------- sanity check that predictbelief plumbing is doing the right thing
nbel, = propagateBelief(fg, :x1, ls(fg, :x1), N=75)
@test_broken 75 == Npts(nbel)
##
end
# plotKDE(getBelief(fg, :x2),levels=3)
# spyCliqMat(tree, :x2)
#
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 376 |
using Test
mutable struct MyType
arr::Array{Float64,1}
var::Int
str::String
MyType(x...) = new(x[1],x[2],x[3])
MyType() = new()
end
function f(M::MyType)
mm = MyType()
mm.arr = M.arr
return mm
end
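# note: `mm.arr = M.arr` aliases the same array (no copy), so mutating the
# returned object's array must be visible through the original -- tested below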
@testset "Ensure memory return is working properly..." begin
m = MyType(rand(3),2,"hello")
mmm = f(m)
mmm.arr[1] = 1.0
@test m.arr[1] == 1.0
end
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 3676 |
# dev test script for factor gradients using ForwardDiff and ManifoldDiff
using ManifoldDiff
using Manifolds
using IncrementalInference
using StaticArrays
using Zygote
using Test
##
@testset "test factorJacobian" begin
##
# manual EuclidDistance
z = 10.0
f_eucliddist(x1,x2; z) = z - norm(x2 - x1)
x0, x1 = [0.0; 0.0], [10.0, 0.0]
J_ = Zygote.jacobian(()->f_eucliddist(x0, x1; z), Zygote.Params([x0, x1]))
Jx0 = J_[x0]
Jx1 = J_[x1]
##
fg = LocalDFG(;
solverParams = SolverParams(;
graphinit=false
)
)
addVariable!.(fg, [:x0; :x1], Position2)
f = addFactor!(fg, [:x0; :x1], EuclidDistance(Normal(z,1.0)))
p1 = [SA[x1[1];x1[2]] for _ in 1:1]
setVal!(fg, :x1, p1, solveKey=:parametric)
J = IIF.factorJacobian(fg, :x0x1f1)
@test isapprox( Jx0, J[1:1,1:2]; atol=1e-8)
@test isapprox( Jx1, J[1:1,3:4]; atol=1e-8)
##
end
##
# ##
# @testset "using RoME; FiniteDiff.jacobian of SpecialEuclidean(2) factor" begin
# ##
# fg = LocalDFG(;
# solverParams = SolverParams(;
# graphinit=false
# )
# )
# addVariable!.(fg, [:x0; :x1], Pose2)
# f = addFactor!(fg, [:x0; :x1], Pose2Pose2(MvNormal([10;0;pi/2],[1 0 0; 0 1 0; 0 0 1.0])))
# p1 = [ArrayPartition([10; 0.0], [0 1; -1 0.0]) for _ in 1:1]
# setVal!(fg, :x1, p1, solveKey=:parametric)
# J = IIF.factorJacobian(fg, :x0x1f1)
# @test isapprox( Jx0, J[1:1,1:2]; atol=1e-8)
# @test_broken isapprox( Jx1, J[1:1,3:4]; atol=1e-8)
# ##
# end
# ##
##
@testset "ManifoldDiff.jacobian of SpecialEuclidean(2) factor" begin
##
M = SpecialEuclidean(2)
z = ArrayPartition(SA[10.0; 0.0], SMatrix{2,2}(0.0, -1.0, 1.0, 0.0))
p1 = ArrayPartition(SA[0.0; 0.0], SMatrix{2,2}(1, 0, 0, 1.))
e0 = identity_element(M, p1)
p2 = exp(M, e0, hat(M, e0, [10,0,pi/2]))
function resid_SE2(X, p, q)
q̂ = Manifolds.compose(M, p, exp(M, identity_element(M, p), X)) #for groups
return vee(M, q, log(M, q, q̂))
end
# finitediff setup
r_backend = ManifoldDiff.TangentDiffBackend(
ManifoldDiff.FiniteDifferencesBackend()
)
Me = Euclidean(3)
function _factorJac!(J, z, p1, p2)
g1(p_) = resid_SE2(z, p_, p2)
g2(p_) = resid_SE2(z, p1, p_)
J[1:3,1:3] = ManifoldDiff.jacobian(M, Me, g1, p1, r_backend)
J[1:3,4:6] = ManifoldDiff.jacobian(M, Me, g2, p2, r_backend)
J
end
# f_SE2_z(z_) = resid_SE2(z_, e0, p2)
# f_SE2_x0(p_) = resid_SE2(z, e0, p_)
J = zeros(3,6)
J_ = _factorJac!(J, z, p1, p2)
# @profview _factorJac!(J, z, p1, p2)
if false
z_backend = ManifoldDiff.TangentDiffBackend(
ManifoldDiff.ZygoteDiffBackend()
)
g = ManifoldDiff.jacobian(M, Euclidean(3), f_SE2_x0, p1, z_backend)
else
@info "ManifoldDiff.ZygoteDiffBackend usage still under development (23Q3)"
end
##
end
# cost(p_) = distance(M, e0, p_) # ManifoldDiff.gradient(M, cost, p, r_backend)
# cost(p)
# ManifoldDiff.gradient(M, cost, p, r_backend)
# ##
# @testset "CODE STILL UNDER DEV:::Zygote on SpecialEuclidean(2)" begin
# ##
# # manual PosePose
# M = SpecialEuclidean(2)
# z = ArrayPartition(SA[10.0; 0.0], SMatrix{2,2}(0.0, -1.0, 1.0, 0.0))
# e0 = identity_element(M, z)
# # modified from IIF/test/testSpecialEuclidean2Mani.jl
# function f_SE2(X, p, q)
# q̂ = Manifolds.compose(M, p, exp(M, identity_element(M, p), X)) #for groups
# Xc = zeros(3)
# vee!(M, Xc, q, log(M, q, q̂))
# return Xc
# # return (Xc'*Xc)[1]
# end
# Xc_0 = [0.0; 0.0; 0.0] # deepcopy(e0)
# Xc_1 = [10.0; 0.0; pi/2] # deepcopy(z)
# J_ = Zygote.jacobian(
# ()->begin
# f_SE2(
# log(M, e0, z),
# exp(M, e0, hat(M, e0, Xc_0)),
# exp(M, e0, hat(M, e0, Xc_1))
# )
# end,
# Zygote.Params([Xc_0, Xc_1])
# )
# Jx0 = J_[x0]
# Jx1 = J_[x1]
# ##
# end | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | code | 5222 |
# using Revise
using Test
using LinearAlgebra
using IncrementalInference
using ManifoldsBase
using Manifolds, Manopt
import Optim
using FiniteDifferences, ManifoldDiff
import Rotations as _Rot
##
# finitediff setup
r_backend = ManifoldDiff.TangentDiffBackend(
ManifoldDiff.FiniteDifferencesBackend()
)
##
@testset "ManifoldDiff, Basic test" begin
##
# problem setup
n = 100
σ = π / 8
M = Manifolds.Sphere(2)
p = 1 / sqrt(2) * [1.0, 0.0, 1.0]
data = [exp(M, p, σ * rand(M; vector_at=p)) for i in 1:n];
# objective function
f(M, p) = sum(1 / (2 * n) * distance.(Ref(M), Ref(p), data) .^ 2)
# f_(p) = f(M,p)
# non-manual: intrinsic finite differences gradient
function grad_f_FD(M,p)
f_(p_) = f(M,p_)
ManifoldDiff.gradient(M, f_, p, r_backend)
end
# manual gradient
# grad_f(M, p) = sum(1 / n * grad_distance.(Ref(M), data, Ref(p)));
# and solve
@time m1 = gradient_descent(M, f, grad_f_FD, data[1])
@info "Basic Manopt test" string(m1')
@test isapprox(p, m1; atol=0.15)
##
end
##
"""
ManifoldWrapper{TM<:AbstractManifold} <: Optim.Manifold
Adapts Manifolds.jl manifolds for use in Optim.jl
"""
struct ManifoldWrapper{TM<:AbstractManifold} <: Optim.Manifold
M::TM
end
function Optim.retract!(M::ManifoldWrapper, x)
ManifoldsBase.embed_project!(M.M, x, x)
return x
end
function Optim.project_tangent!(M::ManifoldWrapper, g, x)
ManifoldsBase.embed_project!(M.M, g, x, g)
return g
end
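# Usage sketch (see the testsets below): pass the wrapper via the `manifold`
# keyword of a first-order Optim.jl method, e.g.
# Optim.optimize(f, g!, x0, Optim.ConjugateGradient(; manifold=ManifoldWrapper(M)))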
##
@testset "Optim.jl ManifoldWrapper example from mateuszbaran (copied to catch issues on future changes)" begin
##
# Example modified from: https://gist.github.com/mateuszbaran/0354c0edfb9cdf25e084a2b915816a09
# example usage of Manifolds.jl manifolds in Optim.jl
M = Manifolds.Sphere(2)
x0 = [1.0, 0.0, 0.0]
q = [0.0, 1.0, 0.0]
f(p) = 0.5 * distance(M, p, q)^2
# manual gradient
function g!(X, p)
log!(M, X, p, q)
X .*= -1
println(p, X)
end
##
sol = Optim.optimize(f, g!, x0, Optim.ConjugateGradient(; manifold=ManifoldWrapper(M)))
@test isapprox([0,1,0.], sol.minimizer; atol=1e-6)
## finitediff gradient (non-manual)
function g_FD!(X,p)
X .= ManifoldDiff.gradient(M, f, p, r_backend)
X
end
#
x0 = [1.0, 0.0, 0.0]
sol = Optim.optimize(f, g_FD!, x0, Optim.ConjugateGradient(; manifold=ManifoldWrapper(M)))
@test isapprox([0,1,0.], sol.minimizer; atol=1e-6)
##
# x0 = [1.0, 0.0, 0.0]
# # internal ForwardDfif doesnt work out the box on Manifolds
# sol = Optim.optimize(f, x0, Optim.ConjugateGradient(; manifold=ManifoldWrapper(M)); autodiff=:forward )
# @test isapprox([0,1,0.], sol.minimizer; atol=1e-8)
##
end
@testset "Modified Manifolds.jl ManifoldWrapper <: Optim.Manifold for SpecialEuclidean(2)" begin
##
M = Manifolds.SpecialEuclidean(2)
e0 = ArrayPartition([0,0.], [1 0; 0 1.])
x0 = deepcopy(e0)
Cq = 9*ones(3)
while 1.5 < abs(Cq[3])
@show Cq .= randn(3)
# Cq[3] = 1.5 # breaks ConjugateGradient
end
q = exp(M,e0,hat(M,e0,Cq))
f(p) = distance(M, p, q)^2
## finitediff gradient (non-manual)
function g_FD!(X,p)
X .= ManifoldDiff.gradient(M, f, p, r_backend)
X
end
## sanity check gradients
X = hat(M, e0, zeros(3))
g_FD!(X, q)
# gradient at the optimal point should be zero
@show X_ = [X.x[1][:]; X.x[2][:]]
@test isapprox(0, sum(abs.(X_)); atol=1e-8 )
# gradient not the optimal point should be non-zero
g_FD!(X, e0)
@show X_ = [X.x[1][:]; X.x[2][:]]
@test 0.01 < sum(abs.(X_))
## do optimization
x0 = deepcopy(e0)
sol = Optim.optimize(f, g_FD!, x0, Optim.ConjugateGradient(; manifold=ManifoldWrapper(M)))
Cq .= randn(3)
@show sol.minimizer
@test isapprox( f(sol.minimizer), 0; atol=1e-3 )
@test isapprox( 0, sum(abs.(log(M, e0, compose(M, inv(M,q), sol.minimizer)))); atol=1e-3)
##
end
@testset "Modified ManifoldsWrapper for Optim.Manifolds, SpecialEuclidean(3)" begin
##
M = Manifolds.SpecialEuclidean(3)
e0 = ArrayPartition([0,0,0.], Matrix(_Rot.RotXYZ(0,0,0.)))
x0 = deepcopy(e0)
Cq = 0.25*randn(6)
q = exp(M,e0,hat(M,e0,Cq))
f(p) = distance(M, p, q)^2
## finitediff gradient (non-manual)
function g_FD!(X,p)
X .= ManifoldDiff.gradient(M, f, p, r_backend)
X
end
## sanity check gradients
X = hat(M, e0, zeros(6))
g_FD!(X, q)
@show X_ = [X.x[1][:]; X.x[2][:]]
# gradient at the optimal point should be zero
@test isapprox(0, sum(abs.(X_)); atol=1e-6 )
# gradient not the optimal point should be non-zero
g_FD!(X, e0)
@show X_ = [X.x[1][:]; X.x[2][:]]
@test 0.01 < sum(abs.(X_))
## do optimization
x0 = deepcopy(e0)
sol = Optim.optimize(f, g_FD!, x0, Optim.ConjugateGradient(; manifold=ManifoldWrapper(M)))
# Cq .= 0.5*randn(6)
@show sol.minimizer
@test isapprox( f(sol.minimizer), 0; atol=1e-3 )
@test isapprox( 0, sum(abs.(log(M, e0, compose(M, inv(M,q), sol.minimizer)))); atol=1e-3)
##
end
@testset "Optim.Manifolds, SpecialEuclidean(3), using IIF.optimizeManifold_FD" begin
##
M = Manifolds.SpecialEuclidean(3)
e0 = ArrayPartition([0,0,0.], Matrix(_Rot.RotXYZ(0,0,0.)))
x0 = deepcopy(e0)
Cq = 0.5*randn(6)
q = exp(M,e0,hat(M,e0,Cq))
f(p) = distance(M, p, q)^2
sol = IncrementalInference.optimizeManifold_FD(M,f,x0)
@show sol.minimizer
@test isapprox( f(sol.minimizer), 0; atol=5e-3 )
@test isapprox( 0, sum(abs.(log(M, e0, compose(M, inv(M,q), sol.minimizer)))); atol=1e-3)
##
end | IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | docs | 9192 | # NEWS on IncrementalInference.jl Releases
General maintenance and bug-fix changes are currently tracked mostly via GitHub integrations; e.g. see the Milestones along with Label filters to quickly find specific issues.
- https://github.com/JuliaRobotics/IncrementalInference.jl/milestones?state=closed
Also see the automated TagBot release notes, e.g.:
- https://github.com/JuliaRobotics/IncrementalInference.jl/releases
Alternatively, either use the Github Blame, or the Github `/compare/v0.18.0...v0.19.0` API, e.g.:
- https://github.com/JuliaRobotics/IncrementalInference.jl/compare/v0.18.0...v0.19.0
The list below highlights breaking changes according to the normal semver workflow -- i.e. breaking changes go through at least one deprecation cycle (via warnings) on the dominant number in the version number. E.g. v0.18 -> v0.19 (warnings) -> v0.20 (breaking). Note that ongoing efforts are made to properly deprecate old code/APIs.
# Changes in v0.35
- Standardize toward Manopt.jl (currently Riemannian Levenberg-Marquardt), with Optim.jl legacy support retained (#1784, #1778).
- Much faster solves, both parametric and nonparametric (#1783, #1782, #1793).
- Better standardize relative factors to the use of tangent vectors (#1790).
- `CalcFactor` is now an abstract type with dedicated dispatches in various cases, e.g. `CalcFactorNormSq`, etc. (#1786).
- Bug fixes and inference improvements (#1781, #1785, #1789)
- Support for Julia 1.10.
- Extension usage of AMD.jl for `ccolamd` variable ordering features; dropped internal SuiteSparse calls following the Julia 1.10 removal of `SuiteSparse_long` (#1763).
- Further bug fixes for transition to `StaticArrays` value stores and computes, including `Position{N}` (#1779, #1776).
- Restore `DifferentialEquations.jl` factor `DERelative` functionality and tests that were suppressed in a previous upgrade (#1774, #1777).
- Restore previously suppressed tests (#1781, #1721, #1780)
- Improve DERelative factor on-manifold operations (#1775, #1802, #1803).
- Fixed a typo via deprecation, `solveFactorParametric` replaces `solveFactorParameteric`.
# Changes in v0.34
- Start transition to Manopt.jl via Riemannian Levenberg-Marquardt.
- Deprecate `AbstractRelativeRoots`.
- Standardization improvements surrounding weakdeps code extensions.
- Code quality improvements along with refactoring and reorganizing of file names and locations.
- Restoring `DERelative` factors, through v0.34.1 and v0.34.2.
- Switching to weakdep AMD.jl for the `ccolamd` dependency, part of the Julia 1.10 upgrade. Dropping the `SuiteSparse_long` dependency. Further fixes are necessary to restore full user-constrained tree variable-ordering functionality.
# Changes in v0.33
- Upgrades for DFG using StructTypes.jl (serialization).
# Changes in v0.32
- Major internal refactoring of `CommonConvWrapper` to avoid abstract field types, and better standardization; towards cleanup of internal multihypo handling and naming conventions.
- Internal refactoring removing several legacy fields from `CalcFactor`.
- All factors now require a `getManifold` definition.
- Now have `CalcFactor.manifold` to reduce new allocation load inside hot-loop for solving.
- Fixed testing issues in `testSpecialEuclidean2Mani.jl`.
- Refactored, consolidated, and added more in-place operations surrounding `ccw.measurement`.
- Refactor `CommonConvWrapper` to a mutable struct, with several cleanups and some updated compat requirements.
- Refactor internal hard type `HypoRecipe`.
- Add `MetaPrior` for adding meta data but not influencing the numerical solution.
# Changes in v0.31
- `FactorMetadata` is deprecated and replaced by `CalcFactor`.
- Updated `Base.deepcopy_internal` fix for use with Julia 1.8.1, see #1629.
- Added a few missing exports incl. `getTags` and `_update!`, see #1626 #1628.
- Refactoring to remove `FactorMetadata` (#1611) and `ConvPerThread` (#1615, #1625) objects, which is consolidated into `CalcFactor` and `CommonConvWrapper`.
- Added JuliaFormatter, see #1620.
- Add `SnoopPrecompile.jl` on a few basic solve features to start, see #1631.
- Support n-ary parametric solving such as OAS factors.
# Changes in v0.30
- `ArrayPartition` should be used instead of `ProductRepr`, see issue #1537.
- Remove old deprecated option keywords in `addVariable` and `addFactor`.
- Improve `IIF.solveGraphParametric`.
- Introduce `IIF.autoinitParametric!`.
- Upgrade `initAll!(dfg, :parametric)`.
- Refactor many files to subfolders `src/services` or `src/entities`.
# Changes in v0.29
- Upgrade to Manifolds.jl v0.8
- Deprecate `initManual!`, instead use `initVariable!`.
# Changes in v0.28
- `HeatmapGridDensity` now only supports `ManifoldKernelDensity` functions.
- `PackedHeatmapGridDensity` has expanded fields to support future stash and cache serialization strategies.
- Internal `parchDistribution` functions have been added towards future stashed serialization strategies.
- Internal `_update!` function supports updating of the `HeatmapGridDensity` distribution.
- Unpacking of `PackedManifoldKernelDensity` is more versatile with improved `.partial` and `.bw` options.
- Bugfix on `multihypo=` which now includes `nullSurplus` on sibling relative factors to a variable with a `multihypo` factor, #1518.
# Changes in v0.27
- InMemDFGType is deprecated in favor of LocalDFG (exported from DistributedFactorGraphs).
- Factor serialization is now top level JSON only #1476.
- Serialization of distributions are now JSON only #1468, #1472, #1473 (removed custom string legacy).
- Fix chicken and egg problem on unpackFactor, change `convert` to `reconstFactorData`, #1424.
- Add factor `preambleCache(dfg, vecVars, usrfnc)`, #1462, #1466. Doesn't work for parametric yet (#1480).
- Add `CalcFactor.cache` using preamble, #1481. Not thread safe yet.
- Standardize local graph naming to `LocalDFG`, #1479.
- Refactor getDimension and sampling, #1463.
- Language upgrades on `qr` for Julia 1.7, #1464.
- Various other fixes and upgrades, https://github.com/JuliaRobotics/IncrementalInference.jl/milestone/111?closed=1
- Add distribution serialization for Rayleigh.
- Add `Position{N}` and `Position1`..`Position4` as new standard and aliases for `ContinuousScalar`, `ContinuousEuclid{N}`.
# Changes in v0.26
- Standardizing (non-binding) easy factor dispatch cases so the measurement field is under `.Z` (#1441).
- `CalcFactor._allowThreads` can now be used as a workaround for the `Threads` yield blocking issue during first run (#1451).
- Canonical graph generator API change to `generateGraph_ABC` (#1454).
# Changes in v0.25
- Changed API to `testFactorResidualBinary(fct, meas::Tuple, (T_i, param_i),...)` to grow beyond binary.
- PPE methods used keyword `method::AbstractPointParametricType` which is now replaced with the keyword `ppeType`.
- Belief points are now stored as a `Vector{P}` (rather than legacy `Matrix`), and currently still under the restriction `P <: AbstractVector{<:Real}`. Objective is moving to `P` any user desired point that fits with the JuliaManifolds/Manifolds.jl patterns.
- Deprecating use of `ensureAllInitialized!`, use `initAll!` instead.
- Upstream `calcHelix_T` canonical generator utility from RoME.jl.
- Deserialization of factors with DFG needs new API and change of solverData and CCW type in factor.
- Deprecate use of `getParametricMeasurement` and use `getMeasurementParametric` instead, and add `<:AbstractManifold` to API.
- Deprecate use of `solveBinaryFactorParameteric`, instead use `solveFactorParametric`.
- Deprecating `approxConvBinary`, use `approxConvBelief` instead.
- Removing obsolete `approxConvCircular`, use `approxConvBelief` instead.
- `getSample` should return a single sample and no longer takes the N(number of samples) parameter.
- `solveTree!` / `solveGraph!` now returns just one value `tree<:AbstractBayesTree`. Previous version returned three values, `tree, smt, hist` (#1379).
- **Note for v0.25.5** Serialization of the newly introduced type `PackedHeatmapGridDensity` changed from v0.25.4, which is unlikely to have been used publicly yet, therefore emphasizing the fastest possible standardization in this case (even though this particular event does not strictly follow semver). General usage and operation is effectively unchanged, see #1435.
# Changes in v0.24
- Update compat for ManifoldsBase.jl v0.11 with `AbstractManifold`.
- Transition to only `getManifold` (instead of `getManifolds`), thereby moving towards exclusively using Manifolds.jl, see #1234.
- Deprecate use of `getFactorMean`, use `IIF.getParametricMeasurement` instead.
- Upstreamed `is/set Marginalized` to DFG (#1269).
# Changes in v0.23
- New `@defVariable` only uses `ManifoldsBase.Manifold` as base abstraction for variable types.
# Changes in v0.22
- Work in progress toward `ManifoldsBase.Manifold` as base abstraction for variable types.
# Changes in v0.21
- `CalcResidual` no longer takes a `residual` as input parameter and should return `residual`, see #467 .
# Changes in v0.20
- The user factor API call strategy has been simplified via `CalcResidual`, see #467 for details.
- User factor API for `getSample` and `.specialsampler` has been standardized via `CalcResidual` (#927) -- for ongoing work please follow #1099 and #1094 and #1069.
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 0.35.4 | 2b7fa2c68128e0e5a086f997cff205410812509c | docs | 6102 | # IncrementalInference.jl
> Click on badges to follow links:
| Stable Release | Dev branch | Coverage | Documentation |
|----------------|------------|----------|---------------|
| [![iif-ci-stb][iif-ci-stb-img]][iif-ci-stb-url] <br> [![version][iif-ver-img]][iif-rel-url] | [![iif-ci-dev-img]][iif-ci-dev-url] <br> [![iif-commits-url]][contributors-url] <br> [![issues-time]][issues-url] | [![doi-img]][doi-url] <br> [![iif-cov-img]][iif-cov-url] <br> [![issues-open]][issues-url] | [![cjl-slack-badge]][cjl-slack] <br> [![caesar-docs]][cjl-docs-url] <br> [![dfg-docs]][dfg-docs-url] |
Optimization routines for incremental non-parametric and parametric solutions based on factor graphs and the Bayes (Junction) tree implemented in the [Julia language][jlorg-url].
# Introduction
This package implements a few different non-Gaussian factor graph inference algorithms, primarily
- Multi-Modal iSAM (MM-iSAM) ([see references][cjl-docs-refs]), which performs hybrid non-parametric and parametric inference/state-estimation over large factor graphs.
- Batch Parametric (akin to conventional "non-linear least squares"),
- Max-mixtures parametric,
- Other multiparametric and non-Gaussian algorithms are in the works and will be announced in due course.
Fundamentally, inference is performed via the Bayes (junction) tree where Chapman-Kolmogorov transit integral solutions are based on marginal-joint belief estimation (a sum-product / belief-propagation approximation algorithm). Many benefits such as clique recycling are also available. See the common Caesar.jl documentation for more details. [![caesar-docs]][cjl-docs-url]
This package forms a cardinal piece of the [Caesar.jl][cjl-url] robotics toolkit, including 3D visualization and database interaction, which can serve as a base station for a robotic platform. A standalone Robot Motion Estimate ([RoME.jl][rjl-url]) package extends the available variables, factors, and utilities for use in robotic navigation. [![iif-deps-img]][iif-deps-jlh]
Note, that IncrementalInference.jl **does not** have to be used with RoME.jl / Caesar.jl -- IncrementalInference.jl only implements the algebraic inference operations against mathematical abstractions such as Manifolds.jl.
Furthermore, please contact [email protected] for more formal support on this package via [NavAbility(TM) by WhereWhen.ai Technologies Inc.][wwai-url].
# Examples
See the common Caesar.jl documentation for more details [![caesar-docs]][cjl-docs-url]. Further examples can be found in the examples and test folders.
## Installation
Install the package from inside Julia
```julia
pkg> add IncrementalInference
```
# Cite and Contributors
We are grateful for many, many contributions within the Julia package ecosystem -- see the [`Project.toml`](https://github.com/JuliaRobotics/Caesar.jl/blob/master/Project.toml) files for a far reaching list of upstream packages and contributions.
Consider citing our work using the common reference at [Caesar.jl Citation with IncrementalInference.jl DOI](https://github.com/JuliaRobotics/Caesar.jl#contributors)
## Get Involved, and Code of Conduct
This project adheres to the [JuliaRobotics code of conduct](https://github.com/JuliaRobotics/administration/blob/master/code_of_conduct.md), and we invite contributions or comments from the community. Use the slack channel, Julia Discourse, or Github issues to get in touch.
# References
See [references of interest here][cjl-docs-refs]
## Legacy
Pre-install the following system-wide packages (the optional ones make it easy to draw the factor graph and Bayes tree):
```bash
sudo apt-get install hdf5-tools
sudo apt-get install graphviz xdot # optional
```
[iif-deps-img]: https://juliahub.com/docs/IncrementalInference/deps.svg
[iif-deps-jlh]: https://juliahub.com/ui/Packages/IncrementalInference/NrVw2??page=2
[doi-img]: https://zenodo.org/badge/DOI/10.5281/zenodo.5146221.svg
[doi-url]: https://doi.org/10.5281/zenodo.5146221
[dfg-docs]: https://img.shields.io/badge/DFGDocs-latest-blue.svg
[dfg-docs-url]: https://juliarobotics.org/DistributedFactorGraphs.jl/latest/
<!-- replicated in Caesar.jl README -->
[iif-ci-dev-img]: https://github.com/JuliaRobotics/IncrementalInference.jl/actions/workflows/ci.yml/badge.svg
[iif-ci-dev-url]: https://github.com/JuliaRobotics/IncrementalInference.jl/actions/workflows/ci.yml
[iif-ci-stb-img]: https://github.com/JuliaRobotics/IncrementalInference.jl/actions/workflows/ci.yml/badge.svg?branch=release%2Fv0.26
[iif-ci-stb-url]: https://github.com/JuliaRobotics/IncrementalInference.jl/actions/workflows/ci.yml
[iif-ver-img]: https://juliahub.com/docs/IncrementalInference/version.svg
[iif-rel-url]: https://github.com/JuliaRobotics/IncrementalInference.jl/releases
[iif-milestones]: https://github.com/JuliaRobotics/IncrementalInference.jl/milestones
[iif-cov-img]: https://codecov.io/github/JuliaRobotics/IncrementalInference.jl/coverage.svg?branch=master
[iif-cov-url]: https://codecov.io/github/JuliaRobotics/IncrementalInference.jl?branch=master
[iif-commits-url]: https://img.shields.io/github/commit-activity/y/JuliaRobotics/IncrementalInference.jl.svg?color=dark-green
[contributors-url]: https://github.com/JuliaRobotics/IncrementalInference.jl/graphs/contributors
[issues-time]: https://isitmaintained.com/badge/resolution/JuliaRobotics/IncrementalInference.jl.svg
[issues-open]: https://isitmaintained.com/badge/open/JuliaRobotics/IncrementalInference.jl.svg
[issues-url]: https://github.com/JuliaRobotics/IncrementalInference.jl/issues
[rjl-url]: https://github.com/JuliaRobotics/RoME.jl
[cjl-url]: https://github.com/JuliaRobotics/Caesar.jl
[caesar-docs]: https://img.shields.io/badge/CaesarDocs-latest-blue.svg
[cjl-docs-url]: http://juliarobotics.github.io/Caesar.jl/latest/
[cjl-docs-refs]: http://www.juliarobotics.org/Caesar.jl/latest/refs/literature/
[cjl-slack-badge]: https://img.shields.io/badge/Caesarjl-Slack-green.svg?style=popout
[cjl-slack]: https://join.slack.com/t/caesarjl/shared_invite/zt-ucs06bwg-y2tEbddwX1vR18MASnOLsw
[jlorg-url]: http://www.julialang.org/
[wwai-url]: https://www.wherewhen.ai
| IncrementalInference | https://github.com/JuliaRobotics/IncrementalInference.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 259 | module DiffEqBaseCUDAExt
using DiffEqBase, CUDA
function DiffEqBase.ODE_DEFAULT_NORM(
u::CuArray{T}, t) where {T <: Union{AbstractFloat, Complex}}
sqrt(sum(DiffEqBase.sse, u; init = DiffEqBase.sse(zero(T))) / DiffEqBase.totallength(u))
end
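# Example sketch (hypothetical; requires a CUDA-capable GPU and an ODE solver
# package such as OrdinaryDiffEq.jl):
# using CUDA, OrdinaryDiffEq
# prob = ODEProblem((du, u, p, t) -> (du .= -u), CUDA.ones(32), (0.0f0, 1.0f0))
# solve(prob, Tsit5()) # adaptive error control uses the GPU-friendly norm above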
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 1007 | module DiffEqBaseChainRulesCoreExt
using DiffEqBase
using DiffEqBase.SciMLBase
import DiffEqBase: numargs, AbstractSensitivityAlgorithm, AbstractDEProblem
import ChainRulesCore
import ChainRulesCore: NoTangent
ChainRulesCore.rrule(::typeof(numargs), f) = (numargs(f), df -> (NoTangent(), NoTangent()))
ChainRulesCore.@non_differentiable DiffEqBase.checkkwargs(kwargshandle)
function ChainRulesCore.frule(::typeof(DiffEqBase.solve_up), prob,
sensealg::Union{Nothing, AbstractSensitivityAlgorithm},
u0, p, args...;
kwargs...)
DiffEqBase._solve_forward(
prob, sensealg, u0, p, SciMLBase.ChainRulesOriginator(), args...;
kwargs...)
end
function ChainRulesCore.rrule(::typeof(DiffEqBase.solve_up), prob::AbstractDEProblem,
sensealg::Union{Nothing, AbstractSensitivityAlgorithm},
u0, p, args...;
kwargs...)
DiffEqBase._solve_adjoint(
prob, sensealg, u0, p, SciMLBase.ChainRulesOriginator(), args...;
kwargs...)
end
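# Example sketch (hypothetical; the adjoints behind `_solve_adjoint` are
# provided by SciMLSensitivity.jl, which must be loaded), for some ODEProblem
# `prob` with parameters `p`:
# using Zygote, OrdinaryDiffEq, SciMLSensitivity
# g, = Zygote.gradient(p -> sum(solve(prob, Tsit5(); p = p, saveat = 0.1)), p)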
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 214 | module DiffEqBaseDistributionsExt
using Distributions, DiffEqBase
DiffEqBase.handle_distribution_u0(_u0::Distributions.Sampleable) = rand(_u0)
DiffEqBase.isdistribution(_u0::Distributions.Sampleable) = true
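# Example sketch (hypothetical): a `Sampleable` passed as `u0` is sampled once
# when the problem is made concrete at solve time, e.g.
# prob = ODEProblem((u, p, t) -> -u, Normal(1.0, 0.1), (0.0, 1.0))
# solve(prob, Tsit5()) # draws u0 = rand(Normal(1.0, 0.1)) via the hook above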
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 1942 | module DiffEqBaseEnzymeExt
using DiffEqBase
import DiffEqBase: value, fastpow
using Enzyme
import Enzyme: Const
using ChainRulesCore
function Enzyme.EnzymeRules.augmented_primal(config::Enzyme.EnzymeRules.RevConfigWidth{1},
func::Const{typeof(DiffEqBase.solve_up)}, ::Type{Duplicated{RT}}, prob,
sensealg::Union{Const{Nothing}, Const{<:DiffEqBase.AbstractSensitivityAlgorithm}},
u0, p, args...; kwargs...) where {RT}
@inline function copy_or_reuse(val, idx)
if Enzyme.EnzymeRules.overwritten(config)[idx] && ismutable(val)
return deepcopy(val)
else
return val
end
end
@inline function arg_copy(i)
copy_or_reuse(args[i].val, i + 5)
end
res = DiffEqBase._solve_adjoint(
copy_or_reuse(prob.val, 2), copy_or_reuse(sensealg.val, 3),
copy_or_reuse(u0.val, 4), copy_or_reuse(p.val, 5),
SciMLBase.EnzymeOriginator(), ntuple(arg_copy, Val(length(args)))...;
kwargs...)
dres = Enzyme.make_zero(res[1])::RT
tup = (dres, res[2])
return Enzyme.EnzymeRules.AugmentedReturn{RT, RT, Any}(res[1], dres, tup::Any)
end
function Enzyme.EnzymeRules.reverse(config::Enzyme.EnzymeRules.RevConfigWidth{1},
func::Const{typeof(DiffEqBase.solve_up)}, ::Type{Duplicated{RT}}, tape, prob,
sensealg::Union{Const{Nothing}, Const{<:DiffEqBase.AbstractSensitivityAlgorithm}},
u0, p, args...; kwargs...) where {RT}
dres, clos = tape
dres = dres::RT
dargs = clos(dres)
for (darg, ptr) in zip(dargs, (func, prob, sensealg, u0, p, args...))
if ptr isa Enzyme.Const
continue
end
if darg == ChainRulesCore.NoTangent()
continue
end
ptr.dval .+= darg
end
Enzyme.make_zero!(dres.u)
return ntuple(_ -> nothing, Val(length(args) + 4))
end
Enzyme.Compiler.known_ops[typeof(DiffEqBase.fastpow)] = (:pow, 2, nothing)
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 335 | module DiffEqBaseGeneralizedGeneratedExt
if isdefined(Base, :get_extension)
using DiffEqBase
using GeneralizedGenerated
else
using ..DiffEqBase
using ..GeneralizedGenerated
end
function SciMLBase.numargs(::GeneralizedGenerated.RuntimeFn{Args}) where {Args}
GeneralizedGenerated.from_type(Args) |> length
end
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 328 | module DiffEqBaseMPIExt
if isdefined(Base, :get_extension)
using DiffEqBase
import MPI
else
using ..DiffEqBase
import ..MPI
end
if isdefined(MPI, :AbstractMultiRequest)
function DiffEqBase.anyeltypedual(::Type{T},
counter = 0) where {T <: MPI.AbstractMultiRequest}
Any
end
end
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 1379 | module DiffEqBaseMeasurementsExt
if isdefined(Base, :get_extension)
using DiffEqBase
import DiffEqBase: value
using Measurements
else
using ..DiffEqBase
import ..DiffEqBase: value
using ..Measurements
end
function DiffEqBase.promote_u0(u0::AbstractArray{<:Measurements.Measurement},
p::AbstractArray{<:Measurements.Measurement}, t0)
u0
end
DiffEqBase.promote_u0(u0, p::AbstractArray{<:Measurements.Measurement}, t0) = eltype(p).(u0)
value(x::Type{Measurements.Measurement{T}}) where {T} = T
value(x::Measurements.Measurement) = Measurements.value(x)
@inline DiffEqBase.fastpow(x::Measurements.Measurement, y::Measurements.Measurement) = x^y
# Support adaptive steps should be errorless
@inline function DiffEqBase.ODE_DEFAULT_NORM(
u::AbstractArray{
<:Measurements.Measurement,
N
},
t) where {N}
sqrt(sum(x -> DiffEqBase.ODE_DEFAULT_NORM(x[1], x[2]),
zip((value(x) for x in u), Iterators.repeated(t))) / length(u))
end
@inline function DiffEqBase.ODE_DEFAULT_NORM(u::Array{<:Measurements.Measurement, N},
t) where {N}
sqrt(sum(x -> DiffEqBase.ODE_DEFAULT_NORM(x[1], x[2]),
zip((value(x) for x in u), Iterators.repeated(t))) / length(u))
end
@inline function DiffEqBase.ODE_DEFAULT_NORM(u::Measurements.Measurement, t)
abs(Measurements.value(u))
end
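# Example sketch (hypothetical): with the norm overloads above, adaptive
# stepping on `Measurement` states uses only the measurement values, e.g.
# using OrdinaryDiffEq, Measurements
# prob = ODEProblem((u, p, t) -> -u, 1.0 ± 0.1, (0.0, 1.0))
# solve(prob, Tsit5())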
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 1772 | module DiffEqBaseMonteCarloMeasurementsExt
if isdefined(Base, :get_extension)
using DiffEqBase
import DiffEqBase: value
using MonteCarloMeasurements
else
using ..DiffEqBase
import ..DiffEqBase: value
using ..MonteCarloMeasurements
end
function DiffEqBase.promote_u0(
u0::AbstractArray{
<:MonteCarloMeasurements.AbstractParticles,
},
p::AbstractArray{<:MonteCarloMeasurements.AbstractParticles},
t0)
u0
end
function DiffEqBase.promote_u0(u0,
p::AbstractArray{<:MonteCarloMeasurements.AbstractParticles},
t0)
eltype(p).(u0)
end
DiffEqBase.value(x::Type{MonteCarloMeasurements.AbstractParticles{T, N}}) where {T, N} = T
DiffEqBase.value(x::MonteCarloMeasurements.AbstractParticles) = mean(x.particles)
@inline function DiffEqBase.fastpow(x::MonteCarloMeasurements.AbstractParticles,
y::MonteCarloMeasurements.AbstractParticles)
x^y
end
# Support adaptive steps should be errorless
@inline function DiffEqBase.ODE_DEFAULT_NORM(
u::AbstractArray{
<:MonteCarloMeasurements.AbstractParticles,
N}, t) where {N}
sqrt(mean(x -> DiffEqBase.ODE_DEFAULT_NORM(x[1], x[2]),
zip((value(x) for x in u), Iterators.repeated(t))))
end
@inline function DiffEqBase.ODE_DEFAULT_NORM(
u::AbstractArray{
<:MonteCarloMeasurements.AbstractParticles,
N},
t::AbstractArray{
<:MonteCarloMeasurements.AbstractParticles,
N}) where {N}
sqrt(mean(x -> DiffEqBase.ODE_DEFAULT_NORM(x[1], x[2]),
zip((value(x) for x in u), Iterators.repeated(value.(t)))))
end
@inline function DiffEqBase.ODE_DEFAULT_NORM(u::MonteCarloMeasurements.AbstractParticles, t)
abs(value(u))
end
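# Example sketch (hypothetical), analogous to the Measurements extension:
# using OrdinaryDiffEq, MonteCarloMeasurements
# prob = ODEProblem((u, p, t) -> -u, 1.0 ± 0.1, (0.0, 1.0))
# solve(prob, Tsit5()) # the norms above reduce particles to their mean value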
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 7054 | module DiffEqBaseReverseDiffExt
using DiffEqBase
import DiffEqBase: value
import ReverseDiff
import DiffEqBase.ArrayInterface
import DiffEqBase.ForwardDiff
function DiffEqBase.anyeltypedual(::Type{T},
::Type{Val{counter}} = Val{0}) where {counter} where {
V, D, N, VA, DA, T <: ReverseDiff.TrackedArray{V, D, N, VA, DA}}
DiffEqBase.anyeltypedual(V, Val{counter})
end
DiffEqBase.value(x::Type{ReverseDiff.TrackedReal{V, D, O}}) where {V, D, O} = V
function DiffEqBase.value(x::Type{
ReverseDiff.TrackedArray{V, D, N, VA, DA},
}) where {V, D,
N, VA,
DA}
Array{V, N}
end
DiffEqBase.value(x::ReverseDiff.TrackedReal) = x.value
DiffEqBase.value(x::ReverseDiff.TrackedArray) = x.value
# Force TrackedArray from TrackedReal when reshaping W\b
DiffEqBase._reshape(v::AbstractVector{<:ReverseDiff.TrackedReal}, siz) = reduce(vcat, v)
DiffEqBase.promote_u0(u0::ReverseDiff.TrackedArray, p::ReverseDiff.TrackedArray, t0) = u0
function DiffEqBase.promote_u0(u0::AbstractArray{<:ReverseDiff.TrackedReal},
p::ReverseDiff.TrackedArray, t0)
u0
end
function DiffEqBase.promote_u0(u0::ReverseDiff.TrackedArray,
p::AbstractArray{<:ReverseDiff.TrackedReal}, t0)
u0
end
function DiffEqBase.promote_u0(u0::AbstractArray{<:ReverseDiff.TrackedReal},
p::AbstractArray{<:ReverseDiff.TrackedReal}, t0)
u0
end
DiffEqBase.promote_u0(u0, p::ReverseDiff.TrackedArray, t0) = ReverseDiff.track(u0)
function DiffEqBase.promote_u0(
u0, p::ReverseDiff.TrackedArray{T}, t0) where {T <: ForwardDiff.Dual}
ReverseDiff.track(T.(u0))
end
DiffEqBase.promote_u0(u0, p::AbstractArray{<:ReverseDiff.TrackedReal}, t0) = eltype(p).(u0)
# Support adaptive with non-tracked time
@inline function DiffEqBase.ODE_DEFAULT_NORM(u::ReverseDiff.TrackedArray, t)
sqrt(sum(abs2, DiffEqBase.value(u)) / length(u))
end
@inline function DiffEqBase.ODE_DEFAULT_NORM(
u::AbstractArray{<:ReverseDiff.TrackedReal, N},
t) where {N}
sqrt(sum(x -> DiffEqBase.ODE_DEFAULT_NORM(x[1], x[2]),
zip((DiffEqBase.value(x) for x in u), Iterators.repeated(t))) / length(u))
end
@inline function DiffEqBase.ODE_DEFAULT_NORM(u::Array{<:ReverseDiff.TrackedReal, N},
t) where {N}
sqrt(sum(x -> DiffEqBase.ODE_DEFAULT_NORM(x[1], x[2]),
zip((DiffEqBase.value(x) for x in u), Iterators.repeated(t))) / length(u))
end
@inline function DiffEqBase.ODE_DEFAULT_NORM(u::ReverseDiff.TrackedReal, t)
abs(DiffEqBase.value(u))
end
# Support TrackedReal time, don't drop tracking on the adaptivity there
@inline function DiffEqBase.ODE_DEFAULT_NORM(u::ReverseDiff.TrackedArray,
t::ReverseDiff.TrackedReal)
sqrt(sum(abs2, u) / length(u))
end
@inline function DiffEqBase.ODE_DEFAULT_NORM(
u::AbstractArray{<:ReverseDiff.TrackedReal, N},
t::ReverseDiff.TrackedReal) where {N}
sqrt(sum(x -> DiffEqBase.ODE_DEFAULT_NORM(x[1], x[2]), zip(u, Iterators.repeated(t))) /
length(u))
end
@inline function DiffEqBase.ODE_DEFAULT_NORM(u::Array{<:ReverseDiff.TrackedReal, N},
t::ReverseDiff.TrackedReal) where {N}
sqrt(sum(x -> DiffEqBase.ODE_DEFAULT_NORM(x[1], x[2]), zip(u, Iterators.repeated(t))) /
length(u))
end
@inline function DiffEqBase.ODE_DEFAULT_NORM(u::ReverseDiff.TrackedReal,
t::ReverseDiff.TrackedReal)
abs(u)
end
# `ReverseDiff.TrackedArray`
function DiffEqBase.solve_up(prob::DiffEqBase.AbstractDEProblem,
sensealg::Union{
SciMLBase.AbstractOverloadingSensitivityAlgorithm,
Nothing}, u0::ReverseDiff.TrackedArray,
p::ReverseDiff.TrackedArray, args...; kwargs...)
ReverseDiff.track(DiffEqBase.solve_up, prob, sensealg, u0, p, args...; kwargs...)
end
function DiffEqBase.solve_up(prob::DiffEqBase.AbstractDEProblem,
sensealg::Union{
SciMLBase.AbstractOverloadingSensitivityAlgorithm,
Nothing}, u0, p::ReverseDiff.TrackedArray,
args...; kwargs...)
ReverseDiff.track(DiffEqBase.solve_up, prob, sensealg, u0, p, args...; kwargs...)
end
function DiffEqBase.solve_up(prob::DiffEqBase.AbstractDEProblem,
sensealg::Union{
SciMLBase.AbstractOverloadingSensitivityAlgorithm,
Nothing}, u0::ReverseDiff.TrackedArray, p,
args...; kwargs...)
ReverseDiff.track(DiffEqBase.solve_up, prob, sensealg, u0, p, args...; kwargs...)
end
# `AbstractArray{<:ReverseDiff.TrackedReal}`
function DiffEqBase.solve_up(prob::DiffEqBase.AbstractDEProblem,
sensealg::Union{
SciMLBase.AbstractOverloadingSensitivityAlgorithm,
Nothing},
u0::AbstractArray{<:ReverseDiff.TrackedReal},
p::AbstractArray{<:ReverseDiff.TrackedReal}, args...;
kwargs...)
DiffEqBase.solve_up(prob, sensealg, ArrayInterface.aos_to_soa(u0),
ArrayInterface.aos_to_soa(p), args...;
kwargs...)
end
function DiffEqBase.solve_up(prob::DiffEqBase.AbstractDEProblem,
sensealg::Union{
SciMLBase.AbstractOverloadingSensitivityAlgorithm,
Nothing}, u0,
p::AbstractArray{<:ReverseDiff.TrackedReal},
args...; kwargs...)
DiffEqBase.solve_up(
prob, sensealg, u0, ArrayInterface.aos_to_soa(p), args...; kwargs...)
end
function DiffEqBase.solve_up(prob::DiffEqBase.AbstractDEProblem,
sensealg::Union{
SciMLBase.AbstractOverloadingSensitivityAlgorithm,
Nothing}, u0::ReverseDiff.TrackedArray,
p::AbstractArray{<:ReverseDiff.TrackedReal},
args...; kwargs...)
DiffEqBase.solve_up(
prob, sensealg, u0, ArrayInterface.aos_to_soa(p), args...; kwargs...)
end
function DiffEqBase.solve_up(prob::DiffEqBase.DEProblem,
sensealg::Union{
SciMLBase.AbstractOverloadingSensitivityAlgorithm,
Nothing},
u0::AbstractArray{<:ReverseDiff.TrackedReal}, p,
args...; kwargs...)
DiffEqBase.solve_up(
prob, sensealg, ArrayInterface.aos_to_soa(u0), p, args...; kwargs...)
end
function DiffEqBase.solve_up(prob::DiffEqBase.DEProblem,
sensealg::Union{
SciMLBase.AbstractOverloadingSensitivityAlgorithm,
Nothing},
u0::AbstractArray{<:ReverseDiff.TrackedReal}, p::ReverseDiff.TrackedArray,
args...; kwargs...)
DiffEqBase.solve_up(
prob, sensealg, ArrayInterface.aos_to_soa(u0), p, args...; kwargs...)
end
# Required because `ReverseDiff.@grad function DiffEqBase.solve_up` is not supported!
import DiffEqBase: solve_up
ReverseDiff.@grad function solve_up(prob, sensealg, u0, p, args...; kwargs...)
out = DiffEqBase._solve_adjoint(prob, sensealg, ReverseDiff.value(u0),
ReverseDiff.value(p),
SciMLBase.ReverseDiffOriginator(), args...; kwargs...)
function actual_adjoint(_args...)
original_adjoint = out[2](_args...)
if isempty(args) # alg is missing
tuple(original_adjoint[1:4]..., original_adjoint[6:end]...)
else
original_adjoint
end
end
Array(out[1]), actual_adjoint
end
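# Example sketch (hypothetical; adjoints require SciMLSensitivity.jl), for some
# ODEProblem `prob` with parameters `p`:
# using ReverseDiff, OrdinaryDiffEq, SciMLSensitivity
# ReverseDiff.gradient(p -> sum(solve(prob, Tsit5(); p = p, saveat = 0.1)), p)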
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 342 | module DiffEqBaseSparseArraysExt
import SparseArrays
import DiffEqBase: NAN_CHECK, INFINITE_OR_GIANT
function NAN_CHECK(x::SparseArrays.AbstractSparseMatrixCSC)
any(NAN_CHECK, SparseArrays.nonzeros(x))
end
function INFINITE_OR_GIANT(x::SparseArrays.AbstractSparseMatrixCSC)
any(INFINITE_OR_GIANT, SparseArrays.nonzeros(x))
end
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 4391 | module DiffEqBaseTrackerExt
if isdefined(Base, :get_extension)
using DiffEqBase
import DiffEqBase: value
import Tracker
else
using ..DiffEqBase
import ..DiffEqBase: value
import ..Tracker
end
DiffEqBase.value(x::Type{Tracker.TrackedReal{T}}) where {T} = T
DiffEqBase.value(x::Type{Tracker.TrackedArray{T, N, A}}) where {T, N, A} = Array{T, N}
DiffEqBase.value(x::Tracker.TrackedReal) = x.data
DiffEqBase.value(x::Tracker.TrackedArray) = x.data
DiffEqBase.promote_u0(u0::Tracker.TrackedArray, p::Tracker.TrackedArray, t0) = u0
function DiffEqBase.promote_u0(u0::AbstractArray{<:Tracker.TrackedReal},
p::Tracker.TrackedArray, t0)
u0
end
function DiffEqBase.promote_u0(u0::Tracker.TrackedArray,
p::AbstractArray{<:Tracker.TrackedReal}, t0)
u0
end
function DiffEqBase.promote_u0(u0::AbstractArray{<:Tracker.TrackedReal},
p::AbstractArray{<:Tracker.TrackedReal}, t0)
u0
end
DiffEqBase.promote_u0(u0, p::Tracker.TrackedArray, t0) = Tracker.track(u0)
DiffEqBase.promote_u0(u0, p::AbstractArray{<:Tracker.TrackedReal}, t0) = eltype(p).(u0)
@inline DiffEqBase.fastpow(x::Tracker.TrackedReal, y::Tracker.TrackedReal) = x^y
@inline Base.any(f::Function, x::Tracker.TrackedArray) = any(f, Tracker.data(x))
# Support adaptive with non-tracked time
@inline function DiffEqBase.ODE_DEFAULT_NORM(u::Tracker.TrackedArray, t)
sqrt(sum(abs2, DiffEqBase.value(u)) / length(u))
end
@inline function DiffEqBase.ODE_DEFAULT_NORM(u::AbstractArray{<:Tracker.TrackedReal, N},
t) where {N}
sqrt(sum(x -> DiffEqBase.ODE_DEFAULT_NORM(x[1], x[2]),
zip((DiffEqBase.value(x) for x in u), Iterators.repeated(t))) / length(u))
end
@inline function DiffEqBase.ODE_DEFAULT_NORM(u::Array{<:Tracker.TrackedReal, N},
t) where {N}
sqrt(sum(x -> DiffEqBase.ODE_DEFAULT_NORM(x[1], x[2]),
zip((DiffEqBase.value(x) for x in u), Iterators.repeated(t))) / length(u))
end
@inline DiffEqBase.ODE_DEFAULT_NORM(u::Tracker.TrackedReal, t) = abs(DiffEqBase.value(u))
# Support TrackedReal time, don't drop tracking on the adaptivity there
@inline function DiffEqBase.ODE_DEFAULT_NORM(u::Tracker.TrackedArray,
t::Tracker.TrackedReal)
sqrt(sum(abs2, u) / length(u))
end
@inline function DiffEqBase.ODE_DEFAULT_NORM(u::AbstractArray{<:Tracker.TrackedReal, N},
t::Tracker.TrackedReal) where {N}
sqrt(sum(x -> DiffEqBase.ODE_DEFAULT_NORM(x[1], x[2]), zip(u, Iterators.repeated(t))) /
length(u))
end
@inline function DiffEqBase.ODE_DEFAULT_NORM(u::Array{<:Tracker.TrackedReal, N},
t::Tracker.TrackedReal) where {N}
sqrt(sum(x -> DiffEqBase.ODE_DEFAULT_NORM(x[1], x[2]), zip(u, Iterators.repeated(t))) /
length(u))
end
@inline DiffEqBase.ODE_DEFAULT_NORM(u::Tracker.TrackedReal, t::Tracker.TrackedReal) = abs(u)
function DiffEqBase.solve_up(prob::DiffEqBase.AbstractDEProblem,
sensealg::Union{
SciMLBase.AbstractOverloadingSensitivityAlgorithm,
Nothing}, u0::Tracker.TrackedArray,
p::Tracker.TrackedArray, args...; kwargs...)
Tracker.track(DiffEqBase.solve_up, prob, sensealg, u0, p, args...; kwargs...)
end
function DiffEqBase.solve_up(prob::DiffEqBase.AbstractDEProblem,
sensealg::Union{
SciMLBase.AbstractOverloadingSensitivityAlgorithm,
Nothing}, u0::Tracker.TrackedArray, p, args...;
kwargs...)
Tracker.track(DiffEqBase.solve_up, prob, sensealg, u0, p, args...; kwargs...)
end
function DiffEqBase.solve_up(prob::DiffEqBase.AbstractDEProblem,
sensealg::Union{
SciMLBase.AbstractOverloadingSensitivityAlgorithm,
Nothing}, u0, p::Tracker.TrackedArray, args...;
kwargs...)
Tracker.track(DiffEqBase.solve_up, prob, sensealg, u0, p, args...; kwargs...)
end
Tracker.@grad function DiffEqBase.solve_up(prob,
sensealg::Union{Nothing,
SciMLBase.AbstractOverloadingSensitivityAlgorithm
},
u0, p, args...;
kwargs...)
sol, pb_f = DiffEqBase._solve_adjoint(
prob, sensealg, Tracker.data(u0), Tracker.data(p),
SciMLBase.TrackerOriginator(), args...; kwargs...)
if sol isa AbstractArray
!hasfield(typeof(sol), :u) && return sol, pb_f # being safe here
return sol.u, pb_f # AbstractNoTimeSolution isa AbstractArray
end
return convert(AbstractArray, sol), pb_f
end
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 1189 | module DiffEqBaseUnitfulExt
if isdefined(Base, :get_extension)
using DiffEqBase
import DiffEqBase: value
using Unitful
else
using ..DiffEqBase
import ..DiffEqBase: value
using ..Unitful
end
# Support adaptive stepping: error estimates must be unitless so exponentiation works
value(x::Type{Unitful.AbstractQuantity{T, D, U}}) where {T, D, U} = T
value(x::Unitful.AbstractQuantity) = x.val
@inline function DiffEqBase.ODE_DEFAULT_NORM(
u::AbstractArray{
<:Unitful.AbstractQuantity,
N
},
t) where {N}
sqrt(sum(x -> DiffEqBase.ODE_DEFAULT_NORM(x[1], x[2]),
zip((value(x) for x in u), Iterators.repeated(t))) / length(u))
end
@inline function DiffEqBase.ODE_DEFAULT_NORM(u::Array{<:Unitful.AbstractQuantity, N},
t) where {N}
sqrt(sum(x -> DiffEqBase.ODE_DEFAULT_NORM(x[1], x[2]),
zip((value(x) for x in u), Iterators.repeated(t))) / length(u))
end
@inline DiffEqBase.ODE_DEFAULT_NORM(u::Unitful.AbstractQuantity, t) = abs(value(u))
@inline function DiffEqBase.UNITLESS_ABS2(x::Unitful.AbstractQuantity)
    # divide by oneunit(x)^2 so the squared magnitude is dimensionless
    real(abs2(x) / (oneunit(x) * oneunit(x)))
end
DiffEqBase._rate_prototype(u, t, onet) = u / unit(t)
end
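# Sketch of the net effect (hypothetical values; assumes Unitful is loaded):
# the default norm strips units via `value`, so adaptive error estimates stay
# dimensionless and comparable to plain tolerances:
#
#   using Unitful: m
#   DiffEqBase.ODE_DEFAULT_NORM([1.0m, 2.0m], 0.0) isa Float64   # true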
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 6187 | module DiffEqBase
if isdefined(Base, :Experimental) &&
isdefined(Base.Experimental, Symbol("@max_methods"))
@eval Base.Experimental.@max_methods 1
end
import PrecompileTools
using ArrayInterface
using StaticArraysCore # data arrays
using LinearAlgebra, Printf
using DocStringExtensions
using FunctionWrappers: FunctionWrapper
using MuladdMacro, Parameters
using Statistics
using FastBroadcast: @.., True, False
using Static: reduce_tup
import RecursiveArrayTools
import TruncatedStacktraces
using Setfield
using ForwardDiff
using EnumX
using Markdown
using ConcreteStructs: @concrete
using FastClosures: @closure
# Could be made optional/glue
import PreallocationTools
import FunctionWrappersWrappers
using SciMLBase
using SciMLOperators: AbstractSciMLOperator, AbstractSciMLScalarOperator
using SciMLBase: @def, DEIntegrator, AbstractDEProblem,
AbstractDiffEqInterpolation,
DECallback, AbstractDEOptions, DECache, AbstractContinuousCallback,
AbstractDiscreteCallback, AbstractLinearProblem,
AbstractNonlinearProblem,
AbstractOptimizationProblem, AbstractSteadyStateProblem,
AbstractJumpProblem,
AbstractNoiseProblem, AbstractEnsembleProblem,
AbstractDynamicalODEProblem,
AbstractDEAlgorithm, StandardODEProblem, AbstractIntegralProblem,
AbstractSensitivityAlgorithm, AbstractODEAlgorithm,
AbstractSDEAlgorithm, AbstractDDEAlgorithm, AbstractDAEAlgorithm,
AbstractSDDEAlgorithm, AbstractRODEAlgorithm,
DAEInitializationAlgorithm,
AbstractSteadyStateAlgorithm, AbstractODEProblem,
AbstractDiscreteProblem, AbstractNonlinearAlgorithm,
AbstractSDEProblem, AbstractRODEProblem, AbstractDDEProblem,
AbstractDAEProblem, AbstractSDDEProblem, AbstractBVProblem,
AbstractTimeseriesSolution, AbstractNoTimeSolution, numargs,
AbstractODEFunction, AbstractSDEFunction, AbstractRODEFunction,
AbstractDDEFunction, AbstractSDDEFunction, AbstractDAEFunction,
AbstractNonlinearFunction, AbstractEnsembleSolution,
AbstractODESolution, AbstractRODESolution, AbstractDAESolution,
AbstractDDESolution,
EnsembleAlgorithm, EnsembleSolution, EnsembleSummary,
NonlinearSolution,
TimeGradientWrapper, TimeDerivativeWrapper, UDerivativeWrapper,
UJacobianWrapper, ParamJacobianWrapper, JacobianWrapper,
check_error!, has_jac, has_tgrad, has_Wfact, has_Wfact_t, has_paramjac,
AbstractODEIntegrator, AbstractSDEIntegrator, AbstractRODEIntegrator,
AbstractDDEIntegrator, AbstractSDDEIntegrator,
AbstractDAEIntegrator, unwrap_cache, has_reinit, reinit!,
postamble!, last_step_failed, islinear, has_stats,
initialize_dae!, build_solution, solution_new_retcode,
solution_new_tslocation, plot_indices,
NullParameters, isinplace, AbstractADType, AbstractDiscretization,
DISCRETE_OUTOFPLACE_DEFAULT, DISCRETE_INPLACE_DEFAULT,
has_analytic, calculate_solution_errors!, AbstractNoiseProcess,
has_colorvec, parameterless_type, undefined_exports,
is_diagonal_noise, AbstractDiffEqFunction, sensitivity_solution,
interp_summary, AbstractHistoryFunction, LinearInterpolation,
ConstantInterpolation, HermiteInterpolation, SensitivityInterpolation,
NoAD, @add_kwonly,
calculate_ensemble_errors, DEFAULT_UPDATE_FUNC, isconstant,
DEFAULT_REDUCTION, isautodifferentiable,
isadaptive, isdiscrete, has_syms, AbstractAnalyticalSolution,
RECOMPILE_BY_DEFAULT, wrap_sol, has_destats
import SciMLBase: solve, init, step!, solve!, __init, __solve, update_coefficients!,
update_coefficients, isadaptive, wrapfun_oop, wrapfun_iip,
unwrap_fw, promote_tspan, set_u!, set_t!, set_ut!
import SciMLBase: AbstractDiffEqLinearOperator # deprecation path
import SciMLStructures
import Tricks
using Reexport
Reexport.@reexport using SciMLBase
SciMLBase.isfunctionwrapper(x::FunctionWrapper) = true
"""
$(TYPEDEF)
"""
abstract type Tableau end
"""
$(TYPEDEF)
"""
abstract type ODERKTableau <: Tableau end
"""
$(TYPEDEF)
"""
abstract type DECostFunction end
import SciMLBase: Void, unwrapped_f
include("utils.jl")
include("fastpow.jl")
include("stats.jl")
include("calculate_residuals.jl")
include("tableaus.jl")
include("internal_falsi.jl")
include("internal_itp.jl")
include("callbacks.jl")
include("common_defaults.jl")
include("solve.jl")
include("internal_euler.jl")
include("forwarddiff.jl")
include("termination_conditions_deprecated.jl") # TODO: remove in the next major release
include("termination_conditions.jl")
include("norecompile.jl")
include("integrator_accessors.jl")
# This is only used for oop stiff solvers
default_factorize(A) = lu(A; check = false)
if isdefined(SciMLBase, :AbstractParameterizedFunction)
import SciMLBase: AbstractParameterizedFunction
else
"""
$(TYPEDEF)
"""
abstract type AbstractParameterizedFunction{iip} <: AbstractODEFunction{iip} end
end
"""
$(TYPEDEF)
"""
struct ConvergenceSetup{P, C}
probs::P
convergence_axis::C
end
export initialize!, finalize!
export SensitivityADPassThrough
export SteadyStateDiffEqTerminationMode, SimpleNonlinearSolveTerminationMode,
NormTerminationMode, RelTerminationMode, RelNormTerminationMode, AbsTerminationMode,
AbsNormTerminationMode, RelSafeTerminationMode, AbsSafeTerminationMode,
RelSafeBestTerminationMode, AbsSafeBestTerminationMode
# Deprecated API
export NLSolveTerminationMode,
NLSolveSafeTerminationOptions, NLSolveTerminationCondition,
NLSolveSafeTerminationResult
export KeywordArgError, KeywordArgWarn, KeywordArgSilent
end # module
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 4617 | """
calculate_residuals(ũ, u₀, u₁, α, ρ, internalnorm, t)
Calculate element-wise residuals
```math
\\frac{ũ}{α + \\max(|u₀|, |u₁|) ρ}
```
"""
@inline @muladd function calculate_residuals(ũ::Number, u₀::Number, u₁::Number,
α, ρ, internalnorm, t)
@fastmath ũ / (α + max(internalnorm(u₀, t), internalnorm(u₁, t)) * ρ)
end
@inline function calculate_residuals(ũ::Array{T}, u₀::Array{T}, u₁::Array{T}, α::T2,
ρ::Real, internalnorm,
t) where
{T <: Number, T2 <: Number}
out = similar(ũ)
calculate_residuals!(out, ũ, u₀, u₁, α, ρ, internalnorm, t)
out
end
@inline function calculate_residuals(ũ, u₀, u₁, α, ρ, internalnorm, t)
@.. broadcast=false calculate_residuals(ũ, u₀, u₁, α, ρ, internalnorm, t)
end
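# Minimal usage sketch (hypothetical values): this is the mixed
# absolute/relative scaling used by adaptive error control, with `abs` standing
# in for the internal norm on scalars:
#
#   calculate_residuals(0.01, 1.0, 1.1, 1e-6, 1e-3, (u, t) -> abs(u), 0.0)
#   # ≈ 0.01 / (1e-6 + 1.1 * 1e-3) ≈ 9.08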
"""
calculate_residuals(u₀, u₁, α, ρ, internalnorm, t)
Calculate element-wise residuals
```math
\\frac{u₁ - u₀}{α + \\max(|u₀|, |u₁|) ρ}
```
"""
@inline @muladd function calculate_residuals(u₀::Number, u₁::Number,
α, ρ, internalnorm, t)
@fastmath (u₁ - u₀) / (α + max(internalnorm(u₀, t), internalnorm(u₁, t)) * ρ)
end
@inline function calculate_residuals(u₀::Array{T}, u₁::Array{T}, α::T2,
ρ::Real, internalnorm,
t) where {T <: Number, T2 <: Number}
out = similar(u₀)
calculate_residuals!(out, u₀, u₁, α, ρ, internalnorm, t)
out
end
@inline function calculate_residuals(u₀, u₁, α, ρ, internalnorm, t)
@.. broadcast=false calculate_residuals(u₀, u₁, α, ρ, internalnorm, t)
end
"""
calculate_residuals(E₁, E₂, u₀, u₁, α, ρ, δ, scalarnorm, t)
Return element-wise residuals
```math
\\frac{δ E₁ + E₂}{α + \\max(scalarnorm(u₀), scalarnorm(u₁)) ρ}.
```
"""
@inline @muladd function calculate_residuals(
E₁::Number, E₂::Number, u₀::Number, u₁::Number,
α::Real, ρ::Real, δ::Number, scalarnorm, t)
@fastmath (δ * E₁ + E₂) / (α + max(scalarnorm(u₀, t), scalarnorm(u₁, t)) * ρ)
end
@inline function calculate_residuals(E₁::Array{<:Number}, E₂::Array{<:Number},
u₀::Array{<:Number}, u₁::Array{<:Number}, α::Real,
ρ::Real, δ::Number, scalarnorm, t)
out = similar(u₀)
calculate_residuals!(out, E₁, E₂, u₀, u₁, α, ρ, δ, scalarnorm, t)
out
end
@inline function calculate_residuals(E₁, E₂, u₀, u₁, α, ρ, δ, scalarnorm, t)
@.. broadcast=false calculate_residuals(E₁, E₂, u₀, u₁, α, ρ, δ, scalarnorm, t)
end
# Inplace Versions
"""
DiffEqBase.calculate_residuals!(out, ũ, u₀, u₁, α, ρ, internalnorm, t, thread = False())
Save element-wise residuals
```math
\\frac{ũ}{α + \\max(|u₀|, |u₁|) ρ}
```
in `out`.
The argument `thread` determines whether internal broadcasting on
appropriate CPU arrays should be serial (`thread = False()`, default)
or use multiple threads (`thread = True()`) when Julia is started
with multiple threads.
"""
@inline function calculate_residuals!(out, ũ, u₀, u₁, α, ρ, internalnorm, t,
thread::Union{False, True} = False())
@.. broadcast=false thread=thread out=calculate_residuals(
ũ, u₀, u₁, α, ρ, internalnorm,
t)
nothing
end
@inline function calculate_residuals!(
out::Array, ũ::Array, u₀::Array, u₁::Array, α::Number,
ρ::Number, internalnorm::F, t, ::False) where {F}
@inbounds @simd ivdep for i in eachindex(out, ũ, u₀, u₁)
out[i] = calculate_residuals(ũ[i], u₀[i], u₁[i], α, ρ, internalnorm, t)
end
nothing
end
"""
calculate_residuals!(out, u₀, u₁, α, ρ, internalnorm, t, thread = False())
Save element-wise residuals
```math
\\frac{u₁ - u₀}{α + \\max(|u₀|, |u₁|) ρ}
```
in `out`.
The argument `thread` determines whether internal broadcasting on
appropriate CPU arrays should be serial (`thread = False()`, default)
or use multiple threads (`thread = True()`) when Julia is started
with multiple threads.
"""
@inline function calculate_residuals!(out, u₀, u₁, α, ρ, internalnorm, t,
thread::Union{False, True} = False())
@.. broadcast=false thread=thread out=calculate_residuals(u₀, u₁, α, ρ, internalnorm, t)
end
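# Usage sketch for the in-place form (hypothetical arrays): `out` is filled
# without allocating, and `True()` (the Static.jl flag used by FastBroadcast)
# opts into threaded broadcast for large CPU arrays:
#
#   out = similar(u₀)
#   calculate_residuals!(out, u₀, u₁, 1e-6, 1e-3, (u, t) -> abs(u), 0.0, True())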
"""
calculate_residuals!(out, E₁, E₂, u₀, u₁, α, ρ, δ, scalarnorm, t, thread = False())
Calculate element-wise residuals
```math
\\frac{δ E₁ + E₂}{α + \\max(scalarnorm(u₀), scalarnorm(u₁)) ρ}.
```
The argument `thread` determines whether internal broadcasting on
appropriate CPU arrays should be serial (`thread = False()`, default)
or use multiple threads (`thread = True()`) when Julia is started
with multiple threads.
"""
@inline function calculate_residuals!(out, E₁, E₂, u₀, u₁, α, ρ, δ, scalarnorm, t,
thread::Union{False, True} = False())
@.. broadcast=false thread=thread out=calculate_residuals(E₁, E₂, u₀, u₁, α, ρ, δ,
scalarnorm, t)
out
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 27500 | """
initialize!(cb::CallbackSet,u,t,integrator::DEIntegrator)
Recursively apply `initialize!` to each callback and return whether any of them modified `u`.
"""
function initialize!(cb::CallbackSet, u, t, integrator::DEIntegrator)
initialize!(u, t, integrator, false, cb.continuous_callbacks...,
cb.discrete_callbacks...)
end
initialize!(cb::CallbackSet{Tuple{}, Tuple{}}, u, t, integrator::DEIntegrator) = false
function initialize!(u, t, integrator::DEIntegrator, any_modified::Bool,
c::DECallback, cs::DECallback...)
c.initialize(c, u, t, integrator)
initialize!(u, t, integrator, any_modified || integrator.u_modified, cs...)
end
function initialize!(u, t, integrator::DEIntegrator, any_modified::Bool,
c::DECallback)
c.initialize(c, u, t, integrator)
any_modified || integrator.u_modified
end
"""
finalize!(cb::CallbackSet,u,t,integrator::DEIntegrator)
Recursively apply `finalize!` to each callback and return whether any of them modified `u`.
"""
function finalize!(cb::CallbackSet, u, t, integrator::DEIntegrator)
finalize!(u, t, integrator, false, cb.continuous_callbacks..., cb.discrete_callbacks...)
end
finalize!(cb::CallbackSet{Tuple{}, Tuple{}}, u, t, integrator::DEIntegrator) = false
function finalize!(u, t, integrator::DEIntegrator, any_modified::Bool,
c::DECallback, cs::DECallback...)
c.finalize(c, u, t, integrator)
finalize!(u, t, integrator, any_modified || integrator.u_modified, cs...)
end
function finalize!(u, t, integrator::DEIntegrator, any_modified::Bool,
c::DECallback)
c.finalize(c, u, t, integrator)
any_modified || integrator.u_modified
end
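# Sketch of the recursion above with hypothetical callbacks: continuous
# callbacks are initialized first, then discrete ones, and the returned flag is
# the OR of every callback's `u_modified` effect:
#
#   cb = CallbackSet(ContinuousCallback((u, t, integ) -> u[1], integ -> nothing),
#                    DiscreteCallback((u, t, integ) -> t > 1.0, integ -> nothing))
#   initialize!(cb, integ.u, integ.t, integ)   # integ from `init(prob, alg)`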
# Helpers
function Base.isempty(cb::CallbackSet)
isempty(cb.continuous_callbacks) && isempty(cb.discrete_callbacks)
end
Base.isempty(cb::AbstractContinuousCallback) = false
Base.isempty(cb::AbstractDiscreteCallback) = false
has_continuous_callback(cb::DiscreteCallback) = false
has_continuous_callback(cb::ContinuousCallback) = true
has_continuous_callback(cb::VectorContinuousCallback) = true
has_continuous_callback(cb::CallbackSet) = !isempty(cb.continuous_callbacks)
has_continuous_callback(cb::Nothing) = false
#======================================================#
# Callback handling
#======================================================#
function get_tmp(integrator::DEIntegrator, callback)
_tmp = get_tmp_cache(integrator)
_tmp === nothing && return nothing
_cache = first(_tmp)
if callback.idxs === nothing
tmp = _cache
elseif !(callback.idxs isa Number)
tmp = @view _cache[callback.idxs]
else
tmp = nothing
end
return tmp
end
function get_condition(integrator::DEIntegrator, callback, abst)
tmp = get_tmp(integrator, callback)
ismutable = !(tmp === nothing)
if abst == integrator.t
if callback.idxs === nothing
tmp = integrator.u
elseif callback.idxs isa Number
tmp = integrator.u[callback.idxs]
else
tmp = @view integrator.u[callback.idxs]
end
elseif abst == integrator.tprev
if callback.idxs === nothing
tmp = integrator.uprev
elseif callback.idxs isa Number
tmp = integrator.uprev[callback.idxs]
else
tmp = @view integrator.uprev[callback.idxs]
end
else
if ismutable
if callback.idxs === nothing
integrator(tmp, abst, Val{0})
else
integrator(tmp, abst, Val{0}, idxs = callback.idxs)
end
else
if callback.idxs === nothing
tmp = integrator(abst, Val{0})
else
tmp = integrator(abst, Val{0}, idxs = callback.idxs)
end
end
# ismutable && !(callback.idxs isa Number) ? integrator(tmp,abst,Val{0},idxs=callback.idxs) :
# tmp = integrator(abst,Val{0},idxs=callback.idxs)
end
integrator.sol.stats.ncondition += 1
if callback isa VectorContinuousCallback
callback.condition(
@view(integrator.callback_cache.tmp_condition[1:(callback.len)]),
tmp, abst, integrator)
return @view(integrator.callback_cache.tmp_condition[1:(callback.len)])
else
return callback.condition(tmp, abst, integrator)
end
end
# Use a generated function for type stability even when many callbacks are given
@inline function find_first_continuous_callback(integrator,
callbacks::Vararg{
AbstractContinuousCallback,
N}) where {N}
find_first_continuous_callback(integrator, tuple(callbacks...))
end
@generated function find_first_continuous_callback(integrator,
callbacks::NTuple{N,
AbstractContinuousCallback
}) where {N}
ex = quote
tmin, upcrossing, event_occurred, event_idx = find_callback_time(integrator,
callbacks[1], 1)
identified_idx = 1
end
for i in 2:N
ex = quote
$ex
tmin2, upcrossing2, event_occurred2, event_idx2 = find_callback_time(
integrator,
callbacks[$i],
$i)
if event_occurred2 && (tmin2 < tmin || !event_occurred)
tmin = tmin2
upcrossing = upcrossing2
event_occurred = true
event_idx = event_idx2
identified_idx = $i
end
end
end
ex = quote
$ex
return tmin, upcrossing, event_occurred, event_idx, identified_idx, $N
end
ex
end
@inline function determine_event_occurance(integrator, callback::VectorContinuousCallback,
counter)
event_occurred = false
if callback.interp_points != 0
addsteps!(integrator)
end
ts = range(integrator.tprev, stop = integrator.t, length = callback.interp_points)
#=
# Faster but can be inaccurate
if callback.interp_points > 1
dt = (integrator.t - integrator.tprev) / (callback.interp_points-1)
else
dt = integrator.dt
end
ts = integrator.tprev:dt:integrator.t
=#
interp_index = 0
# Check if the event occurred
previous_condition = @views(integrator.callback_cache.previous_condition[1:(callback.len)])
if callback.idxs === nothing
callback.condition(previous_condition, integrator.uprev, integrator.tprev,
integrator)
else
callback.condition(previous_condition, integrator.uprev[callback.idxs],
integrator.tprev, integrator)
end
integrator.sol.stats.ncondition += 1
ivec = integrator.vector_event_last_time
prev_sign = @view(integrator.callback_cache.prev_sign[1:(callback.len)])
next_sign = @view(integrator.callback_cache.next_sign[1:(callback.len)])
if integrator.event_last_time == counter &&
minimum(ODE_DEFAULT_NORM(
ArrayInterface.allowed_getindex(previous_condition,
ivec), integrator.t)) <=
100ODE_DEFAULT_NORM(integrator.last_event_error, integrator.t)
        # If there was a previous event, utilize the derivative at the start to
        # choose the previous sign. If the derivative is positive at tprev, then
        # we treat `prev_sign` as negative, and if the derivative is negative then we
        # treat `prev_sign` as positive, regardless of the positivity/negativity
        # of the true value due to it being =0 sans floating point issues.
        # Only do this if the discontinuity did not move it far away from an event,
        # since near an event we use the direction instead of the location to reset.
if callback.interp_points == 0
addsteps!(integrator)
end
# Evaluate condition slightly in future
abst = integrator.tprev + integrator.dt * callback.repeat_nudge
tmp_condition = get_condition(integrator, callback, abst)
@. prev_sign = sign(previous_condition)
prev_sign[ivec] = sign(tmp_condition[ivec])
else
@. prev_sign = sign(previous_condition)
end
prev_sign_index = 1
abst = integrator.t
next_condition = get_condition(integrator, callback, abst)
@. next_sign = sign(next_condition)
event_idx = findall_events!(next_sign, callback.affect!, callback.affect_neg!,
prev_sign)
if sum(event_idx) != 0
event_occurred = true
interp_index = callback.interp_points
end
if callback.interp_points != 0 && !isdiscrete(integrator.alg) &&
sum(event_idx) != length(event_idx) # Use the interpolants for safety checking
fallback = true
for i in 2:length(ts)
abst = ts[i]
copyto!(next_sign, get_condition(integrator, callback, abst))
_event_idx = findall_events!(next_sign, callback.affect!, callback.affect_neg!,
prev_sign)
if sum(_event_idx) != 0
event_occurred = true
event_idx = _event_idx
interp_index = i
fallback = false
break
else
prev_sign_index = i
end
end
if fallback
# If you get here, then you need to reset the event_idx to the
# non-interpolated version
abst = integrator.t
next_condition = get_condition(integrator, callback, abst)
@. next_sign = sign(next_condition)
event_idx = findall_events!(next_sign, callback.affect!, callback.affect_neg!,
prev_sign)
interp_index = callback.interp_points
end
end
event_occurred, interp_index, ts, prev_sign, prev_sign_index, event_idx
end
@inline function determine_event_occurance(integrator, callback::ContinuousCallback,
counter)
event_occurred = false
if callback.interp_points != 0
addsteps!(integrator)
end
ts = range(integrator.tprev, stop = integrator.t, length = callback.interp_points)
#=
# Faster but can be inaccurate
if callback.interp_points > 1
dt = (integrator.t - integrator.tprev) / (callback.interp_points-1)
else
dt = integrator.dt
end
ts = integrator.tprev:dt:integrator.t
=#
interp_index = 0
# Check if the event occurred
if callback.idxs === nothing
previous_condition = callback.condition(integrator.uprev, integrator.tprev,
integrator)
else
@views previous_condition = callback.condition(integrator.uprev[callback.idxs],
integrator.tprev, integrator)
end
integrator.sol.stats.ncondition += 1
prev_sign = 0.0
next_sign = 0.0
if integrator.event_last_time == counter &&
minimum(ODE_DEFAULT_NORM(previous_condition, integrator.t)) <=
100ODE_DEFAULT_NORM(integrator.last_event_error, integrator.t)
        # If there was a previous event, utilize the derivative at the start to
        # choose the previous sign. If the derivative is positive at tprev, then
        # we treat `prev_sign` as negative, and if the derivative is negative then we
        # treat `prev_sign` as positive, regardless of the positivity/negativity
        # of the true value due to it being =0 sans floating point issues.
        # Only do this if the discontinuity did not move it far away from an event,
        # since near an event we use the direction instead of the location to reset.
if callback.interp_points == 0
addsteps!(integrator)
end
# Evaluate condition slightly in future
abst = integrator.tprev + integrator.dt * callback.repeat_nudge
tmp_condition = get_condition(integrator, callback, abst)
prev_sign = sign(tmp_condition)
else
prev_sign = sign(previous_condition)
end
prev_sign_index = 1
abst = integrator.t
next_condition = get_condition(integrator, callback, abst)
next_sign = sign(next_condition)
if ((prev_sign < 0 && callback.affect! !== nothing) ||
(prev_sign > 0 && callback.affect_neg! !== nothing)) && prev_sign * next_sign <= 0
event_occurred = true
interp_index = callback.interp_points
elseif callback.interp_points != 0 && !isdiscrete(integrator.alg) # Use the interpolants for safety checking
for i in 2:length(ts)
abst = ts[i]
new_sign = get_condition(integrator, callback, abst)
if ((prev_sign < 0 && callback.affect! !== nothing) ||
(prev_sign > 0 && callback.affect_neg! !== nothing)) &&
prev_sign * new_sign < 0
event_occurred = true
interp_index = i
break
else
prev_sign_index = i
end
end
end
event_idx = 1
event_occurred, interp_index, ts, prev_sign, prev_sign_index, event_idx
end
# rough implementation, needs multiple type handling
# always ensures that if r = bisection(f, (x0, x1))
# then either f(nextfloat(r)) == 0 or f(nextfloat(r)) * f(r) < 0
# note: not really using bisection - uses the ITP method
function bisection(
f, tup, t_forward::Bool, rootfind::SciMLBase.RootfindOpt, abstol, reltol;
maxiters = 1000)
if rootfind == SciMLBase.LeftRootFind
solve(IntervalNonlinearProblem{false}(f, tup),
InternalITP(), abstol = abstol,
reltol = reltol).left
else
solve(IntervalNonlinearProblem{false}(f, tup),
InternalITP(), abstol = abstol,
reltol = reltol).right
end
end
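# Sketch (hypothetical condition): the event time is bracketed over the step
# and refined by the internal ITP solver, returning the requested side of the
# root:
#
#   f(t, p = nothing) = t^2 - 2
#   bisection(f, (1.0, 2.0), true, SciMLBase.LeftRootFind, 1e-12, 1e-12)
#   # ≈ 1.4142135623…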
"""
findall_events!(next_sign,affect!,affect_neg!,prev_sign)
Modifies `next_sign` in place to an array of booleans indicating, per component,
whether a sign change occurred between `prev_sign` and `next_sign`
(and whether the corresponding affect function is present)
"""
function findall_events!(next_sign::Union{Array, SubArray}, affect!::F1, affect_neg!::F2,
prev_sign::Union{Array, SubArray}) where {F1, F2}
@inbounds for i in 1:length(prev_sign)
next_sign[i] = ((prev_sign[i] < 0 && affect! !== nothing) ||
(prev_sign[i] > 0 && affect_neg! !== nothing)) &&
prev_sign[i] * next_sign[i] <= 0
end
next_sign
end
function findall_events!(next_sign, affect!::F1, affect_neg!::F2, prev_sign) where {F1, F2}
hasaffect::Bool = affect! !== nothing
hasaffectneg::Bool = affect_neg! !== nothing
f = (n, p) -> ((p < 0 && hasaffect) || (p > 0 && hasaffectneg)) && p * n <= 0
A = map!(f, next_sign, next_sign, prev_sign)
next_sign
end
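# Sketch (hypothetical signs): only the `!== nothing` check matters for the
# affect arguments, so any placeholder works. A + to - crossing is flagged
# because `affect_neg!` is present; no crossing means no flag:
#
#   prev = [1.0, -1.0, 1.0]; next = [-1.0, -2.0, 2.0]
#   findall_events!(next, identity, identity, prev)
#   # next is now [1.0, 0.0, 0.0] — the boolean flags stored in place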
function find_callback_time(integrator, callback::ContinuousCallback, counter)
event_occurred, interp_index, ts, prev_sign, prev_sign_index, event_idx = determine_event_occurance(
integrator,
callback,
counter)
if event_occurred
if callback.condition === nothing
new_t = zero(typeof(integrator.t))
else
if callback.interp_points != 0
top_t = ts[interp_index] # Top at the smallest
bottom_t = ts[prev_sign_index]
else
top_t = integrator.t
bottom_t = integrator.tprev
end
if callback.rootfind != SciMLBase.NoRootFind && !isdiscrete(integrator.alg)
zero_func(abst, p = nothing) = get_condition(integrator, callback, abst)
if zero_func(top_t) == 0
Θ = top_t
else
if integrator.event_last_time == counter &&
abs(zero_func(bottom_t)) <= 100abs(integrator.last_event_error) &&
prev_sign_index == 1
# Determined that there is an event by derivative
# But floating point error may make the end point negative
bottom_t += integrator.dt * callback.repeat_nudge
sign_top = sign(zero_func(top_t))
sign(zero_func(bottom_t)) * sign_top >= zero(sign_top) &&
                    error("Double callback crossing floating point reducer errored. Report this issue.")
end
Θ = bisection(zero_func, (bottom_t, top_t), isone(integrator.tdir),
callback.rootfind, callback.abstol, callback.reltol)
integrator.last_event_error = ODE_DEFAULT_NORM(zero_func(Θ), Θ)
end
            #Θ = prevfloat(...)
            # prevfloat guarantees that the new time is either one floating point
            # number just before the event or directly at zero, but not after.
            # If there's a barrier which is never supposed to be crossed,
            # then this will ensure that the state never leaves the domain.
            # Otherwise a rootfinder can return a float which is slightly after
            # the event, making it out of the domain and causing havoc.
new_t = Θ - integrator.tprev
elseif interp_index != callback.interp_points && !isdiscrete(integrator.alg)
new_t = ts[interp_index] - integrator.tprev
else
# If no solve and no interpolants, just use endpoint
new_t = integrator.dt
end
end
else
new_t = zero(typeof(integrator.t))
end
new_t, prev_sign, event_occurred, event_idx
end
function find_callback_time(integrator, callback::VectorContinuousCallback, counter)
event_occurred, interp_index, ts, prev_sign, prev_sign_index, event_idx = determine_event_occurance(
integrator,
callback,
counter)
if event_occurred
if callback.condition === nothing
new_t = zero(typeof(integrator.t))
min_event_idx = findfirst(isequal(1), event_idx)
else
if callback.interp_points != 0
top_t = ts[interp_index] # Top at the smallest
bottom_t = ts[prev_sign_index]
else
top_t = integrator.t
bottom_t = integrator.tprev
end
if callback.rootfind != SciMLBase.NoRootFind && !isdiscrete(integrator.alg)
min_t = nextfloat(top_t)
min_event_idx = -1
for idx in 1:length(event_idx)
if ArrayInterface.allowed_getindex(event_idx, idx) != 0
function zero_func(abst, p = nothing)
ArrayInterface.allowed_getindex(
get_condition(integrator,
callback,
abst), idx)
end
if zero_func(top_t) == 0
Θ = top_t
else
if integrator.event_last_time == counter &&
integrator.vector_event_last_time == idx &&
abs(zero_func(bottom_t)) <=
100abs(integrator.last_event_error) &&
prev_sign_index == 1
# Determined that there is an event by derivative
# But floating point error may make the end point negative
bottom_t += integrator.dt * callback.repeat_nudge
sign_top = sign(zero_func(top_t))
sign(zero_func(bottom_t)) * sign_top >= zero(sign_top) &&
                            error("Double callback crossing floating point reducer errored. Report this issue.")
end
Θ = bisection(zero_func, (bottom_t, top_t),
isone(integrator.tdir), callback.rootfind,
callback.abstol, callback.reltol)
if integrator.tdir * Θ < integrator.tdir * min_t
integrator.last_event_error = ODE_DEFAULT_NORM(
zero_func(Θ),
Θ)
end
end
if integrator.tdir * Θ < integrator.tdir * min_t
min_event_idx = idx
min_t = Θ
end
end
end
            #Θ = prevfloat(...)
            # prevfloat guarantees that the new time is either one floating point
            # number just before the event or directly at zero, but not after.
            # If there's a barrier which is never supposed to be crossed,
            # then this will ensure that the state never leaves the domain.
            # Otherwise a rootfinder can return a float which is slightly after
            # the event, making it out of the domain and causing havoc.
new_t = min_t - integrator.tprev
elseif interp_index != callback.interp_points && !isdiscrete(integrator.alg)
new_t = ts[interp_index] - integrator.tprev
min_event_idx = findfirst(isequal(1), event_idx)
else
# If no solve and no interpolants, just use endpoint
new_t = integrator.dt
min_event_idx = findfirst(isequal(1), event_idx)
end
end
else
new_t = zero(typeof(integrator.t))
min_event_idx = 1
end
if event_occurred && min_event_idx < 0
error("Callback handling failed. Please file an issue with code to reproduce.")
end
new_t, ArrayInterface.allowed_getindex(prev_sign, min_event_idx),
event_occurred::Bool, min_event_idx::Int
end
function apply_callback!(integrator,
callback::Union{ContinuousCallback, VectorContinuousCallback},
cb_time, prev_sign, event_idx)
if isadaptive(integrator)
set_proposed_dt!(integrator,
integrator.tdir * max(nextfloat(integrator.opts.dtmin),
integrator.tdir * callback.dtrelax * integrator.dt))
end
change_t_via_interpolation!(integrator, integrator.tprev + cb_time)
# handle saveat
_, savedexactly = savevalues!(integrator)
saved_in_cb = true
@inbounds if callback.save_positions[1]
# if already saved then skip saving
savedexactly || savevalues!(integrator, true)
end
integrator.u_modified = true
if prev_sign < 0
if callback.affect! === nothing
integrator.u_modified = false
else
callback isa VectorContinuousCallback ?
callback.affect!(integrator, event_idx) : callback.affect!(integrator)
end
elseif prev_sign > 0
if callback.affect_neg! === nothing
integrator.u_modified = false
else
callback isa VectorContinuousCallback ?
callback.affect_neg!(integrator, event_idx) : callback.affect_neg!(integrator)
end
end
if integrator.u_modified
if hasmethod(reeval_internals_due_to_modification!,
Tuple{typeof(integrator)}, (:callback_initializealg,))
reeval_internals_due_to_modification!(
integrator, callback_initializealg = callback.initializealg)
else # handle legacy dispatch without kwarg
reeval_internals_due_to_modification!(integrator)
end
@inbounds if callback.save_positions[2]
savevalues!(integrator, true)
saved_in_cb = true
end
return true, saved_in_cb
end
false, saved_in_cb
end
#Base Case: Just one
@inline function apply_discrete_callback!(integrator, callback::DiscreteCallback)
saved_in_cb = false
if callback.condition(integrator.u, integrator.t, integrator)
# handle saveat
_, savedexactly = savevalues!(integrator)
saved_in_cb = true
@inbounds if callback.save_positions[1]
# if already saved then skip saving
savedexactly || savevalues!(integrator, true)
end
integrator.u_modified = true
callback.affect!(integrator)
if integrator.u_modified
if hasmethod(reeval_internals_due_to_modification!,
Tuple{typeof(integrator), Bool}, (:callback_initializealg,))
reeval_internals_due_to_modification!(
integrator, false, callback_initializealg = callback.initializealg)
else # handle legacy dispatch without kwarg
reeval_internals_due_to_modification!(integrator, false)
end
end
@inbounds if callback.save_positions[2]
savevalues!(integrator, true)
saved_in_cb = true
end
end
integrator.sol.stats.ncondition += 1
integrator.u_modified, saved_in_cb
end
#Starting: Get bool from first and do next
@inline function apply_discrete_callback!(integrator, callback::DiscreteCallback, args...)
apply_discrete_callback!(integrator, apply_discrete_callback!(integrator, callback)...,
args...)
end
@inline function apply_discrete_callback!(integrator, discrete_modified::Bool,
saved_in_cb::Bool, callback::DiscreteCallback,
args...)
bool, saved_in_cb2 = apply_discrete_callback!(integrator,
apply_discrete_callback!(integrator,
callback)...,
args...)
discrete_modified || bool, saved_in_cb || saved_in_cb2
end
@inline function apply_discrete_callback!(integrator, discrete_modified::Bool,
saved_in_cb::Bool, callback::DiscreteCallback)
bool, saved_in_cb2 = apply_discrete_callback!(integrator, callback)
discrete_modified || bool, saved_in_cb || saved_in_cb2
end
function max_vector_callback_length_int(cs::CallbackSet)
max_vector_callback_length_int(cs.continuous_callbacks...)
end
max_vector_callback_length_int() = nothing
function max_vector_callback_length_int(continuous_callbacks...)
all(cb -> cb isa ContinuousCallback, continuous_callbacks) && return nothing
maxlen = -1
for cb in continuous_callbacks
if cb isa VectorContinuousCallback && cb.len > maxlen
maxlen = cb.len
end
end
maxlen
end
function max_vector_callback_length(cs::CallbackSet)
continuous_callbacks = cs.continuous_callbacks
maxlen_cb = nothing
maxlen = -1
for cb in continuous_callbacks
if cb isa VectorContinuousCallback && cb.len > maxlen
maxlen = cb.len
maxlen_cb = cb
end
end
maxlen_cb
end
"""
$(TYPEDEF)
"""
mutable struct CallbackCache{conditionType, signType}
tmp_condition::conditionType
previous_condition::conditionType
next_sign::signType
prev_sign::signType
end
function CallbackCache(u, max_len, ::Type{conditionType},
::Type{signType}) where {conditionType, signType}
tmp_condition = similar(u, conditionType, max_len)
previous_condition = similar(u, conditionType, max_len)
next_sign = similar(u, signType, max_len)
prev_sign = similar(u, signType, max_len)
CallbackCache(tmp_condition, previous_condition, next_sign, prev_sign)
end
function CallbackCache(max_len, ::Type{conditionType},
::Type{signType}) where {conditionType, signType}
tmp_condition = zeros(conditionType, max_len)
previous_condition = zeros(conditionType, max_len)
next_sign = zeros(signType, max_len)
prev_sign = zeros(signType, max_len)
CallbackCache(tmp_condition, previous_condition, next_sign, prev_sign)
end
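# Construction sketch (hypothetical sizes): the cache is sized once for the
# longest VectorContinuousCallback so event detection never allocates per step:
#
#   cache = CallbackCache(4, Float64, Float64)
#   length(cache.tmp_condition) == 4   # true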
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 5759 | function abs2_and_sum(x, y)
reduce(+, x, init = zero(real(value(eltype(x))))) +
reduce(+, y, init = zero(real(value(eltype(y)))))
end
UNITLESS_ABS2(x::Number) = abs2(x)
function UNITLESS_ABS2(x::AbstractArray)
mapreduce(UNITLESS_ABS2, abs2_and_sum, x, init = zero(real(value(eltype(x)))))
end
function UNITLESS_ABS2(x::RecursiveArrayTools.AbstractVectorOfArray)
mapreduce(UNITLESS_ABS2, abs2_and_sum, x.u, init = zero(real(value(eltype(x)))))
end
function UNITLESS_ABS2(x::RecursiveArrayTools.ArrayPartition)
mapreduce(UNITLESS_ABS2, abs2_and_sum, x.x, init = zero(real(value(eltype(x)))))
end
UNITLESS_ABS2(f::F, x::Number) where {F} = abs2(f(x))
function UNITLESS_ABS2(f::F, x::AbstractArray) where {F}
return mapreduce(UNITLESS_ABS2 ∘ f, abs2_and_sum, x;
init = zero(real(value(eltype(x)))))
end
function UNITLESS_ABS2(f::F, x::RecursiveArrayTools.ArrayPartition) where {F}
return mapreduce(UNITLESS_ABS2 ∘ f, abs2_and_sum, x.x;
init = zero(real(value(eltype(x)))))
end
recursive_length(u::AbstractArray{<:Number}) = length(u)
recursive_length(u::Number) = length(u)
recursive_length(u::AbstractArray{<:AbstractArray}) = sum(recursive_length, u)
recursive_length(u::RecursiveArrayTools.ArrayPartition) = sum(recursive_length, u.x)
recursive_length(u::RecursiveArrayTools.VectorOfArray) = sum(recursive_length, u.u)
function recursive_length(u::AbstractArray{
<:StaticArraysCore.StaticArray{S, <:Number}}) where {S}
prod(Size(eltype(u))) * length(u)
end
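# Sketch (hypothetical nested state): UNITLESS_ABS2 recurses through container
# structure while recursive_length counts scalar entries, so together they
# yield a size-independent RMS norm:
#
#   x = RecursiveArrayTools.ArrayPartition([1.0, 2.0], [3.0])
#   UNITLESS_ABS2(x)      # ≈ 14.0 (= 1² + 2² + 3²)
#   recursive_length(x)   # 3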
ODE_DEFAULT_NORM(u::Union{AbstractFloat, Complex}, t) = @fastmath abs(u)
function ODE_DEFAULT_NORM(f::F, u::Union{AbstractFloat, Complex}, t) where {F}
return @fastmath abs(f(u))
end
function ODE_DEFAULT_NORM(u::Array{T}, t) where {T <: Union{AbstractFloat, Complex}}
x = zero(T)
@inbounds @fastmath for ui in u
x += abs2(ui)
end
Base.FastMath.sqrt_fast(real(x) / max(length(u), 1))
end
function ODE_DEFAULT_NORM(f::F,
u::Union{Array{T}, Iterators.Zip{<:Tuple{Vararg{Array{T}}}}},
t) where {F, T <: Union{AbstractFloat, Complex}}
x = zero(T)
@inbounds @fastmath for ui in u
x += abs2(f(ui))
end
Base.FastMath.sqrt_fast(real(x) / max(length(u), 1))
end
function ODE_DEFAULT_NORM(u::StaticArraysCore.StaticArray{<:Tuple, T},
t) where {T <: Union{AbstractFloat, Complex}}
Base.FastMath.sqrt_fast(real(sum(abs2, u)) / max(length(u), 1))
end
function ODE_DEFAULT_NORM(f::F, u::StaticArraysCore.StaticArray{<:Tuple, T},
t) where {F, T <: Union{AbstractFloat, Complex}}
Base.FastMath.sqrt_fast(real(sum(abs2 ∘ f, u)) / max(length(u), 1))
end
function ODE_DEFAULT_NORM(
u::Union{
AbstractArray,
RecursiveArrayTools.AbstractVectorOfArray
},
t)
Base.FastMath.sqrt_fast(UNITLESS_ABS2(u) / max(recursive_length(u), 1))
end
function ODE_DEFAULT_NORM(f::F, u::AbstractArray, t) where {F}
Base.FastMath.sqrt_fast(UNITLESS_ABS2(f, u) / max(recursive_length(u), 1))
end
ODE_DEFAULT_NORM(u, t) = norm(u)
ODE_DEFAULT_NORM(f::F, u, t) where {F} = norm(f.(u))
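# Sketch (hypothetical values): the default norm is an RMS (2-norm divided by
# the square root of the length), so tolerances act per component,
# independently of the problem size:
#
#   ODE_DEFAULT_NORM([3.0, 4.0], 0.0)   # ≈ 5 / sqrt(2) ≈ 3.5355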
ODE_DEFAULT_ISOUTOFDOMAIN(u, p, t) = false
function ODE_DEFAULT_PROG_MESSAGE(dt, u::Array, p, t)
tmp = u[1]
for i in eachindex(u)
tmp = ifelse(abs(u[i]) > abs(tmp), u[i], tmp)
end
"dt=" * string(dt) * "\nt=" * string(t) * "\nmax u=" * string(tmp)
end
function ODE_DEFAULT_PROG_MESSAGE(dt, u, p, t)
"dt=" * string(dt) * "\nt=" * string(t) * "\nmax u=" * string(maximum(abs.(u)))
end
NAN_CHECK(x::Number) = isnan(x)
NAN_CHECK(x::Enum) = false
function NAN_CHECK(x::Union{AbstractArray, RecursiveArrayTools.AbstractVectorOfArray})
any(
NAN_CHECK, x)
end
NAN_CHECK(x::RecursiveArrayTools.ArrayPartition) = any(NAN_CHECK, x.x)
INFINITE_OR_GIANT(x::Number) = !isfinite(x)
function INFINITE_OR_GIANT(x::Union{
AbstractArray, RecursiveArrayTools.AbstractVectorOfArray})
any(
INFINITE_OR_GIANT, x)
end
INFINITE_OR_GIANT(x::RecursiveArrayTools.ArrayPartition) = any(INFINITE_OR_GIANT, x.x)
ODE_DEFAULT_UNSTABLE_CHECK(dt, u, p, t) = false
function ODE_DEFAULT_UNSTABLE_CHECK(dt, u::Union{Number, AbstractArray{<:Number}}, p, t)
INFINITE_OR_GIANT(u)
end
# Nonlinear Solve Norm (norm(_, 2))
NONLINEARSOLVE_DEFAULT_NORM(u::Union{AbstractFloat, Complex}) = @fastmath abs(u)
function NONLINEARSOLVE_DEFAULT_NORM(f::F,
u::Union{AbstractFloat, Complex}) where {F}
return @fastmath abs(f(u))
end
function NONLINEARSOLVE_DEFAULT_NORM(u::Array{
T}) where {T <: Union{AbstractFloat, Complex}}
x = zero(T)
@inbounds @fastmath for ui in u
x += abs2(ui)
end
return Base.FastMath.sqrt_fast(real(x))
end
function NONLINEARSOLVE_DEFAULT_NORM(f::F,
u::Union{Array{T}, Iterators.Zip{<:Tuple{Vararg{Array{T}}}}}) where {
F, T <: Union{AbstractFloat, Complex}}
x = zero(T)
@inbounds @fastmath for ui in u
x += abs2(f(ui))
end
return Base.FastMath.sqrt_fast(real(x))
end
function NONLINEARSOLVE_DEFAULT_NORM(u::StaticArraysCore.StaticArray{
<:Tuple, T}) where {T <: Union{AbstractFloat, Complex}}
return Base.FastMath.sqrt_fast(real(sum(abs2, u)))
end
function NONLINEARSOLVE_DEFAULT_NORM(f::F,
u::StaticArraysCore.StaticArray{<:Tuple, T}) where {
F, T <: Union{AbstractFloat, Complex}}
return Base.FastMath.sqrt_fast(real(sum(abs2 ∘ f, u)))
end
function NONLINEARSOLVE_DEFAULT_NORM(u::AbstractArray)
return Base.FastMath.sqrt_fast(UNITLESS_ABS2(u))
end
function NONLINEARSOLVE_DEFAULT_NORM(f::F, u::AbstractArray) where {F}
return Base.FastMath.sqrt_fast(UNITLESS_ABS2(f, u))
end
NONLINEARSOLVE_DEFAULT_NORM(u) = norm(u)
NONLINEARSOLVE_DEFAULT_NORM(f::F, u) where {F} = norm(f.(u))
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 2359 | # From David Goldberg's blog post
# https://tech.ebayinc.com/engineering/fast-approximate-logarithms-part-iii-the-formulas/
@inline function fastlog2(x::Float32)::Float32
# (x-1)*(a*(x-1) + b)/((x-1) + c) (line 8 of table 2)
a = 0.338953f0
b = 2.198599f0
c = 1.523692f0
#
# Assume IEEE representation, which is sgn(1):exp(8):frac(23)
# representing (1+frac)*2^(exp-127) Call 1+frac the significand
#
# get exponent
ux1i = reinterpret(UInt32, x)
exp = (ux1i & 0x7F800000) >> 23
# actual exponent is exp-127, will subtract 127 later
greater = ux1i & 0x00400000 # true if signif > 1.5
if greater !== 0x00000000
ux2i = (ux1i & 0x007FFFFF) | 0x3f000000
signif = reinterpret(Float32, ux2i)
fexp = exp - 126.0f0 # 126 instead of 127 compensates for division by 2
signif = signif - 1.0f0
else
ux2i = (ux1i & 0x007FFFFF) | 0x3f800000
signif = reinterpret(Float32, ux2i)
fexp = exp - 127.0f0
signif = signif - 1.0f0
end
lg2 = fexp + signif * (a * signif + b) / (signif + c)
return lg2
end
# Translated from OpenLibm but less accurate because I forced the tableau to be
# Float32, whereas OpenLibm uses Float64
#
# https://github.com/JuliaMath/openlibm/blob/cca41bc1abd01804afa4862bbd2c79cc9803171a/src/s_exp2f.c
const EXP2FT = (Float32(0x1.6a09e667f3bcdp-1),
Float32(0x1.7a11473eb0187p-1),
Float32(0x1.8ace5422aa0dbp-1),
Float32(0x1.9c49182a3f090p-1),
Float32(0x1.ae89f995ad3adp-1),
Float32(0x1.c199bdd85529cp-1),
Float32(0x1.d5818dcfba487p-1),
Float32(0x1.ea4afa2a490dap-1),
Float32(0x1.0000000000000p+0),
Float32(0x1.0b5586cf9890fp+0),
Float32(0x1.172b83c7d517bp+0),
Float32(0x1.2387a6e756238p+0),
Float32(0x1.306fe0a31b715p+0),
Float32(0x1.3dea64c123422p+0),
Float32(0x1.4bfdad5362a27p+0),
Float32(0x1.5ab07dd485429p+0))
"""
fastpow(x::T, y::T) where {T} -> float(T)
Computes `x^y` quickly by routing through `Float32` (`exp2`/`fastlog2`), trading accuracy for performance.
"""
@inline function fastpow(x::T, y::T) where {T<:Real}
outT = float(T)
if iszero(x)
return zero(outT)
elseif isinf(x) && isinf(y)
return convert(outT, Inf)
else
return convert(
outT, @fastmath exp2(convert(Float32, y) * fastlog2(convert(Float32, x))))
end
end
@inline fastpow(x, y) = x^y
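# Accuracy sketch (hypothetical values): routing through Float32 and the
# polynomial fastlog2 trades roughly 1e-4-level relative error for speed:
#
#   fastpow(2.0, 10.0)   # ≈ 1024.0, but not bit-exact
#   fastpow(0.0, 3.0)    # 0.0 (special-cased above)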
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 18072 | const DUALCHECK_RECURSION_MAX = 10
"""
promote_dual(::Type{T},::Type{T2})
Is like the number promotion system, but always prefers a dual number type above
anything else. For higher order differentiation, it returns the most dualiest of
them all. This is then used to promote `u0` into the suspected highest differentiation
space for solving the equation.
"""
promote_dual(::Type{T}, ::Type{T2}) where {T, T2} = T
promote_dual(::Type{T}, ::Type{T2}) where {T <: ForwardDiff.Dual, T2} = T
function promote_dual(::Type{T},
::Type{T2}) where {T <: ForwardDiff.Dual, T2 <: ForwardDiff.Dual}
T
end
promote_dual(::Type{T}, ::Type{T2}) where {T, T2 <: ForwardDiff.Dual} = T2
function promote_dual(::Type{T},
::Type{T2}) where {T3, T4, V, V2 <: ForwardDiff.Dual, N, N2,
T <: ForwardDiff.Dual{T3, V, N},
T2 <: ForwardDiff.Dual{T4, V2, N2}}
T2
end
function promote_dual(::Type{T},
::Type{T2}) where {T3, T4, V <: ForwardDiff.Dual, V2, N, N2,
T <: ForwardDiff.Dual{T3, V, N},
T2 <: ForwardDiff.Dual{T4, V2, N2}}
T
end
function promote_dual(::Type{T},
::Type{T2}) where {
T3, V <: ForwardDiff.Dual, V2 <: ForwardDiff.Dual,
N,
T <: ForwardDiff.Dual{T3, V, N},
T2 <: ForwardDiff.Dual{T3, V2, N}}
ForwardDiff.Dual{T3, promote_dual(V, V2), N}
end
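# Sketch of the promotion rules above (hypothetical types): duals always win
# against plain numbers, and nested duals prefer the more deeply nested type:
#
#   D = ForwardDiff.Dual{Nothing, Float64, 2}
#   promote_dual(Float64, D) === D   # true
#   promote_dual(D, Float64) === D   # true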
# `reduce` and `map` are specialized on tuples to be unrolled (via recursion)
# Therefore, they can be type stable even with heterogeneous input types.
# We also don't care about allocating any temporaries with them, as it should
# all be unrolled and optimized away.
# Being unrolled also means const prop can work for things like
# `mapreduce(f, op, propertynames(x))`
# where `f` may call `getproperty` and thus have return type dependent
# on the particular symbol.
# `mapreduce` hasn't received any such specialization.
@inline diffeqmapreduce(f::F, op::OP, x::Tuple) where {F, OP} = reduce_tup(op, map(f, x))
@inline function diffeqmapreduce(f::F, op::OP, x::NamedTuple) where {F, OP}
reduce_tup(op, map(f, x))
end
# For other container types, we probably just want to call `mapreduce`
@inline diffeqmapreduce(f::F, op::OP, x) where {F, OP} = mapreduce(f, op, x, init = Any)
struct DualEltypeChecker{T, T2}
x::T
counter::T2
end
getval(::Val{I}) where {I} = I
getval(::Type{Val{I}}) where {I} = I
getval(I::Int) = I
function (dec::DualEltypeChecker)(::Val{Y}) where {Y}
isdefined(dec.x, Y) || return Any
getval(dec.counter) >= DUALCHECK_RECURSION_MAX && return Any
anyeltypedual(getfield(dec.x, Y), Val{getval(dec.counter)})
end
# Untyped dispatch: catch composite types, check all of their fields
"""
anyeltypedual(x)
Searches through a type to see if any of its values are dual numbers. This is used to
then promote other values to match the dual type. For example, if a user passes a parameter
which is a `Dual` and a `u0` which is a `Float64`, after the first time step, `f(u,p,t) = p*u`
will change `u0` from `Float64` to `Dual`. Thus the state variable always needs to be converted
to a dual number before the solve. Worse still, this needs to be done in the case of
`f(du,u,p,t) = du[1] = p*u[1]`, and thus running `f` and taking the return value is not a valid
way to calculate the required state type.
But given the properties of automatic differentiation requiring that differentiation of parameters
implies differentiation of state, we assume any dual parameters implies differentiation of state
and then attempt to upconvert `u0` to match that dual-ness. Because this changes types, this needs
to be specified at compile time and thus cannot have a Bool-based opt out, so in the future this
may be extended to use a preference system to opt-out with a `UPCONVERT_DUALS`. In the case where
upconversion is not done automatically, the user is required to upconvert all initial conditions
themselves, for an example of how this can be confusing to a user see
https://discourse.julialang.org/t/typeerror-in-julia-turing-when-sampling-for-a-forced-differential-equation/82937
"""
@generated function anyeltypedual(x, ::Type{Val{counter}} = Val{0}) where {counter}
x = x.name === Core.Compiler.typename(Type) ? x.parameters[1] : x
if x <: ForwardDiff.Dual
:($x)
elseif fieldnames(x) === ()
:(Any)
elseif counter < DUALCHECK_RECURSION_MAX
T = diffeqmapreduce(x -> anyeltypedual(x, Val{counter + 1}), promote_dual,
x.parameters)
if T === Any || isconcretetype(T)
:($T)
else
:(diffeqmapreduce(DualEltypeChecker($x, $counter + 1), promote_dual,
map(Val, fieldnames($(typeof(x))))))
end
else
:(Any)
end
end
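# Detection sketch (hypothetical parameter object): duals are found even when
# buried inside composite structures such as named tuples:
#
#   p = (a = 1.0, b = ForwardDiff.Dual{Nothing}(1.0, 1.0))
#   anyeltypedual(p) <: ForwardDiff.Dual   # true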
const FORWARDDIFF_AUTODETECTION_FAILURE_MESSAGE = """
Failed to automatically detect ForwardDiff compatibility of
the parameter object. In order for ForwardDiff.jl automatic
differentiation to work on a solution object, the state of
the differential equation or nonlinear solve (`u0`) needs to
be converted to a Dual type which matches the values being
differentiated. For example, for a loss function loss(p)
where `p` is a `Vector{Float64}`, this conversion is
equivalent to:
```julia
# Convert u0 to match the new Dual element type of `p`
_prob = remake(prob, u0 = eltype(p).(prob.u0))
```
In most cases, SciML tools are able to do this conversion
automatically. However, it seems you have provided a
parameter type for which this automatic conversion has failed.
To fix this, you can do the conversion yourself. For example,
if you have a parameter vector being optimized `p` which is
then put into an odd struct, you can manually convert `u0`
to match `p`:
```julia
function loss(p)
_prob = remake(prob, u0 = eltype(p).(prob.u0), p = MyStruct(p))
sol = solve(_prob, ...)
# do stuff on sol
end
```
Or you can define a dispatch on `DiffEqBase.anyeltypedual`
which tells the system what fields to interpret as the
differentiable parts. For example, to support ODESolutions
as parameters we tell it the data is `sol.u` and `sol.t` via:
```julia
function DiffEqBase.anyeltypedual(sol::ODESolution, counter = 0)
DiffEqBase.anyeltypedual((sol.u, sol.t))
end
```
To opt a type out of the dual checking, define an overload
that returns Any. For example:
```julia
function DiffEqBase.anyeltypedual(::YourType, ::Type{Val{counter}}) where {counter}
Any
end
```
If you have defined this on a common type which should
be more generally supported, please open a pull request
adding this dispatch. If you need help defining this dispatch,
feel free to open an issue.
"""
struct ForwardDiffAutomaticDetectionFailure <: Exception end
function Base.showerror(io::IO, e::ForwardDiffAutomaticDetectionFailure)
print(io, FORWARDDIFF_AUTODETECTION_FAILURE_MESSAGE)
end
function anyeltypedual(::Type{Union{}})
throw(ForwardDiffAutomaticDetectionFailure())
end
# Opt out since these are using for preallocation, not differentiation
function anyeltypedual(x::Union{ForwardDiff.AbstractConfig, Module},
::Type{Val{counter}} = Val{0}) where {counter}
Any
end
function anyeltypedual(x::Type{T},
::Type{Val{counter}} = Val{0}) where {counter} where {T <:
ForwardDiff.AbstractConfig}
Any
end
function anyeltypedual(::Type{<:AbstractTimeseriesSolution{T, N}},
::Type{Val{counter}} = Val{0}) where {T, N, counter}
anyeltypedual(T)
end
function anyeltypedual(x::ForwardDiff.DiffResults.DiffResult,
::Type{Val{counter}} = Val{0}) where {counter}
Any
end
function anyeltypedual(x::Type{T},
::Type{Val{counter}} = Val{0}) where {counter} where {T <:
ForwardDiff.DiffResults.DiffResult}
Any
end
function anyeltypedual(x::SciMLBase.RecipesBase.AbstractPlot,
::Type{Val{counter}} = Val{0}) where {counter}
Any
end
function anyeltypedual(x::Returns, ::Type{Val{counter}} = Val{0}) where {counter}
anyeltypedual(x.value, Val{counter})
end
if isdefined(PreallocationTools, :FixedSizeDiffCache)
function anyeltypedual(x::PreallocationTools.FixedSizeDiffCache,
::Type{Val{counter}} = Val{0}) where {counter}
Any
end
end
Base.@assume_effects :foldable function __anyeltypedual(::Type{T}) where {T}
if T isa Union
promote_dual(anyeltypedual(T.a), anyeltypedual(T.b))
elseif hasproperty(T, :parameters)
mapreduce(anyeltypedual, promote_dual, T.parameters; init = Any)
else
T
end
end
function anyeltypedual(::Type{T}, ::Type{Val{counter}} = Val{0}) where {counter} where {T}
__anyeltypedual(T)
end
function anyeltypedual(::Type{T},
::Type{Val{counter}} = Val{0}) where {counter} where {T <: ForwardDiff.Dual}
T
end
function anyeltypedual(::Type{T},
::Type{Val{counter}} = Val{0}) where {counter} where {T <:
Union{AbstractArray, Set}}
anyeltypedual(eltype(T))
end
Base.@pure function __anyeltypedual_ntuple(::Type{T}) where {T <: NTuple}
if isconcretetype(eltype(T))
return eltype(T)
end
if isempty(T.parameters)
Any
else
mapreduce(anyeltypedual, promote_dual, T.parameters; init = Any)
end
end
function anyeltypedual(
::Type{T}, ::Type{Val{counter}} = Val{0}) where {counter} where {T <: NTuple}
__anyeltypedual_ntuple(T)
end
# Any in this context just means not Dual
function anyeltypedual(
x::SciMLBase.NullParameters, ::Type{Val{counter}} = Val{0}) where {counter}
Any
end
function anyeltypedual(sol::RecursiveArrayTools.AbstractDiffEqArray, counter = 0)
diffeqmapreduce(anyeltypedual, promote_dual, (sol.u, sol.t))
end
function anyeltypedual(prob::Union{ODEProblem, SDEProblem, RODEProblem, DDEProblem},
::Type{Val{counter}} = Val{0}) where {counter}
anyeltypedual((prob.u0, prob.p, prob.tspan))
end
function anyeltypedual(
prob::Union{NonlinearProblem, NonlinearLeastSquaresProblem, OptimizationProblem},
::Type{Val{counter}} = Val{0}) where {counter}
anyeltypedual((prob.u0, prob.p))
end
function anyeltypedual(x::Number, ::Type{Val{counter}} = Val{0}) where {counter}
anyeltypedual(typeof(x))
end
function anyeltypedual(
x::Union{String, Symbol}, ::Type{Val{counter}} = Val{0}) where {counter}
typeof(x)
end
function anyeltypedual(x::Union{AbstractArray{T}, Set{T}},
::Type{Val{counter}} = Val{0}) where {counter} where {
T <:
Union{Number,
Symbol,
String}}
anyeltypedual(T)
end
function anyeltypedual(x::Union{AbstractArray{T}, Set{T}},
::Type{Val{counter}} = Val{0}) where {counter} where {
T <: Union{
AbstractArray{
<:Number,
},
Set{
<:Number,
}}}
anyeltypedual(eltype(x))
end
function anyeltypedual(x::Union{AbstractArray{T}, Set{T}},
::Type{Val{counter}} = Val{0}) where {counter} where {N, T <: NTuple{N, <:Number}}
anyeltypedual(eltype(x))
end
# Try to avoid this dispatch because it can lead to type inference issues when !isconcretetype(eltype(x))
function anyeltypedual(x::AbstractArray, ::Type{Val{counter}} = Val{0}) where {counter}
if isconcretetype(eltype(x))
anyeltypedual(eltype(x))
elseif !isempty(x) && all(i -> isassigned(x, i), 1:length(x)) &&
counter < DUALCHECK_RECURSION_MAX
_counter = Val{counter + 1}
mapreduce(y -> anyeltypedual(y, _counter), promote_dual, x)
else
# This fallback to Any is required since otherwise we cannot handle `undef` in all cases
# misses cases of
Any
end
end
function anyeltypedual(x::Set, ::Type{Val{counter}} = Val{0}) where {counter}
if isconcretetype(eltype(x))
anyeltypedual(eltype(x))
else
# This fallback to Any is required since otherwise we cannot handle `undef` in all cases
Any
end
end
function anyeltypedual(x::Tuple, ::Type{Val{counter}} = Val{0}) where {counter}
# Handle the empty tuple case separately for inference and to avoid mapreduce error
if x === ()
Any
else
diffeqmapreduce(anyeltypedual, promote_dual, x)
end
end
function anyeltypedual(x::Dict, ::Type{Val{counter}} = Val{0}) where {counter}
isempty(x) ? eltype(values(x)) : mapreduce(anyeltypedual, promote_dual, values(x))
end
function anyeltypedual(x::NamedTuple, ::Type{Val{counter}} = Val{0}) where {counter}
anyeltypedual(values(x))
end
function DiffEqBase.anyeltypedual(
f::SciMLBase.AbstractSciMLFunction, ::Type{Val{counter}}) where {counter}
Any
end
@inline promote_u0(::Nothing, p, t0) = nothing
@inline function promote_u0(u0, p, t0)
if SciMLStructures.isscimlstructure(p)
_p = SciMLStructures.canonicalize(SciMLStructures.Tunable(), p)[1]
if !isequal(_p, p)
return promote_u0(u0, _p, t0)
end
end
Tu = eltype(u0)
if Tu <: ForwardDiff.Dual
return u0
end
Tp = anyeltypedual(p)
if Tp == Any
Tp = Tu
end
Tt = anyeltypedual(t0)
if Tt == Any
Tt = Tu
end
Tcommon = promote_type(Tu, Tp, Tt)
return if Tcommon <: ForwardDiff.Dual
Tcommon.(u0)
else
u0
end
end
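# Sketch (hypothetical values): `u0` is upconverted only when the parameters or
# initial time actually carry duals; otherwise it is returned unchanged:
#
#   d = ForwardDiff.Dual{Nothing}(2.0, 1.0)
#   eltype(promote_u0([1.0], [d], 0.0)) <: ForwardDiff.Dual   # true
#   promote_u0([1.0], [2.0], 0.0)                             # [1.0], untouched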
@inline function promote_u0(u0::AbstractArray{<:Complex}, p, t0)
if SciMLStructures.isscimlstructure(p)
_p = SciMLStructures.canonicalize(SciMLStructures.Tunable(), p)[1]
if !isequal(_p, p)
return promote_u0(u0, _p, t0)
end
end
Tu = real(eltype(u0))
if Tu <: ForwardDiff.Dual
return u0
end
Tp = anyeltypedual(p)
if Tp == Any
Tp = Tu
end
Tt = anyeltypedual(t0)
if Tt == Any
Tt = Tu
end
Tcommon = promote_type(eltype(u0), Tp, Tt)
return if real(Tcommon) <: ForwardDiff.Dual
Tcommon.(u0)
else
u0
end
end
function promote_tspan(u0::AbstractArray{<:ForwardDiff.Dual}, p,
tspan::Tuple{<:ForwardDiff.Dual, <:ForwardDiff.Dual}, prob, kwargs)
return _promote_tspan(tspan, kwargs)
end
function promote_tspan(u0::AbstractArray{<:ForwardDiff.Dual}, p, tspan, prob, kwargs)
if (haskey(kwargs, :callback) && has_continuous_callback(kwargs[:callback])) ||
(haskey(prob.kwargs, :callback) && has_continuous_callback(prob.kwargs[:callback]))
return _promote_tspan(eltype(u0).(tspan), kwargs)
else
return _promote_tspan(tspan, kwargs)
end
end
function promote_tspan(u0::AbstractArray{<:Complex{<:ForwardDiff.Dual}}, p, tspan, prob,
kwargs)
return _promote_tspan(real(eltype(u0)).(tspan), kwargs)
end
value(x::Type{ForwardDiff.Dual{T, V, N}}) where {T, V, N} = V
value(x::ForwardDiff.Dual) = value(ForwardDiff.value(x))
@inline fastpow(x::ForwardDiff.Dual, y::ForwardDiff.Dual) = x^y
sse(x::Number) = abs2(x)
sse(x::ForwardDiff.Dual) = sse(ForwardDiff.value(x)) + sum(sse, ForwardDiff.partials(x))
totallength(x::Number) = 1
function totallength(x::ForwardDiff.Dual)
totallength(ForwardDiff.value(x)) + sum(totallength, ForwardDiff.partials(x))
end
totallength(x::AbstractArray) = __sum(totallength, x; init = 0)
@inline ODE_DEFAULT_NORM(u::ForwardDiff.Dual, ::Any) = sqrt(sse(u))
@inline function ODE_DEFAULT_NORM(u::AbstractArray{<:ForwardDiff.Dual{Tag, T}},
t::Any) where {Tag, T}
sqrt(__sum(sse, u; init = sse(zero(T))) / totallength(u))
end
@inline ODE_DEFAULT_NORM(u::ForwardDiff.Dual, ::ForwardDiff.Dual) = sqrt(sse(u))
@inline function ODE_DEFAULT_NORM(u::AbstractArray{<:ForwardDiff.Dual{Tag, T}},
::ForwardDiff.Dual) where {Tag, T}
sqrt(__sum(sse, u; init = sse(zero(T))) / totallength(u))
end
if !hasmethod(nextfloat, Tuple{ForwardDiff.Dual})
# Type piracy. Should upstream
function Base.nextfloat(d::ForwardDiff.Dual{T, V, N}) where {T, V, N}
ForwardDiff.Dual{T}(nextfloat(d.value), d.partials)
end
function Base.prevfloat(d::ForwardDiff.Dual{T, V, N}) where {T, V, N}
ForwardDiff.Dual{T}(prevfloat(d.value), d.partials)
end
end
# bisection(f, tup::Tuple{T,T}, t_forward::Bool) where {T<:ForwardDiff.Dual} = find_zero(f, tup, Roots.AlefeldPotraShi())
# Static Arrays don't support the `init` keyword argument for `sum`
@inline __sum(f::F, args...; init, kwargs...) where {F} = sum(f, args...; init, kwargs...)
@inline function __sum(f::F, a::StaticArraysCore.StaticArray...; init, kwargs...) where {F}
return mapreduce(f, +, a...; init, kwargs...)
end
# the following are setup per how integrators are implemented in OrdinaryDiffEq and
# StochasticDiffEq and provide dispatch points that JumpProcesses and others can use.
function get_tstops(integ::DEIntegrator)
error("get_tstops not implemented for integrators of type $(nameof(typeof(integ)))")
end
function get_tstops_array(integ::DEIntegrator)
error("get_tstops_array not implemented for integrators of type $(nameof(typeof(integ)))")
end
function get_tstops_max(integ::DEIntegrator)
error("get_tstops_max not implemented for integrators of type $(nameof(typeof(integ)))")
end
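# Hypothetical sketch of how a downstream integrator type could provide these
# hooks (the names below are illustrative, not actual OrdinaryDiffEq code):
#
#     struct MyIntegrator{T} <: DiffEqBase.DEIntegrator{Nothing, false, Nothing, T}
#         tstops::Vector{T}
#     end
#     DiffEqBase.get_tstops(i::MyIntegrator) = i.tstops
#     DiffEqBase.get_tstops_array(i::MyIntegrator) = i.tstops
#     DiffEqBase.get_tstops_max(i::MyIntegrator) = maximum(i.tstops)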
# This is a simple example showing how forward and backward Euler
# could be wrapped
module InternalEuler
using DiffEqBase, LinearAlgebra
# make an algorithm type
abstract type EulerAlgs <: DiffEqBase.AbstractODEAlgorithm end
struct FwdEulerAlg <: EulerAlgs end
struct BwdEulerAlg <: EulerAlgs end
function DiffEqBase.solve(prob::DiffEqBase.AbstractODEProblem{uType, tType, isinplace},
Alg::FwdEulerAlg;
dt = (prob.tspan[2] - prob.tspan[1]) / 100,
tstops = tType[],
kwargs...) where {uType, tType, isinplace}
u0 = prob.u0
f = prob.f
tspan = prob.tspan
p = prob.p
if isempty(tstops)
tstops = tspan[1]:dt:tspan[2]
end
@assert tstops[1] == tspan[1]
nt = length(tstops)
out = Vector{uType}(undef, nt)
out[1] = copy(u0)
tmp = copy(u0)
for i in 2:nt
t = tstops[i]
dt = t - tstops[i - 1]
if isinplace
f(tmp, out[i - 1], p, t)
out[i] = out[i - 1] + dt * tmp
else
out[i] = out[i - 1] + dt * f(out[i - 1], p, t)
end
end
# make solution type
DiffEqBase.build_solution(prob, Alg, tstops, out)
end
function DiffEqBase.solve(prob::DiffEqBase.AbstractODEProblem{uType, tType, isinplace},
Alg::BwdEulerAlg;
dt = (prob.tspan[2] - prob.tspan[1]) / 100,
tstops = tType[],
tol = 1e-5,
maxiter = 100,
kwargs...) where {uType, tType, isinplace}
u0 = prob.u0
f = prob.f
tspan = prob.tspan
p = prob.p
# TODO: fix numparameters as it picks up the Jacobian
# @assert !isinplace "Only out of place functions supported"
@assert DiffEqBase.has_jac(f) "Provide Jacobian as f(::Val{:jac}, ...)"
if isempty(tstops)
tstops = tspan[1]:dt:tspan[2]
end
@assert tstops[1] == tspan[1]
nt = length(tstops)
out = Vector{uType}(undef, nt)
out[1] = copy(u0)
for i in 2:nt
t = tstops[i]
dt = t - tstops[i - 1]
out[i] = newton(t, dt, out[i - 1], p, f, f.jac, tol, maxiter)
end
# make solution type
DiffEqBase.build_solution(prob, Alg, tstops, out)
end
function newton(t, dt, u_last, p, f, jac, tol, maxiter)
res = (u) -> u - u_last - dt * f(u, p, t)
u = u_last + dt * f(u_last, p, t) # forward Euler step as first guess
for i in 1:maxiter
du = -(I - dt * jac(u, p, t)) \ res(u)
u += du
norm(du, Inf) < tol && return u
end
error("Newton not converged")
end
end
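# Usage sketch (the problem below is made up for illustration):
#
#     f(u, p, t) = -u                       # out-of-place linear decay
#     prob = DiffEqBase.ODEProblem{false}(f, [1.0], (0.0, 1.0))
#     sol = DiffEqBase.solve(prob, InternalEuler.FwdEulerAlg(); dt = 1e-3)
#     sol.u[end][1]                         # ≈ exp(-1) up to O(dt) error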
"""
prevfloat_tdir(x, x0, x1)
Move `x` one floating-point value towards `x0`.
"""
function prevfloat_tdir(x, x0, x1)
x1 > x0 ? prevfloat(x) : nextfloat(x)
end
function nextfloat_tdir(x, x0, x1)
x1 > x0 ? nextfloat(x) : prevfloat(x)
end
function max_tdir(a, b, x0, x1)
x1 > x0 ? max(a, b) : min(a, b)
end
"""
`InternalFalsi`: A non-allocating regula falsi method, internal to DiffEqBase for
simpler dependencies.
"""
struct InternalFalsi end
function SciMLBase.solve(prob::IntervalNonlinearProblem, alg::InternalFalsi, args...;
maxiters = 1000,
kwargs...)
f = Base.Fix2(prob.f, prob.p)
left, right = prob.tspan
fl, fr = f(left), f(right)
if iszero(fl)
return SciMLBase.build_solution(prob, alg, left, fl;
retcode = ReturnCode.ExactSolutionLeft, left = left,
right = right)
end
if iszero(fr)
return SciMLBase.build_solution(prob, alg, right, fr;
retcode = ReturnCode.ExactSolutionRight, left = left,
right = right)
end
i = 1
using_falsi_steps = true
while i < maxiters
# First, perform a regula falsi iteration
if using_falsi_steps
if nextfloat_tdir(left, prob.tspan...) == right
return SciMLBase.build_solution(prob, alg, left, fl;
retcode = ReturnCode.FloatingPointLimit,
left = left, right = right)
end
mid = (fr * left - fl * right) / (fr - fl)
for _ in 1:10 # `_` avoids shadowing the outer iteration counter `i`
mid = max_tdir(left, prevfloat_tdir(mid, prob.tspan...), prob.tspan...)
end
if mid == right || mid == left
using_falsi_steps = false
continue
end
fm = f(mid)
if iszero(fm)
right = mid
using_falsi_steps = false
continue
end
if sign(fl) == sign(fm)
fl = fm
left = mid
else
fr = fm
right = mid
end
i += 1
end
# Then, perform a bisection iteration
mid = (left + right) / 2
(mid == left || mid == right) &&
return SciMLBase.build_solution(prob, alg, left, fl;
retcode = ReturnCode.FloatingPointLimit,
left = left, right = right)
fm = f(mid)
if iszero(fm)
right = mid
fr = fm
elseif sign(fm) == sign(fl)
left = mid
fl = fm
else
right = mid
fr = fm
end
i += 1
end
return SciMLBase.build_solution(prob, alg, left, fl; retcode = ReturnCode.MaxIters,
left = left, right = right)
end
function scalar_nlsolve_ad(prob, alg::InternalFalsi, args...; kwargs...)
f = prob.f
p = value(prob.p)
if prob isa IntervalNonlinearProblem
tspan = value(prob.tspan)
newprob = IntervalNonlinearProblem(f, tspan, p; prob.kwargs...)
else
u0 = value(prob.u0)
newprob = NonlinearProblem(f, u0, p; prob.kwargs...)
end
sol = solve(newprob, alg, args...; kwargs...)
uu = sol.u
if p isa Number
f_p = ForwardDiff.derivative(Base.Fix1(f, uu), p)
else
f_p = ForwardDiff.gradient(Base.Fix1(f, uu), p)
end
f_x = ForwardDiff.derivative(Base.Fix2(f, p), uu)
pp = prob.p
sumfun = let f_x′ = -f_x
((fp, p),) -> (fp / f_x′) * ForwardDiff.partials(p)
end
partials = sum(sumfun, zip(f_p, pp))
return sol, partials
end
function SciMLBase.solve(
prob::IntervalNonlinearProblem{uType, iip,
<:ForwardDiff.Dual{T, V, P}},
alg::InternalFalsi, args...;
kwargs...) where {uType, iip, T, V, P}
sol, partials = scalar_nlsolve_ad(prob, alg, args...; kwargs...)
return SciMLBase.build_solution(prob, alg, ForwardDiff.Dual{T, V, P}(sol.u, partials),
sol.resid; retcode = sol.retcode,
left = ForwardDiff.Dual{T, V, P}(sol.left, partials),
right = ForwardDiff.Dual{T, V, P}(sol.right, partials))
end
function SciMLBase.solve(
prob::IntervalNonlinearProblem{uType, iip,
<:AbstractArray{
<:ForwardDiff.Dual{T,
V,
P},
}},
alg::InternalFalsi, args...;
kwargs...) where {uType, iip, T, V, P}
sol, partials = scalar_nlsolve_ad(prob, alg, args...; kwargs...)
return SciMLBase.build_solution(prob, alg, ForwardDiff.Dual{T, V, P}(sol.u, partials),
sol.resid; retcode = sol.retcode,
left = ForwardDiff.Dual{T, V, P}(sol.left, partials),
right = ForwardDiff.Dual{T, V, P}(sol.right, partials))
end
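# Usage sketch (illustrative): bracketed root-finding with the internal method.
#
#     prob = IntervalNonlinearProblem((u, p) -> u^2 - p, (1.0, 2.0), 2.0)
#     sol = solve(prob, InternalFalsi())
#     sol.u   # ≈ sqrt(2); `sol.left`/`sol.right` give the final bracket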
"""
`InternalITP`: A non-allocating ITP method, internal to DiffEqBase for
simpler dependencies.
"""
struct InternalITP
k1::Float64
k2::Float64
n0::Int
end
InternalITP() = InternalITP(0.007, 1.5, 10)
function SciMLBase.solve(prob::IntervalNonlinearProblem{IP, Tuple{T, T}}, alg::InternalITP,
args...;
maxiters = 1000, kwargs...) where {IP, T}
f = Base.Fix2(prob.f, prob.p)
left, right = prob.tspan # a and b
fl, fr = f(left), f(right)
ϵ = eps(T)
if iszero(fl)
return SciMLBase.build_solution(prob, alg, left, fl;
retcode = ReturnCode.ExactSolutionLeft, left = left,
right = right)
elseif iszero(fr)
return SciMLBase.build_solution(prob, alg, right, fr;
retcode = ReturnCode.ExactSolutionRight, left = left,
right = right)
end
#defining variables/cache
k1 = T(alg.k1)
k2 = T(alg.k2)
n0 = T(alg.n0)
n_h = ceil(log2(abs(right - left) / (2 * ϵ)))
mid = (left + right) / 2
x_f = (fr * left - fl * right) / (fr - fl)
xt = left
xp = left
r = zero(left) #minmax radius
δ = zero(left) # truncation error
σ = 1.0
ϵ_s = ϵ * 2^(n_h + n0)
i = 0 #iteration
while i <= maxiters
#mid = (left + right) / 2
span = abs(right - left)
r = ϵ_s - (span / 2)
δ = k1 * (span^k2)
## Interpolation step ##
x_f = left + (right - left) * (fl / (fl - fr))
## Truncation step ##
σ = sign(mid - x_f)
if δ <= abs(mid - x_f)
xt = x_f + (σ * δ)
else
xt = mid
end
## Projection step ##
if abs(xt - mid) <= r
xp = xt
else
xp = mid - (σ * r)
end
## Update ##
tmin, tmax = minmax(left, right)
xp >= tmax && (xp = prevfloat(tmax))
xp <= tmin && (xp = nextfloat(tmin))
yp = f(xp)
yps = yp * sign(fr)
if yps > 0
right = xp
fr = yp
elseif yps < 0
left = xp
fl = yp
else
left = prevfloat_tdir(xp, prob.tspan...)
right = xp
return SciMLBase.build_solution(prob, alg, left, f(left);
retcode = ReturnCode.Success, left = left,
right = right)
end
i += 1
mid = (left + right) / 2
ϵ_s /= 2
if nextfloat_tdir(left, prob.tspan...) == right
return SciMLBase.build_solution(prob, alg, left, fl;
retcode = ReturnCode.FloatingPointLimit, left = left,
right = right)
end
end
return SciMLBase.build_solution(prob, alg, left, fl; retcode = ReturnCode.MaxIters,
left = left, right = right)
end
function scalar_nlsolve_ad(prob, alg::InternalITP, args...; kwargs...)
f = prob.f
p = value(prob.p)
if prob isa IntervalNonlinearProblem
tspan = value(prob.tspan)
newprob = IntervalNonlinearProblem(f, tspan, p; prob.kwargs...)
else
u0 = value(prob.u0)
newprob = NonlinearProblem(f, u0, p; prob.kwargs...)
end
sol = solve(newprob, alg, args...; kwargs...)
uu = sol.u
if p isa Number
f_p = ForwardDiff.derivative(Base.Fix1(f, uu), p)
else
f_p = ForwardDiff.gradient(Base.Fix1(f, uu), p)
end
f_x = ForwardDiff.derivative(Base.Fix2(f, p), uu)
pp = prob.p
sumfun = let f_x′ = -f_x
((fp, p),) -> (fp / f_x′) * ForwardDiff.partials(p)
end
partials = sum(sumfun, zip(f_p, pp))
return sol, partials
end
function SciMLBase.solve(
prob::IntervalNonlinearProblem{uType, iip,
<:ForwardDiff.Dual{T, V, P}},
alg::InternalITP, args...;
kwargs...) where {uType, iip, T, V, P}
sol, partials = scalar_nlsolve_ad(prob, alg, args...; kwargs...)
return SciMLBase.build_solution(prob, alg, ForwardDiff.Dual{T, V, P}(sol.u, partials),
sol.resid; retcode = sol.retcode,
left = ForwardDiff.Dual{T, V, P}(sol.left, partials),
right = ForwardDiff.Dual{T, V, P}(sol.right, partials))
end
function SciMLBase.solve(
prob::IntervalNonlinearProblem{uType, iip,
<:AbstractArray{
<:ForwardDiff.Dual{T,
V,
P},
}},
alg::InternalITP, args...;
kwargs...) where {uType, iip, T, V, P}
sol, partials = scalar_nlsolve_ad(prob, alg, args...; kwargs...)
return SciMLBase.build_solution(prob, alg, ForwardDiff.Dual{T, V, P}(sol.u, partials),
sol.resid; retcode = sol.retcode,
left = ForwardDiff.Dual{T, V, P}(sol.left, partials),
right = ForwardDiff.Dual{T, V, P}(sol.right, partials))
end
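# Usage sketch (illustrative): same interface as `InternalFalsi`; the ITP
# constants `k1`, `k2`, `n0` are tunable through the constructor.
#
#     prob = IntervalNonlinearProblem((u, p) -> cos(u) - u, (0.0, 1.0))
#     solve(prob, InternalITP()).u             # ≈ 0.7390851 (default constants)
#     solve(prob, InternalITP(0.1, 2.0, 5)).u  # custom k1/k2/n0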
struct OrdinaryDiffEqTag end
const dualT = ForwardDiff.Dual{ForwardDiff.Tag{OrdinaryDiffEqTag, Float64}, Float64, 1}
dualgen(::Type{T}) where {T} = ForwardDiff.Dual{ForwardDiff.Tag{OrdinaryDiffEqTag, T}, T, 1}
const NORECOMPILE_IIP_SUPPORTED_ARGS = (
Tuple{Vector{Float64}, Vector{Float64},
Vector{Float64}, Float64},
Tuple{Vector{Float64}, Vector{Float64},
SciMLBase.NullParameters, Float64})
const oop_arglists = (Tuple{Vector{Float64}, Vector{Float64}, Float64},
Tuple{Vector{Float64}, SciMLBase.NullParameters, Float64},
Tuple{Vector{Float64}, Vector{Float64}, dualT},
Tuple{Vector{dualT}, Vector{Float64}, Float64},
Tuple{Vector{dualT}, SciMLBase.NullParameters, Float64},
Tuple{Vector{Float64}, SciMLBase.NullParameters, dualT})
const NORECOMPILE_OOP_SUPPORTED_ARGS = (Tuple{Vector{Float64},
Vector{Float64}, Float64},
Tuple{Vector{Float64},
SciMLBase.NullParameters, Float64})
const oop_returnlists = (Vector{Float64}, Vector{Float64},
ntuple(x -> Vector{dualT}, length(oop_arglists) - 2)...)
const NORECOMPILE_ARGUMENT_MESSAGE = """
No-recompile mode is only supported for state arguments
of type `Vector{Float64}`, time arguments of `Float64`
and parameter arguments of type `Vector{Float64}` or
`SciMLBase.NullParameters`.
"""
struct NoRecompileArgumentError <: Exception
args::Any
end
function Base.showerror(io::IO, e::NoRecompileArgumentError)
println(io, NORECOMPILE_ARGUMENT_MESSAGE)
print(io, "Attempted arguments: ")
print(io, e.args)
end
function wrapfun_oop(ff, inputs::Tuple = ())
if !isempty(inputs)
IT = Tuple{map(typeof, inputs)...}
if IT ∉ NORECOMPILE_OOP_SUPPORTED_ARGS
throw(NoRecompileArgumentError(IT))
end
end
FunctionWrappersWrappers.FunctionWrappersWrapper(ff, oop_arglists,
oop_returnlists)
end
function wrapfun_iip(ff,
inputs::Tuple{T1, T2, T3, T4}) where {T1, T2, T3, T4}
T = eltype(T2)
dualT = dualgen(T)
dualT1 = ArrayInterface.promote_eltype(T1, dualT)
dualT2 = ArrayInterface.promote_eltype(T2, dualT)
dualT4 = dualgen(promote_type(T, T4))
iip_arglists = (Tuple{T1, T2, T3, T4},
Tuple{dualT1, dualT2, T3, T4},
Tuple{dualT1, T2, T3, dualT4},
Tuple{dualT1, dualT2, T3, dualT4})
iip_returnlists = ntuple(x -> Nothing, 4)
fwt = map(iip_arglists, iip_returnlists) do A, R
FunctionWrappersWrappers.FunctionWrappers.FunctionWrapper{R, A}(Void(ff))
end
FunctionWrappersWrappers.FunctionWrappersWrapper{typeof(fwt), false}(fwt)
end
const iip_arglists_default = (
Tuple{Vector{Float64}, Vector{Float64}, Vector{Float64},
Float64},
Tuple{Vector{Float64}, Vector{Float64},
SciMLBase.NullParameters,
Float64
},
Tuple{Vector{dualT}, Vector{Float64}, Vector{Float64}, dualT},
Tuple{Vector{dualT}, Vector{dualT}, Vector{Float64}, dualT},
Tuple{Vector{dualT}, Vector{dualT}, Vector{Float64}, Float64},
Tuple{Vector{dualT}, Vector{dualT}, SciMLBase.NullParameters,
Float64
},
Tuple{Vector{dualT}, Vector{Float64},
SciMLBase.NullParameters, dualT
})
const iip_returnlists_default = ntuple(x -> Nothing, length(iip_arglists_default))
function wrapfun_iip(@nospecialize(ff))
fwt = map(iip_arglists_default, iip_returnlists_default) do A, R
FunctionWrappersWrappers.FunctionWrappers.FunctionWrapper{R, A}(Void(ff))
end
FunctionWrappersWrappers.FunctionWrappersWrapper{typeof(fwt), false}(fwt)
end
function unwrap_fw(fw::FunctionWrapper)
fw.obj[]
end
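# Usage sketch (hypothetical, not exercised in this file): wrapping an in-place
# RHS so that Float64 and Dual calls both go through concrete FunctionWrappers.
#
#     f!(du, u, p, t) = (du .= p .* u)
#     fw = wrapfun_iip(f!, (zeros(2), zeros(2), zeros(2), 0.0))
#     du = zeros(2)
#     fw(du, [1.0, 2.0], [3.0, 3.0], 0.0)   # dispatches through the wrapper
#     du                                    # == [3.0, 6.0]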
struct EvalFunc{F} <: Function
f::F
end
(f::EvalFunc)(args...) = f.f(args...)
NO_TSPAN_PROBS = Union{AbstractLinearProblem, AbstractNonlinearProblem,
AbstractIntegralProblem, AbstractSteadyStateProblem,
AbstractJumpProblem}
has_kwargs(_prob::AbstractDEProblem) = has_kwargs(typeof(_prob))
Base.@pure __has_kwargs(::Type{T}) where {T} = :kwargs ∈ fieldnames(T)
has_kwargs(::Type{T}) where {T} = __has_kwargs(T)
const allowedkeywords = (:dense,
:saveat,
:save_idxs,
:tstops,
:tspan,
:d_discontinuities,
:save_everystep,
:save_on,
:save_start,
:save_end,
:initialize_save,
:adaptive,
:abstol,
:reltol,
:dt,
:dtmax,
:dtmin,
:force_dtmin,
:internalnorm,
:controller,
:gamma,
:beta1,
:beta2,
:qmax,
:qmin,
:qsteady_min,
:qsteady_max,
:qoldinit,
:failfactor,
:calck,
:alias_u0,
:maxiters,
:maxtime,
:callback,
:isoutofdomain,
:unstable_check,
:verbose,
:merge_callbacks,
:progress,
:progress_steps,
:progress_name,
:progress_message,
:progress_id,
:timeseries_errors,
:dense_errors,
:weak_timeseries_errors,
:weak_dense_errors,
:wrap,
:calculate_error,
:initializealg,
:alg,
:save_noise,
:delta,
:seed,
:alg_hints,
:kwargshandle,
:trajectories,
:batch_size,
:sensealg,
:advance_to_tstop,
:stop_at_next_tstop,
:u0,
:p,
# These two are from the default algorithm handling
:default_set,
:second_time,
# This is for DiffEqDevTools
:prob_choice,
# Jump problems
:alias_jump,
# This is for copying/deepcopying noise in StochasticDiffEq
:alias_noise,
# This is for SimpleNonlinearSolve handling for batched Nonlinear Solves
:batch,
# Shooting method in BVP needs to differentiate between these two categories
:nlsolve_kwargs,
:odesolve_kwargs,
# If Solvers which internally use linsolve
:linsolve_kwargs,
# Solvers internally using EnsembleProblem
:ensemblealg,
# Fine Grained Control of Tracing (Storing and Logging) during Solve
:show_trace,
:trace_level,
:store_trace,
# Termination condition for solvers
:termination_condition)
const KWARGWARN_MESSAGE = """
Unrecognized keyword arguments found.
The only allowed keyword arguments to `solve` are:
$allowedkeywords
See https://diffeq.sciml.ai/stable/basics/common_solver_opts/ for more details.
Set kwargshandle=KeywordArgError for an error message.
Set kwargshandle=KeywordArgSilent to ignore this message.
"""
const KWARGERROR_MESSAGE = """
Unrecognized keyword arguments found.
The only allowed keyword arguments to `solve` are:
$allowedkeywords
See https://diffeq.sciml.ai/stable/basics/common_solver_opts/ for more details.
"""
struct CommonKwargError <: Exception
kwargs::Any
end
function Base.showerror(io::IO, e::CommonKwargError)
println(io, KWARGERROR_MESSAGE)
notin = collect(map(x -> x ∉ allowedkeywords, keys(e.kwargs)))
unrecognized = collect(keys(e.kwargs))[notin]
print(io, "Unrecognized keyword arguments: ")
printstyled(io, unrecognized; bold = true, color = :red)
print(io, "\n\n")
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
@enum KeywordArgError KeywordArgWarn KeywordArgSilent
const INCOMPATIBLE_U0_MESSAGE = """
Initial condition incompatible with functional form.
Detected an in-place function with an initial condition of type Number or SArray.
This is incompatible because Numbers cannot be mutated, i.e.
`x = 2.0; y = 2.0; x .= y` will error.
If using an immutable initial condition type, please use the out-of-place form.
I.e. define the function `du=f(u,p,t)` instead of attempting to "mutate" the immutable `du`.
If your differential equation function was defined with multiple dispatches and one is
in-place, then the automatic detection will choose in-place. In this case, override the
choice in the problem constructor, i.e. `ODEProblem{false}(f,u0,tspan,p,kwargs...)`.
For a longer discussion on mutability vs immutability and in-place vs out-of-place, see:
https://diffeq.sciml.ai/stable/tutorials/faster_ode_example/#Example-Accelerating-a-Non-Stiff-Equation:-The-Lorenz-Equation
"""
struct IncompatibleInitialConditionError <: Exception end
function Base.showerror(io::IO, e::IncompatibleInitialConditionError)
print(io, INCOMPATIBLE_U0_MESSAGE)
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
const NO_DEFAULT_ALGORITHM_MESSAGE = """
Default algorithm choices require DifferentialEquations.jl.
Please specify an algorithm (e.g., `solve(prob, Tsit5())` or
`init(prob, Tsit5())` for an ODE) or import DifferentialEquations
directly.
You can find the list of available solvers at https://diffeq.sciml.ai/stable/solvers/ode_solve/
and its associated pages.
"""
struct NoDefaultAlgorithmError <: Exception end
function Base.showerror(io::IO, e::NoDefaultAlgorithmError)
print(io, NO_DEFAULT_ALGORITHM_MESSAGE)
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
const NO_TSPAN_MESSAGE = """
No tspan is set in the problem or chosen in the init/solve call
"""
struct NoTspanError <: Exception end
function Base.showerror(io::IO, e::NoTspanError)
print(io, NO_TSPAN_MESSAGE)
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
const NAN_TSPAN_MESSAGE = """
NaN tspan is set in the problem or chosen in the init/solve call.
Note that -Inf and Inf values are allowed in the timespan for solves
which are terminated via callbacks, however NaN values are not allowed
since the direction of time is undetermined.
"""
struct NaNTspanError <: Exception end
function Base.showerror(io::IO, e::NaNTspanError)
print(io, NAN_TSPAN_MESSAGE)
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
const NON_SOLVER_MESSAGE = """
The arguments to solve are incorrect.
The second argument must be a solver choice, `solve(prob,alg)`
where `alg` is a `<: AbstractDEAlgorithm`, e.g. `Tsit5()`.
Please double check the arguments being sent to the solver.
You can find the list of available solvers at https://diffeq.sciml.ai/stable/solvers/ode_solve/
and its associated pages.
"""
struct NonSolverError <: Exception end
function Base.showerror(io::IO, e::NonSolverError)
print(io, NON_SOLVER_MESSAGE)
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
const NOISE_SIZE_MESSAGE = """
Noise sizes are incompatible. The expected number of noise terms in the defined
`noise_rate_prototype` does not match the number of noise terms in the defined
`AbstractNoiseProcess`. Please ensure that
size(prob.noise_rate_prototype,2) == length(prob.noise.W[1]).
Note: Noise process definitions require that users specify `u0`, and this value is
directly used in the definition. For example, if `noise = WienerProcess(0.0,0.0)`,
then the noise process is a scalar with `u0=0.0`. If `noise = WienerProcess(0.0,[0.0])`,
then the noise process is a vector with `u0=0.0`. If `noise_rate_prototype = zeros(2,4)`,
then the noise process must be a 4-dimensional process, for example
`noise = WienerProcess(0.0,zeros(4))`. This error is a sign that the user definition
of `noise_rate_prototype` and `noise` are not aligned in this manner and the definitions should
be double checked.
"""
struct NoiseSizeIncompatabilityError <: Exception
prototypesize::Int
noisesize::Int
end
function Base.showerror(io::IO, e::NoiseSizeIncompatabilityError)
println(io, NOISE_SIZE_MESSAGE)
println(io, "size(prob.noise_rate_prototype,2) = $(e.prototypesize)")
println(io, "length(prob.noise.W[1]) = $(e.noisesize)")
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
const PROBSOLVER_PAIRING_MESSAGE = """
Incompatible problem+solver pairing.
For example, this can occur if an ODE solver is passed with an SDEProblem.
Solvers are only capable of handling specific problem types. Please double
check that the chosen pairing is capable of handling the given problem.
"""
struct ProblemSolverPairingError <: Exception
prob::Any
alg::Any
end
function Base.showerror(io::IO, e::ProblemSolverPairingError)
println(io, PROBSOLVER_PAIRING_MESSAGE)
println(io, "Problem type: $(SciMLBase.__parameterless_type(typeof(e.prob)))")
println(io, "Solver type: $(SciMLBase.__parameterless_type(typeof(e.alg)))")
println(io,
"Problem types compatible with the chosen solver: $(compatible_problem_types(e.prob,e.alg))")
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
function compatible_problem_types(prob, alg)
if alg isa AbstractODEAlgorithm
ODEProblem
elseif alg isa AbstractSDEAlgorithm
(SDEProblem, SDDEProblem)
elseif alg isa AbstractDDEAlgorithm # StochasticDelayDiffEq.jl just uses the SDE alg
DDEProblem
elseif alg isa AbstractDAEAlgorithm
DAEProblem
elseif alg isa AbstractSteadyStateAlgorithm
SteadyStateProblem
end
end
const DIRECT_AUTODIFF_INCOMPATABILITY_MESSAGE = """
Incompatible solver + automatic differentiation pairing.
The chosen automatic differentiation algorithm requires the ability
for compiler transforms on the code which is only possible on pure-Julia
solvers such as those from OrdinaryDiffEq.jl. Direct differentiation methods
which require this ability include:
- Direct use of ForwardDiff.jl on the solver
- `ForwardDiffSensitivity`, `ReverseDiffAdjoint`, `TrackerAdjoint`, and `ZygoteAdjoint`
sensealg choices for adjoint differentiation.
Either switch the choice of solver to a pure Julia method, or change the automatic
differentiation method to one that does not require such transformations.
For more details on automatic differentiation, adjoint, and sensitivity analysis
of differential equations, see the documentation page:
https://diffeq.sciml.ai/stable/analysis/sensitivity/
"""
struct DirectAutodiffError <: Exception end
function Base.showerror(io::IO, e::DirectAutodiffError)
println(io, DIRECT_AUTODIFF_INCOMPATABILITY_MESSAGE)
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
const NONCONCRETE_ELTYPE_MESSAGE = """
Non-concrete element type inside of an `Array` detected.
Arrays with non-concrete element types, such as
`Array{Union{Float32,Float64}}`, are not supported by the
differential equation solvers. In any case, this is bad for
performance, so you don't want to be doing this!
If this was a mistake, promote the element types to be
all the same. If this was intentional, for example,
using Unitful.jl with different unit values, then use
an array type which has fast broadcast support for
heterogeneous values such as the ArrayPartition
from RecursiveArrayTools.jl. For example:
```julia
using RecursiveArrayTools
x = ArrayPartition([1.0,2.0],[1f0,2f0])
y = ArrayPartition([3.0,4.0],[3f0,4f0])
x .+ y # fast, stable, and usable as u0 into DiffEq!
```
Element type:
"""
struct NonConcreteEltypeError <: Exception
eltype::Any
end
function Base.showerror(io::IO, e::NonConcreteEltypeError)
print(io, NONCONCRETE_ELTYPE_MESSAGE)
print(io, e.eltype)
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
const NONNUMBER_ELTYPE_MESSAGE = """
Non-Number element type inside of an `Array` detected.
Arrays with non-number element types, such as
`Array{Array{Float64}}`, are not supported by the
solvers.
If this was a mistake, promote the element types to be
all the same. If this was intentional, for example an
array of arrays structure or Unitful.jl values with
different units, then use an array type which has fast
broadcast support for heterogeneous values, such as the
ArrayPartition or VectorOfArray types from
RecursiveArrayTools.jl. For example:
```julia
using RecursiveArrayTools
u0 = ArrayPartition([1.0,2.0],[3.0,4.0])
u0 = VectorOfArray([[1.0,2.0],[3.0,4.0]])
```
are both initial conditions which would be compatible with
the solvers. Or use ComponentArrays.jl for more complex
nested structures.
Element type:
"""
struct NonNumberEltypeError <: Exception
eltype::Any
end
function Base.showerror(io::IO, e::NonNumberEltypeError)
print(io, NONNUMBER_ELTYPE_MESSAGE)
print(io, e.eltype)
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
const GENERIC_NUMBER_TYPE_ERROR_MESSAGE = """
Non-standard number type (i.e. not Float32, Float64,
ComplexF32, or ComplexF64) detected as the element type
for the initial condition or time span. These generic
number types are only compatible with the pure Julia
solvers which support generic programming, such as
OrdinaryDiffEq.jl. The chosen solver does not support
this functionality. Please double check that the initial
condition and time span types are correct, and check that
the chosen solver was correct.
"""
struct GenericNumberTypeError <: Exception
alg::Any
uType::Any
tType::Any
end
function Base.showerror(io::IO, e::GenericNumberTypeError)
println(io, GENERIC_NUMBER_TYPE_ERROR_MESSAGE)
println(io, "Solver: $(e.alg)")
println(io, "u0 type: $(e.uType)")
print(io, "Timespan type: $(e.tType)")
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
const COMPLEX_SUPPORT_ERROR_MESSAGE = """
Complex number type (i.e. ComplexF32, or ComplexF64)
detected as the element type for the initial condition
with an algorithm that does not support complex numbers.
Please check that the initial condition type is correct.
If complex number support is needed, try different solvers
such as those from OrdinaryDiffEq.jl.
"""
struct ComplexSupportError <: Exception
alg::Any
end
function Base.showerror(io::IO, e::ComplexSupportError)
println(io, COMPLEX_SUPPORT_ERROR_MESSAGE)
println(io, "Solver: $(e.alg)")
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
const COMPLEX_TSPAN_ERROR_MESSAGE = """
Complex number type (i.e. ComplexF32, or ComplexF64)
detected as the element type for the independent variable
(i.e. time span). Please check that the tspan type is correct.
No solvers support complex time spans. If this is required,
please open an issue.
"""
struct ComplexTspanError <: Exception end
function Base.showerror(io::IO, e::ComplexTspanError)
println(io, COMPLEX_TSPAN_ERROR_MESSAGE)
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
const TUPLE_STATE_ERROR_MESSAGE = """
Tuple type used as a state. Since a tuple does not have vector
properties, it will not work as a state type in equation solvers.
Instead, change your equation from using tuple constructors `()`
to static array constructors `SA[]`. For example, change:
```julia
function ftup((a,b),p,t)
return b,-a
end
u0 = (1.0,2.0)
tspan = (0.0,1.0)
ODEProblem(ftup,u0,tspan)
```
to:
```julia
using StaticArrays
function fsa(u,p,t)
SA[u[2],u[1]]
end
u0 = SA[1.0,2.0]
tspan = (0.0,1.0)
ODEProblem(fsa,u0,tspan)
```
This will be safer and fast for small ODEs. For more information, see:
https://diffeq.sciml.ai/stable/tutorials/faster_ode_example/#Further-Optimizations-of-Small-Non-Stiff-ODEs-with-StaticArrays
"""
struct TupleStateError <: Exception end
function Base.showerror(io::IO, e::TupleStateError)
println(io, TUPLE_STATE_ERROR_MESSAGE)
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
const MASS_MATRIX_ERROR_MESSAGE = """
Mass matrix size is incompatible with initial condition
sizing. The mass matrix must represent the `vec`
form of the initial condition `u0`, i.e.
`size(mm,1) == size(mm,2) == length(u)`
"""
struct IncompatibleMassMatrixError <: Exception
sz::Int
len::Int
end
function Base.showerror(io::IO, e::IncompatibleMassMatrixError)
println(io, MASS_MATRIX_ERROR_MESSAGE)
print(io, "size(prob.f.mass_matrix,1): ")
println(io, e.sz)
print(io, "length(u0): ")
println(io, e.len)
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
function init_call(_prob, args...; merge_callbacks = true, kwargshandle = nothing,
kwargs...)
kwargshandle = kwargshandle === nothing ? KeywordArgError : kwargshandle
kwargshandle = has_kwargs(_prob) && haskey(_prob.kwargs, :kwargshandle) ?
_prob.kwargs[:kwargshandle] : kwargshandle
if has_kwargs(_prob)
if merge_callbacks && haskey(_prob.kwargs, :callback) && haskey(kwargs, :callback)
kwargs_temp = NamedTuple{
Base.diff_names(Base._nt_names(values(kwargs)),
(:callback,))}(values(kwargs))
callbacks = NamedTuple{(:callback,)}((DiffEqBase.CallbackSet(
_prob.kwargs[:callback],
values(kwargs).callback),))
kwargs = merge(kwargs_temp, callbacks)
end
kwargs = isempty(_prob.kwargs) ? kwargs : merge(values(_prob.kwargs), kwargs)
end
checkkwargs(kwargshandle; kwargs...)
if _prob isa Union{ODEProblem, DAEProblem} && isnothing(_prob.u0)
build_null_integrator(_prob, args...; kwargs...)
elseif hasfield(typeof(_prob), :f) && hasfield(typeof(_prob.f), :f) &&
_prob.f.f isa EvalFunc
Base.invokelatest(__init, _prob, args...; kwargs...)#::T
else
__init(_prob, args...; kwargs...)#::T
end
end
function init(
prob::Union{AbstractDEProblem, NonlinearProblem}, args...; sensealg = nothing,
u0 = nothing, p = nothing, kwargs...)
if sensealg === nothing && has_kwargs(prob) && haskey(prob.kwargs, :sensealg)
sensealg = prob.kwargs[:sensealg]
end
u0 = u0 !== nothing ? u0 : prob.u0
p = p !== nothing ? p : prob.p
init_up(prob, sensealg, u0, p, args...; kwargs...)
end
function init(prob::AbstractJumpProblem, args...; kwargs...)
init_call(prob, args...; kwargs...)
end
function init_up(prob::AbstractDEProblem, sensealg, u0, p, args...; kwargs...)
alg = extract_alg(args, kwargs, has_kwargs(prob) ? prob.kwargs : kwargs)
if isnothing(alg) || !(alg isa AbstractDEAlgorithm) # Default algorithm handling
_prob = get_concrete_problem(prob, !(prob isa DiscreteProblem); u0 = u0,
p = p, kwargs...)
init_call(_prob, args...; kwargs...)
else
_prob = get_concrete_problem(prob, isadaptive(alg); u0 = u0, p = p, kwargs...)
_alg = prepare_alg(alg, _prob.u0, _prob.p, _prob)
check_prob_alg_pairing(_prob, alg) # alg for improved inference
if length(args) > 1
init_call(_prob, _alg, Base.tail(args)...; kwargs...)
else
init_call(_prob, _alg; kwargs...)
end
end
end
function solve_call(_prob, args...; merge_callbacks = true, kwargshandle = nothing,
kwargs...)
kwargshandle = kwargshandle === nothing ? KeywordArgError : kwargshandle
kwargshandle = has_kwargs(_prob) && haskey(_prob.kwargs, :kwargshandle) ?
_prob.kwargs[:kwargshandle] : kwargshandle
if has_kwargs(_prob)
if merge_callbacks && haskey(_prob.kwargs, :callback) && haskey(kwargs, :callback)
kwargs_temp = NamedTuple{
Base.diff_names(Base._nt_names(values(kwargs)),
(:callback,))}(values(kwargs))
callbacks = NamedTuple{(:callback,)}((DiffEqBase.CallbackSet(
_prob.kwargs[:callback],
values(kwargs).callback),))
kwargs = merge(kwargs_temp, callbacks)
end
kwargs = isempty(_prob.kwargs) ? kwargs : merge(values(_prob.kwargs), kwargs)
end
checkkwargs(kwargshandle; kwargs...)
if isdefined(_prob, :u0)
if _prob.u0 isa Array
if !isconcretetype(RecursiveArrayTools.recursive_unitless_eltype(_prob.u0))
throw(NonConcreteEltypeError(RecursiveArrayTools.recursive_unitless_eltype(_prob.u0)))
end
if !(eltype(_prob.u0) <: Number) && !(eltype(_prob.u0) <: Enum) &&
!(_prob.u0 isa AbstractVector{<:AbstractArray} && _prob isa BVProblem)
# Allow Enums for FunctionMaps, make into a trait in the future
# BVPs use Vector of Arrays for initial guesses
throw(NonNumberEltypeError(eltype(_prob.u0)))
end
end
if _prob.u0 === nothing
return build_null_solution(_prob, args...; kwargs...)
end
end
if hasfield(typeof(_prob), :f) && hasfield(typeof(_prob.f), :f) &&
_prob.f.f isa EvalFunc
Base.invokelatest(__solve, _prob, args...; kwargs...)#::T
else
__solve(_prob, args...; kwargs...)#::T
end
end
mutable struct NullODEIntegrator{IIP, ProbType, T, SolType, F, P} <:
AbstractODEIntegrator{Nothing, IIP, Nothing, T}
du::Vector{Float64}
u::Vector{Float64}
t::T
prob::ProbType
sol::SolType
f::F
p::P
end
function build_null_integrator(prob::AbstractDEProblem, args...;
kwargs...)
sol = solve(prob, args...; kwargs...)
# The DAE initialization in `build_null_solution` may change the parameter
# object `prob.p` via `@set!`, hence use the "new" prob instead of the "old" one.
prob = sol.prob
return NullODEIntegrator{
isinplace(prob), typeof(prob), eltype(prob.tspan), typeof(sol),
typeof(prob.f), typeof(prob.p)
}(Float64[],
Float64[],
prob.tspan[1],
prob,
sol,
prob.f,
prob.p)
end
function solve!(integ::NullODEIntegrator)
integ.t = integ.sol.t[end]
return nothing
end
function step!(integ::NullODEIntegrator, dt = nothing, stop_at_tdt = false)
if !isnothing(dt)
integ.t += dt
else
integ.t = integ.sol.t[end]
end
return nothing
end
function build_null_solution(prob::AbstractDEProblem, args...;
saveat = (),
save_everystep = true,
save_on = true,
save_start = save_everystep || isempty(saveat) ||
saveat isa Number || prob.tspan[1] in saveat,
save_end = true,
kwargs...)
ts = if saveat === ()
if save_start && save_end
[prob.tspan[1], prob.tspan[2]]
elseif save_start && !save_end
[prob.tspan[1]]
elseif !save_start && save_end
[prob.tspan[2]]
else
eltype(prob.tspan)[]
end
elseif saveat isa Number
prob.tspan[1]:saveat:prob.tspan[2]
else
saveat
end
timeseries = [Float64[] for i in 1:length(ts)]
if SciMLBase.has_initializeprob(prob.f) && SciMLBase.has_initializeprobpmap(prob.f)
initializeprob = prob.f.initializeprob
nlsol = solve(initializeprob)
@set! prob.p = prob.f.initializeprobpmap(prob, nlsol)
end
build_solution(prob, nothing, ts, timeseries, retcode = ReturnCode.Success)
end
function build_null_solution(
prob::Union{SteadyStateProblem, NonlinearProblem},
args...;
saveat = (),
save_everystep = true,
save_on = true,
save_start = save_everystep || isempty(saveat) ||
saveat isa Number || prob.tspan[1] in saveat,
save_end = true,
kwargs...)
SciMLBase.build_solution(prob, nothing, Float64[], nothing;
retcode = ReturnCode.Success)
end
function build_null_solution(
prob::NonlinearLeastSquaresProblem,
args...; abstol = 1e-6, kwargs...)
if isinplace(prob)
resid = isnothing(prob.f.resid_prototype) ? Float64[] : copy(prob.f.resid_prototype)
prob.f(resid, prob.u0, prob.p)
else
resid = prob.f(prob.u0, prob.p)
end
retcode = norm(resid) < abstol ? ReturnCode.Success : ReturnCode.Failure
SciMLBase.build_solution(prob, nothing, Float64[], resid;
retcode)
end
"""
```julia
solve(prob::AbstractDEProblem, alg::Union{AbstractDEAlgorithm,Nothing}; kwargs...)
```
## Arguments
The only positional argument is `alg` which is optional. By default, `alg = nothing`.
If `alg = nothing`, then `solve` dispatches to the DifferentialEquations.jl automated
algorithm selection (if `using DifferentialEquations` was done, otherwise it will
error with a `MethodError`).
## Keyword Arguments
The DifferentialEquations.jl universe has a large set of common arguments available
for the `solve` function. These arguments apply to `solve` on any problem type and
are only limited by limitations of the specific implementations.
Many of the defaults depend on the algorithm or the package the algorithm derives
from. Not all of the interface is provided by every algorithm.
For more detailed information on the defaults and the available options
for specific algorithms / packages, see the manual pages for the solvers of specific
problems. To see whether a specific package is compatible with the use of a
given option, see the [Solver Compatibility Chart](https://docs.sciml.ai/DiffEqDocs/stable/basics/compatibility_chart/#Solver-Compatibility-Chart)
### Default Algorithm Hinting
To help choose the default algorithm, the keyword argument `alg_hints` is
provided to `solve`. `alg_hints` is a `Vector{Symbol}` which describes the
problem at a high level to the solver. The options are:
* `:auto` vs `:nonstiff` vs `:stiff` - Denotes the equation as nonstiff/stiff.
`:auto` allow the default handling algorithm to choose stiffness detection
algorithms. The default handling defaults to using `:auto`.
Currently unused options include:
* `:interpolant` - Denotes that a high-precision interpolation is important.
* `:memorybound` - Denotes that the solver will be memory bound.
This functionality is derived via the benchmarks in
[SciMLBenchmarks.jl](https://github.com/SciML/SciMLBenchmarks.jl)
#### SDE-Specific Alg Hints
* `:additive` - Denotes that the underlying SDE has additive noise.
* `:stratonovich` - Denotes that the solution should adhere to the Stratonovich
interpretation.
### Output Control
These arguments control the output behavior of the solvers. It defaults to maximum
output to give the best interactive user experience, but can be reduced all the
way to only saving the solution at the final timepoint.
The following options are all related to output control. See the "Examples"
section at the end of this page for some example usage.
* `dense`: Denotes whether to save the extra pieces required for dense (continuous)
output. Default is `save_everystep && isempty(saveat)` for algorithms which have
the ability to produce dense output, i.e. by default it's `true` unless the user
has turned off saving on steps or has chosen a `saveat` value. If `dense=false`,
the solution still acts like a function, and `sol(t)` is a linear interpolation
between the saved time points.
* `saveat`: Denotes specific times to save the solution at, during the solving
phase. The solver will save at each of the timepoints in this array in the
most efficient manner available to the solver. If only `saveat` is given, then
the arguments `save_everystep` and `dense` are `false` by default.
If `saveat` is given a number, then it will automatically expand to
`tspan[1]:saveat:tspan[2]`. For methods where interpolation is not possible,
`saveat` may be equivalent to `tstops`. The default value is `[]`.
* `save_idxs`: Denotes the indices for the components of the equation to save.
Defaults to saving all indices. For example, if you are solving a 3-dimensional ODE,
and given `save_idxs = [1, 3]`, only the first and third components of the
solution will be outputted.
Notice that in this case the outputted solution will be two-dimensional.
* `tstops`: Denotes *extra* times that the timestepping algorithm must step to.
This should be used to help the solver deal with discontinuities and
singularities, since stepping exactly at the time of the discontinuity will
improve accuracy. If a method cannot change timesteps (fixed timestep
multistep methods), then `tstops` will use an interpolation,
matching the behavior of `saveat`. If a method cannot change timesteps and
also cannot interpolate, then `tstops` must be a multiple of `dt` or else an
error will be thrown. Default is `[]`.
* `d_discontinuities`: Denotes locations of discontinuities in low-order derivatives.
This will force FSAL algorithms which assume derivative continuity to re-evaluate
the derivatives at the point of discontinuity. The default is `[]`.
* `save_everystep`: Saves the result at every step.
Default is true if `isempty(saveat)`.
* `save_on`: Denotes whether intermediate solutions are saved. This overrides the
settings of `dense`, `saveat` and `save_everystep` and is used by some applications
to manually turn off saving temporarily. Everyday use of the solvers should leave
this unchanged. Defaults to `true`.
* `save_start`: Denotes whether the initial condition should be included in
the solution type as the first timepoint. Defaults to `true`.
* `save_end`: Denotes whether the final timepoint is forced to be saved,
regardless of the other saving settings. Defaults to `true`.
* `initialize_save`: Denotes whether to save after the callback initialization
phase (when `u_modified=true`). Defaults to `true`.
Note that `dense` requires `save_everystep=true` and an empty `saveat`. If you need
additional saving while keeping dense output, see
[the SavingCallback in the Callback Library](https://docs.sciml.ai/DiffEqCallbacks/stable/output_saving/#DiffEqCallbacks.SavingCallback).
### Stepsize Control
These arguments control the timestepping routines.
#### Basic Stepsize Control
These are the standard options for controlling stepping behavior. Error estimates
do the comparison
```math
err_{scaled} = err/(abstol + max(uprev,u)*reltol)
```
The scaled error is guaranteed to be `<1` for a given local error estimate
(note: error estimates are local unless the method specifies otherwise). `abstol`
controls the non-scaling error and thus can be thought of as the error around zero.
`reltol` scales with the size of the dependent variables and so one can interpret
`reltol=1e-3` as roughly being (locally) correct to 3 digits. Note tolerances can
be specified element-wise by passing a vector whose size matches `u0`.
* `adaptive`: Turns on adaptive timestepping for appropriate methods. Default
is true.
* `abstol`: Absolute tolerance in adaptive timestepping. This is the tolerance
on local error estimates, not necessarily the global error (though these quantities
are related). Defaults to `1e-6` on deterministic equations (ODEs/DDEs/DAEs) and `1e-2`
on stochastic equations (SDEs/RODEs).
* `reltol`: Relative tolerance in adaptive timestepping. This is the tolerance
on local error estimates, not necessarily the global error (though these quantities
are related). Defaults to `1e-3` on deterministic equations (ODEs/DDEs/DAEs) and `1e-2`
on stochastic equations (SDEs/RODEs).
* `dt`: Sets the initial stepsize. This is also the stepsize for fixed
timestep methods. Defaults to an automatic choice if the method is adaptive.
* `dtmax`: Maximum dt for adaptive timestepping. Defaults are
package-dependent.
* `dtmin`: Minimum dt for adaptive timestepping. Defaults are
package-dependent.
* `force_dtmin`: Declares whether to continue, forcing the minimum `dt` usage.
Default is `false`, which has the solver throw a warning and exit early when
encountering the minimum `dt`. Setting this true allows the solver to continue,
never letting `dt` go below `dtmin` (and ignoring error tolerances in those
cases). Note that `true` is not compatible with most interop packages.
#### Fixed Stepsize Usage
Note that if a method does not have adaptivity, the following rules apply:
* If `dt` is set, then the algorithm will step with size `dt` each iteration.
* If `tstops` and `dt` are both set, then the algorithm will step with either a
size `dt`, or use a smaller step to hit the `tstops` point.
* If `tstops` is set without `dt`, then the algorithm will step directly to
each value in `tstops`.
* If neither `dt` nor `tstops` are set, the solver will throw an error.
#### [Advanced Adaptive Stepsize Control](https://docs.sciml.ai/DiffEqDocs/stable/extras/timestepping/)
These arguments control more advanced parts of the internals of adaptive timestepping
and are mostly used to make it more efficient on specific problems. For detailed
explanations of the timestepping algorithms, see the
[timestepping descriptions](https://docs.sciml.ai/DiffEqDocs/stable/extras/timestepping/#timestepping)
* `internalnorm`: The norm function `internalnorm(u,t)` with which error estimates
  are calculated. Two dispatches are required: one for the state variable and
  one for the elements of the state variable (the scalar norm).
  Defaults are package-dependent.
* `controller`: Possible examples are [`IController`](https://docs.sciml.ai/DiffEqDocs/stable/extras/timestepping/#OrdinaryDiffEq.IController),
[`PIController`](https://docs.sciml.ai/DiffEqDocs/stable/extras/timestepping/#OrdinaryDiffEq.PIController),
[`PIDController`](https://docs.sciml.ai/DiffEqDocs/stable/extras/timestepping/#OrdinaryDiffEq.PIDController),
[`PredictiveController`](https://docs.sciml.ai/DiffEqDocs/stable/extras/timestepping/#OrdinaryDiffEq.PredictiveController).
Default is algorithm-dependent.
* `gamma`: The risk-factor γ in the q equation for adaptive timestepping
of the controllers using it.
Default is algorithm-dependent.
* `beta1`: The Lund stabilization α parameter.
Default is algorithm-dependent.
* `beta2`: The Lund stabilization β parameter.
Default is algorithm-dependent.
* `qmax`: Defines the maximum value possible for the adaptive q.
Default is algorithm-dependent.
* `qmin`: Defines the minimum value possible for the adaptive q.
Default is algorithm-dependent.
* `qsteady_min`: Defines the minimum for the range around 1 where the timestep
is held constant. Default is algorithm-dependent.
* `qsteady_max`: Defines the maximum for the range around 1 where the timestep
is held constant. Default is algorithm-dependent.
* `qoldinit`: The initial `qold` in stabilization stepping.
Default is algorithm-dependent.
* `failfactor`: The amount to decrease the timestep by if the Newton iterations
of an implicit method fail. Default is 2.
### Memory Optimizations
* `calck`: Turns on and off the internal ability for intermediate
interpolations (also known as intermediate density). Not the same as `dense`, which is post-solution interpolation.
This defaults to `true` if `dense` is set, `saveat` is nonempty, or no custom callback is given.
This can be used to turn off interpolations
(to save memory) if one isn't using interpolations when a custom callback is
used. Another case where this may be used is to turn on interpolations for
usage in the integrator interface even when interpolations are used nowhere else.
Note that this is only required if the algorithm doesn't have
a free or lazy interpolation (e.g. `DP8()`). If `calck = false`, `saveat` cannot be used.
The rare keyword `calck` can be useful in event handling.
* `alias_u0`: allows the solver to alias the initial condition array that is contained
in the problem struct. Defaults to false.
### Miscellaneous
* `maxiters`: Maximum number of iterations before stopping. Defaults to 1e5.
* `callback`: Specifies a callback. Defaults to a callback function which
performs the saving routine. For more information, see the
[Event Handling and Callback Functions manual page](https://docs.sciml.ai/DiffEqCallbacks/stable/).
* `isoutofdomain`: Specifies a function `isoutofdomain(u,p,t)` where, when it
returns true, it will reject the timestep. Disabled by default.
* `unstable_check`: Specifies a function `unstable_check(dt,u,p,t)` where, when
it returns true, it will cause the solver to exit and throw a warning. Defaults
to `any(isnan,u)`, i.e. checking if any value is a NaN.
* `verbose`: Toggles whether warnings are thrown when the solver exits early.
Defaults to true.
* `merge_callbacks`: Toggles whether to merge `prob.callback` with the `solve` keyword
argument `callback`. Defaults to `true`.
* `wrap`: Toggles whether to wrap the solution if `prob.problem_type` has a preferred
alternate wrapper type for the solution. Useful when speed, but not shape of solution
is important. Defaults to `Val(true)`. `Val(false)` will cancel wrapping the solution.
* `u0`: The initial condition, overrides the one defined in the problem struct.
Defaults to `nothing` (no override, use the `u0` defined in `prob`).
* `p`: The parameters, overrides the one defined in the problem struct.
Defaults to `nothing` (no override, use the `p` defined in `prob`).
### Progress Monitoring
These arguments control the usage of the progressbar in ProgressLogging.jl compatible environments.
* `progress`: Turns on/off the progress bar. Default is false.
* `progress_steps`: Numbers of steps between updates of the progress bar.
Default is 1000.
* `progress_name`: Controls the name of the progressbar. Default is the name
of the problem type.
* `progress_message`: Controls the message with the progressbar. Defaults to
showing `dt`, `t`, and the maximum of `u`.
* `progress_id`: Controls the ID of the progress log message to distinguish simultaneous simulations.
### Error Calculations
If you are using the test problems (ex: `ODETestProblem`), then the following
options control the errors which are calculated:
* `timeseries_errors`: Turns on and off the calculation of errors at the steps
which were taken, such as the `l2` error. Default is true.
* `dense_errors`: Turns on and off the calculation of errors at the steps which
require dense output and calculate the error at 100 evenly-spaced points
throughout `tspan`. An example is the `L2` error. Default is false.
### Sensitivity Algorithms (`sensealg`)
`sensealg` is used for choosing the way the automatic differentiation is performed.
For more information, see the documentation for SciMLSensitivity:
https://docs.sciml.ai/SciMLSensitivity/stable/
## Examples
The following lines are examples of how one could use the configuration of
`solve()`. For these examples a 3-dimensional ODE problem is assumed, however
the extension to other types is straightforward.
1. `solve(prob, AlgorithmName())` : The "default" setting, with a user-specified
algorithm (given by `AlgorithmName()`). All parameters get their default values.
This means that the solution is saved at the steps the Algorithm stops internally
and dense output is enabled if the chosen algorithm allows for it.
All other integration parameters (e.g. stepsize) are chosen automatically.
2. `solve(prob, saveat = 0.01, abstol = 1e-9, reltol = 1e-9)` : Standard setting
for accurate output at specified (and equidistant) time intervals, used for
e.g. Fourier Transform. The solution is given every 0.01 time units,
starting from `tspan[1]`. The solver used is `Tsit5()` since no keyword
`alg_hints` is given.
3. `solve(prob, maxiters = 1e7, progress = true, save_idxs = [1])` : Using a larger
maximum number of solver iterations can be useful when a given `tspan` is very
long. This example only saves the first of the variables of the system, either
to save size or because the user does not care about the others. Finally, with
`progress = true` you are enabling the progress bar.
"""
function solve(prob::AbstractDEProblem, args...; sensealg = nothing,
u0 = nothing, p = nothing, wrap = Val(true), kwargs...)
if sensealg === nothing && haskey(prob.kwargs, :sensealg)
sensealg = prob.kwargs[:sensealg]
end
u0 = u0 !== nothing ? u0 : prob.u0
p = p !== nothing ? p : prob.p
if wrap isa Val{true}
wrap_sol(solve_up(prob, sensealg, u0, p, args...; kwargs...))
else
solve_up(prob, sensealg, u0, p, args...; kwargs...)
end
end
"""
```julia
solve(prob::NonlinearProblem, alg::Union{AbstractNonlinearAlgorithm,Nothing}; kwargs...)
```
## Arguments
The only positional argument is `alg` which is optional. By default, `alg = nothing`.
If `alg = nothing`, then `solve` dispatches to the NonlinearSolve.jl automated
algorithm selection (if `using NonlinearSolve` was done, otherwise it will
error with a `MethodError`).
## Keyword Arguments
The NonlinearSolve.jl universe has a large set of common arguments available
for the `solve` function. These arguments apply to `solve` on any problem type and
are only limited by limitations of the specific implementations.
Many of the defaults depend on the algorithm or the package the algorithm derives
from. Not all of the interface is provided by every algorithm.
For more detailed information on the defaults and the available options
for specific algorithms / packages, see the manual pages for the solvers of specific
problems.
### Error Control
* `abstol`: Absolute tolerance.
* `reltol`: Relative tolerance.
### Miscellaneous
* `maxiters`: Maximum number of iterations before stopping. Defaults to 1e5.
* `verbose`: Toggles whether warnings are thrown when the solver exits early.
Defaults to true.
### Sensitivity Algorithms (`sensealg`)
`sensealg` is used for choosing the way the automatic differentiation is performed.
For more information, see the documentation for SciMLSensitivity:
https://docs.sciml.ai/SciMLSensitivity/stable/
"""
function solve(prob::NonlinearProblem, args...; sensealg = nothing,
u0 = nothing, p = nothing, wrap = Val(true), kwargs...)
if sensealg === nothing && haskey(prob.kwargs, :sensealg)
sensealg = prob.kwargs[:sensealg]
end
u0 = u0 !== nothing ? u0 : prob.u0
p = p !== nothing ? p : prob.p
if wrap isa Val{true}
wrap_sol(solve_up(prob, sensealg, u0, p, args...; kwargs...))
else
solve_up(prob, sensealg, u0, p, args...; kwargs...)
end
end
function solve_up(prob::Union{AbstractDEProblem, NonlinearProblem}, sensealg, u0, p,
args...; kwargs...)
alg = extract_alg(args, kwargs, has_kwargs(prob) ? prob.kwargs : kwargs)
if isnothing(alg) || !(alg isa AbstractDEAlgorithm) # Default algorithm handling
_prob = get_concrete_problem(prob, !(prob isa DiscreteProblem); u0 = u0,
p = p, kwargs...)
solve_call(_prob, args...; kwargs...)
else
_prob = get_concrete_problem(prob, isadaptive(alg); u0 = u0, p = p, kwargs...)
_alg = prepare_alg(alg, _prob.u0, _prob.p, _prob)
check_prob_alg_pairing(_prob, alg) # use alg for improved inference
if length(args) > 1
solve_call(_prob, _alg, Base.tail(args)...; kwargs...)
else
solve_call(_prob, _alg; kwargs...)
end
end
end
function solve_call(prob::SteadyStateProblem,
alg::SciMLBase.AbstractNonlinearAlgorithm, args...;
kwargs...)
solve_call(NonlinearProblem(prob),
alg, args...;
kwargs...)
end
function solve(prob::EnsembleProblem, args...; kwargs...)
alg = extract_alg(args, kwargs, kwargs)
if length(args) > 1
__solve(prob, alg, Base.tail(args)...; kwargs...)
else
__solve(prob, alg; kwargs...)
end
end
function solve(prob::SciMLBase.WeightedEnsembleProblem, args...; kwargs...)
SciMLBase.WeightedEnsembleSolution(
    solve(prob.ensembleprob, args...; kwargs...), prob.weights)
end
function solve(prob::AbstractNoiseProblem, args...; kwargs...)
__solve(prob, args...; kwargs...)
end
function solve(prob::AbstractJumpProblem, args...; kwargs...)
__solve(prob, args...; kwargs...)
end
function checkkwargs(kwargshandle; kwargs...)
if any(x -> x ∉ allowedkeywords, keys(kwargs))
if kwargshandle == KeywordArgError
throw(CommonKwargError(kwargs))
elseif kwargshandle == KeywordArgWarn
@warn KWARGWARN_MESSAGE
unrecognized = setdiff(keys(kwargs), allowedkeywords)
print("Unrecognized keyword arguments: ")
printstyled(unrecognized; bold = true, color = :red)
print("\n\n")
else
@assert kwargshandle == KeywordArgSilent
end
end
end
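# e.g. (sketch): `solve(prob, alg; bad_kw = 1)` throws a `CommonKwargError` by
# default, warns with `kwargshandle = KeywordArgWarn`, and is silently ignored
# with `kwargshandle = KeywordArgSilent`.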
function get_concrete_problem(prob::AbstractJumpProblem, isadapt; kwargs...)
prob
end
function get_concrete_problem(prob::SteadyStateProblem, isadapt; kwargs...)
p = get_concrete_p(prob, kwargs)
u0 = get_concrete_u0(prob, isadapt, Inf, kwargs)
u0 = promote_u0(u0, p, nothing)
remake(prob; u0 = u0, p = p)
end
function get_concrete_problem(prob::NonlinearProblem, isadapt; kwargs...)
p = get_concrete_p(prob, kwargs)
u0 = get_concrete_u0(prob, isadapt, nothing, kwargs)
u0 = promote_u0(u0, p, nothing)
remake(prob; u0 = u0, p = p)
end
function get_concrete_problem(prob::NonlinearLeastSquaresProblem, isadapt; kwargs...)
p = get_concrete_p(prob, kwargs)
u0 = get_concrete_u0(prob, isadapt, nothing, kwargs)
u0 = promote_u0(u0, p, nothing)
remake(prob; u0 = u0, p = p)
end
function get_concrete_problem(prob::AbstractEnsembleProblem, isadapt; kwargs...)
prob
end
function solve(prob::PDEProblem, alg::AbstractDEAlgorithm, args...;
kwargs...)
solve(prob.prob, alg, args...; kwargs...)
end
function init(prob::PDEProblem, alg::AbstractDEAlgorithm, args...;
kwargs...)
init(prob.prob, alg, args...; kwargs...)
end
function get_concrete_problem(prob, isadapt; kwargs...)
p = get_concrete_p(prob, kwargs)
tspan = get_concrete_tspan(prob, isadapt, kwargs, p)
u0 = get_concrete_u0(prob, isadapt, tspan[1], kwargs)
u0_promote = promote_u0(u0, p, tspan[1])
tspan_promote = promote_tspan(u0_promote, p, tspan, prob, kwargs)
f_promote = promote_f(prob.f, Val(SciMLBase.specialization(prob.f)), u0_promote, p,
tspan_promote[1])
if isconcreteu0(prob, tspan[1], kwargs) && prob.u0 === u0 &&
typeof(u0_promote) === typeof(prob.u0) &&
prob.tspan == tspan && typeof(prob.tspan) === typeof(tspan_promote) &&
p === prob.p && f_promote === prob.f
return prob
else
return remake(prob; f = f_promote, u0 = u0_promote, p = p, tspan = tspan_promote)
end
end
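# e.g. `solve(prob; u0 = [1.0], p = 2.0)` funnels through here: the overrides are
# promoted together with `tspan`, and if nothing actually changed the original
# problem is returned unmodified, avoiding a `remake`.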
function get_concrete_problem(prob::DAEProblem, isadapt; kwargs...)
p = get_concrete_p(prob, kwargs)
tspan = get_concrete_tspan(prob, isadapt, kwargs, p)
u0 = get_concrete_u0(prob, isadapt, tspan[1], kwargs)
du0 = get_concrete_du0(prob, isadapt, tspan[1], kwargs)
u0_promote = promote_u0(u0, p, tspan[1])
du0_promote = promote_u0(du0, p, tspan[1])
tspan_promote = promote_tspan(u0_promote, p, tspan, prob, kwargs)
f_promote = promote_f(prob.f, Val(SciMLBase.specialization(prob.f)), u0_promote, p,
tspan_promote[1])
if isconcreteu0(prob, tspan[1], kwargs) && typeof(u0_promote) === typeof(prob.u0) &&
isconcretedu0(prob, tspan[1], kwargs) && typeof(du0_promote) === typeof(prob.du0) &&
prob.tspan == tspan && typeof(prob.tspan) === typeof(tspan_promote) &&
p === prob.p && f_promote === prob.f
return prob
else
return remake(prob; f = f_promote, du0 = du0_promote, u0 = u0_promote, p = p,
tspan = tspan_promote)
end
end
function get_concrete_problem(prob::DDEProblem, isadapt; kwargs...)
p = get_concrete_p(prob, kwargs)
tspan = get_concrete_tspan(prob, isadapt, kwargs, p)
u0 = get_concrete_u0(prob, isadapt, tspan[1], kwargs)
if prob.constant_lags isa Function
constant_lags = prob.constant_lags(p)
else
constant_lags = prob.constant_lags
end
u0 = promote_u0(u0, p, tspan[1])
tspan = promote_tspan(u0, p, tspan, prob, kwargs)
remake(prob; u0 = u0, tspan = tspan, p = p, constant_lags = constant_lags)
end
# Most `promote_tspan` specializations live in package extensions; this is the generic fallback.
promote_tspan(u0, p, tspan, prob, kwargs) = _promote_tspan(tspan, kwargs)
function _promote_tspan(tspan, kwargs)
if (dt = get(kwargs, :dt, nothing)) !== nothing
tspan1, tspan2, _ = promote(tspan..., dt)
return (tspan1, tspan2)
else
return tspan
end
end
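# e.g. `_promote_tspan((0, 1), (; dt = 0.1))` returns `(0.0, 1.0)`, so the time span
# shares a common type with `dt`.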
function promote_f(f::F, ::Val{specialize}, u0, p, t) where {F, specialize}
# Ensure our jacobian will be of the same type as u0
uElType = u0 === nothing ? Float64 : eltype(u0)
if isdefined(f, :jac_prototype) && f.jac_prototype isa AbstractArray
f = @set f.jac_prototype = similar(f.jac_prototype, uElType)
end
@static if VERSION >= v"1.8-"
f = if f isa ODEFunction && isinplace(f) && !(f.f isa AbstractSciMLOperator) &&
# Some reinitialization code still uses NLSolvers stuff which doesn't
# properly tag, so opt-out if potentially a mass matrix DAE
f.mass_matrix isa UniformScaling &&
# Jacobians don't wrap, so just ignore those cases
f.jac === nothing &&
((specialize === SciMLBase.AutoSpecialize && eltype(u0) !== Any &&
RecursiveArrayTools.recursive_unitless_eltype(u0) === eltype(u0) &&
one(t) === oneunit(t) &&
Tricks.static_hasmethod(ArrayInterface.promote_eltype,
Tuple{Type{typeof(u0)}, Type{dualgen(eltype(u0))}}) &&
Tricks.static_hasmethod(promote_rule,
Tuple{Type{eltype(u0)}, Type{dualgen(eltype(u0))}}) &&
Tricks.static_hasmethod(promote_rule,
Tuple{Type{eltype(u0)}, Type{typeof(t)}})) ||
(specialize === SciMLBase.FunctionWrapperSpecialize &&
!(f.f isa FunctionWrappersWrappers.FunctionWrappersWrapper)))
return unwrapped_f(f, wrapfun_iip(f.f, (u0, u0, p, t)))
else
return f
end
else
return f
end
end
function promote_f(f::SplitFunction, ::Val{specialize}, u0, p, t) where {specialize}
typeof(f.cache) === typeof(u0) && isinplace(f) ? f : remake(f, cache = zero(u0))
end
prepare_alg(alg, u0, p, f) = alg
function get_concrete_tspan(prob, isadapt, kwargs, p)
if prob.tspan isa Function
tspan = prob.tspan(p)
elseif haskey(kwargs, :tspan)
tspan = kwargs[:tspan]
elseif prob.tspan === (nothing, nothing)
throw(NoTspanError())
else
tspan = prob.tspan
end
isadapt && eltype(tspan) <: Integer && (tspan = float.(tspan))
any(isnan, tspan) && throw(NaNTspanError())
tspan
end
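# e.g. a problem built with `tspan = (0, 10)` reaches this point with `isadapt = true`
# for adaptive solvers and has its integer span promoted to `(0.0, 10.0)`; a span of
# `(nothing, nothing)` raises `NoTspanError` instead.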
function isconcreteu0(prob, t0, kwargs)
!eval_u0(prob.u0) && prob.u0 !== nothing && !isdistribution(prob.u0)
end
function isconcretedu0(prob, t0, kwargs)
    !eval_u0(prob.du0) && prob.du0 !== nothing && !isdistribution(prob.du0)
end
function get_concrete_u0(prob, isadapt, t0, kwargs)
if eval_u0(prob.u0)
u0 = prob.u0(prob.p, t0)
elseif haskey(kwargs, :u0)
u0 = kwargs[:u0]
else
u0 = prob.u0
end
isadapt && eltype(u0) <: Integer && (u0 = float.(u0))
_u0 = handle_distribution_u0(u0)
if isinplace(prob) && (_u0 isa Number || _u0 isa SArray)
throw(IncompatibleInitialConditionError())
end
nu0 = length(something(_u0, ()))
if isdefined(prob.f, :mass_matrix) && prob.f.mass_matrix !== nothing &&
prob.f.mass_matrix isa AbstractArray &&
size(prob.f.mass_matrix, 1) !== nu0
throw(IncompatibleMassMatrixError(size(prob.f.mass_matrix, 1), nu0))
end
if _u0 isa Tuple
throw(TupleStateError())
end
_u0
end
function get_concrete_u0(prob::BVProblem, isadapt, t0, kwargs)
if haskey(kwargs, :u0)
u0 = kwargs[:u0]
else
u0 = prob.u0
end
isadapt && eltype(u0) <: Integer && (u0 = float.(u0))
_u0 = handle_distribution_u0(u0)
if isinplace(prob) && (_u0 isa Number || _u0 isa SArray)
throw(IncompatibleInitialConditionError())
end
if _u0 isa Tuple
throw(TupleStateError())
end
return _u0
end
function get_concrete_du0(prob, isadapt, t0, kwargs)
if eval_u0(prob.du0)
du0 = prob.du0(prob.p, t0)
elseif haskey(kwargs, :du0)
du0 = kwargs[:du0]
else
du0 = prob.du0
end
isadapt && eltype(du0) <: Integer && (du0 = float.(du0))
_du0 = handle_distribution_u0(du0)
if isinplace(prob) && (_du0 isa Number || _du0 isa SArray)
throw(IncompatibleInitialConditionError())
end
_du0
end
function get_concrete_p(prob, kwargs)
if haskey(kwargs, :p)
p = kwargs[:p]
else
p = prob.p
end
end
handle_distribution_u0(_u0) = _u0
eval_u0(u0::Function) = true
eval_u0(u0) = false
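# e.g. `eval_u0(u0)` is `true` only for a functional initial condition such as
# `(p, t0) -> [p[1]]`, in which case `get_concrete_u0` calls it with `(prob.p, t0)`;
# plain arrays and numbers pass through `handle_distribution_u0` unchanged.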
function __solve(
prob::AbstractDEProblem, args...; default_set = false, second_time = false,
kwargs...)
if second_time
throw(NoDefaultAlgorithmError())
elseif length(args) > 0 && !(first(args) isa
Union{Nothing, AbstractDEAlgorithm, AbstractNonlinearAlgorithm})
throw(NonSolverError())
else
__solve(prob, nothing, args...; default_set = false, second_time = true, kwargs...)
end
end
function __init(prob::AbstractDEProblem, args...; default_set = false, second_time = false,
kwargs...)
if second_time
throw(NoDefaultAlgorithmError())
elseif length(args) > 0 && !(first(args) isa
Union{Nothing, AbstractDEAlgorithm, AbstractNonlinearAlgorithm})
throw(NonSolverError())
else
__init(prob, nothing, args...; default_set = false, second_time = true, kwargs...)
end
end
function check_prob_alg_pairing(prob, alg)
if prob isa ODEProblem && !(alg isa AbstractODEAlgorithm) ||
prob isa SDEProblem && !(alg isa AbstractSDEAlgorithm) ||
prob isa SDDEProblem && !(alg isa AbstractSDEAlgorithm) ||
prob isa DDEProblem && !(alg isa AbstractDDEAlgorithm) ||
prob isa DAEProblem && !(alg isa AbstractDAEAlgorithm) ||
prob isa SteadyStateProblem && !(alg isa AbstractSteadyStateAlgorithm)
throw(ProblemSolverPairingError(prob, alg))
end
if isdefined(prob, :u0) && eltype(prob.u0) <: ForwardDiff.Dual &&
!SciMLBase.isautodifferentiable(alg)
throw(DirectAutodiffError())
end
if prob isa SDEProblem && prob.noise_rate_prototype !== nothing &&
prob.noise !== nothing &&
size(prob.noise_rate_prototype, 2) != length(prob.noise.W[1])
throw(NoiseSizeIncompatabilityError(size(prob.noise_rate_prototype, 2),
length(prob.noise.W[1])))
end
# Complex number support comes before arbitrary number support for a more direct
# error message.
if !SciMLBase.allowscomplex(alg)
if isdefined(prob, :u0) &&
RecursiveArrayTools.recursive_unitless_eltype(prob.u0) <: Complex
throw(ComplexSupportError(alg))
end
end
if isdefined(prob, :tspan) && eltype(prob.tspan) <: Complex
throw(ComplexTspanError())
end
# Check for concrete element type so that the non-concrete case throws a better error
if !SciMLBase.allows_arbitrary_number_types(alg)
if isdefined(prob, :u0)
uType = RecursiveArrayTools.recursive_unitless_eltype(prob.u0)
if Base.isconcretetype(uType) &&
!(uType <: Union{Float32, Float64, ComplexF32, ComplexF64})
throw(GenericNumberTypeError(alg,
isdefined(prob, :u0) ? typeof(prob.u0) :
nothing,
isdefined(prob, :tspan) ? typeof(prob.tspan) :
nothing))
end
end
if isdefined(prob, :tspan)
tType = eltype(prob.tspan)
if Base.isconcretetype(tType) &&
!(tType <: Union{Float32, Float64, ComplexF32, ComplexF64})
throw(GenericNumberTypeError(alg,
isdefined(prob, :u0) ? typeof(prob.u0) :
nothing,
isdefined(prob, :tspan) ? typeof(prob.tspan) :
nothing))
end
end
end
end
@inline function extract_alg(solve_args, solve_kwargs, prob_kwargs)
if isempty(solve_args) || isnothing(first(solve_args))
if haskey(solve_kwargs, :alg)
solve_kwargs[:alg]
elseif haskey(prob_kwargs, :alg)
prob_kwargs[:alg]
else
nothing
end
elseif first(solve_args) isa SciMLBase.AbstractSciMLAlgorithm &&
!(first(solve_args) isa SciMLBase.EnsembleAlgorithm)
first(solve_args)
else
nothing
end
end
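# e.g. `extract_alg((), (; alg = SomeAlg()), NamedTuple())` pulls the algorithm out of
# the solve kwargs, while a positional `extract_alg((SomeAlg(),), ...)` wins over both
# kwarg sources (`SomeAlg` is a placeholder for any non-ensemble SciML algorithm).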
################### Differentiation
"""
Ignores all adjoint definitions (i.e. `sensealg`) and proceeds to do standard
AD through the `solve` functions. Generally only used internally for implementing
discrete sensitivity algorithms.
"""
struct SensitivityADPassThrough <: AbstractDEAlgorithm end
###
### Legacy Dispatches to be Non-Breaking
###
@deprecate concrete_solve(prob::AbstractDEProblem,
alg::Union{AbstractDEAlgorithm, Nothing},
u0 = prob.u0, p = prob.p, args...; kwargs...) solve(prob, alg,
args...;
u0 = u0,
p = p,
kwargs...)
function _solve_adjoint(prob, sensealg, u0, p, originator, args...; merge_callbacks = true,
kwargs...)
alg = extract_alg(args, kwargs, prob.kwargs)
if isnothing(alg) || !(alg isa AbstractDEAlgorithm) # Default algorithm handling
_prob = get_concrete_problem(prob, !(prob isa DiscreteProblem); u0 = u0,
p = p, kwargs...)
else
_prob = get_concrete_problem(prob, isadaptive(alg); u0 = u0, p = p, kwargs...)
end
if has_kwargs(_prob)
if merge_callbacks && haskey(_prob.kwargs, :callback) && haskey(kwargs, :callback)
kwargs_temp = NamedTuple{
Base.diff_names(Base._nt_names(values(kwargs)),
(:callback,))}(values(kwargs))
callbacks = NamedTuple{(:callback,)}((DiffEqBase.CallbackSet(
_prob.kwargs[:callback],
values(kwargs).callback),))
kwargs = merge(kwargs_temp, callbacks)
end
kwargs = isempty(_prob.kwargs) ? kwargs : merge(values(_prob.kwargs), kwargs)
end
if length(args) > 1
_concrete_solve_adjoint(_prob, alg, sensealg, u0, p, originator,
Base.tail(args)...; kwargs...)
else
_concrete_solve_adjoint(_prob, alg, sensealg, u0, p, originator; kwargs...)
end
end
function _solve_forward(prob, sensealg, u0, p, originator, args...; merge_callbacks = true,
kwargs...)
alg = extract_alg(args, kwargs, prob.kwargs)
if isnothing(alg) || !(alg isa AbstractDEAlgorithm) # Default algorithm handling
_prob = get_concrete_problem(prob, !(prob isa DiscreteProblem); u0 = u0,
p = p, kwargs...)
else
_prob = get_concrete_problem(prob, isadaptive(alg); u0 = u0, p = p, kwargs...)
end
if has_kwargs(_prob)
if merge_callbacks && haskey(_prob.kwargs, :callback) && haskey(kwargs, :callback)
kwargs_temp = NamedTuple{
Base.diff_names(Base._nt_names(values(kwargs)),
(:callback,))}(values(kwargs))
callbacks = NamedTuple{(:callback,)}((DiffEqBase.CallbackSet(
_prob.kwargs[:callback],
values(kwargs).callback),))
kwargs = merge(kwargs_temp, callbacks)
end
kwargs = isempty(_prob.kwargs) ? kwargs : merge(values(_prob.kwargs), kwargs)
end
if length(args) > 1
_concrete_solve_forward(_prob, alg, sensealg, u0, p, originator,
Base.tail(args)...; kwargs...)
else
_concrete_solve_forward(_prob, alg, sensealg, u0, p, originator; kwargs...)
end
end
####
# Catch undefined AD overload cases
const ADJOINT_NOT_FOUND_MESSAGE = """
Compatibility with reverse-mode automatic differentiation requires SciMLSensitivity.jl.
Please install SciMLSensitivity.jl and do `using SciMLSensitivity`/`import SciMLSensitivity`
for this functionality. For more details, see https://sensitivity.sciml.ai/dev/.
"""
struct AdjointNotFoundError <: Exception end
function Base.showerror(io::IO, e::AdjointNotFoundError)
print(io, ADJOINT_NOT_FOUND_MESSAGE)
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
function _concrete_solve_adjoint(args...; kwargs...)
throw(AdjointNotFoundError())
end
const FORWARD_SENSITIVITY_NOT_FOUND_MESSAGE = """
Compatibility with forward-mode automatic differentiation requires SciMLSensitivity.jl.
Please install SciMLSensitivity.jl and do `using SciMLSensitivity`/`import SciMLSensitivity`
for this functionality. For more details, see https://sensitivity.sciml.ai/dev/.
"""
struct ForwardSensitivityNotFoundError <: Exception end
function Base.showerror(io::IO, e::ForwardSensitivityNotFoundError)
print(io, FORWARD_SENSITIVITY_NOT_FOUND_MESSAGE)
println(io, TruncatedStacktraces.VERBOSE_MSG)
end
function _concrete_solve_forward(args...; kwargs...)
throw(ForwardSensitivityNotFoundError())
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 2778 | @static if isdefined(SciMLBase, :DEStats)
const Stats = SciMLBase.DEStats
else
"""
$(TYPEDEF)
Statistics from the differential equation solver about the solution process.
## Fields
- nf: Number of function evaluations. If the differential equation is a split function,
such as a `SplitFunction` for implicit-explicit (IMEX) integration, then `nf` is the
number of function evaluations for the first function (the implicit function)
- nf2: If the differential equation is a split function, such as a `SplitFunction`
for implicit-explicit (IMEX) integration, then `nf2` is the number of function
evaluations for the second function, i.e. the function treated explicitly. Otherwise
it is zero.
- nw: The number of W=I-gamma*J (or W=I/gamma-J) matrices constructed during the solving
process.
- nsolve: The number of linear solves `W\b` required for the integration.
- njacs: Number of Jacobians calculated during the integration.
- nnonliniter: Total number of iterations for the nonlinear solvers.
- nnonlinconvfail: Number of nonlinear solver convergence failures.
- ncondition: Number of calls to the condition function for callbacks.
- naccept: Number of accepted steps.
- nreject: Number of rejected steps.
- maxeig: Maximum eigenvalue over the solution. This is only computed if the
method is an auto-switching algorithm.
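## Example

A short illustrative sketch (the counts below are hypothetical):

```julia
s = Stats(0)
s.nf = 152
s.naccept = 76
s.nreject = 2
show(stdout, s)
```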
"""
mutable struct Stats
nf::Int
nf2::Int
nw::Int
nsolve::Int
njacs::Int
nnonliniter::Int
nnonlinconvfail::Int
ncondition::Int
naccept::Int
nreject::Int
maxeig::Float64
end
Base.@deprecate_binding DEStats Stats false
Stats(x::Int = -1) = Stats(x, x, x, x, x, x, x, x, x, x, 0.0)
function Base.show(io::IO, s::Stats)
println(io, summary(s))
@printf io "%-50s %-d\n" "Number of function 1 evaluations:" s.nf
@printf io "%-50s %-d\n" "Number of function 2 evaluations:" s.nf2
@printf io "%-50s %-d\n" "Number of W matrix evaluations:" s.nw
@printf io "%-50s %-d\n" "Number of linear solves:" s.nsolve
@printf io "%-50s %-d\n" "Number of Jacobians created:" s.njacs
@printf io "%-50s %-d\n" "Number of nonlinear solver iterations:" s.nnonliniter
@printf io "%-50s %-d\n" "Number of nonlinear solver convergence failures:" s.nnonlinconvfail
@printf io "%-50s %-d\n" "Number of rootfind condition calls:" s.ncondition
@printf io "%-50s %-d\n" "Number of accepted steps:" s.naccept
@printf io "%-50s %-d" "Number of rejected steps:" s.nreject
    iszero(s.maxeig) || @printf io "\n%-50s %-g" "Maximum eigenvalue recorded:" s.maxeig
end
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 1394 | """
$(TYPEDEF)
Holds a tableau which defines an explicit Runge-Kutta method.
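## Example

A minimal sketch constructing the classical RK4 tableau:

```julia
A = [0 0 0 0; 1/2 0 0 0; 0 1/2 0 0; 0 0 1 0]
c = [0.0, 0.5, 0.5, 1.0]
α = [1/6, 1/3, 1/3, 1/6]
tab = ExplicitRKTableau(A, c, α, 4)
```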
"""
mutable struct ExplicitRKTableau{MType <: AbstractMatrix, VType <: AbstractVector, S} <:
ODERKTableau
A::MType
c::VType
α::VType
αEEst::VType
d::VType # dense output coefficients
stages::Int
order::Int
adaptiveorder::Int #The lower order of the pair. Only used for adaptivity.
fsal::Bool
stability_size::S
end
function ExplicitRKTableau(A::MType, c::VType, α::VType, order;
adaptiveorder = 0, αEEst = similar(α, 0),
fsal = false, stability_size = 0.0,
d = similar(α, 0)) where {MType, VType}
S = typeof(stability_size)
ExplicitRKTableau{MType, VType, S}(A, c, α, αEEst, d, length(α), order, adaptiveorder,
fsal, stability_size)
end
"""
$(TYPEDEF)
Holds a tableau which defines an implicit Runge-Kutta method.
"""
mutable struct ImplicitRKTableau{MType <: AbstractMatrix, VType <: AbstractVector} <:
ODERKTableau
A::MType
c::VType
α::VType
αEEst::VType
stages::Int
order::Int
adaptiveorder::Int #The lower order of the pair. Only used for adaptivity.
end
function ImplicitRKTableau(A::MType, c::VType, α::VType, order;
adaptiveorder = 0, αEEst = VType()) where {MType, VType}
ImplicitRKTableau{MType, VType}(A, c, α, αEEst, length(α), order, adaptiveorder)
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 18223 | abstract type AbstractNonlinearTerminationMode end
abstract type AbstractSafeNonlinearTerminationMode <: AbstractNonlinearTerminationMode end
abstract type AbstractSafeBestNonlinearTerminationMode <:
AbstractSafeNonlinearTerminationMode end
"""
SteadyStateDiffEqTerminationMode <: AbstractNonlinearTerminationMode
Check whether all values of the derivative are close to zero with respect to both the
relative and absolute tolerances.
!!! danger
This has been deprecated.
"""
struct SteadyStateDiffEqTerminationMode <: AbstractNonlinearTerminationMode
function SteadyStateDiffEqTerminationMode()
Base.depwarn("`SteadyStateDiffEqTerminationMode` is deprecated and isn't used \
in any upstream library. Remove uses of this.",
:SteadyStateDiffEqTerminationMode)
return new()
end
end
"""
SimpleNonlinearSolveTerminationMode <: AbstractNonlinearTerminationMode
Check whether all values of the derivative are close to zero with respect to both the
relative and absolute tolerances, or whether the current and previous states agree to
within the specified tolerances.
!!! danger
This has been deprecated.
"""
struct SimpleNonlinearSolveTerminationMode <: AbstractNonlinearTerminationMode
function SimpleNonlinearSolveTerminationMode()
Base.depwarn("`SimpleNonlinearSolveTerminationMode` is deprecated and isn't used \
in any upstream library. Remove uses of this.",
:SimpleNonlinearSolveTerminationMode)
return new()
end
end
@inline set_termination_mode_internalnorm(mode, ::F) where {F} = mode
@inline __norm_type(::typeof(Base.Fix2(norm, Inf))) = :Inf
@inline __norm_type(::typeof(Base.Fix1(maximum, abs))) = :Inf
@inline __norm_type(::typeof(Base.Fix2(norm, 2))) = :L2
@inline __norm_type(::F) where {F} = F
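# e.g. `__norm_type(Base.Fix2(norm, Inf))` and `__norm_type(Base.Fix1(maximum, abs))`
# both collapse to `:Inf`, so either spelling of the inf-norm selects the same
# specialization of the termination modes below.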
const TERM_DOCS = Dict(
:Norm => doc"``\| \Delta u \| \leq reltol \times \| \Delta u + u \|`` or ``\| \Delta u \| \leq abstol``.",
:Rel => doc"``all \left(| \Delta u | \leq reltol \times | u | \right)``.",
:RelNorm => doc"``\| \Delta u \| \leq reltol \times \| \Delta u + u \|``.",
:Abs => doc"``all \left( | \Delta u | \leq abstol \right)``.",
:AbsNorm => doc"``\| \Delta u \| \leq abstol``."
)
const __TERM_INTERNALNORM_DOCS = """
where `internalnorm` is the norm to use for the termination condition. Special handling is
done for `norm(_, 2)`, `norm(_, Inf)`, and `maximum(abs, _)`.
Default is left as `nothing`, which allows upstream frameworks to choose the correct norm
based on the problem type. If directly using the `init` API, a proper norm must be
provided."""
for name in (:Rel, :Abs)
struct_name = Symbol(name, :TerminationMode)
    docstring = TERM_DOCS[name]
@eval begin
"""
$($struct_name) <: AbstractNonlinearTerminationMode
        Terminates if $($docstring).
``\\Delta u`` denotes the increment computed by the nonlinear solver and ``u`` denotes the solution.
"""
struct $(struct_name) <: AbstractNonlinearTerminationMode end
end
end
for name in (:Norm, :RelNorm, :AbsNorm)
struct_name = Symbol(name, :TerminationMode)
    docstring = TERM_DOCS[name]
@eval begin
"""
$($struct_name) <: AbstractNonlinearTerminationMode
        Terminates if $($docstring).
``\\Delta u`` denotes the increment computed by the inner nonlinear solver.
## Constructor
$($struct_name)(internalnorm = nothing)
$($__TERM_INTERNALNORM_DOCS).
"""
@concrete struct $(struct_name){F} <: AbstractNonlinearTerminationMode
internalnorm
$(struct_name)(f::F = nothing) where {F} = new{__norm_type(f), F}(f)
end
@inline function set_termination_mode_internalnorm(
::$(struct_name), internalnorm::F) where {F}
return $(struct_name)(internalnorm)
end
end
end
for norm_type in (:Rel, :Abs), safety in (:Safe, :SafeBest)
struct_name = Symbol(norm_type, safety, :TerminationMode)
supertype_name = Symbol(:Abstract, safety, :NonlinearTerminationMode)
    docstring = safety == :Safe ?
                "Essentially [`$(norm_type)NormTerminationMode`](@ref) + terminate if there \
                 has been no improvement for the last `patience_steps` + terminate if the \
                 solution blows up (diverges)." :
                "Essentially [`$(norm_type)SafeTerminationMode`](@ref), but caches the best \
                 solution found so far."
@eval begin
"""
$($struct_name) <: $($supertype_name)
        $($docstring)
## Constructor
$($struct_name)(internalnorm = nothing; protective_threshold = nothing,
patience_steps = 100, patience_objective_multiplier = 3,
min_max_factor = 1.3, max_stalled_steps = nothing)
$($__TERM_INTERNALNORM_DOCS).
"""
@concrete struct $(struct_name){F, T <: Union{Nothing, Int}} <: $(supertype_name)
internalnorm
protective_threshold
patience_steps::Int
patience_objective_multiplier
min_max_factor
max_stalled_steps::T
function $(struct_name)(f::F = nothing; protective_threshold = nothing,
patience_steps = 100, patience_objective_multiplier = 3,
min_max_factor = 1.3, max_stalled_steps = nothing) where {F}
return new{__norm_type(f), typeof(max_stalled_steps), F,
typeof(protective_threshold), typeof(patience_objective_multiplier),
typeof(min_max_factor)}(f, protective_threshold, patience_steps,
patience_objective_multiplier, min_max_factor, max_stalled_steps)
end
end
@inline function set_termination_mode_internalnorm(
mode::$(struct_name), internalnorm::F) where {F}
return $(struct_name)(internalnorm; mode.protective_threshold,
mode.patience_steps, mode.patience_objective_multiplier,
mode.min_max_factor, mode.max_stalled_steps)
end
end
end
@concrete mutable struct NonlinearTerminationModeCache{dep_retcode,
M <: AbstractNonlinearTerminationMode,
R <: Union{NonlinearSafeTerminationReturnCode.T, ReturnCode.T}}
u
retcode::R
abstol
reltol
best_objective_value
mode::M
initial_objective
objectives_trace
nsteps::Int
saved_values
u0_norm
step_norm_trace
max_stalled_steps
u_diff_cache
end
@inline get_termination_mode(cache::NonlinearTerminationModeCache) = cache.mode
@inline get_abstol(cache::NonlinearTerminationModeCache) = cache.abstol
@inline get_reltol(cache::NonlinearTerminationModeCache) = cache.reltol
@inline get_saved_values(cache::NonlinearTerminationModeCache) = cache.saved_values
function __update_u!!(cache::NonlinearTerminationModeCache, u)
cache.u === nothing && return
if cache.u isa AbstractArray && ArrayInterface.can_setindex(cache.u)
copyto!(cache.u, u)
else
cache.u = u
end
end
@inline __cvt_real(::Type{T}, ::Nothing) where {T} = nothing
@inline __cvt_real(::Type{T}, x) where {T} = real(T(x))
@inline _get_tolerance(η, ::Type{T}) where {T} = __cvt_real(T, η)
@inline function _get_tolerance(::Nothing, ::Type{T}) where {T}
η = real(oneunit(T)) * (eps(real(one(T))))^(4 // 5)
return _get_tolerance(η, T)
end
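# e.g. `_get_tolerance(nothing, Float64)` yields `eps(Float64)^(4 // 5) ≈ 3.0e-13`,
# the default tolerance used when the caller supplies neither `abstol` nor `reltol`.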
function SciMLBase.init(du::Union{AbstractArray{T}, T}, u::Union{AbstractArray{T}, T},
mode::AbstractNonlinearTerminationMode, saved_value_prototype...;
use_deprecated_retcodes::Val{D} = Val(true), # Remove in v8, warn in v7
abstol = nothing, reltol = nothing, kwargs...) where {D, T <: Number}
abstol = _get_tolerance(abstol, T)
reltol = _get_tolerance(reltol, T)
TT = typeof(abstol)
u_ = mode isa AbstractSafeBestNonlinearTerminationMode ?
(ArrayInterface.can_setindex(u) ? copy(u) : u) : nothing
if mode isa AbstractSafeNonlinearTerminationMode
if mode isa AbsSafeTerminationMode || mode isa AbsSafeBestTerminationMode
initial_objective = __apply_termination_internalnorm(mode.internalnorm, du)
u0_norm = nothing
else
initial_objective = __apply_termination_internalnorm(mode.internalnorm, du) /
(__add_and_norm(mode.internalnorm, du, u) + eps(TT))
u0_norm = mode.max_stalled_steps === nothing ? nothing : norm(u, 2)
end
objectives_trace = Vector{TT}(undef, mode.patience_steps)
step_norm_trace = mode.max_stalled_steps === nothing ? nothing :
Vector{TT}(undef, mode.max_stalled_steps)
best_value = initial_objective
max_stalled_steps = mode.max_stalled_steps
if ArrayInterface.can_setindex(u_) && !(u_ isa Number) &&
step_norm_trace !== nothing
u_diff_cache = similar(u_)
else
u_diff_cache = u_
end
else
initial_objective = nothing
objectives_trace = nothing
u0_norm = nothing
step_norm_trace = nothing
best_value = __cvt_real(T, Inf)
max_stalled_steps = nothing
u_diff_cache = u_
end
length(saved_value_prototype) == 0 && (saved_value_prototype = nothing)
retcode = ifelse(D, NonlinearSafeTerminationReturnCode.Default, ReturnCode.Default)
return NonlinearTerminationModeCache{D}(u_, retcode, abstol, reltol, best_value, mode,
initial_objective, objectives_trace, 0, saved_value_prototype, u0_norm,
step_norm_trace, max_stalled_steps, u_diff_cache)
end
function SciMLBase.reinit!(cache::NonlinearTerminationModeCache{dep_retcode}, du,
u, saved_value_prototype...; abstol = nothing, reltol = nothing,
kwargs...) where {dep_retcode}
T = eltype(cache.abstol)
length(saved_value_prototype) != 0 && (cache.saved_values = saved_value_prototype)
u_ = cache.mode isa AbstractSafeBestNonlinearTerminationMode ?
(ArrayInterface.can_setindex(u) ? copy(u) : u) : nothing
cache.u = u_
cache.retcode = ifelse(dep_retcode, NonlinearSafeTerminationReturnCode.Default,
ReturnCode.Default)
cache.abstol = _get_tolerance(abstol, T)
cache.reltol = _get_tolerance(reltol, T)
cache.nsteps = 0
mode = get_termination_mode(cache)
if mode isa AbstractSafeNonlinearTerminationMode
if mode isa AbsSafeTerminationMode || mode isa AbsSafeBestTerminationMode
initial_objective = __apply_termination_internalnorm(
cache.mode.internalnorm, du)
else
initial_objective = __apply_termination_internalnorm(
cache.mode.internalnorm, du) /
                                (__add_and_norm(cache.mode.internalnorm, du, u) + eps(T))
cache.max_stalled_steps !== nothing && (cache.u0_norm = norm(u_, 2))
end
best_value = initial_objective
else
initial_objective = nothing
best_value = __cvt_real(T, Inf)
end
cache.best_objective_value = best_value
cache.initial_objective = initial_objective
return cache
end
# This dispatch is needed based on how the terminating callback works!
# It intentionally drops the `abstol` and `reltol` arguments.
function (cache::NonlinearTerminationModeCache)(integrator::AbstractODEIntegrator,
abstol::Number, reltol::Number, min_t)
retval = cache(cache.mode, get_du(integrator), integrator.u, integrator.uprev)
(min_t === nothing || integrator.t ≥ min_t) && return retval
return false
end
function (cache::NonlinearTerminationModeCache)(du, u, uprev, args...)
return cache(cache.mode, du, u, uprev, args...)
end
function (cache::NonlinearTerminationModeCache)(mode::AbstractNonlinearTerminationMode, du,
u, uprev, args...)
return check_convergence(mode, du, u, uprev, cache.abstol, cache.reltol)
end
function (cache::NonlinearTerminationModeCache{dep_retcode})(
mode::AbstractSafeNonlinearTerminationMode,
du, u, uprev, args...) where {dep_retcode}
if mode isa AbsSafeTerminationMode || mode isa AbsSafeBestTerminationMode
objective = __apply_termination_internalnorm(mode.internalnorm, du)
criteria = cache.abstol
else
objective = __apply_termination_internalnorm(mode.internalnorm, du) /
(__add_and_norm(mode.internalnorm, du, u) + eps(cache.abstol))
criteria = cache.reltol
end
# Protective Break
if isinf(objective) || isnan(objective)
cache.retcode = ifelse(dep_retcode,
NonlinearSafeTerminationReturnCode.ProtectiveTermination, ReturnCode.Unstable)
return true
end
## By default we turn this off since it has the potential for false positives
if cache.mode.protective_threshold !== nothing &&
(objective > cache.initial_objective * cache.mode.protective_threshold * length(du))
cache.retcode = ifelse(dep_retcode,
NonlinearSafeTerminationReturnCode.ProtectiveTermination, ReturnCode.Unstable)
return true
end
# Check if best solution
if mode isa AbstractSafeBestNonlinearTerminationMode &&
objective < cache.best_objective_value
cache.best_objective_value = objective
__update_u!!(cache, u)
if cache.saved_values !== nothing && length(args) ≥ 1
cache.saved_values = args
end
end
# Main Termination Condition
if objective ≤ criteria
cache.retcode = ifelse(dep_retcode,
NonlinearSafeTerminationReturnCode.Success, ReturnCode.Success)
return true
end
# Terminate if there has been no improvement for the last `patience_steps`
cache.nsteps += 1
cache.nsteps == 1 && (cache.initial_objective = objective)
cache.objectives_trace[mod1(cache.nsteps, length(cache.objectives_trace))] = objective
if objective ≤ cache.mode.patience_objective_multiplier * criteria
if cache.nsteps ≥ cache.mode.patience_steps
if cache.nsteps < length(cache.objectives_trace)
min_obj, max_obj = extrema(@view(cache.objectives_trace[1:(cache.nsteps)]))
else
min_obj, max_obj = extrema(cache.objectives_trace)
end
            if max_obj < cache.mode.min_max_factor * min_obj
cache.retcode = ifelse(dep_retcode,
NonlinearSafeTerminationReturnCode.PatienceTermination,
ReturnCode.Stalled)
return true
end
end
end
# Test for stalling if that is not disabled
if cache.step_norm_trace !== nothing
if ArrayInterface.can_setindex(cache.u_diff_cache) && !(u isa Number)
@. cache.u_diff_cache = u - uprev
else
cache.u_diff_cache = u .- uprev
end
du_norm = norm(cache.u_diff_cache, 2)
cache.step_norm_trace[mod1(cache.nsteps, length(cache.step_norm_trace))] = du_norm
if cache.nsteps ≥ cache.mode.max_stalled_steps
max_step_norm = maximum(cache.step_norm_trace)
if cache.mode isa AbsSafeTerminationMode ||
cache.mode isa AbsSafeBestTerminationMode
stalled_step = max_step_norm ≤ cache.abstol
else
stalled_step = max_step_norm ≤
cache.reltol * (max_step_norm + cache.u0_norm)
end
if stalled_step
cache.retcode = ifelse(dep_retcode,
NonlinearSafeTerminationReturnCode.PatienceTermination,
ReturnCode.Stalled)
return true
end
end
end
cache.retcode = ifelse(dep_retcode,
NonlinearSafeTerminationReturnCode.Failure, ReturnCode.Failure)
return false
end
# Check Convergence
function check_convergence(::SteadyStateDiffEqTerminationMode, duₙ, uₙ, uₙ₋₁, abstol,
reltol)
if __fast_scalar_indexing(duₙ, uₙ)
return all(@closure(xy->begin
x, y = xy
return (abs(x) ≤ abstol) | (abs(x) ≤ reltol * abs(y))
end),
zip(duₙ, uₙ))
else
return all(@. (abs(duₙ) ≤ abstol) | (abs(duₙ) ≤ reltol * abs(uₙ)))
end
end
function check_convergence(
::SimpleNonlinearSolveTerminationMode, duₙ, uₙ, uₙ₋₁, abstol, reltol)
if __fast_scalar_indexing(duₙ, uₙ)
return all(@closure(xy->begin
x, y = xy
return (abs(x) ≤ abstol) | (abs(x) ≤ reltol * abs(y))
end),
zip(duₙ, uₙ)) ||
__nonlinearsolve_is_approx(uₙ, uₙ₋₁; atol = abstol, rtol = reltol)
else
return all(@. (abs(duₙ) ≤ abstol) | (abs(duₙ) ≤ reltol * abs(uₙ))) ||
__nonlinearsolve_is_approx(uₙ, uₙ₋₁; atol = abstol, rtol = reltol)
end
end
function check_convergence(::RelTerminationMode, duₙ, uₙ, uₙ₋₁, abstol, reltol)
if __fast_scalar_indexing(duₙ, uₙ)
return all(@closure(xy->begin
x, y = xy
return abs(x) ≤ reltol * abs(y)
end), zip(duₙ, uₙ))
else
        return all(@. abs(duₙ) ≤ reltol * abs(uₙ))
end
end
function check_convergence(::AbsTerminationMode, duₙ, uₙ, uₙ₋₁, abstol, reltol)
return all(@closure(x->abs(x) ≤ abstol), duₙ)
end
function check_convergence(mode::NormTerminationMode, duₙ, uₙ, uₙ₋₁, abstol, reltol)
du_norm = __apply_termination_internalnorm(mode.internalnorm, duₙ)
return (du_norm ≤ abstol) ||
(du_norm ≤ reltol * __add_and_norm(mode.internalnorm, duₙ, uₙ))
end
function check_convergence(
mode::Union{
RelNormTerminationMode, RelSafeTerminationMode, RelSafeBestTerminationMode},
duₙ, uₙ, uₙ₋₁, abstol, reltol)
return __apply_termination_internalnorm(mode.internalnorm, duₙ) ≤
reltol * __add_and_norm(mode.internalnorm, duₙ, uₙ)
end
function check_convergence(
mode::Union{AbsNormTerminationMode, AbsSafeTerminationMode,
AbsSafeBestTerminationMode},
duₙ, uₙ, uₙ₋₁, abstol, reltol)
return __apply_termination_internalnorm(mode.internalnorm, duₙ) ≤ abstol
end
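#=
Illustrative usage of the termination-mode API (editor's sketch; when calling
`init` directly, a concrete `internalnorm` such as `Base.Fix1(maximum, abs)`
must be supplied):

    du, u = [1e-10, -2e-10], [1.0, 2.0]
    mode = AbsNormTerminationMode(Base.Fix1(maximum, abs))
    cache = SciMLBase.init(du, u, mode; abstol = 1e-8)
    cache(du, u, u)  # true: the inf-norm of `du` is below `abstol`
=#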
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 13347 | """
NonlinearSafeTerminationReturnCode
Return Codes for the safe nonlinear termination conditions.
These return codes have been deprecated. Termination conditions will return
`SciMLBase.ReturnCode.T` starting from v7.
"""
@enumx NonlinearSafeTerminationReturnCode begin
"""
NonlinearSafeTerminationReturnCode.Success
Termination Condition was satisfied!
"""
Success
"""
NonlinearSafeTerminationReturnCode.Default
Default Return Code. Used for type stability and conveys no additional information!
"""
Default
"""
NonlinearSafeTerminationReturnCode.PatienceTermination
Terminate if there has been no improvement for the last `patience_steps`.
"""
PatienceTermination
"""
NonlinearSafeTerminationReturnCode.ProtectiveTermination
Terminate if the objective value increased by this factor wrt initial objective or the
value diverged.
"""
ProtectiveTermination
"""
NonlinearSafeTerminationReturnCode.Failure
Termination Condition was not satisfied!
"""
Failure
end
# NOTE: Deprecate the following API eventually. This API leads to quite a bit of type
# instability
@enumx NLSolveSafeTerminationReturnCode begin
Success
PatienceTermination
ProtectiveTermination
Failure
end
# SteadyStateDefault and NLSolveDefault are needed to be compatible with the existing
# termination conditions in NonlinearSolve and SteadyStateDiffEq
@enumx NLSolveTerminationMode begin
SteadyStateDefault
NLSolveDefault
Norm
Rel
RelNorm
Abs
AbsNorm
RelSafe
RelSafeBest
AbsSafe
AbsSafeBest
end
struct NLSolveSafeTerminationOptions{T1, T2, T3}
protective_threshold::T1
patience_steps::Int
patience_objective_multiplier::T2
min_max_factor::T3
end
TruncatedStacktraces.@truncate_stacktrace NLSolveSafeTerminationOptions
mutable struct NLSolveSafeTerminationResult{T, uType}
u::uType
best_objective_value::T
best_objective_value_iteration::Int
return_code::NLSolveSafeTerminationReturnCode.T
end
function NLSolveSafeTerminationResult(u = nothing; best_objective_value = Inf64,
best_objective_value_iteration = 0,
return_code = NLSolveSafeTerminationReturnCode.Failure)
u = u !== nothing ? copy(u) : u
Base.depwarn(
"NLSolveSafeTerminationResult has been deprecated in favor of the new dispatch based termination conditions. Please use the new termination conditions API!",
:NLSolveSafeTerminationResult)
return NLSolveSafeTerminationResult{typeof(best_objective_value), typeof(u)}(u,
best_objective_value, best_objective_value_iteration, return_code)
end
const BASIC_TERMINATION_MODES = (NLSolveTerminationMode.SteadyStateDefault,
NLSolveTerminationMode.NLSolveDefault,
NLSolveTerminationMode.Norm, NLSolveTerminationMode.Rel,
NLSolveTerminationMode.RelNorm,
NLSolveTerminationMode.Abs, NLSolveTerminationMode.AbsNorm)
const SAFE_TERMINATION_MODES = (NLSolveTerminationMode.RelSafe,
NLSolveTerminationMode.RelSafeBest,
NLSolveTerminationMode.AbsSafe,
NLSolveTerminationMode.AbsSafeBest)
const SAFE_BEST_TERMINATION_MODES = (NLSolveTerminationMode.RelSafeBest,
NLSolveTerminationMode.AbsSafeBest)
@doc doc"""
NLSolveTerminationCondition(mode; abstol::T = 1e-8, reltol::T = 1e-6,
protective_threshold = 1e3, patience_steps::Int = 30,
patience_objective_multiplier = 3, min_max_factor = 1.3)
Define the termination criteria for the NonlinearProblem or SteadyStateProblem.
## Termination Conditions
#### Termination on Absolute Tolerance
* `NLSolveTerminationMode.Abs`: Terminates if ``all \left( | \frac{\partial u}{\partial t} | \leq abstol \right)``
* `NLSolveTerminationMode.AbsNorm`: Terminates if ``\| \frac{\partial u}{\partial t} \| \leq abstol``
* `NLSolveTerminationMode.AbsSafe`: Essentially `abs_norm` + terminate if there has been no improvement for the last 30 steps + terminate if the solution blows up (diverges)
* `NLSolveTerminationMode.AbsSafeBest`: Same as `NLSolveTerminationMode.AbsSafe` but uses the best solution found so far, i.e. deviates only if the solution has not converged
#### Termination on Relative Tolerance
* `NLSolveTerminationMode.Rel`: Terminates if ``all \left(| \frac{\partial u}{\partial t} | \leq reltol \times | u | \right)``
* `NLSolveTerminationMode.RelNorm`: Terminates if ``\| \frac{\partial u}{\partial t} \| \leq reltol \times \| \frac{\partial u}{\partial t} + u \|``
* `NLSolveTerminationMode.RelSafe`: Essentially `rel_norm` + terminate if there has been no improvement for the last 30 steps + terminate if the solution blows up (diverges)
* `NLSolveTerminationMode.RelSafeBest`: Same as `NLSolveTerminationMode.RelSafe` but uses the best solution found so far, i.e. deviates only if the solution has not converged
#### Termination using both Absolute and Relative Tolerances
* `NLSolveTerminationMode.Norm`: Terminates if ``\| \frac{\partial u}{\partial t} \| \leq reltol \times \| \frac{\partial u}{\partial t} + u \|`` or ``\| \frac{\partial u}{\partial t} \| \leq abstol``
* `NLSolveTerminationMode.SteadyStateDefault`: Check whether all values of the derivative are close to zero with respect to both the relative and absolute tolerances. This is usable for small problems but doesn't scale well for neural networks.
* `NLSolveTerminationMode.NLSolveDefault`: Check whether all values of the derivative are close to zero with respect to both the relative and absolute tolerances, or whether the current and previous states agree to within the specified tolerances. This is usable for small problems but doesn't scale well for neural networks.
## General Arguments
* `abstol`: Absolute Tolerance
* `reltol`: Relative Tolerance
## Arguments specific to `*Safe*` modes
* `protective_threshold`: If the objective value increased by this factor wrt initial objective terminate immediately.
* `patience_steps`: If objective is within `patience_objective_multiplier` factor of the criteria and no improvement within `min_max_factor` has happened then terminate.
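## Example

A minimal sketch of this deprecated API, shown for completeness only:

```julia
cond = NLSolveTerminationCondition(NLSolveTerminationMode.RelSafeBest;
    abstol = 1e-9, reltol = 1e-7)
storage = NLSolveSafeTerminationResult(zeros(2))
has_converged = cond(storage)  # returns a closure `(du, u, uprev, abstol, reltol) -> Bool`
```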
!!! warning
This has been deprecated and will be removed in the next major release. Please use the new dispatch based termination conditions API.
"""
struct NLSolveTerminationCondition{mode, T,
S <: Union{<:NLSolveSafeTerminationOptions, Nothing}}
abstol::T
reltol::T
safe_termination_options::S
end
TruncatedStacktraces.@truncate_stacktrace NLSolveTerminationCondition 1
function Base.show(io::IO, s::NLSolveTerminationCondition{mode}) where {mode}
print(io,
"NLSolveTerminationCondition(mode = $(mode), abstol = $(s.abstol), reltol = $(s.reltol)")
if mode ∈ SAFE_TERMINATION_MODES
print(io, ", safe_termination_options = ", s.safe_termination_options, ")")
else
print(io, ")")
end
end
get_termination_mode(::NLSolveTerminationCondition{mode}) where {mode} = mode
# Don't specify `mode` since the defaults would depend on the package
function NLSolveTerminationCondition(mode; abstol::T = 1e-8, reltol::T = 1e-6,
protective_threshold = 1e3, patience_steps::Int = 30,
patience_objective_multiplier = 3,
min_max_factor = 1.3) where {T}
Base.depwarn(
"NLSolveTerminationCondition has been deprecated in favor of the new dispatch based termination conditions. Please use the new termination conditions API!",
:NLSolveTerminationCondition)
@assert mode ∈ instances(NLSolveTerminationMode.T)
options = if mode ∈ SAFE_TERMINATION_MODES
NLSolveSafeTerminationOptions(protective_threshold, patience_steps,
patience_objective_multiplier, min_max_factor)
else
nothing
end
return NLSolveTerminationCondition{mode, T, typeof(options)}(abstol, reltol, options)
end
function (cond::NLSolveTerminationCondition)(storage::Union{
NLSolveSafeTerminationResult,
Nothing
})
mode = get_termination_mode(cond)
# We need both the dispatches to support solvers that don't use the integrator
# interface like SimpleNonlinearSolve
if mode in BASIC_TERMINATION_MODES
function _termination_condition_closure_basic(integrator, abstol, reltol, min_t)
return _termination_condition_closure_basic(get_du(integrator), integrator.u,
integrator.uprev, abstol, reltol)
end
function _termination_condition_closure_basic(du, u, uprev, abstol, reltol)
return _has_converged(du, u, uprev, cond, abstol, reltol)
end
return _termination_condition_closure_basic
else
mode ∈ SAFE_BEST_TERMINATION_MODES && @assert storage !== nothing
nstep::Int = 0
function _termination_condition_closure_safe(integrator, abstol, reltol, min_t)
return _termination_condition_closure_safe(get_du(integrator), integrator.u,
integrator.uprev, abstol, reltol)
end
@inbounds function _termination_condition_closure_safe(du, u, uprev, abstol, reltol)
aType = typeof(abstol)
protective_threshold = aType(cond.safe_termination_options.protective_threshold)
objective_values = aType[]
patience_objective_multiplier = cond.safe_termination_options.patience_objective_multiplier
if mode ∈ SAFE_BEST_TERMINATION_MODES
storage.best_objective_value = aType(Inf)
storage.best_objective_value_iteration = 0
end
if mode ∈ SAFE_BEST_TERMINATION_MODES
objective = NONLINEARSOLVE_DEFAULT_NORM(du)
criteria = abstol
else
objective = NONLINEARSOLVE_DEFAULT_NORM(du) /
(NONLINEARSOLVE_DEFAULT_NORM(du .+ u) + eps(aType))
criteria = reltol
end
if mode ∈ SAFE_BEST_TERMINATION_MODES
if objective < storage.best_objective_value
storage.best_objective_value = objective
storage.best_objective_value_iteration = nstep + 1
if storage.u !== nothing
storage.u .= u
end
end
end
# Main Termination Criteria
if objective ≤ criteria
storage.return_code = NLSolveSafeTerminationReturnCode.Success
return true
end
# Terminate if there has been no improvement for the last `patience_steps`
nstep += 1
push!(objective_values, objective)
if objective ≤ typeof(criteria)(patience_objective_multiplier) * criteria
if nstep ≥ cond.safe_termination_options.patience_steps
last_k_values = objective_values[max(1,
length(objective_values) -
cond.safe_termination_options.patience_steps):end]
if maximum(last_k_values) <
typeof(criteria)(cond.safe_termination_options.min_max_factor) *
minimum(last_k_values)
storage.return_code = NLSolveSafeTerminationReturnCode.PatienceTermination
return true
end
end
end
# Protective break
if objective ≥ objective_values[1] * protective_threshold * length(du)
storage.return_code = NLSolveSafeTerminationReturnCode.ProtectiveTermination
return true
end
storage.return_code = NLSolveSafeTerminationReturnCode.Failure
return false
end
return _termination_condition_closure_safe
end
end
# Convergence Criteria
@inline function _has_converged(du, u, uprev, cond::NLSolveTerminationCondition{mode},
abstol = cond.abstol, reltol = cond.reltol) where {mode}
return _has_converged(du, u, uprev, mode, abstol, reltol)
end
@inline @inbounds function _has_converged(du, u, uprev, mode, abstol, reltol)
if mode == NLSolveTerminationMode.Norm
du_norm = NONLINEARSOLVE_DEFAULT_NORM(du)
return du_norm ≤ abstol || du_norm ≤ reltol * NONLINEARSOLVE_DEFAULT_NORM(du + u)
elseif mode == NLSolveTerminationMode.Rel
return all(abs.(du) .≤ reltol .* abs.(u))
elseif mode ∈ (NLSolveTerminationMode.RelNorm, NLSolveTerminationMode.RelSafe,
NLSolveTerminationMode.RelSafeBest)
return NONLINEARSOLVE_DEFAULT_NORM(du) ≤
reltol * NONLINEARSOLVE_DEFAULT_NORM(du .+ u)
elseif mode == NLSolveTerminationMode.Abs
return all(abs.(du) .≤ abstol)
elseif mode ∈ (NLSolveTerminationMode.AbsNorm, NLSolveTerminationMode.AbsSafe,
NLSolveTerminationMode.AbsSafeBest)
return NONLINEARSOLVE_DEFAULT_NORM(du) ≤ abstol
elseif mode == NLSolveTerminationMode.SteadyStateDefault
return all((abs.(du) .≤ abstol) .| (abs.(du) .≤ reltol .* abs.(u)))
elseif mode == NLSolveTerminationMode.NLSolveDefault
atol, rtol = abstol, reltol
return all((abs.(du) .≤ abstol) .| (abs.(du) .≤ reltol .* abs.(u))) ||
isapprox(u, uprev; atol, rtol)
else
throw(ArgumentError("Unknown termination mode: $mode"))
end
end
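# e.g. `_has_converged([1e-9], [1.0], [1.0], NLSolveTerminationMode.Abs, 1e-8, 1e-6)`
# is `true`, since every `|du|` falls below `abstol`.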
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 4603 | # Handled in Extensions
value(x) = x
isdistribution(u0) = false
_vec(v) = vec(v)
_vec(v::Number) = v
_vec(v::AbstractSciMLScalarOperator) = v
_vec(v::AbstractVector) = v
_reshape(v, siz) = reshape(v, siz)
_reshape(v::Number, siz) = v
_reshape(v::AbstractSciMLScalarOperator, siz) = v
macro tight_loop_macros(ex)
:($(esc(ex)))
end
# TODO: would be good to have dtmin a function of dt
function prob2dtmin(prob; use_end_time = true)
prob2dtmin(prob.tspan, oneunit(eltype(prob.tspan)), use_end_time)
end
# This function requires `eps` to exist, which is why the method below is restricted;
# e.g. a `Rational` tspan has no `eps` and falls through to the generic fallback.
function prob2dtmin(tspan, ::Union{AbstractFloat, ForwardDiff.Dual}, use_end_time)
t1, t2 = tspan
isfinite(t1) || throw(ArgumentError("t0 in the tspan `(t0, t1)` must be finite"))
if use_end_time && isfinite(t2 - t1)
return max(eps(t2), eps(t1))
else
return max(eps(typeof(t1)), eps(t1))
end
end
prob2dtmin(tspan, ::Integer, ::Any) = 0
# Multiplication is for putting the right units on the constant!
prob2dtmin(tspan, onet, ::Any) = onet * 1 // Int64(2)^33 # roughly 1e-10, written as a rational so it is more likely to compile down to a multiplication.
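# e.g. `prob2dtmin((0.0, 1.0), 1.0, true) == eps(1.0)`, an integer time type gives a
# dtmin of 0, and any other number type (say `Rational`) hits the `1 // 2^33 ≈ 1.16e-10`
# fallback above.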
function timedepentdtmin(integrator::DEIntegrator)
timedepentdtmin(integrator.t, integrator.opts.dtmin)
end
timedepentdtmin(t::AbstractFloat, dtmin) = abs(max(eps(t), dtmin))
timedepentdtmin(::Any, dtmin) = abs(dtmin)
maybe_with_logger(f, logger) = logger === nothing ? f() : Logging.with_logger(f, logger)
function default_logger(logger)
Logging.min_enabled_level(logger) ≤ ProgressLogging.ProgressLevel && return nothing
if Sys.iswindows() || (isdefined(Main, :IJulia) && Main.IJulia.inited)
progresslogger = ConsoleProgressMonitor.ProgressLogger()
else
progresslogger = TerminalLoggers.TerminalLogger()
end
logger1 = LoggingExtras.EarlyFilteredLogger(progresslogger) do log
log.level == ProgressLogging.ProgressLevel
end
logger2 = LoggingExtras.EarlyFilteredLogger(logger) do log
log.level != ProgressLogging.ProgressLevel
end
LoggingExtras.TeeLogger(logger1, logger2)
end
# for the non-unitful case the correct type is just u
_rate_prototype(u, t::T, onet::T) where {T} = u
# Nonlinear Solve functionality
@inline __fast_scalar_indexing(args...) = all(ArrayInterface.fast_scalar_indexing, args)
@inline __maximum_abs(op::F, x, y) where {F} = __maximum(abs ∘ op, x, y)
## Nonallocating version of maximum(op.(x, y))
@inline function __maximum(op::F, x, y) where {F}
if __fast_scalar_indexing(x, y)
return maximum(@closure((xᵢyᵢ)->begin
xᵢ, yᵢ = xᵢyᵢ
return op(xᵢ, yᵢ)
end), zip(x, y))
else
return mapreduce(@closure((xᵢ, yᵢ)->op(xᵢ, yᵢ)), max, x, y)
end
end
@inline function __norm_op(::typeof(Base.Fix2(norm, 2)), op::F, x, y) where {F}
if __fast_scalar_indexing(x, y)
return sqrt(sum(@closure((xᵢyᵢ)->begin
xᵢ, yᵢ = xᵢyᵢ
return op(xᵢ, yᵢ)^2
end), zip(x, y)))
else
return sqrt(mapreduce(@closure((xᵢ, yᵢ)->(op(xᵢ, yᵢ)^2)), +, x, y))
end
end
@inline __norm_op(norm::N, op::F, x, y) where {N, F} = norm(op.(x, y))
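# e.g. `__norm_op(Base.Fix2(norm, 2), -, x, y)` computes `norm(x .- y, 2)` without
# materializing the difference whenever `x` and `y` support fast scalar indexing.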
function __nonlinearsolve_is_approx(x::Number, y::Number; atol = false,
rtol = atol > 0 ? false : sqrt(eps(promote_type(typeof(x), typeof(y)))))
return isapprox(x, y; atol, rtol)
end
function __nonlinearsolve_is_approx(x, y; atol = false,
rtol = atol > 0 ? false : sqrt(eps(promote_type(eltype(x), eltype(y)))))
length(x) != length(y) && return false
d = __maximum_abs(-, x, y)
return d ≤ max(atol, rtol * max(maximum(abs, x), maximum(abs, y)))
end
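# e.g. `__nonlinearsolve_is_approx([1.0, 2.0], [1.0, 2.0 + 1e-12])` is `true` under
# the default `rtol = sqrt(eps(Float64)) ≈ 1.5e-8`.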
@inline function __add_and_norm(::Nothing, x, y)
Base.depwarn("Not specifying the internal norm of termination conditions has been \
deprecated. Using inf-norm currently.",
:__add_and_norm)
return __maximum_abs(+, x, y)
end
@inline __add_and_norm(::typeof(Base.Fix1(maximum, abs)), x, y) = __maximum_abs(+, x, y)
@inline __add_and_norm(::typeof(Base.Fix2(norm, Inf)), x, y) = __maximum_abs(+, x, y)
@inline __add_and_norm(f::F, x, y) where {F} = __norm_op(f, +, x, y)
@inline function __apply_termination_internalnorm(::Nothing, u)
Base.depwarn("Not specifying the internal norm of termination conditions has been \
deprecated. Using inf-norm currently.",
:__apply_termination_internalnorm)
return __apply_termination_internalnorm(Base.Fix1(maximum, abs), u)
end
@inline __apply_termination_internalnorm(f::F, u) where {F} = f(u)
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 984 | using DiffEqBase
using SciMLBase
using Test
using Random
mutable struct TestDiffEqOperator{T} <: SciMLBase.AbstractDiffEqLinearOperator{T}
m::Int
n::Int
end
TestDiffEqOperator(A::AbstractMatrix{T}) where {T} = TestDiffEqOperator{T}(size(A)...)
Base.size(A::TestDiffEqOperator) = (A.m, A.n)
A = TestDiffEqOperator([0 0; 0 1])
B = TestDiffEqOperator([0 0 0; 0 1 0; 0 0 2])
@test_throws ErrorException AffineDiffEqOperator{Int64}((A, B), ())
@testset "DiffEq linear operators" begin
Random.seed!(0)
M = rand(2, 2)
A = DiffEqArrayOperator(M)
b = rand(2)
u = rand(2)
p = rand(1)
t = rand()
As_list = [(A,), (A, A)]#, (A, α)]
bs_list = [(), (b,), (2b,), (b, 2b)]
@testset "combinations of A's and b's" for As in As_list, bs in bs_list
L = AffineDiffEqOperator{Float64}(As, bs, zeros(2))
mysum = sum(A * u for A in As)
for b in bs
mysum .+= b
end
@test L(u, p, t) == mysum
end
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 1270 | using Test
using DiffEqBase
using Aqua
@testset "Aqua tests (performance)" begin
# This tests that we don't accidentally run into
# https://github.com/JuliaLang/julia/issues/29393
# Aqua.test_unbound_args(DiffEqBase) # fails
ua = Aqua.detect_unbound_args_recursively(DiffEqBase)
@test length(ua) == 0
# Uncomment for debugging:
# @show ua
# See: https://github.com/SciML/OrdinaryDiffEq.jl/issues/1750
# Test that we're not introducing method ambiguities across deps
ambs = Aqua.detect_ambiguities(DiffEqBase; recursive = true)
pkg_match(pkgname, pkdir::Nothing) = false
pkg_match(pkgname, pkdir::AbstractString) = occursin(pkgname, pkdir)
filter!(x -> pkg_match("DiffEqBase", pkgdir(last(x).module)), ambs)
# Uncomment for debugging:
# for method_ambiguity in ambs
# @show method_ambiguity
# end
@warn "Number of method ambiguities: $(length(ambs))"
@test length(ambs) ≤ 4
end
@testset "Aqua tests (additional)" begin
Aqua.test_undefined_exports(DiffEqBase)
Aqua.test_stale_deps(DiffEqBase)
Aqua.test_deps_compat(DiffEqBase)
Aqua.test_project_extras(DiffEqBase)
Aqua.test_project_toml_formatting(DiffEqBase)
# Aqua.test_piracy(DiffEqBase) # failing
end
nothing
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 1499 | using DiffEqBase, Random, LinearAlgebra, Test
using DiffEqBase: isconstant, @..
@testset "Identity Operators" begin
u = [1.0, 2.0]
du = zeros(2)
Id = DiffEqIdentity(u)
@test Id * u == u
mul!(du, Id, u)
@test du == u
@test size(Id) == (2, 2)
end
@testset "Scalar Operators" begin
u = [1.0, 2.0]
u2 = [1.0, 2.0]
α = DiffEqScalar(2.0)
@test convert(Number, α) == 2.0
@test α * u == 2.0u
lmul!(α, u2)
@test u2 == 2.0u
@test size(α) == ()
@test isconstant(α) == true
end
@testset "Array Operators" begin
Random.seed!(0)
A = rand(2, 2)
u = rand(2)
du = zeros(2)
L = DiffEqArrayOperator(A)
@test Matrix(L) == A
@test size(L) == size(A)
@test L * u == A * u
mul!(du, L, u)
@test du == A * u
@test lu(L) \ u ≈ A \ u
@test opnorm(L) == opnorm(A)
@test exp(L) == exp(A)
@test L[1, 2] == A[1, 2]
@test isconstant(L) == true
L .= 0
@test all(iszero, L)
tmp = rand(size(L)...)
@.. L = muladd(1, tmp, 0)
@test L.A == tmp
rand!(tmp)
@.. tmp = muladd(1, L, 0)
@test L.A == tmp
end
@testset "Mutable Array Operators" begin
Random.seed!(0)
A = rand(2, 2)
u = rand(2)
du = zeros(2)
update_func = (_A, u, p, t) -> _A .= t * A
Lt = DiffEqArrayOperator(zeros(2, 2); update_func = update_func)
t = 5.0
@test isconstant(Lt) == false
@test Lt(u, nothing, t) ≈ (t * A) * u
Lt(du, u, nothing, t)
@test du ≈ (t * A) * u
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 3699 | using DiffEqBase, Test
condition = function (u, t, integrator) # Event when event_f(u,t,k) == 0
t - 2.95
end
affect! = function (integrator)
integrator.u = integrator.u + 2
end
rootfind = true
save_positions = (true, true)
callback = ContinuousCallback(condition, affect!; save_positions = save_positions)
cbs = CallbackSet(nothing)
@test typeof(cbs.discrete_callbacks) <: Tuple
@test typeof(cbs.continuous_callbacks) <: Tuple
cbs = CallbackSet(callback, nothing)
@test typeof(cbs.discrete_callbacks) <: Tuple
@test typeof(cbs.continuous_callbacks) <: Tuple
cbs = CallbackSet(callback, CallbackSet())
@test typeof(cbs.discrete_callbacks) <: Tuple
@test typeof(cbs.continuous_callbacks) <: Tuple
condition = function (integrator)
true
end
affect! = function (integrator) end
save_positions = (true, false)
saving_callback = DiscreteCallback(condition, affect!; save_positions = save_positions)
cbs1 = CallbackSet(callback, saving_callback)
@test length(cbs1.discrete_callbacks) == 1
@test length(cbs1.continuous_callbacks) == 1
cbs2 = CallbackSet(callback)
@test length(cbs2.continuous_callbacks) == 1
@test length(cbs2.discrete_callbacks) == 0
cbs3 = CallbackSet(saving_callback)
@test length(cbs3.discrete_callbacks) == 1
@test length(cbs3.continuous_callbacks) == 0
cbs4 = CallbackSet()
@test length(cbs4.discrete_callbacks) == 0
@test length(cbs4.continuous_callbacks) == 0
cbs5 = CallbackSet(cbs1, cbs2)
@test length(cbs5.discrete_callbacks) == 1
@test length(cbs5.continuous_callbacks) == 2
# For the purposes of this test, create an empty integrator type and
# override find_callback_time, since we don't actually care about testing
# the find callback time aspect, just the inference failure
struct EmptyIntegrator
u::Vector{Float64}
end
function DiffEqBase.find_callback_time(integrator::EmptyIntegrator,
callback::ContinuousCallback, counter)
1.0 + counter, 0.9 + counter, true, counter
end
function DiffEqBase.find_callback_time(integrator::EmptyIntegrator,
callback::VectorContinuousCallback, counter)
1.0 + counter, 0.9 + counter, true, counter
end
find_first_integrator = EmptyIntegrator([1.0, 2.0])
vector_affect! = function (integrator, idx)
integrator.u = integrator.u + idx
end
cond_1(u, t, integrator) = t - 1.0
cond_2(u, t, integrator) = t - 1.1
cond_3(u, t, integrator) = t - 1.2
cond_4(u, t, integrator) = t - 1.3
cond_5(u, t, integrator) = t - 1.4
cond_6(u, t, integrator) = t - 1.5
cond_7(u, t, integrator) = t - 1.6
cond_8(u, t, integrator) = t - 1.7
cond_9(u, t, integrator) = t - 1.8
cond_10(u, t, integrator) = t - 1.9
# Set up a lot of callbacks so that the recursive inference failure happens
callbacks = (ContinuousCallback(cond_1, affect!),
ContinuousCallback(cond_2, affect!),
ContinuousCallback(cond_3, affect!),
ContinuousCallback(cond_4, affect!),
ContinuousCallback(cond_5, affect!),
ContinuousCallback(cond_6, affect!),
ContinuousCallback(cond_7, affect!),
ContinuousCallback(cond_8, affect!),
ContinuousCallback(cond_9, affect!),
ContinuousCallback(cond_10, affect!),
VectorContinuousCallback(cond_1, vector_affect!, 2),
VectorContinuousCallback(cond_2, vector_affect!, 2),
VectorContinuousCallback(cond_3, vector_affect!, 2),
VectorContinuousCallback(cond_4, vector_affect!, 2),
VectorContinuousCallback(cond_5, vector_affect!, 2),
VectorContinuousCallback(cond_6, vector_affect!, 2));
function test_find_first_callback(callbacks, int)
@timed(DiffEqBase.find_first_continuous_callback(int, callbacks...))
end
test_find_first_callback(callbacks, find_first_integrator);
@test test_find_first_callback(callbacks, find_first_integrator).bytes == 0
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 2683 | using DiffEqBase, Test, RecursiveArrayTools
macro iop_def(funcdef::Expr)
"""Define in- and out-of-place functions simultaneously.
    Called on an out-of-place function definition; defines two functions with the suffixes `_op` and `_ip`.
"""
@assert funcdef.head ∈ (:function, :(=)) && funcdef.args[1].head == :call
fname = funcdef.args[1].args[1]
opname = Symbol("$(fname)_op")
ipname = Symbol("$(fname)_ip")
opdef = deepcopy(funcdef)
opdef.args[1].args[1] = opname
return quote
$(esc(opdef))
$(esc(ipname))(du, args...) = du .= $(esc(opname))(args...)
end
end
function test_inplace(du, expected, f::Function, args...)
"""Test the in-place version of a function."""
fill!(du, NaN)
f(du, args...)
@test du == expected
end
# Allocate du automatically based on type of expected result
function test_inplace(expected, f::Function, args...)
test_inplace(similar(expected), expected, f, args...)
end
function test_iop(expected, f_op::Function, f_ip::Function, args...)
"""Test in- and out-of-place version of function both match expected value."""
@test f_op(args...) == expected
test_inplace(expected, f_ip, args...)
end
@iop_def f(u, p, t) = p[1] .* u
u = [1.0, 2.0, 3.0]
p = [2.0]
t = 0.0
@testset "ODEFunction with default recompile flag" begin
odefun = ODEFunction{false}(f_op)
odefun_ip = ODEFunction{true}(f_ip)
expected = f_op(u, p, t)
test_iop(expected, odefun, odefun_ip, u, p, t)
end
@testset "ODEFunction with recompile flag: $rflag" for rflag in (true, false)
odefun = ODEFunction{false, rflag}(f_op)
odefun_ip = ODEFunction{true, rflag}(f_ip)
expected = f_op(u, p, t)
test_iop(expected, odefun, odefun_ip, u, p, t)
end
# SplitFunction
@iop_def f2(u, p, t) = u .^ 2
sfun = SplitFunction{false}(f_op, f2_op)
sfun_ip = SplitFunction{true}(f_ip, f2_ip; _func_cache = similar(u))
expected = f_op(u, p, t) + f2_op(u, p, t)
test_iop(expected, sfun, sfun_ip, u, p, t)
# DynamicalODEFunction
@iop_def dode_f1(v, u, p, t) = -u
@iop_def dode_f2(v, u, p, t) = p[1] .* v
dodefun = DynamicalODEFunction{false}(dode_f1_op, dode_f2_op)
dodefun_ip = DynamicalODEFunction{true}(dode_f1_ip, dode_f2_ip)
v = [4.0, 5.0, 6.0]
expected = ArrayPartition(dode_f1_op(v, u, p, t), dode_f2_op(v, u, p, t))
test_iop(expected, dodefun, dodefun_ip, ArrayPartition(v, u), p, t)
# DiscreteFunction
dfun = DiscreteFunction{false}(f_op)
dfun_ip = DiscreteFunction{true}(f_ip)
test_iop(f_op(u, p, t), dfun, dfun_ip, u, p, t)
# Type stability
f_analytic(u, p, t) = u
jac = (u, p, t) -> 1
@inferred ODEFunction{false}(f_op, jac = jac)
@inferred DiscreteFunction{false}(f_op, analytic = f_analytic)
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 82 | using DiffEqBase
using Test
@test DiffEqBase.undefined_exports(DiffEqBase) == []
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 454 | using DiffEqBase: fastlog2, fastpow
using Test
@testset "Fast log2" begin
for x in 0.001:0.001:1.2 # (0, 1+something] is the domain which a controller uses
@test log2(x)≈fastlog2(Float32(x)) atol=1e-3
end
end
@testset "Fast pow" begin
@test fastpow(1, 1) isa Float64
@test fastpow(1.0, 1.0) isa Float64
errors = [abs(^(x, y) - fastpow(x, y)) for x in 0.001:0.001:1, y in 0.08:0.001:0.5]
@test maximum(errors) < 1e-4
end
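# Illustrative spot check (hypothetical addition; the point (0.5, 0.3) lies
# inside the domain swept above, so the same 1e-4 bound applies):
@testset "Fast pow spot check" begin
    @test isapprox(fastpow(0.5, 0.3), 0.5^0.3; atol = 1e-4)
end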
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 13412 | using DiffEqBase, ForwardDiff, Test, InteractiveUtils
using ReverseDiff, SciMLStructures
using Plots
u0 = 2.0
p = 2.0
t0 = 1.0
@test DiffEqBase.promote_u0(u0, p, t0) isa Float64
@test DiffEqBase.promote_u0(u0, p, t0) == 2.0
@test DiffEqBase.promote_u0(cis(u0), p, t0) isa ComplexF64
@test DiffEqBase.promote_u0(cis(u0), p, t0) == cis(2.0)
struct MyStruct{T, T2} <: Number
x::T
y::T2
end
struct MyStruct2{T, T2}
x::T
y::T2
MyStruct2(x) = new{typeof(x), Any}(x)
end
struct MyStruct3{T, T2}
x::T
y::T2
MyStruct3(x) = new{typeof(x), Float64}(x)
end
module Mod end
p_possibilities = [ForwardDiff.Dual(2.0), (ForwardDiff.Dual(2.0), 2.0),
[ForwardDiff.Dual(2.0)], ([ForwardDiff.Dual(2.0)], 2.0),
(2.0, ForwardDiff.Dual(2.0)), (; x = 2.0, y = ForwardDiff.Dual(2.0)),
(; x = 2.0, y = [ForwardDiff.Dual(2.0)]), (; x = 2.0, y = [[ForwardDiff.Dual(2.0)]]),
Set([2.0, ForwardDiff.Dual(2.0)]), (SciMLBase.NullParameters(), ForwardDiff.Dual(2.0)),
((), ForwardDiff.Dual(2.0)), ForwardDiff.Dual{Nothing}(ForwardDiff.Dual{MyStruct}(2.0)),
(plot(), ForwardDiff.Dual(2.0)), [(1.0, ForwardDiff.Dual(1.0, (1.0,)))]
]
for p in p_possibilities
@show p
@test DiffEqBase.anyeltypedual(p) <: ForwardDiff.Dual
local u0 = 2.0
@test DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual
@test DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}}
u0 = ForwardDiff.Dual(2.0)
@test DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual
@test DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}}
@inferred DiffEqBase.anyeltypedual(p)
end
higher_order_p_possibilities = [ForwardDiff.Dual{Nothing}(ForwardDiff.Dual{MyStruct}(2.0)),
(ForwardDiff.Dual{Nothing}(ForwardDiff.Dual{MyStruct}(2.0)),
SciMLBase.NullParameters()),
(ForwardDiff.Dual{Nothing}(ForwardDiff.Dual{MyStruct}(2.0)),
ForwardDiff.Dual{Nothing}(2.0)),
(ForwardDiff.Dual{Nothing}(2.0),
ForwardDiff.Dual{Nothing}(ForwardDiff.Dual{MyStruct}(2.0)))
]
for p in higher_order_p_possibilities
@show p
@test DiffEqBase.anyeltypedual(p) <: ForwardDiff.Dual
@test DiffEqBase.anyeltypedual(p) <:
ForwardDiff.Dual{Nothing, ForwardDiff.Dual{MyStruct, Float64, 0}, 0}
local u0 = 2.0
@test DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual
@test DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}}
u0 = ForwardDiff.Dual(2.0)
@test DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual
@test DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}}
@inferred DiffEqBase.anyeltypedual(p)
end
p_possibilities17 = [
MyStruct(2.0, ForwardDiff.Dual(2.0)), [MyStruct(2.0, ForwardDiff.Dual(2.0))],
[MyStruct(2.0, [2.0, ForwardDiff.Dual(2.0)])],
[MyStruct(2.0, (2.0, ForwardDiff.Dual(2.0)))],
((;), ForwardDiff.Dual(2.0)), MyStruct3(ForwardDiff.Dual(2.0)),
(Mod, ForwardDiff.Dual(2.0)), (() -> 2.0, ForwardDiff.Dual(2.0)),
(Base.pointer([2.0]), ForwardDiff.Dual(2.0))
]
push!(p_possibilities17, Returns((a = 2, b = 1.3, c = ForwardDiff.Dual(2.0f0))))
for p in p_possibilities17
@show p
@test DiffEqBase.anyeltypedual(p) <: ForwardDiff.Dual
local u0 = 2.0
@test DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual
@test DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}}
u0 = ForwardDiff.Dual(2.0)
@test DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual
@test DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}}
if VERSION >= v"1.7"
# v1.6 does not infer `getproperty` mapping
@inferred DiffEqBase.anyeltypedual(p)
ci = InteractiveUtils.@code_typed DiffEqBase.anyeltypedual(p)
@show filter(!=(Expr(:code_coverage_effect)), ci.first.code)
#@test count(x -> (x != (Expr(:code_coverage_effect))) &&
# (x != GlobalRef(DiffEqBase, :Any)), ci.first.code) == 1
end
end
p_possibilities_uninferred = [
Dict(:x => 2.0, :y => ForwardDiff.Dual(2.0)),
Dict(:x => 2.0, :y => [ForwardDiff.Dual(2.0)]),
Dict(:x => 2.0, :y => [(; x = (ForwardDiff.Dual(2.0), 2.0), y = 2.0)]),
Dict(:x => 2.0, :y => [(; x = [MyStruct(2.0, [2.0, ForwardDiff.Dual(2.0)])], y = 2.0)]),
[MyStruct("2", [2.0, ForwardDiff.Dual(2.0)])],
Dict(:x => [MyStruct("2", [2.0, MyStruct(ForwardDiff.Dual(2.0), 2.0)])],
:y => ForwardDiff.Dual{MyStruct}(2.0)),
((Dict(:x => nothing)), ForwardDiff.Dual(2.0)),
MyStruct2(ForwardDiff.Dual(2.0)),
[MyStruct2(ForwardDiff.Dual(2.0)), 2.0],
# Vectors of non-number types won't infer
[MyStruct(2.0, ForwardDiff.Dual(2.0))],
(; x = 2.0, y = [[MyStruct3(ForwardDiff.Dual(2.0))]]),
(; x = Vector{Float64}(undef, 2), y = [[MyStruct3(ForwardDiff.Dual(2.0))]]),
(; x = Matrix{Any}(undef, 2, 2), y = [[MyStruct3(ForwardDiff.Dual(2.0))]])
]
for p in p_possibilities_uninferred
@show p
@test DiffEqBase.anyeltypedual(p) <: ForwardDiff.Dual
local u0 = 2.0
@test DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual
@test DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}}
u0 = ForwardDiff.Dual(2.0)
@test DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual
@test DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}}
end
p_possibilities_missed = [
Set([2.0, "s", ForwardDiff.Dual(2.0)]),
Set([2.0, ForwardDiff.Dual(2.0), SciMLBase.NullParameters()]),
Set([Matrix{Float64}(undef, 2, 2), ForwardDiff.Dual(2.0)])
]
for p in p_possibilities_missed
@show p
@test_broken DiffEqBase.anyeltypedual(p) <: ForwardDiff.Dual
local u0 = 2.0
@test_broken DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual
@test_broken DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}}
u0 = ForwardDiff.Dual(2.0)
@test DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual
@test DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}}
end
p_possibilities_notdual = [
(), (;), [2.0], [2.0, 2], [2.0, (2.0)], [2.0, MyStruct(2.0, 2.0f0)]
]
for p in p_possibilities_notdual
@show p
@test !(DiffEqBase.anyeltypedual(p) <: ForwardDiff.Dual)
local u0 = 2.0
@test !(DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual)
@test !(DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}})
u0 = ForwardDiff.Dual(2.0)
@test DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual
@test DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}}
@inferred DiffEqBase.anyeltypedual(p)
end
p_possibilities_notdual_uninferred = [
[],
# Undefs cause inference loss
[2.0, MyStruct3(2.0)], [2.0, MyStruct2(2.0)], [2.0, MyStruct2(2.0), []],
[Dict(:x => 2, "y" => 5), MyStruct2(2.0)],
# Dictionaries can have inference issues
Dict(:x => 2, :y => 5), Dict(:x => 2, "y" => 5)
]
# Also check circular references
# https://github.com/SciML/DiffEqBase.jl/issues/784
x = Any[[1.0, 2.0]]
push!(x, x)
push!(p_possibilities_notdual_uninferred, x)
struct X
x::Any
end
x = Any[[1.0, 2.0]]
push!(x, X(x))
push!(p_possibilities_notdual_uninferred, x)
mutable struct Y
x::Any
end
x = Y(1)
x.x = x
push!(p_possibilities_notdual_uninferred, x)
for p in p_possibilities_notdual_uninferred
@test !(DiffEqBase.anyeltypedual(p) <: ForwardDiff.Dual)
local u0 = 2.0
@test !(DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual)
@test !(DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}})
u0 = ForwardDiff.Dual(2.0)
@test DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual
@test DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}}
end
f(du, u, p, t) = du .= u
config = ForwardDiff.JacobianConfig(f, ones(5))
p_possibilities_configs = [
(config, config), (config, 2.0), config, (; x = config, y = 2.0)
]
for p in p_possibilities_configs
@show p
@test !(DiffEqBase.anyeltypedual(p) <: ForwardDiff.Dual)
local u0 = 2.0
@test !(DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual)
@test !(DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}})
u0 = ForwardDiff.Dual(2.0)
@test DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual
@test DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}}
@inferred DiffEqBase.anyeltypedual(p)
end
p_possibilities_configs_not_inferred = [
[2.0, (2.0,), config], [2.0, config, MyStruct(2.0, 2.0f0)]
]
for p in p_possibilities_configs_not_inferred
@show p
@test !(DiffEqBase.anyeltypedual(p) <: ForwardDiff.Dual)
local u0 = 2.0
@test !(DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual)
@test !(DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}})
u0 = ForwardDiff.Dual(2.0)
@test DiffEqBase.promote_u0(u0, p, t0) isa ForwardDiff.Dual
@test DiffEqBase.promote_u0([cis(u0)], p, t0) isa
AbstractArray{<:Complex{<:ForwardDiff.Dual}}
end
# use `getfield` on `Pairs`, see https://github.com/JuliaLang/julia/pull/39448
@test_nowarn DiffEqBase.DualEltypeChecker(pairs((;)), 0)(Val(:data))
# https://discourse.julialang.org/t/type-instability-with-differentialequations-jl-when-using-nested-structs/109764/5
struct Fit
m₁::Float64
c₁::Float64
m₂::Float64
c₂::Float64
function Fit()
m₁ = 1.595
c₁ = 3.438
m₂ = 1.075
c₂ = 3.484
new(m₁, c₁, m₂, c₂)
end
end
struct EOS
fit::Fit
function EOS()
fit = Fit()
new(fit)
end
end
p = EOS()
@test !(DiffEqBase.anyeltypedual(p) <: ForwardDiff.Dual)
@inferred DiffEqBase.anyeltypedual(p)
# Check methods used for prevention of Dual-detection when using
# DiffResults.DiffResult in a wrapper.
# https://github.com/SciML/DiffEqBase.jl/issues/1009
struct OutsideWrapper{T}
a::Float64
b::T
end
struct InsideWrapper{T, S}
du::T
dual_du::S
end
f(x) = 2 * x[1] + 3 * x[2]^2
xdual = ones(
ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}, 2)
x = [1.0, 1.0]
diffresult = ForwardDiff.DiffResults.GradientResult(x)
diffresult_dual = ForwardDiff.DiffResults.GradientResult(xdual)
iw = InsideWrapper(diffresult, diffresult_dual)
ow = OutsideWrapper(1.0, iw)
@test !(DiffEqBase.anyeltypedual(iw) <: ForwardDiff.Dual)
@test !(DiffEqBase.anyeltypedual(ow) <: ForwardDiff.Dual)
@inferred DiffEqBase.anyeltypedual(iw)
@inferred DiffEqBase.anyeltypedual(ow)
# Issue https://github.com/SciML/ModelingToolkit.jl/issues/2717
u0 = [1.0, 2.0, 3.0]
p = [1, 2]
t = ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1}(1.0)
@test DiffEqBase.promote_u0(u0, p, t) isa AbstractArray{<:ForwardDiff.Dual}
u0 = [1.0 + 1im, 2.0, 3.0]
@test DiffEqBase.promote_u0(u0, p, t) isa AbstractArray{<:Complex{<:ForwardDiff.Dual}}
# Issue https://github.com/SciML/NonlinearSolve.jl/issues/440
f(u, p, t) = [u[2], 1.5u[1]^2]
ode = ODEProblem(f, [0.0, 0.0], (0, 1))
@inferred DiffEqBase.anyeltypedual(ode)
ode = NonlinearProblem(f, [0.0, 0.0], (0, 1))
@inferred DiffEqBase.anyeltypedual(ode)
# Issue https://github.com/SciML/DiffEqBase.jl/issues/1021
f(u, p, t) = 1.01 * u
struct Foo{T}
sol::T
end
u0 = 1 / 2
tspan = (0.0, 1.0)
prob = ODEProblem{false}(f, u0, tspan)
foo = SciMLBase.build_solution(
prob, DiffEqBase.InternalEuler.FwdEulerAlg(), [u0, u0], [0.0, 1.0])
DiffEqBase.anyeltypedual((; x = foo))
DiffEqBase.anyeltypedual((; x = foo, y = prob.f))
@test DiffEqBase.anyeltypedual(ReverseDiff.track(ones(3))) == Any
@test DiffEqBase.anyeltypedual(typeof(ReverseDiff.track(ones(3)))) == Any
@test DiffEqBase.anyeltypedual(ReverseDiff.track(ones(ForwardDiff.Dual, 3))) ==
eltype(ones(ForwardDiff.Dual, 3))
@test DiffEqBase.anyeltypedual(typeof(ReverseDiff.track(ones(ForwardDiff.Dual, 3)))) ==
eltype(ones(ForwardDiff.Dual, 3))
struct FakeParameterObject{T}
tunables::T
end
SciMLStructures.isscimlstructure(::FakeParameterObject) = true
function SciMLStructures.canonicalize(::SciMLStructures.Tunable, f::FakeParameterObject)
f.tunables, x -> FakeParameterObject(x), true
end
@test DiffEqBase.promote_u0(
ones(3), FakeParameterObject(ReverseDiff.track(ones(3))), 0.0) isa
ReverseDiff.TrackedArray
@test DiffEqBase.promote_u0(1.0, FakeParameterObject(ReverseDiff.track(ones(3))), 0.0) isa
ReverseDiff.TrackedReal
@test DiffEqBase.promote_u0(
ones(3), FakeParameterObject(ReverseDiff.track(ones(ForwardDiff.Dual, 3))), 0.0) isa
ReverseDiff.TrackedArray{<:ForwardDiff.Dual}
@test DiffEqBase.promote_u0(
1.0, FakeParameterObject(ReverseDiff.track(ones(ForwardDiff.Dual, 3))), 0.0) isa
ReverseDiff.TrackedReal{<:ForwardDiff.Dual}
@test DiffEqBase.promote_u0(NaN, [NaN], 0.0) isa Float64
@test DiffEqBase.promote_u0([1.0], [NaN], 0.0) isa Vector{Float64}
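# Taken together: anyeltypedual walks arbitrarily nested containers and
# returns a ForwardDiff.Dual type when any eltype is dual, Any for tracked
# arrays of unknown eltype, and a non-Dual answer otherwise; promote_u0
# uses that verdict to pick the state eltype.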
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 2063 | using DiffEqBase, Test
using Distributions
@test DiffEqBase.promote_tspan((0.0, 1.0)) == (0.0, 1.0)
@test DiffEqBase.promote_tspan((0, 1.0)) == (0.0, 1.0)
@test DiffEqBase.promote_tspan(1.0) == (0.0, 1.0)
@test DiffEqBase.promote_tspan(nothing) == (nothing, nothing)
@test DiffEqBase.promote_tspan(Real[0, 1.0]) == (0.0, 1.0)
# https://github.com/SciML/OrdinaryDiffEq.jl/issues/1776
# promote_tspan(u0, p, tspan, prob, kwargs)
@test DiffEqBase.promote_tspan((0, 1)) == (0, 1)
@test DiffEqBase.promote_tspan(nothing, nothing, (0, 1), nothing, (dt = 1,)) == (0, 1)
@test DiffEqBase.promote_tspan(nothing, nothing, (0, 1), nothing, (dt = 1 / 2,)) ==
(0.0, 1.0)
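# i.e. an Int tspan survives dt = 1, but is promoted to Float64 once the
# supplied dt = 1/2 cannot be represented in the tspan's type.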
prob = ODEProblem((u, p, t) -> u, (p, t0) -> p[1], (p) -> (0.0, p[2]), (2.0, 1.0))
prob2 = DiffEqBase.get_concrete_problem(prob, true)
@test prob2.u0 == 2.0
@test prob2.tspan == (0.0, 1.0)
prob = ODEProblem((u, p, t) -> u, (p, t) -> Normal(p, 1), (0.0, 1.0), 1.0)
prob2 = DiffEqBase.get_concrete_problem(prob, true)
@test typeof(prob2.u0) == Float64
kwargs(; kw...) = kw
prob = ODEProblem((u, p, t) -> u, 1.0, nothing)
prob2 = DiffEqBase.get_concrete_problem(prob, true, tspan = (1.2, 3.4))
@test prob2.tspan === (1.2, 3.4)
prob = ODEProblem((u, p, t) -> u, nothing, nothing)
prob2 = DiffEqBase.get_concrete_problem(prob, true, u0 = 1.01, tspan = (1.2, 3.4))
@test prob2.u0 === 1.01
prob = ODEProblem((u, p, t) -> u, 1.0, (0, 1))
prob2 = DiffEqBase.get_concrete_problem(prob, true)
@test prob2.tspan == (0.0, 1.0)
prob = DDEProblem((u, h, p, t) -> -h(p, t - p[1]), (p, t0) -> p[2], (p, t) -> 0,
(p) -> (0.0, p[3]), (1.0, 2.0, 3.0); constant_lags = (p) -> [p[1]])
prob2 = DiffEqBase.get_concrete_problem(prob, true)
@test prob2.u0 == 2.0
@test prob2.tspan == (0.0, 3.0)
@test prob2.constant_lags == [1.0]
prob = SteadyStateProblem((u, p, t) -> u, [1.0, 2.0])
prob2 = DiffEqBase.get_concrete_problem(prob, true; u0 = [2.0, 3.0])
@test prob2.u0 == [2.0, 3.0]
prob3 = DiffEqBase.get_concrete_problem(prob, true; u0 = [1.0, 3.0], p = 3.0)
@test prob3.u0 == [1.0, 3.0]
@test prob3.p == 3.0
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 532 | using DiffEqBase, DiffEqBase.InternalEuler
# Try it
ff = ODEFunction((u, p, t) -> u,
jac = (u, p, t) -> 1.0,
analytic = (u0, p, t) -> u0 * exp(t))
dt = 0.01
prob = ODEProblem(ff, 1.0, (0.0, 1.0))
sol = solve(prob, InternalEuler.FwdEulerAlg(), tstops = 0:dt:1)
sol2 = solve(prob, InternalEuler.BwdEulerAlg(), tstops = 0:dt:1)
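# Sanity reference (illustrative): with dt = 0.01 both endpoints should sit
# within ~0.5% of the analytic u0 * exp(t), i.e. sol.u[end] ≈ 2.705 and
# sol2.u[end] ≈ 2.732 around exp(1) ≈ 2.718.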
#using Plots
#plot(sol)
#plot!(sol2)
#plot!(sol2, plot_analytic=true)
#using DiffEqDevTools
#dts = 1./2.^(8:-1:4)
#sim = test_convergence(dts,p2,BwdEulerAlg())
#@show sim.𝒪est[:final]
#plot(sim)
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 1373 | using DiffEqBase
using DiffEqBase: InternalFalsi, InternalITP, IntervalNonlinearProblem
using ForwardDiff
for Rootfinder in (InternalFalsi, InternalITP)
rf = Rootfinder()
# From SimpleNonlinearSolve
f = (u, p) -> u * u - p
tspan = (1.0, 20.0)
g = function (p)
probN = IntervalNonlinearProblem{false}(f, typeof(p).(tspan), p)
sol = solve(probN, rf)
return sol.u
end
for p in (1.0,) #1.1:0.1:100.0
@test g(p) ≈ sqrt(p)
#@test ForwardDiff.derivative(g, p) ≈ 1 / (2 * sqrt(p))
end
# https://github.com/SciML/DiffEqBase.jl/issues/916
inp = IntervalNonlinearProblem((t, p) -> min(-1.0 + 0.001427344607477125 * t, 1e-9),
(699.0079267259368, 700.6176418816023))
@test solve(inp, rf).u ≈ 700.6016590257979
# Flipped signs & reversed tspan test for bracketing algorithms
f1(u, p) = u * u - p
f2(u, p) = p - u * u
for p in 1:4
inp1 = IntervalNonlinearProblem(f1, (1.0, 2.0), p)
inp2 = IntervalNonlinearProblem(f2, (1.0, 2.0), p)
inp3 = IntervalNonlinearProblem(f1, (2.0, 1.0), p)
inp4 = IntervalNonlinearProblem(f2, (2.0, 1.0), p)
@test abs.(solve(inp1, rf).u) ≈ sqrt.(p)
@test abs.(solve(inp2, rf).u) ≈ sqrt.(p)
@test abs.(solve(inp3, rf).u) ≈ sqrt.(p)
@test abs.(solve(inp4, rf).u) ≈ sqrt.(p)
end
end
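# Note: both InternalFalsi and InternalITP are bracketing methods, so the
# tspan endpoints must straddle a sign change of f; the flipped-sign and
# reversed-tspan problems above cover all four bracket orientations.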
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 490 | using Test
using ForwardDiff: Dual, gradient, partials
using DiffEqBase: ODE_DEFAULT_NORM
const internalnorm = ODE_DEFAULT_NORM
val = rand(10)
par = rand(10)
u = Dual.(val, par)
reference(val, par) = sqrt((sum(abs2, val) + sum(abs2, par)) / (length(val) + length(par)))
dual_real = internalnorm(u, 1)
dual_dual = internalnorm(u, u[1])
@test reference(val, par) ≈ dual_real
@test reference(val, par) ≈ dual_dual
@test partials(dual_dual, 1) ≈ gradient(x -> internalnorm(x, x[1]), val)'par
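# These checks encode the convention: for Dual-valued u the default ODE norm
# is the RMS over values and partials alike, and differentiating the norm
# itself (the dual_dual case) matches the ForwardDiff gradient.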
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 1662 | using Test, RecursiveArrayTools, StaticArrays, ForwardDiff
using DiffEqBase: UNITLESS_ABS2, recursive_length, ODE_DEFAULT_NORM
@test recursive_length(1.0) == 1
n = UNITLESS_ABS2(3.0 + 4.0im)
@test n == 25.0
@test typeof(n) <: Real
@test ODE_DEFAULT_NORM(3.0 + 4.0im, 0.0) == 5.0
u1 = ones(3)
@test UNITLESS_ABS2(u1) == 3.0
@test recursive_length(u1) == 3
@test ODE_DEFAULT_NORM(u1, 0.0) == 1.0
u2 = [SA[1.0 1.0; 1.0 1.0] for i in 1:3]
@test UNITLESS_ABS2(u2) == 12.0
@test recursive_length(u2) == 12
@test ODE_DEFAULT_NORM(u2, 0.0) == 1.0
u3 = VectorOfArray([ones(5), ones(5)])
@test UNITLESS_ABS2(u3) == 10.0
@test recursive_length(u3) == 10
@test ODE_DEFAULT_NORM(u3, 0.0) == 1.0
u4 = ArrayPartition(u1, u2, u3)
@test UNITLESS_ABS2(u4) == 25.0
@test recursive_length(u4) == 25
@test ODE_DEFAULT_NORM(u4, 0.0) == 1.0
u5 = ArrayPartition(u4, u4)
@test UNITLESS_ABS2(u5) == 50.0
@test recursive_length(u5) == 50
@test ODE_DEFAULT_NORM(u5, 0.0) == 1.0
u6 = ArrayPartition(1.0, 1.0)
@test UNITLESS_ABS2(u6) == 2.0
@test recursive_length(u6) == 2
@test ODE_DEFAULT_NORM(u6, 0.0) == 1.0
u7 = ArrayPartition(u1, ones(0))
@test UNITLESS_ABS2(u7) == 3.0
@test recursive_length(u7) == 3
@test ODE_DEFAULT_NORM(u7, 0.0) == 1.0
@test ODE_DEFAULT_NORM(Float64[], 0.0) == 0.0
# https://github.com/SciML/DiffEqBase.jl/issues/1023
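# u8 nests two Dual tag levels; the norm must recurse through both and
# return a plain Float64 whether t is a Float64 or itself a Dual.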
u8 = ForwardDiff.Dual{:b}.(ForwardDiff.Dual{:a}.([1.0, 2.0, 3.0], true), true)
u8_ref = 1.2909944487358056
@test ODE_DEFAULT_NORM(u8, 4.0) isa Float64
@test ODE_DEFAULT_NORM(u8, 4.0) ≈ u8_ref
@test ODE_DEFAULT_NORM(u8, ForwardDiff.Dual{:b}(4.0, true)) isa Float64
@test ODE_DEFAULT_NORM(u8, ForwardDiff.Dual{:b}(4.0, true)) ≈ u8_ref
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 945 | using Test, RecursiveArrayTools, StaticArrays, SparseArrays
using DiffEqBase: NAN_CHECK
@test !NAN_CHECK(3.0 + 4.0im)
@test NAN_CHECK(NaN)
u1 = ones(3)
@test !NAN_CHECK(u1)
u1′ = copy(u1)
u1′[2] = NaN
@test NAN_CHECK(u1′)
u2 = [SA[1.0 1.0; 1.0 1.0] for i in 1:3]
@test !NAN_CHECK(u2)
u2′ = copy(u2)
u2′[2] = SA[1.0 NaN; 1.0 1.0]
@test NAN_CHECK(u2′)
u3 = VectorOfArray([ones(5), ones(5)])
@test !NAN_CHECK(u3)
u3′ = recursivecopy(u3)
u3′[3, 2] = NaN
@test NAN_CHECK(u3′)
u4 = ArrayPartition(u1, u2, u3)
@test !NAN_CHECK(u4)
u4_1 = ArrayPartition(u1′, u2, u3)
@test NAN_CHECK(u4_1)
u4_2 = ArrayPartition(u1, u2′, u3)
@test NAN_CHECK(u4_2)
u4_3 = ArrayPartition(u1, u2, u3′)
@test NAN_CHECK(u4_3)
@test !NAN_CHECK(ArrayPartition(u4, u4))
@test NAN_CHECK(ArrayPartition(u4, u4_1))
@test NAN_CHECK(ArrayPartition(u4, u4_2))
@test NAN_CHECK(ArrayPartition(u4, u4_3))
u5 = spzeros(1, 1)
@test !NAN_CHECK(u5)
u5[1, 1] = NaN
@test NAN_CHECK(u5)
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 2063 | using DiffEqBase.InternalEuler, SciMLBase, DiffEqBase, Test
# Here's the problem to solve
struct LorenzFunction <: Function
syms::Vector{Symbol}
end
function (::LorenzFunction)(u, p, t)
[10.0(u[2] - u[1]), u[1] * (28.0 - u[3]) - u[2], u[1] * u[2] - (8 / 3) * u[3]]
end
lorenz = LorenzFunction([:x, :y, :z])
u0 = [1.0, 5.0, 10.0]
tspan = (0.0, 100.0)
prob = ODEProblem(lorenz, u0, tspan)
dt = 0.1
sol = solve(prob, InternalEuler.FwdEulerAlg(), tstops = 0:dt:1)
syms = [:x, :y, :z]
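# interpret_vars normalizes every accepted vars spec into triples
# (plot_func, x_index, y_index), where index 0 stands for the independent
# variable and DEFAULT_PLOT_FUNC is filled in when no transform is given: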
@test SciMLBase.interpret_vars([(0, 1), (1, 3), (4, 5)], sol) == [
(SciMLBase.DEFAULT_PLOT_FUNC, 0, 1),
(SciMLBase.DEFAULT_PLOT_FUNC, 1, 3),
(SciMLBase.DEFAULT_PLOT_FUNC, 4, 5)
]
@test SciMLBase.interpret_vars([1, (1, 3), (4, 5)], sol) == [
(SciMLBase.DEFAULT_PLOT_FUNC, 0, 1),
(SciMLBase.DEFAULT_PLOT_FUNC, 1, 3),
(SciMLBase.DEFAULT_PLOT_FUNC, 4, 5)
]
@test SciMLBase.interpret_vars([1, 3, 4], sol) == [
(SciMLBase.DEFAULT_PLOT_FUNC, 0, 1),
(SciMLBase.DEFAULT_PLOT_FUNC, 0, 3),
(SciMLBase.DEFAULT_PLOT_FUNC, 0, 4)
]
@test SciMLBase.interpret_vars(([1, 2, 3], [4, 5, 6]), sol) == [
(SciMLBase.DEFAULT_PLOT_FUNC, 1, 4),
(SciMLBase.DEFAULT_PLOT_FUNC, 2, 5),
(SciMLBase.DEFAULT_PLOT_FUNC, 3, 6)
]
@test SciMLBase.interpret_vars((1, [2, 3, 4]), sol) == [
(SciMLBase.DEFAULT_PLOT_FUNC, 1, 2),
(SciMLBase.DEFAULT_PLOT_FUNC, 1, 3),
(SciMLBase.DEFAULT_PLOT_FUNC, 1, 4)
]
f(x, y) = (x + y, y)
@test SciMLBase.interpret_vars([(f, 0, 1), (1, 3), (4, 5)], sol) ==
[(f, 0, 1), (SciMLBase.DEFAULT_PLOT_FUNC, 1, 3), (SciMLBase.DEFAULT_PLOT_FUNC, 4, 5)]
@test SciMLBase.interpret_vars([1, (f, 1, 3), (4, 5)], sol) ==
[(SciMLBase.DEFAULT_PLOT_FUNC, 0, 1), (f, 1, 3), (SciMLBase.DEFAULT_PLOT_FUNC, 4, 5)]
@test SciMLBase.interpret_vars([1, (f, 0, 1), (1, 2)], sol) ==
[(SciMLBase.DEFAULT_PLOT_FUNC, 0, 1), (f, 0, 1), (SciMLBase.DEFAULT_PLOT_FUNC, 1, 2)]
@test SciMLBase.interpret_vars([(1, 2)], sol) ==
[(SciMLBase.DEFAULT_PLOT_FUNC, 1, 2)]
@test SciMLBase.interpret_vars((f, 1, 2), sol) == [(f, 1, 2)]
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 3897 | using DiffEqBase, Test
function f_lin(du, u, p, t)
du[1] = 0.2u[1] + p[1] * u[2]
du[2] = 0.2u[1] - p[2] * u[2]
end
p = (0.0, 1.0)
prob = LinearProblem(f_lin, ones(2))
prob = LinearProblem(rand(2, 2), ones(2))
function f_nonlin(du, u, p)
du[1] = 0.2u[1] + p[1] * u[2]
du[2] = 0.2u[1] - p[2] * u[2]
end
p = (0.0, 1.0)
prob = NonlinearProblem(f_nonlin, ones(2), p)
function f_quad(du, u, p)
du[1] = 0.2u[1] + p[1] * u[2]
du[2] = 0.2u[1] - p[2] * u[2]
end
p = (0.0, 1.0)
prob = IntegralProblem(f_quad, (zeros(2), ones(2)), p)
function f(du, u, p, t)
du[1] = 0.2u[1]
du[2] = 0.4u[2]
end
u0 = ones(2)
tspan = (0, 1.0)
prob = ODEProblem(f, u0, tspan)
@test typeof(prob.tspan) == Tuple{Float64, Float64}
prob = ODEProblem{true}(f, u0, tspan)
@test typeof(prob.tspan) == Tuple{Float64, Float64}
prob = ODEProblem(ODEFunction{true}(f), u0, tspan)
@test typeof(prob.tspan) == Tuple{Float64, Float64}
@test isinplace(prob) == true
prob = ODEProblem{false}(f, u0, tspan)
@test isinplace(prob) == false
@inferred ODEProblem{true}(f, u0, tspan)
@test_broken @inferred(ODEProblem(f, u0, tspan)) == ODEProblem(f, u0, tspan)
function f(ddu, du, u, p, t)
    ddu .= 2.0 .* u
end
u0 = ones(2)
v0 = ones(2)
tspan = (0, 1.0)
prob = SecondOrderODEProblem(f, u0, v0, tspan)
prob = SDEProblem((u, p, t) -> 1.01u, (u, p, t) -> 0.87u, 1 / 2, (0.0, 1.0))
function f(du, u, p, t)
du[1] = 0.2u[1]
du[2] = 0.4u[2]
end
function g(du, u, p, t)
du[1] = 0.2u[1]
du[2] = 0.4u[2]
end
u0 = ones(2)
tspan = (0, 1.0)
prob = SDEProblem(f, g, u0, tspan)
prob = SDEProblem{true}(f, g, u0, tspan)
@test_broken @inferred(SDEProblem(f, g, u0, tspan)) == SDEProblem(f, g, u0, tspan)
@inferred SDEProblem{true}(f, g, u0, tspan)
f_1delay = function (du, u, h, p, t)
    du[1] = -h(p, t - 1)[1]
end
prob = DDEProblem(f_1delay, ones(1), (p, t) -> zeros(1), (0.0, 10.0), constant_lags = ones(1))
prob = DDEProblem{true}(f_1delay, ones(1), (p, t) -> zeros(1), (0.0, 10.0),
    dependent_lags = ones(1))
@test_broken @inferred(DDEProblem(f_1delay, ones(1), (p, t) -> zeros(1), (0.0, 10.0),
    constant_lags = ones(1))) == DDEProblem(f_1delay, ones(1), (p, t) -> zeros(1), (0.0, 10.0),
    constant_lags = ones(1))
@inferred DDEProblem{true}(f_1delay, ones(1), (p, t) -> zeros(1), (0.0, 10.0),
    dependent_lags = ones(1))
function f(r, yp, y, p, tres)
r[1] = -0.04 * y[1] + 1.0e4 * y[2] * y[3]
r[2] = -r[1] - 3.0e7 * y[2] * y[2] - yp[2]
r[1] -= yp[1]
r[3] = y[1] + y[2] + y[3] - 1.0
end
u0 = [1.0, 0, 0]
du0 = [-0.04, 0.04, 0.0]
differential_vars = [true, true, false]
prob_dae_resrob = DAEProblem(f, du0, u0, (0.0, 100000.0))
prob_dae_resrob = DAEProblem{true}(f, du0, u0, (0.0, 100000.0))
@test_broken @inferred(DAEProblem(f, du0, u0, (0.0, 100000.0))) ==
DAEProblem(f, du0, u0, (0.0, 100000.0))
@inferred DAEProblem{true}(f, du0, u0, (0.0, 100000.0))
# Ensures uniform dimensionality of u0, du0, and differential_vars
@test_throws ArgumentError DAEProblem(f, du0, u0[1:(end - 1)], (0.0, 100000.0))
@test_throws ArgumentError DAEProblem(f, du0, u0, (0.0, 100000.0);
differential_vars = differential_vars[1:(end - 1)])
f(u, p, t, W) = 1.01u .+ 0.87u .* W
u0 = 1.00
tspan = (0.0, 1.0)
prob = RODEProblem(f, u0, tspan)
prob = RODEProblem{false}(f, u0, tspan)
@test_broken @inferred(RODEProblem(f, u0, tspan)) == RODEProblem(f, u0, tspan)
@inferred RODEProblem{false}(f, u0, tspan)
DiscreteProblem(ones(1), tspan)
f(u, p, t) = 0.5
DiscreteProblem{false}(f, ones(1), tspan)
@test_broken @inferred(DiscreteProblem(ones(1), tspan)) == DiscreteProblem(ones(1), tspan)
@inferred DiscreteProblem{false}(f, ones(1), tspan)
function f(du, u, p, t)
du[1] = 2 - 2u[1]
du[2] = u[1] - 4u[2]
end
u0 = zeros(2)
prob = SteadyStateProblem(f, u0)
@test_broken @inferred(SteadyStateProblem(f, u0)) == SteadyStateProblem(f, u0)
@test SteadyStateProblem(ODEProblem(f, u0, tspan, :param)).p == :param
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 3280 | using DiffEqBase
using SciMLBase: @add_kwonly, add_kwonly
using LinearAlgebra, Test
@add_kwonly function f(a, b; c = 3, d = 4)
(a, b, c, d)
end
@test f(1, 2) == (1, 2, 3, 4)
@test f(a = 1, b = 2) == (1, 2, 3, 4)
@test_throws ErrorException f()
@add_kwonly g(a, b; c = 3, d = 4) = (a, b, c, d)
@test g(1, 2) == (1, 2, 3, 4)
@test g(a = 1, b = 2) == (1, 2, 3, 4)
@add_kwonly h(; c = 3, d = 4) = (c, d)
@test h() == (3, 4)
@test_throws ErrorException add_kwonly(:(i(c = 3, d = 4) = (c, d)))
dprob = DiscreteProblem((u, p, t) -> 2u, 0.5, (0.0, 1.0))
@test remake(dprob) == dprob
@test remake(dprob; u0 = 1.0).u0 == 1.0
oprob = ODEProblem((u, p, t) -> 2u, 0.5, (0.0, 1.0))
@test_broken remake(oprob) == oprob # fails since ODEProblem is now a mutable struct, so == falls back to ===
@test remake(oprob; u0 = 1.0).u0 == 1.0
sprob = SDEProblem((u, p, t) -> 2u, (u, p, t) -> 2u, 0.5, (0.0, 1.0))
@test remake(sprob) == sprob
@test remake(sprob; u0 = 1.0).u0 == 1.0
daeprob = DAEProblem((du, u, p, t) -> du - 2u, 0.5, 0.5, (0.0, 1.0))
@test remake(daeprob) == daeprob
@test remake(daeprob; u0 = 1.0).u0 == 1.0
ddeprob = DDEProblem((du, u, h, p, t) -> -2u, 0.5, (p, t) -> 0.0, (0.0, 1.0))
@test remake(ddeprob) == ddeprob
@test remake(daeprob; u0 = 1.0).u0 == 1.0
function f(du, u, p, t)
du[1] = 0.2u[1]
du[2] = 0.4u[2]
end
u0 = ones(2)
tspan = (0, 1.0)
# Create a ODEProblem and test remake:
prob1 = SplitODEProblem(f, f, u0, tspan, Dict(), callback = nothing)
prob2 = @inferred remake(prob1; u0 = prob1.u0 .+ 1)
@test prob1.f === prob2.f
@test prob1.p === prob2.p
@test prob1.u0 .+ 1 ≈ prob2.u0
@test prob1.tspan == prob2.tspan
@test prob1.kwargs[:callback] === prob2.kwargs[:callback]
@test prob1.problem_type === prob2.problem_type
prob2 = @inferred remake(prob1; u0 = prob1.u0 .+ 1, callback = :test)
@test prob2.kwargs[:callback] == :test
# Test remake with SplitFunction:
prob1 = SplitODEProblem((u, p, t) -> u / 2, (u, p, t) -> 2u, 1.0, (0.0, 1.0))
prob2 = remake(prob1; # prob1 is a ODEProblem
f = remake(prob1.f; # prob1.f is a SplitFunction
f2 = (u, p, t) -> 3u))
# Test remake with NoiseProblem (a struct w/o isinplace type parameter):
struct DummyNoiseProcess <: SciMLBase.AbstractNoiseProcess{Int, 1, Nothing, true}
dummy::Any
end
tspan1 = (0.0, 1.0)
tspan2 = (0.0, 2.0)
noise1 = NoiseProblem(DummyNoiseProcess(Dict()), tspan1);
noise2 = remake(noise1; tspan = tspan2);
@test noise1.noise === noise2.noise
@test noise1.tspan == tspan1
@test noise2.tspan == tspan2
@test noise1.tspan != noise2.tspan
# Test remake with TwoPointBVPFunction (manually defined):
f1 = SciMLBase.TwoPointBVPFunction((u, p, t) -> 1, ((u_a, p) -> 2, (u_b, p) -> 2))
@test_broken f2 = remake(f1; bc = ((u_a, p) -> 3, (u_b, p) -> 4))
@test_broken f1.bc() == 1
@test_broken f2.bc() == 2
# Testing remake for no recompile
u0 = [0; 2.0]
tspan = (0.0, 6.3)
prob = ODEProblem{true, SciMLBase.FunctionWrapperSpecialize}((du, u, p, t) -> 2u, u0, tspan)
prob2 = remake(prob; u0 = [1; 2])
@test prob2.u0 == [1; 2]
@test prob2.f.f isa SciMLBase.FunctionWrappersWrappers.FunctionWrappersWrapper
prob2 = remake(prob; p = (1, 2))
@test remake(prob; p = (1, 2)).p == (1, 2)
@test prob2.f.f isa SciMLBase.FunctionWrappersWrappers.FunctionWrappersWrapper
SciMLBase.unwrapped_f(prob2.f)
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 4372 | using Pkg
using SafeTestsets
using Test
const GROUP = get(ENV, "GROUP", "All")
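# e.g. `GROUP=Core julia --project -e 'using Pkg; Pkg.test()'` (or setting
# ENV["GROUP"] before running this file) selects only the matching block below.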
const is_APPVEYOR = (Sys.iswindows() && haskey(ENV, "APPVEYOR"))
function activate_downstream_env()
Pkg.activate("downstream")
Pkg.develop(PackageSpec(path = dirname(@__DIR__)))
Pkg.instantiate()
end
function activate_gpu_env()
Pkg.activate("gpu")
Pkg.develop(PackageSpec(path = dirname(@__DIR__)))
Pkg.instantiate()
end
@time begin
if GROUP == "All" || GROUP == "Core"
@time @safetestset "Fast Power" include("fastpow.jl")
@time @safetestset "Callbacks" include("callbacks.jl")
@time @safetestset "Internal Rootfinders" include("internal_rootfinder.jl")
@time @safetestset "Plot Vars" include("plot_vars.jl")
@time @safetestset "Problem Creation Tests" include("problem_creation_tests.jl")
@time @safetestset "Affine differential equation operators" include("affine_operators_tests.jl")
@time @safetestset "Export tests" include("export_tests.jl")
@time @safetestset "Remake tests" include("remake_tests.jl")
@time @safetestset "High Level solve Interface" include("high_level_solve.jl")
@time @safetestset "DiffEqFunction tests" include("diffeqfunction_tests.jl")
@time @safetestset "Internal Euler" include("internal_euler_test.jl")
@time @safetestset "Basic Operators Interface" include("basic_operators_interface.jl")
@time @safetestset "Norm" include("norm.jl")
@time @safetestset "Utils" include("utils.jl")
@time @safetestset "ForwardDiff Dual Detection" include("forwarddiff_dual_detection.jl")
@time @safetestset "ODE default norm" include("ode_default_norm.jl")
@time @safetestset "ODE default unstable check" include("ode_default_unstable_check.jl")
@time @safetestset "Termination Conditions" include("termination_conditions.jl")
end
if !is_APPVEYOR && GROUP == "Downstream"
activate_downstream_env()
@time @safetestset "Kwarg Warnings" include("downstream/kwarg_warn.jl")
@time @safetestset "Solve Error Handling" include("downstream/solve_error_handling.jl")
@time @safetestset "Null DE Handling" include("downstream/null_de.jl")
@time @safetestset "StaticArrays + AD" include("downstream/static_arrays_ad.jl")
@time @safetestset "Unitful" include("downstream/unitful.jl")
@time @safetestset "Dual Detection Solution" include("downstream/dual_detection_solution.jl")
@time @safetestset "Null Parameters" include("downstream/null_params_test.jl")
@time @safetestset "Ensemble Simulations" include("downstream/ensemble.jl")
@time @safetestset "Ensemble Analysis" include("downstream/ensemble_analysis.jl")
@time @safetestset "Ensemble Thread Safety" include("downstream/ensemble_thread_safety.jl")
@time @safetestset "Inference Tests" include("downstream/inference.jl")
@time @safetestset "Table Inference Tests" include("downstream/tables.jl")
@time @safetestset "Default linsolve with structure" include("downstream/default_linsolve_structure.jl")
@time @safetestset "Callback Merging Tests" include("downstream/callback_merging.jl")
@time @safetestset "LabelledArrays Tests" include("downstream/labelledarrays.jl")
end
if !is_APPVEYOR && GROUP == "Downstream2"
activate_downstream_env()
@time @safetestset "Prob Kwargs" include("downstream/prob_kwargs.jl")
@time @safetestset "Unwrapping" include("downstream/unwrapping.jl")
@time @safetestset "Callback BigFloats" include("downstream/bigfloat_events.jl")
@time @safetestset "DE stats" include("downstream/stats_tests.jl")
@time @safetestset "Ensemble AD Tests" include("downstream/ensemble_ad.jl")
@time @safetestset "Community Callback Tests" include("downstream/community_callback_tests.jl")
@time @safetestset "AD via ode with complex numbers" include("downstream/complex_number_ad.jl")
@time @testset "Distributed Ensemble Tests" include("downstream/distributed_ensemble.jl")
end
if !is_APPVEYOR && GROUP == "GPU"
activate_gpu_env()
@time @safetestset "Simple GPU" include("gpu/simple_gpu.jl")
@time @safetestset "GPU Termination Conditions" include("gpu/termination_conditions.jl")
end
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 801 | using BenchmarkTools, DiffEqBase, LinearAlgebra, Test
du = rand(4)
u = rand(4)
uprev = rand(4)
const TERMINATION_CONDITIONS = [
RelTerminationMode(), NormTerminationMode(), RelNormTerminationMode(),
AbsTerminationMode(), AbsNormTerminationMode(), RelSafeTerminationMode(),
AbsSafeTerminationMode(), RelSafeBestTerminationMode(), AbsSafeBestTerminationMode()
]
@testset "Termination Conditions: Allocations" begin
@testset "Mode: $(tcond)" for tcond in TERMINATION_CONDITIONS
for nfn in (Base.Fix1(maximum, abs), Base.Fix2(norm, 2), Base.Fix2(norm, Inf))
tcond = DiffEqBase.set_termination_mode_internalnorm(tcond, nfn)
@test (@ballocated DiffEqBase.check_convergence($tcond, $du, $u, $uprev, 1e-3,
1e-3)) == 0
end
end
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 2019 | using Test
using DiffEqBase, ForwardDiff
using DiffEqBase: prob2dtmin, timedepentdtmin, _rate_prototype
using Unitful
using ForwardDiff: Dual, Tag
@testset "tspan2dtmin" begin
# we only need to test very rough equality since timestepping isn't science.
function approxoftype(a, b; rtol = 0.5)
return typeof(a) === typeof(b) && isapprox(a, b; rtol = rtol)
end
function tspan2dtmin(tspan; kwargs...)
prob2dtmin(ODEProblem((u, p, t) -> u, 1, tspan); kwargs...)
end
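# The expectations below encode the heuristic: dtmin tracks eps at the
# largest-magnitude finite endpoint, Int tspans yield an exact 0, and
# Rational tspans fall back to a power-of-two fraction.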
@test approxoftype(tspan2dtmin((10, 100.0)), eps(100.0))
@test approxoftype(tspan2dtmin((-10000.0, 100.0)), eps(10000.0))
@test tspan2dtmin((1, 2)) === 0
@test approxoftype(tspan2dtmin((1 // 10, 2 // 10)), 1 // 2^33)
@test approxoftype(tspan2dtmin((2 // 10, Inf)), eps(1.0))
@test approxoftype(tspan2dtmin((2 // 1, Inf)), eps(2.0))
@test approxoftype(tspan2dtmin((0, Inf)), eps(1.0))
@test approxoftype(tspan2dtmin((0.0, Inf)), eps(1.0))
@test approxoftype(tspan2dtmin((0.0, 1e-6)), eps(1e-6))
@test approxoftype(tspan2dtmin((1e6, 1e6 + 1)), eps(1e6))
@test_throws ArgumentError tspan2dtmin((Inf, 100.0))
@test approxoftype(tspan2dtmin((0.0f0, 1.0f5); use_end_time = false), eps(1.0f0))
@test approxoftype(timedepentdtmin(10.0f0, eps(1.0f0)), eps(10.0f0))
@test approxoftype(timedepentdtmin(10, eps(1.0f0)), eps(1.0f0))
end
@testset "prob2dtmin" begin
@test prob2dtmin((0.0, 10.0), 1.0, false) == eps(Float64)
@test prob2dtmin((0.0f0, 10.0f0), 1.0f0, false) == eps(Float32)
@test prob2dtmin((0.0, 10.0), ForwardDiff.Dual(1.0), false) == eps(Float64)
end
@testset "_rate_prototype" begin
@test _rate_prototype([1.0f0], 1.0, 1.0) isa Vector{Float32}
td = Dual{Tag{typeof(+), Float64}}(2.0, 1.0)
@test _rate_prototype([1.0f0], td, td) isa Vector{Float32}
xd = [Dual{Tag{typeof(+), Float32}}(2.0, 1.0)]
@test _rate_prototype(xd, 1.0, 1.0) isa typeof(xd)
@test _rate_prototype([u"1f0m"], u"1.0s", 1.0) isa typeof([u"1f0m/s"])
end
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 585 | using OrdinaryDiffEq
function lorenz!(du, u, p, t)
du[1] = 10.0 * (u[2] - u[1])
du[2] = u[1] * (28.0 - u[3]) - u[2]
du[3] = u[1] * u[2] - (8 / 3) * u[3]
end
u0 = BigFloat[1.0; 0.0; 0.0]
tspan = (big(0.0), big(100.0))
prob = ODEProblem(lorenz!, u0, tspan)
sol = solve(prob, Tsit5(), save_everystep = false)
x = sol.u[end]
import LinearAlgebra.norm
function condition(u, t, integrator)
norm(u - x) - 0.1
end
affect!(integrator) = terminate!(integrator)
cb = ContinuousCallback(condition, affect!)
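# Event detection must work in BigFloat precision: terminate once the state
# re-enters the 0.1-ball around the previously computed endpoint x.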
sol2 = solve(prob, Tsit5(), save_everystep = false, callback = cb)
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 283 | using OrdinaryDiffEq
# Auto callback merging
do_nothing = DiscreteCallback((u, t, integrator) -> true,
integrator -> nothing)
problem = ODEProblem((u, p, t) -> -u,
1.0, (0.0, 1.0),
callback = do_nothing)
solve(problem, Euler(),
dt = 0.1,
callback = do_nothing)
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 6485 | using OrdinaryDiffEq, DiffEqCallbacks, LinearAlgebra
# https://github.com/SciML/DiffEqBase.jl/issues/564 : Fixed
gravity = 9.8
stiffness = 500
equilibrium_length = 1
T = 5.0
f(u, p, t) = begin
x1, x2, dx1, dx2 = u
len = abs(x2 - x1)
spring_force = stiffness * (equilibrium_length - len)
ddx1 = -gravity - spring_force
ddx2 = -gravity + spring_force
if x1 <= 0
ddx1 = max(0, ddx1)
end
if x2 <= 0
ddx2 = max(0, ddx2)
end
[dx1, dx2, ddx1, ddx2]
end
sol = solve(ODEProblem(f, [5.0, 6.0, 0.0, 0.0], (0.0, T)),
# Euler(),
# dt=0.005,
Rosenbrock23(),
callback = ContinuousCallback((u, _, _) -> u[1],
(integrator) -> (integrator.u[1] = 0; integrator.u[3] = 0)),
# callback = ContinuousCallback((u, _, _) -> u[1], (integrator) -> (integrator.u[3] = 0)),
reltol = 1e-3,
abstol = 1e-3)
@show sol.stats
# https://github.com/SciML/DiffEqBase.jl/issues/553 : Floating point issue is resolved but some other error occurs
function model(du, u, p, t)
du[1] = 0.0
for i in 2:(length(du) - 1)
du[i] = p[i] * (u[i - 1] - u[i])
end
du[end] = p[end] * (p[1] * u[end - 1] - u[end])
return nothing
end
perror = [
1.0,
0.02222434508140991,
0.017030281542289794,
0.015917011145559996,
0.1608874463597176,
0.13128016561792297,
0.11056834258380167,
0.5222141958458832,
1.0711942201995688,
0.2672878398678257,
8.900058706990183,
0.010760065201065117,
0.016319181296867765,
2.2693845639611925,
0.2152216345154439,
0.029186712540925457,
0.21419429135100806,
0.029177617589788596,
0.03064986043089549,
0.023280222517122397,
6.931251277770224
]
y_max = 0.002604806609572015
u0 = [1, zeros(length(perror) - 1)...]
tspan = (0.0, 5000.0)
condition(u, t, i) = (t == 1.0)
affect!(i) = (i.u[1] = 0.0)
condition2(u, t, i) = u[end] - y_max / 2.0
t_half_1 = 0.0
affect2!(i) = (t_half_1 = i.t)
prob = ODEProblem(model, u0, tspan, perror)
sol = solve(prob,
Rosenbrock23();
callback = CallbackSet(PositiveDomain(),
DiscreteCallback(condition, affect!),
ContinuousCallback(condition2, affect2!, terminate!)),
tstops = [1.0],
force_dtmin = true)
# https://github.com/SciML/DiffEqBase.jl/issues/515 : Fixed
using StaticArrays
using MultiScaleArrays
t_last = 0.0
function attactor(du, u, p, t)
α, β = p
n = length(u.nodes)
return for k in 1:n
du.nodes[k] = zero(du.nodes[k])
for j in 1:n
if (k == j)
du.nodes[k] .+= [
u.nodes[k][3],
u.nodes[k][4],
-β * u.nodes[k][3],
-β * u.nodes[k][4]
]
else
du.nodes[k][3:4] .+= α * (u.nodes[j][1:2] - u.nodes[k][1:2])
end
end
end
end
struct Thingy{B} <: AbstractMultiScaleArrayLeaf{B}
values::Vector{B}
end
struct PhysicsLaw{T <: AbstractMultiScaleArray, B <: Number} <:
AbstractMultiScaleArrayHead{B}
nodes::Vector{T}
values::Vector{B}
end_idxs::Vector{Int}
end
Newton = construct(PhysicsLaw,
[
Thingy([-700.0, -350.0, 0.0, 0.0]),
Thingy([-550.0, -150.0, 0.0, 0.0]),
Thingy([-600.0, 15.0, 0.0, 10.0]),
Thingy([200.0, -200.0, 5.0, -5.0])
])
parameters = [1e-2, 0.06]
function condition(out, u, t, integrator)
i = 0
n = length(u.nodes)
for k in 1:n
for l in (k + 1):n
i += 1
out[i] = sum(abs2, u.nodes[k][1:2] .- u.nodes[l][1:2]) - 10000
end
end
end
function affect!(integrator, idx)
i = 0
u = integrator.u
n = length(u.nodes)
return for k in 1:n
for l in (k + 1):n
i += 1
if idx == i
x₁ = u.nodes[k][1:2]
v₁ = u.nodes[k][3:4]
x₂ = u.nodes[l][1:2]
v₂ = u.nodes[l][3:4]
# https://stackoverflow.com/a/35212639
v₁ = (v₁ -
2 / (1 + 1) *
(dot(v₁ - v₂, x₁ - x₂) / sum(abs2, x₁ - x₂) * (x₁ - x₂)))
v₂ = -(v₂ -
2 / (1 + 1) *
(dot(v₂ - v₁, x₂ - x₁) / sum(abs2, x₂ - x₁) * (x₂ - x₁)))
println("Collision handled.")
m = (x₁ + x₂) / 2
u.nodes[k][3:4] .= v₁
u.nodes[l][3:4] .= v₂
set_u!(integrator, u)
println(sqrt(sum(abs2, x₁ .- x₂)) - 100, ":", v₁ ./ v₂)
println(norm(v₁), ":", norm(v₂), ":", integrator.t, ":",
integrator.t - t_last)
global t_last = integrator.t
break
end
end
end
end
cback = VectorContinuousCallback(condition,
affect!,
(x -> Int(((x - 1) * x) / 2))(length(Newton.nodes)))
problemp = ODEProblem(attactor, Newton, (0.0, Inf), parameters)
world = init(problemp, AutoTsit5(Rosenbrock23()); save_everystep = false, callback = cback)
dt = 0.2
for i in 1:1000
step!(world, dt)
end
## https://github.com/SciML/OrdinaryDiffEq.jl/issues/1528
function f!(out, u, p, t)
out[1] = 0
out[2] = u[3]
out[3] = -1.0 * (u[2] - u[1])
end
u0 = [0, 0, 1.0]
function cond!(out, u, t, i)
out[1] = u[3]
nothing
end
function affect!(int, idx)
terminate!(int)
end
cb = VectorContinuousCallback(cond!, affect!, nothing, 1)
u0 = [0.0, 0.0, 1.0]
prob = ODEProblem(f!, u0, (0.0, 10.0); callback = cb)
soln = solve(prob, Tsit5())
@test soln.t[end] ≈ 4.712347213360699
odefun = ODEFunction((u, p, t) -> [u[2], u[2] - p]; mass_matrix = [1 0; 0 0])
callback = PresetTimeCallback(0.5, integ -> (integ.p = -integ.p))
prob = ODEProblem(odefun, [0.0, -1.0], (0.0, 1), 1; callback)
#test that reinit happens for both FSAL and non FSAL integrators
@testset "dae re-init" for alg in [FBDF(), Rodas5P()]
sol = solve(prob, alg)
# test that the callback flipping p caused u[2] to get flipped.
first_t = findfirst(isequal(0.5), sol.t)
@test sol.u[first_t][2] == -sol.u[first_t + 1][2]
end
daefun = DAEFunction((du, u, p, t) -> [du[1] - u[2], u[2] - p])
prob = DAEProblem(daefun, [0.0, 0.0], [0.0, -1.0], (0.0, 1), 1;
differential_vars = [true, false], callback)
sol = solve(prob, DFBDF())
# test that the callback flipping p caused u[2] to get flipped.
first_t = findfirst(isequal(0.5), sol.t)
@test sol.u[first_t][2] == -sol.u[first_t + 1][2]
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |
|
[
"MIT"
] | 6.157.0 | 8977ef8249b602e4cb46ddbaf3c51e6adc2958c7 | code | 2248 | using LinearAlgebra, OrdinaryDiffEq, Test
import ForwardDiff
# setup
pd = 3
## ode with complex numbers
H0 = rand(ComplexF64, pd, pd)
A = rand(ComplexF64, pd, pd)
function f!(du, u, p, t)
a, b, c = p
du .= (A * u) * (a * cos(b * t + c))
du .+= H0 * u
return nothing
end
## time span
tspan = (0.0, 1.0)
## initial state
u0 = hcat(normalize(rand(ComplexF64, pd)), normalize(rand(pd)))
## ode problem
prob0 = ODEProblem(f!, u0, tspan, rand(3); saveat = range(tspan..., length = 3),
reltol = 1e-6,
alg = Tsit5())
## final state cost
cost(u) = abs2(tr(first(u)'u[2])) - abs2(tr(first(u)'last(u)))
## real loss function via complex ode
function loss(p)
prob = remake(prob0; p)
sol = solve(prob)
cost(sol.u) + sum(p) / 10
end
## same problem via reals
### realify complex ode problem
function real_f(du, u, p, t)
complex_u = complex.(selectdim(u, 3, 1), selectdim(u, 3, 2))
complex_du = copy(complex_u)
prob0.f(complex_du, complex_u, p, t)
selectdim(du, 3, 1) .= real(complex_du)
selectdim(du, 3, 2) .= imag(complex_du)
return nothing
end
prob0_real = remake(prob0; f = real_f, u0 = cat(real(prob0.u0), imag(prob0.u0); dims = 3))
### real loss function via real ode
function loss_via_real(p)
prob = remake(prob0_real; p)
sol = solve(prob)
u = [complex.(selectdim(u, 3, 1), selectdim(u, 3, 2)) for u in sol.u]
cost(u) + sum(p) / 10
end
# assert
@assert eltype(last(solve(prob0).u)) <: Complex
@assert eltype(last(solve(prob0_real).u)) <: Real
function assert_fun()
p0 = rand(3)
isapprox(loss(p0), loss_via_real(p0); rtol = 1e-4)
end
@assert all([assert_fun() for _ in 1:(2^6)])
# test ad with ForwardDiff
function test_ad()
p0 = rand(3)
grad_real = ForwardDiff.gradient(loss_via_real, p0)
grad_complex = ForwardDiff.gradient(loss, p0)
any(isnan.(grad_complex)) &&
@warn "NaN detected in gradient using ode with complex numbers !!"
any(isnan.(grad_real)) && @warn "NaN detected in gradient using realified ode !!"
rel_err = norm(grad_complex - grad_real) / max(norm(grad_complex), norm(grad_real))
isapprox(grad_complex, grad_real; rtol = 1e-6) ? true : (@show rel_err; false)
end
@time @test all([test_ad() for _ in 1:(2^6)])
| DiffEqBase | https://github.com/SciML/DiffEqBase.jl.git |