licenses (sequence, 1–3 items) | version (string, 677 classes) | tree_hash (string, 40 chars) | path (string, 1 class) | type (string, 2 classes) | size (string, 2–8 chars) | text (string, 25–67.1M chars) | package_name (string, 2–41 chars) | repo (string, 33–86 chars) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 3503 |
"""
optsumj(os::OptSummary, j::Integer)
Return an `OptSummary` with the `j`th component of the parameter omitted.
`os.final` with its `j`th component omitted is used as the initial parameter.
"""
function optsumj(os::OptSummary, j::Integer)
return OptSummary(
deleteat!(copy(os.final), j),
deleteat!(copy(os.lowerbd), j),
os.optimizer,
)
end
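# Illustrative sketch (hypothetical values, mirroring the constructor call above):
# if `os.final == [0.5, 1.0, 0.25]` and `os.lowerbd == zeros(3)`, then
#
#   osj = optsumj(os, 2)
#   osj.initial == [0.5, 0.25]   # os.final with the 2nd component dropped
#   osj.lowerbd == [0.0, 0.0]
#
# i.e. the reduced problem starts from the full-model estimate.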
function profileobj!(
m::LinearMixedModel{T}, θ::AbstractVector{T}, opt::Opt, osj::OptSummary
) where {T}
isone(length(θ)) && return objective!(m, θ)
fmin, xmin, ret = NLopt.optimize(opt, copyto!(osj.final, osj.initial))
_check_nlopt_return(ret)
return fmin
end
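# Expository note: profiles are recorded on the signed-root scale
#
#   ζ(θⱼ) = sign(θⱼ - θ̂ⱼ) * sqrt(obj(θⱼ) - obj(θ̂))
#
# where `obj` is the profiled objective (-2 log-likelihood with the other
# components of θ re-optimized), so ζ² is the likelihood-ratio statistic.
# This is the quantity computed by the `ζ = sign(...) * sqrt(...)` expressions
# in `profileθj!` below.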
function profileθj!(
val::NamedTuple, sym::Symbol, tc::TableColumns{T}; threshold=4
) where {T}
(; m, fwd, rev) = val
optsum = m.optsum
(; final, fmin, lowerbd) = optsum
j = parsej(sym)
θ = copy(final)
lbj = lowerbd[j]
osj = optsum
opt = Opt(osj)
if length(θ) > 1 # set up the conditional optimization problem
notj = deleteat!(collect(axes(final, 1)), j)
osj = optsumj(optsum, j)
opt = Opt(osj) # create an NLopt optimizer object for the reduced problem
function obj(x, g)
isempty(g) ||
throw(ArgumentError("gradients are not evaluated by this objective"))
for i in eachindex(notj, x)
@inbounds θ[notj[i]] = x[i]
end
return objective!(m, θ)
end
NLopt.min_objective!(opt, obj)
end
pnm = (; p=sym)
ζold = zero(T)
tbl = [merge(pnm, mkrow!(tc, m, ζold))] # start with the row for ζ = 0
δj = inv(T(64))
θj = final[j]
θ[j] = θj - δj
while (abs(ζold) < threshold) && θ[j] ≥ lbj && length(tbl) < 100 # decreasing values of θ[j]
ζ = sign(θ[j] - θj) * sqrt(profileobj!(m, θ, opt, osj) - fmin)
push!(tbl, merge(pnm, mkrow!(tc, m, ζ)))
θ[j] == lbj && break
δj /= (4 * abs(ζ - ζold)) # take smaller steps when evaluating negative zeta
ζold = ζ
θ[j] = max(lbj, (θ[j] -= δj))
end
reverse!(tbl) # reorder the new part of the table by increasing ζ
sv = getproperty(sym).(tbl)
δj = if length(sv) > 3 # need to handle the case of convergence on the boundary
slope = (
Derivative(1) *
interpolate(sv, getproperty(:ζ).(tbl), BSplineOrder(4), Natural())
)(
last(sv)
)
inv(T(2) * slope) # approximate step for an increase of 0.5 in ζ
else
inv(T(32))
end
ζold = zero(T)
copyto!(θ, final)
θ[j] += δj
while (ζold < threshold) && (length(tbl) < 120)
fval = profileobj!(m, θ, opt, osj)
if fval < fmin
@warn "Negative difference ", fval - fmin, " for ", sym, " at ", θ[j]
ζ = zero(T)
else
ζ = sqrt(profileobj!(m, θ, opt, osj) - fmin)
end
push!(tbl, merge(pnm, mkrow!(tc, m, ζ)))
δj /= (2 * abs(ζ - ζold))
ζold = ζ
θ[j] += δj
end
append!(val.tbl, tbl)
updateL!(setθ!(m, final))
sv = getproperty(sym).(tbl)
ζv = getproperty(:ζ).(tbl)
fwd[sym] = interpolate(sv, ζv, BSplineOrder(4), Natural())
isnondecreasing(fwd[sym]) || @warn "Forward spline for $sym is not monotone."
rev[sym] = interpolate(ζv, sv, BSplineOrder(4), Natural())
isnondecreasing(rev[sym]) || @warn "Reverse spline for $sym is not monotone."
return val
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 5459 | """
TableColumns
A structure containing the column names for the numeric part of the profile table.
The struct also contains a Dict giving the column ranges for Symbols like `:σ` and `:β`.
Finally it contains a scratch vector used to accumulate the values in a row of the profile table.
!!! note
This is an internal structure used in [`MixedModelProfile`](@ref).
As such, it may change or disappear in a future release without being considered breaking.
"""
struct TableColumns{T<:AbstractFloat,N}
cnames::NTuple{N,Symbol}
positions::Dict{Symbol,UnitRange{Int}}
v::Vector{T}
corrpos::Vector{NTuple{3,Int}}
end
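# Illustrative layout (hypothetical example; assumes the sleepstudy model
# `reaction ~ 1 + days + (1 + days | subj)`): the constructor below would generate
#
#   cnames    == (:ζ, :β1, :β2, :σ, :σ1, :σ2, :ρ1, :θ1, :θ2, :θ3)
#   positions == Dict(:ζ => 1:1, :β => 2:3, :σ => 4:4, :σs => 5:6, :ρs => 7:7, :θ => 8:10)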
"""
_generatesyms(tag::Char, len::Integer)
Utility to generate a vector of Symbols of the form :<tag><index> from a tag and a length.
The indices are left-padded with zeros to allow lexicographic sorting.
"""
function _generatesyms(tag::AbstractString, len::Integer)
return Symbol.(string.(tag, lpad.(Base.OneTo(len), ndigits(len), '0')))
end
_generatesyms(tag::Char, len::Integer) = _generatesyms(string(tag), len)
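# For example (hypothetical REPL session):
#
#   julia> _generatesyms('θ', 12)
#   12-element Vector{Symbol}:
#    :θ01
#    ⋮
#    :θ12
#
# The zero-padding ensures that sorting the names agrees with the numeric order of the indices.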
function TableColumns(m::LinearMixedModel{T}) where {T}
nmvec = [:ζ]
positions = Dict(:ζ => 1:1)
lastpos = 1
sz = m.feterm.rank
append!(nmvec, _generatesyms('β', sz))
positions[:β] = (lastpos + 1):(lastpos + sz)
lastpos += sz
push!(nmvec, :σ)
lastpos += 1
positions[:σ] = lastpos:lastpos
sz = sum(t -> size(t.λ, 1), m.reterms)
append!(nmvec, _generatesyms('σ', sz))
positions[:σs] = (lastpos + 1):(lastpos + sz)
lastpos += sz
corrpos = NTuple{3,Int}[]
for (i, re) in enumerate(m.reterms)
(isa(re.λ, Diagonal) || isa(re, ReMat{T,1})) && continue
indm = indmat(re)
for j in axes(indm, 1)
rowj = view(indm, j, :)
for k in (j + 1):size(indm, 1)
if !iszero(dot(rowj, view(indm, k, :)))
push!(corrpos, (i, j, k))
end
end
end
end
sz = length(corrpos)
if sz > 0
append!(nmvec, _generatesyms('ρ', sz))
positions[:ρs] = (lastpos + 1):(lastpos + sz)
lastpos += sz
end
sz = length(m.θ)
append!(nmvec, _generatesyms('θ', sz))
positions[:θ] = (lastpos + 1):(lastpos + sz)
return TableColumns((nmvec...,), positions, zeros(T, length(nmvec)), corrpos)
end
function mkrow!(tc::TableColumns{T,N}, m::LinearMixedModel{T}, ζ::T) where {T,N}
(; cnames, positions, v, corrpos) = tc
v[1] = ζ
fixef!(view(v, positions[:β]), m)
v[first(positions[:σ])] = m.σ
σvals!(view(v, positions[:σs]), m)
getθ!(view(v, positions[:θ]), m) # must do this first to preserve a copy
if length(corrpos) > 0
ρvals!(view(v, positions[:ρs]), corrpos, m)
setθ!(m, view(v, positions[:θ]))
end
return NamedTuple{cnames,NTuple{N,T}}((v...,))
end
"""
parsej(sym::Symbol)
Return the index from symbol names like `:θ1`, `:θ01`, etc.
!!! note
This method is internal.
"""
function parsej(sym::Symbol)
symstr = string(sym) # convert Symbol to a String
return parse(Int, SubString(symstr, nextind(symstr, 1))) # drop first Unicode character and parse as Int
end
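# For example (hypothetical REPL session):
#
#   julia> parsej(:θ1), parsej(:θ01), parsej(:σ12)
#   (1, 1, 12)
#
# Only the first (possibly multi-byte Unicode) character is dropped, so the names
# must consist of a single-character tag followed by digits.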
#= # It appears that this method is not used
"""
σvals(m::LinearMixedModel)
Return a Tuple of the standard deviation estimates of the random effects
"""
function σvals(m::LinearMixedModel{T}) where {T}
(; σ, reterms) = m
isone(length(reterms)) && return σvals(only(reterms), σ)
return (collect(Iterators.flatten(σvals.(reterms, σ)))...,)
end
=#
function σvals!(v::AbstractVector{T}, m::LinearMixedModel{T}) where {T}
(; σ, reterms) = m
isone(length(reterms)) && return σvals!(v, only(reterms), σ)
ind = firstindex(v)
for t in m.reterms
S = size(t.λ, 1)
σvals!(view(v, ind:(ind + S - 1)), t, σ)
ind += S
end
return v
end
function ρvals!(
v::AbstractVector{T}, corrpos::Vector{NTuple{3,Int}}, m::LinearMixedModel{T}
) where {T}
reterms = m.reterms
lasti = 1
λ = first(reterms).λ
for r in eachrow(λ)
normalize!(r)
end
for (ii, pos) in enumerate(corrpos)
i, j, k = pos
if lasti ≠ i
λ = reterms[i].λ
for r in eachrow(λ)
normalize!(r)
end
lasti = i
end
v[ii] = dot(view(λ, j, :), view(λ, k, :))
end
return v
end
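# Expository note: because each row of λ has been normalized in place above, the
# correlation between components j and k of a random-effects term reduces to
#
#   ρ[j, k] = dot(λ[j, :], λ[k, :]) / (norm(λ[j, :]) * norm(λ[k, :]))
#           = dot(λ[j, :], λ[k, :])
#
# which is exactly what the loop computes.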
"""
_copy_away_from_lowerbd!(sink, source, bd; incr=0.01)
Replace `sink[i]` by `max(source[i], bd[i] + incr)`. When `bd[i] == -Inf` this simply copies `source[i]`.
"""
function _copy_away_from_lowerbd!(sink, source, bd; incr=0.01)
for i in eachindex(sink, source, bd)
@inbounds sink[i] = max(source[i], bd[i] + incr)
end
return sink
end
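# For example (hypothetical values):
#
#   _copy_away_from_lowerbd!(zeros(3), [0.0, 0.5, 1.0], [0.0, -Inf, 0.0])
#   # => [0.01, 0.5, 1.0]
#
# The component sitting on its bound is nudged to `bd[1] + 0.01`; the
# unconstrained (-Inf) and interior components are copied unchanged.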
#= # It appears that this method is not used
"""
stepsize(tbl::Vector{NamedTuple}, resp::Symbol, pred::Symbol; rev::Bool=false)
Return the stepsize from the last value of `tbl.pred` to increase `resp` by approximately 0.5
"""
function stepsize(tbl::Vector{<:NamedTuple}, resp::Symbol, pred::Symbol)
ntbl = length(tbl)
lm1tbl = tbl[ntbl - 1]
x1 = getproperty(lm1tbl, pred)
y1 = getproperty(lm1tbl, resp)
x2 = getproperty(last(tbl), pred)
y2 = getproperty(last(tbl), resp)
return (x2 - x1) / (2 * (y2 - y1))
end
=#
function isnondecreasing(spl::SplineInterpolation)
return all(≥(0), (Derivative(1) * spl).(spl.x))
end
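# Expository note: this checks the spline's first derivative only at the data
# points `spl.x`, a cheap necessary (though not strictly sufficient) condition
# for monotonicity, used to validate the forward and reverse profile splines.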
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 3680 |
"""
profilevc(m::LinearMixedModel{T}, val::T, rowj::AbstractVector{T}) where {T}
Profile an element of the variance components.
!!! note
This method is called by `profile` and currently considered internal.
As such, it may change or disappear in a future release without being considered breaking.
"""
function profilevc(m::LinearMixedModel{T}, val::T, rowj::AbstractVector{T}) where {T}
optsum = m.optsum
function obj(x, g)
isempty(g) || throw(ArgumentError("g must be empty"))
updateL!(setθ!(m, x))
optsum.sigma = val / norm(rowj)
return objective(m)
end
opt = Opt(optsum)
NLopt.min_objective!(opt, obj)
fmin, xmin, ret = NLopt.optimize!(opt, copyto!(optsum.final, optsum.initial))
_check_nlopt_return(ret)
return fmin, xmin
end
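# Note (an interpretation of the objective above, not part of the docstring):
# the standard deviation associated with row `rowj` of λ is σ * norm(rowj), so
# fixing it at `val` is achieved by constraining the residual standard deviation
# to σ = val / norm(rowj) while the remaining θ components are re-optimized.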
"""
profileσs!(val::NamedTuple, tc::TableColumns{T}; nzlb=1.0e-8) where {T}
Profile the variance components.
!!! note
This method is called by `profile` and currently considered internal.
As such, it may change or disappear in a future release without being considered breaking.
"""
function profileσs!(val::NamedTuple, tc::TableColumns{T}; nzlb=1.0e-8) where {T}
m = val.m
(; λ, σ, β, optsum, parmap, reterms) = m
isnothing(optsum.sigma) || throw(ArgumentError("Can't profile vc's when σ is fixed"))
(; initial, final, fmin, lowerbd) = optsum
lowerbd .+= T(nzlb) # lower bounds must be > 0 b/c θ's occur in denominators
saveinitial = copy(initial)
copyto!(initial, max.(final, lowerbd))
zetazero = mkrow!(tc, m, zero(T)) # parameter estimates
vcnms = filter(keys(first(val.tbl))) do sym
str = string(sym)
return startswith(str, 'σ') && (length(str) > 1)
end
ind = 0
for t in reterms
for r in eachrow(t.λ)
optsum.sigma = nothing # re-initialize the model
objective!(m, final)
ind += 1
sym = vcnms[ind]
gpsym = getproperty(sym) # extractor function
estimate = gpsym(zetazero)
pnm = (; p=sym)
tbl = [merge(pnm, zetazero)]
xtrms = extrema(gpsym, val.tbl)
lub = log(last(xtrms))
llb = log(max(first(xtrms), T(0.01) * lub))
for lx in LinRange(lub, llb, 15) # start at the upper bound where things are more stable
x = exp(lx)
obj, xmin = profilevc(m, x, r)
copyto!(initial, xmin)
zeta = sign(x - estimate) * sqrt(max(zero(T), obj - fmin))
push!(tbl, merge(pnm, mkrow!(tc, m, zeta)))
end
if iszero(first(xtrms)) && !iszero(estimate) # handle the case of lower bound of zero
zrows = filter(iszero ∘ gpsym, val.tbl)
isone(length(zrows)) ||
filter!(r -> iszero(getproperty(r, first(r))), zrows)
rr = only(zrows) # will error if zeros in sym column occur in unexpected places
push!(tbl, merge(pnm, rr[(collect(keys(rr))[2:end]...,)]))
end
sort!(tbl; by=gpsym)
append!(val.tbl, tbl)
ζcol = getproperty(:ζ).(tbl)
symcol = gpsym.(tbl)
val.fwd[sym] = interpolate(symcol, ζcol, BSplineOrder(4), Natural())
issorted(ζcol) &&
(val.rev[sym] = interpolate(ζcol, symcol, BSplineOrder(4), Natural()))
end
end
copyto!(final, initial)
copyto!(initial, saveinitial)
lowerbd .-= T(nzlb)
optsum.sigma = nothing
updateL!(setθ!(m, final))
return val
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 10582 | using DataFrames
using LinearAlgebra
using MixedModels
using Random
using SparseArrays
using StatsModels
using Test
using MixedModels: dataset, levels, modelcols, nlevs
const LMM = LinearMixedModel
@testset "scalarReMat" begin
ds = dataset("dyestuff")
f1 = @formula(yield ~ 1 + (1|batch))
y1, Xs1 = modelcols(apply_schema(f1, schema(ds), LMM), ds)
sf = Xs1[2]
psts = dataset("pastes")
f2 = @formula(strength ~ 1 + (1|batch/cask))
y2, Xs2 = modelcols(apply_schema(f2, schema(psts), LMM), psts)
sf1 = Xs2[2]
sf2 = Xs2[3]
@testset "size" begin
@test size(sf) == (30, 6)
@test size(sf,1) == 30
@test size(sf,2) == 6
@test size(sf,3) == 1
@test size(sf1) == (60, 10)
@test size(sf2) == (60, 30)
end
@testset "utilities" begin
@test levels(sf) == string.('A':'F')
@test refpool(sf) == levels(sf)
@test refarray(sf) == repeat(1:6, inner=5)
@test refvalue(sf, 3) == "C"
@test nlevs(sf) == 6
@test eltype(sf) == Float64
@test sparse(sf) == sparse(1:30, sf.refs, ones(30))
fsf = Matrix(sf)
@test size(fsf) == (30, 6)
@test count(!iszero, fsf) == 30
@test sort!(unique(fsf)) == [0.0, 1.0]
@test cond(sf) == 1.0
@test MixedModels.nθ(sf) == 1
@test MixedModels.getθ(sf) == ones(1)
@test MixedModels.getθ!(Vector{Float64}(undef, 1), sf) == ones(1)
@test lowerbd(sf) == zeros(1)
@test MixedModels.getθ(setθ!(sf, [0.5])) == [0.5]
MixedModels.unscaledre!(Vector{Float64}(undef, 30), sf)
@test_throws DimensionMismatch MixedModels.getθ!(Float64[], sf)
@test_throws DimensionMismatch setθ!(sf, ones(2))
end
@testset "products" begin
@test ones(30, 1)'sf == fill(5.0, (1, 6))
@test mul!(Array{Float64}(undef, (size(sf1, 2), size(sf2, 2))), sf1', sf2) == Array(sf1'sf2)
crp = sf'sf
@test isa(crp, Diagonal{Float64})
crp1 = copy(crp)
@test crp1 == crp
@test crp[2,6] == 0
@test crp[6,6] == 5
@test size(crp) == (6,6)
@test crp.diag == fill(5.,6)
rhs = y1'sf
@test rhs == reshape([7525.0,7640.0,7820.0,7490.0,8000.0,7350.0], (1, 6))
@test ldiv!(crp, copy(rhs)') == [1505.,1528.,1564.,1498.,1600.,1470.]
@test isa(sf1'sf1, Diagonal{Float64})
@test isa(sf2'sf2, Diagonal{Float64})
@test isa(sf2'sf1,SparseMatrixCSC{Float64})
@test MixedModels.lmulΛ!(sf', ones(6)) == fill(0.5, 6)
@test MixedModels.rmulΛ!(ones(6, 6), sf) == fill(0.5, (6, 6))
end
@testset "reweight!" begin
wts = rand(MersenneTwister(1234321), size(sf, 1))
@test isapprox(vec(MixedModels.reweight!(sf, wts).wtz), wts)
end
end
@testset "RandomEffectsTerm" begin
slp = dataset("sleepstudy")
contrasts = Dict{Symbol,Any}()
@testset "Detect same variable as blocking and experimental" begin
f = @formula(reaction ~ 1 + (1 + subj|subj))
@test_throws ArgumentError apply_schema(f, schema(f, slp, contrasts), LMM)
end
@testset "Detect both blocking and experimental variables" begin
# note that days is not in the fixed effects because we want to make sure
# that we're detecting all the variables in the random effects
f = @formula(reaction ~ 1 + (1 + days|subj))
form = apply_schema(f, schema(f, slp, contrasts), LMM)
@test StatsModels.termvars(form.rhs) == [:days, :subj]
end
@testset "Runtime construction of random effects terms" begin
# operator precedence and basic terms:
@test term(:a) | term(:b) isa RandomEffectsTerm
@test term(1) + term(:a) | term(:b) isa RandomEffectsTerm
@test term(1) + term(:a) + term(:a) & term(:c) | term(:b) isa RandomEffectsTerm
# sleep study data:
r, d, s, one = term.((:reaction, :days, :subj, 1))
f1 = @formula(reaction ~ 1 + (1 + days | subj))
f2 = r ~ one + (one + d | s)
@test f2.rhs[end] isa RandomEffectsTerm
ff1 = apply_schema(f1, schema(slp), LMM)
ff2 = apply_schema(f2, schema(slp), LMM)
# equality of RE terms not defined so check that they generate same modelcols
@test modelcols(ff1.rhs[end], slp) == modelcols(ff2.rhs[end], slp)
m1 = fit(LMM, f1, slp; progress=false)
m2 = fit(LMM, f2, slp; progress=false)
@test all(m1.λ .== m2.λ)
@test StatsModels.terms(f2.rhs[end]) == [one, d, s]
@test StatsModels.termvars(f2.rhs[end]) == [d.sym, s.sym]
end
@testset "Runtime construction of ZeroCorr" begin
r, d, s, one = term.((:reaction, :days, :subj, 1))
f1 = @formula(reaction ~ 1 + zerocorr(1 + days | subj))
f2 = r ~ one + zerocorr(one + d | s)
@test f2.rhs[end] isa MixedModels.ZeroCorr
ff1 = apply_schema(f1, schema(slp), LMM)
ff2 = apply_schema(f2, schema(slp), LMM)
# equality of RE terms not defined so check that they generate same modelcols
mc1 = modelcols(ff1.rhs[end], slp)
mc2 = modelcols(ff2.rhs[end], slp)
# test that zerocorr actually worked
@test mc1.inds == mc2.inds == [1, 4]
m1 = fit(LMM, f1, slp; progress=false)
m2 = fit(LMM, f2, slp; progress=false)
@test all(m1.λ .== m2.λ)
@test StatsModels.terms(f2.rhs[end]) == [one, d, s]
@test StatsModels.termvars(f2.rhs[end]) == [d.sym, s.sym]
end
@testset "ZeroCorr delegation" begin
r, d, s, one = term.((:reaction, :days, :subj, 1))
f = @formula(0 ~ 1 + days).rhs
zc = zerocorr(one + d | s)
@test f == zc.lhs
@test zc.rhs.sym == :subj
end
@testset "Amalgamation of ZeroCorr with other terms" begin
f = @formula(reaction ~ 1 + days + (1|subj) + zerocorr(days|subj))
m = LMM(f, dataset(:sleepstudy), contrasts = Dict(:days => DummyCoding()))
re = only(m.reterms)
@test length(re.cnames) == length(unique(re.cnames)) == 10
end
end
@testset "random effects term syntax" begin
dat = (y = rand(18),
g = string.(repeat('a':'f', inner=3)),
f = string.(repeat('A':'C', outer=6)))
@testset "fulldummy" begin
@test_throws ArgumentError fulldummy(1)
f = @formula(y ~ 1 + fulldummy(f))
f1 = apply_schema(f, schema(dat))
@test typeof(last(f1.rhs.terms)) <: FunctionTerm{typeof(fulldummy)}
@test_throws ArgumentError modelcols(f1, dat)
f2 = apply_schema(f, schema(dat), MixedModel)
@test typeof(last(f2.rhs.terms)) <: CategoricalTerm{<:StatsModels.FullDummyCoding}
@test modelcols(f2.rhs, dat)[1:3, :] == [1 1 0 0
1 0 1 0
1 0 0 1]
# implicit intercept
ff = apply_schema(@formula(y ~ 1 + (f | g)), schema(dat), MixedModel)
rem = modelcols(last(ff.rhs), dat)
@test size(rem) == (18, 18)
@test rem[1:3, 1:4] == [1 0 0 0
1 1 0 0
1 0 1 0]
# explicit intercept
ff = apply_schema(@formula(y ~ 1 + (1+f | g)), schema(dat), MixedModel)
rem = modelcols(last(ff.rhs), dat)
@test size(rem) == (18, 18)
@test rem[1:3, 1:4] == [1 0 0 0
1 1 0 0
1 0 1 0]
# explicit intercept + full dummy
ff = apply_schema(@formula(y ~ 1 + (1+fulldummy(f) | g)), schema(dat), MixedModel)
rem = modelcols(last(ff.rhs), dat)
@test size(rem) == (18, 24)
@test rem[1:3, 1:4] == [1 1 0 0
1 0 1 0
1 0 0 1]
# explicit dropped intercept (implicit full dummy)
ff = apply_schema(@formula(y ~ 1 + (0+f | g)), schema(dat), MixedModel)
rem = modelcols(last(ff.rhs), dat)
@test size(rem) == (18, 18)
@test rem[1:3, 1:4] == [1 0 0 0
0 1 0 0
0 0 1 0]
end
@testset "nesting" begin
ff = apply_schema(@formula(y ~ 1 + (1|g/f)), schema(dat), MixedModel)
@test modelcols(last(ff.rhs), dat) == float(Matrix(I, 18, 18))
# in fixed effects:
d2 = (a = rand(20), b = repeat([:X, :Y], outer=10), c = repeat([:S,:T],outer=10))
f2 = apply_schema(@formula(0 ~ 1 + b/a), schema(d2), MixedModel)
@test modelcols(f2.rhs, d2) == [ones(20) d2.b .== :Y (d2.b .== :X).*d2.a (d2.b .== :Y).*d2.a]
@test coefnames(f2.rhs) == ["(Intercept)", "b: Y", "b: X & a", "b: Y & a"]
# check promotion
f3 = apply_schema(@formula(0 ~ 0 + b/a), schema(d2), MixedModel)
@test modelcols(f3.rhs, d2) == [d2.b .== :X d2.b .== :Y (d2.b .== :X).*d2.a (d2.b .== :Y).*d2.a]
@test coefnames(f3.rhs) == ["b: X", "b: Y", "b: X & a", "b: Y & a"]
# errors for continuous grouping
@test_throws ArgumentError apply_schema(@formula(0 ~ 1 + a/b), schema(d2), MixedModel)
# errors for too much nesting
@test_throws ArgumentError apply_schema(@formula(0 ~ 1 + b/c/a), schema(d2), MixedModel)
# fitted model to test amalgamate and fnames, and equivalence with other formulations
psts = dataset("pastes")
m = fit(MixedModel, @formula(strength ~ 1 + (1|batch/cask)), psts; progress=false)
m2 = fit(MixedModel, @formula(strength ~ 1 + (1|batch) + (1|batch&cask)), psts; progress=false)
m2r = fit(MixedModel, term(:strength) ~ term(1) + (term(1)|term(:batch)) + (term(1)|term(:batch)&term(:cask)), psts; progress=false)
@test fnames(m) == fnames(m2) == fnames(m2r) == (Symbol("batch & cask"), :batch)
@test coefnames(first(m.reterms)) == ["(Intercept)"]
@test m.λ == m2.λ == m2r.λ
@test deviance(m) == deviance(m2) == deviance(m2r)
end
@testset "multiple terms with same grouping" begin
dat = MixedModels.dataset(:kb07)
sch = schema(dat)
f1 = @formula(rt_trunc ~ 1 + (1 + prec + load | spkr))
ff1 = apply_schema(f1, sch, MixedModel)
retrm = last(ff1.rhs)
@test last(retrm.lhs.terms).contrasts.contrasts isa DummyCoding
f2 = @formula(rt_trunc ~ 1 + (1 + prec | spkr) + (0 + load | spkr))
ff2 = apply_schema(f2, sch, MixedModel)
retrm2 = last(ff2.rhs)
@test last(retrm2.lhs.terms).contrasts.contrasts isa DummyCoding
end
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 2996 | using LinearAlgebra
using MixedModels
using Random
using SparseArrays
using StatsModels
using Test
const LMM = LinearMixedModel
@testset "UBlk" begin
ex22 = UniformBlockDiagonal(reshape(Vector(1.0:12.0), (2, 2, 3)))
Lblk = UniformBlockDiagonal(fill(0., (2,2,3)))
ds = (Y = rand(12), A = repeat(['N','Y'], outer=6), G = repeat('a':'c', inner=4),
H = repeat('A':'B', outer=6), U = repeat([-1,0,1], inner=2, outer=2))
sch = schema(ds, Dict(:A=>EffectsCoding()))
vf1 = modelcols(apply_schema(@formula(Y ~ 1 + A + (1+A|G)), sch, LMM), ds)[2][2]
vf2 = modelcols(apply_schema(@formula(Y ~ 1 + U + (1+U|H)), sch, LMM), ds)[2][2]
prd = vf2'vf1
@testset "size" begin
@test size(ex22) == (6, 6)
@test size(ex22, 1) == 6
@test size(ex22, 2) == 6
@test size(ex22.data) == (2, 2, 3)
# @test length(ex22.facevec) == 3
@test size(vf1) == (12, 6)
@test size(vf2) == (12, 4)
@test size(prd) == (4, 6)
end
@testset "elements" begin
@test ex22[1, 1] == 1
@test ex22[2, 1] == 2
@test ex22[3, 1] == 0
@test ex22[2, 2] == 4
@test ex22[3, 3] == 5
@test ex22[:, 3] == [0,0,5,6,0,0]
@test ex22[5, 6] == 11
end
@testset "facevec" begin
@test view(ex22.data, :, :, 3) == reshape(9:12, (2,2))
end
@testset "copyscaleinflate" begin
MixedModels.copyscaleinflate!(Lblk, ex22, vf1)
@test view(Lblk.data, :, :, 1) == [2. 3.; 2. 5.]
setθ!(vf1, [1.,1.,1.])
Λ = vf1.λ
MixedModels.copyscaleinflate!(Lblk, ex22, vf1)
target = Λ'view(ex22.data, :, :, 1)*Λ + I
@test view(Lblk.data, :, :, 1) == target
end
@testset "updateL" begin
@test ones(2, 2) == MixedModels.rankUpdate!(Hermitian(zeros(2, 2)), ones(2), 1., 1.)
d3 = MixedModels.dataset(:d3)
sch = schema(d3)
vf1 = modelcols(apply_schema(@formula(y ~ 1 + u + (1+u|g)), sch, LMM), d3)[2][2]
vf2 = modelcols(apply_schema(@formula(y ~ 1 + u + (1+u|h)), sch, LMM), d3)[2][2]
@test vf1.λ == LowerTriangular(Matrix(I, 2, 2))
setθ!(vf2, [1.75, 0.0, 1.0])
A11 = vf1'vf1
L11 = MixedModels.cholUnblocked!(MixedModels.copyscaleinflate!(UniformBlockDiagonal(fill(0., size(A11.data))), A11, vf1), Val{:L})
L21 = vf2'vf1
@test isa(L21, BlockedSparse)
@test L21[1,1] == 30.0
@test size(L21) == (344, 9452)
@test size(L21, 1) == 344
MixedModels.lmulΛ!(vf2', MixedModels.rmulΛ!(L21, vf1))
@test size(Matrix(L21)) == size(sparse(L21))
# L21cb1 = copy(L21.colblocks[1])
# @test L21cb1 == Vf2.Λ * A21cb1 * Vf1.Λ
# rdiv!(L21, adjoint(LowerTriangular(L11)))
# @test_broken L21.colblocks[1] == rdiv!(L21cb1, adjoint(LowerTriangular(L11.facevec[1])))
A22 = vf2'vf2
L22 = MixedModels.copyscaleinflate!(UniformBlockDiagonal(fill(0., size(A22.data))), A22, vf2)
end
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 12872 | using DataFrames
using LinearAlgebra
using MixedModels
using Random
using Statistics
using StableRNGs
using Suppressor
using Tables
using Test
using MixedModels: dataset, MixedModelBootstrap
include("modelcache.jl")
function quickboot(m, n=2)
return parametricbootstrap(MersenneTwister(42), n, m;
progress=false, use_threads=false,
optsum_overrides=(;ftol_rel=1e-8))
end
@testset "simulate!(::MixedModel)" begin
@testset "LMM" begin
ds = dataset(:dyestuff)
fm = only(models(:dyestuff))
# # just in case the fit was modified in a previous test
# refit!(fm, vec(float.(ds.yield)))
resp₀ = copy(response(fm))
# type conversion of ints to floats
simulate!(StableRNG(1234321), fm, β=[1], σ=1)
refit!(fm, resp₀; progress=false)
refit!(simulate!(StableRNG(1234321), fm); progress=false)
@test deviance(fm) ≈ 322.6582 atol=0.001
refit!(fm, float(ds.yield), progress=false)
# Global/implicit RNG method
Random.seed!(1234321)
refit!(simulate!(fm); progress=false)
# just make sure this worked, don't check fit
# (because the RNG can change between Julia versions)
@test response(fm) ≠ resp₀
simulate!(fm, θ = fm.θ)
@test_throws DimensionMismatch refit!(fm, zeros(29); progress=false)
# restore the original state
refit!(fm, vec(float.(ds.yield)); progress=false)
@testset "zerocorr" begin
fmzc = models(:sleepstudy)[2]
@test length(simulate(fmzc)) == length(response(fmzc))
end
end
@testset "Poisson" begin
center(v::AbstractVector) = v .- (sum(v) / length(v))
grouseticks = DataFrame(dataset(:grouseticks))
grouseticks.ch = center(grouseticks.height)
gm4 = fit(MixedModel, only(gfms[:grouseticks]), grouseticks, Poisson(), fast=true, progress=false) # fails in pirls! with fast=false
gm4sim = refit!(simulate!(StableRNG(42), deepcopy(gm4)); progress=false)
@test isapprox(gm4.β, gm4sim.β; atol=norm(stderror(gm4)))
end
@testset "Binomial" begin
cbpp = dataset(:cbpp)
gm2 = fit(MixedModel, first(gfms[:cbpp]), cbpp, Binomial(), wts=float(cbpp.hsz), progress=false)
gm2sim = refit!(simulate!(StableRNG(42), deepcopy(gm2)); fast=true, progress=false)
@test isapprox(gm2.β, gm2sim.β; atol=norm(stderror(gm2)))
end
@testset "_rand with dispersion" begin
@test_throws ArgumentError MixedModels._rand(StableRNG(42), Normal(), 1, 1, 1)
@test_throws ArgumentError MixedModels._rand(StableRNG(42), Gamma(), 1, 1, 1)
@test_throws ArgumentError MixedModels._rand(StableRNG(42), InverseGaussian(), 1, 1, 1)
end
end
@testset "bootstrap" begin
fm = only(models(:dyestuff))
# two implicit tests
# 1. type conversion of ints to floats
# 2. test method for default RNG
@test_logs((:warn, r"hide_progress"),
parametricbootstrap(1, fm, β=[1], σ=1, hide_progress=true))
bsamp = parametricbootstrap(MersenneTwister(1234321), 100, fm;
use_threads=false, progress=false)
@test isa(propertynames(bsamp), Vector{Symbol})
@test length(bsamp.objective) == 100
@test keys(first(bsamp.fits)) == (:objective, :σ, :β, :se, :θ)
@test isa(bsamp.σs, Vector{<:NamedTuple})
@test length(bsamp.σs) == 100
allpars = DataFrame(bsamp.allpars)
@test isa(allpars, DataFrame)
@testset "optsum_overrides" begin
bsamp2 = parametricbootstrap(MersenneTwister(1234321), 100, fm;
use_threads=false, progress=false,
optsum_overrides=(;ftol_rel=1e-8))
# for such a simple, small model setting the function value
# tolerance has little effect until we do something extreme
@test bsamp.objective ≈ bsamp2.objective
bsamp2 = parametricbootstrap(MersenneTwister(1234321), 100, fm;
use_threads=false, progress=false,
optsum_overrides=(;ftol_rel=1.0))
@test !(bsamp.objective ≈ bsamp2.objective)
end
cov = shortestcovint(shuffle(1.:100.))
# there is no unique shortest coverage interval here, but the left-most one
# is currently returned, so we take that. If this behavior changes, then
# we'll have to change the test
@test first(cov) == 1.
@test last(cov) == 95.
coefp = DataFrame(bsamp.coefpvalues)
@test isa(coefp, DataFrame)
@test coefp.iter == 1:100
@test only(unique(coefp.coefname)) == Symbol("(Intercept)")
@test propertynames(coefp) == [:iter, :coefname, :β, :se, :z, :p]
@testset "threaded bootstrap" begin
@test_logs (:warn, r"use_threads is deprecated") parametricbootstrap(MersenneTwister(1234321), 1, fm;
use_threads=true, progress=false)
end
@testset "zerocorr + Base.length + ftype" begin
fmzc = models(:sleepstudy)[2]
pbzc = parametricbootstrap(MersenneTwister(42), 5, fmzc, Float16;
progress=false)
@test length(pbzc) == 5
@test Tables.istable(shortestcovint(pbzc))
@test typeof(pbzc) == MixedModelBootstrap{Float16}
end
@testset "zerocorr + not zerocorr" begin
form_zc_not = @formula(rt_trunc ~ 1 + spkr * prec * load +
(1 + spkr + prec + load | subj) +
zerocorr(1 + spkr + prec + load | item))
fmzcnot = fit(MixedModel, form_zc_not, dataset(:kb07); progress=false)
pbzcnot = parametricbootstrap(MersenneTwister(42), 2, fmzcnot, Float16;
progress=false)
end
@testset "vcat" begin
sleep = quickboot(last(models(:sleepstudy)))
zc1 = quickboot(models(:sleepstudy)[2])
zc2 = quickboot(models(:sleepstudy)[3])
@test_throws ArgumentError vcat(sleep, zc1)
@test_throws ArgumentError reduce(vcat, [sleep, zc1])
# these are the same model even if the formulae
# are expressed differently
@test length(vcat(zc1, zc2)) == 4
@test length(reduce(vcat, [zc1, zc2])) == 4
end
@testset "save and restore replicates" begin
io = IOBuffer()
m0 = first(models(:sleepstudy))
m1 = last(models(:sleepstudy))
pb0 = quickboot(m0)
pb1 = quickboot(m1)
savereplicates(io, pb1)
@test isa(pb0.tbl, Table)
@test isa(pb1.tbl, Table) # create tbl here to check it doesn't modify pb1
@test ncol(DataFrame(pb1.β)) == 3
# wrong model
@test_throws ArgumentError restorereplicates(seekstart(io), m0)
# need to specify an eltype!
@test_throws MethodError restorereplicates(seekstart(io), m1, MixedModelBootstrap)
# make sure exact and approximate equality work
@test pb1 == pb1
@test pb1 == restorereplicates(seekstart(io), m1)
@test pb1 ≈ restorereplicates(seekstart(io), m1)
@test pb1 ≈ pb1
@test pb1 ≈ restorereplicates(seekstart(io), m1, Float64)
@test restorereplicates(seekstart(io), m1, Float32) ≈ restorereplicates(seekstart(io), m1, Float32)
# too much precision is lost
f16 = restorereplicates(seekstart(io), m1, Float16)
@test !isapprox(pb1, f16)
@test isapprox(pb1, f16; atol=eps(Float16))
@test isapprox(pb1, f16; rtol=0.0001)
# two paths, one destination
@test restorereplicates(seekstart(io), m1, MixedModelBootstrap{Float16}) == restorereplicates(seekstart(io), m1, Float16)
# changing eltype breaks exact equality
@test pb1 != restorereplicates(seekstart(io), m1, Float32)
# test that we don't need the model to be fit when restoring
@test pb1 == restorereplicates(seekstart(io), MixedModels.unfit!(deepcopy(m1)))
@test pb1 ≈ restorereplicates(seekstart(io), m1, Float16) rtol=1
end
@testset "Bernoulli simulate! and GLMM bootstrap" begin
contra = dataset(:contra)
# need a model with fast=false to test that we only
# copy the optimizer constraints for θ and not β
gm0 = fit(MixedModel, first(gfms[:contra]), contra, Bernoulli(), fast=false, progress=false)
bs = parametricbootstrap(StableRNG(42), 100, gm0; progress=false)
# make sure we're not copying
@test length(bs.lowerbd) == length(gm0.θ)
bsci = filter!(:type => ==("β"), DataFrame(shortestcovint(bs)))
ciwidth = 2 .* stderror(gm0)
waldci = DataFrame(coef=fixefnames(gm0),
lower=fixef(gm0) .- ciwidth,
upper=fixef(gm0) .+ ciwidth)
# coarse tolerances because we're not doing many bootstrap samples
@test all(isapprox.(bsci.lower, waldci.lower; atol=0.5))
@test all(isapprox.(bsci.upper, waldci.upper; atol=0.5))
σbar = mean(MixedModels.tidyσs(bs)) do x; x.σ end
@test σbar ≈ 0.56 atol=0.1
apar = filter!(row -> row.type == "σ", DataFrame(MixedModels.allpars(bs)))
@test !("Residual" in apar.names)
@test mean(apar.value) ≈ σbar
# can't specify dispersion for families without that parameter
@test_throws ArgumentError parametricbootstrap(StableRNG(42), 100, gm0;
σ=2, progress=false)
@test sum(issingular(bs)) == 0
end
@testset "Rank deficient" begin
rng = MersenneTwister(0);
x = rand(rng, 100);
data = (x = x, x2 = 1.5 .* x, y = rand(rng, [0,1], 100), z = repeat('A':'T', 5))
@testset "$family" for family in [Normal(), Bernoulli()]
model = @suppress fit(MixedModel, @formula(y ~ x + x2 + (1|z)), data, family; progress=false)
boot = quickboot(model, 10)
dropped_idx = model.feterm.piv[end]
dropped_coef = coefnames(model)[dropped_idx]
@test all(boot.β) do nt
# if we're the dropped coef, then we must be -0.0
# need isequal because of -0.0
return nt.coefname != dropped_coef || isequal(nt.β, -0.0)
end
yc = simulate(StableRNG(1), model; β=coef(model))
yf = simulate(StableRNG(1), model; β=fixef(model))
@test all(x -> isapprox(x...), zip(yc, yf))
end
@testset "partial crossing" begin
id = lpad.(string.(1:40), 2, "0")
B = ["b0", "b1", "b2"]
C = ["c0", "c1", "c2", "c3", "c4"]
df = DataFrame(reshape(collect(Iterators.product(B, C, id)), :), [:b, :c, :id])
df[!, :y] .= 0
filter!(df) do row
b = last(row.b)
c = last(row.c)
return b != c
end
m = LinearMixedModel(@formula(y ~ 1 + b * c + (1|id)), df)
β = 1:rank(m)
σ = 1
simulate!(StableRNG(628), m; β, σ)
fit!(m)
boot = parametricbootstrap(StableRNG(271828), 1000, m);
bootci = DataFrame(shortestcovint(boot))
filter!(:group => ismissing, bootci)
select!(bootci, :names => disallowmissing => :coef, :lower, :upper)
transform!(bootci, [:lower, :upper] => ByRow(middle) => :mean)
@test all(x -> isapprox(x[1], x[2]; atol=0.1), zip(coef(m), bootci.mean))
end
end
end
@testset "show and summary" begin
fmzc = models(:sleepstudy)[2]
level = 0.68
pb = parametricbootstrap(MersenneTwister(42), 500, fmzc; progress=false)
pr = profile(fmzc)
@test startswith(sprint(show, MIME("text/plain"), pr),
"MixedModelProfile -- Table with 9 columns and 151 rows:")
@test startswith(sprint(show, MIME("text/plain"), pb),
"MixedModelBootstrap with 500 samples\n parameter min q25 median mean q75 max\n ")
df = DataFrame(pr)
@test nrow(df) == 151
@test propertynames(df) == collect(propertynames(pr.tbl))
@testset "CI method comparison" begin
level = 0.68
ci_boot_equaltail = confint(pb; level, method=:equaltail)
ci_boot_shortest = confint(pb; level, method=:shortest)
@test_throws ArgumentError confint(pb; level, method=:other)
ci_wald = confint(fmzc; level)
ci_prof = confint(pr; level)
@test first(ci_boot_shortest.lower, 2) ≈ first(ci_prof.lower, 2) atol=0.5
@test first(ci_boot_equaltail.lower, 2) ≈ first(ci_prof.lower, 2) atol=0.5
@test first(ci_prof.lower, 2) ≈ first(ci_wald.lower, 2) atol=0.1
end
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 1218 | using MixedModels
using Suppressor
using Test
@testset "linear" begin
m1 = fit(MixedModel, @formula(yield ~ 1 + (1|batch)), MixedModels.dataset(:dyestuff); progress=false)
@test first(m1.θ) ≈ 0.7525806757718846 rtol=1.0e-5
end
@testset "generalized" begin
gm1 = fit(MixedModel, @formula(use ~ 1 + urban + livch + age + abs2(age) + (1|dist)),
MixedModels.dataset(:contra), Bernoulli(); progress=false)
@test deviance(gm1) ≈ 2372.7286 atol=1.0e-3
end
@testset "Normal-IdentityLink" begin
@test isa(fit(MixedModel, @formula(yield ~ 1 + (1|batch)), MixedModels.dataset(:dyestuff), Normal(); progress=false),
LinearMixedModel)
@test_throws(ArgumentError("use LinearMixedModel for Normal distribution with IdentityLink"),
fit(GeneralizedLinearMixedModel,
@formula(yield ~ 1 + (1|batch)),
MixedModels.dataset(:dyestuff); progress=false))
end
@testset "Normal Distribution GLMM" begin
@test @suppress isa(fit(MixedModel, @formula(yield ~ 1 + (1|batch)), MixedModels.dataset(:dyestuff),
Normal(), SqrtLink(); progress=false),
GeneralizedLinearMixedModel)
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 237 | using MixedModels, Test
@testset "GHnorm" begin
gh2 = GHnorm(2)
@test gh2.z == [-1.0, 1.0]
@test gh2.w == [0.5, 0.5]
@test GHnorm(2) === gh2
gh9 = GHnorm(9.0)
@test sum(gh9.w) ≈ 1
@test length(gh9) == 9
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 4618 | using MixedModels
using StatsModels
using Test
using MixedModels: schematize
using StatsModels: ContrastsMatrix, FullDummyCoding
@testset "Grouping" begin
g = Grouping()
@test isnothing(g.levels)
end
@testset "Grouping pseudo-contrasts" begin
d = (; y=rand(2_000_000),
grp=string.([1:1_000_000; 1:1_000_000]),
outer=rand('A':'z', 2_000_000))
## OOM seems to result in the process being killed on Mac so this messes up CI
# @test_throws OutOfMemoryError schema(d)
sch = schema(d, Dict(:grp => Grouping()))
t = sch[term(:grp)]
@test t isa CategoricalTerm{Grouping}
@test size(t.contrasts.matrix) == (0,0)
@test length(t.contrasts.levels) == 1_000_000
@test_throws ErrorException StatsModels.modelcols(t, (a = 1.,))
levs = sort(string.(1:1_000_000))
@test all(t.contrasts.invindex[lev] == i for (i,lev) in enumerate(levs))
@test all(t.contrasts.levels[i] == lev for (i,lev) in enumerate(levs))
end
@testset "Auto application of Grouping()" begin
d = (; y=rand(100),
x=rand('A':'Z', 100),
z=rand('A':'Z', 100),
grp=rand('a':'z', 100))
contrasts = Dict{Symbol, Any}()
@testset "blocking variables are grouping" for f in [@formula(y ~ 1 + x + (1|grp)),
@formula(y ~ 1 + x + zerocorr(1|grp))]
fsch = schematize(f, d, contrasts)
fe = fsch.rhs[1]
x = last(fe.terms)
@test x.contrasts isa ContrastsMatrix{DummyCoding}
re = fsch.rhs[2]
grp = re.rhs
@test grp.contrasts isa ContrastsMatrix{Grouping}
end
@testset "FE contrasts take priority" for f in [@formula(y ~ 1 + x + (1|x)),
@formula(y ~ 1 + x + zerocorr(1|x))]
fsch = schematize(f, d, contrasts)
fe = fsch.rhs[1]
x = last(fe.terms)
@test x.contrasts isa ContrastsMatrix{DummyCoding}
re = fsch.rhs[2]
grp = re.rhs
@test grp.contrasts isa ContrastsMatrix{DummyCoding}
fsch = schematize(@formula(y ~ 1 + x + (1|x)), d, Dict(:x => EffectsCoding()))
fe = fsch.rhs[1]
x = last(fe.terms)
@test x.contrasts isa ContrastsMatrix{EffectsCoding}
re = fsch.rhs[2]
grp = re.rhs
@test grp.contrasts isa ContrastsMatrix{EffectsCoding}
end
@testset "Nesting and interactions" for f in [@formula(y ~ 1 + x + (1|grp/z))]
# XXX zerocorr(1|grp/z) doesn't work!
fsch = schematize(f, d, contrasts)
fe = fsch.rhs[1]
x = last(fe.terms)
@test x.contrasts isa ContrastsMatrix{DummyCoding}
re = fsch.rhs[2:end]
grp = re[1].rhs
@test grp.contrasts isa ContrastsMatrix{Grouping}
interaction = re[2].rhs
# this is less than ideal but we need it for now to get the nesting logic to work
@test interaction.terms[1].contrasts isa ContrastsMatrix{FullDummyCoding}
# this is the desired behavior
@test_broken interaction.terms[1].contrasts isa ContrastsMatrix{Grouping}
@test interaction.terms[2].contrasts isa ContrastsMatrix{Grouping}
end
@testset "Interactions where one component is FE" for f in [@formula(y ~ 1 + x + (1|x&grp)),
@formula(y ~ 1 + x + zerocorr(1|x&grp))]
# occurs in e.g. the contra models
# @formula(use ~ 1+age+abs2(age)+urban+livch+(1|urban&dist)
fsch = schematize(f, d, contrasts)
fe = fsch.rhs[1]
x = last(fe.terms)
@test x.contrasts isa ContrastsMatrix{DummyCoding}
re = fsch.rhs[2]
x_re = re.rhs.terms[1]
# this is less than ideal but it relates to the way interactions are computed in RE
@test x_re.contrasts isa ContrastsMatrix{DummyCoding}
# this is the desired behavior:
# even if the contrast matrix has to be small enough to invert,
# it's silly to store it and invert it again when we don't need it here
@test_broken x_re.contrasts isa ContrastsMatrix{Grouping}
grp = re.rhs.terms[2]
@test grp.contrasts isa ContrastsMatrix{Grouping}
end
@test_throws(ArgumentError("Same variable appears on both sides of |"),
schematize(@formula(y ~ 1 + (x|x)), d, contrasts))
f1 = schematize(@formula(y ~ 1 + x + z), d, contrasts)
f2 = apply_schema(@formula(y ~ 1 + x + z), schema(d, contrasts))
# skip intercept term
@test all(a.contrasts == b.contrasts for (a, b) in zip(f1.rhs.terms[2:end], f2.rhs.terms[2:end]))
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 5583 | using DataFrames
using GLM
using MixedModels
using Suppressor
using Test
using MixedModels: dataset, likelihoodratiotest
using GLM: ProbitLink
using StatsModels: lrtest, isnested
include("modelcache.jl")
@testset "isnested" begin
slp = dataset(:sleepstudy)
# these tests don't actually depend on the models being fit,
# so we just construct them
# mismatched RE terms
m1 = LinearMixedModel(@formula(reaction ~ 1 + days + (1+days|subj)), slp)
m2 = LinearMixedModel(@formula(reaction ~ 1 + days + (0+days|subj)), slp)
@test !isnested(m1, m2)
# mismatched FE
m1 = LinearMixedModel(@formula(reaction ~ 1 + days + (1|subj)), slp)
m2 = LinearMixedModel(@formula(reaction ~ 0 + days + (1|subj)), slp)
@test !isnested(m1, m2)
# mismatched grouping vars
kb07 = dataset(:kb07)
m1 = LinearMixedModel(@formula(rt_trunc ~ 1 + (1|subj)), kb07)
m2 = LinearMixedModel(@formula(rt_trunc ~ 1 + (1|item)), kb07)
@test !isnested(m1, m2)
# fixed-effects specification in REML and
# conversion of internal ArgumentError into @error for StatsModels.isnested
kb07 = dataset(:kb07)
m1 = fit(MixedModel, @formula(rt_trunc ~ 1 + prec + (1|subj)), kb07, REML=true, progress=false)
m2 = fit(MixedModel, @formula(rt_trunc ~ 1 + prec + (1+prec|subj)), kb07, REML=true, progress=false)
@test isnested(m1, m2)
m2 = fit(MixedModel, @formula(rt_trunc ~ 1 + (1+prec|subj)), kb07, REML=true, progress=false)
@test @suppress !isnested(m1, m2)
end
@testset "likelihoodratio test" begin
slp = dataset(:sleepstudy);
fm0 = fit(MixedModel,@formula(reaction ~ 1 + (1+days|subj)),slp, progress=false);
fm1 = fit(MixedModel,@formula(reaction ~ 1 + days + (1+days|subj)),slp, progress=false);
lm0 = lm(@formula(reaction ~ 1), slp)
lm1 = lm(@formula(reaction ~ 1 + days), slp)
@test MixedModels._iscomparable(lm0, fm1)
@test !MixedModels._iscomparable(lm1, fm0)
lrt = likelihoodratiotest(fm0,fm1)
@test [deviance(fm0), deviance(fm1)] == lrt.deviance
@test deviance(fm0) - deviance(fm1) == only(lrt.tests.deviancediff)
@test only(lrt.tests.dofdiff) == 1
@test sum(map(length,lrt.tests)) == 3
@test sum(map(length,lrt.pvalues)) == 1
@test sum(map(length,lrt.models)) == 4
@test length(lrt.formulae) == 2
show(IOBuffer(),lrt);
@test :pvalues in propertynames(lrt)
lrt = likelihoodratiotest(lm1,fm1)
@test lrt.deviance ≈ likelihoodratiotest(lm1.model,fm1).deviance
@test lrt.dof == [3, 6]
@test lrt.deviance ≈ -2 * loglikelihood.([lm1, fm1])
shown = sprint(show, lrt)
@test occursin("-2 logLik", shown)
@test !occursin("deviance", shown)
# non nested FE between non-mixed and mixed
@test_throws ArgumentError likelihoodratiotest(lm1, fm0)
# mix of REML and ML
fm0 = fit(MixedModel,@formula(reaction ~ 1 + (1+days|subj)),slp, REML=true, progress=false);
@test_throws ArgumentError likelihoodratiotest(fm0,fm1)
@test_throws ArgumentError likelihoodratiotest(lm0,fm0)
# differing FE with REML
fm1 = fit(MixedModel,@formula(reaction ~ 1 + days + (1+days|subj)),slp, REML=true, progress=false);
@test_throws ArgumentError likelihoodratiotest(fm0,fm1)
contra = MixedModels.dataset(:contra);
# glm doesn't like categorical responses, so we convert it to numeric ourselves
# TODO: upstream fix
cc = DataFrame(contra);
cc.usenum = ifelse.(cc.use .== "Y", 1 , 0)
gmf = glm(@formula(usenum ~ 1+age+urban+livch), cc, Bernoulli());
gmf2 = glm(@formula(usenum ~ 1+age+abs2(age)+urban+livch), cc, Bernoulli());
gm0 = fit(MixedModel, @formula(use ~ 1+age+urban+livch+(1|urban&dist)), contra, Bernoulli(), fast=true, progress=false);
gm1 = fit(MixedModel, @formula(use ~ 1+age+abs2(age)+urban+livch+(1|urban&dist)), contra, Bernoulli(), fast=true, progress=false);
lrt = likelihoodratiotest(gmf, gm1)
@test [-2 * loglikelihood(gmf), deviance(gm1)] ≈ lrt.deviance
@test -2 * loglikelihood(gmf) - deviance(gm1) ≈ only(lrt.tests.deviancediff)
shown = sprint(show, lrt)
@test !occursin("-2 logLik", shown)
@test occursin("deviance", shown)
lrt = likelihoodratiotest(gm0,gm1);
@test [deviance(gm0), deviance(gm1)] == lrt.deviance
@test deviance(gm0) - deviance(gm1) == only(lrt.tests.deviancediff)
@test first(lrt.tests.dofdiff) == 1
@test sum(length, lrt.tests) == 3
@test sum(length, lrt.pvalues) == 1
@test sum(length, lrt.models) == 4
@test length(lrt.formulae) == 2
# mismatched links
gm_probit = fit(MixedModel, @formula(use ~ 1+age+urban+livch+(1|urban&dist)), contra, Bernoulli(), ProbitLink(), fast=true, progress=false);
@test_throws ArgumentError likelihoodratiotest(gmf, gm_probit)
@test_throws ArgumentError likelihoodratiotest(gm0, gm_probit)
# mismatched families
gm_poisson = fit(MixedModel, @formula(use ~ 1+age+urban+livch+(1|urban&dist)), contra, Poisson(), fast=true, progress=false);
@test_throws ArgumentError likelihoodratiotest(gmf, gm_poisson)
@test_throws ArgumentError likelihoodratiotest(gm0, gm_poisson)
@test !MixedModels._iscomparable(lm0, gm0)
@test !MixedModels._iscomparable(gmf, fm1)
@test MixedModels._iscomparable(gmf, gm0)
@test !MixedModels._iscomparable(gmf2, gm0)
@test MixedModels._isnested(gmf.mm.m, gm0.X)
@test !MixedModels._isnested(gmf2.mm.m, gm0.X)
# this skips the linear term so that the model matrices
# have the same column rank
@test !MixedModels._isnested(gmf2.mm.m[:,Not(2)], gm0.X)
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 4045 | using LinearAlgebra
using MixedModels
using PooledArrays
using Random
using SparseArrays
using Test
using MixedModels: rankUpdate!
@testset "mul!" begin
for (m, p, n, q, k) in (
(10, 0.7, 5, 0.3, 15),
(100, 0.01, 100, 0.01, 20),
(100, 0.1, 100, 0.2, 100),
)
a = sprand(m, n, p)
b = sprand(n, k, q)
as = sparse(a')
bs = sparse(b')
c = zeros(m, k)
ab = a * b
arbt = Array(b')
aab = Array(a) * Array(b)
@test aab ≈ mul!(c, a, bs', true, true)
@test aab ≈ mul!(c, a, bs')
@test aab ≈ mul!(c, a, arbt')
@test aab ≈ mul!(fill!(c, 0.0), a, arbt', true, true)
@test maximum(abs, mul!(c, a, arbt', -1.0, true)) ≤ sqrt(eps())
@test maximum(abs.(ab - aab)) < 100*eps()
@test a*bs' == ab
@test as'*b == ab
@test as'*bs' == ab
f = Diagonal(rand(n))
@test Array(a*f) == Array(a)*f
@test Array(f*b) == f*Array(b)
end
end
@testset "reweight!" begin
rng = MersenneTwister(1234321)
df = (
Y = randn(rng, 400),
A = repeat(PooledArray(["N","Y"]), outer=200),
G = repeat(PooledArray(string.('A':'T')), inner = 2, outer=10),
H = repeat(PooledArray(string.('a':'j')), inner=40),
)
m1 = fit(MixedModel, @formula(Y ~ 1 + A + (1+A|G) + (1+A|H)), df; progress=false)
wm1 = fit(MixedModel, @formula(Y ~ 1+A+(1+A|G)+(1+A|H)), df, wts=ones(400), progress=false)
@test loglikelihood(wm1) ≈ loglikelihood(m1)
MixedModels.reweight!(wm1, ones(400))
@test loglikelihood(refit!(wm1, progress=false)) ≈ loglikelihood(m1)
end
@testset "rankupdate!" begin
x = [1 1; 1 1];
# in Julia 1.6+, typeof(x) == Matrix{Int64}
# in < 1.6, typeof(x) == Array{Int64, 2}
err = ErrorException("We haven't implemented a method for $(typeof(x)), $(typeof(x)). Please file an issue on GitHub.");
@test_throws err rankUpdate!(x, x, 1, 1);
L21 = sprand(MersenneTwister(42), 100, 1000, 0.05)
L22L = rankUpdate!(Symmetric(zeros(100, 100), :L), L21, 1.0, 1.0)
@test L22L ≈ rankUpdate!(Symmetric(zeros(100, 100), :U), sparse(transpose(L21)), 1.0, 1.0)
end
#= I don't see this testset as meaningful b/c diagonal A does not occur after amalgamation of ReMat's for the same grouping factor - D.B.
@testset "rankupdate!" begin
@test ones(2, 2) == rankUpdate!(Hermitian(zeros(2, 2)), ones(2))
d2 = Diagonal(fill(2., 2))
@test Diagonal(fill(5.,2)) == rankUpdate!(Diagonal(ones(2)), d2, 1.)
@test Diagonal(fill(-3.,2)) == rankUpdate!(Diagonal(ones(2)), d2, -1.)
# when converting straight from diagonal to symmetric, the type is different
@test Diagonal(fill(5.,2)) == rankUpdate!(Symmetric(Matrix(1. * I(2)), :L), d2)
# generic method
@test Diagonal(fill(5.,2)) == rankUpdate!(Matrix(1. * I(2)), d2)
end
=#
@testset "lmulλ!" begin
levs(ng, tag='S') = string.(tag, lpad.(string.(1:ng), ndigits(ng), '0'))
function gendata(rng::AbstractRNG, n::Integer, ng::Integer, nh::Integer)
(
Y = randn(rng, n),
X = rand(rng, n),
G = PooledArray(rand(rng, levs(ng, 'G'), n)),
H = PooledArray(rand(rng, levs(nh, 'H'), n)),
)
end
gendata(n::Integer, ng::Integer, nh::Integer) = gendata(MersenneTwister(42), n, ng, nh)
gendata(n::Integer, ng::Integer) = gendata(MersenneTwister(42), n, ng, ng)
@testset "Adjoint{T,ReMat{T,1}}, BlockedSparse{T,1,2}" begin
# this is an indirect test of lmulΛ! for a blocking structure found in
# an example in MixedModels.jl#123
df = gendata(10000, 500)
f = @formula(Y ~ (1 + X|H) + (1|G))
m500 = fit!(LinearMixedModel(f, df); progress=false)
# the real test here isn't in the theta comparison but in that the fit
# completes successfully
@test length(m500.u) == 2
end
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 2943 | using LinearAlgebra, MixedModels, StableRNGs, Test, SparseArrays
include("modelcache.jl")
@testset "Xymat" begin
trm = MixedModels.FeTerm(hcat(ones(30), repeat(0:9, outer = 3)), ["(Intercept)", "U"])
piv = trm.piv
ipiv = invperm(piv)
mat = MixedModels.FeMat(trm, Float64.(collect(axes(trm.x, 1))))
@test size(mat) == (30, 3)
@test length(mat) == 90
@test size(mat') == (3, 30)
@test eltype(mat) == Float64
@test mat.xy === mat.wtxy
prd = mat'mat
@test typeof(prd) == Matrix{Float64}
@test prd[ipiv, ipiv] == [30.0 135.0; 135.0 855.0]
wts = rand(StableRNG(123454321), 30)
MixedModels.reweight!(mat, wts)
@test mul!(prd, mat', mat)[ipiv[1], ipiv[1]] ≈ sum(abs2, wts)
# empty fixed effects
trm = MixedModels.FeTerm(ones(10,0), String[])
#@test size(trm) == (10, 0) # there are no longer size and length methods for FeTerm
#@test length(trm) == 0
#@test size(trm') == (0, 10)
@test eltype(trm) == Float64
@test trm.rank == 0
end
@testset "XymatSparse" begin
@testset "sparse and dense yield same fit" begin
# deepcopy because we're going to modify
m = last(models(:insteval))
# this is kinda sparse:
# julia> mean(first(m.feterm).x)
# 0.10040140325753434
fe = m.feterm
X = MixedModels.FeTerm(SparseMatrixCSC(fe.x), fe.cnames)
@test typeof(X.x) <: SparseMatrixCSC
@test X.rank == 28
@test X.cnames == fe.cnames
m1 = LinearMixedModel(collect(m.y), X, deepcopy(m.reterms), m.formula)
# because of the way the initial values are calculated
# m1.optsum.initial == m.optsum.final at this point
copyto!(m1.optsum.initial, m.optsum.initial)
fit!(m1; progress=false)
@test isapprox(m1.θ, m.θ, rtol = 1.0e-5)
end
@testset "rank deficiency in sparse FeTerm" begin
trm = MixedModels.FeTerm(SparseMatrixCSC(hcat(ones(30),
repeat(0:9, outer = 3),
2repeat(0:9, outer = 3))),
["(Intercept)", "U", "V"])
# at present there is no attempt to evaluate the rank of a SparseMatrixCSC
piv = trm.piv
ipiv = invperm(piv)
@test_broken rank(trm) == 2
mat = MixedModels.FeMat(trm, Float64.(collect(axes(trm.x, 1))))
@test size(mat) == (30, 4)
@test length(mat) == 120
@test size(mat') == (4, 30)
@test eltype(mat) == Float64
@test mat.xy === mat.wtxy
prd = MixedModels.densify(mat'mat)
@test typeof(prd) == Matrix{Float64}
@test prd[ipiv, ipiv] == [30.0 135.0 270.0; 135.0 855.0 1710.0; 270.0 1710.0 3420.0]
wts = rand(StableRNG(123454321), 30)
MixedModels.reweight!(mat, wts)
@test mul!(prd, mat', mat)[ipiv[1], ipiv[1]] ≈ sum(abs2, wts)
end
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 8910 | using MixedModels
using Suppressor
using Test
using MixedModels: dataset, likelihoodratiotest
using MixedModels: pirls!, setβθ!, setθ!, updateL!
include("modelcache.jl")
# explicitly setting theta for these so that we can do exact textual comparisons
βθ = [0.1955554704948119, 0.05755412761885973, 0.3207843518569843, -1.0582595252774376,
-2.1047524824609853, -1.0549789653925743, 1.339766125847893, 0.4953047709862237]
gm3 = GeneralizedLinearMixedModel(only(gfms[:verbagg]), dataset(:verbagg), Bernoulli())
pirls!(setβθ!(gm3, βθ))
fm0θ = [1.1656121258575225]
fm0 = updateL!(setθ!(first(models(:sleepstudy)), fm0θ))
fm0.optsum.feval = 1
fm1θ = [0.9292213288149662, 0.018168393450877257, 0.22264486671069741]
fm1 = updateL!(setθ!(last(models(:sleepstudy)), fm1θ))
fm1.optsum.feval = 1
fmreθ = [0.32352483854887326, 0.4715395478019364, 0.0,
0.43705610601403755, 0.016565641868150047, 0.17732248078617097]
# this is a junk model, but it stresses parts of the display code
fmre = LinearMixedModel(@formula(rt_trunc ~ 1+(0+spkr|subj)+(1+load|item)), MixedModels.dataset(:kb07))
updateL!(setθ!(fmre, fmreθ))
fmre.optsum.feval = 1
lrt = likelihoodratiotest(fm0, fm1)
@testset "markdown" begin
mime = MIME"text/markdown"()
gm3.optsum.feval = -1
@test_logs (:warn, "Model has not been fit: results will be nonsense") sprint(show, mime, gm3)
gm3.optsum.feval = 1
@testset "lmm" begin
@test sprint(show, mime, fm0) == """
| | Est. | SE | z | p | σ_subj |
|:----------- | --------:| ------:| -----:| ------:| -------:|
| (Intercept) | 251.4051 | 9.5062 | 26.45 | <1e-99 | 36.0121 |
| days | 10.4673 | 0.8017 | 13.06 | <1e-38 | |
| Residual | 30.8954 | | | | |
"""
@test sprint(show, mime, fm1) == """
| | Est. | SE | z | p | σ_subj |
|:----------- | --------:| ------:| -----:| ------:| -------:|
| (Intercept) | 251.4051 | 6.6323 | 37.91 | <1e-99 | 23.7805 |
| days | 10.4673 | 1.5022 | 6.97 | <1e-11 | 5.7168 |
| Residual | 25.5918 | | | | |
"""
end
@testset "re without fe" begin
@test sprint(show, mime, fmre) == """
| | Est. | SE | z | p | σ_subj | σ_item |
|:----------- | ---------:| -------:| -----:| ------:| --------:| --------:|
| (Intercept) | 2092.3713 | 76.9426 | 27.19 | <1e-99 | | 349.7858 |
| spkr: old | | | | | 377.3837 | |
| spkr: new | | | | | 258.9242 | |
| load: yes | | | | | | 142.5331 |
| Residual | 800.3224 | | | | | |
"""
end
@testset "glmm" begin
@test sprint(show, mime, gm3) in ("""
| | Est. | SE | z | p | σ_subj | σ_item |
|:------------ | -------:| ------:| -----:| ------:| ------:| ------:|
| (Intercept) | 0.1956 | 0.4052 | 0.48 | 0.6294 | 1.3398 | 0.4953 |
| anger | 0.0576 | 0.0168 | 3.43 | 0.0006 | | |
| gender: M | 0.3208 | 0.1913 | 1.68 | 0.0935 | | |
| btype: scold | -1.0583 | 0.2568 | -4.12 | <1e-04 | | |
| btype: shout | -2.1048 | 0.2585 | -8.14 | <1e-15 | | |
| situ: self | -1.0550 | 0.2103 | -5.02 | <1e-06 | | |
""","""
| | Est. | SE | z | p | σ_subj | σ_item |
|:------------ | -------:| ------:| -----:| ------:| ------:| ------:|
| (Intercept) | 0.1956 | 0.4052 | 0.48 | 0.6294 | 1.3398 | 0.4953 |
| anger | 0.0576 | 0.0168 | 3.43 | 0.0006 | | |
| gender: M | 0.3208 | 0.1913 | 1.68 | 0.0935 | | |
| btype: scold | -1.0583 | 0.2568 | -4.12 | <1e-4 | | |
| btype: shout | -2.1048 | 0.2585 | -8.14 | <1e-15 | | |
| situ: self | -1.0550 | 0.2103 | -5.02 | <1e-6 | | |
""")
end
@testset "lrt" begin
@test sprint(show, mime, lrt) in ("""
| | model-dof | deviance | χ² | χ²-dof | P(>χ²) |
|:---------------------------------------- | ---------:| --------:| ---:| ------:|:------ |
| reaction ~ 1 + days + (1 \\| subj) | 4 | 1794 | | | |
| reaction ~ 1 + days + (1 + days \\| subj) | 6 | 1752 | 42 | 2 | <1e-09 |
""","""
| | model-dof | deviance | χ² | χ²-dof | P(>χ²) |
|:---------------------------------------- | ---------:| --------:| ---:| ------:|:------ |
| reaction ~ 1 + days + (1 \\| subj) | 4 | 1794 | | | |
| reaction ~ 1 + days + (1 + days \\| subj) | 6 | 1752 | 42 | 2 | <1e-9 |
""")
end
@testset "blockdescription" begin
@test sprint(show, mime, BlockDescription(gm3)) == """
| rows | subj | item | fixed |
|:---- |:-------- |:---------- |:----- |
| 316 | Diagonal | | |
| 24 | Dense | Diag/Dense | |
| 7 | Dense | Dense | Dense |
"""
end
@testset "optsum" begin
fm1.optsum.feval = 1
fm1.optsum.initial_step = [0.75, 1.0, 0.75]
fm1.optsum.finitial = 1784.642296192471
fm1.optsum.final = [0.9292, 0.0182, 0.2226]
fm1.optsum.fmin = 1751.9393444647023
out = sprint(show, mime, fm1.optsum)
@test startswith(out,"""
| | |
|:------------------------ |:--------------------------- |
| **Initialization** | |
| Initial parameter vector | [1.0, 0.0, 1.0] |
| Initial objective value | 1784.642296192471 |
| **Optimizer settings** | |
| Optimizer (from NLopt) | `LN_BOBYQA` |
| Lower bounds | [0.0, -Inf, 0.0] |""")
end
@testset "varcorr" begin
@test sprint(show, mime, VarCorr(fm1)) == """
| | Column | Variance | Std.Dev | Corr. |
|:-------- |:----------- | ---------:| --------:| -----:|
| subj | (Intercept) | 565.51069 | 23.78047 | |
| | days | 32.68212 | 5.71683 | +0.08 |
| Residual | | 654.94145 | 25.59182 | |
"""
@test sprint(show, mime, VarCorr(gm3)) == """
| | Column | Variance | Std.Dev |
|:---- |:----------- | ---------:| --------:|
| subj | (Intercept) | 1.794973 | 1.339766 |
| item | (Intercept) | 0.245327 | 0.495305 |
"""
end
end
@testset "html" begin
# this is minimal since we're mostly testing that dispatch works
# the stdlib actually handles most of the conversion
@test sprint(show, MIME("text/html"), BlockDescription(gm3)) == """
<table><tr><th align="left">rows</th><th align="left">subj</th><th align="left">item</th><th align="left">fixed</th></tr><tr><td align="left">316</td><td align="left">Diagonal</td><td align="left"></td><td align="left"></td></tr><tr><td align="left">24</td><td align="left">Dense</td><td align="left">Diag/Dense</td><td align="left"></td></tr><tr><td align="left">7</td><td align="left">Dense</td><td align="left">Dense</td><td align="left">Dense</td></tr></table>
"""
optsum = sprint(show, MIME("text/html"), fm0.optsum)
@test occursin("<b>Initialization</b>", optsum)
@test occursin("<code>LN_BOBYQA</code>", optsum)
end
@testset "latex" begin
# this is minimal since we're mostly testing that dispatch works
# the stdlib actually handles most of the conversion
b = BlockDescription(gm3)
@test sprint(show, MIME("text/latex"), b) == """
\\begin{tabular}
{l | l | l | l}
rows & subj & item & fixed \\\\
\\hline
316 & Diagonal & & \\\\
24 & Dense & Diag/Dense & \\\\
7 & Dense & Dense & Dense \\\\
\\end{tabular}
"""
@test sprint(show, MIME("text/xelatex"), b) == sprint(show, MIME("text/latex"), b)
@test sprint(show, MIME("text/xelatex"), gm3) != sprint(show, MIME("text/latex"), gm3)
@test startswith(sprint(show, MIME("text/latex"), gm3),"""
\\begin{tabular}
{l | r | r | r | r | r | r}
& Est. & SE & z & p & \$\\sigma_\\text{subj}\$ & \$\\sigma_\\text{item}\$ \\\\""")
# not doing the full comparison here because there's a zero-padded exponent
# that will render differently on different platforms
@test startswith(sprint(show, MIME("text/latex"), lrt),
"\\begin{tabular}\n{l | r | r | r | r | l}\n & model-dof & deviance & \$\\chi^2\$ & \$\\chi^2\$-dof & P(>\$\\chi^2\$) \\\\")
optsum = sprint(show, MIME("text/latex"), fm0.optsum)
@test occursin(raw"\textbf{Initialization}", optsum)
@test occursin(raw"\texttt{LN\_BOBYQA}", optsum)
end
# return these models to their fitted state for the cache
refit!(fm1; progress=false)
refit!(fm0; progress=false)
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 920 | using MixedModels
using Test
using MixedModels: dataset
@testset "formula misspecification" begin
dyestuff = dataset(:dyestuff)
@test MixedModel(@formula(yield ~ 0 + (1|batch)), dyestuff) isa LinearMixedModel
@test MixedModel(@formula(yield ~ 1 + (1|batch)), dyestuff) isa LinearMixedModel
@test_throws MixedModels._MISSING_RE_ERROR MixedModel(@formula(yield ~ 0 + batch), dyestuff)
@test_throws MixedModels._MISSING_RE_ERROR MixedModel(@formula(yield ~ 1), dyestuff)
@test MixedModel(@formula(yield ~ 0 + (1|batch)), dyestuff, Poisson()) isa GeneralizedLinearMixedModel
@test MixedModel(@formula(yield ~ 1 + (1|batch)), dyestuff, Poisson()) isa GeneralizedLinearMixedModel
@test_throws MixedModels._MISSING_RE_ERROR MixedModel(@formula(yield ~ 0 + batch), dyestuff, Poisson())
@test_throws MixedModels._MISSING_RE_ERROR MixedModel(@formula(yield ~ 1), dyestuff, Poisson())
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 1398 | using DataFrames
using MixedModels
using Test
# convert to DataFrame to modify it
slp = DataFrame(MixedModels.dataset(:sleepstudy))
allowmissing!(slp, :days)
slp[1, :days] = missing
# TODO: re-enable this test when better missing support has landed in StatsModels
# @testset "No impact from missing on schema" begin
# f = @formula(Y ~ 1 + U + (1|G))
# contrasts = Dict{Symbol,Any}()
# form = apply_schema(f, schema(f, dat[:sleepstudy], contrasts), LinearMixedModel)
# form_missing = apply_schema(f, schema(f, slp, contrasts), LinearMixedModel)
#
# @test form.lhs == form_missing.lhs
# @test form.rhs == form_missing.rhs
# end
@testset "Missing Omit" begin
@testset "Missing from unused variables" begin
# missing from unused variables should have no impact
m1 = fit(MixedModel, @formula(reaction ~ 1 + (1|subj)), MixedModels.dataset(:sleepstudy), progress=false)
m1_missing = fit(MixedModel, @formula(reaction ~ 1 + (1|subj)), slp, progress=false)
@test isapprox(m1.θ, m1_missing.θ, rtol=1.0e-12)
end
@testset "Missing from used variables" begin
m1 = fit(MixedModel, @formula(reaction ~ 1 + days + (1|subj)), MixedModels.dataset(:sleepstudy), progress=false)
m1_missing = fit(MixedModel, @formula(reaction ~ 1 + days + (1|subj)), slp, progress=false)
@test nobs(m1) - nobs(m1_missing) == 1
end
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 2198 | using MixedModels
using MixedModels: dataset
@isdefined(gfms) || const global gfms = Dict(
:cbpp => [@formula((incid/hsz) ~ 1 + period + (1|herd))],
:contra => [@formula(use ~ 1+age+abs2(age)+urban+livch+(1|urban&dist)),
@formula(use ~ 1+urban+(1+urban|dist))], # see #563
:grouseticks => [@formula(ticks ~ 1+year+ch+ (1|index) + (1|brood) + (1|location))],
:verbagg => [@formula(r2 ~ 1+anger+gender+btype+situ+(1|subj)+(1|item))],
)
@isdefined(fms) || const global fms = Dict(
:oxide => [@formula(Thickness ~ 1 + (1|Lot/Wafer)),
@formula(Thickness ~ 1 + Source + (1+Source|Lot) + (1+Source|Lot&Wafer))],
:dyestuff => [@formula(yield ~ 1 + (1|batch))],
:dyestuff2 => [@formula(yield ~ 1 + (1|batch))],
:d3 => [@formula(y ~ 1 + u + (1+u|g) + (1+u|h) + (1+u|i))],
:insteval => [
@formula(y ~ 1 + service + (1|s) + (1|d) + (1|dept)),
@formula(y ~ 1 + service*dept + (1|s) + (1|d)),
],
:kb07 => [
@formula(rt_trunc ~ 1+spkr+prec+load+(1|subj)+(1|item)),
@formula(rt_trunc ~ 1+spkr*prec*load+(1|subj)+(1+prec|item)),
@formula(rt_trunc ~ 1+spkr*prec*load+(1+spkr+prec+load|subj)+(1+spkr+prec+load|item)),
],
:pastes => [
@formula(strength ~ 1 + (1|batch&cask)),
@formula(strength ~ 1 + (1|batch/cask)),
],
:penicillin => [@formula(diameter ~ 1 + (1|plate) + (1|sample))],
:sleepstudy => [
@formula(reaction ~ 1 + days + (1|subj)),
@formula(reaction ~ 1 + days + zerocorr(1+days|subj)),
@formula(reaction ~ 1 + days + (1|subj) + (0+days|subj)),
@formula(reaction ~ 1 + days + (1+days|subj)),
],
)
# for some reason it seems necessary to prime the pump in julia-1.6.0-DEV
@isdefined(fittedmodels) || const global fittedmodels = Dict{Symbol,Vector{MixedModel}}(
:dyestuff => [fit(MixedModel, only(fms[:dyestuff]), dataset(:dyestuff); progress=false)]
);
@isdefined(allfms) || const global allfms = merge(fms, gfms)
if !@isdefined(models)
function models(nm::Symbol)
get!(fittedmodels, nm) do
[fit(MixedModel, f, dataset(nm); progress=false) for f in allfms[nm]]
end
end
end
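# A usage sketch of the cache defined above: the first call fits and memoizes,
# later calls return the stored fits.
#   fm = only(models(:dyestuff))    # single-formula dataset -> single model
#   slpfits = models(:sleepstudy)   # one fitted model per entry of allfms[:sleepstudy]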
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 923 | using MixedModels
using MixedModels: dataset
using Test
include("modelcache.jl")
@testset "opt limits" begin
@testset "maxfeval" begin
fm1 = LinearMixedModel(first(fms[:sleepstudy]), dataset(:sleepstudy))
fm1.optsum.maxfeval = 1
@test_logs (:warn, "NLopt optimization failure: MAXEVAL_REACHED") refit!(fm1; progress=false)
@test fm1.optsum.returnvalue == :MAXEVAL_REACHED
@test fm1.optsum.feval == 1
end
@testset "maxtime" begin
# we need a big model to guarantee that we hit the time limit,
# no matter how small
fm1 = LinearMixedModel(last(fms[:kb07]), dataset(:kb07))
maxtime = 1e-6
fm1.optsum.maxtime = maxtime
@test_logs (:warn, "NLopt optimization failure: MAXTIME_REACHED") fit!(fm1; progress=false)
@test fm1.optsum.returnvalue == :MAXTIME_REACHED
@test fm1.optsum.maxtime == maxtime
end
end
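# The pattern under test, in sketch form (assumes `m` is an unfitted LinearMixedModel):
#   m.optsum.maxfeval = 100   # cap the number of objective evaluations
#   m.optsum.maxtime = 5.0    # cap wall-clock seconds
#   fit!(m)                   # stops early; m.optsum.returnvalue records why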
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 10807 | using DataFrames
using Distributions
using MixedModels
using PooledArrays
using StableRNGs
using Tables
using Test
using GLM: Link
using MixedModels: dataset
include("modelcache.jl")
@testset "GLMM from MixedModel" begin
f = first(gfms[:contra])
d = dataset(:contra)
@test MixedModel(f, d, Bernoulli()) isa GeneralizedLinearMixedModel
@test MixedModel(f, d, Bernoulli(), ProbitLink()) isa GeneralizedLinearMixedModel
end
@testset "Type for instance" begin
vaform = @formula(r2 ~ 1 + anger + gender + btype + situ + (1|subj) + (1|item))
verbagg = dataset(:verbagg)
@test_throws ArgumentError fit(MixedModel, vaform, verbagg, Bernoulli, LogitLink)
@test_throws ArgumentError fit(MixedModel, vaform, verbagg, Bernoulli(), LogitLink)
@test_throws ArgumentError fit(MixedModel, vaform, verbagg, Bernoulli, LogitLink())
@test_throws ArgumentError fit(GeneralizedLinearMixedModel, vaform, verbagg, Bernoulli, LogitLink)
@test_throws ArgumentError fit(GeneralizedLinearMixedModel, vaform, verbagg, Bernoulli(), LogitLink)
@test_throws ArgumentError fit(GeneralizedLinearMixedModel, vaform, verbagg, Bernoulli, LogitLink())
@test_throws ArgumentError GeneralizedLinearMixedModel(vaform, verbagg, Bernoulli, LogitLink)
@test_throws ArgumentError GeneralizedLinearMixedModel(vaform, verbagg, Bernoulli(), LogitLink)
@test_throws ArgumentError GeneralizedLinearMixedModel(vaform, verbagg, Bernoulli, LogitLink())
end
@testset "contra" begin
contra = dataset(:contra)
thin = 5
gm0 = fit(MixedModel, first(gfms[:contra]), contra, Bernoulli(); fast=true, progress=false, thin)
fitlog = gm0.optsum.fitlog
@test length(fitlog) == (div(gm0.optsum.feval, thin) + 1) # for the initial value
@test first(fitlog) == (gm0.optsum.initial, gm0.optsum.finitial)
@test gm0.lowerbd == zeros(1)
@test isapprox(gm0.θ, [0.5720734451352923], atol=0.001)
@test !issingular(gm0)
@test issingular(gm0, [0])
@test isapprox(deviance(gm0), 2361.657188518064, atol=0.001)
# the first 9 BLUPs -- I don't think there's much point in testing all 102
blups = [-0.5853637711570235, -0.9546542393824562, -0.034754249031292345, # values are the same but in different order
0.2894692928724314, 0.6381376605845264, -0.2513134928312374,
0.031321447845204374, 0.10836110432794945, 0.24632286640099466]
@test only(ranef(gm0))[1:9] ≈ blups atol=1e-4
retbl = raneftables(gm0)
@test isone(length(retbl))
@test isa(retbl, NamedTuple)
@test Tables.istable(only(retbl))
@test !dispersion_parameter(gm0)
@test dispersion(gm0, false) == 1
@test dispersion(gm0, true) == 1
@test sdest(gm0) === missing
@test varest(gm0) === missing
@test gm0.σ === missing
@test Distribution(gm0) == Distribution(gm0.resp)
@test Link(gm0) == Link(gm0.resp)
gm1 = fit(MixedModel, first(gfms[:contra]), contra, Bernoulli(); progress=false);
@test isapprox(gm1.θ, [0.573054], atol=0.005)
@test lowerbd(gm1) == vcat(fill(-Inf, 7), 0.)
@test isapprox(deviance(gm1), 2361.54575, rtol=0.00001)
@test isapprox(loglikelihood(gm1), -1180.77288, rtol=0.00001)
@test dof(gm0) == length(gm0.β) + length(gm0.θ)
@test nobs(gm0) == 1934
refit!(gm0; fast=true, nAGQ=7, progress=false)
@test isapprox(deviance(gm0), 2360.9838, atol=0.001)
gm1 = fit(MixedModel, first(gfms[:contra]), contra, Bernoulli(); nAGQ=7, progress=false)
@test isapprox(deviance(gm1), 2360.8760, atol=0.001)
@test gm1.β == gm1.beta
@test gm1.θ == gm1.theta
gm1y = gm1.y
@test length(gm1y) == size(gm1.X, 1)
@test eltype(gm1y) == eltype(gm1.X)
@test gm1y == (MixedModels.dataset(:contra).use .== "Y")
@test response(gm1) == gm1y
@test !islinear(gm1)
@test :θ in propertynames(gm0)
@testset "GLMM rePCA" begin
@test length(MixedModels.PCA(gm0)) == 1
@test length(MixedModels.rePCA(gm0)) == 1
@test length(gm0.rePCA) == 1
end
# the next three values are not well defined in the optimization
#@test isapprox(logdet(gm1), 75.7217, atol=0.1)
#@test isapprox(sum(abs2, gm1.u[1]), 48.4747, atol=0.1)
#@test isapprox(sum(gm1.resp.devresid), 2237.349, atol=0.1)
show(IOBuffer(), gm1)
show(IOBuffer(), BlockDescription(gm0))
gm_slope = fit(MixedModel, gfms[:contra][2], contra, Bernoulli(); progress=false);
@test !issingular(gm_slope)
@test issingular(gm_slope, zeros(5))
end
@testset "cbpp" begin
cbpp = dataset(:cbpp)
gm2 = fit(MixedModel, first(gfms[:cbpp]), cbpp, Binomial(); wts=float(cbpp.hsz), progress=false, init_from_lmm=[:β, :θ])
@test weights(gm2) == cbpp.hsz
@test deviance(gm2,true) ≈ 100.09585619892968 atol=0.0001
@test sum(abs2, gm2.u[1]) ≈ 9.723054788538546 atol=0.0001
@test logdet(gm2) ≈ 16.90105378801136 atol=0.001
@test isapprox(sum(gm2.resp.devresid), 73.47174762237978, atol=0.001)
@test isapprox(loglikelihood(gm2), -92.02628186840045, atol=0.001)
@test !dispersion_parameter(gm2)
@test dispersion(gm2, false) == 1
@test dispersion(gm2, true) == 1
@test sdest(gm2) === missing
@test varest(gm2) === missing
@test gm2.σ === missing
@testset "GLMM refit" begin
gm2r = deepcopy(gm2)
@test_throws ArgumentError fit!(gm2r; progress=false)
refit!(gm2r; fast=true, progress=false)
@test length(gm2r.optsum.final) == 1
@test gm2r.θ ≈ gm2.θ atol=1e-3
# swapping successes and failures to give us the same model
# but with opposite signs. healthy ≈ 1 - response(gm2r)
# but defining it in terms of the original values avoids some
# nasty floating point issues
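# worked mini-example: hsz = 10, incid = 3 gives healthy = 7/10 = 1 - 3/10,
# the exact complement of incid/hsz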
healthy = @. (cbpp.hsz - cbpp.incid) / cbpp.hsz
refit!(gm2r, healthy; fast=false, progress=false)
@test length(gm2r.optsum.final) == 5
@test gm2r.β ≈ -gm2.β atol=1e-3
@test gm2r.θ ≈ gm2.θ atol=1e-3
end
@testset "constant response" begin
cbconst = DataFrame(cbpp)
cbconst.incid = zero(cbconst.incid)
# we do construction and fitting in two separate steps to make sure
# that construction succeeds and that the ArgumentError occurs in fitting.
mcbconst = GeneralizedLinearMixedModel(first(gfms[:cbpp]), cbconst, Binomial(); wts=float(cbpp.hsz))
@test mcbconst isa GeneralizedLinearMixedModel
@test_throws ArgumentError("The response is constant and thus model fitting has failed") fit!(mcbconst; progress=false)
end
end
@testset "verbagg" begin
gm3 = fit(MixedModel, only(gfms[:verbagg]), dataset(:verbagg), Bernoulli(); progress=false)
@test deviance(gm3) ≈ 8151.40 rtol=1e-5
@test lowerbd(gm3) == vcat(fill(-Inf, 6), zeros(2))
@test fitted(gm3) == predict(gm3)
# these two values are not well defined at the optimum
@test isapprox(sum(x -> sum(abs2, x), gm3.u), 273.29646346940785, rtol=1e-3)
@test sum(gm3.resp.devresid) ≈ 7156.550941446312 rtol=1e-4
end
@testset "grouseticks" begin
center(v::AbstractVector) = v .- (sum(v) / length(v))
grouseticks = DataFrame(dataset(:grouseticks))
grouseticks.ch = center(grouseticks.height)
gm4 = fit(MixedModel, only(gfms[:grouseticks]), grouseticks, Poisson(); fast=true, progress=false)
@test isapprox(deviance(gm4), 851.4046, atol=0.001)
# these two values are not well defined at the optimum
#@test isapprox(sum(x -> sum(abs2, x), gm4.u), 196.8695297987013, atol=0.1)
#@test isapprox(sum(gm4.resp.devresid), 220.92685781326136, atol=0.1)
@test !dispersion_parameter(gm4)
@test dispersion(gm4, false) == 1
@test dispersion(gm4, true) == 1
@test sdest(gm4) === missing
@test varest(gm4) === missing
@test gm4.σ === missing
gm4slow = fit(MixedModel, only(gfms[:grouseticks]), grouseticks, Poisson(); fast=false, progress=false)
# this tolerance isn't great, but then again the optimum isn't well defined
# @test gm4.θ ≈ gm4slow.θ rtol=0.05
# @test gm4.β[2:end] ≈ gm4slow.β[2:end] atol=0.1
@test isapprox(deviance(gm4), deviance(gm4slow); rtol=0.1)
end
@testset "goldstein" begin # from a 2020-04-22 msg by Ben Goldstein to R-SIG-Mixed-Models
goldstein = (
group = PooledArray(repeat(string.('A':'J'), outer=10)),
y = [
83, 3, 8, 78, 901, 21, 4, 1, 1, 39,
82, 3, 2, 82, 874, 18, 5, 1, 3, 50,
87, 7, 3, 67, 914, 18, 0, 1, 1, 38,
86, 13, 5, 65, 913, 13, 2, 0, 0, 48,
90, 5, 5, 71, 886, 19, 3, 0, 2, 32,
96, 1, 1, 87, 860, 21, 3, 0, 1, 54,
83, 2, 4, 70, 874, 19, 5, 0, 4, 36,
100, 11, 3, 71, 950, 21, 6, 0, 1, 40,
89, 5, 5, 73, 859, 29, 3, 0, 2, 38,
78, 13, 6, 100, 852, 24, 5, 0, 1, 39
],
)
gform = @formula(y ~ 1 + (1|group))
m1 = GeneralizedLinearMixedModel(gform, goldstein, Poisson())
@test !isfitted(m1)
fit!(m1; progress=false)
@test isfitted(m1)
@test deviance(m1) ≈ 193.5587302384811 rtol=1.e-5
@test only(m1.β) ≈ 4.192196439077657 atol=1.e-5
@test only(m1.θ) ≈ 1.838245201739852 atol=1.e-5
m11 = fit(MixedModel, gform, goldstein, Poisson(); nAGQ=11, progress=false)
@test deviance(m11) ≈ 193.51028088736842 rtol=1.e-5
@test only(m11.β) ≈ 4.192196439077657 atol=1.e-5
@test only(m11.θ) ≈ 1.838245201739852 atol=1.e-5
end
@testset "dispersion" begin
form = @formula(reaction ~ 1 + days + (1+days|subj))
dat = dataset(:sleepstudy)
@test_logs (:warn, r"dispersion parameter") GeneralizedLinearMixedModel(form, dat, Gamma())
@test_logs (:warn, r"dispersion parameter") GeneralizedLinearMixedModel(form, dat, InverseGaussian())
@test_logs (:warn, r"dispersion parameter") GeneralizedLinearMixedModel(form, dat, Normal(), SqrtLink())
# notes for future tests when GLMM with dispersion works
# @test dispersion_parameter(gm)
# @test dispersion(gm, false) == val
# @test dispersion(gm, true) == val
# @test sdest(gm) == dispersion(gm, false) == gm.σ
# @test varest(gm) == dispersion(gm, true)
end
@testset "mmec" begin
# Data on "Malignant melanoma in the European community" from the mlmRev package for R
# The offset of log.(expected) is to examine the ratio of observed to expected, based on population
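# In sketch form: with the log link, log E[deaths] = log(expected) + β₀ + β₁·uvb + b_region,
# so E[deaths]/expected = exp(β₀ + β₁·uvb + b_region), i.e. the model describes the
# observed-to-expected ratio directly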
mmec = dataset(:mmec)
mmform = @formula(deaths ~ 1 + uvb + (1|region))
gm5 = fit(MixedModel, mmform, mmec, Poisson(); offset=log.(mmec.expected), nAGQ=11, progress=false)
@test isapprox(deviance(gm5), 655.2533533016059, atol=5.e-3)
@test isapprox(first(gm5.θ), 0.4121684550775567, atol=1.e-3)
@test isapprox(first(gm5.β), -0.13860166843315044, atol=1.e-3)
@test isapprox(last(gm5.β), -0.034414458364713504, atol=1.e-3)
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 1797 | using LinearAlgebra, StableRNGs, StatsModels, Test
import MixedModels: statsrank
xtx(X) = Symmetric(X'X, :U) # create the symmetric matrix X'X from X
LinearAlgebra.rank(F::LinearAlgebra.QRPivoted; tol=1e-8) = searchsortedlast(abs.(diag(F.R)), tol, rev=true);
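# sketch of the override above on a hypothetical rank-1 matrix: only one
# |diag(R)| entry exceeds the 1e-8 tolerance
#   rank(qr([1.0 2.0; 2.0 4.0], ColumnNorm())) # == 1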
const rng = StableRNG(4321234)
const simdat = (
G = repeat('A':'T', inner=10),
H = repeat('a':'e', inner=2, outer=20),
U = repeat(0.:9, outer=20),
V = repeat(-4.5:4.5, outer=20),
Y = 0.1 * randn(rng, 200),
Z = rand(rng, 200)
)
@testset "fullranknumeric" begin
mm = modelmatrix(@formula(Y ~ 1 + U), simdat)
r, pivot = statsrank(mm)
@test pivot == 1:2
end
@testset "fullrankcategorical" begin
mm = modelmatrix(@formula(Y ~ 1 + G*H), simdat)
r, pivot = statsrank(mm)
@test r == 100
@test pivot == 1:100
end
@testset "dependentcolumn" begin
mm = modelmatrix(@formula(Y ~ 1 + U + V + Z), simdat)
r, pivot = statsrank(mm)
# V is just mean-centered U
# so either V or U gets pivoted out
# perm1 = [1,2,4,3] # x86-64 OpenBLAS
# perm2 = [1,3,4,2] # Apple M1
@test r == 3
@test pivot[1] == 1 # intercept remains
@test pivot[3] == 4 # z doesn't get pivoted
@test pivot[4] in [2, 3]
end
@testset "qr missing cells" begin
mm = modelmatrix(@formula(Y ~ 1 + G*H), simdat)[5:end,:]
r, pivot = statsrank(mm)
@test r == 98
# we no longer offer ordering guarantees besides preserving
# relative order of linearly independent columns
# and trying to keep the first column in the first position
unpivoted = pivot[begin:r]
@test unpivoted == sort(unpivoted)
end
@testset "zero columns in X" begin
X = Matrix{Float64}(undef, 100, 0)
r, pivot = statsrank(X)
@test iszero(r)
@test pivot == collect(axes(X, 2))
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 30555 | using GLM # bring r2 into scope
using LinearAlgebra
using MixedModels
using PooledArrays
using Random
using SparseArrays
using Suppressor
using Statistics
using StatsModels
using Tables
using Test
using TypedTables
using MixedModels: likelihoodratiotest
@isdefined(io) || const global io = IOBuffer()
include("modelcache.jl")
@testset "LMM from MixedModel" begin
f = @formula(reaction ~ 1 + days + (1|subj))
d = MixedModels.dataset(:sleepstudy)
@test MixedModel(f, d) isa LinearMixedModel
@test MixedModel(f, d, Normal()) isa LinearMixedModel
@test MixedModel(f, d, Normal(), IdentityLink()) isa LinearMixedModel
end
@testset "offset" begin
let off = repeat([1], 180),
slp = MixedModels.dataset(:sleepstudy),
frm = @formula(reaction ~ 1 + (1|subj))
@test_throws ArgumentError fit(MixedModel, frm, slp; offset=off)
@test_throws ArgumentError fit(MixedModel, frm, slp, Normal(), IdentityLink(); offset=off)
end
end
@testset "Dyestuff" begin
fm1 = only(models(:dyestuff))
@test length(fm1.A) == 3
@test size(fm1.reterms) == (1, )
@test lowerbd(fm1) == zeros(1)
@test fm1.lowerbd == zeros(1)
@test fm1.optsum.initial == ones(1)
fm1.θ = ones(1)
@test fm1.θ == ones(1)
@test islinear(fm1)
@test responsename(fm1) == "yield"
@test meanresponse(fm1) ≈ 1527.5
@test modelmatrix(fm1) == ones(30, 1)
@test weights(fm1) == ones(30)
@test_throws ArgumentError fit!(fm1)
fm1.optsum.feval = -1
@test_logs (:warn, "Model has not been fit") show(fm1)
@test !isfitted(fm1)
@test objective(updateL!(setθ!(fm1, [0.713]))) ≈ 327.34216280955366
show(io, BlockDescription(fm1))
@test countlines(seekstart(io)) == 3
output = String(take!(io))
@test startswith(output, "rows:")
refit!(fm1; progress=false)
@test isfitted(fm1)
@test :θ in propertynames(fm1)
@test objective(fm1) ≈ 327.3270598811428 atol=0.001
@test fm1.θ ≈ [0.752580] atol=1.e-5
@test fm1.λ ≈ [LowerTriangular(reshape(fm1.θ, (1,1)))] atol=1.e-5
@test deviance(fm1) ≈ 327.32705988 atol=0.001
@test aic(fm1) ≈ 333.3270598811394 atol=0.001
@test bic(fm1) ≈ 337.5306520261259 atol=0.001
@test fixef(fm1) ≈ [1527.5]
@test dispersion_parameter(fm1)
@test first(first(fm1.σs)) ≈ 37.26034462135931 atol=0.0001
@test fm1.β ≈ [1527.5]
@test dof(fm1) == 3
@test nobs(fm1) == 30
@test MixedModels.fixef!(zeros(1),fm1) ≈ [1527.5]
@test coef(fm1) ≈ [1527.5]
fm1β = fm1.βs
@test fm1β isa NamedTuple
@test isone(length(fm1β))
@test first(values(fm1β)) ≈ 1527.5
fm1σρ = fm1.σρs
@test fm1σρ isa NamedTuple
@test isone(length(fm1σρ))
@test isone(length(getproperty(first(fm1σρ), :σ)))
@test isempty(getproperty(first(fm1σρ), :ρ))
@test fm1.σ == sdest(fm1)
@test fm1.b == ranef(fm1)
@test fm1.u == ranef(fm1, uscale=true)
@test fm1.stderror == stderror(fm1)
@test isone(length(fm1.pvalues))
@test fm1.objective == objective(fm1)
@test fm1.σ ≈ 49.510099986291145 atol=1.e-5
@test fm1.X == ones(30,1)
ds = MixedModels.dataset(:dyestuff)
@test fm1.y == ds[:yield]
@test response(fm1) == ds.yield
@test cond(fm1) == ones(1)
@test first(leverage(fm1)) ≈ 0.15650534392640486 rtol=1.e-5
@test sum(leverage(fm1)) ≈ 4.695160317792145 rtol=1.e-5
cm = coeftable(fm1)
@test length(cm.rownms) == 1
@test length(cm.colnms) == 4
@test fnames(fm1) == (:batch,)
@test response(fm1) == ds[:yield]
rfu = ranef(fm1, uscale = true)
rfb = ranef(fm1)
@test abs(sum(rfu[1])) < 1.e-5
cv = condVar(fm1)
@test length(cv) == 1
@test size(first(cv)) == (1, 1, 6)
show(IOBuffer(), fm1.optsum)
@test logdet(fm1) ≈ 8.06014522999825 atol=0.001
@test varest(fm1) ≈ 2451.2501089607676 atol=0.001
@test pwrss(fm1) ≈ 73537.50152584909 atol=0.01 # this quantity is not precisely estimated
@test stderror(fm1) ≈ [17.69455188898009] atol=0.0001
vc = VarCorr(fm1)
show(io, vc)
str = String(take!(io))
@test startswith(str, "Variance components:")
@test vc.s == sdest(fm1)
refit!(fm1; REML=true, progress=false)
@test objective(fm1) ≈ 319.65427684225216 atol=0.0001
@test_throws ArgumentError loglikelihood(fm1)
@test dof_residual(fm1) ≥ 0
print(io, fm1)
@test startswith(String(take!(io)), "Linear mixed model fit by REML")
vc = fm1.vcov
@test isa(vc, Matrix{Float64})
@test only(vc) ≈ 375.7167775 rtol=1.e-3
# since we're caching the fits, we should get it back to being correctly fitted
# we also take this opportunity to test fitlog
@testset "fitlog" begin
thin = 1
refit!(fm1; REML=false, progress=false, thin)
fitlog = fm1.optsum.fitlog
fitlogtbl = columntable(fm1.optsum)
@test length(fitlogtbl) == 3
@test keys(fitlogtbl) == (:iter, :objective, :θ)
@test length(first(fitlogtbl)) > 15 # can't be sure of exact length
@test first(fitlogtbl)[1:3] == 1:3
@test last(fitlogtbl.objective) == fm1.optsum.fmin
fitlogstackedtbl = columntable(fm1.optsum; stack=true)
@test length(fitlogstackedtbl) == 4
@test keys(fitlogstackedtbl) == (:iter, :objective, :par, :value)
d, r = divrem(length(first(fitlogstackedtbl)), length(first(fitlogtbl)))
@test iszero(r)
@test d == length(first(fitlogtbl.θ))
thin = 2
refit!(fm1; REML=false, progress=false, thin)
@test length(fitlog) == (div(fm1.optsum.feval, thin) + 1) # for the initial value
@test first(fitlog) == (fm1.optsum.initial, fm1.optsum.finitial)
end
@testset "profile" begin
dspr01 = profile(only(models(:dyestuff)))
sigma0row = only(filter(r -> r.p == :σ && iszero(r.ζ), dspr01.tbl))
@test sigma0row.σ ≈ dspr01.m.σ
@test sigma0row.β1 ≈ only(dspr01.m.β)
@test sigma0row.θ1 ≈ only(dspr01.m.θ)
end
end
@testset "Dyestuff2" begin
fm = only(models(:dyestuff2))
@test lowerbd(fm) == zeros(1)
show(IOBuffer(), fm)
@test fm.θ ≈ zeros(1)
@test objective(fm) ≈ 162.87303665382575
@test abs(std(fm)[1][1]) < 1.0e-9
@test std(fm)[2] ≈ [3.653231351374652]
@test stderror(fm) ≈ [0.6669857396443261]
@test coef(fm) ≈ [5.6656]
@test logdet(fm) ≈ 0.0
@test issingular(fm)
#### modifies the model
refit!(fm, float(MixedModels.dataset(:dyestuff)[:yield]); progress=false)
@test objective(fm) ≈ 327.3270598811428 atol=0.001
refit!(fm, float(MixedModels.dataset(:dyestuff2)[:yield]); progress=false) # restore the model in the cache
@testset "profile" begin # tests a branch in profileσs! for σ estimate of zero
dspr02 = profile(only(models(:dyestuff2)))
sigma10row = only(filter(r -> r.p == :σ1 && iszero(r.ζ), dspr02.tbl))
@test iszero(sigma10row.σ1)
sigma1tbl = Table(filter(r -> r.p == :σ1, dspr02.tbl))
@test all(≥(0), sigma1tbl.σ1)
end
end
@testset "penicillin" begin
fm = only(models(:penicillin))
@test size(fm) == (144, 1, 30, 2)
@test fm.optsum.initial == ones(2)
@test lowerbd(fm) == zeros(2)
@test objective(fm) ≈ 332.18834867227616 atol=0.001
@test coef(fm) ≈ [22.97222222222222] atol=0.001
@test fixef(fm) ≈ [22.97222222222222] atol=0.001
@test coef(fm)[1] ≈ mean(MixedModels.dataset(:penicillin).diameter)
@test stderror(fm) ≈ [0.7445960346851368] atol=0.0001
@test fm.θ ≈ [1.5375772376554968, 3.219751321180035] atol=0.001
@test first(std(fm)) ≈ [0.8455645948223015] atol=0.0001
@test std(fm)[2] ≈ [1.770647779277388] atol=0.0001
@test varest(fm) ≈ 0.3024263987592062 atol=0.0001
@test logdet(fm) ≈ 95.74614821367786 atol=0.001
cv = condVar(fm)
@test length(cv) == 2
@test size(first(cv)) == (1, 1, 24)
@test size(last(cv)) == (1, 1, 6)
@test first(first(cv)) ≈ 0.07331320237988301 rtol=1.e-4
@test last(last(cv)) ≈ 0.04051547211287544 rtol=1.e-4
cv2 = condVar(fm, :sample)
@test cv2 ≈ last(cv)
rfu = ranef(fm, uscale=true)
@test length(rfu) == 2
@test first(first(rfu)) ≈ 0.523162392717432 rtol=1.e-4
rfb = ranef(fm)
@test length(rfb) == 2
@test last(last(rfb)) ≈ -3.001823834230942 rtol=1.e-4
show(io, BlockDescription(fm))
@test countlines(seekstart(io)) == 4
tokens = Set(split(String(take!(io)), r"\s+"))
@test "Diag/Dense" in tokens
@test "Diagonal" in tokens
end
@testset "pastes" begin
fm = last(models(:pastes))
@test size(fm) == (60, 1, 40, 2)
@test fm.optsum.initial == ones(2)
@test lowerbd(fm) == zeros(2)
@test objective(fm) ≈ 247.99446586289676 atol=0.001
@test coef(fm) ≈ [60.05333333333329] atol=0.001
@test fixef(fm) ≈ [60.05333333333329] atol=0.001
@test stderror(fm) ≈ [0.6421359883527029] atol=0.0001
@test fm.θ ≈ [3.5268858714382905, 1.3299230213750168] atol=0.001
@test first(std(fm)) ≈ [2.904069002535747] atol=0.001
@test std(fm)[2] ≈ [1.095070371687089] atol=0.0001
@test std(fm)[3] ≈ [0.8234088395243269] atol=0.0001
@test varest(fm) ≈ 0.6780020742644107 atol=0.0001
@test logdet(fm) ≈ 101.0381339953986 atol=0.001
cv = condVar(fm)
@test length(cv) == 2
@test size(first(cv)) == (1, 1, 30)
@test first(first(cv)) ≈ 1.111873335663485 rtol=1.e-4
@test size(last(cv)) == (1, 1, 10)
@test last(last(cv)) ≈ 0.850428770978789 rtol=1.e-4
show(io, BlockDescription(fm))
@test countlines(seekstart(io)) == 4
tokens = Set(split(String(take!(io)), r"\s+"))
@test "Sparse" in tokens
@test "Diagonal" in tokens
lrt = likelihoodratiotest(models(:pastes)...)
@test length(lrt.deviance) == length(lrt.formulas) == length(lrt.models) == 2
@test first(lrt.tests.pvalues) ≈ 0.5233767966395597 atol=0.0001
@testset "missing variables in formula" begin
ae = ArgumentError("The following formula variables are not present in the table: [:reaction, :joy, :subj]")
@test_throws(ae,
fit(MixedModel, @formula(reaction ~ 1 + joy + (1|subj)), dataset(:pastes)))
end
end
@testset "InstEval" begin
fm1 = first(models(:insteval))
@test size(fm1) == (73421, 2, 4114, 3)
@test fm1.optsum.initial == ones(3)
@test lowerbd(fm1) == zeros(3)
spL = sparseL(fm1)
@test size(spL) == (4114, 4114)
@test 733090 < nnz(spL) < 733100
@test objective(fm1) ≈ 237721.7687745563 atol=0.001
ftd1 = fitted(fm1);
@test size(ftd1) == (73421, )
@test ftd1 == predict(fm1)
@test first(ftd1) ≈ 3.17876 atol=0.0001
resid1 = residuals(fm1);
@test size(resid1) == (73421, )
@test first(resid1) ≈ 1.82124 atol=0.00001
@testset "PCA" begin
@test length(fm1.rePCA) == 3
pca = MixedModels.PCA(fm1)
@test length(pca) == 3
@test :covcor in propertynames(first(pca))
str = String(take!(io)) # drain io so the next show starts from an empty buffer
show(io, first(pca), stddevs=true, variances=true)
str = String(take!(io))
@test !isempty(findall("Standard deviations:", str))
@test !isempty(findall("Variances:", str))
end
show(io, BlockDescription(fm1))
@test countlines(seekstart(io)) == 5
tokens = Set(split(String(take!(io)), r"\s+"))
@test "Sparse" in tokens
@test "Sparse/Dense" in tokens
@test "Diag/Dense" in tokens
fm2 = last(models(:insteval))
@test objective(fm2) ≈ 237585.5534151694 atol=0.001
@test size(fm2) == (73421, 28, 4100, 2)
end
@testset "sleep" begin
fm = last(models(:sleepstudy))
@test lowerbd(fm) == [0.0, -Inf, 0.0]
A11 = first(fm.A)
@test isa(A11, UniformBlockDiagonal{Float64})
@test isa(first(fm.L), UniformBlockDiagonal{Float64})
@test size(A11) == (36, 36)
a11 = view(A11.data, :, :, 1)
@test a11 == [10. 45.; 45. 285.]
@test size(A11.data, 3) == 18
λ = first(fm.λ)
b11 = LowerTriangular(view(first(fm.L).data, :, :, 1))
@test b11 * b11' ≈ λ'a11*λ + I rtol=1e-5
@test count(!iszero, Matrix(first(fm.L))) == 18 * 4
@test rank(fm) == 2
@test objective(fm) ≈ 1751.9393444647046
@test fm.θ ≈ [0.929221307, 0.01816838, 0.22264487096] atol=1.e-6
@test pwrss(fm) ≈ 117889.46144025437
@test logdet(fm) ≈ 73.90322021999222 atol=0.001
@test stderror(fm) ≈ [6.632257721914501, 1.5022354739749826] atol=0.0001
@test coef(fm) ≈ [251.40510484848477,10.4672859595959]
@test fixef(fm) ≈ [251.40510484848477,10.4672859595959]
@test std(fm)[1] ≈ [23.780468100188497, 5.716827903196682] atol=0.01
@test logdet(fm) ≈ 73.90337187545992 atol=0.001
@test cond(fm) ≈ [4.175251] atol=0.0001
@test loglikelihood(fm) ≈ -875.9696722323523
@test sum(leverage(fm)) ≈ 28.611525700136877 rtol=1.e-5
σs = fm.σs
@test length(σs) == 1
@test keys(σs) == (:subj,)
@test length(σs.subj) == 2
@test first(values(σs.subj)) ≈ 23.7804686 atol=0.0001
@test last(values(first(σs))) ≈ 5.7168278 atol=0.0001
@test fm.corr ≈ [1.0 -0.1375451787621904; -0.1375451787621904 1.0] atol=0.0001
u3 = ranef(fm, uscale=true)
@test length(u3) == 1
@test size(first(u3)) == (2, 18)
@test first(u3)[1, 1] ≈ 3.030300122575336 atol=0.001
cv = condVar(fm)
@test length(cv) == 1
@test size(first(cv)) == (2, 2, 18)
@test first(first(cv)) ≈ 140.96612241084617 rtol=1.e-4
@test last(last(cv)) ≈ 5.157750215432247 rtol=1.e-4
@test first(cv)[2] ≈ -20.60428045516186 rtol=1.e-4
cvt = condVartables(fm)
@test length(cvt) == 1
@test only(keys(cvt)) == :subj
cvtsubj = cvt.subj
@test only(cvt) === cvtsubj
@test keys(cvtsubj) == (:subj, :σ, :ρ)
@test Tables.istable(cvtsubj)
@test first(cvtsubj.subj) == "S308"
cvtsubjσ1 = first(cvtsubj.σ)
@test all(==(cvtsubjσ1), cvtsubj.σ)
@test first(cvtsubjσ1) ≈ 11.87291549750297 atol=1.0e-4
@test last(cvtsubjσ1) ≈ 2.271068078114843 atol=1.0e-4
cvtsubjρ = first(cvtsubj.ρ)
@test all(==(cvtsubjρ), cvtsubj.ρ)
@test only(cvtsubjρ) ≈ -0.7641347018831385 atol=1.0e-4
b3 = ranef(fm)
@test length(b3) == 1
@test size(first(b3)) == (2, 18)
@test first(first(b3)) ≈ 2.815819441982976 atol=0.001
b3tbl = raneftables(fm)
@test length(b3tbl) == 1
@test keys(b3tbl) == (:subj,)
@test isa(b3tbl, NamedTuple)
@test Tables.istable(only(b3tbl))
@testset "PosDefException from constant response" begin
slp = MixedModels.dataset(:sleepstudy)
@test_throws ArgumentError("The response is constant and thus model fitting has failed") refit!(fm, zero(slp.reaction); progress=false)
refit!(fm, slp.reaction; progress=false)
end
simulate!(fm) # to test one of the unscaledre methods
# must restore state of fm as it is cached in the global fittedmodels
slp = MixedModels.dataset(:sleepstudy)
copyto!(fm.y, slp.reaction)
updateL!(MixedModels.reevaluateAend!(fm))
@test objective(fm) ≈ 1751.9393444647046 # check the model is properly restored
fmnc = models(:sleepstudy)[2]
@test size(fmnc) == (180,2,36,1)
@test fmnc.optsum.initial == ones(2)
@test lowerbd(fmnc) == zeros(2)
sigmas = fmnc.σs
@test length(only(sigmas)) == 2
@test first(only(sigmas)) ≈ 24.171449484676224 atol=1e-4
@testset "zerocorr PCA" begin
@test length(fmnc.rePCA) == 1
@test fmnc.rePCA.subj ≈ [0.5, 1.0]
@test any(Ref(fmnc.PCA.subj.loadings) .≈ (I(2), I(2)[:, [2,1]]))
@test show(IOBuffer(), MixedModels.PCA(fmnc)) === nothing
end
@test deviance(fmnc) ≈ 1752.0032551398835 atol=0.001
@test objective(fmnc) ≈ 1752.0032551398835 atol=0.001
@test coef(fmnc) ≈ [251.40510484848585, 10.467285959595715]
@test fixef(fmnc) ≈ [251.40510484848477, 10.467285959595715]
@test stderror(fmnc) ≈ [6.707710260366577, 1.5193083237479683] atol=0.001
@test fmnc.θ ≈ [0.9458106880922268, 0.22692826607677266] atol=0.0001
@test first(std(fmnc)) ≈ [24.171449463289047, 5.799379721123582]
@test last(std(fmnc)) ≈ [25.556130034081047]
@test logdet(fmnc) ≈ 74.46952585564611 atol=0.001
ρ = first(fmnc.σρs.subj.ρ)
@test ρ === -0.0 # test that systematic zero correlations are returned as -0.0
MixedModels.likelihoodratiotest(fm, fmnc)
fmrs = fit(MixedModel, @formula(reaction ~ 1+days + (0+days|subj)), slp; progress=false);
@test objective(fmrs) ≈ 1774.080315280528 rtol=0.00001
@test fmrs.θ ≈ [0.24353985679033105] rtol=0.00001
fm_ind = models(:sleepstudy)[3]
@test objective(fm_ind) ≈ objective(fmnc)
@test coef(fm_ind) ≈ coef(fmnc)
@test fixef(fm_ind) ≈ fixef(fmnc)
@test stderror(fm_ind) ≈ stderror(fmnc)
@test fm_ind.θ ≈ fmnc.θ
@test std(fm_ind) ≈ std(fmnc)
@test logdet(fm_ind) ≈ logdet(fmnc)
# combining [ReMat{T,S1}, ReMat{T,S2}] for S1 ≠ S2
slpcat = (subj = slp.subj, days = PooledArray(string.(slp.days)), reaction = slp.reaction)
fm_cat = fit(MixedModel, @formula(reaction ~ 1+days+(1|subj)+(0+days|subj)),slpcat; progress=false)
@test fm_cat isa LinearMixedModel
σρ = fm_cat.σρs
@test σρ isa NamedTuple
@test isone(length(σρ))
@test first(keys(σρ)) == :subj
@test keys(σρ.subj) == (:σ, :ρ)
@test length(σρ.subj) == 2
@test length(first(σρ.subj)) == 10
@test length(σρ.subj.ρ) == 45
# test that there's no correlation between the intercept and days columns
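# (layout note, assumed rather than documented: ρ holds the lower triangle of the
# correlation matrix row by row, so the entries pairing column 1 with the others
# fall at positions 1 .+ cumsum(0:8) = [1, 2, 4, 7, 11, 16, 22, 29, 37])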
ρs_intercept = σρ.subj.ρ[1 .+ cumsum(0:8)]
@test all(iszero.(ρs_intercept))
# amalgamate should set these to -0.0 to indicate structural zeros
@test all(ρs_intercept .=== -0.0)
# also works without explicitly dropped intercept
fm_cat2 = fit(MixedModel, @formula(reaction ~ 1+days+(1|subj)+(days|subj)),slpcat; progress=false)
@test fm_cat2 isa LinearMixedModel
σρ = fm_cat2.σρs
@test σρ isa NamedTuple
@test isone(length(σρ))
@test first(keys(σρ)) == :subj
@test keys(σρ.subj) == (:σ, :ρ)
@test length(σρ.subj) == 2
@test length(first(σρ.subj)) == 10
@test length(σρ.subj.ρ) == 45
# test that there's no correlation between the intercept and days columns
ρs_intercept = σρ.subj.ρ[1 .+ cumsum(0:8)]
@test all(iszero.(ρs_intercept))
# amalgamate should set these to -0.0 to indicate structural zeros
@test all(ρs_intercept .=== -0.0)
@testset "diagonal λ in zerocorr" begin
# explicit zerocorr
fmzc = models(:sleepstudy)[2]
λ = first(fmzc.reterms).λ
@test λ isa Diagonal{Float64, Vector{Float64}}
# implicit zerocorr via amalgamation
fmnc = models(:sleepstudy)[3]
λ = first(fmnc.reterms).λ
@test λ isa Diagonal{Float64, Vector{Float64}}
end
@testset "disable amalgamation" begin
fm_chunky = fit(MixedModel,
@formula(reaction ~ 1 + days + (1 | subj) + (0 + days | subj)),
dataset(:sleepstudy); amalgamate=false, progress=false)
@test loglikelihood(fm_chunky) ≈ loglikelihood(models(:sleepstudy)[2])
@test length(fm_chunky.reterms) == 2
vc = sprint(show, VarCorr(fm_chunky))
@test all(occursin(vc), ["subj", "subj.2"])
end
show(io, BlockDescription(first(models(:sleepstudy))))
@test countlines(seekstart(io)) == 3
@test "Diagonal" in Set(split(String(take!(io)), r"\s+"))
show(io, BlockDescription(last(models(:sleepstudy))))
@test countlines(seekstart(io)) == 3
@test "BlkDiag" in Set(split(String(take!(io)), r"\s+"))
@testset "optsumJSON" begin
fm = last(models(:sleepstudy))
# using a IOBuffer for saving JSON
saveoptsum(seekstart(io), fm)
m = LinearMixedModel(fm.formula, MixedModels.dataset(:sleepstudy))
restoreoptsum!(m, seekstart(io))
@test loglikelihood(fm) ≈ loglikelihood(m)
@test bic(fm) ≈ bic(m)
@test coef(fm) ≈ coef(m)
fm_mod = deepcopy(fm)
fm_mod.optsum.fmin += 1
saveoptsum(seekstart(io), fm_mod)
@test_throws(ArgumentError("model m at final does not give stored fmin"),
restoreoptsum!(m, seekstart(io)))
restoreoptsum!(m, seekstart(io); atol=1)
@test m.optsum.fmin - fm.optsum.fmin ≈ 1
# using a temporary file for saving JSON
fnm = first(mktemp())
saveoptsum(fnm, fm)
m = LinearMixedModel(fm.formula, MixedModels.dataset(:sleepstudy))
restoreoptsum!(m, fnm)
@test loglikelihood(fm) ≈ loglikelihood(m)
@test bic(fm) ≈ bic(m)
@test coef(fm) ≈ coef(m)
# check restoreoptsum from older versions
m = LinearMixedModel(
@formula(reaction ~ 1 + days + (1 + days | subj)),
MixedModels.dataset(:sleepstudy),
)
iob = IOBuffer(
"""
{
"initial":[1.0,0.0,1.0],
"finitial":1784.642296192436,
"ftol_rel":1.0e-12,
"ftol_abs":1.0e-8,
"xtol_rel":0.0,
"xtol_abs":[1.0e-10,1.0e-10,1.0e-10],
"initial_step":[0.75,1.0,0.75],
"maxfeval":-1,
"maxtime":-1.0,
"feval":57,
"final":[0.9292213195402981,0.01816837807519162,0.22264487477788353],
"fmin":1751.9393444646712,
"optimizer":"LN_BOBYQA",
"returnvalue":"FTOL_REACHED",
"nAGQ":1,
"REML":false
}
"""
)
@test_logs((:warn,
r"optsum was saved with an older version of MixedModels.jl: consider resaving"),
restoreoptsum!(m, seekstart(iob)))
@test loglikelihood(fm) ≈ loglikelihood(m)
@test bic(fm) ≈ bic(m)
@test coef(fm) ≈ coef(m)
iob = IOBuffer(
"""
{
"initial":[1.0,0.0,1.0],
"finitial":1784.642296192436,
"ftol_rel":1.0e-12,
"xtol_rel":0.0,
"xtol_abs":[1.0e-10,1.0e-10,1.0e-10],
"initial_step":[0.75,1.0,0.75],
"maxfeval":-1,
"maxtime":-1.0,
"feval":57,
"final":[0.9292213195402981,0.01816837807519162,0.22264487477788353],
"fmin":1751.9393444646712,
"optimizer":"LN_BOBYQA",
"returnvalue":"FTOL_REACHED",
"nAGQ":1,
"REML":false,
"sigma":null,
"fitlog":[[[1.0,0.0,1.0],1784.642296192436]]
}
"""
)
@test_throws(ArgumentError("optsum names: [:ftol_abs] not found in io"),
restoreoptsum!(m, seekstart(iob)))
iob = IOBuffer(
"""
{
"initial":[1.0,0.0,1.0],
"finitial":1784.642296192436,
"ftol_rel":1.0e-12,
"ftol_abs":1.0e-8,
"xtol_rel":0.0,
"xtol_abs":[1.0e-10,1.0e-10,1.0e-10],
"initial_step":[0.75,1.0,0.75],
"maxfeval":-1,
"maxtime":-1.0,
"feval":57,
"final":[-0.9292213195402981,0.01816837807519162,0.22264487477788353],
"fmin":1751.9393444646712,
"optimizer":"LN_BOBYQA",
"returnvalue":"FTOL_REACHED",
"nAGQ":1,
"REML":false,
"sigma":null,
"fitlog":[[[1.0,0.0,1.0],1784.642296192436]]
}
"""
)
@test_throws(ArgumentError("initial or final parameters in io do not satisfy lowerbd"),
@suppress restoreoptsum!(m, seekstart(iob)))
# make sure new fields are correctly restored
mktemp() do path, io
m = deepcopy(last(models(:sleepstudy)))
m.optsum.xtol_zero_abs = 0.5
m.optsum.ftol_zero_abs = 0.5
saveoptsum(io, m)
m.optsum.xtol_zero_abs = 1.0
m.optsum.ftol_zero_abs = 1.0
@suppress restoreoptsum!(m, seekstart(io))
@test m.optsum.xtol_zero_abs == 0.5
@test m.optsum.ftol_zero_abs == 0.5
end
end
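# the save/restore round trip above, in sketch form (`fm` fitted, `tbl` its data):
#   saveoptsum("optsum.json", fm)
#   m = LinearMixedModel(fm.formula, tbl)
#   restoreoptsum!(m, "optsum.json")   # m now matches fm without refitting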
@testset "profile" begin
pr = @suppress profile(last(models(:sleepstudy)))
tbl = pr.tbl
@test length(tbl) >= 122
ci = confint(pr)
@test Tables.istable(ci)
@test propertynames(ci) == (:par, :estimate, :lower, :upper)
@test collect(ci.par) == [:β1, :β2, :σ, :σ1, :σ2]
@test isapprox(
ci.lower.values,
[237.681, 7.359, 22.898, 14.381, 0.0];
atol=1.e-3)
@test isapprox(
ci.upper.values,
[265.130, 13.576, 28.858, 37.718, 8.753];
atol=1.e-3)
@test first(only(filter(r -> r.p == :σ && iszero(r.ζ), pr.tbl)).σ) == last(models(:sleepstudy)).σ
@testset "REML" begin
m = refit!(deepcopy(last(models(:sleepstudy))); progress=false, REML=true)
ci = @suppress confint(profile(m))
@test all(splat(<), zip(ci.lower, ci.upper))
end
end
@testset "confint" begin
ci = confint(last(models(:sleepstudy)))
@test Tables.istable(ci)
@test isapprox(ci.lower.values, [238.4061184564825, 7.52295850741417]; atol=1.e-3)
end
end
@testset "d3" begin
fm = only(models(:d3))
@test pwrss(fm) ≈ 5.30480294295329e6 rtol=1.e-4
@test objective(fm) ≈ 884957.5540213 rtol = 1e-4
@test coef(fm) ≈ [0.4991229873, 0.31130780953] atol = 1.e-4
@test length(ranef(fm)) == 3
@test sum(leverage(fm)) ≈ 8808.00706143011 rtol = 1.e-4
show(io, BlockDescription(fm))
tokens = Set(split(String(take!(io)), r"\s+"))
@test "BlkDiag/Dense" in tokens
end
@testset "kb07" begin
global io
pca = last(models(:kb07)).PCA
@test keys(pca) == (:subj, :item)
show(io, models(:kb07)[2])
@test sum(leverage(last(models(:kb07)))) ≈ 131.28414754217545 rtol = 7.e-3
tokens = Set(split(String(take!(io)), r"\s+"))
@test "Corr." in tokens
@test "-0.89" in tokens
@testset "profile" begin
contrasts = Dict(:item => Grouping(), :subj => Grouping(), :prec => EffectsCoding(; base="maintain"),
:spkr => EffectsCoding(), :load => EffectsCoding())
kbf03 = @formula rt_trunc ~ 1+prec+spkr+load+(1+prec|item)+(1|subj)
kbpr03 = profile(fit(MixedModel, kbf03, MixedModels.dataset(:kb07); contrasts, thin=1, progress=false))
@test length(Tables.columnnames(kbpr03.tbl)) == 15
@test length(Tables.rows(kbpr03.tbl)) > 200
end
end
@testset "oxide" begin
# this model has an interesting structure with two diagonal blocks
m = first(models(:oxide))
@test isapprox(m.θ, [1.689182746, 2.98504262]; atol=1e-3)
m = last(models(:oxide))
# NB: this is a poorly defined fit
# lme4 gives all sorts of convergence warnings for the different
# optimizers and even quite different values
# the overall estimates of the standard deviations are similar-ish
# but the correlation structure seems particularly unstable
θneldermead = [1.6454, 8.6373e-02, 8.2128e-05, 8.9552e-01, 1.2014, 2.9286]
# two different BOBYQA implementations
θnlopt = [1.645, -0.221, 0.986, 0.895, 2.511, 1.169]
θminqa = [1.6455, -0.2430, 1.0160, 0.8955, 2.7054, 0.0898]
# very loose tolerance for unstable fit
# but this is a convenient test of rankUpdate!(::UniformBlockDiagonal)
@test isapprox(m.θ, θnlopt; atol=5e-2)
end
@testset "Rank deficient" begin
rng = MersenneTwister(0);
x = rand(rng, 100);
data = (x = x, x2 = 1.5 .* x, y = rand(rng, 100), z = repeat('A':'T', 5))
model = @suppress fit(MixedModel, @formula(y ~ x + x2 + (1|z)), data; progress=false)
@test length(fixef(model)) == 2
@test rank(model) == 2
@test length(coef(model)) == 3
ct = coeftable(model)
@test ct.rownms == ["(Intercept)", "x", "x2"]
@test length(fixefnames(model)) == 2
@test coefnames(model) == ["(Intercept)", "x", "x2"]
piv = model.feterm.piv
r = model.feterm.rank
@test coefnames(model)[piv][1:r] == fixefnames(model)
end
@testset "coeftable" begin
ct = coeftable(only(models(:dyestuff)));
@test [3,4] == [ct.teststatcol, ct.pvalcol]
end
@testset "wts" begin
# example from https://github.com/JuliaStats/MixedModels.jl/issues/194
data = (
a = [1.55945122,0.004391538,0.005554163,-0.173029772,4.586284429,0.259493671,-0.091735715,5.546487603,0.457734831,-0.030169602],
b = [0.24520519,0.080624178,0.228083467,0.2471453,0.398994279,0.037213859,0.102144973,0.241380251,0.206570975,0.15980803],
c = PooledArray(["H","F","K","P","P","P","D","M","I","D"]),
w1 = [20,40,35,12,29,25,65,105,30,75],
w2 = [0.04587156,0.091743119,0.080275229,0.027522936,0.066513761,0.05733945,0.149082569,0.240825688,0.068807339,0.172018349],
)
#= no need to fit yet another model without weights, but here are the reference values from lme4
m1 = fit(MixedModel, @formula(a ~ 1 + b + (1|c)), data; progress=false)
@test m1.θ ≈ [0.0]
@test stderror(m1) ≈ [1.084912, 4.966336] atol = 1.e-4
@test vcov(m1) ≈ [1.177035 -4.802598; -4.802598 24.664497] atol = 1.e-4
=#
m2 = fit(MixedModel, @formula(a ~ 1 + b + (1|c)), data; wts = data.w1, progress=false)
@test m2.θ ≈ [0.295181729258352] atol = 1.e-4
@test stderror(m2) ≈ [0.9640167, 3.6309696] atol = 1.e-4
@test vcov(m2) ≈ [0.9293282 -2.557527; -2.5575267 13.183940] atol = 1.e-4
end
@testset "unifying ReMat eltypes" begin
sleepstudy = MixedModels.dataset(:sleepstudy)
re = LinearMixedModel(@formula(reaction ~ 1 + days + (1|subj) + (days|subj)), sleepstudy).reterms
# make sure that the eltypes are still correct
# otherwise this test isn't checking what it should be
@test eltype(sleepstudy.days) == Int8
@test eltype(sleepstudy.reaction) == Float64
# use explicit typeof() and == is to remind us that things may break
# if we change things and don't check their type implications now
# that we're starting to support a non trivial type hierarchy
@test typeof(re) == Vector{AbstractReMat{Float64}}
end
@testset "recovery from misscaling" begin
model = fit(MixedModel,
@formula(reaction ~ 1 + days + zerocorr(1+fulldummy(days)|subj)),
MixedModels.dataset(:sleepstudy);
progress=false,
contrasts=Dict(:days => HelmertCoding(),
:subj => Grouping()))
fm1 = MixedModels.unfit!(deepcopy(model))
fm1.optsum.initial .*= 1e8
@test_logs (:info, r"Initial objective evaluation failed") (:warn, r"Failure of the initial ") fit!(fm1; progress=false)
@test objective(fm1) ≈ objective(model) rtol=0.1
# it would be great to test the handling of PosDefException after the first iteration
# but this is surprisingly hard to trigger in a reliable way across platforms
# just because of the vagaries of floating point.
end
@testset "methods we don't define" begin
m = first(models(:sleepstudy))
for f in [r2, adjr2]
@test_logs (:error,) @test_throws MethodError f(m)
end
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 7775 | using DataFrames
using LinearAlgebra
using MixedModels
using Random
using Suppressor
using StableRNGs
using Tables
using Test
using GLM: Link, linkfun, linkinv
using MixedModels: dataset
include("modelcache.jl")
@testset "simulate[!](::AbstractVector)" begin
@testset "LMM" begin
slp = DataFrame(dataset(:sleepstudy))
m = first(models(:sleepstudy))
mc = deepcopy(m)
fit!(simulate!(StableRNG(42), mc); progress=false)
@test simulate(StableRNG(42), m) ≈ mc.y
y = similar(mc.y)
@test simulate!(StableRNG(42), y, m) ≈ mc.y
@test y ≈ mc.y
@test simulate(StableRNG(42), m, slp) ≈ y
slptop = first(slp, 90)
@test simulate(StableRNG(42), m, slptop) ≈ simulate(StableRNG(42), m, slptop; β=m.β, θ=m.θ, σ=m.σ)
# test of methods using default RNG
rng = deepcopy(Random.GLOBAL_RNG)
@test length(simulate(m, slptop)) == nrow(slptop)
@test length(simulate!(y, m, slptop)) == nrow(slptop)
end
@testset "GLMM" begin
contra = DataFrame(dataset(:contra))
m = fit(MixedModel, first(gfms[:contra]), contra, Bernoulli(); fast=true,
contrasts=Dict(:urban => EffectsCoding()), progress=false)
mc = deepcopy(m)
fit!(simulate!(StableRNG(42), mc); progress=false)
@test simulate(StableRNG(42), m) ≈ mc.y
y = similar(mc.y)
@test simulate!(StableRNG(42), y, m) ≈ mc.y
@test y ≈ mc.y
@test length(simulate!(StableRNG(42), y, m, contra)) == length(mc.y)
end
end
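# sketch of the API exercised above (`m` a fitted LinearMixedModel, `rng::AbstractRNG`):
#   y = simulate(rng, m)                        # fresh response vector at the estimates
#   simulate!(rng, y, m; β=m.β, σ=m.σ, θ=m.θ)   # in place, optionally with new parameters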
@testset "predict" begin
@testset "single obs" begin
kb07 = DataFrame(dataset(:kb07))
m = models(:kb07)[1]
@test only(predict(m, kb07[1:1, :])) ≈ first(fitted(m))
end
slp = DataFrame(dataset(:sleepstudy))
slp2 = transform(slp, :subj => ByRow(x -> (x == "S308" ? "NEW" : x)) => :subj)
slpm = allowmissing(slp, :reaction)
@testset "LMM" for m in models(:sleepstudy)[[begin,end]]
# these currently use approximate equality
# because of floating point, but realistically
# this should be exactly equal in most cases
@test predict(m) ≈ fitted(m)
@test predict(m, slp; new_re_levels=:error) ≈ fitted(m)
@test predict(m, slp; new_re_levels=:population) ≈ fitted(m)
@test predict(m, slp; new_re_levels=:missing) ≈ fitted(m)
@test_throws ArgumentError predict(m, slp2; new_re_levels=:error)
ymissing = predict(m, slp2; new_re_levels=:missing)
@test count(ismissing, ymissing) == 10
@test ymissing[11:end] ≈ fitted(m)[11:end]
ypop = predict(m, slp2; new_re_levels=:population)
@test ypop[1:10] ≈ view(m.X, 1:10, :) * m.β
@test ypop[11:end] ≈ fitted(m)[11:end]
@test_throws ArgumentError predict(m, slp[:, Not(:reaction)])
copyto!(slpm.reaction, slp.reaction)
slpm[1, :reaction] = missing
@test_throws ArgumentError predict(m, slpm)
fill!(slpm.reaction, missing)
@test_throws ArgumentError predict(m, slpm)
end
@testset "rank deficiency" begin
@testset "in original fit" begin
refvals = predict(first(models(:sleepstudy)), slp)
slprd = transform(slp, :days => ByRow(x -> 2x) => :days2)
m = @suppress fit(MixedModel, @formula(reaction ~ 1 + days + days2 + (1|subj)), slprd; progress=false)
# predict assumes that X is the correct length and stored pivoted
# so these first two tests will fail if we change that storage detail
@test size(m.X) == (180, 3)
@test all(2 .* view(m.X, :, m.feterm.piv[2]) .== view(m.X, :, m.feterm.piv[3]))
@test @suppress predict(m, slprd) == refvals
slprd0 = transform(slp, :days => zero => :days0)
m = @suppress fit(MixedModel, @formula(reaction ~ 1 + days0 + days + (1|subj)), slprd0; progress=false)
@test @suppress predict(m, slprd0) == refvals
# change the formula order slightly so that the original ordering and hence the
# permutation vector for pivoting isn't identical
m = @suppress fit(MixedModel, @formula(reaction ~ 1 + days + days0 + (1|subj)), slprd0; progress=false)
@test @suppress predict(m, slprd0) == refvals
end
@testset "in newdata" begin
mref = first(models(:sleepstudy))
# remove days
refvals = fitted(mref) .- view(mref.X, :, 2) * mref.β[2]
slp0 = transform(slp, :days => zero => :days)
vals = @suppress predict(mref, slp0)
@test all(refvals .≈ vals)
end
@testset "in both" begin
# now what happens when old and new are rank deficient
mref = first(models(:sleepstudy))
# remove days
refvals = fitted(mref) .- view(mref.X, :, 2) * mref.β[2]
# days gets pivoted out
slprd = transform(slp, :days => ByRow(x -> 2x) => :days2)
m = @suppress fit(MixedModel, @formula(reaction ~ 1 + days + days2 + (1|subj)), slprd; progress=false)
# days2 gets pivoted out
slp0 = transform(slp, :days => zero => :days2)
vals = @suppress predict(m, slp0)
# but in the original fit, days gets pivoted out, so its coef is zero
# and now we have a column of zeros for days2
# leaving us with only the intercept
# this is consistent behavior
@test all(refvals .≈ vals)
slp1 = transform(slp, :days => ByRow(one) => :days2)
vals = @suppress predict(m, slp1)
refvals = fitted(mref) .- view(mref.X, :, 2) * mref.β[2] .+ last(fixef(m))
@test all(refvals .≈ vals)
end
end
@testset "transformed response" begin
slp1 = subset(slp, :days => ByRow(>(0)))
# this model probably doesn't make much sense, but it has two
# variables on the left hand side in a FunctionTerm
m = @suppress fit(MixedModel, @formula(reaction / days ~ 1 + (1|subj)), slp1)
# make sure that we're getting the transformation
@test response(m) ≈ slp1.reaction ./ slp1.days
@test_throws ArgumentError predict(m, slp[:, Not(:reaction)])
# these currently use approximate equality
# because of floating point, but realistically
# this should be exactly equal in most cases
@test predict(m) ≈ fitted(m)
@test predict(m, slp1) ≈ fitted(m)
m = @suppress fit(MixedModel, @formula(log10(reaction) ~ 1 + days + (1|subj)), slp1)
# make sure that we're getting the transformation
@test response(m) ≈ log10.(slp1.reaction)
@test_throws ArgumentError predict(m, slp[:, Not(:reaction)])
# these currently use approximate equality
# because of floating point, but realistically
# this should be exactly equal in most cases
@test predict(m) ≈ fitted(m)
@test predict(m, slp1) ≈ fitted(m)
end
@testset "GLMM" begin
contra = dataset(:contra)
for fast in [true, false]
gm0 = fit(MixedModel, first(gfms[:contra]), contra, Bernoulli(); fast, progress=false)
@test_throws ArgumentError predict(gm0, contra; type=:doh)
# we can skip a lot of testing if the broad strokes work because
# internally this is punted off to the same machinery as LMM
@test predict(gm0) ≈ fitted(gm0)
# XXX these tolerances aren't great but are required for fast=false fits
@test predict(gm0, contra; type=:linpred) ≈ gm0.resp.eta rtol=0.1
@test predict(gm0, contra; type=:response) ≈ gm0.resp.mu rtol=0.01
end
end
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 1255 | using Aqua
using ExplicitImports
using GLM
using MixedModels
using Test
import InteractiveUtils: versioninfo
import LinearAlgebra: BLAS
using Base: splat # necessary for Julia 1.8 compat
# there seem to be processor-specific issues and knowing this is helpful
@info sprint(versioninfo)
@info BLAS.get_config()
@testset "Aqua" begin
# we can't check for unbound type parameters
# because we actually need one at one point for _same_family()
Aqua.test_all(MixedModels; ambiguities=false, unbound_args=false,
# XXX TODO: upstream this piracy
piracies=(;treat_as_own=[GLM.wrkresp!, Base.:|]))
end
@testset "ExplicitImports" begin
@test check_no_implicit_imports(MixedModels) === nothing
@test check_no_stale_explicit_imports(MixedModels) === nothing
end
include("utilities.jl")
include("misc.jl")
include("pivot.jl")
include("UniformBlockDiagonal.jl")
include("linalg.jl")
include("matrixterm.jl")
include("FactorReTerm.jl")
include("grouping.jl")
include("pls.jl")
include("pirls.jl")
include("gausshermite.jl")
include("fit.jl")
include("missing.jl")
include("likelihoodratiotest.jl")
include("bootstrap.jl")
include("mime.jl")
include("optsummary.jl")
include("predict.jl")
include("sigma.jl")
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 1566 | using MixedModels
using Test
using StableRNGs
@testset "fixed sigma" begin
σ = 3
n = 100
dat = (; x = ones(n),
z = collect(1:n),
y = σ*randn(StableRNG(42), n))
fmσ1 = fit(MixedModel, @formula(y ~ 0 + (1|z)), dat;
contrasts=Dict(:z => Grouping()),
σ=1)
@test isempty(fixef(fmσ1))
# verify that we report the exact value requested
@test fmσ1.σ == 1
# verify that the constraint actually worked
@test pwrss(fmσ1) / nobs(fmσ1) ≈ 1.0
@test only(fmσ1.θ) ≈ σ atol=0.1
fmσ1 = fit(MixedModel, @formula(y ~ 0 + (1|z)), dat;
contrasts=Dict(:z => Grouping()),
σ=3.14)
@test isempty(fixef(fmσ1))
# verify that we report the exact value requested
@test fmσ1.σ == 3.14
# verify that the constraint actually worked
@test pwrss(fmσ1) / nobs(fmσ1) ≈ 3.14^2 atol=0.5
# the shrinkage forces θ to zero: with σ fixed at 3.14, at least as large as the
# total SD of y (≈3), essentially no variance is left for the random effect
@test only(fmσ1.θ) ≈ 0 atol=0.1
end
# support for specifying sigma was added to allow fitting meta-analytic models;
# the example from metafor that doesn't work with lme4 and R-based nlme
# can be done here!
# https://www.metafor-project.org/doku.php/tips:rma_vs_lm_lme_lmer
#
# using RCall
# using MixedModels
# R"""
# library(metafor)
# dat <- escalc(measure="ZCOR", ri=ri, ni=ni, data=dat.molloy2014)
# dat$study <- 1:nrow(dat)
# """
# @rget dat
# fit(MixedModel, @formula(yi ~ 1 + (1 | study)), dat;
# wts=1 ./ dat.vi,
# REML=true,
# contrasts=Dict(:study => Grouping()),
# σ=1)
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
|
[
"MIT"
] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | code | 2257 | using LinearAlgebra
using MixedModels
using StableRNGs
using SparseArrays
using Test
using MixedModels: isconstant, average, densify, dataset
using StatsModels: FormulaTerm
@isdefined(io) || const global io = IOBuffer()
include("modelcache.jl")
@testset "average" begin
@test average(1.1, 1.2) == 1.15
end
@testset "densify" begin
@test densify(sparse(1:5, 1:5, ones(5))) == Diagonal(ones(5))
rsparsev = SparseVector(float.(rand(StableRNG(123454321), Bool, 20)))
@test densify(rsparsev) == Vector(rsparsev)
@test densify(Diagonal(rsparsev)) == Diagonal(Vector(rsparsev))
end
@testset "isconstant" begin
@test isconstant((true, true, true))
@test isconstant([true, true, true])
@test !isconstant((true, false, true))
@test !isconstant([true, false, true])
@test !isconstant(collect(1:4))
@test isconstant((false, false, false))
@test isconstant([false, false, false])
@test isconstant(ones(3))
@test isconstant(1, 1, 1)
# equality of arrays with broadcasting
@test isconstant(["(Intercept)", "days"], ["(Intercept)", "days"])
# arrays or tuples with missing values
@test !isconstant([missing, 1])
@test isconstant(Int[])
@test isconstant(Union{Int,Missing}[missing, missing, missing])
end
@testset "replicate" begin
@test_logs (:warn, r"use_threads is deprecated") replicate(string, 1; use_threads=true)
@test_logs (:warn, r"hide_progress") replicate(string, 1; hide_progress=true)
end
@testset "datasets" begin
@test isa(MixedModels.datasets(), Vector{String})
@test length(MixedModels.dataset(:dyestuff)) == 2
@test length(MixedModels.dataset("dyestuff")) == 2
dyestuff = MixedModels.dataset(:dyestuff);
@test keys(dyestuff) == [:batch, :yield]
@test length(dyestuff.batch) == 30
@test_throws ArgumentError MixedModels.dataset(:foo)
end
@testset "PCA" begin
io = IOBuffer()
pca = models(:kb07)[3].PCA.item
show(io, pca, covcor=true, loadings=false)
str = String(take!(io))
@test !isempty(findall("load: yes", str))
show(io, pca, covcor=false, loadings=true)
str = String(take!(io))
@test !isempty(findall("PC1", str))
@test !isempty(findall("load: yes", str))
end
@testset "formula" begin
@test formula(first(models(:sleepstudy))) isa FormulaTerm
@test formula(first(models(:contra))) isa FormulaTerm
end
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
["MIT"] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | docs | 32473 |
MixedModels v4.26.1 Release Notes
==============================
- lower and upper edges of profile confidence intervals for REML-fitted models are no longer flipped [#785]
MixedModels v4.26.0 Release Notes
==============================
- `issingular` now accepts comparison tolerances through the keyword arguments `atol` and `rtol`. [#783]
MixedModels v4.25.4 Release Notes
==============================
- Added additional precompilation for rePCA. [#749]
MixedModels v4.25.3 Release Notes
==============================
- Fix a bug in the handling of rank deficiency in the `simulate[!]` code. This has important correctness implications for bootstrapping models with rank-deficient fixed effects (as can happen in the case of partial crossing of the fixed effects / missing cells). [#778]
MixedModels v4.25.2 Release Notes
==============================
- Use `public` keyword so that users don't see unnecessary docstring warnings on 1.11+. [#776]
- Fix accidental export of `dataset` and `datasets` and make them `public` instead. [#776]
MixedModels v4.25.1 Release Notes
==============================
- Use more sophisticated checks on property names in `restoreoptsum` to allow for optsums saved by pre-v4.25 versions to be used with this version and later. [#775]
MixedModels v4.25 Release Notes
==============================
- Add type notations in `pwrss(::LinearMixedModel)` and `logdet(::LinearMixedModel)` to enhance type inference. [#773]
- Take advantage of type parameter for `StatsAPI.weights(::LinearMixedModel{T})`. [#772]
- Fix use of kwargs in `fit!((::LinearMixedModel)`: [#772]
- user-specified `σ` is actually used, defaulting to existing value
- `REML` defaults to model's already specified REML value.
- Clean up code of keyword convenience constructor for `OptSummary`. [#772]
- Refactor thresholding parameters for forcing near-zero parameter values into `OptSummary`. [#772]
MixedModels v4.24.1 Release Notes
==============================
- Re-export accidentally dropped export `lrtest`. [#769]
MixedModels v4.24.0 Release Notes
==============================
* Properties for `GeneralizedLinearMixedModel` now default to delegation to the internal weighted `LinearMixedModel` when that property is not explicitly handled by `GeneralizedLinearMixedModel`. Previously, properties were delegated on an explicit basis, which meant that they had to be added manually as use cases were discovered. The downside to the new approach is that it is now possible to access properties whose definition in the LMM case doesn't match the GLMM definition when the GLMM definition hasn't been explicitly implemented. [#767]
MixedModels v4.23.1 Release Notes
==============================
* Fix for `simulate!` when only the estimable coefficients for a rank-deficient model are provided. [#756]
* Improve handling of rank deficiency in GLMM. [#756]
* Fix display of GLMM bootstrap without a dispersion parameter. [#756]
MixedModels v4.23.0 Release Notes
==============================
* Support for rank deficiency in the parametric bootstrap. [#755]
MixedModels v4.22.5 Release Notes
==============================
* Use `muladd` where possible to enable fused multiply-add (FMA) on architectures with hardware support. FMA will generally improve computational speed and gives more accurate rounding. [#740]
* Replace broadcasted lambda with explicit loop and use `one`. This may result in a small performance improvement. [#738]
MixedModels v4.22.4 Release Notes
==============================
* Switch to explicit imports from all included packages (i.e. replace `using Foo` by `using Foo: Foo, bar, baz`) [#748]
* Reset parameter values before a `deepcopy` in a test (doesn't change test result) [#744]
MixedModels v4.22.3 Release Notes
==============================
* Comment out calls to `@debug` [#733]
* Update package versions in compat and change `Aqua.test_all` argument name [#733]
MixedModels v4.22.0 Release Notes
==============================
* Support for equal-tail confidence intervals for `MixedModelBootstrap`. [#715]
* Basic `show` methods for `MixedModelBootstrap` and `MixedModelProfile`. [#715]
* The `hide_progress` keyword argument to `parametricbootstrap` is now deprecated. Users should instead use `progress` (which is consistent with e.g. `fit`). [#717]
MixedModels v4.21.0 Release Notes
==============================
* Auto apply `Grouping()` to grouping variables that don't already have an explicit contrast. [#652]
MixedModels v4.20.0 Release Notes
==============================
* The `.tbl` property of a `MixedModelBootstrap` now includes the correlation parameters for lower triangular elements of the `λ` field. [#702]
MixedModels v4.19.0 Release Notes
==============================
* New method `StatsAPI.coefnames(::ReMat)` returns the coefficient names associated with each grouping factor. [#709]
MixedModels v4.18.0 Release Notes
==============================
* More user-friendly error messages when a formula contains variables not in the data. [#707]
MixedModels v4.17.0 Release Notes
==============================
* **EXPERIMENTAL** New kwarg `amalgamate` can be used to disable amalgation of random effects terms sharing a single grouping variable. Generally, `amalgamate=false` will result in a slower fit, but may improve convergence in some pathological cases. Note that this feature is experimental and changes to it are **not** considered breaking. [#673]
* More informative error messages when passing a `Distribution` or `Link` type instead of the desired instance. [#698]
* More informative error message on the intentional decision not to define methods for the coefficient of determination. [#698]
* **EXPERIMENTAL** Return `finitial` when PIRLS drifts into a portion of the parameter space that yields a (numerically) invalid covariance matrix. This recovery strategy may be removed in a future release. [#616]
MixedModels v4.16.0 Release Notes
==============================
* Support for check tolerances in deserialization. [#703]
MixedModels v4.15.0 Release Notes
==============================
* Support for different optimization criteria during the bootstrap. [#694]
* Support for combining bootstrap results with `vcat`. [#694]
* Support for saving and restoring bootstrap replicates with `savereplicates` and `restorereplicates`. [#694]
MixedModels v4.14.0 Release Notes
==============================
* New function `profile` for computing likelihood profiles for `LinearMixedModel`. The resultant `MixedModelProfile` can be then be used for computing confidence intervals with `confint`. Note that this API is still somewhat experimental and as such the internal storage details of `MixedModelProfile` may change in a future release without being considered breaking. [#639]
* A `confint(::LinearMixedModel)` method has been defined that returns Wald confidence intervals based on the z-statistic, i.e. treating the denominator degrees of freedom as infinite. [#639]
MixedModels v4.13.0 Release Notes
==============================
* `raneftables` returns a `NamedTuple` where the names are the grouping factor names and the values are some `Tables.jl`-compatible type. This type has been changed to a `Table` from `TypedTables.jl`. [#682]
MixedModels v4.12.1 Release Notes
==============================
* Precompilation is now handled with `PrecompileTools` instead of `SnoopPrecompile`. [#681]
* An unnecessary explicit `Vararg` in an internal method has been removed. This removal eliminates a compiler warning about the deprecated `Vararg` pattern. [#680]
MixedModels v4.12.0 Release Notes
==============================
* The pirated method `Base.:/(a::AbstractTerm, b::AbstractTerm)` is no longer defined. This does not impact the use of `/` as a nesting term in `@formula` within MixedModels, only the programmatic runtime construction of formula, e.g. `term(:a) / term(:b)`. If you require `Base.:/`, then [`RegressionFormulae.jl`](https://github.com/kleinschmidt/RegressionFormulae.jl) provides this method. (Avoiding method redefinition when using `RegressionFormulae.jl` is the motivating reason for this change.) [#677]
MixedModels v4.11.0 Release Notes
==============================
* `raneftables` returns a `NamedTuple` where the names are the grouping factor names and the values are some `Tables.jl`-compatible type. Currently this type is a `DictTable` from `TypedTables.jl`. [#634]
MixedModels v4.10.0 Release Notes
==============================
* Rank deficiency in prediction is now supported, both when the original model was fit to rank-deficient data and when the new data are rank deficient. The behavior is consistent but may be surprising when both old and new data are rank deficient. See the `predict` docstring for an example. [#676]
* Multithreading in `parametricbootstrap` with `use_threads` is now deprecated and a noop. With improvements in BLAS threading, multithreading at the Julia level did not help performance and sometimes hurt it. [#674]
MixedModels v4.9.0 Release Notes
==============================
* Support `StatsModels` 0.7, drop support for `StatsModels` 0.6. [#664]
* Revise code in benchmarks to work with recent Julia and PkgBenchmark.jl [#667]
* Julia minimum compat version raised to 1.8 because of BSplineKit [#665]
MixedModels v4.8.2 Release Notes
==============================
* Use `SnoopPrecompile` for better precompilation performance. This can dramatically increase TTFX, especially on Julia 1.9. [#663]
MixedModels v4.8.1 Release Notes
==============================
* Don't fit a GLM internally during construction of GLMM when the fixed effects are empty (better compatibility with
`dropcollinear` kwarg in newer GLM.jl) [#657]
MixedModels v4.8.0 Release Notes
==============================
* Allow predicting from a single observation, as long as `Grouping()` is used for the grouping variables. The simplified implementation of `Grouping()` also removes several now unnecessary `StatsModels` methods that should not have been called directly by the user. [#653]
MixedModels v4.7.3 Release Notes
==============================
* More informative error message for formulae lacking random effects [#651]
MixedModels v4.7.2 Release Notes
==============================
* Replace separate calls to `copyto!` and `scaleinflate!` in `updateL!` with `copyscaleinflate!` [#648]
MixedModels v4.7.1 Release Notes
==============================
* Avoid repeating initial objective evaluation in `fit!` method for `LinearMixedModel`
* Ensure that the number of function evaluations from NLopt corresponds to `length(m.optsum.fitlog)` when `isone(thin)`. [#637]
MixedModels v4.7.0 Release Notes
==============================
* Relax type restriction for filename in `saveoptsum` and `restoreoptsum!`. Users can now pass any type with an appropriate `open` method, e.g. `<:AbstractPath`. [#628]
MixedModels v4.6.5 Release Notes
========================
* Attempt recovery when the initial parameter values lead to an invalid covariance matrix by rescaling [#615]
* Return `finitial` when the optimizer drifts into a portion of the parameter space that yields a (numerically) invalid covariance matrix [#615]
MixedModels v4.6.4 Release Notes
========================
* Support transformed responses in `predict` [#614]
* Simplify printing of BLAS configuration in tests. [#597]
MixedModels v4.6.3 Release Notes
========================
* Add precompile statements to speed up first `LinearMixedModel` and Bernoulli `GeneralizedLinearModel` fit [#608]
MixedModels v4.6.2 Release Notes
========================
* Efficiency improvements in `predict`, both in memory and computation [#604]
* Changed the explanation of `predict`'s keyword argument `new_re_levels` in a way that is clearer about the behavior when there are multiple grouping variables. [#603]
* Fix the default behavior of `new_re_levels=:missing` to match the docstring. Previously, the default was `:population`, in disagreement with the docstring. [#603]
MixedModels v4.6.1 Release Notes
========================
* Loosen type restriction on `shortestcovint(::MixedModelBootstrap)` to `shortestcovint(::MixedModelFitCollection)`. [#598]
MixedModels v4.6.0 Release Notes
========================
* Experimental support for initializing `GeneralizedLinearMixedModel` fits from a linear mixed model instead of a marginal (non-mixed) generalized linear model. [#588]
MixedModels v4.5.0 Release Notes
========================
* Allow constructing a `GeneralizedLinearMixedModel` with constant response, but don't update the ``L`` matrix nor initialize its deviance. This allows for the model to still be used for simulation where the response will be changed before fitting. [#578]
* Catch `PosDefException` during the first optimization step and throw a more informative `ArgumentError` if the response is constant. [#578]
MixedModels v4.4.1 Release Notes
========================
* Fix type parameterization in `MixedModelBootstrap` to support models with a mixture of correlation structures (i.e. `zerocorr` in some but not all RE terms) [#577]
MixedModels v4.4.0 Release Notes
========================
* Add a constructor for the abstract type `MixedModel` that delegates to `LinearMixedModel` or `GeneralizedLinearMixedModel`. [#572]
* Compat for Arrow.jl 2.0 [#573]
MixedModels v4.3.0 Release Notes
========================
* Add support for storing bootstrap results with lower precision [#566]
* Improved support for zerocorr models in the bootstrap [#570]
MixedModels v4.2.0 Release Notes
========================
* Add support for zerocorr models to the bootstrap [#561]
* Add a `Base.length(::MixedModelFitCollection)` method [#561]
MixedModels v4.1.0 Release Notes
========================
* Add support for specifying a fixed value of `σ`, the residual standard deviation,
in `LinearMixedModel`. `fit` takes a keyword-argument `σ`. `fit!` does not expose `σ`,
but `σ` can be changed after model construction by setting `optsum.sigma`. [#551]
* Add support for logging the non-linear optimizer's steps via a `thin`
keyword-argument for `fit` and `fit!`. The default behavior is 'maximal' thinning,
such that only the initial and final values are stored. `OptSummary` has a new field
`fitlog` that contains the aforementioned log as a vector of tuples of parameter and
objective values.[#552]
* Faster version of `leverage` for `LinearMixedModel` allowing for experimentation
using the sum of the leverage values as an empirical degrees of freedom for the
model. [#553], see also [#535]
* Optimized version of `condVar` with an additional method for extracting only the
conditional variances associated with a single grouping factor. [#545]
MixedModels v4.0.0 Release Notes
========================
* Drop dependency on `BlockArrays` and use a `Vector` of matrices to represent
the lower triangle in packed, row-major order. The non-exported function `block`
can be used for finding the corresponding `Vector` index of a block. [#456]
* `simulate!` now marks the modified model as being unfitted.
* Deprecated and unused `named` argument removed from `ranef` [#469]
* Introduce an abstract type for collections of fits `MixedModelFitCollection`,
and make `MixedModelBootstrap` a subtype of it. Accordingly, rename the `bstr`
field to `fits`. [#465]
* The response (dependent variable) is now stored internally as part of the
the renamed `FeMat` field, now called `Xymat` [#464]
* Replace internal `statscholesky` and `statsqr` functions for determining the
rank of `X` by `statsrank`. [#479]
* Artifacts are now loaded lazily: the test data loaded via `dataset` is
downloaded on first use [#486]
* `ReMat` and `PCA` now support covariance factors (`λ`) that are `LowerTriangular`
or `Diagonal`. This representation is both more memory efficient and
enables additional computational optimizations for particular covariance
structures.[#489]
* `GeneralizedLinearMixedModel` now includes the response distribution as one
of its type parameters. This will allow dispatching on the model family and may allow
additional specialization in the future.[#490]
* `saveoptsum` and `restoreoptsum!` provide for saving and restoring the `optsum`
field of a `LinearMixedModel` as a JSON file, allowing for recreating a model fit
that may take a long time for parameter optimization. [#506]
* Verbose output now uses `ProgressMeter`, which gives useful information about the timing
of each iteration and does not flood stdio. The `verbose` argument has been renamed `progress`
and the default changed to `true`. [#539]
* Support for Julia < 1.6 has been dropped. [#539]
* New `simulate`, `simulate!` and `predict` methods for simulating and
predicting responses to new data. [#427]
Run-time formula syntax
-----------------------
* It is now possible to construct `RandomEffectsTerm`s at run-time from `Term`s
(methods for `Base.|(::AbstractTerm, ::AbstractTerm)` added) [#470]
* `RandomEffectsTerm`s can have left- and right-hand side terms that are
"non-concrete", and `apply_schema(::RandomEffectsTerm, ...)` works more like
other StatsModels.jl `AbstractTerm`s [#470]
* Methods for `Base./(::AbstractTerm, ::AbstractTerm)` are added, allowing
  nesting syntax to be used with `Term`s at run-time as well (a brief sketch follows) [#470]
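
A minimal sketch of this run-time construction (illustrative only; the variable
names are not from the notes above):

```julia
using StatsModels, MixedModels

# programmatic equivalent of @formula(reaction ~ 1 + days + (1 + days | subj))
f = term(:reaction) ~ term(1) + term(:days) + ((term(1) + term(:days)) | term(:subj))
```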
MixedModels v3.9.0 Release Notes
========================
* Add support for `StatsModels.formula` [#536]
* Internal method `allequal` renamed to `isconstant` [#537]
MixedModels v3.8.0 Release Notes
========================
* Add support for NLopt `maxtime` option to `OptSummary` [#524]
MixedModels v3.7.1 Release Notes
========================
* Add support for `condVar` for models with a BlockedSparse structure [#523]
MixedModels v3.7.0 Release Notes
========================
* Add `condVar` and `condVartables` for computing the conditional variance on the random effects [#492]
* Bugfix: store the correct lower bound for GLMM bootstrap, when the original model was fit with `fast=false` [#518]
MixedModels v3.6.0 Release Notes
========================
* Add `likelihoodratiotest` method for comparing non-mixed (generalized) linear models to (generalized) linear mixed models [#508].
MixedModels v3.5.2 Release Notes
========================
* Explicitly deprecate vestigial `named` kwarg in `ranef` in favor of `raneftables` [#507].
MixedModels v3.5.1 Release Notes
========================
* Fix MIME show methods for models with random-effects not corresponding to a fixed effect [#501].
MixedModels v3.5.0 Release Notes
========================
* The Progressbar for `parametricbootstrap` and `replicate` is not displayed
when in a non-interactive (i.e. logging) context. The progressbar can also
be manually disabled with `hide_progress=true`.[#495]
* Threading in `parametricbootstrap` now uses a `SpinLock` instead of a `ReentrantLock`.
This improves performance, but care should be taken when nesting spin locks. [#493]
* Single-threaded use of `parametricbootstrap` now works when nested within a larger
multi-threaded context (e.g. `Threads.@threads for`). (Multi-threaded `parametricbootstrap`
worked and continues to work within a nested threading context.) [#493]
MixedModels v3.4.1 Release Notes
========================
* The value of a named `offset` argument to `GeneralizedLinearMixedModel`,
which previously was ignored [#453], is now handled properly. [#482]
MixedModels v3.4.0 Release Notes
========================
* `shortestcovint` method for `MixedModelBootstrap` [#484]
MixedModels v3.3.0 Release Notes
========================
* HTML and LaTeX `show` methods for `MixedModel`, `BlockDescription`,
`LikelihoodRatioTest`, `OptSummary` and `VarCorr`. Note that the interface for
these is not yet completely stable. In particular, rounding behavior may
change. [#480]
MixedModels v3.2.0 Release Notes
========================
* Markdown `show` methods for `MixedModel`, `BlockDescription`,
`LikelihoodRatioTest`, `OptSummary` and `VarCorr`. Note that the interface for
these is not yet completely stable. In particular, rounding behavior may
change. White-space padding within Markdown may also change, although this
should not impact rendering of the Markdown into HTML or LaTeX. The
Markdown presentation of a `MixedModel` is much more compact than the
REPL summary. If the REPL-style presentation is desired, then this can
be assembled from the Markdown output from `VarCorr` and `coeftable` [#474].
MixedModels v3.1.4 Release Notes
========================
* [experimental] Additional convenience constructors for `LinearMixedModel` [#449]
MixedModels v3.1.3 Release Notes
========================
* Compatibility updates
* `rankUpdate!` method for `UniformBlockDiagonal` by `Dense` [#447]
MixedModels v3.1.2 Release Notes
========================
* Compatibility updates
* `rankUpdate!` method for `Diagonal` by `Dense` [#446]
* use eager (install-time) downloading of `TestData` artifact to avoid compatibility
issues with `LazyArtifacts` in Julia 1.6 [#444]
MixedModels v3.1.1 Release Notes
========================
* Compatibility updates
* Better `loglikelihood(::GeneralizedLinearMixedModel)` which will work for models with
dispersion parameters [#419]. Note that fitting such models is still problematic.
MixedModels v3.1 Release Notes
========================
* `simulate!` and thus `parametricbootstrap` methods for `GeneralizedLinearMixedModel` [#418].
* Documented inconsistent behavior in `sdest` and `varest` for `GeneralizedLinearMixedModel` [#418].
MixedModels v3.0.2 Release Notes
========================
* Compatibility updates
* Minor updates for formatting in various `show` methods for `VarCorr`.
MixedModels v3.0 Release Notes
========================
New formula features
---------------------
* Nested grouping factors can be written using the `/` notation, as in
`@formula(strength ~ 1 + (1|batch/cask))` as a model for the `pastes` dataset.
* The `zerocorr` function converts a vector-valued random effects term from
correlated random effects to uncorrelated. (See the `constructors` section of the docs.)
* The `fulldummy` function can be applied to a factor to obtain a redundant encoding
of a categorical factor as a complete set of indicators plus an intercept. This is only
practical on the left-hand side of a random-effects term. (See the `constructors` section
of the docs.)
* `Grouping()` can be used as a contrast for a categorical array in the `contrasts` dictionary.
Doing so bypasses creation of contrast matrix, which, when the number of levels is large,
may cause memory overflow. As the name implies, this is used for grouping factors. [#339]
Rank deficiency
--------------------
* Checks for rank deficiency in the model matrix for the fixed-effects
parameters have been improved.
* The distinction between `coef`, which always returns a full set of coefficients
in the original order, and `fixef`, which returns possibly permuted and
non-redundant coefficients, has been made consistent across models.
Parametric bootstrap
--------------------
* The `parametricbootstrap` function and the struct it produces have been
extensively reworked for speed and convenience.
See the `bootstrap` section of the docs.
Principal components
--------------------
* The PCA property for `MixedModel` types provides principal components from
the correlation of the random-effects distribution (as opposed to the covariance)
* Factor loadings are included in the `print` method for the `PCA` struct.
`ReMat` and `FeMat` types
----------------------------------
* An `AbstractReMat` type has now been introduced to support [#380] work on constrained
random-effects structures and random-effects structures appropriate for applications
in GLM-based deconvolution as used in fMRI and EEG (see e.g. [unfold.jl](https://github.com/unfoldtoolbox/unfold.jl).)
* Similarly, a constructor for `FeMat{::SparseMatrixCSC,S}` has been introduced [#309].
Currently, this constructor assumes a full-rank matrix, but the work on rank
deficiency may be extended to this constructor as well.
* Analogous to `AbstractReMat`, an `AbstractReTerm <: AbstractTerm` type has been introduced [#395].
Terms created with `zerocorr` are of type `ZeroCorr <: AbstractReTerm`.
Availability of test data sets
------------------------------
* Several data sets from the literature were previously saved in `.rda` format
in the `test` directory and read using the `RData` package. These are now available
in an `Artifact` in the [`Arrow`](https://github.com/JuliaData/Arrow.jl.git) format [#382].
* Call `MixedModels.datasets()` to get a listing of the names of available datasets
* To load, e.g. the `dyestuff` data, use `MixedModels.dataset(:dyestuff)`
* Data sets are loaded using `Arrow.Table` which returns a column table. Wrap the
call in `DataFrame` if you prefer a `DataFrame`.
* Data sets are cached and multiple calls to `MixedModels.dataset()` for the same
data set have very low overhead after the first call.
Replacement capabilities
------------------------
* `describeblocks` has been dropped in favor of the `BlockDescription` type
* The `named` argument to `ranef` has been dropped in favor of `raneftables`
Package dependencies
--------------------
* Several package dependencies have been dropped, including `BlockDiagonals`, `NamedArrays` [#390],
`Printf` and `Showoff`
* Dependencies on `Arrow` [#382], `DataAPI` [#384], and `PooledArrays` have been added.
<!--- generated by NEWS-update.jl: -->
[#309]: https://github.com/JuliaStats/MixedModels.jl/issues/309
[#339]: https://github.com/JuliaStats/MixedModels.jl/issues/339
[#380]: https://github.com/JuliaStats/MixedModels.jl/issues/380
[#382]: https://github.com/JuliaStats/MixedModels.jl/issues/382
[#384]: https://github.com/JuliaStats/MixedModels.jl/issues/384
[#390]: https://github.com/JuliaStats/MixedModels.jl/issues/390
[#395]: https://github.com/JuliaStats/MixedModels.jl/issues/395
[#418]: https://github.com/JuliaStats/MixedModels.jl/issues/418
[#419]: https://github.com/JuliaStats/MixedModels.jl/issues/419
[#427]: https://github.com/JuliaStats/MixedModels.jl/issues/427
[#444]: https://github.com/JuliaStats/MixedModels.jl/issues/444
[#446]: https://github.com/JuliaStats/MixedModels.jl/issues/446
[#447]: https://github.com/JuliaStats/MixedModels.jl/issues/447
[#449]: https://github.com/JuliaStats/MixedModels.jl/issues/449
[#453]: https://github.com/JuliaStats/MixedModels.jl/issues/453
[#456]: https://github.com/JuliaStats/MixedModels.jl/issues/456
[#464]: https://github.com/JuliaStats/MixedModels.jl/issues/464
[#465]: https://github.com/JuliaStats/MixedModels.jl/issues/465
[#469]: https://github.com/JuliaStats/MixedModels.jl/issues/469
[#470]: https://github.com/JuliaStats/MixedModels.jl/issues/470
[#474]: https://github.com/JuliaStats/MixedModels.jl/issues/474
[#479]: https://github.com/JuliaStats/MixedModels.jl/issues/479
[#480]: https://github.com/JuliaStats/MixedModels.jl/issues/480
[#482]: https://github.com/JuliaStats/MixedModels.jl/issues/482
[#484]: https://github.com/JuliaStats/MixedModels.jl/issues/484
[#486]: https://github.com/JuliaStats/MixedModels.jl/issues/486
[#489]: https://github.com/JuliaStats/MixedModels.jl/issues/489
[#490]: https://github.com/JuliaStats/MixedModels.jl/issues/490
[#492]: https://github.com/JuliaStats/MixedModels.jl/issues/492
[#493]: https://github.com/JuliaStats/MixedModels.jl/issues/493
[#495]: https://github.com/JuliaStats/MixedModels.jl/issues/495
[#501]: https://github.com/JuliaStats/MixedModels.jl/issues/501
[#506]: https://github.com/JuliaStats/MixedModels.jl/issues/506
[#507]: https://github.com/JuliaStats/MixedModels.jl/issues/507
[#508]: https://github.com/JuliaStats/MixedModels.jl/issues/508
[#518]: https://github.com/JuliaStats/MixedModels.jl/issues/518
[#523]: https://github.com/JuliaStats/MixedModels.jl/issues/523
[#524]: https://github.com/JuliaStats/MixedModels.jl/issues/524
[#535]: https://github.com/JuliaStats/MixedModels.jl/issues/535
[#536]: https://github.com/JuliaStats/MixedModels.jl/issues/536
[#537]: https://github.com/JuliaStats/MixedModels.jl/issues/537
[#539]: https://github.com/JuliaStats/MixedModels.jl/issues/539
[#545]: https://github.com/JuliaStats/MixedModels.jl/issues/545
[#551]: https://github.com/JuliaStats/MixedModels.jl/issues/551
[#552]: https://github.com/JuliaStats/MixedModels.jl/issues/552
[#553]: https://github.com/JuliaStats/MixedModels.jl/issues/553
[#561]: https://github.com/JuliaStats/MixedModels.jl/issues/561
[#566]: https://github.com/JuliaStats/MixedModels.jl/issues/566
[#570]: https://github.com/JuliaStats/MixedModels.jl/issues/570
[#572]: https://github.com/JuliaStats/MixedModels.jl/issues/572
[#573]: https://github.com/JuliaStats/MixedModels.jl/issues/573
[#577]: https://github.com/JuliaStats/MixedModels.jl/issues/577
[#578]: https://github.com/JuliaStats/MixedModels.jl/issues/578
[#588]: https://github.com/JuliaStats/MixedModels.jl/issues/588
[#597]: https://github.com/JuliaStats/MixedModels.jl/issues/597
[#598]: https://github.com/JuliaStats/MixedModels.jl/issues/598
[#603]: https://github.com/JuliaStats/MixedModels.jl/issues/603
[#604]: https://github.com/JuliaStats/MixedModels.jl/issues/604
[#608]: https://github.com/JuliaStats/MixedModels.jl/issues/608
[#614]: https://github.com/JuliaStats/MixedModels.jl/issues/614
[#615]: https://github.com/JuliaStats/MixedModels.jl/issues/615
[#616]: https://github.com/JuliaStats/MixedModels.jl/issues/616
[#628]: https://github.com/JuliaStats/MixedModels.jl/issues/628
[#634]: https://github.com/JuliaStats/MixedModels.jl/issues/634
[#637]: https://github.com/JuliaStats/MixedModels.jl/issues/637
[#639]: https://github.com/JuliaStats/MixedModels.jl/issues/639
[#648]: https://github.com/JuliaStats/MixedModels.jl/issues/648
[#651]: https://github.com/JuliaStats/MixedModels.jl/issues/651
[#652]: https://github.com/JuliaStats/MixedModels.jl/issues/652
[#653]: https://github.com/JuliaStats/MixedModels.jl/issues/653
[#657]: https://github.com/JuliaStats/MixedModels.jl/issues/657
[#663]: https://github.com/JuliaStats/MixedModels.jl/issues/663
[#664]: https://github.com/JuliaStats/MixedModels.jl/issues/664
[#665]: https://github.com/JuliaStats/MixedModels.jl/issues/665
[#667]: https://github.com/JuliaStats/MixedModels.jl/issues/667
[#673]: https://github.com/JuliaStats/MixedModels.jl/issues/673
[#674]: https://github.com/JuliaStats/MixedModels.jl/issues/674
[#676]: https://github.com/JuliaStats/MixedModels.jl/issues/676
[#677]: https://github.com/JuliaStats/MixedModels.jl/issues/677
[#680]: https://github.com/JuliaStats/MixedModels.jl/issues/680
[#681]: https://github.com/JuliaStats/MixedModels.jl/issues/681
[#682]: https://github.com/JuliaStats/MixedModels.jl/issues/682
[#694]: https://github.com/JuliaStats/MixedModels.jl/issues/694
[#698]: https://github.com/JuliaStats/MixedModels.jl/issues/698
[#702]: https://github.com/JuliaStats/MixedModels.jl/issues/702
[#703]: https://github.com/JuliaStats/MixedModels.jl/issues/703
[#707]: https://github.com/JuliaStats/MixedModels.jl/issues/707
[#709]: https://github.com/JuliaStats/MixedModels.jl/issues/709
[#715]: https://github.com/JuliaStats/MixedModels.jl/issues/715
[#717]: https://github.com/JuliaStats/MixedModels.jl/issues/717
[#733]: https://github.com/JuliaStats/MixedModels.jl/issues/733
[#738]: https://github.com/JuliaStats/MixedModels.jl/issues/738
[#740]: https://github.com/JuliaStats/MixedModels.jl/issues/740
[#744]: https://github.com/JuliaStats/MixedModels.jl/issues/744
[#748]: https://github.com/JuliaStats/MixedModels.jl/issues/748
[#749]: https://github.com/JuliaStats/MixedModels.jl/issues/749
[#755]: https://github.com/JuliaStats/MixedModels.jl/issues/755
[#756]: https://github.com/JuliaStats/MixedModels.jl/issues/756
[#767]: https://github.com/JuliaStats/MixedModels.jl/issues/767
[#769]: https://github.com/JuliaStats/MixedModels.jl/issues/769
[#772]: https://github.com/JuliaStats/MixedModels.jl/issues/772
[#773]: https://github.com/JuliaStats/MixedModels.jl/issues/773
[#775]: https://github.com/JuliaStats/MixedModels.jl/issues/775
[#776]: https://github.com/JuliaStats/MixedModels.jl/issues/776
[#778]: https://github.com/JuliaStats/MixedModels.jl/issues/778
[#783]: https://github.com/JuliaStats/MixedModels.jl/issues/783
[#785]: https://github.com/JuliaStats/MixedModels.jl/issues/785
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
["MIT"] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | docs | 8968 |
# Mixed-effects models in Julia
|**Documentation**|**Citation**|**Build Status**|**Code Coverage**| **Style Guide** |
|:---------------:|:----------:|:--------------:|:---------------:|:----------------|
|[![Stable Docs][docs-stable-img]][docs-stable-url] [![Dev Docs][docs-dev-img]][docs-dev-url] | [![DOI][doi-img]][doi-url] | [![Julia Current][current-img]][current-url] [![Julia Minimum Supported Version][minimum-img]][minimum-url] [![Julia Nightly][nightly-img]][nightly-url] [![PkgEval][pkgeval-img]][pkgeval-url] | [![CodeCov][codecov-img]][codecov-url] | [](https://github.com/invenia/BlueStyle) |
[doi-img]: https://zenodo.org/badge/9106942.svg
[doi-url]: https://zenodo.org/badge/latestdoi/9106942
[docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[docs-dev-url]: https://juliastats.github.io/MixedModels.jl/dev
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://juliastats.github.io/MixedModels.jl/stable
[codecov-img]: https://codecov.io/github/JuliaStats/MixedModels.jl/badge.svg?branch=main
[codecov-url]: https://codecov.io/github/JuliaStats/MixedModels.jl?branch=main
[current-img]: https://github.com/JuliaStats/MixedModels.jl/actions/workflows/current.yml/badge.svg
[current-url]: https://github.com/JuliaStats/MixedModels.jl/actions?workflow=current
[nightly-img]: https://github.com/JuliaStats/MixedModels.jl/actions/workflows/nightly.yml/badge.svg
[nightly-url]: https://github.com/JuliaStats/MixedModels.jl/actions?workflow=nightly
[minimum-img]: https://github.com/JuliaStats/MixedModels.jl/actions/workflows/minimum.yml/badge.svg
[minimum-url]: https://github.com/JuliaStats/MixedModels.jl/actions?workflow=minimum
[pkgeval-img]: https://juliaci.github.io/NanosoldierReports/pkgeval_badges/M/MixedModels.svg
[pkgeval-url]: https://juliaci.github.io/NanosoldierReports/pkgeval_badges/report.html
This package defines linear mixed models (`LinearMixedModel`) and generalized linear mixed models (`GeneralizedLinearMixedModel`). Users can use the abstractions of the statistical model API to build, fit (`fit`/`fit!`), and query the fitted models.
A _mixed-effects model_ is a statistical model for a _response_ variable as a function of one or more _covariates_.
For a categorical covariate the coefficients associated with the levels of the covariate are sometimes called _effects_, as in "the effect of using Treatment 1 versus the placebo".
If the potential levels of the covariate are fixed and reproducible, e.g. the levels for `Sex` could be `"F"` and `"M"`, they are modeled with _fixed-effects_ parameters.
If the levels constitute a sample from a population, e.g. the `Subject` or the `Item` at a particular observation, they are modeled as _random effects_.
A _mixed-effects_ model contains both fixed-effects and random-effects terms.
With fixed-effects it is the coefficients themselves or combinations of coefficients that are of interest.
For random effects it is the variability of the effects over the population that is of interest.
In this package random effects are modeled as independent samples from a multivariate Gaussian distribution of the form 𝓑 ~ 𝓝(0, 𝚺).
For the response vector, 𝐲, only the mean of the conditional distribution, 𝓨|𝓑 = 𝐛, depends on 𝐛 and it does so through a _linear predictor expression_, 𝛈 = 𝐗𝛃 + 𝐙𝐛, where 𝛃 is the fixed-effects coefficient vector and 𝐗 and 𝐙 are model matrices of the appropriate sizes.
In a `LinearMixedModel` the conditional mean, 𝛍 = 𝔼[𝓨|𝓑 = 𝐛], is the linear predictor, 𝛈, and the conditional distribution is multivariate Gaussian, (𝓨|𝓑 = 𝐛) ~ 𝓝(𝛍, σ²𝐈).
In a `GeneralizedLinearMixedModel`, the conditional mean, 𝔼[𝓨|𝓑 = 𝐛], is related to the linear predictor via a _link function_.
Typical distribution forms are _Bernoulli_ for binary data or _Poisson_ for count data.
## Currently Tested Platforms
|OS | OS Version |Arch |Julia |
|:------:|:-------------:|:---:|:--------------:|
|Linux | Ubuntu 20.04 | x64 |v1.8 |
|Linux | Ubuntu 20.04 | x64 |current release |
|Linux | Ubuntu 20.04 | x64 |nightly |
|macOS | Monterey 12 | x64 |v1.8 |
|Windows | Server 2019 | x64 |v1.8 |
Note that previous releases still support older Julia versions.
## Version 4.0.0
Version 4.0.0 contains some user-visible changes and many changes in the underlying code.
Please see [NEWS](NEWS.md) for a complete overview, but a few key points are:
- The internal storage of the model matrices in `LinearMixedModel` has changed and been optimized. This change should be transparent to users who are not manipulating the fields of the model `struct` directly.
- The [handling of rank deficiency](https://juliastats.org/MixedModels.jl/v4.0/rankdeficiency/) continues to evolve.
- Additional [`predict` and `simulate`](https://juliastats.org/MixedModels.jl/v4.0/prediction/) methods have been added for generalizing to new data.
- `saveoptsum` and `restoreoptsum!` provide for saving and restoring the `optsum` and thus offer a way to serialize a model fit (a brief sketch follows this list).
- There is improved support for the runtime construction of model formula, especially `RandomEffectsTerm`s and nested terms (methods for `Base.|(::AbstractTerm, ::AbstractTerm)` and `Base./(::AbstractTerm, ::AbstractTerm)`).
- A progress display is shown by default for models taking more than a few hundred milliseconds to fit. This can be disabled with the keyword argument `progress=false`.
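
As a quick sketch of the serialization point above (the file name is arbitrary;
`m1` is the Dyestuff model also used in the Quick Start below):

```julia
using MixedModels
m1 = fit(MixedModel, @formula(yield ~ 1 + (1|batch)), MixedModels.dataset(:dyestuff))
saveoptsum("dyestuff_optsum.json", m1)   # save the optimization summary as JSON

# later: rebuild the unfitted model and restore the fit without re-optimizing
m2 = LinearMixedModel(@formula(yield ~ 1 + (1|batch)), MixedModels.dataset(:dyestuff))
restoreoptsum!(m2, "dyestuff_optsum.json")
```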
## Quick Start
```julia-repl
julia> using MixedModels
julia> m1 = fit(MixedModel, @formula(yield ~ 1 + (1|batch)), MixedModels.dataset(:dyestuff))
Linear mixed model fit by maximum likelihood
yield ~ 1 + (1 | batch)
logLik -2 logLik AIC AICc BIC
-163.6635 327.3271 333.3271 334.2501 337.5307
Variance components:
Column Variance Std.Dev.
batch (Intercept) 1388.3332 37.2603
Residual 2451.2501 49.5101
Number of obs: 30; levels of grouping factors: 6
Fixed-effects parameters:
────────────────────────────────────────────────
Coef. Std. Error z Pr(>|z|)
────────────────────────────────────────────────
(Intercept) 1527.5 17.6946 86.33 <1e-99
────────────────────────────────────────────────
julia> using Random
julia> bs = parametricbootstrap(MersenneTwister(42), 1000, m1);
Progress: 100%|████████████████████████████████████████████████| Time: 0:00:00
julia> propertynames(bs)
13-element Vector{Symbol}:
:allpars
:objective
:σ
:β
:se
:coefpvalues
:θ
:σs
:λ
:inds
:lowerbd
:fits
:fcnames
julia> bs.coefpvalues # returns a row table
1000-element Vector{NamedTuple{(:iter, :coefname, :β, :se, :z, :p), Tuple{Int64, Symbol, Float64, Float64, Float64, Float64}}}:
(iter = 1, coefname = Symbol("(Intercept)"), β = 1517.0670832927115, se = 20.76271142094811, z = 73.0669059804057, p = 0.0)
(iter = 2, coefname = Symbol("(Intercept)"), β = 1503.5781855888436, se = 8.1387737362628, z = 184.7425956676446, p = 0.0)
(iter = 3, coefname = Symbol("(Intercept)"), β = 1529.2236379016574, se = 16.523824785737837, z = 92.54659001356465, p = 0.0)
⋮
(iter = 998, coefname = Symbol("(Intercept)"), β = 1498.3795009457242, se = 25.649682012258104, z = 58.417079019913054, p = 0.0)
(iter = 999, coefname = Symbol("(Intercept)"), β = 1526.1076747922416, se = 16.22412120273579, z = 94.06411945042063, p = 0.0)
(iter = 1000, coefname = Symbol("(Intercept)"), β = 1557.7546433870125, se = 12.557577103806015, z = 124.04898098653763, p = 0.0)
julia> using DataFrames
julia> DataFrame(bs.coefpvalues) # puts it into a DataFrame
1000×6 DataFrame
│ Row │ iter │ coefname │ β │ se │ z │ p │
│ │ Int64 │ Symbol │ Float64 │ Float64 │ Float64 │ Float64 │
├──────┼───────┼─────────────┼─────────┼─────────┼─────────┼─────────┤
│ 1 │ 1 │ (Intercept) │ 1517.07 │ 20.7627 │ 73.0669 │ 0.0 │
│ 2 │ 2 │ (Intercept) │ 1503.58 │ 8.13877 │ 184.743 │ 0.0 │
│ 3 │ 3 │ (Intercept) │ 1529.22 │ 16.5238 │ 92.5466 │ 0.0 │
⋮
│ 998 │ 998 │ (Intercept) │ 1498.38 │ 25.6497 │ 58.4171 │ 0.0 │
│ 999 │ 999 │ (Intercept) │ 1526.11 │ 16.2241 │ 94.0641 │ 0.0 │
│ 1000 │ 1000 │ (Intercept) │ 1557.75 │ 12.5576 │ 124.049 │ 0.0 │
julia> DataFrame(bs.β)
1000×3 DataFrame
│ Row │ iter │ coefname │ β │
│ │ Int64 │ Symbol │ Float64 │
├──────┼───────┼─────────────┼─────────┤
│ 1 │ 1 │ (Intercept) │ 1517.07 │
│ 2 │ 2 │ (Intercept) │ 1503.58 │
│ 3 │ 3 │ (Intercept) │ 1529.22 │
⋮
│ 998 │ 998 │ (Intercept) │ 1498.38 │
│ 999 │ 999 │ (Intercept) │ 1526.11 │
│ 1000 │ 1000 │ (Intercept) │ 1557.75 │
```
## Funding Acknowledgement
The development of this package was supported by the Center for Interdisciplinary Research, Bielefeld (ZiF)/Cooperation Group "Statistical models for psychological and linguistic data".
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
["MIT"] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | docs | 350 |
Thanks for contributing!
Did behavior change? Did you add new features? If so, please update NEWS.md
- [ ] add entry in NEWS.md
- [ ] after opening this PR, add a reference and run `docs/NEWS-update.jl` to update the cross-references.
Should we release your changes right away? If so, bump the version:
- [ ] I've bumped the version appropriately
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
["MIT"] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | docs | 10176 |
# Normalized Gauss-Hermite Quadrature
[*Gaussian Quadrature rules*](https://en.wikipedia.org/wiki/Gaussian_quadrature) provide sets of `x` values, called *abscissae*, and corresponding weights, `w`, to approximate an integral with respect to a *weight function*, $g(x)$.
For a `k`th order rule the approximation is
```math
\int f(x)g(x)\,dx \approx \sum_{i=1}^k w_i f(x_i)
```
For the *Gauss-Hermite* rule the weight function is
```math
g(x) = e^{-x^2}
```
and the domain of integration is $(-\infty, \infty)$.
A slight variation of this is the *normalized Gauss-Hermite* rule for which the weight function is the standard normal density
```math
g(z) = \phi(z) = \frac{e^{-z^2/2}}{\sqrt{2\pi}}
```
Thus, the expected value of $f(z)$, where $\mathcal{Z}\sim\mathscr{N}(0,1)$, is approximated as
```math
\mathbb{E}[f]=\int_{-\infty}^{\infty} f(z) \phi(z)\,dz\approx\sum_{i=1}^k w_i\,f(z_i) .
```
Naturally, there is a caveat. For the approximation to be accurate the function $f(z)$ must behave like a low-order polynomial over the range of interest.
More formally, a `k`th order rule is exact when `f` is a polynomial of order `2k-1` or less. [^1]
## Evaluating the weights and abscissae
In the [*Golub-Welsch algorithm*](https://en.wikipedia.org/wiki/Gaussian_quadrature#The_Golub-Welsch_algorithm) the abscissae for a particular Gaussian quadrature rule are determined as the eigenvalues of a symmetric tri-diagonal matrix and the weights are derived from the squares of the first row of the matrix of eigenvectors.
For a `k`th order normalized Gauss-Hermite rule the tridiagonal matrix has zeros on the diagonal and the square roots of `1:k-1` on the super- and sub-diagonal, e.g.
```@setup Main
using DisplayAs
```
```@example Main
using DataFrames, LinearAlgebra, Gadfly
sym3 = SymTridiagonal(zeros(3), sqrt.(1:2))
ev = eigen(sym3);
ev.values
```
```@example Main
abs2.(ev.vectors[1,:])
```
As a function of `k` this can be written as
```@example Main
function gausshermitenorm(k)
ev = eigen(SymTridiagonal(zeros(k), sqrt.(1:k-1)))
ev.values, abs2.(ev.vectors[1,:])
end;
```
providing
```@example Main
gausshermitenorm(3)
```
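As a quick check of the exactness property stated above: a rule with $k=3$
integrates polynomials of degree up to $2k-1=5$ exactly against the standard
normal density, so it reproduces $\mathbb{E}[\mathcal{Z}^4]=3$ but not
$\mathbb{E}[\mathcal{Z}^6]=15$.
```@example Main
z, w = gausshermitenorm(3);
sum(w .* z .^ 4), sum(w .* z .^ 6)  # ≈ (3, 9): degree 4 is exact, degree 6 is not
```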
The weights and positions are often shown as a *lollipop plot*.
For the 9th order rule these are
```@example Main
gh9=gausshermitenorm(9)
plot(x=gh9[1], y=gh9[2], Geom.hair, Geom.point, Guide.ylabel("Weight"), Guide.xlabel(""))
```
Notice that the magnitudes of the weights drop quite dramatically away from zero, even on a logarithmic scale
```@example Main
plot(
x=gh9[1], y=gh9[2], Geom.hair, Geom.point,
Scale.y_log2, Guide.ylabel("Weight (log scale)"),
Guide.xlabel(""),
)
```
The definition of `MixedModels.GHnorm` is similar to the `gausshermitenorm` function with some extra provisions for ensuring symmetry of the abscissae and the weights and for caching values once they have been calculated.
```@docs
GHnorm
```
```@example Main
using MixedModels
GHnorm(3)
```
By the properties of the normal distribution, when $\mathcal{X}\sim\mathscr{N}(\mu, \sigma^2)$
```math
\mathbb{E}[g(x)] \approx \sum_{i=1}^k g(\mu + \sigma z_i)\,w_i
```
For example, $\mathbb{E}[\mathcal{X}^2]$ where $\mathcal{X}\sim\mathcal{N}(2, 3^2)$ is
```@example Main
μ = 2; σ = 3; ghn3 = GHnorm(3);
sum(@. ghn3.w * abs2(μ + σ * ghn3.z)) # should be μ² + σ² = 13
```
(In general a dot, '`.`', after the function name in a function call, as in `abs2.(...)`, or before an operator creates a [*fused vectorized*](https://docs.julialang.org/en/stable/manual/performance-tips/#More-dots:-Fuse-vectorized-operations-1) evaluation in Julia.
The macro `@.` has the effect of vectorizing all operations in the subsequent expression.)
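The caveat about $f$ behaving like a low-order polynomial matters in practice.
For instance, the lognormal mean $\mathbb{E}[e^{\mathcal{X}}]=e^{\mu+\sigma^2/2}$
grows so quickly in the upper tail that the same 3-point rule underestimates it
badly (an illustration using the values of `μ`, `σ` and `ghn3` defined above):
```@example Main
sum(@. ghn3.w * exp(μ + σ * ghn3.z)), exp(μ + abs2(σ) / 2)
```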
## Application to a model for contraception use
A *binary response* is a "Yes"/"No" type of answer.
For example, in a 1989 fertility survey of women in Bangladesh (reported in [Huq, N. M. and Cleland, J., 1990](https://www.popline.org/node/371841)) one response of interest was whether the woman used artificial contraception.
Several covariates were recorded including the woman's age (centered at the mean), the number of live children the woman has had (in 4 categories: 0, 1, 2, and 3 or more), whether she lived in an urban setting, and the district in which she lived.
The version of the data used here is that used in review of multilevel modeling software conducted by the Center for Multilevel Modelling, currently at University of Bristol (http://www.bristol.ac.uk/cmm/learning/mmsoftware/data-rev.html).
These data are available as the `:contra` dataset.
```@example Main
contra = DataFrame(MixedModels.dataset(:contra))
describe(contra)
```
A smoothed scatterplot of contraception use versus age
```@example Main
plot(contra, x=:age, y=:use, Geom.smooth, Guide.xlabel("Centered age (yr)"),
Guide.ylabel("Contraception use"))
```
shows that the proportion of women using artificial contraception is approximately quadratic in age.
A model with fixed-effects for age, age squared, number of live children and urban location and with random effects for district, is fit as
```@example Main
const form1 = @formula use ~ 1 + age + abs2(age) + livch + urban + (1|dist);
m1 = fit(MixedModel, form1, contra, Bernoulli(), fast=true)
DisplayAs.Text(ans) # hide
```
For a model such as `m1`, which has a single, scalar random-effects term, the unscaled conditional density of the spherical random effects variable, $\mathcal{U}$,
given the observed data, $\mathcal{Y}=\mathbf{y}_0$, can be expressed as a product of scalar density functions, $f_i(u_i),\; i=1,\dots,q$.
In the PIRLS algorithm, which determines the conditional mode vector, $\tilde{\mathbf{u}}$, the optimization is performed on the *deviance scale*,
```math
D(\mathbf{u})=-2\sum_{i=1}^q \log(f_i(u_i))
```
The objective, $D$, consists of two parts: the sum of the (squared) *deviance residuals*, measuring fidelity to the data, and the squared length of $\mathbf{u}$, which is the penalty.
In the PIRLS algorithm, only the sum of these components is needed.
To use Gauss-Hermite quadrature the contributions of each of the $u_i,\;i=1,\dots,q$ should be separately evaluated.
```@example Main
const devc0 = map!(abs2, m1.devc0, m1.u[1]); # start with uᵢ²
const devresid = m1.resp.devresid; # n-dimensional vector of deviance residuals
const refs = only(m1.LMM.reterms).refs; # n-dimensional vector of indices in 1:q
for (dr, i) in zip(devresid, refs)
devc0[i] += dr
end
show(devc0)
```
One thing to notice is that, even on the deviance scale, the contributions of different districts can be of different magnitudes.
This is primarily due to different sample sizes in the different districts.
```@example Main
using FreqTables
freqtable(contra, :dist)'
```
Because the first district has one of the largest sample sizes and the third district has the smallest sample size, these two will be used for illustration.
For a range of $u$ values, evaluate the individual components of the deviance and store them in a matrix.
```@example Main
const devc = m1.devc;
const xvals = -5.0:2.0^(-4):5.0;
const uv = vec(m1.u[1]);
const u₀ = vec(m1.u₀[1]);
results = zeros(length(devc0), length(xvals))
for (j, u) in enumerate(xvals)
fill!(devc, abs2(u))
fill!(uv, u)
MixedModels.updateη!(m1)
for (dr, i) in zip(devresid, refs)
devc[i] += dr
end
copyto!(view(results, :, j), devc)
end
```
A plot of the deviance contribution versus $u_1$
```@example Main
plot(x=xvals, y=view(results, 1, :), Geom.line, Guide.xlabel("u₁"),
Guide.ylabel("Deviance contribution"))
```
shows that the deviance contribution is very close to a quadratic.
This is also true for $u_3$
```@example Main
plot(x=xvals, y=view(results, 3, :), Geom.line, Guide.xlabel("u₃"),
Guide.ylabel("Deviance contribution"))
```
The PIRLS algorithm provides the locations of the minima of these scalar functions, stored as
```@example Main
m1.u₀[1]
```
the minima themselves, evaluated as `devc0` above, and a horizontal scale, which is the inverse of diagonal of the Cholesky factor.
As shown below, this is an estimate of the conditional standard deviations of the components of $\mathcal{U}$.
```@example Main
using MixedModels: block
const s = inv.(m1.LMM.L[block(1,1)].diag);
s'
```
The curves can be put on a common scale, corresponding to the standard normal, as
```@example Main
for (j, z) in enumerate(xvals)
@. uv = u₀ + z * s
MixedModels.updateη!(m1)
@. devc = abs2(uv) - devc0
for (dr, i) in zip(devresid, refs)
devc[i] += dr
end
copyto!(view(results, :, j), devc)
end
```
```@example Main
plot(x=xvals, y=view(results, 1, :), Geom.line,
Guide.xlabel("Scaled and shifted u₁"),
Guide.ylabel("Shifted deviance contribution"))
```
```@example Main
plot(x=xvals, y=view(results, 3, :), Geom.line,
Guide.xlabel("Scaled and shifted u₃"),
Guide.ylabel("Shifted deviance contribution"))
```
On the original density scale these become
```@example Main
for (j, z) in enumerate(xvals)
@. uv = u₀ + z * s
MixedModels.updateη!(m1)
@. devc = abs2(uv) - devc0
for (dr, i) in zip(devresid, refs)
devc[i] += dr
end
copyto!(view(results, :, j), @. exp(-devc/2))
end
```
```@example Main
plot(x=xvals, y=view(results, 1, :), Geom.line,
Guide.xlabel("Scaled and shifted u₁"),
Guide.ylabel("Conditional density"))
```
```@example Main
plot(x=xvals, y=view(results, 3, :), Geom.line,
Guide.xlabel("Scaled and shifted u₃"),
Guide.ylabel("Conditional density"))
```
and the function to be integrated with the normalized Gauss-Hermite rule is
```@example Main
for (j, z) in enumerate(xvals)
@. uv = u₀ + z * s
MixedModels.updateη!(m1)
@. devc = abs2(uv) - devc0
for (dr, i) in zip(devresid, refs)
devc[i] += dr
end
copyto!(view(results, :, j), @. exp((abs2(z) - devc)/2))
end
```
```@example Main
plot(x=xvals, y=view(results, 1, :), Geom.line,
Guide.xlabel("Scaled and shifted u₁"), Guide.ylabel("Kernel ratio"))
```
```@example Main
plot(x=xvals, y=view(results, 3, :), Geom.line,
Guide.xlabel("Scaled and shifted u₃"), Guide.ylabel("Kernel ratio"))
```
[^1]: https://en.wikipedia.org/wiki/Gaussian_quadrature
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
["MIT"] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | docs | 1576 |
# API
In addition to its own functionality, MixedModels.jl also implements extensive support for the [`StatsAPI.StatisticalModel`](https://github.com/JuliaStats/StatsAPI.jl/blob/main/src/statisticalmodel.jl) and [`StatsAPI.RegressionModel`](https://github.com/JuliaStats/StatsAPI.jl/blob/main/src/regressionmodel.jl) API.
## Types
```@autodocs
Modules = [MixedModels]
Order = [:type]
```
## Exported Functions
```@autodocs
Modules = [MixedModels]
Private = false
Order = [:function]
```
## Methods from `StatsAPI.jl`, `StatsBase.jl`, `StatsModels.jl` and `GLM.jl`
```julia
aic
aicc
bic
coef
coefnames
coeftable
deviance
dispersion
dispersion_parameter
dof
dof_residual
fit
fit!
fitted
formula
isfitted
islinear
leverage
loglikelihood
meanresponse
modelmatrix
model_response
nobs
predict
residuals
response
responsename
StatsModels.lrtest # not exported
std
stderror
vcov
weights
```
### MixedModels.jl "alternatives" and extensions to StatsAPI and GLM functions
The following are MixedModels.jl-specific functions and not simply methods for functions defined in StatsAPI and GLM.jl.
```julia
coefpvalues
condVar
condVartables
fitted!
fixef
fixefnames
likelihoodratiotest # not exported
pwrss
ranef
raneftables
refit!
shortestcovint
sdest
simulate
simulate!
stderror!
varest
```
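For orientation, a brief sketch of a few of these accessors applied to a fitted
model (illustrative only; any small example model will do):
```julia
using MixedModels
m = fit(MixedModel, @formula(yield ~ 1 + (1|batch)), MixedModels.dataset(:dyestuff))
fixef(m)   # non-redundant (possibly permuted) fixed-effects estimates
ranef(m)   # conditional modes of the random effects
pwrss(m)   # penalized, weighted residual sum of squares
sdest(m)   # estimate of the residual standard deviation σ
```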
## Non-Exported Functions
Note that unless discussed elsewhere in the online documentation, non-exported functions should be considered implementation details.
```@autodocs
Modules = [MixedModels]
Public = false
Order = [:function]
Filter = f -> !startswith(string(f), "_")
```
| MixedModels | https://github.com/JuliaStats/MixedModels.jl.git |
["MIT"] | 4.26.1 | e3fffd09185c6eb69f66b9ed29af0240b0dd0adc | docs | 16652 |
# Benchmark Report for */home/bates/.julia/packages/MixedModels/dn0WY/src/MixedModels.jl*
## Job Properties
* Time of benchmark: 2 Oct 2018 - 13:42
* Package commit: non gi
* Julia commit: 5d4eac
* Julia command flags: None
* Environment variables: None
## Results
Below is a table of this job's results, obtained by running the benchmarks.
The values listed in the `ID` column have the structure `[parent_group, child_group, ..., key]`, and can be used to
index into the BaseBenchmarks suite to retrieve the corresponding benchmarks.
The percentages accompanying time and memory values in the below table are noise tolerances. The "true"
time/memory value for a given benchmark is expected to fall within this percentage of the reported value.
An empty cell means that the value was zero.
| ID | time | GC time | memory | allocations |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------:|-----------:|----------------:|------------:|
| `["crossed", "Assay:1+A+B*C+(1|G)+(1|H)"]` | 2.943 ms (5%) | | 449.23 KiB (1%) | 7821 |
| `["crossed", "Demand:1+U+V+W+X+(1|G)+(1|H)"]` | 2.775 ms (5%) | | 386.13 KiB (1%) | 8827 |
| `["crossed", "InstEval:1+A*I+(1|G)+(1|H)"]` | 1.247 s (5%) | 114.131 ms | 234.50 MiB (1%) | 33070 |
| `["crossed", "InstEval:1+A+(1|G)+(1|H)+(1|I)"]` | 1.999 s (5%) | 12.898 ms | 187.33 MiB (1%) | 47246 |
| `["crossed", "Penicillin:1+(1|G)+(1|H)"]` | 2.697 ms (5%) | | 350.83 KiB (1%) | 8064 |
| `["crossed", "ScotsSec:1+A+U+V+(1|G)+(1|H)"]` | 4.833 ms (5%) | | 1.45 MiB (1%) | 9699 |
| `["crossed", "dialectNL:1+A+T+U+V+W+X+(1|G)+(1|H)+(1|I)"]` | 416.892 ms (5%) | 6.731 ms | 95.20 MiB (1%) | 28416 |
| `["crossed", "egsingle:1+A+U+V+(1|G)+(1|H)"]` | 31.421 ms (5%) | 3.427 ms | 48.19 MiB (1%) | 16055 |
| `["crossed", "ml1m:1+(1|G)+(1|H)"]` | 36.714 s (5%) | 225.872 ms | 323.09 MiB (1%) | 2045434 |
| `["crossed", "paulsim:1+S+T+U+(1|H)+(1|G)"]` | 14.097 ms (5%) | | 4.41 MiB (1%) | 10208 |
| `["crossedvector", "bs10:1+U+V+W+((1+U+V+W)|G)+((1+U+V+W)|H)"]` | 165.171 ms (5%) | 3.149 ms | 25.47 MiB (1%) | 806498 |
| `["crossedvector", "d3:1+U+((1+U)|G)+((1+U)|H)+((1+U)|I)"]` | 49.023 s (5%) | 1.766 s | 7.51 GiB (1%) | 301762163 |
| `["crossedvector", "d3:1+U+(1|G)+(1|H)+(1|I)"]` | 299.348 ms (5%) | 117.923 ms | 371.75 MiB (1%) | 43708 |
| `["crossedvector", "gb12:1+S+T+U+V+W+X+Z+((1+S+U+W)|G)+((1+S+T+V)|H)"]` | 134.101 ms (5%) | | 15.88 MiB (1%) | 537616 |
| `["crossedvector", "kb07:1+S+T+U+V+W+X+Z+((1+S+T+U+V+W+X+Z)|G)+((1+S+T+U+V+W+X+Z)|H)"]` | 3.488 s (5%) | 16.508 ms | 124.58 MiB (1%) | 4319046 |
| `["crossedvector", "kb07:1+S+T+U+V+W+X+Z+(1|G)+((0+S)|G)+((0+T)|G)+((0+U)|G)+((0+V)|G)+((0+W)|G)+((0+X)|G)+((0+Z)|G)+(1|H)+((0+S)|H)+((0+T)|H)+((0+U)|H)+((0+V)|H)+((0+W)|H)+((0+X)|H)+((0+Z)|H)"]` | 493.390 ms (5%) | 7.953 ms | 70.86 MiB (1%) | 3239747 |
| `["nested", "Animal:1+(1|G)+(1|H)"]` | 1.261 ms (5%) | | 178.91 KiB (1%) | 3819 |
| `["nested", "Chem97:1+(1|G)+(1|H)"]` | 58.460 ms (5%) | 6.975 ms | 93.76 MiB (1%) | 19565 |
| `["nested", "Chem97:1+U+(1|G)+(1|H)"]` | 59.353 ms (5%) | 7.019 ms | 94.54 MiB (1%) | 19736 |
| `["nested", "Genetics:1+A+(1|G)+(1|H)"]` | 2.062 ms (5%) | | 317.86 KiB (1%) | 6566 |
| `["nested", "Pastes:1+(1|G)+(1|H)"]` | 2.298 ms (5%) | | 326.86 KiB (1%) | 7028 |
| `["nested", "Semi2:1+A+(1|G)+(1|H)"]` | 2.309 ms (5%) | | 352.11 KiB (1%) | 7236 |
| `["simplescalar", "Alfalfa:1+A*B+(1|G)"]` | 1.210 ms (5%) | | 208.80 KiB (1%) | 3528 |
| `["simplescalar", "Alfalfa:1+A+B+(1|G)"]` | 1.021 ms (5%) | | 168.47 KiB (1%) | 2901 |
| `["simplescalar", "AvgDailyGain:1+A*U+(1|G)"]` | 1.287 ms (5%) | | 193.33 KiB (1%) | 3811 |
| `["simplescalar", "AvgDailyGain:1+A+U+(1|G)"]` | 1.144 ms (5%) | | 169.59 KiB (1%) | 3294 |
| `["simplescalar", "BIB:1+A*U+(1|G)"]` | 1.574 ms (5%) | | 222.20 KiB (1%) | 4738 |
| `["simplescalar", "BIB:1+A+U+(1|G)"]` | 1.171 ms (5%) | | 171.31 KiB (1%) | 3384 |
| `["simplescalar", "Bond:1+A+(1|G)"]` | 958.770 μs (5%) | | 141.25 KiB (1%) | 2615 |
| `["simplescalar", "Cultivation:1+A*B+(1|G)"]` | 1.089 ms (5%) | | 173.38 KiB (1%) | 3298 |
| `["simplescalar", "Cultivation:1+A+(1|G)"]` | 1.138 ms (5%) | | 162.14 KiB (1%) | 3254 |
| `["simplescalar", "Cultivation:1+A+B+(1|G)"]` | 1.147 ms (5%) | | 173.47 KiB (1%) | 3433 |
| `["simplescalar", "Dyestuff2:1+(1|G)"]` | 830.840 μs (5%) | | 105.20 KiB (1%) | 2225 |
| `["simplescalar", "Dyestuff:1+(1|G)"]` | 974.091 μs (5%) | | 120.86 KiB (1%) | 2692 |
| `["simplescalar", "Exam:1+A*U+B+(1|G)"]` | 2.250 ms (5%) | | 1.17 MiB (1%) | 4662 |
| `["simplescalar", "Exam:1+A+B+U+(1|G)"]` | 2.133 ms (5%) | | 1.03 MiB (1%) | 4325 |
| `["simplescalar", "Gasoline:1+U+(1|G)"]` | 1.164 ms (5%) | | 162.03 KiB (1%) | 3294 |
| `["simplescalar", "Hsb82:1+A+B+C+U+(1|G)"]` | 3.048 ms (5%) | | 2.12 MiB (1%) | 4611 |
| `["simplescalar", "IncBlk:1+A+U+V+W+Z+(1|G)"]` | 1.226 ms (5%) | | 208.83 KiB (1%) | 4135 |
| `["simplescalar", "Mississippi:1+A+(1|G)"]` | 980.968 μs (5%) | | 145.75 KiB (1%) | 2704 |
| `["simplescalar", "PBIB:1+A+(1|G)"]` | 1.509 ms (5%) | | 234.47 KiB (1%) | 3881 |
| `["simplescalar", "Rail:1+(1|G)"]` | 1.251 ms (5%) | | 151.34 KiB (1%) | 3622 |
| `["simplescalar", "Semiconductor:1+A*B+(1|G)"]` | 1.313 ms (5%) | | 222.95 KiB (1%) | 3674 |
| `["simplescalar", "TeachingII:1+A+T+U+V+W+X+Z+(1|G)"]` | 1.483 ms (5%) | | 284.53 KiB (1%) | 5472 |
| `["simplescalar", "cake:1+A*B+(1|G)"]` | 1.606 ms (5%) | | 412.83 KiB (1%) | 3666 |
| `["simplescalar", "ergoStool:1+A+(1|G)"]` | 1.057 ms (5%) | | 155.59 KiB (1%) | 2913 |
| `["singlevector", "Early:1+U+U&A+((1+U)|G)"]` | 20.373 ms (5%) | | 3.47 MiB (1%) | 80473 |
| `["singlevector", "HR:1+A*U+V+((1+U)|G)"]` | 5.183 ms (5%) | | 915.00 KiB (1%) | 27962 |
| `["singlevector", "Oxboys:1+U+((1+U)|G)"]` | 13.207 ms (5%) | | 1.93 MiB (1%) | 51919 |
| `["singlevector", "SIMS:1+U+((1+U)|G)"]` | 61.675 ms (5%) | | 12.86 MiB (1%) | 394095 |
| `["singlevector", "WWheat:1+U+((1+U)|G)"]` | 7.311 ms (5%) | | 902.31 KiB (1%) | 24071 |
| `["singlevector", "Weights:1+A*U+((1+U)|G)"]` | 18.303 ms (5%) | | 3.20 MiB (1%) | 92915 |
| `["singlevector", "sleepstudy:1+U+((1+U)|G)"]` | 4.829 ms (5%) | | 797.48 KiB (1%) | 23820 |
| `["singlevector", "sleepstudy:1+U+(1|G)+((0+U)|G)"]` | 3.219 ms (5%) | | 605.13 KiB (1%) | 19180 |
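An entry in the `ID` column is a key path that can be used to look up the corresponding benchmark. A hypothetical sketch (assuming the `BenchmarkGroup` defined by this package's benchmark suite is bound to `suite`):
```julia
using BenchmarkTools

# hypothetical: `suite` is the BenchmarkGroup defined in the package's benchmark suite
b = suite[["simplescalar", "Dyestuff:1+(1|G)"]]  # index with the ID key path
run(b)                                           # run just that benchmark
```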
## Benchmark Group List
Here's a list of all the benchmark groups executed by this job:
- `["crossed"]`
- `["crossedvector"]`
- `["nested"]`
- `["simplescalar"]`
- `["singlevector"]`
## Julia versioninfo
```
Julia Version 1.0.0
Commit 5d4eaca0c9 (2018-08-08 20:58 UTC)
Platform Info:
OS: Linux (x86_64-linux-gnu)
Ubuntu 18.04.1 LTS
uname: Linux 4.15.0-36-generic #39-Ubuntu SMP Mon Sep 24 16:19:09 UTC 2018 x86_64 x86_64
CPU: Intel(R) Core(TM) i5-3570 CPU @ 3.40GHz:
speed user nice sys idle irq
#1 1690 MHz 140498 s 134 s 18382 s 1495130 s 0 s
#2 2513 MHz 131505 s 16 s 18277 s 1504212 s 0 s
#3 1900 MHz 145131 s 581 s 18892 s 1485409 s 0 s
#4 1682 MHz 190751 s 38 s 17941 s 1445446 s 0 s
Memory: 15.554645538330078 GB (10502.1171875 MB free)
Uptime: 16578.0 sec
Load Avg: 1.4091796875 2.07080078125 1.63037109375
WORD_SIZE: 64
LIBM: libopenlibm
LLVM: libLLVM-6.0.0 (ORCJIT, ivybridge)
```
# Parametric bootstrap for mixed-effects models
Julia is well-suited to implementing bootstrapping and other simulation-based methods for statistical models.
The `parametricbootstrap` function in the [MixedModels package](https://github.com/JuliaStats/MixedModels.jl) provides an efficient parametric bootstrap for mixed-effects models.
```@docs
parametricbootstrap
```
## The parametric bootstrap
[Bootstrapping](https://en.wikipedia.org/wiki/Bootstrapping_(statistics)) is a family of procedures
for generating sample values of a statistic, allowing for visualization of the distribution of the
statistic or for inference from this sample of values.
A _parametric bootstrap_ is used with a parametric model, `m`, that has been fit to data.
The procedure is to simulate `n` response vectors from `m` using the estimated parameter values
and refit `m` to these responses in turn, accumulating the statistics of interest at each iteration.
The parameters of a `LinearMixedModel` object are the fixed-effects
parameters, `β`, the standard deviation, `σ`, of the per-observation noise, and the covariance
parameter, `θ`, that defines the variance-covariance matrices of the random effects.
For example, a simple linear mixed-effects model for the `Dyestuff` data in the [`lme4`](http://github.com/lme4/lme4)
package for [`R`](https://www.r-project.org) is fit by
```@example Main
using DataFrames
using Gadfly # plotting package
using MixedModels
using Random
```
```@example Main
dyestuff = MixedModels.dataset(:dyestuff)
m1 = fit(MixedModel, @formula(yield ~ 1 + (1 | batch)), dyestuff)
```
To bootstrap the model parameters, first initialize a random number generator, then create a bootstrap sample and extract the `tbl` property, which is a `Table`, a lightweight dataframe-like object.
```@example Main
const rng = MersenneTwister(1234321);
samp = parametricbootstrap(rng, 10_000, m1);
tbl = samp.tbl
```
A density plot of the estimates of `σ`, the residual standard deviation, can be created as
```@example Main
plot(x = tbl.σ, Geom.density, Guide.xlabel("Parametric bootstrap estimates of σ"))
```
or, for the intercept parameter
```@example Main
plot(x = tbl.β1, Geom.density, Guide.xlabel("Parametric bootstrap estimates of β₁"))
```
A density plot of the estimates of the standard deviation of the random effects is obtained as
```@example Main
plot(x = tbl.σ1, Geom.density,
Guide.xlabel("Parametric bootstrap estimates of σ₁"))
```
Notice that this density plot has a spike, or mode, at zero.
Although this mode appears to be diffuse, this is an artifact of the way that density plots are created.
In fact, it is a pulse, as can be seen from a histogram.
```@example Main
plot(x = tbl.σ1, Geom.histogram,
Guide.xlabel("Parametric bootstrap estimates of σ₁"))
```
The bootstrap sample can be used to generate intervals that cover a certain percentage of the bootstrapped values.
We refer to these as "coverage intervals", similar to a confidence interval.
The shortest such intervals, obtained with the `shortestcovint` extractor, correspond to a highest posterior density interval in Bayesian inference.
```@docs
shortestcovint
```
We generate these directly from the original bootstrap object:
```@example Main
Table(shortestcovint(samp))
```
A value of zero for the standard deviation of the random effects is an example of a *singular* covariance.
It is easy to detect the singularity in the case of a scalar random-effects term.
However, it is not as straightforward to detect singularity in vector-valued random-effects terms.
For example, if we bootstrap a model fit to the `sleepstudy` data
```@example Main
sleepstudy = MixedModels.dataset(:sleepstudy)
contrasts = Dict(:subj => Grouping())
m2 = let f = @formula reaction ~ 1+days+(1+days|subj)
fit(MixedModel, f, sleepstudy; contrasts)
end
```
```@example Main
samp2 = parametricbootstrap(rng, 10_000, m2);
tbl2 = samp2.tbl
```
the singularity can be exhibited as a standard deviation of zero or as a correlation of $\pm1$.
```@example Main
shortestcovint(samp2)
```
A histogram of the estimated correlations from the bootstrap sample has a spike at `+1`.
```@example Main
plot(x = tbl2.ρ1, Geom.histogram,
Guide.xlabel("Parametric bootstrap samples of correlation of random effects"))
```
or, as a count,
```@example Main
count(tbl2.ρ1 .≈ 1)
```
Close examination of the histogram shows a few values of `-1`.
```@example Main
count(tbl2.ρ1 .≈ -1)
```
Furthermore there are even a few cases where the estimate of the standard deviation of the random effect for the intercept is zero.
```@example Main
count(tbl2.σ1 .≈ 0)
```
There is a general condition to check for singularity of an estimated covariance matrix or matrices in a bootstrap sample.
The parameter optimized in the estimation is `θ`, the relative covariance parameter.
Some of the elements of this parameter vector must be non-negative and, when one of these components is approximately zero, one of the covariance matrices will be singular.
The `issingular` method for a `MixedModel` object tests whether a parameter vector `θ` corresponds to a boundary or singular fit.
Applied to a bootstrap sample, it returns a vector of indicators, one per replicate.
```@example Main
count(issingular(samp2))
```
## Reduced Precision Bootstrap
`parametricbootstrap` accepts an optional keyword argument `optsum_overrides`, which can be used to override the convergence criteria for bootstrap replicates. One possibility is setting `ftol_rel=1e-8`, i.e., considering the model converged when the relative change in the objective between optimizer iterations is smaller than 0.00000001.
This threshold corresponds approximately to the precision from treating the value of the objective as a single precision (`Float32`) number, while not changing the precision of the intermediate computations.
The resultant loss in precision will generally be smaller than the variation that the bootstrap captures, but can greatly speed up the fitting process for each replicate, especially for large models.
More directly, lowering the fit quality of each replicate reduces the precision of each individual estimate, but this may be more than compensated for by the ability to fit a much larger number of replicates in the same time.
```@example Main
t = @timed parametricbootstrap(MersenneTwister(42), 1000, m2; progress=false)
t.time
```
```@example Main
optsum_overrides = (; ftol_rel=1e-8)
t = @timed parametricbootstrap(MersenneTwister(42), 1000, m2; optsum_overrides, progress=false)
t.time
```
## Distributed Computing and the Bootstrap
Earlier versions of MixedModels.jl supported a multi-threaded bootstrap via the `use_threads` keyword argument.
However, with improved BLAS multithreading, the Julia-level threads often wound up competing with the BLAS threads, leading to no improvement or even a worsening of performance when `use_threads=true`.
Nonetheless, the bootstrap is a classic example of an [embarrassingly parallel](https://en.wikipedia.org/wiki/Embarrassingly_parallel) problem and so we provide a few convenience methods for combining results computed separately.
In particular, there are `vcat` and optimized `reduce(::typeof(vcat))` methods for `MixedModelBootstrap` objects.
For computers with many processors (as opposed to a single processor with several cores) or for computing clusters, these provide a convenient way to split the computation across nodes.
```@example Main
using Distributed
# you already have 1 proc by default, so add the number of additional cores with `addprocs`
# you need at least as many RNGs as cores you want to use in parallel
# but you shouldn't use all of your cores because nested within this
# is the multithreading of the linear algebra
# addprocs(1)
@info "Currently using $(nprocs()) processors total and $(nworkers()) for work"
# Load the necessary packages on all workers
# For clusters, you will also need to make sure that the Julia
# environment (Project.toml) is set up and activated on each worker.
@everywhere begin
using ProgressMeter
using MixedModels
end
# copy everything to workers
@showprogress for w in workers()
remotecall_fetch(() -> coefnames(m2), w)
end
# split the replicates across the workers
# this rounds down, so if the number of workers doesn't divide the
# number of replicates, you'll be a few replicates short!
n_replicates = 1000
n_rep_per_worker = n_replicates ÷ nworkers()
# NB: You need a different seed/RNG for each worker otherwise you will
# have copies of the same replicates and not independent replicates!
pb_map = @showprogress pmap(MersenneTwister.(1:nworkers())) do rng
parametricbootstrap(rng, n_rep_per_worker, m2; optsum_overrides)
end;
# get rid of all the workers
# rmprocs(workers())
confint(reduce(vcat, pb_map))
```
# Model constructors
The `LinearMixedModel` type represents a linear mixed-effects model.
Typically it is constructed from a `Formula` and an appropriate `Table` type, usually a `DataFrame`.
## Examples of linear mixed-effects model fits
For illustration, several data sets from the *lme4* package for *R* are made available in `.arrow` format in this package.
Often, for convenience, we will convert these to `DataFrame`s.
These data sets include the `dyestuff` and `dyestuff2` data sets.
```@setup Main
using DisplayAs
```
```@example Main
using DataFrames, MixedModels, StatsModels
dyestuff = MixedModels.dataset(:dyestuff)
```
```@example Main
describe(DataFrame(dyestuff))
```
### The `@formula` language in Julia
MixedModels.jl builds on the *Julia* formula language provided by [StatsModels.jl](https://juliastats.org/StatsModels.jl/stable/formula/), which is similar to the formula language in *R* and is also based on the notation from Wilkinson and Rogers ([1973](https://dx.doi.org/10.2307/2346786)). There are two ways to construct a formula in Julia. The first way is to enclose the formula expression in the `@formula` macro:
```@docs
@formula
```
The second way is to combine `Term`s with operators like `+`, `&`, `~`, and others at "run time". This is especially useful if you wish to create a formula from a list of variable names. For instance, the following are equivalent:
```@example Main
@formula(y ~ 1 + a + b + a & b) == (term(:y) ~ term(1) + term(:a) + term(:b) + term(:a) & term(:b))
```
MixedModels.jl provides additional formula syntax for representing *random-effects terms*. Most importantly, `|` separates random effects and their grouping factors (as in the formula extension used by the *R* package [`lme4`](https://cran.r-project.org/web/packages/lme4/index.html)). Much like with the base formula language, `|` can be used within the `@formula` macro and to construct a formula programmatically:
```@example Main
@formula(y ~ 1 + a + b + (1 + a + b | g))
```
```@example Main
terms = sum(term(t) for t in [1, :a, :b])
group = term(:g)
response = term(:y)
response ~ terms + (terms | group)
```
### Models with simple, scalar random effects
A basic model with simple, scalar random effects for the levels of `batch` (the batch of an intermediate product, in this case) is declared and fit as
```@example Main
fm = @formula(yield ~ 1 + (1|batch))
fm1 = fit(MixedModel, fm, dyestuff)
DisplayAs.Text(ans) # hide
```
(If you are new to Julia you may find that this first fit takes an unexpectedly long time, due to Just-In-Time (JIT) compilation of the code. The subsequent calls to such functions are much faster.)
```@example Main
using BenchmarkTools
dyestuff2 = MixedModels.dataset(:dyestuff2)
@benchmark fit(MixedModel, $fm, $dyestuff2)
```
By default, the model is fit by maximum likelihood. To use the `REML` criterion instead, add the optional named argument `REML=true` to the call to `fit`
```@example Main
fm1reml = fit(MixedModel, fm, dyestuff, REML=true)
DisplayAs.Text(ans) # hide
```
### Floating-point type in the model
The type of `fm1`
```@example Main
typeof(fm1)
```
includes the floating point type used internally for the various matrices, vectors, and scalars that represent the model.
At present, this will always be `Float64` because the parameter estimates are optimized using the [`NLopt` package](https://github.com/JuliaOpt/NLopt.jl) which calls compiled C code that only allows for optimization with respect to a `Float64` parameter vector.
So in theory other floating point types, such as `BigFloat` or `Float32`, can be used to define a model but in practice only `Float64` works at present.
> In theory, theory and practice are the same. In practice, they aren't. -- Anon
### Simple, scalar random effects
A simple, scalar random effects term in a mixed-effects model formula is of the form `(1|G)`.
All random effects terms end with `|G` where `G` is the *grouping factor* for the random effect.
The name or, more generally the expression, `G`, should evaluate to a categorical array that has a distinct set of *levels*.
The random effects are associated with the levels of the grouping factor.
A *scalar* random effect is, as the name implies, one scalar value for each level of the grouping factor.
A *simple, scalar* random effects term is of the form, `(1|G)`.
It corresponds to a shift in the intercept for each level of the grouping factor.
### Models with vector-valued random effects
The *sleepstudy* data are observations of reaction time, `reaction`, on several subjects, `subj`, after 0 to 9 days of sleep deprivation, `days`.
A model with random intercepts and random slopes for each subject, allowing for within-subject correlation of the slope and intercept, is fit as
```@example Main
sleepstudy = MixedModels.dataset(:sleepstudy)
fm2 = fit(MixedModel, @formula(reaction ~ 1 + days + (1 + days|subj)), sleepstudy)
DisplayAs.Text(ans) # hide
```
### Models with multiple, scalar random-effects terms
A model for the *Penicillin* data incorporates random effects for the plate, and for the sample.
As every sample is used on every plate these two factors are *crossed*.
```@example Main
penicillin = MixedModels.dataset(:penicillin)
fm3 = fit(MixedModel, @formula(diameter ~ 1 + (1|plate) + (1|sample)), penicillin)
DisplayAs.Text(ans) # hide
```
In contrast, the `cask` grouping factor is *nested* within the `batch` grouping factor in the *Pastes* data.
```@example Main
pastes = DataFrame(MixedModels.dataset(:pastes))
describe(pastes)
```
This can be expressed using the solidus (the "`/`" character) to separate grouping factors, read "`cask` nested within `batch`":
```@example Main
fm4a = fit(MixedModel, @formula(strength ~ 1 + (1|batch/cask)), pastes)
DisplayAs.Text(ans) # hide
```
If the levels of the inner grouping factor are unique across the levels of the outer grouping factor, then this nesting does not need to be expressed explicitly in the model syntax. For example, defining `sample` to be the combination of `batch` and `cask` yields a naming scheme where the nesting is apparent from the data even if not expressed in the formula. (That is, each level of `sample` occurs in conjunction with only one level of `batch`.) As such, this model is equivalent to the previous one.
```@example Main
pastes.sample = string.(pastes.cask, "&", pastes.batch)
fm4b = fit(MixedModel, @formula(strength ~ 1 + (1|sample) + (1|batch)), pastes)
DisplayAs.Text(ans) # hide
```
In observational studies it is common to encounter *partially crossed* grouping factors.
For example, the *InstEval* data are course evaluations by students, `s`, of instructors, `d`.
Additional covariates include the academic department, `dept`, in which the course was given and `service`, whether or not it was a service course.
```@example Main
insteval = MixedModels.dataset(:insteval)
fm5 = fit(MixedModel, @formula(y ~ 1 + service * dept + (1|s) + (1|d)), insteval)
DisplayAs.Text(ans) # hide
```
### Simplifying the random effect correlation structure
MixedModels.jl estimates not only the *variance* of the effects for each random effect level, but also the *correlation* between the random effects for different predictors.
So, for the model of the *sleepstudy* data above, one of the parameters that is estimated is the correlation between each subject's random intercept (i.e., their baseline reaction time) and slope (i.e., their particular change in reaction time per day of sleep deprivation).
In some cases, you may wish to simplify the random effects structure by removing these correlation parameters.
This often arises when there are many random effects you want to estimate (as is common in psychological experiments with many conditions and covariates), since the number of random effects parameters increases as the square of the number of predictors, making these models difficult to estimate from limited data.
The special syntax `zerocorr` can be applied to individual random effects terms inside the `@formula`:
```@example Main
fm2zerocorr_fm = fit(MixedModel, @formula(reaction ~ 1 + days + zerocorr(1 + days|subj)), sleepstudy)
DisplayAs.Text(ans) # hide
```
Alternatively, correlations between parameters can be removed by including them as separate random effects terms:
```@example Main
fit(MixedModel, @formula(reaction ~ 1 + days + (1|subj) + (days|subj)), sleepstudy)
DisplayAs.Text(ans) # hide
```
Finally, for predictors that are categorical, MixedModels.jl will estimate correlations between each level.
Notice the large number of correlation parameters if we treat `days` as a categorical variable by giving it contrasts:
```@example Main
fit(MixedModel, @formula(reaction ~ 1 + days + (1 + days|subj)), sleepstudy,
contrasts = Dict(:days => DummyCoding()))
DisplayAs.Text(ans) # hide
```
Separating the `1` and `days` random effects into separate terms removes the correlations between the intercept and the levels of `days`, but not between the levels themselves:
```@example Main
fit(MixedModel, @formula(reaction ~ 1 + days + (1|subj) + (days|subj)), sleepstudy,
contrasts = Dict(:days => DummyCoding()))
DisplayAs.Text(ans) # hide
```
(Notice that the variance component for `days: 1` is estimated as zero, so the correlations for this component are undefined and expressed as `NaN`, not a number.)
An alternative is to force all the levels of `days` as indicators using `fulldummy` encoding.
```@docs
fulldummy
```
```@example Main
fit(MixedModel, @formula(reaction ~ 1 + days + (1 + fulldummy(days)|subj)), sleepstudy,
contrasts = Dict(:days => DummyCoding()))
DisplayAs.Text(ans) # hide
```
This produces a better fit as measured by the objective (negative twice the log-likelihood is 1610.8) but at the expense of adding many more parameters to the model.
As a result, model comparison criteria, such as `AIC` and `BIC`, are inflated.
But using `zerocorr` on the individual terms does remove the correlations between the levels:
```@example Main
fit(MixedModel, @formula(reaction ~ 1 + days + zerocorr(1 + days|subj)), sleepstudy,
contrasts = Dict(:days => DummyCoding()))
DisplayAs.Text(ans) # hide
```
```@example Main
fit(MixedModel, @formula(reaction ~ 1 + days + (1|subj) + zerocorr(days|subj)), sleepstudy,
contrasts = Dict(:days => DummyCoding()))
DisplayAs.Text(ans) # hide
```
```@example Main
fit(MixedModel, @formula(reaction ~ 1 + days + zerocorr(1 + fulldummy(days)|subj)), sleepstudy,
contrasts = Dict(:days => DummyCoding()))
DisplayAs.Text(ans) # hide
```
## Fitting generalized linear mixed models
To create a GLMM representation, the distribution family for the response, and possibly the link function, must be specified.
```@example Main
verbagg = MixedModels.dataset(:verbagg)
verbaggform = @formula(r2 ~ 1 + anger + gender + btype + situ + mode + (1|subj) + (1|item));
gm1 = fit(MixedModel, verbaggform, verbagg, Bernoulli())
DisplayAs.Text(ans) # hide
```
The canonical link, which is `LogitLink` for the `Bernoulli` distribution, is used if no explicit link is specified.
Note that, in keeping with convention in the [`GLM` package](https://github.com/JuliaStats/GLM.jl), the distribution family for a binary (i.e. 0/1) response is the `Bernoulli` distribution.
The `Binomial` distribution is only used when the response is the fraction of trials returning a positive, in which case the number of trials must be specified as the case weights.
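As a sketch of this `Binomial` usage (assuming the `cbpp` dataset that ships with the package, with columns `incid`, `hsz`, `period`, and `herd`), the fraction of positive trials is the response and the numbers of trials are passed via `wts`:
```julia
cbpp = MixedModels.dataset(:cbpp)  # assumed dataset and column names
gmb = fit(MixedModel,
          @formula((incid / hsz) ~ 1 + period + (1 | herd)),
          cbpp,
          Binomial();
          wts=float(cbpp.hsz))  # number of trials supplied as case weights
```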
### Optional arguments to fit
An alternative approach is to create the `GeneralizedLinearMixedModel` object then call `fit!` on it.
The optional arguments `fast` and/or `nAGQ` can be passed to the optimization process via both `fit` and `fit!` (i.e. these optimization settings are neither used nor recognized when constructing the model).
As the name implies, `fast=true` provides a faster but somewhat less accurate fit.
These fits may suffice for model comparisons.
```@example Main
gm1a = fit(MixedModel, verbaggform, verbagg, Bernoulli(), fast = true)
deviance(gm1a) - deviance(gm1)
```
```@example Main
@benchmark fit(MixedModel, $verbaggform, $verbagg, Bernoulli())
```
```@example Main
@benchmark fit(MixedModel, $verbaggform, $verbagg, Bernoulli(), fast = true)
```
The optional argument `nAGQ=k` causes evaluation of the deviance function to use a `k` point
adaptive Gauss-Hermite quadrature rule.
This method only applies to models with a single, simple, scalar random-effects term, such as
```@example Main
contraception = MixedModels.dataset(:contra)
contraform = @formula(use ~ 1 + age + abs2(age) + livch + urban + (1|dist));
bernoulli = Bernoulli()
deviances = Dict{Symbol,Float64}()
b = @benchmarkable deviances[:default] = deviance(fit(MixedModel, $contraform, $contraception, $bernoulli));
run(b)
b = @benchmarkable deviances[:fast] = deviance(fit(MixedModel, $contraform, $contraception, $bernoulli, fast = true));
run(b)
b = @benchmarkable deviances[:nAGQ] = deviance(fit(MixedModel, $contraform, $contraception, $bernoulli, nAGQ=9));
run(b)
b = @benchmarkable deviances[:nAGQ_fast] = deviance(fit(MixedModel, $contraform, $contraception, $bernoulli, nAGQ=9, fast=true));
run(b)
sort(deviances)
```
# Extractor functions
`LinearMixedModel` and `GeneralizedLinearMixedModel` are subtypes of `StatsAPI.RegressionModel` which, in turn, is a subtype of `StatsBase.StatisticalModel`.
Many of the generic extractors defined in the `StatsBase` package have methods for these models.
## Model-fit statistics
The statistics describing the quality of the model fit include
```@docs
loglikelihood
aic
bic
dof
nobs
```
```@example Main
loglikelihood(fm1)
```
```@example Main
aic(fm1)
```
```@example Main
bic(fm1)
```
```@example Main
dof(fm1) # 1 fixed effect, 2 variances
```
```@example Main
nobs(fm1) # 30 observations
```
```@example Main
loglikelihood(gm1)
```
In general the [`deviance`](https://en.wikipedia.org/wiki/Deviance_(statistics)) of a statistical model fit is negative twice the log-likelihood adjusted for the saturated model.
```@docs
deviance(::StatisticalModel)
```
Because it is not clear what the saturated model corresponding to a particular `LinearMixedModel` should be, negative twice the log-likelihood is called the `objective`.
```@docs
objective
```
This value is also accessible as the `deviance` but the user should bear in mind that this doesn't have all the properties of a deviance which is corrected for the saturated model.
For example, it is not necessarily non-negative.
```@example Main
objective(fm1)
```
```@example Main
deviance(fm1)
```
The value optimized when fitting a `GeneralizedLinearMixedModel` is the Laplace approximation to the deviance or an adaptive Gauss-Hermite evaluation.
```@docs
MixedModels.deviance!
```
```@example Main
MixedModels.deviance!(gm1)
```
## Fixed-effects parameter estimates
The `coef` and `fixef` extractors both return the maximum likelihood estimates of the fixed-effects coefficients.
They differ in their behavior in the rank-deficient case.
The associated `coefnames` and `fixefnames` return the corresponding coefficient names.
```@docs
coef
coefnames
fixef
fixefnames
```
```@example Main
coef(fm1)
coefnames(fm1)
```
```@example Main
fixef(fm1)
fixefnames(fm1)
```
An alternative extractor for the fixed-effects coefficient is the `β` property.
Properties whose names are Greek letters usually have an alternative spelling, which is the name of the Greek letter.
```@example Main
fm1.β
```
```@example Main
fm1.beta
```
```@example Main
gm1.β
```
A full list of property names is returned by `propertynames`
```@example Main
propertynames(fm1)
```
```@example Main
propertynames(gm1)
```
The variance-covariance matrix of the fixed-effects coefficients is returned by
```@docs
vcov
```
```@example Main
vcov(fm2)
```
```@example Main
vcov(gm1)
```
The standard errors are the square roots of the diagonal elements of the estimated variance-covariance matrix of the fixed-effects coefficient estimators.
```@docs
stderror
```
```@example Main
stderror(fm2)
```
```@example Main
stderror(gm1)
```
Finally, the `coeftable` generic produces a table of coefficient estimates, their standard errors, and their ratio.
The *p-values* quoted here should be regarded as approximations.
```@docs
coeftable
```
```@example Main
coeftable(fm2)
DisplayAs.Text(ans) # hide
```
## Covariance parameter estimates
The covariance parameters estimates, in the form shown in the model summary, are a `VarCorr` object
```@example Main
VarCorr(fm2)
DisplayAs.Text(ans) # hide
```
```@example Main
VarCorr(gm1)
DisplayAs.Text(ans) # hide
```
Individual components are returned by other extractors
```@docs
varest
sdest
```
```@example Main
varest(fm2)
```
```@example Main
sdest(fm2)
```
```@example Main
fm2.σ
```
## Conditional modes of the random effects
The `ranef` extractor
```@docs
ranef
```
```@example Main
ranef(fm1)
```
```@example Main
fm1.b
```
returns the *conditional modes* of the random effects given the observed data.
That is, these are the values that maximize the conditional density of the random effects given the observed data.
For a `LinearMixedModel` these are also the conditional means.
These are sometimes called the *best linear unbiased predictors* or [`BLUPs`](https://en.wikipedia.org/wiki/Best_linear_unbiased_prediction) but that name is not particularly meaningful.
At a superficial level these can be considered as the "estimates" of the random effects, with a bit of hand waving, but pursuing this analogy too far usually results in confusion.
To obtain tables associating the values of the conditional modes with the levels of the grouping factor, use
```@docs
raneftables
```
as in
```@example Main
DataFrame(only(raneftables(fm1)))
```
The corresponding conditional variances are returned by
```@docs
condVar
```
```@example Main
condVar(fm1)
```
## Case-wise diagnostics and residual degrees of freedom
The `leverage` values
```@docs
leverage
```
```@example Main
leverage(fm1)
```
are used in diagnostics for linear regression models to determine cases that exert a strong influence on their own predicted response.
The documentation refers to a "projection".
For a linear model without random effects the fitted values are obtained by orthogonal projection of the response onto the column span of the model matrix and the sum of the leverage values is the dimension of this column span.
That is, the sum of the leverage values is the rank of the model matrix and `n - sum(leverage(m))` is the degrees of freedom for residuals.
The sum of the leverage values is also the trace of the so-called "hat" matrix, `H`.
(The name "hat matrix" reflects the fact that $\hat{\mathbf{y}} = \mathbf{H} \mathbf{y}$. That is, `H` puts a hat on `y`.)
For a linear mixed model the sum of the leverage values will be between `p`, the rank of the fixed-effects model matrix, and `p + q` where `q` is the total number of random effects.
This number does not represent a dimension (or "degrees of freedom") of a linear subspace of all possible fitted values because the projection is not an orthogonal projection.
Nevertheless, it is a reasonable measure of the effective degrees of freedom of the model and `n - sum(leverage(m))` can be considered the effective residual degrees of freedom.
For model `fm1` the dimensions are
```@example Main
n, p, q, k = size(fm1)
```
which implies that the sum of the leverage values should be in the range [1, 7].
The actual value is
```@example Main
sum(leverage(fm1))
```
For model `fm2` the dimensions are
```@example Main
n, p, q, k = size(fm2)
```
providing a range of [2, 38] for the effective degrees of freedom for the model.
The observed value is
```@example Main
sum(leverage(fm2))
```
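The effective residual degrees of freedom described above can then be computed directly:
```@example Main
nobs(fm2) - sum(leverage(fm2))
```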
When a model converges to a singular covariance, such as
```@example Main
fm3 = fit(MixedModel, @formula(yield ~ 1+(1|batch)), MixedModels.dataset(:dyestuff2))
DisplayAs.Text(ans) # hide
```
the effective degrees of freedom is the lower bound.
```@example Main
sum(leverage(fm3))
```
Models for which the estimates of the variances of the random effects are large relative to the residual variance have effective degrees of freedom close to the upper bound.
```@example Main
fm4 = fit(MixedModel, @formula(diameter ~ 1+(1|plate)+(1|sample)),
MixedModels.dataset(:penicillin))
DisplayAs.Text(ans) # hide
```
```@example Main
sum(leverage(fm4))
```
Also, a model fit by the REML criterion generally has larger estimates of the variance components and hence a larger effective degrees of freedom.
```@example Main
fm4r = fit(MixedModel, @formula(diameter ~ 1+(1|plate)+(1|sample)),
MixedModels.dataset(:penicillin), REML=true)
DisplayAs.Text(ans) # hide
```
```@example Main
sum(leverage(fm4r))
```
# MixedModels.jl Documentation
```@meta
CurrentModule = MixedModels
```
*MixedModels.jl* is a Julia package providing capabilities for fitting and examining linear and generalized linear mixed-effect models.
It is similar in scope to the [*lme4*](https://github.com/lme4/lme4) package for `R`.
```@contents
Pages = [
"constructors.md",
"optimization.md",
"GaussHermite.md",
"bootstrap.md",
"rankdeficiency.md",
"mime.md",
]
Depth = 2
```
# Alternative display and output formats
In the documentation, we have presented the output from MixedModels.jl in the same format you will see when working in the REPL.
You may have noticed, however, that output from other packages received pretty printing.
For example, DataFrames are converted into nice HTML tables.
In MixedModels, we recently (v3.2.0) introduced limited support for such pretty printing.
(For more details on how the print and display system in Julia works, check out [this NextJournal post](https://nextjournal.com/sdanisch/julias-display-system).)
In particular, we have defined Markdown, HTML and LaTeX output, i.e. `show` methods, for our types.
Note that the Markdown output can also be easily and more flexibly translated into HTML, LaTeX (e.g. with `booktabs`) or even a MS Word Document using tools such as [pandoc](https://pandoc.org/).
Packages like `IJulia` and `Documenter` can often detect the presence of these display options and use them automatically.
```@example Main
using MixedModels
form = @formula(rt_trunc ~ 1 + spkr * prec * load +
(1 + load | item) +
(1 + spkr + prec + load | subj))
contr = Dict(:spkr => EffectsCoding(),
:prec => EffectsCoding(),
:load => EffectsCoding(),
:item => Grouping(),
:subj => Grouping())
kbm = fit(MixedModel, form, MixedModels.dataset(:kb07); contrasts=contr)
```
Note that the display here is more succinct than the standard REPL display:
```@example Main
using DisplayAs
kbm |> DisplayAs.Text
```
This brevity is intentional: we wanted these types to work well with traditional academic publishing constraints on tables.
The summary for a model fit presented in the REPL does not mesh well with being treated as a single table (with columns shared between the random and fixed effects).
In our experience, this leads to difficulties in typesetting the resulting tables.
We nonetheless encourage users to report fit statistics such as the log likelihood or AIC as part of the caption of their table.
If the correlation parameters in the random effects are of interest, then [`VarCorr`](@ref) can also be pretty printed:
```@example Main
VarCorr(kbm)
```
Similarly for [`BlockDescription`](@ref), `OptSummary` and `MixedModels.likelihoodratiotest`:
```@example Main
BlockDescription(kbm)
```
```@example Main
kbm.optsum
```
```@example Main
m0 = fit(MixedModel, @formula(reaction ~ 1 + (1|subj)), MixedModels.dataset(:sleepstudy))
m1 = fit(MixedModel, @formula(reaction ~ 1 + days + (1+days|subj)), MixedModels.dataset(:sleepstudy))
MixedModels.likelihoodratiotest(m0,m1)
```
To explicitly invoke this behavior, we must specify the right `show` method.
(The raw, unrendered output is intentionally shown here.)
```julia
show(MIME("text/markdown"), m1)
```
```@example Main
println(sprint(show, MIME("text/markdown"), kbm)) # hide
```
```julia
show(MIME("text/html"), m1)
```
```@example Main
println(sprint(show, MIME("text/html"), kbm)) # hide
```
Note that for LaTeX, the column labels for the random effects are slightly changed: σ is placed into math mode and escaped and the grouping variable is turned into a subscript.
Similarly for the likelihood ratio test, the χ² is escaped into math mode.
This transformation improves pdfLaTeX and journal compatibility, but also means that XeLaTeX and LuaTeX may use a different font at this point.
```julia
show(MIME("text/latex"), m1)
```
```@example Main
println(sprint(show, MIME("text/latex"), kbm)) # hide
```
This escaping behavior can be disabled by specifying `"text/xelatex"` as the MIME type.
(Note that other symbols may still be escaped, as the internal conversion uses the `Markdown` module from the standard library, which performs some escaping on its own.)
```julia
show(MIME("text/xelatex"), m1)
```
```@example Main
println(sprint(show, MIME("text/xelatex"), kbm)) # hide
```
This output can also be written directly to file:
```julia
open("model.md", "w") do io
show(io, MIME("text/markdown"), kbm)
end
```
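From there a tool such as [pandoc](https://pandoc.org/) can convert the Markdown into other formats. A hypothetical invocation, assuming the `pandoc` binary is installed and on the PATH:
```julia
# assumes `pandoc` is installed and available on the PATH
run(`pandoc model.md -o model.docx`)
```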
# Details of the parameter estimation
## The probability model
Maximum likelihood estimates are based on the probability model for the observed responses.
In the probability model the distribution of the responses is expressed as a function of one or more *parameters*.
For a continuous distribution the probability density is a function of the responses, given the parameters.
The *likelihood* function is the same expression as the probability density but regarding the observed values as fixed and the parameters as varying.
In general a mixed-effects model incorporates two random variables: $\mathcal{B}$, the $q$-dimensional vector of random effects, and $\mathcal{Y}$, the $n$-dimensional response vector.
The value, $\bf y$, of $\mathcal{Y}$ is observed; the value, $\bf b$, of $\mathcal{B}$ is not.
## Linear Mixed-Effects Models
In a linear mixed model the unconditional distribution of $\mathcal{B}$ and the conditional distribution, $(\mathcal{Y} | \mathcal{B}=\bf{b})$, are both multivariate Gaussian distributions,
```math
\begin{aligned}
(\mathcal{Y} | \mathcal{B}=\bf{b}) &\sim\mathcal{N}(\bf{ X\beta + Z b},\sigma^2\bf{I})\\\\
\mathcal{B}&\sim\mathcal{N}(\bf{0},\Sigma_\theta) .
\end{aligned}
```
The *conditional mean* of $\mathcal Y$, given $\mathcal B=\bf b$, is the *linear predictor*, $\bf X\bf\beta+\bf Z\bf b$, which depends on the $p$-dimensional *fixed-effects parameter*, $\bf \beta$, and on $\bf b$.
The *model matrices*, $\bf X$ and $\bf Z$, of dimension $n\times p$ and $n\times q$, respectively, are determined from the formula for the model and the values of covariates.
Although the matrix $\bf Z$ can be large (i.e. both $n$ and $q$ can be large), it is sparse (i.e. most of the elements in the matrix are zero).
The *relative covariance factor*, $\Lambda_\theta$, is a $q\times q$ lower-triangular matrix, depending on the *variance-component parameter*, $\bf\theta$, and generating the symmetric $q\times q$ variance-covariance matrix, $\Sigma_\theta$, as
```math
\Sigma_\theta=\sigma^2\Lambda_\theta\Lambda_\theta'
```
The *spherical random effects*, $\mathcal{U}\sim\mathcal{N}(\bf{0},\sigma^2\bf{I}_q)$, determine $\mathcal B$ according to
```math
\mathcal{B}=\Lambda_\theta\mathcal{U}.
```
The *penalized residual sum of squares* (PRSS),
```math
r^2(\theta,\beta,\bf{u})=\|\bf{y} - \bf{X}\beta -\bf{Z}\Lambda_\theta\bf{u}\|^2+\|\bf{u}\|^2,
```
is the sum of the residual sum of squares, measuring fidelity of the model to the data, and a penalty on the size of $\bf u$, measuring the complexity of the model.
Minimizing $r^2$ with respect to $\bf u$,
```math
r^2_{\beta,\theta} =\min_{\bf{u}}\left(\|\bf{y} -\bf{X}{\beta} -\bf{Z}\Lambda_\theta\bf{u}\|^2+\|\bf{u}\|^2\right)
```
is a direct (i.e. non-iterative) computation.
The particular method used to solve this generates a *blocked Cholesky factor*, $\bf{L}_\theta$, which is a lower triangular $q\times q$ matrix satisfying
```math
\bf{L}_\theta\bf{L}_\theta'=\Lambda_\theta'\bf{Z}'\bf{Z}\Lambda_\theta+\bf{I}_q .
```
where ${\bf I}_q$ is the $q\times q$ *identity matrix*.
Negative twice the log-likelihood of the parameters, given the data, $\bf y$, is
```math
d({\bf\theta},{\bf\beta},\sigma|{\bf y})
=n\log(2\pi\sigma^2)+\log(|{\bf L}_\theta|^2)+\frac{r^2_{\beta,\theta}}{\sigma^2}.
```
where $|{\bf L}_\theta|$ denotes the *determinant* of ${\bf L}_\theta$.
Because ${\bf L}_\theta$ is triangular, its determinant is the product of its diagonal elements.
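As a quick numerical illustration of this property (plain linear algebra, not specific to mixed models):
```julia
using LinearAlgebra

L = LowerTriangular([2.0 0.0; 1.0 3.0])
det(L) ≈ prod(diag(L))  # true: the determinant is the product of the diagonal elements
```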
Because the conditional mean, $\bf\mu_{\mathcal Y|\mathcal B=\bf b}=\bf
X\bf\beta+\bf Z\Lambda_\theta\bf u$, is a linear function of both $\bf\beta$ and $\bf u$, minimizing the PRSS with respect to both $\bf\beta$ and $\bf u$, producing
```math
r^2_\theta =\min_{{\bf\beta},{\bf u}}\left(\|{\bf y} -{\bf X}{\bf\beta} -{\bf Z}\Lambda_\theta{\bf u}\|^2+\|{\bf u}\|^2\right)
```
is also a direct calculation.
The values of $\bf u$ and $\bf\beta$ that provide this minimum are called, respectively, the *conditional mode*, $\tilde{\bf u}_\theta$, of the spherical random effects and the conditional estimate, $\widehat{\bf\beta}_\theta$, of the fixed effects.
At the conditional estimate of the fixed effects the objective is
```math
d({\bf\theta},\widehat{\beta}_\theta,\sigma|{\bf y})
=n\log(2\pi\sigma^2)+\log(|{\bf L}_\theta|^2)+\frac{r^2_\theta}{\sigma^2}.
```
Minimizing this expression with respect to $\sigma^2$ produces the conditional estimate
```math
\widehat{\sigma^2}_\theta=\frac{r^2_\theta}{n}
```
which provides the *profiled log-likelihood* on the deviance scale as
```math
\tilde{d}(\theta|{\bf y})=d(\theta,\widehat{\beta}_\theta,\widehat{\sigma}_\theta|{\bf y})
=\log(|{\bf L}_\theta|^2)+n\left[1+\log\left(\frac{2\pi r^2_\theta}{n}\right)\right],
```
a function of $\bf\theta$ alone.
The MLE of $\bf\theta$, written $\widehat{\bf\theta}$, is the value that minimizes this profiled objective.
We determine this value by numerical optimization.
In the process of evaluating $\tilde{d}(\widehat{\theta}|{\bf y})$ we determine $\widehat{\beta}=\widehat{\beta}_{\widehat\theta}$, $\tilde{\bf u}_{\widehat{\theta}}$ and $r^2_{\widehat{\theta}}$, from which we can evaluate $\widehat{\sigma}=\sqrt{r^2_{\widehat{\theta}}/n}$.
The elements of the conditional mode of $\mathcal B$, evaluated at the parameter estimates,
```math
\tilde{\bf b}_{\widehat{\theta}}=\Lambda_{\widehat{\theta}}\tilde{\bf u}_{\widehat{\theta}}
```
are sometimes called the *best linear unbiased predictors* or BLUPs of the random effects.
Although BLUPs is an appealing acronym, I don’t find the term particularly instructive (what is a “linear unbiased predictor” and in what sense are these the “best”?) and prefer the term “conditional modes”, because these are the values of $\bf b$ that maximize the density of the conditional distribution $\mathcal{B} | \mathcal{Y} = {\bf y}$.
For a linear mixed model, where all the conditional and unconditional distributions are Gaussian, these values are also the *conditional means*.
## Internal structure of $\Lambda_\theta$ and $\bf Z$
In the types of `LinearMixedModel` available through the `MixedModels` package, groups of random effects and the corresponding columns of the model matrix, $\bf Z$, are associated with *random-effects terms* in the model formula.
For the simple example
```@setup Main
using DisplayAs
```
```@example Main
using BenchmarkTools, DataFrames, MixedModels
```
```@example Main
dyestuff = MixedModels.dataset(:dyestuff)
fm1 = fit(MixedModel, @formula(yield ~ 1 + (1|batch)), dyestuff)
DisplayAs.Text(ans) # hide
```
the only random effects term in the formula is `(1|batch)`, a simple, scalar random-effects term.
```@example Main
t1 = only(fm1.reterms);
Int.(t1) # convert to integers for more compact display
```
The matrix `t1` is a sparse matrix, meaning that most of the elements are zero, and its transpose is stored in a sparse form.
```@example Main
sparse(t1)'
```
provides a compact representation of the positions of the non-zeros in this matrix.
This `RandomEffectsTerm` contributes a block of columns to the model matrix $\bf Z$ and a diagonal block to $\Lambda_\theta$.
In this case the diagonal block of $\Lambda_\theta$ (which is also the only block) is a multiple of the $6\times6$
identity matrix where the multiple is
```@example Main
t1.λ
```
Because there is only one random-effects term in the model, the matrix $\bf Z$ is the indicator matrix shown as the result of `Int.(t1)`, but stored in a special sparse format.
Furthermore, there is only one block in $\Lambda_\theta$.
For a vector-valued random-effects term, as in
```@example Main
sleepstudy = MixedModels.dataset(:sleepstudy)
fm2 = fit(MixedModel, @formula(reaction ~ 1+days+(1+days|subj)), sleepstudy)
DisplayAs.Text(ans) # hide
```
the model matrix $\bf Z$ is of the form
```@example Main
t21 = only(fm2.reterms);
sparse(t21)'
```
and $\Lambda_\theta$ is a $36\times36$ block diagonal matrix with $18$ diagonal blocks, all of the form
```@example Main
t21.λ
```
The $\theta$ vector is
```@example Main
MixedModels.getθ(t21)
```
Random-effects terms in the model formula that have the same grouping factor are amalgamated into a single `ReMat` object.
```@example Main
fm3 = fit(MixedModel, @formula(reaction ~ 1+days+(1|subj) + (0+days|subj)), sleepstudy)
t31 = only(fm3.reterms);
sparse(t31)'
```
For this model the matrix $\bf Z$ is the same as that of model `fm2` but the diagonal blocks of $\Lambda_\theta$ are themselves diagonal.
```@example Main
t31.λ
```
```@example Main
MixedModels.getθ(t31)
```
Random-effects terms with distinct grouping factors generate distinct elements of the `reterms` field of the `LinearMixedModel` object.
Multiple `ReMat` objects are sorted by decreasing numbers of random effects.
```@example Main
penicillin = MixedModels.dataset(:penicillin)
fm4 = fit(MixedModel,
@formula(diameter ~ 1 + (1|sample) + (1|plate)),
penicillin)
sparse(first(fm4.reterms))'
```
```@example Main
sparse(last(fm4.reterms))'
```
Note that the first `ReMat` in `fm4.reterms` corresponds to grouping factor `plate` even though the term `(1|plate)` occurs in the formula after `(1|sample)`.
### Progress of the optimization
By default a progress display is shown when fitting a model that takes a second or more to fit. (The optional named argument, `progress=false`, can be used to suppress this display.) The number of iterations performed, the average time per iteration and the current value of the objective are shown in this display.
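For example, the display can be suppressed explicitly:
```julia
fit(MixedModel, @formula(reaction ~ 1 + days + (1 + days | subj)), sleepstudy;
    progress=false)  # disable the progress display
```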
After the model has been fit, a summary of the optimization process is available as the `optsum` property of the `LinearMixedModel`.
```@example Main
fm2.optsum
DisplayAs.Text(ans) # hide
```
More detailed information about the intermediate steps of the nonlinear optimizer can be obtained from the `fitlog` field.
By default, `fitlog` contains entries for only the initial and final steps, but additional information about every nth step can be obtained with the `thin` keyword-argument to `fit`, `fit!` and `refit!`:
```@example Main
refit!(fm2; thin=1)
fm2.optsum.fitlog[1:10]
DisplayAs.Text(ans) # hide
```
## A blocked Cholesky factor
A `LinearMixedModel` object contains two blocked matrices; a symmetric matrix `A` (only the lower triangle is stored) and a lower-triangular `L` which is the lower Cholesky factor of the updated and inflated `A`.
In versions 4.0.0 and later of `MixedModels` only the blocks in the lower triangle are stored in `A` and `L`, as a `Vector{AbstractMatrix{T}}`.
`BlockDescription` shows the structure of the blocks
```@example Main
BlockDescription(fm2)
DisplayAs.Text(ans) # hide
```
Another change in v4.0.0 and later is that the last row of blocks is constructed from `m.Xymat` which contains the full-rank model matrix `X` with the response `y` concatenated on the right.
The operation of installing a new value of the variance parameters, `θ`, and updating `L`
```@docs
setθ!
updateL!
```
is the central step in evaluating the objective (negative twice the log-likelihood).
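For example, the objective can be evaluated at a trial value of `θ` by chaining these operations (a minimal sketch; `setθ!` is qualified here in case it is not exported):
```julia
θtrial = copy(fm2.θ)  # start from the estimated covariance parameters
objective(updateL!(MixedModels.setθ!(fm2, θtrial)))
```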
Typically, the (1,1) block is the largest block in `A` and `L` and it has a special form, either `Diagonal` or `UniformBlockDiagonal`, providing a compact representation and fast matrix multiplication or solutions of linear systems of equations.
### Modifying the optimization process
The `OptSummary` object contains both input and output fields for the optimizer.
To modify the optimization process the input fields can be changed after constructing the model but before fitting it.
Suppose, for example, that the user wishes to try a [Nelder-Mead](https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method) optimization method instead of the default [`BOBYQA`](https://en.wikipedia.org/wiki/BOBYQA) (Bounded Optimization BY Quadratic Approximation) method.
```@example Main
fm2nm = LinearMixedModel(@formula(reaction ~ 1+days+(1+days|subj)), sleepstudy);
fm2nm.optsum.optimizer = :LN_NELDERMEAD;
fit!(fm2nm; thin=1)
fm2nm.optsum
DisplayAs.Text(ans) # hide
```
The parameter estimates are quite similar to those using `:LN_BOBYQA` but at the expense of 140 function evaluations for `:LN_NELDERMEAD` versus 57 for `:LN_BOBYQA`.
When plotting the progress of the individual fits, it becomes obvious that `:LN_BOBYQA` has fully converged by the time `:LN_NELDERMEAD` begins to approach the optimum.
```@example Main
using Gadfly
nm = fm2nm.optsum.fitlog
bob = fm2.optsum.fitlog
convdf = DataFrame(algorithm=[repeat(["NelderMead"], length(nm));
repeat(["BOBYQA"], length(bob))],
objective=[last.(nm); last.(bob)],
step=[1:length(nm); 1:length(bob)])
plot(convdf, x=:step, y=:objective, color=:algorithm, Geom.line)
```
Run time can be constrained with `maxfeval` and `maxtime`.
See the documentation for the [`NLopt`](https://github.com/JuliaOpt/NLopt.jl) package for details about the various settings.
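For example, a sketch that caps the number of objective evaluations before fitting:
```julia
fm2limited = LinearMixedModel(@formula(reaction ~ 1 + days + (1 + days | subj)),
                              sleepstudy);
fm2limited.optsum.maxfeval = 100;  # stop after at most 100 objective evaluations
fit!(fm2limited)
```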
### Convergence to singular covariance matrices
To ensure identifiability of $\Sigma_\theta=\sigma^2\Lambda_\theta \Lambda_\theta'$, the elements of $\theta$ corresponding to diagonal elements of $\Lambda_\theta$ are constrained to be non-negative.
For example, in a trivial case of a single, simple, scalar, random-effects term as in `fm1`, the one-dimensional $\theta$ vector is the ratio of the standard deviation of the random effects to the standard deviation of the response.
It happens that $-\theta$ produces the same log-likelihood but, by convention, we define the standard deviation to be the positive square root of the variance.
Requiring the diagonal elements of $\Lambda_\theta$ to be non-negative is a generalization of using this positive square root.
If the optimization converges on the boundary of the feasible region, that is if one or more of the diagonal elements of $\Lambda_\theta$ is zero at convergence, the covariance matrix $\Sigma_\theta$ will be *singular*.
This means that there will be linear combinations of random effects that are constant.
Usually convergence to a singular covariance matrix is a sign of an over-specified model.
Singularity can be checked with the `issingular` predicate function.
```@docs
issingular
```
```@example Main
issingular(fm2)
```
## Generalized Linear Mixed-Effects Models
In a [*generalized linear model*](https://en.wikipedia.org/wiki/Generalized_linear_model) the responses are modelled as coming from a particular distribution, such as `Bernoulli` for binary responses or `Poisson` for responses that represent counts.
The scalar distributions of individual responses differ only in their means, which are determined by a *linear predictor* expression $\eta=\bf X\beta$, where, as before, $\bf X$ is a model matrix derived from the values of covariates and $\beta$ is a vector of coefficients.
The unconstrained components of $\eta$ are mapped to the, possibly constrained, components of the mean response, $\mu$, via a scalar function, $g^{-1}$, applied to each component of $\eta$.
For historical reasons, the inverse of this function, taking components of $\mu$ to the corresponding component of $\eta$ is called the *link function* and the more frequently used map from $\eta$ to $\mu$ is the *inverse link*.
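For the `Bernoulli` family the canonical link is the logit, whose inverse is the logistic function. A small sketch using the link functions defined in the GLM.jl package:
```julia
using GLM  # `LogitLink`, `linkfun` and `linkinv` are defined here

η = 0.5
μ = GLM.linkinv(LogitLink(), η)  # inverse link: η → μ = 1 / (1 + exp(-η))
GLM.linkfun(LogitLink(), μ) ≈ η  # link: μ → η
```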
A *generalized linear mixed-effects model* (GLMM) is defined, for the purposes of this package, by
```math
\begin{aligned}
(\mathcal{Y} | \mathcal{B}=\bf{b}) &\sim\mathcal{D}(\bf{g^{-1}(X\beta + Z b)},\phi)\\\\
\mathcal{B}&\sim\mathcal{N}(\bf{0},\Sigma_\theta) .
\end{aligned}
```
where $\mathcal{D}$ indicates the distribution family parameterized by the mean and, when needed, a common scale parameter, $\phi$.
(There is no scale parameter for `Bernoulli` or for `Poisson`.
Specifying the mean completely determines the distribution.)
```@docs
Bernoulli
Poisson
```
A `GeneralizedLinearMixedModel` object is generated from a formula, data frame and distribution family.
```@example Main
verbagg = MixedModels.dataset(:verbagg)
const vaform = @formula(r2 ~ 1 + anger + gender + btype + situ + (1|subj) + (1|item));
mdl = GeneralizedLinearMixedModel(vaform, verbagg, Bernoulli());
typeof(mdl)
```
A separate call to `fit!` can be used to fit the model.
This involves optimizing an objective function, the Laplace approximation to the deviance, with respect to the parameters, which are $\beta$, the fixed-effects coefficients, and $\theta$, the covariance parameters.
The starting estimate for $\beta$ is determined by fitting a GLM to the fixed-effects part of the formula
```@example Main
mdl.β
```
and the starting estimate for $\theta$, which is a vector of the two standard deviations of the random effects, is chosen to be
```@example Main
mdl.θ
```
The Laplace approximation to the deviance requires determining the conditional modes of the random effects.
These are the values that maximize the conditional density of the random effects, given the model parameters and the data.
This is done using Penalized Iteratively Reweighted Least Squares (PIRLS).
In most cases PIRLS is fast and stable.
It is simply a penalized version of the IRLS algorithm used in fitting GLMs.
The distinction between the "fast" and "slow" algorithms in the `MixedModels` package (`nAGQ=0` or `nAGQ=1` in `lme4`) is whether the fixed-effects parameters, $\beta$, are optimized in PIRLS or in the nonlinear optimizer.
In a call to the `pirls!` function the first argument is a `GeneralizedLinearMixedModel`, which is modified during the function call.
(By convention, the names of such *mutating functions* end in `!` as a warning to the user that they can modify an argument, usually the first argument.)
The second and third arguments are optional logical values indicating if $\beta$ is to be varied and if verbose output is to be printed.
```@example Main
pirls!(mdl, true, false)
DisplayAs.Text(ans) # hide
```
```@example Main
deviance(mdl)
```
```@example Main
mdl.β
```
```@example Main
mdl.θ # current values of the standard deviations of the random effects
```
If the optimization with respect to $\beta$ is performed within PIRLS then the nonlinear optimization of the Laplace approximation to the deviance requires optimization with respect to $\theta$ only.
This is the "fast" algorithm.
Given a value of $\theta$, PIRLS is used to determine the conditional estimate of $\beta$ and the conditional mode of the random effects, **b**.
```@example Main
mdl.b # conditional modes of b
```
```@example Main
fit!(mdl, fast=true);
DisplayAs.Text(ans) # hide
```
The optimization process is summarized by
```@example Main
mdl.LMM.optsum
DisplayAs.Text(ans) # hide
```
As one would hope, given the name of the option, this fit is comparatively fast.
```@example Main
@btime fit(MixedModel, vaform, verbagg, Bernoulli(), fast=true)
DisplayAs.Text(ans) # hide
```
The alternative algorithm is to use PIRLS to find the conditional mode of the random effects, given $\beta$ and $\theta$ and then use the general nonlinear optimizer to fit with respect to both $\beta$ and $\theta$.
```@example Main
mdl1 = @btime fit(MixedModel, vaform, verbagg, Bernoulli())
DisplayAs.Text(ans) # hide
```
This fit provided slightly better results (Laplace approximation to the deviance of 8151.400 versus 8151.583) but took 6 times as long.
That is not terribly important when the times involved are a few seconds but can be important when the fit requires many hours or days of computing time.
# Prediction and simulation in Mixed-Effects Models
We recommend the [MixedModelsSim.jl](https://github.com/RePsychLing/MixedModelsSim.jl/) package and associated documentation for useful tools in constructing designs to simulate. For now, we'll use the sleep study data as a starting point.
```@example Main
using DataFrames
using MixedModels
using StatsBase
using DisplayAs # hide
# use a DataFrame to make it easier to change things later
slp = DataFrame(MixedModels.dataset(:sleepstudy))
slpm = fit(MixedModel, @formula(reaction ~ 1 + days + (1|subj)), slp)
DisplayAs.Text(slpm) # hide
```
## Prediction
The simplest form of prediction is the fitted values from the model: they are indeed the model's predictions for the observed data.
```@example Main
predict(slpm) ≈ fitted(slpm)
```
When generalizing to new data, we need to consider what happens if there are new, previously unobserved levels of the grouping variable(s).
MixedModels.jl provides three options:
1. `:error`: error on encountering unobserved levels
2. `:population`: use population values (i.e. only the fixed effects) for observations with unobserved levels
3. `:missing`: return `missing` for observations with unobserved levels.
Providing either no prediction (`:error`, `:missing`) or the population-level values (`:population`) seems to be the most reasonable way of *predicting* new values.
For *simulating* new values based on previous estimates of the variance components, use `simulate`.
In the case where there are no new levels of the grouping variable, all three of these methods provide the same results:
```@example Main
predict(slpm, slp; new_re_levels=:population) ≈ fitted(slpm)
```
```@example Main
predict(slpm, slp; new_re_levels=:missing) ≈ fitted(slpm)
```
```@example Main
predict(slpm, slp; new_re_levels=:error) ≈ fitted(slpm)
```
In the case where there are new levels of the grouping variable, these methods differ.
```@example Main
# create a new level
slp2 = transform(slp, :subj => ByRow(x -> (x == "S308" ? "NEW" : x)) => :subj)
DisplayAs.Text(ans) # hide
```
```@example Main
try
predict(slpm, slp2; new_re_levels=:error)
catch e
show(e)
end
```
```@example Main
predict(slpm, slp2; new_re_levels=:missing)
```
```@example Main
predict(slpm, slp2; new_re_levels=:population)
```
!!! note
Currently, we do not support predicting based on a subset of the random effects.
!!! note
`predict` is deterministic (within the constraints of floating point) and never adds noise to the result.
If you want to construct prediction intervals, then `simulate` will generate new data with noise (including new values of the random effects).
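As a rough sketch of that idea (reusing the `slpm` model from above, ignoring uncertainty in the parameter estimates themselves; the helper code here is illustrative, not part of the package):
```julia
using Random, Statistics
# simulate many response vectors at the estimated parameter values
sims = reduce(hcat, [simulate(MersenneTwister(i), slpm) for i in 1:1000])
# empirical 95% interval for each observation
lower = [quantile(row, 0.025) for row in eachrow(sims)]
upper = [quantile(row, 0.975) for row in eachrow(sims)]
```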
For generalized linear mixed models, there is an additional keyword argument to `predict`: `type` specifies whether the predictions are returned on the scale of the linear predictor (`:linpred`) or on the scale of the response (`:response`) (i.e. the scale on which the values were originally observed).
```@example Main
cbpp = DataFrame(MixedModels.dataset(:cbpp))
cbpp.rate = cbpp.incid ./ cbpp.hsz
gm = fit(MixedModel, @formula(rate ~ 1 + period + (1|herd)), cbpp, Binomial(), wts=float(cbpp.hsz))
predict(gm, cbpp; type=:response) ≈ fitted(gm)
```
```@example Main
logit(x) = log(x / (1 - x))
predict(gm, cbpp; type=:linpred) ≈ logit.(fitted(gm))
```
## Simulation
In contrast to `predict`, `simulate` and `simulate!` introduce randomness.
This randomness occurs both at the level of the observation-level (residual) variance and at the level of the random effects, where new conditional modes are sampled based on the specified covariance parameter (θ; see [Details of the parameter estimation](@ref)), which defaults to the estimated value of the model.
For reproducibility, we specify a pseudorandom generator here; if none is provided, the global PRNG is taken as the default.
The simplest example of `simulate` takes a fitted model and generates a new response vector based on the existing model matrices combined with noise.
```@example Main
using Random
ynew = simulate(MersenneTwister(42), slpm)
```
The simulated response can also be placed in a pre-allocated vector:
```@example Main
ynew2 = zeros(nrow(slp))
simulate!(MersenneTwister(42), ynew2, slpm)
ynew2 ≈ ynew
```
Or even directly replace the previous response vector in a model, at which point the model must be refit to the new values:
```@example Main
slpm2 = deepcopy(slpm)
refit!(simulate!(MersenneTwister(42), slpm2))
DisplayAs.Text(ans) # hide
```
This in-place simulation actually forms the basis of [`parametricbootstrap`](@ref).
Finally, we can also simulate the response from entirely new data.
```@example Main
df = DataFrame(days = repeat(1:10, outer=20), subj=repeat(1:20, inner=10))
df[!, :subj] = string.("S", lpad.(df.subj, 2, "0"))
df[!, :reaction] .= 0
df
DisplayAs.Text(df) # hide
```
```@example Main
ysim = simulate(MersenneTwister(42), slpm, df)
```
Note that this is a convenience method for creating a new model and then using the parameters from the old model to call `simulate` on that model.
In other words, this method incurs the cost of constructing a new model and then discarding it.
If you can re-use that model (e.g., when fitting that model repeatedly as part of a simulation study), it often makes sense to perform these steps explicitly and avoid the unnecessary construction and discarding of an intermediate model:
```@example Main
msim = LinearMixedModel(@formula(reaction ~ 1 + days + (1|subj)), df)
simulate!(MersenneTwister(42), msim; θ=slpm.θ, β=slpm.β, σ=slpm.σ)
response(msim) ≈ ysim
```
```@example Main
fit!(msim)
DisplayAs.Text(ans) # hide
```
For simulating from generalized linear mixed models, there is no `type` option because the observation-level noise always occurs at the level of the response and not of the linear predictor.
!!! warning
Simulating the model response in place may not yield the same result as simulating into a pre-allocated or new vector, depending on choice of pseudorandom number generator.
Random number generation in Julia allows optimization based on type, and the internal storage type of the model response (currently a view into a matrix storing the concatenated fixed-effects model matrix and the response) may not match the type of a pre-allocated or new vector.
See also [discussion here](https://discourse.julialang.org/t/weird-prng-behavior/63186).
!!! note
All the methods that take new data as a table construct an additional `MixedModel` behind the scenes, even when the new data is exactly the same as the data that the model was fitted to.
For the simulation methods in particular, these thus form a convenience wrapper for constructing a new model and calling `simulate` without new data on that model with the parameters from the original model.
# Rank deficiency in mixed-effects models
```@setup Main
using MixedModels
using DisplayAs
```
The *(column) rank* of a matrix refers to the number of linearly independent columns in the matrix.
Clearly, the rank can never be more than the number of columns; however, the rank can be less than the number of columns.
In a regression context, this corresponds to a (linear) dependency in the predictors.
The simplest case of rank deficiency is a duplicated predictor or a predictor that is exactly a multiple of another predictor.
However, rank deficiency can also arise in more subtle ways, such as from missing cells in a two-factor experimental design.
Rank deficiency can also arise as an extreme case of multicollinearity.
In all cases, it is important to remember that we can only assess the numerical rank of a matrix, which may be less than its theoretical rank, and that evaluation of this numerical rank requires setting some numerical tolerance levels.
These choices are not always well defined.
In other words, the rank of a matrix is well-defined in theory but in practice can be difficult to evaluate.
Rank deficiency can occur in two ways in mixed-effects models: in the fixed effects and in the random effects.
The implications of rank deficiency and thus the handling of it differ between these.
## Fixed effects
The consequences of rank deficiency in the fixed effects are similar to those in classical ordinary least squares (OLS) regression.
If one or more predictors can be expressed as a linear combination of the other columns, then this column is redundant and the model matrix is rank deficient.
Note, however, that the redundant column is not uniquely defined.
For example, in the case of two columns `a` and `b` where `b = 2a`, the rank deficiency can be handled by eliminating either `a` or `b`.
While we defined `b` here in terms of `a`, it may be that `b` is actually the more 'fundamental' predictor and hence we may define `a` in terms of `b` as `a = 0.5b`.
The user may of course possess this information, but the choice is not apparent to the modelling software.
As such, the handling of rank deficiency in `MixedModels.jl` should not be taken as a replacement for thinking about the nature of the predictors in a given model.
There is a widely accepted convention for how to make the coefficient estimates for these redundant columns well-defined: we set their value to zero and their standard errors to `NaN` (and thus also their $z$ and $p$-values).
The values that have been defined to be zero, as opposed to evaluating to zero, are displayed as `-0.0` as an additional visual aid to distinguish them from the other coefficients.
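As a small illustration (with hypothetical data, not an example from the package itself), a duplicated predictor is pivoted out and its coefficient pinned:
```julia
using DataFrames, Random
dat = DataFrame(g = repeat(string.('a':'j'), outer=10),
                a = randn(MersenneTwister(1), 100),
                y = randn(MersenneTwister(2), 100))
dat.b = 2 .* dat.a   # exact linear dependence: b = 2a
m = fit(MixedModel, @formula(y ~ 1 + a + b + (1|g)), dat)
coef(m)              # one of the collinear coefficients is displayed as -0.0
```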
In practice the determination of rank and the redundant coefficients is done via a 'pivoting' scheme during a decomposition to move the surplus columns to the right side of the model matrix.
In subsequent calculations, these columns are effectively ignored (as their estimates are zero and thus won't contribute to any other computations).
For display purposes, this pivoting is unwound when the `coef` values are displayed.
Both the pivoted and unpivoted coefficients are available in MixedModels.
The [`fixef`](@ref) extractor returns the pivoted, truncated estimates (i.e. the non redundant terms), while the [`coef`](@ref) extractor returns the unpivoted estimates (i.e. all terms, included the redundant ones).
The same holds for the associated [`fixefnames`](@ref) and [`coefnames`](@ref).
### Pivoting is platform dependent
In MixedModels.jl, we use standard numerical techniques to detect rank deficiency.
We currently offer no guarantees as to which exactly of the standard techniques (pivoted QR decomposition, pivoted Cholesky decomposition, etc.) will be used.
This choice should be viewed as an implementation detail.
Similarly, we offer no guarantees as to which of the columns will be treated as redundant.
This choice may vary between releases and even between platforms (both in broad strokes of "Linux" vs. "Windows" and at the level of which BLAS options are loaded on a given processor architecture) for the same release.
In other words, *you should not rely on the order of the pivoted columns being consistent* when you switch to a different computer or a different operating system!
If consistency in the pivoted columns is important to you, then you should instead determine your rank ahead of time and remove extraneous columns / predictors from your model specification.
This lack of consistency guarantees arises from a more fundamental issue: numeric linear algebra is challenging and sensitive to the underlying floating point operations.
Due to rounding error, floating point arithmetic is not associative:
```@example
0.1 + 0.1 + 0.1 - 0.3 == 0.1 + 0.1 + (0.1 - 0.3)
```
This means that "nearly" / numerically rank deficient matrices may or may not be detected as rank deficient, depending on details of the platform.
Determining the rank of a matrix is the type of problem that is well-defined in theory but not in practice.
Currently, a coarse heuristic is applied to reduce the chance that the intercept column will be pivoted, but even this behavior is not guaranteed.
### Undetected Rank Deficiency
Undetected rank deficiency in the fixed effects will lead to numerical issues, such as nonsensical estimates.
A `PosDefException` may indicate rank deficiency because the covariance matrix will only be positive semidefinite and not positive definite (see [Details of the parameter estimation](@ref)).
In other words, checking that the fixed effects are full rank is a great first step in debugging a `PosDefException`.
Note that `PosDefException` is not specific to rank deficiency and may arise in other ill-conditioned models.
In any case, examining the model specification and the data to verify that they work together is the first step.
For generalized linear mixed-effects models, it may also be worthwhile to try out `fast=true` instead of the default `fast=false`.
See this [GitHub issue](https://github.com/JuliaStats/MixedModels.jl/issues/349) and linked Discourse discussion for more information.
## Random effects
Rank deficiency presents less of a problem in the random effects than in the fixed effects because the "estimates" (more formally, the conditional modes of the random effects given the observed data) are determined as the solution to a penalized least squares problem.
The *shrinkage* effect which moves the conditional modes (group-level predictions) towards the grand mean is a form of *regularization*, which provides well-defined "estimates" for overparameterized models.
(For more reading on this general idea, see also this [blog post](https://jakevdp.github.io/blog/2015/07/06/model-complexity-myth/) on the model complexity myth.)
The nature of the penalty in the penalized least squares solution is such that the "estimates" are well-defined even when the covariance matrix of the random effects converges to a "singular" or "boundary" value.
In other words, singularity of the covariance matrix for the random effects, which means that there are one or more directions in which there is no variability in the random effects, is different from singularity of the model matrix for the random effects, which would affect the ability to define these coefficients uniquely.
The penalty term always provides a unique solution for the random-effects coefficients.
In addition to handling naturally occurring rank deficiency in the random effects, the regularization allows us to fit explicitly overparameterized random effects.
For example, we can use `fulldummy` to fit both an intercept term and $n$ indicator variables in the random effects for a categorical variable with $n$ levels instead of the usual $n-1$ contrasts.
```@example Main
kb07 = MixedModels.dataset(:kb07)
contrasts = Dict(var => HelmertCoding() for var in (:spkr, :prec, :load))
fit(MixedModel, @formula(rt_raw ~ spkr * prec * load + (1|subj) + (1+prec|item)), kb07; contrasts=contrasts)
DisplayAs.Text(ans) # hide
```
```@example Main
fit(MixedModel, @formula(rt_raw ~ spkr * prec * load + (1|subj) + (1+fulldummy(prec)|item)), kb07; contrasts=contrasts)
DisplayAs.Text(ans) # hide
```
This may be useful when the `PCA` property suggests a random effects structure larger than only main effects but smaller than all interaction terms.
This is also similar to the functionality provided by `dummy` in `lme4`, but as in the difference between `zerocorr` in Julia and `||` in R, there are subtle differences in how this expansion interacts with other terms in the random effects.
module UnitfulSystems
# This file is part of UnitfulSystems.jl. It is licensed under the MIT license
# UnitfulSystems Copyright (C) 2020 Michael Reed
import Base: @pure
using UnitSystems, Unitful
import UnitSystems: Systems, Constants, Physics, Convert
export UnitSystems, Unitful, unitful
const ftlb = u"slug*ft^2/s^2"
UnitSystems.unit(x::Quantity,y=1) = x
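# export every unit system, constant, physics quantity and conversion; length and time clash with Base and are skipped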
for unit ∈ (Systems...,Constants...,Physics...,Convert...)
unit ∉ (:length,:time) && @eval export $unit
end
for unit ∈ (Constants...,Physics...)
if unit ∈ (:molarmass,:permeability,:permittivity,:charge,:magneticflux,:impedance,:conductance,:luminousefficacy)
@eval @pure $unit(U::UnitSystem) = UnitSystems.$unit(U)
@eval @doc $(string(@eval @doc UnitSystems.$unit)) $unit
else
@eval import UnitSystems.$unit
end
end
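# extend the conversion functions so they act on numeric values between two unit systems, attaching Unitful units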
for unit ∈ Convert
@eval begin
@pure @inline $unit(v::Number,U::UnitSystem) = $unit(v,U,Metric)
@pure @inline $unit(v::Number,U::UnitSystem,S::UnitSystem) = ustrip((u=ustrip($unit(U,S));isone(u) ? v : v/u))*unit($unit(Natural,U))
@pure @inline $unit(v::Number,U::UnitSystem{kB,ħ,𝘤,μ₀,mₑ},S::UnitSystem{kB,ħ,𝘤,μ₀,mₑ}) where {kB,ħ,𝘤,μ₀,mₑ} = ustrip(v)*unit($unit(UnitSystems.Natural,U))
@pure @inline $unit(U::UnitSystem,S::UnitSystem) = UnitSystems.$unit(U,S)
end
if unit ∉ (Constants...,:permittivity,:charge,:magneticflux,:impedance,:conductance)
@eval @pure @inline $unit(U::UnitSystem) = $unit(U,Metric)
@eval @doc $(string(@eval @doc UnitSystems.$unit)) $unit
end
end
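# length and time extend Base functions, so Quantity methods are defined for them separately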
for unit ∈ (:(Base.length),:(Base.time))
@eval begin
@pure @inline $unit(v::Quantity,U::UnitSystem) = $unit(v,U,Metric)
@pure @inline $unit(v::Quantity,U::UnitSystem,S::UnitSystem) = ustrip((u=ustrip($unit(U,S));isone(u) ? v : v/u))*unit($unit(Natural,U))
@pure @inline $unit(v::Quantity,U::UnitSystem{kB,ħ,𝘤,μ₀,mₑ},S::UnitSystem{kB,ħ,𝘤,μ₀,mₑ}) where {kB,ħ,𝘤,μ₀,mₑ} = ustrip(v)*unit($unit(Natural,U))
end
end
"""
unitful(::UnitSystem,JK=u"J/K",Js=u"J*s",ms=u"m/s",Hm=u"H/m",kg=u"kg")
Convert a `UnitSystem` to use `Unitful` values instead of plain numerical values.
"""
unitful(U::UnitSystem,JK=u"J/K",Js=u"J*s",ms=u"m/s",Hm=u"H/m",kg=u"kg") = U(JK,Js,ms,Hm,kg)
const EMU2019 = unitful(UnitSystems.EMU2019,u"erg/K",u"erg*s",u"cm/s",u"nH/cm",u"g")
const MTS = unitful(UnitSystems.MTS,u"kJ/K",u"kJ*s",u"m/s",u"kH/m",u"Mg")
const Metric = unitful(UnitSystems.Metric)
const SI2019 = unitful(UnitSystems.SI2019)
const CODATA = unitful(UnitSystems.CODATA)
const Conventional = unitful(UnitSystems.Conventional)
const English = unitful(UnitSystems.English,ftlb/u"Ra",ftlb*u"s",u"ft/s",1,u"slug")
const EnglishUS = unitful(UnitSystems.EnglishUS,ftlb/u"Ra",ftlb*u"s",u"ft/s",1,u"slug")
const SI = SI2019
@pure molarmass(U::UnitSystem{boltzmann(MTS)}) = molarmass(UnitSystems.MTS)*u"Mg/mol"
@pure molarmass(U::UnitSystem{boltzmann(EMU2019)}) = molarmass(UnitSystems.EMU2019)*u"g/mol"
@pure molarmass(U::UnitSystem{boltzmann(Metric)}) = molarmass(UnitSystems.Metric)*u"kg/mol"
@pure molarmass(U::UnitSystem{boltzmann(SI2019)}) = molarmass(UnitSystems.SI2019)*u"kg/mol"
@pure molarmass(U::UnitSystem{boltzmann(CODATA)}) = molarmass(UnitSystems.CODATA)*u"kg/mol"
@pure molarmass(U::UnitSystem{boltzmann(Conventional)}) = molarmass(UnitSystems.Conventional)*u"kg/mol"
@pure molarmass(U::UnitSystem{boltzmann(English)}) = molarmass(UnitSystems.English)*u"slug/mol"
@pure molarmass(U::UnitSystem{boltzmann(EnglishUS)}) = molarmass(UnitSystems.EnglishUS)*u"slug/mol"
for us ∈ (:EMU2019,:MTS,:Metric,:SI2019,:CODATA,:Conventional,:English,:EnglishUS)
@eval @pure hyperfine(U::UnitSystem{boltzmann($us)}) = hyperfine(UnitSystems.$us)*u"Hz"
end
for us ∈ (:Metric,:SI2019,:CODATA,:Conventional)
@eval @pure luminousefficacy(U::UnitSystem{boltzmann($us)}) = luminousefficacy(UnitSystems.$us)*u"cd/W"
end
@pure luminousefficacy(U::UnitSystem{boltzmann(EMU2019)}) = luminousefficacy(UnitSystems.EMU2019)*u"cd*s/erg"
@pure luminousefficacy(U::UnitSystem{boltzmann(MTS)}) = luminousefficacy(UnitSystems.MTS)*u"cd/GW"
@pure luminousefficacy(U::UnitSystem{boltzmann(English)}) = luminousefficacy(UnitSystems.English)*u"cd*s^3/slug/ft^2"
@pure luminousefficacy(U::UnitSystem{boltzmann(EnglishUS)}) = luminousefficacy(UnitSystems.EnglishUS)*u"cd*s^3/slug/ft^2"
for CAL ∈ (:cal,:calₜₕ,:cal₄,:cal₁₀,:cal₂₀,:calₘ,:calᵢₜ)
KCAL = Symbol(:k,CAL)
@eval const $CAL = UnitSystems.$CAL*u"cal"
@eval const $KCAL = UnitSystems.$KCAL*u"kcal"
end
const atm = UnitSystems.atm*u"kPa"
const g₀ = UnitSystems.g₀*u"m/s^2"
const lbm = UnitSystems.lbm*u"ft/s^2"
const slug = UnitSystems.slug*u"kg/slug"
const ft = UnitSystems.ft*u"m/ft"
const ftUS = UnitSystems.ftUS*u"m/ft"
const rankine = UnitSystems.rankine*u"K/Ra"
const kelvin = UnitSystems.kelvin*u"Ra/K"
const ΔνCs = UnitSystems.ΔνCs*u"Hz"
const Kcd = UnitSystems.Kcd*u"cd/W"
const mP = UnitSystems.mP*u"kg"
const NA = UnitSystems.NA*u"mol^-1"
const kB = UnitSystems.kB*u"J/K"
const 𝘩 = UnitSystems.𝘩*u"J*s"
const 𝘤 = UnitSystems.𝘤*u"m/s"
const 𝘦 = UnitSystems.𝘦*u"C"
const R∞ = UnitSystems.R∞*u"m^-1"
const μ₀ = UnitSystems.μ₀*u"H/m"
const ħ = UnitSystems.ħ*u"J*s" # u"J*s/rad" ?
const δμ₀ = UnitSystems.δμ₀*u"H/m"
const Rᵤ = UnitSystems.Rᵤ*u"J/K/mol"
const mₑ = UnitSystems.mₑ*u"kg"
const RK1990 = UnitSystems.RK1990*u"Ω"
const RK2014 = UnitSystems.RK2014*u"Ω"
const KJ1990 = UnitSystems.KJ1990*u"Hz/V"
const KJ2014 = UnitSystems.KJ2014*u"Hz/V"
const ħ1990 = UnitSystems.ħ1990*u"J*s"
const ħ2014 = UnitSystems.ħ2014*u"J*s"
const mₑ1990 = UnitSystems.mₑ1990*u"kg"
const mₑ2014 = UnitSystems.mₑ2014*u"kg"
const GG = UnitSystems.GG*u"m^3/kg/s^2"
const κ = UnitSystems.κ*u"s^2/m/kg"
const σ = UnitSystems.σ*u"W/m^2/K^4" #
const μB = UnitSystems.μB*u"J/T" #
const ε₀ = UnitSystems.ε₀*u"F/m" #
const kₑ = UnitSystems.kₑ*u"N*m^2/C^2" #
const mₚ = UnitSystems.mₚ*u"kg"
const mᵤ = UnitSystems.mᵤ*u"kg"
const Mᵤ = UnitSystems.Mᵤ*u"kg/mol" #
const 𝔉 = UnitSystems.𝔉*u"C/mol" #
const Φ₀ = UnitSystems.Φ₀*u"Wb" #
const Z₀ = UnitSystems.Z₀*u"Ω" #
const G₀ = UnitSystems.G₀*u"S" #
const Eₕ = UnitSystems.Eₕ*u"J"
const a₀ = UnitSystems.a₀*u"m"
const rₑ = UnitSystems.rₑ*u"m"
const RK = UnitSystems.RK*u"Ω" #
const KJ = UnitSystems.KJ*u"Hz/V" #
const RH = UnitSystems.RH*u"1/m"
const Ry = UnitSystems.Ry*u"J"
const ℓP = UnitSystems.ℓP*u"m"
const tP = UnitSystems.tP*u"s"
const TP = UnitSystems.TP*u"K"
const lS = UnitSystems.lS*u"m"
const tS = UnitSystems.tS*u"s"
const mS = UnitSystems.mS*u"kg"
const qS = UnitSystems.qS*u"C"
const lA = UnitSystems.lA*u"m"
const tA = UnitSystems.tA*u"s"
const mA = UnitSystems.mA*u"kg"
const qA = UnitSystems.qA*u"C"
const lQCD = UnitSystems.lQCD*u"m"
const tQCD = UnitSystems.tQCD*u"s"
const mQCD = UnitSystems.mQCD*u"kg"
const Mu,Ru,SB,hh,cc,m0,e0,ke,me,mp,mu,ee,FF,Z0,G0,Eh,a0,re,g0,lP,ϵ₀,mB = Mᵤ,Rᵤ,σ,𝘩,𝘤,μ₀,ε₀,kₑ,mₑ,mₚ,mᵤ,𝘦,𝔉,Z₀,G₀,Eₕ,a₀,rₑ,g₀,ℓP,ε₀,μB
export κ, GG, NA, kB, Rᵤ, σ, 𝘩, ħ, 𝘤, μ₀, ε₀, kₑ, mₑ, mₚ, mᵤ, 𝘦, 𝔉, Φ₀, Z₀, G₀, Eₕ, R∞, a₀, rₑ, KJ, RK, Ru, SB, hh, cc, m0, e0, ke, me, mp, mu, ee, FF, Z0, G0, Eh, a0, re, μB
export αG, αinv, μₚₑ, μₑᵤ, μₚᵤ, mpe, meu, mpu, mP, δμ₀, Mᵤ, Mu, RH, Ry, ΔνCs, Kcd, ainv
export cal, kcal, calₜₕ, kcalₜₕ, calᵢₜ, kcalᵢₜ, ℓP, g₀, g0, atm, lbm, aG, BTUJ, BTUftlb
export lP, tP, TP, lS, tS, mS, qS, lA, tA, mA, qA, lQCD, tQCD, mQCD, ϵ₀, aL, αL
export slug, ft, KJ1990, KJ2014, RK1990, RK2014, mₑ1990, mₑ2014, temp, units
export slugs, kilograms, lbm, meters, feet, rankine, kelvin, moles, molecules
export UnitSystem, US, SI, CGS, CGS2019, CGSm, CGSe, HLU, FFF
@doc """
EMU2019::UnitSystem{1e7*kB,1e7*ħ,100𝘤,1e7*μ₀,1000mₑ}
Centimetre-gram-second `UnitSystem` variant of tuned `SI2019` based on EMU (rationalized).
```Julia
julia> boltzmann(EMU2019)
$(boltzmann(EMU2019))
julia> planckreduced(EMU2019)
$(planckreduced(EMU2019))
julia> lightspeed(EMU2019)
$(lightspeed(EMU2019))
julia> permeability(EMU2019)
$(permeability(EMU2019))
julia> electronmass(EMU2019)
$(electronmass(EMU2019))
```
""" EMU2019
@doc """
MTS::UnitSystem{1e6*Rᵤ*mₑ/μₑᵤ,1000ħ,𝘤,4π/1e4,mₑ/1000}
Metre-tonne-second `UnitSystem` variant of `Metric` system.
```Julia
julia> boltzmann(MTS)
$(boltzmann(MTS))
julia> planckreduced(MTS)
$(planckreduced(MTS))
julia> lightspeed(MTS)
$(lightspeed(MTS))
julia> permeability(MTS)
$(permeability(MTS))
julia> electronmass(MTS)
$(electronmass(MTS))
```
""" MTS
@doc """
Metric::UnitSystem{Rᵤ*mₑ/μₑᵤ/0.001,ħ,𝘤,4π*1e-7,mₑ}
Systeme International d'Unites (the SI units) adopted as the preferred `UnitSystem`.
```Julia
julia> boltzmann(Metric)
$(boltzmann(Metric))
julia> planckreduced(Metric)
$(planckreduced(Metric))
julia> lightspeed(Metric)
$(lightspeed(Metric))
julia> permeability(Metric)
$(permeability(Metric))
julia> electronmass(Metric)
$(electronmass(Metric))
```
""" Metric
@doc """
SI2019::UnitSystem{kB,ħ,𝘤,μ₀,mₑ}
Systeme International d'Unites (the SI units) with `μ₀` for a tuned `charge` exactly.
```Julia
julia> boltzmann(SI2019)
$(boltzmann(SI2019))
julia> planckreduced(SI2019)
$(planckreduced(SI2019))
julia> lightspeed(SI2019)
$(lightspeed(SI2019))
julia> permeability(SI2019)
$(permeability(SI2019))
julia> electronmass(SI2019)
$(electronmass(SI2019))
```
""" SI2019, SI
@doc """
CODATA::UnitSystem{Rᵤ*mₑ2014/μₑᵤ/0.001,2/RK2014/KJ2014^2/π,𝘤,2RK2014/𝘤/αinv,mₑ2014}
Metric `UnitSystem` based on Committee on Data of the International Science Council.
```Julia
julia> boltzmann(CODATA)
$(boltzmann(CODATA))
julia> planckreduced(CODATA)
$(planckreduced(CODATA))
julia> lightspeed(CODATA)
$(lightspeed(CODATA))
julia> permeability(CODATA)
$(permeability(CODATA))
julia> electronmass(CODATA)
$(electronmass(CODATA))
```
""" CODATA
@doc """
Conventional::UnitSystem{Rᵤ*mₑ1990/μₑᵤ/0.001,2/RK1990/KJ1990^2/π,𝘤,2RK1990/𝘤/αinv,mₑ1990}
Conventional electronic `UnitSystem` with 1990 tuned `josephson` and `klitzing` constants.
```Julia
julia> boltzmann(Conventional)
$(boltzmann(Conventional))
julia> planckreduced(Conventional)
$(planckreduced(Conventional))
julia> lightspeed(Conventional)
$(lightspeed(Conventional))
julia> permeability(Conventional)
$(permeability(Conventional))
julia> electronmass(Conventional)
$(electronmass(Conventional))
```
""" Conventional
@doc """
English::UnitSystem{kB*rankine/slug/ft^2,ħ/slug/ft^2,𝘤/ft,4π,mₑ/slug}
Engineering `UnitSystem` historically used by Britain and United States.
```Julia
julia> boltzmann(English)
$(boltzmann(English))
julia> planckreduced(English)
$(planckreduced(English))
julia> lightspeed(English)
$(lightspeed(English))
julia> electronmass(English)
$(electronmass(English))
```
""" English
@doc """
EnglishUS::UnitSystem{1000Rᵤ*mₑ/μₑᵤ*rankine/slug/ftUS^2,ħ/slug/ftUS^2,𝘤/ftUS,4π,mₑ/slug}
Engineering `UnitSystem` based on the geophysical US survey foot (1200/3937).
```Julia
julia> boltzmann(EnglishUS)
$(boltzmann(EnglishUS))
julia> planckreduced(EnglishUS)
$(planckreduced(EnglishUS))
julia> lightspeed(EnglishUS)
$(lightspeed(EnglishUS))
julia> electronmass(EnglishUS)
$(electronmass(EnglishUS))
```
""" EnglishUS
end # module
using UnitfulSystems, Test
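# constants converted to Unitful quantities should agree across unit systems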
@test molarmass(SI2019) == molarmass(EMU2019)
@test luminousefficacy(SI2019) == luminousefficacy(EMU2019)
# UnitfulSystems.jl
*Unitful.jl compatibility layer for UnitSystems.jl*
Specifications for dimensional units are in the [UnitSystems.jl](https://github.com/chakravala/UnitSystems.jl) and [UnitfulSystems.jl](https://github.com/chakravala/UnitfulSystems.jl) repositories.
The two packages are designed so that they can be interchanged if compatibility with [Unitful.jl](https://github.com/PainterQubits/Unitful.jl) is desired or not.
However, the `UnitfulSystems` package has fewer `UnitSystem` specifications available than the `UnitSystems` package due to limitations in combination with the `Unitful` package.
Specifically, `Metric`, `SI2019`, `CODATA`, `Conventional`, `MTS`, `EMU2019`, `English`, and `EnglishUS` can have `Unitful` values; while `Gauss`, `LorentzHeaviside`, `Thomson`, `EMU`, `ESU`, `ESU2019`, `IAU`, `FFF`, `Planck`, `PlanckGauss`, `Stoney`, `Hartree`, `Rydberg`, `Schrodinger`, `Electronic`, `Natural`, `NaturalGauss`, `QCD`, `QCDGauss`, and `QCDoriginal` are plain valued.
```Julia
pkg> add UnitfulSystems # or UnitSystems
julia> using UnitfulSystems
```
A `UnitSystem` is a consistent set of dimensional values selected to accommodate a particular use case or standardization.
It is possible to convert derived physical quantities from any `UnitSystem` specification into any other using accurate values.
In total, five fundamental constants `kB,ħ,𝘤,μ₀,mₑ` are used to specify a specific unit system.
These are the constants of `boltzmann`, `planckreduced`, `lightspeed`, `permeability`, and `electronmass`.
Different choices of natural units or physical measurements result in a variety of unit systems for many purposes.
Main documentation is at https://geophysics.crucialflow.com/dev/units
Another important additional definition is the `molarmass` constant `Mᵤ`, which is automatically selected based on the choice of `boltzmann` constant (but can also be customized if necessary).
Historically, older electromagnetic unit systems also relied on a `rationalization` constant `λ` and a `lorentz` force proportionality constant `αL`.
In most unit systems these extra constants have a value of `1` unless otherwise specified.
```Julia
UnitSystem{kB,ħ,𝘤,μ₀,mₑ,λ,αL}
```
Fundamental constants of physics are: `kB` Boltzmann's constant, `ħ` reduced Planck's constant, `𝘤` speed of light, `μ₀` vacuum permeability, `mₑ` electron rest mass, `λ` Gauss rationalization, and `αL` Lorentz's constant.
Primarily the `Metric` SI unit system is used in addition to the historic `English` engineering unit system.
These constants induce derived values for `avogadro`, `boltzmann`, `universal`, `planck`, `planckreduced`, `lightspeed`, `planckmass`, `atomicmass`, `protonmass`, `electronmass`, `newton`, `einstein`, `permeability`, `permittivity`, `coulomb`, and
additional constants `molarmass`, `hyperfine`, `luminousefficacy`, `stefan`, `radiationintensity`, `ampere`, `lorentz`, `biotsavart`, `rationalization`, `impedance`, `charge`, `magneton`, `conductance`, `faraday`, `magneticflux`, `josephson`, `klitzing`, `hartree`, `rydberg`, `bohr`, and `bohrreduced`.
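For example, the exported constants carry `Unitful` units once the package is loaded (a minimal sketch; REPL output elided, assuming the `Metric` and `English` systems listed above):
```Julia
julia> boltzmann(Metric)     # Boltzmann constant as a Unitful quantity in J/K

julia> lightspeed(English)   # speed of light in ft/s in the English engineering system
```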
Physics constant documentation is at https://geophysics.crucialflow.com/dev/constants
Additional reference `UnitSystem` variants: `EMU`, `ESU`, `Gauss`, `LorentzHeaviside`, `MTS`, `SI2019`, `CODATA`, `Conventional`, `IAU`, `EnglishUS`; and natural atomic units based on gravitational coupling `αG` and the fine structure `1/αinv` constant (`Planck`, `PlanckGauss`, `Stoney`, `Hartree`, `Rydberg`, `Schrodinger`, `Electronic`, `Natural`, `NaturalGauss`, `QCD`, `QCDGauss`, and `QCDoriginal`).
Unit conversion documentation is at https://geophysics.crucialflow.com/dev/convert
**Derived Unit conversions:**
Mechanics: `time`, `length`, `area`, `volume`, `wavenumber`, `fuelefficiency`, `frequency`, `frequencydrift`, `speed`, `acceleration`, `jerk`, `snap`, `volumeflow`,
`mass`, `massflow`, `lineardensity`, `areadensity`, `density`, `specificvolume`, `force`, `stiffness`, `pressure`, `compressibility`, `viscosity`, `diffusivity`, `rotationalinertia`, `momentum`, `angularmomentum`, `yank`, `energy`, `specificenergy`, `action`, `fluence`, `power`, `powerdensity`, `intensity`, `spectralflux`, `soundexposure`, `impedance`, `specificimpedance`, `admittance`, `compliance`, `inertance`;
Electromagnetics: `charge`, `chargedensity`, `linearchargedensity`, `exposure`, `mobility`, `current`, `currentdensity`, `resistance`, `conductance`, `resistivity`, `conductivity`, `capacitance`, `inductance`, `reluctance`, `permeance`, `permittivity`, `permeability`, `susceptibility`, `specificsusceptibility`, `demagnetizingfactor`, `vectorpotential`, `electricpotential`, `magneticpotential`, `electricfield`, `magneticfield`, `electricflux`, `magneticflux`, `electricfluxdensity`, `magneticfluxdensity`, `electricdipolemoment`, `magneticdipolemoment`, `electricpolarizability`, `magneticpolarizability`, `magneticmoment`, `magnetizability`, `magnetization`, `specificmagnetization`, `rigidity`, `polestrength`;
Thermodynamics: `temperature`, `entropy`, `specificentropy`, `volumeheatcapacity`, `thermalconductivity`, `thermalconductance`, `thermalresistance`, `thermalexpansion`, `lapserate`,
`molarmass`, `molality`, `mole`, `molarity`, `molarvolume`, `molarentropy`, `molarenergy`, `molarconductivity`, `molarsusceptibility`, `catalysis`, `specificity`,
`luminousflux`, `luminance`, `luminousenergy`, `luminousexposure`, `luminousefficacy`.
Other similar packages include [UnitSystems.jl](https://github.com/chakravala/UnitSystems.jl), [PhysicalConstants.jl](https://github.com/JuliaPhysics/PhysicalConstants.jl), [MathPhysicalConstants.jl](https://github.com/LaGuer/MathPhysicalConstants.jl), [Unitful.jl](https://github.com/PainterQubits/Unitful.jl.git), [UnitfulUS.jl](https://github.com/PainterQubits/UnitfulUS.jl), [UnitfulAstro.jl](https://github.com/JuliaAstro/UnitfulAstro.jl), [UnitfulAtomic.jl](https://github.com/sostock/UnitfulAtomic.jl), [NaturallyUnitful.jl](https://github.com/MasonProtter/NaturallyUnitful.jl), and [UnitfulMoles.jl](https://github.com/rafaqz/UnitfulMoles.jl).
using InvariantMeasures
using ValidatedNumerics
using Plots
using LaTeXStrings
using StatsPlots
include("warmup.jl")
function runExperiment()
time_assembling = @elapsed begin
D = PwMap(
[x -> 17x/5,
x -> 34(x-5//17)^2/25 + 3(x-5//17),
x -> 34(x-10//17)^2/25 + 3(x-10//17),
x -> 17(x-15//17)/5
],
[0, @interval(5/17), @interval(10/17), @interval(15/17), 1]
)
B = Ulam(1024)
Q = DiscretizedOperator(B, D)
end
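# rigorous bounds on the norms of the powers of the discretized operator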
time_norms = @elapsed norms = powernormbounds(B, D; Q=Q)
time_eigen = @elapsed w = invariant_vector(B, Q)
time_error = @elapsed error = distance_from_invariant(B, D, Q, w, norms)
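# repeat on a finer grid, reusing the coarse-grid norm bounds (two-grid strategy)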
time_assembling_fine = @elapsed begin
B_fine = Ulam(2^16)
Q_fine = DiscretizedOperator(B_fine, D)
end
time_norms_fine = @elapsed norms_fine = finepowernormbounds(B, B_fine, D, norms; Q_fine=Q_fine)
time_eigen_fine = @elapsed w_fine = invariant_vector(B_fine, Q_fine)
time_error_fine = @elapsed error_fine = distance_from_invariant(B_fine, D, Q_fine, w_fine, norms_fine)
A, BB = dfly(strong_norm(B), aux_norm(B), D)
p1 = plot(D, title="Dynamic (dfly coeffs $(round(A, sigdigits=2)),$(round(BB, sigdigits=2)))", label=L"T(x)", legend=:bottomright)
p2 = plot(B, w, title="Invariant measure (n=$(length(B)))")
p2 = plot!(p2, B, error, w, label="L1 error $(round(error, sigdigits=2))")
p3 = plot(B_fine, w_fine, title="Invariant measure (n=$(length(B_fine)))")
p3 = plot!(p3, B_fine, error_fine, w_fine, label="L1 error $(round(error_fine, sigdigits=2))")
p4 = groupedbar(
vcat(
[time_error time_eigen time_norms time_assembling 0],
[time_error_fine time_eigen_fine time_norms_fine time_assembling_fine time_assembling+time_norms]
),
bar_position = :stack,
legend = :topleft,
label = ["err" "eigen" "norm" "matrix" "coarse"],
title = "CPU time breakdown (s)",
xticks = (1:2, ["1-grid estimate", "2-grid estimate"])
)
plot(p1, p2, p3, p4, layout=4)
end
runExperiment()
using InvariantMeasures
using ValidatedNumerics
using Plots
using LaTeXStrings
using StatsPlots
include("warmup.jl")
function runExperiment()
time_assembling = @elapsed begin
D = mod1_dynamic(x -> 17//5 * x)
B = Ulam(1024)
Q = DiscretizedOperator(B, D)
end
time_norms = @elapsed norms = powernormbounds(B, D; Q=Q)
time_eigen = @elapsed w = invariant_vector(B, Q)
time_error = @elapsed error = distance_from_invariant(B, D, Q, w, norms)
time_assembling_fine = @elapsed begin
B_fine = Ulam(2^16)
Q_fine = DiscretizedOperator(B_fine, D)
end
time_norms_fine = @elapsed norms_fine = finepowernormbounds(B, B_fine, D, norms; Q_fine=Q_fine)
time_eigen_fine = @elapsed w_fine = invariant_vector(B_fine, Q_fine)
time_error_fine = @elapsed error_fine = distance_from_invariant(B_fine, D, Q_fine, w_fine, norms_fine)
A, BB = dfly(strong_norm(B), aux_norm(B), D)
p1 = plot(D, title="Dynamic (dfly coeffs $(round(A, sigdigits=2)),$(round(BB, sigdigits=2)))", label=L"T(x)", legend=:bottomright)
p2 = plot(B, w, title="Invariant measure (n=$(length(B)))")
p2 = plot!(p2, B, error, w, label="L1 error $(round(error, sigdigits=2))")
p3 = plot(B_fine, w_fine, title="Invariant measure (n=$(length(B_fine)))")
p3 = plot!(p3, B_fine, error_fine, w_fine, label="L1 error $(round(error_fine, sigdigits=2))")
p4 = groupedbar(
vcat(
[time_error time_eigen time_norms time_assembling 0],
[time_error_fine time_eigen_fine time_norms_fine time_assembling_fine time_assembling+time_norms]
),
bar_position = :stack,
legend = :topleft,
label = ["err" "eigen" "norm" "matrix" "coarse"],
title = "CPU time breakdown (s)",
xticks = (1:2, ["1-grid estimate", "2-grid estimate"])
)
plot(p1, p2, p3, p4, layout=4)
end
runExperiment()
using InvariantMeasures
using ValidatedNumerics
using Plots
using LaTeXStrings
using StatsPlots
include("warmup.jl")
function runExperiment()
time_assembling = @elapsed begin
D = Mod1Dynamic(x -> 4*x + 0.01*InvariantMeasures.sinpi(8*x))
# different backend, a tad slower
# D = mod1_dynamic(x -> 4*x + 0.01*InvariantMeasures.sinpi(8*x))
B = Hat(1024)
Q = DiscretizedOperator(B, D)
end
time_norms = @elapsed norms = powernormbounds(B, D; Q=Q)
time_eigen = @elapsed w = invariant_vector(B, Q)
time_error = @elapsed error = distance_from_invariant(B, D, Q, w, norms)
time_assembling_fine = @elapsed begin
B_fine = Hat(2^16)
Q_fine = DiscretizedOperator(B_fine, D)
end
time_norms_fine = @elapsed norms_fine = finepowernormbounds(B, B_fine, D, norms; Q_fine=Q_fine)
time_eigen_fine = @elapsed w_fine = invariant_vector(B_fine, Q_fine)
time_error_fine = @elapsed error_fine = distance_from_invariant(B_fine, D, Q_fine, w_fine, norms_fine)
A, BB = dfly(strong_norm(B), aux_norm(B), D)
p1 = plot(D, title="Dynamic (dfly coeffs $(round(A, sigdigits=2)),$(round(BB, sigdigits=2)))", label=L"T(x)", legend=:bottomright)
p2 = plot(B, w, title="Invariant measure (n=$(length(B)))")
p2 = plot!(p2, B, error, w, label="L-inf error $(round(error, sigdigits=2))")
p3 = plot(B_fine, w_fine, title="Invariant measure (n=$(length(B_fine)))")
p3 = plot!(p3, B_fine, error_fine, w_fine, label="L-inf error $(round(error_fine, sigdigits=2))")
p4 = groupedbar(
vcat(
[time_error time_eigen time_norms time_assembling 0],
[time_error_fine time_eigen_fine time_norms_fine time_assembling_fine time_assembling+time_norms]
),
bar_position = :stack,
legend = :topleft,
label = ["err" "eigen" "norm" "matrix" "coarse"],
title = "CPU time breakdown (s)",
xticks = (1:2, ["1-grid estimate", "2-grid estimate"])
)
plot(p1, p2, p3, p4, layout=4)
end
runExperiment()
using InvariantMeasures
using ValidatedNumerics
using Plots
using LaTeXStrings
using StatsPlots
include("warmup.jl")
function runExperiment()
time_assembling = @elapsed begin
D = Mod1Dynamic(x -> 4*x + 0.01*InvariantMeasures.sinpi(8*x))
# different backend, a tad slower
# D = mod1_dynamic(x -> 4*x + 0.01*InvariantMeasures.sinpi(8*x))
B = Ulam(1024)
Q = DiscretizedOperator(B, D)
end
time_norms = @elapsed norms = powernormbounds(B, D, Q=Q)
time_eigen = @elapsed w = invariant_vector(B, Q)
time_error = @elapsed error = distance_from_invariant(B, D, Q, w, norms)
time_assembling_fine = @elapsed begin
B_fine = Ulam(2^16)
Q_fine = DiscretizedOperator(B_fine, D)
end
time_norms_fine = @elapsed norms_fine = finepowernormbounds(B, B_fine, D, norms; Q_fine=Q_fine)
time_eigen_fine = @elapsed w_fine = invariant_vector(B_fine, Q_fine)
time_error_fine = @elapsed error_fine = distance_from_invariant(B_fine, D, Q_fine, w_fine, norms_fine)
A, BB = dfly(strong_norm(B), aux_norm(B), D)
p1 = plot(D, title="Dynamic (dfly coeffs $(round(A, sigdigits=2)),$(round(BB, sigdigits=2)))", label=L"T(x)", legend=:bottomright)
p2 = plot(B, w, title="Invariant measure (n=$(length(B)))")
p2 = plot!(p2, B, error, w, label="L1 error $(round(error, sigdigits=2))")
p3 = plot(B_fine, w_fine, title="Invariant measure (n=$(length(B_fine)))")
p3 = plot!(p3, B_fine, error_fine, w_fine, label="L1 error $(round(error_fine, sigdigits=2))")
p4 = groupedbar(
vcat(
[time_error time_eigen time_norms time_assembling 0],
[time_error_fine time_eigen_fine time_norms_fine time_assembling_fine time_assembling+time_norms]
),
bar_position = :stack,
legend = :topleft,
label = ["err" "eigen" "norms" "matrix" "coarse"],
title = "CPU time breakdown (s)",
xticks = (1:2, ["1-grid estimate", "2-grid estimate"])
)
plot(p1, p2, p3, p4, layout=4)
end
runExperiment()
using InvariantMeasures
using ValidatedNumerics
m = 30
m_extend = 100
D = ApproxInducedLSV(0.5, 15)
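# approximated induced map of the Liverani-Saussol-Vaienti (LSV) intermittent map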
B = Ulam(128)
Q = DiscretizedOperator(B, D)
normQ = opnormbound(weak_norm(B), Q)
trivial_norms = norms_of_powers_trivial(weak_norm(B), Q, m)
computed_norms = norms_of_powers(weak_norm(B), m, Q, integral_covector(B))
(dfly_strongs, dfly_norms) = norms_of_powers_dfly(B, D, m)
norms = min.(trivial_norms, computed_norms, dfly_norms) # in the current version, dfly_norms are always larger and can be omitted
@info norms
better_norms = refine_norms_of_powers(norms, m_extend)
w = invariant_vector(B, Q)
@show distance_from_invariant(B, D, Q, w, better_norms)
B_fine = Ulam(1024)
Q_fine = DiscretizedOperator(B_fine, D)
norm_Q_fine = opnormbound(weak_norm(B_fine), Q_fine)
trivial_norms_fine = norms_of_powers_trivial(weak_norm(B_fine), Q_fine, m_extend)
twogrid_norms_fine = norms_of_powers_from_coarser_grid(B_fine, B, D, better_norms, norm_Q_fine)
(dfly_strongs_fine, dfly_norms_fine) = norms_of_powers_dfly(B_fine, D, m_extend)
norms_fine = min.(trivial_norms_fine, twogrid_norms_fine, dfly_norms_fine)
better_norms_fine = refine_norms_of_powers(norms_fine, m_extend)
w_fine = invariant_vector(B_fine, Q_fine)
@show distance_from_invariant(B_fine, D, Q_fine, w_fine, better_norms_fine)
using InvariantMeasures
using ValidatedNumerics
using Plots
using LaTeXStrings
using StatsPlots
include("warmup.jl")
function runExperiment()
time_assembling = @elapsed begin
DD = mod1_dynamic(x->2*x+0.5*x*(1-x))
# Taking an iterate is necessary here to get a DFLY inequality with A < 1
D = Iterate(DD, 3)
B = Ulam(1024)
Q = DiscretizedOperator(B, D)
end
time_norms = @elapsed norms = powernormbounds(B, D; Q=Q)
time_eigen = @elapsed w = invariant_vector(B, Q)
time_error = @elapsed error = distance_from_invariant(B, D, Q, w, norms)
time_assembling_fine = @elapsed begin
B_fine = Ulam(2^16)
Q_fine = DiscretizedOperator(B_fine, D)
end
time_norms_fine = @elapsed norms_fine = finepowernormbounds(B, B_fine, D, norms; Q_fine=Q_fine)
time_eigen_fine = @elapsed w_fine = invariant_vector(B_fine, Q_fine)
time_error_fine = @elapsed error_fine = distance_from_invariant(B_fine, D, Q_fine, w_fine, norms_fine)
A, BB = dfly(strong_norm(B), aux_norm(B), D)
p1 = plot(D, title="Dynamic (dfly coeffs $(round(A, sigdigits=2)),$(round(BB, sigdigits=2)))", label=L"T(x)", legend=:bottomright)
p2 = plot(B, w, title="Invariant measure (n=$(length(B)))")
p2 = plot!(p2, B, error, w, label="L1 error $(round(error, sigdigits=2))")
p3 = plot(B_fine, w_fine, title="Invariant measure (n=$(length(B_fine)))")
p3 = plot!(p3, B_fine, error_fine, w_fine, label="L1 error $(round(error_fine, sigdigits=2))")
p4 = groupedbar(
vcat(
[time_error time_eigen time_norms time_assembling 0],
[time_error_fine time_eigen_fine time_norms_fine time_assembling_fine time_assembling+time_norms]
),
bar_position = :stack,
legend = :topleft,
label = ["err" "eigen" "norm" "matrix" "coarse"],
title = "CPU time breakdown (s)",
xticks = (1:2, ["1-grid estimate", "2-grid estimate"])
)
plot(p1, p2, p3, p4, layout=4)
end
runExperiment()
using InvariantMeasures
using ValidatedNumerics
using Plots
using LaTeXStrings
using StatsPlots
include("warmup.jl")
function runExperiment()
time_assembling = @elapsed begin
# Note that (unlike the experiment in [Galatolo, Nisoli] paper) we do not need
# to take Iterate(D, 2) here
D = Mod1Dynamic(x->2*x+0.5*x*(1-x))
# different backend, a tad slower
# D = mod1_dynamic(x -> 2*x+0.5*x*(1-x))
B = Ulam(1024)
Q = DiscretizedOperator(B, D)
end
time_norms = @elapsed norms = powernormbounds(B, D; Q=Q)
time_eigen = @elapsed w = invariant_vector(B, Q)
time_error = @elapsed error = distance_from_invariant(B, D, Q, w, norms)
time_assembling_fine = @elapsed begin
B_fine = Ulam(2^16)
Q_fine = DiscretizedOperator(B_fine, D)
end
time_norms_fine = @elapsed norms_fine = finepowernormbounds(B, B_fine, D, norms; Q_fine=Q_fine)
time_eigen_fine = @elapsed w_fine = invariant_vector(B_fine, Q_fine)
time_error_fine = @elapsed error_fine = distance_from_invariant(B_fine, D, Q_fine, w_fine, norms_fine)
A, BB = dfly(strong_norm(B), aux_norm(B), D)
p1 = plot(D, title="Dynamic (dfly coeffs $(round(A, sigdigits=2)),$(round(BB, sigdigits=2)))", label=L"T(x)", legend=:bottomright)
p2 = plot(B, w, title="Invariant measure (n=$(length(B)))")
p2 = plot!(p2, B, error, w, label="L1 error $(round(error, sigdigits=2))")
p3 = plot(B_fine, w_fine, title="Invariant measure (n=$(length(B_fine)))")
p3 = plot!(p3, B_fine, error_fine, w_fine, label="L1 error $(round(error_fine, sigdigits=2))")
p4 = groupedbar(
vcat(
[time_error time_eigen time_norms time_assembling 0],
[time_error_fine time_eigen_fine time_norms_fine time_assembling_fine time_assembling+time_norms]
),
bar_position = :stack,
legend = :topleft,
label = ["err" "eigen" "norm" "matrix" "coarse"],
title = "CPU time breakdown (s)",
xticks = (1:2, ["1-grid estimate", "2-grid estimate"])
)
plot(p1, p2, p3, p4, layout=4)
end
runExperiment()
using InvariantMeasures
using ValidatedNumerics
using Plots
using LaTeXStrings
using StatsPlots
include("warmup.jl")
function runExperiment()
time_assembling = @elapsed begin
θ = 109/64
α = 51/64
D = PwMap([x->θ*abs(x-0.5)^α, x->1-θ*abs(x-0.5)^α],
[@interval(0), @interval(0.5), @interval(1)])
B = Ulam(1024)
Q = DiscretizedOperator(B, D)
end
time_norms = @elapsed norms = powernormbounds(B, D; Q=Q)
# time_eigen = @elapsed w = invariant_vector(B, Q)
# time_error = @elapsed error = distance_from_invariant(B, D, Q, w, norms)
# time_assembling_fine = @elapsed begin
# B_fine = Ulam(2^16)
# Q_fine = DiscretizedOperator(B_fine, D)
# end
# time_norms_fine = @elapsed norms_fine = finepowernormbounds(B, B_fine, D, norms; Q_fine=Q_fine)
# time_eigen_fine = @elapsed w_fine = invariant_vector(B_fine, Q_fine)
# time_error_fine = @elapsed error_fine = distance_from_invariant(B_fine, D, Q_fine, w_fine, norms_fine)
# A, BB = dfly(strong_norm(B), aux_norm(B), D)
# p1 = plot(D, title="Dynamic (dfly coeffs $(round(A, sigdigits=2)),$(round(BB, sigdigits=2)))", label=L"T(x)", legend=:bottomright)
# p2 = plot(B, w, title="Invariant measure (n=$(length(B)))")
# p2 = plot!(p2, B, error, w, label="L1 error $(round(error, sigdigits=2))")
# p3 = plot(B_fine, w_fine, title="Invariant measure (n=$(length(B_fine)))")
# p3 = plot!(p3, B_fine, error_fine, w_fine, label="L1 error $(round(error_fine, sigdigits=2))")
# p4 = groupedbar(
# vcat(
# [time_error time_eigen time_norms time_assembling 0],
# [time_error_fine time_eigen_fine time_norms_fine time_assembling_fine time_assembling+time_norms]
# ),
# bar_position = :stack,
# legend = :topleft,
# label = ["err" "eigen" "norm" "matrix" "coarse"],
# title = "CPU time breakdown (s)",
# xticks = (1:2, ["1-grid estimate", "2-grid estimate"])
# )
# plot(p1, p2, p3, p4, layout=4)
end
runExperiment()
using InvariantMeasures
using ValidatedNumerics
using Plots
using LaTeXStrings
using StatsPlots
include("warmup.jl")
function runExperiment()
time_assembling = @elapsed begin
D = PwMap([x->2.5*x, x->4*x-1, x->4*x-2, x-> 4*x-3],
[@interval(0), @interval(0.25), @interval(0.5), @interval(0.75), @interval(1)])
B = Ulam(1024)
Q = DiscretizedOperator(B, D)
end
time_norms = @elapsed norms = powernormbounds(B, D; Q=Q)
time_eigen = @elapsed w = invariant_vector(B, Q)
time_error = @elapsed error = distance_from_invariant(B, D, Q, w, norms)
time_assembling_fine = @elapsed begin
B_fine = Ulam(2^16)
Q_fine = DiscretizedOperator(B_fine, D)
end
time_norms_fine = @elapsed norms_fine = finepowernormbounds(B, B_fine, D, norms; Q_fine=Q_fine)
time_eigen_fine = @elapsed w_fine = invariant_vector(B_fine, Q_fine)
time_error_fine = @elapsed error_fine = distance_from_invariant(B_fine, D, Q_fine, w_fine, norms_fine)
A, BB = dfly(strong_norm(B), aux_norm(B), D)
p1 = plot(D, title="Dynamic (dfly coeffs $(round(A, sigdigits=2)),$(round(BB, sigdigits=2)))", label=L"T(x)", legend=:bottomright)
p2 = plot(B, w, title="Invariant measure (n=$(length(B)))")
p2 = plot!(p2, B, error, w, label="L1 error $(round(error, sigdigits=2))")
p3 = plot(B_fine, w_fine, title="Invariant measure (n=$(length(B_fine)))")
p3 = plot!(p3, B_fine, error_fine, w_fine, label="L1 error $(round(error_fine, sigdigits=2))")
p4 = groupedbar(
vcat(
[time_error time_eigen time_norms time_assembling 0],
[time_error_fine time_eigen_fine time_norms_fine time_assembling_fine time_assembling+time_norms]
),
bar_position = :stack,
legend = :topleft,
label = ["err" "eigen" "norm" "matrix" "coarse"],
title = "CPU time breakdown (s)",
xticks = (1:2, ["1-grid estimate", "2-grid estimate"])
)
plot(p1, p2, p3, p4, layout=4)
end
runExperiment()
using InvariantMeasures
using ValidatedNumerics
m = 30
m_extend = 100
D = PwMap([x->17*x/5,
x->(34*((17*x-5)/17)/25+3)*((17*x-5)/17),
x->(34*((17*x-10)/17)/25+3)*((17*x-10)/17),
x->17*((17*x-15)/17)/5],
[Interval(0), Interval(5)/17, Interval(10)/17, Interval(15)/17, Interval(1)])
B = Ulam(1024)
Q = DiscretizedOperator(B, D)
normQ = opnormbound(weak_norm(B), Q)
trivial_norms = norms_of_powers_trivial(weak_norm(B), Q, m)
computed_norms = norms_of_powers(weak_norm(B), m, Q, integral_covector(B))
(dfly_strongs, dfly_norms) = norms_of_powers_dfly(B, D, m)
norms = min.(trivial_norms, computed_norms, dfly_norms) # in the current version, dfly_norms are always larger and can be omitted
better_norms = refine_norms_of_powers(norms, m_extend)
w = invariant_vector(B, Q)
@show distance_from_invariant(B, D, Q, w, better_norms)
B_fine = Ulam(2^15)
Q_fine = DiscretizedOperator(B_fine, D)
norm_Q_fine = opnormbound(weak_norm(B_fine), Q_fine)
trivial_norms_fine = norms_of_powers_trivial(weak_norm(B_fine), Q_fine, m_extend)
twogrid_norms_fine = norms_of_powers_from_coarser_grid(B_fine, B, D, better_norms, norm_Q_fine)
(dfly_strongs_fine, dfly_norms_fine) = norms_of_powers_dfly(B_fine, D, m_extend)
norms_fine = min.(trivial_norms_fine, twogrid_norms_fine, dfly_norms_fine)
better_norms_fine = refine_norms_of_powers(norms_fine, m_extend)
w_fine = invariant_vector(B_fine, Q_fine)
@show distance_from_invariant(B_fine, D, Q_fine, w_fine, better_norms_fine)
using InvariantMeasures
using ValidatedNumerics
# Testing whether the "shooting method" beats taking successive preimages
# f = x -> 2*x + 0.1*InvariantMeasures.sinpi(2*x)
# preim1 = root(x ->f(x)-1., 0..4, 0)
f = x-> 2.5x*(1-x) + 0.01*x*x + 0.3*x*x*x + 0.003*x*x*x*x
preim1 = @interval(0.5)
mypreim(y) = root(x -> f(x)-y, hull(0..0,preim1), 0)
n = 15
y0 = @interval(0.21)
# method 1
ys = fill(y0, n+1)
ys[1] = y0
for i = 1:n
ys[i+1] = mypreim(ys[i])
end
# method 2
zs = fill(hull(0..0,preim1), n)
fs = fill(f, n)
nthpreimage!(y0, fs, zs)
z = zs[1]
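# compare the preimages and their interval diameters: smaller diameters mean tighter enclosures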
[zs[end:-1:1] ys[2:end]]
[diam.(zs[end:-1:1]) diam.(ys[2:end])]
diam.(zs[end:-1:1]) ./ diam.(ys[2:end])
using InvariantMeasures
using ValidatedNumerics
using Plots
using LaTeXStrings
D = Mod1Dynamic(x -> 4*x + 0.01*InvariantMeasures.sinpi(8*x))
#D = Mod1Dynamic(x -> 16*x + 0.01*InvariantMeasures.sinpi(32*x))
#D = Mod1Dynamic(x->2*x+0.5*x*(1-x))
# D = PwMap(
# [x -> 17x/5,
# x -> 34(x-5//17)^2/25 + 3(x-5//17),
# x -> 34(x-10//17)^2/25 + 3(x-10//17),
# x -> 17(x-15//17)/5
# ],
# [0, @interval(5/17), @interval(10/17), @interval(15/17), 1]
# )
num_norms = 30
B = Hat(1024)
Q = DiscretizedOperator(B, D)
trivial_norms = norms_of_powers_trivial(weak_norm(B), Q, num_norms)
computed_norms = norms_of_powers(weak_norm(B), num_norms, Q, integral_covector(B))
(dfly_strongs, dfly_norms) = norms_of_powers_dfly(B, D, num_norms)
norms = min.(trivial_norms, computed_norms, dfly_norms)
better_norms = refine_norms_of_powers(norms, num_norms)
pgfplotsx()
p = plot(trivial_norms,
label = L"$\|Q\|^k$",
yscale = :log10,
legend = :bottomleft,
title = "Various bounds for norms of powers",
xlabel = L"$k$",
ylabel = L"bound to $\|Q^k|_U\|$"
)
plot!(p, computed_norms,
label = "computational bounds")
plot!(p, dfly_norms,
label = L"DFLY $2\times 2$ matrix bounds")
plot!(p, better_norms,
label = "min(previous) + refinement")
# savefig(p, "norm_bounds_kinds.tikz")
using InvariantMeasures
using ValidatedNumerics
using Plots
using LaTeXStrings
D = Mod1Dynamic(x -> 4*x + 0.01*InvariantMeasures.sinpi(8*x))
#D = Mod1Dynamic(x -> 16*x + 0.01*InvariantMeasures.sinpi(32*x))
#D = Mod1Dynamic(x->2*x+0.5*x*(1-x))
# D = PwMap(
# [x -> 17x/5,
# x -> 34(x-5//17)^2/25 + 3(x-5//17),
# x -> 34(x-10//17)^2/25 + 3(x-10//17),
# x -> 17(x-15//17)/5
# ],
# [0, @interval(5/17), @interval(10/17), @interval(15/17), 1]
# )
function plot_norms(D; args...)
dims = 2 .^ (1:12)
num_norms = 30
norms = fill(NaN, (num_norms, length(dims)))
for (i, d) in enumerate(dims)
B = Ulam(d)
Q = DiscretizedOperator(B, D)
norms[:,i] = norms_of_powers(weak_norm(B), num_norms, Q, integral_covector(B))
end
pgfplotsx()
plot(norms,
label = L"n = " .* string.(dims'),
yscale= :log10,
legend = :topright,
# ylims = (1e-6, 1)
xlabel = L"k",
ylabel = L"computational bounds to $\|Q^k|_U\|$";
args...
)
end
p1 = plot_norms(Mod1Dynamic(x -> 4*x + 0.01*InvariantMeasures.sinpi(8*x)),
title = L"4x + 0.01\sin(8 \pi x)", legend=:none)
p2 = plot_norms(Mod1Dynamic(x->2*x+0.5*x*(1-x)),
title = "Lanford map", legend = :bottomleft)
p = plot(p1, p2)
# savefig(p, "norms_vs_dimension_plot.tikz")
# this small script takes as an input an example
# file and outputs a formatted file with the outputs
include(ARGS[1])
using InvariantMeasures
using ValidatedNumerics
using Plots
using LaTeXStrings
using StatsPlots
pgfplotsx()
function onegrid(T, Btype, size)
time_assembling = @elapsed begin
D = Mod1Dynamic(T)
# different backend, a tad slower
# D = mod1_dynamic(x -> x->2*x+0.5*x*(1-x))
B = Btype(size)
Q = DiscretizedOperator(B, D)
end
time_norms = @elapsed norms = powernormbounds(B, D; Q=Q)
time_eigen = @elapsed w = invariant_vector(B, Q)
time_error = @elapsed error = distance_from_invariant(B, D, Q, w, norms)
times = [time_error time_eigen time_norms time_assembling 0]
return times, error, (B, D, norms, time_assembling+time_norms)
end
function twogrid(Btype, size, (B, D, norms, time_coarse))
time_assembling_fine = @elapsed begin
B_fine = Btype(size)
Q_fine = DiscretizedOperator(B_fine, D)
end
time_norms_fine = @elapsed norms_fine = finepowernormbounds(B, B_fine, D, norms; Q_fine=Q_fine)
time_eigen_fine = @elapsed w_fine = invariant_vector(B_fine, Q_fine)
time_error_fine = @elapsed error_fine = distance_from_invariant(B_fine, D, Q_fine, w_fine, norms_fine)
times_fine = [time_error_fine time_eigen_fine time_norms_fine time_assembling_fine time_coarse]
return times_fine, error_fine
end
function time_convergence_plot(T, Btype, k_onegrid, k_twogrid)
n_onegrid = 2 .^ k_onegrid
times_onegrid = fill(NaN, length(n_onegrid), 5)
errors_onegrid = fill(NaN, size(n_onegrid))
onegrid(T, Btype, n_onegrid[1]) #warmup
for i in 1:length(n_onegrid)
times_onegrid[i,:], errors_onegrid[i], _ = onegrid(T, Btype, n_onegrid[i])
end
n_twogrid = 2 .^ k_twogrid
_, _, coarse_data = onegrid(T, Btype, 1024)
times_twogrid = fill(NaN, length(n_twogrid), 5)
errors_twogrid = fill(NaN, size(n_twogrid))
twogrid(Btype, n_twogrid[1], coarse_data) #warmup
for i in 1:length(n_twogrid)
times_twogrid[i,:], errors_twogrid[i] = twogrid(Btype, n_twogrid[i], coarse_data)
end
p1 = groupedbar(
times_onegrid,
bar_position = :stack,
legend = false,
label = ["err" "eigen" "norm" "matrix" "coarse"],
title = "CPU time breakdown (s)",
xticks = (1:length(n_onegrid), LaTeXString.(raw"2^{" .* string.(k_onegrid) .* raw"}")),
link = :y,
)
p2 = groupedbar(
times_twogrid,
bar_position = :stack,
legend = false,
label = ["err" "eigen" "norm" "matrix" "coarse"],
title = "CPU time breakdown (s)",
xticks = (1:length(n_twogrid), LaTeXString.(raw"2^{" .* string.(k_twogrid) .* raw"}")),
link = :y,
)
p3 = plot(
n_onegrid,
errors_onegrid,
title = "Error",
mark = :dot,
yscale = :log10,
xscale = :log10,
xticks = (n_onegrid, LaTeXString.(raw"2^{" .* string.(k_onegrid) .* raw"}" )),
label = "One-grid strategy",
legend = :bottomleft,
link = :y,
)
p4 = plot(
n_twogrid,
errors_twogrid,
title = "Error",
mark = :dot,
yscale = :log10,
xscale = :log10,
color = :red,
xticks = (n_twogrid, LaTeXString.(raw"2^{" .* string.(k_twogrid) .* raw"}")),
label = "Two-grid strategy",
legend = :bottomleft,
link = :y,
)
p = plot(p1, p2, p3, p4)
end
p = time_convergence_plot(x->2x+0.5x*(1-x), Ulam, 8:11, 11:14)
# large-scale version:
# p = time_convergence_plot(x->2x+0.5x*(1-x), Ulam, 8:14, 13:26); savefig(p, "time_convergence_plot.tikz"); savefig(p, "time_convergence_plot.pdf");
# savefig(p, "time_convergence_plot.tikz")
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 959 | using InvariantMeasures
using ValidatedNumerics
using Plots
using LaTeXStrings
using StatsPlots
"""
Simple and quick experiment to precompile all needed functions before timing measurements
"""
D = Iterate(mod1_dynamic(x->2*x+0.5*x*(1-x)), 4)
B = Ulam(8)
Q = DiscretizedOperator(B, D)
norms = powernormbounds(B, D; Q=Q)
B_fine = Ulam(64)
Q_fine = DiscretizedOperator(B_fine, D)
norms_fine = finepowernormbounds(B, B_fine, D, norms; Q_fine=Q_fine)
w_fine = invariant_vector(B_fine, Q_fine)
error_fine = distance_from_invariant(B_fine, D, Q_fine, w_fine, norms_fine)
D = Mod1Dynamic(x -> 4*x + 0.01*InvariantMeasures.sinpi(8*x))
B = Hat(64)
Q = DiscretizedOperator(B, D)
norms = powernormbounds(B, D; Q=Q)
B_fine = Hat(128)
Q_fine = DiscretizedOperator(B_fine, D)
norms_fine = finepowernormbounds(B, B_fine, D, norms; Q_fine=Q_fine)
w_fine = invariant_vector(B_fine, Q_fine)
error_fine = distance_from_invariant(B_fine, D, Q_fine, w_fine, norms_fine)
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 4267 | module BasisDefinition
using ..DynamicDefinition
import Base
export Basis, DualComposedWithDynamic, ProjectDualElement, AverageZero, assemble,
integral_covector, one_vector, is_integral_preserving, strong_norm,
weak_norm, aux_norm, is_dual_element_empty, nonzero_on, is_refinement
abstract type Basis end
length(B::Basis) = @error "Not Implemented"
struct DualComposedWithDynamic{B<:Basis, D<:Dynamic}
basis::B
dynamic::D
ϵ::Float64
end
Base.iterate(S::DualComposedWithDynamic{B, D}, state) where {B<:Basis, D<:Dynamic} = @error "Not implemented"
struct ProjectDualElement{B<:Basis, DT}
basis::B
j_min::Int64
j_max::Int64
dual_element::DT
end
ProjectDualElement(basis::B, j_min, j_max, y::DT) where {B,DT} = ProjectDualElement{B,DT}(basis, j_min, j_max, y)
Base.length(S::ProjectDualElement{B,DT}) where {B,DT} = S.j_max - S.j_min + 1
is_dual_element_empty(B::Basis, I) = @error "Not Implemented"
nonzero_on(B::Basis, I) = @error "Not Implemented"
function ProjectDualElement(B::Basis, y)
j_min, j_max = nonzero_on(B, y)
return ProjectDualElement(B, j_min, j_max, y)
end
# Base.iterate(S::ProjectDualElement, state) = @error "Not Implemented"
evaluate(B::Basis, i, x) = @error "Not Implemented"
evaluate_integral(B::Basis, i, T = Float64) = @error "Not Implemented"
strong_norm(B::Basis) = @error "Must be specialized"
weak_norm(B::Basis) = @error "Must be specialized"
aux_norm(B::Basis) = @error "Must be specialized"
"""
Check if Bfine is a refinement of Bcoarse
"""
is_refinement(Bfine::Basis, Bcoarse::Basis) = @error "Not implemented"
"""
Covector that represents the integral
"""
integral_covector(B::Basis) = @error "Must be specialized"
"""
Vector that represents the function 1
"""
one_vector(B::Basis) = @error "Must be specialized"
"""
Integral-preserving discretizations may specialize this to "true"
"""
is_integral_preserving(B::Basis) = false
"""
Integral of a function in U_h
Args:
v (any type of vector):
Returns:
the integral, computed with the arithmetic of v.
"""
function integral(B::Basis, v; T = Float64)
return sum([T(v[i])*evaluate_integral(B, i, T) for i in 1:length(B)])
end
"""
Yields a basis of the space of average zero vectors
"""
struct AverageZero{B<:Basis}
basis::B
end
Base.iterate(S::AverageZero{B}, state) where {B} = @error "Not Implemented"
Base.length(S::AverageZero) = length(S.basis)-1
"""
Return a constant Kh (typically scales as h ~ 1/n) such that `||P_h f-f||\\leq Kh ||f||_s`
Must be rounded up correctly!
Arg:
B::Basis
"""
weak_projection_error(B::Basis) = @error "Not implemented"
"""
Return a constant Eh (typically scales as h ~ 1/n) such that `|||P_h f|||\\leq |||f|||+ Eh * ||f||_s`
Must be rounded up correctly!
Arg:
B::Basis
"""
aux_normalized_projection_error(B::Basis) = @error "Not implemented"
"""
Return a constant M₁n such that for a vector v in Uₕ `||v||_s\\leq M1n*||v||`
Must be rounded up correctly!
"""
strong_weak_bound(B::Basis) = @error "Not implemented"
"""
Return a constant M₂ such that for a vector v in Uₕ `|||v|||\\leq M_2||v||`
Must be rounded up correctly!
"""
aux_weak_bound(B::Basis) = @error "Not implemented"
"""
Return constants S₁, S₂ such that for a vector v in Uₕ `||v||\\leq S_1||v||_s+S_2|||v|||`
Must be rounded up correctly!
"""
weak_by_strong_and_aux_bound(B::Basis) = @error "Not implemented"
"""
Return constants W₁, W₂ such that for a vector v in Uₕ `||v||\\leq W_1||v||_1+W_2||v||_{\\infty}`
Must be rounded up correctly!
"""
bound_weak_norm_from_linalg_norm(B::Basis) = @error "Not implemented"
"""
Return a constant A such that for a vector v in Uₕ `||v||_1\\leq A||v||`
Must be rounded up correctly!
"""
bound_linalg_norm_L1_from_weak(B::Basis) = @error "Not implemented"
"""
Return a constant A such that for a vector v in Uₕ `||v||_\\infty \\leq A||v||`
Must be rounded up correctly!
"""
bound_linalg_norm_L∞_from_weak(B::Basis) = @error "Not implemented"
"""
Bounds ||u||_s, where ||u|| is the invariant measure normalized with integral(u)=1.
"""
invariant_measure_strong_norm_bound(B::Basis, D::Dynamic) = @error "Must be specialized"
"""
Returns an a priori bound on the weak norm of the abstract operator L
"""
bound_weak_norm_abstract(B::Basis) = @error "Must be specialized"
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 8163 | module C2BasisDefinition
"""
C2 basis on [0,1]
"""
using ..BasisDefinition, ..DynamicDefinition, ValidatedNumerics
import Base: iterate, length
import ..BasisDefinition: one_vector, integral_covector, is_integral_preserving
import ...InvariantMeasures: NormKind, Linf
export C2Basis, dual_val, dual_der, C1, C2
struct C1 <: NormKind end
struct C2 <: NormKind end
"""
Equispaced partition of size n of [0,1]
"""
struct EquispacedPartitionInterval{T} <: AbstractVector{T}
n::Integer
end
function Base.getindex(p::EquispacedPartitionInterval{T}, i::Int) where {T}
@boundscheck 1 <= i <= p.n+1 || throw(BoundsError(p, i))
return convert(T, i-1) / p.n
end
EquispacedPartitionInterval(i::Int) = @error "The real type must be specified explicitly"
Base.size(p::EquispacedPartitionInterval) = (p.n+1,)
Base.IndexStyle(::Type{<:EquispacedPartitionInterval}) = IndexLinear()
Base.issorted(p::EquispacedPartitionInterval) = true
struct C2Basis{T<:AbstractVector} <:Basis
p::T
# TODO: check in constructor that p is sorted and starts with 0
end
C2Basis(n::Integer) = C2Basis(EquispacedPartitionInterval{Float64}(n))
"""
Return the size of the C2 basisBase.length(S::AverageZero) = length(S.basis)-1
"""
Base.length(b::C2Basis) = 2*length(b.p)
function ϕ(x::Interval{T}) where {T}
if x ∩ Interval{T}(-1, 1) == ∅
return zero(x)
else
x₋ = x ∩ Interval{T}(-1, 0)
val₋ = evalpoly(x₋, (1, 0, 0, 10, 15, 6))
x₊ = x ∩ Interval{T}(0, 1)
val₊ = evalpoly(x₊, (1, 0, 0, -10, 15, -6))
return val₋ ∪ val₊
end
end
function ϕprime(x::Interval{T}) where {T}#Derivative of ϕ
if x ∩ Interval{T}(-1, 1) == ∅
return zero(x)
else
x₋ = x ∩ Interval{T}(-1, 0)
val₋ = evalpoly(x₋, (0, 0, 30, 60, 30))
x₊ = x ∩ Interval{T}(0, 1)
val₊ = evalpoly(x₊, (0, 0, -30, 60, -30))
return val₋ ∪ val₊
end
end
function ν(x::Interval{T}) where {T}
if x ∩ Interval{T}(-1, 1) == ∅
return zero(x)
else
x₋ = x ∩ Interval{T}(-1, 0)
val₋ = evalpoly(x₋, (0, 1, 0, -6, -8, -3))
x₊ = x ∩ Interval{T}(0, 1)
val₊ = evalpoly(x₊, (0, 1, 0, -6, 8, -3))
return val₋ ∪ val₊
end
end
function νprime(x::Interval{T}) where {T} #Derivative of ν
if x ∩ Interval{T}(-1, 1) == ∅
return zero(x)
else
x₋ = x ∩ Interval{T}(-1, 0)
val₋ = evalpoly(x₋, (1, 0, -18, -32, -15))
x₊ = x ∩ Interval{T}(0, 1)
val₊ = evalpoly(x₊, (1, 0, -18, 32, -15))
return val₋ ∪ val₊
end
end
κ(x) = 6*x*(1-x)
function Base.getindex(B::C2Basis, i::Int)
n = length(B.p)
@boundscheck 1 <= i <= 2*n || throw(BoundsError(B, i))
if i<=n
#println("val")
return x->ϕ((n-1)*x-i+1)
elseif n<i<=2*n
        return x->ν((n-1)*x-i+n+1)/(n-1) # we have to subtract n from i
end
#return x->κ(x)
end
function basis_element_with_der(B::C2Basis, i::Int)
n = length(B.p)
@boundscheck 1 <= i <= 2*n || throw(BoundsError(B, i))
if i<=n
#println("val")
return x->ϕ((n-1)*x-i+1), x-> (n-1)*ϕprime((n-1)*x-i+1)
elseif n<i<=2*n
        return x->ν((n-1)*x-i+n+1)/(n-1), x-> νprime((n-1)*x-i+n+1) # we have to subtract n from i
end
#return x->κ(x) for i in 1:N+1
end
function BasisDefinition.is_dual_element_empty(::C2Basis, d)
# TODO: the preim() may indeed be empty, so there could be an additional check here
return false
end
# Check the absolute values in the formula
dual_val(f::Function, fprime::Function, x, der, derder) = f(x)/der
dual_der(f::Function, fprime::Function, x, der, derder) = fprime(x)/der^2-f(x)*derder/der^3
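# Derivation sketch for the two formulas above: if y(x) denotes the branch
# preimage of x, then y'(x) = 1/T'(y), so
#   d/dx [ f(y)/T'(y) ] = f'(y)/T'(y)^2 - f(y)·T''(y)/T'(y)^3,
# which is dual_der with der = T'(y) and derder = T''(y).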
"""
Return (in an iterator) the pairs (i, (x, |T'(x)|)) where x is a preimage of p[i], which
describe the "dual" L* evaluation(p[i])
"""
function Base.iterate(S::DualComposedWithDynamic{T, Dynamic}, state = (1, 1)) where T<:C2Basis
    i, k = state
    n = length(S.basis.p)
    if i == length(S.basis)+1
        return nothing
    end
    # remark that this version supposes that for each i there exists a preimage
    # another more specific version should be implemented for maps with
    # incomplete branches
    # the elements 1:n of the basis act through point evaluation, the
    # elements n+1:2n through evaluation of the derivative
    x = preim(S.dynamic, k, S.basis.p[i <= n ? i : i-n], S.ϵ)
    absT′ = abs(der(S.dynamic, x))
    derder = der_der(S.dynamic, x)
    if i <= n
        ret = (x, (f, fprime) -> dual_val(f, fprime, x, absT′, derder))
    else
        ret = (x, (f, fprime) -> dual_der(f, fprime, x, absT′, derder))
    end
    if k == nbranches(S.dynamic)
        return ((i, ret), (i+1, 1))
    else
        return ((i, ret), (i, k+1))
    end
end
BasisDefinition.is_refinement(Bf::C2Basis, Bc::C2Basis) = Bc.p ⊆ Bf.p
function integral_covector(B::C2Basis)
n = length(B.p)
return 1/(n-1)*[@interval 0.5; ones(Interval{Float64}, n-2); @interval 0.5; @interval 1//10; zeros(n-2); @interval -1//10]'
end
function one_vector(B::C2Basis)
return [ones(length(B.p)); zeros(length(B.p))]
end
"""
Return the range of indices of the elements of the basis whose support intersects
with the given dual element (i.e., a pair (y, absT', derder)).
"""
function BasisDefinition.nonzero_on(B::C2Basis, dual_element)
y = dual_element[1]
# Note that this cannot rely on arithmetic unless it is verified
# searchsortedfirst(a, x) return the index of the first value in a greater than or equal to x
lo = max(searchsortedfirst(B.p, y.lo) -1, 1)
hi = searchsortedfirst(B.p, y.hi)
if lo == 1 # 1:N+1 does not make sense and becomes 1:N
hi = min(hi, length(B))
end
return (lo, hi)
end
"""
Given a preimage ```y``` of a point ```x```, this iterator returns
```\\phi_j(y)/T'(y) ```
"""
function Base.iterate(S::ProjectDualElement{T}, state = (S.j_min, :val) ) where T <: C2Basis
dual = S.dual_element[2]
j = state[1]
n = length(S.basis.p)
if state[2] == :val # we first fill up the coordinates of the ϕ
f, fprime = basis_element_with_der(S.basis, j)
if state[1] == S.j_max
return ((j, dual(f, fprime)), (S.j_min, :der))
end
return ((j, dual(f, fprime)), (j+1, :val))
end
if state[2] == :der # we then fill up the coordinates of the ν
if state[1] == S.j_max+1
return nothing
end
f, fprime = basis_element_with_der(S.basis, j+n)
return ((j+n, dual(f, fprime)), (j+1, :der))
end
end
using IntervalOptimisation
function infnormoffunction(B::C2Basis, v)
n = length(B.p)
maximum = -Inf
for i in 1:length(B.p)-1
coeff= v[i]*[1, 0, 0, -10, 15, -6] #coeff for ϕ
coeff+= (v[i+n]/(n-1))*[0, 1, 0, -6, 8, -3] #coeff for ν
# coefficients from the right endpoint
coeff+= v[i+1]*[0, 0, 0, 10, -15, 6]
coeff+= (v[i+n+1]/(n-1))*[0, 0, 0, -4, +7, -3]
dom = Interval(0, 1)
f(x) = abs(evalpoly(x, coeff))
maximum = max(maximum, maximise(f, dom)[1])
end
return maximum
end
function infnormofderivative(B::C2Basis, v)
n = length(B.p)
maximum = -Inf
for i in 1:length(B.p)-1
coeff= (n-1)*v[i]*[0, 0, -30, 60, -30] #coeff for ϕ
coeff+= v[i+n]*[1, 0, -18, 32, -15] #coeff for ν
# coefficients from the right endpoint
coeff+= (n-1)*v[i+1]*[0, 0, 30, -60, 30]
coeff+= v[i+n+1]*[0, 0, -12, +28, -15]
dom = Interval(0, 1)
f(x) = abs(evalpoly(x, coeff))
maximum = max(maximum, maximise(f, dom)[1])
end
return maximum
end
C1Norm(B::C2Basis, v) = infnormoffunction(B,v)+infnormofderivative(B,v)
rescaling_factor(B::C2Basis) = 3*length(B.p)
Base.length(S::AverageZero{T}) where T<:C2Basis = length(S.basis)-1
function Base.iterate(S::AverageZero{T}, state = 1) where T<:C2Basis
n = length(S.basis)÷2
v = zeros(2*n)
i = state
if i == 2*n
return nothing
elseif 1 <= i < n
v[i+1] = 1
v[1] = -2
elseif i == n
v[i+1] = 1
v[1] = -1/5
elseif n<i<2*n-1
v[i+1] = 1
elseif i==2*n-1
v[i+1] = 1
v[1] = 1/5
end
return v, state+1
end
end
using RecipesBase
@userplot PlotC2
@recipe function f(h::PlotC2)
    if length(h.args) != 2 || !(h.args[1] isa C2Basis) || !(h.args[2] isa AbstractVector)
        error("PlotC2 needs as input a C2Basis and a vector")
end
B = h.args[1]
w = h.args[2]
seriestype := :path
collect(B), mid.(w)
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 12119 | """
Chebyshev basis on the Interval [0,1]
"""
using ..BasisDefinition, ..Mod1DynamicDefinition, ..DynamicDefinition
using ValidatedNumerics
import ..BasisDefinition: one_vector, integral_covector, is_integral_preserving
struct Chebyshev{T<:AbstractVector} <:Basis
p::T
# TODO: check in constructor that p is sorted, starts with 0 and ends with 1
end
#we suppose ValidatedNumerics computes the Chebyshev points with adequate precision
ChebCouples(n, T) = hcat(
[Interval{T}(pi); (reverse([Interval{T}(0.0); [j*Interval{T}(pi)/n for j in 1:n-1] ]))],
[Interval{T}(0.0) ; (reverse([Interval{T}(1.0); [cos(j*Interval{T}(pi)/n) for j in 1:n-1]]).+1)/2])
ChebPoints(n, T) = ChebCouples(n, T)[:, 2]
Chebyshev(n::Integer; T = Float64) = Chebyshev(ChebPoints(n, T))
"""
Return the size of the Chebyshev basis
"""
Base.length(B::Chebyshev) = length(B.p)
"""
Eval the Chebyshev polynomial up to degree n on an array of
points in [-1, 1].
Not satisfactory, the intervals explode
"""
function _eval_T(n, x::Array{T}) where {T}
k = length(x)
M = zeros(T, k, n+1)
M[:, 1] = ones(k)
M[:, 2] = x
for i in 3:n+1
M[:, i] = (2*x).*M[:, i-1]-M[:, i-2]
end
return M
end
""" Eval a polynomial in Chebyshev basis, ClenshawBackward, using ball arithmetic
Following Viviane Ledoux, Guillaume Moroz
"Evaluation of Chebyshev polynomials on intervals andapplication to root finding"
"""
function eval_Clenshaw_BackwardFirst(coeff::Vector{Interval{S}}, x::Interval{T}) where {S,T}
coeff_a = mid.(coeff)
coeff_r = radius.(coeff)
a, r = midpoint_radius(x)
m = length(coeff)
u = zeros(Interval{T}, m+1)
ϵ = zeros(Interval{T}, m+1)
u[m] = coeff_a[m]
for k in reverse(2:m-1)
u_temp = 2*a*u[k+1]-u[k+2]+Interval{T}(coeff_a[k])
u[k], ϵ[k] = midpoint_radius(u_temp)
end
u_temp = a*u[2]-u[3]+Interval{T}(coeff_a[1])
u[1], ϵ[1] = midpoint_radius(u_temp)
e = zeros(Interval{T}, m+1)
e[m] = coeff_r[m]
for k in reverse(2:m-1)
e[k] = e[k+1]+2*r*abs(u[k+1])+ϵ[k]+coeff_r[k]
end
e[1] = e[2]+r*abs(u[1])+ϵ[1]+coeff_r[1]
γ = e[1].hi
return u[1]+Interval(-γ, γ)
end
eval_Clenshaw_BackwardFirst(coeff::Vector{Float64}, x::Interval) = eval_Clenshaw_BackwardFirst(Interval.(coeff), x)
function eval_Clenshaw_BackwardSecond(coeff::Vector{Interval{S}}, x::Interval{T}) where {S, T}
coeff_a = mid.(coeff)
coeff_r = radius.(coeff)
a, r = midpoint_radius(x)
m = length(coeff)
u = zeros(Interval{T}, m+1)
ϵ = zeros(Interval{T}, m+1)
u[m] = coeff_a[m]
for k in reverse(2:m-1)
u_temp = 2*a*u[k+1]-u[k+2]+Interval{T}(coeff_a[k])
u[k], ϵ[k] = midpoint_radius(u_temp)
end
u_temp = 2*a*u[2]-u[3]+Interval{T}(coeff_a[1])
u[1], ϵ[1] = midpoint_radius(u_temp)
e = zeros(Interval{T}, m+1)
e[m] = coeff_r[m]
for k in reverse(2:m-1)
e[k] = e[k+1]+(k+1)*(2*r*abs(u[k+1])+ϵ[k]+coeff_r[k])
end
e[1] = e[2]+2*r*abs(u[1])+ϵ[1]+coeff_r[1]
γ = e[1].hi
return u[1]+Interval(-γ, γ)
end
function Clenshaw(coeff, x)
n = length(coeff)
u = zeros(typeof(x), n+1)
u[n] = coeff[n]
for k in reverse(2:n-1)
u[k] = coeff[k]+2*x*u[k+1]-u[k+2]
end
u[1] = coeff[1]+x*u[2]-u[3]
return u[1]
end
function ClenshawSecond(coeff, x::T) where {T<:Real}
n = length(coeff)
u = zeros(T, n+1)
u[n] = coeff[n]
for k in reverse(2:n-1)
u[k] = coeff[k]+2*x*u[k+1]-u[k+2]
end
u[1] = coeff[1]+2*x*u[2]-u[3]
return u[1]
end
Clenshaw(coeff, x::Interval{T}) where {T} = eval_Clenshaw_BackwardFirst(coeff, x)
function ChebyshevDerivative(coeff, x::Interval{T}) where {T}
n = length(coeff)
coeff_der = [(i-1)*Interval{T}(coeff[i]) for i in 2:n]
#@info coeff
#@info coeff_der
return eval_Clenshaw_BackwardSecond(coeff_der, x)
end
evalChebyshev(coeff, x::Interval) = eval_Clenshaw_BackwardFirst(coeff, Interval(mid.(2*x-1)))
evalChebyshevDerivative(coeff, x::Interval) = 2*ChebyshevDerivative(coeff, 2*x-1)
function evalChebyshevCentered(coeff, x::Interval)
    m = Interval(mid.(x))
    return evalChebyshev(coeff, m)+evalChebyshevDerivative(coeff, x)*(x-m)
end
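# Minimal usage sketch (an illustration, not library API): the expansion with
# coefficients [0, 0, 1] is T₂ after the [0,1] → [-1,1] rescaling, so at
# x = 3/4 it evaluates to T₂(1/2) = 2·(1/2)² - 1 = -1/2.
_evalChebyshev_demo() = evalChebyshev([0.0, 0.0, 1.0], Interval(0.75))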
using IntervalOptimisation
function infnormoffunction(B::Chebyshev, v)
    val = Interval(0)
    try
        val = maximize(x->abs(evalChebyshevCentered(v, x)), Interval(0, 1))[1]
    catch
        print("Refining grid")
        f(x) = abs(evalChebyshevCentered(v, x))
        ran = range_estimate(f, Interval(0,1), 5)
        val = union(val, ran)
    end
    return val
end
function infnormofderivative(B::Chebyshev, v)
val = Interval(0)
try
val = maximize(x->abs(evalChebyshevDerivative(v, x)), Interval(0, 1))[1]
catch
print("Refining grid")
f(x) = abs(evalChebyshevDerivative(v, x))
ran = range_estimate(f, Interval(0,1), 5)
val = union(val, ran)
end
return val
end
import .C2BasisDefinition: C1Norm
C1Norm(B::Chebyshev, v) = infnormoffunction(B,v)+infnormofderivative(B,v)
rescaling_factor(B::Chebyshev) = log(length(B)+1)
Base.length(S::AverageZero{T}) where T<:Chebyshev = length(S.basis)-1
function Base.iterate(S::AverageZero{T}, state = 1) where T<:Chebyshev
B = S.basis
i = state
if i == length(B)
return nothing
end
v = zeros(length(B))
v[i+1] = 1
v[1] = -mid.(integral_covector(B)[i+1])
return (v, 2*i*i), state+1
end
# makes it so that B[i] returns the (i-1)-th Chebyshev polynomial
# on [0, 1]
"""
Returns a function that computes the values of the (i-1)-th Chebyshev polynomial
on [0, 1]
"""
function Base.getindex(B::Chebyshev, i::Int)
n = length(B)
@boundscheck 1 <= i <= n+1 || throw(BoundsError(B, i))
v = zeros(n+1)
v[i] = 1
return x-> Clenshaw(v, 2*x-1)
end
struct ChebyshevDual <: Dual
x::Vector{Interval} #TODO: a more generic type may be needed in future
xlabel::Vector{Int}
x′::Vector{Interval}
end
function ChebDualBranch(y, br::Branch, ylabel = 1:length(y), ϵ = 0.0)
if br.increasing
endpoint_X = br.X[2]
der = Contractors.derivative(br.f)(endpoint_X)
preim_der = preimages_and_derivatives(y, br, ylabel, ϵ)
return [preim_der[1]; endpoint_X],
[preim_der[2]; length(preim_der[2])+1],
[preim_der[3]; der]
    else
        endpoint_X = br.X[2]
        der = Contractors.derivative(br.f)(endpoint_X)
        preim_der = preimages_and_derivatives(y, br, ylabel, ϵ)
        return [preim_der[1]; endpoint_X],
               [preim_der[2]; length(preim_der[2])+1],
               [preim_der[3]; der]
    end
end
function Dual(B::Chebyshev, D::PwMap, ϵ)
@assert is_full_branch(D)
results = collect(ChebDualBranch(B.p, b, 1:length(B.p)-1, ϵ) for b in branches(D))
x = vcat((result[1] for result in results)...)
xlabel = vcat((result[2] for result in results)...)
x′ = vcat((result[3] for result in results)...)
return x, xlabel, x′
end
Base.length(dual::ChebyshevDual) = length(dual.x)
Base.eltype(dual::ChebyshevDual) = Tuple{eltype(dual.xlabel), Tuple{eltype(dual.x), eltype(dual.x′)}}
function iterate(dual::ChebyshevDual, state=1)
if state <= length(dual.x)
return ((dual.xlabel[state], (dual.x[state], abs(dual.x′[state]))), state+1)
else
return nothing
end
end
using FFTW
function chebtransform(w)
#@info sum(w)
n = length(w)-1
z= fft([reverse(w); w[2: end-1]])/n
#@info z
t = real.(z[1:length(w)])
t[1]/=2
t[end]/=2
return Interval.(t)
end
using ProgressMeter
function assemble(B::Chebyshev, D::Dynamic, ϵ=0.0; T = Float64)
n = length(B.p)
M = zeros(Interval{T}, (n, n))
x, labels, x′ = Dual(B, D, ϵ)
@showprogress for i in 1:n
ϕ = B[i]
w = zeros(Interval{Float64}, n)
for j in 1:length(x)
w[labels[j]]+=ϕ(x[j])/abs(x′[j])
end
#@info w
M[:, i] = chebtransform(mid.(w))
end
return M
end
is_integral_preserving(B::Chebyshev) = false
integral_covector(B::Chebyshev; T= Float64) = [Interval{T}(1); 0; [0.5*Interval{T}((-1)^n+1)/(1-n^2) for n in 2:length(B)-1]]'
one_vector(B::Chebyshev) = [1; zeros(length(B)-1)]
# """
# Return (in an iterator) the pairs (i, (x, |T'(x)|)) where x is a preimage of p[i], which
# describe the "dual" L* evaluation(p[i
# """
# function Base.iterate(S::DualComposedWithDynamic{T, D}, state = (1, 1)) where T<:Hat where D<:Dynamic
# @assert is_full_branch(S.dynamic)
# i, k = state
# if i == length(S.basis)+1
# return nothing
# end
# x = preim(S.dynamic, k, S.basis.p[i], S.ϵ)
# absT′ = abs(derivative(S.dynamic, x))
# if k == nbranches(S.dynamic)
# return ((i, (x, absT′)), (i+1, 1))
# else
# return ((i, (x, absT′)), (i, k+1))
# end
# end
# function BasisDefinition.is_dual_element_empty(::Hat, d)
# # TODO: the preim() may indeed be empty, so there could be an additional check here
# return false
# end
# BasisDefinition.is_refinement(Bf::Hat, Bc::Hat) = Bc.p ⊆ Bf.p
# function integral_covector(B::Hat)
# n = length(B)
# return 1/n * ones(Interval{Float64}, n)'
# end
# function one_vector(B::Hat)
# return ones(length(B))
# end
# """
# Return the range of indices of the elements of the basis whose support intersects
# with the given dual element (i.e., a pair (y, absT')).
# The range may end with length(B)+1; this must be interpreted "mod length(B)":
# it means that it intersects with the hat function peaked in 0 as well
# (think for instance y = 0.9999).
# """
# function BasisDefinition.nonzero_on(B::Hat, dual_element)
# y, absT′ = dual_element
# # Note that this cannot rely on arithmetic unless it is verified
# y = y ∩ Interval(0.,1.) # we assume it's bona-fide interval in [0,1]
# # this should work for preims(), since they are supposed to return
# # a number in [0,1]
# # finds in which semi-open interval [p[k], p[k+1]) y.lo and y.hi fall
# lo = searchsortedlast(B.p, y.lo)
# hi = searchsortedlast(B.p, y.hi)
# lo = min(lo, length(B)) # lo may be n+1 if y.lo==1
# hi = min(hi, length(B)) # hi may be n+1 if y.hi==1
# hi = hi + 1 # because the hat centered in p[k] is also nonzero in the interval before
# if lo == 1 # 1:N+1 does not make sense and would mean that the first interval is counted twice
# hi = min(hi, length(B))
# end
# return (lo, hi)
# end
# """
# Given a preimage ```y``` of a point ```x```, this iterator returns
# ```\\phi_j(y)/T'(y) ```
# """
# function Base.iterate(S::ProjectDualElement{T,DT}, state = S.j_min) where {T <: Hat,DT}
# if state == S.j_max+1
# return nothing
# end
# y, absT′ = S.dual_element
# j = state
# y_normalized = IntervalOnTorus(y)
# n = length(S.basis)
# return ((j, S.basis[mod(j, 1:n)](y_normalized) / absT′),
# state+1)
# end
# BasisDefinition.strong_norm(B::Hat) = Lipschitz
# BasisDefinition.weak_norm(B::Hat) = Linf
# BasisDefinition.aux_norm(B::Hat) = L1
# evaluate_integral(B::Hat, i, T) = T(i)/length(B)
# function Base.iterate(S::AverageZero{Hat}, state = 1)
# n = length(S.basis)
# if state == n
# return nothing
# end
# v = zeros(Float64, n)
# v[1] = 1
# v[state+1]=-1
# return (v, state+1)
# end
# BasisDefinition.weak_projection_error(B::Hat) = 0.5 ⊘₊ Float64(length(B), RoundDown)
# BasisDefinition.aux_normalized_projection_error(B::Hat) = 0.5 ⊘₊ Float64(length(B), RoundDown)
# BasisDefinition.strong_weak_bound(B::Hat) = 2. ⊗₊ Float64(length(B), RoundDown)
# BasisDefinition.aux_weak_bound(B::Hat) = 1.
# BasisDefinition.weak_by_strong_and_aux_bound(B::Hat) = (1., 1.)
# BasisDefinition.bound_weak_norm_from_linalg_norm(B::Hat) = @error "TODO"
# BasisDefinition.bound_linalg_norm_L1_from_weak(B::Hat) = @error "TODO"
# BasisDefinition.bound_linalg_norm_L∞_from_weak(B::Hat) = @error "TODO"
# function BasisDefinition.invariant_measure_strong_norm_bound(B::Hat, D::Dynamic)
# A, B = dfly(strong_norm(B), aux_norm(B), D)
# @assert A < 1.
# return B ⊘₊ (1. ⊖₋ A)
# end
# using RecipesBase
# """
# Plots a function in the Hat basis
# """
# @recipe function f(B::Hat, w::AbstractVector)
# legend --> :bottomright
# if eltype(w) <: Interval
# w = mid.(w)
# end
# @series begin
# seriestype --> :path
# label --> L"f_{\delta}"
# ylims --> (0, NaN)
# B.p, vcat(w, w[end])
# end
# end
# """
# Displays error on a function in the Hat basis
# """
# @recipe function f(B::Hat, error::Number, w)
# if eltype(w) <: Interval
# w = mid.(w)
# end
# if isfinite(error)
# @series begin
# seriestype --> :path
# seriesalpha --> 0.5
# fillrange --> vcat(w, w[end]) .- error
# label --> "Error area"
# B.p, vcat(w, w[end]) .+ error
# end
# end
# end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 1571 | using .C2BasisDefinition: C1, C1Norm
function opnormbound(N::Type{<:C1}, M, B)
n = size(M, 2)
est = 0
for i in 1:n
v = zeros(n)
v[i] = 1
z = C1Norm(B, v)
est+= C1Norm(B, M*v)/z
end
return est.hi
end
using ProgressMeter
function norms_of_powers_basis(B, m::Integer, Q::DiscretizedOperator, f::AbstractArray;
normv0::Real=-1., #used as "missing" value
normQ::Real=-1.,
normE::Real=-1.,
normEF::Real=-1.,
normIEF::Real=-1.,
normN::Real=-1.)
@assert eltype(f) <: Interval
T = typeof(zero(eltype(Q.L)).hi) # gets "Float64" from Q.L
n = size(Q.L, 1)
M = mid.(Q.L)
R = radius.(Q.L)
#δ = opnormbound(N, R)
#γz = gamma(T, max_nonzeros_per_row(Q.L))
#γn = gamma(T, n+3) # not n+2 like in the paper, because we wish to allow for f to be the result of rounding
#ϵ = zero(T)
midf = mid.(f)
# TODO: correct here
norm(v) = C1Norm(B, v)
norms = zeros(m)
#S = zeros((n, m))
#k = length(B.p)
factor = rescaling_factor(B)
@showprogress for (v, norm_0) in AverageZero(B)
#@info v
v/= norm_0
#@info v
#@info norm(v)
for i in 1:m
w = M*v
v = w - Q.e * (midf*w)[1]
#@info v
#@info "norm_$i" norm(v)*factor
# @info infnormoffunction(B, v)
# @info infnormofderivative(B, v)
norms[i] = max(norm(v).hi*factor, norms[i])
end
#@info norms
end
return norms
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 3446 | module Contractors
using ValidatedNumerics
export root, range_estimate, ShootingMethod, nthpreimage!, preimage, unique_sign, unique_increasing
"""
unique_sign(x)
Sign of an interval, but throws an error if it is not unique.
Used by various functions to compute orientations
"""
function unique_sign(x)
s = sign(x)
@assert isthin(s)
return s.hi
end
"""
unique_increasing(a, b)
Given intervals a, b, returns `true` if a < b, `false` if b < a, and raises an error if it is not uniquely determined.
"""
function unique_increasing(a::Interval, b::Interval)
if a.hi < b.lo
return true
elseif b.hi < a.lo
return false
else
error("Insufficient precision to check the sign of this function")
end
end
function unique_increasing(a, b) # Fallback for Float64
if a < b
return true
elseif a > b
return false
else
error("Could not determine sign")
end
end
# this seems slower
#using TaylorSeries
#derivative(f) = x-> f(Taylor1([x,1.],1))[1]
using DualNumbers
derivative(f) = x->f(Dual(x, 1..1)).epsilon
"""
Compute a single root with (possibly multivariate) interval Newton
x must be an Interval (univariate) or IntervalBox (multivariate)
Stops when the interval reaches a fixed point, when the diameter is smaller than ε,
or when max_iter iterations are reached (with an error)
"""
root(f, x, ϵ; max_iter = 100) = root(f, derivative(f), x, ϵ; max_iter = max_iter)
function root(f, f′, x, ϵ; max_iter = 100)
for i in 1:max_iter
x_old = x
x_mid = Interval(mid(x))
x = intersect(x, x_mid - f′(x) \ f(x_mid))
if x_old == x || isempty(x) || diam(x) < ϵ
return x
end
end
@info "Maximum iterates reached" max_iter, x, f(x)
return x
end
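# Illustrative sketch (an assumption, not part of the library API): enclosing
# √2 with the interval Newton contractor above; the derivative comes for free
# through DualNumbers.
_root_demo() = root(x -> x^2 - 2, Interval(1.0, 2.0), 1e-13)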
preimage(y, f, X, ϵ; max_iter=100) = root(x -> f(x)-y, X, ϵ; max_iter)
# superseded by IntervalOptimisation.jl
function range_estimate(f, domain, recstep = 5)
if recstep == 1
return f(domain)
else
a, b = bisect(domain)
Iₐ = range_estimate(f, a, recstep-1)
Iᵦ = range_estimate(f, b, recstep-1)
return Iₐ ∪ Iᵦ
end
end
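# Sketch of the dependency problem this helps with: evaluating x - x² on
# [0, 1] in one interval operation gives [-1, 1]; bisecting to depth 5
# shrinks the enclosure towards the true range [0, 1/4].
_range_estimate_demo() = range_estimate(x -> x - x^2, Interval(0, 1), 5)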
using LinearAlgebra
# this function generates the Jacobian for
# f(x_0)=x_1, f(x_1)=x_2, ..., f(x_{n-1})=y
coeff_interval(x::Array{Interval{T}, 1}) where {T} = T
function Jac(fprime, v::Vector{T}) where {T}
dv = fprime.(v)
ev = -ones(T, length(v)-1)
return Bidiagonal{T}(dv, ev, :U)
end
# Used in InducedLSV; won't touch it to avoid breaking stuff --federico
function ShootingMethod(f, fprime, n, x, y, rigstep = 10)
F = x->(f.(x)-[x[2:end]; y])
for i in 1:rigstep
x_mid = Interval{coeff_interval(x)}.(mid.(x))
x = intersect.(x, x_mid-Jac(fprime, x)\F(x_mid))
end
return x
end
"""
Newer version of the 'shooting method' to compute the kth preimage of a point (or interval y)
fs contains k functions, X contains their domains. This computes a solution of
fk(f_{k-1}( ... f1(x) ... )) = y.
Overwrites X with [x f1(x) f2(f1(x)) ... f_{k-1}(...)], so the
true solution is X[1].
Tries to avoid allocations and stuff.
"""
function nthpreimage!(y, fs, X; max_iter = 100)
newX = zero(X)
Xmid = zero(X)
n = length(X)
for i in 1:max_iter
Xmid .= Interval.(mid.(X))
newX[end] = (fs[end](Xmid[end]) - y) / derivative(fs[end])(X[end])
for i = length(X)-1:-1:1
newX[i] = (fs[i](Xmid[i]) - Xmid[i+1] + newX[i+1]) / derivative(fs[i])(X[i])
end
newX .= intersect.(Xmid - newX, X)
if isempty(newX) || newX == X
return newX
end
X .= newX
end
@info "Maximum iterates reached" max_iter
return X
end
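# Usage sketch (illustrative values, not library API): the second preimage of
# 1/2 under x ↦ 2x, i.e. the solution of f(f(x)) = 1/2 with f = 2x, which is 1/8.
function _nthpreimage_demo()
    fs = [x -> 2x, x -> 2x]
    X = [Interval(0, 1), Interval(0, 1)]
    return nthpreimage!(Interval(0.5), fs, X)[1]  # encloses 0.125
end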
end #module
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 1480 | # This implements the contractions time estimator for the "platonic"
# operator L, the original small matrix method in Galatolo-Nisoli-Saussol
# An elementary way to rigorously estimate convergence to equilibrium and escape rates
abstract type TrinormWeakCompare end
struct WeakStrongerTrinorm <: TrinormWeakCompare end
struct WeakTrinormUncomparable <: TrinormWeakCompare end
comparetrinormweak(Bas::Basis) = WeakTrinormUncomparable
comparetrinormweak(Bas::Ulam) = WeakStrongerTrinorm
#convergencerateabstract(Bas::Basis, D::Dynamic, norms) = _convergenceratesabstract(Bas, D, norms, comparetrinormweak(Bas))
#function _convergenceratesabstract(Bas::Basis, D::Dynamic, norms, ::WeakStrongerTrinorm)
# boundL = bound_weak_norm_abstract(Bas)
# A, B = dfly(strong_norm(Bas), aux_norm(Bas), D)
# m = length(coarse_norms)
#
# Kh = BasisDefinition.weak_projection_error(coarse_basis)
#
# strong_norms = fill(NaN, m+1)
# weak_norms = fill(NaN, m+1)
# strong_norms[1] = 1.
# weak_norms[1] = 1.
# for i in 1:m
function convergencerateabstract(Bas::Ulam, D::Dynamic, norms)
    boundL = BasisDefinition.bound_weak_norm_abstract(Bas)
    A, B = dfly(strong_norm(Bas), aux_norm(Bas), D)
    m = length(norms)

    Kh = BasisDefinition.weak_projection_error(Bas)
    C = (A ⊕₊ 1.0) ⊘₊ (1.0 ⊖₋ A)
    E = B ⊗₊ (A ⊕₊ 2.0)  # renamed from D to avoid shadowing the dynamic

    small_matrices = [[A^n B; Kh*C Kh*n*E+norms[n]] for n in 1:m]
    return small_matrices
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 2022 | """
defines generic Dynamic type
"""
module DynamicDefinition
using ValidatedNumerics
using IntervalOptimisation: maximise # needed by expansivity and max_distorsion below
using TaylorSeries: Taylor1
export Dynamic, MarkovDynamic, preim, nbranches, plottable, is_full_branch, domain, derivative, distorsion, endpoints, branch, expansivity, max_distorsion, orientation
abstract type Dynamic end
abstract type MarkovDynamic <: Dynamic end
domain(S::Dynamic) = @error "Not implemented"
nbranches(S::Dynamic) = @error "Not implemented"
branch(S::Dynamic, k) = @error "Not implemented"
plottable(S::Dynamic) = @error "Not implemented"
"""
preim(S::Dynamic, k, y, ϵ)
Computes the preim of y in branch k of a dynamic, with accuracy ϵ
"""
function preim end
is_full_branch(S::Dynamic) = @error "Not implemented"
"""
Endpoints of the branches, in increasing order (returned as a vector of intervals)
"""
endpoints(S::Dynamic) = @error "Not implemented"
# Derivative and distorsion of a generic function (*not* a dynamic). Here for convenience,
# the isempty check is required because otherwise derivative(x -> 4*x, ∅) == 4.
"""
Nth derivative of a function (or a dynamic)
"""
derivative(f, x) = derivative(1, f, x)
derivative(n, f, x) = isempty(x) ? ∅ : f(Taylor1([x, 1], n))[n] * factorial(n)
"""
Distorsion of a function (or a dynamic), i.e., |f′′ / f′^2|
"""
function distorsion(f, x)
if isempty(x)
return ∅
end
series = f(Taylor1([x, 1], 2))
f′ = series[1]
f′′ = 2*series[2]
return abs(f′′ / f′^2)
end
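# Sketch: automatic higher derivatives through Taylor series. The second
# derivative of x³ at 1 is 6; the distorsion of x ↦ 2x is 0 since f'' = 0.
_derivative_demo() = (derivative(2, x -> x^3, Interval(1)), distorsion(x -> 2x, Interval(1)))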
"""
Maximum of |1/T'|
"""
function expansivity(D::Dynamic, tol=1e-3)
v = endpoints(D)
return maximum(maximise(x -> abs(1/derivative(branch(D,k), x)), hull(v[k], v[k+1]), tol=tol)[1] for k in 1:nbranches(D))
end
"""
Maximum of distorsion(D, x) = |T''| / (T')^2, over all branches
"""
function max_distorsion(D::Dynamic, tol=1e-3)
v = endpoints(D)
return maximum(maximise(x -> distorsion(branch(D, k) , x), hull(v[k], v[k+1]), tol=tol)[1] for k in 1:nbranches(D))
end
"""
orientation(D, k)
Orientation of branch k: 1. for increasing, -1. for decreasing
"""
function orientation end
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 3521 | using ValidatedNumerics, SparseArrays
using ..DynamicDefinition, ..BasisDefinition
using LinearAlgebra
import ValidatedNumerics.IntervalArithmetic: mid
import Base: size, eltype
import LinearAlgebra: mul!
"""
Very generic assembler function
"""
function assemble(B::Basis, D::Dynamic, ϵ=0.0; T = Float64)
I = Int64[]
J = Int64[]
nzvals = Interval{T}[]
n = length(B)
# TODO: reasonable size hint?
for (i, dual_element) in DualComposedWithDynamic(B, D, ϵ)
if !is_dual_element_empty(B, dual_element)
for (j, x) in ProjectDualElement(B, dual_element)
push!(I, i)
push!(J, mod(j,1:n))
push!(nzvals, x)
end
end
end
return sparse(I, J, nzvals, n, n)
end
abstract type DiscretizedOperator end
struct IntegralPreservingDiscretizedOperator{T<:AbstractMatrix} <: DiscretizedOperator
L:: T
end
IntegralPreservingDiscretizedOperator(L) = IntegralPreservingDiscretizedOperator{typeof(L)}(L)
"""
An operator of the form Q = L + e*w (sparse plus rank-1).
"""
struct NonIntegralPreservingDiscretizedOperator{T<:AbstractMatrix, S<:AbstractVector, U<:AbstractMatrix} <: DiscretizedOperator
L:: T
e:: S
w:: U
end
NonIntegralPreservingDiscretizedOperator(L, e, w) = NonIntegralPreservingDiscretizedOperator{typeof(L), typeof(e), typeof(w)}(L, e, w)
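# Toy sketch of the sparse-plus-rank-one structure (the numbers are made up):
# `Q * v` computes L*v + e*(w*v), as implemented at the end of this file.
function _rank_one_demo()
    L = sparse([0.5 0.5; 0.25 0.75])
    e = ones(2)
    w = [0.1, -0.1]'  # covector, so that w*v is a scalar
    Q = NonIntegralPreservingDiscretizedOperator(L, e, w)
    return Q * [1.0, 0.0]  # [0.6, 0.35]
end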
# Some cruft needed for eigs
Base.size(Q::DiscretizedOperator) = size(Q.L)
Base.eltype(Q::DiscretizedOperator) = eltype(Q.L)
LinearAlgebra.issymmetric(Q::IntegralPreservingDiscretizedOperator) = issymmetric(Q.L)
LinearAlgebra.issymmetric(Q::NonIntegralPreservingDiscretizedOperator) = issymmetric(Q.L) && Q.e' == Q.w
opnormbound(N::Type{<:NormKind}, Q::IntegralPreservingDiscretizedOperator) = opnormbound(N, Q.L)
function DiscretizedOperator(B, D, ϵ=0.0; T = Float64)
L = assemble(B, D, ϵ; T)
if is_integral_preserving(B)
return IntegralPreservingDiscretizedOperator(L)
else
f = integral_covector(B)
e = one_vector(B)
w = f - f*L #will use interval arithmetic when L is an interval matrix
return NonIntegralPreservingDiscretizedOperator(L, e, w)
end
end
function opnormbound(N::Type{<:NormKind}, Q::NonIntegralPreservingDiscretizedOperator)
normL = opnormbound(N, Q.L)
norme = opnormbound(N, Q.e)
normw = opnormbound(N, Q.w)
return normL ⊕₊ norme ⊗₊ normw
end
function IntervalArithmetic.mid(Q::IntegralPreservingDiscretizedOperator)
return IntegralPreservingDiscretizedOperator(map(mid, Q.L))
end
function IntervalArithmetic.mid(Q::NonIntegralPreservingDiscretizedOperator)
# we are assuming that e is *not* an interval, for now. Types will break otherwise;
# this may need to be fixed in an API.
return NonIntegralPreservingDiscretizedOperator(map(mid, Q.L), Q.e, map(mid, Q.w))
end
function LinearAlgebra.mul!(Y, Q::IntegralPreservingDiscretizedOperator, v::AbstractArray, α, β)
mul!(Y, Q.L, v, α, β)
end
function LinearAlgebra.mul!(Y, Q::NonIntegralPreservingDiscretizedOperator, v::AbstractArray, α, β)
mul!(Y, Q.L, v, α, β)
T = Base.promote_eltype(Q.e, Q.w)
Y .+= convert(Array{T},Q.e) * (Q.w * v) * α
end
# these should be defined in terms of mul!, but it's simpler for now
function Base.:*(Q::IntegralPreservingDiscretizedOperator, v::Array)
return Q.L * v
end
function Base.:*(Q::NonIntegralPreservingDiscretizedOperator, v::Array)
T = Base.promote_eltype(Q.e, Q.w)
return Q.L * v + convert(Array{T},Q.e) * (Q.w * v)
end
BasisDefinition.is_integral_preserving(Q::NonIntegralPreservingDiscretizedOperator) = false
BasisDefinition.is_integral_preserving(Q::IntegralPreservingDiscretizedOperator) = true
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 5441 | using LinearAlgebra, Arpack, FastRounding, ValidatedNumerics
using ..DynamicDefinition, ..BasisDefinition
export invariant_vector, finepowernormbounds, powernormbounds
"""
Return a numerical approximation to the (hopefully unique) invariant vector
of the dynamic with discretized operator Q.
The vector is normalized so that integral_covector(B)*w ≈ 1
"""
function invariant_vector(B::Basis, Q::DiscretizedOperator; tol = 0.0)
mQ = mid(Q)
n = size(Q)[1]
# setting a larger nev seems to slow things down
F = eigs(mQ; tol=tol, nev=1, ritzvec=true, v0=ones((n,)))
w = F[2][:, 1]
@assert imag(w) ≈ zeros(n)
w = real(w) # this seems a pretty safe assumption.
# In the Ulam case, in principle we could enforce w >= 0, but in practice
# it will hardly ever be relevant.
w = w ./ (mid.(integral_covector(B))*w) #normalization
return w
end
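# Usage sketch, mirroring the example scripts in this repository: discretize
# the Lanford map on a small Ulam basis and approximate its invariant density.
function _invariant_vector_demo()
    D = mod1_dynamic(x -> 2x + 0.5x*(1 - x))
    B = Ulam(128)
    Q = DiscretizedOperator(B, D)
    return invariant_vector(B, Q)
end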
"""
Return an upper bound to Q_h*w - w in the given norm
"""
function residualbound(N::Type{<:NormKind}, Q::DiscretizedOperator, w::AbstractVector)
return normbound(N, Q*w - w)
end
"""
Bounds rigorously the distance of w from the fixed point of Q (normalized with integral = 1),
using a vector of bounds norms[k] ≥ ||Q_h^k|_{U_h^0}||.
"""
function distance_from_invariant(B::Basis, D::Dynamic, Q::DiscretizedOperator, w::AbstractVector, norms::Vector;
        ε₁::Float64 = residualbound(weak_norm(B), Q, w),
        ε₂::Float64 = mag(integral_covector(B) * w - 1),
        normQ::Float64 = opnormbound(weak_norm(B), Q))
if ε₂ > 1e-8
@error "w does not seem normalized correctly"
end
us = BasisDefinition.invariant_measure_strong_norm_bound(B, D)
Cs = infinite_sum_norms(norms)
Kh = BasisDefinition.weak_projection_error(B)
normw = normbound(weak_norm(B), w)
return Cs ⊗₊ (2. ⊗₊ Kh ⊗₊ (1. ⊕₊ normQ) ⊗₊ us ⊕₊ ε₁ ⊘₊ (1. ⊖₋ ε₂)) ⊕₊ ε₂ ⊘₊ (1. ⊖₋ ε₂) ⊗₊ normw
end
# """
# This function returns a sequence of Cᵢ, \\tilde{C}ᵢ for a matrix P
# on a subspace V such that ||P^i|_V||_1\\leq C_i and
# ||P^i|_V||_{\\infty}\\leq \\tilde{C}_i, with respect to the
# """
# function contractmatrix(B::Basis, P::AbstractMatrix{Interval{T}}, m) where {T}
# # vector of the Cᵢ for P
# C = zeros(m)
# S = zeros((length(B), m))
# tilde_C = zeros(m)
#
# PP = mid.(P)
#
# for v in BasisDefinition.AverageZero(B)
# λ₁, λ₂ = norm(v, 1), norm(v, Inf)
# for i in 1:m
# v = PP*v
# C[i] = max(C[i], norm(v, 1)/λ₁)
# S[:, i]+=abs.(v)/λ₂
# end
# end
#
#
# for k in 1:m
# tilde_C[k] = maximum(S[:,k])
# end
#
# # we keep track of the error due to the basis we have chosen
# η₁ = BasisDefinition.spaceconstant(B, Val(:L1))
# η₂ = BasisDefinition.spaceconstant(B, Val(:L∞))
#
# return η₁*C, η₂*tilde_C
# end
#
# @deprecate contractmatrix norms_of_powers
"""
This function returns the bound on the weak norm of the discretized operator
"""
function boundnorm(B::Basis, P::AbstractMatrix{Interval{T}}, m) where {T}
W₁, W₂ = BasisDefinition.bound_weak_norm_from_linalg_norm(B)
α₁ = BasisDefinition.bound_linalg_norm_L1_from_weak(B)
α₂ = BasisDefinition.bound_linalg_norm_L∞_from_weak(B)
C, tilde_C = contractmatrix(B, P, m)
return (W₁/α₁)*C+(W₂/α₂)*tilde_C
end
"""
Uses different strategies to compute power norm bounds.
If specified, `m` norms of powers are estimated computationally, and then
`m_extend` norms are obtained with a cheaper refinement process. Otherwise
these numbers are selected automatically.
A vector of length m_extend is returned, such that norms[k] ≥ ||Q_h^k|_{U_h^0}||
"""
function powernormbounds(B, D, m, m_extend; Q=DiscretizedOperator(B, D))
trivial_norms = norms_of_powers_trivial(weak_norm(B), Q, m)
computed_norms = norms_of_powers(weak_norm(B), m, Q, integral_covector(B))
(dfly_strongs, dfly_norms) = norms_of_powers_dfly(B, D, m)
# in the current version, dfly_norms seem to be always larger and could be omitted
# however they do not cost much to compute
norms = min.(trivial_norms, computed_norms, dfly_norms)
better_norms = refine_norms_of_powers(norms, m_extend)
return better_norms
end
function powernormbounds(B, D; Q=DiscretizedOperator(B, D))
m = 8
computed_norms = []
while true
computed_norms = norms_of_powers(weak_norm(B), m, Q, integral_covector(B))
if any(computed_norms .< 0.1)
break
end
m = 2*m
end
trivial_norms = norms_of_powers_trivial(weak_norm(B), Q, m)
(dfly_strongs, dfly_norms) = norms_of_powers_dfly(B, D, m)
# in the current version, dfly_norms seem to be always larger and could be omitted
# however they do not cost much to compute
norms = min.(trivial_norms, computed_norms, dfly_norms)
m_extend = 2*m
better_norms = []
while true
better_norms = refine_norms_of_powers(norms, m_extend)
if better_norms[end] < 1e-8
break
end
m_extend = 2*m_extend
end
return better_norms
end
"""
Uses power norm bounds already computed for a coarse operator to estimate
the same norms for a finer operator
"""
function finepowernormbounds(B, B_fine, D, coarse_norms; Q_fine=DiscretizedOperator(B_fine, D))
m = length(coarse_norms)
norm_Q_fine = opnormbound(weak_norm(B_fine), Q_fine)
trivial_norms_fine = norms_of_powers_trivial(weak_norm(B_fine), Q_fine, m)
twogrid_norms_fine = norms_of_powers_from_coarser_grid(B_fine, B, D, coarse_norms, norm_Q_fine)
(dfly_strongs_fine, dfly_norms_fine) = norms_of_powers_dfly(B_fine, D, m)
norms_fine = min.(trivial_norms_fine, twogrid_norms_fine, dfly_norms_fine)
better_norms_fine = refine_norms_of_powers(norms_fine, m)
return better_norms_fine
end
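# Two-grid usage sketch (cf. the warmup script earlier in this repository):
# norm bounds computed on a coarse grid are reused for a finer operator.
function _two_grid_demo()
    D = mod1_dynamic(x -> 2x + 0.5x*(1 - x))
    B, B_fine = Ulam(64), Ulam(1024)
    coarse_norms = powernormbounds(B, D)
    return finepowernormbounds(B, B_fine, D, coarse_norms)
end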
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 8193 | """
Hat basis on the Torus [0,1]
"""
using ..BasisDefinition, ..Mod1DynamicDefinition, ..DynamicDefinition
using ValidatedNumerics
import ..BasisDefinition: one_vector, integral_covector, is_integral_preserving
struct Hat{T<:AbstractVector} <:Basis
p::T
# TODO: check in constructor that p is sorted, starts with 0 and ends with 1
end
Hat(n::Integer) = Hat(LinRange(0., 1., n+1))
"""
Return the size of the Hat basis
"""
Base.length(B::Hat) = length(B.p) - 1
"""
Hat function (on the reals)
This is a piecewise linear function such that:
f(x) = 0 if x <= lo
f(mi) = 1
f(x) = 0 if x >= hi
"""
struct HatFunction{T<: Number}
lo::T
mi::T
hi::T
function HatFunction{T}(lo, mi, hi) where T <: Number
@assert lo <= mi <= hi
new{T}(lo, mi, hi)
end
end
HatFunction(lo::T, mi::T, hi::T) where {T<: Number} = HatFunction{T}(lo,mi,hi);
HatFunction(lo::Number, mi::Number, hi::Number) = HatFunction(promote(lo,mi,hi)...)
"""
Evaluate a HatFunction (on the real line).
Must work correctly when S is an interval.
"""
function (f::HatFunction{T})(x::S) where T where S <: Number
lo = convert(S, f.lo)
mi = convert(S, f.mi)
hi = convert(S, f.hi)
left_branch = (x-lo)/(mi-lo)
right_branch = (hi-x)/(hi-mi)
# 1 is not necessary in practice, but it avoids spurious results from rounding
return max(min(left_branch, right_branch, 1), 0)
end
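# Sketch: the hat peaked at 1/2 on [0, 1]; evaluated on an interval it returns
# an enclosure of its range there, e.g. [1/2, 1] on the interval [1/4, 1/2].
_hat_demo() = HatFunction(0.0, 0.5, 1.0)(Interval(0.25, 0.5))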
"""
A separate type for intervals on the torus (mod 1) to "remind" us of the quotient
The interval is normalized in the constructor: the caller may assume that
* 0 <= i.lo < 1
* i.hi < i.lo + 1 OR i==Interval(0,1)
"""
struct IntervalOnTorus{T <: Real}
I::Interval{T}
function IntervalOnTorus{T}(I::Interval) where {T<:Real}
# Note that this test may "expand" intervals such as 1e-30..1, to 0..1, but it is not a big issue anyway
if diam(I) >= 1.
new{T}(0..1)
else
# Note that I - floor(I.lo) may return something smaller than zero in some rounding modes
new{T}(max(I - floor(I.lo), 0))
end
end
end
IntervalOnTorus(I::Interval{T}) where {T} = IntervalOnTorus{T}(I)
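# Sketch: the constructor normalizes mod 1, e.g. [1.9, 2.05] ↦ [0.9, 1.05],
# while any interval of diameter ≥ 1 collapses to the whole torus [0, 1].
_torus_demo() = IntervalOnTorus(Interval(1.9, 2.05))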
"""
Hat function (on the torus)
This is a piecewise linear function such that:
f(x) = 0 if x <= lo
f(mi) = 1
f(x) = 0 if x >= hi
"""
struct HatFunctionOnTorus{T<: Number}
lo::T
mi::T
hi::T
function HatFunctionOnTorus{T}(lo, mi, hi) where {T <: Number}
@assert 0 <= lo < 1
@assert 0 <= mi < 1
@assert 0 <= hi < 1
new{T}(lo, mi, hi)
end
end
HatFunctionOnTorus(lo::T, mi::T, hi::T) where {T<: Number} = HatFunctionOnTorus{T}(lo,mi,hi);
HatFunctionOnTorus(lo::Number, mi::Number, hi::Number) = HatFunctionOnTorus(promote(lo,mi,hi)...)
"""
Evaluate a HatFunctionOnTorus correctly on an IntervalOnTorus
Assumption: this is only called on functions defined on our partition,
so either mi==0, hi==0, or the three values are in increasing order
"""
function (f::HatFunctionOnTorus{T})(x::IntervalOnTorus) where {T}
lo = Interval{T}(f.lo)
mi = Interval{T}(f.mi)
hi = Interval{T}(f.hi)
# Since I is normalized, we only need two "hats": the one centered in mi,
# and the one centered in mi+1. It is not complicated to check this
# (one needs to treat mi==0 separately)
# constructs correct endpoints for the hat centered in mi, also in the edge cases
if mi == 0
lo = lo - 1
end
if hi == 0
hi = 1
end
I = x.I
left_branch = (I-lo)/(mi-lo)
right_branch = (hi-I)/(hi-mi)
# 1 is not necessary in practice, but it avoids spurious results from rounding
first_hat = max(min(left_branch, right_branch, 1), 0)
I = x.I - 1
left_branch = (I-lo)/(mi-lo)
right_branch = (hi-I)/(hi-mi)
# 1 is not necessary in practice, but it avoids spurious results from rounding
second_hat = max(min(left_branch, right_branch, 1), 0)
return max(first_hat, second_hat)
end
"""
makes so that B[j] returns a HatFunctionOnTorus with the j-th basis element
"""
function Base.getindex(B::Hat, i::Int)
n = length(B)
@boundscheck 1 <= i <= n || throw(BoundsError(B, i))
return HatFunctionOnTorus(B.p[mod(i-1, 1:n)], B.p[mod(i, 1:n)], B.p[mod(i+1, 1:n)])
end
"""
Return (in an iterator) the pairs (i, (x, |T'(x)|)) where x is a preimage of p[i], which
describe the "dual" L* evaluation(p[i])
"""
function Base.iterate(S::DualComposedWithDynamic{T, D}, state = (1, 1)) where T<:Hat where D<:Dynamic
@assert is_full_branch(S.dynamic)
i, k = state
if i == length(S.basis)+1
return nothing
end
x = preim(S.dynamic, k, S.basis.p[i], S.ϵ)
absT′ = abs(derivative(S.dynamic, x))
if k == nbranches(S.dynamic)
return ((i, (x, absT′)), (i+1, 1))
else
return ((i, (x, absT′)), (i, k+1))
end
end
function BasisDefinition.is_dual_element_empty(::Hat, d)
# TODO: the preim() may indeed be empty, so there could be an additional check here
return false
end
BasisDefinition.is_refinement(Bf::Hat, Bc::Hat) = Bc.p ⊆ Bf.p
function integral_covector(B::Hat)
n = length(B)
return 1/n * ones(Interval{Float64}, n)'
end
function one_vector(B::Hat)
return ones(length(B))
end
"""
Return the range of indices of the elements of the basis whose support intersects
with the given dual element (i.e., a pair (y, absT')).
The range may end with length(B)+1; this must be interpreted "mod length(B)":
it means that it intersects with the hat function peaked in 0 as well
(think for instance y = 0.9999).
"""
function BasisDefinition.nonzero_on(B::Hat, dual_element)
y, absT′ = dual_element
# Note that this cannot rely on arithmetic unless it is verified
y = y ∩ Interval(0.,1.) # we assume it's bona-fide interval in [0,1]
# this should work for preims(), since they are supposed to return
# a number in [0,1]
# finds in which semi-open interval [p[k], p[k+1]) y.lo and y.hi fall
lo = searchsortedlast(B.p, y.lo)
hi = searchsortedlast(B.p, y.hi)
lo = min(lo, length(B)) # lo may be n+1 if y.lo==1
hi = min(hi, length(B)) # hi may be n+1 if y.hi==1
hi = hi + 1 # because the hat centered in p[k] is also nonzero in the interval before
if lo == 1 # 1:N+1 does not make sense and would mean that the first interval is counted twice
hi = min(hi, length(B))
end
return (lo, hi)
end
"""
Given a preimage ```y``` of a point ```x```, this iterator returns
```\\phi_j(y)/T'(y) ```
"""
function Base.iterate(S::ProjectDualElement{T,DT}, state = S.j_min) where {T <: Hat,DT}
if state == S.j_max+1
return nothing
end
y, absT′ = S.dual_element
j = state
y_normalized = IntervalOnTorus(y)
n = length(S.basis)
return ((j, S.basis[mod(j, 1:n)](y_normalized) / absT′),
state+1)
end
BasisDefinition.strong_norm(B::Hat) = Lipschitz
BasisDefinition.weak_norm(B::Hat) = Linf
BasisDefinition.aux_norm(B::Hat) = L1
evaluate_integral(B::Hat, i, T) = T(i)/length(B)
function Base.iterate(S::AverageZero{Hat}, state = 1)
n = length(S.basis)
if state == n
return nothing
end
v = zeros(Float64, n)
v[1] = 1
v[state+1]=-1
return (v, state+1)
end
BasisDefinition.weak_projection_error(B::Hat) = 0.5 ⊘₊ Float64(length(B), RoundDown)
BasisDefinition.aux_normalized_projection_error(B::Hat) = 0.5 ⊘₊ Float64(length(B), RoundDown)
BasisDefinition.strong_weak_bound(B::Hat) = 2. ⊗₊ Float64(length(B), RoundDown)
BasisDefinition.aux_weak_bound(B::Hat) = 1.
BasisDefinition.weak_by_strong_and_aux_bound(B::Hat) = (1., 1.)
BasisDefinition.bound_weak_norm_from_linalg_norm(B::Hat) = @error "TODO"
BasisDefinition.bound_linalg_norm_L1_from_weak(B::Hat) = @error "TODO"
BasisDefinition.bound_linalg_norm_L∞_from_weak(B::Hat) = @error "TODO"
function BasisDefinition.invariant_measure_strong_norm_bound(B::Hat, D::Dynamic)
    A, C = dfly(strong_norm(B), aux_norm(B), D) # C renamed to avoid shadowing the basis B
    @assert A < 1.
    return C ⊘₊ (1. ⊖₋ A)
end
using RecipesBase, LaTeXStrings # LaTeXStrings for the L"..." labels below
"""
Plots a function in the Hat basis
"""
@recipe function f(B::Hat, w::AbstractVector)
legend --> :bottomright
if eltype(w) <: Interval
w = mid.(w)
end
@series begin
seriestype --> :path
label --> L"f_{\delta}"
ylims --> (0, NaN)
B.p, vcat(w, w[end])
end
end
"""
Displays error on a function in the Hat basis
"""
@recipe function f(B::Hat, error::Number, w)
if eltype(w) <: Interval
w = mid.(w)
end
if isfinite(error)
@series begin
seriestype --> :path
seriesalpha --> 0.5
fillrange --> vcat(w, w[end]) .- error
label --> "Error area"
B.p, vcat(w, w[end]) .+ error
end
end
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 2759 | using Symbolics
# this symbolic structure corresponds to a symbolic transfer operator
# (L_n f)(x) = \sum_{y\in T^{-1}(x)}\frac{f(y)}{|T'(y)|^n}
struct SymbL
n
f
end
@variables x D(x)
∂ = Differential(x)
Diff(P::SymbL) = [SymbL(P.n+1, ∂(P.f)), SymbL(P.n, P.n*P.f*D)]
function Diff(v::Vector{SymbL})
w = SymbL[]
for P in v
append!(w, Diff(P))
end
return w
end
function compute_dfly_k_fi_DDi(k::Int)
@variables x
@variables f[0:k](x)
@variables DD[0:k](x)
∂ = Differential(x)
der_dict = Dict([[D => DD[1] ]; [∂(f[i]) => f[i+1] for i in 1:k]; [∂(DD[i]) => DD[i+1] for i in 1:k]])
P = SymbL(1, f[1]) #Lf
v = P
for i in 1:k
v = Diff(v)
for k in 1:length(v)
l = expand_derivatives(v[k].f, true)
l = substitute(l, der_dict)
l = simplify(l; expand = true)
v[k] = SymbL(v[k].n, l)
end
end
return v
end
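# Usage sketch: the SymbL summands of the k-th derivative of Lf, expressed in
# the symbolic variables f[i], DD[i] introduced inside the function.
_dfly_terms_demo() = compute_dfly_k_fi_DDi(2)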
using SymbolicUtils
function _optimize_mult(k, n, h::SymbolicUtils.Mul, vals)
@variables x
@variables f[0:k](x)
boolf = [(symb in keys(h.dict)) for symb in f]
@variables DD[0:k](x)
boolDD = [(symb in keys(h.dict)) for symb in DD]
# I start with a simple version, where I use the n on
# the highest derivative of DD, in further versions
# I will try to optimize over all possibilities
i = findlast(boolDD)
pow = h.dict[DD[i]]
# This uses the computed bounds to compute
# ||DDᵢ/(T')^n-1||_{∞}*||DD_i||_{∞}^{pow-1}
bound = vals[i+1, n]*vals[i+1, 1]^(pow-1)
for j in 1:i-1
if boolDD[j]!= false
pow = h.dict[DD[j]]
            bound *= vals[j+1, 1]^pow # ||DD_j^pow|| ≤ ||DD_j||^pow
end
end
i = findlast(boolf)
return h.coeff*bound*f[i]
end
function optimize_coefficients(k, v::Vector{SymbL}, vals)
out = Num(0)
for x in v
n = x.n
#@info "x" x
if typeof(x.f.val) <: SymbolicUtils.Mul
y = x.f.val
val = _optimize_mult(k, n, y, vals)
out+= _optimize_mult(k, n, x.f.val, vals)
elseif typeof(x.f.val) <: SymbolicUtils.Add
for y in keys(x.f.val.dict)
val = _optimize_mult(k, n, y, vals)
out+= x.f.val.dict[y]*val
end
end
end
return out
end
function substitute_values(k, v::Vector{SymbL}, vals)
    λ = vals[1]
    DDs = vals[2:end]
    @variables x
    @variables f[0:k](x)
    @variables DD[0:k](x)
    subsdict = Dict([DD[i]=>DDs[i] for i in 1:k])
    w = zeros(Num, length(v))
    for j in 1:length(v) # j, not k: the loop must not shadow the argument k
        n = v[j].n
        w[j] = λ^(n-1)*substitute(v[j].f, subsdict)
    end
    return w
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 9361 | module InducedLSVMapDefinition
using ValidatedNumerics
using ..DynamicDefinition, ..Contractors
import ..Hat
import ..BasisDefinition: DualComposedWithDynamic
import ..C2BasisDefinition: C2Basis, dual_val, dual_der
export ApproxInducedLSV, preim, nbranches, plottable
"""
This type implements ApproxInducedLSV,
the induced map for the Liverani-Saussol-Vaienti maps
on the interval I = [0.5, 1], truncated to finitely many branches.
The interval I is then rescaled to [0, 1].
"""
struct ApproxInducedLSV <: Dynamic
TnListPlottable::Array{Function, 1}
nbranches::Integer
domains::Array{Interval, 1}
α::Real
T::Function
end
# coordinate change that maps [0.5, 1] to [0, 1]
@inline CoordinateChange(x) = 2*x-1
# inverse coordinate change
@inline InvCoordinateChange(x) = x/2+0.5
function derleft(D::ApproxInducedLSV, x)
@assert 0<=x<=0.5
α = D.α
return 1+2^α*(α+1)*x^α # check that α must be an interval?
end
function derderleft(D::ApproxInducedLSV, x)
@assert 0<=x<=0.5
α = D.α
return 2^α*(α+1)*α*x^(α-1) # check that α must be an interval?
end
function ShootingLSV(n, y, α, rigstep = 10; T = Float64)
x = [Interval{T}(0.5, 1); Interval{T}(0, 0.5)*ones(Interval{T}, n-1)]
f(x) = 0<=x<=0.5 ? x*(1+(2*x)^α) : 2x-1
fprime(x) = 0<=x<=0.5 ? 1+(α+1)*(2*x)^α : 2.
return ShootingMethod(f, fprime, n, x, y, rigstep)
end
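# Usage sketch (this mirrors how GetDomains calls it below): ShootingLSV(n, y, α)
# returns a rigorous enclosure of the orbit x₀, …, x_{n-1} with x₀ ∈ [0.5, 1] and Tⁿ(x₀) = y,
# so for instance
#
#   ShootingLSV(2, 0.5, 0.5)[1]   # encloses the point x ∈ [0.5, 1] with T²(x) = 0.5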
function GetDomains(branches, α; T = Float64)
domains=Interval{T}[]
left = Interval{T}(0.5)
for i in branches:-1:2
right = ShootingLSV(i, 0.5, α; T = T)[1]
push!(domains,union(left,right))
left = right
#for i=2:branches
# push!(domains, interval(ShootingLSV(i, 0.5, α)[1].lo, ShootingLSV(i-1, 0.5, α)[1].hi))
end
push!(domains, union(left, Interval{T}(0.75)))
push!(domains, union(Interval{T}(0.75), Interval{T}(1)))
return domains
end
function DefineTnPlottable(branches, α; T = Float64)
Tleft(x)=x*(1+(2*x)^α)
Tnlist = Array{Function, 1}(undef, branches+1)
Tnlist[branches+1] = x->2*x-1
for i=branches:-1:2
Tnlist[i] = x-> Tleft(Tnlist[i+1](x))
end
right = mid(ShootingLSV(branches, 0.5, α; T = T)[1])
Tnlist[1] = x-> 0.5*1/(right-0.5)*(x-0.5)+0.5
return Tnlist
end
"""
This constructor builds the induced LSV map on [0.5, 1],
truncated with k branches
"""
function ApproxInducedLSV(α, k)
nbranches = k+1
domains = GetDomains(k, α)
TnListPlottable = DefineTnPlottable(k, α)
return ApproxInducedLSV(TnListPlottable, nbranches, domains, α, x-> 0<=x<=0.5 ? x*(1+(2*x)^α) : 2x-1)
end
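# Hedged usage example (the parameter values are illustrative only):
#
#   D = ApproxInducedLSV(0.5, 10)   # induced LSV map with α = 0.5, truncated at k = 10
#   nbranches(D)                    # == 11 = k+1 branches, including the manufactured first one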
domains(D::ApproxInducedLSV)=CoordinateChange.(D.domains)
DynamicDefinition.nbranches(S::ApproxInducedLSV)=S.nbranches
DynamicDefinition.is_full_branch(S::ApproxInducedLSV) = true
function _T(x, domains, TnList)
@assert 0 ≤ x ≤ 1
x = InvCoordinateChange(x)
for (i, I) in enumerate(domains)
if x in I
return CoordinateChange(TnList[i](x))
end
end
return 0
end
DynamicDefinition.plottable(D::ApproxInducedLSV, x) = _T(x, D.domains, D.TnListPlottable)
function DynamicDefinition.preim(D::ApproxInducedLSV, k, y, ϵ)
@assert 1 <= k <= D.nbranches
_y = InvCoordinateChange(y)
if k == 1
right = ShootingLSV(D.nbranches-1, 0.5, D.α)[1]
_x = (2*_y-1)*(right-0.5)+0.5
return CoordinateChange(_x)
elseif k == D.nbranches
_x = (_y+1)/2
return CoordinateChange(_x)
else
_x = ShootingLSV(D.nbranches-k+1, _y, D.α)[1]
return CoordinateChange(_x)
end
end
#returns the preimage of a point in a branch with the der and the second derivative
function preimwithder_derder(D::ApproxInducedLSV, k, y, ϵ) #::NTuple{Interval, 3}
@assert 1 <= k <= D.nbranches
y = Interval(y) #hack, please check
_y = InvCoordinateChange(y)
if k == 1 # the manufactured branch
right = ShootingLSV(D.nbranches-1, 0.5, D.α)[1]
_x = (2*_y-1)*(right-0.5)+0.5
return CoordinateChange(_x), 0.5/(right-0.5), 0
elseif k == D.nbranches # the linear branch
_x = (_y+1)/2
return CoordinateChange(_x), 2, 0
else
orbit_x = ShootingLSV(D.nbranches-k+1, _y, D.α)
der = 2
derder = 0
for i in 2:length(orbit_x)
dx = derleft(D, orbit_x[i])
ddx = derderleft(D, orbit_x[i])
derder = ddx*der^2+dx*derder
der*=dx
end
return CoordinateChange(orbit_x[1]), der, derder/2
# this /2 follows from the coordinate change
# i.e., we are looking at the map F(x) = ψ(f(ϕ(x))) where
# ϕ maps linearly [0,1]->[0.5, 1] and ψ is the
# inverse map
# by a direct computation we get F''(x) = ψ'(f(ϕ(x)))f''(ϕ(x))(ϕ'(x))^2
end
end
#returns the preimage of a point in a branch with the der and the second derivative
function preimwithder(D::ApproxInducedLSV, k, y, ϵ) #::NTuple{Interval, 3}
@assert 1 <= k <= D.nbranches
y = Interval(y) #hack, please check
_y = InvCoordinateChange(y)
if k == 1 # the manufactured branch
right = ShootingLSV(D.nbranches-1, 0.5, D.α)[1]
_x = (2*_y-1)*(right-0.5)+0.5
return CoordinateChange(_x), 0.5/(right-0.5), 0
elseif k == D.nbranches # the linear branch
_x = (_y+1)/2
return CoordinateChange(_x), 2, 0
else
orbit_x = ShootingLSV(D.nbranches-k+1, _y, D.α)
der = 2
for i in 2:length(orbit_x)
dx = derleft(D, orbit_x[i])
der*=dx
end
return CoordinateChange(orbit_x[1]), der
end
end
"""
Return (in an iterator) the pairs (i, (x, |T'(x)|)), where x ranges over the preimages of p[i];
these describe the action of the dual operator L* on the evaluation functional at p[i]
"""
function Base.iterate(S::DualComposedWithDynamic{T, ApproxInducedLSV}, state = (1, 1)) where T<:Hat
i, k = state
if i == length(S.basis)+1
return nothing
end
n = length(S.basis.p)
x, der, derder = preimwithder_derder(S.dynamic, k, S.basis.p[i], S.ϵ)
ret = x, abs(der)
if k == nbranches(S.dynamic)
return ((i, ret), (i+1, 1))
else
return ((i, ret), (i, k+1))
end
end
"""
Return (in an iterator) the pairs (i, (x, |T'(x)|)), where x ranges over the preimages of p[i];
these describe the action of the dual operator L* on the evaluation functional at p[i]
"""
function Base.iterate(S::DualComposedWithDynamic{T, ApproxInducedLSV}, state = (1, 1)) where T<:C2Basis
i, k = state
if i == length(S.basis)+1
return nothing
end
n = length(S.basis.p)
if i <= n
x, der, derder = preimwithder_derder(S.dynamic, k, S.basis.p[i], S.ϵ)
ret = x, (f, fprime) -> dual_val(f, fprime, x, der, derder)
else
x, der, derder = preimwithder_derder(S.dynamic, k, S.basis.p[i-n], S.ϵ)
ret = x, (f, fprime) -> dual_der(f, fprime, x, der, derder)
end
if k == nbranches(S.dynamic)
return ((i, ret), (i+1, 1))
else
return ((i, ret), (i, k+1))
end
end
function DynamicDefinition.derivative(D::ApproxInducedLSV, x)
@error "Not implemented"
end
function iterate_LSV(x, i, α)
@assert i>0
x = 2*x-1
for j in 2:i
x = x*(1+(2*x)^α)
end
return x
end
using RecipesBase
@recipe f(::Type{ApproxInducedLSV}, D::ApproxInducedLSV) = x -> plottable(D, x)
end
function InvariantMeasures.Dual(B::Chebyshev, D::InducedLSVMapDefinition.ApproxInducedLSV, ϵ = 0.0; T = Float64)
labels = Int64[]
x = Interval{T}[]
x′ = Interval{T}[]
for k in 1:D.nbranches
@info "$k-th branch"
for i in 1:length(B.p)
x_i, x_i_prime = InducedLSVMapDefinition.preimwithder(D, k, B.p[i], ϵ)
append!(labels, i)
append!(x, x_i)
append!(x′, x_i_prime)
end
end
return x, labels, x′
end
# this function belongs to the InvariantMeasures namespace;
# this is the reason why we define it outside the submodule
using TaylorSeries: Taylor1
function dfly(::Type{TotalVariation}, ::Type{L1}, D::InvariantMeasures.InducedLSVMapDefinition.ApproxInducedLSV)
dist = @interval(0.)
lam = @interval(0.)
for i in 1:D.nbranches
if i==1
right = InducedLSVMapDefinition.ShootingLSV(D.nbranches-1, 0.5, D.α)[1]
lam = max(lam, 2*(right-0.5).hi)
dist = max(dist, 0)
elseif i==D.nbranches
lam = max(lam, 0.5)
dist = max(dist, 0)
else
f(x) = InducedLSVMapDefinition.iterate_LSV(x, D.nbranches-i+1, D.α)
fprime(x) = f(Taylor1([x, 1], 1))[1]
fsecond(x) = f(Taylor1([x, 1], 2))[2]/2
distorsion(x)=abs(fsecond(x)/(fprime(x)^2))
lambda(x) = abs(1/fprime(x))
dist = max(dist, maximise(distorsion, D.domains[i])[1].hi)
lam = max(lam, maximise(lambda, D.domains[i])[1].hi)
end
end
return lam.hi, dist.hi
end
import TaylorSeries
import TaylorSeries: Taylor1
function derivatives_D(α, k, l; T = Float64)
w = zeros(Interval, (l, l))
right = Interval(1.0)
for i in 1:k
@info i
left = InducedLSVMapDefinition.ShootingLSV(i, 0.5, α; T = T)[1]
f(x) = InducedLSVMapDefinition.iterate_LSV(x, i, α)
g(x) = 1/(TaylorSeries.derivative(f(Taylor1([x, 1], l))))
dom = Interval(left.lo, right.hi)
tol = diam(dom)*2^(-10)
for di = 0:l-1, dj = 0:l-1 # renamed to avoid shadowing the outer loop index i
h(x) = abs((factorial(di)*g(x)[di])*g(x)[0]^dj) # \partial^di (1/T') * (1/T')^dj
val = maximise(h, dom, tol = tol)[1]
w[di+1, dj+1] = max(w[di+1, dj+1], val)
end
right = left
if mod(i,20) == 0
@info w
end
end
return w
end
#import DualNumbers
#function bound_b_ω(α, k; T = Float64)
# right = Interval(1.0)
# for i in 1:k
# @info i
# left = InducedLSVMapDefinition.ShootingLSV(i, 0.5, α; T = T)[1]
# dom = hull(left, right)
# f(α, x) = InducedLSVMapDefinition.iterate_LSV(x, i, α)
# f_prime_α(x) = f(DualNumbers.Dual(α, 1), x).epsilon
# f_prime_x(x) = f(α, DualNumbers.Dual(x, 1)).epsilon
# h(x) = -f_prime_α(x)/f_prime_x(x)
# dom = Interval(left.lo, right.hi)
# tol = diam(dom)*2^(-10)
# val = maximise(h, dom, tol = tol)[1]
# @info val
# end
# return w
#end | InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 2057 | module InvariantMeasures
using IntervalArithmetic: range_atan
abstract type NormKind end
struct L1 <: NormKind end
struct L2 <: NormKind end
struct Linf <: NormKind end
struct Lipschitz <: NormKind end
struct TotalVariation <: NormKind end
struct ℓ1 <: NormKind end
struct ℓinf <: NormKind end
# the module Contractors does not depend on any submodule
include("Contractors.jl")
include("DynamicDefinition.jl")
include("BasisDefinition.jl")
include("GenericAssembler.jl")
include("GenericEstimate.jl")
include("PwDynamicDefinition.jl")
include("Mod1Dynamic.jl")
include("Mod1PwDynamic.jl")
include("IterateDynamic.jl")
include("UlamBasis.jl")
include("HatBasis.jl")
using .DynamicDefinition, .BasisDefinition, .Mod1DynamicDefinition, .Contractors, .PwDynamicDefinition
include("Norms.jl")
include("pitrig.jl")
include("NormsOfPowers.jl")
include("preimages.jl")
include("precompile.jl")
export NormKind, L1, Linf, Lipschitz, TotalVariation
export Ulam, PwMap, Mod1Dynamic, Basis, Dynamic, assemble, preim, Hat,
EquispacedPartition, norms_of_powers, sinpi, cospi, dfly,
DiscretizedOperator, IntegralPreservingDiscretizedOperator, NonIntegralPreservingDiscretizedOperator,
opnormbound, weak_norm, strong_norm, aux_norm, integral_covector, distance_from_invariant,
mod1_dynamic, Iterate, derivative, distorsion, endpoints, nbranches, branch,
expansivity, max_distorsion, is_refinement,
skip_beginning, last_end, preimages
import ValidatedNumerics: Interval
export Interval
include("C2Basis.jl")
using .C2BasisDefinition
export C2Basis
include("ContractionC1.jl")
include("ConvergenceRatesOriginal.jl")
include("HigherDFLY.jl")
include("Chebyshev.jl")
# a special example, the induced map for the LSV map
include("InducedLSV.jl")
using .InducedLSVMapDefinition
export ApproxInducedLSV
include("NoiseKernel.jl")
export UniformNoiseUlam
include("NormsOfPowersNoise.jl")
export powernormboundsnoise, finepowernormboundsnoise, abstractpowernormboundsnoise, invariant_vector_noise, distance_from_invariant_noise
include("Observables.jl")
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 3087 | using ValidatedNumerics
using .DynamicDefinition, .PwDynamicDefinition
using .Contractors
using .DynamicDefinition: derivative, plottable
using TaylorSeries: Taylor1
struct Iterate <: Dynamic
D::PwMap
n::Int
end
Base.show(io::IO, D::Iterate) = print(io, "$(D.n)-times iterate of: ", D.D)
function (D::Iterate)(x::Taylor1)
y = x
for i = 1:D.n
y = (D.D)(y)
end
return y
end
DynamicDefinition.nbranches(D::Iterate) = nbranches(D.D)^D.n
DynamicDefinition.is_full_branch(D::Iterate) = is_full_branch(D.D)
DynamicDefinition.domain(D::Iterate) = domain(D.D)
"""
compose_endpoints(D, endpointsf)
Find discontinuity points of f ∘ D, where f has discontinuity points endpointsf
and D is a given PwMap.
"endpoints" always include the extrema of the given domain.
"""
function compose_endpoints(D, endpointsf)
v = [D.endpoints[1]]
for k = 1:nbranches(D)
preims = [preim(D, k, x) for x in endpointsf[2:end-1]]
if orientation(D, k) > 0
# TODO: still trouble if the branches are not full
append!(v, preims)
else
append!(v, preims[end:-1:1])
end
append!(v, [D.endpoints[k+1]])
end
return v
end
function DynamicDefinition.endpoints(D::Iterate)
@assert D.n >= 1
endpoints = D.D.endpoints
for k = 2:D.n
endpoints = compose_endpoints(D.D, endpoints)
end
return endpoints
end
"""
unpack(k, b, D)
Convert an integer k∈[1,b^n] into a tuple ∈[1,b]^n bijectively
This is used to index preimages: the k'th of the b^n preimages of an Iterate
corresponds to choosing the v[i]'th branch when choosing the i'th preimage, for i = 1..k,
where v = unpack(k, b, n)
"""
function unpack(k, b, n)
@assert 1 ≤ k ≤ b^n
v = fill(0, n)
k = k-1
for i = 1:n
(k, v[n+1-i]) = divrem(k, b)
end
return v .+ 1
end
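# Worked example: with b = 2 branches and n = 3 iterates,
#
#   unpack(6, 2, 3)   # == [2, 1, 2]
#
# since 6-1 = 5 = 101 in base 2, and each digit is then shifted up by one.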
function evaluate_branch(D::Iterate, k, x)
@assert 1 ≤ k ≤ nbranches(D)
n = D.n
v = unpack(k, nbranches(D.D), n)
for i = 1:n
x = branch(D.D, v[i])(x)
end
return x
end
DynamicDefinition.branch(D::Iterate, k) = x -> evaluate_branch(D, k, x)
using LinearAlgebra: Bidiagonal
function Jac(fprime, v::Vector{T}) where {T}
dv = fprime.(v)
ev = -ones(T, length(v)-1)
return Bidiagonal{T}(dv, ev, :U)
end
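# Small sketch of what Jac builds: for a constant derivative fprime ≡ 2 on a
# two-point orbit, the Jacobian of the shooting map is upper bidiagonal,
#
#   Jac(x -> 2.0, [0.3, 0.4])   # == [2.0 -1.0; 0.0 2.0]
#
# with f'(xᵢ) on the diagonal and -1 on the superdiagonal.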
"""
Compute the preimage of an Iterate D in its k'th branch
"""
function DynamicDefinition.preim(D::Iterate, k, y, ϵ=1e-15; max_iter = 100)
@assert 1 <= k <= nbranches(D)
n = D.n
v = unpack(k, nbranches(D.D), n)
fs = D.D.Ts[v]
S = [hull(D.D.endpoints[v[i]], D.D.endpoints[v[i]+1]) for i in 1:n]
return nthpreimage!(y, fs, S)[1]
end
function DynamicDefinition.orientation(D::Iterate, k)
v = unpack(k, nbranches(D.D), D.n)
return prod([orientation(D.D, k) for k in v])
end
function DynamicDefinition.plottable(D::Iterate, x)
@assert 0 <= x <= 1
for k = 1:D.n
x = DynamicDefinition.plottable(D.D, x)
x = clamp(x, 0, 1)
end
return x
end
using RecipesBase
@recipe f(::Type{Iterate}, D::Iterate) = x -> plottable(D, x)
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 322 | module LorenzMapDefinition
using ..PwDynamicDefinition: PwMap
using ValidatedNumerics
export LorenzMap
struct LorenzMap
D::PwMap
end
LorenzMap(θ, α) = PwMap([x->θ*abs(x-0.5)^α, x->1-θ*abs(x-0.5)^α],
[@interval(0), @interval(0.5), @interval(1)])
import ..DynamicDefinition: derivative
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 2335 | module Mod1DynamicDefinition
using ValidatedNumerics
using ..DynamicDefinition, ..Contractors
using ..DynamicDefinition: derivative
export Mod1Dynamic, preim, nbranches, plottable
"""
Defines a Dynamic on [0,1] as the Mod-1 quotient of a given map.
An alternative newer implementation relying on piecewise-defined functions is in `mod1_dynamic`
"""
struct Mod1Dynamic{FT} <: MarkovDynamic
T::FT
nbranches::Int
orientation::Float64
domain::Interval{Float64}
is_full_branch::Bool
end
Mod1Dynamic(T::FT, nbranches = undef, domain = Interval{Float64}(0,1)) where {FT} = Mod1Dynamic{FT}(T, nbranches, domain)
function Mod1Dynamic{FT}(T, nbranches = undef, domain = Interval{Float64}(0,1)) where {FT}
@assert domain == 0..1 # TODO: this only works for domain == 0..1, for now
range_diff = T(@interval(1.))-T(@interval(0.))
orientation = unique_sign(range_diff)
nbranches = ceil(orientation * range_diff).hi
is_full_branch = isinteger(T(0..0)) & isinteger(T(1..1))
return Mod1Dynamic{FT}(T, nbranches, orientation, domain, is_full_branch)
end
DynamicDefinition.domain(S::Mod1Dynamic{FT}) where {FT} = S.domain
DynamicDefinition.nbranches(S::Mod1Dynamic{FT}) where {FT} =S.nbranches
DynamicDefinition.is_full_branch(S::Mod1Dynamic{FT}) where {FT} = S.is_full_branch
# TODO: serious doubts that this works if T(0) is not an integer...
function DynamicDefinition.preim(D::Mod1Dynamic{FT}, k, y, ϵ) where {FT}
# we need to treat the case with the other orientation, 0 not fixed point...
@assert 1 <= k <= D.nbranches
f(x) = D.T(x)-D.T(0)-(y-D.T(0)+(k-1)*D.orientation)
root(f, D.domain, ϵ)
end
DynamicDefinition.derivative(n, D::Mod1Dynamic{FT}, x) where {FT} = derivative(n, D.T, x)
DynamicDefinition.distorsion(D::Mod1Dynamic{FT}, x) where {FT} = distorsion(D.T, x)
DynamicDefinition.max_distorsion(D::Mod1Dynamic{FT}, tol=1e-3) where {FT} = maximise(x -> distorsion(D.T, x), domain(D), tol=tol)[1]
DynamicDefinition.expansivity(D::Mod1Dynamic{FT}, tol=1e-3) where {FT} = maximise(x -> abs(1/derivative(D, x)), domain(D), tol=tol)[1]
function DynamicDefinition.plottable(D::Mod1Dynamic{FT}, x) where {FT}
@assert 0 <= x <= 1
return mod(D.T(x), 1.)
end
using RecipesBase
@recipe f(::Type{Mod1Dynamic{FT}}, D::Mod1Dynamic{FT}) where {FT} = x -> plottable(D, x)
orientation(D::Mod1Dynamic, k) = D.orientation
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 2945 | using ValidatedNumerics
using .DynamicDefinition, .PwDynamicDefinition
using .Contractors
using .DynamicDefinition: derivative
"""
Reimplementation of Mod1Dynamic as a PwMap instance.
We assume that f is monotonic and differentiable, for now (this is not restrictive, for our purposes)
"""
function mod1_dynamic(f::Function, X = (0.,1.), ε = 0.0)
br = Branch(f, X)
# check monotonicity
fprime = x -> derivative(f, x)
@assert minimise(x -> fprime(x) * (br.increasing ? 1 : -1), hull(Interval.(X)...))[1] > 0
Yhull = hull(Interval.(br.Y)...)
possible_integer_parts = floor(Int, Yhull.lo):ceil(Int, Yhull.hi)
x, integer_parts = preimages(possible_integer_parts, br, possible_integer_parts)
ep = [x; X[end]]
Ts = [x->f(x)-k for k in integer_parts]
n = length(x)
if br.increasing
y_endpoints::Matrix{Interval{Float64}} = hcat(fill(0., n), fill(1., n))
else
y_endpoints = hcat(fill(1., n), fill(0., n))
end
y_endpoints[1, 1] = br.Y[begin] - integer_parts[begin]
if y_endpoints[1, 1] == 0.
y_endpoints[1, 1] = 0. # hack to get rid of -0..0 intervals
end
y_endpoints[end, end] = br.Y[end] - integer_parts[end]
if y_endpoints[end, end] == 0.
y_endpoints[end, end] = 0. # hack to get rid of -0..0 intervals
end
# if !isthin(floor(T0))
# @error "T(I.lo) does not have a unique integer part; we did not implement this more complicated case"
# end
# first_branch_integer_part = floor(T0).lo
# if isinteger(T0) && orientation < 0
# first_branch_integer_part -= 1
# end
# last_branch_integer_part = floor(T1).hi
# if isinteger(T1) && orientation > 0
# last_branch_integer_part -= 1
# end
# if orientation > 0
# Ts = [x->T(x)-k for k in first_branch_integer_part:last_branch_integer_part]
# endpoints = [Interval(domain.lo); [preimage(k, T, domain, ε) for k in first_branch_integer_part+1:last_branch_integer_part]; Interval(domain.hi)]
# if length(Ts) == 1
# y_endpoints = [(isinteger(T0) ? 0 : T0-floor(T0), isinteger(T1) ? 1 : T1-floor(T1))]
# else
# y_endpoints = [(isinteger(T0) ? 0 : T0-floor(T0)) , 1; fill((0,1), length(Ts)-2); (0, isinteger(T1) ? 1 : T1-floor(T1))]
# end
# else
# Ts = [x->T(x)-k for k in first_branch_integer_part:-1:last_branch_integer_part]
# endpoints = [Interval(domain.lo); [preimage(k, T, domain, ε) for k in first_branch_integer_part:-1:last_branch_integer_part+1]; Interval(domain.hi)]
# if length(Ts) == 1
# y_endpoints = [(isinteger(T0) ? 1 : T0-floor(T0), isinteger(T1) ? 0 : T1-floor(T1))]
# else
# y_endpoints = [(isinteger(T0) ? 1 : T0-floor(T0), 0); fill((1,0), length(Ts)-2); (1, isinteger(T1) ? 0 : T1-floor(T1))]
# end
# end
return PwMap(Ts, ep, y_endpoints, fill(br.increasing, length(Ts)))
end
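# Hedged usage example: the doubling map x ↦ 2x (mod 1) becomes a two-branch PwMap,
#
#   D = mod1_dynamic(x -> 2x)   # branches x ↦ 2x on [0, 1/2] and x ↦ 2x-1 on [1/2, 1]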
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 4136 |
#export DiscretizedNoiseKernel, UniformNoise
using FFTW, LinearAlgebra
abstract type NoiseKernel end
function Base.:*(M::NoiseKernel, v)
@error "Not implemented"
end
opnormbound(N::NormKind, M::NoiseKernel) = @error "Not Implemented"
opradius(N::NormKind, M::NoiseKernel) = @error "Not Implemented"
nonzero_per_row(M::NoiseKernel) = @error "Not Implemented"
struct DiscretizedNoiseKernelFFT{S<:AbstractVector, T<:AbstractVector} <: NoiseKernel
v::S
Mfft::T
rad
P
end
import IntervalArithmetic: Interval, mid, radius, @interval
import InvariantMeasures: opnormbound, Linf
DiscretizedNoiseKernelFFT(v::Vector{<:Real}) = DiscretizedNoiseKernelFFT(v, fft(v), 0, plan_fft(mid.(v)))
DiscretizedNoiseKernelFFT(v::Vector{Interval{T}}) where {T} = DiscretizedNoiseKernelFFT(v, fft(mid.(v)), opnormbound(L2, radius.(v)), plan_fft(mid.(v)))
Mfft(Q::DiscretizedNoiseKernelFFT) = Q.Mfft
#this is the Ulam discretization of the uniform noise of size ξ, on a partition of size k
function UniformNoiseFFT(ξ, k, boundarycondition = :periodic)
n = Int64(floor(ξ*k))
if boundarycondition == :periodic
v = Interval.([ones(n); zeros(k-n)])*(1/(2*ξ))
v+= Interval.([zeros(k-n); ones(n)])*(1/(2*ξ))
v[n+1]+= @interval (ξ-n*1/k)*k/(2*ξ)
v[k-n-1]+= @interval (ξ-n*1/k)*k/(2*ξ)
end
return DiscretizedNoiseKernelFFT(v)
end
function Base.:*(M::DiscretizedNoiseKernelFFT, v)
P = M.P
w = P*v
@info w
w = M.Mfft.*w
@info w
return real.(P\w)/length(v)
end
struct DiscretizedNoiseKernelUlam{S<:AbstractVector} <: NoiseKernel
ξ
v::S
rad
boundarycondition::Symbol
w::Vector
z::Vector
end
function UniformNoiseUlam(ξ, B::Ulam, boundarycondition = :periodic)
k = length(B)
n = 2*Int64(ceil(ξ*k))
v = zeros(Interval{Float64}, n)
a = 1/(2*Interval(ξ))
v[2:n-1] = a*ones(Interval{Float64}, n-2)
v[1] = (k-sum(v))/2
v[n] = v[1]
if boundarycondition == :periodic
return DiscretizedNoiseKernelUlam(Interval(ξ),
mid.(v),
opnormbound(L1, v)-k,
boundarycondition,
zeros(k+n),
zeros(k))
elseif boundarycondition == :reflecting
return DiscretizedNoiseKernelUlam(Interval(ξ),
mid.(v),
opnormbound(L1, v)-k,
boundarycondition,
zeros(k+n+2),
zeros(k))
end
end
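# Hedged usage example (the sizes are illustrative): a uniform noise kernel of
# amplitude ξ = 1/16 discretized on a 128-element Ulam basis,
#
#   NK = UniformNoiseUlam(1/16, Ulam(128))
#   w = NK * ones(128)   # applies the (Markov) kernel to a density vector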
#TODO, but at the moment this is fine, it is a Markov operator
opnormbound(::Type{L1}, M::DiscretizedNoiseKernelUlam) = 1.0
opradius(::Type{L1}, M::DiscretizedNoiseKernelUlam) = M.rad
nonzero_per_row(M::DiscretizedNoiseKernelUlam) = length(M.v)
dfly(::Type{TotalVariation}, ::Type{L1}, N::DiscretizedNoiseKernelUlam) = return 0.0, (1/(2*N.ξ)).hi
function Base.:*(M::DiscretizedNoiseKernelUlam, v)
mult(M, v, Val(M.boundarycondition))
end
function mult(M::DiscretizedNoiseKernelUlam, v, ::Val{:periodic})
n = length(M.v)
k = length(v)
l =n÷2
M.w .= 0
M.w[l+1:l+k] = v
M.w[1:l] = v[end-l+1:end]
M.w[end-l+1:end] = v[1:l]
for i in 1:k
h = @view M.w[i:i+n-1]
v[i] = sum( M.v.* h)/k
end
return v
end
function mult(M::DiscretizedNoiseKernelUlam, v::Vector{Interval{T}}, ::Val{:periodic}) where {T}
n = length(M.v)
k = length(v)
l =n÷2
nrmv = opnormbound(L1, v)
midv = mid.(v)
radv = radius.(v)
nrmrad = opnormbound(L1, radv)
M.w .= 0
M.w[l+1:l+k] = midv
M.w[1:l] = midv[end-l+1:end]
M.w[end-l+1:end] = midv[1:l]
for i in 1:k
h = @view M.w[i:i+n-1]
v[i] = sum( M.v.* h)/k
end
δₖ = opradius(L1, M)
γₖ = gamma(T, nonzero_per_row(M))
nrm_MK = opnormbound(L1, M)
normMK = nrm_MK ⊕₊ δₖ
ϵ = (γₖ ⊗₊ normMK) ⊗₊ nrmv ⊕₊ normMK ⊗₊ nrmrad
return v + fill(Interval(-ϵ ,ϵ), length(v))
end
function mult(M::DiscretizedNoiseKernelUlam, v, ::Val{:reflecting})
n = length(M.v)
k = length(v)
l = n÷2+1
@info v
M.w .= 0
M.w[l+1:l+k] = v
v .= 0
for i in 1:l
h = @view M.w[i:i+n-1]
@info h, i
val = sum( M.v.* h)/k
v[l+1-i] += val
end
for i in 1:k
h = @view M.w[i+l:i+l+n-1]
@info h, i+l
val = sum( M.v.* h)/k
v[i]+=val
end
@info "stop"
for i in 1:l
h = @view M.w[k+l+i:k+l+i+n-1]
@info h
val = sum( M.v.* h)/k
v[end-i+1] +=val
end
return v
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 6878 | """
Functions to deal with various types of norms and seminorms
"""
using FastRounding
using IntervalOptimisation
using TaylorSeries: Taylor1
using SparseArrays: getcolptr
using .DynamicDefinition
using .DynamicDefinition: derivative
"""
'Absolute value' definition that returns mag(I) for an interval and abs(x) for a real
"""
abs_or_mag(x::Number) = abs(x)
abs_or_mag(x::Interval) = mag(x)
"""
Computes a rigorous upper bound for z*z'
"""
z_times_conjz(z::Complex) = square_round(abs_or_mag(real(z)), RoundUp) ⊕₊ square_round(abs_or_mag(imag(z)), RoundUp)
abs_or_mag(z::Complex) = sqrt_round(z_times_conjz(z), RoundUp)
"""
Certified upper bound to ||A|| (of specified NormKind)
"""
function opnormbound(::Type{L1}, A::AbstractVecOrMat{T}) where {T}
# partly taken from JuliaLang's LinearAlgebra/src/generic.jl
Tnorm = typeof(abs_or_mag(float(real(zero(T)))))
Tsum = promote_type(Float64, Tnorm)
nrm::Tsum = 0
@inbounds begin
for j = 1:size(A, 2)
nrmj::Tsum = 0
for i = 1:size(A, 1)
nrmj = nrmj ⊕₊ abs_or_mag(A[i,j])
end
nrm = max(nrm,nrmj)
end
end
return convert(Tnorm, nrm)
end
function opnormbound(::Type{Linf}, A::AbstractVecOrMat{T}) where {T}
# partly taken from JuliaLang's LinearAlgebra/src/generic.jl
Tnorm = typeof(abs_or_mag(float(real(zero(T)))))
Tsum = promote_type(Float64, Tnorm)
nrm::Tsum = 0
@inbounds begin
for i = 1:size(A, 1)
nrmi::Tsum = 0
for j = 1:size(A, 2)
nrmi = nrmi ⊕₊ abs_or_mag(A[i,j])
end
nrm = max(nrm,nrmi)
end
end
return convert(Tnorm, nrm)
end
"""
These functions compute a rigorous upper bound for the 2-norm of a vector;
we have a specialized version for complex numbers to avoid taking
the sqrt root and squaring again
"""
function opnormbound(::Type{L2}, v::Vector{T}) where {T<:Real}
# partly taken from JuliaLang's LinearAlgebra/src/generic.jl
Tnorm = typeof(abs_or_mag(float(real(zero(T)))))
Tsum = promote_type(Float64, Tnorm)
nrm::Tsum = 0
@inbounds begin
for j = 1:length(v)
nrm = nrm ⊕₊ square_round(abs_or_mag(v[j]), RoundUp)
end
end
return convert(Tnorm, sqrt_round(nrm, RoundUp))
end
function opnormbound(::Type{L2}, v::Vector{T}) where {T<:Complex}
# partly taken from JuliaLang's LinearAlgebra/src/generic.jl
Tnorm = typeof(abs_or_mag(float(real(zero(T)))))
Tsum = promote_type(Float64, Tnorm)
nrm::Tsum = 0
@inbounds begin
for j = 1:length(v)
nrm = nrm ⊕₊ z_times_conjz(v[j])
end
end
return convert(Tnorm, sqrt_round(nrm, RoundUp))
end
function opnormbound(::Type{L1}, A::SparseMatrixCSC)
# partly taken from JuliaLang's Sparsearray/src/linalg.jl
m, n = size(A)
Tnorm = typeof(abs_or_mag(float(real(zero(eltype(A))))))
Tsum = promote_type(Float64,Tnorm)
nA::Tsum = 0
@inbounds begin
for j=1:n
colSum::Tsum = 0
for i = getcolptr(A)[j]:getcolptr(A)[j+1]-1
colSum = colSum ⊕₊ abs_or_mag(nonzeros(A)[i])
end
nA = max(nA, colSum)
end
end
return convert(Tnorm, nA)
end
function opnormbound(::Type{Linf}, A::SparseMatrixCSC)
# partly taken from JuliaLang's Sparsearray/src/linalg.jl
m, n = size(A)
Tnorm = typeof(abs_or_mag(float(real(zero(eltype(A))))))
Tsum = promote_type(Float64,Tnorm)
rowSum = zeros(Tsum,m)
@inbounds begin
for i=1:length(nonzeros(A))
rowSum[rowvals(A)[i]] = rowSum[rowvals(A)[i]] ⊕₊ abs_or_mag(nonzeros(A)[i])
end
end
return convert(Tnorm, maximum(rowSum))
end
"""
Rigorous upper bound on a vector norm. Note that Linf, L1 are the "analyst's" norms
"""
normbound(N::Type{L1}, v::AbstractVector) = opnormbound(L1, v) ⊘₊ Float64(length(v), RoundDown)
normbound(N::Type{Linf}, v::AbstractVector) = opnormbound(Linf, v)
"""
Types to compute norms iteratively by "adding a column at a time".
"""
abstract type NormCacher{T} end
mutable struct NormCacherL1 <: NormCacher{L1}
C::Float64
function NormCacherL1(n)
new(0)
end
end
"""
Create a new NormCacher to compute the normbound of the empty matrix with n rows
"""
NormCacher{L1}(n) = NormCacherL1(n)
mutable struct NormCacherLinf <: NormCacher{Linf}
C::Vector{Float64}
function NormCacherLinf(n)
new(zeros(n))
end
end
NormCacher{Linf}(n) = NormCacherLinf(n)
"""
Update a NormCacher to add one column to the matrix it is computing a norm of.
This column may be affected by an error ε (in the same norm).
"""
function add_column!(Cacher::NormCacherL1, v::AbstractVector, ε::Float64)
Cacher.C = max(Cacher.C, opnormbound(L1, v) ⊕₊ ε)
end
function add_column!(Cacher::NormCacherLinf, v::AbstractVector, ε::Float64)
Cacher.C = Cacher.C .⊕₊ abs.(v) .⊕₊ ε
end
"""
Return the norm of the matrix the NormCacher is working on.
"""
function get_norm(Cacher::NormCacherL1)
return Cacher.C
end
function get_norm(Cacher::NormCacherLinf)
return maximum(Cacher.C)
end
"""
(A, B) = dfly(strongnorm, auxnorm, dynamic)
Constants (A, B) such that ||Lf||_s ≦ A||f||_s + B||f||_aux
"""
dfly(::Type{<:NormKind}, ::Type{<:NormKind}, ::Dynamic) = @error "Not implemented"
# I don't think this is used in production anymore
function dfly(::Type{TotalVariation}, ::Type{L1}, D::Dynamic)
dist = max_distorsion(D)
lam = expansivity(D)
if !(abs(lam) < 1) # these are intervals, so this is *not* equal to abs(lam) >= 1.
@error "The function is not expanding"
end
if is_full_branch(D)
return lam.hi, dist.hi
else
if !(abs(lam) < 0.5)
@error "Expansivity is insufficient to prove a DFLY. Try with an iterate."
end
# We need a way to estimate the branch widths
@error "Not implemented"
end
end
function dfly(::Type{TotalVariation}, ::Type{L1}, D::PwMap)
dist = max_distorsion(D)
lam = expansivity(D)
vec = endpoints(D)
disc = maximum(2/abs(vec[i]-vec[i+1]) for i in 1:nbranches(D))
if is_full_branch(D)
if !(abs(lam) < 1) # these are intervals, so this is *not* equal to abs(lam) >= 1.
@error "The function is not expanding"
end
return lam.hi, dist.hi
else
if !(abs(2*lam) < 1)
@error "Expansivity is insufficient to prove a DFLY. Try with an iterate."
end
return (2*lam).hi, (dist + disc).hi
end
end
function dfly(::Type{Lipschitz}, ::Type{L1}, D::Dynamic)
# TODO: should assert that D is globally C2 instead, but we don't have that kind of infrastructure yet.
@assert is_full_branch(D)
dist = max_distorsion(D)
lam = expansivity(D)
return ((lam*(2*dist+1)).hi, (dist*(dist+1)).hi)
end
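# Usage sketch: for the full-branch doubling map (a minimal example, assuming
# mod1_dynamic from this package),
#
#   D = mod1_dynamic(x -> 2x)
#   λ, B = dfly(TotalVariation, L1, D)   # Var(Lf) ≤ λ·Var(f) + B·||f||₁; here λ = 0.5, B = 0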
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 8007 | """
Functions to estimate Q|_{U^0}. See our paper for details.
"""
using LinearAlgebra
using SparseArrays
using FastRounding
using ValidatedNumerics
export norms_of_powers, refine_norms_of_powers, norms_of_powers_dfly, norms_of_powers_trivial, norms_of_powers_from_coarser_grid
"""
Returns the maximum number of (structural) nonzeros in a row of A
"""
function max_nonzeros_per_row(A::SparseMatrixCSC)
rows = rowvals(A)
m, n = size(A)
nonzeros_in_each_row = zeros(eltype(rows), m)
for i in rows
nonzeros_in_each_row[i] += 1
end
return maximum(nonzeros_in_each_row)
end
"""
γₙ constants for floating point error estimation, as in [Higham, Accuracy and Stability of Numerical Algorithms]
"""
function gamma(T, n::Integer)
u = eps(T)
nu = u ⊗₊ T(n) #TODO: in theory, this should be rounded up/down. In practice, all integers up to 2^53 or so fit in Float64, so it won't be needed.
return nu ⊘₊ (one(T) ⊖₋ nu)
end
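# Worked example: with u = eps(Float64) ≈ 2.22e-16,
#
#   gamma(Float64, 2)   # ≈ 4.44e-16, an upper bound for 2u/(1-2u)
#
# which bounds the accumulated rounding error of a length-2 sequence of floating point operations.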
using ProgressMeter
"""
Estimates the norms ||Q||, ||Q^2||, ... ||Q^m|| on U^0.
U is the matrix [ones(1,n-1); -I_(n-1,n-1)]. It is currently assumed that
f*U==0 (i.e., all elements of f are equal).
f must be an interval vector.
The following constants may be specified as keyword arguments:
normQ, normE, normv0, normEF, normIEF, normN
otherwise they are computed (which may be slower).
e and f must be specified in case is_integral_preserving==false
In case is_integral_preserving is true, they may be specified but they are then ignored.
Implementation note: currently we perform this computation one column at a time,
to be able to scale (slowly) to cases with large size; for moderate sizes, it would
indeed be better to do the computation all columns at the same time, in BLAS level 3.
"""
function norms_of_powers(N::Type{<:NormKind}, m::Integer, Q::DiscretizedOperator, f::AbstractArray;
normv0::Real=-1., #used as "missing" value
normQ::Real=-1.,
normE::Real=-1.,
normEF::Real=-1.,
normIEF::Real=-1.,
normN::Real=-1.)
@assert eltype(f) <: Interval
T = typeof(zero(eltype(Q.L)).hi) # gets "Float64" from Q.L
n = size(Q.L, 1)
M = mid.(Q.L)
R = radius.(Q.L)
δ = opnormbound(N, R)
γz = gamma(T, max_nonzeros_per_row(Q.L))
γn = gamma(T, n+3) # not n+2 like in the paper, because we wish to allow for f to be the result of rounding
ϵ = zero(T)
nrmM = opnormbound(N, M)
# precompute norms
if !is_integral_preserving(Q)
if normE == -1.
normE = opnormbound(N, Q.e)
end
if normEF == -1.
normEF = opnormbound(N, Q.e*f)
end
if normIEF == -1.
normIEF = opnormbound(N, [Matrix(UniformScaling{Float64}(1),n,n) Q.e*f])
end
if normN == -1.
normN = opnormbound(N, Matrix(UniformScaling{Float64}(1),n,n) - Q.e*f)
end
end
if normQ == -1.
if is_integral_preserving(Q)
normQ = nrmM ⊕₊ δ
else
defect = opnormbound(N, Q.w)
normQ = nrmM ⊕₊ δ ⊕₊ normE ⊗₊ defect
end
end
# initialize normcachers
normcachers = [NormCacher{N}(n) for j in 1:m]
midf = map(mid, f)
# main loop
v = zeros(T, n)
@showprogress for j = 1:n-1
v .= zero(T) # TODO: check for type stability in cases with unusual types
v[1] = one(T) # TODO: in full generality, this should contain entries of f rather than ±1
v[j+1] = -one(T)
if normv0 == -1.
nrmv = opnormbound(N, v)
else
nrmv = normv0
end
ϵ = 0.
nrmw = nrmv # we assume that initial vectors are already integral-preserving
for k = 1:m
w = M * v
if is_integral_preserving(Q)
v = w
ϵ = (γz ⊗₊ nrmM ⊕₊ δ) ⊗₊ nrmv ⊕₊ normQ ⊗₊ ϵ
else
v = w - Q.e * (midf*w) # TODO: we are currently assuming that f is not too large, to estimate the error (the result of only one floating point operation)
new_nrmw = opnormbound(N, w)
ϵ = γn ⊗₊ normIEF ⊗₊ (new_nrmw ⊕₊ normEF ⊗₊ nrmw) ⊕₊ normN ⊗₊ (γz ⊗₊ nrmM ⊕₊ δ) ⊗₊ nrmv ⊕₊ normQ ⊗₊ ϵ
nrmw = new_nrmw
end
nrmv = opnormbound(N, v)
add_column!(normcachers[k], v, ϵ) #TODO: Could pass and reuse nrmv in the case of norm-1
end
end
return map(get_norm, normcachers)
end
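# Hedged usage sketch (the basis/dynamic pair is illustrative):
#
#   B = Ulam(1024); D = mod1_dynamic(x -> 2x)
#   Q = DiscretizedOperator(B, D)
#   norms_of_powers(weak_norm(B), 10, Q, integral_covector(B))   # bounds ||Q^k|_{U⁰}|| for k = 1..10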
"""
Array of "trivial" bounds for the powers of a DiscretizedOperator (on the whole space)
coming from from ||Q^k|| ≤ ||Q||^k
"""
function norms_of_powers_trivial(N::Type{<:NormKind}, Q::DiscretizedOperator, m::Integer)
norms = fill(NaN, m)
norms[1] = opnormbound(N, Q)
for i = 2:m
norms[i] = norms[i-1] ⊗₀ norms[1]
end
return norms
end
"""
Arrays of bounds to ||Q^k||_{w → s} = sup_{||f||_w=1} ||Q^k f||_s
and to ||Q^k||_{w}
coming theoretically from iterated DFLY inequalities (the "small matrix method").
Returns two arrays (strongs, norms) of length m:
strongs[k] bounds ||Q^k f||_s, norms[k] bounds ||Q^k f||)
"""
function norms_of_powers_dfly(Bas::Basis, D::Dynamic, m)
A, B = dfly(strong_norm(Bas), aux_norm(Bas), D)
Eh = BasisDefinition.aux_normalized_projection_error(Bas)
M₁n = BasisDefinition.strong_weak_bound(Bas)
M₂ = BasisDefinition.aux_weak_bound(Bas)
S₁, S₂ = BasisDefinition.weak_by_strong_and_aux_bound(Bas)
norms = fill(NaN, m)
strongs = fill(NaN, m)
v = Array{Float64}([M₁n; M₂])
# We evaluate [S₁ S₂] * ([1 0; Eh 1]*[A B; 0 1])^k * [M₁n; M₂] (with correct rounding)
for k = 1:m
# invariant: v[1] bounds ||Q^kf||_s for ||f||_w=1
# v[2] bounds |||Q^kf||| for ||f||_w=1
v[1] = A ⊗₊ v[1] ⊕₊ B ⊗₊ v[2]
v[2] = Eh ⊗₊ v[1] ⊕₊ v[2]
strongs[k] = v[1]
norms[k] = S₁ ⊗₊ v[1] ⊕₊ S₂ ⊗₊ v[2]
end
return strongs, norms
end
"""
Compute better and/or more estimates of power norms using
the fact that ||Q^{k+h}|| ≤ ||Q^k|| * ||Q^h||.
This uses multiplicativity, so it will not work for mixed norms,
e.g., ||Q^k||_{s → w}, or ||M^k|_{U^0}||
(unless M preserves U^0, which is the case for Q|_{U^0}).
"""
function refine_norms_of_powers(norms::Vector, m)
better_norms = fill(NaN, m)
better_norms[1] = norms[1]
for k in 2:m
better_norms[k] = minimum(better_norms[i] ⊗₊ better_norms[k-i] for i in 1:k-1)
if k <= length(norms)
better_norms[k] = min(better_norms[k], norms[k])
end
end
return better_norms
end
refine_norms_of_powers(norms::Vector) = refine_norms_of_powers(norms, length(norms))
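# Worked example: from the single bound ||Q|| ≤ 0.5, submultiplicativity gives
#
#   refine_norms_of_powers([0.5], 3)   # == [0.5, 0.25, 0.125]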
"""
Estimate norms of powers from those on a coarser grid (see paper for details)
"""
function norms_of_powers_from_coarser_grid(fine_basis::Basis, coarse_basis::Basis, D::Dynamic, coarse_norms::Vector, normQ::Real)
if !BasisDefinition.is_refinement(fine_basis, coarse_basis)
@error "The fine basis is not a refinement of the coarse basis"
end
m = length(coarse_norms)
fine_norms = fill(NaN, m)
(strongs, norms) = norms_of_powers_dfly(fine_basis, D, m)
# adds a 0th element to strongs
strongs0(k::Integer) = k==0 ? BasisDefinition.strong_weak_bound(fine_basis) : strongs[k]
coarse_norms0(k::Integer) = k==0 ? 1. : coarse_norms[k]
Kh = BasisDefinition.weak_projection_error(coarse_basis)
for k in 1:m
temp = 0.
for i in 0:m-1 # i renamed from k to avoid shadowing the outer loop index; temp is then a uniform (k-independent) bound
temp = temp ⊕₊ coarse_norms0(m-1-i) ⊗₊ (normQ ⊗₊ strongs0(i) ⊕₊ strongs0(i+1))
end
fine_norms[k] = coarse_norms[k] ⊕₊ 2. ⊗₊ Kh ⊗₊ temp
end
return fine_norms
end
"""
Estimate ||I + Q + Q^2 + … || (infinite sum) using a list of computed norm bounds norms[k] ≥ ||Q^k||.
"""
function infinite_sum_norms(norms::Vector)
m = length(norms)
if norms[m] >= 1
@error "The last norm is >= 1: the bounds are not sufficient to show that the sum converges"
end
S = 1.
for k = 1:m-1
S = S ⊕₊ norms[k]
end
S = S ⊘₊ (1. ⊖₋ norms[m])
return S
end
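# Worked example: with norms = [2.0, 0.5], grouping powers into blocks of length 2
# gives ∑ₖ ||Q^k|| ≤ (1 + 2)/(1 - 0.5), so
#
#   infinite_sum_norms([2.0, 0.5])   # == 6.0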
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 11945 | """
Functions to estimate Q|_{U^0}. See our paper for details.
"""
using LinearAlgebra
using SparseArrays
using FastRounding
using ValidatedNumerics
using FFTW
export norms_of_powers_noise
"""
Estimates the norms ||Q||, ||Q^2||, ... ||Q^m|| on U^0.
U is the matrix [ones(1,n-1); -I_(n-1,n-1)]. It is currently assumed that
f*U==0 (i.e., all elements of f are equal).
f must be an interval vector.
The following constants may be specified as keyword arguments:
normQ, normE, normv0, normEF, normIEF, normN
otherwise they are computed (which may be slower).
e and f must be specified in case is_integral_preserving==false
In case is_integral_preserving is true, they may be specified but they are then ignored.
"""
function norms_of_powers_noise(N::Type{L1},
m::Integer,
Q::DiscretizedOperator,
MK::NoiseKernel,
f::AbstractArray;
normv0::Real=-1., #used as "missing" value
normQ::Real=-1.,
normE::Real=-1.,
normEF::Real=-1.,
normIEF::Real=-1.,
normN::Real=-1.,
normρ::Real=-1.)
@assert eltype(f) <: Interval
T = typeof(zero(eltype(Q.L)).hi) # gets "Float64" from Q.L
n = size(Q.L, 1)
M = mid.(Q.L)
R = radius.(Q.L)
δ = opnormbound(N, R)
# this is the operator radius of the noise operator,
# we use this function since the operator may be defined implicitly
γz = gamma(T, max_nonzeros_per_row(Q.L))
γn = gamma(T, n+3) # not n+2 like in the paper, because we wish to allow for f to be the result of rounding
ϵ = zero(T)
nrmM = opnormbound(N, M)
# these need to be implemented for each kernel
δₖ = opradius(N, MK) #this is the opnorm of the radius matrix
# essentially nonzero_per_row(MK) is
# the number of mul_add needed to obtain one
# entry of MK*v
γₖ = gamma(T, nonzero_per_row(MK))
nrm_MK = opnormbound(N, MK)
normMK = nrm_MK ⊕₊ δₖ
# precompute norms
if !is_integral_preserving(Q)
if normE == -1.
normE = opnormbound(N, Q.e)
end
if normEF == -1.
normEF = opnormbound(N, Q.e*f)
end
if normIEF == -1.
normIEF = opnormbound(N, [Matrix(UniformScaling{Float64}(1),n,n) Q.e*f])
end
if normN == -1.
normN = opnormbound(N, Matrix(UniformScaling{Float64}(1),n,n) - Q.e*f)
end
end
if normQ == -1.
if is_integral_preserving(Q)
normQ = nrmM ⊕₊ δ
else
defect = opnormbound(N, Q.w)
normQ = nrmM ⊕₊ δ ⊕₊ normE ⊗₊ defect
end
end
# initialize normcachers
normcachers = [NormCacher{N}(n) for j in 1:m]
midf = map(mid, f)
# main loop
v = zeros(T, n)
@showprogress for j = 1:n-1
v .= zero(T) # TODO: check for type stability in cases with unusual types
v[1] = one(T) # TODO: in full generality, this should contain entries of f rather than ±1
v[j+1] = -one(T)
if normv0 == -1.
nrmv = opnormbound(N, v)
else
nrmv = normv0
end
ϵ = 0.
nrmw = nrmv # we assume that initial vectors are already integral-preserving
for k = 1:m
w = M * v
if is_integral_preserving(Q)
v = w
ϵ = (γz ⊗₊ nrmM ⊕₊ δ) ⊗₊ nrmv ⊕₊ normQ ⊗₊ ϵ
else
v = w - Q.e * (midf*w) # TODO: we are currently assuming that f is not too large, to estimate the error (the result of only one floating point operation)
new_nrmw = opnormbound(N, w)
ϵ = γn ⊗₊ normIEF ⊗₊ (new_nrmw ⊕₊ normEF ⊗₊ nrmw) ⊕₊ normN ⊗₊ (γz ⊗₊ nrmM ⊕₊ δ) ⊗₊ nrmv ⊕₊ normQ ⊗₊ ϵ
nrmw = new_nrmw
end
# the noise step
nrmv = opnormbound(N, v)
v = w
w = MK * v
v = w
ϵ = (γₖ ⊗₊nrm_MK ⊕₊δₖ) ⊗₊ nrmv ⊕₊ normMK ⊗₊ ϵ
nrmv = opnormbound(N, v)
add_column!(normcachers[k], v, ϵ) #TODO: Could pass and reuse nrmv in the case of norm-1
end
end
return map(get_norm, normcachers)
end
"""
Array of "trivial" bounds for the powers of a DiscretizedOperator (on the whole space)
coming from from ||Q^k|| ≤ ||Q||^k
"""
function norms_of_powers_trivial_noise(N::Type{<:NormKind},
Q::DiscretizedOperator,
MK::NoiseKernel,
m::Integer)
norms = fill(NaN, m)
δₖ = opradius(N, MK)
nrm_MK = opnormbound(N, MK)
norms[1] = opnormbound(N, Q) ⊗₊ (nrm_MK ⊕₊ δₖ) # parenthesized so the kernel radius enters the product
for i = 2:m
norms[i] = norms[i-1] ⊗₀ norms[1]
end
return norms
end
"""
Arrays of bounds to ||Q^k||_{w → s} = sup_{||f||_w=1} ||Q^k f||_s
and to ||Q^k||_{w}
coming theoretically from iterated DFLY inequalities (the "small matrix method").
Returns two arrays (strongs, norms) of length m:
strongs[k] bounds ||Q^k f||_s, norms[k] bounds ||Q^k f||)
"""
function norms_of_powers_abstract_noise(Bas::Basis, N::NoiseKernel, m)
A, B = dfly(strong_norm(Bas), aux_norm(Bas), N)
Eh = BasisDefinition.aux_normalized_projection_error(Bas)
M₁n = BasisDefinition.strong_weak_bound(Bas)
M₂ = BasisDefinition.aux_weak_bound(Bas)
S₁, S₂ = BasisDefinition.weak_by_strong_and_aux_bound(Bas)
norms = fill(NaN, m)
strongs = fill(NaN, m)
v = Array{Float64}([M₁n; M₂])
# We evaluate [S₁ S₂] * ([1 0; Eh 1]*[A B; 0 1])^k * [M₁n; M₂] (with correct rounding)
for k = 1:m
# invariant: v[1] bounds ||Q^kf||_s for ||f||_w=1
# v[2] bounds |||Q^kf||| for ||f||_w=1
v[1] = A ⊗₊ v[1] ⊕₊ B ⊗₊ v[2]
v[2] = Eh ⊗₊ v[1] ⊕₊ v[2]
strongs[k] = v[1]
norms[k] = S₁ ⊗₊ v[1] ⊕₊ S₂ ⊗₊ v[2]
end
return strongs, norms
end
"""
Estimate norms of powers from those on a coarser grid (see paper for details)
TODO: Check if it works for other basis types
"""
function norms_of_powers_from_coarser_grid_noise(fine_basis::Ulam,
coarse_basis::Ulam,
Q::DiscretizedOperator,
NK::NoiseKernel,
coarse_norms::Vector)
if !BasisDefinition.is_refinement(fine_basis, coarse_basis)
@error "The fine basis is not a refinement of the coarse basis"
end
m = length(coarse_norms)
fine_norms = fill(NaN, m+1)
trivial_norms = norms_of_powers_trivial_noise(weak_norm(fine_basis), Q, NK, m+1)
A, B = dfly(strong_norm(fine_basis), aux_norm(fine_basis), NK)
# adds a 0th element to strongs
trivial_norms0(k::Integer) = k==0 ? 1. : trivial_norms[k]
coarse_norms0(k::Integer) = k==0 ? 1. : coarse_norms[k]
Kh = BasisDefinition.weak_projection_error(coarse_basis)
fine_norms[1] = trivial_norms0(1)
for k in 1:m
temp = 0.
for i in 0:k-1
temp = temp ⊕₊ coarse_norms0(i) ⊗₊ (trivial_norms0(k-i) ⊕₊ trivial_norms0(k-i-1))
end
fine_norms[k+1] = coarse_norms0(k) ⊕₊ B ⊗₊ Kh ⊗₊ (temp ⊕₊ 1.0)
end
return fine_norms
end
function norms_of_powers_from_coarser_grid_noise_abstract(coarse_basis::Ulam,
NK::NoiseKernel,
coarse_norms::Vector)
m = length(coarse_norms)
abstract_norms = fill(NaN, m+1)
trivial_norms = ones(m+1)
A, B = dfly(strong_norm(coarse_basis), aux_norm(coarse_basis), NK)
# adds a 0th element to strongs
trivial_norms0(k::Integer) = k==0 ? 1. : trivial_norms[k]
coarse_norms0(k::Integer) = k==0 ? 1. : coarse_norms[k]
Kh = BasisDefinition.weak_projection_error(coarse_basis)
fine_norms = fill(NaN, m+1)
fine_norms[1] = trivial_norms0(1)
for k in 1:m
temp = 0.
for i in 0:k-1
temp = temp ⊕₊ coarse_norms0(i) ⊗₊ (trivial_norms0(k-i) ⊕₊ trivial_norms0(k-i-1))
end
fine_norms[k+1] = coarse_norms0(k) ⊕₊ B ⊗₊ Kh ⊗₊ (temp ⊕₊ 1.0)
end
return fine_norms
end
function powernormboundsnoise(B; Q::DiscretizedOperator, NK::NoiseKernel) # Q and NK are required keyword arguments
m = 8
computed_norms = []
while true
computed_norms = norms_of_powers_noise(weak_norm(B), m, Q, NK, integral_covector(B))
if any(computed_norms .< 0.1)
break
end
m = 2*m
end
trivial_norms = norms_of_powers_trivial_noise(weak_norm(B), Q, NK, m)
norms = min.(trivial_norms, computed_norms)
m_extend = 2*m
better_norms = []
while true
better_norms = refine_norms_of_powers(norms, m_extend)
if better_norms[end] < 1e-8
break
end
m_extend = 2*m_extend
end
return better_norms
end
"""
Uses power norm bounds already computed for a coarse operator to estimate
the same norms for a finer operator
"""
function finepowernormboundsnoise(B,
Bfine,
coarse_norms;
Qfine::DiscretizedOperator,
NKfine::NoiseKernel)
m = length(coarse_norms)
trivial_norms_fine = norms_of_powers_trivial_noise(weak_norm(B), Qfine, NKfine, m+1)
twogrid_norms_fine = norms_of_powers_from_coarser_grid_noise(Bfine,
B,
Qfine,
NKfine,
coarse_norms)
norms_fine = min.(trivial_norms_fine, twogrid_norms_fine)
better_norms_fine = refine_norms_of_powers(norms_fine, m+1)
return better_norms_fine
end
function abstractpowernormboundsnoise(B, NK, coarse_norms; m = length(coarse_norms))
trivial_norms_fine = ones(m+1)
abstract_norms_from_coarse = norms_of_powers_from_coarser_grid_noise_abstract(B, NK, coarse_norms)
norms_fine = min.(trivial_norms_fine, abstract_norms_from_coarse)
better_norms_abstract = refine_norms_of_powers(norms_fine, m+1)
return better_norms_abstract
end
"""
Return a numerical approximation to the (hopefully unique) invariant vector
of the dynamic with discretized operator Q.
The vector is normalized so that integral_covector(B)*w ≈ 1
"""
function invariant_vector_noise(B::Basis, Q::DiscretizedOperator, NK::NoiseKernel; tol = 0.0)
mQ = mid(Q)
v = one_vector(B)
for i in 1:30
v = NK*(mQ*v)
end
v = v ./ (mid.(integral_covector(B))*v)
return v
end
"""
Return an upper bound to Q_h*w - w in the given norm
"""
function residualboundnoise(N::Type{<:NormKind}, Q::DiscretizedOperator, NK::NoiseKernel, w::AbstractVector)
v = Q*w
v = NK*v # actually, a priori estimate... TODO modify mult NK*v
return normbound(N, v - w)
end
"""
Bounds rigorously the distance of w from the fixed point of Q (normalized with integral = 1),
using a vector of bounds norms[k] ≥ ||Q_h^k|_{U_h^0}||.
"""
function distance_from_invariant_noise(B::Basis,
Q::DiscretizedOperator,
NK::NoiseKernel,
w::AbstractVector,
norms::Vector;
ε₁::Float64 = residualboundnoise(weak_norm(B), Q, NK, w),
ε₂::Float64 = mag(integral_covector(B) * w - 1),
normQ::Float64 = opnormbound(weak_norm(B), Q))
if ε₂ > 1e-8
@error "w does not seem normalized correctly"
end
_ , us = dfly(strong_norm(B), aux_norm(B), NK)
Cs = infinite_sum_norms(norms)
Kh = BasisDefinition.weak_projection_error(B)
normw = normbound(weak_norm(B), w)
return Cs ⊗₊ (2. ⊗₊ Kh ⊗₊ (1. ⊕₊ normQ) ⊗₊ us ⊕₊ ε₁ ⊘₊ (1. ⊖₋ ε₂)) ⊕₊ ε₂ ⊘₊ (1. ⊖₋ ε₂) ⊗₊ normw
end | InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 3135 |
import TaylorModels
function integrate(f, I; steps = 1024, degree = 6)
lo = I.lo
hi = I.hi
l = diam(I)
int_center = Interval(0.0)
int_error = Interval(0.0)
for i in 1:steps
left = lo+(i-1)*(l/steps)
right = lo+i*l/steps
r = (1/2)*(l/steps)
J = interval(left, right)
TM = TaylorModels.TaylorModel1(degree, J)
FM = f(TM)
#@info FM
for j in 0:Int64(floor(degree/2)) # j renamed from i to avoid shadowing the step index
int_center += 2*(FM.pol[2*j]*r^(2*j+1))/(2*j+1)
end
int_error += 2*FM.rem*r # the remainder term is integrated once per subinterval
end
return int_center+int_error
end
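# Hedged usage example: only the even Taylor coefficients contribute on each
# symmetric subinterval, and the result is a rigorous enclosure; for instance
#
#   integrate(x -> x^2, Interval(0, 1))   # a thin interval enclosing 1/3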
function adaptive_integration(f, I::Interval; tol = 2^-10, steps = 8, degree = 6) # tol 2^-10, steps = 8 are default values
lo = I.lo
hi = I.hi
l = diam(I)
int_value = Interval(0.0)
for i in 1:steps
left = lo+(i-1)*(l/steps)
right = lo+i*l/steps
Istep = Interval(left, right)
val = integrate(f, Istep)
if radius(val)<tol
int_value += val
else
I₁, I₂ = bisect(Istep) # bisect the subinterval that failed the tolerance, not the whole domain
val₁ = adaptive_integration(f, I₁; tol = tol/2, steps = steps, degree = degree+2)
val₂ = adaptive_integration(f, I₂; tol = tol/2, steps = steps, degree = degree+2)
int_value +=val₁+val₂
end
end
return int_value
end
struct Observable
B
v::Vector
end
### TODO: Actually some assumptions are made, as the fact that
# the Ulam base is equispaced
function Observable(B::Ulam, ϕ::Function; tol = 2^-10)
v = zeros(Interval{Float64}, length(B))
for i in 1:length(B)
I = Interval(B.p[i], B.p[i+1])
v[i] = adaptive_integration(ϕ, I; tol = tol, steps =1, degree = 2)*length(B)
end
return Observable(B, v)
end
import TaylorSeries
function discretizationlogder(B, D::PwMap; degree = 7)
v = zeros(Interval{Float64}, length(B))
endpoints = D.endpoints
delicate_indexes = Int64.(floor.(length(B)*[x.lo for x in endpoints])).+1
for i in 1:(length(delicate_indexes)-1)
for j in delicate_indexes[i]:delicate_indexes[i+1]-1
I = Interval(B.p[j], B.p[j+1])
r = Interval(radius(I))
Tmid = TaylorSeries.Taylor1([Interval(mid(I)), Interval(1)], degree)
Tint = TaylorSeries.Taylor1([I, Interval(1)], degree)
Fmid = log(TaylorModels.derivative(D.Ts[i](Tmid)))
Fint = log(TaylorModels.derivative(D.Ts[i](Tint)))
ϵ = mag(Fint[degree]-Fmid[degree])
for k in 0:Int64(floor(Float64(degree)/2))
v[j]+=(Fmid[2*k]*r^(2*k+1))/(2*k+1)
end
v[j]+=Interval(-ϵ, ϵ)*r^(degree+1)/(degree+1)
end
end
#correction since the endpoints may be wide intervals
for i in 2:length(endpoints)-1
x = endpoints[i]
Tx = TaylorSeries.Taylor1([x, Interval(1)], 1)
corr = 2*Interval(radius(x))*(abs(log(TaylorModels.derivative(D.Ts[i-1](Tx))))
+abs(log(TaylorModels.derivative(D.Ts[i](Tx)))))[0]
v[delicate_indexes[i]]+=corr
end
v*=length(B)
return Observable(B, v)
end | InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 3459 | module PwDynamicDefinition
using ValidatedNumerics
using ..DynamicDefinition
using ..Contractors
using TaylorSeries: Taylor1
using ..DynamicDefinition: derivative, orientation
export PwMap, preim, nbranches, plottable
"""
Dynamic based on a piecewise monotonic map.
The map is defined as T(x) = Ts[k](x) if x ∈ [endpoints[k], endpoints[k+1]).
`y_endpoints` (kx2 matrix) contains the result of applying Ts to the endpoints of each interval. These can be filled in automatically from `endpoints`,
but sometimes they are known to higher accuracy, for instance for `x -> mod(3x, 1)` we know that it is full-branch exactly.
"""
struct PwMap{FT} <: Dynamic
Ts::FT
endpoints::Vector{Interval{Float64}}
y_endpoints::Matrix{Interval{Float64}}
increasing::Vector{Bool} # these will be filled in automatically, usually
end
Base.show(io::IO, D::PwMap) = print(io, "Piecewise-defined dynamic with $(nbranches(D)) branches")
DynamicDefinition.domain(S::PwMap) = hull(S.endpoints[1], S.endpoints[end])
PwMap(Ts, endpoints) = PwMap(Ts, endpoints, hcat([Ts[k](Interval(endpoints[k])) for k in 1:length(Ts)], [Ts[k](Interval(endpoints[k+1])) for k in 1:length(Ts)]))
PwMap(Ts, endpoints, y_endpoints) = PwMap{typeof(Ts)}(Ts, map(Interval, endpoints), y_endpoints, [unique_increasing(y_endpoints[k,1], y_endpoints[k,2]) for k=1:length(Ts)])
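# Hedged usage example: the doubling map as a piecewise map with two increasing branches,
#
#   D = PwMap([x -> 2x, x -> 2x - 1], [0, 0.5, 1])
#   nbranches(D)   # == 2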
DynamicDefinition.nbranches(D::PwMap) = length(D.endpoints)-1
DynamicDefinition.endpoints(D::PwMap) = D.endpoints
DynamicDefinition.orientation(D::PwMap, k) = D.increasing[k] ? 1. : -1.
DynamicDefinition.is_full_branch(D::PwMap) = all(r == [0.,1.] || r == [1.,0.] for r in eachrow(D.y_endpoints)) # TODO: this assumes domain == [0,1] unnecessarily
function DynamicDefinition.preim(D::PwMap, k, y, ϵ = 1e-15)
@assert 1 <= k <= nbranches(D)
domain = hull(D.endpoints[k], D.endpoints[k+1])
return preimage(y, D.Ts[k], domain, ϵ)
end
"""
Intersect an Interval or TaylorSeries with I
"""
restrict(I, x) = I ∩ x
restrict(I, x::Taylor1) = Taylor1([I ∩ x[0]; x[1:end]], x.order)
"""
function that evaluates the k-th branch of a dynamic on a point x
(assuming it's in its domain, otherwise ∅)
"""
function DynamicDefinition.branch(D::PwMap, k)
return x -> D.Ts[k](restrict(hull(D.endpoints[k], D.endpoints[k+1]), x))
end
# Unused as of now
# """
# hull of an iterable of intervals
# """
# common_hull(S) = interval(minimum(x.lo for x in S), maximum(x.hi for x in S))
# Rather than defining derivatives of a PwMap, we define Taylor1 expansions directly
# and let the generic functions in DynamicDefinition to the work
"""
Function call, and Taylor expansion, of a PwMap.
Note that this ignores discontinuities; users are free to shoot themselves
in the foot and call this on a non-smooth piecewise map. No better solutions for now.
"""
function (D::PwMap)(x::Taylor1)
fx = fill(∅, x.order+1)
x_restricted = deepcopy(x)
for i = 1:length(D.endpoints)-1
x_restricted[0] = x[0] ∩ hull(D.endpoints[i],D.endpoints[i+1])
if !isempty(x_restricted[0])
fx_restricted = D.Ts[i](x_restricted)
fx = fx .∪ fx_restricted.coeffs
end
end
@debug "Piecewise f($(x)) = $(Taylor1(fx, x.order))"
return Taylor1(fx, x.order)
end
function DynamicDefinition.plottable(D::PwMap, x)
@assert 0 <= x <= 1
for k in 1:nbranches(D)
domain = hull(D.endpoints[k], D.endpoints[k+1])
if x in domain
return D.Ts[k](x)
end
end
end
using RecipesBase
@recipe f(::Type{PM}, D::PM) where {PM <: PwMap} = x -> plottable(D, x)
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 6009 | using ..BasisDefinition, ..DynamicDefinition, ..Contractors, ..Mod1DynamicDefinition, ..PwDynamicDefinition
using ValidatedNumerics, LinearAlgebra
import Base: iterate
import ..BasisDefinition: one_vector, integral_covector, is_integral_preserving, strong_norm, weak_norm, aux_norm
"""
Equispaced Ulam basis on [0,1] of size n
"""
struct Ulam{T<:AbstractVector} <:Basis
p::T
# TODO: check in constructor that p is sorted, starts with 0 and ends with 1
end
Ulam(n::Integer) = Ulam(LinRange(0., 1., n+1))
Base.length(B::Ulam) = length(B.p) - 1
function BasisDefinition.is_dual_element_empty(::Ulam, d)
return isempty(d[1])
end
Base.length(S::DualComposedWithDynamic{<:Ulam, <:Dynamic}) = length(S.basis) * nbranches(S.dynamic)
"""
Returns dual elements which are pairs (i, (a,b))
i is an interval index, and (a,b) are the endpoints of its preimage
"""
function Base.iterate(S::DualComposedWithDynamic{<:Ulam, <:Dynamic}, state = (1, 1, nothing))
# a = preim(S.dynamic, k, S.basis.p[i], S.ϵ) is cached in the state (when i \neq 1)
i, k, a = state
if k == nbranches(S.dynamic)+1
return nothing
end
if a === nothing
@assert i==1
a = preim(S.dynamic, k, S.basis.p[i], S.ϵ)
end
# a = preim(S.dynamic, k, S.basis.p[i], S.ϵ) # moved into state
b = preim(S.dynamic, k, S.basis.p[i+1], S.ϵ)
if isempty(a) && !isempty(b)
ep = endpoints(S.dynamic)
if orientation(S.dynamic, k) > 0
a = convert(typeof(b), ep[k])
else
a = convert(typeof(b), ep[k+1])
end
elseif isempty(b) && !isempty(a)
ep = endpoints(S.dynamic)
if orientation(S.dynamic, k) > 0
b = convert(typeof(a), ep[k+1])
else
b = convert(typeof(a), ep[k])
end
end
if i == length(S.basis)
return ((i, (a, b)), (1, k+1, nothing))
else
return ((i, (a, b)), (i+1, k, b))
end
end
Base.eltype(f::DualComposedWithDynamic{<:Ulam, <:Dynamic}) = Tuple{Int64,Tuple{Interval{Float64},Interval{Float64}}}
"""
Returns the indices of the elements of the Ulam basis that intersect with the interval y
We do not assume an order of a and b; this should not matter unless
the preimages are computed with very low accuracy
We assume, though, that y comes from the (possibly inexact) numerical approximation
of an interval in [0,1], i.e., we restrict to y ∩ [0,1]
"""
function BasisDefinition.nonzero_on(B::Ulam, (a, b))
y = hull(a, b)
# finds in which semi-open interval [p[k], p[k+1]) y.lo and y.hi fall
lo = searchsortedlast(B.p, y.lo)
hi = searchsortedlast(B.p, y.hi)
# they may be n+1 if y.hi==1
lo = clamp(lo, 1, length(B))
hi = clamp(hi, 1, length(B))
return (lo, hi)
end
"""
Relative measure of the intersection of (a,b) wrt the whole interval (c,d)
Assumes that a,b and c,d are sorted correctly
"""
function relative_measure((a,b)::Tuple{<:Interval,<:Interval}, (c,d)::Tuple{<:Interval,<:Interval})
# this is a bit lazy because we could compute orientations and tell which is which
# but it won't matter unless a,b are computed with very low precision
a, b = min(a,b), max(a,b)
lower = max(a, c)
upper = min(b, d)
intersection = max(upper - lower, 0) / (d-c)
return intersection
end
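# Worked example: the interval (0.25, 0.75) covers half of (0, 1), so
#
#   relative_measure((Interval(0.25), Interval(0.75)), (Interval(0.0), Interval(1.0)))   # == 0.5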
"""
Given a preimage of an interval ```I_i```, this iterator returns
its relative intersection with all the elements of the Ulam basis that
have nonzero intersection with it
"""
function Base.iterate(S::ProjectDualElement{BT,DT}, state = S.j_min) where {BT<:Ulam,DT}
if state == S.j_max+1
return nothing
end
j = state
x = relative_measure(S.dual_element,
(Interval(S.basis.p[j]),
Interval(S.basis.p[j+1])))
return (j, x), state+1
end
Base.eltype(f::ProjectDualElement{<:Ulam, DT}) where{DT} = Tuple{Int64,Interval{Float64}}
BasisDefinition.evaluate(B::Ulam{T}, i, x) where {T} = (x > (i-1)/length(B)) && (x < i/length(B)) ? 1 : 0 # assumes an equispaced partition
BasisDefinition.evaluate_integral(B::Ulam{S}, i, T::Type) where{S} = T(i)/length(B)
function Base.iterate(S::AverageZero{Ulam{T}}, state = 1) where{T}
n = length(S.basis)
if state == n
return nothing
end
v = zeros(Float64, n)
v[1] = 1
v[state+1]=-1
return (v, state+1)
end
BasisDefinition.is_refinement(Bf::Ulam, Bc::Ulam) = Bc.p ⊆ Bf.p
function integral_covector(B::Ulam{T}) where{T}
n = length(B)
return 1/n * ones(Interval{Float64}, n)'
end
is_integral_preserving(B::Ulam{T}) where {T} = true
function one_vector(B::Ulam)
return ones(length(B))
end
BasisDefinition.strong_norm(B::Ulam) = TotalVariation
BasisDefinition.weak_norm(B::Ulam) = L1
BasisDefinition.aux_norm(B::Ulam) = L1
# See BasisDefinition for docs on these constants
# These must be rounded up correctly!
BasisDefinition.weak_projection_error(B::Ulam) = 0.5 ⊘₊ Float64(length(B), RoundDown)
BasisDefinition.aux_normalized_projection_error(B::Ulam) = 0.
BasisDefinition.strong_weak_bound(B::Ulam) = Float64(length(B), RoundUp)
BasisDefinition.aux_weak_bound(B::Ulam) = 1.
BasisDefinition.weak_by_strong_and_aux_bound(B::Ulam) = (0., 1.)
BasisDefinition.bound_weak_norm_from_linalg_norm(B::Ulam) = (1., 0.)
BasisDefinition.bound_linalg_norm_L1_from_weak(B::Ulam) = 1.
BasisDefinition.bound_linalg_norm_L∞_from_weak(B::Ulam) = Float64(length(B), RoundUp)
BasisDefinition.bound_weak_norm_abstract(B::Ulam) = 1.
function BasisDefinition.invariant_measure_strong_norm_bound(B::Ulam, D::Dynamic)
    A, C = dfly(strong_norm(B), aux_norm(B), D) # second output renamed to C to avoid shadowing the basis B
    @assert A < 1.
    return C ⊘₊ (1. ⊖₋ A)
end
using RecipesBase
using LaTeXStrings
"""
Plots a function in the Ulam basis
"""
@recipe function f(B::Ulam, w::AbstractVector)
legend --> :bottomright
if eltype(w) <: Interval
w = mid.(w)
end
@series begin
seriestype --> :steppost
label --> L"f_{\delta}"
ylims --> (0, NaN)
B.p, vcat(w, w[end])
end
end
"""
Displays error on a function in the Ulam basis
The w argument is unused, but kept for compatibility with other functions
for different bases
"""
@recipe function f(B::Ulam, error::Number, w=nothing)
if isfinite(error)
@series begin
seriestype --> :path
seriesalpha --> 0.5
fillrange --> 0
label --> "L1 Error"
[0; sqrt(error)], [sqrt(error); sqrt(error)]
end
end
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 3733 | """
Modifies the implementation of sinpi() and cospi() from IntervalArithmetic
to improve it so that sinpi(1) == 0
"""
using ValidatedNumerics
using ValidatedNumerics.IntervalArithmetic: atomic, SVector, @round
function find_quadrantspi(x::T) where {T}
temp = IntervalArithmetic.atomic(Interval{T}, x) * 2
return IntervalArithmetic.SVector(floor(temp.lo), floor(temp.hi))
end
sinpi(a...) = Base.Math.sinpi(a...)
function sinpi(a::Interval{T}) where T
isempty(a) && return a
whole_range = Interval{T}(-1, 1)
diam(a) > 2 && return whole_range
    # The following is equivalent to doing temp = a / half_pi and
    # taking floor(temp.lo), floor(temp.hi)
lo_quadrant = minimum(find_quadrantspi(a.lo))
hi_quadrant = maximum(find_quadrantspi(a.hi))
if hi_quadrant - lo_quadrant > 4 # close to limits
return whole_range
end
lo_quadrant = mod(lo_quadrant, 4)
hi_quadrant = mod(hi_quadrant, 4)
# Different cases depending on the two quadrants:
if lo_quadrant == hi_quadrant
a.hi - a.lo > 1 && return whole_range # in same quadrant but separated by almost 2pi
lo = @round(sinpi(a.lo), sinpi(a.lo)) # Interval(sin(a.lo, RoundDown), sin(a.lo, RoundUp))
hi = @round(sinpi(a.hi), sinpi(a.hi)) # Interval(sin(a.hi, RoundDown), sin(a.hi, RoundUp))
return hull(lo, hi)
elseif lo_quadrant==3 && hi_quadrant==0
return @round(sinpi(a.lo), sinpi(a.hi)) # Interval(sin(a.lo, RoundDown), sin(a.hi, RoundUp))
elseif lo_quadrant==1 && hi_quadrant==2
return @round(sinpi(a.hi), sinpi(a.lo)) # Interval(sin(a.hi, RoundDown), sin(a.lo, RoundUp))
elseif ( lo_quadrant == 0 || lo_quadrant==3 ) && ( hi_quadrant==1 || hi_quadrant==2 )
return @round(min(sinpi(a.lo), sinpi(a.hi)), 1)
# Interval(min(sin(a.lo, RoundDown), sin(a.hi, RoundDown)), one(T))
elseif ( lo_quadrant == 1 || lo_quadrant==2 ) && ( hi_quadrant==3 || hi_quadrant==0 )
return @round(-1, max(sinpi(a.lo), sinpi(a.hi)))
# Interval(-one(T), max(sin(a.lo, RoundUp), sin(a.hi, RoundUp)))
else #if( lo_quadrant == 0 && hi_quadrant==3 ) || ( lo_quadrant == 2 && hi_quadrant==1 )
return whole_range
end
end
cospi(a...) = Base.Math.cospi(a...)
function cospi(a::Interval{T}) where T
isempty(a) && return a
whole_range = Interval(-one(T), one(T))
diam(a) > 2 && return whole_range
lo_quadrant = minimum(find_quadrantspi(a.lo))
hi_quadrant = maximum(find_quadrantspi(a.hi))
if hi_quadrant - lo_quadrant > 4 # close to limits
return Interval(-one(T), one(T))
end
lo_quadrant = mod(lo_quadrant, 4)
hi_quadrant = mod(hi_quadrant, 4)
# Different cases depending on the two quadrants:
if lo_quadrant == hi_quadrant # Interval limits in the same quadrant
a.hi - a.lo > 1 && return whole_range
lo = @round(cospi(a.lo), cospi(a.lo))
hi = @round(cospi(a.hi), cospi(a.hi))
return hull(lo, hi)
elseif lo_quadrant == 2 && hi_quadrant==3
return @round(cospi(a.lo), cospi(a.hi))
elseif lo_quadrant == 0 && hi_quadrant==1
return @round(cospi(a.hi), cospi(a.lo))
elseif ( lo_quadrant == 2 || lo_quadrant==3 ) && ( hi_quadrant==0 || hi_quadrant==1 )
return @round(min(cospi(a.lo), cospi(a.hi)), 1)
elseif ( lo_quadrant == 0 || lo_quadrant==1 ) && ( hi_quadrant==2 || hi_quadrant==3 )
return @round(-1, max(cospi(a.lo), cospi(a.hi)))
else#if ( lo_quadrant == 3 && hi_quadrant==2 ) || ( lo_quadrant == 1 && hi_quadrant==0 )
return whole_range
end
end
# These definitions were missing, too
using TaylorSeries: Taylor1
sinpi(x::Taylor1) = sin(x*π)
cospi(x::Taylor1) = cos(x*π)
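# Quick sanity sketch (added for exposition): the point of the patched versions
# above is that integer arguments land exactly on the zeros/extrema of sin(πx)
# and cos(πx), so the enclosures below are thin intervals around 0 and 1.
function _demo_sinpi_exact()
    @assert 0 ∈ sinpi(Interval(1.0))
    @assert 1 ∈ cospi(Interval(2.0))
    return true
end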
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 567 | # following the tutorial on https://julialang.org/blog/2021/01/precompile_tutorial/
# we implement some precompile solutions
precompile(DiscretizedOperator, (Ulam, PwMap))
precompile(DiscretizedOperator, (Hat, PwMap))
precompile(powernormbounds, (Ulam, PwMap, Int64, Int64, IntegralPreservingDiscretizedOperator))
precompile(powernormbounds, (Hat, PwMap, Int64, Int64, NonIntegralPreservingDiscretizedOperator))
precompile(invariant_vector, (Ulam, IntegralPreservingDiscretizedOperator))
precompile(invariant_vector, (Hat, NonIntegralPreservingDiscretizedOperator))
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 11029 | """
Compute preimages of monotonic sequences
"""
using IntervalArithmetic
using .Contractors
"""
Type used to represent a "branch" of a dynamic. The branch is represented by a monotonic map `f` with domain `X=(a,b)` with a≤b (where typically a,b are intervals).
`Y=(f(a),f(b))` and `increasing` may be provided (for instance if we know that `Y=(0,1)`), otherwise they are computed automatically.
"""
struct Branch{T,S}
f::T
X::Tuple{S, S}
Y::Tuple{S, S}
increasing::Bool
end
Branch(f, X, Y=(f(Interval(X[1])), f(Interval(X[2]))), increasing=unique_increasing(Y[1], Y[2])) = Branch{typeof(f), typeof(interval(X[1]))}(f, X, Y, increasing)
"""
Return Branches for a given PwMap, in an iterable.
TODO: in future, maybe it is a better idea to replace the type PwMap directly with an array of branches, since that's all we need
"""
function branches(D::PwMap)
return [Branch(D.Ts[k], (D.endpoints[k], D.endpoints[k+1]), (D.y_endpoints[k,1], D.y_endpoints[k,2]), D.increasing[k]) for k in 1:length(D.Ts)]
end
"""
Smallest possible i such that a is in the semi-open interval [y[i], y[i+1]).
This should work properly even if `a, y` are intervals; in this case it returns the *smallest* possible value of i over all possible "assignments" of a, y inside those intervals.
Assumes y is sorted, i.e., map(y, x->Interval(x).lo) and map(y, x->Interval(x).hi) are sorted.
"""
function first_overlapping(y, a)
    if iszero(a) # normalize -0.0 to 0.0
        a = zero(a)
    end
searchsortedlast(y, Interval(a).lo, by=x->Interval(x).hi)
end
"""
Largest possible j such that a-ε is in the semi-open interval [y[j], y[j+1]).
This should work properly even if `a, y` are intervals; in this case it returns the *largest* possible value of i over all possible "assignments" of a, y inside those intervals.
Assumes y is sorted, i.e., map(y, x->Interval(x).lo) and map(y, x->Interval(x).hi) are sorted.
"""
function last_overlapping(y, a)
    if iszero(a) # normalize -0.0 to 0.0
        a = zero(a)
    end
searchsortedfirst(y, Interval(a).hi, by=x->Interval(x).lo) - 1
end
"""
Construct preimages of an increasing array y under a monotonic branch defined on X = (a, b), propagating additional labels `ylabel`
The sequence y subdivides the y-axis into semi-open intervals [y[l], y[l+1]); each of them is identified by the label `ylabel[l]`. We construct an increasing sequence
x that splits X (in the x-axis) into semi-open intervals, each of them with f([x[k], x[k+1]) ⊂ [y[l], y[l+1]) for a certain l.
We set xlabel[k] = ylabel[l], and return the pair (x, xlabel).
In the simplest case where D is full-branch, the points in x are preimages of the points in y, but in the general case they can also include D.endpoints:
in general, there may be a certain number of points in y that have no preimage at the beginning and the end of the sequence, because
they fall out of the range R = [f(a), f(b)]. In the worst case, no point has a preimage, because y[i] < R < y[i+1] for some
i (or vice versa with orientations), and in this case we just return the 1-element vectors x = [branch.X[1]] and xlabel = [i].
x[begin] always coincides with branch.X[1], while branch.X[2] is "the point after x[end]", and is not stored explicitly in x, for easier composing.
In this way x and xlabel have the same length.
This function fills the array by using a bisection strategy to save computations: if y ∈ [a,b], then f⁻¹(y) ∈ [f⁻¹(a),f⁻¹(b)] (paying attention to orientation).
So we can fill v by filling in first entries `v[k+1]` with higher dyadic valuation of k.
For a dynamic with multiple branches, preimages(y, D) is simply the concatenation of x, xlabel for b in all branches. These values still form an increasing sequence that
splits X into intervals, each of which is mapped into a different semi-open interval [y[k], y[k+1]).
"""
function preimages(y, br::Branch, ylabel = 1:length(y), ϵ = 0.0)
if br.increasing
i = first_overlapping(y, br.Y[1]) # smallest possible i such that a is in the semi-open interval [y[i], y[i+1]).
j = last_overlapping(y, br.Y[2]) # largest possible j such that b-ε is in the semi-open interval [y[j], y[j+1]).
n = j - i + 1
x = fill((-∞..∞)::typeof(Interval(br.X[1])), n)
xlabel = collect(ylabel[i:j]) # we collect to avoid potential type instability, since this may be an UnitRange while in the other branch we could have a StepRange
x[1] = br.X[1]
if n == 1
return (x, xlabel)
end
# the bisection strategy: fill the array in "strides" of length `stride`, then halve the stride and repeat
# for instance, if the array is 1..13 (with x[1] filled in already), we first take stride=8 and fill in x[9],
# then stride=4 and fill in x[5], x[13], (note that the distance is `2stride`, since x[9], and in general all the even multiples of `stride`, is already filled in)
# then stride=2 and fill in x[3], x[7], x[11],
# then stride=1 and fill in x[2], x[4], x[6], x[8], x[10], x[12]
# at each step we have bracketed the preimage in a "search range" given by already-computed preimages x[k-stride] and x[k+stride].
stride = prevpow(2, n-1)
while stride >= 1
# fill in v[i] using x[i-stride].lo and x[i+stride].hi as range for the preimage search
for k = 1+stride:2*stride:n
search_range = Interval(x[k-stride].lo, (k+stride <= n ? x[k+stride] : Interval(br.X[2])).hi)
x[k] = preimage(y[i-1+k], br.f, search_range, ϵ)
end
stride = stride ÷ 2
end
    else # branch decreasing
        i = last_overlapping(y, br.Y[1]) # largest possible i such that f(a)-ε is in the semi-open interval [y[i], y[i+1]).
        j = first_overlapping(y, br.Y[2]) # smallest possible j such that f(b) is in the semi-open interval [y[j], y[j+1]).
n = i - j + 1
x = fill((-∞..∞)::typeof(Interval(br.X[1])), n)
xlabel = collect(ylabel[i:-1:j])
x[1] = br.X[1]
if n == 1
return (x, xlabel)
end
stride = prevpow(2, n-1)
while stride >= 1
# fill in v[i] using x[i-stride].lo and x[i+stride].hi as range for the preimage search
for k = 1+stride:2*stride:n
search_range = Interval(x[k-stride].lo, (k+stride <= n ? x[k+stride] : Interval(br.X[2])).hi)
x[k] = preimage(y[i+2-k], br.f, search_range, ϵ)
end
stride = stride ÷ 2
end
end
return (x, xlabel)
end
function preimages(y, D::Dynamic, ylabel = 1:length(y), ϵ = 0.0)
results = collect(preimages(y, b, ylabel, ϵ) for b in branches(D))
x = vcat((result[1] for result in results)...)
xlabel = vcat((result[2] for result in results)...)
return x, xlabel
end
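# Minimal usage sketch (mirrors the package's unit tests): preimages of a
# uniform grid under the doubling map, concatenated branch by branch.
function _demo_preimages()
    D = mod1_dynamic(x -> 2x)
    x, xlabel = preimages([0, 0.25, 0.5, 0.75], D)
    # x ≈ 0:1/8:7/8 and xlabel repeats [1, 2, 3, 4] once per branch
    return x, xlabel
end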
"""
Compute preimages of D *and* the derivatives f'(x) in each point.
Returns: x, xlabel, x′
Assumes that the dynamic is full-branch, because otherwise things may compose the wrong way.
This is not restrictive because we'll need it only for the Hat assembler (at the moment)
We combine them in a single function because there are avenues to optimize by recycling some computations (not completely exploited for now)
"""
function preimages_and_derivatives(y, br::Branch, ylabel = 1:length(y), ϵ = 0.0)
x, xlabel = preimages(y, br, ylabel, ϵ)
f′ = Contractors.derivative(br.f)
x′ = f′.(x)
return x, xlabel, x′
end
function preimages_and_derivatives(y, D::Dynamic, ylabel = 1:length(y), ϵ = 0.0)
@assert is_full_branch(D)
results = collect(preimages_and_derivatives(y, b, ylabel, ϵ) for b in branches(D))
x = vcat((result[1] for result in results)...)
xlabel = vcat((result[2] for result in results)...)
x′ = vcat((result[3] for result in results)...)
return x, xlabel, x′
end
"""
Composed map D1 ∘ D2 ∘ D3. We store [D1, D2, D3] in this order.
We overload ∘ from Base, so one can simply write D1 ∘ D2 or ∘(D1, D2, D3) to construct them.
"""
struct ComposedDynamic <: Dynamic
dyns::Tuple{Vararg{Dynamic}}
end
Base.:∘(d::Dynamic...) = ComposedDynamic(d)
"""
Utility function to return the domain of a dynamic
"""
domain(D::PwMap) = (D.endpoints[begin], D.endpoints[end])
domain(D::ComposedDynamic) = domain(D.dyns[end])
function preimages(z, Ds::ComposedDynamic, zlabel = 1:length(z), ϵ = 0.0)
for d in Ds.dyns
z, zlabel = preimages(z, d, zlabel, ϵ)
end
return z, zlabel
end
function preimages_and_derivatives(z, Ds::ComposedDynamic, zlabel = 1:length(z), ϵ = 0.0)
derivatives = fill(1, length(z))
for d in Ds.dyns
z, zindex, z′ = preimages_and_derivatives(z, d, 1:length(z), ϵ)
zlabel = zlabel[zindex]
derivatives = derivatives[zindex] .* z′
end
return z, zlabel, derivatives
end
"""
Replacement of DualComposedWithDynamic.
"""
abstract type Dual end
struct UlamDual <: Dual
x::Vector{Interval} #TODO: a more generic type may be needed in future
xlabel::Vector{Int}
lastpoint::Interval
end
Dual(B::Ulam, D, ϵ) = UlamDual(preimages(B.p, D, 1:length(B.p)-1, ϵ)..., domain(D)[end])
Base.length(dual::UlamDual) = length(dual.x)
Base.eltype(dual::UlamDual) = Tuple{eltype(dual.xlabel), Tuple{eltype(dual.x), eltype(dual.x)}}
function Base.iterate(dual::UlamDual, state = 1)
n = length(dual.x)
if state < n
return (dual.xlabel[state], (dual.x[state], dual.x[state+1])), state+1
elseif state == n
return (dual.xlabel[n], (dual.x[n], dual.lastpoint)), state+1
else
return nothing
end
end
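# Sketch (added for exposition): for the doubling map on Ulam(4), each of the
# 4 basis intervals has one preimage pair per branch, so the dual has 8
# elements of the form (i, (a, b)).
function _demo_ulam_dual()
    duals = collect(Dual(Ulam(4), mod1_dynamic(x -> 2x), 0.0))
    return length(duals) # expected 8
end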
struct HatDual <: Dual
x::Vector{Interval} #TODO: a more generic type may be needed in future
xlabel::Vector{Int}
x′::Vector{Interval}
end
Dual(B::Hat, D, ϵ) = HatDual(preimages_and_derivatives(B.p, D, 1:length(B.p)-1, ϵ)...)
Base.length(dual::HatDual) = length(dual.x)
Base.eltype(dual::HatDual) = Tuple{eltype(dual.xlabel), Tuple{eltype(dual.x), eltype(dual.x′)}}
function Base.iterate(dual::HatDual, state=1)
if state <= length(dual.x)
return ((dual.xlabel[state], (dual.x[state], abs(dual.x′[state]))), state+1)
else
return nothing
end
end
# Variants of assemble and DiscretizedOperator; the code is repeated here for easier comparison with the older algorithm
function assemble2(B, D, ϵ=0.0; T = Float64)
I = Int64[]
J = Int64[]
nzvals = Interval{T}[]
n = length(B)
# TODO: reasonable size hint?
for (i, dual_element) in Dual(B, D, ϵ)
if !is_dual_element_empty(B, dual_element)
for (j, x) in ProjectDualElement(B, dual_element)
push!(I, i)
push!(J, mod(j,1:n))
push!(nzvals, x)
end
end
end
return sparse(I, J, nzvals, n, n)
end
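# End-to-end sketch (added for exposition): assembling the Ulam discretization
# of the doubling map with the new duals; the result should agree with the
# classic `assemble` up to interval enclosures.
function _demo_assemble2()
    P = assemble2(Ulam(8), mod1_dynamic(x -> 2x))
    return P # 8×8 sparse interval matrix with entries enclosing 0.5
end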
function DiscretizedOperator2(B, D, ϵ=0.0; T = Float64)
L = assemble2(B, D, ϵ; T)
if is_integral_preserving(B)
return IntegralPreservingDiscretizedOperator(L)
else
f = integral_covector(B)
e = one_vector(B)
w = f - f*L #will use interval arithmetic when L is an interval matrix
return NonIntegralPreservingDiscretizedOperator(L, e, w)
end
end | InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 4334 | """
Implement some ball matrix arithmetic, i.e., verified matrix operations
for (abstract) interval matrices stored in midpoint-radius format.
The 'radius' for a matrix can be either a matrix of the same dimension, or
a scalar which represents a norm.
A type parameter specifies the "norm type" for the radius. It can be `:Abs` for
entrywise radii, or `OpnormL1`, `OpnormLinf` for normwise radii (specifying which norm to use)
"""
using FastRounding
abstract type NormKind end
struct OpnormL1 <: NormKind end
struct OpnormLinf <: NormKind end
struct Abs <: NormKind end
"""
Certified upper bound to ||A||, abs(A), or whatever is used in the specified radius
"""
function normbound(A::AbstractVecOrMat{T}, ::OpnormL1) where T
# partly taken from JuliaLang's LinearAlgebra/src/generic.jl
Tnorm = typeof(float(real(zero(T))))
Tsum = promote_type(Float64, Tnorm)
nrm::Tsum = 0
@inbounds begin
for j = 1:size(A, 2)
nrmj::Tsum = 0
for i = 1:size(A, 1)
nrmj = nrmj ⊕₊ abs(A[i,j])
end
nrm = max(nrm,nrmj)
end
end
return convert(Tnorm, nrm)
end
function normbound(A::AbstractVecOrMat{T}, ::OpnormLinf) where T
# partly taken from JuliaLang's LinearAlgebra/src/generic.jl
Tnorm = typeof(float(real(zero(T))))
Tsum = promote_type(Float64, Tnorm)
nrm::Tsum = 0
@inbounds begin
for i = 1:size(A, 1)
nrmi::Tsum = 0
for j = 1:size(A, 2)
nrmi = nrmi ⊕₊ abs(A[i,j])
end
nrm = max(nrm,nrmi)
end
end
return convert(Tnorm, nrm)
end
function normbound(A::AbstractArray{T}, ::Abs) where T
return abs.(A)
end
radiustype(MatrixType, ::OpnormLinf) = Float64
radiustype(MatrixType, ::OpnormL1) = Float64
radiustype(MatrixType, ::Abs) = MatrixType
"""
Type for ball arrays. Comes in two variants, one with cached normbound(midpoint)
(handy if you have to compute many products with it) and one without.
"""
abstract type BallArray{N<:NormKind, MatrixType<:Union{AbstractArray,Number}, RadiusType<:Union{AbstractArray, Number}} end
struct BallArrayWithoutCachedNorm{N, MatrixType, RadiusType} <: BallArray{N, MatrixType, RadiusType}
midpoint::MatrixType
radius::RadiusType
function BallArrayWithoutCachedNorm{N, MatrixType, RadiusType}(midpoint, radius=zero(RadiusType)) where {N, MatrixType, RadiusType}
if !isa(radius, radiustype(MatrixType, N()))
error("Wrong radius type specified")
end
return new(midpoint, radius)
end
end
struct BallArrayWithCachedNorm{N, MatrixType, RadiusType} <: BallArray{N, MatrixType, RadiusType}
midpoint::MatrixType
radius::RadiusType
norm::RadiusType
function BallArrayWithCachedNorm{N, MatrixType, RadiusType}(midpoint, radius=zero(RadiusType)) where {N, MatrixType, RadiusType}
if !isa(radius, radiustype(MatrixType, N()))
error("Wrong radius type specified")
end
return new{N, MatrixType, RadiusType}(midpoint, radius, normbound(midpoint, N()))
end
end
function BallArray{N}(midpoint, radius) where {N}
return BallArrayWithoutCachedNorm{N, typeof(midpoint), typeof(radius)}(midpoint, radius)
end
function BallArrayWithCachedNorm{N}(midpoint, radius) where {N}
return BallArrayWithCachedNorm{N, typeof(midpoint), typeof(radius)}(midpoint, radius)
end
function midpoint_norm(A::BallArrayWithCachedNorm)
return A.norm
end
function midpoint_norm(A::BallArrayWithoutCachedNorm{N, MT, RT}) where {N, MT, RT}
    return normbound(A.midpoint, N())
end
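# Usage sketch (hypothetical values): a 2×2 ball matrix with a normwise L∞
# radius; the cached variant precomputes the norm bound of the midpoint once.
function _demo_ballarray()
    A = BallArrayWithCachedNorm{OpnormLinf}([1.0 2.0; 3.0 4.0], 1e-10)
    return midpoint_norm(A) # an upper bound for opnorm(midpoint, Inf) == 7
end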
"""
Product between ball matrices / vectors.
"""
function Base.:*(A::BallArray, B::BallArray)
    # TODO: bound the product radius using midpoint_norm(A) and the radius of B
    # (e.g., via A*maximum(B, dims=2)); not implemented yet.
    error("product of BallArrays is not implemented yet")
end
# TODO: using this file for tests
function matvec(A, v)
w = zero(v)
rows = rowvals(A)
vals = nonzeros(A)
m, n = size(A)
for j = 1:n
for k in nzrange(A, j)
@inbounds i = rows[k]
@inbounds Aij = vals[k]
@inbounds w[i] += Aij * v[j]
end
    end
    return w
end
function matvec_roundup(A, v)
w = zero(v)
rows = rowvals(A)
vals = nonzeros(A)
m, n = size(A)
for j = 1:n
for k in nzrange(A, j)
@inbounds i = rows[k]
@inbounds Aij = vals[k]
@inbounds w[i] = w[i] ⊕₊ Aij ⊗₊ v[j]
end
    end
    return w
end
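# Sketch (added for exposition; assumes SparseArrays is loaded in the enclosing
# module, as the functions above require): on a diagonal matrix the rounded-up
# product encloses the exact result from above, entrywise.
function _demo_matvec_roundup()
    A = sparse([1, 2], [1, 2], [2.0, 3.0])
    v = [1.0, 1.0]
    return matvec_roundup(A, v) # each entry ≥ the exact value [2.0, 3.0]
end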
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 1633 | """
Generic function to estimate the decay time on a single vector.
Args:
	B: basis, used to compute the weak norm
	PP: floating-point approximation of the discretized matrix to use
	v: vector to iterate
	M, K (reals, rounded up): constants such that ||P-PP|| ≤ K and ||Pⁿ|| ≤ M,
	where P is the exact discretized matrix.
	target: norm threshold
Returns:
	n such that ||PPⁿ v|| ≤ target.
Raises:
	an error if insufficient precision is detected
"""
function vector_decay_time(B::Basis, PP, v, M=0, K=0, target = 0.5)
current_norm = weak_norm(B, v)
error_on_computed_norm = zero(M)
n = 0
MK = M * K
error_propagation_constant = K #this constant is K at the first step and MK afterwards; see notes
while current_norm + error_on_computed_norm >= target
n += 1
v = PP * v
current_norm = weak_norm(B, v)
error_on_computed_norm += error_propagation_constant * current_norm
error_propagation_constant = MK
if error_on_computed_norm > target
            error("Insufficient precision")
end
end
return n
end
"""
Number of iterations needed to contract all vectors in `basis.contracting_pairs()` to a given target alpha
"""
function decay_time(D::Dynamic, B::Basis, P::AbstractMatrix{Interval{T}}, alpha = 0.5, n_jobs = 1) where {T}
PP = mid.(P)
M = bound_on_norms_of_powers(B, D, project_left=True, project_right=True)
K = numerical_error(B, D, P, PP)
my_norm(v) = weak_norm(B, v)
#alpha = Rdown(alpha) # we need a lower bound to alpha*s to make sure we contract below it
#decay_times = Parallel(n_jobs=n_jobs, verbose=1)(delayed(vector_decay_time)(PP, v, my_norm, M, K, alpha*s) for v, s in basis.contracting_pairs())
return max(decay_times)
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 4621 | """
Compute preimages of monotonic sequences
"""
using IntervalArithmetic
using .Contractors
"""
Strict-precedes on intervals, extended to non-intervals
"""
lt(a::Interval, b::Interval) = strictprecedes(a, b)
lt(a, b::Interval) = a < b.lo
lt(a::Interval, b) = a.hi < b
lt(a, b) = a < b
gt(a, b) = lt(b, a)
"""
Computes the pair (skip, last), where `skip` is the number of elements
at the beginning of `seq.v` that do not intersect with `X`, and `last` is the
last element after `skip` to intersect with `X`.
If `X` falls entirely between `v[i]` and `v[i+1]`, then (i, i) is returned.
"""
function skipandlast(seq, X)
cmp = ifelse(seq.increasing, lt, gt)
return (searchsortedfirst(seq.v, X, lt=cmp) - 1, searchsortedlast(seq.v, X, lt=cmp))
end
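# Sketch (added for exposition): with the increasing sequence below and
# X = [0.3, 0.6], two points strictly precede X and v[3] = 0.5 is the last
# point not past it, so (skip, last) == (2, 3).
function _demo_skipandlast()
    seq = PointSequence([0.0, 0.25, 0.5, 0.75, 1.0], 0, true)
    return skipandlast(seq, Interval(0.3, 0.6)) # expected (2, 3)
end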
"""
Type used to store monotonic sequences of preimages. `skip` is a certain number of initial elements that are skipped
with respect to an original reference array: for instance, preimages of a certain vector `y = [y(1), y(2), y(3), y(4), y(5)]`
may only contain f^{-1}(y(3)) and f^{-1}(y(4)), so we set skip=2 and construct a v of length 2.
`increasing` tells if the monotonic sequence `v` is increasing (true) or decreasing (false)
"""
struct PointSequence{T<:AbstractVector}
v::T
skip::Int
increasing::Bool
end
PointSequence(v, skip=0, increasing=unique_increasing(v[begin], v[end])) = PointSequence{typeof(v)}(v, skip, increasing)
"""
Type used to represent a "branch" of a dynamic. The branch is represented by a monotonic map `f` with domain `X=(a,b)` with a≤b (where typically a,b are intervals).
`Y=(f(a),f(b))` and `increasing` may be provided (for instance if we know that `Y=(0,1)`), otherwise they are computed automatically.
"""
struct Branch{T,S}
f::T
X::Tuple{S, S}
Y::Tuple{S, S}
increasing::Bool
end
Branch(f, X, Y=(f(Interval(X[1])), f(Interval(X[2]))), increasing=unique_increasing(Y[1], Y[2])) = Branch{typeof(f), typeof(X[1])}(f, X, Y, increasing)
"""
Construct preimages of a monotonic array y under a monotonic function f in a domain X.
In general, there may be a certain number of points in y that have no preimage at the beginning and the end of the sequence, because
they fall out of the interval R = [f(X.lo), f(X.hi)]. In the worst case, no point has a preimage, because y[i] < R < y[i+1] for some
i (or vice versa with orientations).
We return a PointSequence where `v` is a vector of preimages, and `skip` is the number of elements of y that have no preimage
at the *beginning* of the interval. So for instance if f and y are increasing but y[i] < R < y[i+1], we return skip=i and v = [].
The number of intervals that have no image at the *end* of the interval is then `length(y) - length(v) - skip`
Rationale: this is more efficient than returning a vector with a lot of empty intervals at the beginning/end, and callers
need to know how many of the empty values come from the *beginning* of the array rather than its end.
Fills the array by using a bisection strategy to save computations: if y ∈ [a,b], then f⁻¹(y) ∈ [f⁻¹(a),f⁻¹(b)] (paying attention to orientation).
So we can fill v by filling in first entries `v[k+1]` with higher dyadic valuation of k.
Currently this works only for 1-based 1-dimensional arrays y.
TODO: Idea: replace this with a function that returns an *increasing* vector v that is guaranteed to start/end with v = (a,...,b), and a sequence of indices `locs` like k:m or m:-1:k
such that v[i], v[i+1] is inside `locs[i]` for all `i`. This constructs "slices" more explicitly, and composes much better. Unclear: how to keep 'full-branched' info in this framework?
"""
function preimages(seq, branch, ϵ = 0.0)
v_increasing = !(seq.increasing ⊻ branch.increasing)
(skip, last) = skipandlast(seq, hull(branch.Y...))
n = last - skip
v = fill((-∞..∞)::typeof(Interval(branch.X[1])), n)
if n == 0
return PointSequence(v, seq.skip+skip, v_increasing)
end
v[1] = preimage(seq.v[skip+1], branch.f, hull(branch.X...), ϵ)
if n == 1
return PointSequence(v, seq.skip+skip, v_increasing)
end
v[end] = preimage(seq.v[skip+n], branch.f, hull(branch.X...), ϵ)
stride = prevpow(2, n-1)
while stride >= 1
# fill in v[i] using v[i-stride] and v[i+stride]
for i = 1+stride:2*stride:n-1
X = hull(v[i-stride], v[min(i+stride, n)]) #TODO: this hull() could be replaced with the proper [a.lo..b.hi], since we know orientations
v[i] = preimage(seq.v[skip+i], branch.f, X, ϵ)
end
stride = stride ÷ 2
end
return PointSequence(v, seq.skip+skip, v_increasing)
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 3200 | using IntervalArithmetic
using .DynamicDefinition
"""
Return Branches for a given dynamic, in an iterable
"""
function branches(D::PwMap)
    return [Branch(D.Ts[k], (D.endpoints[k], D.endpoints[k+1]), D.is_full[k] ? (Interval(0), Interval(1)) : (D.Ts[k](Interval(D.endpoints[k])), D.Ts[k](Interval(D.endpoints[k+1]))), D.orientations[k]==1) for k in 1:length(D.Ts)]
end
"""
Return preimages of a certain sequence on all branches of a dynamic
"""
function preimages(seq, D::Dynamic, ϵ = 0.0)
return [preimages(seq, branch, ϵ) for branch in branches(D)]
end
# TODO: overload for IterateDynamic
"""
Constructs associated "Ulam duals" for a branch b,
i.e., the sequence (k, (T⁻¹(p[k]), T⁻¹(p[k+1]))),
handling endpoints correctly. A callback `f(k, (a, b))` is called on each dual.
This should eventually replace `DualComposedWithDynamic`.
"""
function callback_duals(f, B::Ulam, branch::Branch, preims=nothing)
if preims === nothing
        preims = preimages(PointSequence(B.p), branch)
end
n = length(preims.v)
# duals = Tuple{Int, Tuple{eltype(preims), eltype(preims)}}[]
# sizehint!(duals, n+2)
if n==0 # special case: only one preimage
first_endpoint = preims.increasing ? branch.X[1] : branch.X[2]
last_endpoint = preims.increasing ? branch.X[2] : branch.X[1]
f(preims.skip, (first_endpoint, last_endpoint))
return nothing
end
if preims.skip > 0
first_endpoint = preims.increasing ? branch.X[1] : branch.X[2]
if first_endpoint != preims.v[1]
f(preims.skip, (first_endpoint, preims.v[1]))
end
end
for k in 1:n-1
f(preims.skip+k, (preims.v[k], preims.v[k+1]))
end
if preims.skip + n < length(B.p) # if there are skipped entries at the end of preims
        last_endpoint = preims.increasing ? branch.X[2] : branch.X[1]
if last_endpoint != preims.v[n]
f(preims.skip+n, (preims.v[n], last_endpoint))
end
end
return nothing
end
function callback_duals(f, B::Hat, branch::Branch, preims=nothing)
if preims === nothing
        preims = preimages(PointSequence(B.p), branch)
end
@assert preims.skip == 0
@assert length(preims.v) == length(B.p) # The Hat basis estimation works only for full-branch maps
for i = 1:length(B) # this skips p[n+1]==1
x = preims.v[i]
absT′ = abs(derivative(branch.f, x))
f(i, (x, absT′))
end
end
function callback_duals(f, B::Basis, D::Dynamic)
for (branch, preim) in zip(branches(D), preimages(PointSequence(B.p), D))
callback_duals(f, B, branch, preim)
end
end
function assemble2(B::Basis, D::Dynamic, ϵ=0.0; T = Float64)
# putting types here in hope to improve callback inference
I::Vector{Int64} = Int64[]
J::Vector{Int64} = Int64[]
nzvals::Vector{Interval{T}} = Interval{T}[]
n::Int64 = length(B)
# TODO: reasonable size hint?
function assemble_callback(i, dual_element)
if !is_dual_element_empty(B, dual_element)
for (j, x) in ProjectDualElement(B, dual_element)
push!(I, i)
push!(J, mod(j,1:n))
push!(nzvals, x)
end
end
end
callback_duals(assemble_callback, B, D)
return sparse(I, J, nzvals, n, n)
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 846 | using InvariantMeasures
using ValidatedNumerics
@testset "Ulam assembler" begin
D = Mod1Dynamic(x->2*x)
B = Ulam(8)
P = assemble(B, D)
Ptrue = [
0.5 0.5 0 0 0 0 0 0;
0 0 0.5 0.5 0 0 0 0;
0 0 0 0 0.5 0.5 0 0;
0 0 0 0 0 0 0.5 0.5;
0.5 0.5 0 0 0 0 0 0;
0 0 0.5 0.5 0 0 0 0;
0 0 0 0 0.5 0.5 0 0;
0 0 0 0 0 0 0.5 0.5;
]
Ptrue = Ptrue'
@test all(contains_zero.(P-Ptrue))
# mod1_dynamic with a non-Markov dynamic
D = mod1_dynamic(x->x+0.5)
B = Ulam(8)
P = assemble(B, D)
Ptrue = [
0 0 0 0 1 0 0 0;
0 0 0 0 0 1 0 0;
0 0 0 0 0 0 1 0;
0 0 0 0 0 0 0 1;
1 0 0 0 0 0 0 0;
0 1 0 0 0 0 0 0;
0 0 1 0 0 0 0 0;
0 0 0 1 0 0 0 0;
]
@test all(contains_zero.(P-Ptrue))
@test opnormbound(L1, DiscretizedOperator(B, D)) == 1
@test opnormbound(Linf, DiscretizedOperator(B, D)) == 1
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 917 | using InvariantMeasures
using ValidatedNumerics
using LinearAlgebra
@testset "Hat assembler" begin
D = Mod1Dynamic(x->2*x)
B = Hat(8)
P = assemble(B, D)
Ptrue = [
0.5 0.25 0 0 0 0 0 0.25;
0 0.25 0.5 0.25 0 0 0 0 ;
0 0 0 0.25 0.5 0.25 0 0 ;
0 0 0 0 0 0.25 0.5 0.25;
0.5 0.25 0 0 0 0 0 0.25;
0 0.25 0.5 0.25 0 0 0 0 ;
0 0 0 0.25 0.5 0.25 0 0 ;
0 0 0 0 0 0.25 0.5 0.25;
]
Ptrue = Ptrue'
@test all(contains_zero.(P-Ptrue))
@test opnormbound(L1, DiscretizedOperator(B, D)) == 1
@test opnormbound(Linf, DiscretizedOperator(B, D)) == 1
Q = DiscretizedOperator(B, D)
@test size(Q) == (8,8)
n = size(Q)[1]
e = randn(n)
x = similar(e)
x[:] .= 2.
mQ = mid(Q)
@test mul!(x, mQ, e, 1, 0) == mQ*e
fQ = mQ.L + mQ.e * mQ.w
@test fQ * e ≈ mQ * e
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 382 | using InvariantMeasures
@testset "Chebyshev assembler" begin
N = 16
B = InvariantMeasures.Chebyshev(N)
D = mod1_dynamic(x->2*x)
L(ϕ, x) = (ϕ(x/2)+ϕ(x/2+0.5))/2
M = assemble(B, D)
using LinearAlgebra
all_true = true
for i in 1:N
w = mid.(L.(B[i], B.p))
z = InvariantMeasures.chebtransform(w)
all_true = all_true && norm(z-M[:, i], Inf)< 10^-13
end
@test all_true
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 603 | using InvariantMeasures: root, nthpreimage!
using StaticArrays
using ValidatedNumerics
@testset "Contractors" begin
@test root(x -> x^2-2, 1..2, 1e-13) ≈ sqrt(2)
# to ensure quadratic convergence
@test root(x -> x^2-2, 1..2, 1e-13; max_iter = 6) ≈ sqrt(2)
fs = (x -> x^2, x -> x^3)
X = [0..1, 0..1]
y = 0.1
nthpreimage!(y, fs, X; max_iter = 100)
@test X[1]^6 ≈ y
@test X[2]^3 ≈ y
X = @MVector[0..1, 0..1]
nthpreimage!(y, fs, X; max_iter = 9)
@test X[1]^6 ≈ y
@test X[2]^3 ≈ y
fs = (x -> x^2, x -> x^3, x -> x^4)
X = @MVector[0..1, 0..1, 0..1]
nthpreimage!(y, fs, X)
@test X[1]^24 ≈ y
end #testset
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 2574 | using InvariantMeasures
using ValidatedNumerics
@testset "Dynamics" begin
D = Mod1Dynamic(x->2*x)
@test D.T(0.1) == 0.2
@test dfly(Lipschitz, L1, D) == (0.5, 0.0)
@test dfly(TotalVariation, L1, D) == (0.5, 0.0)
D = PwMap([ x-> x^2+0.25, x -> 4*x-2, x -> 4*x-3], [0, 0.5, 0.75, 1])
@test InvariantMeasures.DynamicDefinition.derivative(D, 0.1..0.1) ≈ 0.2
@test InvariantMeasures.DynamicDefinition.derivative(D, 0.2..0.3) ≈ 0.4..0.6
@test InvariantMeasures.DynamicDefinition.derivative(D, 0.4..0.6) ≈ 0.8..4
@test InvariantMeasures.DynamicDefinition.derivative(D, 0.7..0.8) ≈ 4
D = PwMap([ x-> 4*x, x -> 2*x-0.5, x -> 4*x-3], [0, 0.25, 0.75, 1])
@test InvariantMeasures.DynamicDefinition.derivative(D, 0.1..0.3) ≈ 2..4
@test InvariantMeasures.DynamicDefinition.derivative(D, 0.1..0.2) ≈ 4..4
D = PwMap([x->2*x, x->2-2*x], [@interval(0), @interval(0.5), @interval(1)])
@test InvariantMeasures.orientation(D, 1) == 1
@test InvariantMeasures.orientation(D, 2) == -1
D = mod1_dynamic(x -> 3.5x, (0,1))
@test D.endpoints ≈ [0, 2/7, 4/7, 6/7, 1]
@test D.Ts[2](0.5) == 0.75
@test D.y_endpoints == [0 1; 0 1; 0 1; 0 0.5]
@test D.increasing == [1,1,1,1]
D = mod1_dynamic(x -> 3.5x + 0.5)
@test D.endpoints ≈ [0, 1/7, 3/7, 5/7, 1]
@test D.Ts[end](1) == 1
@test D.y_endpoints == [0.5 1; 0 1; 0 1; 0 1]
@test D.increasing == [1,1,1,1]
D = mod1_dynamic(x -> -3.5x + 0.5)
@test D.endpoints ≈ [0, 1/7, 3/7, 5/7, 1]
@test D.Ts[end](5/7) == 1
@test D.y_endpoints == [0.5 0; 1 0; 1 0; 1 0]
@test D.increasing == [0,0,0,0]
D = Iterate(mod1_dynamic(x->2*x), 3)
using InvariantMeasures.DynamicDefinition: derivative, distorsion, nbranches
@test derivative(D, 0.1..0.2) == 8..8
@test distorsion(D, 0.1..0.2) == 0..0
@test map(k->preim(D, k, 0.5), 1:8) == [1//16, 3//16, 5//16, 7//16, 9//16, 11//16, 13//16, 15//16]
# Lanford map, so that we test also something that is not linear
f = x->2*x+0.5*x*(1-x)
D = Iterate(mod1_dynamic(f), 3)
# Interval Newton should converge in 4 steps here, so we set max_iter = 4
# to make sure that we haven't inadvertently implemented something that is not quadratically convergent
# (for instance, by using the wrong Jacobian)
its = map(k->preim(D, k, 0.5, 1e-15, max_iter = 4), 1:nbranches(D))
g(x) = f(x) - floor(f(x))
@test g.(g.(g.(its))) ≈ fill(0.5, 8)
@test endpoints(D) ≈ [0.0, 0.07389363935392047, 0.18200396341631753, 0.28117073603385473, 0.4384471871911697, 0.5287080193012084, 0.6633983486837269, 0.7902622123165944, 1.0]
@test [branch(D, k)(Interval(0.2)) for k in 1:nbranches(D)] ≈ [∅, ∅, g(g(g(Interval(0.2)))), ∅, ∅, ∅, ∅, ∅]
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 200 | @testset "Estimator" begin
D = Mod1Dynamic(x->2*x)
B = Ulam(32)
P = assemble(B, D)
# C = InvariantMeasures.boundnorm(B, P, 10)
# @test C == [1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 1015 | @testset "Hat basis" begin
using ValidatedNumerics
using InvariantMeasures
using InvariantMeasures: HatFunction, HatFunctionOnTorus, IntervalOnTorus, nonzero_on
f = HatFunction(1., 2, 3)
@test f(1.5) == 0.5
@test f(1..1.5) == 0..0.5
f = HatFunctionOnTorus(0.125, 0.25, 0.375)
x = IntervalOnTorus(0.375..1.1875)
@test f(x) == 0..0.5
@test f(IntervalOnTorus(0..1)) == 0..1
f = HatFunctionOnTorus(0.125, 0.25, 0.375)
x = IntervalOnTorus(3.1875..3.25)
@test f(x) == 0.5..1
f = HatFunctionOnTorus(0, 0.125, 0.25)
x = IntervalOnTorus(0..0.0625)
@test f(x) == 0..0.5
f = HatFunctionOnTorus(0.875, 0, 0.125)
x = IntervalOnTorus(0..0.0625)
@test f(x) == 0.5..1
f = HatFunctionOnTorus(0.875, 0, 0.125)
x = IntervalOnTorus(0.9375..1)
@test f(x) == 0.5..1
B = Hat(4)
@test nonzero_on(B, (0.1..0.3, NaN)) == (1,3)
@test nonzero_on(B, (0..1, NaN)) == (1,4)
@test nonzero_on(B, (0.3..0.31, NaN)) == (2,3)
@test is_refinement(Hat(8), Hat(4))
@test is_refinement(Hat(8), Hat(8))
@test !is_refinement(Hat(4), Hat(8))
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 1205 | using InvariantMeasures
using ValidatedNumerics
using LinearAlgebra: I, opnorm
using SparseArrays: sparse
using Test
@testset "Norms of powers" begin
import Random
Random.seed!(1) #ensures tests are deterministic
n = 9
m = 10
M = 0.2*randn(n, n)
R = 1e-14*rand(n, n)
LL = sparse(interval_from_midpoint_radius.(M, R))
Q = IntegralPreservingDiscretizedOperator(LL)
U = [ones(1,n-1); -Matrix(I, n-1,n-1)]
@test norms_of_powers(Linf, m, Q, zeros(Interval{Float64}, 1)) ≈ [opnorm(M^k*U,Inf) for k = 1:m]
@test norms_of_powers(L1, m, Q, zeros(Interval{Float64}, 1)) ≈ [opnorm(M^k*U,1) for k = 1:m]
e = ones(n)
f = adjoint(e) / n
intf = map(Interval, f)
Q = NonIntegralPreservingDiscretizedOperator(LL, e, f)
@test norms_of_powers(Linf, m, Q, intf) ≈ [opnorm((M+e*(f-f*M))^k*U,Inf) for k = 1:m]
@test norms_of_powers(L1, m, Q, intf) ≈ [opnorm((M+e*(f-f*M))^k*U,1) for k = 1:m]
@test refine_norms_of_powers([0.5, 1, 2, 0.001]) == [0.5, 0.25, 0.125, 0.001]
@test refine_norms_of_powers([0.5, 1, 2, 1e-3], 8) == [0.5, 0.25, 0.125, 1e-3, 0.5e-3, 0.25e-3, 0.125e-3, 1.0000000000000002e-6] # also tests correct rounding
@test refine_norms_of_powers([2,0.2,0.1],4) == [2, 0.2, 0.1, 0.04000000000000001]
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 2510 | using Test
@testset "preimages" begin
# Testing skip_beginning / skip_end
a1 = LinRange(0,1,12)
a2 = [0, 0..0.1, 0..0.2, 0.1..0.3, 0.2..0.4, 0.3..0.5, 0.3..0.6, 0.6..0.8, 0.8..1, 1..1] #tricky weakly sorted vector
x1 = 0.25..0.25
x2 = 0.1..0.8
x3 = 2
x4 = -1
x5 = 0
x6 = 0.3
for a = (a1, a2)
for x = (x1, x2, x3, x4, x5, x6)
i = InvariantMeasures.first_overlapping(a, x)
@test i==0 || Interval(a[i]).hi <= Interval(x).lo
@test i==length(a) || !(Interval(a[i+1]).hi <= Interval(x).lo)
j = InvariantMeasures.last_overlapping(a, x)
@test j==0 || !(Interval(a[j]).lo >= Interval(x).hi)
@test j==length(a) || Interval(a[j+1]).lo >= Interval(x).hi
end
end
# checking for equality with looser tolerance
function approxintervals(a, b)
return a.lo ≈ b.lo && a.hi ≈ b.hi
end
for y = (a1, a2)
for f in (x->x/2, x->1+x/2, x-> 1-x/2)
b = InvariantMeasures.Branch(f, (@interval(0), @interval(1)))
ylabel = 1:length(y)
x, xlabel = preimages(y, b, ylabel)
@test x[1] == b.X[1]
@test x[end] != b.X[2] # to make sure the last entry isn't there
@test length(x) == length(xlabel)
y1 = filter(x->!isempty(x),intersect.(map(Interval,y), hull(b.Y[1], b.Y[2]+1e-15))) # the 1e-15 is there to simulate "disjoint intervals"
y2 = f.(x)
if !b.increasing
y2 = reverse(y2)
end
@test all(approxintervals.(y1, y2))
end
end
D = mod1_dynamic(x -> 2x)
DD = ∘(D, D, D, D)
p = [0, 0.2, 0.4, 0.6, 0.8]
x, xlabel = preimages(p, DD)
@test x ≈ 0:1/80:79/80
@test xlabel == repeat([1,2,3,4,5],16)
# test composing two different functions, to check that composition is handled in the correct order
f = x-> 2x
g = x -> (x+x^2)/2
D1 = mod1_dynamic(f)
D2 = mod1_dynamic(g)
y = 0:0.2:1
x, xlabel = preimages(y, D1 ∘ D2)
@test f.(g.(x)) ≈ 0:0.2:1.8
@test xlabel ≈ repeat(1:5, 2)
D1 = PwMap([x->2x, x->6x-3, x->3x-2], [0, 0.5, @interval(2/3), 1], [0 1; 0 1; 0 1])
D2 = PwMap([x->2x, x->4x-2, x->4x-3], [0, 0.5, 0.75, 1], [0 1; 0 1; 0 1])
z = 0:0.3:1
y, ylabel, y′ = InvariantMeasures.preimages_and_derivatives(z, D1)
@test y ≈ [0, 0.15, 0.3, 0.45, 0.5, 0.5+0.05, 0.5+0.1, 0.5+0.15, 2/3, 2/3+0.1, 2/3+0.2, 2/3+0.3]
@test ylabel == repeat(1:4, 3)
@test y′ ≈ [2,2,2,2,6,6,6,6,3,3,3,3]
x, xlabel, x′ = InvariantMeasures.preimages_and_derivatives(z, D1∘D2)
@test all(x .≈ vcat(y/2, 0.5 .+ y/4, 0.75 .+ y/4))
@test x′ == kron([4,12,6,8,24,12,8,24,12], [1,1,1,1])
end #testset | InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 497 | @testset "Ulam basis" begin
using ValidatedNumerics
using InvariantMeasures
using InvariantMeasures.BasisDefinition
B = Ulam(4)
@test nonzero_on(B, (@interval(0.1), @interval(0.3))) == (1,2)
@test nonzero_on(B, (@interval(0), @interval(1))) == (1,4)
@test nonzero_on(B, (@interval(0.3), @interval(0.31))) == (2,2)
@test nonzero_on(B, (@interval(1), @interval(1))) == (4,4)
@test is_refinement(Ulam(8), Ulam(4))
@test is_refinement(Ulam(8), Ulam(8))
@test !is_refinement(Ulam(4), Ulam(8))
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | code | 391 | using InvariantMeasures
using Test
@testset "InvariantMeasures.jl" begin
include("TestContractors.jl")
include("TestDynamic.jl")
include("TestHat.jl")
include("TestUlam.jl")
include("TestAssemble.jl")
include("TestAssembleHat.jl")
include("TestEstimate.jl")
include("TestNormOfPowers.jl")
include("TestPreimages.jl")
include("TestChebyshev.jl")
end
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.2 | 7d97b4f88233e10a2ce4802c57be35bb40b3992d | docs | 3968 | # InvariantMeasures
[](https://travis-ci.com/orkolorko/InvariantMeasures.jl)
This Package provides methods for the rigorous approximation of Absolutely Continuous Invariant Measures for one dimensional dynamical systems,
using the results in [[2]](#2) and subsequent works.
## Mathematical background
By Birkhoff Ergodic Theorem we know that if a dynamical system T admits an ergodic invariant measure μ, for μ-almost every point x the frequence of the visits to a set E corresponds to the measure of the set with respect to μ.
Therefore, being able to approximate invariant measures with a large basin is interesting to investigate the statistical properties of the dynamical system T. [[4]](#4)
The existence of absolutely continuous invariant measures for one dimensional maps is a delicate topic; with this package we present approximation schemes for the invariant measures of system that satisfy a Lasota-Yorke inequality through the use of a coarse-fine scheme and a posteriori estimates on the mixing time, i.e., this means that the mixing time is estimated by our algorithm and we do not need an a priori estimate, which is usually difficult to obtain.
The Ulam approximation schemes works under relatively weak hypothesis on the dynamics and was used in [[3]](#3) to approximate the invariant measure for the geometric Lorenz 1-dimensional map.
We are currently working on the implementation of the Ulam scheme for system with additive uniform noise, as the one used in
[[1]](#1)
## Basic Usage
Examples of usage are present in the directory examples.
```julia
using InvariantMeasures
D = Mod1Dynamic(x -> 4x + 0.01InvariantMeasures.sinpi(8x))
B = Ulam(1024)
Q = DiscretizedOperator(B, D)
```
The code snippet above defines a dynamic obtained by reducing f(x) = 4x+0.01 sin(8πx) modulo 1, a basis B associated to the Ulam discretization on a partition of 1024 homogeneous intervals, and computes the discretized operator Q, a Markov chain whose entries are P[T(x)∈ Iᵢ | x ∈ Iⱼ].
Note the usage of `InvariantMeasures.sinpi(8*x)` rather than `Base.sinpi` or `Base.sin(8π*x)`. This detail is required to ensure that f(1) == 4 exactly.
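For instance, one can check this exactness directly (an illustrative snippet, assuming the package is loaded):
```julia
f = x -> 4x + 0.01InvariantMeasures.sinpi(8x)
f(1) == 4  # true, since InvariantMeasures.sinpi(8) == 0 exactly
```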
```julia
norms = powernormbounds(B, D; Q=Q)
```
This function computes the L¹ norm of Q^k, for k = 1,2,...,k_max (up to a sufficiently large number of powers to observe decay) when restricted to the space U of average-0 vectors. This gives us the a posteriori estimate for the mixing time of the Markov chain and is used in our rigorous estimate.
```julia
w = invariant_vector(B, Q)
distance_from_invariant(B, D, Q, w, norms)
```
This computes a (non-rigorous) approximation of the invariant measure of D; then `distance_from_invariant` computes an upper bound for the L¹ distance between w and the density of the absolutely continuous invariant measure of the system.
The examples show how to use the coarse-fine scheme to obtain better L¹ bounds and reduce the computation time.
### Caveat
The function `sinpi` in the interval arithmetic package that we are using relies on the `CRlibm.jl` package, which currently works only under Linux. So the examples that use trigonometric functions only work on this OS.
## References
<a id="1">[1]</a>
Galatolo S., Monge M., Nisoli I., Existence of noise induced order, a computer aided proof Nonlinearity 33 (9), 4237 (2020)
<a id="2">[2]</a>
Galatolo S., Nisoli I., An elementary approach to rigorous approximation of invariant
measures SIAM J. Appl Dyn Sys.13 pp. 958-985 (2014)
<a id="3">[3]</a> Galatolo S., Nisoli I. Rigorous computation of invariant measures and fractal dimension for maps with contracting fibers: 2D Lorenz-like maps
Ergodic Theory and Dynamical Systems 36 (6), 1865-1891 (2016)
<a id="4">[4]</a> Viana M., Olivera K. Foundations of Ergodic Theory
Cambridge studies in advanced mathematics, Cambridge University Press 2016
| InvariantMeasures | https://github.com/orkolorko/InvariantMeasures.jl.git |
|
[
"MIT"
] | 0.1.0 | 2a928ffe1d85382b22a2d232fb7ebf07c5fa5210 | code | 2339 | module AlignedArrays
import Mmap
export AlignedArray, AlignedVector, AlignedMatrix, AlignedVecOrMat
export PageAlignedArray, PageAlignedVector, PageAlignedMatrix, PageAlignedVecOrMat
const PAGESIZE = Mmap.PAGESIZE
struct AlignedArray{T, N, A} <: DenseArray{T, N}
parent::Array{T, N}
addr::Ref{Ptr{Cvoid}}
function AlignedArray{T, N, A}(::UndefInitializer, dims::NTuple{N, Integer}) where {T, N, A}
ispow2(A) || error("Alignment must be a power of two")
isconcretetype(T) || error("Element type must be a concrete type")
        nbytes = (isempty(dims) ? 0 : reduce(*, dims)) * sizeof(T)  # posix_memalign expects a byte count, not an element count
        @static if Sys.islinux()
            addr = Ref(C_NULL)
            ccall(:posix_memalign, Cint, (Ptr{Ptr{Cvoid}}, Csize_t, Csize_t), addr, A, nbytes) == 0 || error("Failed to allocate aligned memory")
else
error("Operating system not yet supported")
end
a = new{T, N, A}(unsafe_wrap(Array{T, N}, reinterpret(Ptr{T}, addr[]), dims, own = false), addr)
finalizer(a.addr) do x
@static if Sys.islinux()
ccall(:free, Cvoid, (Ptr{Cvoid},), x[])
end
end
return a
end
end
AlignedArray{T, N, A}(u::UndefInitializer, dims::Integer...) where {T, N, A} = AlignedArray{T, N, A}(u, dims)
const AlignedVector{T, A} = AlignedArray{T, 1, A}
const AlignedMatrix{T, A} = AlignedArray{T, 2, A}
const AlignedVecOrMat{T, A} = Union{AlignedVector{T, A}, AlignedMatrix{T, A}}
const PageAlignedArray{T, N} = AlignedArray{T, N, PAGESIZE}
const PageAlignedVector{T} = AlignedVector{T, PAGESIZE}
const PageAlignedMatrix{T} = AlignedMatrix{T, PAGESIZE}
const PageAlignedVecOrMat{T} = AlignedVecOrMat{T, PAGESIZE}
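# Usage sketch (added for exposition): a page-aligned vector behaves like a
# plain Vector, but its buffer starts on a page boundary, which some I/O APIs
# (e.g. files opened with O_DIRECT) require.
function _demo_alignment()
    v = PageAlignedVector{Float64}(undef, 16)
    return UInt(pointer(v)) % PAGESIZE == 0 # expected true
end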
Base.parent(a::AlignedArray) = a.parent
Base.pointer(a::AlignedArray) = pointer(parent(a))
Base.size(a::AlignedArray) = size(parent(a))
Base.length(a::AlignedArray) = length(parent(a))
Base.axes(a::AlignedArray) = axes(parent(a))
Base.IndexStyle(::Type{A}) where {T, N, A<:AlignedArray{T, N}} = IndexStyle(Array{T, N})
Base.getindex(a::AlignedArray, args...) = getindex(parent(a), args...)
Base.setindex!(a::AlignedArray, args...) = setindex!(parent(a), args...)
Base.iterate(a::AlignedArray, args...) = iterate(parent(a), args...)
Base.similar(a::AlignedArray, args...) = similar(parent(a), args...)
Base.show(io::IO, m::MIME"text/plain", a::AlignedArray) = show(io, m, parent(a))
end
| AlignedArrays | https://github.com/analytech-solutions/AlignedArrays.jl.git |