| licenses (sequence, length 1–3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, length 2–8) | text (string, length 25–67.1M) | package_name (string, length 2–41) | repo (string, length 33–86) |
---|---|---|---|---|---|---|---|---|
| ["MIT"] | 0.3.0 | bd00a6c0f899a0f8e7f8cac53e78c6f24cbd8080 | code | 6439 |
## benchmark helpers (included from test/runtests.jl as benchmark/locpoly_bench.jl)
using LinearAlgebra, SparseArrays
using ProximalBase
using CoordinateDescent
using CoordinateDescent: _expand_X!, _expand_Xt_w_X!, _expand_Xt_w_Y!
##
function _locpoly_alt1!(
eX::Matrix{T}, w::Vector{T},
X::Matrix{T}, z::Vector{T}, y::Vector{T},
z0::T, degree::Int64, kernel::SmoothingKernel{T}) where {T <: AbstractFloat}
w .= evaluate.(Ref(kernel), z, Ref(z0))
_expand_X!(eX, X, z, z0, degree)
((eX' * Diagonal(w)) * eX) \ ((eX' * Diagonal(w)) * y)
end
function locpoly_alt1(
X::Matrix{T}, z::Vector{T}, y::Vector{T},
zgrid::Vector{T},
degree::Int64, # degree of the polynomial
kernel::SmoothingKernel{T}=GaussianKernel(one(T))) where {T <: AbstractFloat}
n, p = size(X)
ep = p * (degree + 1)
out = Array{T}(undef, ep, length(zgrid))
w = Array{T}(undef, n)
eX = Array{T}(undef, n, ep)
ind = 0
for z0 in zgrid
ind += 1
out[:, ind] = _locpoly_alt1!(eX, w, X, z, y, z0, degree, kernel)
end
out
end
##
function _locpoly_alt!(
Xt_w_X::Matrix{T}, Xt_w_Y::Vector{T}, w::Vector{T},
X::Matrix{T}, z::Vector{T}, y::Vector{T},
z0::T, degree::Int64, kernel::SmoothingKernel{T}) where {T <: AbstractFloat}
w .= evaluate.(Ref(kernel), z, Ref(z0))
_expand_Xt_w_X!(Xt_w_X, w, X, z, z0, degree)
_expand_Xt_w_Y!(Xt_w_Y, w, X, z, y, z0, degree)
cholesky!(Symmetric(Xt_w_X)) \ Xt_w_Y
end
function locpoly_alt(
X::Matrix{T}, z::Vector{T}, y::Vector{T},
zgrid::Vector{T},
degree::Int64, # degree of the polynomial
kernel::SmoothingKernel{T}) where {T <: AbstractFloat}
n, p = size(X)
ep = p * (degree + 1)
out = Array{T}(undef, ep, length(zgrid))
w = Array{T}(undef, n)
Xt_w_X = Array{T}(undef, ep, ep)
Xt_w_Y = Array{T}(undef, ep)
ind = 0
for z0 in zgrid
ind += 1
out[:, ind] = _locpoly_alt!(Xt_w_X, Xt_w_Y, w, X, z, y, z0, degree, kernel)
end
out
end
##
function locpolyl1_alt(
X::Matrix{T}, z::Vector{T}, y::Vector{T},
zgrid::Vector{T},
degree::Int64,
kernel::SmoothingKernel{T},
λ0::T,
options::CDOptions=CDOptions()) where {T <: AbstractFloat}
# construct inner options because we do not want warmStart = false
# we want to start from the previous iteration since the points
# on the grid should be close to each other
opt = CDOptions(options.maxIter, options.optTol, options.randomize, true, options.numSteps)
n, p = size(X)
ep = p * (degree + 1)
out = Array{SparseVector{T, Int64}}(undef, length(zgrid))
# temporary storage
w = Array{T}(undef, n)
wX = Array{T}(undef, n, ep)
stdX = ones(T, ep)
ind = 0
for z0 in zgrid
ind += 1
# the following two lines update the data referenced by f
w .= evaluate.(Ref(kernel), z, Ref(z0))
_expand_X!(wX, X, z, z0, degree)
# compute std for each column
@inbounds for j=1:ep
v = zero(T)
@simd for i=1:n
v += wX[i, j] * wX[i, j] * w[i]
end
stdX[j] = sqrt(v / n)
end
@. w = sqrt(w)
f = CDLeastSquaresLoss(Diagonal(w)*y, Diagonal(w)*wX)
g = ProximalBase.ProxL1(λ0, stdX)
β = ProximalBase.SparseIterate(ep)
# solve for β
coordinateDescent!(β, f, g, opt)
out[ind] = convert(SparseVector, β)
end
out
end
##
function _expand_Xt_w_X_alt!(
Xt_w_X::Matrix{T},
w::Vector{T}, X::Matrix{T},
z::Vector{T}, z0::T, degree::Int64) where {T <: AbstractFloat}
n, p = size(X)
fill!(Xt_w_X, zero(T))
@inbounds for j=1:p, jj=0:degree
col=(j-1)*(degree+1)+1+jj
for k=j:p
if k != j
krange = 0:degree
else
krange = jj:degree
end
for kk=krange
row = (k-1)*(degree+1)+1+kk
# compute Xt_w_X[row, col]
for i=1:n
Xt_w_X[row, col] += X[i, j] * (z[i] - z0)^(jj+kk) * X[i, k] * w[i]
end
if row != col
Xt_w_X[col, row] = Xt_w_X[row, col]
end
end
end
end
Xt_w_X
end
# generate varying-coefficient data: Y_i = X_i'β(Z_i) + ε_i with β_j(z) = sin(r_j⋅z), r_j ∈ {2,4,6,8}
function genData(n, p)
X = randn(n, p)
Z = rand(n)
ɛ = 0.1 * randn(n)
Y = zeros(n)
betaMat = zeros(p, n)
randBeta = [rand([2,4,6,8]) for j=1:p]
for i=1:n
betaMat[:, i] = sin.(randBeta * Z[i])
Y[i] = dot(betaMat[:, i], X[i, :]) + ɛ[i]
end
Y, X, Z, betaMat
end
# generate data with constant coefficients: Y_i = X_i'β + ε_i, β_j ∈ [0.5, 1.5)
function genDataIID(n, p)
X = randn(n, p)
Z = rand(n)
ɛ = 0.1 * randn(n)
Y = zeros(n)
beta = 0.5+rand(p)
for i=1:n
Y[i] = dot(beta, X[i, :]) + ɛ[i]
end
Y, X, Z, beta
end
# using BenchmarkTools
####
#
# n, p = 100, 2
# Y, X, Z, betaMat = genData(n, p)
# zgrid = collect(0.01:0.1:0.99)
#
# gk = GaussianKernel(0.2)
# degree = 3
#
# @time o1 = locpoly(X, Z, Y, zgrid, 1, gk)
# @time o2 = locpoly_alt(X, Z, Y, zgrid, 1, gk)
# @time o3 = locpoly_alt1(X, Z, Y, zgrid, 1, gk)
#
# maximum(abs.(o1-o2))
# maximum(abs.(o1-o3))
#
# @benchmark locpoly($X, $Z, $Y, $zgrid, $degree, $gk)
# @benchmark locpoly_alt($X, $Z, $Y, $zgrid, $degree, $gk)
# @benchmark locpoly_alt1($X, $Z, $Y, $zgrid, $degree, $gk)
####
# p = 10
# X = randn(100, p)
# z = rand(100)
# w = zeros(100)
# k = GaussianKernel(0.2)
# @. w = evaluate(k, z, 0.5)
#
#
# degree = 2
# cp = p*(degree+1)
#
# eX = zeros(100, cp)
# _expand_X!(eX, X, z, 0.5, degree)
#
# Xt_w_X = zeros(cp, cp)
# Xt_w_X1 = zeros(cp, cp)
# o1 = _expand_Xt_w_X!(Xt_w_X, w, X, z, 0.5, degree)
# o2 = _expand_Xt_w_X_alt!(Xt_w_X1, w, X, z, 0.5, degree)
# o3 = (eX'*diagm(w))*eX
#
# maximum(abs.(o1 - o2))
# maximum(abs.(o1 - o3))
#
# using BenchmarkTools
# @benchmark _expand_Xt_w_X!($Xt_w_X, $w, $X, $z, 0.5, degree)
# @benchmark _expand_Xt_w_X_alt!($Xt_w_X1, $w, $X, $z, 0.5, degree)
### benchmark local lasso
#
#
# n, s, p = 500, 2, 50
# degree = 2
# gk = GaussianKernel(0.2)
# w = zeros(n)
# sw = zeros(n)
#
#
# Y, X, Z, betaMat = genData(n, s)
# X = [X zeros(n, p-s)]
# z0 = rand()
#
# cp = p*(degree+1)
# eX = zeros(n, cp)
# CoordinateDescent._expand_X!(eX, X, Z, z0, degree)
#
# @. w = evaluate(gk, Z, z0)
# @. sw = sqrt(w)
#
# λ = 0.1
# g = ProximalBase.ProxL1(λ)
# f1 = CDLeastSquaresLoss(diagm(sw) * Y, diagm(sw)*X)
# f2 = CDWeightedLSLoss(Y, X, w)
#
# opt = CDOptions(;maxIter=5000, optTol=1e-8, warmStart=false, randomize=true)
# x1 = convert(ProximalBase.SparseIterate, sprand(p, 0.6))
# x2 = convert(ProximalBase.SparseIterate, sprand(p, 0.6))
#
# @benchmark coordinateDescent!($x1, $f1, $g, $opt)
# @benchmark coordinateDescent!($x2, $f2, $g, $opt)
## benchmark locpolyl1
#
# n, s, p = 500, 2, 50
# degree = 0
# gk = GaussianKernel(0.2)
#
# Y, X, Z, betaT = genData(n, s)
# X = [X zeros(n, p-s)]
# zgrid = collect(0.01:0.1:0.99)
#
# λ0 = 0.01
#
# opt = CDOptions(;randomize=false)
# o1 = locpolyl1(X,Z,Y,zgrid,degree,gk,λ0, opt)
# o2 = locpolyl1_alt(X,Z,Y,zgrid,degree,gk,λ0, opt)
#
# maximum( maximum(abs.(o1[i] - o2[i])) for i=1:length(zgrid) )
#
| CoordinateDescent | https://github.com/mlakolar/CoordinateDescent.jl.git |
| ["MIT"] | 0.3.0 | bd00a6c0f899a0f8e7f8cac53e78c6f24cbd8080 | code | 726 |
module CoordinateDescent
using ProximalBase
using DataStructures: nlargest
using SparseArrays
using Statistics
using LinearAlgebra
export
lasso, sqrtLasso, feasibleLasso!, scaledLasso, scaledLasso!,
LassoPath, refitLassoPath,
IterLassoOptions,
# CD
CoordinateDifferentiableFunction,
CDLeastSquaresLoss, CDWeightedLSLoss, CDQuadraticLoss, CDSqrtLassoLoss,
CDOptions,
coordinateDescent, coordinateDescent!,
# var coef
GaussianKernel, SmoothingKernel, EpanechnikovKernel, evaluate, createKernel,
locpoly, locpolyl1
include("utils.jl")
include("atom_iterator.jl")
include("cd_differentiable_function.jl")
include("coordinate_descent.jl")
include("lasso.jl")
include("varying_coefficient_lasso.jl")
end
| CoordinateDescent | https://github.com/mlakolar/CoordinateDescent.jl.git |
| ["MIT"] | 0.3.0 | bd00a6c0f899a0f8e7f8cac53e78c6f24cbd8080 | code | 2601 |
abstract type AtomIterator end
Base.eltype(::AtomIterator) = Int64
Base.length(x::AtomIterator) = x.fullPass ? ProximalBase.numCoordinates(x.iterate) : nnz(x.iterate)
# T = SparseIterate, SymmetricSparseIterate, AtomIterate
# last argument represents full pass
mutable struct OrderedIterator{T} <: AtomIterator
iterate::T
fullPass::Bool
OrderedIterator{T}(iterate::Union{SparseIterate,SymmetricSparseIterate,AtomIterate}, fullPass) where {T} = new(iterate, fullPass)
end
OrderedIterator(iterate) = OrderedIterator{typeof(iterate)}(iterate, true)
Base.iterate(x::OrderedIterator) = iterate(x, 1)
function Base.iterate(x::OrderedIterator, i)
# check if done
done = x.fullPass ? i > ProximalBase.numCoordinates(x.iterate) : i > nnz(x.iterate)
if done
return nothing
end
x.fullPass ? (i, i + 1) : (x.iterate.nzval2ind[i], i + 1)
end
# Base.start(x::OrderedIterator) = 1
# # current item, next state
# Base.next(x::OrderedIterator, i) = x.fullPass ? (i, i + 1) : (x.iterate.nzval2ind[i], i + 1)
# Base.done(x::OrderedIterator, i) = x.fullPass ? i > ProximalBase.numCoordinates(x.iterate) : i > nnz(x.iterate)
#
function reset!(x::OrderedIterator, fullPass::Bool)
x.fullPass = fullPass
x
end
mutable struct RandomIterator{T} <: AtomIterator
iterate::T
order::Vector{Int64}
fullPass::Bool
RandomIterator{T}(iterate::Union{SparseIterate,SymmetricSparseIterate,AtomIterate}, order, fullPass) where {T} =
new(iterate, order, fullPass)
end
RandomIterator(iterate::Union{SparseIterate,SymmetricSparseIterate,AtomIterate}) =
RandomIterator{typeof(iterate)}(iterate, collect(1:ProximalBase.numCoordinates(iterate)), true)
function reset!(x::RandomIterator, fullPass::Bool)
x.fullPass = fullPass
newLength = fullPass ? ProximalBase.numCoordinates(x.iterate) : nnz(x.iterate)
@inbounds for i=1:newLength
x.order[i] = i
end
@inbounds for i=1:newLength-1
j = rand(i:newLength)
x.order[i], x.order[j] = x.order[j], x.order[i]
end
x
end
Base.iterate(x::RandomIterator) = iterate(x, 1)
function Base.iterate(x::RandomIterator, i)
# check if done
done = x.fullPass ? i > ProximalBase.numCoordinates(x.iterate) : i > nnz(x.iterate)
if done
return nothing
end
x.fullPass ? (x.order[i], i + 1) : (x.iterate.nzval2ind[x.order[i]], i + 1)
end
# Base.start(x::RandomIterator) = 1
# Base.next(x::RandomIterator, i) = x.fullPass ? (x.order[i], i + 1) : (x.iterate.nzval2ind[x.order[i]], i + 1)
# Base.done(x::RandomIterator, i) = x.fullPass ? i > ProximalBase.numCoordinates(x.iterate) : i > nnz(x.iterate)
| CoordinateDescent | https://github.com/mlakolar/CoordinateDescent.jl.git |
| ["MIT"] | 0.3.0 | bd00a6c0f899a0f8e7f8cac53e78c6f24cbd8080 | code | 7905 |
abstract type CoordinateDifferentiableFunction end
"""
Set internal parameters of the function f at the point x.
"""
initialize!(f, x) = error("initialize! not implemented for $(typeof(f))")
"""
Coordinate k of the gradient of f evaluated at x.
"""
gradient(f, x, k) = error("gradient not implemented for $(typeof(f))")
"""
This should return number of coordinates or blocks of coordinates over
which the coordinate descent iterates.
"""
numCoordinates(f) = error("numCoordinates not implemented for $(typeof(f))")
"""
Arguments:
* f is CoordinateDifferentiableFunction
* g is a prox function
This function does two things:
* It finds h such that f(x + e_k⋅h) + g(x_k + h) decreases f(x) + g(x).
Often h = arg_min f(x + e_k⋅h) + g(x_k + h), but it could also
minimize a local quadratic approximation.
* It also updates the function's internal state, under the assumption that
the calling algorithm is coordinate descent and will route every update
through this function.
In the future, we may want to implement other variants of coordinate descent.
"""
descendCoordinate!(f, g, x, k) = error("descendCoordinate not implemented for $(typeof(f))")
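# A minimal sketch of a type implementing this interface (illustrative only;
# ToyQuadratic is a hypothetical name, not part of the package). It represents
# the separable quadratic f(x) = ∑_j (x_j - c_j)^2 / 2, whose coordinate-wise
# minimizer has a closed form:
#
#   struct ToyQuadratic{T} <: CoordinateDifferentiableFunction
#       c::Vector{T}
#   end
#   numCoordinates(f::ToyQuadratic) = length(f.c)
#   initialize!(f::ToyQuadratic, x) = nothing           # no internal state to track
#   gradient(f::ToyQuadratic, x, k) = x[k] - f.c[k]
#   function descendCoordinate!(f::ToyQuadratic, g, x, k)
#       oldVal = x[k]
#       x[k] = f.c[k]                                   # unpenalized minimizer
#       cdprox!(g, x, k, one(eltype(f.c))) - oldVal     # prox step, return the move
#   end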
####################################
#
# loss |Y - X⋅β|^2 / (2⋅n)
#
####################################
struct CDLeastSquaresLoss{T<:AbstractFloat, S, U} <: CoordinateDifferentiableFunction
y::S
X::U
r::Vector{T}
CDLeastSquaresLoss{T, S, U}(y::AbstractVector{T}, X::AbstractMatrix{T}, r::Vector{T}) where {T,S,U} =
new(y,X,r)
end
function CDLeastSquaresLoss(y::AbstractVector{T}, X::AbstractMatrix{T}) where {T<:AbstractFloat}
length(y) == size(X, 1) || throw(DimensionMismatch())
CDLeastSquaresLoss{T, typeof(y), typeof(X)}(y,X,copy(y))
end
numCoordinates(f::CDLeastSquaresLoss) = size(f.X, 2)
function initialize!(f::CDLeastSquaresLoss{T}, x::SparseIterate{T}) where {T<:AbstractFloat}
# compute residuals for the loss
X = f.X
y = f.y
r = f.r
n = size(X, 1)
@simd for i=1:n
@inbounds r[i] = y[i] - A_mul_B_row(X, x, i)
end
nothing
end
gradient(f::CDLeastSquaresLoss{T}, x::SparseIterate{T}, j::Int64) where {T<:AbstractFloat} =
- At_mul_B_row(f.X, f.r, j) / size(f.X, 1)
# a = X[:, k]' X[:, k]
# b = X[:, k]' r
#
# h = arg_min a/(2n) (h-b/a)^2 + λ_k⋅|x_k + h|
# xnew[k] = arg_min a/(2n) (xnew_k - (x_k + b/a))^2 + λ_k⋅|xnew_k|
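# Equivalently, this is a soft-thresholding step:
# xnew[k] = soft(x[k] + b/a, n⋅λ_k/a), where soft(u, t) = sign(u)⋅max(|u| - t, 0);
# the cdprox! call below (from ProximalBase) is expected to compute exactly this
# update with step size n/a.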
function descendCoordinate!(
f::CDLeastSquaresLoss{T},
g::ProxL1{T},
x::SparseIterate{T},
k::Int64) where {T<:AbstractFloat}
y = f.y
X = f.X
r = f.r
n = length(f.y)
a = zero(T)
b = zero(T)
@inbounds @simd for i=1:n
a += X[i, k] * X[i, k]
b += r[i] * X[i, k]
end
oldVal = x[k]
x[k] += b / a
newVal = cdprox!(g, x, k, n / a)
h = newVal - oldVal
# update internals -- residuals = y - X * xnew
@inbounds @simd for i=1:n
r[i] -= X[i, k] * h
end
h
end
################################################################################
#
# loss ∑_i w_i ⋅ |Y_i - X_i⋅β|^2 / (2⋅n)
#
################################################################################
struct CDWeightedLSLoss{T<:AbstractFloat, S, U} <: CoordinateDifferentiableFunction
y::S
X::U
w::S
r::Vector{T}
CDWeightedLSLoss{T, S, U}(y::AbstractVector{T}, X::AbstractMatrix{T}, w::AbstractVector{T}, r::Vector{T}) where {T,S,U} =
new(y,X,w,r)
end
function CDWeightedLSLoss(y::AbstractVector{T}, X::AbstractMatrix{T}, w::AbstractVector{T}) where {T<:AbstractFloat}
length(y) == size(X, 1) == length(w) || throw(DimensionMismatch())
CDWeightedLSLoss{T, typeof(y), typeof(X)}(y,X,w,copy(y))
end
numCoordinates(f::CDWeightedLSLoss) = size(f.X, 2)
function initialize!(f::CDWeightedLSLoss{T}, x::SparseIterate{T}) where {T<:AbstractFloat}
# compute residuals for the loss
X = f.X
y = f.y
r = f.r
n = size(X, 1)
@simd for i=1:n
@inbounds r[i] = y[i] - A_mul_B_row(X, x, i)
end
nothing
end
function gradient(f::CDWeightedLSLoss{T}, x::SparseIterate{T}, j::Int64) where {T<:AbstractFloat}
out = zero(T)
n = length(f.r)
@inbounds @simd for i=1:n
out += f.w[i] * f.X[i, j] * f.r[i]
end
- out / n
end
# a = ∑_i w_i X[i,k]^2,  b = ∑_i w_i r_i X[i,k]
#
# h = arg_min a/(2n) (h-b/a)^2 + λ_k⋅|x_k + h|
# xnew[k] = arg_min a/(2n) (xnew_k - (x_k + b/a))^2 + λ_k⋅|xnew_k|
function descendCoordinate!(
f::CDWeightedLSLoss{T},
g::ProxL1{T},
x::SparseIterate{T},
k::Int64) where {T<:AbstractFloat}
y = f.y
X = f.X
r = f.r
w = f.w
n = length(f.y)
a = zero(T)
b = zero(T)
@inbounds @simd for i=1:n
a += X[i, k] * X[i, k] * w[i]
b += r[i] * X[i, k] * w[i]
end
oldVal = x[k]
x[k] += b / a
newVal = cdprox!(g, x, k, n / a)
h = newVal - oldVal
# update internals -- residuals = y - X * xnew
@inbounds @simd for i=1:n
r[i] -= X[i, k] * h
end
h
end
####################################
#
# loss |Y - X⋅β|_2 / sqrt(n)
#
####################################
struct CDSqrtLassoLoss{T<:AbstractFloat, S, U} <: CoordinateDifferentiableFunction
y::S
X::U
r::Vector{T}
CDSqrtLassoLoss{T, S, U}(y::AbstractVector{T}, X::AbstractMatrix{T}, r::Vector{T}) where {T,S,U} =
new(y,X,r)
end
function CDSqrtLassoLoss(y::AbstractVector{T}, X::AbstractMatrix{T}) where {T<:AbstractFloat}
length(y) == size(X, 1) || throw(DimensionMismatch())
CDSqrtLassoLoss{T, typeof(y), typeof(X)}(y,X,copy(y))
end
numCoordinates(f::CDSqrtLassoLoss) = size(f.X, 2)
function initialize!(f::CDSqrtLassoLoss{T}, x::SparseIterate{T}) where {T<:AbstractFloat}
# compute residuals for the loss
X = f.X
y = f.y
r = f.r
n, p = size(f.X)
@simd for i=1:n
@inbounds r[i] = y[i] - A_mul_B_row(X, x, i)
end
nothing
end
gradient(f::CDSqrtLassoLoss{T}, x::SparseIterate{T}, j::Int64) where {T<:AbstractFloat} =
- At_mul_B_row(f.X, f.r, j) / norm(f.r)
# Coordinate update for the sqrt-lasso objective |r|_2 + λ_k⋅|x_k|, where the
# residual r excludes coordinate k. With s = X[:,k]'r, xsqr = X[:,k]'X[:,k],
# rsqr = r'r:
#   x[k] = 0                                                            if |s| ≤ λ_k⋅√rsqr
#   x[k] = (s - sign(s)⋅λ_k⋅√((rsqr - s²/xsqr)/(1 - λ_k²/xsqr))) / xsqr  otherwise
function descendCoordinate!(
f::CDSqrtLassoLoss{T},
g::ProxL1{T},
x::SparseIterate{T},
k::Int64) where {T<:AbstractFloat}
y = f.y
X = f.X
r = f.r
n = length(f.y)
# residuals = y - X * x + X[:, k] * x[k]
@inbounds @simd for i=1:n
r[i] += X[i, k] * x[k]
end
# r = y - X*x + X[:,k]*x[k]
s = zero(T)
xsqr = zero(T)
rsqr = zero(T)
@inbounds @simd for i=1:n
xsqr += X[i, k] * X[i, k]
s += r[i] * X[i, k]
rsqr += r[i] * r[i]
end
# s = dot(r, X[:,k])
# xsqr = dot(X[:,k], X[:, k])
# rsqr = dot(r, r)
λ = g.λ0
if !isa(g, ProxL1{T, Nothing})
λ *= g.λ[k]
end
oldVal = x[k]
if abs(s) <= λ * sqrt(rsqr)
x[k] = zero(T)
elseif s > λ * sqrt(rsqr)
x[k] = ( s - λ / sqrt(1 - λ^2 / xsqr) * sqrt(rsqr - s^2/xsqr) ) / xsqr
else
x[k] = ( s + λ / sqrt(1 - λ^2 / xsqr) * sqrt(rsqr - s^2/xsqr) ) / xsqr
end
# update internals -- residuals = y - X * xnew
@inbounds @simd for i=1:n
r[i] -= X[i, k] * x[k]
end
x[k] - oldVal
end
####################################
#
# quadratic x'Ax/2 + x'b
#
####################################
struct CDQuadraticLoss{T<:AbstractFloat, S, U} <: CoordinateDifferentiableFunction
A::S
b::U
end
function CDQuadraticLoss(A::AbstractMatrix{T}, b::AbstractVector{T}) where {T<:AbstractFloat}
(issymmetric(A) && length(b) == size(A, 2)) || throw(ArgumentError("A must be symmetric with length(b) == size(A, 2)"))
CDQuadraticLoss{T, typeof(A), typeof(b)}(A,b)
end
numCoordinates(f::CDQuadraticLoss) = length(f.b)
initialize!(f::CDQuadraticLoss, x::SparseIterate) = nothing
gradient(f::CDQuadraticLoss{T}, x::SparseIterate{T}, j::Int64) where {T<:AbstractFloat} =
At_mul_B_row(f.A, x, j) + f.b[j]
function descendCoordinate!(
f::CDQuadraticLoss{T},
g::ProxL1{T},
x::SparseIterate{T},
k::Int64) where {T<:AbstractFloat}
a = f.A[k,k]
b = gradient(f, x, k)
oldVal = x[k]
a = one(T) / a
x[k] -= b * a
newVal = cdprox!(g, x, k, a)
h = newVal - oldVal
end
| CoordinateDescent | https://github.com/mlakolar/CoordinateDescent.jl.git |
| ["MIT"] | 0.3.0 | bd00a6c0f899a0f8e7f8cac53e78c6f24cbd8080 | code | 3716 |
#
# minimize f(x) + ∑ λi⋅|xi|
#
# If warmStart is true, the descent starts from the supplied x;
# otherwise it starts from 0 and path-follows: λ is initialized at a large
# value (λmax) and decreased in steps to the target value.
function coordinateDescent!(
x::Union{SparseIterate,SymmetricSparseIterate,AtomIterate},
f::CoordinateDifferentiableFunction,
g::ProxL1,
options::CDOptions=CDOptions())
ProximalBase.numCoordinates(x) == numCoordinates(f) || throw(DimensionMismatch())
if !isa(g, ProxL1{typeof(g.λ0), Nothing}) # TODO: create a test for this
length(g.λ) == numCoordinates(f) || throw(DimensionMismatch())
end
coef_iterator = options.randomize ? RandomIterator(x) : OrderedIterator(x)
if options.warmStart
initialize!(f, x)
return _coordinateDescent!(x, f, g, coef_iterator, options)
else
# set x to zero and initialize
fill!(x, zero(eltype(x)))
initialize!(f, x)
# find λmax
λmax = _findLambdaMax(x, f, g)
# find decreasing schedule for λ
l1, l2 = log(λmax), log(g.λ0)
for l in l1:(l2-l1)/options.numSteps:l2
g1 = ProxL1(exp(l), g.λ)
_coordinateDescent!(x, f, g1, coef_iterator, options)
end
return x
end
end
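# Usage sketch (illustrative data; assumes ProximalBase is loaded):
#   X = randn(200, 50); y = X[:, 1:5] * ones(5) + 0.1 * randn(200)
#   f = CDLeastSquaresLoss(y, X)
#   g = ProxL1(0.1)
#   β = SparseIterate(50)
#   coordinateDescent!(β, f, g, CDOptions(; warmStart=false))   # path-follow from zero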
function coordinateDescent!(
x::Union{SparseIterate,SymmetricSparseIterate,AtomIterate},
f::CoordinateDifferentiableFunction,
g::ProxZero,
options::CDOptions=CDOptions())
ProximalBase.numCoordinates(x) == numCoordinates(f) || throw(DimensionMismatch())
coef_iterator = options.randomize ? RandomIterator(x) : OrderedIterator(x)
if options.warmStart
initialize!(f, x)
else
# set x to zero and initialize
fill!(x, zero(eltype(x)))
initialize!(f, x)
end
return _coordinateDescent!(x, f, g, coef_iterator, options)
end
# assumes that f is initialized before the call here
function _coordinateDescent!(
x::Union{SparseIterate,SymmetricSparseIterate,AtomIterate},
f::CoordinateDifferentiableFunction,
g::Union{ProxL1, ProxZero},
coef_iterator::AtomIterator,
options::CDOptions)
prev_converged = false
converged = true
for iter=1:options.maxIter
if converged
reset!(coef_iterator, true)
# maxH = fullPass!(x, f, g)
else
reset!(coef_iterator, false)
# maxH = nonZeroPass!(x, f, g)
end
maxH = _cdPass!(x, f, g, coef_iterator)
prev_converged = converged
# test for convergence
converged = maxH < options.optTol
prev_converged && converged && break
end
x
end
function _cdPass!(
x::Union{SparseIterate,SymmetricSparseIterate,AtomIterate},
f::CoordinateDifferentiableFunction,
g::Union{ProxL1, ProxZero},
coef_iterator::AtomIterator
)
maxH = zero(eltype(x))
for ipred = coef_iterator # coef_iterator produces original indexes
h = descendCoordinate!(f, g, x, ipred)
if abs(h) > maxH
maxH = abs(h)
end
end
dropzeros!(x)
maxH
end
######
"""
Helper function that finds the smallest value of λ for which the solution is equal to zero.
"""
function _findLambdaMax(x::Union{SparseIterate{T},SymmetricSparseIterate{T}},
f::CoordinateDifferentiableFunction,
::ProxL1{T, Nothing}) where {T<:AbstractFloat}
λmax = zero(T)
for k=1:ProximalBase.numCoordinates(x)
f_g = gradient(f, x, k)
t = abs(f_g)
if t > λmax
λmax = t
end
end
λmax
end
"""
Helper function that finds the smallest value of λ0 for which the solution is equal to zero.
"""
function _findLambdaMax(x::SparseIterate{T},
f::CoordinateDifferentiableFunction,
g::ProxL1{T, S}) where {T<:AbstractFloat} where S <: AbstractArray
λmax = zero(T)
for k=1:length(x)
f_g = gradient(f, x, k)
t = abs(f_g) / g.λ[k]
if t > λmax
λmax = t
end
end
λmax
end
| CoordinateDescent | https://github.com/mlakolar/CoordinateDescent.jl.git |
| ["MIT"] | 0.3.0 | bd00a6c0f899a0f8e7f8cac53e78c6f24cbd8080 | code | 5880 |
######################################################################
#
# Lasso Solution
#
######################################################################
struct LassoSolution{T, S}
x::SparseIterate{T}
residuals::Vector{T}
penalty::S
σ::Union{T, Nothing}
LassoSolution{T, S}(x::SparseIterate{T}, residuals::AbstractVector{T}, penalty::ProxL1{T}, σ::T) where {T, S} =
new(x, residuals, penalty, σ)
LassoSolution{T, S}(x::SparseIterate{T}, residuals::AbstractVector{T}, penalty::ProxL1{T}) where {T, S} =
new(x, residuals, penalty, nothing)
end
######################################################################
#
# Lasso Interface
#
######################################################################
function lasso(
X::StridedMatrix{T},
y::StridedVector{T},
λ::T,
options::CDOptions=CDOptions()) where {T<:AbstractFloat}
x = SparseIterate( size(X, 2) )
f = CDLeastSquaresLoss(y, X)
g = ProxL1(λ)
coordinateDescent!(x, f, g, options)
LassoSolution{T, typeof(g)}(x, f.r, g, std(f.r))
end
function lasso(
X::StridedMatrix{T},
y::StridedVector{T},
λ::T,
ω::Array{T},
options::CDOptions=CDOptions()) where {T<:AbstractFloat}
x = SparseIterate( size(X, 2) )
f = CDLeastSquaresLoss(y, X)
g = ProxL1(λ, ω)
coordinateDescent!(x, f, g, options)
LassoSolution{T, typeof(g)}(x, f.r, g, std(f.r))
end
######################################################################
#
# Sqrt-Lasso Interface
#
######################################################################
function sqrtLasso(
X::StridedMatrix{T},
y::StridedVector{T},
λ::T,
options::CDOptions=CDOptions();
standardizeX=true) where {T<:AbstractFloat}
p = size(X, 2)
x = SparseIterate( p )
f = CDSqrtLassoLoss(y, X)
if standardizeX
stdX = Array{T}(undef, p)
_stdX!(stdX, X)
g = ProxL1(λ, stdX)
else
g = ProxL1(λ)
end
coordinateDescent!(x, f, g, options)
LassoSolution{T, typeof(g)}(x, f.r, g, std(f.r))
end
function sqrtLasso(
X::StridedMatrix{T},
y::StridedVector{T},
λ::T,
ω::Array{T},
options::CDOptions=CDOptions()) where {T<:AbstractFloat}
p = size(X, 2)
x = SparseIterate( p )
f = CDSqrtLassoLoss(y, X)
g = ProxL1(λ, ω)
coordinateDescent!(x, f, g, options)
LassoSolution{T, typeof(g)}(x, f.r, g, std(f.r))
end
######################################################################
#
# Scaled Lasso Interface
#
######################################################################
function scaledLasso!(
x::SparseIterate{T},
X::AbstractMatrix{T},
y::AbstractVector{T},
λ::T,
ω::AbstractVector{T},
options::IterLassoOptions=IterLassoOptions()
) where {T<:AbstractFloat}
n, p = size(X)
f = CDLeastSquaresLoss(y,X)
# initialize σ
if options.initProcedure == :Screening
σ = _findInitSigma!(X, y, options.sinit, f.r)
elseif options.initProcedure == :InitStd
σ = options.σinit
elseif options.initProcedure == :WarmStart
initialize!(f, x)
σ = std(f.r)
else
throw(ArgumentError("Incorrect initialization Symbol"))
end
g = ProxL1(λ * σ, ω)
for iter=1:options.maxIter
coordinateDescent!(x, f, g, options.optionsCD)
σnew = sqrt( sum(abs2, f.r) / n )
if abs(σnew - σ) / σ < options.optTol
break
end
σ = σnew
g = ProxL1(λ * σ, ω)
end
LassoSolution{T, typeof(g)}(x, f.r, g, std(f.r))
end
######################################################################
#
# Feasible Lasso Interface
#
######################################################################
function feasibleLasso!(
x::SparseIterate{T},
X::AbstractMatrix{T},
y::AbstractVector{T},
λ0::T,
options::IterLassoOptions=IterLassoOptions()
) where {T<:AbstractFloat}
n, p = size(X)
f = CDLeastSquaresLoss(y,X)
Γ = Array{T}(undef, p) # stores loadings
Γold = Array{T}(undef, p)
# initialize residuals
if options.initProcedure == :Screening
_findInitResiduals!(X, y, options.sinit, f.r)
elseif options.initProcedure == :InitStd
σ = options.σinit
_stdX!(Γ, X)
coordinateDescent!(x, f, ProxL1(λ0*σ, Γ), options.optionsCD)
elseif options.initProcedure == :WarmStart
initialize!(f, x)
else
throw(ArgumentError("Incorrect initialization Symbol"))
end
_getLoadings!(Γ, X, f.r)
g = ProxL1(λ0, Γ)
for iter=1:options.maxIter
copyto!(Γold, Γ)
coordinateDescent!(x, f, g, options.optionsCD)
_getLoadings!(Γ, X, f.r)
if maximum(abs.(Γold - Γ)) / maximum(Γ) < options.optTol
break
end
end
LassoSolution(x, f.r, g, std(f.r))
end
######################################################################
#
# Lasso Path Interface
#
######################################################################
struct LassoPath{T<:AbstractFloat}
λpath::Vector{T}
βpath::Vector{SparseIterate{T,1}}
end
function refitLassoPath(
path::LassoPath{T},
X::StridedMatrix{T},
Y::StridedVector{T}) where {T<:AbstractFloat}
λpath = path.λpath
βpath = path.βpath
out = Dict{Vector{Int64},Vector{Float64}}()
for i=1:length(λpath)
S = findall(!iszero, βpath[i])
if haskey(out, S)
continue
end
out[S] = X[:, S] \ Y
end
out
end
# λpath is assumed to be in decreasing order
function LassoPath(
X::StridedMatrix{T},
Y::StridedVector{T},
λpath::Vector{T},
options=CDOptions();
max_hat_s=Inf, standardizeX::Bool=true) where {T<:AbstractFloat}
n, p = size(X)
stdX = Array{T}(undef, p)
if standardizeX
_stdX!(stdX, X)
else
fill!(stdX, one(T))
end
x = SparseIterate(T, p)
f = CDLeastSquaresLoss(Y, X)
numλ = length(λpath)
βpath = Vector{SparseIterate{T}}(undef, numλ)
for indλ=1:numλ
coordinateDescent!(x, f, ProxL1(λpath[indλ], stdX), options)
βpath[indλ] = copy(x)
if nnz(x) > max_hat_s
resize!(λpath, indλ)
break
end
end
LassoPath{T}(copy(λpath), βpath)
end
| CoordinateDescent | https://github.com/mlakolar/CoordinateDescent.jl.git |
| ["MIT"] | 0.3.0 | bd00a6c0f899a0f8e7f8cac53e78c6f24cbd8080 | code | 4084 |
####################################
#
# options
#
####################################
struct CDOptions
maxIter::Int64
optTol::Float64
randomize::Bool # coordinates are visited in a randomized order or not
warmStart::Bool # when running CD, start from the supplied x (true) or path-follow from zero (false)
numSteps::Int64 # when path following, how many points are on the path
end
CDOptions(;
maxIter::Int64=2000,
optTol::Float64=1e-7,
randomize::Bool=true,
warmStart::Bool=true,
numSteps::Int=50) = CDOptions(maxIter, optTol, randomize, warmStart, numSteps)
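# e.g. CDOptions(; maxIter=5000, optTol=1e-8, warmStart=false) overrides three
# fields and keeps the remaining defaults.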
# this is used for ScaledLasso and FeasibleLasso
struct IterLassoOptions
maxIter::Int64
optTol::Float64
initProcedure::Symbol # :Screening, :InitStd, :WarmStart
sinit::Int64 # how many columns of X to use to estimate initial variance or obtain initial residuals
σinit::Float64
optionsCD::CDOptions
end
IterLassoOptions(;
maxIter::Int64=20,
optTol::Float64=1e-2,
initProcedure::Symbol=:Screening,
sinit::Int64=5,
σinit::Float64=1.,
optionsCD::CDOptions=CDOptions()) = IterLassoOptions(maxIter, optTol, initProcedure, sinit, σinit, optionsCD)
####################################
#
# helper functions
#
####################################
"""
Helper function that finds an initial estimate for σ that is needed
for Lasso and ScaledLasso procedures.
The procedure works as follows:
* s input variables that are most correlated with the response y are found
* y is regressed on those s features
* σ is estimated based on the residuals, which gives an upper bound on the true sigma
"""
_findInitSigma!(
X::AbstractMatrix{T},
y::AbstractVector{T},
s::Int,
storage::Vector{T}) where {T <: AbstractFloat} = std(_findInitResiduals!(X, y, s, storage))
function _findInitResiduals!(
X::AbstractMatrix{T},
y::AbstractVector{T},
s::Int,
storage::Vector{T}) where {T <: AbstractFloat}
S = _findLargestCorrelations(X, y, s)
Xs = view(X, :, S)
mul!(storage, Xs, Xs \ y)
@. storage = y - storage
return storage
end
function _findInitResiduals!(
w::AbstractVector{T},
X::AbstractMatrix{T},
y::AbstractVector{T},
s::Int,
storage::Vector{T}) where {T <: AbstractFloat}
S = _findLargestCorrelations(w, X, y, s)
Xs = view(X, :, S)
mul!(storage, Xs, (Xs' * Diagonal(w) * Xs) \ (Xs' * Diagonal(w) * y))
@. storage = y - storage
return storage
end
# returns a BitArray marking the s columns of X most correlated with y
function _findLargestCorrelations(
X::AbstractMatrix{T},
y::AbstractVector{T},
s::Int) where {T <: AbstractFloat}
p = size(X, 2)
storage = Array{T}(undef, p)
mul!(storage, transpose(X), y)
@. storage = abs(storage)
S = storage .>= nlargest(s, storage)[end]
end
function _findLargestCorrelations(
w::AbstractVector{T},
X::AbstractMatrix{T},
y::AbstractVector{T},
s::Int) where {T <: AbstractFloat}
n, p = size(X)
storage = Array{T}(undef, p)
@inbounds for j=1:p
val = zero(T)
@simd for i=1:n
val += X[i,j] * w[i] * y[i]
end
storage[j] = abs(val)
end
S = storage .>= nlargest(s, storage)[end]
end
function _stdX!(out::Vector{T}, X::AbstractMatrix{T}) where {T <: AbstractFloat}
n, p = size(X)
@inbounds for j=1:p
v = zero(T)
@simd for i=1:n
v += X[i, j]^2.
end
out[j] = sqrt(v / n)
end
out
end
function _stdX!(out::Vector{T}, w::AbstractVector{T}, X::AbstractMatrix{T}) where {T <: AbstractFloat}
n, p = size(X)
@inbounds for j=1:p
v = zero(T)
@simd for i=1:n
v += w[i] * X[i, j]^2.
end
out[j] = sqrt(v / n)
end
out
end
function _getLoadings!(out::Vector{T}, X::AbstractMatrix{T}, e::AbstractVector{T}) where {T <: AbstractFloat}
n, p = size(X)
@inbounds for j=1:p
v = zero(T)
@simd for i=1:n
v += (X[i, j]*e[i])^2.
end
out[j] = sqrt(v / n)
end
out
end
function _getSigma(w::AbstractVector{T}, r::AbstractVector{T}) where {T <: AbstractFloat}
n = length(w)
σ = zero(T)
for ii = 1:n
σ += r[ii]^2 * w[ii]
end
σ /= sum(w)
sqrt(σ)
end
| CoordinateDescent | https://github.com/mlakolar/CoordinateDescent.jl.git |
| ["MIT"] | 0.3.0 | bd00a6c0f899a0f8e7f8cac53e78c6f24cbd8080 | code | 16896 |
### Kernel functions
abstract type SmoothingKernel{T} end
struct GaussianKernel{T} <: SmoothingKernel{T}
h::T
end
struct EpanechnikovKernel{T} <: SmoothingKernel{T}
h::T
end
createKernel(::Type{GaussianKernel{T}}, h::T) where {T <: AbstractFloat} = GaussianKernel{T}(h)
createKernel(::Type{EpanechnikovKernel{T}}, h::T) where {T <: AbstractFloat} = EpanechnikovKernel{T}(h)
evaluate(k::GaussianKernel{T}, x::T, y::T) where {T <: AbstractFloat} = exp(-(x-y)^2. / k.h) / k.h
function evaluate(k::EpanechnikovKernel{T}, x::T, y::T) where {T <: AbstractFloat}
u = (x - y) / k.h
abs(u) >= 1. ? zero(T) : 0.75 * (1. - u^2.) / k.h
end
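# Example: broadcasting a kernel over data points (Ref protects the scalar arguments):
#   k = GaussianKernel(0.2)
#   w = evaluate.(Ref(k), z, Ref(0.5))   # weight of each z[i] relative to z0 = 0.5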
############################################################
#
# local polynomial regression with lasso
#
############################################################
function locpolyl1(
X::Matrix{T}, z::Vector{T}, y::Vector{T},
zgrid::Vector{T},
degree::Int64,
kernel::SmoothingKernel{T},
λ0::T,
refit::Bool,
options::CDOptions=CDOptions()) where {T <: AbstractFloat}
# construct inner options because we do not want warmStart = false
# we want to start from the previous iteration since the points
# on the grid should be close to each other
opt = CDOptions(options.maxIter, options.optTol, options.randomize, true, options.numSteps)
n, p = size(X)
ep = p * (degree + 1)
out = spzeros(T, ep, length(zgrid))
outR = spzeros(T, ep, length(zgrid))
# temporary storage
w = Array{T}(undef, n)
expandX = Array{T}(undef, n, ep)
S = BitArray(undef, ep)
stdX = Array{T}(undef, ep)
f = CDWeightedLSLoss(y, expandX, w) # inner parts of f will be modified in a loop
g = ProxL1(λ0, stdX)
β = SparseIterate(ep)
ind = 0
for z0 in zgrid
ind += 1
# the following two lines update the data referenced by f
w .= evaluate.(Ref(kernel), z, Ref(z0))
_expand_X!(expandX, X, z, z0, degree)
_stdX!(stdX, w, expandX)
# solve for β
coordinateDescent!(β, f, g, opt)
out[:, ind] = β
if refit
get_nonzero_coordinates!(S, β, p, degree, true)
Xs = view(expandX, :, S)
tmp = Xs' * Diagonal(w)
outR[S, ind] = (tmp * Xs) \ (tmp * y)
end
end
out, outR
end
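# Usage sketch (illustrative values): estimate varying coefficients on a grid,
# with an ℓ1 penalty and a refitted (post-lasso) solution returned alongside:
#   zgrid = collect(0.01:0.1:0.99)
#   β, βrefit = locpolyl1(X, z, y, zgrid, 1, GaussianKernel(0.2), 0.1, true)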
# leave one out for h selection
function lvocv_locpolyl1(
X::Matrix{T}, z::Vector{T}, y::Vector{T},
degree::Int64, # degree of the polynomial
hArr::Vector{T},
kernelType::Type{<:SmoothingKernel},
λ0::T,
options::CDOptions=CDOptions()) where {T <: AbstractFloat}
n, p = size(X)
numH = length(hArr)
MSE = zeros(numH)
opt = CDOptions(options.maxIter, options.optTol, options.randomize, true, options.numSteps)
ep = p * (degree + 1)
w = Array{T}(undef, n)
wX = Array{T}(undef, n, ep)
stdX = Array{T}(undef, ep)
S = BitArray(undef, ep)
f = CDWeightedLSLoss(y, wX, w)
β = SparseIterate(ep)
for indH = 1:numH
kernel = createKernel(kernelType, hArr[indH])
for i = 1:n
# update variables
z0 = z[i]
w .= evaluate.(Ref(kernel), z, Ref(z0))
w[i] = zero(T)
_expand_X!(wX, X, z, z0, degree)
_stdX!(stdX, w, wX)
_findInitResiduals!(w, wX, y, min(10, ep), f.r)
# compute sigma
σ = _getSigma(w, f.r)
g = ProxL1(λ0 * σ, stdX)
for iter=1:10
coordinateDescent!(β, f, g, opt)
σnew = _getSigma(w, f.r)
if abs(σnew - σ) / σ < 1e-2
break
end
σ = σnew
g = ProxL1(λ0 * σ, stdX)
end
# refit and make prediction
get_nonzero_coordinates!(S, β, p, degree, true)
Xs = view(wX, :, S)
Yh = dot(wX[i, S], (Xs' * Diagonal(w) * Xs) \ (Xs' * Diagonal(w) * y))
MSE[indH] += (Yh - y[i])^2.
end
end
MSE
end
function refit_locpolyl1(
X::Matrix{T}, z::Vector{T}, y::Vector{T},
z0::T,
degree::Int64,
kernel::SmoothingKernel{T},
β::Union{SparseIterate{T}, SparseVector{T}}
) where {T <: AbstractFloat}
n, p = size(X)
ep = p * (degree + 1)
S = BitArray(undef, p)
get_nonzero_coordinates!(S, β, p, degree, false)
βr = locpoly(view(X, :, S), z, y, z0, degree, kernel)
(βr, S)
end
# function refit_locpolyl1(
# X::Matrix{T}, z::Vector{T}, y::Vector{T},
# zgrid::Vector{T},
# degree::Int64,
# kernel::SmoothingKernel{T},
# β::SparseMatrixCSC{T}
# ) where {T <: AbstractFloat}
#
# n, p = size(X)
# ep = p * (degree + 1)
# S = BitArray(undef, p, length(zgrid))
# fill!(S, false)
# w = Array{T}(undef, n)
# expandedX = Array{T}(undef, n, ep)
# βr = Array{Array{T}}(undef, length(zgrid))
# tS = BitArray(undef, p)
#
# for indZ = 1:length(zgrid)
# o_coordinates!(tS, β[:, indZ], p, degree, false)
# @show β[:, indZ]
# @show S[:, indZ] = tS
# z0 = zgrid[indZ]
# num_col_refit = sum(S[:, indZ])*(degree+1)
# Xs = view(X, :, S[:, indZ])
# βr[indZ] = _locpoly!(view(expandedX, :, 1:num_col_refit), w, Xs, z, y, z0, degree, kernel)
# end
# (βr, S)
# end
############################################################
#
# local polynomial regression low dimensions
#
############################################################
function _locpoly!(
wX::Union{SubArray{T, 2}, Matrix{T}}, w::Vector{T},
X::Union{SubArray{T, 2}, Matrix{T}},
z::Union{SubArray{T, 1}, Vector{T}},
y::Union{SubArray{T, 1}, Vector{T}},
z0::T,
degree::Int64,
kernel::SmoothingKernel{T}) where {T <: AbstractFloat}
w .= sqrt.(evaluate.(Ref(kernel), z, Ref(z0))) # square root of kernel weights
_expand_wX!(wX, w, X, z, z0, degree) # √w ⋅ x ⊗ [1 (zi - z0) ... (zi-z0)^q]
@. w *= y # √w ⋅ y
qr!(wX) \ w
end
locpoly(
X::Union{SubArray{T, 2}, Matrix{T}}, z::Vector{T}, y::Vector{T},
z0::T, degree::Int64, kernel::SmoothingKernel=GaussianKernel(one(T))) where {T <: AbstractFloat} =
_locpoly!(Array{T}(undef, length(y), size(X, 2) * (degree+1)), similar(y), X, z, y, z0, degree, kernel)
function locpoly(
X::Union{SubArray{T, 2}, Matrix{T}}, z::Vector{T}, y::Vector{T},
zgrid::Vector{T},
degree::Int64, # degree of the polynomial
kernel::SmoothingKernel{T}=GaussianKernel(one(T))) where {T <: AbstractFloat}
n, p = size(X)
ep = p * (degree + 1)
out = Array{T}(undef, ep, length(zgrid))
w = Array{T}(undef, n)
wX = Array{T}(undef, n, ep)
ind = 0
for z0 in zgrid
ind += 1
out[:, ind] = _locpoly!(wX, w, X, z, y, z0, degree, kernel)
end
out
end
function getResiduals!(
ϵhat::Vector{T},
X::Union{SubArray{T, 2}, Matrix{T}}, z::Vector{T}, y::Vector{T},
zgrid::Vector{T},
βhat::Union{Matrix{T}, SparseMatrixCSC{T}},
degree::Int64,
kernel::SmoothingKernel{T}=GaussianKernel(one(T))
) where {T <: AbstractFloat}
n, p = size(X)
ep = p * (degree + 1)
βi = spzeros(ep)
for i=1:n
get_beta!(βi, zgrid, βhat, z[i])
ϵhat[i] = y[i] - dot(X[i, :], βi[1:(degree+1):ep])
end
ϵhat
end
function getStandardError(
X::Union{SubArray{T, 2}, Matrix{T}}, z::Vector{T},
σ2::T,
z0::T,
degree::Int64, # degree of the polynomial
kernel::SmoothingKernel{T}) where {T <: AbstractFloat}
n, p = size(X)
ep = p * (degree + 1)
out = Array{T}(undef, p)
w = Array{T}(undef, n)
w1 = Array{T}(undef, n)
XtwX = Array{T}(undef, ep, ep)
XtwwX = Array{T}(undef, ep, ep)
w .= evaluate.(Ref(kernel), z, Ref(z0))
_expand_Xt_w_X!(XtwX, w, X, z, z0, degree)
w .= w .* w
_expand_Xt_w_X!(XtwwX, w, X, z, z0, degree)
A = inv(XtwX)
varMat = A * XtwwX * A
for j=1:p
out[j] = varMat[(j-1)*(degree+1)+1, (j-1)*(degree+1)+1]
end
out
end
function getStandardErrorHEW(
X::Union{SubArray{T, 2}, Matrix{T}}, z::Vector{T},
ϵ_sqr::Vector{T},
z0::T,
degree::Int64, # degree of the polynomial
kernel::SmoothingKernel{T}) where {T <: AbstractFloat}
n, p = size(X)
ep = p * (degree + 1)
out = Array{T}(undef, p)
w = Array{T}(undef, n)
w1 = Array{T}(undef, n)
XtwX = Array{T}(undef, ep, ep)
XtwΨwX = Array{T}(undef, ep, ep)
w .= evaluate.(Ref(kernel), z, Ref(z0))
_expand_Xt_w_X!(XtwX, w, X, z, z0, degree)
@. w = w * w * ϵ_sqr
_expand_Xt_w_X!(XtwΨwX, w, X, z, z0, degree)
A = inv(XtwX)
varMat = A * XtwΨwX * A
for j=1:p
out[j] = varMat[(j-1)*(degree+1)+1, (j-1)*(degree+1)+1]
end
out
end
# function locpoly_alt(
# X::Matrix{T}, z::Vector{T}, y::Vector{T},
# zgrid::Vector{T},
# degree::Int64, # degree of the polynomial
# kernel::SmoothingKernel{T}=GaussianKernel(one(T))) where {T <: AbstractFloat}
#
# n, p = size(X)
# ep = p * (degree + 1)
# out = Array{T}(undef, ep, length(zgrid))
# w = Array{T}(undef, n)
# Xt_w_Y = Array{T}(undef, ep)
# Xt_w_X = Array{T}(undef, ep, ep)
#
# ind = 0
# for z0 in zgrid
# w .= evaluate.(Ref(kernel), z, Ref(z0))
# _expand_Xt_w_Y!(Xt_w_Y, w, X, z, y, z0, degree)
# _expand_Xt_w_X!(Xt_w_X, w, X, z, z0, degree)
# ind += 1
# out[:, ind] = Xt_w_X \ Xt_w_Y
# end
# out
# end
# leave one out for h selection
function lvocv_locpoly(
X::Matrix{T}, z::Vector{T}, y::Vector{T},
degree::Int64, # degree of the polynomial
hArr::Vector{T},
kernelType::Type{<:SmoothingKernel}) where {T <: AbstractFloat}
n, p = size(X)
numH = length(hArr)
MSE = zeros(numH)
ep = p * (degree + 1)
w = Array{T}(undef, n-1)
wX = Array{T}(undef, n-1, ep)
indOut = BitArray(undef, n)
for indH = 1:numH
kernel = createKernel(kernelType, hArr[indH])
for i = 1:n
fill!(indOut, true)
indOut[i] = false
Xview = view(X, indOut, :)
Yview = view(y, indOut)
Zview = view(z, indOut)
hbeta = _locpoly!(wX, w, Xview, Zview, Yview, z[i], degree, kernel)
# make prediction
Yh = dot(X[i, :], hbeta[1:(degree+1):ep])
MSE[indH] += (Yh - y[i])^2.
end
end
MSE
end
# data split for h selection
function split_locpoly(
X::Matrix{T}, z::Vector{T}, y::Vector{T},
Xtest::Matrix{T}, ztest::Vector{T}, ytest::Vector{T},
zgrid::Vector{T},
degree::Int64, # degree of the polynomial
hArr::Vector{T},
kernelType::Type{<:SmoothingKernel}) where {T <: AbstractFloat}
n, p = size(X)
numH = length(hArr)
MSE = zeros(numH)
ep = p * (degree + 1)
βhati = zeros(ep)
for indH = 1:numH
kernel = createKernel(kernelType, hArr[indH])
βhat = locpoly(X, z, y, zgrid, degree, kernel)
for i = 1:n
get_beta!(βhati, zgrid, βhat, ztest[i])
# make prediction
Yh = dot(Xtest[i, :], βhati[1:(degree+1):ep])
MSE[indH] += (ytest[i] - Yh)^2.
end
end
MSE
end
# # leave one out for h selection
# function lvocv_locpoly(
# X::Matrix{T}, z::Vector{T}, y::Vector{T},
# degree::Int64, # degree of the polynomial
# hArr::Vector{T},
# kernelType::Type{<:SmoothingKernel}) where {T <: AbstractFloat}
#
# n, p = size(X)
# numH = length(hArr)
# MSE = zeros(numH)
#
# ep = p * (degree + 1)
# w = Array{T}(undef, n)
# Xt_w_Y = Array{T}(undef, ep)
# Xt_w_X = Array{T}(undef, ep, ep)
#
# for indH = 1:numH
# kernel = createKernel(kernelType, hArr[indH])
# for i = 1:n
# z0 = z[i]
# w .= evaluate.(Ref(kernel), z, Ref(z0))
# w[i] = zero(T)
# _expand_Xt_w_Y!(Xt_w_Y, w, X, z, y, z0, degree)
# _expand_Xt_w_X!(Xt_w_X, w, X, z, z0, degree)
# hbeta = Xt_w_X \ Xt_w_Y
# # make prediction
# Yh = dot(X[i, :], hbeta[1:(degree+1):ep])
# MSE[indH] += (Yh - y[i])^2.
# end
# end
# MSE
# end
############################################################
#
# utils
#
############################################################
"""
For a given z0 finds two closest points in zgrid
and corresponding values of beta. The output is obtained
by interpolating the beta values.
"""
function get_beta!(
out::Union{SparseVector{T}, Vector{T}},
zgrid::Vector{T},
beta_grid::Union{Matrix{T}, SparseMatrixCSC{T}},
z0::T) where {T <: AbstractFloat}
id1 = searchsortedlast(zgrid, z0)
id2 = searchsortedfirst(zgrid, z0)
if id1 == id2
out .= beta_grid[:, id1]
else
z1 = zgrid[id1]
z2 = zgrid[id2]
α = (z0 - z1) / (z2 - z1)
# linear interpolation: weight (1-α) on the left grid point, α on the right
out .= (1-α) * beta_grid[:, id1] .+ α * beta_grid[:, id2]
end
out
end
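# Worked example: zgrid = [0.1, 0.2] and z0 = 0.12 give α = 0.2, so the output is
# 0.8 ⋅ beta_grid[:, 1] + 0.2 ⋅ beta_grid[:, 2], i.e. most weight on the closer point.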
get_nonzero_coordinates(
β::Union{SparseVector{T}, SparseIterate{T}},
p::Int,
degree::Int,
expanded::Bool
) where {T <: AbstractFloat} =
expanded ? get_nonzero_coordinates!(BitArray(undef, p*(degree+1)), β, p, degree, expanded) : get_nonzero_coordinates!(BitArray(undef, p), β, p, degree, expanded)
function get_nonzero_coordinates!(
S::BitArray,
β::Union{SparseVector{T}, SparseIterate{T}},
p::Int,
degree::Int,
expanded::Bool) where {T <: AbstractFloat}
fill!(S, false)
for j = 1:p
nonzero = false
for k=((j-1)*(degree+1)+1):(j*(degree+1))
nonzero = nonzero | !iszero(β[k])
end
if nonzero
if expanded
for k=((j-1)*(degree+1)+1):(j*(degree+1))
S[k] = true
end
else
S[j] = true
end
end
end
S
end
"""
Computes a matrix whose i-th row is equal to
w[i] ⋅ (X[i, :] ⊗ [1, (z[i] - z0), ..., (z[i]-z0)^q])
where q is the degree of the polynomial.
The output matrix is preallocated.
"""
function _expand_wX!(
wX::Union{SubArray{T, 2}, Matrix{T}},
w::Vector{T},
X::Union{SubArray{T, 2}, Matrix{T}},
z::Union{SubArray{T, 1}, Vector{T}}, z0::T, degree::Int64) where {T <: AbstractFloat}
n, p = size(X)
# wX = zeros(n, p*(degree+1))
for j=1:p
@inbounds for i=1:n
v = X[i, j] * w[i]
df = z[i] - z0
col = (j-1)*(degree+1) + 1
@inbounds wX[i, col] = v
for l=1:degree
v *= df
@inbounds wX[i, col + l] = v
end
end
end
# return qrfact!(tX)
wX
end
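# For degree = 1 this produces row i as w[i] .* kron(X[i, :], [1, z[i] - z0]),
# the Kronecker expansion used throughout this file (see also the unit tests).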
# expands the data matrix X
# each row becomes
# (X[i, :] ⊗ [1, (z - z0), ..., (z-z0)^q])
function _expand_X!(
tX::Matrix{T},
X::Union{SubArray{T, 2}, Matrix{T}},
z::Union{SubArray{T, 1}, Vector{T}}, z0::T, degree::Int64) where {T <: AbstractFloat}
n, p = size(X)
for j=1:p
for i=1:n
v = X[i, j]
df = z[i] - z0
col = (j-1)*(degree+1) + 1
@inbounds tX[i, col] = v
for l=1:degree
v *= df
@inbounds tX[i, col + l] = v
end
end
end
tX
end
function _expand_Xt_w_X!(
Xt_w_X::Matrix{T},
w::Vector{T}, X::Matrix{T},
z::Vector{T}, z0::T, degree::Int64) where {T <: AbstractFloat}
n, p = size(X)
fill!(Xt_w_X, zero(T))
@inbounds for j=1:p
@inbounds for k=j:p
@inbounds for i=1:n
# if iszero(w[i])
# continue
# end
v1 = X[i, j] * w[i]
df1 = z[i] - z0
col=(j-1)*(degree+1)+1
for jj=0:degree
v2 = X[i, k]
df2 = z[i] - z0
if k != j
krange = 0:degree
else
krange = jj:degree
v2 = v2 * df2^jj
end
row = (k-1)*(degree+1)+1
for kk=krange
# need X[i, j] * (z[i] - z0)^(jj+kk) * X[i, k] * w[i]
# v2 = (z[i] - z0)^kk * X[i, k]
# v1 = (z[i] - z0)^jj * X[i, j] * w[i]
Xt_w_X[row+kk, col+jj] += v2 * v1
v2 *= df2
end
v1 *= df1
end
end
end
end
@inbounds for c=1:size(Xt_w_X, 2)
for r=c+1:size(Xt_w_X, 1)
Xt_w_X[c, r] = Xt_w_X[r, c]
end
end
Xt_w_X
end
function _expand_Xt_w_Y!(
Xt_w_Y::Vector{T},
w::Vector{T}, X::Matrix{T}, z::Vector{T}, y::Vector{T},
z0::T, degree::Int64) where {T <: AbstractFloat}
n, p = size(X)
fill!(Xt_w_Y, zero(T))
@inbounds for j=1:p
@inbounds for i=1:n
# if iszero(w[i])
# continue
# end
v = X[i, j] * w[i] * y[i]
df = z[i] - z0
col=(j-1)*(degree+1)+1
@inbounds for jj=0:degree
# need X[i, j] * (z[i] - z0)^jj * y[i] * w[i]
Xt_w_Y[col+jj] += v
v *= df
end
end
end
Xt_w_Y
end
| CoordinateDescent | https://github.com/mlakolar/CoordinateDescent.jl.git |
| ["MIT"] | 0.3.0 | bd00a6c0f899a0f8e7f8cac53e78c6f24cbd8080 | code | 2249 |
module AtomIteratorTest
using Test
using ProximalBase
using CoordinateDescent
using Random
Random.seed!(1)
@testset "Ordered" begin
p = 5
x = SparseIterate(p)
x[2] = 1.
x[1] = 2.
it = CoordinateDescent.OrderedIterator(x)
fullPass = collect(1:5)
sparsePass = [2, 1]
@test collect(it) == fullPass # this should be a full pass
CoordinateDescent.reset!(it, true)
@test collect(it) == fullPass # this should be a full pass
CoordinateDescent.reset!(it, false)
@test collect(it) == sparsePass # pass over non-zeros
# SymmetricSparseIterate
x = SymmetricSparseIterate(3)
x[2] = 1.
x[1] = 2.
it = CoordinateDescent.OrderedIterator(x)
fullPass = collect(1:6)
sparsePass = [2, 1]
@test collect(it) == fullPass # this should be a full pass
CoordinateDescent.reset!(it, true)
@test collect(it) == fullPass # this should be a full pass
CoordinateDescent.reset!(it, false)
@test collect(it) == sparsePass # pass over non-zeros
end
@testset "Random" begin
p = 50
s = 10
x = SparseIterate(p)
for i=1:s
x[rand(1:p)] = randn()
end
it = CoordinateDescent.RandomIterator(x)
@test collect(it) == collect(1:ProximalBase.numCoordinates(it.iterate)) # this should be a full pass over 1:p
CoordinateDescent.reset!(it, true)
@test collect(it) == it.order # this should be a full pass over 1:p in a random order
CoordinateDescent.reset!(it, false)
@test collect(it) == [x.nzval2ind[it.order[i]] for i=1:nnz(x)] # this should be a sparse pass
# SymmetricSparseIterate
x = SymmetricSparseIterate(10)
for i=1:s
x[rand(1:55)] = randn()
end
it = CoordinateDescent.RandomIterator(x)
@test collect(it) == collect(1:ProximalBase.numCoordinates(it.iterate)) # this should be a full pass over 1:p
CoordinateDescent.reset!(it, true)
@test collect(it) == it.order # this should be a full pass over 1:p in a random order
CoordinateDescent.reset!(it, false)
@test collect(it) == [x.nzval2ind[it.order[i]] for i=1:nnz(x)] # this should be a sparse pass
end
end
| CoordinateDescent | https://github.com/mlakolar/CoordinateDescent.jl.git |
| ["MIT"] | 0.3.0 | bd00a6c0f899a0f8e7f8cac53e78c6f24cbd8080 | code | 4738 |
module CoordinateDescentTest
const NUMBER_REPEAT = 1
using CoordinateDescent
using Test
using ProximalBase
using Random
using LinearAlgebra
using SparseArrays
# test |Y - β|_2^2 + λ⋅|β|_1
@testset "Small ProxL1" begin
Y = [1., 1.5]
λ = 1.2
g = ProximalBase.ProxL1(λ)
f = CDQuadraticLoss(Matrix(1.0I, 2, 2), -Y)
opt = CDOptions(;maxIter=100, optTol=1e-8, warmStart=true, randomize=false)
x = SparseIterate(2)
coordinateDescent!(x, f, g, opt)
@test Vector(x) ≈ Vector([0., 0.3])
end
# check that the warm start and non warm start produce the same result
@testset "ProxL1" begin
for i=1:NUMBER_REPEAT
n = 500
p = 50
s = 5
X = randn(n, p)
β = randn(s)
Y = X[:,1:s] * β + randn(n)
λ = 0.02
g = ProximalBase.ProxL1(λ)
f = CDLeastSquaresLoss(Y, X)
opt1 = CDOptions(;maxIter=5000, optTol=1e-12, warmStart=true, randomize=false)
opt2 = CDOptions(;maxIter=5000, optTol=1e-12, warmStart=true, randomize=true)
opt3 = CDOptions(;maxIter=5000, optTol=1e-12, warmStart=false, randomize=false)
opt4 = CDOptions(;maxIter=5000, optTol=1e-12, warmStart=false, randomize=true)
x1 = SparseIterate(sprand(p, 0.6))
x2 = SparseIterate(sprand(p, 0.6))
x3 = SparseIterate(sprand(p, 0.6))
x4 = SparseIterate(sprand(p, 0.6))
coordinateDescent!(x1, f, g, opt1)
coordinateDescent!(x2, f, g, opt2)
coordinateDescent!(x3, f, g, opt3)
coordinateDescent!(x4, f, g, opt4)
@test Vector(x1) ≈ Vector(x2) atol=1e-5
@test Vector(x3) ≈ Vector(x2) atol=1e-5
@test Vector(x4) ≈ Vector(x2) atol=1e-5
end
end
@testset "AProxL1" begin
for i=1:NUMBER_REPEAT
n = 500
p = 50
s = 10
X = randn(n, p)
β = randn(s)
Y = X[:,1:s] * β + randn(n)
λ = 0.01
g = ProximalBase.ProxL1(λ, rand(p))
f = CDLeastSquaresLoss(Y, X)
opt1 = CDOptions(;maxIter=5000, optTol=1e-8, warmStart=true, randomize=false)
opt2 = CDOptions(;maxIter=5000, optTol=1e-8, warmStart=true, randomize=true)
opt3 = CDOptions(;maxIter=5000, optTol=1e-8, warmStart=false, randomize=false)
opt4 = CDOptions(;maxIter=5000, optTol=1e-8, warmStart=false, randomize=true)
x1 = SparseIterate(sprand(p, 0.6))
x2 = SparseIterate(sprand(p, 0.6))
x3 = SparseIterate(sprand(p, 0.6))
x4 = SparseIterate(sprand(p, 0.6))
coordinateDescent!(x1, f, g, opt1)
coordinateDescent!(x2, f, g, opt2)
coordinateDescent!(x3, f, g, opt3)
coordinateDescent!(x4, f, g, opt4)
@test Vector(x1) ≈ Vector(x2) atol=1e-5
@test Vector(x3) ≈ Vector(x2) atol=1e-5
@test Vector(x4) ≈ Vector(x2) atol=1e-5
end
end
# @testset "weighted least squares loss" begin
#
# n, s, p = 500, 2, 50
# gk = GaussianKernel(0.3)
# w = zeros(n)
# sw = zeros(n)
#
# for i=1:NUMBER_REPEAT
# for degree=0:2
# Y, X, Z, betaMat = genData(n, s)
# X = [X zeros(n, p-s)]
# z0 = rand()
#
# cp = p*(degree+1)
# eX = zeros(n, cp)
# _expand_X!(eX, X, Z, z0, degree)
#
# @. w = evaluate(gk, Z, z0)
# @. sw = sqrt(w)
#
# λ = 0.001
# g = ProximalBase.ProxL1(λ)
# f1 = CDLeastSquaresLoss(diagm(sw) * Y, diagm(sw)*X)
# f2 = CDWeightedLSLoss(Y, X, w)
#
# opt1 = CDOptions(;maxIter=5000, optTol=1e-8, warmStart=true, randomize=false)
# opt2 = CDOptions(;maxIter=5000, optTol=1e-8, warmStart=true, randomize=true)
# opt3 = CDOptions(;maxIter=5000, optTol=1e-8, warmStart=false, randomize=false)
# opt4 = CDOptions(;maxIter=5000, optTol=1e-8, warmStart=false, randomize=true)
#
# x1 = SparseIterate(sprand(p, 0.6))
# x2 = SparseIterate(sprand(p, 0.6))
# x3 = SparseIterate(sprand(p, 0.6))
# x4 = SparseIterate(sprand(p, 0.6))
# x5 = SparseIterate(sprand(p, 0.6))
# x6 = SparseIterate(sprand(p, 0.6))
# x7 = SparseIterate(sprand(p, 0.6))
# x8 = SparseIterate(sprand(p, 0.6))
#
# coordinateDescent!(x1, f1, g, opt1)
# coordinateDescent!(x2, f1, g, opt2)
# coordinateDescent!(x3, f1, g, opt3)
# coordinateDescent!(x4, f1, g, opt4)
#
# coordinateDescent!(x5, f2, g, opt1)
# coordinateDescent!(x6, f2, g, opt2)
# coordinateDescent!(x7, f2, g, opt3)
# coordinateDescent!(x8, f2, g, opt4)
#
#
# @test Vector(x1) ≈ Vector(x2) atol=1e-5
# @test Vector(x3) ≈ Vector(x2) atol=1e-5
# @test Vector(x4) ≈ Vector(x2) atol=1e-5
# @test Vector(x5) ≈ Vector(x2) atol=1e-5
# @test Vector(x6) ≈ Vector(x2) atol=1e-5
# @test Vector(x7) ≈ Vector(x2) atol=1e-5
# @test Vector(x8) ≈ Vector(x2) atol=1e-5
# end
# end
#
# end
end
| CoordinateDescent | https://github.com/mlakolar/CoordinateDescent.jl.git |
| ["MIT"] | 0.3.0 | bd00a6c0f899a0f8e7f8cac53e78c6f24cbd8080 | code | 6752 |
module LassoTest
const NUMBER_REPEAT = 1
using CoordinateDescent
using Test
using ProximalBase
using Random
using LinearAlgebra
using SparseArrays
Random.seed!(1)
##############################################
#
# Lasso
#
##############################################
@testset "lasso" begin
@testset "zero" begin
n = 100
p = 10
X = randn(n, p)
Y = X * ones(p) + 0.1 * randn(n)
Xy = X' * Y / n
lambda = maximum(abs.(Xy)) + 0.1
out = lasso(X, Y, lambda)
@test out.x == SparseIterate(p)
end
@testset "non-zero" begin
for i=1:NUMBER_REPEAT
n = 100
p = 10
s = 5
X = randn(n, p)
Y = X[:,1:s] * ones(s) + 0.1 * randn(n)
λ = fill(0.3, p)
beta = lasso(X, Y, 1., λ, CDOptions(;optTol=1e-12))
f = CDQuadraticLoss(X'X/n, -X'Y/n)
g = ProximalBase.ProxL1(1., λ)
x1 = SparseIterate( p )
coordinateDescent!(x1, f, g, CDOptions(;optTol=1e-12))
@test beta.x ≈ x1 atol=1e-5
@test (maximum(abs.(X'*(Y - X*beta.x) / n)) - 0.3) / 0.3 ≈ 0. atol=1e-5
end
end
@testset "different interfaces" begin
n = 500
p = 500
s = 50
X = randn(n, p)
β = randn(s)
Y = X[:,1:s] * β + randn(n)
λ = 0.1
x1 = lasso(X, Y, λ)
x2 = lasso(X, Y, λ, ones(p))
@test Vector(x1.x) ≈ Vector(x2.x) atol=1e-5
end
end
@testset "cd lasso" begin
for i=1:NUMBER_REPEAT
n = 200
p = 50
s = 10
X = randn(n, p)
β = randn(s)
Y = X[:,1:s] * β + 0.1 * randn(n)
g = ProximalBase.ProxL1(0.2)
f1 = CDQuadraticLoss(X'X/n, -X'Y/n)
f2 = CDLeastSquaresLoss(Y, X)
x1 = SparseIterate(p)
x2 = SparseIterate(p)
coordinateDescent!(x1, f1, g, CDOptions(;optTol=1e-12))
coordinateDescent!(x2, f2, g, CDOptions(;optTol=1e-12))
@test maximum(abs.(x1 - x2)) ≈ 0. atol=1e-5
@test (maximum(abs.(X'*(Y - X*x1) / n)) - 0.2) / 0.2 ≈ 0. atol=1e-5
@test (maximum(abs.(X'*(Y - X*x2) / n)) - 0.2) / 0.2 ≈ 0. atol=1e-5
end
end
@testset "cd sqrt-lasso" begin
@testset "kkt" begin
for i=1:NUMBER_REPEAT
n = 100
p = 50
s = 5
X = randn(n, p)
β = randn(s)
Y = X[:,1:s] * β + randn(n)
λ = 2.8
g = ProximalBase.ProxL1(λ)
f = CDSqrtLassoLoss(Y, X)
x1 = SparseIterate(p)
coordinateDescent!(x1, f, g, CDOptions(;maxIter=5000, optTol=1e-8))
@test max(0, maximum(abs.(X'*(Y - X*x1) / norm(Y - X*x1))) - λ) / λ ≈ 0. atol=1e-3
end
end
@testset "interfaces" begin
for i=1:NUMBER_REPEAT
n = 500
p = 500
s = 50
X = randn(n, p)
β = randn(s)
Y = X[:,1:s] * β + randn(n)
opt1 = CDOptions(;maxIter=5000, optTol=1e-10, warmStart=true, randomize=false)
opt2 = CDOptions(;maxIter=5000, optTol=1e-10, warmStart=true, randomize=true)
opt3 = CDOptions(;maxIter=5000, optTol=1e-10, warmStart=false, randomize=false)
opt4 = CDOptions(;maxIter=5000, optTol=1e-10, warmStart=false, randomize=true)
x1 = SparseIterate(sprand(p, 0.6))
x2 = SparseIterate(sprand(p, 0.6))
x3 = SparseIterate(sprand(p, 0.6))
x4 = SparseIterate(sprand(p, 0.6))
λ = 1.5
g = ProximalBase.ProxL1(λ)
f = CDSqrtLassoLoss(Y, X)
coordinateDescent!(x1, f, g, opt1)
coordinateDescent!(x2, f, g, opt2)
coordinateDescent!(x3, f, g, opt3)
coordinateDescent!(x4, f, g, opt4)
@test Vector(x1) ≈ Vector(x2) atol=1e-4
@test Vector(x3) ≈ Vector(x2) atol=1e-4
@test Vector(x4) ≈ Vector(x2) atol=1e-4
y1 = sqrtLasso(X, Y, λ, opt1, standardizeX=false)
y2 = sqrtLasso(X, Y, λ, opt2, standardizeX=false)
y3 = sqrtLasso(X, Y, λ, opt3, standardizeX=false)
y4 = sqrtLasso(X, Y, λ, opt4, standardizeX=false)
@test Vector(y1.x) ≈ Vector(x2) atol=1e-4
@test Vector(y2.x) ≈ Vector(x2) atol=1e-4
@test Vector(y3.x) ≈ Vector(x2) atol=1e-4
@test Vector(y4.x) ≈ Vector(x2) atol=1e-4
z1 = sqrtLasso(X, Y, λ, ones(p), opt1)
z2 = sqrtLasso(X, Y, λ, ones(p), opt2)
z3 = sqrtLasso(X, Y, λ, ones(p), opt3)
z4 = sqrtLasso(X, Y, λ, ones(p), opt4)
@test Vector(z1.x) ≈ Vector(x2) atol=1e-4
@test Vector(z2.x) ≈ Vector(x2) atol=1e-4
@test Vector(z3.x) ≈ Vector(x2) atol=1e-4
@test Vector(z4.x) ≈ Vector(x2) atol=1e-4
end
end
end
@testset "scaled lasso" begin
for i=1:NUMBER_REPEAT
n = 1000
p = 500
s = 50
X = randn(n, p)
β = randn(s)
Y = X[:,1:s] * β + randn(n)
λ = rand() / 5.
opt1 = IterLassoOptions(;maxIter=100, optTol=1e-8, optionsCD=CDOptions(;maxIter=5000, optTol=1e-8))
opt2 = IterLassoOptions(;maxIter=100, optTol=1e-8, initProcedure=:InitStd, σinit=2., optionsCD=CDOptions(;maxIter=5000, optTol=1e-8))
x1 = SparseIterate(p)
x2 = SparseIterate(p)
sol1 = scaledLasso!(x1, X, Y, λ, ones(p), opt1)
sol2 = scaledLasso!(x2, X, Y, λ, ones(p), opt2)
σ1 = sol1.σ
σ2 = sol2.σ
@test max.((maximum(abs.(X'*(Y - X*x1) / n)) - λ*σ1), 0.) / (σ1*λ) ≈ 0. atol=1e-4
@test max.((maximum(abs.(X'*(Y - X*x2) / n)) - λ*σ2), 0.) / (σ2*λ) ≈ 0. atol=1e-4
@test Vector(x1) ≈ Vector(x2) atol=1e-4
end
end
@testset "lasso path" begin
@testset "standardizeX = false" begin
n = 1000
p = 500
s = 50
X = randn(n, p)
β = randn(s)
Y = X[:,1:s] * β + randn(n)
λ1 = 0.3
λ2 = 0.1
opt = CDOptions(;maxIter=5000, optTol=1e-8)
x1 = lasso(X, Y, λ1)
x2 = lasso(X, Y, λ2)
λpath = [λ1, λ2]
path = LassoPath(X, Y, λpath, opt; standardizeX=false)
@test typeof(path) == LassoPath{Float64}
@test Vector(path.βpath[1]) ≈ Vector(x1.x) atol=1e-5
@test Vector(path.βpath[2]) ≈ Vector(x2.x) atol=1e-5
S1 = findall(!iszero, x1.x)
S2 = findall(!iszero, x2.x)
rf = refitLassoPath(path, X, Y)
@test rf[S1] ≈ X[:,S1] \ Y atol=1e-5
@test rf[S2] ≈ X[:,S2] \ Y atol=1e-5
end
@testset "standardizeX = true" begin
n = 1000
p = 500
s = 50
X = randn(n, p)
β = randn(s)
Y = X[:,1:s] * β + randn(n)
loadingX = Array{Float64}(undef, p)
CoordinateDescent._stdX!(loadingX, X)
λ1 = 0.3
λ2 = 0.1
opt = CDOptions(;maxIter=5000, optTol=1e-8)
x1 = lasso(X, Y, λ1, loadingX, opt)
x2 = lasso(X, Y, λ2, loadingX, opt)
λpath = [λ1, λ2]
path = LassoPath(X, Y, λpath, opt)
@test typeof(path) == LassoPath{Float64}
@test Vector(path.βpath[1]) ≈ Vector(x1.x) atol=1e-5
@test Vector(path.βpath[2]) ≈ Vector(x2.x) atol=1e-5
S1 = findall(!iszero, x1.x)
S2 = findall(!iszero, x2.x)
rf = refitLassoPath(path, X, Y)
@test rf[S1] ≈ X[:,S1] \ Y atol=1e-5
@test rf[S2] ≈ X[:,S2] \ Y atol=1e-5
end
end
end
| CoordinateDescent | https://github.com/mlakolar/CoordinateDescent.jl.git |
| ["MIT"] | 0.3.0 | bd00a6c0f899a0f8e7f8cac53e78c6f24cbd8080 | code | 263 |
using CoordinateDescent
include(joinpath(@__DIR__, "..", "benchmark", "locpoly_bench.jl"))
tests = [
"atom_iterator",
"coordinate_descent",
"lasso",
"varying_coefficient_lasso"
]
for t in tests
f = "$t.jl"
println("* running $f ...")
include(f)
end
| CoordinateDescent | https://github.com/mlakolar/CoordinateDescent.jl.git |
|
[
"MIT"
] | 0.3.0 | bd00a6c0f899a0f8e7f8cac53e78c6f24cbd8080 | code | 3030 | module VarCoeffLassoTest
const NUMBER_REPEAT = 1
using CoordinateDescent
using Test
using ProximalBase
using Random
using LinearAlgebra
using SparseArrays
Random.seed!(1)
@testset "kernels" begin
x = 0.3
y = 0.4
k = GaussianKernel(1.)
@test evaluate(k, x, y) ≈ exp(-0.01)
function evaluate!(g::GaussianKernel{T}, buffer::Array{T}, x::Array{T}, y::Float64) where {T <: AbstractFloat}
size(buffer) == size(x) || throw(DimensionMismatch())
@inbounds @simd for i=1:length(x)
buffer[i] = evaluate(g, x[i], y)
end
buffer
end
x = rand(100, 1000)
y = rand()
k = GaussianKernel(0.5)
o1 = zero(x)
o2 = zero(x)
o1 = evaluate.(Ref(k), x, Ref(y))
evaluate!(k, o2, x, y)
@test o1 ≈ o2
end
@testset "expand_X" begin
X = reshape(collect(1.:6.), 2, 3)
z = [0.2, 0.4]
z0 = 0.3
tX = similar(X)
@test CoordinateDescent._expand_X!(tX, X, z, z0, 0) == X
tX = zeros(2, 6)
tX1 = zeros(2, 6)
Q = [1. -0.1; 1. 0.1]
for i=1:2
tX1[i, :] = kron(X[i,:], Q[i, :])
end
@test CoordinateDescent._expand_X!(tX, X, z, z0, 1) ≈ tX1
tX = zeros(2, 9)
tX1 = zeros(2, 9)
Q = [1. -0.1 0.01; 1. 0.1 0.01]
for i=1:2
tX1[i, :] = kron(X[i,:], Q[i, :])
end
@test CoordinateDescent._expand_X!(tX, X, z, z0, 2) ≈ tX1
end
@testset "expand_X multiplications" begin
p = 10
X = randn(100, p)
Y = randn(100)
z = rand(100)
w = zeros(100)
k = GaussianKernel(0.2)
w = evaluate.(Ref(k), z, Ref(0.5))
for degree=0:2
cp = p*(degree+1)
eX = zeros(100, cp)
ewX = zeros(100, cp)
CoordinateDescent._expand_X!(eX, X, z, 0.5, degree)
@test CoordinateDescent._expand_wX!(ewX, w, X, z, 0.5, degree) ≈ Diagonal(w)*eX
Xt_w_Y = zeros(cp)
@test CoordinateDescent._expand_Xt_w_Y!(Xt_w_Y, w, X, z, Y, 0.5, degree) ≈ eX' * Diagonal(w) * Y
Xt_w_X = zeros(cp, cp)
@test CoordinateDescent._expand_Xt_w_X!(Xt_w_X, w, X, z, 0.5, degree) ≈ (eX'*Diagonal(w))*eX
end
end
#
# @testset "locpoly" begin
#
# n, p = 500, 2
# Y, X, Z, betaMat = genData(n, p)
# zgrid = collect(0.01:0.2:0.99)
#
# gk = GaussianKernel(0.4)
#
# w = zeros(n)
#
# for degree=0:2
# @test locpoly(X, Z, Y, zgrid, degree, gk) ≈ locpoly_alt(X, Z, Y, zgrid, degree, gk)
#
# z0 = 0.5
# cp = p*(degree+1)
#
# eX = zeros(n, cp)
# _expand_X!(eX, X, Z, z0, degree)
#
# @. w = evaluate(gk, Z, z0)
#
# @test locpoly(X, Z, Y, z0, degree, gk) ≈ (eX' * diagm(w) * eX)\(eX' * diagm(w) * Y)
# end
# end
#
#
#
# @testset "locpolyl1" begin
#
# n, s, p = 500, 10, 50
# gk = GaussianKernel(0.1)
# zgrid = collect(0.01:0.1:0.99)
# opt = CDOptions(;randomize=false)
#
# for i=1:NUMBER_REPEAT
# Y, X, Z, betaT = genData(n, s)
# X = [X zeros(n, p-s)]
#
# λ0 = rand() / 10
#
# for degree=0:2
# o1 = locpolyl1(X,Z,Y,zgrid,degree,gk,λ0, opt)
# o2 = locpolyl1_alt(X,Z,Y,zgrid,degree,gk,λ0, opt)
#
# @test maximum( maximum(abs.(o1[i] - o2[i])) for i=1:length(zgrid) ) ≈ 0. atol=1e-4
# end
# end
#
# end
end
| CoordinateDescent | https://github.com/mlakolar/CoordinateDescent.jl.git |
|
[
"MIT"
] | 0.3.0 | bd00a6c0f899a0f8e7f8cac53e78c6f24cbd8080 | docs | 669 | # CoordinateDescent.jl
[](https://travis-ci.org/mlakolar/CoordinateDescent.jl) [](https://codecov.io/gh/mlakolar/CoordinateDescent.jl)
Implements coordinate descent for a smooth function plus penalty that decomposes across coordinates.
Curently a naive version of the active-set coordinate descent is implemented that works for L1 and weighted L1 penalty.
Examples:
* Lasso
* Sqrt-Lasso
* Scaled-Lasso
Package depends on [ProximalBase.jl](https://github.com/mlakolar/ProximalBase.jl)
| CoordinateDescent | https://github.com/mlakolar/CoordinateDescent.jl.git |
|
[
"MIT"
] | 0.2.0 | 4cb65f7f7e50feed0a8c9bbbf215d641f7116f3e | code | 322 | module ModelBasedCF
# package code goes here
using Persa
using ProgressMeter
using Statistics
using LinearAlgebra: norm
using Random: shuffle
abstract type MatrixFactorization{T} <: Persa.Model{T}
end
include("irsvd.jl")
include("rsvd.jl")
include("train.jl")
include("baseline.jl")
include("random.jl")
end # module
| ModelBasedCF | https://github.com/JuliaRecsys/ModelBasedCF.jl.git |
|
[
"MIT"
] | 0.2.0 | 4cb65f7f7e50feed0a8c9bbbf215d641f7116f3e | code | 1171 | mutable struct Baseline{T} <: MatrixFactorization{T}
μ::Float64
bias_user::Array
bias_item::Array
preference::Persa.Preference{T}
users::Int
items::Int
end
function Baseline(dataset::Persa.Dataset)
(users, items) = size(dataset)
μ = mean(dataset)
bu = zeros(users)
bv = zeros(items)
return Baseline(μ, bu, bv, dataset.preference, Persa.users(dataset), Persa.items(dataset))
end
Persa.predict(model::Baseline, user::Int, item::Int) = model.μ + model.bias_user[user] + model.bias_item[item]
function objective(model::Baseline, dataset::Persa.Dataset, λ::Float64)
total = 0
for (u, v, r) in dataset
total += (r - model[u, v])^2
total += λ * (model.bias_user[u]^2 + model.bias_item[v]^2)
end
return total
end
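# Run one epoch of stochastic gradient descent: visit the observed ratings in
# random order and move each bias along the gradient of the squared error,
# shrunk by the L2 penalty of strength λ.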
function update!(model::Baseline, dataset::Persa.Dataset, γ::Float64, λ::Float64)
idx = shuffle(1:length(dataset))
for i = 1:length(dataset)
(u, v, r) = dataset[idx[i]]
e = r - Persa.predict(model, u, v)
model.bias_user[u] += γ * (e - λ * model.bias_user[u])
model.bias_item[v] += γ * (e - λ * model.bias_item[v])
end
end
| ModelBasedCF | https://github.com/JuliaRecsys/ModelBasedCF.jl.git |
|
[
"MIT"
] | 0.2.0 | 4cb65f7f7e50feed0a8c9bbbf215d641f7116f3e | code | 1653 | mutable struct ImprovedRegularizedSVD{T} <: MatrixFactorization{T}
μ::Float64
bias_user::Array
bias_item::Array
P::Array
Q::Array
preference::Persa.Preference{T}
users::Int
items::Int
end
const IRSVD = ImprovedRegularizedSVD
function ImprovedRegularizedSVD(dataset::Persa.Dataset, features::Int)
(users, items) = size(dataset)
μ = mean(dataset)
bu = zeros(users)
bv = zeros(items)
P = rand(users, features)
Q = rand(items, features)
return ImprovedRegularizedSVD(μ, bu, bv, P, Q, dataset.preference, Persa.users(dataset), Persa.items(dataset))
end
Persa.predict(model::ImprovedRegularizedSVD, user::Int, item::Int) = model.μ + model.bias_user[user] + model.bias_item[item] + model.P[user, :]' * model.Q[item, :]
function objective(model::ImprovedRegularizedSVD, dataset::Persa.Dataset, λ::Float64)
total = 0
for (u, v, r) in dataset
total += (r - model[u, v])^2
total += λ * (model.bias_user[u]^2 + model.bias_item[v]^2)
total += λ * (norm(model.P[u,:])^2 + norm(model.Q[v,:])^2)
end
return total
end
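# SGD epoch for the biased matrix-factorization model: besides the bias terms,
# update the latent factor rows P[u, :] and Q[v, :]; the local copies P and Q
# ensure both factor updates use the same pre-update values.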
function update!(model::ImprovedRegularizedSVD, dataset::Persa.Dataset, γ::Float64, λ::Float64)
idx = shuffle(1:length(dataset))
for i = 1:length(dataset)
(u, v, r) = dataset[idx[i]]
e = r - Persa.predict(model, u, v)
model.bias_user[u] += γ * (e - λ * model.bias_user[u])
model.bias_item[v] += γ * (e - λ * model.bias_item[v])
P = model.P[u,:]
Q = model.Q[v,:]
model.P[u,:] += γ * (e .* Q .- λ .* P)
model.Q[v,:] += γ * (e .* P .- λ .* Q)
end
end
| ModelBasedCF | https://github.com/JuliaRecsys/ModelBasedCF.jl.git |
|
[
"MIT"
] | 0.2.0 | 4cb65f7f7e50feed0a8c9bbbf215d641f7116f3e | code | 339 | using Random
mutable struct RandomModel{T} <: Persa.Model{T}
preference::Persa.Preference{T}
users::Int
items::Int
end
RandomModel(dataset::Persa.Dataset) = RandomModel(dataset.preference, Persa.users(dataset), Persa.items(dataset))
Persa.predict(model::RandomModel, user::Int, item::Int) = rand(model.preference.possibles)
| ModelBasedCF | https://github.com/JuliaRecsys/ModelBasedCF.jl.git |
|
[
"MIT"
] | 0.2.0 | 4cb65f7f7e50feed0a8c9bbbf215d641f7116f3e | code | 1197 | mutable struct RegularizedSVD{T} <: MatrixFactorization{T}
P::Array
Q::Array
preference::Persa.Preference{T}
users::Int
items::Int
end
const RSVD = RegularizedSVD
function RegularizedSVD(dataset::Persa.Dataset, features::Int)
(users, items) = size(dataset)
P = rand(users, features)
Q = rand(items, features)
return RegularizedSVD(P, Q, dataset.preference, Persa.users(dataset), Persa.items(dataset))
end
Persa.predict(model::RegularizedSVD, user::Int, item::Int) = model.P[user, :]' * model.Q[item, :]
function objective(model::RegularizedSVD, dataset::Persa.Dataset, λ::Float64)
total = 0
for (u, v, r) in dataset
total += (r - model[u, v])^2
total += λ * (norm(model.P[u,:])^2 + norm(model.Q[v,:])^2)
end
return total
end
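# SGD epoch updating only the latent factors (no bias terms); the local copies
# of P and Q keep the paired factor updates based on the same pre-update values.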
function update!(model::RegularizedSVD, dataset::Persa.Dataset, γ::Float64, λ::Float64)
idx = shuffle(1:length(dataset))
for i = 1:length(dataset)
(u, v, r) = dataset[idx[i]]
e = r - Persa.predict(model, u, v)
P = model.P[u,:]
Q = model.Q[v,:]
model.P[u,:] += γ * (e .* Q .- λ .* P)
model.Q[v,:] += γ * (e .* P .- λ .* Q)
end
end
| ModelBasedCF | https://github.com/JuliaRecsys/ModelBasedCF.jl.git |
|
[
"MIT"
] | 0.2.0 | 4cb65f7f7e50feed0a8c9bbbf215d641f7116f3e | code | 510 | function Persa.train!(model::MatrixFactorization,
dataset::Persa.Dataset;
γ = 0.001,
λ = 0.02,
max_epochs = 1000)
err = Inf
p = Progress(max_epochs)
for epoch = 1:max_epochs
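# one SGD pass over the data, then re-evaluate the penalized objective;
# stop early if it increased (a simple divergence guard)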
update!(model, dataset, γ, λ)
e = objective(model, dataset, λ)  # check progress with the same λ used for the updates
if e > err
break
end
err = e
ProgressMeter.next!(p; showvalues = [(:epoch, epoch), (:error, err)])
end
return nothing
end
| ModelBasedCF | https://github.com/JuliaRecsys/ModelBasedCF.jl.git |
|
[
"MIT"
] | 0.2.0 | 4cb65f7f7e50feed0a8c9bbbf215d641f7116f3e | code | 827 | using Persa
using Test
using DatasetsCF
using ModelBasedCF
dataset = DatasetsCF.MovieLens()
@testset "Random Model Tests" begin
model = ModelBasedCF.RandomModel(dataset)
Persa.train!(model, dataset)
@test !isnan(model[1,1])
end
@testset "Factorization Matrix Models Tests" begin
@testset "Baseline Tests" begin
model = ModelBasedCF.Baseline(dataset)
Persa.train!(model, dataset, max_epochs = 1)
@test !isnan(model[1,1])
end
@testset "RSVD Tests" begin
model = ModelBasedCF.RSVD(dataset, 1)
Persa.train!(model, dataset, max_epochs = 1)
@test !isnan(model[1,1])
end
@testset "IRSVD Tests" begin
model = ModelBasedCF.IRSVD(dataset, 1)
Persa.train!(model, dataset, max_epochs = 1)
@test !isnan(model[1,1])
end
end | ModelBasedCF | https://github.com/JuliaRecsys/ModelBasedCF.jl.git |
|
[
"MIT"
] | 0.2.0 | 4cb65f7f7e50feed0a8c9bbbf215d641f7116f3e | docs | 1491 | # ModelBasedCF.jl
*Model based algorithms for Collaborative Filtering in Julia*
[![][ci-img]][ci-url]
[![][codecov-img]][codecov-url]
**Installation**: at the Julia REPL, `Pkg.add("ModelBasedCF")`
**Reporting Issues and Contributing**: See [CONTRIBUTING.md](CONTRIBUTING.md)
## Example
```julia
julia> using DatasetsCF
julia> dataset = DatasetsCF.MovieLens();
julia> using ModelBasedCF
julia> model = ModelBasedCF.IRSVD(dataset, 10)
julia> Persa.train!(model, dataset, max_epochs = 10)
julia> model[1,1]
```
## Models
List of package models:
Models | Title
-------------|------------------------------------------------------------------------
Baseline | Koren, Y. (2009). Collaborative filtering with temporal dynamics. Knowledge Discovery and Data Mining {KDD}, 447–456.
Regularized SVD | Koren, Y., Bell, R., & Volinsky, C. (2009). Matrix factorization techniques for recommender systems. Computer, 42(8), 30–37.
Improved Regularized SVD | Koren, Y. (2009). Collaborative filtering with temporal dynamics. Knowledge Discovery and Data Mining {KDD}, 447–456.
[ci-img]: https://img.shields.io/github/checks-status/JuliaRecsys/ModelBasedCF.jl/master?style=flat-square
[ci-url]: https://github.com/JuliaRecsys/ModelBasedCF.jl/actions
[codecov-img]: https://img.shields.io/codecov/c/github/JuliaRecsys/ModelBasedCF.jl?style=flat-square
[codecov-url]: https://codecov.io/gh/JuliaRecsys/ModelBasedCF.jl
[issues-url]: https://github.com/JuliaRecsys/ModelBasedCF.jl/issues
| ModelBasedCF | https://github.com/JuliaRecsys/ModelBasedCF.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 438 | using Plots, Roots, FileIO
julia_colors = [:royalblue, :brown3, :forestgreen, :mediumorchid3]
f(x) = x^5 - x - 1/4
a, b = -9/8, 5/4
zs = find_zeros(f, (a,b))
p = plot(f, a, b; color=:black,
linewidth=2, grid=false, legend=false, showaxis = false,
background_color = :transparent)
plot!(p, zero, color=julia_colors[1])
scatter!(p, zs, zero.(zs), color=julia_colors[2:4], markersize=10)
save("src/assets/logo.png", p)
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 468 | ENV["GKSwstype"] = "100"
using Documenter
using Roots
DocMeta.setdocmeta!(Roots, :DocTestSetup, :(using Roots); recursive=true)
makedocs(
sitename = "Roots",
format = Documenter.HTML(ansicolor=true),
modules = [Roots],
pages=[
"Home" => "index.md",
"Overview" => "roots.md",
"Reference/API" => "reference.md",
"Geometry" => "geometry-zero-finding.md"
]
)
deploydocs(
repo = "github.com/JuliaMath/Roots.jl"
)
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 4573 | module RootsChainRulesCoreExt
using Roots
import ChainRulesCore
# View find_zero as solving `f(x, p) = 0` for `xᵅ(p)`.
# This is implicitly defined. By the implicit function theorem, we have:
# ∇f = 0 => ∂/∂ₓ f(xᵅ, p) ⋅ ∂xᵅ/∂ₚ + ∂/∂ₚ f(xᵅ, p) ⋅ I = 0
# or ∂xᵅ/∂ₚ = - ∂/∂ₚ f(xᵅ, p) / ∂/∂ₓ f(xᵅ, p)
# There are two cases considered
# F(p) = find_zero(f(x,p), x₀, M, p) # f a function
# G(p) = find_zero(𝐺(p), x₀, M) # 𝐺 a functor
# For G(p) first-order derivatives work,
# **but** the Hessian does not with Zygote. *MOREOVER*, it fails
# with the **wrong answer**, not an error.
#
# (`Zygote.hessian` calls `ForwardDiff` and that isn't working with a functor;
# `Zygote.hessian_reverse` doesn't seem to work here, though perhaps
# that is fixable.)
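# A quick sanity check of the formula above (a hypothetical example): for
# f(x, p) = x^2 - p the zero is xᵅ(p) = √p, with fₓ = 2x and fₚ = -1, so
# ∂xᵅ/∂p = -fₚ/fₓ = 1/(2√p), which agrees with d/dp √p.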
# this assumes a function and a parameter `p` passed in
function ChainRulesCore.frule(
config::ChainRulesCore.RuleConfig{>:ChainRulesCore.HasForwardsMode},
(_, _, _, Δp),
::typeof(solve),
ZP::ZeroProblem,
M::Roots.AbstractUnivariateZeroMethod,
p;
kwargs...,
)
xᵅ = solve(ZP, M, p; kwargs...)
# Use a single reverse-mode AD call with `rrule_via_ad` if `config` supports it?
F = p -> Roots.Callable_Function(M, ZP.F, p)
fₓ(x) = first(F(p)(x))
fₚ(p) = first(F(p)(xᵅ))
fx = ChainRulesCore.frule_via_ad(config, (ChainRulesCore.NoTangent(), true), fₓ, xᵅ)[2]
fp = ChainRulesCore.frule_via_ad(config, (ChainRulesCore.NoTangent(), Δp), fₚ, p)[2]
xᵅ, -fp / fx
end
# Case of Functor carrying parameters
ChainRulesCore.frule(
config::ChainRulesCore.RuleConfig{>:ChainRulesCore.HasForwardsMode},
xdots,
::typeof(solve),
ZP::ZeroProblem,
M::Roots.AbstractUnivariateZeroMethod,
::Nothing;
kwargs...,
) = ChainRulesCore.frule(config, xdots, solve, ZP, M; kwargs...)
function ChainRulesCore.frule(
config::ChainRulesCore.RuleConfig{>:ChainRulesCore.HasForwardsMode},
(_, Δq, _),
::typeof(solve),
ZP::ZeroProblem,
M::Roots.AbstractUnivariateZeroMethod;
kwargs...,
)
# no `p`; make ZP.F the parameter (issue 408)
foo = ZP.F
zprob2 = ZeroProblem(|>, ZP.x₀)
nms = fieldnames(typeof(foo))
nt = NamedTuple{nms}(getfield(foo, n) for n in nms)
dfoo = ChainRulesCore.Tangent{typeof(foo)}(; nt...)
return ChainRulesCore.frule(
config,
(ChainRulesCore.NoTangent(), ChainRulesCore.NoTangent(), ChainRulesCore.NoTangent(), dfoo),
solve,
zprob2,
M,
foo,
)
end
##
## modified from
## https://github.com/gdalle/ImplicitDifferentiation.jl/blob/main/src/implicit_function.jl
# this is for passing a parameter `p`
function ChainRulesCore.rrule(
rc::ChainRulesCore.RuleConfig{>:ChainRulesCore.HasReverseMode},
::typeof(solve),
ZP::ZeroProblem,
M::Roots.AbstractUnivariateZeroMethod,
p;
kwargs...,
)
xᵅ = solve(ZP, M, p; kwargs...)
f(x, p) = first(Roots.Callable_Function(M, ZP.F, p)(x))
_, pullback_f = ChainRulesCore.rrule_via_ad(rc, f, xᵅ, p)
_, fx, fp = pullback_f(true)
yp = -fp / fx
function pullback_solve_ZeroProblem(dy)
dp = yp * dy
return (
ChainRulesCore.NoTangent(),
ChainRulesCore.NoTangent(),
ChainRulesCore.NoTangent(),
dp,
)
end
return xᵅ, pullback_solve_ZeroProblem
end
# this assumes a functor 𝐺(p) for the function *and* no parameter
ChainRulesCore.rrule(
rc::ChainRulesCore.RuleConfig{>:ChainRulesCore.HasReverseMode},
::typeof(solve),
ZP::ZeroProblem,
M::Roots.AbstractUnivariateZeroMethod,
::Nothing;
kwargs...,
) = ChainRulesCore.rrule(rc, solve, ZP, M; kwargs...)
function ChainRulesCore.rrule(
rc::ChainRulesCore.RuleConfig{>:ChainRulesCore.HasReverseMode},
::typeof(solve),
ZP::ZeroProblem,
M::Roots.AbstractUnivariateZeroMethod;
kwargs...,
)
𝑍𝑃 = ZeroProblem(|>, ZP.x₀)
xᵅ = solve(ZP, M; kwargs...)
f(x, p) = first(Roots.Callable_Function(M, 𝑍𝑃.F, p)(x))
_, pullback_f = ChainRulesCore.rrule_via_ad(rc, f, xᵅ, ZP.F)
_, fx, fp = pullback_f(true)
yp = NamedTuple{keys(fp)}(-fₚ / fx for fₚ in values(fp))
function pullback_solve_ZeroProblem(dy)
dF = ChainRulesCore.Tangent{typeof(ZP.F)}(; yp...)
dZP = ChainRulesCore.Tangent{typeof(ZP)}(; F=dF, x₀=ChainRulesCore.NoTangent())
dsolve = ChainRulesCore.NoTangent()
dM = ChainRulesCore.NoTangent()
dp = ChainRulesCore.NoTangent()
return dsolve, dZP, dM, dp
end
return xᵅ, pullback_solve_ZeroProblem
end
end # module
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 1809 | module RootsForwardDiffExt
using Roots
using ForwardDiff
import ForwardDiff: Dual, value, partials, Partials, derivative, gradient!
# What works
# F(p) = find_zero(f, x0, M, p)
# G(p) = find_zero(𝐺(p), x0, M)
# F G
# ForwardDiff.derivative ✓ x (wrong answer, 0.0)
# ForwardDiff.gradient ✓ x (wrong answer, 0.0)
# ForwardDiff.hessian ✓ x (wrong answer, 0.0)
# Zygote.gradient ✓ ✓
# Zygote.hessian ✓ x (wrong answer!)
# Zygote.hessian_reverse ✓ x (MethodError)
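# For example (a sketch): with
#   F(p) = solve(ZeroProblem((x, p) -> x^2 - p, 1.0), Order1(), p)
# ForwardDiff.derivative(F, 2.0) ≈ 1/(2√2), the derivative of p -> √p at p = 2.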
function Roots.solve(ZP::ZeroProblem,
M::Roots.AbstractUnivariateZeroMethod,
𝐩::Dual{T};
kwargs...) where {T}
# p_and_dp = 𝐩
p, dp = value.(𝐩), partials.(𝐩)
xᵅ = solve(ZP, M, p; kwargs...)
f = ZP.F
fₓ = derivative(_x -> f(_x, p), xᵅ)
fₚ = derivative(_p -> f(xᵅ, _p), p)
# x and dx
dx = - (fₚ * dp) / fₓ
Dual{T}(xᵅ, dx)
end
# cf https://discourse.julialang.org/t/custom-rule-for-differentiating-through-newton-solver-using-forwarddiff-works-for-gradient-fails-for-hessian/93002/22
function Roots.solve(ZP::ZeroProblem,
M::Roots.AbstractUnivariateZeroMethod,
𝐩::AbstractArray{<:Dual{T,R,N}};
kwargs...) where {T,R,N}
# p_and_dp = 𝐩
p, dp = value.(𝐩), partials.(𝐩)
xᵅ = solve(ZP, M, p; kwargs...)
f = ZP.F
fₓ = derivative(_x -> f(_x, p), xᵅ)
fₚ = similar(𝐩) # <-- need this, not output of gradient(p->f(x,p), p)
gradient!(fₚ, _p -> f(xᵅ, _p), p)
# x_and_dx
dx = - (fₚ' * dp) / fₓ
Dual{T}(xᵅ, Partials(ntuple(k -> dx[k], Val(N))))
end
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 779 | module RootsIntervalRootFindingExt
using IntervalRootFinding
using Roots
function Roots.find_zeros(f, x0::IntervalRootFinding.Interval{T′}, M=IntervalRootFinding.Newton; kwargs...) where {T′}
rts = IntervalRootFinding.roots(f, x0, M)
T = float(T′)
unique_roots = T[find_zero(f, (interval(r).lo, interval(r).hi)) for r ∈ rts if r.status == :unique]
unknown = Interval{T}[interval(r) for r ∈ rts if r.status != :unique]
(zeros = unique_roots, unknown=unknown)
end
Roots.find_zeros(f, x0::IntervalRootFinding.Interval, M::Roots.Newton; kwargs...) =
Roots.find_zeros(f, x0, IntervalRootFinding.Newton)
Roots.find_zeros(f, x0::IntervalRootFinding.Interval, M::Roots.Bisection; kwargs...) =
Roots.find_zeros(f, x0, IntervalRootFinding.Bisection)
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 682 | module RootsSymPyExt
using Roots
using SymPy
## Allow equations to specify a problem to solve
function Roots.Callable_Function(M::Roots.AbstractUnivariateZeroMethod, f::SymPy.Sym, p=nothing)
if f.is_Equality == true
f = lhs(f) - rhs(f)
end
Roots.Callable_Function(M, lambdify(f), p)
end
function Roots.FnWrapper(f::SymPy.Sym)
if f.is_Equality == true
f = lhs(f) - rhs(f)
end
Roots.FnWrapper(lambdify(f))
end
## allow find_zeros to use symbolic equation
function Roots.find_zeros(f::SymPy.Sym, a, b=nothing; kwargs...)
if f.is_Equality == true
f = lhs(f) - rhs(f)
end
find_zeros(lambdify(f), a, b; kwargs...)
end
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 733 | module RootsSymPyPythonCallExt
using Roots
using SymPyPythonCall
## Allow equations to specify a problem to solve
function Roots.Callable_Function(M::Roots.AbstractUnivariateZeroMethod, f::SymPyPythonCall.Sym, p=nothing)
if f.is_Equality == true
f = lhs(f) - rhs(f)
end
Roots.Callable_Function(M, lambdify(f), p)
end
function Roots.FnWrapper(f::SymPyPythonCall.Sym)
if f.is_Equality == true
f = lhs(f) - rhs(f)
end
Roots.FnWrapper(lambdify(f))
end
## allow find_zeros to use symbolic equation
function Roots.find_zeros(f::SymPyPythonCall.Sym, a, b=nothing; kwargs...)
if f.is_Equality == true
f = lhs(f) - rhs(f)
end
find_zeros(lambdify(f), a, b; kwargs...)
end
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 2126 | """
Roots
A package for solving `f(x) = 0` for univariate, scalar functions.
The basic methods are
* [`find_zero`](@ref) for using one of several methods to identify a zero
* [`ZeroProblem`](@ref) for solving for a zero using the `CommonSolve` interface
* [`find_zeros`](@ref) for heuristically identifying all zeros in a specified interval
# Extended help
$(replace(read(joinpath(@__DIR__, "..", "README.md"), String), "```julia" => "```jldoctest readme"))
"""
module Roots
if isdefined(Base, :Experimental) && isdefined(Base.Experimental, Symbol("@optlevel"))
@eval Base.Experimental.@optlevel 1
end
using Printf
import CommonSolve
import CommonSolve: solve, solve!, init
using Accessors
export fzero, fzeros, secant_method
export find_zero,
find_zeros,
ZeroProblem,
solve,
solve!,
init,
Order0,
Secant,
Order1,
Orderφ,
Steffensen,
Order2,
Order5,
Order8,
Order16,
AllZeros
export Bisection, A42, AlefeldPotraShi, FalsePosition
include("utils.jl")
include("abstract_types.jl")
include("state.jl")
include("convergence.jl")
include("functions.jl")
include("trace.jl")
include("find_zero.jl")
include("hybrid.jl")
include("Bracketing/bracketing.jl")
include("Bracketing/bisection.jl")
include("Bracketing/alefeld_potra_shi.jl")
include("Bracketing/brent.jl")
include("Bracketing/ridders.jl")
include("Bracketing/itp.jl")
include("Bracketing/chandrapatlu.jl")
include("Bracketing/false_position.jl")
include("DerivativeFree/derivative_free.jl")
include("DerivativeFree/secant.jl")
include("DerivativeFree/steffensen.jl")
include("DerivativeFree/order5.jl")
include("DerivativeFree/order8.jl")
include("DerivativeFree/order16.jl")
include("DerivativeFree/king.jl")
include("DerivativeFree/esser.jl")
include("DerivativeFree/order0.jl")
include("Derivative/newton.jl")
include("Derivative/halley_like.jl")
include("Derivative/thukralb.jl")
include("Derivative/lith.jl")
include("find_zeros.jl")
include("simple.jl")
include("alternative_interfaces.jl")
if !isdefined(Base, :get_extension)
include("../ext/RootsChainRulesCoreExt.jl")
end
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 1056 | ### Method types
abstract type AbstractUnivariateZeroMethod end
Base.broadcastable(method::AbstractUnivariateZeroMethod) = Ref(method)
abstract type AbstractBracketingMethod <: AbstractUnivariateZeroMethod end
abstract type AbstractBisectionMethod <: AbstractBracketingMethod end
abstract type AbstractNonBracketingMethod <: AbstractUnivariateZeroMethod end
abstract type AbstractSecantMethod <: AbstractNonBracketingMethod end
abstract type AbstractDerivativeMethod <: AbstractNonBracketingMethod end
abstract type AbstractNewtonLikeMethod <: AbstractDerivativeMethod end
abstract type AbstractHalleyLikeMethod <: AbstractDerivativeMethod end
abstract type AbstractΔMethod <: AbstractHalleyLikeMethod end
# Deprecated aliases; there is no clean way to deprecate these, so the definitions remain but are not to be used
const AbstractBracketing = AbstractBracketingMethod
const AbstractBisection = AbstractBisectionMethod
const AbstractNonBracketing = AbstractNonBracketingMethod
const AbstractSecant = AbstractSecantMethod
### State
abstract type AbstractUnivariateZeroState{T,S} end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 8323 | ## Some **legacy** alternative interfaces.
## several named interfaces to methods
## newton, halley, quadratic_inverse, superhalley, chebyshevlike
"""
Roots.newton(f, fp, x0; kwargs...)
Implementation of Newton's method: `xᵢ₊₁ = xᵢ - f(xᵢ)/f'(xᵢ)`.
Arguments:
* `f::Function` -- function to find zero of
* `fp::Function` -- the derivative of `f`.
* `x0::Number` -- initial guess. For Newton's method this may be complex.
With the `ForwardDiff` package derivatives may be computed automatically. For example, defining
`D(f) = x -> ForwardDiff.derivative(f, float(x))` allows `D(f)` to be used for the first derivative.
Keyword arguments are passed to `find_zero` using the `Roots.Newton()` method.
See also `Roots.newton((f,fp), x0)` and `Roots.newton(fΔf, x0)` for simpler implementations.
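Examples:

```
Roots.newton(sin, cos, 3.0) # ≈ π
```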
"""
newton(f, fp, x0; kwargs...) = find_zero((f, fp), x0, Newton(); kwargs...)
## --------------------------------------------------
#=
"""
Roots.halley(f, fp, fpp, x0; kwargs...)
Implementation of Halley's method (cf `?Roots.Halley()`).
Arguments:
* `f::Function` -- function to find zero of
* `fp::Function` -- derivative of `f`.
* `fpp:Function` -- second derivative of `f`.
* `x0::Number` -- initial guess
With the `ForwardDiff` package derivatives may be computed automatically. For example, defining
`D(f) = x -> ForwardDiff.derivative(f, float(x))` allows `D(f)` and `D(D(f))` to be used for the first and second
derivatives, respectively.
Keyword arguments are passed to `find_zero` using the `Roots.Halley()` method.
"""
=#
halley(f, fp, fpp, x0; kwargs...) = find_zero((f, fp, fpp), x0, Halley(); kwargs...)
#=
"""
Roots.quadratic_inverse(f, fp, fpp, x0; kwargs...)
Implementation of the quadratic inverse method (cf `?Roots.QuadraticInverse()`).
Arguments:
* `f::Function` -- function to find zero of
* `fp::Function` -- derivative of `f`.
* `fpp:Function` -- second derivative of `f`.
* `x0::Number` -- initial guess
With the `ForwardDiff` package derivatives may be computed automatically. For example, defining
`D(f) = x -> ForwardDiff.derivative(f, float(x))` allows `D(f)` and `D(D(f))` to be used for the first and second
derivatives, respectively.
Keyword arguments are passed to `find_zero` using the `Roots.QuadraticInverse()` method.
"""
=#
quadratic_inverse(f, fp, fpp, x0; kwargs...) =
find_zero((f, fp, fpp), x0, QuadraticInverse(); kwargs...)
superhalley(f, fp, fpp, x0; kwargs...) =
find_zero((f, fp, fpp), x0, SuperHalley(); kwargs...)
chebyshev_like(f, fp, fpp, x0; kwargs...) =
find_zero((f, fp, fpp), x0, ChebyshevLike(); kwargs...)
## --------------------------------------------------
## MATLAB interface to find_zero
## Main functions are
## * fzero(f, ...) to find _a_ zero of f, a univariate function
## * fzeros(f, ...) to attempt to find all zeros of f, a univariate function
## unlike `find_zero` these do not specialize on f, so
## will be faster the first use, and slower for subsequent uses (for the same f)
struct FnWrapper
f
end
(F::FnWrapper)(x::Number) = first(F.f(x))
"""
fzero(f, x0; order=0, kwargs...)
fzero(f, x0, M; kwargs...)
fzero(f, x0, M, N; kwargs...)
fzero(f, x0; kwargs...)
fzero(f, a::Number, b::Number; kwargs...)
fzero(f, a::Number, b::Number; order=?, kwargs...)
fzero(f, fp, a::Number; kwargs...)
Find zero of a function using one of several iterative algorithms.
* `f`: a scalar function or callable object
* `x0`: an initial guess, a scalar value or tuple of two values
* `order`: An integer, symbol, or string indicating the algorithm to
use for `find_zero`. The `Order0` default may be specified directly
by `order=0`, `order=:0`, or `order="0"`; `Order1()` by `order=1`,
`order=:1`, `order="1"`, or `order=:secant`; `Order1B()` by
`order="1B"`, etc.
* `M`: a specific method, as would be passed to `find_zero`, bypassing
the use of the `order` keyword
* `N`: a specific bracketing method. When given, if a bracket is
identified, method `N` will be used to finish instead of method `M`.
* `a`, `b`: When two values are passed along, if no `order` value is
specified, `Bisection` will be used over the bracketing interval
`(a,b)`. If an `order` value is specified, the value of `x0` will be set to
`(a,b)` and the specified method will be used.
* `fp`: when `fp` is specified (assumed to compute the derivative of `f`),
Newton's method will be used
* `kwargs...`: See `find_zero` for the specification of tolerances and other keyword arguments
Examples:
```
fzero(sin, 3) # use Order0() method, the default
fzero(sin, 3, order=:secant) # use secant method (also just `order=1`)
fzero(sin, 3, Roots.Order1B()) # use secant method variant for multiple roots.
fzero(sin, 3, 4) # use bisection method over (3,4)
fzero(sin, 3, 4, xatol=1e-6) # use bisection method until |x_n - x_{n-1}| <= 1e-6
fzero(sin, 3, 3.1, order=1) # use secant method with x_0=3.0, x_1 = 3.1
fzero(sin, (3, 3.1), order=2) # use Steffensen's method with x_0=3.0, x_1 = 3.1
fzero(sin, cos, 3) # use Newton's method
```
!!! note
Unlike `find_zero`, `fzero` does not specialize on the type of the function argument.
This has the advantage of making the first use of the function `f` faster, but subsequent uses slower.
"""
function fzero(f, x0::Number; kwargs...)
x = float(x0)
isinf(x) && throw(ConvergenceFailed("An initial value must be finite"))
derivative_free(f, x; kwargs...)
end
function fzero(f, x0, M::AbstractUnivariateZeroMethod; kwargs...)
find_zero(FnWrapper(f), x0, M; kwargs...)
end
function fzero(
f,
x0,
M::AbstractUnivariateZeroMethod,
N::AbstractBracketingMethod;
kwargs...,
)
find_zero(FnWrapper(f), x0, M, N; kwargs...)
end
function fzero(f, bracket::Tuple{T,S}; kwargs...) where {T<:Number,S<:Number}
d = Dict(kwargs)
if haskey(d, :order)
find_zero(FnWrapper(f), bracket, _method_lookup[d[:order]]; kwargs...)
else
find_zero(FnWrapper(f), bracket, Bisection(); kwargs...)
end
end
fzero(f, a::Number, b::Number, args...; kwargs...) = fzero(f, (a, b), args...; kwargs...)
fzero(f, x; kwargs...) = find_zero(FnWrapper(f), x; kwargs...)
fzero(f::Function, fp::Function, x0::Real; kwargs...) =
find_zero((f, fp), x0, Newton(); kwargs...)
# match fzero up with find_zero
_method_lookup = Dict(
0 => Order0(),
:0 => Order0(),
"0" => Order0(),
1 => Order1(),
:1 => Order1(),
"1" => Order1(),
:secant => Order1(),
:Secant => Order1(),
"1B" => Order1B(),
:king => Order1B(),
:King => Order1B(),
2 => Order2(),
:2 => Order2(),
:steffensen => Order2(),
:Steffensen => Order2(),
"2" => Order2(),
"2B" => Order2B(),
:esser => Order2B(),
:Esser => Order2B(),
5 => Order5(),
:5 => Order5(),
"5" => Order5(),
8 => Order8(),
:8 => Order8(),
"8" => Order8(),
16 => Order16(),
:16 => Order16(),
"16" => Order16(),
)
@noinline function derivative_free(f, x0; order=0, kwargs...)
if haskey(_method_lookup, order)
M = _method_lookup[order]
else
throw(ArgumentError("Invalid order specified. See ?fzero."))
end
# d = (kv[1] == :ftol ? :atol=>kv[2] :
# kv[1] == :ftolrel ? :rtol=>kv[2] :
# kv[1] == :xtol ? :xatol=>kv[2] :
# kv[1] == :xtolrel ? xrtol=>kv[2] :
# kv[1] => kv[1] for kv in kwargs)
d = Dict(kwargs)
for (o, n) in ((:ftol, :atol), (:ftolrel, :rtol), (:xtol, :xatol), (:xtolrel, :xrtol))
if haskey(d, o)
d[n] = d[o]
end
end
find_zero(FnWrapper(f), x0, M; d...)
end
## fzeros
"""
fzeros(f, a, b; kwargs...)
fzeros(f, ab; kwargs...)
Searches for all zeros of `f` within an interval `(a,b)`. Assumes neither `a` or `b` is a zero.
Compatibility interface for [`find_zeros`](@ref).
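Examples:

```
fzeros(cos, 0, 10) # ≈ [π/2, 3π/2, 5π/2]
```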
"""
function fzeros(f, a::Number, b::Number; kwargs...)
find_zeros(FnWrapper(f), float(a), float(b); kwargs...)
end
fzeros(f, ab; kwargs...) = fzeros(f, _extrema(ab)...; kwargs...)
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 11492 | ### Options
abstract type AbstractUnivariateZeroOptions end
struct UnivariateZeroOptions{Q,R,S,T} <: AbstractUnivariateZeroOptions
xabstol::Q
xreltol::R
abstol::S
reltol::T
maxiters::Int
strict::Bool
end
struct XExactOptions{S,T} <: AbstractUnivariateZeroOptions
abstol::S
reltol::T
maxiters::Int
strict::Bool
end
struct FExactOptions{S,T} <: AbstractUnivariateZeroOptions
xabstol::S
xreltol::T
maxiters::Int
strict::Bool
end
struct ExactOptions <: AbstractUnivariateZeroOptions
maxiters::Int
strict::Bool
end
init_options(
M::AbstractUnivariateZeroMethod,
state::AbstractUnivariateZeroState{T,S};
kwargs...,
) where {T,S} = init_options(M, T, S; kwargs...)
# This function is type unstable (see issue #446).
# It remains as a fallback; in #446 more specific
# choices based on M are made.
function init_options(M, T=Float64, S=Float64; kwargs...)
d = kwargs
defs = default_tolerances(M, T, S)
δₐ = get(d, :xatol, get(d, :xabstol, defs[1]))
δᵣ = get(d, :xrtol, get(d, :xreltol, defs[2]))
ϵₐ = get(d, :atol, get(d, :abstol, defs[3]))
ϵᵣ = get(d, :rtol, get(d, :reltol, defs[4]))
maxiters = get(d, :maxiters, get(d, :maxevals, get(d, :maxsteps, defs[5])))
strict = get(d, :strict, defs[6])
iszero(δₐ) && iszero(δᵣ) && iszero(ϵₐ) && iszero(ϵᵣ) && return ExactOptions(maxiters, strict)
iszero(δₐ) && iszero(δᵣ) && return XExactOptions(ϵₐ, ϵᵣ, maxiters, strict)
iszero(ϵₐ) && iszero(ϵᵣ) && return FExactOptions(δₐ, δᵣ, maxiters, strict)
return UnivariateZeroOptions(δₐ, δᵣ, ϵₐ, ϵᵣ, maxiters, strict)
end
function init_options(
M::AbstractNonBracketingMethod,
state::AbstractUnivariateZeroState{T,S};
kwargs...,
) where {T,S}
d = kwargs
defs = default_tolerances(M, T, S)
δₐ = get(d, :xatol, get(d, :xabstol, defs[1]))
δᵣ = get(d, :xrtol, get(d, :xreltol, defs[2]))
ϵₐ = get(d, :atol, get(d, :abstol, defs[3]))
ϵᵣ = get(d, :rtol, get(d, :reltol, defs[4]))
maxiters = get(d, :maxiters, get(d, :maxevals, get(d, :maxsteps, defs[5])))
strict = get(d, :strict, defs[6])
return UnivariateZeroOptions(δₐ, δᵣ, ϵₐ, ϵᵣ, maxiters, strict)
end
## --------------------------------------------------
"""
default_tolerances(M::AbstractUnivariateZeroMethod, [T], [S])
The default tolerances for most methods are `xatol=eps(T)`,
`xrtol=eps(T)`, `atol=4eps(S)`, and `rtol=4eps(S)`, with the proper
units (absolute tolerances have the units of `x` and `f(x)`; relative
tolerances are unitless). For `Complex{T}` values, `T` is used.
The number of iterations is limited by `maxiters=40`.
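For example, `Roots.default_tolerances(Order1())` returns the tuple
`(xatol, xrtol, atol, rtol, maxiters, strict)` evaluated with `T = S = Float64`.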
"""
default_tolerances(M::AbstractUnivariateZeroMethod) =
default_tolerances(M, Float64, Float64)
function default_tolerances(
::AbstractUnivariateZeroMethod,
::Type{T},
::Type{S},
) where {T,S}
xatol = eps(real(T)) * oneunit(real(T))
xrtol = eps(real(T)) # unitless
atol = 4 * eps(real(float(S))) * oneunit(real(S))
rtol = 4 * eps(real(float(S))) * one(real(S))
maxiters = 40
strict = false
(xatol, xrtol, atol, rtol, maxiters, strict)
end
## --------------------------------------------------
# ## Assess convergence
## test f == 0 not f ≈ 0
function is_exact_zero_f(
::AbstractNonBracketingMethod,
state::AbstractUnivariateZeroState,
options,
)
fb = state.fxn1
iszero(fb)
end
function is_exact_zero_f(
::AbstractBracketingMethod,
state::AbstractUnivariateZeroState,
options,
)
fa, fb = state.fxn0, state.fxn1
iszero(fa) || iszero(fb)
end
## test f ≈ 0 not f == 0
function is_approx_zero_f(
::AbstractUnivariateZeroMethod,
state::AbstractUnivariateZeroState,
options::O,
) where {O<:AbstractUnivariateZeroOptions}
ab, afb = abs(state.xn1), abs(state.fxn1)
ϵₐ, ϵᵣ = options.abstol, options.reltol
Δ = max(_unitless(ϵₐ), _unitless(ab) * ϵᵣ)
afb ≤ Δ * oneunit(afb)
end
## test f ≈ 0 not f == 0
function is_approx_zero_f(
::AbstractBracketingMethod,
state::AbstractUnivariateZeroState,
options::O,
) where {O<:AbstractUnivariateZeroOptions}
ab₁, afb₁ = abs(state.xn1), abs(state.fxn1)
ab₀, afb₀ = abs(state.xn0), abs(state.fxn0)
ϵₐ, ϵᵣ = options.abstol, options.reltol
u, fu = afb₀ < afb₁ ? (ab₀, afb₀) : (ab₁, afb₁)
Δ = max(_unitless(ϵₐ), _unitless(u) * ϵᵣ)
fu ≤ Δ * oneunit(fu)
end
function is_approx_zero_f(
::AbstractUnivariateZeroMethod,
state::AbstractUnivariateZeroState,
options::O,
relaxed::Any,
) where {O<:AbstractUnivariateZeroOptions}
ab, afb = abs(state.xn1), abs(state.fxn1)
ϵₐ, ϵᵣ = options.abstol, options.reltol
Δ = max(_unitless(ϵₐ), _unitless(ab) * ϵᵣ)
Δ = cbrt(abs(_unitless(Δ))) * oneunit(afb) # relax test
afb <= Δ
end
function is_approx_zero_f(
::AbstractUnivariateZeroMethod,
state::AbstractUnivariateZeroState,
options::O,
) where {O<:Union{ExactOptions,FExactOptions}}
false
end
function is_approx_zero_f(
::AbstractBracketingMethod,
state::AbstractUnivariateZeroState,
options::O,
) where {O<:Union{ExactOptions,FExactOptions}}
false
end
## --------------------------------------------------
# test xₙ₊₁ - xₙ ≈ 0
function iszero_Δx(
::AbstractUnivariateZeroMethod,
state::AbstractUnivariateZeroState,
options::O,
) where {O<:Union{ExactOptions,XExactOptions}}
a, b = state.xn0, state.xn1
if b < a
a, b = b, a
end
nextfloat(float(a)) == float(b)
end
function iszero_Δx(
::AbstractBracketingMethod,
state::AbstractUnivariateZeroState,
options::O,
) where {O<:Union{FExactOptions,UnivariateZeroOptions}}
a, b, fa, fb = state.xn0, state.xn1, state.fxn0, state.fxn1
u, fu = choose_smallest(a, b, fa, fb)
δₐ, δᵣ = options.xabstol, options.xreltol
δₓ = max(δₐ, 2 * abs(u) * δᵣ) # needs non-zero δₐ to stop near 0
abs(b - a) ≤ δₓ
end
function iszero_Δx(
::AbstractNonBracketingMethod,
state::AbstractUnivariateZeroState,
options::O,
) where {O<:Union{FExactOptions,UnivariateZeroOptions}}
a, b, fa, fb = state.xn0, state.xn1, state.fxn0, state.fxn1
δₐ, δᵣ = options.xabstol, options.xreltol
isapprox(a, b, atol=δₐ, rtol=δᵣ)
end
# test when f has converged to ensure the iterates have not run away
function is_small_Δx(
M::AbstractUnivariateZeroMethod,
state::AbstractUnivariateZeroState,
options,
)
δ = _unitless(abs(state.xn1 - state.xn0))
δₐ, δᵣ = options.xabstol, options.xreltol
Δₓ = max(_unitless(δₐ), _unitless(abs(state.xn1)) * δᵣ)
Δₓ = sqrt(sqrt(sqrt((abs(_unitless(Δₓ)))))) # faster than x^(1/8)
return δ ≤ Δₓ
end
isnan_f(M::AbstractBracketingMethod, state) = isnan(state.fxn1) || isnan(state.fxn0)
isnan_f(M::AbstractNonBracketingMethod, state) = isnan(state.fxn1)
isinf_f(M::AbstractBracketingMethod, state) = isinf(state.fxn1) || isinf(state.fxn0)
isinf_f(M::AbstractNonBracketingMethod, state) = isinf(state.fxn1)
## --------------------------------------------------
"""
Roots.assess_convergence(method, state, options)
Assess if algorithm has converged.
Return a convergence flag and a Boolean indicating if algorithm has terminated (converged or not converged)
If algorithm hasn't converged this returns `(:not_converged, false)`.
If algorithm has stopped or converged, return flag and `true`. Flags are:
* `:x_converged` if `xn1 ≈ xn`, typically with non-zero tolerances specified.
* `:f_converged` if `|f(xn1)| < max(atol, |xn1|*rtol)`
* `:nan` or `:inf` if fxn1 is `NaN` or an infinity.
* `:not_converged` if algorithm should continue
Does not check number of steps taken nor number of function evaluations.
In `decide_convergence`, stopped values (and `:x_converged` when `strict=false`) are checked for convergence with a relaxed tolerance.
"""
function assess_convergence(M::Any, state::AbstractUnivariateZeroState, options)
# return convergence_flag, boolean
is_exact_zero_f(M, state, options) && return (:exact_zero, true)
isnan_f(M, state) && return (:nan, true)
isinf_f(M, state) && return (:inf, true)
is_approx_zero_f(M, state, options) && return (:f_converged, true)
iszero_Δx(M, state, options) && return (:x_converged, true)
return (:not_converged, false)
end
# speeds up exact f values by just a bit (2% or so) over the above, so guess this is worth it.
function assess_convergence(
M::AbstractBracketingMethod,
state::AbstractUnivariateZeroState,
options::FExactOptions,
# options::Union{ExactOptions,FExactOptions},
)
(iszero(state.fxn1) || iszero(state.fxn0)) && return (:exact_zero, true)
(isnan(state.fxn1) || isnan(state.fxn0)) && return (:nan, true)
a, b, fa, fb = state.xn0, state.xn1, state.fxn0, state.fxn1
u, fu = choose_smallest(a, b, fa, fb)
δₐ, δᵣ = options.xabstol, options.xreltol
δₓ = max(δₐ, 2 * abs(u) * δᵣ) # needs non-zero δₐ to stop near 0
abs(b - a) ≤ δₓ && return (:x_converged, true)
return (:not_converged, false)
end
# state has stopped, this identifies if it has converged
#=
"""
decide_convergence(M, F, state, options, convergence_flag)
When the algorithm terminates, this function decides the stopped value or returns NaN
"""
=#
function decide_convergence(
M::AbstractNonBracketingMethod,
F,
state::AbstractUnivariateZeroState{T,S},
options,
val,
) where {T,S}
xn0, xn1 = state.xn0, state.xn1
fxn1 = state.fxn1
val ∈ (:f_converged, :exact_zero, :converged) && return xn1
## XXX this could be problematic
val == :nan && return xn1
val == :inf_nan && return xn1
## stopping is a heuristic, x_converged can mask issues
## if strict=true or the tolerance for f is 0 this will return xn1 if x_converged
## if strict == false, this will also check f(xn) ~ - with a relaxed
## tolerance
if options.strict || isa(options, ExactOptions) || isa(options, FExactOptions) #|| (iszero(options.abstol) && iszero(options.reltol))
val == :x_converged && return xn1
is_approx_zero_f(M, state, options) && return xn1
#_is_f_approx_0(fxn1, xn1, options.abstol, options.reltol) && return xn1
else
if val == :x_converged
# The XExact case isn't always spelled out in the type, so
# we replicate a bit here
δ, ϵ = options.abstol, options.reltol
iszero(δ) && iszero(ϵ) && return xn1
is_approx_zero_f(M, state, options, true) && return xn1
elseif val == :not_converged
# this is the case where runaway can happen
## XXX Need a good heuristic to catch that
is_approx_zero_f(M, state, options, :relaxed) &&
is_small_Δx(M, state, options) &&
return xn1
end
end
nan(T) * xn1
end
# assumes stopped = :x_converged
function decide_convergence(
::AbstractBracketingMethod,
F,
state::AbstractUnivariateZeroState,
options,
val,
)
a, b = state.xn0, state.xn1
fa, fb = state.fxn0, state.fxn1
iszero(fa) && return a
iszero(fb) && return b
isnan(fa) && return a
isnan(fb) && return b
# get as close as possible with one extra function call when closeness
# is requested
if b == nextfloat(nextfloat(float(a)))
c = nextfloat(float(a))
fc = first(F(c))
m = minimum(abs, (fa, fb, fc))
abs(fc) == m && return c
abs(fa) == m && return a
return b
end
abs(fa) < abs(fb) ? a : b
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 16615 | """
find_zero(f, x0, M, [N::AbstractBracketingMethod], [p=nothing]; kwargs...)
Interface to one of several methods for finding zeros of a univariate function, e.g. solving ``f(x)=0``.
# Arguments
## Positional arguments
* `f`: the function (univariate or `f(x,p)` with `p` holding parameters)
* `x0`: the initial condition (a value, initial values, or bracketing interval)
* `M`: some `AbstractUnivariateZeroMethod` specifying the solver
* `N`: some bracketing method, when specified creates a hybrid method
* `p`: for specifying a parameter to `f`. Also can be a keyword, but a positional argument is helpful with broadcasting.
## Keyword arguments
* `xatol`, `xrtol`: absolute and relative tolerance to decide if `xₙ₊₁ ≈ xₙ`
* `atol`, `rtol`: absolute and relative tolerance to decide if `f(xₙ) ≈ 0`
* `maxiters`: specify the maximum number of iterations the algorithm can take.
* `verbose::Bool`: specifies if details about algorithm should be shown
* `tracks`: allows specification of `Tracks` objects
# Extended help
# Initial starting value
For most methods, `x0` is a scalar value indicating the initial value
in the iterative procedure. (Secant methods can have a tuple specify
their initial values.) Values must be a subtype of `Number` and have
methods for `float`, `real`, and `oneunit` defined.
For bracketing intervals, `x0` is specified using a tuple, a vector,
or any iterable with `extrema` defined. A bracketing interval,
``[a,b]``, is one where ``f(a)`` and ``f(b)`` have different signs.
# Return value
If the algorithm succeeds, the approximate root identified is
returned. A `ConvergenceFailed` error is thrown if the algorithm
fails. The alternate form `solve(ZeroProblem(f,x0), M)` returns `NaN`
in case of failure.
# Specifying a method
A method is specified to indicate which algorithm to employ:
* There are methods where a bracket is specified: [`Bisection`](@ref),
[`A42`](@ref), [`AlefeldPotraShi`](@ref), [`Roots.Brent`](@ref),
among others. Bisection is the default for basic floating point
types, but `A42` generally requires far fewer iterations.
* There are several derivative-free methods: cf. [`Order0`](@ref),
[`Order1`](@ref) (also [`Roots.Secant`](@ref)), [`Order2`](@ref)
(also [`Steffensen`](@ref)), [`Order5`](@ref),
[`Order8`](@ref), and [`Order16`](@ref), where the number indicates
the order of the convergence.
* There are some classical methods where derivatives need specification:
[`Roots.Newton`](@ref), [`Roots.Halley`](@ref),
[`Roots.Schroder`](@ref), among others.
* Methods intended for problems with multiplicities include [`Roots.Order1B`](@ref),
[`Roots.Order2B`](@ref), and `Roots.ThukralXB` for different `X`s.
* The family [`Roots.LithBoonkkampIJzerman{S,D}`](@ref) ,for different
`S` and `D`, uses a linear multistep method root finder. The `(2,0)`
method is the secant method, `(1,1)` is Newton's method.
For more detail, see the help page for each method (e.g.,
`?Order1`). Non-exported methods must be qualified with the module name,
as in `?Roots.Schroder`.
If no method is specified, the default method depends on `x0`:
* If `x0` is a scalar, the default is the more robust `Order0` method.
* If `x0` is a tuple, vector, or iterable with `extrema` defined
indicating a *bracketing* interval, then the `Bisection` method is
used for `Float64`, `Float32` or `Float16` types; otherwise the
`A42` method is used.
The default methods are chosen to be robust; they may not be as efficient as some others.
# Specifying the function
The function(s) are passed as the first argument.
For the few methods that use one or more derivatives (`Newton`,
`Halley`, `Schroder`, `LithBoonkkampIJzerman(S,D)`, etc.) a tuple of
functions is used. For the classical algorithms, a function returning
`(f(x), f(x)/f'(x), [f'(x)/f''(x)])` may be used.
# Optional arguments (tolerances, limit evaluations, tracing)
* `xatol` - absolute tolerance for `x` values.
* `xrtol` - relative tolerance for `x` values.
* `atol` - absolute tolerance for `f(x)` values.
* `rtol` - relative tolerance for `f(x)` values.
* `maxiters` - limit on maximum number of iterations.
* `strict` - if `false` (the default), when the algorithm stops, possible zeros are checked with a relaxed tolerance.
* `verbose` - if `true` a trace of the algorithm will be shown on successful completion. See the internal [`Roots.Tracks`](@ref) object to save this trace.
See the help string for `Roots.assess_convergence` for details on
convergence. See the help page for `Roots.default_tolerances(method)`
for details on the default tolerances.
In general, with floating point numbers, convergence must be
understood as not an absolute statement. Even if mathematically `α` is
an answer and `xstar` the floating point realization, it may be that
`f(xstar) - f(α) ≈ xstar ⋅ f'(α) ⋅ eps(α)`, so the role of tolerances must be
appreciated, and at times specified.
For the `Bisection` methods, convergence is guaranteed over `Float64`
values, so the tolerances are set to be ``0`` by default.
If a bracketing method is passed in after the method specification,
then whenever a bracket is identified during the algorithm, the method
will switch to the bracketing method to identify the zero. (Bracketing
methods are mathematically guaranteed to converge, non-bracketing
methods may or may not converge.) This is what `Order0` does by
default, with an initial secant method switching to the
`AlefeldPotraShi` method should a bracket be encountered.
Note: The order of the method is hinted at in the naming scheme. A
scheme is order `r` if, with `eᵢ = xᵢ - α`, `eᵢ₊₁ = C⋅eᵢʳ`. If the
error `eᵢ` is small enough, then essentially the error will gain `r`
times as many leading zeros each step. However, if the error is not
small, this will not be the case. Without good initial guesses, a high
order method may still converge slowly, if at all. The `OrderN`
methods have some heuristics employed to ensure a wider range for
convergence at the cost of not faithfully implementing the method,
though those are available through unexported methods.
# Examples:
Default methods.
```jldoctest find_zero
julia> using Roots
julia> find_zero(sin, 3) # use Order0()
3.141592653589793
julia> find_zero(sin, (3,4)) # use Bisection()
3.141592653589793
```
Specifying a method,
```jldoctest find_zero
julia> find_zero(sin, (3,4), Order1()) # can specify two starting points for secant method
3.141592653589793
julia> find_zero(sin, 3.0, Order2()) # Use Steffensen method
3.1415926535897936
julia> find_zero(sin, big(3.0), Order16()) # rapid convergence
3.141592653589793238462643383279502884197169399375105820974944592307816406286198
julia> find_zero(sin, (3, 4), A42()) # fewer function calls than Bisection(), in this case
3.141592653589793
julia> find_zero(sin, (3, 4), FalsePosition(8)) # 1 of 12 possible algorithms for false position
3.141592653589793
julia> find_zero((sin,cos), 3.0, Roots.Newton()) # use Newton's method
3.141592653589793
julia> find_zero((sin, cos, x->-sin(x)), 3.0, Roots.Halley()) # use Halley's method
3.141592653589793
```
Changing tolerances.
```jldoctest find_zero
julia> fn = x -> (2x*cos(x) + x^2 - 3)^10/(x^2 + 1);
julia> x0, xstar = 3.0, 2.9947567209477;
julia> fn(find_zero(fn, x0, Order2())) <= 1e-14 # f(xₙ) ≈ 0, but Δxₙ can be largish
true
julia> find_zero(fn, x0, Order2(), atol=0.0, rtol=0.0) # error: x_n ≉ x_{n-1}; just f(x_n) ≈ 0
ERROR: Roots.ConvergenceFailed("Algorithm failed to converge")
[...]
julia> fn = x -> (sin(x)*cos(x) - x^3 + 1)^9;
julia> x0, xstar = 1.0, 1.112243913023029;
julia> isapprox(find_zero(fn, x0, Order2()), xstar; atol=1e-4)
true
julia> find_zero(fn, x0, Order2(), maxiters=3) # need more steps to converge
ERROR: Roots.ConvergenceFailed("Algorithm failed to converge")
[...]
```
# Tracing
Passing `verbose=true` will show details on the steps of the algorithm.
The `tracks` argument allows
the passing of a [`Roots.Tracks`](@ref) object to record the values of `x` and `f(x)` used in
the algorithm.
!!! note
See [`solve!`](@ref) and [`ZeroProblem`](@ref) for an alternate interface.
"""
function find_zero(
f,
x0,
M::AbstractUnivariateZeroMethod,
p′=nothing;
p=nothing,
verbose=false,
tracks::AbstractTracks=NullTracks(),
kwargs...,
)
xstar = solve(
ZeroProblem(f, x0),
M,
p′ === nothing ? p : p′;
verbose=verbose,
tracks=tracks,
kwargs...,
)
isnan(xstar) && throw(ConvergenceFailed("Algorithm failed to converge"))
xstar
end
# defaults when method is not specified
# if a number, use Order0
# O/w use a bracketing method of an assumed iterable
find_zero_default_method(x0::Number) = Order0()
function find_zero_default_method(x0)
T = eltype(float.(_extrema(x0)))
T <: Union{Float16,Float32,Float64} ? Bisection() : A42()
end
find_zero(f, x0; kwargs...) = find_zero(f, x0, find_zero_default_method(x0); kwargs...)
find_zero(f, x0, p; kwargs...) =
find_zero(f, x0, find_zero_default_method(x0), p; kwargs...)
## ---------------
## Create an Iterator interface
# returns NaN, not an error, if there are issues
"""
ZeroProblem{F,X}
A container for a function and initial guess to be used with `solve`.
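## Examples:

```
prob = ZeroProblem(sin, 3.0)
solve(prob) # ≈ π
```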
"""
struct ZeroProblem{F,X}
F::F
x₀::X
end
Base.broadcastable(p::ZeroProblem) = Ref(p)
# possible unicode operator to shorten `solve(ZeroProblem(f,x), M)` to `solve(f ≀ x, M)`
f ≀ x = ZeroProblem(f, x) # \wr[tab]
## --------------------------------------------------
## The actual iterating object
struct ZeroProblemIterator{M,N,F,S,O,L}
M::M
N::N
F::F
state::S
options::O
logger::L
end
Base.show(io::IO, Z::ZeroProblemIterator) =
print(io, "A problem object to pass to `solve!`")
## Initialize a Zero Problem Iterator
## init(Z,p)
## init(Z,M,p)
## init(M,F,state, [options], [logger])
## p is both a keyword and positional
## positional allows broadcasting
## keyword more explicit
function init(
𝑭𝑿::ZeroProblem,
M::AbstractUnivariateZeroMethod,
p′=nothing;
p=nothing,
verbose::Bool=false,
tracks=NullTracks(),
kwargs...,
)
F = Callable_Function(M, 𝑭𝑿.F, something(p′, p, missing))
state = init_state(M, F, 𝑭𝑿.x₀)
options = init_options(M, state; kwargs...)
l = Tracks(verbose, tracks, state)
incfn(l, initial_fncalls(M))
ZeroProblemIterator(M, nothing, F, state, options, l)
end
function init(𝑭𝑿::ZeroProblem, p′=nothing; kwargs...)
M = length(𝑭𝑿.x₀) == 1 ? Order0() : AlefeldPotraShi()
init(𝑭𝑿, M, p′; kwargs...)
end
function init(
M::AbstractUnivariateZeroMethod,
F,
state::AbstractUnivariateZeroState,
options::AbstractUnivariateZeroOptions=init_options(M, state),
l::AbstractTracks=NullTracks(),
)
ZeroProblemIterator(M, Nothing, Callable_Function(M, F), state, options, l)
end
"""
solve!(P::ZeroProblemIterator)
solve(fx::ZeroProblem, [M], [N]; p=nothing, kwargs...)
init(fx::ZeroProblem, [M], [N];
p=nothing,
verbose=false, tracks=NullTracks(), kwargs...)
Solve for the zero of a scalar-valued univariate function specified through `ZeroProblem` or
`ZeroProblemIterator` using the `CommonSolve` interface.
The defaults for `M` and `N` depend on the `ZeroProblem`: if `x0` is a
number, then `M=Secant()` and `N=AlefeldPotraShi()` is used (`Order0`); if `x0`
has `2` (or more values) then it is assumed to be a bracketing
interval and `M=AlefeldPotraShi()` is used.
The methods involved with this interface are:
* `ZeroProblem`: used to specify a problem with a function (or functions) and an initial guess
* `solve`: to solve for a zero in a `ZeroProblem`
The latter calls the following, which can be useful independently:
* `init`: to initialize an iterator with a method for solution, any
adjustments to the default tolerances, and a specification to log
the steps or not.
* `solve!` to iterate to convergence.
Returns `NaN`, not an error like `find_zero`, when the problem can not
be solved. Tested for zero allocations.
## Examples:
```jldoctest find_zero
julia> using Roots
julia> fx = ZeroProblem(sin, 3)
ZeroProblem{typeof(sin), Int64}(sin, 3)
julia> solve(fx)
3.141592653589793
```
Or, if the iterable is required
```jldoctest find_zero
julia> problem = init(fx);
julia> solve!(problem)
3.141592653589793
```
keyword arguments can be used to adjust the default tolerances.
```jldoctest find_zero
julia> solve(fx, Order5(); atol=1/100)
3.1425464815525403
```
The above is equivalent to:
```jldoctest find_zero
julia> problem = init(fx, Order5(), atol=1/100);
julia> solve!(problem)
3.1425464815525403
```
The keyword argument `p` may be used if the function(s) to be solved
depend on a parameter in their second positional argument (e.g.,
`f(x, p)`). For example
```jldoctest find_zero
julia> f(x,p) = exp(-x) - p # to solve p = exp(-x)
f (generic function with 1 method)
julia> fx = ZeroProblem(f, 1)
ZeroProblem{typeof(f), Int64}(f, 1)
julia> solve(fx; p=1/2) # log(2)
0.6931471805599453
```
This would be recommended, as there is no recompilation due to the function changing.
For use with broadcasting, `p` may also be the last positional argument.
The argument `verbose=true` for `init` instructs that steps to be logged;
The iterator interface allows for the creation of hybrid solutions,
such as is used when two methods are passed to `solve`.
For example, this is essentially how the hybrid default is constructed:
```jldoctest find_zero
julia> function order0(f, x)
fx = ZeroProblem(f, x)
p = init(fx, Roots.Secant())
xᵢ,st = ϕ = iterate(p)
while ϕ !== nothing
xᵢ, st = ϕ
state, ctr = st
fᵢ₋₁, fᵢ = state.fxn0, state.fxn1
if sign(fᵢ₋₁)*sign(fᵢ) < 0 # check for bracket
x0 = (state.xn0, state.xn1)
fx′ = ZeroProblem(f, x0)
p = init(fx′, Bisection())
xᵢ = solve!(p)
break
end
ϕ = iterate(p, st)
end
xᵢ
end
order0 (generic function with 1 method)
julia> order0(sin, 3)
3.141592653589793
```
"""
function solve!(P::ZeroProblemIterator; verbose=false)
M, F, state, options, l = P.M, P.F, P.state, P.options, P.logger
val, stopped = :not_converged, false
ctr = 1
log_step(l, M, state; init=true)
while !stopped
val, stopped = assess_convergence(M, state, options)
stopped && break
ctr > options.maxiters && break
state, stopped = update_state(M, F, state, options, l)
log_step(l, M, state)
ctr += 1
end
val, stopped = assess_convergence(M, state, options) # update val flag
α = decide_convergence(M, F, state, options, val)
log_convergence(l, val)
log_method(l, M)
log_last(l, α)
verbose && display(l)
α
end
# thread verbose through
"""
solve(fx::ZeroProblem, args...; verbose=false, kwargs...)
Dispatches to `solve!(init(fx, args...; kwargs...))`. See [`solve!`](@ref) for details.
"""
function solve(𝑭𝑿::ZeroProblem, args...; verbose=false, kwargs...)
Z = init(𝑭𝑿, args...; verbose=verbose, kwargs...)
solve!(Z; verbose=verbose)
end
# avoid splatting (issue #323, caused allocations)
function solve(
𝑭𝑿::ZeroProblem,
M::AbstractUnivariateZeroMethod,
p=nothing;
verbose=false,
kwargs...,
)
Z = init(𝑭𝑿, M, p; verbose=verbose, kwargs...)
solve!(Z; verbose=verbose)
end
# Optional iteration interface to handle looping
# * returns xₙ or (aₙ, bₙ) depending
# * throws error on non-convergence
function Base.iterate(P::ZeroProblemIterator, st=nothing)
## st = (val, (state, ctr, flag, stopped))
M, F, options, l = P.M, P.F, P.options, P.logger
if st === nothing
state = P.state
ctr, flag, stopped = 1, :not_converged, false
log_method(l, M)
log_step(l, M, state; init=true)
else
state, ctr, flag, stopped = st
ctr += 1
end
stopped && return nothing
ctr > options.maxiters && return nothing
state, stopped = update_state(M, F, state, options, l)
log_step(l, M, state)
flag, stopped = assess_convergence(M, state, options)
if stopped
α = decide_convergence(M, F, state, options, flag)
log_last(l, α)
isnan(α) && throw(ConvergenceFailed("Algorithm did not converge."))
end
return (last(state, M), (state, ctr, flag, stopped))
end
# Attempt to find all zeros in an interval (a,b)
# Algorithm due to @djsegal in https://github.com/JuliaMath/Roots.jl/pull/113
# A naive approach to find zeros: split (a,b) by n points, look into each for a zero
# * k is the oversampling rate for bisection. (It is relatively cheap to check for a
# bracket, so we oversample our intervals looking for brackets.)
# * assumes f(a) *not* a zero
function _fz(f, a, b, no_pts, k=4)
zs = Real[]
_fz!(zs, f, a, b, no_pts, k)
zs
end
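# Illustrative sketch (internal, not exported): scanning `sin` over (3, 7)
# brackets each sign change and bisects, e.g.
#
#     Roots._fz(sin, 3.0, 7.0, 12)  # ≈ [π, 2π]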
function _fz!(zs, f, a::T, b, no_pts, k=4) where {T}
pts = range(a, stop=b, length=(no_pts - 1) * k + 1)
n::Int = length(pts)
fs = f.(pts)
sfs = sign.(fs)
u::T = first(pts) # keep track of bigger interval
found_bisection_zero = false
for (i, x) in enumerate(pts)  # no need to copy `pts` here
q, r = divrem(i - 1, k)
if i > 1 && iszero(r)
v::T = x
if !found_bisection_zero
try
p1::T = identify_starting_point(u, v, sfs[(i - k):i])
rt::T = dfree(f, p1)
if !isnan(rt) && u < rt <= v
push!(zs, rt)
end
catch err
end
end
u = v
found_bisection_zero = false
end
if i < n
if iszero(fs[i + 1])
found_bisection_zero = true # kinda
push!(zs, pts[i + 1])
elseif sfs[i] * sfs[i + 1] < 0
found_bisection_zero = true
rt = bisection(f, x, pts[i + 1])
push!(zs, rt)
end
end
end
sort!(zs)
end
# the algorithm first scans for zeros using the naive approach, then
# splits (a,b) by these zeros. This struct holds the subintervals
struct Interval{T}
a::T
b::T
depth::Int
end
Base.show(io::IO, alpha::Interval) = print(io, "($(alpha.a), $(alpha.b))")
# check if f(a) is non zero using tolerances max(atol, eps()), rtol
function _non_zero(fa, a::T, atol, rtol) where {T}
abs(fa) >= max(atol, abs(a) * rtol * oneunit(fa) / oneunit(a), oneunit(fa) * eps(T))
end
# After splitting by zeros we have intervals (zm, zn) this is used to shrink
# to (zm+, zn-) where both are non-zeros, as defined above
function find_non_zero(f, a::T, barrier, xatol, xrtol, atol, rtol) where {T}
nan = (0 * a) / (0 * a) # try to get typed NaN
xtol = max(xatol, abs(a) * xrtol, oneunit(a) * eps(T))
sgn = barrier > a ? 1 : -1
ctr = 0
x = a + 2^ctr * sgn * xtol
while !_non_zero(f(x), x, atol, rtol)
ctr += 1
x += 2^ctr * sgn * xtol
((sgn > 0 && x > barrier) || (sgn < 0 && x < barrier)) && return nan
ctr > 100 && return nan
end
x
end
# split a < z1 < z2 < ... < zn < b into intervals (a+,z1-), (z1+, z2-), ...
# where possible; push! onto ints
function make_intervals!(ints, f, a, b, zs, depth, xatol, xrtol, atol, rtol)
pts = vcat(a, zs, b)
for (u, v) in zip(pts[1:(end - 1)], pts[2:end])
ur = find_non_zero(f, u, v, xatol, xrtol, atol, rtol)
isnan(ur) && continue
vl = find_non_zero(f, v, u, xatol, xrtol, atol, rtol)
isnan(vl) && continue
push!(ints, Interval(ur, vl, depth))
end
end
# adjust what we mean by x1 ~ x2 for purposes of adding a new zero
function approx_close(z1, z2, xatol, xrtol)
z₁, z₂, δ, ϵ = _unitless.((z1, z2, xatol, xrtol))
return isapprox(z₁, z₂; atol=sqrt(δ), rtol=sqrt(ϵ))
end
# is proposed not near xs? (false and we add proposed)
function not_near(proposed, xs, xatol, xrtol)
n = length(xs)
n <= 1 && return true
ind = n + 1
for (i, rt) in enumerate(xs)
if proposed < rt
ind = i
break
end
end
if ind > 1 # xs[ind-1] <= propose < xs[ind]
rt = xs[ind - 1]
approx_close(proposed, rt, xatol, xrtol) && return false
end
if ind <= n # value to right
rt = xs[ind]
approx_close(proposed, rt, xatol, xrtol) && return false
end
return true
end
"""
find_zeros(f, a, [b]; [no_pts=12, k=8, naive=false, xatol, xrtol, atol, rtol])
Search for zeros of `f` in the interval `[a,b]` with a heuristic
algorithm.
* `f`: a function or callable object
* `a`, `b`: If `b` is specified, the interval ``[a,b]`` is used. If only `a` is specified, it is passed to `extrema` to define the interval to search over. It is assumed that neither endpoint is a zero.
Returns a vector of zeros in sorted order, possibly empty.
# Extended help
# Examples
```jldoctest find_zeros
julia> using Roots
julia> find_zeros(x -> exp(x) - x^4, -5, 20) # a few well-spaced zeros
3-element Vector{Float64}:
-0.8155534188089606
1.4296118247255556
8.613169456441398
julia> find_zeros(x -> sin(x^2) + cos(x)^2, 0, 2pi) # many zeros
12-element Vector{Float64}:
1.78518032659534
2.391345462376604
3.2852368649448853
3.3625557095737544
4.016412952618305
4.325091924521049
4.68952781386834
5.00494459113514
5.35145266881871
5.552319796014526
5.974560835055425
6.039177477770888
julia> find_zeros(x -> cos(x) + cos(2x), (0, 4pi)) # mix of simple, non-simple zeros
6-element Vector{Float64}:
1.0471975511965976
3.141592653589943
5.235987755982988
7.330382858376184
9.424777960769228
11.519173063162574
julia> f(x) = (x-0.5) * (x-0.5001) * (x-1) # nearby zeros
f (generic function with 1 method)
julia> find_zeros(f, 0, 2)
3-element Vector{Float64}:
0.5
0.5001
1.0
julia> f(x) = (x-0.5) * (x-0.5001) * (x-4) * (x-4.001) * (x-4.2)
f (generic function with 1 method)
julia> find_zeros(f, 0, 10)
3-element Vector{Float64}:
0.5
0.5001
4.2
julia> f(x) = (x-0.5)^2 * (x-0.5001)^3 * (x-4) * (x-4.001) * (x-4.2)^2 # hard to identify
f (generic function with 1 method)
julia> find_zeros(f, 0, 10, no_pts=21) # too hard for default
5-element Vector{Float64}:
0.49999999999999994
0.5001
4.0
4.001
4.200000000000001
```
!!! note
Some cases where the number of zeros may be underreported:
* if the initial interval, `(a,b)`, is too wide
* if there are zeros that are very nearby
* the function is flat, e.g., `x->0`.
----
The basic algorithm checks for zeros among the endpoints, and then
divides the interval `(a,b)` into `no_pts-1` subintervals and then
proceeds to look for zeros through bisection or a derivative-free
method. As checking for a bracketing interval is relatively cheap and
bisection is guaranteed to converge, each interval has `k` pairs of
intermediate points checked for a bracket.
If any zeros are found, the algorithm uses these to partition `(a,b)`
into subintervals. Each subinterval is shrunk so that the endpoints
are not zeros and the process is repeated on the subinterval. If the
initial interval is too large, then the naive scanning for zeros may
be fruitless and no zeros will be reported. If there are nearby zeros,
the shrinking of the interval may jump over them, though as seen in
the examples, nearby roots can be identified correctly, though for
really nearby points, or very flat functions, it may help to increase
`no_pts`.
The tolerances are used to shrink the intervals, but not to find zeros
within a search. For searches, bisection is guaranteed to converge
with no specified tolerance. For the derivative free search, a
modification of the `Order0` method is used, which at worst case
compares `|f(x)| <= 8*eps(x)` to identify a zero. The algorithm might
identify more than one value for a zero, due to floating point
approximations. If a potential pair of zeros satisfy
`isapprox(a,b,atol=sqrt(xatol), rtol=sqrt(xrtol))` then they are
consolidated.
The algorithm can make many function calls. When zeros are found in an
interval, the naive search is carried out on each subinterval. To cut
down on function calls, though with some increased chance of missing
some zeros, the adaptive nature can be skipped with the argument
`naive=true` or the number of points stepped down.
The algorithm is derived from one in a
[PR](https://github.com/JuliaMath/Roots.jl/pull/113) by @djsegal.
!!! note
The `IntervalRootFinding` package provides a rigorous alternative to this heuristic one.
That package uses interval arithmetic, so can compute bounds on the size of the image of
an interval under `f`. If this image includes `0`, then it can look for the zero.
Bisection, on the other hand, only will look for a zero if the two endpoints have different signs,
a much more rigid condition for a potential zero.
!!! note "`IntervalRootFinding` extension"
As of version `1.9` an extension is provided so that when the `IntervalRootFinding` package is loaded,
the `find_zeros` function will call `IntervalRootFinding.roots` to find the isolating brackets and
`find_zero` to find the roots, when possible, **if** the interval is specified as an `Interval` object,
as created by `-1..1`, say.
For example, this function (due to `@truculentmath`) is particularly tricky, as it is positive at every floating point number, but has two zeros (the vertical asymptote at `15//11` is only negative within adjacent floating point values):
```
julia> using IntervalArithmetic, IntervalRootFinding, Roots
julia> g(x) = x^2 + 1 +log(abs( 11*x-15 ))/99
g (generic function with 1 method)
julia> find_zeros(g, -3, 3)
Float64[]
julia> IntervalRootFinding.roots(g, -3..3, IntervalRootFinding.Bisection)
1-element Vector{Root{Interval{Float64}}}:
Root([1.36363, 1.36364], :unknown)
```
A less extreme usage might be the following, where `unique` indicates Bisection could be useful and indeed `find_zeros` will identify these values:
```
julia> g(x) = exp(x) - x^5
g (generic function with 1 method)
julia> rts = IntervalRootFinding.roots(g, -20..20)
2-element Vector{Root{Interval{Float64}}}:
Root([12.7132, 12.7133], :unique)
Root([1.29585, 1.29586], :unique)
julia> find_zeros(g, -20, 20)
2-element Vector{Float64}:
1.2958555090953687
12.713206788867632
```
"""
function find_zeros(f, a, b=nothing; no_pts=12, k=8, naive=false, kwargs...)
if b === nothing
a0, b0 = map(float, _extrema(a))
else
a0, b0 = promote(float(a), float(b))
end
a0 = isinf(a0) ? nextfloat(a0) : a0
b0 = isinf(b0) ? prevfloat(b0) : b0
# set tolerances if not specified
fa0, fb0 = promote(float(f(a0)), float(f(b0)))
d = Dict(kwargs)
T, S = real(eltype(a0)), real(eltype(fa0))
xatol::T = get(d, :xatol, eps(one(T))^(4 / 5) * oneunit(T))
xrtol = get(d, :xrtol, eps(one(T)) * one(T))
atol::S = get(d, :atol, eps(float(S)) * oneunit(S))
rtol = get(d, :rtol, eps(float(S)) * one(S))
zs = T[] # collect zeros
# check endpoints for exact zeros, then narrow
abs(fa0) * oneunit(T) / oneunit(S) <= 8 * eps(a0) && push!(zs, a0)
abs(fb0) * oneunit(T) / oneunit(S) <= 8 * eps(b0) && push!(zs, b0)
a0 = find_non_zero(f, a0, b0, xatol, xrtol, atol, rtol)
b0 = find_non_zero(f, b0, a0, xatol, xrtol, atol, rtol)
(isnan(a0) || isnan(b0)) && throw(DomainError("no non-zero initial points found."))
_fz!(zs, f, a0, b0, no_pts, k) # initial zeros
ints = Interval{T}[] # collect subintervals
!naive &&
!isempty(zs) &&
make_intervals!(ints, f, a0, b0, zs, 1, xatol, xrtol, atol, rtol)
nzs = T[]
cnt = 0
while !naive && !isempty(ints)
cnt += 1
i = pop!(ints)
# this is fussy. Ideally, we would want to explore intervals
# with fewer points (each interval is already probed for k=4
# bisections) but how many fewer? We already had ~ (i.b-i.a) *
# no_pts / (b-a) points. Would want an increased density so n
# > (i.b - i.a) / (b - a) * no_pts but this doesn't perform as
# well as we might expect
# sub_no_pts = ceil(Int, (i.b - i.a) / (b-a) * no_pts * 2^(i.depth))
#sub_no_pts <= 2 && continue # stop on depth, always divide if roots
#sub_no_pts = max(3, floor(Int, no_pts / (2.0)^(i.depth)))
sub_no_pts = floor(Int, no_pts / (2.0)^(i.depth))
empty!(nzs)
if sub_no_pts >= 2
_fz!(nzs, f, i.a, i.b, sub_no_pts, k)
end
if !isempty(nzs)
azs = filter(rt -> not_near(rt, zs, xatol, xrtol), nzs) # trim out nearby roots
length(azs) == 0 && continue
append!(zs, azs)
sort!(zs)
i.depth > 4 && continue
make_intervals!(ints, f, i.a, i.b, azs, i.depth + 1, xatol, xrtol, atol, rtol)
end
end
length(zs) <= 1 && return zs
sort!(zs)
# may identify same zero with nearby values along the way
# this trims out with a relaxed tolerance on how close
# nearby roots can be. Default is epsilon^(2/5) ≈ 5e-7
inds = Int[1]
z1 = first(zs)
for i in 2:length(zs)
z2 = zs[i]
if !approx_close(z1, z2, xatol, xrtol)
push!(inds, i)
end
z1 = z2
end
return zs[inds]
end
# solve interface
"""
AllZeros
Type to indicate to `solve` that `find_zeros` should be used to solve the given `ZeroProblem`.
## Example
```
julia> Z = ZeroProblem(cos, (0, 2pi));
julia> solve(Z, AllZeros())
2-element Vector{Float64}:
1.5707963267948966
4.71238898038469
```
"""
struct AllZeros <: AbstractUnivariateZeroMethod end
function solve(𝑭𝑿::ZeroProblem, ::AllZeros; kwargs...)
F, x₀ = 𝑭𝑿.F, 𝑭𝑿.x₀
find_zeros(F, x₀; kwargs...)
end
## Wrapper for functions to abstract out f, (f,f'), ...
# indicate if we expect f() to return one or multiple values (e.g. Newton)
fn_argout(::AbstractUnivariateZeroMethod) = 1
# A hacky means to call a function so that parameters can be passed as desired
# and the correct number of outputs are computed
struct Callable_Function{Single,Tup,F,P}
f::F
p::P
end
function Callable_Function(M::AbstractUnivariateZeroMethod, f, p=nothing)
Single = Val{fn_argout(M)}
Tup = Val{isa(f, Tuple)}
F = typeof(f)
p′ = ismissing(p) ? nothing : p
P′ = typeof(p′)
Callable_Function{Single,Tup,F,P′}(f, p′)
end
function Callable_Function(M::AbstractUnivariateZeroMethod, F::Callable_Function, p=F.p)
Callable_Function(M, F.f, p)
end
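# Illustrative sketch of the wrapping (internal API; the `Newton` line assumes
# `fn_argout(Newton()) == 2`, i.e., that it requests two outputs per call):
#
#     F = Callable_Function(Secant(), (x, p) -> x^2 - p, 2)  # parameter p = 2 is stored
#     F(1.0)                                                 # -> 1.0^2 - 2 = -1.0
#
#     G = Callable_Function(Roots.Newton(), (sin, cos))      # tuple (f, f′)
#     G(1.0)                                                 # -> (sin(1.0), sin(1.0)/cos(1.0))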
# return f(x); (f(x), f(x)/f'(x)); *or* f(x), (f(x)/f'(x), f'(x)/f''(x), ...) # so N=1, 2 are special cased
# Callable_Function(output_arity, input_arity, F, p)
# First handle: x -> (f,f/f', f'/f'', ...)
(F::Callable_Function{Val{1},Val{false},𝑭,Nothing})(x) where {𝑭} = first(F.f(x))
(F::Callable_Function{Val{1},Val{false},𝑭,P})(x) where {𝑭,P} = first(F.f(x, F.p))
(F::Callable_Function{Val{2},Val{false},𝑭,Nothing})(x) where {𝑭} = F.f(x)[1:2]
(F::Callable_Function{Val{2},Val{false},𝑭,P})(x) where {𝑭,P} = F.f(x, F.p)[1:2]
# N ≥ 3 returns (f, (...))
function (F::Callable_Function{Val{N},Val{false},𝑭,Nothing})(x) where {N,𝑭}
fs = F.f(x)
fs[1], ntuple(i -> fs[i + 1], Val(N - 1))
end
function (F::Callable_Function{Val{N},Val{false},𝑭,P})(x) where {N,𝑭,P}
fs = F.f(x, F.p)
fs[1], ntuple(i -> fs[i + 1], Val(N - 1))
end
## f is specified as a tuple (f,f',f'', ...)
## N =1 return f(x)
(F::Callable_Function{Val{1},Val{true},𝑭,Nothing})(x) where {𝑭} = first(F.f)(x)
(F::Callable_Function{Val{1},Val{true},𝑭,P})(x) where {𝑭,P} = first(F.f)(x, F.p)
## N=2 return (f(x), f(x)/f'(x))
function (F::Callable_Function{Val{2},Val{true},𝑭,Nothing})(x) where {𝑭}
f, f′ = (F.f[1])(x), (F.f[2])(x)
f, f / f′
end
function (F::Callable_Function{Val{2},Val{true},𝑭,P})(x) where {𝑭,P}
f, f′ = (F.f[1])(x, F.p), (F.f[2])(x, F.p)
f, f / f′
end
## For N ≥ 3 we return (f, (f/f', f'/f'', ...);
## Pay no attention to this code; we hand write a bunch, as the
## general formula later runs more slowly.
function (F::Callable_Function{Val{3},Val{true},𝑭,Nothing})(x) where {𝑭}
f, f′, f′′ = (F.f[1])(x), (F.f[2])(x), (F.f[3])(x)
f, (f / f′, f′ / f′′)
end
function (F::Callable_Function{Val{3},Val{true},𝑭,P})(x) where {𝑭,P}
f, f′, f′′ = (F.f[1])(x, F.p), (F.f[2])(x, F.p), (F.f[3])(x, F.p)
f, (f / f′, f′ / f′′)
end
function (F::Callable_Function{Val{4},Val{true},𝑭,Nothing})(x) where {𝑭}
f, f′, f′′, f′′′ = (F.f[1])(x), (F.f[2])(x), (F.f[3])(x), (F.f[4])(x)
f, (f / f′, f′ / f′′, f′′ / f′′′)
end
function (F::Callable_Function{Val{4},Val{true},𝑭,P})(x) where {𝑭,P}
f, f′, f′′, f′′′ =
(F.f[1])(x, F.p), (F.f[2])(x, F.p), (F.f[3])(x, F.p), (F.f[4])(x, F.p)
𝐓 = eltype(f / f′)
f, NTuple{3,𝐓}((f / f′, f′ / f′′, f′′ / f′′′))
end
function (F::Callable_Function{Val{5},Val{true},𝑭,Nothing})(x) where {𝑭}
f, f′, f′′, f′′′, f′′′′ =
(F.f[1])(x), (F.f[2])(x), (F.f[3])(x), (F.f[4])(x), (F.f[5])(x)
f, (f / f′, f′ / f′′, f′′ / f′′′, f′′′ / f′′′′)
end
function (F::Callable_Function{Val{5},Val{true},𝑭,P})(x) where {𝑭,P}
f, f′, f′′, f′′′, f′′′′ = (F.f[1])(x, F.p),
(F.f[2])(x, F.p),
(F.f[3])(x, F.p),
(F.f[4])(x, F.p),
(F.f[5])(x, F.p)
f, (f / f′, f′ / f′′, f′′ / f′′′, f′′′ / f′′′′)
end
function (F::Callable_Function{Val{6},Val{true},𝑭,Nothing})(x) where {𝑭}
f, f′, f′′, f′′′, f′′′′, f′′′′′ =
(F.f[1])(x), (F.f[2])(x), (F.f[3])(x), (F.f[4])(x), (F.f[5])(x), (F.f[6])(x)
f, (f / f′, f′ / f′′, f′′ / f′′′, f′′′ / f′′′′, f′′′′ / f′′′′′)
end
function (F::Callable_Function{Val{6},Val{true},𝑭,P})(x) where {𝑭,P}
f, f′, f′′, f′′′, f′′′′, f′′′′′ = (F.f[1])(x, F.p),
(F.f[2])(x, F.p),
(F.f[3])(x, F.p),
(F.f[4])(x, F.p),
(F.f[5])(x, F.p),
(F.f[6])(x, F.p)
f, (f / f′, f′ / f′′, f′′ / f′′′, f′′′ / f′′′′, f′′′′ / f′′′′′)
end
# faster with the above written out, should generate them...
function (F::Callable_Function{Val{𝐍},Val{true},𝑭,Nothing})(x) where {𝐍,𝑭}
fs = ntuple(i -> F.f[i](x), Val(𝐍))
first(fs), ntuple(i -> fs[i] / fs[i + 1], Val(𝐍 - 1))
end
function (F::Callable_Function{Val{𝐍},Val{true},𝑭,P})(x) where {𝐍,𝑭,P}
fs = ntuple(i -> F.f[i](x, F.p), Val(𝐍))
first(fs), ntuple(i -> fs[i] / fs[i + 1], Val(𝐍 - 1))
end
## Init for hybrid method -- start with a non-bracketing, finish with bracketing
## When passing 2 methods, any parameters must be passed as a named argument through
## the keyword p
function init(
𝑭𝑿::ZeroProblem,
M::AbstractNonBracketingMethod,
N::AbstractBracketingMethod,
p′=nothing;
p=nothing,
verbose::Bool=false,
tracks=NullTracks(),
kwargs...,
)
F = Callable_Function(M, 𝑭𝑿.F, something(p′, p, missing))
state = init_state(M, F, 𝑭𝑿.x₀)
options = init_options(M, state; kwargs...)
l = Tracks(verbose, tracks, state)
incfn(l, initial_fncalls(M))
ZeroProblemIterator(M, N, F, state, options, l)
end
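# e.g. (a sketch): iterate with a non-bracketing method, switching to the
# bracketing method once a sign change is encountered:
#
#     Z = init(ZeroProblem(sin, 3.0), Roots.Secant(), Roots.A42())
#     solve!(Z)  # ≈ π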
# Robust version using some tricks: idea from algorithm described in
# [The SOLVE button from the
# HP-34C](http://www.hpl.hp.com/hpjournal/pdfs/IssuePDFs/1979-12.pdf).
# * use bracketing method if one is identified
# * limit steps so as not too far or too near the previous one
# * if not decreasing, use a quad step upto 4 times to bounce out of trap, if possible
# First uses M, then N if bracket is identified
function solve!(
𝐙::ZeroProblemIterator{𝐌,𝐍};
verbose=false,
) where {𝐌,𝐍<:AbstractBracketingMethod}
M, N, F, state, options, l = 𝐙.M, 𝐙.N, 𝐙.F, 𝐙.state, 𝐙.options, 𝐙.logger
incfn(l, 2)
log_step(l, M, state; init=true)
log_method(l, M)
log_nmethod(l, N)
quad_ctr = 0
flag = :not_converged
ctr = 0
α = nan(typeof(state.xn1)) * state.xn1
while true
ctr += 1
flag, converged = assess_convergence(M, state, options)
converged && break
ctr >= options.maxiters && break
state0 = state
state0, stopped = update_state(M, F, state0, options) # state0 is proposed step
## did we find a zero or a bracketing interval?
if iszero(state0.fxn1)
state = state0
break
elseif sign(state0.fxn0) * sign(state0.fxn1) < 0
log_step(l, M, state0)
!isa(l, NullTracks) && log_message(
l,
"Used bracketing method $N on [$(min(state0.xn0,state0.xn1)),$(max(state0.xn0,state0.xn1))]",
)
Fₙ = Callable_Function(N, F)
stateₙ = init_state(N, state0, Fₙ) # save function calls by using state0 values
optionsₙ = init_options(N, stateₙ)
α = solve!(init(N, Fₙ, stateₙ, optionsₙ, l))
log_method(l, M)
verbose && display(l)
return α
end
## did we move too far?
adj = false
r, a, b = state0.xn1, state.xn0, state.xn1
Δr = abs(r - b)
Δx = abs(b - a)
ts, TB = one(r) / 1000, 100 * one(r) # too small, too big
if Δr >= TB * Δx
adj = true
r = b + sign(r - b) * TB * Δx ## too big
elseif Δr <= ts * Δx
adj = true
r = b + sign(r - b) * ts * Δx
end
@reset state0.xn1 = r
@reset state0.fxn1 = first(F(r))
incfn(l)
# a sign change after shortening?
if sign(state.fxn1) * sign(state0.fxn1) < 0
log_step(l, M, state)
a, b = state.xn1, state0.xn1
fa, fb = state.fxn1, state0.fxn1
!isa(l, NullTracks) && log_message(l, "Used bracketing method $N on [$a,$b]")
Fₙ = Callable_Function(N, F)
stateₙ = init_state(N, Fₙ, a, b, fa, fb)
optionsₙ = init_options(N, stateₙ)
α = solve!(init(N, Fₙ, stateₙ, optionsₙ, l))
log_method(l, M)
verbose && display(l)
return α
end
## did we improve?
if adj || abs(state0.fxn1) < abs(state.fxn1)
if isnan(state0.xn1) ||
isnan(state0.fxn1) ||
isinf(state0.xn1) ||
isinf(state0.fxn1)
break
end
state = state0
log_step(l, M, state)
quad_ctr = 0
continue
end
## try quad_vertex, unless that has gotten old
if quad_ctr > 4
state = state0
break
else
quad_ctr += 1
r = quad_vertex(
state0.xn1,
state0.fxn1,
state.xn1,
state.fxn1,
state.xn0,
state.fxn0,
)
if isnan(r) || isinf(r)
state = state0
else
fr = F(r)
incfn(l)
@reset state0.xn1 = r
@reset state0.fxn1 = fr
state = state0
end
end
log_step(l, M, state)
end
val, stopped = assess_convergence(M, state, options)
α = decide_convergence(M, F, state, options, val)
log_convergence(l, val)
log_last(l, α)
verbose && display(l)
isnan(α) ? decide_convergence(M, F, state, options, flag) : α
end
function find_zero(
fs,
x0,
M::AbstractUnivariateZeroMethod,
N::AbstractBracketingMethod,
p′=nothing;
verbose=false,
kwargs...,
)
𝐏 = ZeroProblem(fs, x0)
solve!(init(𝐏, M, N, p′; verbose=verbose, kwargs...), verbose=verbose)
end
# some simpler (and faster) implementations for root finding
#
# Not exported
#
# These avoid the setup costs of the `find_zero` method, so should be faster
# though they will take similar number of function calls.
#
# `Roots.bisection(f, a, b)` (Bisection).
# `Roots.secant_method(f, xs)` (Order1) secant method
# `Roots.dfree(f, xs)` (Order0) more robust secant method
#
## Bisection
##
## Essentially from Jason Merrill https://gist.github.com/jwmerrill/9012954
## cf. http://squishythinking.com/2014/02/22/bisecting-floats/
## This also borrows a trick from https://discourse.julialang.org/t/simple-and-fast-bisection/14886
## where we keep x1 so that y1 is negative, and x2 so that y2 is positive
## this allows the use of signbit over y1*y2 < 0 which avoid < and a multiplication
## this has a small, but noticeable impact on performance.
"""
bisection(f, a, b; [xatol, xrtol])
Performs bisection method to find a zero of a continuous
function.
It is assumed that `(a,b)` is a bracket, that is, the function has
different signs at `a` and `b`. The interval `(a,b)` is converted to floating point
and shrunk when `a` or `b` is infinite. The function `f` may take infinite
values. If `f` is not continuous, the algorithm may find points where `f`
jumps over the x axis, not just zeros.
If non-trivial tolerances are specified, the process will terminate
when the bracket `(a,b)` satisfies `isapprox(a, b, atol=xatol,
rtol=xrtol)`. For zero tolerances, the default, for `Float64`, `Float32`,
or `Float16` values, the process will terminate at a value `x` with
`f(x)=0`, `f(x)*f(prevfloat(x)) < 0`, or `f(x)*f(nextfloat(x)) <
0`. For other number types, the `Roots.A42` method is used.
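Examples:

```julia
Roots.bisection(sin, 3, 4)                       # ≈ π
Roots.bisection(x -> x^2 - 2, 1, 2; xatol=1e-6)  # stop once the bracket is this small
```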
"""
function bisection(f, a::Number, b::Number; xatol=nothing, xrtol=nothing)
x1, x2 = adjust_bracket(float.((a, b)))
T = eltype(x1)
atol = xatol === nothing ? zero(T) : abs(xatol)
rtol = xrtol === nothing ? zero(one(T)) : abs(xrtol)
CT = iszero(atol) && iszero(rtol) ? Val(:exact) : Val(:inexact)
x1, x2 = float(x1), float(x2)
y1, y2 = f(x1), f(x2)
_unitless(y1 * y2) >= 0 && error("the interval provided does not bracket a root")
if isneg(y2)
x1, x2, y1, y2 = x2, x1, y2, y1
end
xm = Roots._middle(x1, x2) # for possibly mixed sign x1, x2
ym = f(xm)
while true
if has_converged(CT, x1, x2, xm, ym, atol, rtol)
return xm
end
if isneg(ym)
x1, y1 = xm, ym
else
x2, y2 = xm, ym
end
xm = Roots.__middle(x1, x2)
ym = f(xm)
end
end
# -0.0 not returned by __middle, so isneg true on [-Inf, 0.0)
@inline isneg(x::T) where {T<:AbstractFloat} = signbit(x)
@inline isneg(x) = _unitless(x) < 0
@inline function has_converged(::Val{:exact}, x1, x2, m, ym, atol, rtol)
iszero(ym) && return true
isnan(ym) && return true
x1 != m && m != x2 && return false
return true
end
@inline function has_converged(::Val{:inexact}, x1, x2, m, ym, atol, rtol)
iszero(ym) && return true
isnan(ym) && return true
val = abs(x1 - x2) <= atol + max(abs(x1), abs(x2)) * rtol
return val
end
#=
"""
Roots.a42(f, ab; atol=nothing, rtol=nothing, λ=0.7, μ = 0.5)
Direct implementation of Alefeld, Potra, and Shi's Algorithm 4.2. See also [`A42()`](@ref).
* `f`: function to find zero of. (If `f` is 4-times continuously differentiable, convergence to a simple root will be like ``(2 + 7^{1/2})^{1/3} = 1.6686...``.)
* `ab`: a *bracketing* interval
* `atol`, `rtol`: optional tolerances. These are `0` and `eps` respectively by default.
Not exported
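Example:

```julia
Roots.a42(sin, (3, 4))  # ≈ π
```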
"""
=#
function a42(f, ab; atol=nothing, rtol=nothing, λ=0.7, μ=0.5)
a, b = adjust_bracket(ab)
δ₀ = b - a
fa, fb = f(a), f(b)
assert_bracket(fa, fb)
tols = (
λ = λ,
atol = isnothing(atol) ? zero(one(a)) : atol,
rtol = isnothing(rtol) ? eps(one(a)) : rtol,
)
c = a - fa * (b - a) / (fb - fa)
c = avoid_boundaries(a, c, b, fa, fb, tols)
fc = f(c)
iszero(fc) && return c
ee, fee = c, fc
a, b, d, fa, fb, fd = bracket(a, b, c, fa, fb, fc)
n = 2
while true
δ = tolₑ(a, b, fa, fb, tols.atol, tols.rtol)
(b - a) ≤ δ && return (abs(fa) < abs(fb) ? a : b)
ee, fee = d, fd
for k in 1:2
if n == 2 || iszero(_pairwise_prod(fa, fb, fd, fee))
c = newton_quadratic(a, b, d, fa, fb, fd, k + 1)
else
c = ipzero(a, b, d, ee, fa, fb, fd, fee)
if (c <= a || b <= c)
c = newton_quadratic(a, b, d, fa, fb, fd, k + 1)
end
end
n += 1
c = avoid_boundaries(a, c, b, fa, fb, tols)
fc = f(c)
iszero(fc) && return c
ee, fee = d, fd
a, b, d, fa, fb, fd = bracket(a, b, c, fa, fb, fc)
δ = tolₑ(a, b, fa, fb, tols.atol, tols.rtol)
(b - a) ≤ 2δ && return (abs(fa) < abs(fb) ? a : b)
end
n += 1
u, fu = abs(fa) < abs(fb) ? (a, fa) : (b, fb)
c = u - 2 * fu * (b - a) / (fb - fa)
if 2abs(c - u) > (b - a)
c = a / 2 + b / 2
end
c = avoid_boundaries(a, c, b, fa, fb, tols)
fc = f(c)
iszero(fc) && return c
ee, fee = d, fd
a, b, d, fa, fb, fd = bracket(a, b, c, fa, fb, fc)
δ = tolₑ(a, b, fa, fb, tols.atol, tols.rtol)
(b - a) ≤ 2δ && return (abs(fa) < abs(fb) ? a : b)
if (b - a) ≥ μ * δ₀
c = a / 2 + b / 2
fc = f(c)
iszero(fc) && return c
ee, fee = d, fd
a, b, d, fa, fb, fd = bracket(a, b, c, fa, fb, fc)
end
n += 1
end
end
"""
secant_method(f, xs; [atol=0.0, rtol=8eps(), maxevals=100])
Perform secant method to solve `f(x) = 0.`
The secant method is an iterative method with update step
given by `b - fb/m` where `m` is the slope of the secant line between
`(a,fa)` and `(b,fb)`.
The initial values can be specified as a pair of 2, as in `(x₀, x₁)` or
`[x₀, x₁]`, or as a single value, `x₁` in which case a value of `x₀` is chosen.
The algorithm returns m when `abs(fm) <= max(atol, abs(m) * rtol)`.
If this doesn't occur before `maxevals` steps or the algorithm
encounters an issue, a value of `NaN` is returned. If too many steps
are taken, the current value is checked to see if there is a sign
change for neighboring floating point values.
The `Order1` method for `find_zero` also implements the secant
method. This one should be slightly faster, as there are fewer setup costs.
Examples:
```julia
Roots.secant_method(sin, (3,4))
Roots.secant_method(x -> x^5 -x - 1, 1.1)
```
!!! note "Specialization"
This function will specialize on the function `f`, so that the initial
call can take more time than a call to the `Order1()` method, though
subsequent calls will be much faster. Using `FunctionWrappers.jl` can
ensure that the initial call is also equally as fast as subsequent
ones.
"""
function secant_method(
f,
xs;
atol=zero(float(real(first(xs)))),
rtol=8eps(one(float(real(first(xs))))),
maxevals=100,
)
if length(xs) == 1 # secant needs x0, x1; only x0 given
a = float(xs[1])
h = eps(one(real(a)))^(1 / 3)
da = h * oneunit(a) + abs(a) * h^2 # adjust for if eps(a) > h
b = a + da
else
a, b = promote(float(xs[1]), float(xs[2]))
end
secant(f, a, b, atol, rtol, maxevals)
end
function secant(f, a::T, b::T, atol=zero(T), rtol=8eps(T), maxevals=100) where {T}
nan = (0a) / (0a)
cnt = 0
fa, fb = f(a), f(b)
fb == fa && return nan
uatol = atol / oneunit(atol) * oneunit(real(a))
adjustunit = oneunit(real(fb)) / oneunit(real(b))
while cnt < maxevals
m = b - (b - a) * fb / (fb - fa)
fm = f(m)
iszero(fm) && return m
isnan(fm) && return nan
abs(fm) <= adjustunit * max(uatol, abs(m) * rtol) && return m
if fm == fb
sign(fm) * sign(f(nextfloat(m))) <= 0 && return m
sign(fm) * sign(f(prevfloat(m))) <= 0 && return m
return nan
end
a, b, fa, fb = b, m, fb, fm
cnt += 1
end
return nan
end
"""
muller(f, xᵢ; xatol=nothing, xrtol=nothing, maxevals=300)
muller(f, xᵢ₋₂, xᵢ₋₁, xᵢ; xatol=nothing, xrtol=nothing, maxevals=300)
> *Muller’s method* generalizes the secant method, but uses quadratic
> interpolation among three points instead of linear interpolation between two.
> Solving for the zeros of the quadratic allows the method to find complex
> pairs of roots.
> Given three previous guesses for the root `xᵢ₋₂`, `xᵢ₋₁`, `xᵢ`, and the values
> of the polynomial `f` at those points, the next approximation `xᵢ₊₁` is produced.
Excerpt and the algorithm taken from
> W.H. Press, S.A. Teukolsky, W.T. Vetterling and B.P. Flannery
> *Numerical Recipes in C*, Cambridge University Press (2002), p. 371
Convergence here is decided by `xᵢ₊₁ ≈ xᵢ` using the tolerances specified,
which both default to `eps(one(typeof(abs(xᵢ))))^(4/5)` in the appropriate units.
Each iteration performs three evaluations of `f`.
The first method picks two remaining points at random in relative proximity of `xᵢ`.
Note that the method may return complex result even for real initial values
as this depends on the function.
Examples:
```
muller(x->x^3-1, 0.5, 0.5im, -0.5) # → -0.500 + 0.866…im
muller(x->x^2+2, 0.0, 0.5, 1.0) # → ≈ 0.00 - 1.41…im
muller(x->(x-5)*x*(x+5), rand(3)...) # → ≈ 0.00
muller(x->x^3-1, 1.5, 1.0, 2.0) # → 2.0, Not converged
```
"""
muller(f, x₀::T; kwargs...) where {T} = muller(f, (rand(T, 2) .* x₀)..., x₀; kwargs...)
muller(f, xᵢ₋₂, xᵢ₋₁, xᵢ; kwargs...) = muller(f, promote(xᵢ₋₂, xᵢ₋₁, xᵢ)...; kwargs...)
function muller(
f,
oldest::T,
older::T,
old::T;
xatol=nothing,
xrtol=nothing,
maxevals=300,
) where {T}
@assert old ≠ older ≠ oldest ≠ old # we want q to be non-degenerate
xᵢ₋₂, xᵢ₋₁, xᵢ = oldest, older, old
fxᵢ₋₂, fxᵢ₋₁ = f(xᵢ₋₂), f(xᵢ₋₁)
RT = typeof(abs(oldest))
atol = xatol !== nothing ? xatol : oneunit(RT) * (eps(one(RT)))^(4 / 5)
rtol = xrtol !== nothing ? xrtol : eps(one(RT))^(4 / 5)
for i in 1:(maxevals ÷ 3)
# one evaluation per iteration
fxᵢ = f(xᵢ)
x = muller_step(xᵢ₋₂, xᵢ₋₁, xᵢ, fxᵢ₋₂, fxᵢ₋₁, fxᵢ)
if isnan(x)
@warn "The algorithm might not have converged, stopping at i=$i:" abs(xᵢ - xᵢ₋₁)
return xᵢ
end
# @debug "Iteration $i:" xᵢ₋₂ xᵢ₋₁ xᵢ x abs(x-xᵢ)
xᵢ₋₂, xᵢ₋₁, xᵢ = xᵢ₋₁, xᵢ, x
fxᵢ₋₂, fxᵢ₋₁ = fxᵢ₋₁, fxᵢ
#stopping criterion
isapprox(xᵢ, xᵢ₋₁, atol=atol, rtol=rtol) && return xᵢ
end
@warn "The algorithm might not have converged, maxevals limit hit:" abs(xᵢ₋₁ - xᵢ)
return xᵢ
end
function muller_step(a, b, c, fa, fb, fc)
a, b, c = promote(a, b, c)
q = qq(a, b, c)
q² = q^2
q1 = q + one(q)
A = q * fc - q * q1 * fb + q² * fa
B = (q1 + q) * fc - q1^2 * fb + q² * fa
C = q1 * fc
den = let
Δ = B^2 - 4A * C
typeof(Δ) <: Real &&
Δ < 0 &&
throw(
DomainError(
Δ,
"Discriminant is negative and the function most likely has complex roots. You might want to call muller with complex input.",
),
)
Δ = √Δ
d⁺ = B + Δ
d⁻ = B - Δ
abs(d⁺) > abs(d⁻) ? d⁺ : d⁻
end
return c - (c - b) * 2C / den
end
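# Sanity check (a sketch): the parabola through (0, -1), (0.5, -0.75), (2, 3) is
# f(x) = x^2 - 1, and `muller_step(0, 0.5, 2, -1.0, -0.75, 3.0)` returns its root 1.0 exactly.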
@inline qq(a, b, c) = (c - b) / (b - a)
struct TupleWrapper{F,Fp}
f::F
fp::Fp
end
(F::TupleWrapper)(x) = begin
u, v = F.f(x), F.fp(x)
return (u, u / v)
end
#=
"""
newton((f, f'), x0; xatol=nothing, xrtol=nothing, maxevals=100)
newton(fΔf, x0; xatol=nothing, xrtol=nothing, maxevals=100)
Newton's method.
Function may be passed in as a tuple (f, f') *or* as function which returns (f,f/f').
Examples:
```
newton((sin, cos), 3.0)
newton(x -> (sin(x), sin(x)/cos(x)), 3.0, xatol=1e-10, xrtol=1e-10)
```
Note: unlike the call `newton(f, fp, x0)`--which dispatches to a method of `find_zero`, these
two interfaces will specialize on the function that is passed in. This means, these functions
will be faster for subsequent calls, but may be slower for an initial call.
Convergence here is decided by `xₙ ≈ xₙ₋₁` using the tolerances specified, which both default to
`eps(T)^(4/5)` in the appropriate units.
If convergence fails, a `ConvergenceFailed` error is thrown.
"""
=#
newton(f::Tuple, x0; kwargs...) = newton(TupleWrapper(f[1], f[2]), x0; kwargs...)
function newton(f, x0; xatol=nothing, xrtol=nothing, maxevals=100)
x = float(x0)
T = typeof(x)
atol = xatol !== nothing ? xatol : oneunit(T) * (eps(one(T)))^(4 / 5)
rtol = xrtol !== nothing ? xrtol : eps(one(T))^(4 / 5)
xo = Inf
for i in 1:maxevals
fx, Δx = f(x)
iszero(fx) && return x
x -= Δx
if isapprox(x, xo, atol=atol, rtol=rtol)
return x
end
xo = x
end
throw(ConvergenceFailed("No convergence"))
end
## --------------------------------------------------
## This is basically Order0(), but with different, default, tolerances employed
## It takes more function calls, but works harder to find exact zeros
## where exact means either iszero(fx), adjacent floats have sign change, or
## abs(fxn) <= 8 eps(xn)
"""
dfree(f, xs)
A more robust secant method implementation
Solve for `f(x) = 0` using an algorithm from *Personal Calculator Has Key
to Solve Any Equation f(x) = 0*, the SOLVE button from the
[HP-34C](http://www.hpl.hp.com/hpjournal/pdfs/IssuePDFs/1979-12.pdf).
This is also implemented as the `Order0` method for `find_zero`.
The initial values can be specified as a pair of two values, as in
`(a,b)` or `[a,b]`, or as a single value, in which case a value of `b`
is computed, possibly from `fb`. The basic idea is to follow the
secant method to convergence unless:
* a bracket is found, in which case `AlefeldPotraShi` is used;
* the secant method is not converging, in which case a few steps of a
quadratic method are used to see if that improves matters.
Convergence occurs when `f(m) == 0`, there is a sign change between
`m` and an adjacent floating point value, or `abs(f(m)) <= 2^3*eps(m)`.
A value of `NaN` is returned if the algorithm takes too many steps
before identifying a zero.
# Examples
```julia
Roots.dfree(x -> x^5 - x - 1, 1.0)
```
"""
function dfree(f, xs)
if length(xs) == 1
a = float(xs[1])
fa = f(a)
h = eps(one(a))^(1 / 3)
da = h * oneunit(a) + abs(a) * h^2 # adjust for if eps(a) > h
b = float(a + da)
fb = f(b)
else
a, b = promote(float(xs[1]), float(xs[2]))
fa, fb = f(a), f(b)
end
nan = (0 * a) / (0 * a) # try to preserve type
cnt, MAXCNT = 0, 5 * ceil(Int, -log(eps(one(a)))) # must be higher for BigFloat
MAXQUAD = 3
if abs(fa) > abs(fb)
a, fa, b, fb = b, fb, a, fa
end
# we keep a, b, fa, fb, gamma, fgamma
quad_ctr = 0
while !iszero(fb)
cnt += 1
if sign(fa) * sign(fb) < 0
return solve(ZeroProblem(f, (a, b))) # faster than bisection(f, a, b)
end
# take a secant step
gamma = float(b - (b - a) * fb / (fb - fa))
# modify if gamma is too small or too big
if iszero(abs(gamma - b))
gamma = b + 1 / 1000 * abs(b - a) # too small
elseif abs(gamma - b) >= 100 * abs(b - a)
gamma = b + sign(gamma - b) * 100 * abs(b - a) ## too big
end
fgamma = f(gamma)
# change sign
if sign(fgamma) * sign(fb) < 0
return solve(ZeroProblem(f, (gamma, b))) # faster than bisection(f, gamma, b)
end
# decreasing
if abs(fgamma) < abs(fb)
a, fa, b, fb = b, fb, gamma, fgamma
quad_ctr = 0
cnt < MAXCNT && continue
end
gamma = float(quad_vertex(a, fa, b, fb, gamma, fgamma))
fgamma = f(gamma)
# decreasing now?
if abs(fgamma) < abs(fb)
a, fa, b, fb = b, fb, gamma, fgamma
quad_ctr = 0
cnt < MAXCNT && continue
end
quad_ctr += 1
if (quad_ctr > MAXQUAD) || (cnt > MAXCNT) || iszero(gamma - b) || isnan(gamma)
bprev, bnext = prevfloat(b), nextfloat(b)
fbprev, fbnext = f(bprev), f(bnext)
sign(fb) * sign(fbprev) < 0 && return b
sign(fb) * sign(fbnext) < 0 && return b
for (u, fu) in ((b, fb), (bprev, fbprev), (bnext, fbnext))
abs(fu) / oneunit(fu) <= 2^3 * eps(u / oneunit(u)) && return u
end
return nan # Failed.
end
if abs(fgamma) < abs(fb)
b, fb, a, fa = gamma, fgamma, b, fb
else
a, fa = gamma, fgamma
end
end
b
end
### States
struct UnivariateZeroState{T,S} <: AbstractUnivariateZeroState{T,S}
xn1::T
xn0::T
fxn1::S
fxn0::S
end
TS(::AbstractUnivariateZeroState{T,S}) where {T,S} = T, S
# simple helper to set main properties of a state object
function _set(state, xf1)
x, fx = xf1
@reset state.xn1 = x
@reset state.fxn1 = fx
state
end
function _set(state, xf1, xf0)
x, fx = xf1
@reset state.xn1 = x
@reset state.fxn1 = fx
x, fx = xf0
@reset state.xn0 = x
@reset state.fxn0 = fx
state
end
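# e.g.: state′ = _set(state, (x₂, fx₂))  # copy of `state` with (xₙ, f(xₙ)) replaced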
# init_state(M, F, x; kwargs...)
# init_state(M, F x₀,x₁,fx₀,fx₁; kwargs...)
# init_state(M, state, F)
#
# A state holds at a minimum:
#
# * the values xₙ₋₁, xₙ and f(xₙ₋₁), f(xₙ) along with
# * some method-specific values
#
#
# A state is initialized with `init_state(M, F, x)` which sets up xₙ₋₁, xₙ, f(xₙ₋₁), f(xₙ)
# which then calls `init_state(M, F, xₙ₋₁, xₙ, f(xₙ₋₁), f(xₙ))` to finish the initialization
# to change to a new state use `init_state(M, state, F)`
# basic idea to convert from N to M:
# Fₘ = some state
# stateₘ = init_state(M, stateₙ, Fₘ)
function init_state(M::AbstractUnivariateZeroMethod, state::AbstractUnivariateZeroState, F)
init_state(M, F, state.xn0, state.xn1, state.fxn0, state.fxn1)
end
# init_state(M,F,x) --> call init_state(M,F,x₀,x₁,fx₀, fx₁)
function init_state(M::AbstractUnivariateZeroMethod, F, x)
error("no default method")
end
# initialize from xs, fxs
function init_state(::AbstractUnivariateZeroMethod, F, x₀, x₁, fx₀, fx₁)
error("no default method")
end
Base.last(state::AbstractUnivariateZeroState, M::AbstractNonBracketingMethod) = state.xn1
### --------------------------------------------------
## Tracks (for logging actual steps)
## when no logging this should get optimized out
## when logging, this allocates
abstract type AbstractTracks end
struct NullTracks <: AbstractTracks end
# logging api
# how many function evaluations in init_state
# this is a worst case estimate leading to the function call count being an upper bound only
initial_fncalls(M::AbstractUnivariateZeroMethod) = @warn "initial_fncalls fix $M"
log_step(s::NullTracks, M, x; init=false) = nothing # log a step (x,f(x)) or (a,b)
log_iteration(::NullTracks, n=1) = nothing # log an iteration (call to update state)
log_fncall(::NullTracks, n=1) = nothing # add a function call (aliased to incfn); only one called in update_step
log_message(::NullTracks, msg) = nothing # append to a message
log_convergence(::NullTracks, msg) = nothing # flag for convergence
log_last(::NullTracks, state) = nothing # log α
log_method(::NullTracks, method) = nothing # record M
log_nmethod(::NullTracks, method) = nothing # record N (if hybrid)
incfn(T::AbstractTracks, i=1) = log_fncall(T, i) # legacy alias
# a tracks object to record tracks
"""
Roots.Tracks{T,S}
A `Tracks` instance is used to record the progress of an algorithm.
`T` is the type of function inputs, and `S` is the type of function outputs. They
both default to `Float64`. Note that because this type is not exported, you have to
write `Roots.Tracks()` to construct a `Tracks` object.
By default, no tracking is done while finding a root.
To change this, construct a `Tracks` object, and pass it to the keyword argument `tracks`.
This will modify the `Tracks` object, storing the input and function values at each iteration,
along with additional information about the root-finding process.
`Tracks` objects are shown in an easy-to-read
format. Internally either a tuple of `(x,f(x))` pairs or `(aₙ, bₙ)`
pairs are stored, the latter for bracketing methods. (These
implementation details may change without notice.) The methods
`empty!` (to reset the `Tracks` object), `get` (to retrieve the recorded steps), and
`last` (to retrieve the converged value) may be of interest.
If you only want to print the information but don't need it later, this can conveniently be
done by passing `verbose=true` to the root-finding function. This will not
affect the return value, which will still be the root of the function.
## Examples
```jldoctest Tracks
julia> using Roots
julia> f(x) = x^2-2
f (generic function with 1 method)
julia> tracker = Roots.Tracks()
Algorithm has not been run
julia> find_zero(f, (0, 2), Roots.Secant(), tracks=tracker) ≈ √2
true
julia> tracker
Results of univariate zero finding:
* Converged to: 1.4142135623730947
* Algorithm: Secant()
* iterations: 7
* function evaluations ≈ 9
* stopped as |f(x_n)| ≤ max(δ, |x|⋅ϵ) using δ = atol, ϵ = rtol
Trace:
x₁ = 0, fx₁ = -2
x₂ = 2, fx₂ = 2
x₃ = 1, fx₃ = -1
x₄ = 1.3333333333333333, fx₄ = -0.22222222222222232
x₅ = 1.4285714285714286, fx₅ = 0.04081632653061229
x₆ = 1.4137931034482758, fx₆ = -0.0011890606420930094
x₇ = 1.4142114384748701, fx₇ = -6.0072868388605372e-06
x₈ = 1.4142135626888697, fx₈ = 8.9314555751229818e-10
x₉ = 1.4142135623730947, fx₉ = -8.8817841970012523e-16
julia> empty!(tracker) # resets
julia> find_zero(sin, (3, 4), Roots.A42(), tracks=tracker) ≈ π
true
julia> get(tracker)
4-element Vector{NamedTuple{names, Tuple{Float64, Float64}} where names}:
(a = 3.0, b = 4.0)
(a = 3.0, b = 3.157162792479947)
(a = 3.141592614491745, b = 3.1415926926910007)
(a = 3.141592653589793, b = 3.141592653589794)
julia> last(tracker)
3.141592653589793
```
!!! note
As designed, the `Tracks` object may not record actions taken
while the state object is initialized. An example is the default
bisection algorithm where an initial midpoint is found to ensure
the bracket does not straddle ``0``.
"""
mutable struct Tracks{T,S} <: AbstractTracks
xfₛ::Vector{Tuple{T,S}} # (x,f(x))
abₛ::Vector{Tuple{T,T}} # (aᵢ, bᵢ)
steps::Int
fncalls::Int
convergence_flag::Symbol
message::String
alpha::T
method
nmethod
end
Tracks(T, S) = Tracks(
Tuple{T,S}[],
Tuple{T,T}[],
0,
0,
:algorithm_not_run,
"",
nan(T),
nothing,
nothing,
)
Tracks(s::AbstractUnivariateZeroState{T,S}) where {T,S} = Tracks(T, S)
Tracks(verbose, tracks, state::AbstractUnivariateZeroState{T,S}) where {T,S} =
(verbose && isa(tracks, NullTracks)) ? Tracks(T, S) : tracks
Tracks() = Tracks(Float64, Float64) # give default
function log_step(l::Tracks, M::AbstractNonBracketingMethod, state; init=false)
init && push!(l.xfₛ, (state.xn0, state.fxn0))
push!(l.xfₛ, (state.xn1, state.fxn1))
!init && log_iteration(l, 1)
nothing
end
log_fncall(l::Tracks, i=1) = (l.fncalls += i; nothing)
log_iteration(l::Tracks, n=1) = (l.steps += n; nothing)
log_message(l::Tracks, msg) = (l.message *= msg; nothing)
log_convergence(l::Tracks, msg) = (l.convergence_flag = msg; nothing)
log_last(l::Tracks, α) = (l.alpha = α; nothing)
log_method(l::Tracks, method) = (l.method = method; nothing)
log_nmethod(l::Tracks, method) = (l.nmethod = method; nothing)
# copy some API from ValueHistories
Base.first(l::AbstractTracks) = (@warn "No tracking information was kept"; nothing)
function Base.first(l::Tracks)
l.convergence_flag == :algorithm_not_run && error("Algorithm not run")
!isempty(l.xfₛ) && return (x₁=l.xfₛ[1][1], f₁=l.xfₛ[1][2])
return (a₁=l.abₛ[1][1], b₁=l.abₛ[1][2])
end
# return last value of algorithm
Base.last(l::AbstractTracks) = (@warn "No tracking information was kept"; nothing)
function Base.last(l::Tracks)
convergence_flag = l.convergence_flag
α = l.alpha
if convergence_flag == :algorithm_not_run
@warn "The algorithm has not run"
end
α
end
# Returns all available observations.
Base.get(l::NullTracks) = (@warn "No tracking information was kept"; nothing)
function Base.get(l::Tracks)
xf = [(xn=xᵢ, fn=fᵢ) for (xᵢ, fᵢ) in l.xfₛ]
ab = [(a=min(u, v), b=max(u, v)) for (u, v) in l.abₛ]
vcat(xf, ab)
end
# reset tracker
Base.empty!(l::NullTracks) = nothing
function Base.empty!(l::Tracks{T,S}) where {T,S}
empty!(l.xfₛ)
empty!(l.abₛ)
l.steps = 0
l.fncalls = 0
l.convergence_flag = :algorithm_not_run
l.message = ""
l.alpha = nan(T)
l.method = l.nmethod = nothing
nothing
end
Base.show(io::IO, l::Tracks) = show_trace(io, l.method, l.nmethod, l)
function show_trace(io::IO, method, N, tracks)
flag = tracks.convergence_flag
if flag == :algorithm_not_run
print(io, "Algorithm has not been run")
return nothing
end
converged = !isnan(tracks.alpha)
println(io, "Results of univariate zero finding:\n")
if converged
println(io, "* Converged to: $(tracks.alpha)")
if N === nothing || length(tracks.abₛ) == 0
println(io, "* Algorithm: $(method)")
else
println(io, "* Algorithm: $(method); finished with bracketing method $N")
end
println(io, "* iterations: $(tracks.steps)")
println(io, "* function evaluations ≈ $(tracks.fncalls)")
tracks.convergence_flag == :x_converged &&
println(io, "* stopped as x_n ≈ x_{n-1} using atol=xatol, rtol=xrtol")
tracks.convergence_flag == :f_converged &&
tracks.message == "" &&
println(io, "* stopped as |f(x_n)| ≤ max(δ, |x|⋅ϵ) using δ = atol, ϵ = rtol")
tracks.convergence_flag == :exact_zero && println(io, "* stopped as f(x_n) = 0")
tracks.message != "" && println(io, "* Note: $(tracks.message)")
else
println(io, "* Convergence failed: $(tracks.message)")
println(io, "* Algorithm $(method)")
end
println(io, "")
println(io, "Trace:")
show_tracks(io, tracks, method)
end
function show_tracks(io::IO, s::Tracks, M::AbstractUnivariateZeroMethod)
# show (x,f(x))
for (i, (xi, fxi)) in enumerate(s.xfₛ)
println(
io,
@sprintf(
"%s%s = %.17g,\t %s%s = %.17g",
"x",
sprint(io -> unicode_subscript(io, i)),
float(xi),
"fx",
sprint(io -> unicode_subscript(io, i)),
float(fxi)
)
)
end
# show bracketing
i₀ = length(s.xfₛ)
for (i, (a, b)) in enumerate(s.abₛ)
j = i₀ + i
println(
io,
@sprintf(
"(%s%s, %s%s) = ( %.17g, %.17g )",
"a",
sprint(io -> unicode_subscript(io, j - 1)),
"b",
sprint(io -> unicode_subscript(io, j - 1)),
a,
b
)
)
end
println(io, "")
end
# support for complex values
# Issue 336. (Could DRY this up...)
function show_tracks(
io::IO,
s::Roots.Tracks{T,S},
M::Roots.AbstractUnivariateZeroMethod,
) where {T<:Complex,S<:Complex}
# show (x,f(x))
for (i, (xi, fxi)) in enumerate(s.xfₛ)
println(
io,
@sprintf(
"%s%s = (%.17g, %.17g),\t %s%s = (%.17g, %.17g)",
"x",
sprint(io -> Roots.unicode_subscript(io, i)),
real(xi),
imag(xi),
"fx",
sprint(io -> Roots.unicode_subscript(io, i)),
real(fxi),
imag(fxi)
)
)
end
# show bracketing
i₀ = length(s.xfₛ)
for (i, (a, b)) in enumerate(s.abₛ)
j = i₀ + i
println(
io,
@sprintf(
"(%s%s, %s%s) = ( %.17g, %.17g )",
"a",
sprint(io -> unicode_subscript(io, j - 1)),
"b",
sprint(io -> unicode_subscript(io, j - 1)),
a,
b
)
)
end
println(io, "")
end
## needs better name, but is this useful?
#=
"""
find_zerov(f, x, M; kwargs...)
Run `find_zero` return a `Tracks` object, not the value, which can be extracted via the `last` method.
"""
=#
function find_zerov(f, x, M; verbose=nothing, kwargs...)
Z = init(ZeroProblem(f, x), M; verbose=true, kwargs...)
solve!(Z)
Z.logger
end
find_zerov(f, x; verbose=nothing, kwargs...) =
find_zerov(f, x, find_zero_default_method(x); kwargs...)
##################################################
# type to throw on successful convergence
mutable struct StateConverged
x0::Number
end
# type to throw on failure
mutable struct ConvergenceFailed
reason::AbstractString
end
##################################################
## Helpers for the various methods
_unitless(x) = x / oneunit(x)
# NaN of correct type
nan(::Type{Float16}) = NaN16
nan(::Type{Float32}) = NaN32
nan(::Type{Float64}) = NaN
nan(x::T) where {T<:Number} = NaN * one(T)
nan(x::Type{T}) where {T<:Number} = NaN * one(T)
nan(::Any) = NaN
## issue with approx derivative
isissue(x) = iszero(x) || isnan(x) || isinf(x)
# of (a,fa), (b,fb) choose pair where |f| is smallest
@inline choose_smallest(a, b, fa, fb) = abs(fa) < abs(fb) ? (a, fa) : (b, fb)
@inline sort_smallest(a, b, fa, fb) = abs(fa) < abs(fb) ? (a, b, fa, fb) : (b, a, fb, fa)
# from an interable extract a bracketing interval and promote to floating point values
# used when an interval is desired
_extrema(x::Number) = throw(ArgumentError("Need extrema to return two distinct values"))
function _extrema(x)
a, b = extrema(x)
a == b && throw(ArgumentError("Need extrema to return two distinct values"))
a, b
end
# fix type instability issues of tuples of mixed types
function _extrema(x::Tuple{<:Number,<:Number})
a, b = x
a == b && throw(ArgumentError("Need extrema to return two distinct values"))
extrema(promote(a, b))
end
# used by secant. Get x₀, x₁ for x
function x₀x₁(x::Number)
x₁ = float(x)
promote(_default_secant_step(x₁), x₁)
end
function x₀x₁(x)
x₀, x₁′ = x
x₁ = first(x₁′)
promote(float(x₀), float(x₁))
end
## find a default secant step
function _default_secant_step(x1)
ϵ = eps(one(real(x1)))
h = cbrt(ϵ)
dx = h * oneunit(x1) + abs(x1) * h^2 # adjust for if eps(x1) > h
x0 = x1 + dx
x0
end
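# e.g.: x₀x₁(1.0) returns (1.0 + dx, 1.0) with dx = h + h², h = cbrt(eps())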
# a bit better than a - fa/f_ab
@inline secant_step(a, b, fa, fb) = a - fa * (b - a) / (fb - fa)
function guarded_secant_step(alpha, beta, falpha, fbeta)
fp = (fbeta - falpha) / (beta - alpha)
Δ = fbeta / fp
## odd, we get allocations if we define Delta, then beta - Delta
## Δ = beta - fbeta * (beta - alpha) / (fbeta - falpha)
if isissue(Δ)
Δ = oneunit(alpha) / 1000
elseif abs(Δ) >= 100 * abs(alpha - beta) # guard runaway
Δ = sign(Δ) * 100 * min(oneunit(alpha), abs(alpha - beta))
end
if isissue(Δ)
return (alpha + (beta - alpha) * (0.5), true) # midpoint
else
return (beta - Δ, false)
end
end
#=
"""
steff_step(M, x, fx)
Return first Steffensen step x + fx (with proper units).
May be overridden to provide a guard when fx is too large.
"""
=#
function steff_step(M::Any, x::T, fx::S) where {T,S}
x + fx * oneunit(T) / oneunit(S)
end
# return vertex of parabola through (a,fa),(b,fb),(c,fc)
# first time through, we have picture of a > b > c; |fa|, |fc| > |fb|, all same sign
function quad_vertex(c, fc, b, fb, a, fa)
fba = (fb - fa) / (b - a)
fbc = (fb - fc) / (b - c)
((a + b) - fba / (fbc - fba) * (c - a)) / 2
end
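# e.g.: quad_vertex(0, 0.0, 1, -1.0, 2, 0.0) == 1.0, the vertex of y = x^2 - 2x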
## inverse quadratic
function inverse_quadratic_step(a::T, b, c, fa, fb, fc) where {T}
s = zero(T)
s += a * fb * fc / (fa - fb) / (fa - fc) # quad step
s += b * fa * fc / (fb - fa) / (fb - fc)
s += c * fa * fb / (fc - fa) / (fc - fb)
s
end
## Different functions for approximating f'(xn)
## return fpxn and whether it is an issue
## use f[a,b] to approximate f'(x)
function _fbracket(a, b, fa, fb)
num, den = fb - fa, b - a
iszero(num) && iszero(den) && return Inf, true
out = num / den
out, isissue(out)
end
## use f[y,z] - f[x,y] + f[x,z] to approximate
function _fbracket_diff(a, b, c, fa, fb, fc)
x1, issue = _fbracket(b, c, fb, fc)
issue && return x1, issue
x2, issue = _fbracket(a, b, fa, fb)
issue && return x2, issue
x3, issue = _fbracket(a, c, fa, fc)
issue && return x3, issue
out = x1 - x2 + x3
out, isissue(out)
end
## use f[a,b] * f[a,c] / f[b,c]
function _fbracket_ratio(a, b, c, fa, fb, fc)
x1, _ = _fbracket(a, b, fa, fb)
x2, _ = _fbracket(a, c, fa, fc)
x3, _ = _fbracket(b, c, fb, fc)
out = (x1 * x2) / x3
out, isissue(out)
end
## from https://core.ac.uk/download/pdf/82282744.pdf
## signum based iteration allows one to guarantee a valid starting point
## if N is big enough. (THough this depends on algorithm, a, b and function)
## N here would need to be tuned. But, as is, this may prove useful anyways.
function identify_starting_point(f, a, b, N)
pts = range(a, stop=b, length=N + 1)
fxs = f.(pts)
sfxs = sign.(f.(pts))
identify_starting_point(a, b, sfxs)
end
function identify_starting_point(a, b, sfxs)
N = length(sfxs) - 1
p0 = a + (b - a) / 2
p1 = p0 + (b - a) / (2N) * sfxs[1] * sum(s for s in sfxs[2:(end - 1)])
p1
end
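# e.g. (worked sketch): for f(x) = x - 0.6 on [0, 1] with N = 4 the signs are
# [-1, -1, -1, 1, 1], so p1 = 0.5 + (1/8)·(-1)·(-1 - 1 + 1) = 0.625,
# nudging the midpoint toward the zero at 0.6.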
## helpers for subscripted indices used by `show_tracks`
function _unicode_subscript(io, j)
a = ("⁻", "", "", "₀", "₁", "₂", "₃", "₄", "₅", "₆", "₇", "₈", "₉")
for i in string(j)
print(io, a[Int(i) - 44])
end
end
unicode_subscript(io, j) = _unicode_subscript.(Ref(io), reverse(digits(j)))
## --------------------------------------------------
## Algorithms of Alefeld, Potra, and Shi
## Main paper is
## [Algorithm 748: enclosing zeros of continuous functions](https://na.math.kit.edu/alefeld/download/1995_Algorithm_748_Enclosing_Zeros_of_Continuous_Functions.pdf) with supporting material at
## [Algorithm 748: enclosing zeros of continuous functions](https://doi.org/10.1145/210089.210111)
## --------------------------------------------------
#=
"""
AbstractAlefeldPotraShi
An abstract type for Alefeld-Potra-Shi type bracketing problems, as discussed in [*Algorithm 748: enclosing zeros of continuous functions*](https://na.math.kit.edu/alefeld/download/1995_Algorithm_748_Enclosing_Zeros_of_Continuous_Functions.pdf). These consist of an interpolation step, such as quadratic interpolation or inverse cubic interpolation along with a double-secant step. For a smooth function and finite bracketing interval, these methods should always converge.
The `update_step` method calls a `calculateΔ` method that can be customized to turn an algorithm based on interpolation into a bracketed algorithm. See [`Roots.BracketedHalley`](@ref) for an example.
This implementation deviates slightly from the printed algorithm, as it may use an initial call to `_middle` rather than a secant step, depending on the signs of ``a`` and ``b``.
"""
=#
abstract type AbstractAlefeldPotraShi <: AbstractBracketingMethod end
initial_fncalls(::AbstractAlefeldPotraShi) = 3 # worst case assuming fx₀, fx₁,fc must be computed
## initial step, needs to log a,b,d
function log_step(l::Tracks, M::AbstractAlefeldPotraShi, state; init::Bool=false)
a, b, c = state.xn0, state.xn1, state.d
init && push!(l.abₛ, extrema((a, b, c)))
init && log_iteration(l, 1) # take an initial step
push!(l.abₛ, (a, b))
!init && log_iteration(l, 1)
nothing
end
struct AbstractAlefeldPotraShiState{T,S} <: AbstractUnivariateZeroState{T,S}
xn1::T
xn0::T
d::T
ee::T
fxn1::S
fxn0::S
fd::S
fee::S
end
# basic init state is like bracketing state
# keep a < b
# set d, ee to a
function init_state(::AbstractAlefeldPotraShi, F, x₀, x₁, fx₀, fx₁; c=nothing, fc=nothing)
a, b, fa, fb = x₀, x₁, fx₀, fx₁
iszero(fa) && return AbstractAlefeldPotraShiState(
promote(b, a, a, a)...,
promote(fb, fa, fa, fa)...,
)
iszero(fb) && return AbstractAlefeldPotraShiState(
promote(b, a, a, a)...,
promote(fb, fa, fa, fa)...,
)
assert_bracket(fa, fb)
if a > b
a, b, fa, fb = b, a, fb, fa
end
if c === nothing # need c, fc to be defined if one is
c = a < zero(a) < b ? _middle(a, b) : secant_step(a, b, fa, fb)
fc = first(F(c))
end
(iszero(fc) || !isfinite(fc)) && return AbstractAlefeldPotraShiState(
promote(c, a, a, a)...,
promote(fc, fa, fa, fa)...,
)
a, b, d, fa, fb, fd = bracket(a, b, c, fa, fb, fc)
assert_bracket(fa, fb)
T = typeof(d)
ee, fe = T(NaN) / oneunit(T(NaN)) * d, fd # use NaN for initial ee value
AbstractAlefeldPotraShiState(promote(b, a, d, ee)..., promote(fb, fa, fd, fe)...)
end
# avoid type-stability issue due to dynamic dispatch based on kwargs
function init_options(
M::AbstractAlefeldPotraShi,
state::AbstractUnivariateZeroState{T,S};
kwargs...,
) where {T,S}
d = kwargs
defs = default_tolerances(M, T, S)
δₐ = get(d, :xatol, get(d, :xabstol, defs[1]))
δᵣ = get(d, :xrtol, get(d, :xreltol, defs[2]))
maxiters = get(d, :maxiters, get(d, :maxevals, get(d, :maxsteps, defs[5])))
strict = get(d, :strict, defs[6])
Roots.FExactOptions(δₐ, δᵣ, maxiters, strict)
end
# fn calls w/in calculateΔ
# 1 is default, but this should be adjusted for different methods
fncalls_per_step(::AbstractAlefeldPotraShi) = 1
function update_state(
M::AbstractAlefeldPotraShi,
F::Callable_Function,
o::AbstractAlefeldPotraShiState{T,S},
options,
l=NullTracks(),
) where {T,S}
atol, rtol = options.xabstol, options.xreltol
μ, λ = oftype(rtol, 0.5), oftype(rtol, 0.7)
tols = (; λ=λ, atol=atol, rtol=rtol)
a::T, b::T, d::T, ee::T = o.xn0, o.xn1, o.d, o.ee
fa::S, fb::S, fd::S, fee::S = o.fxn0, o.fxn1, o.fd, o.fee
δ₀ = b - a
# use c to track smaller of |fa|, |fb|
c, fc = abs(fa) < abs(fb) ? (a, fa) : (b, fb)
ps = (; a=a, b=b, d=d, ee=ee, fa=fa, fb=fb, fd=fd, fee=fee, atol=atol, rtol=rtol)
# (may modify ps) <<----
Δ, ps = calculateΔ(M, F, c, ps)
incfn(l, fncalls_per_step(M))
a, b, d = ps.a, ps.b, ps.d
fa, fb, fd = ps.fa, ps.fb, ps.fd
if iszero(fa) || iszero(fb) || (b - a) <= tolₑ(a, b, fa, fb, atol, rtol)
@reset o.xn0 = a
@reset o.xn1 = b
@reset o.fxn0 = fa
@reset o.fxn1 = fb
return o, true
end
x = c - Δ
x = avoid_boundaries(a, x, b, fa, fb, tols)
fx = first(F(x))
incfn(l)
ā, b̄, d, fā, fb̄, fd = bracket(a, b, x, fa, fb, fx)
if iszero(fx) || (b̄ - ā) <= tolₑ(ā, b̄, fā, fb̄, atol, rtol)
@reset o.xn0 = ā
@reset o.xn1 = b̄
@reset o.fxn0 = fā
@reset o.fxn1 = fb̄
return o, true
end
u, fu = abs(fā) < abs(fb̄) ? (ā, fā) : (b̄, fb̄)
# 4.16 double secant step
fab⁻¹ = (b̄ - ā) / (fb̄ - fā)
c̄ = u - 2 * fab⁻¹ * fu # 4.16
if 2abs(u - c̄) > b̄ - ā # 4.17
c̄ = __middle(ā, b̄)
end
c̄ = avoid_boundaries(ā, c̄, b̄, fā, fb̄, tols)
fc̄ = first(F(c̄))
incfn(l)
(iszero(fc̄) || !isfinite(fc̄)) && return (_set(o, (c̄, fc̄)), true)
â, b̂, d̂, fâ, fb̂, fd̂ = bracket(ā, b̄, c̄, fā, fb̄, fc̄) # 4.18
if (b̂ - â) < μ * δ₀ # 4.19
ee, fee = d, fd
a, b, d, fa, fb, fd = â, b̂, d̂, fâ, fb̂, fd̂
else
m = __middle(ā, b̄)
m = avoid_boundaries(â, m, b̂, fâ, fb̂, tols)
fm = first(F(m))
incfn(l)
(iszero(fm) || !isfinite(fm)) && return (_set(o, (m, fm)), true)
ee, fee = d̂, fd̂
a, b, d, fa, fb, fd = bracket(â, b̂, m, fâ, fb̂, fm)
end
@reset o.xn0 = a
@reset o.xn1 = b
@reset o.d = d
@reset o.ee = ee
@reset o.fxn0 = fa
@reset o.fxn1 = fb
@reset o.fd = fd
@reset o.fee = fee
return o, false
end
## --- Methods
# algorithms 2.4 and 2.5 can be implemented this way:
struct A2425{K} <: AbstractAlefeldPotraShi end
function calculateΔ(::A2425{K}, F::Callable_Function, c₀::T, ps) where {K,T}
a, b, d, ee = ps.a, ps.b, ps.d, ps.ee
fa, fb, fd, fee = ps.fa, ps.fb, ps.fd, ps.fee
tols = (λ=oftype(ps.rtol, 0.7), atol=ps.atol, rtol=ps.rtol)
c = a
for k in 1:K
c = newton_quadratic(a, b, d, fa, fb, fd, k + 1)
k == K && break
c = avoid_boundaries(a, c, b, fa, fb, tols)
fc = first(F(c))
a, b, d, fa, fb, fd = bracket(a, b, c, fa, fb, fc)
iszero(fc) && break
if (!isfinite(fc) || !isfinite(c))
c = c₀
break
end
end
@reset ps.a = a
@reset ps.fa = fa
@reset ps.b = b
@reset ps.fb = fb
@reset ps.d = d
@reset ps.fd = fd
c₀ - c, ps
end
"""
Roots.AlefeldPotraShi()
Follows Algorithm 4.1 in "ON ENCLOSING SIMPLE ROOTS OF NONLINEAR
EQUATIONS", by Alefeld, Potra, Shi; DOI:
[10.1090/S0025-5718-1993-1192965-2](https://doi.org/10.1090/S0025-5718-1993-1192965-2).
The order of convergence is `2 + √5`; asymptotically there are 3 function evaluations per step.
Asymptotic efficiency index is ``(2+√5)^{1/3} ≈ 1.618...``. Less efficient, but can run faster than the related [`A42`](@ref) method.
Originally by John Travers.
"""
const AlefeldPotraShi = A2425{2}
# Algorithm 5.7 is parameterized by K
# 4.1 -> K=1; 4.2 -> K=2
struct A57{K} <: AbstractAlefeldPotraShi end
fncalls_per_step(::A57{K}) where {K} = K - 1
function calculateΔ(::A57{K}, F::Callable_Function, c₀::T, ps) where {K,T}
a, b, d, ee = ps.a, ps.b, ps.d, ps.ee
fa, fb, fd, fee = ps.fa, ps.fb, ps.fd, ps.fee
tols = (λ=oftype(ps.rtol, 0.7), atol=ps.atol, rtol=ps.rtol)
c, fc = a, fa
for k in 1:K
if isnan(ee) || iszero(_pairwise_prod(fa, fb, fd, fee))
c = newton_quadratic(a, b, d, fa, fb, fd, k + 1)
else
c = ipzero(a, b, d, ee, fa, fb, fd, fee)
if (c <= a || b <= c)
c = newton_quadratic(a, b, d, fa, fb, fd, k + 1)
end
end
k == K && break
ee, fee = d, fd
c = avoid_boundaries(a, c, b, fa, fb, tols)
fc = first(F(c))
a, b, d, fa, fb, fd = bracket(a, b, c, fa, fb, fc)
iszero(fc) && break # fa or fb is 0
if (!isfinite(fc) || !isfinite(c))
c = c₀
break
end
end
@reset ps.a = a
@reset ps.fa = fa
@reset ps.b = b
@reset ps.fb = fb
@reset ps.d = d
@reset ps.fd = fd
@reset ps.ee = ee
@reset ps.fee = fee
c₀ - c, ps
end
"""
Roots.A42()
Bracketing method which finds the root of a continuous function within
a provided bracketing interval `[a, b]`, without requiring derivatives. It is based
on Algorithm 4.2 described in: G. E. Alefeld, F. A. Potra, and
Y. Shi, "Algorithm 748: enclosing zeros of continuous functions," ACM
Trans. Math. Softw. 21, 327–344 (1995), DOI: [10.1145/210089.210111](https://doi.org/10.1145/210089.210111).
The asymptotic efficiency index, ``q^{1/k}``, is ``(2 + 7^{1/2})^{1/3} = 1.6686...``.
Originally by John Travers.
!!! note
The paper referenced above shows that for a continuously differentiable ``f`` over ``[a,b]`` with a simple root the algorithm terminates at a zero or asymptotically the steps are of the inverse cubic type (Lemma 5.1). This is proved under an assumption that ``f`` is four-times continuously differentiable.
"""
const A42 = A57{2}
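#=
Usage sketch (illustrative, not part of the implementation): `A42` needs only
a continuous function and a bracketing interval, e.g.

    find_zero(x -> exp(x) - x^4, (8, 9), Roots.A42())  # ≈ 8.6131694564...
=#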
## --- utilities
## Brent-style tolerance from the paper
function tolₑ(a, b, fa, fb, atol, rtol)
u = abs(fa) < abs(fb) ? abs(a) : abs(b)
return 2 * u * rtol + atol
end
## adjustment before calling bracket
function avoid_boundaries(a, c, b, fa, fb, tols)
δ = tols.λ * tolₑ(a, b, fa, fb, tols.atol, tols.rtol)
if (b - a) ≤ 4δ
c = a / 2 + b / 2
elseif c ≤ a + 2δ
c = a + 2δ
elseif c ≥ b - 2δ
c = b - 2δ
end
c
end
# assume fc != 0
## return a1, b1, d with a ≤ a1 < b1 ≤ b; d is the endpoint dropped from the bracket
@inline function bracket(a, b, c, fa, fb, fc)
if isbracket(fa, fc)
# switch b,c
return (a, c, b, fa, fc, fb)
else
# switch a,c
return (c, b, a, fc, fb, fa)
end
end
# f[a, b] divided differences
@inline f_ab(a, b, fa, fb) = (fb - fa) / (b - a)
# f[a,b,d]
@inline function f_abd(a, b, d, fa, fb, fd)
fab, fbd = f_ab(a, b, fa, fb), f_ab(b, d, fb, fd)
(fbd - fab) / (d - a)
end
# iterative quadratic solution to P(x) = 0 where P=f(a) + f[a,b]*(x-a) + f[a,b,d]*(x-a)*(x-b)
function newton_quadratic(a, b, d, fa, fb, fd, k::Int)
A = f_abd(a, b, d, fa, fb, fd)
B = f_ab(a, b, fa, fb)
(iszero(A) || !isfinite(A)) && return a - fa / B
r = sign(A) * sign(fa) > 0 ? a : b
for i in 1:k
P = fa + B * (r - a) + A * (r - a) * (r - b)
P′ = (B + A * (2r - a - b))
r -= P / P′
end
return r
end
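#=
Illustrative check (a sketch, not part of the implementation): when `f`
itself is quadratic, the interpolating polynomial P through the three
points is `f`, so `newton_quadratic` amounts to `k` Newton steps on `f`:

    f(x) = x^2 - 2
    a, b, d = 1.0, 2.0, 1.5
    newton_quadratic(a, b, d, f(a), f(b), f(d), 3)  # ≈ 1.4142, near √2
=#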
# zero of inverse interpolation polynomial through (a,fa), (b,fb), (c,fc), (d, fd)
# may not lie in [a,b], though asymptotically will under smoothness assumptions
function ipzero(a, b, c, d, fa, fb, fc, fd)
Q11 = (c - d) * fc / (fd - fc)
Q21 = (b - c) * fb / (fc - fb)
Q31 = (a - b) * fa / (fb - fa)
D21 = (b - c) * fc / (fc - fb)
D31 = (a - b) * fb / (fb - fa)
Q22 = (D21 - Q11) * fb / (fd - fb)
Q32 = (D31 - Q21) * fa / (fc - fa)
D32 = (D31 - Q21) * fc / (fc - fa)
Q33 = (D32 - Q22) * fa / (fd - fa)
a + (Q31 + Q32 + Q33)
end
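#=
Illustrative check (a sketch): for a smooth monotone function the zero of
the inverse cubic interpolant is a good estimate of the root. With
f(x) = exp(x) - 2 (root log(2) ≈ 0.6931) and four nearby points:

    f(x) = exp(x) - 2
    a, b, c, d = 0.5, 0.9, 0.3, 1.1
    ipzero(a, b, c, d, f(a), f(b), f(c), f(d))  # ≈ 0.693
=#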
# check if fa,fb,fc,fd are distinct
function _pairwise_prod(as...)
t = one(first(as))
n = length(as)
for i in 1:(n - 1)
for j in (i + 1):n
t *= (as[i] - as[j])
end
end
t
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 7071 | """
Bisection()
If possible, will use the bisection method over `Float64` values. The
bisection method starts with a bracketing interval `[a,b]` and splits
it into two intervals `[a,c]` and `[c,b]`. If `c` is not a zero, then
one of these two will be a bracketing interval and the process
continues. The computation of `c` is done by `_middle`, which
reinterprets floating point values as unsigned integers and splits
there. It was contributed by [Jason Merrill](https://gist.github.com/jwmerrill/9012954).
This method avoids floating point issues and when the
tolerances are set to zero (the default) guarantees a "best" solution
(one where a zero is found or the bracketing interval is of the type
`[a, nextfloat(a)]`).
When tolerances are given, this algorithm terminates when the interval
length is less than or equal to the tolerance `max(δₐ, 2abs(u)δᵣ)` with `u` in
`{a,b}` chosen by the smaller of `|f(a)|` and `|f(b)|`, or
the function value is less than
`max(tol, min(abs(a), abs(b)) * rtol)`. The latter is used only if the default
tolerances (`atol` or `rtol`) are adjusted.
When solving ``f(x,p) = 0`` for ``x^*(p)`` using `Bisection` one cannot take the derivative directly via automatic differentiation, as the algorithm is not differentiable. See [Sensitivity](https://juliamath.github.io/Roots.jl/stable/roots/#Sensitivity) in the documentation for alternatives.
"""
struct Bisection <: AbstractBisectionMethod end
initial_fncalls(::AbstractBisectionMethod) = 3 # middle
# Bisection using __middle should have a, b on the same side of 0.0 (though
# possibly a = -0.0 and b = 1.0, so they are not guaranteed to have the same sign)
function init_state(
::AbstractBisectionMethod,
F,
x₀,
x₁,
fx₀,
fx₁;
m=_middle(x₀, x₁),
fm=F(m),
)
if x₀ > x₁
x₀, x₁, fx₀, fx₁ = x₁, x₀, fx₁, fx₀
end
# handle interval if fa*fb ≥ 0 (explicit, but also not needed)
(iszero(fx₀) || iszero(fx₁)) &&
return UnivariateZeroState(promote(x₁, x₀)..., promote(fx₁, fx₀)...)
assert_bracket(fx₀, fx₁)
if sign(fm) * fx₀ < 0 * oneunit(fx₀)
a, b, fa, fb = x₀, m, fx₀, fm
else
a, b, fa, fb = m, x₁, fm, fx₁
end
# handles case where a=-0.0, b=1.0 without error
sign(a) * sign(b) < 0 && throw(ArgumentError("_middle error"))
UnivariateZeroState(promote(b, a)..., promote(fb, fa)...)
end
const FloatNN = Union{Float64,Float32,Float16}
# for Bisection, the defaults are zero tolerances and strict=true
"""
default_tolerances(M::AbstractBisectionMethod, [T], [S])
For `Bisection` when the `x` values are of type `Float64`, `Float32`,
or `Float16`, the default tolerances are zero and there is no limit on
the number of iterations. In this case, the
algorithm is guaranteed to converge to an exact zero, or a point where
the function changes sign at one of the answer's adjacent floating
point values.
For other types, default non-zero tolerances for `xatol` and `xrtol` are given.
"""
function default_tolerances(
::AbstractBisectionMethod,
::Type{T},
::Type{S′},
) where {T<:FloatNN,S′}
S = real(float(S′))
xatol = 0 * oneunit(S)
xrtol = 0 * one(T)
atol = 0 * oneunit(S)
rtol = 0 * one(S)
maxiters = typemax(Int)
strict = true
(xatol, xrtol, atol, rtol, maxiters, strict)
end
# not float uses some non-zero tolerances for `x`
function default_tolerances(::AbstractBisectionMethod, ::Type{T′}, ::Type{S′}) where {T′,S′}
T, S = real(float(T′)), real(float(S′))
xatol = eps(T)^3 * oneunit(T)
xrtol = eps(T) * one(T) # unitless
atol = 0 * oneunit(S)
rtol = 0 * one(S)
maxiters = typemax(Int)
strict = true
(xatol, xrtol, atol, rtol, maxiters, strict)
end
# find middle of (a,b) with the conventions that
# * if a or b is infinite, it is replaced by the adjacent finite float
# * if a, b have different signs, the middle is 0
# middle falls back to a/2 + b/2, but
# for Float64 values, middle is over the
# reinterpreted unsigned integer.
function _middle(x, y)
a = isinf(x) ? nextfloat(x) : x
b = isinf(y) ? prevfloat(y) : y
if sign(a) * sign(b) < 0
return zero(a)
else
__middle(a, b)
end
end
## find middle assuming a,b same sign, finite
## Alternative "mean" definition that operates on the binary representation
## of a float. Using this definition, bisection will never take more than
## 64 steps (over Float64)
__middle(x::Float64, y::Float64) = __middle(Float64, UInt64, x, y)
__middle(x::Float32, y::Float32) = __middle(Float32, UInt32, x, y)
__middle(x::Float16, y::Float16) = __middle(Float16, UInt16, x, y)
## fallback for non FloatNN number types
__middle(x::Number, y::Number) = x / 2 + y / 2
function __middle(T, S, x, y)
# Use the usual float rules for combining non-finite numbers
# do division over unsigned integers with bit shift
xint = reinterpret(S, abs(x))
yint = reinterpret(S, abs(y))
mid = (xint + yint) >> 1
# reinterpret in original floating point
sign(x + y) * reinterpret(T, mid)
end
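#=
A small illustration (assumes Float64): averaging the bit patterns acts
like a geometric mean, which is why bisection with `__middle` needs at
most 64 steps regardless of the scale of the bracket:

    __middle(1.0, 1e300)  # ≈ 1e150 (a geometric-mean-like value)
    (1.0 + 1e300) / 2     # 5e299, the arithmetic mean
=#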
## --------------------------------------------------
function update_state(
M::AbstractBisectionMethod,
F,
o::AbstractUnivariateZeroState{T,S},
options,
l=NullTracks(),
) where {T,S}
a, b = o.xn0, o.xn1
fa, fb = o.fxn0, o.fxn1
c::T = __middle(a, b)
fc::S = F(c)
incfn(l)
if sign(fa) * sign(fc) < 0
b, fb = c, fc
else
a, fa = c, fc
end
@reset o.xn0 = a
@reset o.xn1 = b
@reset o.fxn0 = fa
@reset o.fxn1 = fb
return o, false
end
## Special case default method for `find_zero(f, (a,b))`; gives ~10% speedup by avoiding assess_convergence/update state dispatch
function solve!(
P::ZeroProblemIterator{𝑴,𝑵,𝑭,𝑺,𝑶,𝑳};
verbose=false,
) where {𝑴<:Bisection,𝑵,𝑭,𝑺,𝑶<:ExactOptions,𝑳}
M, F, state, options, l = P.M, P.F, P.state, P.options, P.logger
val, stopped = :not_converged, false
ctr = 1
log_step(l, M, state; init=true)
T, S = TS(state)
while !stopped
a::T, b::T = state.xn0, state.xn1
fa::S, fb::S = state.fxn0, state.fxn1
## assess_convergence
if nextfloat(a) ≥ b
val = :x_converged
break
end
if (isnan(fa) || isnan(fb))
val = :nan
break
end
if (iszero(fa) || iszero(fb))
val = :exact_zero
break
end
ctr > options.maxiters && break
# ----
## update step
c = __middle(a, b)
fc = F(c)
incfn(l)
if sign(fa) * sign(fc) < 0
b, fb = c, fc
else
a, fa = c, fc
end
## ----
@reset state.xn0 = a
@reset state.xn1 = b
@reset state.fxn0 = fa
@reset state.fxn1 = fb
log_step(l, M, state)
ctr += 1
end
# val, stopped = assess_convergence(M, state, options) # update val flag
α = decide_convergence(M, F, state, options, val)
log_convergence(l, val)
log_method(l, M)
log_last(l, α)
verbose && display(l)
α
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 2226 | ### Bracketing method defaults
function init_state(M::AbstractBracketingMethod, F::Callable_Function, x)
x₀, x₁ = adjust_bracket(x)
fx₀, fx₁ = first(F(x₀)), first(F(x₁))
state = init_state(M, F, x₀, x₁, fx₀, fx₁)
end
function init_state(M::AbstractBracketingMethod, F, x₀, x₁, fx₀, fx₁)
(iszero(fx₀) || iszero(fx₁)) && return UnivariateZeroState(x₁, x₀, fx₁, fx₀)
assert_bracket(fx₀, fx₁)
a, b, fa, fb = (x₀ < x₁) ? (x₀, x₁, fx₀, fx₁) : (x₁, x₀, fx₁, fx₀)
UnivariateZeroState(promote(b, a)..., promote(fb, fa)...)
end
Base.last(state::AbstractUnivariateZeroState, M::AbstractBracketingMethod) =
state.xn0 < state.xn1 ? (state.xn0, state.xn1) : (state.xn1, state.xn0)
fn_argout(::AbstractBracketingMethod) = 1
initial_fncalls(::AbstractBracketingMethod) = 2
## tracks for bisection, different from secant, we show bracketing interval
## No init here; for Bisection() [a₀, b₀] is just lost.
function log_step(l::Tracks, M::AbstractBracketingMethod, state; init::Bool=false)
a, b = state.xn0, state.xn1
push!(l.abₛ, a < b ? (a, b) : (b, a))
!init && log_iteration(l, 1)
nothing
end
# use xatol, xrtol only, but give some breathing room over the strict ones and cap number of steps
function default_tolerances(::AbstractBracketingMethod, ::Type{T}, ::Type{S}) where {T,S}
xatol = eps(real(T))^3 * oneunit(real(T))
xrtol = eps(real(T)) # unitless
atol = zero(oneunit(real(S)))
rtol = zero(one(real(S)))
maxevals = 60
strict = true
(xatol, xrtol, atol, rtol, maxevals, strict)
end
## --------------------------------------------------
const bracketing_error = """The interval [a,b] is not a bracketing interval.
You need f(a) and f(b) to have different signs (f(a) * f(b) < 0).
Consider a different bracket or try fzero(f, c) with an initial guess c.
"""
## utils
@inline isbracket(fa, fb) = sign(fa) * sign(fb) < 0
assert_bracket(fx0, fx1) = isbracket(fx0, fx1) || throw(ArgumentError(bracketing_error))
## helper function: floating point, sorted, finite
function adjust_bracket(x0)
u, v = map(float, _extrema(x0))
if u > v
u, v = v, u
end
isinf(u) && (u = nextfloat(u))
isinf(v) && (v = prevfloat(v))
u, v
end
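#=
A small illustration (a sketch): `adjust_bracket` floats and orders the
endpoints and replaces infinite ones with the adjacent finite values:

    adjust_bracket((2, 1))    # (1.0, 2.0)
    adjust_bracket((0, Inf))  # (0.0, 1.7976931348623157e308)
=#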
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 2273 | """
Roots.Brent()
An implementation of
[Brent's](https://en.wikipedia.org/wiki/Brent%27s_method) (or Brent-Dekker) method.
This method uses a choice of inverse quadratic interpolation or a secant
step, falling back on bisection if necessary.
"""
struct Brent <: AbstractBracketingMethod end
struct BrentState{T,S} <: AbstractUnivariateZeroState{T,S}
xn1::T
xn0::T
c::T
d::T
fxn1::S
fxn0::S
fc::S
mflag::Bool
end
# # we store mflag as -1, or +1 in state.mflag
function init_state(::Brent, F, x₀, x₁, fx₀, fx₁)
u, v, fu, fv = x₀, x₁, fx₀, fx₁
if abs(fu) > abs(fv)
u, v, fu, fv = v, u, fv, fu
end
# check if fu*fv ≥ 0
(iszero(fu) || iszero(fv)) &&
return BrentState(promote(u, v, v, v)..., promote(fu, fv, fv)..., true)
assert_bracket(fu, fv)
BrentState(promote(u, v, v, v)..., promote(fu, fv, fv)..., true)
end
function update_state(
::Brent,
f,
state::BrentState{T,S},
options,
l=NullTracks(),
) where {T,S}
mflag = state.mflag
a, b, c, d = state.xn0, state.xn1, state.c, state.d
fa, fb, fc = state.fxn0, state.fxn1, state.fc
# next step depends on points; inverse quadratic
s::T = inverse_quadratic_step(a, b, c, fa, fb, fc)
(isnan(s) || isinf(s)) && (s = secant_step(a, b, fa, fb))
# guard step
u, v = (3a + b) / 4, b
if u > v
u, v = v, u
end
tol = max(options.xabstol, max(abs(b), abs(c), abs(d)) * options.xreltol)
if !(u < s < v) ||
(mflag && abs(s - b) >= abs(b - c) / 2) ||
(!mflag && abs(s - b) >= abs(b - c) / 2) ||
(mflag && abs(b - c) <= tol) ||
(!mflag && abs(c - d) <= tol)
s = _middle(a, b)
mflag = true
else
mflag = false
end
fs::S = f(s)
incfn(l)
iszero(fs) && return (_set(state, (s, fs)), true)
(isnan(fs) || isinf(fs)) && return (state, true)
d = c
c, fc = b, fb
if sign(fa) * sign(fs) < 0
b, fb = s, fs
else
a, fa = s, fs
end
if abs(fa) < abs(fb)
a, b, fa, fb = b, a, fb, fa
end
state = _set(state, (b, fb), (a, fa))
@reset state.c = c
@reset state.d = d
@reset state.fc = fc
@reset state.mflag = mflag
return state, false
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 1639 | """
Roots.Chandrapatla()
Use [Chandrapatla's
algorithm](https://doi.org/10.1016/S0965-9978(96)00051-8)
(cf. [Scherer](https://www.google.com/books/edition/Computational_Physics/cC-8BAAAQBAJ?hl=en&gbpv=1&pg=PA95&printsec=frontcover))
to solve ``f(x) = 0``.
Chandrapatla's algorithm chooses between an inverse quadratic step or a bisection step based on a computed inequality.
"""
struct Chandrapatla <: AbstractBracketingMethod end
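#=
Usage sketch (illustrative):

    find_zero(x -> x^3 - 2x - 5, (2, 3), Roots.Chandrapatla())  # ≈ 2.0945514815
=#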
struct ChandrapatlaState{T,S} <: AbstractUnivariateZeroState{T,S}
xn1::T
xn0::T
c::T # keep xₙ₋₂ around for quadratic step
fxn1::S
fxn0::S
fc::S
end
# a = most recent, b prior
function init_state(::Chandrapatla, F, x₀, x₁, fx₀, fx₁)
a, b, fa, fb = x₁, x₀, fx₁, fx₀
assert_bracket(fa, fb)
c, fc = a, fa
ChandrapatlaState(promote(a, b, c)..., promote(fa, fb, fc)...)
end
function update_state(
::Chandrapatla,
F,
o::ChandrapatlaState{T,S},
options,
l=NullTracks(),
) where {T,S}
a, b, c = o.xn1, o.xn0, o.c
fa, fb, fc = o.fxn1, o.fxn0, o.fc
# encoding: a = xₙ, b=xₙ₋₁, c= xₙ₋₂
ξ = (a - b) / (c - b)
ϕ = (fa - fb) / (fc - fb)
ϕ² = ϕ^2
Δ = (ϕ² < ξ) && (1 - 2ϕ + ϕ² < 1 - ξ) # Chandrapatla's inequality to determine next step
xₜ::T = Δ ? inverse_quadratic_step(a, b, c, fa, fb, fc) : _middle(a, b)
fₜ::S = F(xₜ)
incfn(l)
if sign(fₜ) * sign(fa) < 0
a, b, c = xₜ, a, b
fa, fb, fc = fₜ, fa, fb
else
a, c = xₜ, a
fa, fc = fₜ, fa
end
o = _set(o, (a, fa), (b, fb)) # a is xₙ, b is xₙ₋₁
@reset o.c = c
@reset o.fc = fc
return (o, false)
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 3587 | struct FalsePosition{R} <: AbstractSecantMethod end
"""
FalsePosition([galadino_factor])
Use the [false
position](https://en.wikipedia.org/wiki/False_position_method) method
to find a zero for the function `f` within the bracketing interval
`[a,b]`.
The false position method is a modified bisection method, where the
midpoint between `[aₖ, bₖ]` is chosen to be the intersection point
of the secant line with the ``x`` axis, and not the average between the
two values.
To speed up convergence for concave functions, this algorithm
implements the ``12`` reduction factors of Galdino (*A family of regula
falsi root-finding methods*). These are specified by number, as in
`FalsePosition(2)` or by one of three names `FalsePosition(:pegasus)`,
`FalsePosition(:illinois)`, or `FalsePosition(:anderson_bjork)` (the
default). The default choice has generally better performance than the
others, though there are exceptions.
For some problems, the number of function calls can be greater than
for the `Bisection` method, but generally this algorithm will make
fewer function calls.
Examples
```
find_zero(x -> x^5 - x - 1, (-2, 2), FalsePosition())
```
"""
FalsePosition
FalsePosition(x=:anderson_bjork) = FalsePosition{x}()
# 12 is tough; needs more evaluations
function default_tolerances(::FalsePosition{12}, ::Type{T}, ::Type{S}) where {T,S}
xatol = eps(real(T)) * oneunit(real(T))
xrtol = eps(real(T)) # unitless
atol = 4 * eps(real(float(S))) * oneunit(real(S))
rtol = 4 * eps(real(float(S))) * one(real(S))
maxiters = 250
strict = false
(xatol, xrtol, atol, rtol, maxiters, strict)
end
init_state(M::FalsePosition, F, x₀, x₁, fx₀, fx₁) =
init_state(Bisection(), F, x₀, x₁, fx₀, fx₁)
function update_state(
method::FalsePosition,
fs,
o::AbstractUnivariateZeroState{T,S},
options,
l=NullTracks(),
) where {T,S}
a, b = o.xn0, o.xn1
fa, fb = o.fxn0, o.fxn1
lambda = fb / (fb - fa)
ϵ = √eps(T) / 100 # some engineering to avoid short moves; still fails on some
ϵ ≤ lambda ≤ 1 - ϵ || (lambda = 1 / 2)
x::T = b - lambda * (b - a)
fx::S = fs(x)
incfn(l)
iszero(fx) && return (_set(o, (x, fx)), true)
if sign(fx) * sign(fb) < 0
a, fa = b, fb
else
fa = galdino_reduction(method, fa, fb, fx)
end
b, fb = x, fx
o = _set(o, (b, fb), (a, fa))
return (o, false)
end
# the 12 reduction factors offered by Galdino
# In RootsTesting.jl, we can see :12 has many more failures.
galdino = Dict{Union{Int,Symbol},Function}(
:1 => (fa, fb, fx) -> fa * fb / (fb + fx),
:2 => (fa, fb, fx) -> (fa - fb) / 2,
:3 => (fa, fb, fx) -> (fa - fx) / (2 + fx / fb),
:4 => (fa, fb, fx) -> (fa - fx) / (1 + fx / fb)^2,
:5 => (fa, fb, fx) -> (fa - fx) / (1.5 + fx / fb)^2,
:6 => (fa, fb, fx) -> (fa - fx) / (2 + fx / fb)^2,
:7 => (fa, fb, fx) -> (fa + fx) / (2 + fx / fb)^2,
:8 => (fa, fb, fx) -> fa / 2,
:9 => (fa, fb, fx) -> fa / (1 + fx / fb)^2,
:10 => (fa, fb, fx) -> (fa - fx) / 4,
:11 => (fa, fb, fx) -> fx * fa / (fb + fx),
:12 => (fa, fb, fx) -> (fa * (1 - fx / fb > 0 ? 1 - fx / fb : 1 / 2)),
)
# give common names
for (nm, i) in [(:pegasus, 1), (:illinois, 8), (:anderson_bjork, 12)]
galdino[nm] = galdino[i]
end
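#=
Usage sketch (illustrative): the reduction factor rescales `fa` when the
endpoint `a` is retained, which avoids the stalling the unmodified regula
falsi suffers on convex functions; the named variants differ only in how
aggressively `fa` is scaled:

    f(x) = x^5 - x - 1
    find_zero(f, (-2, 2), FalsePosition(:illinois))
    find_zero(f, (-2, 2), FalsePosition(:pegasus))
    find_zero(f, (-2, 2), FalsePosition(12))  # same as :anderson_bjork, the default
=#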
# from Chris Elrod; https://raw.githubusercontent.com/chriselrod/AsymptoticPosteriors.jl/master/src/false_position.jl
@generated function galdino_reduction(methods::FalsePosition{R}, fa, fb, fx) where {R}
f = galdino[R]
quote
$(Expr(:meta, :inline))
$f(fa, fb, fx)
end
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 3882 | """
Roots.ITP(; κ₁=0.2, κ₂=2, n₀=1)
Use the [ITP](https://en.wikipedia.org/wiki/ITP_method) bracketing
method. Its authors claim it "is the first root-finding algorithm
that achieves the superlinear convergence of the secant method
while retaining the optimal worst-case performance of the bisection
method."
The values `κ₁`, `κ₂`, and `n₀` are tuning parameters.
The
[suggested](https://docs.rs/kurbo/0.8.1/kurbo/common/fn.solve_itp.html)
value of `κ₁` is `0.2/(b-a)`, but the default here is `0.2`. The value
of `κ₂` is `2`, and the default value of `n₀` is `1`.
## Note:
Suggested on
[discourse](https://discourse.julialang.org/t/julia-implementation-of-the-interpolate-truncate-project-itp-root-finding-algorithm/77739)
by `@TheLateKronos`, who supplied the original version of the code.
"""
struct ITP{T,S} <: AbstractBracketingMethod
κ₁::T
κ₂::S
n₀::Int
function ITP(κ₁::T′, κ₂::S, n₀::Int) where {T′,S}
0 ≤ κ₁ < Inf || throw(ArgumentError("κ₁ must be between 0 and ∞"))
1 ≤ κ₂ < (3 + √5) / 2 ||
throw(ArgumentError("κ₂ must be between 1 and 1 plus the golden ratio"))
0 < n₀ < Inf || throw(ArgumentError("n₀ must be between 0 and ∞"))
T = float(T′)
## κ₂ == 2 || throw(ArgumentError("κ₂ is hardcoded to be 2"))
new{T,S}(float(κ₁), κ₂, n₀)
end
end
ITP(; κ₁=0.2, κ₂=2, n₀=1) = ITP(κ₁, κ₂, n₀)
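#=
Usage sketch (illustrative):

    find_zero(x -> x^3 - 2x - 5, (2, 3), Roots.ITP())         # default tuning
    find_zero(x -> x^3 - 2x - 5, (2, 3), Roots.ITP(κ₁=0.1))   # smaller truncation
=#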
struct ITPState{T,S,R} <: AbstractUnivariateZeroState{T,S}
xn1::T
xn0::T
fxn1::S
fxn0::S
j::Int
ϵ2n₁₂::R
d::T
end
function init_state(M::ITP, F, x₀, x₁, fx₀, fx₁)
if x₀ > x₁
x₀, x₁, fx₀, fx₁ = x₁, x₀, fx₁, fx₀
end
## we compute this once the options and initial state are known
ϵ2n₁₂ = zero(float(x₁) / x₁) # ϵ*2^(ceil(Int, log2((b-a)/(2*ϵ))) + n₀)
# handle interval if fa*fb ≥ 0 (explicit, but also not needed)
(iszero(fx₀) || iszero(fx₁)) &&
return ITPState(promote(x₁, x₀)..., promote(fx₁, fx₀)..., 0, ϵ2n₁₂, x₁)
assert_bracket(fx₀, fx₁)
a, b, fa, fb = x₀, x₁, fx₀, fx₁
ITPState(promote(b, a)..., promote(fb, fa)..., 0, ϵ2n₁₂, a)
end
function init_options(
M::ITP,
state::AbstractUnivariateZeroState{T,S};
kwargs...,
) where {T,S}
d = kwargs
defs = default_tolerances(M, T, S)
δₐ = get(d, :xatol, get(d, :xabstol, defs[1]))
δᵣ = get(d, :xrtol, get(d, :xreltol, defs[2]))
ϵₐ = get(d, :atol, get(d, :abstol, defs[3]))
ϵᵣ = get(d, :rtol, get(d, :reltol, defs[4]))
maxiters = get(d, :maxiters, get(d, :maxevals, get(d, :maxsteps, defs[5])))
strict = get(d, :strict, defs[6])
return UnivariateZeroOptions(δₐ, δᵣ, ϵₐ, ϵᵣ, maxiters, strict)
end
function update_state(M::ITP, F, o::ITPState{T,S,R}, options, l=NullTracks()) where {T,S,R}
a, b = o.xn0, o.xn1
fa, fb = o.fxn0, o.fxn1
j, ϵ2n₁₂ = o.j, o.ϵ2n₁₂
κ₁, κ₂ = M.κ₁, M.κ₂
if iszero(ϵ2n₁₂)
# we need the options to set the ϵ⋅2^n₁₂ part of r.
ϵ = max(options.xabstol, max(abs(a), abs(b)) * options.xreltol)
ϵ2n₁₂ = ϵ * exp2(ceil(Int, log2((b - a) / (2ϵ))) + M.n₀)
@reset o.ϵ2n₁₂ = ϵ2n₁₂
end
Δ = b - a
x₁₂ = a + Δ / 2 # middle must be (a+b)/2
r = ϵ2n₁₂ * exp2(-j) - Δ / 2
δ′ = κ₁ * Δ^κ₂ # a numeric literal for κ₂ is faster
δ = δ′ / oneunit(δ′)
# δ = κ₁ * Δ^2
xᵣ = (b * fa - a * fb) / (fa - fb)
σ = sign(x₁₂ - xᵣ)
xₜ = δ ≤ abs(x₁₂ - xᵣ) / oneunit(xᵣ) ? xᵣ + σ * δ * oneunit(xᵣ) : x₁₂
c::T = xᵢₜₚ = abs(xₜ - x₁₂) ≤ r ? xₜ : x₁₂ - σ * r
if !(a < c < b)
nextfloat(a) ≥ b &&
log_message(l, "Algorithm stopped narrowing bracketing interval")
return (o, true)
end
fc::S = F(c)
incfn(l)
if sign(fa) * sign(fc) < 0
b, fb = c, fc
else
a, fa = c, fc
end
o = _set(o, (b, fb), (a, fa))
@reset o.j = j + 1
return o, false
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 1837 | """
Roots.Ridders()
Implements [Ridders'](https://en.wikipedia.org/wiki/Ridders%27_method) method.
This bracketing method finds the midpoint, `x₁`; then interpolates an exponential; then uses false position with the interpolated value to find `c`. If `c` and `x₁` form a bracket, it is used; otherwise the subinterval `[a,c]` or `[c,b]` is used.
Example:
```jldoctest
julia> using Roots
julia> find_zero(x -> exp(x) - x^4, (5, 15), Roots.Ridders()) ≈ 8.61316945644
true
julia> find_zero(x -> x*exp(x) - 10, (-100, 100), Roots.Ridders()) ≈ 1.74552800274
true
julia> find_zero(x -> tan(x)^tan(x) - 1e3, (0, 1.5), Roots.Ridders()) ≈ 1.3547104419
true
```
[Ridders](https://cs.fit.edu/~dmitra/SciComp/Resources/RidderMethod.pdf) showed the error satisfies `eₙ₊₁ ≈ 1/2 eₙeₙ₋₁eₙ₋₂ ⋅ (g^2-2fh)/f` for
`f=F', g=F''/2, h=F'''/6`, suggesting convergence at rate `≈ 1.839...`. It uses two function evaluations per step, so
its efficiency index, `q^{1/k}`, is `≈ 1.839^{1/2} ≈ 1.356...`.
"""
struct Ridders <: AbstractBracketingMethod end
function update_state(
M::Ridders,
F,
o::AbstractUnivariateZeroState{T,S},
options,
l=NullTracks(),
) where {T,S}
a, b = o.xn0, o.xn1
fa, fb = o.fxn0, o.fxn1
x₁ = a + (b - a) / 2
fx₁ = F(x₁)
incfn(l)
c::T = x₁ + (x₁ - a) * sign(fa) * fx₁ / sqrt(fx₁^2 - fa * fb)
fc::S = F(c)
incfn(l)
if !(a < c < b)
nextfloat(a) ≥ b &&
log_message(l, "Algorithm stopped narrowing bracketing interval")
return (o, true)
end
# choose bracketing interval from [x₁, c], [c, x₁], [a,c], [c,b]
if sign(fx₁) * sign(fc) < 0
a, b, fa, fb = x₁ < c ? (x₁, c, fx₁, fc) : (c, x₁, fc, fx₁)
elseif sign(fa) * sign(fc) < 0
b, fb = c, fc
else
a, fa = c, fc
end
o = _set(o, (b, fb), (a, fa))
return o, false
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 7803 | struct HalleyState{T,S} <: AbstractUnivariateZeroState{T,S}
xn1::T
xn0::T
Δ::T
ΔΔ::T
fxn1::S
fxn0::S
end
# we compute one step here to get x₁
function init_state(M::AbstractHalleyLikeMethod, F::Callable_Function, x)
x₀ = float(first(x))
T = eltype(x₀)
fx₀, (Δ, ΔΔ) = F(x₀)
Δx = calculateΔ(M, Δ, ΔΔ)
x₁::T = x₀ - Δx
state = init_state(M, F, x₀, x₁, fx₀, fx₀)
end
function init_state(::AbstractHalleyLikeMethod, F, x₀::T, x₁::T, fx₀, fx₁) where {T}
fx₁, (Δ::T, ΔΔ::T) = F(x₁)
HalleyState(promote(x₁, x₀)..., Δ, ΔΔ, promote(fx₁, fx₀)...)
end
initial_fncalls(M::AbstractHalleyLikeMethod) = 2 * 3
fn_argout(::AbstractHalleyLikeMethod) = 3
function update_state(
M::AbstractΔMethod,
F,
o::HalleyState{T,S},
options,
l=NullTracks(),
) where {T,S}
xn = o.xn1
fxn = o.fxn1
r1::T, r2::T = o.Δ, o.ΔΔ
Δ::T = calculateΔ(M, r1, r2)
if isissue(Δ)
log_message(l, "Issue with computing `Δ`")
return (o, true)
end
xn1::T = xn - Δ
fxn1::S, (r1, r2) = F(xn1)
incfn(l, 3)
@reset o.xn0 = xn
@reset o.xn1 = xn1
@reset o.fxn0 = fxn
@reset o.fxn1 = fxn1
@reset o.Δ = r1
@reset o.ΔΔ = r2
return o, false
end
"""
Roots.Halley()
Implements Halley's [method](https://en.wikipedia.org/wiki/Halley%27s_method),
`xᵢ₊₁ = xᵢ - (f/f')(xᵢ) * (1 - (f/f')(xᵢ) * (f''/f')(xᵢ) * 1/2)^(-1)` This method
is cubically converging, it requires ``3`` function calls per
step. Halley's method finds `xₙ₊₁` as the zero of a hyperbola at the
point `(xₙ, f(xₙ))` matching the first and second derivatives of `f`.
The function, its derivative and second derivative can be passed either as a tuple of ``3`` functions *or*
as a function returning values for ``(f, f/f', f'/f'')``, which could be useful when function evaluations are expensive.
## Examples
```jldoctest with_derivative
julia> using Roots
julia> find_zero((sin, cos, x->-sin(x)), 3.0, Roots.Halley()) ≈ π
true
julia> function f(x)
s,c = sincos(x)
(s, s/c, -c/s)
end;
julia> find_zero(f, 3.0, Roots.Halley()) ≈ π
true
```
This can be advantageous if the derivatives are easily computed from
the computation for f, but otherwise would be expensive to compute separately.
----
The error, `eᵢ = xᵢ - α`, satisfies
`eᵢ₊₁ ≈ -(2f'⋅f''' -3⋅(f'')²)/(12⋅(f'')²) ⋅ eᵢ³` (all evaluated at `α`).
"""
struct Halley <: AbstractΔMethod end
"""
Roots.QuadraticInverse()
Implements the [quadratic inverse method](https://doi.org/10.2307/2322644) also known as
[Chebyshev's method](https://dl.acm.org/doi/10.1080/00207160802208358),
`xᵢ₊₁ = xᵢ - (f/f')(xᵢ) * (1 + (f/f')(xᵢ) * (f''/f')(xᵢ) * 1/2)`.
This method is cubically converging; it requires ``3`` function calls per step.
Example
```jldoctest with_derivative
julia> using Roots
julia> find_zero((sin, cos, x->-sin(x)), 3.0, Roots.QuadraticInverse()) ≈ π
true
```
If function evaluations are expensive one can pass in a function which
returns `(f, f/f',f'/f'')` as follows
```jldoctest with_derivative
julia> find_zero(x -> (sin(x), sin(x)/cos(x), -cos(x)/sin(x)), 3.0, Roots.QuadraticInverse()) ≈ π
true
```
This can be advantageous if the derivatives are easily computed from
the computation for f, but otherwise would be expensive to compute separately.
The error, `eᵢ = xᵢ - α`, [satisfies](https://dl.acm.org/doi/10.1080/00207160802208358)
`eᵢ₊₁ ≈ (1/2⋅(f''/f')² - 1/6⋅f'''/f')) ⋅ eᵢ³` (all evaluated at `α`).
"""
struct QuadraticInverse <: AbstractΔMethod end
"""
Chebyshev-like methods and quadratic equations (J. A. Ezquerro, J. M. Gutiérrez, M. A. Hernández and M. A. Salanova)
"""
struct ChebyshevLike <: AbstractΔMethod end
"""
An acceleration of Newton's method: Super-Halley method (J.M. Gutierrez, M.A. Hernandez)
"""
struct SuperHalley <: AbstractΔMethod end
# also Euler. Fits a parabola to point (x_n, f(x_n))
struct IrrationalHalley <: AbstractΔMethod end
"""
Roots.Schroder()
Schröder's method, like Halley's method, utilizes `f`, `f'`, and
`f''`. Unlike Halley it is quadratically converging, but this is
independent of the multiplicity of the zero (cf. Schröder, E. "Über
unendlich viele Algorithmen zur Auflösung der Gleichungen."
Math. Ann. 2, 317-365, 1870;
[mathworld](http://mathworld.wolfram.com/SchroedersMethod.html)).
Schröder's method applies Newton's method to `f/f'`, a function with
all simple zeros.
## Example
```
m = 2
f(x) = (cos(x)-x)^m
fp(x) = (-x + cos(x))*(-2*sin(x) - 2)
fpp(x) = 2*((x - cos(x))*cos(x) + (sin(x) + 1)^2)
find_zero((f, fp, fpp), pi/4, Roots.Halley()) # 14 steps
find_zero((f, fp, fpp), 1.0, Roots.Schroder()) # 3 steps
```
(Whereas, when `m=1`, Halley is 2 steps to Schröder's 3.)
If function evaluations are expensive one can pass in a function which
returns `(f, f/f',f'/f'')` as follows
```
find_zero(x -> (sin(x), sin(x)/cos(x), -cos(x)/sin(x)), 3.0, Roots.Schroder())
```
This can be advantageous if the derivatives are easily computed from
the value of `f`, but otherwise would be expensive to compute.
The error, `eᵢ = xᵢ - α`, is the same as `Newton` with `f` replaced by `f/f'`.
"""
struct Schroder <: AbstractΔMethod end
const Schroeder = Schroder # either spelling
const Schröder = Schroder
## r1, r2 are o.Δ, o.ΔΔ
calculateΔ(::IrrationalHalley, r1, r2) = 2 / (1 + sqrt(1 - 2r1 / r2)) * r1
calculateΔ(::Halley, r1, r2) = 2r2 / (2r2 - r1) * r1
calculateΔ(::QuadraticInverse, r1, r2) = (1 + r1 / (2r2)) * r1
calculateΔ(::ChebyshevLike, r1, r2) = (1 + r1 / (2r2) * (1 + r1 / r2)) * r1
calculateΔ(::SuperHalley, r1, r2) = (1 + r1 / (2r2 - 2r1)) * r1
calculateΔ(::Schroder, r1, r2) = r2 / (r2 - r1) * r1
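#=
Sanity check of the `Halley` step (a sketch, not part of the API): with
r₁ = f/f′ and r₂ = f′/f″, the classical update
Δ = (f/f′) / (1 - f⋅f″/(2f′²)) = r₁ / (1 - r₁/(2r₂)) = 2r₂/(2r₂ - r₁) ⋅ r₁
agrees with `calculateΔ(::Halley, r1, r2)` above; the other methods follow
from analogous rearrangements of their defining corrections.
=#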
## Bracketed versions of Halley and Chebyshev using Alefeld, Potra, Shi approach
## calculateΔ has *different* calling pattern
## May take more steps and function evaluations, but should always converge
"""
BracketedHalley
For a bracket `[a,b]`, uses the [`Roots.Halley`](@ref) method starting at the `x` value for which `fa` or `fb` is closest to `0`. Uses the `Roots.AbstractAlefeldPotraShi` framework to enforce the bracketing, taking an additional double secant step each time.
"""
struct BracketedHalley <: AbstractAlefeldPotraShi end
fn_argout(::BracketedHalley) = 3
fncalls_per_step(::BracketedHalley) = 3
function calculateΔ(::BracketedHalley, F::Callable_Function, c, ps)
a, b = ps.a, ps.b
fc, (Δ, Δ₂) = F(c)
d = calculateΔ(Halley(), Δ, Δ₂)
!(a <= c - d <= b) && (d = Δ) # Newton
d, ps
end
"""
BracketedChebyshev
For a bracket `[a,b]`, uses the [`Roots.QuadraticInverse`](@ref) method starting at the `x` value for which `fa` or `fb` is closest to `0`. Uses the `Roots.AbstractAlefeldPotraShi` framework to enforce the bracketing, taking an additional double secant step each time.
"""
struct BracketedChebyshev <: AbstractAlefeldPotraShi end
fn_argout(::BracketedChebyshev) = 3
fncalls_per_step(::BracketedChebyshev) = 3
function calculateΔ(::BracketedChebyshev, F::Callable_Function, c, ps)
a, b = ps.a, ps.b
fc, (Δ, Δ₂) = F(c)
d = calculateΔ(QuadraticInverse(), Δ, Δ₂)
!(a <= c - d <= b) && (d = Δ) # Newton
d, ps
end
"""
BracketedSchroder
For a bracket `[a,b]`, uses the [`Roots.Schroder`](@ref) method starting at the `x` value for which `fa` or `fb` is closest to `0`. Uses the `Roots.AbstractAlefeldPotraShi` framework to enforce the bracketing, taking an additional double secant step each time.
"""
struct BracketedSchroder <: AbstractAlefeldPotraShi end
fn_argout(::BracketedSchroder) = 3
fncalls_per_step(::BracketedSchroder) = 3
function calculateΔ(::BracketedSchroder, F::Callable_Function, c, ps)
a, b = ps.a, ps.b
fc, (Δ, Δ₂) = F(c)
d = calculateΔ(Schroder(), Δ, Δ₂)
!(a <= c - d <= b) && (d = Δ) # Newton
d, ps
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 68487 | """
LithBoonkkampIJzerman{S,D} <: AbstractNewtonLikeMethod
LithBoonkkampIJzerman(S,D)
A family of different methods that includes the secant method and Newton's method.
Specifies a linear multistep solver with `S` steps and `D` derivatives following [Lith, Boonkkamp, and
IJzerman](https://doi.org/10.1016/j.amc.2017.09.003).
# Extended help
## Examples
```jldoctest lith
julia> using Roots
julia> find_zero(sin, 3, Roots.LithBoonkkampIJzerman(2,0)) ≈ π # the secant method
true
julia> find_zero((sin,cos), 3, Roots.LithBoonkkampIJzerman(1,1)) ≈ π # Newton's method
true
julia> find_zero((sin,cos), 3, Roots.LithBoonkkampIJzerman(3,1)) ≈ π # Faster convergence rate
true
julia> find_zero((sin,cos, x->-sin(x)), 3, Roots.LithBoonkkampIJzerman(1,2)) ≈ π # Halley-like method
true
```
The method can be more robust to the initial condition. This example is from the paper (p13). Newton's method (the `S=1`, `D=1` case) fails if `|x₀| ≥ 1.089` but methods with more memory succeed.
```jldoctest lith
julia> fx = ZeroProblem((tanh,x->sech(x)^2), 1.239); # zero at 0.0
julia> solve(fx, Roots.LithBoonkkampIJzerman(1,1)) |> isnan# Newton, NaN
true
julia> solve(fx, Roots.LithBoonkkampIJzerman(2,1)) |> abs |> <(eps())
true
julia> solve(fx, Roots.LithBoonkkampIJzerman(3,1)) |> abs |> <(eps())
true
```
Multiple derivatives can be constructed automatically using automatic differentiation. For example,
```jldoctest lith
julia> using ForwardDiff
julia> function δ(f, n::Int=1)
n <= 0 && return f
n == 1 && return x -> ForwardDiff.derivative(f,float(x))
δ(δ(f,1),n-1)
end;
julia> fs(f,n) = ntuple(i -> δ(f,i-1), Val(n+1));
julia> f(x) = cbrt(x) * exp(-x^2); # cf. Table 6 in paper, α = 0
julia> fx = ZeroProblem(fs(f,1), 0.1147);
julia> opts = (xatol=2eps(), xrtol=0.0, atol=0.0, rtol=0.0); # converge if |xₙ - xₙ₋₁| <= 2ϵ
julia> solve(fx, Roots.LithBoonkkampIJzerman(1, 1); opts...) |> isnan # NaN -- no convergence
true
julia> solve(fx, Roots.LithBoonkkampIJzerman(2, 1); opts...) |> abs |> <(eps()) # converges
true
julia> fx = ZeroProblem(fs(f,2), 0.06); # need better starting point
julia> solve(fx, Roots.LithBoonkkampIJzerman(2, 2); opts...) |> abs |> <(eps()) # converges
true
```
For the case `D=1`, a bracketing method based on this approach is implemented in [`LithBoonkkampIJzermanBracket`](@ref)
## Reference
In [Lith, Boonkkamp, and
IJzerman](https://doi.org/10.1016/j.amc.2017.09.003) an analysis is
given of the convergence rates when using linear multistep methods to
solve `0=f(x)` using `f⁻¹(0) = x` when `f` is a sufficiently smooth
function. The reformulation, attributed to Grau-Sanchez, finds
a differential equation for `f⁻¹`: `dx/dy = [f⁻¹]′(y) = 1/f′(x) = F`, so that
`x(0) = x₀ + ∫_{y₀}^{0} F(x(y)) dy`.
A linear multi-step method is used to solve this equation
numerically. Let S be the number of memory steps (S= 1,2,...) and D be
the number of derivatives employed, then, with `F(x) = dx/dy`
`x_{n+S} = ∑_{k=0}^{S-1} aₖ x_{n+k} + ∑_{d=1}^{D} ∑_{k=0}^{S-1} bᵈₖ F⁽ᵈ⁾(x_{n+k})`.
The `aₖ`s and `aᵈₖ`s are computed each step.
This table is from Tables 1 and 3 of the paper and gives the
convergence rate for simple roots identified therein:
```
s: number of steps remembered
d: number of derivatives uses
s/d 0 1 2 3 4
1 . 2 3 4 5
2 1.62 2.73 3.79 4.82 5.85
3 1.84 2.91 3.95 4.97 5.98
4 1.92 2.97 3.99 4.99 5.996
5 1.97 . . . .
```
That is, more memory leads to a higher convergence rate; more
derivatives leads to a higher convergence rate. However, the
interval about `α`, the zero, where the convergence rate is guaranteed
may get smaller.
!!! note
For the larger values of `S`, the expressions to compute the next value get quite involved.
The higher convergence rate is likely only to be of help for finding solutions to high precision.
"""
struct LithBoonkkampIJzerman{S,D} <: AbstractNewtonLikeMethod end
LithBoonkkampIJzerman(s::Int, d::Int) = LithBoonkkampIJzerman{s,d}()
fn_argout(::LithBoonkkampIJzerman{S,D}) where {S,D} = 1 + D
struct LithBoonkkampIJzermanState{S′,D⁺,T,S} <: AbstractUnivariateZeroState{T,S}
xn1::T
xn0::T
m::NTuple{S′,T}
fxn1::S
fxn0::S
fm::NTuple{D⁺,NTuple{S′,S}}
end
log_step(l::Tracks, M::LithBoonkkampIJzerman, state; init=false) =
log_step(l, Secant(), state; init=init)
# return f^(i-1)(x) for i in 0:N-1; not the same as default eval call
function evalf(
F::Callable_Function{S,T,𝑭,P},
x,
) where {N,S<:Val{N},T<:Val{true},𝑭,P<:Nothing}
fi = map(f -> f(x), F.f) #recommended on Slack to not allocate
R = typeof(float(first(fi)))
convert(NTuple{N,R}, fi)
end
function evalf(F::Callable_Function{S,T,𝑭,P}, x) where {N,S<:Val{N},T<:Val{true},𝑭,P}
fi = map(f -> f(x, F.p), F.f)
R = typeof(float(first(fi)))
convert(NTuple{N,R}, fi)
end
#specializations for N = 1,2,3,4,5,6
## lmm(::Roots.LithBoonkkampIJzerman{1, D}) is defined up until D = 6, so specialize those
function evalf(F::Callable_Function{S,T,𝑭,P}, x) where {S<:Val{1},T<:Val{false},𝑭,P}
F(x)
end
function evalf(F::Callable_Function{S,T,𝑭,P}, x) where {S<:Val{2},T<:Val{false},𝑭,P}
f, Δ = F(x)
f′ = f / Δ
(f, f′)
end
function evalf(F::Callable_Function{S,T,𝑭,P}, x) where {S<:Val{3},T<:Val{false},𝑭,P}
f, Δ = F(x)
Δ₁, Δ₂ = Δ
f′ = f / Δ₁
f′′ = f′ / Δ₂
(f, f′, f′′)
end
function evalf(F::Callable_Function{S,T,𝑭,P}, x) where {S<:Val{4},T<:Val{false},𝑭,P}
f, Δ = F(x)
Δ₁, Δ₂, Δ₃ = Δ
f′ = f / Δ₁
f′′ = f′ / Δ₂
f′′′ = f′′ / Δ₃
(f, f′, f′′, f′′′)
end
function evalf(F::Callable_Function{S,T,𝑭,P}, x) where {S<:Val{5},T<:Val{false},𝑭,P}
f, Δ = F(x)
Δ₁, Δ₂, Δ₃, Δ₄ = Δ
f′ = f / Δ₁
f′′ = f′ / Δ₂
f′′′ = f′′ / Δ₃
f′′′′ = f′′′ / Δ₄
(f, f′, f′′, f′′′, f′′′′)
end
function evalf(F::Callable_Function{S,T,𝑭,P}, x) where {S<:Val{6},T<:Val{false},𝑭,P}
f, Δ = F(x)
Δ₁, Δ₂, Δ₃, Δ₄, Δ₅ = Δ
f′ = f / Δ₁
f′′ = f′ / Δ₂
f′′′ = f′′ / Δ₃
f′′′′ = f′′′ / Δ₄
f′′′′′ = f′′′′ / Δ₅
(f, f′, f′′, f′′′, f′′′′, f′′′′′)
end
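#=
Note on the convention used above (descriptive only): for `D ≥ 1` the
`Callable_Function` returns the ratios Δₖ = f⁽ᵏ⁻¹⁾/f⁽ᵏ⁾, so `evalf`
unwinds them successively -- f′ = f/Δ₁, f″ = f′/Δ₂, and so on -- to
recover (f, f′, …, f⁽ᴰ⁾).
=#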
#function to obtain just the first value. optimized in case of tuple function
function only_f(F::Callable_Function{S,T,𝑭,P}, x) where {N,S<:Val{N},T<:Val{true},𝑭,P}
return F.f[1](x, F.p)
end
function only_f(
F::Callable_Function{S,T,𝑭,P},
x,
) where {N,S<:Val{N},T<:Val{true},𝑭,P<:Nothing}
return F.f[1](x)
end
function only_f(F::Callable_Function{S,T,𝑭,P}, x) where {N,S<:Val{N},T<:Val{false},𝑭,P}
return first(F(x))
end
function init_state(L::LithBoonkkampIJzerman{S,0}, F::Callable_Function, x) where {S}
x₀, x₁ = x₀x₁(x)
fx₀, fx₁ = only_f(F, x₀), only_f(F, x₁)
state = init_state(L, F, x₀, x₁, fx₀, fx₁, nothing)
end
function init_state(L::LithBoonkkampIJzerman{S,D}, F::Callable_Function, x) where {S,D}
x₀ = float(first(x))
ys₀ = evalf(F, x₀)
fx₀ = first(ys₀)
state = init_state(L, F, nan(x₀), x₀, nan(fx₀), fx₀, ys₀)
end
function init_state(
L::LithBoonkkampIJzerman{S,D},
F,
x₀,
x₁::R,
fx₀,
fx₁::T,
ys₀,
) where {S,D,R,T}
xs, ys = init_lith(L, F, x₁, fx₁, x₀, fx₀, ys₀) # [x₀,x₁,…,xₛ₋₁], ...
# skip unit consideration here, as won't fit within storage of ys
state = LithBoonkkampIJzermanState{S,D + 1,R,T}(
xs[end], # xₙ
S > 1 ? xs[end - 1] : nan(xs[end]), # xₙ₋₁
xs, # all xs
ys[1][end], # fₙ
S > 1 ? ys[1][end - 1] : nan(ys[1]), # fₙ₋₁
ys, #
)
state
end
initial_fncalls(::LithBoonkkampIJzerman{S,D}) where {S,D} = S * (D + 1)
function update_state(
L::LithBoonkkampIJzerman{S,D},
F::Callable_Function,
o::LithBoonkkampIJzermanState{S⁺,D′,R,T},
options,
l=NullTracks(),
) where {S,D,S⁺,D′,R,T}
xs, ys = o.m, o.fm
xᵢ::R = lmm(L, xs, ys...)
isissue(o.xn1 - xᵢ) && return (o, true)
for i in 1:(S - 1)
@reset xs[i] = xs[i + 1]
end
@reset xs[end] = xᵢ
ysᵢ = evalf(F, xᵢ)
for i in 0:D
i′ = i + 1
for j in 1:(S - 1)
@reset ys[i′][j] = ys[i′][j + 1]
end
yij::T = ysᵢ[i′]
@reset ys[i′][end] = yij
end
incfn(l, 1 + D)
@reset o.xn0 = o.xn1
@reset o.xn1 = xᵢ
@reset o.fxn0 = o.fxn1
@reset o.fxn1 = ys[1][end]
@reset o.m = xs
@reset o.fm = ys
return (o, false)
end
# manufacture initial xs, ys
# use lower memory terms to boot strap up. Secant uses initial default step
# D=0: generate x0 (if not supplied), x1, …, x_{S-1}
function init_lith(
L::LithBoonkkampIJzerman{S,0},
F::Callable_Function{Si,Tup,𝑭,P},
x₁::R,
fx₁::T,
x₀::R,
fx₀::T,
ys₀,
) where {S,Si,Tup,𝑭,P,R,T}
xs = NTuple{S,R}(ntuple(_ -> one(R), Val(S)))
yᵢ = NTuple{S,T}(ntuple(_ -> one(T), Val(S)))
ys = NTuple{1,NTuple{S,T}}((yᵢ,))
# build up to get S of them
x0::R = zero(R)
if isnan(x₀)
x0 = _default_secant_step(x₁)
fx0::T = only_f(F, x0)
else
x0, fx0 = x₀, fx₀
end
@reset xs[1] = x0
@reset xs[2] = x₁
@reset ys[1][1] = fx0
@reset ys[1][2] = fx₁
# build up xs, ys
# redundant code, but here to avoid allocations
S < 3 && return (xs, ys)
xᵢ = lmm(Val(2), Val(0), xs, ys)
y1i = only_f(F, xᵢ)
@reset xs[3] = xᵢ
@reset ys[1][3] = y1i
S < 4 && return (xs, ys)
xᵢ = lmm(Val(3), Val(0), xs, ys)
y1i = only_f(F, xᵢ)
@reset xs[4] = xᵢ
@reset ys[1][4] = y1i
S < 5 && return (xs, ys)
xᵢ = lmm(Val(4), Val(0), xs, ys)
y1i = only_f(F, xᵢ)
@reset xs[5] = xᵢ
@reset ys[1][5] = y1i
S < 6 && return (xs, ys)
xᵢ = lmm(Val(5), Val(0), xs, ys)
y1i = only_f(F, xᵢ)
@reset xs[6] = xᵢ
@reset ys[1][6] = y1i
for i in 7:S #3:S
xᵢ::R = lmm(Val(i - 1), Val(0), xs, ys) # XXX allocates due to runtime i-1
y1i::T = only_f(F, xᵢ)
@reset xs[i] = xᵢ
@reset ys[1][i] = y1i
end
xs, ys
end
#D≥1. ignore x₀
function init_lith(
L::LithBoonkkampIJzerman{S,D},
F::Callable_Function{Si,Tup,𝑭,P},
x₁::R,
fx₁::T,
x₀::R,
fx₀::T,
ys₀,
) where {S,D,Si,Tup,𝑭,P,R,T}
xs = NTuple{S,R}(ntuple(_ -> one(R), Val(S)))
yᵢ = NTuple{S,T}(ntuple(_ -> one(T), Val(S)))
ys = NTuple{D + 1,NTuple{S,T}}(ntuple(_ -> yᵢ, Val(D + 1)))
@reset xs[1] = x₁
for j in 1:(D + 1)
@reset ys[j][1] = ys₀[j]
end
# build up to get S of them
# redundant code, but here to avoid allocations
S < 2 && return xs, ys
xᵢ = lmm(Val(1), Val(D), xs, ys)
@reset xs[2] = xᵢ
ysᵢ = evalf(F, xᵢ)
for j in 1:(D + 1)
@reset ys[j][2] = ysᵢ[j]
end
S < 3 && return xs, ys
xᵢ = lmm(Val(2), Val(D), xs, ys)
@reset xs[3] = xᵢ
ysᵢ = evalf(F, xᵢ)
for j in 1:(D + 1)
@reset ys[j][3] = ysᵢ[j]
end
S < 4 && return xs, ys
xᵢ = lmm(Val(3), Val(D), xs, ys)
@reset xs[4] = xᵢ
ysᵢ = evalf(F, xᵢ)
for j in 1:(D + 1)
@reset ys[j][4] = ysᵢ[j]
end
for i in 5:S
xᵢ::R = lmm(Val(i - 1), Val(D), xs, ys) # XXX allocates! clean up
@reset xs[i] = xᵢ
ysᵢ = evalf(F, xᵢ)
for j in 1:(D + 1)
@reset ys[j][i] = ysᵢ[j]
end
end
return xs, ys
end
"""
LithBoonkkampIJzermanBracket()
A bracketing method which is a modification of Brent's method due to
[Lith, Boonkkamp, and
IJzerman](https://doi.org/10.1016/j.amc.2017.09.003). The best
possible convergence rate is 2.91.
A function, its derivative, and a bracketing interval need to be specified.
The state includes the 3 points -- a bracket `[a,b]` (`b=xₙ` has
`f(b)` closest to `0`) and `c=xₙ₋₁` -- and the corresponding values
for the function and its derivative at these three points.
The next proposed step is either a `S=2` or `S=3` selection for the
[`LithBoonkkampIJzerman`](@ref) methods with derivative information
included only if it would be of help. The proposed is modified if it
is dithering. The proposed is compared against a bisection step; the
one in the bracket and with the smaller function value is chosen as
the next step.
"""
struct LithBoonkkampIJzermanBracket <: AbstractBracketingMethod end
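#=
Usage sketch (illustrative): the method expects `f`, `f′`, and a bracket:

    find_zero((sin, cos), (3, 4), Roots.LithBoonkkampIJzermanBracket())  # ≈ π
=#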
struct LithBoonkkampIJzermanBracketState{T,S,R} <: AbstractUnivariateZeroState{T,S}
xn1::T
xn0::T
c::T
fxn1::S
fxn0::S
fc::S
fp1::R
fp0::R
fpc::R
end
fn_argout(::LithBoonkkampIJzermanBracket) = 2
function init_state(M::LithBoonkkampIJzermanBracket, F::Callable_Function, x)
x₀, x₁ = adjust_bracket(x)
fx₀, Δfx₀ = F(x₀)
fx₁, Δfx₁ = F(x₁)
a, b, fa, fb, f′a, f′b = x₀, x₁, fx₀, fx₁, fx₀ / Δfx₀, fx₁ / Δfx₁
if abs(fa) < abs(fb)
a, b, fa, fb = b, a, fb, fa
end
assert_bracket(fa, fb)
c, fc, f′c = a, fa, f′a
# skip unit consideration here, as won't fit within storage of ys
state = LithBoonkkampIJzermanBracketState(
promote(b, a, c)..., # xₙ, xₙ₋₁
promote(fb, fa, fc)..., # fₙ, fₙ₋₁
promote(f′b, f′a, f′c)...,
)
state
end
function update_state(
M::LithBoonkkampIJzermanBracket,
F,
state::LithBoonkkampIJzermanBracketState{T,S,R},
options,
l=NullTracks(),
) where {T,S,R}
b::T, c::T, a::T = state.xn1, state.c, state.xn0
fb::S, fc::S, fa::S = state.fxn1, state.fc, state.fxn0
f′a::R, f′c::R, f′b::R = state.fp0, state.fpc, state.fp1
# Get next interpolating step
# decide on S and D;
# S is 3 if a,b,c are distinct; D=1 unless all derivative info will be of the wrong sign.
s::Int = ((a == c) || (b == c)) ? 2 : 3
# which derivatives do we include
sₘ = sign((fb - fa) / (b - a))
mc, mb = sign(f′c) == sₘ, sign(f′b) == sₘ
d₀::S = zero(S)
if s == 2
if mc || mb
# D = 1
a2s, b2s = lmm_coefficients(LithBoonkkampIJzerman{2,1}(), (c, b), (fc, fb))
h = -fb
d₀ = -sum(a2s .* (c, b))
mb && (d₀ += h * b2s[2] / f′b)
mc && (d₀ += h * b2s[1] / f′c)
else
d₀ = lmm(LithBoonkkampIJzerman{2,0}(), (c, b), (fc, fb))
end
else
ma = sign(f′a) == sₘ
if mc || mb || ma
# D = 1
as, bs = lmm_coefficients(LithBoonkkampIJzerman{3,1}(), (a, c, b), (fa, fc, fb))
h = -fb
d₀ = -sum(as .* (a, c, b))
mb && (d₀ += h * bs[end] / f′b) # only when helpful
mc && (d₀ += h * bs[end - 1] / f′c)
ma && (d₀ += h * bs[end - 2] / f′a)
else
d₀ = lmm(LithBoonkkampIJzerman{3,0}(), (a, c, b), (fa, fc, fb))
end
end
# If the step is smaller than the tolerance, use the tolerance as step size.
xatol, xrtol = options.xabstol, options.xreltol
δ = xatol + abs(b) * xrtol
Δ₀ = b - d₀
if abs(Δ₀) <= δ
d₀ = b - sign(Δ₀) * δ
end
# compare to bisection step; extra function evaluation
d₁ = a + (b - a) * (0.5) #_middle(a, b)
f₀, Δf₀ = F(d₀)
f₁, Δf₁ = F(d₁)
# if the interpolation step falls outside (a,b), or bisection has the smaller function value, use bisection
d::T, fd::S, f′d::S = zero(T), zero(S), zero(S)
if (abs(f₀) < abs(f₁)) && (min(a, b) < d₀ < max(a, b))
d, fd, f′d = d₀, f₀, f₀ / Δf₀# interp
else
d, fd, f′d = d₁, f₁, f₁ / Δf₁# bisection
end
# either [a,d] a bracket or [d,b]
# [a < d] < b ...c -- b -> d, c-> b (update?)
# a < [d < b] ...c -- a -> d (update?)
if sign(fa) * sign(fd) <= 0
c, fc, f′c = b, fb, f′b
b, fb, f′b = d, fd, f′d
else
a, fa, f′a = d, fd, f′d
end
# a,b bracket; keep |fb| ≤ |fa|
if abs(fa) < abs(fb)
c, fc, f′c = b, fb, f′b
a, b, fa, fb, f′a, f′b = b, a, fb, fa, f′b, f′a
end
incfn(l, 3)
@reset state.xn1 = b
@reset state.xn0 = a
@reset state.c = c
@reset state.fxn1 = fb
@reset state.fxn0 = fa
@reset state.fc = fc
@reset state.fp0 = f′a
@reset state.fpc = f′c
@reset state.fp1 = f′b
return (state, false)
end
function default_tolerances(
::M,
::Type{T},
::Type{S},
) where {M<:LithBoonkkampIJzermanBracket,T,S}
xatol = 2eps(T)
xrtol = zero(one(T))
atol = zero(float(one(S))) * oneunit(S)
rtol = 2eps(float(one(S))) * one(S)
maxevals = typemax(Int)
strict = true
(xatol, xrtol, atol, rtol, maxevals, strict)
end
### ------
# Script used to generate expressions
#
# Finds expressions by assuming an interpolating polynomial
# goes through the points. From (20) in the paper
# some cases take a **long** time to run.
# At some point, early on, this gets to be more of an academic exercise than
# a performant solution
#= -------
using SymPy
# see Ansatz (20) on p10
function inverse_polynomial_interpretation(s=2,d=2)
@vars y
hs = [Sym("h$i") for i ∈ 0:(1+d)*s-1]
xs = [Sym("x$i") for i ∈ 0:s-1]
fs = [Sym("f$i") for i ∈ 0:s-1]
f′s = [Sym("f′$i") for i ∈ 0:s-1]
f′′s = [Sym("f′′$i") for i ∈ 0:s-1]
f′′′s = [Sym("f′′′$i") for i ∈ 0:s-1]
f′′′′s = [Sym("f′′′′$i") for i ∈ 0:s-1]
f′′′′′s = [Sym("f′′′′′$i") for i ∈ 0:s-1]
f′′′′′′s = [Sym("f′′′′′′$i") for i ∈ 0:s-1]
h0 = first(hs)
H(y) = sum(hs[i+1]*(y-fs[s])^i for i ∈ 0:(1+d)*s-1)
Hⁱ = H⁰ = H(y)
Hⁱs = eltype(H⁰)[]
for i ∈ 1:d
Hⁱ = diff(Hⁱ,y)
push!(Hⁱs, Hⁱ)
end
eqs = Sym[subs(H(fs[i]), Dict(h0=>xs[s])) - xs[i] for i ∈ 1:s-1]
for i ∈ 1:s
# cf. Liptag
f1,f2,f3,f4,f5,f6 = f′s[i],f′′s[i],f′′′s[i],f′′′′s[i],f′′′′′s[i],f′′′′′′s[i]
g′ = 1/f1
g′′ = -f2/f1^3
g′′′ = (3*f2^2 - f1*f3)/(f1^5)
g′′′′ = -(15*f2^3 - 10*f1*f2*f3 + f1^2*f4)/f1^7
g′′′′′ = (105*f2^4 -105*f1*f2^2*f3 + 10*f1^2*f3^2 + 15*f1^2*f2*f4 -f1^3*f5)/f1^9
g′′′′′′ = (-f1^4*f6 + 21*f1^3*f2*f5 + 35*f1^3*f3*f4 - 210*f1^2*f2^2*f4 - 280*f1^2*f2*f3^2 + 1260*f1*f2^3*f3 - 945*f2^5)/f1^11
gⁱs = [g′,g′′,g′′′, g′′′′,g′′′′′,g′′′′′′]
for j ∈ 1:d
push!(eqs, subs(Hⁱs[j], Dict(y=>fs[i], h0=>xs[s])) - gⁱs[j])
end
end
ϕ = sympy.linsolve(eqs, hs[2:end]...)
ϕ = first(elements(ϕ))
ϕ = Sym.(convert(Tuple, ϕ.__pyobject__))
D = Dict{Any,Any}(h0=>xs[s])
for i in 1:(d+1)*s-1
D[hs[i+1]] = ϕ[i]
end
subs(H(0), D) |> simplify
end
# For g = f⁻¹ return [g', g'', g''',..., g⁽ⁿ⁾]
# (cf [Liptaj](https://vixra.org/pdf/1703.0295v1.pdf)
function liptag(N)
@vars x₀ Δₓ
fs = [Sym("f$i") for i ∈ 1:N]
gs = [Sym("g$i") for i ∈ 1:N]
a(i) = fs[i]/factorial(i)
b(i) = gs[i]/factorial(i)
gᵏs = [1/fs[1]]
for n ∈ 2:N
Δy = sum(a(j) * Δₓ^j for j ∈ 1:n)
l = x₀ + Δₓ
r = x₀ + sum(b(i)*Δy^i for i ∈ 1:n)
ϕ = solve(l-r, gs[n])[1]
for j ∈ 1:n-1
ϕ = subs(ϕ, gs[j] => gᵏs[j])
end
L = limit(ϕ, Δₓ => 0)
push!(gᵏs, L)
end
gᵏs
end
=#
# have computed these
# S/D 0 1 2 3 4 5 6
# 1 x ✓ ✓ ✓ ✓ ✓ ✓
# 2 ✓ ✓ ✓ ✓ ✓ ✓ ✓
# 3 ✓ ✓ - - x x x
# 4 ✓ - x x x x x
# 5 ✓ x x x x x x
# 6 - x x x x x x
# - can be found with script, but answers are too long for
# inclusion here
## We have two means to do this:
## Using coefficients as,bs, ... returned by lmm_coefficients
## x = ∑ᵢ aᵢxᵢ + ∑ⱼ₌₁ᴰ ∑ᵢ bʲᵢ Fʲ(xᵢ), where Fʲ involves the jth derivative of f⁻¹ (F¹ = 1/f′, ...)
## Using a polynomial interpolant, H(y), going through (xᵢ, fʲ(xᵢ)), j ∈ 0:N
function lmm(::Val{S}, ::Val{D}, xs, ys) where {S,D}
xi = ntuple(ii -> xs[ii], Val(S))
yi = ntuple(ii -> ntuple(j -> ys[ii][j], Val(S)), Val(D + 1))
lmm(LithBoonkkampIJzerman{S,D}(), xi, yi...)
end
# secant
function lmm(::LithBoonkkampIJzerman{2,0}, xs, fs)
x0, x1 = xs
f0, f1 = fs
(f0 * x1 - f1 * x0) / (f0 - f1)
end
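#=
Illustrative check (a sketch): the `S=2, D=0` formula above is the secant
step written in inverse-interpolation form:

    lmm(LithBoonkkampIJzerman{2,0}(), (3.0, 3.1), (sin(3.0), sin(3.1)))
    # ≈ 3.1418, one secant step toward π
=#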
function lmm(::LithBoonkkampIJzerman{3,0}, xs, fs)
x0, x1, x2 = xs
f0, f1, f2 = fs
(
f0^2 * f1 * x2 - f0^2 * f2 * x1 - f0 * f1^2 * x2 + f0 * f2^2 * x1 + f1^2 * f2 * x0 -
f1 * f2^2 * x0
) / (f0^2 * f1 - f0^2 * f2 - f0 * f1^2 + f0 * f2^2 + f1^2 * f2 - f1 * f2^2)
end
function lmm(::LithBoonkkampIJzerman{4,0}, xs, fs)
x0, x1, x2, x3 = xs
f0, f1, f2, f3 = fs
(
f0^3 * f1^2 * f2 * x3 - f0^3 * f1^2 * f3 * x2 - f0^3 * f1 * f2^2 * x3 +
f0^3 * f1 * f3^2 * x2 +
f0^3 * f2^2 * f3 * x1 - f0^3 * f2 * f3^2 * x1 - f0^2 * f1^3 * f2 * x3 +
f0^2 * f1^3 * f3 * x2 +
f0^2 * f1 * f2^3 * x3 - f0^2 * f1 * f3^3 * x2 - f0^2 * f2^3 * f3 * x1 +
f0^2 * f2 * f3^3 * x1 +
f0 * f1^3 * f2^2 * x3 - f0 * f1^3 * f3^2 * x2 - f0 * f1^2 * f2^3 * x3 +
f0 * f1^2 * f3^3 * x2 +
f0 * f2^3 * f3^2 * x1 - f0 * f2^2 * f3^3 * x1 - f1^3 * f2^2 * f3 * x0 +
f1^3 * f2 * f3^2 * x0 +
f1^2 * f2^3 * f3 * x0 - f1^2 * f2 * f3^3 * x0 - f1 * f2^3 * f3^2 * x0 +
f1 * f2^2 * f3^3 * x0
) / (
f0^3 * f1^2 * f2 - f0^3 * f1^2 * f3 - f0^3 * f1 * f2^2 +
f0^3 * f1 * f3^2 +
f0^3 * f2^2 * f3 - f0^3 * f2 * f3^2 - f0^2 * f1^3 * f2 +
f0^2 * f1^3 * f3 +
f0^2 * f1 * f2^3 - f0^2 * f1 * f3^3 - f0^2 * f2^3 * f3 +
f0^2 * f2 * f3^3 +
f0 * f1^3 * f2^2 - f0 * f1^3 * f3^2 - f0 * f1^2 * f2^3 +
f0 * f1^2 * f3^3 +
f0 * f2^3 * f3^2 - f0 * f2^2 * f3^3 - f1^3 * f2^2 * f3 +
f1^3 * f2 * f3^2 +
f1^2 * f2^3 * f3 - f1^2 * f2 * f3^3 - f1 * f2^3 * f3^2 + f1 * f2^2 * f3^3
)
end
function lmm(::LithBoonkkampIJzerman{5,0}, xs, fs)
x0, x1, x2, x3, x4 = xs
f0, f1, f2, f3, f4 = fs
(
f0^4 * f1^3 * f2^2 * f3 * x4 - f0^4 * f1^3 * f2^2 * f4 * x3 -
f0^4 * f1^3 * f2 * f3^2 * x4 +
f0^4 * f1^3 * f2 * f4^2 * x3 +
f0^4 * f1^3 * f3^2 * f4 * x2 - f0^4 * f1^3 * f3 * f4^2 * x2 -
f0^4 * f1^2 * f2^3 * f3 * x4 +
f0^4 * f1^2 * f2^3 * f4 * x3 +
f0^4 * f1^2 * f2 * f3^3 * x4 - f0^4 * f1^2 * f2 * f4^3 * x3 -
f0^4 * f1^2 * f3^3 * f4 * x2 +
f0^4 * f1^2 * f3 * f4^3 * x2 +
f0^4 * f1 * f2^3 * f3^2 * x4 - f0^4 * f1 * f2^3 * f4^2 * x3 -
f0^4 * f1 * f2^2 * f3^3 * x4 +
f0^4 * f1 * f2^2 * f4^3 * x3 +
f0^4 * f1 * f3^3 * f4^2 * x2 - f0^4 * f1 * f3^2 * f4^3 * x2 -
f0^4 * f2^3 * f3^2 * f4 * x1 +
f0^4 * f2^3 * f3 * f4^2 * x1 +
f0^4 * f2^2 * f3^3 * f4 * x1 - f0^4 * f2^2 * f3 * f4^3 * x1 -
f0^4 * f2 * f3^3 * f4^2 * x1 + f0^4 * f2 * f3^2 * f4^3 * x1 -
f0^3 * f1^4 * f2^2 * f3 * x4 +
f0^3 * f1^4 * f2^2 * f4 * x3 +
f0^3 * f1^4 * f2 * f3^2 * x4 - f0^3 * f1^4 * f2 * f4^2 * x3 -
f0^3 * f1^4 * f3^2 * f4 * x2 +
f0^3 * f1^4 * f3 * f4^2 * x2 +
f0^3 * f1^2 * f2^4 * f3 * x4 - f0^3 * f1^2 * f2^4 * f4 * x3 -
f0^3 * f1^2 * f2 * f3^4 * x4 +
f0^3 * f1^2 * f2 * f4^4 * x3 +
f0^3 * f1^2 * f3^4 * f4 * x2 - f0^3 * f1^2 * f3 * f4^4 * x2 -
f0^3 * f1 * f2^4 * f3^2 * x4 +
f0^3 * f1 * f2^4 * f4^2 * x3 +
f0^3 * f1 * f2^2 * f3^4 * x4 - f0^3 * f1 * f2^2 * f4^4 * x3 -
f0^3 * f1 * f3^4 * f4^2 * x2 +
f0^3 * f1 * f3^2 * f4^4 * x2 +
f0^3 * f2^4 * f3^2 * f4 * x1 - f0^3 * f2^4 * f3 * f4^2 * x1 -
f0^3 * f2^2 * f3^4 * f4 * x1 +
f0^3 * f2^2 * f3 * f4^4 * x1 +
f0^3 * f2 * f3^4 * f4^2 * x1 - f0^3 * f2 * f3^2 * f4^4 * x1 +
f0^2 * f1^4 * f2^3 * f3 * x4 - f0^2 * f1^4 * f2^3 * f4 * x3 -
f0^2 * f1^4 * f2 * f3^3 * x4 +
f0^2 * f1^4 * f2 * f4^3 * x3 +
f0^2 * f1^4 * f3^3 * f4 * x2 - f0^2 * f1^4 * f3 * f4^3 * x2 -
f0^2 * f1^3 * f2^4 * f3 * x4 +
f0^2 * f1^3 * f2^4 * f4 * x3 +
f0^2 * f1^3 * f2 * f3^4 * x4 - f0^2 * f1^3 * f2 * f4^4 * x3 -
f0^2 * f1^3 * f3^4 * f4 * x2 +
f0^2 * f1^3 * f3 * f4^4 * x2 +
f0^2 * f1 * f2^4 * f3^3 * x4 - f0^2 * f1 * f2^4 * f4^3 * x3 -
f0^2 * f1 * f2^3 * f3^4 * x4 +
f0^2 * f1 * f2^3 * f4^4 * x3 +
f0^2 * f1 * f3^4 * f4^3 * x2 - f0^2 * f1 * f3^3 * f4^4 * x2 -
f0^2 * f2^4 * f3^3 * f4 * x1 +
f0^2 * f2^4 * f3 * f4^3 * x1 +
f0^2 * f2^3 * f3^4 * f4 * x1 - f0^2 * f2^3 * f3 * f4^4 * x1 -
f0^2 * f2 * f3^4 * f4^3 * x1 + f0^2 * f2 * f3^3 * f4^4 * x1 -
f0 * f1^4 * f2^3 * f3^2 * x4 +
f0 * f1^4 * f2^3 * f4^2 * x3 +
f0 * f1^4 * f2^2 * f3^3 * x4 - f0 * f1^4 * f2^2 * f4^3 * x3 -
f0 * f1^4 * f3^3 * f4^2 * x2 +
f0 * f1^4 * f3^2 * f4^3 * x2 +
f0 * f1^3 * f2^4 * f3^2 * x4 - f0 * f1^3 * f2^4 * f4^2 * x3 -
f0 * f1^3 * f2^2 * f3^4 * x4 +
f0 * f1^3 * f2^2 * f4^4 * x3 +
f0 * f1^3 * f3^4 * f4^2 * x2 - f0 * f1^3 * f3^2 * f4^4 * x2 -
f0 * f1^2 * f2^4 * f3^3 * x4 +
f0 * f1^2 * f2^4 * f4^3 * x3 +
f0 * f1^2 * f2^3 * f3^4 * x4 - f0 * f1^2 * f2^3 * f4^4 * x3 -
f0 * f1^2 * f3^4 * f4^3 * x2 +
f0 * f1^2 * f3^3 * f4^4 * x2 +
f0 * f2^4 * f3^3 * f4^2 * x1 - f0 * f2^4 * f3^2 * f4^3 * x1 -
f0 * f2^3 * f3^4 * f4^2 * x1 +
f0 * f2^3 * f3^2 * f4^4 * x1 +
f0 * f2^2 * f3^4 * f4^3 * x1 - f0 * f2^2 * f3^3 * f4^4 * x1 +
f1^4 * f2^3 * f3^2 * f4 * x0 - f1^4 * f2^3 * f3 * f4^2 * x0 -
f1^4 * f2^2 * f3^3 * f4 * x0 +
f1^4 * f2^2 * f3 * f4^3 * x0 +
f1^4 * f2 * f3^3 * f4^2 * x0 - f1^4 * f2 * f3^2 * f4^3 * x0 -
f1^3 * f2^4 * f3^2 * f4 * x0 +
f1^3 * f2^4 * f3 * f4^2 * x0 +
f1^3 * f2^2 * f3^4 * f4 * x0 - f1^3 * f2^2 * f3 * f4^4 * x0 -
f1^3 * f2 * f3^4 * f4^2 * x0 +
f1^3 * f2 * f3^2 * f4^4 * x0 +
f1^2 * f2^4 * f3^3 * f4 * x0 - f1^2 * f2^4 * f3 * f4^3 * x0 -
f1^2 * f2^3 * f3^4 * f4 * x0 +
f1^2 * f2^3 * f3 * f4^4 * x0 +
f1^2 * f2 * f3^4 * f4^3 * x0 - f1^2 * f2 * f3^3 * f4^4 * x0 -
f1 * f2^4 * f3^3 * f4^2 * x0 +
f1 * f2^4 * f3^2 * f4^3 * x0 +
f1 * f2^3 * f3^4 * f4^2 * x0 - f1 * f2^3 * f3^2 * f4^4 * x0 -
f1 * f2^2 * f3^4 * f4^3 * x0 + f1 * f2^2 * f3^3 * f4^4 * x0
) / (
f0^4 * f1^3 * f2^2 * f3 - f0^4 * f1^3 * f2^2 * f4 - f0^4 * f1^3 * f2 * f3^2 +
f0^4 * f1^3 * f2 * f4^2 +
f0^4 * f1^3 * f3^2 * f4 - f0^4 * f1^3 * f3 * f4^2 - f0^4 * f1^2 * f2^3 * f3 +
f0^4 * f1^2 * f2^3 * f4 +
f0^4 * f1^2 * f2 * f3^3 - f0^4 * f1^2 * f2 * f4^3 - f0^4 * f1^2 * f3^3 * f4 +
f0^4 * f1^2 * f3 * f4^3 +
f0^4 * f1 * f2^3 * f3^2 - f0^4 * f1 * f2^3 * f4^2 - f0^4 * f1 * f2^2 * f3^3 +
f0^4 * f1 * f2^2 * f4^3 +
f0^4 * f1 * f3^3 * f4^2 - f0^4 * f1 * f3^2 * f4^3 - f0^4 * f2^3 * f3^2 * f4 +
f0^4 * f2^3 * f3 * f4^2 +
f0^4 * f2^2 * f3^3 * f4 - f0^4 * f2^2 * f3 * f4^3 - f0^4 * f2 * f3^3 * f4^2 +
f0^4 * f2 * f3^2 * f4^3 - f0^3 * f1^4 * f2^2 * f3 +
f0^3 * f1^4 * f2^2 * f4 +
f0^3 * f1^4 * f2 * f3^2 - f0^3 * f1^4 * f2 * f4^2 - f0^3 * f1^4 * f3^2 * f4 +
f0^3 * f1^4 * f3 * f4^2 +
f0^3 * f1^2 * f2^4 * f3 - f0^3 * f1^2 * f2^4 * f4 - f0^3 * f1^2 * f2 * f3^4 +
f0^3 * f1^2 * f2 * f4^4 +
f0^3 * f1^2 * f3^4 * f4 - f0^3 * f1^2 * f3 * f4^4 - f0^3 * f1 * f2^4 * f3^2 +
f0^3 * f1 * f2^4 * f4^2 +
f0^3 * f1 * f2^2 * f3^4 - f0^3 * f1 * f2^2 * f4^4 - f0^3 * f1 * f3^4 * f4^2 +
f0^3 * f1 * f3^2 * f4^4 +
f0^3 * f2^4 * f3^2 * f4 - f0^3 * f2^4 * f3 * f4^2 - f0^3 * f2^2 * f3^4 * f4 +
f0^3 * f2^2 * f3 * f4^4 +
f0^3 * f2 * f3^4 * f4^2 - f0^3 * f2 * f3^2 * f4^4 + f0^2 * f1^4 * f2^3 * f3 -
f0^2 * f1^4 * f2^3 * f4 - f0^2 * f1^4 * f2 * f3^3 +
f0^2 * f1^4 * f2 * f4^3 +
f0^2 * f1^4 * f3^3 * f4 - f0^2 * f1^4 * f3 * f4^3 - f0^2 * f1^3 * f2^4 * f3 +
f0^2 * f1^3 * f2^4 * f4 +
f0^2 * f1^3 * f2 * f3^4 - f0^2 * f1^3 * f2 * f4^4 - f0^2 * f1^3 * f3^4 * f4 +
f0^2 * f1^3 * f3 * f4^4 +
f0^2 * f1 * f2^4 * f3^3 - f0^2 * f1 * f2^4 * f4^3 - f0^2 * f1 * f2^3 * f3^4 +
f0^2 * f1 * f2^3 * f4^4 +
f0^2 * f1 * f3^4 * f4^3 - f0^2 * f1 * f3^3 * f4^4 - f0^2 * f2^4 * f3^3 * f4 +
f0^2 * f2^4 * f3 * f4^3 +
f0^2 * f2^3 * f3^4 * f4 - f0^2 * f2^3 * f3 * f4^4 - f0^2 * f2 * f3^4 * f4^3 +
f0^2 * f2 * f3^3 * f4^4 - f0 * f1^4 * f2^3 * f3^2 +
f0 * f1^4 * f2^3 * f4^2 +
f0 * f1^4 * f2^2 * f3^3 - f0 * f1^4 * f2^2 * f4^3 - f0 * f1^4 * f3^3 * f4^2 +
f0 * f1^4 * f3^2 * f4^3 +
f0 * f1^3 * f2^4 * f3^2 - f0 * f1^3 * f2^4 * f4^2 - f0 * f1^3 * f2^2 * f3^4 +
f0 * f1^3 * f2^2 * f4^4 +
f0 * f1^3 * f3^4 * f4^2 - f0 * f1^3 * f3^2 * f4^4 - f0 * f1^2 * f2^4 * f3^3 +
f0 * f1^2 * f2^4 * f4^3 +
f0 * f1^2 * f2^3 * f3^4 - f0 * f1^2 * f2^3 * f4^4 - f0 * f1^2 * f3^4 * f4^3 +
f0 * f1^2 * f3^3 * f4^4 +
f0 * f2^4 * f3^3 * f4^2 - f0 * f2^4 * f3^2 * f4^3 - f0 * f2^3 * f3^4 * f4^2 +
f0 * f2^3 * f3^2 * f4^4 +
f0 * f2^2 * f3^4 * f4^3 - f0 * f2^2 * f3^3 * f4^4 + f1^4 * f2^3 * f3^2 * f4 -
f1^4 * f2^3 * f3 * f4^2 - f1^4 * f2^2 * f3^3 * f4 +
f1^4 * f2^2 * f3 * f4^3 +
f1^4 * f2 * f3^3 * f4^2 - f1^4 * f2 * f3^2 * f4^3 - f1^3 * f2^4 * f3^2 * f4 +
f1^3 * f2^4 * f3 * f4^2 +
f1^3 * f2^2 * f3^4 * f4 - f1^3 * f2^2 * f3 * f4^4 - f1^3 * f2 * f3^4 * f4^2 +
f1^3 * f2 * f3^2 * f4^4 +
f1^2 * f2^4 * f3^3 * f4 - f1^2 * f2^4 * f3 * f4^3 - f1^2 * f2^3 * f3^4 * f4 +
f1^2 * f2^3 * f3 * f4^4 +
f1^2 * f2 * f3^4 * f4^3 - f1^2 * f2 * f3^3 * f4^4 - f1 * f2^4 * f3^3 * f4^2 +
f1 * f2^4 * f3^2 * f4^3 +
f1 * f2^3 * f3^4 * f4^2 - f1 * f2^3 * f3^2 * f4^4 - f1 * f2^2 * f3^4 * f4^3 +
f1 * f2^2 * f3^3 * f4^4
)
end
function lmm(::LithBoonkkampIJzerman{6,0}, xs, fs)
x0, x1, x2, x3, x4, x5 = xs
f0, f1, f2, f3, f4, f5 = fs
error("not implemented")
end
## d = 1; Newton-like
# return (as, bs⁰,[bs¹,...,bsⁿ⁻¹])
# where the next iterate is -∑ᵢ aᵢ xᵢ + h ⋅ ∑ⱼ (∑ᵢ bsʲᵢ Fʲᵢ), with h = -f(xₛ) and Fᵢ = 1/f′(xᵢ)
function lmm_coefficients(::LithBoonkkampIJzerman{1,1}, xs, fs)
a0 = -one(xs[1])
b0 = one(fs[1])
return (a0,), (b0,)
end
function lmm_coefficients(::LithBoonkkampIJzerman{2,1}, xs, fs)
q = fs[1] / fs[2]
# from the paper
# x2 + a1*x1 + a0*x0 = h3 * (b1 * 1/fp1 + b0 * 1/fp0)
a0 = (1 - 3q) / (q - 1)^3
a1 = -1 - a0
b0 = q / (q - 1)^2
b1 = q * b0
return (a0, a1), (b0, b1)
end
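# sanity check: a0 + a1 = -1 (in general ∑ aᵢ = -1), as a zero α must be a
# fixed point of the update: with h = -f(α) = 0, α = -∑ aᵢ α.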
function lmm_coefficients(::LithBoonkkampIJzerman{3,1}, xs, fs)
# from the paper
q0 = fs[3 - 2] / fs[3]
q1 = fs[3 - 1] / fs[3]
a0 = (q1^2 * (q0 * (3 + 3q1 - 5q0) - q1)) / ((q0 - 1)^3 * (q0 - q1)^3)
a1 = (q0^2 * (q1 * (5q1 - 3q0 - 3) + q0)) / ((q1 - 1)^3 * (q0 - q1)^3)
a2 = (q0^2 * q1^2 * (3q1 - q0 * (q1 - 3) - 5)) / ((q0 - 1)^3 * (q1 - 1)^3) # minor typo in (27c)
b0 = (q0 * q1^2) / ((q0 - 1)^2 * (q0 - q1)^2)
b1 = (q0^2 * q1) / ((q0 - q1)^2 * (q1 - 1)^2)
b2 = (q0^2 * q1^2) / ((q0 - 1)^2 * (q1 - 1)^2)
return (a0, a1, a2), (b0, b1, b2)
end
function lmm_coefficients(::LithBoonkkampIJzerman{S,1}, xs, fs) where {S}
error("not computed")
end
function lmm(L::LithBoonkkampIJzerman{S,1}, xs, fs, f′s) where {S}
as, bs = lmm_coefficients(L, xs, fs)
Fs = 1 ./ f′s # F = (g⁻¹)'
h = -fs[S]
-sum(as[i] * xs[i] for i in 1:S) + h * sum(bs[i] * Fs[i] for i in 1:S)
end
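# For S = 1 this recovers Newton's method: with a₀ = -1 and b₀ = 1 above,
# x₁ = -a₀*x₀ + h*b₀*F₀ = x₀ - f(x₀)/f′(x₀).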
function lmm(::LithBoonkkampIJzerman{4,1}, xs, fs, f′s)
x0, x1, x2, x3 = xs
f0, f1, f2, f3 = fs
f′0, f′1, f′2, f′3 = f′s
# can be generated by script, but the expression, as found, is too long to include
error("not implemented")
end
## d = 2; Halley-like
function lmm(::LithBoonkkampIJzerman{1,2}, xs, fs, f′s, f′′s)
x0 = xs[1]
f0 = fs[1]
f′0 = f′s[1]
f′′0 = f′′s[1]
-f0^2 * f′′0 / (2 * f′0^3) - f0 / f′0 + x0
end
function lmm(::LithBoonkkampIJzerman{2,2}, xs, fs, f′s, f′′s)
x0, x1 = xs
f0, f1 = fs
f′0, f′1 = f′s
f′′0, f′′1 = f′′s
(
-f0^5 * f1^2 * f′0^3 * f′′1 / 2 - f0^5 * f1 * f′0^3 * f′1^2 +
f0^5 * f′0^3 * f′1^3 * x1 +
f0^4 * f1^3 * f′0^3 * f′′1 +
f0^4 * f1^3 * f′1^3 * f′′0 / 2 +
5 * f0^4 * f1^2 * f′0^3 * f′1^2 - 5 * f0^4 * f1 * f′0^3 * f′1^3 * x1 -
f0^3 * f1^4 * f′0^3 * f′′1 / 2 - f0^3 * f1^4 * f′1^3 * f′′0 -
4 * f0^3 * f1^3 * f′0^3 * f′1^2 +
4 * f0^3 * f1^3 * f′0^2 * f′1^3 +
10 * f0^3 * f1^2 * f′0^3 * f′1^3 * x1 +
f0^2 * f1^5 * f′1^3 * f′′0 / 2 - 5 * f0^2 * f1^4 * f′0^2 * f′1^3 -
10 * f0^2 * f1^3 * f′0^3 * f′1^3 * x0 +
f0 * f1^5 * f′0^2 * f′1^3 +
5 * f0 * f1^4 * f′0^3 * f′1^3 * x0 - f1^5 * f′0^3 * f′1^3 * x0
) / (
f′0^3 *
f′1^3 *
(f0^5 - 5 * f0^4 * f1 + 10 * f0^3 * f1^2 - 10 * f0^2 * f1^3 + 5 * f0 * f1^4 - f1^5)
)
end
function lmm(::LithBoonkkampIJzerman{3,2}, xs, fs, f′s, f′′s)
x0, x1, x2 = xs
f0, f1, f2 = fs
f′0, f′1, f′2 = f′s
f′′0, f′′1, f′′2 = f′′s
## can get from script, but too long for inclusion here
error("not implemented")
end
function lmm(::LithBoonkkampIJzerman{4,2}, xs, fs, f′s, f′′s)
error("Not computed")
end
## d = 3
function lmm(::LithBoonkkampIJzerman{1,3}, xs, fs, f′s, f′′s, f′′′s)
x0 = xs[1]
f0 = fs[1]
f′0 = f′s[1]
f′′0 = f′′s[1]
f′′′0 = f′′′s[1]
(
f0^3 * (f′0 * f′′′0 - 3 * f′′0^2) / 6 - f0^2 * f′0^2 * f′′0 / 2 - f0 * f′0^4 +
f′0^5 * x0
) / f′0^5
end
function lmm(::LithBoonkkampIJzerman{2,3}, xs, fs, f′s, f′′s, f′′′s)
x0, x1 = xs
f0, f1 = fs
f′0, f′1 = f′s
f′′0, f′′1 = f′′s
f′′′0, f′′′1 = f′′′s
(
f0^7 * f1^3 * f′0^5 * f′1 * f′′′1 - 3 * f0^7 * f1^3 * f′0^5 * f′′1^2 -
3 * f0^7 * f1^2 * f′0^5 * f′1^2 * f′′1 - 6 * f0^7 * f1 * f′0^5 * f′1^4 +
6 * f0^7 * f′0^5 * f′1^5 * x1 - 3 * f0^6 * f1^4 * f′0^5 * f′1 * f′′′1 +
9 * f0^6 * f1^4 * f′0^5 * f′′1^2 +
f0^6 * f1^4 * f′0 * f′1^5 * f′′′0 - 3 * f0^6 * f1^4 * f′1^5 * f′′0^2 +
21 * f0^6 * f1^3 * f′0^5 * f′1^2 * f′′1 +
42 * f0^6 * f1^2 * f′0^5 * f′1^4 - 42 * f0^6 * f1 * f′0^5 * f′1^5 * x1 +
3 * f0^5 * f1^5 * f′0^5 * f′1 * f′′′1 - 9 * f0^5 * f1^5 * f′0^5 * f′′1^2 -
3 * f0^5 * f1^5 * f′0 * f′1^5 * f′′′0 + 9 * f0^5 * f1^5 * f′1^5 * f′′0^2 -
33 * f0^5 * f1^4 * f′0^5 * f′1^2 * f′′1 - 15 * f0^5 * f1^4 * f′0^2 * f′1^5 * f′′0 -
126 * f0^5 * f1^3 * f′0^5 * f′1^4 + 126 * f0^5 * f1^2 * f′0^5 * f′1^5 * x1 -
f0^4 * f1^6 * f′0^5 * f′1 * f′′′1 +
3 * f0^4 * f1^6 * f′0^5 * f′′1^2 +
3 * f0^4 * f1^6 * f′0 * f′1^5 * f′′′0 - 9 * f0^4 * f1^6 * f′1^5 * f′′0^2 +
15 * f0^4 * f1^5 * f′0^5 * f′1^2 * f′′1 +
33 * f0^4 * f1^5 * f′0^2 * f′1^5 * f′′0 +
90 * f0^4 * f1^4 * f′0^5 * f′1^4 - 90 * f0^4 * f1^4 * f′0^4 * f′1^5 -
210 * f0^4 * f1^3 * f′0^5 * f′1^5 * x1 - f0^3 * f1^7 * f′0 * f′1^5 * f′′′0 +
3 * f0^3 * f1^7 * f′1^5 * f′′0^2 - 21 * f0^3 * f1^6 * f′0^2 * f′1^5 * f′′0 +
126 * f0^3 * f1^5 * f′0^4 * f′1^5 +
210 * f0^3 * f1^4 * f′0^5 * f′1^5 * x0 +
3 * f0^2 * f1^7 * f′0^2 * f′1^5 * f′′0 - 42 * f0^2 * f1^6 * f′0^4 * f′1^5 -
126 * f0^2 * f1^5 * f′0^5 * f′1^5 * x0 +
6 * f0 * f1^7 * f′0^4 * f′1^5 +
42 * f0 * f1^6 * f′0^5 * f′1^5 * x0 - 6 * f1^7 * f′0^5 * f′1^5 * x0
) / (
6 *
f′0^5 *
f′1^5 *
(
f0^7 - 7 * f0^6 * f1 + 21 * f0^5 * f1^2 - 35 * f0^4 * f1^3 + 35 * f0^3 * f1^4 -
21 * f0^2 * f1^5 + 7 * f0 * f1^6 - f1^7
)
)
end
function lmm(::LithBoonkkampIJzerman{3,3}, xs, fs, f′s, f′′s, f′′′s)
x0, x1, x2 = xs
f0, f1, f2 = fs
f′0, f′1, f′2 = f′s
f′′0, f′′1, f′′2 = f′′s
f′′′0, f′′′1, f′′′2 = f′′′s
# can get from script, but too long for inclusion here
error("not implemented")
end
function lmm(::LithBoonkkampIJzerman{4,3}, xs, fs, f′s, f′′s, f′′′s)
x0, x1, x2, x3 = xs
f0, f1, f2, f3 = fs
f′0, f′1, f′2, f′3 = f′s
f′′0, f′′1, f′′2, f′′3 = f′′s
f′′′0, f′′′1, f′′′2, f′′′3 = f′′′s
error("not computed")
end
## d = 4
function lmm(::LithBoonkkampIJzerman{1,4}, xs, fs, f′s, f′′s, f′′′s, f′′′′s)
x0 = xs[1]
f0 = fs[1]
f′0 = f′s[1]
f′′0 = f′′s[1]
f′′′0 = f′′′s[1]
f′′′′0 = f′′′′s[1]
(
-f0^4 * (f′0^2 * f′′′′0 + 10 * f′0 * f′′0 * f′′′0 + 15 * f′′0^2) / 24 +
f0^3 * f′0^2 * (f′0 * f′′′0 - 3 * f′′0^2) / 6 - f0^2 * f′0^4 * f′′0 / 2 -
f0 * f′0^6 + f′0^7 * x0
) / f′0^7
end
function lmm(::LithBoonkkampIJzerman{2,4}, xs, fs, f′s, f′′s, f′′′s, f′′′′s)
x0, x1 = xs
f0, f1 = fs
f′0, f′1 = f′s
f′′0, f′′1 = f′′s
f′′′0, f′′′1 = f′′′s
f′′′′0, f′′′′1 = f′′′′s
(
-f0^9 * f1^4 * f′0^7 * f′1^2 * f′′′′1 -
10 * f0^9 * f1^4 * f′0^7 * f′1 * f′′1 * f′′′1 - 15 * f0^9 * f1^4 * f′0^7 * f′′1^2 +
4 * f0^9 * f1^3 * f′0^7 * f′1^3 * f′′′1 -
12 * f0^9 * f1^3 * f′0^7 * f′1^2 * f′′1^2 -
12 * f0^9 * f1^2 * f′0^7 * f′1^4 * f′′1 - 24 * f0^9 * f1 * f′0^7 * f′1^6 +
24 * f0^9 * f′0^7 * f′1^7 * x1 +
4 * f0^8 * f1^5 * f′0^7 * f′1^2 * f′′′′1 +
40 * f0^8 * f1^5 * f′0^7 * f′1 * f′′1 * f′′′1 +
60 * f0^8 * f1^5 * f′0^7 * f′′1^2 +
f0^8 * f1^5 * f′0^2 * f′1^7 * f′′′′0 +
10 * f0^8 * f1^5 * f′0 * f′1^7 * f′′0 * f′′′0 +
15 * f0^8 * f1^5 * f′1^7 * f′′0^2 - 36 * f0^8 * f1^4 * f′0^7 * f′1^3 * f′′′1 +
108 * f0^8 * f1^4 * f′0^7 * f′1^2 * f′′1^2 +
108 * f0^8 * f1^3 * f′0^7 * f′1^4 * f′′1 +
216 * f0^8 * f1^2 * f′0^7 * f′1^6 - 216 * f0^8 * f1 * f′0^7 * f′1^7 * x1 -
6 * f0^7 * f1^6 * f′0^7 * f′1^2 * f′′′′1 -
60 * f0^7 * f1^6 * f′0^7 * f′1 * f′′1 * f′′′1 - 90 * f0^7 * f1^6 * f′0^7 * f′′1^2 -
4 * f0^7 * f1^6 * f′0^2 * f′1^7 * f′′′′0 -
40 * f0^7 * f1^6 * f′0 * f′1^7 * f′′0 * f′′′0 - 60 * f0^7 * f1^6 * f′1^7 * f′′0^2 +
84 * f0^7 * f1^5 * f′0^7 * f′1^3 * f′′′1 -
252 * f0^7 * f1^5 * f′0^7 * f′1^2 * f′′1^2 -
24 * f0^7 * f1^5 * f′0^3 * f′1^7 * f′′′0 +
72 * f0^7 * f1^5 * f′0^2 * f′1^7 * f′′0^2 -
432 * f0^7 * f1^4 * f′0^7 * f′1^4 * f′′1 - 864 * f0^7 * f1^3 * f′0^7 * f′1^6 +
864 * f0^7 * f1^2 * f′0^7 * f′1^7 * x1 +
4 * f0^6 * f1^7 * f′0^7 * f′1^2 * f′′′′1 +
40 * f0^6 * f1^7 * f′0^7 * f′1 * f′′1 * f′′′1 +
60 * f0^6 * f1^7 * f′0^7 * f′′1^2 +
6 * f0^6 * f1^7 * f′0^2 * f′1^7 * f′′′′0 +
60 * f0^6 * f1^7 * f′0 * f′1^7 * f′′0 * f′′′0 +
90 * f0^6 * f1^7 * f′1^7 * f′′0^2 - 76 * f0^6 * f1^6 * f′0^7 * f′1^3 * f′′′1 +
228 * f0^6 * f1^6 * f′0^7 * f′1^2 * f′′1^2 +
76 * f0^6 * f1^6 * f′0^3 * f′1^7 * f′′′0 -
228 * f0^6 * f1^6 * f′0^2 * f′1^7 * f′′0^2 +
588 * f0^6 * f1^5 * f′0^7 * f′1^4 * f′′1 +
252 * f0^6 * f1^5 * f′0^4 * f′1^7 * f′′0 +
2016 * f0^6 * f1^4 * f′0^7 * f′1^6 - 2016 * f0^6 * f1^3 * f′0^7 * f′1^7 * x1 -
f0^5 * f1^8 * f′0^7 * f′1^2 * f′′′′1 -
10 * f0^5 * f1^8 * f′0^7 * f′1 * f′′1 * f′′′1 - 15 * f0^5 * f1^8 * f′0^7 * f′′1^2 -
4 * f0^5 * f1^8 * f′0^2 * f′1^7 * f′′′′0 -
40 * f0^5 * f1^8 * f′0 * f′1^7 * f′′0 * f′′′0 - 60 * f0^5 * f1^8 * f′1^7 * f′′0^2 +
24 * f0^5 * f1^7 * f′0^7 * f′1^3 * f′′′1 -
72 * f0^5 * f1^7 * f′0^7 * f′1^2 * f′′1^2 -
84 * f0^5 * f1^7 * f′0^3 * f′1^7 * f′′′0 +
252 * f0^5 * f1^7 * f′0^2 * f′1^7 * f′′0^2 -
252 * f0^5 * f1^6 * f′0^7 * f′1^4 * f′′1 -
588 * f0^5 * f1^6 * f′0^4 * f′1^7 * f′′0 - 1344 * f0^5 * f1^5 * f′0^7 * f′1^6 +
1344 * f0^5 * f1^5 * f′0^6 * f′1^7 +
3024 * f0^5 * f1^4 * f′0^7 * f′1^7 * x1 +
f0^4 * f1^9 * f′0^2 * f′1^7 * f′′′′0 +
10 * f0^4 * f1^9 * f′0 * f′1^7 * f′′0 * f′′′0 +
15 * f0^4 * f1^9 * f′1^7 * f′′0^2 +
36 * f0^4 * f1^8 * f′0^3 * f′1^7 * f′′′0 -
108 * f0^4 * f1^8 * f′0^2 * f′1^7 * f′′0^2 +
432 * f0^4 * f1^7 * f′0^4 * f′1^7 * f′′0 - 2016 * f0^4 * f1^6 * f′0^6 * f′1^7 -
3024 * f0^4 * f1^5 * f′0^7 * f′1^7 * x0 - 4 * f0^3 * f1^9 * f′0^3 * f′1^7 * f′′′0 +
12 * f0^3 * f1^9 * f′0^2 * f′1^7 * f′′0^2 -
108 * f0^3 * f1^8 * f′0^4 * f′1^7 * f′′0 +
864 * f0^3 * f1^7 * f′0^6 * f′1^7 +
2016 * f0^3 * f1^6 * f′0^7 * f′1^7 * x0 +
12 * f0^2 * f1^9 * f′0^4 * f′1^7 * f′′0 - 216 * f0^2 * f1^8 * f′0^6 * f′1^7 -
864 * f0^2 * f1^7 * f′0^7 * f′1^7 * x0 +
24 * f0 * f1^9 * f′0^6 * f′1^7 +
216 * f0 * f1^8 * f′0^7 * f′1^7 * x0 - 24 * f1^9 * f′0^7 * f′1^7 * x0
) / (
24 *
f′0^7 *
f′1^7 *
(
f0^9 - 9 * f0^8 * f1 + 36 * f0^7 * f1^2 - 84 * f0^6 * f1^3 + 126 * f0^5 * f1^4 -
126 * f0^4 * f1^5 + 84 * f0^3 * f1^6 - 36 * f0^2 * f1^7 + 9 * f0 * f1^8 - f1^9
)
)
end
function lmm(::LithBoonkkampIJzerman{3,4}, xs, fs, f′s, f′′s, f′′′s, f′′′′s)
x0, x1, x2 = xs
f0, f1, f2 = fs
f′0, f′1, f′2 = f′s
f′′0, f′′1, f′′2 = f′′s
f′′′0, f′′′1, f′′′2 = f′′′s
f′′′′0, f′′′′1, f′′′′2 = f′′′′s
error("not computed")
end
# n = 5
function lmm(::LithBoonkkampIJzerman{1,5}, xs, fs, f′s, f′′s, f′′′s, f′′′′s, f′′′′′s)
x0 = xs[1]
f0 = fs[1]
f′0 = f′s[1]
f′′0 = f′′s[1]
f′′′0 = f′′′s[1]
f′′′′0 = f′′′′s[1]
f′′′′′0 = f′′′′′s[1]
(
f0^5 * (
f′0^3 * f′′′′′0 - 15 * f′0^2 * f′′0 * f′′′′0 - 10 * f′0^2 * f′′′0^2 +
105 * f′0 * f′′0^2 * f′′′0 - 105 * f′′0^4
) - 5 * f0^4 * f′0^2 * (f′0^2 * f′′′′0 + 10 * f′0 * f′′0 * f′′′0 + 15 * f′′0^2) +
20 * f0^3 * f′0^4 * (f′0 * f′′′0 - 3 * f′′0^2) - 60 * f0^2 * f′0^6 * f′′0 -
120 * f0 * f′0^8 + 120 * f′0^9 * x0
) / (120 * f′0^9)
end
function lmm(::LithBoonkkampIJzerman{2,5}, xs, fs, f′s, f′′s, f′′′s, f′′′′s, f′′′′′s)
x0, x1 = xs
f0, f1 = fs
f′0, f′1 = f′s
f′′0, f′′1 = f′′s
f′′′0, f′′′1 = f′′′s
f′′′′0, f′′′′1 = f′′′′s
f′′′′′0, f′′′′′1 = f′′′′′s
(
f0^11 * f1^5 * f′0^9 * f′1^3 * f′′′′′1 -
15 * f0^11 * f1^5 * f′0^9 * f′1^2 * f′′1 * f′′′′1 -
10 * f0^11 * f1^5 * f′0^9 * f′1^2 * f′′′1^2 +
105 * f0^11 * f1^5 * f′0^9 * f′1 * f′′1^2 * f′′′1 -
105 * f0^11 * f1^5 * f′0^9 * f′′1^4 - 5 * f0^11 * f1^4 * f′0^9 * f′1^4 * f′′′′1 -
50 * f0^11 * f1^4 * f′0^9 * f′1^3 * f′′1 * f′′′1 -
75 * f0^11 * f1^4 * f′0^9 * f′1^2 * f′′1^2 +
20 * f0^11 * f1^3 * f′0^9 * f′1^5 * f′′′1 -
60 * f0^11 * f1^3 * f′0^9 * f′1^4 * f′′1^2 -
60 * f0^11 * f1^2 * f′0^9 * f′1^6 * f′′1 - 120 * f0^11 * f1 * f′0^9 * f′1^8 +
120 * f0^11 * f′0^9 * f′1^9 * x1 - 5 * f0^10 * f1^6 * f′0^9 * f′1^3 * f′′′′′1 +
75 * f0^10 * f1^6 * f′0^9 * f′1^2 * f′′1 * f′′′′1 +
50 * f0^10 * f1^6 * f′0^9 * f′1^2 * f′′′1^2 -
525 * f0^10 * f1^6 * f′0^9 * f′1 * f′′1^2 * f′′′1 +
525 * f0^10 * f1^6 * f′0^9 * f′′1^4 +
f0^10 * f1^6 * f′0^3 * f′1^9 * f′′′′′0 -
15 * f0^10 * f1^6 * f′0^2 * f′1^9 * f′′0 * f′′′′0 -
10 * f0^10 * f1^6 * f′0^2 * f′1^9 * f′′′0^2 +
105 * f0^10 * f1^6 * f′0 * f′1^9 * f′′0^2 * f′′′0 -
105 * f0^10 * f1^6 * f′1^9 * f′′0^4 +
55 * f0^10 * f1^5 * f′0^9 * f′1^4 * f′′′′1 +
550 * f0^10 * f1^5 * f′0^9 * f′1^3 * f′′1 * f′′′1 +
825 * f0^10 * f1^5 * f′0^9 * f′1^2 * f′′1^2 -
220 * f0^10 * f1^4 * f′0^9 * f′1^5 * f′′′1 +
660 * f0^10 * f1^4 * f′0^9 * f′1^4 * f′′1^2 +
660 * f0^10 * f1^3 * f′0^9 * f′1^6 * f′′1 +
1320 * f0^10 * f1^2 * f′0^9 * f′1^8 - 1320 * f0^10 * f1 * f′0^9 * f′1^9 * x1 +
10 * f0^9 * f1^7 * f′0^9 * f′1^3 * f′′′′′1 -
150 * f0^9 * f1^7 * f′0^9 * f′1^2 * f′′1 * f′′′′1 -
100 * f0^9 * f1^7 * f′0^9 * f′1^2 * f′′′1^2 +
1050 * f0^9 * f1^7 * f′0^9 * f′1 * f′′1^2 * f′′′1 -
1050 * f0^9 * f1^7 * f′0^9 * f′′1^4 - 5 * f0^9 * f1^7 * f′0^3 * f′1^9 * f′′′′′0 +
75 * f0^9 * f1^7 * f′0^2 * f′1^9 * f′′0 * f′′′′0 +
50 * f0^9 * f1^7 * f′0^2 * f′1^9 * f′′′0^2 -
525 * f0^9 * f1^7 * f′0 * f′1^9 * f′′0^2 * f′′′0 +
525 * f0^9 * f1^7 * f′1^9 * f′′0^4 - 170 * f0^9 * f1^6 * f′0^9 * f′1^4 * f′′′′1 -
1700 * f0^9 * f1^6 * f′0^9 * f′1^3 * f′′1 * f′′′1 -
2550 * f0^9 * f1^6 * f′0^9 * f′1^2 * f′′1^2 -
35 * f0^9 * f1^6 * f′0^4 * f′1^9 * f′′′′0 -
350 * f0^9 * f1^6 * f′0^3 * f′1^9 * f′′0 * f′′′0 -
525 * f0^9 * f1^6 * f′0^2 * f′1^9 * f′′0^2 +
1100 * f0^9 * f1^5 * f′0^9 * f′1^5 * f′′′1 -
3300 * f0^9 * f1^5 * f′0^9 * f′1^4 * f′′1^2 -
3300 * f0^9 * f1^4 * f′0^9 * f′1^6 * f′′1 - 6600 * f0^9 * f1^3 * f′0^9 * f′1^8 +
6600 * f0^9 * f1^2 * f′0^9 * f′1^9 * x1 -
10 * f0^8 * f1^8 * f′0^9 * f′1^3 * f′′′′′1 +
150 * f0^8 * f1^8 * f′0^9 * f′1^2 * f′′1 * f′′′′1 +
100 * f0^8 * f1^8 * f′0^9 * f′1^2 * f′′′1^2 -
1050 * f0^8 * f1^8 * f′0^9 * f′1 * f′′1^2 * f′′′1 +
1050 * f0^8 * f1^8 * f′0^9 * f′′1^4 +
10 * f0^8 * f1^8 * f′0^3 * f′1^9 * f′′′′′0 -
150 * f0^8 * f1^8 * f′0^2 * f′1^9 * f′′0 * f′′′′0 -
100 * f0^8 * f1^8 * f′0^2 * f′1^9 * f′′′0^2 +
1050 * f0^8 * f1^8 * f′0 * f′1^9 * f′′0^2 * f′′′0 -
1050 * f0^8 * f1^8 * f′1^9 * f′′0^4 +
230 * f0^8 * f1^7 * f′0^9 * f′1^4 * f′′′′1 +
2300 * f0^8 * f1^7 * f′0^9 * f′1^3 * f′′1 * f′′′1 +
3450 * f0^8 * f1^7 * f′0^9 * f′1^2 * f′′1^2 +
145 * f0^8 * f1^7 * f′0^4 * f′1^9 * f′′′′0 +
1450 * f0^8 * f1^7 * f′0^3 * f′1^9 * f′′0 * f′′′0 +
2175 * f0^8 * f1^7 * f′0^2 * f′1^9 * f′′0^2 -
2180 * f0^8 * f1^6 * f′0^9 * f′1^5 * f′′′1 +
6540 * f0^8 * f1^6 * f′0^9 * f′1^4 * f′′1^2 +
560 * f0^8 * f1^6 * f′0^5 * f′1^9 * f′′′0 -
1680 * f0^8 * f1^6 * f′0^4 * f′1^9 * f′′0^2 +
9900 * f0^8 * f1^5 * f′0^9 * f′1^6 * f′′1 +
19800 * f0^8 * f1^4 * f′0^9 * f′1^8 - 19800 * f0^8 * f1^3 * f′0^9 * f′1^9 * x1 +
5 * f0^7 * f1^9 * f′0^9 * f′1^3 * f′′′′′1 -
75 * f0^7 * f1^9 * f′0^9 * f′1^2 * f′′1 * f′′′′1 -
50 * f0^7 * f1^9 * f′0^9 * f′1^2 * f′′′1^2 +
525 * f0^7 * f1^9 * f′0^9 * f′1 * f′′1^2 * f′′′1 -
525 * f0^7 * f1^9 * f′0^9 * f′′1^4 - 10 * f0^7 * f1^9 * f′0^3 * f′1^9 * f′′′′′0 +
150 * f0^7 * f1^9 * f′0^2 * f′1^9 * f′′0 * f′′′′0 +
100 * f0^7 * f1^9 * f′0^2 * f′1^9 * f′′′0^2 -
1050 * f0^7 * f1^9 * f′0 * f′1^9 * f′′0^2 * f′′′0 +
1050 * f0^7 * f1^9 * f′1^9 * f′′0^4 - 145 * f0^7 * f1^8 * f′0^9 * f′1^4 * f′′′′1 -
1450 * f0^7 * f1^8 * f′0^9 * f′1^3 * f′′1 * f′′′1 -
2175 * f0^7 * f1^8 * f′0^9 * f′1^2 * f′′1^2 -
230 * f0^7 * f1^8 * f′0^4 * f′1^9 * f′′′′0 -
2300 * f0^7 * f1^8 * f′0^3 * f′1^9 * f′′0 * f′′′0 -
3450 * f0^7 * f1^8 * f′0^2 * f′1^9 * f′′0^2 +
1840 * f0^7 * f1^7 * f′0^9 * f′1^5 * f′′′1 -
5520 * f0^7 * f1^7 * f′0^9 * f′1^4 * f′′1^2 -
1840 * f0^7 * f1^7 * f′0^5 * f′1^9 * f′′′0 +
5520 * f0^7 * f1^7 * f′0^4 * f′1^9 * f′′0^2 -
12240 * f0^7 * f1^6 * f′0^9 * f′1^6 * f′′1 -
5040 * f0^7 * f1^6 * f′0^6 * f′1^9 * f′′0 - 39600 * f0^7 * f1^5 * f′0^9 * f′1^8 +
39600 * f0^7 * f1^4 * f′0^9 * f′1^9 * x1 - f0^6 * f1^10 * f′0^9 * f′1^3 * f′′′′′1 +
15 * f0^6 * f1^10 * f′0^9 * f′1^2 * f′′1 * f′′′′1 +
10 * f0^6 * f1^10 * f′0^9 * f′1^2 * f′′′1^2 -
105 * f0^6 * f1^10 * f′0^9 * f′1 * f′′1^2 * f′′′1 +
105 * f0^6 * f1^10 * f′0^9 * f′′1^4 +
5 * f0^6 * f1^10 * f′0^3 * f′1^9 * f′′′′′0 -
75 * f0^6 * f1^10 * f′0^2 * f′1^9 * f′′0 * f′′′′0 -
50 * f0^6 * f1^10 * f′0^2 * f′1^9 * f′′′0^2 +
525 * f0^6 * f1^10 * f′0 * f′1^9 * f′′0^2 * f′′′0 -
525 * f0^6 * f1^10 * f′1^9 * f′′0^4 +
35 * f0^6 * f1^9 * f′0^9 * f′1^4 * f′′′′1 +
350 * f0^6 * f1^9 * f′0^9 * f′1^3 * f′′1 * f′′′1 +
525 * f0^6 * f1^9 * f′0^9 * f′1^2 * f′′1^2 +
170 * f0^6 * f1^9 * f′0^4 * f′1^9 * f′′′′0 +
1700 * f0^6 * f1^9 * f′0^3 * f′1^9 * f′′0 * f′′′0 +
2550 * f0^6 * f1^9 * f′0^2 * f′1^9 * f′′0^2 -
560 * f0^6 * f1^8 * f′0^9 * f′1^5 * f′′′1 +
1680 * f0^6 * f1^8 * f′0^9 * f′1^4 * f′′1^2 +
2180 * f0^6 * f1^8 * f′0^5 * f′1^9 * f′′′0 -
6540 * f0^6 * f1^8 * f′0^4 * f′1^9 * f′′0^2 +
5040 * f0^6 * f1^7 * f′0^9 * f′1^6 * f′′1 +
12240 * f0^6 * f1^7 * f′0^6 * f′1^9 * f′′0 +
25200 * f0^6 * f1^6 * f′0^9 * f′1^8 - 25200 * f0^6 * f1^6 * f′0^8 * f′1^9 -
55440 * f0^6 * f1^5 * f′0^9 * f′1^9 * x1 - f0^5 * f1^11 * f′0^3 * f′1^9 * f′′′′′0 +
15 * f0^5 * f1^11 * f′0^2 * f′1^9 * f′′0 * f′′′′0 +
10 * f0^5 * f1^11 * f′0^2 * f′1^9 * f′′′0^2 -
105 * f0^5 * f1^11 * f′0 * f′1^9 * f′′0^2 * f′′′0 +
105 * f0^5 * f1^11 * f′1^9 * f′′0^4 - 55 * f0^5 * f1^10 * f′0^4 * f′1^9 * f′′′′0 -
550 * f0^5 * f1^10 * f′0^3 * f′1^9 * f′′0 * f′′′0 -
825 * f0^5 * f1^10 * f′0^2 * f′1^9 * f′′0^2 -
1100 * f0^5 * f1^9 * f′0^5 * f′1^9 * f′′′0 +
3300 * f0^5 * f1^9 * f′0^4 * f′1^9 * f′′0^2 -
9900 * f0^5 * f1^8 * f′0^6 * f′1^9 * f′′0 +
39600 * f0^5 * f1^7 * f′0^8 * f′1^9 +
55440 * f0^5 * f1^6 * f′0^9 * f′1^9 * x0 +
5 * f0^4 * f1^11 * f′0^4 * f′1^9 * f′′′′0 +
50 * f0^4 * f1^11 * f′0^3 * f′1^9 * f′′0 * f′′′0 +
75 * f0^4 * f1^11 * f′0^2 * f′1^9 * f′′0^2 +
220 * f0^4 * f1^10 * f′0^5 * f′1^9 * f′′′0 -
660 * f0^4 * f1^10 * f′0^4 * f′1^9 * f′′0^2 +
3300 * f0^4 * f1^9 * f′0^6 * f′1^9 * f′′0 - 19800 * f0^4 * f1^8 * f′0^8 * f′1^9 -
39600 * f0^4 * f1^7 * f′0^9 * f′1^9 * x0 -
20 * f0^3 * f1^11 * f′0^5 * f′1^9 * f′′′0 +
60 * f0^3 * f1^11 * f′0^4 * f′1^9 * f′′0^2 -
660 * f0^3 * f1^10 * f′0^6 * f′1^9 * f′′0 +
6600 * f0^3 * f1^9 * f′0^8 * f′1^9 +
19800 * f0^3 * f1^8 * f′0^9 * f′1^9 * x0 +
60 * f0^2 * f1^11 * f′0^6 * f′1^9 * f′′0 - 1320 * f0^2 * f1^10 * f′0^8 * f′1^9 -
6600 * f0^2 * f1^9 * f′0^9 * f′1^9 * x0 +
120 * f0 * f1^11 * f′0^8 * f′1^9 +
1320 * f0 * f1^10 * f′0^9 * f′1^9 * x0 - 120 * f1^11 * f′0^9 * f′1^9 * x0
) / (
120 *
f′0^9 *
f′1^9 *
(
f0^11 - 11 * f0^10 * f1 + 55 * f0^9 * f1^2 - 165 * f0^8 * f1^3 +
330 * f0^7 * f1^4 - 462 * f0^6 * f1^5 + 462 * f0^5 * f1^6 - 330 * f0^4 * f1^7 +
165 * f0^3 * f1^8 - 55 * f0^2 * f1^9 + 11 * f0 * f1^10 - f1^11
)
)
end
function lmm(::LithBoonkkampIJzerman{3,5}, xs, fs, f′s, f′′s, f′′′s, f′′′′s, f′′′′′s)
x0, x1, x2 = xs
f0, f1, f2 = fs
f′0, f′1, f′2 = f′s
f′′0, f′′1, f′′2 = f′′s
f′′′0, f′′′1, f′′′2 = f′′′s
f′′′′0, f′′′′1, f′′′′2 = f′′′′s
f′′′′′0, f′′′′′1, f′′′′′2 = f′′′′′s
error("not computed")
end
## n = 6
function lmm(
::LithBoonkkampIJzerman{1,6},
xs,
fs,
f′s,
f′′s,
f′′′s,
f′′′′s,
f′′′′′s,
f′′′′′′s,
)
x0 = xs[1]
f0 = fs[1]
f′0 = f′s[1]
f′′0 = f′′s[1]
f′′′0 = f′′′s[1]
f′′′′0 = f′′′′s[1]
f′′′′′0 = f′′′′′s[1]
f′′′′′′0 = f′′′′′′s[1]
(
f0^6 * (
-f′0^4 * f′′′′′′0 + 21 * f′0^3 * f′′0 * f′′′′′0 + 35 * f′0^3 * f′′′0 * f′′′′0 -
210 * f′0^2 * f′′0^2 * f′′′′0 - 280 * f′0^2 * f′′0 * f′′′0^2 +
1260 * f′0 * f′′0^3 * f′′′0 - 945 * f′′0^5
) +
6 *
f0^5 *
f′0^2 *
(
f′0^3 * f′′′′′0 - 15 * f′0^2 * f′′0 * f′′′′0 - 10 * f′0^2 * f′′′0^2 +
105 * f′0 * f′′0^2 * f′′′0 - 105 * f′′0^4
) - 30 * f0^4 * f′0^4 * (f′0^2 * f′′′′0 + 10 * f′0 * f′′0 * f′′′0 + 15 * f′′0^2) +
120 * f0^3 * f′0^6 * (f′0 * f′′′0 - 3 * f′′0^2) - 360 * f0^2 * f′0^8 * f′′0 -
720 * f0 * f′0^10 + 720 * f′0^11 * x0
) / (720 * f′0^11)
end
function lmm(
::LithBoonkkampIJzerman{2,6},
xs,
fs,
f′s,
f′′s,
f′′′s,
f′′′′s,
f′′′′′s,
f′′′′′′s,
)
x0, x1 = xs
f0, f1 = fs
f′0, f′1 = f′s
f′′0, f′′1 = f′′s
f′′′0, f′′′1 = f′′′s
f′′′′0, f′′′′1 = f′′′′s
f′′′′′0, f′′′′′1 = f′′′′′s
f′′′′′′0, f′′′′′′1 = f′′′′′′s
(
-f0^13 * f1^6 * f′0^11 * f′1^4 * f′′′′′′1 +
21 * f0^13 * f1^6 * f′0^11 * f′1^3 * f′′1 * f′′′′′1 +
35 * f0^13 * f1^6 * f′0^11 * f′1^3 * f′′′1 * f′′′′1 -
210 * f0^13 * f1^6 * f′0^11 * f′1^2 * f′′1^2 * f′′′′1 -
280 * f0^13 * f1^6 * f′0^11 * f′1^2 * f′′1 * f′′′1^2 +
1260 * f0^13 * f1^6 * f′0^11 * f′1 * f′′1^3 * f′′′1 -
945 * f0^13 * f1^6 * f′0^11 * f′′1^5 + 6 * f0^13 * f1^5 * f′0^11 * f′1^5 * f′′′′′1 -
90 * f0^13 * f1^5 * f′0^11 * f′1^4 * f′′1 * f′′′′1 -
60 * f0^13 * f1^5 * f′0^11 * f′1^4 * f′′′1^2 +
630 * f0^13 * f1^5 * f′0^11 * f′1^3 * f′′1^2 * f′′′1 -
630 * f0^13 * f1^5 * f′0^11 * f′1^2 * f′′1^4 -
30 * f0^13 * f1^4 * f′0^11 * f′1^6 * f′′′′1 -
300 * f0^13 * f1^4 * f′0^11 * f′1^5 * f′′1 * f′′′1 -
450 * f0^13 * f1^4 * f′0^11 * f′1^4 * f′′1^2 +
120 * f0^13 * f1^3 * f′0^11 * f′1^7 * f′′′1 -
360 * f0^13 * f1^3 * f′0^11 * f′1^6 * f′′1^2 -
360 * f0^13 * f1^2 * f′0^11 * f′1^8 * f′′1 - 720 * f0^13 * f1 * f′0^11 * f′1^10 +
720 * f0^13 * f′0^11 * f′1^11 * x1 +
6 * f0^12 * f1^7 * f′0^11 * f′1^4 * f′′′′′′1 -
126 * f0^12 * f1^7 * f′0^11 * f′1^3 * f′′1 * f′′′′′1 -
210 * f0^12 * f1^7 * f′0^11 * f′1^3 * f′′′1 * f′′′′1 +
1260 * f0^12 * f1^7 * f′0^11 * f′1^2 * f′′1^2 * f′′′′1 +
1680 * f0^12 * f1^7 * f′0^11 * f′1^2 * f′′1 * f′′′1^2 -
7560 * f0^12 * f1^7 * f′0^11 * f′1 * f′′1^3 * f′′′1 +
5670 * f0^12 * f1^7 * f′0^11 * f′′1^5 +
f0^12 * f1^7 * f′0^4 * f′1^11 * f′′′′′′0 -
21 * f0^12 * f1^7 * f′0^3 * f′1^11 * f′′0 * f′′′′′0 -
35 * f0^12 * f1^7 * f′0^3 * f′1^11 * f′′′0 * f′′′′0 +
210 * f0^12 * f1^7 * f′0^2 * f′1^11 * f′′0^2 * f′′′′0 +
280 * f0^12 * f1^7 * f′0^2 * f′1^11 * f′′0 * f′′′0^2 -
1260 * f0^12 * f1^7 * f′0 * f′1^11 * f′′0^3 * f′′′0 +
945 * f0^12 * f1^7 * f′1^11 * f′′0^5 -
78 * f0^12 * f1^6 * f′0^11 * f′1^5 * f′′′′′1 +
1170 * f0^12 * f1^6 * f′0^11 * f′1^4 * f′′1 * f′′′′1 +
780 * f0^12 * f1^6 * f′0^11 * f′1^4 * f′′′1^2 -
8190 * f0^12 * f1^6 * f′0^11 * f′1^3 * f′′1^2 * f′′′1 +
8190 * f0^12 * f1^6 * f′0^11 * f′1^2 * f′′1^4 +
390 * f0^12 * f1^5 * f′0^11 * f′1^6 * f′′′′1 +
3900 * f0^12 * f1^5 * f′0^11 * f′1^5 * f′′1 * f′′′1 +
5850 * f0^12 * f1^5 * f′0^11 * f′1^4 * f′′1^2 -
1560 * f0^12 * f1^4 * f′0^11 * f′1^7 * f′′′1 +
4680 * f0^12 * f1^4 * f′0^11 * f′1^6 * f′′1^2 +
4680 * f0^12 * f1^3 * f′0^11 * f′1^8 * f′′1 +
9360 * f0^12 * f1^2 * f′0^11 * f′1^10 - 9360 * f0^12 * f1 * f′0^11 * f′1^11 * x1 -
15 * f0^11 * f1^8 * f′0^11 * f′1^4 * f′′′′′′1 +
315 * f0^11 * f1^8 * f′0^11 * f′1^3 * f′′1 * f′′′′′1 +
525 * f0^11 * f1^8 * f′0^11 * f′1^3 * f′′′1 * f′′′′1 -
3150 * f0^11 * f1^8 * f′0^11 * f′1^2 * f′′1^2 * f′′′′1 -
4200 * f0^11 * f1^8 * f′0^11 * f′1^2 * f′′1 * f′′′1^2 +
18900 * f0^11 * f1^8 * f′0^11 * f′1 * f′′1^3 * f′′′1 -
14175 * f0^11 * f1^8 * f′0^11 * f′′1^5 -
6 * f0^11 * f1^8 * f′0^4 * f′1^11 * f′′′′′′0 +
126 * f0^11 * f1^8 * f′0^3 * f′1^11 * f′′0 * f′′′′′0 +
210 * f0^11 * f1^8 * f′0^3 * f′1^11 * f′′′0 * f′′′′0 -
1260 * f0^11 * f1^8 * f′0^2 * f′1^11 * f′′0^2 * f′′′′0 -
1680 * f0^11 * f1^8 * f′0^2 * f′1^11 * f′′0 * f′′′0^2 +
7560 * f0^11 * f1^8 * f′0 * f′1^11 * f′′0^3 * f′′′0 -
5670 * f0^11 * f1^8 * f′1^11 * f′′0^5 +
300 * f0^11 * f1^7 * f′0^11 * f′1^5 * f′′′′′1 -
4500 * f0^11 * f1^7 * f′0^11 * f′1^4 * f′′1 * f′′′′1 -
3000 * f0^11 * f1^7 * f′0^11 * f′1^4 * f′′′1^2 +
31500 * f0^11 * f1^7 * f′0^11 * f′1^3 * f′′1^2 * f′′′1 -
31500 * f0^11 * f1^7 * f′0^11 * f′1^2 * f′′1^4 -
48 * f0^11 * f1^7 * f′0^5 * f′1^11 * f′′′′′0 +
720 * f0^11 * f1^7 * f′0^4 * f′1^11 * f′′0 * f′′′′0 +
480 * f0^11 * f1^7 * f′0^4 * f′1^11 * f′′′0^2 -
5040 * f0^11 * f1^7 * f′0^3 * f′1^11 * f′′0^2 * f′′′0 +
5040 * f0^11 * f1^7 * f′0^2 * f′1^11 * f′′0^4 -
2340 * f0^11 * f1^6 * f′0^11 * f′1^6 * f′′′′1 -
23400 * f0^11 * f1^6 * f′0^11 * f′1^5 * f′′1 * f′′′1 -
35100 * f0^11 * f1^6 * f′0^11 * f′1^4 * f′′1^2 +
9360 * f0^11 * f1^5 * f′0^11 * f′1^7 * f′′′1 -
28080 * f0^11 * f1^5 * f′0^11 * f′1^6 * f′′1^2 -
28080 * f0^11 * f1^4 * f′0^11 * f′1^8 * f′′1 -
56160 * f0^11 * f1^3 * f′0^11 * f′1^10 +
56160 * f0^11 * f1^2 * f′0^11 * f′1^11 * x1 +
20 * f0^10 * f1^9 * f′0^11 * f′1^4 * f′′′′′′1 -
420 * f0^10 * f1^9 * f′0^11 * f′1^3 * f′′1 * f′′′′′1 -
700 * f0^10 * f1^9 * f′0^11 * f′1^3 * f′′′1 * f′′′′1 +
4200 * f0^10 * f1^9 * f′0^11 * f′1^2 * f′′1^2 * f′′′′1 +
5600 * f0^10 * f1^9 * f′0^11 * f′1^2 * f′′1 * f′′′1^2 -
25200 * f0^10 * f1^9 * f′0^11 * f′1 * f′′1^3 * f′′′1 +
18900 * f0^10 * f1^9 * f′0^11 * f′′1^5 +
15 * f0^10 * f1^9 * f′0^4 * f′1^11 * f′′′′′′0 -
315 * f0^10 * f1^9 * f′0^3 * f′1^11 * f′′0 * f′′′′′0 -
525 * f0^10 * f1^9 * f′0^3 * f′1^11 * f′′′0 * f′′′′0 +
3150 * f0^10 * f1^9 * f′0^2 * f′1^11 * f′′0^2 * f′′′′0 +
4200 * f0^10 * f1^9 * f′0^2 * f′1^11 * f′′0 * f′′′0^2 -
18900 * f0^10 * f1^9 * f′0 * f′1^11 * f′′0^3 * f′′′0 +
14175 * f0^10 * f1^9 * f′1^11 * f′′0^5 -
540 * f0^10 * f1^8 * f′0^11 * f′1^5 * f′′′′′1 +
8100 * f0^10 * f1^8 * f′0^11 * f′1^4 * f′′1 * f′′′′1 +
5400 * f0^10 * f1^8 * f′0^11 * f′1^4 * f′′′1^2 -
56700 * f0^10 * f1^8 * f′0^11 * f′1^3 * f′′1^2 * f′′′1 +
56700 * f0^10 * f1^8 * f′0^11 * f′1^2 * f′′1^4 +
246 * f0^10 * f1^8 * f′0^5 * f′1^11 * f′′′′′0 -
3690 * f0^10 * f1^8 * f′0^4 * f′1^11 * f′′0 * f′′′′0 -
2460 * f0^10 * f1^8 * f′0^4 * f′1^11 * f′′′0^2 +
25830 * f0^10 * f1^8 * f′0^3 * f′1^11 * f′′0^2 * f′′′0 -
25830 * f0^10 * f1^8 * f′0^2 * f′1^11 * f′′0^4 +
6060 * f0^10 * f1^7 * f′0^11 * f′1^6 * f′′′′1 +
60600 * f0^10 * f1^7 * f′0^11 * f′1^5 * f′′1 * f′′′1 +
90900 * f0^10 * f1^7 * f′0^11 * f′1^4 * f′′1^2 +
1080 * f0^10 * f1^7 * f′0^6 * f′1^11 * f′′′′0 +
10800 * f0^10 * f1^7 * f′0^5 * f′1^11 * f′′0 * f′′′0 +
16200 * f0^10 * f1^7 * f′0^4 * f′1^11 * f′′0^2 -
34320 * f0^10 * f1^6 * f′0^11 * f′1^7 * f′′′1 +
102960 * f0^10 * f1^6 * f′0^11 * f′1^6 * f′′1^2 +
102960 * f0^10 * f1^5 * f′0^11 * f′1^8 * f′′1 +
205920 * f0^10 * f1^4 * f′0^11 * f′1^10 -
205920 * f0^10 * f1^3 * f′0^11 * f′1^11 * x1 -
15 * f0^9 * f1^10 * f′0^11 * f′1^4 * f′′′′′′1 +
315 * f0^9 * f1^10 * f′0^11 * f′1^3 * f′′1 * f′′′′′1 +
525 * f0^9 * f1^10 * f′0^11 * f′1^3 * f′′′1 * f′′′′1 -
3150 * f0^9 * f1^10 * f′0^11 * f′1^2 * f′′1^2 * f′′′′1 -
4200 * f0^9 * f1^10 * f′0^11 * f′1^2 * f′′1 * f′′′1^2 +
18900 * f0^9 * f1^10 * f′0^11 * f′1 * f′′1^3 * f′′′1 -
14175 * f0^9 * f1^10 * f′0^11 * f′′1^5 -
20 * f0^9 * f1^10 * f′0^4 * f′1^11 * f′′′′′′0 +
420 * f0^9 * f1^10 * f′0^3 * f′1^11 * f′′0 * f′′′′′0 +
700 * f0^9 * f1^10 * f′0^3 * f′1^11 * f′′′0 * f′′′′0 -
4200 * f0^9 * f1^10 * f′0^2 * f′1^11 * f′′0^2 * f′′′′0 -
5600 * f0^9 * f1^10 * f′0^2 * f′1^11 * f′′0 * f′′′0^2 +
25200 * f0^9 * f1^10 * f′0 * f′1^11 * f′′0^3 * f′′′0 -
18900 * f0^9 * f1^10 * f′1^11 * f′′0^5 +
510 * f0^9 * f1^9 * f′0^11 * f′1^5 * f′′′′′1 -
7650 * f0^9 * f1^9 * f′0^11 * f′1^4 * f′′1 * f′′′′1 -
5100 * f0^9 * f1^9 * f′0^11 * f′1^4 * f′′′1^2 +
53550 * f0^9 * f1^9 * f′0^11 * f′1^3 * f′′1^2 * f′′′1 -
53550 * f0^9 * f1^9 * f′0^11 * f′1^2 * f′′1^4 -
510 * f0^9 * f1^9 * f′0^5 * f′1^11 * f′′′′′0 +
7650 * f0^9 * f1^9 * f′0^4 * f′1^11 * f′′0 * f′′′′0 +
5100 * f0^9 * f1^9 * f′0^4 * f′1^11 * f′′′0^2 -
53550 * f0^9 * f1^9 * f′0^3 * f′1^11 * f′′0^2 * f′′′0 +
53550 * f0^9 * f1^9 * f′0^2 * f′1^11 * f′′0^4 -
7590 * f0^9 * f1^8 * f′0^11 * f′1^6 * f′′′′1 -
75900 * f0^9 * f1^8 * f′0^11 * f′1^5 * f′′1 * f′′′1 -
113850 * f0^9 * f1^8 * f′0^11 * f′1^4 * f′′1^2 -
4590 * f0^9 * f1^8 * f′0^6 * f′1^11 * f′′′′0 -
45900 * f0^9 * f1^8 * f′0^5 * f′1^11 * f′′0 * f′′′0 -
68850 * f0^9 * f1^8 * f′0^4 * f′1^11 * f′′0^2 +
60600 * f0^9 * f1^7 * f′0^11 * f′1^7 * f′′′1 -
181800 * f0^9 * f1^7 * f′0^11 * f′1^6 * f′′1^2 -
14400 * f0^9 * f1^7 * f′0^7 * f′1^11 * f′′′0 +
43200 * f0^9 * f1^7 * f′0^6 * f′1^11 * f′′0^2 -
257400 * f0^9 * f1^6 * f′0^11 * f′1^8 * f′′1 -
514800 * f0^9 * f1^5 * f′0^11 * f′1^10 +
514800 * f0^9 * f1^4 * f′0^11 * f′1^11 * x1 +
6 * f0^8 * f1^11 * f′0^11 * f′1^4 * f′′′′′′1 -
126 * f0^8 * f1^11 * f′0^11 * f′1^3 * f′′1 * f′′′′′1 -
210 * f0^8 * f1^11 * f′0^11 * f′1^3 * f′′′1 * f′′′′1 +
1260 * f0^8 * f1^11 * f′0^11 * f′1^2 * f′′1^2 * f′′′′1 +
1680 * f0^8 * f1^11 * f′0^11 * f′1^2 * f′′1 * f′′′1^2 -
7560 * f0^8 * f1^11 * f′0^11 * f′1 * f′′1^3 * f′′′1 +
5670 * f0^8 * f1^11 * f′0^11 * f′′1^5 +
15 * f0^8 * f1^11 * f′0^4 * f′1^11 * f′′′′′′0 -
315 * f0^8 * f1^11 * f′0^3 * f′1^11 * f′′0 * f′′′′′0 -
525 * f0^8 * f1^11 * f′0^3 * f′1^11 * f′′′0 * f′′′′0 +
3150 * f0^8 * f1^11 * f′0^2 * f′1^11 * f′′0^2 * f′′′′0 +
4200 * f0^8 * f1^11 * f′0^2 * f′1^11 * f′′0 * f′′′0^2 -
18900 * f0^8 * f1^11 * f′0 * f′1^11 * f′′0^3 * f′′′0 +
14175 * f0^8 * f1^11 * f′1^11 * f′′0^5 -
246 * f0^8 * f1^10 * f′0^11 * f′1^5 * f′′′′′1 +
3690 * f0^8 * f1^10 * f′0^11 * f′1^4 * f′′1 * f′′′′1 +
2460 * f0^8 * f1^10 * f′0^11 * f′1^4 * f′′′1^2 -
25830 * f0^8 * f1^10 * f′0^11 * f′1^3 * f′′1^2 * f′′′1 +
25830 * f0^8 * f1^10 * f′0^11 * f′1^2 * f′′1^4 +
540 * f0^8 * f1^10 * f′0^5 * f′1^11 * f′′′′′0 -
8100 * f0^8 * f1^10 * f′0^4 * f′1^11 * f′′0 * f′′′′0 -
5400 * f0^8 * f1^10 * f′0^4 * f′1^11 * f′′′0^2 +
56700 * f0^8 * f1^10 * f′0^3 * f′1^11 * f′′0^2 * f′′′0 -
56700 * f0^8 * f1^10 * f′0^2 * f′1^11 * f′′0^4 +
4590 * f0^8 * f1^9 * f′0^11 * f′1^6 * f′′′′1 +
45900 * f0^8 * f1^9 * f′0^11 * f′1^5 * f′′1 * f′′′1 +
68850 * f0^8 * f1^9 * f′0^11 * f′1^4 * f′′1^2 +
7590 * f0^8 * f1^9 * f′0^6 * f′1^11 * f′′′′0 +
75900 * f0^8 * f1^9 * f′0^5 * f′1^11 * f′′0 * f′′′0 +
113850 * f0^8 * f1^9 * f′0^4 * f′1^11 * f′′0^2 -
48600 * f0^8 * f1^8 * f′0^11 * f′1^7 * f′′′1 +
145800 * f0^8 * f1^8 * f′0^11 * f′1^6 * f′′1^2 +
48600 * f0^8 * f1^8 * f′0^7 * f′1^11 * f′′′0 -
145800 * f0^8 * f1^8 * f′0^6 * f′1^11 * f′′0^2 +
297000 * f0^8 * f1^7 * f′0^11 * f′1^8 * f′′1 +
118800 * f0^8 * f1^7 * f′0^8 * f′1^11 * f′′0 +
926640 * f0^8 * f1^6 * f′0^11 * f′1^10 -
926640 * f0^8 * f1^5 * f′0^11 * f′1^11 * x1 -
f0^7 * f1^12 * f′0^11 * f′1^4 * f′′′′′′1 +
21 * f0^7 * f1^12 * f′0^11 * f′1^3 * f′′1 * f′′′′′1 +
35 * f0^7 * f1^12 * f′0^11 * f′1^3 * f′′′1 * f′′′′1 -
210 * f0^7 * f1^12 * f′0^11 * f′1^2 * f′′1^2 * f′′′′1 -
280 * f0^7 * f1^12 * f′0^11 * f′1^2 * f′′1 * f′′′1^2 +
1260 * f0^7 * f1^12 * f′0^11 * f′1 * f′′1^3 * f′′′1 -
945 * f0^7 * f1^12 * f′0^11 * f′′1^5 -
6 * f0^7 * f1^12 * f′0^4 * f′1^11 * f′′′′′′0 +
126 * f0^7 * f1^12 * f′0^3 * f′1^11 * f′′0 * f′′′′′0 +
210 * f0^7 * f1^12 * f′0^3 * f′1^11 * f′′′0 * f′′′′0 -
1260 * f0^7 * f1^12 * f′0^2 * f′1^11 * f′′0^2 * f′′′′0 -
1680 * f0^7 * f1^12 * f′0^2 * f′1^11 * f′′0 * f′′′0^2 +
7560 * f0^7 * f1^12 * f′0 * f′1^11 * f′′0^3 * f′′′0 -
5670 * f0^7 * f1^12 * f′1^11 * f′′0^5 +
48 * f0^7 * f1^11 * f′0^11 * f′1^5 * f′′′′′1 -
720 * f0^7 * f1^11 * f′0^11 * f′1^4 * f′′1 * f′′′′1 -
480 * f0^7 * f1^11 * f′0^11 * f′1^4 * f′′′1^2 +
5040 * f0^7 * f1^11 * f′0^11 * f′1^3 * f′′1^2 * f′′′1 -
5040 * f0^7 * f1^11 * f′0^11 * f′1^2 * f′′1^4 -
300 * f0^7 * f1^11 * f′0^5 * f′1^11 * f′′′′′0 +
4500 * f0^7 * f1^11 * f′0^4 * f′1^11 * f′′0 * f′′′′0 +
3000 * f0^7 * f1^11 * f′0^4 * f′1^11 * f′′′0^2 -
31500 * f0^7 * f1^11 * f′0^3 * f′1^11 * f′′0^2 * f′′′0 +
31500 * f0^7 * f1^11 * f′0^2 * f′1^11 * f′′0^4 -
1080 * f0^7 * f1^10 * f′0^11 * f′1^6 * f′′′′1 -
10800 * f0^7 * f1^10 * f′0^11 * f′1^5 * f′′1 * f′′′1 -
16200 * f0^7 * f1^10 * f′0^11 * f′1^4 * f′′1^2 -
6060 * f0^7 * f1^10 * f′0^6 * f′1^11 * f′′′′0 -
60600 * f0^7 * f1^10 * f′0^5 * f′1^11 * f′′0 * f′′′0 -
90900 * f0^7 * f1^10 * f′0^4 * f′1^11 * f′′0^2 +
14400 * f0^7 * f1^9 * f′0^11 * f′1^7 * f′′′1 -
43200 * f0^7 * f1^9 * f′0^11 * f′1^6 * f′′1^2 -
60600 * f0^7 * f1^9 * f′0^7 * f′1^11 * f′′′0 +
181800 * f0^7 * f1^9 * f′0^6 * f′1^11 * f′′0^2 -
118800 * f0^7 * f1^8 * f′0^11 * f′1^8 * f′′1 -
297000 * f0^7 * f1^8 * f′0^8 * f′1^11 * f′′0 -
570240 * f0^7 * f1^7 * f′0^11 * f′1^10 +
570240 * f0^7 * f1^7 * f′0^10 * f′1^11 +
1235520 * f0^7 * f1^6 * f′0^11 * f′1^11 * x1 +
f0^6 * f1^13 * f′0^4 * f′1^11 * f′′′′′′0 -
21 * f0^6 * f1^13 * f′0^3 * f′1^11 * f′′0 * f′′′′′0 -
35 * f0^6 * f1^13 * f′0^3 * f′1^11 * f′′′0 * f′′′′0 +
210 * f0^6 * f1^13 * f′0^2 * f′1^11 * f′′0^2 * f′′′′0 +
280 * f0^6 * f1^13 * f′0^2 * f′1^11 * f′′0 * f′′′0^2 -
1260 * f0^6 * f1^13 * f′0 * f′1^11 * f′′0^3 * f′′′0 +
945 * f0^6 * f1^13 * f′1^11 * f′′0^5 +
78 * f0^6 * f1^12 * f′0^5 * f′1^11 * f′′′′′0 -
1170 * f0^6 * f1^12 * f′0^4 * f′1^11 * f′′0 * f′′′′0 -
780 * f0^6 * f1^12 * f′0^4 * f′1^11 * f′′′0^2 +
8190 * f0^6 * f1^12 * f′0^3 * f′1^11 * f′′0^2 * f′′′0 -
8190 * f0^6 * f1^12 * f′0^2 * f′1^11 * f′′0^4 +
2340 * f0^6 * f1^11 * f′0^6 * f′1^11 * f′′′′0 +
23400 * f0^6 * f1^11 * f′0^5 * f′1^11 * f′′0 * f′′′0 +
35100 * f0^6 * f1^11 * f′0^4 * f′1^11 * f′′0^2 +
34320 * f0^6 * f1^10 * f′0^7 * f′1^11 * f′′′0 -
102960 * f0^6 * f1^10 * f′0^6 * f′1^11 * f′′0^2 +
257400 * f0^6 * f1^9 * f′0^8 * f′1^11 * f′′0 -
926640 * f0^6 * f1^8 * f′0^10 * f′1^11 -
1235520 * f0^6 * f1^7 * f′0^11 * f′1^11 * x0 -
6 * f0^5 * f1^13 * f′0^5 * f′1^11 * f′′′′′0 +
90 * f0^5 * f1^13 * f′0^4 * f′1^11 * f′′0 * f′′′′0 +
60 * f0^5 * f1^13 * f′0^4 * f′1^11 * f′′′0^2 -
630 * f0^5 * f1^13 * f′0^3 * f′1^11 * f′′0^2 * f′′′0 +
630 * f0^5 * f1^13 * f′0^2 * f′1^11 * f′′0^4 -
390 * f0^5 * f1^12 * f′0^6 * f′1^11 * f′′′′0 -
3900 * f0^5 * f1^12 * f′0^5 * f′1^11 * f′′0 * f′′′0 -
5850 * f0^5 * f1^12 * f′0^4 * f′1^11 * f′′0^2 -
9360 * f0^5 * f1^11 * f′0^7 * f′1^11 * f′′′0 +
28080 * f0^5 * f1^11 * f′0^6 * f′1^11 * f′′0^2 -
102960 * f0^5 * f1^10 * f′0^8 * f′1^11 * f′′0 +
514800 * f0^5 * f1^9 * f′0^10 * f′1^11 +
926640 * f0^5 * f1^8 * f′0^11 * f′1^11 * x0 +
30 * f0^4 * f1^13 * f′0^6 * f′1^11 * f′′′′0 +
300 * f0^4 * f1^13 * f′0^5 * f′1^11 * f′′0 * f′′′0 +
450 * f0^4 * f1^13 * f′0^4 * f′1^11 * f′′0^2 +
1560 * f0^4 * f1^12 * f′0^7 * f′1^11 * f′′′0 -
4680 * f0^4 * f1^12 * f′0^6 * f′1^11 * f′′0^2 +
28080 * f0^4 * f1^11 * f′0^8 * f′1^11 * f′′0 -
205920 * f0^4 * f1^10 * f′0^10 * f′1^11 -
514800 * f0^4 * f1^9 * f′0^11 * f′1^11 * x0 -
120 * f0^3 * f1^13 * f′0^7 * f′1^11 * f′′′0 +
360 * f0^3 * f1^13 * f′0^6 * f′1^11 * f′′0^2 -
4680 * f0^3 * f1^12 * f′0^8 * f′1^11 * f′′0 +
56160 * f0^3 * f1^11 * f′0^10 * f′1^11 +
205920 * f0^3 * f1^10 * f′0^11 * f′1^11 * x0 +
360 * f0^2 * f1^13 * f′0^8 * f′1^11 * f′′0 - 9360 * f0^2 * f1^12 * f′0^10 * f′1^11 -
56160 * f0^2 * f1^11 * f′0^11 * f′1^11 * x0 +
720 * f0 * f1^13 * f′0^10 * f′1^11 +
9360 * f0 * f1^12 * f′0^11 * f′1^11 * x0 - 720 * f1^13 * f′0^11 * f′1^11 * x0
) / (
720 *
f′0^11 *
f′1^11 *
(
f0^13 - 13 * f0^12 * f1 + 78 * f0^11 * f1^2 - 286 * f0^10 * f1^3 +
715 * f0^9 * f1^4 - 1287 * f0^8 * f1^5 + 1716 * f0^7 * f1^6 -
1716 * f0^6 * f1^7 + 1287 * f0^5 * f1^8 - 715 * f0^4 * f1^9 +
286 * f0^3 * f1^10 - 78 * f0^2 * f1^11 + 13 * f0 * f1^12 - f1^13
)
)
end
function lmm(
::LithBoonkkampIJzerman{3,6},
xs,
fs,
f′s,
f′′s,
f′′′s,
f′′′′s,
f′′′′′s,
f′′′′′′s,
)
error("not computed")
end
# 1 step taken in set up
function log_step(l::Tracks, M::AbstractDerivativeMethod, state; init=false)
init && push!(l.xfₛ, (state.xn0, state.fxn0))
push!(l.xfₛ, (state.xn1, state.fxn1))
log_iteration(l, 1) # an iteration is logged for both the initial and subsequent steps
nothing
end
"""
Roots.Newton()
Implements Newton's [method](https://en.wikipedia.org/wiki/Newton%27s_method):
`xᵢ₊₁ = xᵢ - f(xᵢ)/f'(xᵢ)`. This is a quadratically convergent method requiring
one derivative and two function calls per step.
## Examples
```jldoctest with_derivative
julia> using Roots

julia> find_zero((sin,cos), 3.0, Roots.Newton()) ≈ π
true
```
If function evaluations are expensive one can pass in a function which returns (f, f/f') as follows
```jldoctest with_derivative
julia> find_zero(x -> (sin(x), sin(x)/cos(x)), 3.0, Roots.Newton()) ≈ π
true
```
This can be advantageous if the derivative is easily computed from the
value of f, but otherwise would be expensive to compute.
----
The error, `eᵢ = xᵢ - α`, can be expressed as `eᵢ₊₁ =
f[xᵢ,xᵢ,α]/(2f[xᵢ,xᵢ])eᵢ²` (Sidi, Unified treatment of regula falsi,
Newton-Raphson, secant, and Steffensen methods for nonlinear
equations).
"""
struct Newton <: AbstractNewtonLikeMethod end
fn_argout(::AbstractNewtonLikeMethod) = 2
# we store x0,x1,fx0,fx1 **and** Δ = fx1/f'(x1)
struct NewtonState{T,S} <: AbstractUnivariateZeroState{T,S}
xn1::T
xn0::T
Δ::T
fxn1::S
fxn0::S
end
function init_state(M::Newton, F::Callable_Function, x)
x₀ = float(first(x))
T = eltype(x₀)
fx₀, Δ::T = F(x₀)
x₁::T = x₀ - Δ
state = init_state(M, F, x₀, x₁, fx₀, fx₀)
end
# compute fx₁, Δ
function init_state(::Newton, F, x₀::T, x₁::T, fx₀, fx₁) where {T}
fx₁, Δ::T = F(x₁)
NewtonState(promote(x₁, x₀)..., Δ, promote(fx₁, fx₀)...)
end
initial_fncalls(M::Newton) = 2
function update_state(
M::Newton,
F,
o::NewtonState{T,S},
options,
l=NullTracks(),
) where {T,S}
xn0, xn1 = o.xn0, o.xn1
fxn0, fxn1 = o.fxn0, o.fxn1
Δ::T = o.Δ
if isissue(Δ)
log_message(l, "Issue with `f/f′'")
return o, true
end
xn0, xn1::T = xn1, xn1 - Δ
fxn0 = fxn1
fxn1::S, Δ = F(xn1)
incfn(l, 2)
@reset o.xn0 = xn0
@reset o.xn1 = xn1
@reset o.Δ = Δ
@reset o.fxn0 = fxn0
@reset o.fxn1 = fxn1
return o, false
end
## Thukral 3,4,5 (2) is Schroder
"""
AbstractThukralBMethod
Abstract type for `ThukralXB` methods for `X` being `2`,`3`,`4`, or `5`.
These are a family of methods which are
* efficient (order `X`) for non-simple roots (e.g. `Thukral2B` is the `Schroder` method)
* take `X+1` function calls per step
* require `X` derivatives. These can be passed as a tuple of functions, `(f, f', f'', …)`, *or* as
a function returning the ratios: `x -> (f(x), f(x)/f'(x), f'(x)/f''(x), …)`.
## Examples
```julia
using ForwardDiff
Base.adjoint(f::Function) = x -> ForwardDiff.derivative(f, float(x))
f(x) = (exp(x) + x - 2)^6
x0 = 1/4
find_zero((f, f', f''), x0, Roots.Halley()) # 14 iterations; ≈ 48 function evaluations
find_zero((f, f', f''), big(x0), Roots.Thukral2B()) # 3 iterations; ≈ 9 function evaluations
find_zero((f, f', f'', f'''), big(x0), Roots.Thukral3B()) # 2 iterations; ≈ 8 function evaluations
```
## Reference
*Introduction to a family of Thukral ``k``-order method for finding multiple zeros of nonlinear equations*,
R. Thukral, JOURNAL OF ADVANCES IN MATHEMATICS 13(3):7230-7237, DOI: [10.24297/jam.v13i3.6146](https://doi.org/10.24297/jam.v13i3.6146).
"""
abstract type AbstractThukralBMethod <: AbstractHalleyLikeMethod end
initial_fncalls(M::AbstractThukralBMethod) = fn_argout(M)
struct Thukral2B <: AbstractThukralBMethod end
fn_argout(::Thukral2B) = 3
struct Thukral3B <: AbstractThukralBMethod end
fn_argout(::Thukral3B) = 4
struct Thukral4B <: AbstractThukralBMethod end
fn_argout(::Thukral4B) = 5
struct Thukral5B <: AbstractThukralBMethod end
fn_argout(::Thukral5B) = 6
struct ThukralBState{N,T,S} <: AbstractUnivariateZeroState{T,S}
xn1::T
xn0::T
Δs::NTuple{N,T}
fxn1::S
fxn0::S
end
function init_state(M::AbstractThukralBMethod, F::Callable_Function, x)
x₁ = float(first(x))
fx₁, Δs = F(x₁)
state = init_state(M, F, nan(x₁), x₁, nan(fx₁), fx₁; Δs=Δs)
end
function init_state(
M::AbstractThukralBMethod,
F,
x₀::T,
x₁::T,
fx₀,
fx₁;
Δs=nothing,
) where {T}
ThukralBState(promote(x₁, x₀)..., NTuple{fn_argout(M) - 1,T}(Δs), promote(fx₁, fx₀)...)
end
function update_state(
M::AbstractThukralBMethod,
F,
o::AbstractUnivariateZeroState{T,S},
options,
l=NullTracks(),
) where {T,S}
x₀ = o.xn1
Δ = compute_thukral_Δ(M, o)
isissue(Δ) && return (o, true)
x₁::T = x₀ - Δ
fx₁::S, Δs = F(x₁)
incfn(l, fn_argout(M))
@reset o.xn0 = x₀
@reset o.fxn0 = o.fxn1
@reset o.Δs = NTuple{fn_argout(M) - 1,T}(Δs)
@reset o.xn1 = x₁
@reset o.fxn1 = fx₁
return (o, false)
end
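# Note: below, with tⱼ = 1/rⱼ and rⱼ = f⁽ʲ⁻¹⁾/f⁽ʲ⁾, the increment
# Δ = 1/(t₁ - t₂) = (f/f′) / (1 - f⋅f′′/f′²) is Schroder's step.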
function compute_thukral_Δ(M::Thukral2B, o)
r₁, r₂ = o.Δs
t₁, t₂ = 1 / r₁, 1 / r₂
Δ = one(o.xn1)
Δ /= (t₁ - t₂)
Δ
end
function compute_thukral_Δ(M::Thukral3B, o)
r₁, r₂, r₃ = o.Δs
t₁, t₂, t₃ = 1 / r₁, 1 / r₂, 1 / r₃
Δ = (2t₁ - 2t₂)
Δ /= (2t₁^2 - 3t₁ * t₂ + t₂ * t₃)
Δ
end
function compute_thukral_Δ(M::Thukral4B, o)
r₁, r₂, r₃, r₄ = o.Δs
t₁, t₂, t₃, t₄ = 1 / r₁, 1 / r₂, 1 / r₃, 1 / r₄
Δ = 6t₁^2 - 9t₁ * t₂ + 3t₂ * t₃
Δ /= 6t₁^3 - 12 * t₁^2 * t₂ + 4t₁ * t₂ * t₃ - t₂ * t₃ * t₄ + 3 * t₁ * t₂^2
Δ
end
function compute_thukral_Δ(M::Thukral5B, o)
r₁, r₂, r₃, r₄, r₅ = o.Δs
t₁, t₂, t₃, t₄, t₅ = 1 / r₁, 1 / r₂, 1 / r₃, 1 / r₄, 1 / r₅
Δ = 24 * t₁^3 - 48t₁^2 * t₂ + 16 * t₁ * t₂ * t₃ - 4 * t₂ * t₃ * t₄ + 12t₁ * t₂^2
Δ /=
24t₁^4 - 60t₁^3 * t₂ + 20 * t₁^2 * t₂ * t₃ - 5 * t₁ * t₂ * t₃ * t₄ + 30t₁^2 * t₂^2 -
10 * t₁ * t₂^2 * t₃ + t₂ * t₃ * t₄ * t₅
Δ
end
## Derivative free methods inherit from abstract secant
# init_state(M,F,x) --> call init_state(M,F,x₀,x₁,fx₀, fx₁)
function init_state(M::AbstractSecantMethod, F::Callable_Function, x)
x₀, x₁ = x₀x₁(x)
fx₀, fx₁ = first(F(x₀)), first(F(x₁))
state = init_state(M, F, x₀, x₁, fx₀, fx₁)
end
# initialize from xs, fxs
function init_state(::AbstractSecantMethod, F, x₀, x₁, fx₀, fx₁)
UnivariateZeroState(promote(x₁, x₀)..., promote(fx₁, fx₀)...)
end
initial_fncalls(::AbstractSecantMethod) = 2
##################################################
## Guard against non-robust algorithms
##
## By default, we do this by deciding if we
## should take a secant step instead of the algorithm's step. For example, for
## Steffensen which is quadratically convergent and the Secant method
## which is only superlinear,
## the error, e_{n+1} = x_{n+1} - alpha, may be smaller after a secant
## step than a Steffensen step. (It is only once x_n is close enough
## to alpha that the method is quadratically convergent.)
## The Steffensen error is
## Δn+1 = f[x,x+fx, alpha]/f[x, x+fx] * (1 - f[x, alpha]) (x-alpha)^2
## ≈ f''/(2f') * ( 1 + f') Δn^2
## The Secant error is
## Δn+1 = f[x,x_{-1},alpha] / f[x,x_{-1}] * (x-alpha) * (x_{-1} - alpha)
## ≈ f''/(2f') Δn ⋅ Δn-1
## The ratio is ≈ (1 + f')(Δn / Δn-1)
## It seems reasonable, that a Steffensen step is preferred when
## the ratio satisfies -1 < (1+f') ⋅ Δn /Δn-1 < 1
## We could use f' ~ fp = (fx1-fx0)/(x1-x0); but our proxy for
## Δn/Δn-1 is problematic, as we don't know alpha, and using xn-x_{n-1}
## can be an issue when only x1 and not x0 is specified. This needs
## working around.
##
## Instead, as Steffensen is related to Newton as much as
## (f(x+fx) - fx)/fx ≈ f'(x), we take a Steffensen step if |fx|
## is small enough. For this we use |fx| <= x/1000; which
## seems to work reasonably well over several different test cases.
@inline function do_guarded_step(
M::AbstractSecantMethod,
o::AbstractUnivariateZeroState{T,S},
) where {T,S}
x, fx = o.xn1, o.fxn1
1000 * abs(fx) > max(oneunit(S), abs(x) * oneunit(S) / oneunit(T)) * one(T)
end
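# For example (illustrative): for f(x) = x^2 - 2 at x = 10, fx = 98, so
# 1000⋅|fx| > max(1, |x|) and the guarded (secant) step is taken; near the
# zero x ≈ 1.414..., |fx| ≤ |x|/1000 holds and the faster step is used.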
# check if we should guard against step for method M; call N if yes, P if not
function update_state_guarded(
M::AbstractSecantMethod,
N::AbstractUnivariateZeroMethod,
P::AbstractUnivariateZeroMethod,
fs,
o,
options,
l=NullTracks(),
)
if do_guarded_step(M, o)
return update_state(N, fs, o, options, l)
else
update_state(P, fs, o, options, l)
end
end
### Order2B() Esser method
"""
Roots.Esser()
Esser's method. This is a quadratically convergent method that, like
Schroder's method, does not depend on the multiplicity of the
zero. Schroder's method has update step `x - r2/(r2-r1) * r1`, where `ri =
fⁱ⁻¹/fⁱ`. Esser approximates `f' ~ f[x-h, x+h], f'' ~
f[x-h,x,x+h]`, where `h = fx`, as with Steffensen's method, Requiring 3
function calls per step. The implementation `Order2B` uses a secant
step when `|fx|` is considered too large.
Esser, H. Computing (1975) 14: 367. DOI: [10.1007/BF02253547](https://doi.org/10.1007/BF02253547)
Eine stets quadratisch konvergente Modification des Steffensen-Verfahrens
## Examples
```
f(x) = cos(x) - x
g(x) = f(x)^2
x0 = pi/4
find_zero(f, x0, Order2(), verbose=true) # 3 steps / 7 function calls
find_zero(f, x0, Roots.Order2B(), verbose=true) # 4 / 9
find_zero(g, x0, Order2(), verbose=true) # 22 / 45
find_zero(g, x0, Roots.Order2B(), verbose=true) # 4 / 10
```
"""
struct Esser <: AbstractSecantMethod end
"""
Roots.Order2B()
[`Esser`](@ref) method with guarded secant step.
"""
struct Order2B <: AbstractSecantMethod end
function update_state(
M::Order2B,
fs,
o::AbstractUnivariateZeroState,
options,
l=NullTracks(),
)
update_state_guarded(M, Secant(), Esser(), fs, o, options, l)
end
function update_state(
::Esser,
F,
o::AbstractUnivariateZeroState{T,S},
options,
l=NullTracks(),
) where {T,S}
x1, fx1 = o.xn1, o.fxn1
f0 = fx1
f1 = F(x1 + f0 * oneunit(T) / oneunit(S))
f_1 = F(x1 - f0 * oneunit(T) / oneunit(S))
incfn(l, 2)
# h = f0
# r1 = f/f' ~ f/f[x+h,x-h]
# r2 = f'/f'' ~ f[x+h, x-h]/f[x-h,x,x+h]
r1 = f0 * 2 * f0 / (f1 - f_1) * oneunit(T) / oneunit(S)
r2 = (f1 - f_1) / (f1 - 2 * f0 + f_1) * f0 / 2 * oneunit(T) / oneunit(S)
k = r2 / (r2 - r1) # ~ m
if abs(k) <= 1e-2 * oneunit(k)
log_message(l, "Estimate for multiplicity had issues. ")
return o, true
end
delta = k * r1
if isissue(delta)
log_message(l, "Increment `Δx` has issues. ")
return o, true
end
x0::T, x1::T = x1, x1 - delta
fx0::S, fx1::S = fx1, F(x1)
incfn(l)
o = _set(o, (x1, fx1), (x0, fx0))
return o, false
end
"""
Roots.King()
A superlinear (order `1.6...`) modification of the secant method for multiple roots.
Presented in A SECANT METHOD FOR MULTIPLE ROOTS, by RICHARD F. KING, BIT 17 (1977), 321-328
The basic idea is similar to Schroder's method: apply the secant method
to `f/f'`. However, this uses `f' ~ fp = (fx - f(x-fx))/fx` (a Steffensen step). In
this implementation, `Order1B`, when `fx` is too big, a single secant step of `f`
is used.
The *asymptotic* error, `eᵢ = xᵢ - α`, is given by
`eᵢ₊₂ = 1/2⋅G''/G'⋅ eᵢ⋅eᵢ₊₁ + (1/6⋅G'''/G' - (1/2⋅G''/G')^2)⋅eᵢ⋅eᵢ₊₁⋅(eᵢ+eᵢ₊₁)`.
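
## Examples

An illustrative use, through the guarded `Order1B` wrapper; `(cos(x) - x)^2`
has a non-simple zero at `0.7390851…`:

```julia
find_zero(x -> (cos(x) - x)^2, 0.75, Roots.Order1B())
```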
"""
struct King <: AbstractSecantMethod end
"""
Roots.Order1B()
[`King`](@ref) method with guarded secant step.
"""
struct Order1B <: AbstractSecantMethod end
struct KingState{T,S} <: AbstractUnivariateZeroState{T,S}
xn1::T
xn0::T
fxn1::S
fxn0::S
G0::S
end
function init_state(::Union{King,Order1B}, F, x₀, x₁, fx₀, fx₁)
fₛ₀ = F(x₀ - fx₀ * oneunit(x₀) / oneunit(fx₀))
G₀ = -fx₀^2 / (fₛ₀ - fx₀)
KingState(promote(x₁, x₀)..., promote(fx₁, fx₀, G₀)...)
end
initial_fncalls(::Union{King,Order1B}) = 3
function update_state(::Order1B, F, o::KingState, options, l=NullTracks())
if do_guarded_step(Order1B(), o)
state, flag = update_state(Order1(), F, o, options, l)
x0, fx0 = state.xn0, state.fxn0 # clunky! Need to update G₀ after Order1() step
fₛ = F(x0 - fx0 * oneunit(x0) / oneunit(fx0))
incfn(l)
G₀ = -fx0^2 / (fₛ - fx0)
@reset state.G0 = G₀
return (state, flag)
else
update_state(King(), F, o, options, l)
end
end
function update_state(::King, F, o::KingState{T,S}, options, l=NullTracks()) where {T,S}
x0, x1 = o.xn0, o.xn1
fx0, fx1 = o.fxn0, o.fxn1
G₀ = o.G0
fₛ₁ = F(x1 - fx1 * oneunit(x1) / oneunit(fx1))
incfn(l)
G₁ = -fx1^2 / (fₛ₁ - fx1)
m = (x1 - x0) / (G₁ - G₀) # approximate value of `m`, the multiplicity
if abs(m) <= 1e-2 * oneunit(m)
log_message(l, "Estimate for multiplicity has issues. ")
return (o, true)
end
Δ = G₁ * (x1 - x0) / (G₁ - G₀)
if isissue(Δ)
log_message(l, "Increment `Δx` has issues. ")
return o, true
end
x0, x1::T = x1, x1 - Δ
fx0, fx1::S = fx1, F(x1)
incfn(l)
o = _set(o, (x1, fx1), (x0, fx0))
@reset o.G0 = G₁
return o, false
end
"""
Order0()
The `Order0` method is engineered to be a more robust, though possibly
slower, alternative to the other derivative-free root-finding
methods. The implementation roughly follows the algorithm described in
*Personal Calculator Has Key to Solve Any Equation ``f(x) = 0``*, the
SOLVE button from the
[HP-34C](http://www.hpl.hp.com/hpjournal/pdfs/IssuePDFs/1979-12.pdf).
The basic idea is to use a secant step. If along the way a bracket is
found, switch to a bracketing algorithm, using `AlefeldPotraShi`. If the secant
step fails to decrease the function value, a quadratic step is used up
to ``4`` times.
This is not really ``0``-order: the secant method has order
``1.6...`` [Wikipedia](https://en.wikipedia.org/wiki/Secant_method#Comparison_with_other_root-finding_methods)
and the bracketing method has order
``1.6180...`` [Alefeld, Potra, and Shi](http://www.ams.org/journals/mcom/1993-61-204/S0025-5718-1993-1192965-2/S0025-5718-1993-1192965-2.pdf)
so for reasonable starting points and functions, this algorithm should be
superlinear, and relatively robust to non-reasonable starting points.
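
## Examples

An illustrative call (`Order0` is also used by default when `find_zero` is
called with just a function and a scalar starting point):

```julia
find_zero(x -> cos(x) - x, 1.0, Order0()) # ≈ 0.7390851332151607
```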
"""
struct Order0 <: AbstractSecantMethod end
# special case Order0 to be hybrid
function init(
𝑭𝑿::ZeroProblem,
M::Order0,
p′=nothing;
p=nothing,
verbose::Bool=false,
tracks=NullTracks(),
kwargs...,
)
p = p′ === nothing ? p : p′
init(𝑭𝑿, Secant(), AlefeldPotraShi(); p=p, verbose=verbose, tracks=tracks, kwargs...)
end
init(::ZeroProblem, ::Order0, ::AbstractBracketingMethod; kwargs...) =
throw(ArgumentError("No bracketing method specified with Order0"))
"""
Order16()
Thukral16()
Implements the order 16 algorithm from
*New Sixteenth-Order Derivative-Free Methods for Solving Nonlinear Equations*
by R. Thukral,
American Journal of Computational and Applied Mathematics
p-ISSN: 2165-8935; e-ISSN: 2165-8943; 2012; 2(3): 112-118
DOI: [10.5923/j.ajcam.20120203.08](https://doi.org/10.5923/j.ajcam.20120203.08).
Five function calls per step are required. Though rapidly converging,
this method generally isn't faster (fewer function calls/steps) over
other methods when using `Float64` values, but may be useful for
solving over `BigFloat`. The `Order16` method replaces a Steffensen step with a secant
step when `f(x)` is large.
The error, `eᵢ = xᵢ - α`, is expressed as `eᵢ₊₁ = K⋅eᵢ¹⁶` for an explicit `K`
in equation (50) of the paper.
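
## Examples

A sketch of the high-precision use case mentioned above:

```julia
find_zero(x -> x^5 - x - 1, big"1.2", Order16()) # ≈ 1.1673039782614187…
```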
"""
struct Order16 <: AbstractSecantMethod end
struct Thukral16 <: AbstractSecantMethod end
function update_state(
M::Order16,
fs,
o::UnivariateZeroState{T,S},
options,
l=NullTracks(),
) where {T,S}
update_state_guarded(M, Secant(), Thukral16(), fs, o, options, l)
end
function update_state(
M::Thukral16,
F,
o::AbstractUnivariateZeroState{T,S},
options,
l=NullTracks(),
) where {T,S}
xn = o.xn1
fxn = o.fxn1
wn::T = steff_step(M, xn, fxn)
fwn::S = F(wn)
incfn(l)
fp, issue = _fbracket(xn, wn, fxn, fwn)
if issue
log_message(l, "issue with f[xn,wn]")
o = _set(o, (wn, fwn), (xn, fxn))
return o, true
end
yn::T = xn - fxn / fp
fyn::S = F(yn)
incfn(l)
fp, issue = _fbracket_ratio(yn, xn, wn, fyn, fxn, fwn)
if issue
log_message(l, "issue with f[xn,yn]*f[yn,wn]/f[xn,wn]")
o = _set(o, (yn, fyn), (xn, fxn))
return o, true
end
zn::T = yn - fyn / fp
fzn::S = F(zn)
incfn(l)
fp, issue = _fbracket_diff(xn, yn, zn, fxn, fyn, fzn)
u2, u3, u4 = fzn / fwn, fyn / fxn, fyn / fwn
eta = 1 / (1 + 2 * u3 * u4^2) / (1 - u2)
if issue
log_message(l, "Approximate derivative failed")
o = _set(o, (zn, fzn), (xn, fxn))
return o, true
end
an::T = zn - eta * fzn / fp
fan::S = F(an)
incfn(l)
fp, issue = _fbracket_ratio(an, yn, zn, fan, fyn, fzn)
if issue
log_message(l, "Approximate derivative failed")
o = _set(o, (an, fan), (xn, fxn))
return o, true
end
u1, u5, u6 = fzn / fxn, fan / fxn, fan / fwn
fp1, issue = _fbracket(xn, yn, fxn, fyn)
sigma =
1 + u1 * u2 - u1 * u3 * u4^2 +
u5 +
u6 +
u1^2 * u4 +
u2^2 * u3 +
3 * u1 * u4^2 * (u3^2 - u4^2) / (fp1 / oneunit(fp1))
xn1::T = an - sigma * fan / fp
fxn1::S = F(xn1)
incfn(l)
o = _set(o, (xn1, fxn1), (xn, fxn))
return o, false
end
##################################################
## some means of guarding against large fx when taking a steffensen step
## for Orders 5, 8, and 16 we restrict the size of the steffensen step
## to make the underlying methods more robust. As we need the types defined
## below, this must be included after Order5, Order8, and Order16 are defined
##
function steff_step(M::Union{Order5,Order8,Order16}, x::S, fx::T) where {S,T}
xbar, fxbar = real(x / oneunit(x)), fx / oneunit(fx)
thresh = max(1, abs(xbar)) * sqrt(eps(one(xbar))) #^(1/2) # max(1, sqrt(abs(x/fx))) * 1e-6
out = abs(fxbar) <= thresh ? fxbar : sign(fx) * thresh
x + out * oneunit(x)
end
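# For example, at x = 1.0 the threshold is ≈ 1.5e-8 (√eps()); a large f(x) is
# clamped to ±thresh so the perturbed point x + f(x) used by the Steffensen
# step stays a floating-point-sensible distance from x.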
"""
Order5()
KumarSinghAkanksha()
Implements an order 5 algorithm from *A New Fifth Order Derivative
Free Newton-Type Method for Solving Nonlinear Equations* by Manoj
Kumar, Akhilesh Kumar Singh, and Akanksha, Appl. Math. Inf. Sci. 9,
No. 3, 1507-1513 (2015), DOI: [10.12785/amis/090346](https://doi.org/10.12785/amis/090346). Four function
calls per step are needed. The `Order5` method replaces a Steffensen step with a secant
step when `f(x)` is large.
The error, `eᵢ = xᵢ - α`, satisfies
`eᵢ₊₁ = K₁ ⋅ K₅ ⋅ M ⋅ eᵢ⁵ + O(eᵢ⁶)`
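
## Examples

Illustrative only:

```julia
find_zero(x -> sin(x) - x/2, 2.0, Order5()) # ≈ 1.8954942670339809
```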
"""
struct Order5 <: AbstractSecantMethod end
struct KumarSinghAkanksha <: AbstractSecantMethod end
function update_state(M::Order5, fs, o::UnivariateZeroState, options, l=NullTracks())
update_state_guarded(M, Secant(), KumarSinghAkanksha(), fs, o, options, l)
end
function update_state(
M::KumarSinghAkanksha,
F,
o::AbstractUnivariateZeroState{T,S},
options,
l=NullTracks(),
) where {T,S}
xn = o.xn1
fxn = o.fxn1
wn::T = steff_step(M, o.xn1, o.fxn1)
fwn::S = F(wn)
incfn(l)
fp, issue = _fbracket(o.xn1, wn, o.fxn1, fwn)
if issue
log_message(l, "Issue with divided difference f[xn, wn]. ")
o = _set(o, (wn, fwn), (xn, fxn))
return o, true
end
yn::T = o.xn1 - o.fxn1 / fp
fyn::S = F(yn)
incfn(l)
zn::T = xn - (fxn + fyn) / fp
fzn::S = F(zn)
incfn(l)
fp, issue = _fbracket_ratio(yn, o.xn1, wn, fyn, o.fxn1, fwn)
if issue
log_message(l, "Issue with f[xn,yn] * f[yn,wn] / f[xn, wn].")
o = _set(o, (yn, fyn), (xn, fxn))
return o, true
end
x₁::T = zn - fzn / fp
f₁ = F(x₁)
incfn(l)
o = _set(o, (x₁, f₁), (xn, fxn))
return o, false
# nothing
end
struct Order5Derivative <: AbstractSecantMethod end
fn_argout(::Order5Derivative) = 2
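# with `fn_argout` of 2, the callable is expected to return the pair (f(x), f(x)/f'(x))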
function update_state(
m::Order5Derivative,
f,
o::AbstractUnivariateZeroState{T,S},
options,
l=NullTracks(),
) where {T,S}
xn, fxn = o.xn1, o.fxn1
a::T, b::S = f(xn)
fpxn = a / b
incfn(l)
if isissue(fpxn)
return o, true
end
yn::T = xn - fxn / fpxn
fyn::S, Δyn::T = f(yn)
fpyn = fyn / Δyn
incfn(l, 2)
if isissue(fpyn)
log_message(l, "Issue computing `fpyn`")
o = _set(o, (yn, fyn), (o.xn1, o.fxn1))
return o, true
end
zn::T = xn - (fxn + fyn) / fpxn
fzn::S, _ = f(zn)
incfn(l, 2)
xn1::T = zn - fzn / fpyn
fxn1::S, _ = f(xn1)
incfn(l, 2)
o = _set(o, (xn1, fxn1), (xn, fxn))
return o, false # (state, converged-flag) pair, matching the other update_state methods
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 2305 | """
Order8()
Thukral8()
Implements an eighth-order algorithm from *New Eighth-Order
Derivative-Free Methods for Solving Nonlinear Equations* by Rajinder
Thukral, International Journal of Mathematics and Mathematical
Sciences Volume 2012 (2012), Article ID 493456, 12 pages DOI:
[10.1155/2012/493456](https://doi.org/10.1155/2012/493456). Four
function calls per step are required. The `Order8` method replaces a
Steffensen step with a secant step when `f(x)` is large.
The error, `eᵢ = xᵢ - α`, is expressed as `eᵢ₊₁ = K ⋅ eᵢ⁸` in
(2.25) of the paper for an explicit `K`.
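For example (a usage sketch):

```julia
find_zero(x -> cos(x) - x, 1.0, Order8())  # ≈ 0.7390851332151607
```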
"""
struct Order8 <: AbstractSecantMethod end
struct Thukral8 <: AbstractSecantMethod end
## cf also: https://doi.org/10.1515/tmj-2017-0049
function update_state(
M::Order8,
fs,
o::UnivariateZeroState{T,S},
options,
l=NullTracks(),
) where {T,S}
update_state_guarded(M, Secant(), Thukral8(), fs, o, options, l)
end
function update_state(
M::Thukral8,
F,
o::AbstractUnivariateZeroState{T,S},
options,
l=NullTracks(),
) where {T,S}
xn = o.xn1
fxn = o.fxn1
wn::T = steff_step(M, xn, fxn)
fwn::S = F(wn)
incfn(l)
if isissue(fwn)
log_message(l, "issue with Steffensen step fwn")
o = _set(o, (wn, fwn), (xn, fxn))
return o, true
end
fp, issue = _fbracket(xn, wn, fxn, fwn)
if issue
log_message(l, "issue with divided difference f[xn, wn]. ")
return o, true
end
yn::T = xn - fxn / fp
fyn::S = F(yn)
incfn(l)
fp, issue = _fbracket(yn, xn, fyn, fxn)
if issue #fp
log_message(l, "issue with divided difference f[xn, yn]. ")
o = _set(o, (yn, fyn), (xn, fxn))
return o, true
end
phi = (1 + fyn / fwn) # pick one of options
zn::T = yn - phi * fyn / fp
fzn::S = F(zn)
incfn(l)
fp, issue = _fbracket_diff(xn, yn, zn, fxn, fyn, fzn)
if issue
log_message(l, "issue with divided difference f[y,z] - f[x,y] + f[x,z]. ")
o = _set(o, (zn, fzn), (xn, fxn))
return o, true
end
w = 1 / (1 - fzn / fwn)
xi = (1 - 2fyn * fyn * fyn / (fwn * fwn * fxn))
xn1::T = zn - w * xi * fzn / fp
fxn1::S = F(xn1)
incfn(l)
o = _set(o, (xn1, fxn1), (xn, fxn))
return o, false
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 1100 | """
Secant()
Order1()
Orderφ()
The `Order1()` method is an alias for `Secant`. It specifies the
[secant method](https://en.wikipedia.org/wiki/Secant_method).
This method keeps two values in its state, `xₙ` and `xₙ₋₁`. The
updated point is the intersection of the ``x`` axis with the secant line
formed from the two points. The secant method uses ``1`` function
evaluation per step and has order of convergence `φ ≈ (1+sqrt(5))/2`.
The error, `eᵢ = xᵢ - α`, satisfies
`eᵢ₊₂ = f[xᵢ₊₁,xᵢ,α] / f[xᵢ₊₁,xᵢ] * (xᵢ₊₁-α) * (xᵢ - α)`.
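For example (a usage sketch):

```julia
Order1() === Secant()          # true; `Order1` and `Orderφ` are aliases for `Secant`
find_zero(sin, 3.0, Order1())  # ≈ π
```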
"""
struct Secant <: AbstractSecantMethod end
const Order1 = Secant
const Orderφ = Secant
function update_state(
::Order1,
F,
o::AbstractUnivariateZeroState{T,S},
options,
l=NullTracks(),
) where {T,S}
xn0, xn1 = o.xn0, o.xn1
fxn0, fxn1 = o.fxn0, o.fxn1
Δ = fxn1 * (xn1 - xn0) / (fxn1 - fxn0)
if isissue(Δ)
log_message(l, "Increment `Δx` has issues. ")
return o, true
end
x0::T, x1::T = xn1, xn1 - Δ
fx0::S, fx1::S = fxn1, F(x1)
incfn(l)
o = _set(o, (x1, fx1), (x0, fx0))
return o, false
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 1676 | """
Steffensen()
The quadratically converging
[Steffensen](https://en.wikipedia.org/wiki/Steffensen's_method#Simple_description)
method is used for the derivative-free `Order2()` algorithm. Unlike
the quadratically converging Newton's method, no derivative is
necessary, though, like Newton's method, two function calls per step
are needed. Steffensen's algorithm is more sensitive than Newton's method to
poor initial guesses when `f(x)` is large, due to how `f'(x)` is
approximated. The `Order2` method replaces a Steffensen step with a secant
step when `f(x)` is large.
The error, `eᵢ = xᵢ - α`, satisfies
`eᵢ₊₁ = f[xᵢ, xᵢ+fᵢ, α] / f[xᵢ, xᵢ+fᵢ] ⋅ (1 + f[xᵢ, α]) ⋅ eᵢ²`
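For example (a usage sketch):

```julia
find_zero(x -> x^2 - 2, 1.5, Roots.Steffensen())  # ≈ sqrt(2)
```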
"""
struct Steffensen <: AbstractSecantMethod end
"""
Order2
[`Steffensen`](@ref) with a guard: a secant step replaces the Steffensen step when `f(x)` is large.
"""
struct Order2 <: AbstractSecantMethod end
function update_state(
M::Order2,
fs,
o::UnivariateZeroState{T,S},
options,
l=NullTracks(),
) where {T,S}
update_state_guarded(M, Secant(), Steffensen(), fs, o, options, l)
end
function update_state(
::Steffensen,
F,
o::AbstractUnivariateZeroState{T,S},
options,
l=NullTracks(),
) where {T,S}
x0, x1 = o.xn0, o.xn1
fx0, fx1 = o.fxn0, o.fxn1
sgn = sign((fx1 - fx0) / (x1 - x0))
x2 = x1 - sgn * fx1 / oneunit(S) * oneunit(T)
f0 = fx1
f1::S = F(x2)
incfn(l, 1)
delta = -sgn * f0 * f0 / (f1 - f0) * oneunit(T) / oneunit(S)
if isissue(delta)
log_message(l, "Increment `Δx` has issues. ")
return o, true
end
x0, x1::T = x1, x1 - delta
fx0, fx1::S = fx1, F(x1)
incfn(l)
o = _set(o, (x1, fx1), (x0, fx0))
return o, false
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 3813 | # v1.0 only
# Import dependencies.
using BenchmarkTools, Roots, Statistics
# Create benchmark group and benchmarks
benchmarks = BenchmarkGroup()
# Put in specific benchmarks
bracket_methods =
(bisection=Roots.Bisection(), a42=Roots.A42(), aps=Roots.AlefeldPotraShi())
derivative_free_methods = (
o0=Roots.Order0(),
o1=Roots.Order1(),
o1b=Roots.Order1B(),
o2=Roots.Order2(),
o2b=Roots.Order2B(),
o5=Roots.Order5(),
o8=Roots.Order8(),
o16=Roots.Order16(),
)
# collection of doable problems
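# each entry: name => (f, x0, (a, b)), i.e. a function, an initial guess, and a bracketing interval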
problems = Dict(
"f1" => (x -> sin(x), 3.0, (3.0, 4.0)),
"f2" => (x -> x^5 - x - 1, 1.0, (0.5, 5.0)),
"f3" => (x -> exp(x) - x^4, 7.0, (5.0, 20.0)),
"f4" => (x -> cos(x) - x / 2, pi / 4, (0.0, pi / 2)),
"f5" => (x -> x^2 - exp(x) - 3x + 2, -0.5, (-1.0, 1.0)),
"f6" => (x -> x^2 - exp(x) - 3x + 2, 2.0, (0.0, 3.0)),
"f7" => (x -> tanh(x) - tan(x), 7.6, (4.0, 8.0)),
"f8" => (x -> exp(-x^2 + x + 2) - cos(x) + x^3 + 1, -0.5, (-2.0, 1.0)),
"f9" => (x -> log(x) + sqrt(x) - 5, 7, (7.0, 10.0)),
"f10" => (x -> log(x) + sqrt(x) - 5, 20, (7.0, 10.0)),
)
function run_bracket(problems, Ms)
for (nm, prob) in problems
fn, x0, ab = prob
for (mnm, M) in zip(fieldnames(typeof(Ms)), Ms)
solve(ZeroProblem(fn, ab), M)
#find_zero(fn, ab, M)
end
end
end
function run_bracketing(problems, Ms)
rts = Float64[]
for (nm, prob) in problems
fn, x0, ab = prob
for M in Ms
rt = solve(ZeroProblem(fn, ab), M)
#rt = find_zero(fn, ab, M)
push!(rts, rt)
end
end
rts
end
function run_derivative_free(problems, Ms)
rts = Float64[]
for (nm, prob) in problems
fn, x0, ab = prob
for M in Ms
if M == Order0()
rt = find_zero(fn, x0, M)
else
rt = solve(ZeroProblem(fn, ab), M)
end
push!(rts, rt)
end
end
rts
end
function run_simple(problems)
rts = Float64[]
for (nm, prob) in problems
fn, x0, ab = prob
push!(rts, Roots.bisection(fn, ab[1], ab[2]))
push!(rts, Roots.bisection(fn, ab[1], ab[2], xatol=1e-6))
push!(rts, Roots.secant_method(fn, x0))
end
rts
end
benchmarks = BenchmarkGroup()
benchmarks["bracketing"] = @benchmarkable run_bracketing($problems, $bracket_methods)
benchmarks["derivative_free"] =
@benchmarkable run_derivative_free($problems, $derivative_free_methods)
benchmarks["simple"] = @benchmarkable run_simple($problems)
for (nm, prob) in problems
fn, x0, ab = prob
@assert fn(ab[1]) * fn(ab[2]) < 0
Ms = bracket_methods
for (mnm, M) in zip(fieldnames(typeof(Ms)), Ms)
benchmarks[nm * "-" * string(mnm)] = @benchmarkable find_zero($fn, $ab, $M)
end
Ms = derivative_free_methods
for (mnm, M) in zip(fieldnames(typeof(Ms)), Ms)
benchmarks[nm * "-" * string(mnm)] = @benchmarkable find_zero($fn, $x0, $M)
end
# simple methods
u, v = ab
benchmarks[nm * "-bisection"] = @benchmarkable Roots.bisection($fn, $u, $v)
benchmarks[nm * "-bisection-atol"] =
@benchmarkable Roots.bisection($fn, $u, $v, xatol=1e-6)
benchmarks[nm * "-secant"] = @benchmarkable Roots.secant_method($fn, $x0)
end
results = run(benchmarks) # Get results.
results = median(results) # Condense to median.
nm = "benchmarks.json"
fname = joinpath(@__DIR__, nm)
if isinteractive()
println("""
To save results, manually call in the REPL: BenchmarkTools.save("benchmarks.json", results)
""")
end
# Compare to old results
try
oldresults = BenchmarkTools.load(fname)[1]
judge(oldresults, results)
catch err
error("Couldn't load file- make sure that you've previously saved results.", err.prefix)
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 925 | using Roots
using Test
using Aqua
import SpecialFunctions.erf
struct SomeInterval{T}
a::T
b::T
end
SomeInterval(a, b) = SomeInterval(promote(a, b)...)
Base.extrema(I::SomeInterval) = I.a < I.b ? (I.a, I.b) : (I.b, I.a)
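# `SomeInterval` is a minimal custom type supporting `extrema`; tests use it to
# check that a bracketing interval may be specified by any such type.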
# count function calls
mutable struct Cnt
cnt::Int
f
Cnt(f) = new(0, f)
end
(f::Cnt)(x) = (f.cnt += 1; f.f(x))
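# e.g. g = Cnt(sin); g(1.0); g.cnt == 1, as each call increments the counter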
avg(x) = sum(x) / length(x)
include("./test_find_zero.jl")
include("./test_bracketing.jl")
include("./test_derivative_free.jl")
include("./test_simple.jl")
include("./test_find_zeros.jl")
include("./test_fzero.jl")
include("./test_newton.jl")
include("./test_chain_rules.jl")
include("./test_simple.jl")
include("./test_composable.jl")
VERSION >= v"1.6.0" && include("./test_allocations.jl")
VERSION >= v"1.9.0" && include("./test_extensions.jl")
#include("./runbenchmarks.jl")
#include("./test_derivative_free_interactive.jl")
Aqua.test_all(Roots; ambiguities=false)
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 3826 | using Test
import BenchmarkTools
@testset "solve: zero allocations" begin
fs = (sin, cos, x -> -sin(x))
x0 = (3, 4)
x0′ = big.(x0)
Ms = (
Order0(),
Order1(),
Order2(),
Order5(),
Order8(),
Order16(),
Roots.Order1B(),
Roots.Order2B(),
Roots.Bisection(),
Roots.A42(),
Roots.AlefeldPotraShi(),
Roots.Brent(),
Roots.Ridders(),
Roots.ITP(),
) # not FalsePosition()
Ns = (Roots.Newton(), Roots.Halley(), Roots.Schroder())
for M in Ms
@test BenchmarkTools.@ballocated(solve(ZeroProblem($fs, $x0), $M)) == 0
@inferred solve(ZeroProblem(fs, x0′), M)
end
for M in Ns
@test BenchmarkTools.@ballocated(solve(ZeroProblem($fs, $x0), $M)) == 0
@inferred solve(ZeroProblem(fs, x0′), M)
end
# Allocations in Lith
F(x) = (sin(x), tan(x), -cot(x), tan(x), -cot(x), tan(x), -cot(x), tan(x))
x0 = collect(range(3, 4, length=6))
VERSION >= v"1.7" && @test BenchmarkTools.@ballocated(
solve(ZeroProblem($F, $x0), Roots.LithBoonkkampIJzerman{1,1}())
) == 0
@test BenchmarkTools.@ballocated(
solve(ZeroProblem($F, $x0), Roots.LithBoonkkampIJzerman{1,2}())
) == 0
@test BenchmarkTools.@ballocated(
solve(ZeroProblem($F, $x0), Roots.LithBoonkkampIJzerman{1,3}())
) == 0
@test BenchmarkTools.@ballocated(
solve(ZeroProblem($F, $x0), Roots.LithBoonkkampIJzerman{1,4}())
) == 0
@test BenchmarkTools.@ballocated(
solve(ZeroProblem($F, $x0), Roots.LithBoonkkampIJzerman{1,5}())
) == 0
@test BenchmarkTools.@ballocated(
solve(ZeroProblem($F, $x0), Roots.LithBoonkkampIJzerman{2,0}())
) == 0
VERSION >= v"1.7" && @test BenchmarkTools.@ballocated(
solve(ZeroProblem($F, $x0), Roots.LithBoonkkampIJzerman{2,1}())
) == 0
@test BenchmarkTools.@ballocated(
solve(ZeroProblem($F, $x0), Roots.LithBoonkkampIJzerman{2,2}())
) == 0
@test BenchmarkTools.@ballocated(
solve(ZeroProblem($F, $x0), Roots.LithBoonkkampIJzerman{2,3}())
) == 0
@test BenchmarkTools.@ballocated(
solve(ZeroProblem($F, $x0), Roots.LithBoonkkampIJzerman{2,4}())
) == 0
@test BenchmarkTools.@ballocated(
solve(ZeroProblem($F, $x0), Roots.LithBoonkkampIJzerman{2,5}())
) == 0
@test BenchmarkTools.@ballocated(
solve(ZeroProblem($F, $x0), Roots.LithBoonkkampIJzerman{3,0}())
) == 0
VERSION >= v"1.7" && @test BenchmarkTools.@ballocated(
solve(ZeroProblem($F, $x0), Roots.LithBoonkkampIJzerman{3,1}())
) == 0
@test BenchmarkTools.@ballocated(
solve(ZeroProblem($F, $x0), Roots.LithBoonkkampIJzerman{4,0}())
) == 0
@test BenchmarkTools.@ballocated(
solve(ZeroProblem($F, $x0), Roots.LithBoonkkampIJzerman{5,0}())
) == 0
@test BenchmarkTools.@ballocated(
solve(ZeroProblem($F, $x0), Roots.LithBoonkkampIJzerman{6,0}())
) == 0
# issue #323, test allocations with parameter
f(x, p) = x^2 - p
x0 = (1.0, 2.0)
p = 2.0
for M in Ms
@test BenchmarkTools.@ballocated(solve(ZeroProblem($f, $x0), $M, $p)) == 0
@test BenchmarkTools.@ballocated(solve(ZeroProblem($f, $x0), $M; p=$p)) == 0
end
# issue #423 type instability
for M in Ms
@test @inferred solve(ZeroProblem(sin, 3.0f0)) ≈ pi
end
end
@testset "simple: zero allocations" begin
@test BenchmarkTools.@ballocated(Roots.bisection(sin, 3, 4)) == 0
@test BenchmarkTools.@ballocated(Roots.secant_method(sin, 3)) == 0
@test BenchmarkTools.@ballocated(Roots.muller(sin, 2.9, 3.0, 3.1)) == 0
@test BenchmarkTools.@ballocated(Roots.newton((sin, cos), 3)) == 0
@test BenchmarkTools.@ballocated(Roots.dfree(sin, 3)) == 0
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 12767 | using Roots
using Test
using Printf
## testing bracketing methods
## Originally by John Travers
#
#
## This set of tests is very useful for benchmarking the number of function
## calls, failures, and max errors for the various bracketing methods.
## Table 1 from TOMS748 by Alefeld, Potra, Shi
mutable struct Func
name::Symbol
val::Function
bracket::Function
params::Vector{Any}
end
function show(io::IO, f::Func)
@printf io "Func(%s)" f.name
end
## Construct a function object, and check root brackets
macro Func(name)
@gensym f p b
esc(quote
$f = Func($name, val, bracket, params)
for $p in params
$b = bracket($p)
@assert val($p, $b[1]) * $f.val($p, $b[2]) < 0 "Invalid bracket"
end
push!(known_functions, $f)
$f
end)
end
known_functions = Func[]
func1 = let
val = (_, x) -> sin(x) - x / 2
bracket(_) = [0.5pi, pi]
params = [()]
@Func :func1
end
func2 = let
val = (n, x) -> -2 * sum([(2i - 5)^2 / (x - i * i)^3 for i in 1:20])
bracket(n) = [n^2 + 1e-9, (n + 1)^2 - 1e-9]
params = 1:10
@Func :func2
end
func3 = let
val = (p, x) -> p[1] * x * exp(p[2] * x)
bracket(p) = [-9.0, 31.0]
params = [(-40.0, -1.0), (-100.0, -2.0), (-200.0, -3.0)]
@Func :func3
end
func4 = let
val = (p, x) -> x^p[2] - p[1]
bracket(p) = p[3]
params = Tuple{Float64,Float64,Vector{Float64}}[]
for a_ in [0.2, 1.0], n in 4:2:12
push!(params, (a_, n, [0.0, 5.0]))
end
for n in 8:2:14
push!(params, (1.0, n, [-0.95, 4.05]))
end
@Func :func4
end
func5 = let
val = (p, x) -> sin(x) - 0.5
bracket(p) = [0.0, 1.5]
params = [()]
@Func :func5
end
func6 = let
val = (n, x) -> 2x * exp(-n) - 2exp(-n * x) + 1.0
bracket(n) = [0.0, 1.0]
params = vcat(1:5, 20:20:100)
@Func :func6
end
func7 = let
val = (n, x) -> (1 + (1 - n)^2) * x - (1 - n * x)^2
bracket(n) = [0.0, 1.0]
params = [5.0, 10.0, 20.0]
@Func :func7
end
func8 = let
val = (n, x) -> x^2 - (1 - x)^n
bracket(n) = [0.0, 1.0]
params = [2.0, 5.0, 10.0, 15.0, 20.0]
@Func :func8
end
func9 = let
val = (n, x) -> (1 + (1 - n)^4) * x - (1 - n * x)^4
bracket(n) = [0.0, 1.0]
params = [1.0, 2.0, 4.0, 5.0, 8.0, 15.0, 20.0]
@Func :func9
end
func10 = let
val = (n, x) -> exp(-n * x) * (x - 1) + x^n
bracket(n) = [0.0, 1.0]
params = [1, 5, 10, 15, 20]
@Func :func10
end
func11 = let
val = (n, x) -> (n * x - 1) / ((n - 1) * x)
bracket(n) = [0.01, 1.0]
params = [2, 5, 15, 20]
@Func :func11
end
func12 = let
val = (n, x) -> x^(1 / n) - n^(1 / n)
bracket(n) = [1.0, 100.0]
params = vcat(2:6, 7:2:33)
@Func :func12
end
func13 = let
val = (n, x) -> x == 0.0 ? 0.0 : x / exp(1 / (x * x))
bracket(n) = [-1.0, 4.0]
params = [()]
@Func :func13
end
func14 = let
val = (n, x) -> x >= 0 ? n / 20 * (x / 1.5 + sin(x) - 1) : -n / 20
bracket(n) = [-1e4, 0.5pi]
params = 1:40
@Func :func14
end
func15 = let
val = (n, x) -> begin
if x > 2e-3 / (1 + n)
exp(1) - 1.859
elseif x < 0
-0.859
else
exp(0.5e3(n + 1)x) - 1.859
end
end
bracket(n) = [-1e4, 1e-4]
params = vcat(20:40, 100:100:1000)
@Func :func15
end
mutable struct MethodResults
name
evalcount::Int
maxresidual::Float64
failures::Vector{Tuple{Func,Int}}
end
MethodResults() = MethodResults(nothing, 0, 0.0, Tuple{Func,Int}[])
show(io::IO, results::MethodResults) = print(
io,
"MethodResults($(results.name), evalcount=$(results.evalcount), numfailures=$(length(results.failures)), maxresidual=$(results.maxresidual))",
)
## Run a method on all known functions.
function run_tests(method; verbose=false, trace=false, name=nothing, abandon=false)
results = MethodResults()
results.name = name
for f in known_functions
for i in 1:length(f.params)
p = f.params[i]
evalcount = 0
function feval(x)
evalcount += 1
result = f.val(p, x)
trace && @printf "%s[%d]: %s ⇒ %s\n" f i x result
result
end
result, residual = nothing, nothing
try
result = method(feval, f.bracket(p))
isnan(result) && error("NaN")
residual = f.val(p, result)
verbose &&
@printf "%s[%d] ⇒ %d / %s, residual %.5e\n" f i evalcount result residual
catch ex
verbose && @printf "%s[%d] ⇒ FAILED: %s\n" f i ex
push!(results.failures, (f, i))
abandon && rethrow(ex)
end
results.evalcount += evalcount
## Some functions might return non-real values on failures
if isa(result, AbstractFloat) &&
isa(residual, AbstractFloat) &&
isfinite(residual)
results.maxresidual = max(results.maxresidual, abs(residual))
end
end
end
results
end
## Run a method on a single test function, collecting per-parameter diagnostics.
## (`run_test` and `get_stats` build `DataFrame`s, so they require the DataFrames
## package, which is not loaded in this file; they are intended for interactive use.)
function run_test(f, M; verbose=false, trace=false, name=nothing, abandon=false, kwargs...)
d = DataFrame(
i=Int[],
cnt=Int[],
steps=Int[],
Δ=Float64[],
residual=Float64[],
result=Float64[],
)
for (i, p) in enumerate(f.params)
evalcount = 0
function feval(x)
evalcount += 1
result = f.val(p, x)
result
end
result, residual = nothing, nothing
l = Roots.Tracks()
try
result = find_zero(feval, f.bracket(p), M; tracks=l, kwargs...)
isnan(result) && error("NaN")
residual = f.val(p, result)
catch ex
result = NaN
residual = NaN
end
Δ = isempty(l.abₛ) ? NaN : -(l.abₛ[end]...)
push!(d, (i=i, cnt=evalcount, steps=l.steps, Δ=Δ, residual=residual, result=result))
end
d
end
function get_stats(M; kwargs...)
d = DataFrame(
fn=Int[],
i=Int[],
cnt=Int[],
steps=Int[],
Δ=Float64[],
residual=Float64[],
result=Float64[],
)
for (j, fn) in enumerate(known_functions)
dⱼ = run_test(fn, M; kwargs...)
dⱼ.fn .= j
append!(d, dⱼ)
end
d = transform(d, 5:7 => ByRow((Δ, ϵ, a) -> min(abs(a)^(-1) * abs(Δ), abs(ϵ))) => :min)
end
# used to test BracketedHalley
# function rt(f, M)
# for i in 1:length(f.params)
# p = f.params[i]
# fn = x ->f.val(p,x)
# try
# x = find_zero((fn, fn', fn''), f.bracket(p), M)#; atol=0.0, rtol=0.0)
# @show x, fn(x)
# catch err
# @show :err
# end
# end
# end
@testset "bracketing methods" begin
## Test for failures, ideally all of these would be 0
## test for residual, ideally small
## test for evaluation counts, ideally not so low for these problems
## exact_bracket
Ms = [
Roots.Brent(),
Roots.A42(),
Roots.AlefeldPotraShi(),
Roots.Chandrapatla(),
Roots.ITP(),
Roots.Ridders(),
Roots.Bisection(),
]
results = [run_tests((f, b) -> find_zero(f, b, M), name="$M") for M in Ms]
maxfailures = maximum(length(result.failures) for result in results)
maxresidual = maximum(result.maxresidual for result in results)
cnts = [result.evalcount for result in results]
@test maxfailures == 0
@test maxresidual <= 5e-13
@test avg(cnts) <= 4700
## False position has larger residuals
## Fn #13 fails on numbers 2 and 4 until maxsteps is increased; 100 works
Ms = [Roots.FalsePosition(i) for i in 1:12]
results = [run_tests((f, b) -> find_zero(f, b, M), name="$M") for M in Ms]
maxfailures = maximum(length(result.failures) for result in results)
maxresidual = maximum(result.maxresidual for result in results)
cnts = [result.evalcount for result in results]
@test maxfailures <= 1
@test maxresidual <= 1e-5
@test avg(cnts) <= 3000
## issue 412 check for bracket
for M in Ms
@test_throws ArgumentError find_zero(x -> x - 1, (-3, 0), M)
@test_throws ArgumentError find_zero(x -> 1 + x^2, (10, 20), M)
end
end
## Some tests for FalsePosition methods
@testset "FalsePosition" begin
galadino_probs = [
(x -> x^3 - 1, [0.5, 1.5]),
(x -> x^2 * (x^2 / 3 + sqrt(2) * sin(x)) - sqrt(3) / 18, [0.1, 1]),
(x -> 11x^11 - 1, [0.1, 1]),
(x -> x^3 + 1, [-1.8, 0]),
(x -> x^3 - 2x - 5, [2.0, 3]),
((x, n=5) -> 2x * exp(-n) + 1 - 2exp(-n * x), [0, 1]),
((x, n=10) -> 2x * exp(-n) + 1 - 2exp(-n * x), [0, 1]),
((x, n=20) -> 2x * exp(-n) + 1 - 2exp(-n * x), [0, 1]),
((x, n=5) -> (1 + (1 - n)^2) * x^2 - (1 - n * x)^2, [0, 1]),
((x, n=10) -> (1 + (1 - n)^2) * x^2 - (1 - n * x)^2, [0, 1]),
((x, n=20) -> (1 + (1 - n)^2) * x^2 - (1 - n * x)^2, [0, 1]),
((x, n=5) -> x^2 - (1 - x)^n, [0, 1]),
((x, n=10) -> x^2 - (1 - x)^n, [0, 1]),
((x, n=20) -> x^2 - (1 - x)^n, [0, 1]),
((x, n=5) -> (1 + (1 - n)^4) * x - (1 - n * x)^4, [0, 1]),
((x, n=10) -> (1 + (1 - n)^4) * x - (1 - n * x)^4, [0, 1]),
((x, n=20) -> (1 + (1 - n)^4) * x - (1 - n * x)^4, [0, 1]),
((x, n=5) -> exp(-n * x) * (x - 1) + x^n, [0, 1]),
((x, n=10) -> exp(-n * x) * (x - 1) + x^n, [0, 1]),
((x, n=20) -> exp(-n * x) * (x - 1) + x^n, [0, 1]),
((x, n=5) -> x^2 + sin(x / n) - 1 / 4, [0, 1]),
((x, n=10) -> x^2 + sin(x / n) - 1 / 4, [0, 1]),
((x, n=20) -> x^2 + sin(x / n) - 1 / 4, [0, 1]),
]
for (fn_, ab) in galadino_probs
for M in (FalsePosition(i) for i in 1:12)
g = Cnt(fn_)
x0_ = find_zero(g, ab, M)
@test abs(fn_(x0_)) <= 1e-7
@test g.cnt <= 50
end
end
end
@testset "Bracketing edge cases" begin
Ms = (Bisection(), Roots.A42(), Roots.AlefeldPotraShi())
# Endpoints can be infinite
for M in Ms
@test find_zero(sign, (-Inf, Inf), M) ≈ 0 atol = 1e-16
end
# Function can be infinite for Bisection and Float64
@test @inferred(find_zero(x -> Inf * sign(x - pi), (-Inf, Inf), Bisection())) ≈ pi
# finds discontinuities, not necessarily zeros
f = (x, p=0.0) -> 1 / (x - p) #avoid issue with `0` being identified by `_middle`
for M in Ms
@test find_zero(f, (-1, 1), M, p=eps()) ≈ eps() atol = 2eps()
end
@test iszero(@inferred(find_zero(f, (-1, 1), Roots.Bisection())))
# XXX changes with relaxed tolerance (adding non-zero xatol)
#@test_throws Roots.ConvergenceFailed find_zero(f, (-1, 1), Roots.A42())
#@test_throws Roots.ConvergenceFailed find_zero(f, (-1, 1), Roots.AlefeldPotraShi())
# subnormals should still be okay
α = nextfloat(nextfloat(0.0))
f = x -> x - α
for M in (Bisection(),) #Ms XXX NOT A42, AlefeldPotraShi with xatol !==0
@test find_zero(f, (-1, 1), M) == α
end
# with NaN, not Inf
f = x -> abs(x) / x
for M in Ms
@test find_zero(f, (-1, 1), M) ≈ 0 atol = eps()
end
# points are not evaluated outside boundary; issue #233
a, b = -1, 1
f = x -> abs(x) > 1 ? error("out of bounds") : 1.0 - x
for M in Ms
@test find_zero(f, (a, b), M) ≈ 1
end
f = x -> abs(x) > 1 ? error("out of bounds") : prevfloat(1.0) - x
for M in Ms
@test find_zero(f, (a, b), M) ≈ 1
end
# check if fa*fb ≥ 0
for M in (
Roots.Bisection(),
Roots.A42(),
Roots.AlefeldPotraShi(),
Roots.Brent(),
Roots.Ridders(),
Roots.Chandrapatla(),
Roots.ITP(),
Roots.FalsePosition(),
)
x = find_zero(x -> sin(x), (0, 1))
@test iszero(x)
@test_throws ArgumentError find_zero(x -> sin(x), (2, 3)) # no bracket
end
# last bit of accuracy, when close issue #368
@test find_zero(x -> sinpi(-1 / 40 + x / 40) + 1 - x, (0, 2), A42()) == 1.0
# sloppy bug using isfinite (#373)
f = x -> 1 - x / (x - 1)^2
xleft = 1 + eps(BigFloat)
xright = 3 * xleft
x = find_zero(f, (xleft, xright))
@test abs(f(x)) <= 2eps(BigFloat)
# simple a42()
m = run_tests(Roots.a42)
VERSION >= v"1.6" && @test isempty(m.failures)
@test m.evalcount <= 3000 # paper says 2884, this has 2877
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 3180 | using Roots
using ChainRulesTestUtils
using Zygote
using Test
# issue #325 add frule, rrule
struct 𝐺
p
end
(g::𝐺)(x) = cos(x) - g.p * x
G₃(p) = find_zero(𝐺(p), (0, pi / 2), Bisection())
F₃(p) = find_zero((x, p) -> cos(x) - p * x, (0, pi / 2), Bisection(), p)
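# G₃ fixes p inside the functor 𝐺, while F₃ passes p to `find_zero` as a parameter;
# both solve the same problem, so their gradients should agree (checked below).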
@testset "Test frule and rrule" begin
# Type inference tests of `test_frule` and `test_rrule` with the default
# rule config `ChainRulesTestUtils.TestConfig()` fail due to an issue
# with ChainRulesTestUtils: https://github.com/JuliaDiff/ChainRulesTestUtils.jl/issues/246
# single function
f(x, p) = log(x) - p
test_frule(solve, ZeroProblem(f, 1), Order1(), 1.0; check_inferred=false)
test_rrule(solve, ZeroProblem(f, 1), Order1(), 1.0; check_inferred=false)
if isdefined(Zygote, :ZygoteRuleConfig)
test_rrule(
Zygote.ZygoteRuleConfig(),
solve,
ZeroProblem(f, 1),
Order1(),
1.0;
check_inferred=false,
)
end
F(p) = find_zero(f, 1, Order1(), p)
@test first(Zygote.gradient(F, 1)) ≈ exp(1)
g(x, p) = x^2 - p[1] * x - p[2]
test_frule(solve, ZeroProblem(g, 1), Order1(), [0.0, 4.0]; check_inferred=false)
test_rrule(solve, ZeroProblem(g, 1), Order1(), [0.0, 4.0]; check_inferred=false)
G(p) = find_zero(g, 1, Order1(), p)
@test first(Zygote.gradient(G, [0, 4])) ≈ [1 / 2, 1 / 4]
# a tuple of functions
fx(x, p) = 1 / x
test_frule(solve, ZeroProblem((f, fx), 1), Roots.Newton(), 1.0; check_inferred=false)
test_rrule(solve, ZeroProblem((f, fx), 1), Roots.Newton(), 1.0; check_inferred=false)
if isdefined(Zygote, :ZygoteRuleConfig)
test_rrule(
Zygote.ZygoteRuleConfig(),
solve,
ZeroProblem((f, fx), 1),
Roots.Newton(),
1.0;
check_inferred=false,
)
end
F2(p) = find_zero((f, fx), 1, Roots.Newton(), p)
@test first(Zygote.gradient(F2, 1)) ≈ exp(1)
gx(x, p) = 2x - p[1]
test_frule(
solve,
ZeroProblem((g, gx), 1),
Roots.Newton(),
[0.0, 4.0];
check_inferred=false,
)
test_rrule(
solve,
ZeroProblem((g, gx), 1),
Roots.Newton(),
[0.0, 4.0];
check_inferred=false,
)
G2(p) = find_zero((g, gx), 1, Roots.Newton(), p)
@test first(Zygote.gradient(G2, [0, 4])) ≈ [1 / 2, 1 / 4]
# test Functor; issue #408
x = rand()
@test first(Zygote.gradient(F₃, x)) ≈ first(Zygote.gradient(G₃, x))
# ForwardDiff extension makes this fail.
VERSION >= v"1.9.0" &&
@test_broken first(Zygote.hessian(F₃, x)) ≈ first(Zygote.hessian(G₃, x))
# test_frule, test_rrule aren't successful
#=
# DimensionMismatch: arrays could not be broadcast to a common size; got a dimension with lengths 3 and 2
test_frule(
solve,
ZeroProblem(𝐺(2), (0.0, pi/2)),
Roots.Bisection();
check_inferred=false,
)
# MethodError: no method matching keys(::NoTangent)
test_rrule(
solve,
ZeroProblem(𝐺(2), (0.0, pi/2)),
Roots.Bisection();
check_inferred=false,
)
=#
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 2538 | # Simple tests to make sure Roots works with other
# packages. This is not part of runtests, as no expectation
# that CI should run these
using Roots
using Test
using Unitful
using Polynomials
using ForwardDiff
@testset "Test composability with other packages" begin
orders = [
Order0(),
Order1(),
Roots.Order1B(),
Order2(),
Roots.Order2B(),
Order5(),
Order8(),
Order16(),
]
# Unitful
@testset "find zero(s) with Unitful" begin
s = u"s"
m = u"m"
g = (9 + 8//10) * m / s^2
v0 = 10m / s
y0 = 16m
y(t) = -g * t^2 + v0 * t + y0
for order in orders
@test find_zero(y, 1.8s, order) ≈ 1.886053370668014s
@test find_zero(y, 1.8f0s, order) isa typeof(1.88f0s)
end
for M in [Roots.Bisection(), Roots.A42(), Roots.AlefeldPotraShi()]
@test find_zero(y, (1.8s, 1.9s), M) ≈ 1.886053370668014s
@test find_zero(y, (1.8f0s, 1.9f0s), M) isa typeof(1.88f0s)
end
xrts = find_zeros(y, 0s, 10s)
@test length(xrts) == 1
@test xrts[1] ≈ 1.886053370668014s
# issue #434
xzs1 = find_zeros(x -> cos(x / 1u"m"), -1.6u"m", 2u"m")
@test length(xzs1) == 2 && maximum(xzs1) ≈ 1.5707963267948966 * u"m"
FX = ZeroProblem(y, (0f0s, 2f0s))
prob = Roots.init(FX, Roots.AlefeldPotraShi())
@test Roots.is_small_Δx(prob.M, prob.state, prob.options) isa Bool # does not throw
end
# Polynomials
@testset "find zero(s) with Polynomials" begin
m = s = 1.0
g = 9.8 * m / s^2
v0 = 10m / s
y0 = 16m
y(t) = -g * t^2 + v0 * t + y0
x = variable()
for order in orders
@test find_zero(y(x), 1.8, order) ≈ 1.8860533706680143
end
for M in [Roots.Bisection(), Roots.A42(), Roots.AlefeldPotraShi()]
@test find_zero(y(x), (1.8, 1.9), M) ≈ 1.8860533706680143
end
xrts = find_zeros(y(x), 0, 10)
@test length(xrts) == 1
@test xrts[1] ≈ 1.8860533706680143
end
# ForwardDiff
# taking a derivative of a function that using find_zero.
D(f) = x -> ForwardDiff.derivative(f, x)
F(x, y) = (x + 1) * (x^2 + y^2) - 2x^2 # has loop for x ∈ (0,1)
h(x) = find_zero(y -> F(x, y), -1 / 4 * one(x)) # one(x) to get proper type for D
α = find_zero(D(h), (0, 1)) # find lowest point on loop
@test h(α) ≤ h(α + 0.1)
@test h(α) ≤ h(α - 0.1)
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 10985 | using Roots
using Test
using JSON
using Printf
## testing/benchmarking derivative-free methods
mutable struct Func1
name::Symbol
val::Function
x0::Function
alpha # a value or values
params::Vector{Any}
end
function Base.show(io::IO, f::Func1)
@printf io "Func(%s)" f.name
end
#Construct a function object,
macro Func1(name)
@gensym f p x
esc(quote
$f = Func1($name, val, x0, alpha, params)
push!(known_functions, $f)
$f
end)
end
known_functions = Func1[]
## This set of tests is useful for benchmarking the number of function
## calls, failures, and max errors for the various derivative_free methods.
# easy ones
func1 = let
val = (_, x) -> cos(x) - x / 2
a, b, N = 0.0, pi / 2, 10
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = 1.0298665293222589
@Func1 :func1
end
func2 = let
val = (_, x) -> exp(x) - x^4
a, b, N = 5.0, 20.0, 11
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = find_zeros(x -> val(0, x), -5.0, 20.0)
@Func1 :func2
end
func3 = let
val = (_, x) -> x^5 - x - 1
a, b, N = 0.5, 2.0, 10
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = 1.1673039782614187
@Func1 :func3
end
# wider range
func4 = let
val = (_, x) -> (1 + cos(x)) * (exp(x) - 2)
a, b, N = 0.0, 1.7, 15
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = 0.6931471805599453
@Func1 :func4
end
func5 = let
val = (_, x) -> 2 * x - exp(-x)
a, b, N = -5.0, 5.0, 15
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = 0.35173371124919584
@Func1 :func5
end
# once over hump, hard to get back to 0
func6 = let
val = (_, x) -> x * exp(-x)
a, b, N = -5.0, 5.0, 15
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = 0.0
@Func1 :func6
end
# once outside of extrema, points away from zero
func7 = let
val = (_, x) -> 20.0 * x / (100.0 * x^2 + 1.0)
a, b, N = -0.2, 0.2, 15
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = 0.0
@Func1 :func7
end
# should have some failures here
func8 = let
val = (_, x) -> cos(x) - x / 2
a, b, N = 0.0, 3pi / 4, 10
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = 1.0298665293222589
@Func1 :func8
end
func9 = let
val = (_, x) -> tanh(x) - tan(x)
a, b, N = 5.0, 10.0, 8
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = find_zeros(x -> val(0, x), 5, 12)
@Func1 :func9
end
func10 = let
val = (_, x) -> asin(x^2 - 1) - x / 2 + 1
a, b, N = -0.5, 0.9, 10
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = find_zeros(x -> val(0, x), -0.5, 1.0)
@Func1 :func10
end
func11 = let
val = (_, x) -> exp(-x) - cos(x)
a, b, N = -2.0, 2.0, 10
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = find_zeros(x -> exp(-x) - cos(x), -2, 2)
@Func1 :func11
end
func12 = let
val = (_, x) -> sqrt(x) - 1 / x - 3
a, b, N = 5, 20, 10
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = 9.633595562832696
@Func1 :func12
end
## multiplicity: http://ir.igsnrr.ac.cn/bitstream/311030/8840/1/%E4%BE%AF%E9%BA%9F%E7%A7%91(SCI)2.pdf
func13 = let
val = (_, x) -> (x - sqrt(5))^4 / ((x - 1)^2 + 2)
a, b, N = 3.0, 4.5, 10
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = 2.236067977499790
@Func1 :func13
end
func14 = let
val = (_, x) -> (sin(x)^2 - 2x + 1)^5
a, b, N = 3.0, 5.0, 10
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = 0.71483582544138924
@Func1 :func14
end
func15 = let
val = (_, x) -> (8x * exp(-x^2) - 2x - 3)^8
a, b, N = -2.0, -1.5, 10
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = -1.7903531791589544
@Func1 :func15
end
# this is 1e6 * (x-1)^7, has many "zeros" near 1
func16 = let
val = (_, x) -> 1e6 * (x^7 - 7x^6 + 21x^5 - 35x^4 + 35x^3 - 21x^2 + 7x - 1)
a, b, N = -0.0, 2.0, 10
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = 1.0
@Func1 :func16
end
func17 = let
val = (_, x) -> (exp(-x^2 + x + 3) - x + 2)^9
a, b, N = 2.4, 2.5, 10
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = 2.4905398276083051
@Func1 :func17
end
func18 = let
val = (_, x) -> (exp(-x) + 2sin(x))^4
a, b, N = 3.0, 3.4, 10
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = 3.1627488709263654
@Func1 :func18
end
## hard w/o aggressive bracketing
func19 = let
val = (_, x) -> cbrt(x)
a, b, N = -3.0, 3.0, 10
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = 0.0
@Func1 :func19
end
func20 = let
val = (_, x) -> x < -1 / 4 ? 3 / 4 * x - 0.3125 : x < 1 / 4 ? 2x : 3 / 4 * x + 0.3125
a, b, N = -1.0, 1.0, 10
x0(n) = range(a, stop=b, length=N)[n]
params = 1:N
alpha = 0.0
@Func1 :func20
end
mutable struct MethodResults1
name
problems::Int
evalcount::Int
maxresidual::Float64
maxdifference::Float64
failures::Vector{Tuple{Func1,Int}}
end
MethodResults1() = MethodResults1(nothing, 0, 0, 0.0, Inf, Tuple{Func1,Int}[])
Base.show(io::IO, results::MethodResults1) = print(
io,
"MethodResults($(results.name), evalcount=$(results.evalcount), numfailures=$(length(results.failures)), maxresidual=$(results.maxresidual))",
)
## Run a method on all known functions.
mindiff(a, alpha) = minimum([a - i for i in alpha])
function run_df_tests(method; verbose=false, trace=false, name=nothing, abandon=false)
results = MethodResults1()
results.name = name
results.problems = 0
for f in known_functions
for i in 1:length(f.params)
p = f.params[i]
evalcount = 0
function feval(x)
evalcount += 1
result = f.val(p, x)
trace && @printf "%s[%d]: %s ⇒ %s\n" f i x result
result
end
result, residual = nothing, nothing
try
result = method(feval, f.x0(p))
md = abs(mindiff(result, f.alpha))
isnan(result) && error("NaN")
(md > 1) && error("ran away")
results.problems += 1
results.evalcount += evalcount
residual = f.val(p, result)
verbose &&
@printf "%s[%d] ⇒ %d / %s, residual %.5e\n" f.name i evalcount result residual
## Some functions might return non-real values on failures
if isa(result, AbstractFloat) &&
isa(residual, AbstractFloat) &&
isfinite(residual)
results.maxresidual = max(results.maxresidual, abs(residual))
results.maxdifference =
max(results.maxdifference, mindiff(result, f.alpha))
end
verbose &&
abs(residual) > 1 / 10 &&
@printf "Large Residual [%s] %d/%d\n" f.name i p
catch ex
verbose && @printf "%s[%d] ⇒ FAILED: %s\n" f.name i ex
push!(results.failures, (f, i))
abandon && rethrow(ex)
end
end
end
results
end
D(f, h=1e-4) = x -> (f(x + h) - f(x - h)) / (2h)
D2(f, h=1e-4) = x -> (f(x + h) - 2f(x) + f(x - h)) / h^2
if !isinteractive()
@testset "derivative free methods" begin
## Test for failures, ideally all of these would be 0
## test for residual, ideally small
## test for evaluation counts, ideally not so low for these problems
## basic methods
Ms = [
Roots.Order1(),
Roots.Order1B(),
Roots.Order2(),
Roots.Order2B(),
Roots.Order5(),
Roots.Order8(),
Roots.Order16(),
]
results = [run_df_tests((f, b) -> find_zero(f, b, M), name="$M") for M in Ms]
failures = [length(result.failures) for result in results]
residuals = [result.maxresidual for result in results]
cnts = [result.evalcount / result.problems for result in results]
@test maximum(failures) <= 60
@test maximum(residuals) <= 5e-5
@test avg(cnts) <= 40
## methods which fall back to bisection when bracket found
Ms = [Roots.Order0()]
results = [run_df_tests((f, b) -> find_zero(f, b, M), name="$M") for M in Ms]
failures = [length(result.failures) for result in results]
residuals = [result.maxresidual for result in results]
cnts = [result.evalcount / result.problems for result in results]
@test maximum(failures) <= 30
@test maximum(residuals) <= 1e-5
@test avg(cnts) <= 40
## Newton and Halley
Fs = [
(f, b) -> find_zero((f, D(f)), b, Roots.Newton()),
(f, b) -> find_zero((f, D(f), D2(f)), b, Roots.Halley()),
(f, b) -> find_zero((f, D(f), D2(f)), b, Roots.Schroder()),
]
results = [run_df_tests(F) for F in Fs]
failures = [length(result.failures) for result in results]
residuals = [result.maxresidual for result in results]
cnts = [result.evalcount / result.problems for result in results]
@test maximum(failures) <= 70
@test maximum(residuals) <= 5e-4
@test avg(cnts) <= 50
end
@testset "derivative free, non Float64" begin
Ms = [
Roots.Order0(),
Roots.Order1(),
Roots.Order1B(),
Roots.Order2(),
Roots.Order2B(),
Roots.Order5(),
Roots.Order8(),
Roots.Order16(),
]
Ts = [Float16, Float32, BigFloat]
fn = x -> x^3 - 2x - 5
alpha =
2.094551481542326591482386540579302963857306105628239180304128529045312189983499
x0 = 2.0
for T in Ts
for M in Ms
xstar = find_zero(fn, T(x0), M)
@test xstar ≈ T(alpha) atol = max(sqrt(eps(T)), eps())
@test isa(xstar, T)
end
end
for T in Ts
xstar = Roots.find_zero((fn, D(fn, sqrt(eps(T)))), T(x0), Roots.Newton())
@test xstar ≈ T(alpha) atol = max(sqrt(eps(T)), eps())
@test isa(xstar, T)
xstar = Roots.find_zero(
(fn, D(fn, sqrt(eps(T))), D2(fn, sqrt(eps(T)))),
T(x0),
Roots.Halley(),
)
@test xstar ≈ T(alpha) atol = max(sqrt(eps(T)), eps())
@test isa(xstar, T)
xstar = Roots.find_zero(
(fn, D(fn, sqrt(eps(T))), D2(fn, sqrt(eps(T)))),
T(x0),
Roots.Schroder(),
)
@test xstar ≈ T(alpha) atol = max(sqrt(eps(T)), eps())
@test isa(xstar, T)
end
end
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 16275 | ## A set of functions for exploring convergence
## in an interactive manner:
### Benchmarking tests
## We have
##
## * visualize_diagnostics(which): to see summaries of the methods
## over the different functions with which in (:summary, :counts,
## :residuals)
##
## * identify_regressions(): to identify regressions in counts,
## residuals, failures, or exact answers, as compared with a past
## diagnostic run
##
## * write_out(): to save results of a diagnostic run. Must be run in the Roots/test directory
##
## * compare_convergence(Methods): to generate a table of convergence orders
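##
## A typical interactive session (a sketch; assumes this file has been `include`d):
##
##     visualize_diagnostics(:summary)   # snapshot of the current methods
##     write_out()                       # save a baseline to derivative_free_diagnostics.json
##     identify_regressions()            # compare a later run against that baseline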
using Roots
using Test
using JSON
using Printf
## This uses functions defined here:
include(joinpath(@__DIR__, "test_derivative_free.jl"))
mutable struct Wrapper
f
n
end
Wrapper(f) = Wrapper(f, 0)
(F::Wrapper)(x) = (F.n += 1; F.f(x))
struct TallyCard
cnts::Vector{Int} # fn evals
res::Vector{Float64} # rts
rs::Vector{Float64} # residual
exact::Vector{Int} # exact 1, 0, -1 (for fail)
ds::Vector{Float64} # difference from alpha(s)
rng::Vector{Float64} # range sampled
end
TallyCard() = TallyCard(Int[], Float64[], Float64[], Int[], Float64[], Float64[])
# test a single function, create tally card
function test_function(method, F)
tc = TallyCard()
m, M = Inf, -Inf
for p in F.params
f = Wrapper(x -> F.val(p, x))
result, residual = nothing, nothing
x0 = F.x0(p)
m = x0 < m ? x0 : m
M = x0 > M ? x0 : M
try
result = method(f, x0)
cnt = f.n
residual = f(result)
d = minimum(abs.(result .- F.alpha))
exact =
(
iszero(residual) ||
f(result) * f(nextfloat(result)) < 0 ||
f(result) * f(prevfloat(result)) < 0
) ? 1 : 0
push!(tc.cnts, cnt)
push!(tc.res, result)
push!(tc.ds, d)
push!(tc.rs, residual)
push!(tc.exact, exact)
catch err
push!(tc.cnts, -1)
push!(tc.res, NaN)
push!(tc.ds, NaN)
push!(tc.rs, NaN)
push!(tc.exact, -1)
end
end
append!(tc.rng, (m, M))
tc
end
# vector of vectors to an array
function vvta(vs, T=eltype(vs[1]))
n = length(vs)
m = length(vs[1])
A = zeros(T, n, m)
for j in 1:n
A[j, :] = vs[j]
end
A
end
function vvta1(vs, T)
n = length(vs)
m = length(vs[1])
A = zeros(T, m, n)
for j in 1:n
A[:, j] = vs[j]
end
A
end
## Return Dict of arrays
function create_diagonostics(Ms, Fs, nms)
@assert length(nms) == length(Ms)
out = Array{Any}(undef, length(Ms), length(Fs))
for (i, M) in enumerate(Ms)
for (j, F) in enumerate(Fs)
out[i, j] = test_function(M, F)
end
end
m, n = length(Ms), length(Fs)
D = Dict(
"counts" => Dict(),
"rts" => Dict(),
"residuals" => Dict(),
"exact" => Dict(),
"delta" => Dict(),
"failed" => Dict(),
"nms" => nms,
"size" => (m, n),
)
# Counts
for j in eachindex(Fs)
jj = string(j) # for JSON serialization
cnts = [out[i, j].cnts for i in eachindex(Ms)]
rts = [out[i, j].res for i in eachindex(Ms)]
rs = [out[i, j].rs for i in eachindex(Ms)]
exact = [out[i, j].exact for i in eachindex(Ms)]
ds = [out[i, j].ds for i in eachindex(Ms)]
M = length(cnts)
N = div(M, m)
D["counts"][jj] = vvta(cnts)
D["rts"][jj] = vvta(rts)
D["residuals"][jj] = vvta(rs)
D["exact"][jj] = vvta(exact)
D["delta"][jj] = vvta(ds)
# failed
fc = D["counts"][jj] .< 0
fd = D["delta"][jj] .> 1.0
fr = D["residuals"][jj] .> 1.0
D["failed"][jj] =
[fc[i, j] || fd[i, j] || fr[i, j] for i in 1:size(fc)[1], j in 1:size(fc)[2]]
end
D
end
function visualize_diagnostics(::Val{:summary}, D)
## Some diagnostics
n = length(D["counts"]) # number of functions
m = size(D["counts"]["1"])[1]
for i in 1:m
fs, cnt, exact, maxresidual = 0, 0, 0, 0.0
for j in 1:n
jj = string(j)
fail = D["failed"][jj][i, :]
cnts = D["counts"][jj][i, :]
resids = D["residuals"][jj][i, :]
exacts = D["exact"][jj][i, :]
fs += sum(fail)
cnt += sum(cnts[.!fail])
exact += sum(exacts .== 1)
rs = resids[.!fail]
if !isempty(rs)
maxresidual = max(maxresidual, maximum(rs))
end
end
nm = D["nms"][i]
print(rpad(nm, 15))
print(rpad(cnt, 6))
print(rpad(exact, 6))
print(rpad(fs, 5))
println(maxresidual)
end
end
function visualize_diagnostics(::Val{:counts}, D, j)
## Some diagnostics
n = length(D["counts"]) # number of functions
m = size(D["counts"]["1"])[1]
# for j in 1:n
jj = string(j)
counts = D["counts"][jj]
fails = D["failed"][jj]
println(" --- Function $jj ---")
for i in 1:m
nm = D["nms"][i]
print(rpad(nm, 15))
tot, ntot = 0, 0
for (fail, cnt) in zip(fails[i, :], counts[i, :])
if fail
print(lpad(".", 5))
else
tot += cnt
ntot += 1
print(lpad(cnt, 5))
end
end
avg = round(tot / ntot, digits=1)
println("\t$avg")
end
# end
end
function visualize_diagnostics(::Val{:residuals}, D, j)
## Some diagnostics
n = length(D["counts"]) # number of functions
m = size(D["counts"]["1"])[1]
jj = string(j)
resids = D["residuals"][jj]
exacts = D["exact"][jj]
fails = D["failed"][jj]
println(" --- Function $jj ---")
for i in 1:m
nm = D["nms"][i]
print(rpad(nm, 15))
tot, ntot = 0, 0
for (fail, res, exact) in zip(fails[i, :], resids[i, :], exacts)
if fail
ch = "X"
elseif res > 1e-3
ch = "x"
elseif res > 1e-12
ch = "~"
elseif exact == 1
ch = "∘"
else
ch = "."
end
print(lpad(ch, 2))
end
println("")
end
end
# which in (:all, :summary, :counts, :residuals)
function visualize_diagnostics(D, which=:summary) # :cnts, ...
## Some diagnostics
n = length(D["counts"]) # number of functions
m = size(D["counts"]["1"])[1]
println("Visualize diagnostics: :$which ∈ (:all, :summary, :counts, :residuals)\n")
# summary
if which in (:all, :summary)
println("Method fncnt exact fail maxresidual")
# one row summarizing each method
visualize_diagnostics(Val(:summary), D)
end
# Counts
if which in (:all, :counts)
for j in 1:n
visualize_diagnostics(Val(:counts), D, j)
end
end
# residuals
if which in (:all, :residuals)
println("Residual summary")
println(
"Key:\t`∘` - exact\n\t`.` - res < 1e-12\n\t`~` - 1e-12 < res < 1e-3`\n\t`x` - 1e-3 < res\n\t`X` - failed",
)
for j in 1:n
visualize_diagnostics(Val(:residuals), D, j)
end
end
end
## write out/ read in summaries to file
function write_out(fname, D)
io = open(fname, "w")
JSON.Writer.print(io, D)
close(io)
end
# annoyingly, we need to convert values back to the proper types after
# JSON serialization
function read_in(fname)
D = JSON.parsefile(fname)
E = Dict()
E["nms"] = string.(D["nms"])
E["size"] = Tuple(D["size"])
for nm in ("counts", "exact", "failed")
E[nm] = Dict()
for (k, v) in D[nm]
E[nm][k] = vvta1(v, Int)
end
end
E["residuals"] = Dict()
for (k, v) in D["residuals"]
for vi in v
vi[vi .=== nothing] .= NaN
end
E["residuals"][k] = vvta1(v, Float64)
end
E
end
# compare D to E
function identify_regressions(Dnew, Dold)
out = String[]
Dnew["nms"] == Dold["nms"] || return "Names are different"
Dnew["size"] == Dold["size"] || return "sizes are different"
for (k, v) in Dnew["counts"]
A, A1 = v, Dold["counts"][k]
sum(A[A .> 0]) <= sum(A1[A1 .> 0]) || push!(out, "counts increased for function $k")
sum(A[A .> 0]) < sum(A1[A1 .> 0]) &&
push!(out, "✓ counts decreased for function $k")
end
for (k, v) in Dnew["exact"]
A, A1 = v, Dold["exact"][k]
sum(A .== 1) >= sum(A1 .== 1) || push!(out, "exact decreased for function $k")
sum(A .== 1) > sum(A1 .== 1) && push!(out, "✓ exact increased for function $k")
end
for (k, v) in Dnew["failed"]
A, A1 = v, Dold["failed"][k]
sum(A) <= sum(A1) || push!(out, "failed increased for function $k")
sum(A) < sum(A1) && push!(out, "✓ failed decreased for function $k")
end
for (k, v) in Dnew["residuals"]
A, A1 = v, Dold["residuals"][k]
for i in eachindex(A)
newi, oldi = A[i], A1[i]
if abs(newi) > 1.1 * abs(oldi)
push!(out, "residuals increased for function $k")
break
end
if abs(newi) < 0.9 * abs(oldi)
push!(out, "✓ residuals decreased for function $k")
break
end
end
end
return out
end
## Main interface for interactive use
fname = joinpath(@__DIR__, "derivative_free_diagnostics.json")
elide_ascii(x, n=12) = length(x) > n ? x[1:(n - 3)] * "..." * x[(end - 1):end] : x
function create_diagonostics()
meths = [
Order0(),
Order1(),
Roots.Order1B(),
Roots.King(),
Order2(),
Roots.Steffensen(),
Roots.Order2B(),
Roots.Esser(),
Order5(),
Roots.KumarSinghAkanksha(),
Order8(),
Roots.Thukral8(),
Order16(),
Roots.Thukral16(),
]
Ms = [(f, b) -> find_zero(f, b, M) for M in meths] # F(f,b), name
nms = elide_ascii.([replace(string(M), r"^Roots." => "") for M in meths])
Fs = known_functions
create_diagonostics(Ms, Fs, nms)
end
## write out current diagnostic test
function write_out()
println("Creating diagonostics to save")
write_out(fname, create_diagonostics())
end
## visualize state
"""
visualize_diagnostics(which=:summary)
Show diagnostics summary
`which` is one of `(:all, :summary, :counts, :residuals)`
"""
visualize_diagnostics(which=:summary) = visualize_diagnostics(create_diagonostics(), which)
## identify regressions from currently saved state
"""
identify_regressions()
Compare current state to saved state.
Use `write_out` to save a state.
"""
function identify_regressions()
if !isfile(fname)
println("No previous diagnostic to compare with")
return String[] # empty
end
Dnew = create_diagonostics()
Dold = read_in(fname)
out = identify_regressions(Dnew, Dold)
out
end
## tests for newton, halley
import ForwardDiff: derivative
D(f, n=1) = n > 1 ? D(D(f), n - 1) : x -> derivative(f, float(x))
function derivative_based_diagonostics()
Ms = (
(f, b) -> Roots.find_zero((f, D(f)), b, Roots.Newton()),
(f, b) -> Roots.find_zero((f, D(f), D(f, 2)), b, Roots.Halley()),
(f, b) -> Roots.find_zero((f, D(f), D(f, 2)), b, Roots.Schroder()),
(f, b) -> Roots.find_zero((f, D(f)), b, Roots.Order5()),
)
nms = ("Newton", "Halley", "Schroder", "Order5")
Fs = known_functions
create_diagonostics(Ms, Fs, nms)
end
## Order of convergence
## assuming e_n = x_n - alpha and
## e_{n+1} = C e_n^q this tries to find q by:
## alpha known: e_{n+1}/e_n = (e_n/e_{n-1})^q,
## alpha unknown: f(x_n) - f(alpha) ~ f'(alpha)*(en), so en ~ Cf(x_n)
function COC(M, f, x0, alpha=missing)
op = precision(BigFloat)
setprecision(BigFloat, 8 * 256)
tracks = Roots.Tracks(BigFloat[], BigFloat[])
try
find_zero(f, big(x0), M, tracks=tracks)
catch err
setprecision(BigFloat, op)
rethrow()
end
setprecision(BigFloat, op)
if ismissing(alpha)
fs = tracks.fs
[
Float64(log(abs(fs[k + 2] / fs[k + 1])) / log(abs(fs[k + 1] / fs[k]))) for
k in 1:(length(fs) - 4)
]
else
xs = tracks.xs
es = xs .- alpha
[Float64(log(abs(es[k + 1])) / log(abs(es[k]))) for k in 1:(length(xs) - 3)]
end
end
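# For example (illustrative): for the secant method the computed order should
# approach the golden ratio φ ≈ 1.618:
#     COC(Roots.Secant(), x -> x^2 - 2, 1.5, sqrt(big(2)))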
### Traditional tests: start nearby and compare convergence
"""
Compare convergences. For each method and for several test functions, computes
* abs(x1-alpha), abs(x2-alpha), abs(x3-alpha);
* the computational order of convergence rc = log|f(xk+1)/f(xk)|/log|f(xk)/f(xk-1)|
* the computed zero
Example
```
compare_convergence((Order1(), Order2(), Order8()))
##
## Example (errors from Sidi, Unified treatment...)
## Let error be En = xn - alpha
## Secant: E_{n+1} = f[x_n, x_{n-1}, alpha] / f[x_n, x_{n-1}] E_n E_{n-1}
## Newton: E_{n+1} = f[xn, xn, alpha]/f'(xn) E_n^2
## Steffensen: E_{n+1} = f[xn, xn+fxn, alpha] / f[xn, xn + fxn] ⋅ (1 + f[xn, alpha]) ⋅ En^2
using ForwardDiff; D(f) = x -> ForwardDiff.derivative(f, float(x))
struct MNewton <: Roots.AbstractSecant end
function Roots.update_state(M::MNewton, f, o, opts)
o.xn0, o.fxn0 = o.xn1, o.fxn1
o.xn1 -= f(o.xn1) / D(f)(o.xn1)
o.fxn1 = f(o.xn1)
nothing
end
compare_convergence((Roots.Secant(), MNewton(), Roots.Steffensen()))
```
"""
function compare_convergence(Ms; F=identity)
fns = (
(
x -> log(x^2 + x + 2) - x + 1,
big"4.4",
4.152590736757158274996989004767139785813809448259893154635015805935085336704627,
),
(
x -> sin(x)^2 - x^2 + 1,
big"1.4",
1.404491648215341226035086817786868077176602575918625035145218238569654850906246,
),
(x -> exp(-x^2 + x + 2) - cos(x + 1) + x^3 + 1, -big"0.5", -big"1.0"),
(
x -> x^11 + x + 1,
-big"1",
-8.4439752879202298306029802319765064181966469706324549911391920468474414245176e-01,
),
(x -> (x - 2) * (x^10 + x + 1) * exp(-x - 1), big"1.9", big"2"),
(
x -> sin(3x) + x * cos(x),
big"1",
1.197769535216271165938579472950989827411047786536025790115116815210444571657156,
),
(
x -> exp(-x) - 1 + x / 5,
big"4.5",
4.96511423174427630369875913132289394405558498679725097281444614478046398795746,
),
(
x -> exp(sin(x)) - x + 1,
big"2.3",
2.630664147927903633975327052350598568584731954733163386430717083451519883744738,
),
(x -> (exp(x^2 + x - 6) - 1) * (x^2 + 1), big"2.2", big"2"),
(
x -> x * exp(x^2) - sin(x)^2 + 3cos(x) + 5,
-big"1.2",
-1.207647827130918927009416758356084097760235818949538815205924601763336168539887,
),
(x -> x^8 - x^7 + x^4 - x^3 + x - 1, big"1.2", big"1"),
)
# formatting fns
Log10 = x -> begin # formatting
if iszero(x)
return (0, 0)
end
n = trunc(Int, log10(x)) - 1
x / (10.0^n), -n
end
elide_ascii(x, n=12) = length(x) > n ? x[1:(n - 2)] * ".." * x[(end - 1):end] : x
for (i, u) in enumerate(fns)
fn_, x0, xstar = u
fn = F(fn_)
for M in Ms
tracks = Roots.Tracks(BigFloat, BigFloat)
a = try
find_zero(fn, x0, M, tracks=tracks)
catch err
NaN * x0
end
x1, x2, x3 = tracks.xs[1:3]
coc = log(abs(fn_(x3) / fn_(x2))) / log(abs(fn_(x2) / fn_(x1)))
X1, X2, X3 = abs.((x1, x2, x3) .- xstar)
@printf "%s: %s\t%.3f(-%02i)\t%.3f(-%02i)\t%.3f(-%02i)\t%.2f\t%.8f\n" "f$i" elide_ascii(
replace(string(M), "Roots." => ""),
8,
) Log10(X1)... Log10(X2)... Log10(X3)... coc a
end
println("")
end
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 3145 | #=
using SymPy
@testset "SymPy" begin
SymPy.@syms x
@test find_zero(cos(x) ~ 1/2, (0, pi/2)) ≈ find_zero(x -> cos(x) - 1/2, (0, pi/2))
@test find_zero(1/2 ~ cos(x), (0, pi/2)) ≈ find_zero(x -> 1/2 - cos(x), (0, pi/2))
@test find_zero(cos(x) ~ x/2, (0, pi/2)) ≈ find_zero(x -> cos(x) - x/2, (0, pi/2))
@test find_zeros(cos(x) ~ 1/2, (0, pi/2)) ≈ find_zeros(x -> cos(x) - 1/2, (0, pi/2))
@test find_zeros(1/2 ~ cos(x), (0, pi/2)) ≈ find_zeros(x -> 1/2 - cos(x), (0, pi/2))
@test find_zeros(cos(x) ~ x/2, (0, pi/2)) ≈ find_zeros(x -> cos(x) - x/2, (0, pi/2))
@test fzero(cos(x) ~ 1/2, 0, pi/2) ≈ fzero(x -> cos(x) - 1/2, 0, pi/2)
@test fzero(1/2 ~ cos(x), 0, pi/2) ≈ fzero(x -> 1/2 - cos(x), 0, pi/2)
@test fzero(cos(x) ~ x/2, 0, pi/2) ≈ fzero(x -> cos(x) - x/2, 0, pi/2)
@test fzeros(cos(x) ~ 1/2, 0, pi/2) ≈ fzeros(x -> cos(x) - 1/2, 0, pi/2)
@test fzeros(1/2 ~ cos(x), 0, pi/2) ≈ fzeros(x -> 1/2 - cos(x), 0, pi/2)
@test fzeros(cos(x) ~ x/2, 0, pi/2) ≈ fzeros(x -> cos(x) - x/2, 0, pi/2)
end
=#
#=
using SymPyPythonCall
@testset "SymPythonCall" begin
SymPyPythonCall.@syms x
@test find_zero(cos(x) ~ 1/2, (0, pi/2)) ≈ find_zero(x -> cos(x) - 1/2, (0, pi/2))
@test find_zero(1/2 ~ cos(x), (0, pi/2)) ≈ find_zero(x -> 1/2 - cos(x), (0, pi/2))
@test find_zero(cos(x) ~ x/2, (0, pi/2)) ≈ find_zero(x -> cos(x) - x/2, (0, pi/2))
@test find_zeros(cos(x) ~ 1/2, (0, pi/2)) ≈ find_zeros(x -> cos(x) - 1/2, (0, pi/2))
@test find_zeros(1/2 ~ cos(x), (0, pi/2)) ≈ find_zeros(x -> 1/2 - cos(x), (0, pi/2))
@test find_zeros(cos(x) ~ x/2, (0, pi/2)) ≈ find_zeros(x -> cos(x) - x/2, (0, pi/2))
@test fzero(cos(x) ~ 1/2, 0, pi/2) ≈ fzero(x -> cos(x) - 1/2, 0, pi/2)
@test fzero(1/2 ~ cos(x), 0, pi/2) ≈ fzero(x -> 1/2 - cos(x), 0, pi/2)
@test fzero(cos(x) ~ x/2, 0, pi/2) ≈ fzero(x -> cos(x) - x/2, 0, pi/2)
@test fzeros(cos(x) ~ 1/2, 0, pi/2) ≈ fzeros(x -> cos(x) - 1/2, 0, pi/2)
@test fzeros(1/2 ~ cos(x), 0, pi/2) ≈ fzeros(x -> 1/2 - cos(x), 0, pi/2)
@test fzeros(cos(x) ~ x/2, 0, pi/2) ≈ fzeros(x -> cos(x) - x/2, 0, pi/2)
end
=#
using ForwardDiff
@testset "ForwardDiff" begin
f = (x, p) -> x^2 - p
Z = ZeroProblem(f, (0, 1000))
F = p -> solve(Z, Roots.Bisection(), p)
for p in (3, 5, 7, 11)
@test F(p) ≈ sqrt(p)
@test ForwardDiff.derivative(F, p) ≈ 1 / (2sqrt(p))
end
# Hessian is *fixed* for F(p) = find_zero(f, x₀, M, p)
f = (x, p) -> x^2 - sum(p .^ 2)
Z = ZeroProblem(f, (0, 1000))
F = p -> solve(Z, Roots.Bisection(), p)
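# For F(p) = sqrt(p[1]^2 + p[2]^2) the Hessian is [b^2 -a*b; -a*b a^2] / (a^2 + b^2)^(3/2),
# which is what the loop below checks against ForwardDiff.hessian.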
for p in ([1, 2], [1, 3], [1, 4])
@test F(p) ≈ sqrt(sum(p .^ 2))
a, b = p
n = sqrt(a^2 + b^2)^3
@test ForwardDiff.hessian(F, p) ≈ [b^2 -a*b; -a*b a^2] / n
end
end
#=
using IntervalRootFinding
@testset "IntervalRootFinding" begin
f(x) = sin(x + sin(x + sin(x)))
@test find_zeros(f, (-5, 5)) ≈ [-pi, 0, pi]
out = find_zeros(f, -5..5, Roots.Newton())
@test sort(out.zeros) ≈ sort([-pi,0,pi])
@test isempty(out.unknown)
end
=#
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 18050 | ## tests of find_zero interface
using Roots
using Test
using ForwardDiff;
Base.adjoint(f) = x -> ForwardDiff.derivative(f, float(x));
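# With this overload, `f'` denotes the ForwardDiff derivative of `f` in the tests below.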
# for a user-defined method
import Roots.Accessors
import Roots.Accessors: @reset
struct Order3_Test <: Roots.AbstractSecantMethod end
## Test the interface
@testset "find_zero interface tests" begin
meths = [
Order0(),
Order1(),
Roots.Order1B(),
Roots.King(),
Order2(),
Roots.Steffensen(),
Roots.Order2B(),
Roots.Esser(),
Order5(),
Roots.KumarSinghAkanksha(),
Order8(),
Roots.Thukral8(),
Order16(),
Roots.Thukral16(),
Roots.LithBoonkkampIJzerman(3, 0),
Roots.LithBoonkkampIJzerman(4, 0),
]
## different types of initial values
for m in meths
@test find_zero(sin, 3, m) ≈ pi
@test find_zero(sin, 3.0, m) ≈ pi
@test find_zero(sin, big(3), m) ≈ pi
@test find_zero(sin, big(3.0), m) ≈ pi
@test find_zero(x -> x^2 - 2.0f0, 2.0f0, m) ≈ sqrt(2) # issue 421
@test isnan(solve(ZeroProblem(x -> x^2 + 2, 0.5f0)))
end
## defaults for method argument
@test find_zero(sin, 3.0) ≈ pi # order0()
@test @inferred(find_zero(sin, (3, 4))) ≈ π # Bisection()
@test @inferred(find_zero(sin, [3, 4])) ≈ π # Bisection()
## test tolerance arguments
## xatol, xrtol, atol, rtol, maxevals, strict
fn, xstar = x -> sin(x) - x + 1, 1.9345632107520243
x0, M = 20.0, Order2()
@test find_zero(fn, x0, M) ≈ xstar # needs 16 iterations, 33 fn evaluations, difference is exact
# test of maxevals
@test_throws Roots.ConvergenceFailed find_zero(fn, x0, M, maxevals=2)
# tolerance on f, atol, rtol: f(x) ~ 0
M = Order2()
h = 1e-2
rt = find_zero(fn, x0, M, atol=h, rtol=0.0)
@test abs(fn(rt)) > h^2 / 100
rt = find_zero(fn, x0, M, atol=0.0, rtol=h)
@test abs(fn(rt)) > h^2 / 100
## test of tolerances xatol, xrtol with bisection
a, b = 1.5, 2.0
h = 1e-6
M = Roots.Bisection()
tracks = Roots.Tracks(Float64, Float64)
if VERSION >= v"1.6.0"
@inferred(find_zero(fn, (a, b), M, tracks=tracks, xatol=h, xrtol=0.0))
u, v = tracks.abₛ[end]
@test h >= abs(u - v) >= h / 2
end
## test of strict
fn, x0 = x -> cos(x) - 1, pi / 4
@test fn(find_zero(fn, x0, Order5())) <= 1e-8
@test_throws Roots.ConvergenceFailed find_zero(fn, x0, Order5(), strict=true)
# xn increment needs atol setting for zeros near 0.0 if strict=true
M = Order1()
fn = x -> x * exp(x) + nextfloat(0.0)
@test_throws Roots.ConvergenceFailed find_zero(
fn,
1.0,
M,
atol=0.0,
rtol=0.0,
strict=true,
xatol=0.0,
)
@test abs(find_zero(fn, 1.0, M, atol=0.0, rtol=0.0, strict=true)) <= eps()
## test of extreme values for fn, bisection
c = pi
fn = x -> Inf * sign(x - c)
@inferred(find_zero(fn, (-Inf, Inf))) ≈ c
fn = x -> Inf * x / abs(x) # stop at NaN values
@inferred(find_zero(fn, (-Inf, Inf))) ≈ 0
bracketing_meths = (
Roots.Bisection(),
Roots.A42(),
Roots.AlefeldPotraShi(),
Roots.Brent(),
Roots.Ridders(),
Roots.ITP(),
Roots.Ridders(),
Roots.FalsePosition(),
Roots.FalsePosition(2),
)
# test flexbility in interval specification
for M in bracketing_meths
@test @inferred(find_zero(sin, (3, 4))) ≈ pi
@test @inferred(find_zero(sin, [3, 4])) ≈ pi
@test @inferred(find_zero(sin, 3:4)) ≈ pi
@test @inferred(find_zero(sin, SomeInterval(3, 4))) ≈ pi
@test @inferred(find_zero(sin, range(3, stop=4, length=20))) ≈ pi
end
# test issue when f is non-type stable
h(x) = x < 2000 ? -1000 : -1000 + 0.1 * (x - 2000)
a, b, xᵅ = 0, 20_000, 12_000
for M in bracketing_meths
@test find_zero(h, (a, b), M) ≈ xᵅ
end
end
@testset "non simple zeros" begin
Ms = (
Roots.Order1B(),
Roots.Order2B(),
Roots.Schroder(),
Roots.Thukral2B(),
Roots.Thukral3B(),
Roots.Thukral4B(),
Roots.Thukral5B(),
)
g(x) = exp(x) + x - 2
f(x) = g(x)^2
x₀ = 1 / 4
α = find_zero(g, x₀)
fs = (f, f', f'', f''', f'''', f''''', f'''''')
for M in Ms
@test find_zero(fs, x₀, M) ≈ α atol = 1e-6
end
end
@testset "find_zero internals" begin
## init_state method
g1 = x -> x^5 - x - 1
x0_, xstar_ = (1.0, 2.0), 1.1673039782614187
M = Roots.A42()
G1 = Roots.Callable_Function(M, g1)
state = @inferred(Roots.init_state(M, G1, x0_))
options = Roots.init_options(M, state)
for M in (Roots.A42(), Roots.Bisection(), Roots.FalsePosition())
Gₘ = Roots.Callable_Function(M, G1)
stateₘ = @inferred(Roots.init_state(M, state, Gₘ))
@test @inferred(solve(M, Gₘ, stateₘ)) ≈ xstar_
end
# iterator interface (ZeroProblem, solve; init, solve!)
meths = [
Order0(),
Order1(),
Roots.Order1B(),
Roots.King(),
Order2(),
Roots.Steffensen(),
Roots.Order2B(),
Roots.Esser(),
Order5(),
Roots.KumarSinghAkanksha(),
Order8(),
Roots.Thukral8(),
Order16(),
Roots.Thukral16(),
]
g1(x) = x^5 - x - 1
x0_, xstar_ = 1.16, 1.1673039782614187
fx = ZeroProblem(g1, x0_)
for M in meths
@test solve(fx, M) ≈ xstar_
P = init(fx, M)
@test solve!(P) ≈ xstar_
end
# solve and parameters
# should be positional, but named supported for now
g2 = (x, p) -> cos(x) - x / p
fx = ZeroProblem(g2, (0, pi / 2))
@test solve(fx, 2) ≈ @inferred(find_zero(x -> cos(x) - x / 2, (0, pi / 2)))
@test solve(fx, p=2) ≈ @inferred(find_zero(x -> cos(x) - x / 2, (0, pi / 2)))
@test @inferred(solve(fx, p=3)) ≈ @inferred(find_zero(x -> cos(x) - x / 3, (0, pi / 2)))
g3 = (x, p) -> cos(x) + p[1] * x - p[2]
fx = ZeroProblem(g3, (0, pi / 2))
@test @inferred(solve(fx, p=[-1 / 10, 1 / 10])) ≈
@inferred(find_zero(x -> cos(x) - x / 10 - 1 / 10, (0, pi / 2)))
### issue 321, solve and broadcasting
myfun(x, p) = x * sin(x) - p
prob = ZeroProblem(myfun, (0.0, 2.0))
ps = (1 / 4, 1 / 2, 3 / 4, 1)
as = (0.5111022402679033, 0.7408409550954906, 0.9333080372907439, 1.1141571408719302)
@test all(solve.(prob, Bisection(), ps) .≈ as)
## test with early evaluation of bracket
f = x -> sin(x)
xs = (3.0, 4.0)
fxs = f.(xs)
M = Bisection()
state = @inferred(Roots.init_state(M, f, xs..., fxs..., m=3.5, fm=f(3.5)))
@test @inferred(solve!(init(M, f, state))) ≈ π
# ## hybrid
g1 = x -> exp(x) - x^4
x0_, xstar_ = (5.0, 20.0), 8.613169456441398
M = Roots.Bisection()
G1 = Roots.Callable_Function(M, g1)
state = @inferred(Roots.init_state(M, G1, x0_))
options = Roots.init_options(M, state, xatol=1 / 2)
ZPI = @inferred(init(M, G1, state, options))
ϕ = iterate(ZPI)
while ϕ !== nothing
val, st = ϕ
state, ctr = st
ϕ = iterate(ZPI, st)
end
N = Roots.Order1() # switch to N
G2 = Roots.Callable_Function(N, G1)
stateₙ = Roots.init_state(N, state, G2)
options = Roots.init_options(N, stateₙ)
x = solve(N, G2, stateₙ, options)
@test x ≈ xstar_
## test creation of new methods
## xn - f/f' - f'' * f^2 / 2(f')^3 = xn - r1 - r1^2/r2 is third order,
# had to previously define:
function Roots.update_state(
M::Order3_Test,
f,
o::Roots.AbstractUnivariateZeroState{T,S},
options,
l=Roots.NullTracks(),
) where {T,S}
# xn - f/f' - f'' * f^2 / 2(f')^3 = xn - r1 - r1^2/r2 is third order
xn_1, xn = o.xn0, o.xn1
fxn_1, fxn = o.fxn0, o.fxn1
f_10 = (fxn - fxn_1) / (xn - xn_1)
xn1::T = xn - fxn / f_10
fxn1::S = f(xn1)
f01 = (fxn1 - fxn) / (xn1 - xn)
if isnan(f_10) || iszero(f_10) || isnan(f01) || iszero(f01)
return (o, true)
end
r1 = fxn1 / f01
r2 = f01 / ((f01 - f_10) / (xn1 - xn_1))
wn = xn1 - r1 - r1^2 / r2
fwn::S = f(wn)
@reset o.xn0 = xn
@reset o.xn1 = wn
@reset o.fxn0 = fxn
@reset o.fxn1 = fwn
return (o, false)
end
g1 = x -> exp(x) - x^4
@test find_zero(g1, 8.3, Order3_Test()) ≈ find_zero(g1, 8.3, Order1())
# test many different calling styles
f(x) = (sin(x), sin(x) / cos(x)) # x -> (f(x), f(x)/f′(x))
fs(x) = (sin, cos) # (f, f′)
x0 = (3, 4)
g(x, p) = begin
fx = cos(x) - x / p
(fx, fx / (-sin(x) - 1 / p))
end
x0a = (0.0, pi / 2)
α₂, α₃ = 1.0298665293222589, 1.1701209500026262
@test find_zero(f, x0) ≈ π
@test find_zero(f, first(x0)) ≈ π
@test find_zero(g, x0a, p=2) ≈ α₂
@test find_zero(g, first(x0a), p=2) ≈ α₂
Z = ZeroProblem(f, x0)
Za = ZeroProblem(g, x0a)
@test solve(Z) ≈ π
@test solve(Za, 3) ≈ α₃
@test solve(Za, p=2) ≈ α₂
@test solve!(init(Z)) ≈ π
@test solve!(init(Za, 3)) ≈ α₃
@test solve!(init(Za, p=3)) ≈ α₃
Ms = (Roots.Secant(), Roots.Bisection(), Roots.Newton())
for M in Ms
@test find_zero(f, x0, M) ≈ π
@test solve(Z, M) ≈ π
@test solve!(init(Z, M)) ≈ π
@test find_zero(g, x0a, M, p=2) ≈ α₂
@test solve(Za, M, 2) ≈ α₂
@test solve(Za, M, p=2) ≈ α₂
@test solve!(init(Za, M, 2)) ≈ α₂
end
## test broadcasting semantics with ZeroProblem
## This assume parameters can be passed in a positional manner, a
## style which is discouraged, as it is confusing
Z = ZeroProblem((x, p) -> cos(x) - x / p, pi / 4)
@test all(solve.(Z, (1, 2)) .≈ (solve(Z, 1), solve(Z, 2)))
end
@testset "find_zero issue tests" begin
## Misc tests
Ms = [Order0(), Order1(), Order2(), Order5(), Order8(), Order16()]
## issues with starting near a maximum. Some bounce out of it, but
## one would expect all to have issues
fn, xstar = x -> x^3 + 4x^2 - 10, 1.365230013414097
for M in [Order1(), Roots.Order1B(), Order2(), Roots.Order2B(), Order5()]
@test_throws Roots.ConvergenceFailed find_zero(fn, -1.0, M)
end
for M in [Order0(), Roots.Thukral8(), Roots.Thukral16()]
@test find_zero(fn, -1.0, M) ≈ xstar
end
## non-simple root
## convergence can depend on relaxed convergence checked after an issue
fn, xstar, x0 = x -> cos(x) - 1, 0.0, 0.1
for M in Ms
xn = find_zero(fn, x0, M)
@test abs(fn(xn)) <= 1e-10
end
for M in [Roots.Order1B(), Order2(), Roots.Order2B(), Order5(), Order8(), Order16()]
@test_throws Roots.ConvergenceFailed find_zero(fn, x0, M, strict=true)
end
## issue with large steps
fn, x0 = x -> x^20 - 1, 0.5
for M in Ms[2:end] # not 0, as it uses bracket
@test_throws Roots.ConvergenceFailed find_zero(fn, x0, M)
end
## issue with large f''
fn, x0 = cbrt, 1.0
for M in [Order1(), Order2(), Order5()]
@test_throws Roots.ConvergenceFailed find_zero(fn, x0, M)
end
### these stop but only because rtol is used for checking f(xn) ~ 0
for M in [Roots.Thukral8(), Roots.Thukral16()]
@test abs(find_zero(fn, x0, M) - 0.0) >= 100
end
## similar (http://people.sc.fsu.edu/~jburkardt/cpp_src/test_zero/test_zero.html)
function newton_baffler(x)
a = 1 / 10
m, b = 1 / 4, 1 / 8
if x < -a
m * x - b
elseif x > a
m * x + b
else
(m * a + b) / a * (x + a) + (-m * a - b)
end
end
for M in
(Order0(), Order1(), Roots.Order1B(), Order2(), Roots.Order2B(), Order5(), Order8())
@test abs(find_zero(newton_baffler, 1.0, M)) <= 1e-15
end
for M in (Roots.KumarSinghAkanksha(), Roots.Thukral8(), Roots.Thukral16())
@test_throws Roots.ConvergenceFailed find_zero(newton_baffler, 1.0, M)
end
## Closed issues ###
## issue tests: put in tests to ensure closed issues don't reappear.
## issue #94; tolerances not matching documentation
function test_94(; kwargs...)
g, T = 1.62850, 14.60000
α, t1, tf = 0.00347, 40.91375, 131.86573
y, ya, yf = 0.0, 9000.0, 10000.0
vy = sqrt(2g * (ya - y))
θ0, θ1 = atan(α * tf), atan(α * (tf - t1))
I_sintan(x) = tan(x) / 2cos(x) - atanh(tan(x / 2))
I_sintan(x, y) = I_sintan(y) - I_sintan(x)
function lhs(θ)
tRem = (vy - T / α * (sec(θ1) - sec(θ))) / g
val = -yf + y + vy * tRem - 0.5g * tRem^2 - T / α^2 * I_sintan(θ, θ1)
val
end
M = Roots.FalsePosition()
x0 = [atan(α * tf), atan(α * (tf - t1))]
F = Roots.Callable_Function(M, lhs, nothing) #Roots.DerivativeFree(lhs)
state = Roots.init_state(M, F, x0)
options = Roots.init_options(M, state)
l = Roots.Tracks(state)
solve(ZeroProblem(lhs, x0), M; tracks=l)
@test l.steps <= 45 # 15
end
test_94()
## Issue with quad_step after truncated M-step PR #140
@test find_zero(x -> tanh(x) - tan(x), 7.36842, Order0()) ≈ 7.068582745628732
## Use tolerance on f, not x, with bisection
atol = 0.01
if VERSION >= v"1.6.0"
u = @inferred(find_zero(sin, (3, 4), atol=atol))
@test atol >= abs(sin(u)) >= atol^2
## issue #159 bracket with zeros should be found
@test @inferred(find_zero(x -> x + 1, (-1, 1))) == -1
end
## issue #178 passing through method
@test fzero(sin, 3, 4, Roots.Brent()) ≈ π
## issue #188 with A42
f = let a = 0.18
x -> x * (1 - x^2) / ((x^2 + a^2) * (1 + a^2 * x^2))
end
r = 0.05
xs = (r + 1e-12, 1.0)
@test find_zero(x -> f(r) - f(x), xs, Roots.A42()) ≈ 0.4715797678171889
## issue #336 verbose=true with complex values
## just test that this does not error
for M in (Order1(), Roots.Newton())
T = Complex{Float64}
tracks = Roots.Tracks(T, T)
find_zero((sin, cos), 1.0 + 1.0im, M; tracks=tracks)
Roots.show_tracks(IOBuffer(), tracks, M)
end
## Issue #343 non-type stable f
f(t) = t <= 0 ? -1 : log(t)
for M in (
Roots.Order0(),
Roots.Order1(),
Roots.Order2(),
Roots.Order5(),
Roots.Order8(),
Roots.Order16(),
)
@test find_zero(f, 3, M) ≈ 1
end
end
struct _SampleCallableObject end
(::_SampleCallableObject)(x) = x^5 - x - 1
@testset "find_zero with other callable types" begin
Ms = [
Order0(),
Order1(),
Roots.Order1B(),
Order2(),
Roots.Order2B(),
Order5(),
Order8(),
Order16(),
]
for M in Ms
@test find_zero(_SampleCallableObject(), 1.1, M) ≈ 1.1673039782614187
end
for M in Ms
g = Cnt(x -> x^5 - x - 1)
@test find_zero(g, 1.1, M) ≈ 1.1673039782614187
@test g.cnt <= 30
end
end
@testset "function evaluations" begin
function wrapper(f)
cnt = 0
x -> begin
cnt += 1
f(x)
end
end
# as of v"1.3.0", no more maxfnevals for stopping, just maxevals
# this is an alternative
function fz(f, x0::Number, M; maxfnevals=10, kwargs...)
F = wrapper(f)
ZPI = init(ZeroProblem(F, x0), M; kwargs...)
x = NaN * float(x0)
ϕ = iterate(ZPI)
while ϕ !== nothing
x, st = ϕ
F.cnt.contents >= maxfnevals && return NaN * float(x0)
ϕ = iterate(ZPI, st)
end
x
end
f(x) = x^20 - 1
x0 = 0.9
M = Order1()
@test isnan(fz(f, x0, M)) # takes 19 fn evals, not 10
# test that for update state, fnevals are correctly counted for simpler
# methods
fn = (x) -> sin(x)
x0 = (3, 4)
M = Order1()
state = Roots.init_state(M, Roots.Callable_Function(M, fn), x0)
options = Roots.init_options(M, state)
for M in (
Order1(),
Order2(),
Order5(),
Order8(),
Order16(),
Roots.Order1B(),
Roots.Order2B(),
Roots.Bisection(),
Roots.Brent(),
Roots.Ridders(),
Roots.ITP(),
Roots.A42(),
Roots.AlefeldPotraShi(),
)
# test initial count
g = wrapper(fn)
G = Roots.Callable_Function(M, g)
Roots.init_state(M, G, x0)
@test g.cnt.contents ≤ Roots.initial_fncalls(M)
# test update state
g = wrapper(fn)
stateₘ = Roots.init_state(M, state, Roots.Callable_Function(M, fn))
G = Roots.Callable_Function(M, g)
l = Roots.Tracks(Float64, Float64)
Roots.update_state(M, G, stateₘ, options, l)
@test g.cnt.contents == l.fncalls
end
end
@testset "_extrema" begin
if VERSION >= v"1.6.0"
@test @inferred(Roots._extrema((π, 0))) === (0.0, Float64(π))
@test @inferred(Roots._extrema([π, 0])) === (0.0, Float64(π))
end
@test_throws ArgumentError Roots._extrema(π)
@test_throws ArgumentError Roots._extrema((π, π))
@test_throws ArgumentError Roots._extrema([π, π])
end
@testset "sensitivity" begin
# Issue #349
if VERSION >= v"1.9.0-"
f(x, p) = cos(x) - first(p) * x
x₀ = (0, pi / 2)
F(p) = solve(ZeroProblem(f, x₀), Bisection(), p)
G(p) = find_zero(f, x₀, Bisection(), p)
H(p) = find_zero(f, x₀, Bisection(); p=p)
∂ = -0.4416107917053284
@test ForwardDiff.derivative(F, 1.0) ≈ ∂
@test ForwardDiff.gradient(F, [1.0, 2])[1] ≈ ∂
@test ForwardDiff.derivative(G, 1.0) ≈ ∂
@test ForwardDiff.gradient(G, [1.0, 2])[1] ≈ ∂
@test ForwardDiff.derivative(H, 1.0) ≈ ∂
@test ForwardDiff.gradient(H, [1.0, 2])[1] ≈ ∂
end
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 4210 | # tests for find_zeros
using Roots
using Test
mutable struct CallableFunction
f
n
end
(F::CallableFunction)(x) = (F.n += 1; F.f(x))
@testset "find_zeros" begin
function azero(f, x)
fx = f(x)
iszero(fx) && return true
sign(fx) * sign(f(nextfloat(x))) < 0 && return true
sign(fx) * sign(f(prevfloat(x))) < 0 && return true
abs(fx) <= 8eps(x) && return true
false
end
## easier ones, counting steps
F = CallableFunction(x -> exp(x) - x^4, 0)
xrts = find_zeros(F, -5, 20)
@test length(xrts) == 3
@test all(azero.((F,), xrts))
@test F.n <= 1500 #3000
F = CallableFunction(x -> cos(x) + cos(2x), 0)
xrts = find_zeros(F, 0, 4pi)
@test length(xrts) == 6
@test all(azero.((F,), xrts))
@test F.n <= 2000 # 5000
T11(x) = 1024x^11 - 2816x^9 + 2816x^7 - 1232x^5 + 220x^3 - 11x
U9(x) = 512x^9 - 1024x^7 + 672x^5 - 160x^3 + 10x
F = CallableFunction(T11, 0)
xrts = find_zeros(F, -1, 1)
@test length(xrts) == 11
@test all(azero.((F,), xrts))
@test F.n <= 2500 # 10_000
F = CallableFunction(U9, 0)
xrts = find_zeros(F, -1, 1)
@test length(xrts) == 9
@test all(azero.((F,), xrts))
@test F.n <= 2500 # 10_000
W(n) = x -> prod(x - i for i in 1:n)
Wi(n) = x -> prod((x - i)^i for i in 1:n)
F = CallableFunction(W(20), 0)
xrts = find_zeros(F, -1, 21)
@test length(xrts) == 20
@test all(azero.((F,), xrts))
@test F.n <= 4000 #20_000
F = CallableFunction(Wi(6), 0)
xrts = find_zeros(F, -1, 7)
@test length(xrts) == 6
@test all(azero.((F,), xrts))
@test F.n <= 10_000
## Harder ones
f1(x) = 2 * exp(0.5 * x) * (sin(5 * x) + sin(101 * x))
tiger_tail(x) = f1(x) - round(f1(x)) # (-2,1) then filter
f2(x) = (x - 0.5) * (x - 0.5001) * (x - 4) * (x - 4.05) * (x - 9.3) # (0,10)
f3(x) = (x - 3)^2 * (x - 4)^2 # (0,5)
delta = 0.001
f4(x) = (x - 0.5)^3 * (x - (0.5 + delta)) * (x - 1)
f5(x) = (x - 0.5)^3 * (x - (0.5 + delta))^3 * (x - 4) * (x - (4 + delta)) * (x - 4.2)^2
M(n) = x -> prod((x - (0.5 - (1 / 10)^i)) for i in 1:n)
f6 = M(4)
f7 = M(5) # too much
xrts = find_zeros(tiger_tail, -2.0, 1.0)
xrts = filter(u -> -1 / 4 < tiger_tail(u) < 1 / 4, xrts)
@test length(xrts) == 345
@test all(azero.((tiger_tail,), xrts))
xrts = find_zeros(f2, 0.0, 10.0)
@test length(xrts) == 5
@test all(azero.((f2,), xrts))
xrts = find_zeros(f3, 0.0, 5.0)
@test length(xrts) == 2
@test all(azero.((f3,), xrts))
xrts = find_zeros(f4, 0.0, 1.5)
@test length(xrts) == 3
@test all(azero.((f4,), xrts))
xrts = find_zeros(f5, 0.0, 5.0)
@test length(xrts) >= 3 # too hard to get 5 w/o luck, as with no_pts=21/k=4
@test all(azero.((f5,), xrts))
xrts = find_zeros(f6, 0.0, 10.0)
@test length(xrts) == 4
@test all(azero.((f6,), xrts))
xrts = find_zeros(f7, 0.0, 10.0) # too sensitive to interval
@test length(xrts) >= 3 # should be 5
@test all(azero.((f7,), xrts))
# Issue #141 solve over [a,b], not (a,b)
@test length(find_zeros(p -> p * (1 - p), 0, 1)) == 2
@test length(find_zeros(sin, 0, 5pi)) == 5 + 1
# test different ways to specify an interval (just need `extrema` defined)
@test find_zeros(sin, (3, 4)) ≈ [float(pi)]
@test find_zeros(sin, [3, 4]) ≈ [float(pi)]
@test find_zeros(sin, 3:4) ≈ [float(pi)]
@test find_zeros(sin, SomeInterval(3, 4)) ≈ [float(pi)]
@test find_zeros(sin, range(3, stop=4, length=20)) ≈ [float(pi)]
# test with constant function
@test isempty(find_zeros(x -> 4, -10, 10))
# test with zero function (Issue #339)
@test_throws DomainError find_zeros(x -> 0, -2, 2)
# issue #369
g4(x) = sqrt(abs(x^2 - 1)) / (x * sign(x^2 - 1))
@test isempty(find_zeros(g4, 1.1, 2))
# solve interface
Z = ZeroProblem(x -> prod(x - i for i in 1:5), (0, 6))
@test solve(Z, AllZeros()) ≈ 1:5
end
@testset "find_zeros: not Float64 types" begin
for T in [Float16, Float32, BigFloat]
rts = find_zeros(x -> cos(x) - x / 10, T(0.0), T(10.0))
@test eltype(rts) == T
end
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 2217 | using Test
import Roots.fzero
## Test `fzero` interface to `find_zero`
## test `fzeros` interface for functions
@testset "fzero(s) interface" begin
## test interface
@test fzero(sin, 3) ≈ pi # order0
@test fzero(sin, (3, 3.1), order=1) ≈ pi # use order if specified
@test fzero(sin, (3, 4)) ≈ pi # bracketing
@test fzero(sin, 3, 4) ≈ pi # bracketing
@test fzero(sin, [3, 4]) ≈ pi # bracketing
@test_throws ArgumentError fzero(sin, (3, 3.1)) # not a bracket
@test fzero(sin, cos, 3) ≈ pi # newton
## order keyword:
for o in keys(Roots._method_lookup)
@test fzero(x -> x^3 - x, 0.9, order=o) ≈ 1.0
end
## bypass order keyword
for M in
[Roots.Order0(), Roots.Order1(), Roots.Order1B(), Roots.Order2(), Roots.Order2B()]
@test fzero(x -> x^3 - x, 0.7, M) ≈ 1.0
end
for M in [Roots.Order1(), Roots.Order1B(), Roots.Order2(), Roots.Order2B()]
N = Roots.Bisection()
@test fzero(x -> x^3 - x, 0.7, M, N) ≈ 1.0
end
### test tolerances
fn, xstar, x0, br = x -> x^5 - x - 1, 1.1673039782614187, (1.0, 1.1), [1.0, 2.0]
@test fzero(fn, x0, order=1) ≈ xstar
@test !(fzero(fn, x0, order=1, xatol=1e-4, atol=1) ≈ xstar)
@test_throws Roots.ConvergenceFailed fzero(fn, x0, order=1, maxevals=3)
@test !(fzero(fn, x0, order=1, maxevals=3, atol=1e-3) ≈ xstar)
## Infinities
## f(b) = Inf
f = x -> x + exp(x)
@test fzero(f, -1e6, 1e6) ≈ -0.5671432904097838
## f(a) = Inf
f = x -> 1 / x - 1
@test fzero(f, 0, 2) ≈ 1.0
## test infinite range
@test fzero(x -> x, -Inf, Inf) ≈ 0.0
##################################################
## fzeros function
## test interface
@test fzeros(sin, 3, 4) ≈ [pi]
@test fzeros(sin, (3, 4)) ≈ [pi]
@test fzeros(sin, [3, 4]) ≈ [pi]
@test length(fzeros(x -> exp(x) - x^4, -10, 20)) == 3
rts = 1:5
@test all(
(abs.(fzeros(x -> prod([x - r for r in rts]), 0, 10)) .- collect(1:5)) .<= 1e-15,
)
fn = x -> cos(10 * pi * x)
@test length(fzeros(fn, 0, 1)) == 10
### issue with fzeros and roots near 'b'
@test 0 < maximum(fzeros(x -> sin(x) - 1 / 1000 * x, 0, pi)) < pi
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 6414 | using Test
import Roots.newton,
Roots.halley, Roots.superhalley, Roots.quadratic_inverse, Roots.chebyshev_like
@testset "Test Newton, Halley, Schroder methods" begin
@test abs(newton(sin, cos, 0.5) - 0.0) <= 100 * eps(1.0)
@test newton(cos, x -> -sin(x), 1.0) ≈ pi / 2
@test newton(x -> x^2 - 2x - 1, x -> 2x - 2, 3.0) ≈ 2.414213562373095
@test abs(newton(x -> exp(x) - cos(x), x -> exp(x) + sin(x), 3.0) - 0.0) <= 1e-14
@test halley(x -> x^2 - 2x - 1, x -> 2x - 2, x -> 2, 3.0) ≈ 2.414213562373095
@test quadratic_inverse(x -> x^2 - 2x - 1, x -> 2x - 2, x -> 2, 3.0) ≈ 2.414213562373095
@test superhalley(x -> x^2 - 2x - 1, x -> 2x - 2, x -> 2, 3.0) ≈ 2.414213562373095
@test chebyshev_like(x -> x^2 - 2x - 1, x -> 2x - 2, x -> 2, 3.0) ≈ 2.414213562373095
a = halley(x -> exp(x) - cos(x), x -> exp(x) + sin(x), x -> exp(x) + cos(x), 3.0)
@test abs(a - 0.0) <= 1e-14
# find_zero calls
@test find_zero((x -> x^2 - 2x - 1, x -> 2x - 2), 3.0, Roots.Newton()) ≈
2.414213562373095
@test find_zero((x -> x^2 - 2x - 1, x -> 2x - 2, x -> 2), 3.0, Roots.Halley()) ≈
2.414213562373095
@test find_zero((x -> x^2 - 2x - 1, x -> 2x - 2, x -> 2), 3.0, Roots.SuperHalley()) ≈
2.414213562373095
@test find_zero((x -> x^2 - 2x - 1, x -> 2x - 2, x -> 2), 3.0, Roots.ChebyshevLike()) ≈
2.414213562373095
@test find_zero(
(x -> x^2 - 2x - 1, x -> 2x - 2, x -> 2),
3.0,
Roots.QuadraticInverse(),
) ≈ 2.414213562373095
@test find_zero((x -> x^2 - 2x - 1, x -> 2x - 2, x -> 2), 3.0, Roots.Schroder()) ≈
2.414213562373095
## test with Complex input
@test real(Roots.newton(x -> x^3 - 1, x -> 3x^2, 1 + im)) ≈ 1.0
@test real(Roots.newton(x -> x^3 - 1, x -> 3x^2, 1 + 10im)) ≈ (-1 / 2)
## Issue #143 test with new interface
Roots.newton(sin, cos, 3.0) ≈ π # uses find_zero
Roots.newton((sin, cos), 3.0) ≈ π # uses simple
fdf = x -> (sin(x), sin(x) / cos(x)) # (f, f/f')
@test Roots.find_zero(fdf, 3.0, Roots.Newton()) ≈ π # uses find_zero
Roots.newton(fdf, 3.0) ≈ π # uses simple
fdfdf = x -> (sin(x), sin(x) / cos(x), -cos(x) / sin(x), sin(x) / cos(x)) # (f, f/f', f'/f'', f''/f''')
@test Roots.find_zero(fdfdf, 3.0, Roots.Halley()) ≈ π
# check that functions with multiple return values can work with other
# methods
for M in [
Roots.Schroder(),
Roots.Halley(),
Roots.Newton(),
Roots.Order1(),
Roots.Order0(),
Roots.QuadraticInverse(),
Roots.SuperHalley(),
Roots.ChebyshevLike(),
]
@test Roots.find_zero(fdfdf, 3.0, M) ≈ π # can pass function to others
end
for M in [Roots.Bisection(), Roots.A42(), Roots.AlefeldPotraShi()]
@test Roots.find_zero(fdfdf, (3.0, 4.0), M) ≈ π # can pass function to others
end
@test find_zero(
x -> (x^2 - 2, (x^2 - 2) / 2x),
1.0,
Roots.Newton(),
Roots.Bisection(),
) ≈ sqrt(2)
@test_throws Roots.ConvergenceFailed Roots.newton((x -> x^2 + 1, x -> 2x), 0)
end
@testset "bracketed Halley" begin
x₀, x̃₀, α = 1.0, 1.1, 1.1673039782614187
f(x) = x^5 - x - 1
fp(x) = 5x^4 - 1
fpp(x) = 20x^3
for M in (Roots.BracketedHalley(), Roots.BracketedChebyshev())
@test find_zero((f, fp, fpp), (1, 2), M) ≈ α
end
end
@testset "Lith Boonkkamp IJzerman methods" begin
x₀, x̃₀, α = 1.0, 1.1, 1.1673039782614187
f(x, p=1) = x^5 - x - p
fp(x) = 5x^4 - 1
fpp(x) = 20x^3
fppp(x) = 60x
fpppp(x) = 60
function fdf1(x, p=1)
fx = f(x, p)
∂fx = fp(x)
return fx, fx / ∂fx
end
function fdf2(x, p=1)
fx = f(x, p)
∂fx = fp(x)
∂2fx = fpp(x)
return fx, fx / ∂fx, ∂fx / ∂2fx
end
function fdf3(x, p=1)
fx = f(x, p)
∂fx = fp(x)
∂2fx = fpp(x)
∂3fx = fppp(x)
return fx, fx / ∂fx, ∂fx / ∂2fx, ∂2fx / ∂3fx
end
function fdf4(x, p=1)
fx = f(x, p)
∂fx = fp(x)
∂2fx = fpp(x)
∂3fx = fppp(x)
∂4fx = fpppp(x)
return fx, fx / ∂fx, ∂fx / ∂2fx, ∂2fx / ∂3fx, ∂3fx / ∂4fx
end
@test solve(ZeroProblem((f,), x₀), Roots.LithBoonkkampIJzerman(3, 0)) ≈ α
@test solve(ZeroProblem(f, x₀), Roots.LithBoonkkampIJzerman(3, 0)) ≈ α
@test solve(ZeroProblem((f,), x₀), Roots.LithBoonkkampIJzerman(4, 0)) ≈ α
@test solve(ZeroProblem(f, x₀), Roots.LithBoonkkampIJzerman(4, 0)) ≈ α
@test solve(ZeroProblem((f,), x₀), Roots.LithBoonkkampIJzerman(3, 0), 1) ≈ α
@test solve(ZeroProblem(f, x₀), Roots.LithBoonkkampIJzerman(3, 0), 1) ≈ α
@test solve(ZeroProblem((f, fp), x₀), Roots.LithBoonkkampIJzerman(2, 1)) ≈ α
@test solve(ZeroProblem((f, fp), x₀), Roots.LithBoonkkampIJzerman(3, 1)) ≈ α
@test solve(ZeroProblem(fdf1, x₀), Roots.LithBoonkkampIJzerman(2, 1)) ≈ α
@test solve(ZeroProblem(fdf1, x₀), Roots.LithBoonkkampIJzerman(2, 1), 1) ≈ α
@test solve(ZeroProblem((f, fp, fpp), x₀), Roots.LithBoonkkampIJzerman(1, 2)) ≈ α
@test solve(ZeroProblem((f, fp, fpp), x₀), Roots.LithBoonkkampIJzerman(2, 2)) ≈ α
@test solve(ZeroProblem(fdf2, x₀), Roots.LithBoonkkampIJzerman(1, 2)) ≈ α
@test solve(ZeroProblem(fdf2, x₀), Roots.LithBoonkkampIJzerman(1, 2), 1) ≈ α
@test solve(ZeroProblem((f, fp, fpp, fppp), x₀), Roots.LithBoonkkampIJzerman(1, 3)) ≈ α
@test solve(ZeroProblem((f, fp, fpp, fppp), x₀), Roots.LithBoonkkampIJzerman(2, 3)) ≈ α
@test solve(ZeroProblem(fdf3, x₀), Roots.LithBoonkkampIJzerman(1, 3)) ≈ α
@test solve(ZeroProblem(fdf3, x₀), Roots.LithBoonkkampIJzerman(1, 3), 1) ≈ α
@test solve(ZeroProblem(fdf4, x₀), Roots.LithBoonkkampIJzerman(1, 4)) ≈ α
@test solve(ZeroProblem(fdf4, x₀), Roots.LithBoonkkampIJzerman(1, 4), 1) ≈ α
@test solve(
ZeroProblem((f, fp, fpp, fppp, fpppp), x₀),
Roots.LithBoonkkampIJzerman(1, 4),
) ≈ α
@test solve(
ZeroProblem((f, fp, fpp, fppp, fpppp), x̃₀),
Roots.LithBoonkkampIJzerman(2, 4),
) ≈ α # needs closer
# bracketing
@test solve(ZeroProblem((f, fp), (1, 2)), Roots.LithBoonkkampIJzermanBracket()) ≈ α
@test solve(ZeroProblem(fdf1, (1, 2)), Roots.LithBoonkkampIJzermanBracket()) ≈ α
@test solve(ZeroProblem(fdf1, (1, 2)), Roots.LithBoonkkampIJzermanBracket(), 1) ≈ α
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | code | 1273 | using Test
using BenchmarkTools
@testset "simpler implementations" begin
# bisection
xrt = Roots.bisection(sin, 3.0, 4.0)
@test isapprox(xrt, pi)
xrt = Roots.bisection(sin, 3.0, 4.0, xatol=1e-3)
@test abs(sin(xrt)) >= 1e-7 # not too close
xrt = Roots.bisection(sin, big(3.0), big(4.0))
@test isapprox(xrt, pi)
# secant_method
fpoly = x -> x^5 - x - 1
xrt = Roots.secant_method(fpoly, 1.0)
@test abs(fpoly(xrt)) <= 1e-15
xrt = Roots.secant_method(fpoly, (1, 2))
@test abs(fpoly(xrt)) <= 1e-14
# muller
fpoly = x -> x^5 - x - 1
xrt = Roots.muller(fpoly, 1.0)
@test xrt isa Real
@test abs(fpoly(xrt)) <= 1e-15
@test_throws DomainError Roots.muller(fpoly, -1.0)
xrt = Roots.muller(fpoly, -1.0 + 0im)
@test xrt isa Complex
@test abs(fpoly(xrt)) <= 1e-15
@test Roots.muller(cos, 1.0) ≈ π / 2
expoly(z) = log(-z) * asin(z) / tanh(z)
@test Roots.muller(expoly, -0.7 - 0.5im) ≈ -1.0
# dfree
fpoly = x -> x^5 - x - 1
xrt = Roots.dfree(fpoly, 1.0)
@test abs(fpoly(xrt)) <= 1e-14
# newton
@test Roots.newton((sin, cos), 3.0) ≈ pi
u = Roots.newton(x -> (sin(x), sin(x) / cos(x)), 3.0, xatol=1e-10, xrtol=1e-10)
@test abs(u - pi) <= 1e-8
end
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | docs | 1451 | CHANGES in v0.7.4
* set find_zero(s) to specialize on the function, fzero(s) to not. (#148)
* adjust Steffensen method logic to take secant step or steffensen
step, rather than modified steffensen step. Seems to improve
robustness. (#147)
* add Schroder method (order 2 for multiplicity with derivative), King (1B)
(superlinear for multiplicity, no derivative), Esser (2B) (order 2
for multipicity, no derivative) (#143, #147)
* close issue #143 by allowing fns to Newton, Halley to compute f, f/fp, fp/fpp
* add `newton` function to simple.jl
* change find_zeros to identify zeros on [a,b], not (a,b). Closes #141.
* bug fix: issue with quad step after a truncated M-step in find_zero(M,N,...)
* bug fix: verbose argument for Bisection method (#139)
* bug fix: unintentional widening of types in initial secant step (#139)
CHANGES in v0.7.3
* fix bug with find_zeros and Float32
* speeds up bisection function
CHANGES in v0.7.2
* speed up bisection
CHANGES in v0.7.1
* refactor Bisection method to reduce conditional checks inside loop
* took the algorithm from Order0 and made it an alternative for find_zero, allowing other non-bracketing methods to be more robust
* In FalsePosition there is a parameter to adjust when a bisection step should be used. This was changed in v0.7.0; the old value is restored. (This method is too sensitive to this parameter. It is recommended that either A42 or AlefeldPotraShi be used as alternatives.)
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | docs | 10362 | # Root finding functions for Julia
[](https://JuliaMath.github.io/Roots.jl/stable)
[](https://JuliaMath.github.io/Roots.jl/dev)
[](https://github.com/JuliaMath/Roots.jl/actions/workflows/ci.yml)
[](https://codecov.io/gh/JuliaMath/Roots.jl)
This package contains simple routines for finding roots, or zeros, of
scalar functions of a single real variable using floating-point math. The `find_zero` function
provides the primary interface. The basic call is
`find_zero(f, x0, [M], [p]; kws...)` where, typically, `f` is a function, `x0` a starting point or
bracketing interval, `M` is used to adjust the default algorithms used, and `p` can be used to pass in parameters.
The various algorithms include:
* Bisection-like algorithms. For functions where a bracketing interval
is known (one where `f(a)` and `f(b)` have alternate signs), a
bracketing method, like `Bisection`, can be specified. The default
is `Bisection`, for most floating point number types, employed in a
manner exploiting floating point storage conventions. For other
number types (e.g. `BigFloat`), an algorithm of Alefeld, Potra, and
Shi is used by default. These default methods are guaranteed to
converge. Other bracketing methods are available.
* Several derivative-free algorithms. These are specified
through the methods `Order0`, `Order1` (the secant method), `Order2`
(the Steffensen method), `Order5`, `Order8`, and `Order16`. The
number indicates, roughly, the order of convergence. The `Order0`
method is the default, and the most robust, but may take more
function calls to converge, as it employs a bracketing method when
possible. The higher order methods promise faster
convergence, though don't always yield results with fewer function
calls than `Order1` or `Order2`. The methods `Roots.Order1B` and
`Roots.Order2B` are superlinear and quadratically converging methods
independent of the multiplicity of the zero.
* There are historic algorithms that require a derivative or two to be
specified: `Roots.Newton` and `Roots.Halley`. `Roots.Schroder`
provides a quadratic method, like Newton's method, which is
independent of the multiplicity of the zero. This is generalized by
`Roots.ThukralXB` (with `X` being 2,3,4, or 5).
* There are several non-exported algorithms, such as, `Roots.Brent()`,
`Roots.LithBoonkkampIJzermanBracket`, and
`Roots.LithBoonkkampIJzerman`.
Each method's documentation has additional detail.
Some examples:
```julia
julia> using Roots
julia> f(x) = exp(x) - x^4;
julia> α₀, α₁, α₂ = -0.8155534188089607, 1.4296118247255556, 8.6131694564414;
julia> find_zero(f, (8,9), Bisection()) ≈ α₂ # a bisection method has the bracket specified
true
julia> find_zero(f, (-10, 0)) ≈ α₀ # Bisection is default if x in `find_zero(f, x)` is not scalar
true
julia> find_zero(f, (-10, 0), Roots.A42()) ≈ α₀ # fewer function evaluations than Bisection
true
```
For non-bracketing methods, the initial position is passed in as a
scalar, or, possibly, for secant-like methods an iterable like `(x_0, x_1)`:
```julia
julia> find_zero(f, 3) ≈ α₁ # find_zero(f, x0::Number) will use Order0()
true
julia> find_zero(f, 3, Order1()) ≈ α₁ # same answer, different method (secant)
true
julia> find_zero(f, (3, 2), Order1()) ≈ α₁ # start secant method with (3, f(3), (2, f(2))
true
julia> find_zero(sin, BigFloat(3.0), Order16()) ≈ π # 2 iterations to 6 using Order1()
true
```
The `find_zero` function can be used with callable objects:
```julia
julia> using Polynomials;
julia> x = variable();
julia> find_zero(x^5 - x - 1, 1.0) ≈ 1.1673039782614187
true
```
The function should respect the units of the `Unitful` package:
```julia
julia> using Unitful
julia> s, m = u"s", u"m";
julia> g, v₀, y₀ = 9.8*m/s^2, 10m/s, 16m;
julia> y(t) = -g*t^2 + v₀*t + y₀
y (generic function with 1 method)
julia> find_zero(y, 1s) ≈ 1.886053370668014s
true
```
Newton's method can be used without taking derivatives by hand. The
following examples use the `ForwardDiff` package:
```julia
julia> using ForwardDiff
julia> D(f) = x -> ForwardDiff.derivative(f,float(x))
D (generic function with 1 method)
```
Now we have:
```julia
julia> f(x) = x^3 - 2x - 5
f (generic function with 1 method)
julia> x0 = 2
2
julia> find_zero((f, D(f)), x0, Roots.Newton()) ≈ 2.0945514815423265
true
```
Automatic derivatives allow for easy solutions to finding critical
points of a function.
```julia
julia> using Statistics: mean, median
julia> as = rand(5);
julia> M(x) = sum((x-a)^2 for a in as)
M (generic function with 1 method)
julia> find_zero(D(M), .5) ≈ mean(as)
true
julia> med(x) = sum(abs(x-a) for a in as)
med (generic function with 1 method)
julia> find_zero(D(med), (0, 1)) ≈ median(as)
true
```
### The CommonSolve interface
The
[DifferentialEquations](https://github.com/SciML/DifferentialEquations.jl)
interface of setting up a problem; initializing the problem; then
solving the problem is also implemented using the types
`ZeroProblem` and the methods `init`, `solve!`, and `solve` (from [CommonSolve](https://github.com/SciML/CommonSolve.jl)).
For example, we can solve a problem with many different methods, as follows:
```julia
julia> f(x) = exp(-x) - x^3
f (generic function with 1 method)
julia> x0 = 2.0
2.0
julia> fx = ZeroProblem(f, x0)
ZeroProblem{typeof(f), Float64}(f, 2.0)
julia> solve(fx) ≈ 0.7728829591492101
true
```
With no default, and a single initial point specified, the default
`Order1` method is used. The `solve` method allows other root-solving
methods to be passed, along with other options. For example, to use
the `Order2` method using a convergence criteria (see below) that
`|xₙ - xₙ₋₁| ≤ δ`, we could make this call:
```julia
julia> solve(fx, Order2(); atol=0.0, rtol=0.0) ≈ 0.7728829591492101
true
```
Unlike `find_zero`, which errors on non-convergence, `solve` returns
`NaN` on non-convergence.
This next example has a zero at `0.0`, but
for most initial values will escape towards `±∞`, sometimes causing a
relative tolerance to return a misleading value. Here we can see the
differences:
```julia
julia> f(x) = cbrt(x) * exp(-x^2)
f (generic function with 1 method)
julia> x0 = 0.1147
0.1147
julia> find_zero(f, x0, Roots.Order5()) ≈ 5.936596662527689 # stopped as |f(xₙ)| ≤ |xₙ|ϵ
true
julia> find_zero(f, x0, Roots.Order1(), atol=0.0, rtol=0.0) # error as no check on `|f(xn)|`
ERROR: Roots.ConvergenceFailed("Algorithm failed to converge")
[...]
julia> fx = ZeroProblem(f, x0);
julia> solve(fx, Roots.Order1(), atol=0.0, rtol=0.0) # NaN, not an error
NaN
julia> fx = ZeroProblem((f, D(f)), x0); # higher order methods can identify zero of this function
julia> solve(fx, Roots.LithBoonkkampIJzerman(2,1), atol=0.0, rtol=0.0)
0.0
```
Functions may be parameterized, as illustrated:
```julia
julia> f(x, p=2) = cos(x) - x/p
f (generic function with 2 methods)
julia> Z = ZeroProblem(f, pi/4)
ZeroProblem{typeof(f), Float64}(f, 0.7853981633974483)
julia> solve(Z, Order1()) ≈ 1.0298665293222586 # use p=2 default
true
julia> solve(Z, Order1(), p=3) ≈ 1.170120950002626 # use p=3
true
julia> solve(Z, Order1(), 4) ≈ 1.2523532340025887 # by position, uses p=4
true
```
### Multiple zeros
The `find_zeros` function can be used to search for all zeros in a
specified interval. The basic algorithm essentially splits the interval into many
subintervals. For each, if there is a bracket, a bracketing algorithm
is used to identify a zero, otherwise a derivative free method is used
to search for zeros. This heuristic algorithm can miss zeros for various reasons, so the
results should be confirmed by other means.
```julia
julia> f(x) = exp(x) - x^4
f (generic function with 2 methods)
julia> find_zeros(f, -10,10) ≈ [α₀, α₁, α₂] # from above
true
```
The interval can also be specified using a structure with `extrema`
defined, where `extrema` returns two different values:
```julia
julia> using IntervalSets
julia> find_zeros(f, -10..10) ≈ [α₀, α₁, α₂]
true
```
(For tougher problems, the
[IntervalRootFinding](https://github.com/JuliaIntervals/IntervalRootFinding.jl)
package gives guaranteed results, rather than the heuristically
identified values returned by `find_zeros`.)
### Convergence
For most algorithms, convergence is decided when
* The value `|f(x_n)| <= tol` with `tol = max(atol, abs(x_n)*rtol)`, or
* the values `x_n ≈ x_{n-1}` with tolerances `xatol` and `xrtol` *and*
`f(x_n) ≈ 0` with a *relaxed* tolerance based on `atol` and `rtol`.
The `find_zero` algorithm stops if
* it encounters an `NaN` or an `Inf`, or
* the number of iterations exceed `maxevals`
If the algorithm stops and the relaxed convergence criteria is met,
the suspected zero is returned. Otherwise an error is thrown
indicating no convergence. To adjust the tolerances, `find_zero`
accepts keyword arguments `atol`, `rtol`, `xatol`, and `xrtol`, as
seen in some examples above.
The `Bisection` and `Roots.A42` methods are guaranteed to converge
even if the tolerances are set to zero, so these are the
defaults. Non-zero values for `xatol` and `xrtol` can be specified to
reduce the number of function calls when lower precision is required.
```julia
julia> fx = ZeroProblem(sin, (3,4));
julia> solve(fx, Bisection(); xatol=1/16)
3.125
```
## An alternate interface
This functionality is provided by the `fzero` function, familiar to
MATLAB users. `Roots` also provides this alternative interface:
* `fzero(f, x0::Real; order=0)` calls a
derivative-free method. with the order specifying one of
`Order0`, `Order1`, etc.
* `fzero(f, a::Real, b::Real)` calls the `find_zero` algorithm with the
`Bisection` method.
* `fzeros(f, a::Real, b::Real)` will call `find_zeros`.
### Usage examples
```julia
julia> f(x) = exp(x) - x^4
f (generic function with 2 methods)
julia> fzero(f, 8, 9) ≈ α₂ # bracketing
true
julia> fzero(f, -10, 0) ≈ α₀
true
julia> fzeros(f, -10, 10) ≈ [α₀, α₁, α₂]
true
julia> fzero(f, 3) ≈ α₁ # default is Order0()
true
julia> fzero(sin, big(3), order=16) ≈ π # uses higher order method
true
```
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | docs | 16186 | # Visualizing a step for various zero-finding algorithms
We illustrate the geometry behind a single step of several different, non-bracketing, zero-finding algorithms, beginning with, perhaps, the most famous, Newton's method.
## Newton's method
In addition to `Roots`, we use the `Plots` and `ForwardDiff` packages:
```@example geometry
using Roots
using Plots, ForwardDiff
Base.adjoint(f::Function) = x -> ForwardDiff.derivative(f, float(x)) # f' will compute derivative
```
A zero-finding algorithm solves ``f(x) = 0`` or possibly ``f(x,p) = 0`` for a value of ``x``. Here we discuss iterative algorithms which take one *or more* past steps to produce the next step. (That is ``x_{n+1} = F(x_n, x_{n-1}, ..., x_1, x_0)``, for some ``F`` representing the algorithm).
[Newton's Method](https://en.wikipedia.org/wiki/Newton%27s_method) is a zero-finding *iterative algorithm* easily introduced in an introductory calculus class once the concept of a *tangent line* is presented.
The value ``x_{n+1}`` is described as the *intersection point* of the ``x``-axis with the tangent line through ``(x_n, f(x_n))``. To be explicit, we substitute ``(x_{n+1},0)`` into the tangent line equation ``y = f(x_n) + f'(x_n)\cdot(x-x_n)``:
```math
0 = f(x_n) + f'(x_n) \cdot (x_{n+1} - x_n).
```
Solving gives the update formula:
```math
x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)}.
```
The convergence is not guaranteed for all initial guesses, ``x_0``, but for a *simple zero* of a continuously differentiable function ``f(x)`` there is **some** interval about the zero, ``\alpha``, such that *quadratic convergence* will happen.
The geometry of Newton's method can be illustrated by graphing the tangent line.
The function ``f(x) = x^5 - x - 1`` does not have a readily available closed-form solution for its lone real zero, it being a fifth-degree polynomial. However, a graph, or other means, can show the function has one zero between ``1`` and ``2``, closer to ``1``. Starting with ``x_0=1.4``, we get a visual of ``x_1`` as follows:
```@example geometry
f(x) = x^5 - x - 1
x0 = 1.4
α = find_zero((f, f'), x0, Roots.Newton())
tl(x) = f(x0) + f'(x0)*(x-x0)
x1 = x0 - f(x0)/f'(x0)
p = plot(f, 1.1, 1.5; legend=false, linewidth=3)
plot!(zero)
plot!(tl; color="red", linewidth=3)
scatter!([x0, x1], [0, 0]; markercolor="blue")
annotate!([(x0,0,"x0", :bottom), (x1, 0, "x1", :bottom)])
scatter!([x0], [f(x0)]; markercolor=:blue)
scatter!([α], [0]; markercolor=:blue)
annotate!([(α, 0, "α", :top)])
p
```
We used `Roots.Newton()` to identify the zero.
## Secant method
The secant method is much older than Newton's method, though similar in that the intersection of a line with the ``x``-axis is used as the next step in the algorithm. The slope of the secant line is (historically) easy to compute, unlike the slope of the tangent line which requires the notion of a derivative. The secant method begins with *two* initial points, ``x_0`` and ``x_1`` and uses the secant line instead of the tangent line. The secant line has slope ``(f(x_1)-f(x_0))/(x_1-x_0)``. This yields the algorithm:
```math
x_{n+1} = x_n - \left(\frac{f(x_n)-f(x_{n-1})}{x_n-x_{n-1}}\right)^{-1} \cdot f(x_n).
```
We can visualize the secant method easily enough. Suppose we start with ``x_0=1.4`` and ``x_1=1.3``:
```@example geometry
x0, x1 = 1.4, 1.3
x2 = x1 - (x1-x0)/(f(x1)-f(x0)) * f(x1)
sl(x) = f(x1) + (f(x1)-f(x0))/(x1-x0) * (x-x1)
p = plot(f, 1.1, 1.5; legend=false, linewidth=3)
plot!(zero)
plot!(sl, color=:red, linewidth=3)
scatter!([x0, x1, x2], [0,0,0]; markercolor=:blue)
annotate!([(x0,0,"x0", :bottom), (x1, 0, "x1", :bottom), (x2,0,"x2", :bottom)])
scatter!([x0, x1], [f(x0), f(x1)]; markercolor=:blue)
scatter!([α],[0]; markercolor=:blue)
annotate!([(α, 0, "α", :top)])
p
```
The secant method is implemented in `Secant()`. As the tangent line is the best local approximation to the function near a point, it should be expected that the secant method converges at a slower rate than Newton's method.
Steffensen's method (`Roots.Steffensen()`) is related to the secant method, though the points are not ``x_n`` and ``x_{n-1}``, but rather ``x_n + f(x_n)`` and ``x_n``. As ``x_n`` gets close to ``\alpha``, ``f(x_n)`` gets close to ``0``, so this method converges at an asymptotic rate like Newton's method. (Though with a tradeoff: while the secant method needs only one new function evaluation per step, Steffensen's requires two.) A single step is written out below.
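To make this concrete, here is one Steffensen step for the running example written out by hand (a minimal sketch; `Roots.Steffensen()` wraps this update with convergence checks):
```@example geometry
x0 = 1.17                          # a starting point near α
fx0 = f(x0)
slope = (f(x0 + fx0) - fx0) / fx0  # secant slope over the interval (x0, x0 + f(x0))
x1 = x0 - fx0 / slope              # one Steffensen step: two function evaluations
```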
### An inverse view
The secant line is a natural object as presented above, but can be viewed a bit differently. Consider the two points ``(f(x_0), x_0)`` and ``(f(x_1),x_1)``. Two non-equal points determine a line. In this case, we have inverted the ``x`` and ``y`` values, so we invert the coordinates of the line. To find ``x = my + b``, or some other form of the line involves solving two equations with two unknowns. Each equation comes by using the known point:
```math
\begin{align*}
x_0 &= m \cdot f(x_0) + b\\
x_1 &= m \cdot f(x_1) + b
\end{align*}
```
This *linear* set of equations can be solved, some `SymPy` code would look like:
```
using SymPy
@syms x0, y0, x1, y1, m, b
u = solve([x0 ~ y0 * m + b, x1 ~ y1 * m + b], (m,b))
```
Producing
```
Dict{Any, Any} with 2 entries:
b => (-x0*y1 + x1*y0)/(y0 - y1)
m => (x0 - x1)/(y0 - y1)
```
The value of `m` is the reciprocal of the slope, as we have inverted the perspective. The value of `b` is where the inverse line intersects the ``x`` axis and is the same as the secant method algorithm:
```
sm = x1 - y1 * (x1-x0)/(y1-y0)
simplify(sm - u[b])
```
leading to:
```
0
```
## Inverse quadratic and cubic methods
Brent's method (`Roots.Brent()`) is a bracketing method which utilizes an inverse quadratic step to speed up convergence beyond the secant method. The inverse quadratic step uses the fact that three (non-collinear) points determine a quadratic polynomial. As above, this is done with the inverse of the points, ``(x_{n-2}, f(x_{n-2}))``, ``(x_{n-1}, f(x_{n-1}))``, and ``(x_{n}, f(x_{n}))``. Using the same method illustrated above, it can be shown that, with ``\Delta_{i,j} = f(x_i) - f(x_j)``:
```math
x_{n+1} = \frac{x_{n-2}f(x_{n-1})f(x_n)}{\Delta_{n-2, n-1}\Delta_{n-2,n}}
+ \frac{f(x_{n-2})x_{n-1}f(x_n)}{\Delta_{n-1, n-2}\Delta_{n-1,n}}
+ \frac{f(x_{n-2})f(x_{n-1})x_n}{\Delta_{n,n-2}\Delta_{n,n-1}}.
```
```
x0, x1, x2 = xs = 1.4, 1.3, 1.2
fx0, fx1, fx2 = ys = f.(xs)
@syms x[0:2], y[0:2], a, b, c
u = solve([xᵢ ~ a*yᵢ^2 + b * yᵢ + c for (xᵢ, yᵢ) ∈ zip(x, y)], (a, b, c))
x3 = u[c]
for (k, v) ∈ u
for (xᵢ, yᵢ, x,y) ∈ zip(x, y, xs, ys)
v = v(xᵢ => x, yᵢ => y)
end
u[k] = v
end
u[a], u[b], u[c]
```
Which returns
```
(-0.00930682152998560, 0.104752944517765, 1.17057129242798)
```
This last value, `c`, is also computed in the `(3,0)` method of the `LithBoonkkampIJzerman` algorithm, which implements this method:
```@example geometry
x0, x1, x2 = 1.4, 1.3, 1.2
xs = [x0, x1,x2]
x3 = Roots.lmm(Roots.LithBoonkkampIJzerman{3,0}(), xs, f.(xs))
```
With this, we can visualize:
```@example geometry
a, b, c = (-0.00930682152998560, 0.104752944517765, 1.17057129242798)
iq(y) = a * y^2 + b * y + c
p = plot(f, 1.1, 1.5; legend=false, linewidth=3)
plot!(zero)
ys′ = range(f(1.1), f(1.5), length=100)
plot!(iq.(ys′), ys′; color=:red, linewidth=3)
scatter!(xs, f.(xs); markercolor=:blue)
scatter!([x3], [f(x3)]; markercolor=:blue)
annotate!([(x0,0,"x0", :bottom), (x1, 0, "x1", :bottom),
(x2,0,"x2", :bottom), (x3,0,"x3", :bottom)])
scatter!(xs, zero.(xs); markercolor=:blue)
scatter!([α],[0]; markercolor=:blue)
annotate!([(α, 0, "α", :top)])
p
```
Inverse cubic is similar to the above, though we have ``4`` past points used to determine the next one. In this example, we solve the resulting linear system of equations numerically (the `Roots.LithBoonkkampIJzerman{4,0}()` method implements the algorithm):
```@example geometry
xs = [1.4, 1.35, 1.3, 1.25]
ys = f.(xs)
A = zeros(Float64, 4, 4)
for i ∈ reverse(0:3)
A[:,4-i] .= ys.^i
end
a, b, c, d = A \ xs
ic(y) = a * y^3 + b * y^2 + c * y + d
p = plot(f, 1.1, 1.5; legend=false, linewidth=3)
plot!(zero)
plot!(ic.(ys′), ys′; color=:red, linewidth=3)
x4 = d
scatter!(vcat(xs, x4), zeros(5); markercolor=:blue)
for (i,x) ∈ enumerate(xs)
annotate!([(x, 0, "x$(i-1)", :bottom)])
end
annotate!([(x4, 0, "x4", :bottom)])
scatter!(xs, f.(xs); markercolor=:blue)
scatter!([α],[0]; markercolor=:blue)
annotate!([(α, 0, "α", :top)])
p
```
We can see graphically that for this function and the chosen values, the inverse quadratic and inverse cubic fits are very close to the actual zero, suggesting rapid convergence. `Roots.Brent()`, `Roots.Chandrapatla()`, and `Roots.AlefeldPotraShi()` are different bracketing methods which use an inverse quadratic step *when* the algorithm deems it productive, falling back to other estimates when not. Similarly, the inverse cubic step is utilized by `Roots.A42()`, when possible. The `LithBoonkkampIJzerman{S,0}` methods use ``S`` previous points (``S \geq 2``) and the corresponding inverse polynomial step to progress. Since these are not bracketing methods, the algorithms are only guaranteed to converge for nearby initial guesses.
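These claims are easy to spot-check numerically (a quick sanity check, not part of the visualization):
```@example geometry
# each bracketing method that mixes in inverse quadratic or cubic steps
# locates the zero of f(x) = x^5 - x - 1 in the bracket (1, 2)
for M in (Roots.A42(), Roots.AlefeldPotraShi(), Roots.Brent())
    @assert find_zero(f, (1, 2), M) ≈ α
end
```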
## Higher derivative variations on Newton's method
We can visualize Newton's method differently than as an intersection of the ``x``-axis with a specific tangent line, rather we can think of it as an intersection of the ``x``-axis with *a* line ``ax + by = c`` with *two* constraints:
* the point ``(x_n, f(x_n))`` is on the line
* the line matches the slope of the tangent line at that point (a tangency condition)
Combined, these say that the constrained line has slope ``f'(x_n)`` and goes through the point ``(x_n, f(x_n))``, so is the tangent line; Newton's method follows.
When a quadratic approximation to the graph of ``f(x)`` is chosen *at* ``(x_n, f(x_n))`` other algorithms become possible.
[Geometric constructions of iterative functions to solve nonlinear equations](https://doi.org/10.1016/S0377-0427(03)00420-5) by Amat, Busquier, and Gutiérrez has a systematic approach we follow.
### Euler method
Consider the quadratic expression ``y + ax^2 + bx + c = 0``.
Assuming the expression goes through the point ``(x_n, f(x_n))`` and the tangency conditions: ``y'(x) = f'(x)`` **and** ``y''(x) = f''(x)``, we get the second-order Taylor polynomial ``y(x) = f(x_n) + f'(x_n)(x-x_n) + f''(x_n)/2 \cdot(x-x_n)^2`` as the solution.
Let
```math
L_f(x) = \frac{f(x)/f'(x)}{f'(x)/f''(x)} = \frac{f(x)f''(x)}{f'(x)^2}.
```
Then the intersection of the curve with the ``x``-axis can be represented as:
```math
x_{n+1} = x_n - \frac{2}{1 + \sqrt{1 - 2L_f(x_n)}} \cdot \frac{f(x_n)}{f'(x_n)}.
```
This is known as Euler's method or the irrational Halley method and implemented in `Roots.IrrationalHalley()`:
```@example geometry
L_f(x) = f(x) * f''(x) / (f'(x))^2
x0 = 1.4
x1 = x0 - 2 / (1 + sqrt(1 - 2L_f(x0))) * f(x0)/f'(x0)
t2(x) = f(x0) + f'(x0)*(x-x0) + f''(x0)/2 * (x - x0)^2
a, b = 1.1, 1.5
p = plot(f, a, b; legend=false, linewidth=3)
plot!(zero)
plot!(t2; color=:red, linewidth=3)
scatter!([x0, x1], [0,0]; markercolor=:blue)
annotate!([(x0,0,"x0", :bottom), (x1, 0, "x1", :bottom)])
scatter!([x0], [f(x0)]; markercolor=:blue)
scatter!([α],[0]; markercolor=:blue)
annotate!([(α, 0, "α", :top)])
p
```
### Halley's method
The general form of a quadratic equation ``Ax^2 + By^2 + Cxy + Dx + Ey + F = 0`` is specialized above by setting ``B=C=0`` and ``E=1`` and then imposing the point ``(x_n, f(x_n))`` as a solution along with tangency conditions. The famous Halley's method can be seen as the specialization to a hyperbola: ``axy + y + bx + c = 0``. This yields the curve
```math
y - (f(x_n) + f'(x_n)(x-x_n) + \frac{f''(x_n)}{2f'(x_n)}(x-x_n) \cdot (y-f(x_n))) = 0
```
and the iterative algorithm:
```math
x_{n+1} = x_n - \frac{2}{2 - L_f(x_n)} \cdot \frac{f(x_n)}{f'(x_n)}.
```
We can visualize, as follows, using a contour plot to represent the hyperbola.
```@example geometry
x1 = x0 - 2 / (2 - L_f(x0)) * f(x0)/f'(x0)
F(x,y) = y - f(x0) - f'(x0)*(x-x0) - f''(x0)/(2f'(x0)) * (x-x0) * (y-f(x0))
a, b = 1.1, 1.5
p = plot(f, a, b; legend=false, linewidth=3)
plot!(zero)
xs, ys = range(a, b, length=50), range(f(a), f(b), length=50);
zs = [F(x,y) for y ∈ ys, x ∈ xs];
contour!(xs, ys, zs; levels = [0], color=:red, linewidth=3)
scatter!([x0, x1], [0,0]; markercolor=:blue)
annotate!([(x0,0,"x0", :bottom), (x1, 0, "x1", :bottom)])
scatter!([x0], [f(x0)]; markercolor=:blue)
scatter!([α],[0]; markercolor=:blue)
annotate!([(α, 0, "α", :top)])
p
```
`Roots.Halley()` provides an implementation.
### Chebyshev's method
Chebyshev's method uses an inverse quadratic fit, specialized with ``ay^2 + y + bx + c = 0``, to compute the next iterate. It can be expressed via:
```math
\frac{-f''(x_n)}{2f'(x_n)^2}(y - f(x_n))^2 + y - f(x_n) - f'(x_n)(x-x_n) = 0
```
and the algorithm becomes:
```math
x_{n+1} = x_n - (1 + \frac{1}{2} L_f(x_n)) \frac{f(x_n)}{f'(x_n)}.
```
This is visualized in a similar manner as the last example:
```@example geometry
x1 = x0 - (1 + 1/2 * L_f(x0)) * f(x0) / f'(x0)
F(x, y) = -f''(x0)/(2f'(x0)^2) * (y-f(x0))^2 + y - f(x0) - f'(x0) * (x- x0)
a, b = 1.1, 1.5
p = plot(f, a, b; legend=false, linewidth=3)
plot!(zero)
xs, ys = range(a, b, length=50), range(f(a), f(b), length=50);
zs = [F(x,y) for y ∈ ys, x ∈ xs];
contour!(xs, ys, zs; levels = [0], color=:red, linewidth=3)
scatter!([x0, x1], [0,0]; markercolor=:blue)
annotate!([(x0,0,"x0", :bottom), (x1, 0, "x1", :bottom)])
scatter!([x0], [f(x0)]; markercolor=:blue)
scatter!([α],[0]; markercolor=:blue)
annotate!([(α, 0, "α", :top)])
p
```
`Roots.InverseQuadratic()` provides an implementation; `Roots.ChebyshevLike()` an accelerated version.
Amat, Busquier, and Gutiérrez also consider the hyperbola
```math
ay^2 + bxy + y + cx + d = 0
```
As there are ``4`` unknowns and only ``3`` constraints, the solution will depend on a parameter, they call ``b_n,`` yielding:
```math
\begin{align*}
c_n &= -f'(x_n)\\
a_n &= -\frac{f''(x_n)}{2f'(x_n)^2} - \frac{b_n}{f'(x_n)}\\
0 &= x - x_n + (y-f(x_n)) \frac{1 + a_n \cdot (y - f(x_n))}{b_n\cdot (y-f(x_n)) + c_n}
\end{align*}
```
This gives the algorithm:
```math
x_{n+1} = x_n - \left(1 + \frac{1}{2}\frac{L_f(x_n)}{1 + b_n \cdot (f(x_n)/f'(x_n))}\right) \cdot \frac{f(x_n)}{f'(x_n)}.
```
Newton's method is recovered by letting ``b_n \rightarrow 0``, Chebyshev's method is when ``b_n=0``, Halley's method is when ``b_n = -f''(x_n)/(2f'(x_n))``.
The super-Halley method is when ``b_n = -f''(x_n)/f'(x_n)``. We can visualize this:
```@example geometry
cn = -f'(x0)
bn = -f''(x0)/f'(x0)
an = -f''(x0)/(2f'(x0)^2) - bn/f'(x0)
x1 = x0 - (1 + 1/2 * (L_f(x0) / (1 + bn * f(x0) / f'(x0)))) * f(x0)/f'(x0)
F(x, y) = x - x0 + (y-f(x0)) * (1 + an * (y - f(x0))) / (bn * (y - f(x0)) + cn)
a, b = 1.1, 1.5
p = plot(f, a, b; legend=false, linewidth=3)
plot!(zero)
xs, ys = range(a, b, length=50), range(f(a), f(b), length=50);
zs = [F(x,y) for y ∈ ys, x ∈ xs];
contour!(xs, ys, zs; levels = [0], color=:red, linewidth=3)
scatter!([x0, x1], [0,0]; markercolor=:blue)
annotate!([(x0,0,"x0", :bottom), (x1, 0, "x1", :bottom)])
scatter!([x0], [f(x0)]; markercolor=:blue)
scatter!([α],[0]; markercolor=:blue)
annotate!([(α, 0, "α", :top)])
p
```
`Roots.SuperHalley()` provides an implementation.
The authors discuss using different osculating curves, such as a cubic equation. As they mention, all their methods take the form (using [big O notation](https://en.wikipedia.org/wiki/Big_O_notation)):
```math
x_{n+1} = x_n - (1 + \frac{1}{2}L_f(x_n) + \mathcal{O}(L_f(x_n)^2)) \cdot \frac{f(x_n)}{f'(x_n)}.
```
The algorithms implemented in the `Roots.LithBoonkkampIJzerman{S,D}()` methods use a differential equations approach to evaluate the inverse of ``f(x)`` at ``0``. The methods all find inverse polynomial approximations to ``f^{-1}``. The methods for ``D=0`` are, as seen, for ``S=2`` an inverse secant line, for ``S=3`` an inverse quadratic approximation, and for ``S=4`` an inverse cubic; when ``S=1`` and ``D=1``, Euler's method turns into Newton's method, and for ``S=1`` and ``D=2`` the inverse quadratic or Chebyshev method is used.
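As a quick check of the family (a small illustration; the positional constructor `Roots.LithBoonkkampIJzerman(S, D)` mirrors the `{S,D}` type parameters):
```@example geometry
# S=3 remembered points and D=0 derivatives gives the inverse quadratic progression;
# S=1 and D=2 uses one point and two derivatives (the Chebyshev-like step)
solve(ZeroProblem(f, 1.1), Roots.LithBoonkkampIJzerman(3, 0)) ≈ α
solve(ZeroProblem((f, f', f''), 1.1), Roots.LithBoonkkampIJzerman(1, 2)) ≈ α
```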
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | docs | 2497 | # Roots.jl
Documentation for [Roots.jl](https://github.com/JuliaMath/Roots.jl)
## About
`Roots` is a `Julia` package for finding zeros of continuous
scalar functions of a single real variable using floating point numbers. That is solving ``f(x)=0`` for ``x`` adjusting for floating-point idiosyncrasies.
The `find_zero` function provides the
primary interface. It supports various algorithms through the
specification of a method. These include:
* Bisection-like methods. For functions where a bracketing interval
is known (one where ``f(a)`` and ``f(b)`` have alternate signs),
there are several bracketing methods, including `Bisection`. For
most floating point number types, bisection occurs in a manner
exploiting floating point storage conventions leading to an exact
zero or a bracketing interval as small as floating point
computations allow. Other methods include `A42`,
`AlefeldPotraShi`, `Roots.Brent`, `Roots.Chandrapatla`,
`Roots.ITP`, `Roots.Ridders`, and ``12`` flavors of
`FalsePosition`. The default bracketing method for
the basic floating-point types is `Bisection`, as it is more robust to some inputs,
but `A42` and `AlefeldPotraShi` typically converge in a few
iterations and are more performant.
* Several derivative-free methods. These are specified
through the methods `Order0`, `Order1` (the secant method), `Order2`
(the Steffensen method), `Order5`, `Order8`, and `Order16`. The
number indicates, roughly, the order of convergence. The `Order0`
method is the default, and the most robust, as it finishes off with
a bracketing method when a bracket is encountered. The higher order
methods promise higher order (faster) convergence, though don't
always yield results with fewer function calls than `Order1` or
`Order2`. The methods `Roots.Order1B` and `Roots.Order2B` are
superlinear and quadratically converging methods independent of the
multiplicity of the zero.
* Methods requiring one or more derivatives: `Roots.Newton`,
`Roots.Halley` are classical ones, `Roots.QuadraticInverse`,
`Roots.ChebyshevLike`, `Roots.SuperHalley` are others.
`Roots.Schroder` provides a quadratic method, like Newton's method,
which is independent of the multiplicity of the zero. The
`Roots.ThukralXB` methods, `X=2`, `3`, `4`, or `5`, are also multiplicity
free. The `X` denotes the number of derivatives that need
specifying. The `Roots.LithBoonkkampIJzerman{S,D}` methods remember
`S` steps and use `D` derivatives.
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | docs | 17798 | # Reference/API
The `Roots` package provides several different algorithms to solve `f(x)=0`.
```@index
Pages = ["reference.md"]
```
```@setup reference
using Roots
```
```@meta
DocTestSetup = quote
using Roots
end
```
```@meta
CurrentModule = Roots
```
```@docs
Roots
```
## The `find_zero` and `find_zeros` functions
There are two main functions: `find_zero` to identify a zero of ``f`` given some initial starting value or bracketing interval and `find_zeros` to heuristically identify all zeros in a specified interval.
```@docs
find_zero
find_zeros
```
## CommonSolve interface
The problem-algorithm-solve interface is a pattern popularized in `Julia` by the `DifferentialEquations.jl` suite of packages. This can be used as an alternative to `find_zero`. Unlike `find_zero`, `solve` will return `NaN` on non-convergence.
```@docs
Roots.solve!
Roots.solve
Roots.ZeroProblem
```
## Classical methods based on derivatives
We begin by describing the classical methods, even though they are not necessarily recommended (they require more work of the user), as they give insight into why a variety of methods is available.
The classical methods of [Newton](https://en.wikipedia.org/wiki/Newton%27s_method) and [Halley](https://en.wikipedia.org/wiki/Halley%27s_method) utilize information about the function and its derivative(s) in an iterative manner to converge to a zero of ``f(x)`` given an initial starting value.
Newton's method is easily described:
From an initial point, the next point in the iterative algorithm is found by identifying the intersection of the ``x`` axis with the tangent line of ``f`` at the initial point. This is repeated until convergence or the realization that convergence won't happen for the initial point. Mathematically,
``x_{n+1} = x_{n} - f(x_n)/f'(x_n).``
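In code, the iteration might be sketched as follows (an illustration only; `Roots.Newton` adds careful convergence checks and tolerances):

```julia
# A minimal sketch of Newton's iteration; f and fp are user supplied
function newton_sketch(f, fp, x0; maxiters=100, atol=1e-12)
    x = float(x0)
    for _ in 1:maxiters
        Δ = f(x) / fp(x)   # Newton correction f(xₙ)/f'(xₙ)
        x -= Δ
        abs(Δ) ≤ atol && return x
    end
    error("no convergence")
end

newton_sketch(x -> x^3 - 2x - 5, x -> 3x^2 - 2, 2)  # ≈ 2.0945514815423265
```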
Some facts are helpful to understand the different methods available in `Roots`:
* For Newton's method there is a formula for the error: Set
``\epsilon_n = \alpha - x_n``, where ``\alpha`` is the zero, then
``\epsilon_{n+1} = -f''(\xi_n)/(2f'(\xi_n)) \cdot \epsilon_n^2,``
here ``\xi_n`` is some value between ``\alpha`` and ``x_n``.
* The error term, when of the form ``|\epsilon_{n+1}| \leq
C\cdot|\epsilon_n|^2``, can be used to identify an interval around
``\alpha`` for which convergence is guaranteed. Such convergence is
termed *quadratic* (order 2). For floating point solutions,
quadratic convergence and a well chosen initial point can lead to
convergence in 4 or 5 iterations. In general, convergence is termed
order ``q`` when ``|\epsilon_{n+1}| \approx C\cdot|\epsilon_n|^q``.
* The term ``-f''(\xi_n)/(2f'(\xi_n))`` indicates possible issues when ``f''`` is too big near ``\alpha`` or ``f'`` is too small near ``\alpha``. In particular if ``f'(\alpha) = 0``, there need not be quadratic convergence, and convergence can take many iterations. A zero for which ``f(x) = (x-\alpha)^{1+\beta}\cdot g(x)``, with ``g(\alpha) \neq 0`` is called *simple* when ``\beta=0`` and non-simple when ``\beta > 0``. Newton's method is quadratic near *simple zeros* and need not be quadratic near *non-simple* zeros.
As well, if ``f''`` is too big near ``\alpha``, or ``f'`` too small near ``\alpha``, or ``x_n`` too far from ``\alpha`` (that is, ``|\epsilon_n|>1``) the error might actually increase and convergence is not guaranteed.
* The explicit form of the error function can be used to guarantee convergence for functions with a certain shape (monotonic, convex functions where the sign of ``f''`` and ``f'`` don't change). Quadratic convergence may only occur once the algorithm is near the zero.
* The number of function evaluations per step for Newton's method is 2.
----
```@docs
Roots.Newton
Roots.Halley
Roots.QuadraticInverse
Roots.ChebyshevLike
Roots.SuperHalley
```
Newton and Halley's method are members of this family of methods:
```@docs
Roots.LithBoonkkampIJzerman{S,D}
```
## Derivative free methods
The [secant](https://en.wikipedia.org/wiki/Secant_method) method replaces the derivative term in Newton's method with the slope of a secant line using two prior values:
``x_{n+1} = x_n - (\frac{f(x_n)-f(x_{n-1})}{x_n - x_{n-1}})^{-1}\cdot f(x_n).``
Though the secant method has convergence rate of order ``\approx 1.618`` -- i.e., is not quadratic -- it
only requires one new function call per step so can be very effective. Often function evaluations are the slowest part of the computation and, as well, no derivative is needed. Because it can be very efficient, the secant method is used in the default method of `find_zero` when called with a single initial starting point.
[Steffensen's](https://en.wikipedia.org/wiki/Steffensen%27s_method) method is a quadratically converging, derivative-free method which uses a secant line based on ``x_n`` and ``x_n + f(x_n)``. Though of higher order, it requires additional function calls per step and depends on a good initial starting value. Other derivative-free methods are available, trading off increased function calls for higher-order convergence. They may be of interest when arbitrary precision is needed. A measure of efficiency is ``q^{1/r}`` where ``q`` is the order of convergence and ``r`` the number of function calls per step. With this measure, the secant method would be ``\approx (1.618)^{1/1}`` and Steffensen's would be less (``2^{1/2}``).
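For illustration, a bare-bones secant iteration might look like the following (the package's `Secant` adds tolerances and safeguards):

```julia
# A sketch of the secant iteration; assumes f(x0) ≠ f(x1) along the way
function secant_sketch(f, x0, x1; maxiters=100, atol=1e-12)
    a, b = float(x0), float(x1)
    fa, fb = f(a), f(b)
    for _ in 1:maxiters
        a, b = b, b - fb * (b - a) / (fb - fa)  # secant step
        fa, fb = fb, f(b)
        abs(b - a) ≤ atol && return b
    end
    error("no convergence")
end

secant_sketch(sin, 3.0, 4.0)  # ≈ π
```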
----
```@docs
Secant
Order1
Steffensen
Order2
Order5
Order8
Order16
```
## Bracketing methods
The [bisection](https://en.wikipedia.org/wiki/Bisection_method) method identifies a zero of a *continuous* function between ``a`` and ``b`` when ``f(a)`` and ``f(b)`` have different signs. (The interval ``[a,b]`` is called a bracketing interval when ``f(a)\cdot f(b) <0``.) The basic algorithm is particularly simple: an interval ``[a_i,b_i]`` is split at ``c = (a_i+b_i)/2``. Either ``f(c)=0``, or one of ``[a_i,c]`` or ``[c,b_i]`` is a bracketing interval, which is called ``[a_{i+1},b_{i+1}]``. From this description, we see that ``[a_i,b_i]`` has length ``2^{-i}`` times the length of ``[a_0,b_0]``, so the intervals will eventually terminate by finding a zero, ``c``, or converge to a zero. This convergence is slow (the efficiency is only ``1``), but it is guaranteed. For `16`-, `32`-, and `64`-bit floating point values, a reinterpretation of how the midpoint (``c``) is found leads to convergence in no more than ``64`` iterations, unlike the midpoint found above, where some cases can take many more steps to converge.
In floating point, by guaranteed convergence we have either an exact zero or a bracketing interval consisting of two adjacent floating point values. When applied to *non*-continuous functions, this algorithm will identify an exact zero or a zero crossing of the function. (E.g., applied to ``f(x)=1/x`` it will find ``0``.)
The default selection of midpoint described above includes no information about the function ``f`` beyond its sign. Algorithms exploiting the shape of the function can be significantly more efficient. For example, the bracketing method `Roots.AlefeldPotraShi` due to [Alefeld, Potra, and Shi](https://dl.acm.org/doi/10.1145/210089.210111) has efficiency ``\approx 1.6686``. This method is also used in the default method for `find_zero` when a single initial starting point is given if a bracketing interval is identified.
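The basic loop might be sketched as follows (a plain floating-point illustration; as noted above, the package's `Bisection` instead bisects over the bit representation):

```julia
# Bisect until the interval is two adjacent floats or an exact zero is hit
function bisection_sketch(f, a, b)
    fa, fb = f(a), f(b)
    @assert fa * fb < 0 "not a bracketing interval"
    while true
        c = (a + b) / 2
        (c == a || c == b) && return c   # interval cannot shrink further
        fc = f(c)
        iszero(fc) && return c
        fa * fc < 0 ? ((b, fb) = (c, fc)) : ((a, fa) = (c, fc))
    end
end

bisection_sketch(x -> cos(x) - x, 0.0, 1.5)  # ≈ 0.7390851332151607
```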
----
```@docs
Bisection
Roots.A42
Roots.AlefeldPotraShi
Roots.Brent
Roots.Chandrapatla
Roots.Ridders
Roots.ITP
FalsePosition
Roots.LithBoonkkampIJzermanBracket
Roots.BracketedHalley
Roots.BracketedChebyshev
Roots.BracketedSchroder
```
## Non-simple zeros
The order of convergence for most methods is for *simple* zeros, values ``\alpha`` where ``f(x) = (x-\alpha) \cdot g(x)``, with ``g(\alpha)`` being non-zero. For methods which are of order ``k`` for non-simple zeros, usually an additional function call is needed per step. For example, this is the case for `Roots.Newton` as compared to `Roots.Schroder`.
Derivative-free methods for non-simple zeros have the following implemented:
```@docs
Roots.King
Roots.Order1B
Roots.Esser
Roots.Order2B
```
For non-simple zeros, Schroder showed an additional derivative can be used to yield quadratic convergence based on Newton's method:
```@docs
Roots.Schroder
```
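For example, a sketch contrasting the two at a double zero (the function and starting point are our own test case):

```julia
using Roots
f(x) = (x - 1)^2      # non-simple (double) zero at 1
fp(x) = 2(x - 1)
fpp(x) = 2one(x)
find_zero((f, fp), 2.0, Roots.Newton())          # converges, but only linearly
find_zero((f, fp, fpp), 2.0, Roots.Schroder())   # quadratic despite the multiplicity
```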
A family of methods for non-simple zeros which require ``k`` derivatives to be order ``k``, with ``k=2`` yielding Schroder's method, are implemented in:
```@docs
Roots.AbstractThukralBMethod
```
## Hybrid methods
A useful strategy is to begin with a non-bracketing method and switch to a bracketing method should a bracket be encountered. This allows for the identification of zeros which are not surrounded by a bracket, and have guaranteed convergence should a bracket be encountered. It is used by default by `find_zero(f,a)`.
```@docs
Roots.Order0
```
## All zeros
The `find_zeros` function heuristically scans an interval for all zeros using a combination of bracketing and non-bracketing methods. The `AllZeros` method may be passed to `solve` to call this.
```@docs
Roots.AllZeros
```
## Rates of convergence
The order of a method is ``q``, where ``e_{i+1} \approx
e_i^q``. Newton's method is famously quadratic *for* simple roots;
the secant method is of order ``q \approx \varphi=1.618\dots``. However,
``p=2`` function calls are needed per step of Newton's method, and only ``p=1`` for the
secant method. The asymptotic efficiency is ``q^{1/p}``, which
penalizes function calls. There are other order-``k`` methods taking
``k`` function calls per step, e.g., Halley's; others take fewer, as
seen below. Many use inverse quadratic steps, others inverse
cubic--these have order ``q`` solving ``q^{s+1}-2q^s+1=0`` (``s=3`` for
the quadratic case). For robust methods, generally ``1`` additional function
call per step is needed to achieve the stated convergence rate, `Schroder` being a
good example.
| Type | Method | Order | F evals | Asymptotic efficiency |
|:--------------- | :--------------------------- | :--------------------- | :------ | :------------------------------------ |
| Hybrid | Order0 | | | ``\approx 1.618\dots`` |
| Derivative Free | Secant | ``\varphi=1.618\dots`` | ``1`` | ``1.618\dots`` |
| Derivative Free | Steffensen | ``2`` | ``2`` | ``1.414\dots`` |
| Derivative Free | Order5 | ``5`` | ``4`` | ``1.495\dots`` |
| Derivative Free | Order8 | ``8`` | ``4`` | ``1.681\dots`` |
| Derivative Free | Order16 | ``16`` | ``5`` | ``1.718\dots`` |
| Classical | Newton | ``2`` | ``2`` | ``1.414\dots`` |
| Classical | Halley | ``3`` | ``3`` | ``1.442\dots`` |
| Classical | QuadraticInverse | ``3`` | ``3`` | ``1.442\dots`` |
| Classical | ChebyshevLike | ``3`` | ``3`` | ``1.442\dots`` |
| Classical | SuperHalley | ``3`` | ``3`` | ``1.442\dots`` |
| MultiStep | LithBoonkkampIJzerman{S,D} | ``p^s=\sum p^k(d+\sigma_k)`` | ``D+1`` | varies, ``1.92\dots`` max |
| Bracketing | BisectionExact | ``1`` | ``1`` | ``1`` |
| Bracketing | A42 | ``(2 + 7^{1/2})`` | ``3,4`` |``(2 + 7^{1/2})^{1/3} = 1.6686\dots`` |
| Bracketing | AlefeldPotraShi | | ``3,4`` | ``1.618\dots`` |
| Bracketing | Brent | ``\leq 1.89\dots`` | ``1`` | ``\leq 1.89\dots`` |
| Bracketing | ITP | ``\leq \varphi`` | ``1`` | ``\leq \varphi`` |
| Bracketing | Ridders | ``1.83\dots`` | ``2`` | ``1.225\dots`` |
| Bracketing | FalsePosition | ``1.442\dots`` | ``1`` | ``1.442\dots`` |
| Bracketing | LithBoonkkampIJzermanBracket | ``2.91`` | ``3`` | ``1.427\dots`` |
| Robust | King | ``\varphi=1.618\dots`` | ``2`` | ``1.272\dots`` |
| Robust | Esser | ``2`` | ``3`` | ``1.259\dots`` |
| Robust | Schroder | ``2`` | ``3`` | ``1.259\dots`` |
| Robust | Thukral3 | ``3`` | ``4`` | ``1.316\dots`` |
| Robust | Thukral4 | ``4`` | ``5`` | ``1.319\dots`` |
| Robust | Thukral5 | ``5`` | ``6`` | ``1.307\dots`` |
## Convergence
Identifying when an algorithm converges or diverges requires specifications of tolerances and convergence criteria.
In the case of exact bisection, convergence is mathematically
guaranteed. For floating point numbers, either an *exact* zero is
found, or the bracketing interval can be subdivided into ``[a_n,b_n]``
with ``a_n`` and ``b_n`` being adjacent floating point values. That is
``b_n-a_n`` is as small as possible in floating point numbers. This can
be considered a stopping criterion in ``\Delta x``. For early termination
(less precision but fewer function calls) a tolerance can be given so
that if ``\Delta_n=b_n-a_n`` is small enough the algorithm stops
successfully. In floating point, assessing if ``b_n \approx a_n``
requires two tolerances: a *relative* tolerance, as the minimal
differences in floating point values depend on the size of ``b_n`` and
``a_n``, and an absolute tolerance for values near ``0``. The values
`xrtol` and `xatol` are passed to the `Base.isapprox` function to
determine closeness.
Relying on the closeness of two ``x`` values will not be adequate for
all problems, as there are examples where the difference
``\Delta_n=|x_n-x_{n-1}|`` can be quite small, ``0`` even, yet ``f(x_n)`` is
not near a ``0``. As such, for non-bracketing methods, a check on the
size of ``f(x_n)`` is also used. As we find floating point
approximations to ``\alpha``, the zero, we must consider values small
when ``f(\alpha(1+\epsilon))`` is small. By Taylor's approximation, we
can expect this to be around
``\alpha\cdot \epsilon \cdot f'(\alpha)``.
That is, small depends on the size of ``\alpha`` and the
derivative at ``\alpha``. The former is handled by both relative and absolute
tolerances (`rtol` and `atol`). The size of ``f'(\alpha)`` is problem
dependent, and can be accommodated by larger relative or absolute
tolerances.
When an algorithm returns a `NaN` value, it terminates. This can happen near convergence or may indicate some issue. On early termination, convergence in the size of ``f(x_n)`` is checked with a relaxed tolerance when `strict=false` is specified (the default).
!!! note "Relative tolerances and assessing `f(x) ≈ 0`"
The use of relative tolerances to check if ``f(x) \approx 0`` can lead to spurious answers where ``x`` is very large (and hence the relative tolerance is large). The return of very large solutions should be checked against expectations of the answer.
Deciding if an algorithm won't terminate is done through counting the number of iterations performed; the default is adjusted through `maxiters`. As most algorithms are superlinear, convergence happens rapidly near the answer, but all the algorithms can take a while to get near an answer, even when progress is made. As such, the maximum must be large enough to consider linear cases, yet small enough to avoid too many steps when an algorithm is non-convergent.
Convergence criteria are method dependent and are determined by the `Roots.assess_convergence` methods.
```@docs
Roots.assess_convergence
```
Default tolerances are specified through the `Roots.default_tolerances` methods.
```@docs
Roots.default_tolerances
```
## Simplified versions
The abstractions and many checks for convergence employed by `find_zero` have a performance cost. When that is a critical concern, there are several "simple" methods provided which can offer improved performance.
```@docs
Roots.secant_method
Roots.bisection
Roots.muller
Roots.newton
Roots.dfree
```
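A usage sketch (results are approximate):

```julia
using Roots
Roots.secant_method(sin, (3, 4))  # ≈ π
Roots.bisection(sin, 3, 4)        # ≈ π
```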
## MATLAB interface
The initial naming scheme used `fzero` instead of `find_zero`, following the name of the MATLAB function [fzero](https://www.mathworks.com/help/matlab/ref/fzero.html). This interface is not recommended, but, for now, still maintained.
```@docs
fzero
fzeros
```
## Tracking iterations
It is possible to add the keyword argument `verbose=true` when calling the `find_zero` function to get detailed information about the solution and data from each iteration. To save this data, a `Tracks` object may be passed to the `tracks` keyword argument.
----
```@docs
Roots.Tracks
```
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 2.2.1 | 3a7c7e5c3f015415637f5debdf8a674aa2c979c4 | docs | 42266 | # An overview of `Roots`
The `Roots` package contains simple routines for finding zeros of
continuous scalar functions of a single real variable. A zero of $f$
is a value $c$ where $f(c) = 0$. The basic interface is through the
function `find_zero`, which through multiple dispatch can handle many different cases.
The [NonlinearSolve](https://github.com/JuliaComputing/NonlinearSolve.jl) package provides an alternative.
In the following, we will use `ForwardDiff` to take derivatives.
```jldoctest roots
julia> using Roots, ForwardDiff
```
## Basic usage
Consider the polynomial function ``f(x) = x^5 - x + 1/2``. As a polynomial, its roots, or zeros, could be identified with the `roots` function of the `Polynomials` package. However, even that function uses a numeric method to identify the values, as no solution with radicals is available. That is, even for polynomials, non-linear root finders are needed to solve ``f(x)=0``. (Though polynomial root-finders can exploit certain properties not available for general non-linear functions.)
The `Roots` package provides a variety of algorithms for this task. In this quick overview, only the default ones are illustrated.
For the function ``f(x) = x^5 - x + 1/2`` a simple plot over ``[-2,2]`` will show a zero somewhere **between** ``-1.5`` and ``-0.5`` and two zeros near ``0.6``. ("Between", as the continuous function has different signs at ``-1.5`` and ``-0.5``.)
For the zero between two values at which the function changes sign, a
bracketing method is useful, as bracketing methods are guaranteed to
converge for continuous functions by the intermediate value
theorem. A bracketing algorithm will be used when the initial data is
passed as a tuple:
```jldoctest find_zero
julia> using Roots
julia> f(x) = x^5 - x + 1/2
f (generic function with 1 method)
julia> find_zero(f, (-3/2, -1/2)) ≈ -1.0983313019186336
true
```
The default algorithm is guaranteed to have an answer nearly as accurate as is possible given the limitations of floating point computations.
For the zeros **near** a point, a non-bracketing method is often used, as generally the algorithms are more efficient and can be used in cases where a zero does not cross the ``x`` axis. Passing just an initial guess will dispatch to such a method:
```jldoctest find_zero
julia> find_zero(f, 0.6) ≈ 0.550606579334135
true
```
This finds the answer to the left of the starting point. To get the other nearby zero, a starting point closer to the answer can be used.
However, an initial graph might convince one that any of the up-to-``5`` real roots will occur between ``-2`` and ``2``. The `find_zeros` function uses heuristics and a few of the algorithms to identify all zeros between the specified range. Here the method successfully identifies all ``3``:
```jldoctest find_zero
julia> find_zeros(f, -2, 2)
3-element Vector{Float64}:
-1.0983313019186334
0.5506065793341349
0.7690997031778959
```
This shows the two main entry points of `Roots`: `find_zero` to locate a zero between or near values using one of many methods and `find_zeros` to heuristically identify all zeros within some interval.
## Bracketing methods
For a function $f$ (univariate, real-valued) a *bracket* is a pair $ a < b $
for which $f(a) \cdot f(b) < 0$. That is the function values have
different signs at $a$ and $b$. If
$f$ is a continuous function this ensures
([Bolzano](https://en.wikipedia.org/wiki/Intermediate_value_theorem))
there will be a zero in the interval $[a,b]$. If $f$ is not
continuous, then there must be a point $c$ in $[a,b]$ where the function
"jumps" over $0$.
Such values can be found, up to floating point
round off. That is, given `f(a) * f(b) < 0`, a value `c` with `a < c < b` can be
found where either `f(c) == 0.0` or `f(prevfloat(c)) * f(c) < 0` or
`f(c) * f(nextfloat(c)) < 0`.
To illustrate, consider the function $f(x) = \cos(x) - x$. From trigonometry
we can see readily that $[0,\pi/2]$ is a bracket.
The `Roots` package includes the bisection algorithm through
`find_zero`. We use a structure for which `extrema` returns `(a,b)`
with `a < b`, such as a vector or tuple, to specify the initial
condition and `Bisection()` to specify the algorithm:
```jldoctest roots
julia> f(x) = cos(x) - x;
julia> x = find_zero(f, (0, pi/2), Bisection())
0.7390851332151607
julia> x, f(x)
(0.7390851332151607, 0.0)
```
For this function we see that `f(x)` is `0.0`.
Functions may be parameterized. The following is a similar function as above, still having ``(0, \pi/2)`` as a bracket when ``p>0``. By passing in values of `p` to `find_zero`, different, related problems may be solved.
```jldoctest roots
julia> g(x, p=1) = cos(x) - x/p;
julia> x0, M = (0, pi/2), Bisection()
((0, 1.5707963267948966), Bisection())
julia> find_zero(g, x0, M) # as before, solve cos(x) - x = 0 using default p=1
0.7390851332151607
julia> find_zero(g, x0, M; p=2) # solves cos(x) - x/2 = 0
1.0298665293222589
julia> find_zero(g, x0, M, 2) # positional argument; useful with broadcasting
1.0298665293222589
```
----
Next consider $f(x) = \sin(x)$. A known zero is $\pi$. Trigonometry
tells us that $[\pi/2, 3\pi/2]$ will be a bracket. The calling
pattern for `find_zero` is `find_zero(f, x0, M; kwargs...)`, where
`kwargs` can specify details about parameters for the problem or
tolerances for the solver. In this call `Bisection()` is not
specified, as it will be the default when the initial value is
specified as a bracket (not a single number) over `Float64` values:
```jldoctest roots
julia> f(x) = sin(x);
julia> x = find_zero(f, (pi/2, 3pi/2))
3.141592653589793
julia> x, f(x)
(3.141592653589793, 1.2246467991473532e-16)
```
This value of `x` does not exactly produce a zero, however, it is as close as can be:
```jldoctest roots
julia> f(prevfloat(x)) * f(x) < 0.0 || f(x) * f(nextfloat(x)) < 0.0
true
```
That is, at `x` the function is changing sign.
From a mathematical perspective, a zero is guaranteed for a
*continuous* function. However, the computer algorithm doesn't assume
continuity, it just looks for changes of sign. As such, the algorithm
will identify discontinuities, not just zeros. For example:
```jldoctest roots
julia> find_zero(x -> 1/x, (-1, 1))
0.0
```
The endpoints and function values can even be infinite for the default `Bisection` algorithm over the standard floating point types:
```jldoctest roots
julia> find_zero(x -> Inf*sign(x), (-Inf, Inf)) # Float64 only
0.0
```
The basic algorithm used for bracketing when the values are simple
floating point values is a modification of the bisection method, where
the midpoint is taken over the bit representation of `a` and `b`.
For big float values, bisection is the default (with non-zero
tolerances), but its use is definitely not suggested. Simple
bisection over `BigFloat` values can take *many* more
iterations. For the problem of finding a zero of `sin` in the interval
`(big(3), big(4))`, the default bisection takes ``252`` iterations,
whereas the `A42` method takes ``4``.
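The iteration counts can be inspected by passing `verbose=true`, for example:

```julia
find_zero(sin, (big(3), big(4)), A42(); verbose=true)  # ≈ π; prints the trace
```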
The algorithms of Alefeld, Potra, and Shi and the well known algorithm
of Brent, also start with a bracketing algorithm. For many problems
these will take far fewer steps than the bisection algorithm to reach
convergence. These may be called directly. For example,
```jldoctest roots
julia> find_zero(sin, (3,4), A42())
3.141592653589793
```
By default, bisection will converge to machine tolerance. This may
provide more accuracy than desired. A tolerance may be specified to
terminate early, thereby utilizing fewer resources. For example, the following
takes ``4`` steps to reach accuracy to within $1/16$ (without specifying `xatol` it uses
``53`` steps):
```jldoctest roots
julia> rt = find_zero(sin, (3.0, 4.0), xatol=1/16)
3.125
julia> rt - pi
-0.016592653589793116
```
## Non-bracketing methods
Bracketing methods have guaranteed convergence, but in general may
require many more function calls than are otherwise needed to produce
an answer and not all zeros of a function may be bracketed. If a good
initial guess is known, then the `find_zero` function provides an
interface to some different iterative algorithms that are more
efficient. Unlike bracketing methods, these algorithms may not
converge to the desired root if the initial guess is not well chosen.
The default algorithm is modeled after an algorithm used for
[HP-34 calculators](http://www.hpl.hp.com/hpjournal/pdfs/IssuePDFs/1979-12.pdf). This
algorithm is designed to be more forgiving of the quality of the
initial guess at the cost of possibly performing more steps than
other algorithms, as if the algorithm encounters a bracket, a bracketing method
will be used (an efficient one, though).
For example, the answer to our initial problem is visibly seen from a
graph to be near 1. Given this,
the zero is found through:
```jldoctest roots
julia> f(x) = cos(x) - x;
julia> x = find_zero(f , 1)
0.7390851332151607
julia> x, f(x)
(0.7390851332151607, 0.0)
```
For the polynomial $f(x) = x^3 - 2x - 5$, an initial guess of $2$ seems reasonable:
```jldoctest roots
julia> f(x) = x^3 - 2x - 5;
julia> x = find_zero(f, 2)
2.0945514815423265
julia> f(x), sign(f(prevfloat(x)) * f(x)), sign(f(x) * f(nextfloat(x)))
(-8.881784197001252e-16, 1.0, -1.0)
```
For even more precision, `BigFloat` numbers can be used
```jldoctest roots
julia> x = find_zero(sin, big(3))
3.141592653589793238462643383279502884197169399375105820974944592307816406286198
julia> x, sin(x), x - pi
(3.141592653589793238462643383279502884197169399375105820974944592307816406286198, 1.096917440979352076742130626395698021050758236508687951179005716992142688513354e-77, 0.0)
```
### Higher-order methods
The default call to `find_zero` uses a first order method and then
possibly bracketing, which potentially involves more function
calls than necessary. There may be times where a more efficient algorithm is sought.
For such, a higher-order method might be better suited. There are
algorithms `Order1` (secant method), `Order2`
([Steffensen](http://en.wikipedia.org/wiki/Steffensen's_method)),
`Order5`, `Order8`, and `Order16`. The order $1$ or $2$ methods are
generally quite efficient in terms of steps needed over floating point
values. The even-higher-order ones are potentially useful when more
precision is used. These algorithms are accessed by specifying the
method after the initial starting point:
```jldoctest roots
julia> f(x) = 2x - exp(-x);
julia> x = find_zero(f, 1, Order1())
0.3517337112491958
julia> x, f(x)
(0.3517337112491958, -1.1102230246251565e-16)
```
Similarly,
```jldoctest roots
julia> f(x) = (x + 3) * (x - 1)^2;
julia> x = find_zero(f, -2, Order2())
-3.0
julia> x, f(x)
(-3.0, 0.0)
```
```jldoctest roots
julia> x = find_zero(f, 2, Order8())
1.0000000131073141
julia> x, f(x)
(1.0000000131073141, 6.87206736323862e-16)
```
Starting at ``2`` the algorithm converges towards ``1``, showing that zeros need not be simple zeros to be found. A simple zero, $c,$ has $f(x) = (x-c) \cdot g(x)$ where $g(c) \neq 0$.
Generally speaking, non-simple zeros are
expected to take many more function calls, as the methods are no
longer super-linear. This is the case here, where `Order2` uses $51$
function calls, `Order8` uses $42$, and `Order0` takes $80$. The `Roots.Order2B` method is useful
when a multiplicity is expected; on this problem it takes ``17`` function calls.
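For example, continuing with `f(x) = (x + 3) * (x - 1)^2` and the starting point ``2`` (a sketch):

```julia
find_zero(f, 2, Roots.Order2B())  # ≈ 1.0, using fewer function calls
```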
To investigate an algorithm and its convergence, the argument
`verbose=true` may be specified. A `Roots.Tracks` object can be used to store the intermediate values.
For some functions, adjusting the default tolerances may be necessary
to achieve convergence. The tolerances include `atol` and `rtol`, which are
used to check if $f(x_n) \approx 0$;
`xatol` and `xrtol`, to check if $x_n \approx x_{n-1}$; and `maxiters` to limit the
number of iterations in the algorithm.
## Classical methods
The package provides some classical methods for root finding, such as
`Roots.Newton`, `Roots.Halley`, and `Roots.Schroder`. (Currently
these are not exported, so must be prefixed with the package name to
be used.) We can see how each works on a problem studied by Newton.
Newton's method uses the function and its derivative:
```jldoctest roots
julia> f(x) = x^3 - 2x - 5;
julia> fp(x) = 3x^2 - 2;
julia> x = Roots.find_zero((f, fp), 2, Roots.Newton())
2.0945514815423265
julia> x, f(x)
(2.0945514815423265, -8.881784197001252e-16)
```
The functions are specified using a tuple, or through a function returning `(f(x), f(x)/f'(x))`. The latter is convenient when computing `f'` shares work with computing `f`, so evaluating the two together is cheaper than evaluating them separately.
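A sketch of the second form (here the shared work between `f` and `f'` is trivial, so this is purely illustrative):

```julia
fΔ(x) = (x^3 - 2x - 5, (x^3 - 2x - 5) / (3x^2 - 2))  # returns (f(x), f(x)/f'(x))
find_zero(fΔ, 2, Roots.Newton())  # ≈ 2.0945514815423265
```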
Halley's method has cubic convergence, as compared to Newton's
quadratic convergence. It uses the second derivative as well:
```jldoctest roots
julia> fpp(x) = 6x;
julia> x = Roots.find_zero((f, fp, fpp), 2, Roots.Halley())
2.0945514815423265
julia> x, f(x), sign(f(prevfloat(x)) * f(nextfloat(x)))
(2.0945514815423265, -8.881784197001252e-16, -1.0)
```
(Halley's method takes 3 steps, Newton's 4, but Newton's uses 5
function calls to Halley's 10.)
For many functions, their derivatives can be computed automatically. The
`ForwardDiff` package provides a means. Here we define an operator `D`
to compute a derivative:
```jldoctest roots
julia> function D(f, n::Int=1)
n <= 0 && return f
n == 1 && return x -> ForwardDiff.derivative(f,float(x))
D(D(f,1),n-1)
end
D (generic function with 2 methods)
julia> dfᵏs(f,k) = ntuple(i->D(f,i-1), Val(k+1)) # (f, f′, f′′, …)
dfᵏs (generic function with 1 method)
```
```jldoctest roots
julia> find_zero((f,D(f)), 2, Roots.Newton())
2.0945514815423265
```
Or, for Halley's method:
```jldoctest roots
julia> find_zero((f, D(f), D(f,2)), 2, Roots.Halley())
2.0945514815423265
```
The family of solvers implemented in `Roots.LithBoonkkampIJzerman(S,D)` where `S` is the number of prior points used to generate the next, and `D` is the number of derivatives used, has both the secant method (`S=2, D=0`) and Newton's method (`S=1, D=1`) as members, but also provides others. By adding more memory or adding more derivatives the convergence rate increases, at the expense of more complicated expressions or more function calls per step.
```
julia> find_zero(dfᵏs(f, 0), 2, Roots.LithBoonkkampIJzerman(3,0)) # like secant
2.0945514815423265
julia> find_zero(dfᵏs(f, 1), 2, Roots.LithBoonkkampIJzerman(2,1)) # like Newton
2.0945514815423265
julia> find_zero(dfᵏs(f, 2), 2, Roots.LithBoonkkampIJzerman(2,2)) # like Halley
2.0945514815423265
```
## The problem-algorithm-solve interface
The problem-algorithm-solve interface is a pattern popularized in `Julia` by the `DifferentialEquations.jl` suite of packages. The pattern consists of setting up a *problem* then *solving* the problem by specifying an *algorithm*. This is very similar to what is specified in the `find_zero(f, x0, M)` interface where `f` and `x0` specify the problem, `M` the algorithm, and `find_zero` calls the solver.
To break this up into steps, `Roots` has the type `ZeroProblem` and methods for `init`, `solve`, and `solve!` from the `CommonSolve.jl` package.
Consider solving ``\sin(x) = 0`` using the `Secant` method starting with the interval ``[3,4]``.
```jldoctest roots
julia> f(x) = sin(x);
julia> x0 = (3, 4)
(3, 4)
julia> M = Secant()
Secant()
julia> Z = ZeroProblem(f, x0)
ZeroProblem{typeof(f), Tuple{Int64, Int64}}(f, (3, 4))
julia> solve(Z, M)
3.141592653589793
```
Changing the method is easy:
```jldoctest roots
julia> solve(Z, Order2())
3.1415926535897944
```
The `solve` interface works with parameterized functions, as well:
```jldoctest roots
julia> g(x, p=1) = cos(x) - x/p;
julia> Z = ZeroProblem(g, (0.0, pi/2))
ZeroProblem{typeof(g), Tuple{Float64, Float64}}(g, (0.0, 1.5707963267948966))
julia> solve(Z, Secant(), 2) # uses p=2 as a positional argument
1.0298665293222589
julia> solve(Z, Bisection(); p=3, xatol=1/16) # p=3; uses keywords for position and tolerances
1.1959535058744393
```
Positional arguments are useful for broadcasting over several parameter values.
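For example, wrapping the problem and method in `Ref` so they broadcast as scalars (a sketch using `Z` from above):

```julia
solve.(Ref(Z), Ref(Bisection()), [1, 2, 3])  # one zero per parameter value
```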
## Examples
### Intersections
A [discourse](https://discourse.julialang.org/t/help-to-plot-a-surface-plot-with-infinite-roots/98291) post involved finding the roots of ``\tan(x) = x/(B(\Lambda x^2 - 1))``. As the right hand side decays, we can see that for each positive arm of the periodic tangent function, there will be one intersection point in ``(k\pi, (k+1/2)\pi)`` for each ``k=0,1,\dots``. The standard way to find when $f(x) = g(x)$ with this package is to define an auxiliary function $h(x) = f(x) - g(x)$, as below:
```jldoctest roots
julia> k, B, Λ = 3, 1, 1;
julia> f(x) = tan(x); g(x) = x/(B*(Λ*x^2 - 1));
julia> h(x) = f(x) - g(x)
h (generic function with 1 method)
julia> x = find_zero(h, (k*pi, (k + 1/2)*pi)); x, h(x)
(9.530477156207574, 8.326672684688674e-16)
```
As of version 1.9 of `Julia`, an extension is provided so that when `SymPy` is loaded, an equation can be used to specify the left and right hand sides, as with:
```
using SymPy
@syms x
find_zero(tan(x) ~ x/(B*(Λ*x^2 - 1)), (k*pi, (k + 1/2)*pi))
```
### Inverse functions
The `find_zero` function can be used to identify inverse functions. Suppose ``f`` is a monotonic function on ``[a,b]``. Then an inverse function solves ``y = f(x)`` for ``x`` given a ``y``. This will do that task and return values in a function form:
```@example roots
function inverse_function(f, a, b, args...; kwargs...)
fa, fb = f(a), f(b)
m, M = fa < fb ? (fa, fb) : (fb, fa)
y -> begin
@assert m ≤ y ≤ M
find_zero(x ->f(x) - y, (a,b), args...; kwargs...)
end
end
```
The fact that ``f`` is monotonic, ensures that a bracket of ``[a,b]`` can be used supposing ``y`` is between ``f(a)`` and ``f(b)``, so no guess is needed.
Here we numerically find the inverse function of ``f(x) = x - \sin(x)``:
```@example roots
using Plots, Roots;
f(x) = x - sin(x)
a, b = 0, 5pi
plot(inverse_function(f, a, b), f(a), f(b))
savefig("inversefunction.svg"); nothing # hide
```

### Finding critical points
The `D` function, defined above, makes it straightforward to find critical points
(typically where the derivative is $0$ but also where it is undefined). For example, the critical
point of the function $f(x) = 1/x^2 + x^3, x > 0$ near $1.0$ is where
the derivative is $0$ and can be found through:
```jldoctest roots
julia> f(x) = 1/x^2 + x^3;
julia> find_zero(D(f), 1)
0.9221079114817278
```
For more complicated expressions, `D` may need some technical
adjustments to be employed. In this example, we have a function that
models the flight of an arrow on a windy day:
```jldoctest roots
julia> function flight(x, theta)
k = 1/2
a = 200*cosd(theta)
b = 32/k
tand(theta)*x + (b/a)*x - b*log(a/(a-x))
end
flight (generic function with 1 method)
```
The total distance flown is when `flight(x) == 0.0` for some `x > 0`:
This can be solved for different `theta` with `find_zero`. In the
following, we note that `log(a/(a-x))` will have an asymptote at `a`,
so we start our search at `a-5`:
```jldoctest roots
julia> function howfar(theta)
a = 200*cosd(theta)
find_zero(x -> flight(x, theta), a-5) # starting point has type determined by `theta`.
end
howfar (generic function with 1 method)
```
To visualize the trajectory if shot at ``45`` degrees, we would have:
```@example roots
using Roots, ForwardDiff # hide
using Plots;
flight(x, theta) = (k = 1/2; a = 200*cosd(theta); b = 32/k; tand(theta)*x + (b/a)*x - b*log(a/(a-x)))
howfar(theta) = (a = 200*cosd(theta); find_zero(x -> flight(x, theta), a-5))
howfarp(t) = ForwardDiff.derivative(howfar,t)
theta = 45
tstar = find_zero(howfarp, 45)
plot(x -> flight(x, theta), 0, howfar(theta))
savefig("flight.svg"); nothing #hide
```

To maximize the range we solve for the lone critical point of `howfar`
within reasonable starting points.
As of version `v"1.9"` of `Julia`, the automatic differentiation provided by
`ForwardDiff` will bypass working through a call to `find_zero`. Prior
to this version, automatic differentiation will work *if* the
initial point has the proper type (depending on an expression of
`theta` in this case). As we use `200*cosd(theta)-5` for a starting
point, this is satisfied.
```jldoctest roots
julia> (tstar = find_zero(D(howfar), 45)) ≈ 26.2623089
true
```
This graph would show the differences in the trajectories:
```@example roots
plot(x -> flight(x, 45), 0, howfar(45))
plot!(x -> flight(x, tstar), 0, howfar(tstar))
#show(current()) # hide
savefig("flight-diff.svg"); nothing # hide
```

## Sensitivity
In the last example, the question of how the distance varies with the angle is clearly important.
In general, for functions with parameters, ``f(x,p)``, derivatives with respect to the ``p`` variable(s) may be of interest.
A first attempt, as shown above, may be to try and auto-differentiate the output of `find_zero`. For example:
```@example roots
f(x, p) = x^2 - p # p a scalar
p = 2
```
```@example roots
F(p) = find_zero(f, one(p), Order1(), p)
ForwardDiff.derivative(F, p)
```
Prior to version `v"1.9"` of `Julia`,
there were issues with this approach (though in this case it finds the correct answer, as will be seen): a) it is not as performant as the approach we will discuss next; b) the subtle use of `one(p)` for the starting point is needed to ensure the type of the ``x`` values is correct; and c) not all algorithms will work, in particular `Bisection` is not amenable to this approach.
```@example roots
F(p) = find_zero(f, (zero(p), one(p)), Roots.Bisection(), p)
ForwardDiff.derivative(F, 1/2)
```
This will be `0.0` if the differentiation is propagated through the algorithm.
With `v"1.9"` of `Julia` or later, the derivative is calculated correctly through the method described below.
Using the implicit function theorem and following these [notes](https://math.mit.edu/~stevenj/18.336/adjoint.pdf), this [paper](https://arxiv.org/pdf/2105.15183.pdf) on the adjoint method, or the methods more generally applied in the [ImplicitDifferentiation](https://github.com/gdalle/ImplicitDifferentiation.jl) package we can auto-differentiate without pushing that machinery through `find_zero`.
The solution, ``x^*(p)``, provided by `find_zero` depends on the parameter(s), ``p``. Notationally,
```math
f(x^*(p), p) = 0
```
The implicit function theorem has conditions guaranteeing the
existence and differentiability of ``x^*(p)``. Assuming these hold, taking the gradient (derivative) in ``p`` of both sides, gives by the chain rule:
```math
\frac{\partial}{\partial_x}f(x^*(p),p)
\frac{\partial}{\partial_p}x^*(p) +
\frac{\partial}{\partial_p}f(x^*(p),p) I = 0.
```
Since the partial in ``x`` is a scalar quantity, we can divide to solve:
```math
\frac{\partial}{\partial_p}x^*(p) =
-\frac{
\frac{\partial}{\partial_p}f(x^*(p),p)
}{
\frac{\partial}{\partial_x}f(x^*(p),p)
}
```
For example, using `ForwardDiff`, we have:
```@example roots
xᵅ = find_zero(f, 1, Order1(), p)
fₓ = ForwardDiff.derivative(x -> f(x, p), xᵅ)
fₚ = ForwardDiff.derivative(p -> f(xᵅ, p), p)
- fₚ / fₓ
```
This problem can be solved analytically, of course, to see ``x^\alpha(p) = \sqrt{p}``, so we can easily compare:
```@example roots
ForwardDiff.derivative(sqrt, 2)
```
The use with a vector of parameters is similar, only `derivative` is replaced by `gradient` for the `p` variable:
```@example roots
f(x, p) = x^2 - p[1]*x + p[2]
p = [3.0, 1.0]
x₀ = 1.0
xᵅ = find_zero(f, x₀, Order1(), p)
fₓ = ForwardDiff.derivative(x -> f(x, p), xᵅ)
fₚ = ForwardDiff.gradient(p -> f(xᵅ, p), p)
- fₚ / fₓ
```
The package provides a package extension to use `ForwardDiff` directly to find derivatives or gradients, as above, with version `v"1.9"` or later of `Julia`, and a `ChainRulesCore.rrule` and `ChainRulesCore.frule` implementation that should allow automatic differentiation packages relying on `ChainRulesCore` (e.g., `Zygote`) to differentiate in `p` using the same approach. (Thanks to `@devmotion` for much help here.)
## Potential issues
The higher-order methods are basically various derivative-free
versions of Newton's method (which has update step $x - f(x)/f'(x)$). For
example, Steffensen's method (`Order2()`) essentially replaces $f'(x)$ with
$(f(x + f(x)) - f(x))/f(x)$. This is a forward-difference
approximation to the derivative with "$h$" being $f(x)$, which
presumably is close to $0$ already. The methods with higher order
combine this with different secant line approaches that minimize the
number of function calls. These higher-order methods can be
susceptible to some of the usual issues found with Newton's method:
poor initial guess, small first derivative, or large second derivative
near the zero.
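A sketch of a single Steffensen step makes the substitution concrete (illustration only):

```julia
function steffensen_step(f, x)
    fx = f(x)
    fp = (f(x + fx) - fx) / fx   # forward difference with "h" = f(x)
    x - fx / fp
end

steffensen_step(x -> x^2 - 2, 1.5)  # one step toward √2 ≈ 1.4142...
```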
When the first derivative is near $0$, the value of the next step can
be quite different, as the next step generally tracks the intersection point of
the tangent line. We see that starting at $\pi/2$ causes this search
to be problematic:
```jldoctest roots
julia> try find_zero(sin, pi/2, Order1()) catch err "Convergence failed" end
"Convergence failed"
```
(Whereas starting at `pi/2 + 0.3`--where the slope of the tangent
line points sufficiently towards $\pi$--leads to convergence at $\pi$.)
For a classic example where a large second derivative is
the issue, we have $f(x) = x^{1/3}$:
```jldoctest roots
julia> f(x) = cbrt(x);
julia> x = try find_zero(f, 1, Order2()) catch err "Convergence failed" end # all of 2, 5, 8, and 16 fail or diverge towards infinity
"Convergence failed"
```
However, the default finds the root here, as a bracket is identified:
```jldoctest roots
julia> x = find_zero(f, 1)
0.0
julia> x, f(x)
(0.0, 0.0)
```
Finally, for many functions, all of these methods need a good initial
guess. For example, the polynomial function $f(x) = x^5 - x - 1$ has
its one real zero near $1.16$.
If we start far from the zero, convergence may happen, but it isn't
guaranteed:
```jldoctest roots
julia> f(x) = x^5 - x - 1;
julia> x0 = 0.1
0.1
julia> try find_zero(f, x0) catch err "Convergence failed" end
"Convergence failed"
```
A graph shows the issue. Running the following shows ``15`` steps of Newton's
method, the other algorithms being somewhat similar:
```@example roots
f(x) = x^5 - x - 1; # hide
D(f) = x -> ForwardDiff.derivative(f,float(x)) # hide
xs = [0.1] # x0
n = 15
for i in 1:(n-1) push!(xs, xs[end] - f(xs[end])/D(f)(xs[end])) end
ys = [zeros(Float64,n)';map(f, xs)'][1:2n]
xs = xs[repeat(collect(1:n), inner=[2], outer=[1])]
plot(f, -1.25, 1.5, linewidth=3, legend=false)
plot!(zero, -1.25, 1.5, linewidth=3)
plot!(xs, ys)
#show(current()) # hide
savefig("newton.svg"); nothing # hide
```

Graphically only a few of the steps are discernible, as the function's relative maximum
causes a trap for this algorithm. Starting to the right of the
relative minimum--nearer the zero--would avoid this trap. The default
method employs a trick to bounce out of such traps, though it doesn't
always work.
### Tolerances
Mathematically solving for a zero of a nonlinear function may be
impossible, so numeric methods are utilized. However, using floating
point numbers to approximate the real numbers leads to some nuances.
For example, consider the polynomial $f(x) = (3x-1)^5$ with one zero
at $1/3$ and its equivalent expression $f1(x) = -1 + 15\cdot x - 90\cdot
x^2 + 270\cdot x^3 - 405\cdot x^4 + 243\cdot x^5$. Mathematically
these are the same, however not so when evaluated in floating
point. Here we look at the 21 floating point numbers near $1/3$:
```jldoctest roots
julia> f(x) = (3x-1)^5;
julia> f1(x) = -1 + 15*x - 90*x^2 + 270*x^3 - 405*x^4 + 243*x^5;
julia> above = accumulate((x,y) -> nextfloat(x), 1:10, init=1/3);
julia> below = accumulate((x,y) -> prevfloat(x), 1:10, init=1/3);
julia> ns = sort([below...,1/3, above...]); # floating point numbers around 1/3
julia> maximum(abs.(f.(ns) - f1.(ns))) < 1e-14
true
```
The exponents are:
```
julia> zs .|> abs .|> log10 .|> x -> floor(Int, x)
21-element Vector{Int64}:
-16
-16
-16
-15
-15
-16
-76
-77
⋮
-15
-16
-75
```
We see the function values are close for each point, as the maximum difference
is like $10^{-15}$. This is roughly as expected, where even one
addition may introduce a relative error as big as $2\cdot 10^{-16}$, and here
there are several such operations.
!!! note
(These values are subject to the vagaries of floating point evaluation, so may differ depending on the underlying computer architecture.)
Generally this variation is not even a thought, as the differences are generally
negligible, but when we want to identify if a value is zero, these
small differences might matter. Here we look at the signs of the
function values for a run of the above:
```julia
julia> fs = sign.(f.(ns));
julia> f1s = sign.(f1.(ns));
julia> [ns.-1/3 fs f1s]
21×3 Matrix{Float64}:
-5.55112e-16 -1.0 -1.0
-4.996e-16 -1.0 -1.0
-4.44089e-16 -1.0 1.0
-3.88578e-16 -1.0 1.0
-3.33067e-16 -1.0 1.0
-2.77556e-16 -1.0 1.0
-2.22045e-16 -1.0 -1.0
-1.66533e-16 -1.0 -1.0
-1.11022e-16 -1.0 1.0
-5.55112e-17 -1.0 1.0
0.0 0.0 -1.0
5.55112e-17 0.0 1.0
1.11022e-16 1.0 1.0
1.66533e-16 1.0 -1.0
2.22045e-16 1.0 -1.0
2.77556e-16 1.0 -1.0
3.33067e-16 1.0 1.0
3.88578e-16 1.0 1.0
4.44089e-16 1.0 1.0
4.996e-16 1.0 -1.0
5.55112e-16 1.0 0.0
```
Parsing this shows a few surprises. First, there are two zeros of
`f(x)` identified--not just one as expected mathematically--the
floating point value of `1/3` and the next largest floating point
number.
```
julia> findall(iszero, fs)
2-element Vector{Int64}:
11
12
```
For `f1(x)` there is only one zero, but it isn't the floating
point value for `1/3` but rather $10$ floating point numbers
away.
```
julia> findall(iszero, f1s)
1-element Vector{Int64}:
21
```
Further, there are several sign changes of the function values for `f1s`:
```
julia> findall(!iszero, diff(sign.(f1s)))
9-element Vector{Int64}:
2
6
8
10
11
13
16
19
20
```
There is no guarantee that a zero will be present, but for a
mathematical function that changes sign, there will be at least one
sign change.
With this in mind, an exact zero of `f` would be either where `iszero(f(x))` is true *or* where the function has a sign change (either `f(x)*f(prevfloat(x))<0` or `f(x)*f(nextfloat(x)) < 0`).
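In code, such a test might be sketched as (a hypothetical helper, not part of the package):

```julia
is_exact_zero(f, x) = iszero(f(x)) ||
    f(x) * f(prevfloat(x)) < 0 || f(x) * f(nextfloat(x)) < 0
```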
As mentioned, the default `Bisection()` method of `find_zero`
identifies such zeros for `f` provided an initial bracketing interval
is specified when `Float64` numbers are used. However, if a
mathematical function does not cross the $x$ axis at a zero, then
there is no guarantee the floating point values will satisfy either of
these conditions.
----
Now consider the function `f(x) = exp(x)-x^4`. The value `x=8.613169456441398` is a zero in this sense, as there is a change of sign:
```jldoctest roots
julia> f(x) = exp(x) - x^4;
julia> F(x) = sign(f(x));
julia> x=8.613169456441398
8.613169456441398
julia> F(prevfloat(x)), F(x), F(nextfloat(x))
(-1.0, -1.0, 1.0)
```
However, the value of `f(x)` is not as small as one might initially
expect for a zero:
```
julia> f(x), abs(f(x)/eps(x))
(-2.7284841053187847e-12, 1536.0)
```
The value `x` is an approximation to the actual mathematical zero,
call it $\alpha$. There is a difference between $f(\alpha)$ (the mathematical answer, which is $0$) and `f(x)` (the floating point answer). Roughly speaking we expect `f(x)` to be about $f(\alpha) + f'(\alpha)\cdot \delta$, where $\delta$ is the difference between `x` and $\alpha$. This difference will be on the scale of `abs(x) * eps()`, so all told we expect an answer to be in the range of $0$ plus or minus this value:
```jldoctest roots
julia> fp(x) = exp(x) - 4x^3; # the derivative
julia> fp(x) * abs(x) * eps()
5.637565490466956e-12
```
which is about what we see.
----
Bisection can be a slower method than others. For `Float64` values, `Bisection()` takes no more than 64 steps, but other methods may be able to converge to a zero in 4-5 steps (assuming good starting values are specified).
When fewer function calls are desirable, then checking for an
*approximate* zero may be preferred over assessing if a sign change
occurs, as generally that will take two additional function calls per
step. Besides, a sign change isn't guaranteed for all zeros. An approximate zero would be one where $f(x) \approx 0$.
By the above, we see that we must consider an appropriate
tolerance. The first example shows differences in floating point
evaluations from the mathematical ones might introduce errors on the
scale of `eps` regardless of the size of `x`. As seen in the second
example, the difference between the floating point approximation to
the zero and the zero introduces a potential error *proportional* to
the size of `x`. So a tolerance might consider both types of
errors. An absolute tolerance is used as well as a relative tolerance,
so a check might look like:
```verbatim
abs(f(x)) < max(atol, abs(x) * rtol)
```
This is different from `Julia`'s `isapprox(f(x), 0.0)`, as that would use `abs(f(x))` as the multiplier, which renders a relative tolerance useless for this question.
One issue with relative tolerances is that for functions with
sublinear growth, extremely large values will be considered zeros.
Returning to an earlier example, with `Thukral8` we have a misidentified zero:
```jldoctest roots
julia> find_zero(cbrt, 1, Roots.Thukral8())
1.725042287244107e23
```
The algorithm rapidly marches off towards infinity, so the relative
tolerance $\approx |x| \cdot \epsilon$ used to check if $f(x) \approx
0$ is large compared to the far-from-zero value of $f(x)$.
Either the users must be educated about this possibility, or the
relative tolerance should be set to $0$. In that case, the absolute
tolerance must be relatively generous. A conservative choice of
absolute tolerance might be `sqrt(eps())`, or about `1e-8`,
essentially the one made in SciPy.
Though this tolerance won't be able to work for really large values:
```
julia> find_zero(x -> sqrt(eps()) - eps(x), (0,Inf))
9.981132799999999e7
```
This is **not** the choice made in `Roots`. The fact that bisection can
produce zeros as exact as possible, and the fact that the error in
function evaluation, $f'(x)|x|\epsilon$, is not typically on the scale
of `1e-8`, leads to a desire for more precision, if available.
In `Roots`, the faster algorithms use a check on both the size of
`f(xn)` and the size of the difference between the last two `xn` values. The check on `f(xn)`
is done with a tight tolerance, as is the check on $x_n \approx
x_{n-1}$. If the function values get close to zero, an
approximate zero is declared. Further, if the $x$ values get close to each other
*and* the function value is close to zero with a *relaxed* tolerance,
then an approximate zero is declared. In practice this seems to work
reasonably well. The relaxed tolerance uses the cube root of the
absolute and relative tolerances.
## Searching for all zeros in an interval
The methods described above are used to identify one of possibly
several zeros. The `find_zeros` function searches the interval $(a,b)$
for all zeros of a function $f$. It is straightforward to use:
```jldoctest roots
julia> f(x) = exp(x) - x^4;
julia> a, b = -10, 10
(-10, 10)
julia> zs = find_zeros(f, a, b)
3-element Vector{Float64}:
-0.8155534188089606
1.4296118247255556
8.613169456441398
```
The search interval, $(a,b)$, is specified either through two
arguments or through a single argument using a structure, such as a
tuple or vector, where `extrema` returns two distinct values in
increasing order. It is assumed that neither endpoint is a zero.
----
The algorithm used to search for all zeros in an interval is confounded by a few things:
* too many zeros in the interval $(a,b)$
* nearby zeros ("nearby" depends on the size of $(a,b)$, should this be very wide)
The algorithm is adaptive, so that it can succeed when there are many
zeros, but it may be necessary to increase `no_pts` from the default
of 12, at the cost of possibly taking longer for the search.
Here the algorithm identifies all the zeros, despite there being several:
```jldoctest roots
julia> f(x) = cos(x)^2 + cos(x^2); a,b = 0, 10;
julia> rts = find_zeros(f, a, b);
julia> length(rts)
32
```
For nearby zeros, the algorithm does pretty well, though it isn't
perfect.
Here we see for $f(x) = \sin(1/x)$--with infinitely many zeros around
$0$--it finds many:
```jldoctest roots
julia> f(x) = iszero(x) ? NaN : sin(1/x); # avoid sin(Inf) error
julia> rts = find_zeros(f, -1, 1);
julia> length(rts) # 88 zeros identified
88
```
The function, $f(x) = (x-0.5)^3 \cdot (x-0.499)^3$, looks *too* much like
$g(x) = x^6$ to `find_zeros` for success, as the two zeros are very nearby:
```jldoctest roots
julia> f(x) = (x-0.5)^3 * (x-0.499)^3;
julia> find_zeros(f, 0, 1)
1-element Vector{Float64}:
0.5
```
The issue here isn't *just* that the algorithm can't identify zeros
within $0.001$ of each other, but that the high power makes many
nearby values approximately zero.
The algorithm will have success when the powers are smaller
```jldoctest roots
julia> f(x) = (x-0.5)^2 * (x-0.499)^2;
julia> find_zeros(f, 0, 1)
2-element Vector{Float64}:
0.49899999999999994
0.5
```
It can have success for closer pairs of zeros:
```jldoctest roots
julia> f(x) = (x-0.5) * (x - 0.49999);
julia> find_zeros(f, 0, 1)
2-element Vector{Float64}:
0.49999
0.5
```
Combinations of large (even) multiplicity zeros or very nearby
zeros, can lead to misidentification.
### IntervalRootFinding
The [IntervalRootFinding](https://github.com/JuliaIntervals/IntervalRootFinding.jl) package rigorously identifies isolating intervals for the zeros of a function. This example, from that package's README, is used to illustrate the differences:
```julia
julia> using IntervalArithmetic, IntervalRootFinding, Roots
julia> f(x) = sin(x) - 0.1*x^2 + 1
f (generic function with 1 method)
julia> rts = roots(f, -10..10)
4-element Vector{Root{Interval{Float64}}}:
Root([3.14959, 3.1496], :unique)
Root([-4.42654, -4.42653], :unique)
Root([-3.10682, -3.10681], :unique)
Root([-1.08205, -1.08204], :unique)
julia> find_zeros(f, -10, 10)
4-element Vector{Float64}:
-4.426534982071949
-3.1068165552293254
-1.0820421327607177
3.1495967624505226
```
Using that in this case, the intervals are bracketing intervals for `f`, we can find the zeros from the `roots` output with the following:
```julia
julia> [find_zero(f, (interval(u).lo, interval(u).hi)) for u ∈ rts if u.status == :unique]
4-element Vector{Float64}:
3.1495967624505226
-4.426534982071949
-3.1068165552293254
-1.082042132760718
```
!!! note "`IntervalRootFinding` extension"
As of version `1.9` of `Julia` an extension is provided so that when the `IntervalRootFinding` package is loaded, the `find_zeros` function will call `IntervalRootFinding.roots` to find the isolating brackets and `find_zero` to find the roots, when possible, **if** the interval is specified as an `Interval` object, as created by `-1..1`, say.
## Adding a solver
To add a solver the minimum needed is a type to declare the solver and an `update_state` method. In this example, we also define a state object, as the algorithm, as employed, uses more values stored than the default.
The [Wikipedia](https://en.wikipedia.org/wiki/Brent%27s_method) page for Brent's method suggest a modern improvement, Chandrapatla's method, described [here](https://www.google.com/books/edition/Computational_Physics/cC-8BAAAQBAJ?hl=en&gbpv=1&pg=PA95&printsec=frontcover). That description is mostly followed below and in the package implementation `Roots.Chandrapatla`.
To implement Chandrapatla's algorithm we first define a type to indicate the method and a state object which records the values ``x_n``, ``x_{n-1}``, and ``x_{n-2}``, needed for the inverse quadratic step.
```julia
julia> struct Chandrapatla <: Roots.AbstractBracketingMethod end
julia> struct ChandrapatlaState{T,S} <: Roots.AbstractUnivariateZeroState{T,S}
xn1::T
xn0::T
c::T
fxn1::S
fxn0::S
fc::S
end
```
An `init_state` method can be used by some methods to add more detail to the basic state object. Here it starts the old value, `c`, off as `a` as a means to ensure an initial bisection step.
```julia
julia> function init_state(::Chandrapatla, F, x₀, x₁, fx₀, fx₁)
a, b, fa, fb = x₁, x₀, fx₁, fx₀
c, fc = a, fa
ChandrapatlaState(b, a, c, fb, fa, fc)
end
```
The main algorithm is implemented in the `update_state` method. The `@reset` macro from `Accessors.jl` is used to modify a state object, which otherwise is immutable.
```julia
julia> import Roots.Accessors: @reset;
julia> function Roots.update_state(::Chandrapatla, F, o, options, l=NullTracks())
b, a, c = o.xn1, o.xn0, o.c
fb, fa, fc = o.fxn1, o.fxn0, o.fc
# encoding: a = xₙ, b = xₙ₋₁, c = xₙ₋₂
ξ = (a - b) / (c - b)
ϕ = (fa - fb) / (fc - fb)
ϕ² = ϕ^2
Δ = (ϕ² < ξ) && (1 - 2ϕ + ϕ² < 1 - ξ) # Chandrapatla's inequality to determine next step
xₜ = Δ ? Roots.inverse_quadratic_step(a, b, c, fa, fb, fc) : a + (b-a)/2
fₜ = F(xₜ)
incfn(l)
if sign(fₜ) * sign(fa) < 0
a, b, c = xₜ, a, b
fa, fb, fc = fₜ, fa, fb
else
a, c = xₜ, a
fa, fc = fₜ, fa
end
@reset o.xn0 = a
@reset o.xn1 = b
@reset o.c = c
@reset o.fxn0 = fa
@reset o.fxn1 = fb
@reset o.fc = fc
return (o, false)
end
```
This algorithm chooses between an inverse quadratic step and a bisection step depending on the relationship between the computed `ξ` and `ϕ`. The tolerances are the defaults for `AbstractBracketingMethod`.
To see that the algorithm works, we have:
```julia
julia> find_zero(sin, (3,4), Chandrapatla())
3.1415926535897927
julia> find_zero(x -> exp(x) - x^4, (8,9), Chandrapatla())
8.613169456441398
julia> find_zero(x -> x^5 - x - 1, (1,2), Chandrapatla())
1.1673039782614185
```
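As a sanity check, these values can be compared against the package's own implementation mentioned above; a quick sketch, assuming `Roots.Chandrapatla` is constructed with no arguments like our type:
```julia
for (f, ab) in ((sin, (3, 4)), (x -> exp(x) - x^4, (8, 9)), (x -> x^5 - x - 1, (1, 2)))
    @assert find_zero(f, ab, Chandrapatla()) ≈ find_zero(f, ab, Roots.Chandrapatla())
end
```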
| Roots | https://github.com/JuliaMath/Roots.jl.git |
|
[
"MIT"
] | 0.1.1 | 15063c379d8db819a4f77d7c1b5b43172440b4f0 | code | 7585 | """
Vectorized mathematical functions
This module exports nothing; its purpose is to specialize
mathematical functions in Base and Base.FastMath for SIMD.Vec arguments
using vectorized implementations from SLEEFPirates.
See: `is_supported`, `is_fast`, `fast_functions`, `vmap`, `tolerance`.
"""
module SIMDMathFunctions
import SLEEFPirates as SP
import Base.FastMath as FM
import VectorizationBase as VB
import SIMD
const Floats = Union{Float32, Float64}
const Vec{T,N} = SIMD.Vec{N,T} # NB: swapped type parameters
const Vec32{N} = SIMD.Vec{N, Float32}
const Vec64{N} = SIMD.Vec{N, Float64}
"""
tol = tolerance(fun)
Let `x::SIMD.Vec{N,T}`, and let `ref` be the result of applying
`fun` to each element of `x`. Then `res = fun(x)` may differ
from `ref` by an amount of up to `tol*eps(T)*abs(res)`.
`tol==1` except for a few functions, for which `tol==2`.
"""
tolerance(op) = 1
"""
`vmap(fun, x)` applies `fun` to each element of `x::SIMD.Vec` and returns
a `SIMD.Vec`.
a = vmap(fun, x)
If `fun` returns a 2-tuple, `vmap` returns a 2-tuple of `SIMD.Vec`:
a, b = vmap(fun, x) # `fun(x)` returns a 2-tuple, e.g. `sincos`
`vmap(fun, x, y)` works similarly when `fun` takes two input arguments (e.g. `atan(x,y)`):
a = vmap(fun, x, y)
a, b = vmap(fun, x, y) # `fun(x,y)` returns a 2-tuple
Generic implementations are provided, which call `fun` and provide no performance
benefit. `vmap` may be specialized for argument `fun`. Such optimized implementations
may return a different result than `fun`, within some tolerance bounds (see [`tolerance`](@ref)).
Currently optimized implementations are provided by `SLEEFPirates.jl`.
"""
@inline vmap(op, x) = vmap_unop(op, x)
@inline vmap(op, x,y) = vmap_binop(op, x, y)
# fallback implementations : calls op on each element of vector
@inline vmap_unop(op, x::Vec) = vec(map(op, values(x)))
@inline vmap_binop(op, x::V, y::V) where {V<:Vec} = vec(map(op, values(x), values(y)))
@inline vmap_binop(op, x::Vec{T}, y::T) where T = vec(map(xx->op(xx,y), values(x)))
@inline vmap_binop(op, x::T, y::Vec{T}) where T = vec(map(yy->op(x,yy), values(y)))
@inline values(x)=map(d->d.value, x.data)
@inline vec(t::NTuple{N, <:SIMD.VecTypes}) where N = SIMD.Vec(t...)
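# When the elements of `t` are themselves tuples (e.g. elementwise `sincos` results),
# transpose the tuple-of-tuples via `zip` and build one `SIMD.Vec` per output: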
@inline vec(t::NTuple{N, T}) where {N, VT<:SIMD.VecTypes, T<:Tuple{Vararg{VT}}} =
map(x->SIMD.Vec(x...), tuple(zip(t...)...))
"""
funs = fast_functions()
unary_ops = fast_functions(1)
binary_ops = fast_functions(2)
Returns a vector of fast mathematical functions, restricted to those taking `inputs` input arguments when `inputs` is provided.
"""
fast_functions() =
[m.sig.parameters[2].instance for m in methods(vmap) if (m.sig.parameters[2]!=Any)]
fast_functions(inputs::Int) =
[m.sig.parameters[2].instance for m in methods(vmap) if (m.sig.parameters[2]!=Any && length(m.sig.parameters)==inputs+2)]
"""
flag = is_supported(fun)
Returns `true` if `fun` accepts `SIMD.Vec` arguments.
"""
@inline function is_supported(::F) where {F<:Function}
V = SIMD.Vec{4,Float64}
hasmethod(F.instance, Tuple{V}) || hasmethod(F.instance, Tuple{V,V})
end
"""
flag = is_fast(fun)
Returns `true` if there is a specialization of `vmap` for `fun`, `false` otherwise.
"""
@inline function is_fast(f::F) where {F<:Function}
V = SIMD.Vec{4,Float64}
any(m.sig.parameters[2]==F for m in methods(vmap, Tuple{F, V})) && return true
any(m.sig.parameters[2]==F for m in methods(vmap, Tuple{F, V, V}))
end
#================ Fast functions from SLEEFPirates =================#
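# NB: inside `@fastmath`, names such as `exp` resolve to their `Base.FastMath`
# counterparts (e.g. `exp_fast`), so this block sets tolerances for the fast variants;
# the plain Base functions are covered by the definitions just below.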
@fastmath begin
tolerance(::typeof(exp))=2
tolerance(::typeof(exp10))=2
tolerance(::typeof(log))=2
tolerance(::typeof(tanh))=2
tolerance(::typeof(log10))=2
tolerance(::typeof(asin))=2
tolerance(::typeof(^))=2
end
tolerance(::typeof(exp))=2
tolerance(::typeof(exp10))=2
tolerance(::typeof(^))=2
tolerance(::typeof(hypot))=2
# Since SLEEFPirates works with VB.Vec but not with SIMD.Vec,
# we convert between SIMD.Vec and VB.Vec.
# However constructing a VB.Vec of length exceeding the native vector length
# returns a VB.VecUnroll => we must handle also this type
# Constructors SIMD.Vec and VB.Vec accept x... as arguments where x is iterable
# so we make SIMD.Vec and VB.VecUnroll iterable (VB.Vec can be converted to Tuple).
# To avoid messing up existing behavior of Base.iterate for SIMD and VB types, we define a wrapper type Iter{V}
struct Iter{V}
vec::V
end
@inline Base.iterate(v::Iter, args...) = iter(v.vec, args...)
# iterate over SIMD.Vec
@inline iter(v::SIMD.Vec) = v[1], 2
@inline iter(v::SIMD.Vec{N}, i) where {N} = (i > N ? nothing : (v[i], i + 1))
# iterate over VB.VecUnroll
@inline function iter(v::VB.VecUnroll)
data = VB.data(v)
return data[1](1), (1, 1)
end
@inline function iter(v::VB.VecUnroll{N,W}, (i, j)) where {N,W}
data = VB.data(v)
if j < W
return data[i](j + 1), (i, j + 1)
elseif i <= N # there are N+1 vectors
return data[i+1](1), (i + 1, 1)
else
return nothing
end
end
@inline SIMDVec(v::VB.Vec) = SIMD.Vec(Tuple(v)...)
@inline SIMDVec(vu::VB.VecUnroll) = SIMD.Vec(Iter(vu)...)
@inline VBVec(v::Vec) = VB.Vec(Iter(v)...)
@inline VBVec(v::Floats) = v
# some operators have a fast version in FastMath, but not all
# and some operators have a fast version in SP, but not all !
const not_unops = (:eval, :include, :evalpoly, :hypot, :ldexp, :sincos, :sincos_fast, :pow_fast)
const broken_unops = (:cospi, :sinpi)
is_unop(n) = !occursin("#", string(n)) && !in(n, union(not_unops, broken_unops))
const unops_SP = filter(is_unop, names(SP; all = true))
const unops_FM = filter(is_unop, names(FM; all = true))
# "slow" operators provided by SP
const unops_Base_SP = intersect(unops_SP, names(Base))
# FastMath operators provided by SP
const unops_FM_SP = intersect(unops_SP, unops_FM)
# FastMath operators with only a slow version provided by SP
const unops_FM_SP_slow = filter(unops_SP) do op
n = Symbol(op, :_fast)
in(n, unops_FM) && !in(n, unops_SP)
end
# one input, one output
for (mod, unops, fastop) in (
(Base, unops_Base_SP, identity),
(FM, unops_FM_SP, identity),
(FM, unops_FM_SP_slow, sym->Symbol(sym, :_fast)))
for op in unops
op_fast = fastop(op)
op_SP = getfield(SP, op)
@eval begin
@inline $mod.$op_fast(x::Vec32) = vmap($mod.$op_fast, x)
@inline $mod.$op_fast(x::Vec64) = vmap($mod.$op_fast, x)
@inline vmap(::typeof($mod.$op_fast), x) = SIMDVec($op_SP(VBVec(x)))
end
end
end
# one input, two outputs
for (mod, op) in ((Base, :sincos), (FM, :sincos_fast))
@eval begin
@inline $mod.$op(x::Vec{<:Floats}) = vmap($mod.$op, x)
@inline vmap(::typeof($mod.$op), x) = map(SIMDVec, SP.$op(VBVec(x)))
end
end
# two inputs, one output
binops = ((Base,:hypot,SP.hypot), (Base,:^,SP.pow), (FM,:pow_fast, SP.pow_fast))
for (mod, op_slow, op_fast) in binops
@eval begin
@inline $mod.$op_slow(x::Vec{T}, y::Vec{T}) where {T<:Floats} = vmap($mod.$op_slow, x,y)
@inline $mod.$op_slow(x::T, y::Vec{T}) where {T<:Floats} = vmap($mod.$op_slow, x,y)
@inline $mod.$op_slow(x::Vec{T}, y::T) where {T<:Floats} = vmap($mod.$op_slow, x,y)
@inline vmap(::typeof($mod.$op_slow), x, y) = SIMDVec($op_fast(VBVec(x), VBVec(y)))
end
end
# precompilation
for op in fast_functions(1), F in (Float32, Float64), N in (4, 8, 16)
precompile(op, (Vec{F,N},))
end
for op in fast_functions(2), F in (Float32, Float64), N in (4, 8, 16)
precompile(op, (Vec{F,N},Vec{F,N}))
precompile(op, (Vec{F,N},F))
precompile(op, (F,Vec{F,N}))
end
end
| SIMDMathFunctions | https://github.com/ClimFlows/SIMDMathFunctions.jl.git |
|
[
"MIT"
] | 0.1.1 | 15063c379d8db819a4f77d7c1b5b43172440b4f0 | code | 1953 | using SIMDMathFunctions:
SIMD, tolerance, fast_functions, is_supported, is_fast, vmap
using Test
data(F, N, ::Function) = range(F(0.01), F(0.9), length = N)
data(F, N, ::typeof(acosh)) = range(F(1.1), F(1.9), length = N)
data(F, N, ::typeof(@fastmath acosh)) = range(F(1.1), F(1.9), length = N)
data_binop(F, N, ::Function) =
range(F(0.01), F(0.9), length = N), range(F(0.01), F(0.9), length = N)
function validate(res::SIMD.Vec, ref, tol)
err = relative_error(res, ref)
err, any(err > tol)
end
function validate(res::Tuple, ref, tol)
err = map(relative_error, res, ref)
err, any(map(err -> any(err > tol), err))
end
relative_error(res, ref) = abs(res - ref) / abs(ref)
@testset "Two-argument functions" begin
for fun in sort(fast_functions(2), by = string)
@assert is_supported(fun) "$fun is not supported"
@assert is_fast(fun) "$fun has no fast implementation"
@info "--- $(string(fun))"
tol = tolerance(fun)
for F in (Float32, Float64), N in (4, 8, 16, 32)
x, y = data_binop(F, N, fun)
xv, yv = SIMD.Vec(x...), SIMD.Vec(y...)
for (xx, yy) in ((xv, yv), (x[N>>1], yv), (xv, y[N>>1]))
res, ref = fun(xx, yy), vmap(fun, xx, yy)
err, fail = validate(res, ref, tol * eps(F))
fail && @warn fun (xx, yy) ref res err
@test !fail
end
end
end
end
@testset "One-argument functions" begin
for fun in sort(fast_functions(1), by = string)
@assert is_supported(fun)
@assert is_fast(fun)
@info "--- $(string(fun))"
tol = tolerance(fun)
for F in (Float32, Float64), N in (4, 8, 16, 32)
d = SIMD.Vec(data(F, N, fun)...)
res, ref = fun(d), vmap(fun, d)
err, fail = validate(res, ref, tol * eps(F))
fail && @warn fun d ref res err
@test !fail
end
end
end
| SIMDMathFunctions | https://github.com/ClimFlows/SIMDMathFunctions.jl.git |
|
[
"MIT"
] | 0.1.1 | 15063c379d8db819a4f77d7c1b5b43172440b4f0 | docs | 2875 | # SIMDMathFunctions
Fast vectorized mathematical functions for SIMD.jl, using SLEEFPirates.jl.
[](https://github.com/ClimFlows/SIMDMathFunctions.jl/actions/workflows/CI.yml)
[](https://codecov.io/gh/ClimFlows/SIMDMathFunctions)
## Installing
This package is registered. To install it:
```Julia
] add SIMDMathFunctions
```
## Overview
The primary goal of `SIMDMathFunctions` is to provide efficient methods for mathematical functions with `SIMD.Vec` arguments. Under the hood, optimized implementations provided by `SLEEFPirates.jl` are used. This allows explicitly vectorized code using `SIMD.jl` to benefit from fast vectorized math functions.
```Julia
using SIMD: VecRange
using SIMDMathFunctions: is_supported, is_fast, fast_functions
using BenchmarkTools
function exp!(xs::Vector{T}, ys::Vector{T}) where {T}
@inbounds for i in eachindex(xs,ys)
xs[i] = @fastmath exp(ys[i])
end
end
function exp!(xs::Vector{T}, ys::Vector{T}, ::Val{N}) where {N, T}
@assert length(ys) == length(xs)
@assert length(xs) % N == 0
@assert is_supported(@fastmath exp)
@inbounds for istart in 1:N:length(xs)
i = VecRange{N}(istart)
xs[i] = @fastmath exp(ys[i])
end
end
y=randn(Float32, 1024*1024); x=similar(y);
@benchmark exp!($x, $y)
@benchmark exp!($x, $y, Val(8))
@benchmark exp!($x, $y, Val(16))
@benchmark exp!($x, $y, Val(32))
is_fast(exp)
unary_funs = fast_functions(1)
binary_funs = fast_functions(2)
```
`is_supported(fun)` returns `true` if function `fun` supports `SIMD.Vec` arguments. Similarly `is_fast(fun)` returns `true` if `fun` has an optimized implementation.
`fast_functions([ninputs])` returns a vector of functions benefitting from a fast implementation, restricted to those accepting `ninputs` input arguments if `ninputs` is provided.
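For instance, a quick way to inspect what is currently accelerated (the exact contents depend on the installed `SLEEFPirates` version, so treat the output as illustrative):
```Julia
using SIMDMathFunctions: fast_functions
fast_functions(2) # two-argument functions, e.g. hypot, ^ and their fastmath variants
```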
`SIMDMathFunctions` also provides a helper function `vmap` to vectorize not-yet-supported mathematical functions. For example:
```Julia
using SIMD: Vec
import SIMDMathFunctions: vmap
import SpecialFunctions: erf
erf(x::Vec) = vmap(erf, x)
erf(x::Vec, y::Vec) = vmap(erf, x, y)
erf(x::Vec{N,T}, y::T) where {N,T} = vmap(erf, x, y)
x = Vec(randn(Float32, 16)...)
@benchmark erf($x)
```
The default `vmap` method simply calls `erf` on each element of `x`. There is no performance benefit, but it allows generic code to use `erf`. If `erf_SIMD` is optimized for vector inputs, you can provide a specialized method for `vmap`:
```Julia
using VectorizationBase: verf # vectorized implementation
using SIMDMathFunctions: SIMDVec, VBVec # VectorizationBase <=> SIMD conversion
erf_SIMD(x) = SIMDVec(verf(VBVec(x)))
vmap(::typeof(erf), x) = erf_SIMD(x)
@benchmark erf($x)
```
| SIMDMathFunctions | https://github.com/ClimFlows/SIMDMathFunctions.jl.git |
|
[
"MIT"
] | 0.1.6 | b347591e31387d63c84e9ca602b813a6dc67582d | code | 137 | using LoopManagers
using BenchmarkTools
SUITE = BenchmarkGroup()
SUITE["rand"] = @benchmarkable rand(10)
# Write your benchmarks here.
| LoopManagers | https://github.com/ClimFlows/LoopManagers.jl.git |
|
[
"MIT"
] | 0.1.6 | b347591e31387d63c84e9ca602b813a6dc67582d | code | 529 | using LoopManagers
using Documenter
DocMeta.setdocmeta!(LoopManagers, :DocTestSetup, :(using LoopManagers); recursive=true)
makedocs(;
modules=[LoopManagers],
authors="The ClimFlows contributors",
sitename="LoopManagers.jl",
format=Documenter.HTML(;
canonical="https://ClimFlows.github.io/LoopManagers.jl",
edit_link="main",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/ClimFlows/LoopManagers.jl",
devbranch="main",
)
| LoopManagers | https://github.com/ClimFlows/LoopManagers.jl.git |
|
[
"MIT"
] | 0.1.6 | b347591e31387d63c84e9ca602b813a6dc67582d | code | 1390 | module KA_Ext
using KernelAbstractions: @kernel, @index, synchronize as KA_sync
import KernelAbstractions.Adapt.adapt_storage
using LoopManagers: LoopManagers, Range1, Range2
import ManagedLoops: synchronize, offload, DeviceManager
struct KA_GPU{A, G} <: DeviceManager
gpu::G
end
LoopManagers.KernelAbstractions_GPU(gpu::G, A) where G = KA_GPU{A,G}(gpu)
adapt_storage(mgr::KA_GPU, x) = adapt_storage(mgr.gpu, x)
synchronize(mgr::KA_GPU) = KA_sync(mgr.gpu)
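# NB: the kernels below receive zero-based offsets (first(range) - 1), so that the
# 1-based global indices provided by KernelAbstractions map onto the requested range.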
@inline function offload(fun, backend::KA_GPU, irange::Range1, args...)
(; gpu) = backend
kernel = kernel_KA_1D(gpu)
kernel(fun, first(irange)-1, args ; ndrange=length(irange))
return nothing
end
@inline function offload(fun::Fun, backend::KA_GPU, (irange, jrange)::Range2, args...) where Fun
(; gpu) = backend
M, N = length(irange), length(jrange)
i0, j0 = first(irange)-1, first(jrange)-1
# kernel = kernel_KA_2D(gpu, (32,32), (32,N))
kernel = kernel_KA_2D(gpu)
kernel(fun, i0, j0, last(irange), args; ndrange=map(length, (irange,jrange)))
return nothing
end
@kernel function kernel_KA_1D(fun, i0, args)
i = @index(Global, Linear)
@inline fun((i+i0,), args...)
end
@kernel function kernel_KA_2D(fun::Fun, i0, j0, M, args) where Fun
i, j = @index(Global, NTuple)
# ranges = (i+i0):32:M, (j+j0,)
ranges = (i+i0,), (j+j0,)
@inline fun(ranges, args...)
end
end
| LoopManagers | https://github.com/ClimFlows/LoopManagers.jl.git |
|
[
"MIT"
] | 0.1.6 | b347591e31387d63c84e9ca602b813a6dc67582d | code | 2698 | """
Module `LoopManagers` provides computing managers to pass to functions using the
performance portability module `ManagedLoops`. It implements the API functions defined by `ManagedLoops` for the provided managers.
Currently supported are SIMD and/or multithreaded execution on the CPU. Offloading to GPU via CUDA and oneAPI is experimental.
Additional iteration/offloading strategies (e.g. cache-friendly iteration) can be implemented by defining
new manager types and implementing specialized versions of `ManagedLoops.offload`.
"""
module LoopManagers
using ManagedLoops
import ManagedLoops: offload, no_simd, parallel, barrier, master, share
export PlainCPU, MultiThreadCPU, VectorizedCPU, MultiThreadSIMD
export CUDA_GPU, oneAPI_GPU
# Conservative defaults
ManagedLoops.default_manager(::Type{HostManager}) = PlainCPU()
# force specialization on `args...`
const VArgs{N}=Vararg{Any,N}
const Range1 = AbstractUnitRange
const Range2{I,J} = Tuple{I,J}
const Range3{I,J,K} = Tuple{I,J,K}
const Range4{I,J,K,L} = Tuple{I,J,K,L}
@inline call_single_index(i, fun, args) = fun((i,), args...)
@inline call_single_index((i,j)::Range2, fun, args) = fun(((i,),(j,)), args...)
@inline call_single_index((i,j,k)::Range3, fun, args) = fun(((i,),(j,),(k,)), args...)
@inline call_single_index((i,j,k,l)::Range4, fun, args) = fun(((i,),(j,),(k,),(l,)), args...)
import HostCPUFeatures # for single.jl
import SIMD # for single.jl
import Polyester # for threads.jl
# helper functions
include("julia/check_closure.jl")
# include("julia/strict_float.jl")
# CPU managers
# include("julia/CPU/simd.jl")
include("julia/CPU/single.jl")
include("julia/CPU/threads.jl")
# composite managers
include("julia/GPU/fakeGPU.jl")
include("julia/tune.jl")
# KernelAbstractions manager, active only if KA is loaded somewhere
# we define the types here so that they are available and documented
# The implementation is in ext/KA_Ext.jl
"""
gpu = KernelAbstractions_GPU(gpu::KernelAbstractions.GPU, ArrayType)
# examples
gpu = KernelAbstractions_GPU(CUDABackend(), CuArray)
gpu = KernelAbstractions_GPU(ROCBackend(), ROCArray)
gpu = KernelAbstractions_GPU(oneBackend(), oneArray)
Returns a manager that offloads computations to a `KernelAbstractions` GPU backend.
The returned manager will call `ArrayType(data)` when it needs
to transfer `data` to the device.
!!! note
While `KernelAbstractions_GPU` is always available, implementations of `offload`
are available only if the module `KernelAbstractions` is loaded by the main program or its dependencies.
"""
function KernelAbstractions_GPU end
using PackageExtensionCompat
function __init__()
@require_extensions
end
end # module LoopManagers
| LoopManagers | https://github.com/ClimFlows/LoopManagers.jl.git |
|
[
"MIT"
] | 0.1.6 | b347591e31387d63c84e9ca602b813a6dc67582d | code | 2259 | @generated boxed_variables(::F) where {F} = filter(n -> fieldtype(F,n) <: Core.Box, fieldnames(F))
function check_boxed_variables(fun::Fun) where Fun
boxed = boxed_variables(fun)
if !isempty(boxed)
error("""
Your use of either
* the `@offload` macro
* the `forall(...) do ... end` construct
* the `offload(...) do ... end` construct
has produced closure `$fun` (=`do ... end` block) which captures variable(s) `$(boxed...)` in a `Core.Box`. This is a serious performance issue and is forbidden.
To avoid this, you may either :
* check if the offending variable is redefined outside of the closure ; if so, use another name rather than reusing the same name.
* explicitly pass this variable to the closure via `@offload` / `offload` / `forall`.
* enclose the whole construct in a `let ... end` block (see 'Performance of captured variables' in the Julia manual).
""")
end
end
function check_closure(fun::Fun) where Fun
err = false
msg=""
names = propertynames(fun)
if ! isbits(fun)
msg*= """
It seems your use of the forall(...) do ... end construct results in closure
$fun capturing variables $names, some of which are not isbits.
This is discouraged since some backends require that the compute kernels have only isbits arguments.
"""
for name in names
var = getproperty(fun, name)
if ! isbits(var)
msg*= " Variable $name captured by $fun has type $(typeof(var)) which is not isbits.\n"
end
end
err = true
end
if sizeof(fun)>0
msg*= """It seems your use of the forall(...) do ... end construct results in closure
$fun capturing variables $names, some of which have size>0.
This is discouraged since this may severely affect the performance of some backends.
"""
for name in names
s = sizeof(getproperty(fun, name))
if s>0
msg*= " Variable $name captured by $fun has size $s>0.\n"
end
end
err = true
end
err && error(msg*"Cannot execute forall statement.")
end
| LoopManagers | https://github.com/ClimFlows/LoopManagers.jl.git |
|
[
"MIT"
] | 0.1.6 | b347591e31387d63c84e9ca602b813a6dc67582d | code | 2419 | # const Managers = Vector{HostManager}
# Log-normal distributions
struct LogNormal
N::Int
mean::Float64
var::Float64
end
LogNormal()=LogNormal(0,0,0)
function draw(law::LogNormal)
law.N<2 && return -Inf
return randn()*sqrt(law.var)+law.mean
end
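# Online (Welford-style) update of the running mean and variance of log10(t).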
function push(law::LogNormal, t)
(;N, var, mean) = law
logt = log10(t)
mean_new = (N*mean + logt)/(N+1)
var_new = (N*var + (logt-mean)*(logt-mean_new))/(N+1)
return LogNormal(N+1, mean_new, var_new)
end
# Statistics of calling a certain function signature
struct TunedCall
stats::Vector{LogNormal}
end
TunedCall(n::Int) = TunedCall([LogNormal() for i=1:n])
# Auto-tuning manager
struct Tune <: ManagedLoops.HostManager
backends::Vector{HostManager}
calls::Dict{Any,TunedCall}
end
function Base.show(io::IO, mgr::Tune)
mgrs = join(("$mgr" for mgr in mgr.backends), ",")
print(io, "tune([$mgrs])")
end
tune(backends) = Tune(backends, Dict{Any, TunedCall}())
function tune()
avx(vlen) = LoopManagers.VectorizedCPU(vlen)
threaded(vlen) = LoopManagers.MultiThread(avx(vlen))
backends = ([threaded(vlen), avx(vlen)] for vlen in (8, 16, 32))
return Tune(vcat(backends...), Dict{Any, TunedCall}())
end
# implementation of ManagedLoops API
ManagedLoops.parallel(fun, b::Tune) = fun(b)
function ManagedLoops.offload(fun::Fun, b::Tune, range, args::Vararg{Any,N}) where {Fun<:Function, N}
(; backends, calls) = b
sig = (fun, range, signature(args))
if !(sig in keys(calls))
calls[sig] = TunedCall(length(backends))
end
call = calls[sig]
stats = call.stats
picked = pick(stats)
stats[picked] = sample(stats[picked], backends[picked], fun, range, args)
return nothing
end
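# Thompson-sampling-like selection: draw a log-time from each backend's fitted
# log-normal law and pick the smallest draw. Backends with fewer than two samples
# draw -Inf, so they are tried first.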
pick(stats) = argmin(draw(law) for law in stats)
# signature of function call
signature(x) = typeof(x)
signature(a::AbstractArray) = eltype(a), axes(a)
signature(t::Union{Tuple, NamedTuple}) = map(signature, t)
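# Time one offload call with the chosen backend. If any compilation happened during
# the call, the sample is discarded (the law is returned unchanged), so the statistics
# reflect steady-state execution only.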
function sample(law, backend, fun::Fun, range, args) where Fun
compile_time = Base.cumulative_compile_time_ns()
Base.cumulative_compile_timing(true)
start = time_ns()
ManagedLoops.offload(fun, backend, range, args...)
elapsed = (time_ns()-start)*1e-9
Base.cumulative_compile_timing(false)
if Base.cumulative_compile_time_ns() == compile_time # no time spent compiling
return push(law, elapsed)
else
return law
end
end
| LoopManagers | https://github.com/ClimFlows/LoopManagers.jl.git |
|
[
"MIT"
] | 0.1.6 | b347591e31387d63c84e9ca602b813a6dc67582d | code | 6639 | """
abstract type SingleCPU<:HostManager end
Parent type for manager executing on a single core. Derived types should specialize
`distribute`[@ref] or `offload_single`[@ref] and leave `offload` as it is.
"""
abstract type SingleCPU<:HostManager end
@inline function offload(fun::Fun, b::SingleCPU, range, args::VArgs{NA}) where {Fun<:Function, NA}
@inline offload_single(fun, b, range, args, 1,1)
end
@inline function offload_single(fun::Fun, b::SingleCPU, range, args, NT, id) where {Fun<:Function}
check_boxed_variables(fun)
drange = distribute(range, b, NT, id)
@inline fun(drange, args...)
end
"""
manager = PlainCPU()
Manager for sequential execution on the CPU. LLVM will try to vectorize loops marked with `@simd`.
This works mostly for simple loops and arithmetic computations.
For Julia-side vectorization, especially of mathematical functions, see `VectorizedCPU'.
"""
struct PlainCPU <: SingleCPU end
# Divide work among CPU threads.
@inline distribute(range, ::PlainCPU, NT, id) = distribute_plain(range, NT, id)
@inline function distribute_plain(range::Range1, NT, id)
start, len = first(range), length(range)
return (start+div(len*(id-1),NT)):(start-1+div(len*id,NT))
end
# distribute outer (last) range over threads
@inline distribute_plain((ri,rj)::Range2, NT, id) = (ri, distribute_plain(rj,NT,id))
@inline distribute_plain((ri,rj,rk)::Range3, NT, id) = (ri, rj, distribute_plain(rk,NT,id))
@inline distribute_plain((ri,rj,rk,rl)::Range4, NT, id) = (ri, rj, rk, distribute_plain(rl,NT,id))
"""
manager = VectorizedCPU()
Returns a manager for executing loops with optional explicit SIMD vectorization. Only inner loops
marked with `@vec` will use explicit vectorization. If this causes errors, use `@simd` instead of `@vec`.
Vectorization of loops marked with `@simd` is left to the Julia/LLVM compiler, as with PlainCPU.
!!! note
`ManagedLoops.no_simd(::VectorizedCPU)` returns a `PlainCPU`.
"""
struct VectorizedCPU{VLen} <: SingleCPU end
no_simd(::VectorizedCPU) = PlainCPU()
const N32 = Int64(HostCPUFeatures.pick_vector_width(Float32))
VectorizedCPU(len=N32) = VectorizedCPU{len}()
Base.show(io::IO, ::VectorizedCPU{VLen}) where VLen= print(io, "VectorizedCPU($VLen)")
"""
Divide work among vectorized CPU threads.
"""
@inline distribute(range, b::VectorizedCPU, NT, id) = distribute_simd(range, b, NT, id)
@inline function _distribute_simd(range::Range1, ::VectorizedCPU{VSize}, NT, id) where VSize
# this implementation avoids tails except for the last thread
# but it has a non-identified bug
start, len = first(range), length(range)
nvec = div(len, VSize) # number of vectors that fit entirely in range
tail = mod(len, VSize)
work = nvec + tail # add tail to estimate and divide work to be done
vstop = min(nvec, div(id*work, NT))
stop = (id==NT) ? last(range) : (start+VSize*vstop-1)
vstart = min(nvec, div((id-1)*work, NT))
start = start+VSize*vstart
return VecRange{VSize}(start, stop)
end
@inline function distribute_simd(range::Range1, ::VectorizedCPU{VSize}, NT, id) where VSize
r = distribute_plain(range, NT, id)
return VecRange{VSize}(first(r), last(r))
end
# distribute outer (last) range over threads, vectorize inner (first) range
@inline distribute_simd((ri,rj)::Range2, b, NT, id) = (distribute_simd(ri,b,1,1), distribute_plain(rj,NT,id))
@inline distribute_simd((ri,rj,rk)::Range3, b, NT, id) = (distribute_simd(ri,b,1,1), rj, distribute_plain(rk,NT,id))
@inline distribute_simd((ri,rj,rk,rl)::Range4, b, NT, id) = (distribute_simd(ri,b,1,1), rj, rk, distribute_plain(rl,NT,id))
#======================= Vectorized range ====================#
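# Worked example (hypothetical values): VecRange{4}(1, 10) yields bulk chunks
# covering lanes 1:4 and 5:8 (vstop = 9, excluded) and the scalar tail 9:10.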
struct VecRange{N} <: AbstractUnitRange{Int}
start::Int
vstop::Int # bulk = start:N:vstop (vstop excluded)
stop::Int # tail = vstop:stop
function VecRange{N}(start,stop) where N
vlen = div(stop+1-start, N)
vstop = start + N*vlen
new{N}(start, vstop, stop)
end
end
# AbstractUnitRange
@inline Base.length(range::VecRange{N}) where N = range.stop-range.start+1
@inline Base.first(range::VecRange) = range.start
@inline Base.last(range::VecRange) = range.stop
# normal / @simd iteration
@inline Base.firstindex(::VecRange) = 0
@inline Base.getindex(range::VecRange, i::Integer) = range.start+i
@inline Base.iterate(range::VecRange) = next_item(range.stop, range.start)
@inline Base.iterate(range::VecRange, prev) = next_item(range.stop, prev+1)
@inline next_item(stop, next) = (next <= stop) ? (next, next) : nothing
# @vec iteration
ManagedLoops.bulk(range::VecRange{N}) where N = VecBulk{N}(range.start, range.vstop)
ManagedLoops.tail(range::VecRange) = range.vstop:range.stop
struct VecBulk{N}
start::Int
vstop::Int # bulk = start:N:vstop (vstop excluded)
end
@inline Base.length(range::VecBulk{N}) where N = div(range.vstop-range.start, N)
@inline Base.firstindex(::VecBulk) = 0
@inline Base.getindex(range::VecBulk{N}, i) where N = SIMD.VecRange{N}(range.start+N*i)
@inline Base.iterate(range::VecBulk) = next_bulk(range, range.start)
@inline Base.iterate(range::VecBulk{N}, prev) where N = next_bulk(range, prev+N)
@inline next_bulk(range::VecBulk{N}, next) where N = (next < range.vstop) ? (SIMD.VecRange{N}(next), next) : nothing
# support for @vec if ... else
@inline function ManagedLoops.choose(m::SIMD.Vec{N,Bool}, iftrue, iffalse) where N
all(m) && return iftrue()
any(m) || return iffalse()
return SIMD.vifelse(m, iftrue(), iffalse())
end
# support for managed broadcasting with SIMD
# FIXME: Base.@propagate_inbounds would be safer than @inbounds
@inline function Base.getindex(bc::Broadcast.Broadcasted, i::SIMD.VecRange, J::Vararg{Union{Int, CartesianIndex},N}) where N
broadcast_getindex_vec(bc, i, CartesianIndex(J))
end
@inline function broadcast_getindex_vec(bc::Broadcast.Broadcasted{<:Any,<:Any,<:Any,<:Any}, i, J)
args = getargs(bc.args, i, J)
return bc.f(args...)
end
# recursively constructs (args[1][i,J], args[2][i,J], ...)
@inline getargs(args::Tuple, i, J) = ( getarg(args[1], i, J), getargs(Base.tail(args), i, J)...)
@inline getargs(args::Tuple{Any}, i, J) = ( getarg(args[1], i, J), )
@inline getargs(::Tuple{}, i, J) = ()
# get a single argument at index [i,J]
@inline getarg(A::Union{Ref,AbstractArray{<:Any,0},Number}, _, _) = A[] # Scalar-likes can just ignore all indices
@inline getarg(A, i, J) = @inbounds A[i, CartesianIndex(new_index(A, J))]
@inline getarg(A::AbstractArray, i, J) = @inbounds A[i, new_index(A, J)...]
# truncate indices beyond rank
@inline new_index(A, J::CartesianIndex) = J.I[1:(ndims(A)-1)]
| LoopManagers | https://github.com/ClimFlows/LoopManagers.jl.git |
|
[
"MIT"
] | 0.1.6 | b347591e31387d63c84e9ca602b813a6dc67582d | code | 6867 | """
manager = MultiThread(cpu_manager=PlainCPU(), nthreads=Threads.nthreads())
Returns a multithread manager derived from `cpu_manager`, with a fork-join pattern.
When `manager` is passed to `ManagedLoops.offload`, `manager.nthreads` threads are spawned (fork).
They each work on a subset of indices. Progress continues only after all threads have finished (join),
so that `barrier` is not needed between two uses of `offload` and does nothing.
!!! tip
It is highly recommended to pin the Julia threads to specific cores.
The simplest way is probably to set `JULIA_EXCLUSIVE=1` before launching Julia.
See also [Julia Discourse](https://discourse.julialang.org/t/compact-vs-scattered-pinning/69722)
"""
struct MultiThread{Manager<:SingleCPU} <: HostManager
single::Manager
nthreads::Int
MultiThread(mgr::M = PlainCPU(), nt = Threads.nthreads()) where M = new{M}(mgr, nt)
end
Base.show(io::IO, mgr::MultiThread) = print(io, "MultiThread($(mgr.single), $(mgr.nthreads))")
no_simd(mgr::MultiThread) = MultiThread(no_simd(mgr.single), mgr.nthreads)
# Wraps arguments in order to avoid conversion to PtrArray by Polyester
struct Args{T}
args::T
end
function offload(fun::Fun, mgr::MultiThread, range, args::VArgs{NA}) where {Fun<:Function,NA}
check_boxed_variables(fun)
args = Args(args)
Polyester.@batch for id = 1:mgr.nthreads
offload_single(fun, mgr.single, range, args.args, mgr.nthreads, id)
end
end
mutable struct ConditionBarrier
const barrier::Threads.Condition # condition for Barrier()
arrived::Tuple{Int,Int,Symbol} # (arrived, id, there) where arrived = number of threads having reached the barrier
shared::Any
ConditionBarrier() = new(Threads.Condition(), (0, 0, :open))
end
"""
manager = MainThread(cpu_manager=PlainCPU(), nthreads=Threads.nthreads())
Returns a multithread manager derived from `cpu_manager`, initially in sequential mode.
In this mode, `manager` behaves exactly like `cpu_manager`.
When `manager` is passed to `ManagedLoops.parallel`, `nthreads` threads are spawned.
The `manager` passed to threads works in parallel mode.
In this mode, `manager` behaves like `cpu_manager`,
except that the outer loop is distributed among threads.
Furthermore `ManagedLoops.barrier` and `ManagedLoops.share`
allow synchronisation and data-sharing across threads.
```julia
main_mgr = MainThread()
LoopManagers.parallel(main_mgr) do thread_mgr
x = LoopManagers.share(thread_mgr) do master_mgr
randn()
end
println("Thread \$(Threads.threadid()) has drawn \$x.")
end
```
"""
struct MainThread{Manager} <: HostManager
cbarrier::ConditionBarrier
manager::Manager
nthreads::Int
end
MainThread(manager = PlainCPU(), nt=Threads.nthreads()) = MainThread(ConditionBarrier(), manager, nt)
Base.show(io::IO, main::MainThread) = print(io, "MainThread($(main.manager))")
@inline function no_simd(main::MainThread)
(; cbarrier, manager, nthreads) = main
return MainThread(cbarrier, no_simd(manager), nthreads)
end
# It is crucial to store the thread id in the WorkThread because
# there is no guarantee that Threads.threadid() remains the same over the lifetime
# of the thread. When two successive loops have the same loop range,
# relying on Threadid() can lead to the same thread computing over different parts
# of the range. In the absence of a barrier between the loops
# (which should not be necessary), incorrect data may be read in the second loop.
struct WorkThread{Manager} <: HostManager
cbarrier::ConditionBarrier
manager::Manager
N::Int
id::Int
end
@inline function no_simd(worker::WorkThread)
(; cbarrier, manager, N, id) = worker
return WorkThread(cbarrier, no_simd(manager), N, id)
end
@inline function offload(
fun::Fun,
main::MainThread,
ranges,
args::Vararg{Any,N},
) where {Fun<:Function,N}
@inline offload_single(fun, main.manager, ranges, args, 1, 1)
end
@inline function offload(
fun::Fun,
worker::WorkThread,
ranges,
args::Vararg{Any,N},
) where {Fun<:Function,N}
@inline offload_single(fun, worker.manager, ranges, args, worker.N, worker.id)
end
#============== parallel, barrier, share ==============#
parallel(::Any, worker::WorkThread) =
error("Nested used of ManagedLoops.parallel is forbidden.")
function parallel(action::Fun, main::MainThread, args::Vararg{Any,N}) where {Fun,N}
main.cbarrier.arrived = (0, 0, :open) # reset barrier
@sync for id=1:main.nthreads
worker = WorkThread(main.cbarrier, main.manager, main.nthreads, id)
Threads.@spawn action(worker, args...)
end
return nothing
end
function master(fun::Fun, worker::WorkThread, args::Vararg{Any,N}) where {Fun,N}
barrier(worker)
if worker.id == 1
manager = worker.manager
fun(manager, args...)
end
barrier(worker)
return nothing
end
function share(fun, worker::WorkThread, args::Vararg{Any,N}) where {N}
b = worker.cbarrier
# wait for other worker threads to finish their work before master thread calls fun
barrier(worker)
id = worker.id
# only master thread is allowed to write to barrier.shared
id == 1 && (b.shared = fun(worker.manager, args...))
# barrier ensures that other threads read *after* the master has written
barrier(worker)
shared = b.shared # ::Any => type instability
# barrier ensures that master waits until others have read the result
barrier(worker)
# now all threads have read, do not keep the result alive longer than needed
id == 1 && (b.shared = nothing)
return shared
end
function barrier(worker::WorkThread, there::Symbol = :unknown)
(; cbarrier, N, id) = worker
wait_condition_barrier(cbarrier, N, id, there)
end
function wait_condition_barrier(cb::ConditionBarrier, size, id, there)
# @info "Enter condition_barrier" there id size
lock(cb.barrier)
try
(; barrier, arrived) = cb
(arrived_old, id_old, there_old) = arrived
arrived_new = arrived_old + 1
if (arrived_new > 1) && (there_old != there)
err = """
Race condition detected. Worker $id is waiting at :
$there
while worker $id_old is waiting at :
$there_old
"""
error(err)
end
if arrived_new == size # we are the last thread arriving at the barrier
cb.arrived = (0, 0, :open) # reset barrier for future use
# @info "We are the last thread" id cb.arrived
notify(barrier) # release other threads waiting at barrier
else
cb.arrived = (arrived_new, id, there)
# @info "waiting for more threads to arrive at barrier" id cb.arrived
wait(barrier)
end
finally
unlock(cb.barrier)
end
# @info "Leave condition_barrier" there id
return nothing
end
| LoopManagers | https://github.com/ClimFlows/LoopManagers.jl.git |
|
[
"MIT"
] | 0.1.6 | b347591e31387d63c84e9ca602b813a6dc67582d | code | 2877 | struct FakeGPU{Host} <: DeviceManager
host::Host
end
struct DeviceArray{T,N, Data<:AbstractArray{T,N}} <: AbstractArray{T,N}
data :: Data
end
Base.eltype(dev::DeviceArray) = eltype(dev.data)
Base.eachindex(dev::DeviceArray) = eachindex(dev.data)
Base.size(dev::DeviceArray, args...) = size(dev.data, args...)
Base.axes(dev::DeviceArray, args...) = axes(dev.data, args...)
Base.view(dev::DeviceArray, args...) = DeviceArray(view(dev.data, args...))
Base.similar(dev::DeviceArray, dims::Union{Integer, AbstractUnitRange}...) = DeviceArray(similar(dev.data, dims...))
Base.similar(dev::DeviceArray, F::Type, dims::Union{Integer, AbstractUnitRange}...) = DeviceArray(similar(dev.data, F, dims...))
function Base.copy!(a::DeviceArray{F, 1, <:AbstractVector{F}}, b::AbstractVector{F}) where F
copy!(a.data, b)
return a
end
function Base.getindex(a::DeviceArray, index...)
error("""Elements of a device array are accessible only from offloaded code. If this error is triggered from offloaded code,
this means that the array has not been properly managed by the device backend. This may be because it is
part of a struct, or captured by a closure. A possible fix is to pass this array explicitly as an argument to the
offloaded function/closure. This is the role of the extra arguments of `offload` and `@offload`. """)
end
@inline unwrap(x)=x
@inline unwrap(x::Tuple)=Tuple(map(unwrap,x))
@inline unwrap(ddata::DeviceArray)=ddata.data
unwrap(x::AbstractArray) = error(
"$(typeof(x)) is not on the device. You must use `to_device` to
transfer array arguments to the device before calling `offload` with `backend::FakeGPU`")
to_host(x::Array)=copy(x)
to_host(x::DeviceArray, ::FakeGPU) = copy(x.data)
to_device(x::AbstractArray, ::FakeGPU) = DeviceArray(copy(x))
@inline function forall(fun::Fun, backend::FakeGPU, range, args::VArgs{NA}) where {Fun<:Function, NA}
check_boxed_variables(fun)
@inline forall(fun, backend.host, range, unwrap(args)...)
end
@inline function offload(fun::Fun, backend::FakeGPU, range, args::VArgs{NA}) where {Fun<:Function, NA}
check_boxed_variables(fun)
@inline offload(fun, backend.host, range, unwrap(args)...)
end
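# Hypothetical usage: map_closure(unwrap, fun) would rebuild closure `fun` with each
# captured DeviceArray replaced by its underlying data.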
function map_closure(f, closure::Function)
# replaces by `f(x)` each object `x` captured by `closure`
captured = map(n->f(getproperty(closure, n)), propertynames(closure))
return replace_captured(closure, captured)
end
@generated function replace_captured(closure::Closure, args) where {Closure<:Function}
# here closure and args are Types : typeof(f), Tuple{Type1, ...} and
# in the returned expression, they are the actual arguments
basetype = closure.name.wrapper
types = args.parameters
N = length(types)
if N>0
return Expr(:new, basetype{types...}, ( :(args[$i]) for i=1:N)... )
else
return :(closure)
end
end
| LoopManagers | https://github.com/ClimFlows/LoopManagers.jl.git |
|
[
"MIT"
] | 0.1.6 | b347591e31387d63c84e9ca602b813a6dc67582d | code | 1206 | mutable struct Barrier{Lock}
const lock::Lock
const size::Int
left::Int # number of threads not having reached the barrier
end
function Base.wait(barrier::Barrier)
start = time()
left = lock(barrier.lock) do
barrier.left = barrier.left-1
end
if left==0 # we are the last thread arriving at the barrier
lock(barrier.lock) do
barrier.left = barrier.size
end
else # wait until barrier.left == barrier.size
while left<barrier.size
sleep(0.0)
left = lock(barrier.lock) do
barrier.left
end
end
end
return time()-start
end
function run_thread(id, barrier)
@info "run_thread" id barrier.size barrier.left
for i=1:20
sleep(0.05*rand())
wait(barrier)
end
@info "run_thread" id barrier.size barrier.left
end
function main(N)
barrier = Barrier(Threads.ReentrantLock(), N, N)
@info "main" barrier.size barrier.left
@sync begin
for id=1:N
Threads.@spawn run_thread(id, barrier)
end
end
@info "main" barrier.size barrier.left
println()
end
# main(4)
# main(5)
map(main, (1,4,5,10,100))
| LoopManagers | https://github.com/ClimFlows/LoopManagers.jl.git |
|
[
"MIT"
] | 0.1.6 | b347591e31387d63c84e9ca602b813a6dc67582d | code | 1428 | @loops function my_cumsum!(_, a, b, ptop, fac)
let (irange, jrange) = (axes(a, 1), axes(a, 2))
nz = size(a,3)
for j in jrange
@vec for i in irange
@inbounds a[i, j, nz] = ptop + (fac/2)*b[i, j, nz]
end
end
for j in jrange
for k = nz:-1:2
@vec for i in irange
@inbounds a[i, j, k-1] = a[i, j, k] + (b[i, j, k-1] + b[i,j,k])*(fac/2)
end
end
end
end
end
@loops function my_cumsum2!(_, a, b, ptop, fac)
let (irange, jrange) = (axes(a, 1), axes(a, 2))
nz = size(a,3)
for j in jrange
@vec for i in irange
@inbounds a[i, j, nz] = ptop + (fac/2)*b[i, j, nz]
end
for k = nz:-1:2
@vec for i in irange
@inbounds a[i, j, k-1] = a[i, j, k] + (b[i, j, k-1] + b[i,j,k])*(fac/2)
end
end
end
end
end
@loops function my_cumsum3!(_, a, b, ptop, fac)
let (irange, jrange) = (axes(a, 1), axes(a, 2))
nz = size(a,3)
for j in jrange
@vec for i in irange
@inbounds a[i, j, nz] = ptop + (fac/2)*b[i, j, nz]
for k = nz:-1:2
@inbounds a[i, j, k-1] = a[i, j, k] + (b[i, j, k-1] + b[i,j,k])*(fac/2)
end
end
end
end
end
| LoopManagers | https://github.com/ClimFlows/LoopManagers.jl.git |
|
[
"MIT"
] | 0.1.6 | b347591e31387d63c84e9ca602b813a6dc67582d | code | 3467 | using LoopManagers
using ManagedLoops: @loops, @vec
using SIMDMathFunctions
using KernelAbstractions
using ThreadPinning
pinthreads(:cores)
threadinfo()
using InteractiveUtils: versioninfo, @code_native
using Chairmarks: @be
using Test
include("cumsum.jl")
versioninfo()
myfun(x) = @vec if x > 0
exp(log(x))
else
log(exp(x))
end
@loops function loop!(_, fun, a, b)
let (irange, jrange) = axes(a)
@vec for i in irange, j in jrange
@inbounds a[i, j] = fun(b[i, j])
end
end
end
function test(mgr, b)
a = similar(b)
@info mgr
loop!(mgr, exp, a, b)
display(@be loop!(mgr, myfun, a, b) seconds = 1)
return nothing
end
function timed(fun, N)
fun()
times = [(@timed fun()).time for _ = 1:N+10]
sort!(times)
return Float32(sum(times[1:N]) / N)
end
function scaling(fun, name, N, simd = VectorizedCPU())
@info "====== Multi-thread scaling: $name ======"
single = 1e9
@info "Threads \t elapsed \t speedup \t efficiency"
for nt = 1:Threads.nthreads()
mgr = LoopManagers.MultiThread(simd, nt)
elapsed = timed(() -> fun(mgr), N)
nt == 1 && (single = elapsed)
percent(x) = round(100x; digits = 0)
speedup = single / elapsed
@info "$nt \t\t $elapsed \t $(percent(speedup))% \t $(percent(speedup/nt))%"
end
println()
end
let b = randn(1023, 1023), a = similar(b)
scaling("compute-bound loop", 100) do mgr
loop!(mgr, myfun, a, b)
end
end
println()
let b = randn(128, 64, 30), a = similar(b)
for vlen in (4, 16, 64)
scaling_cumsum(msg, fun!) =
scaling("$msg vlen=$vlen", 1000, VectorizedCPU(vlen)) do mgr
fun!(mgr, a, b, 1.0, 1.234)
end
scaling_cumsum("reverse cumsum_1", my_cumsum!)
scaling_cumsum("reverse cumsum_2", my_cumsum2!)
scaling_cumsum("reverse cumsum_3", my_cumsum3!)
end
end
@testset "OpenMP-like manager" begin
main = LoopManagers.MainThread(PlainCPU(), 10)
@info "Testing MainThread with $(main.nthreads) threads."
LoopManagers.parallel(main) do worker
@info "Worker $(worker.id)"
x = LoopManagers.share(worker) do master
randn()
end
println("Thread $(Threads.threadid()) has drawn $x.")
end
@test true
end
println()
@testset "SIMD, multithread and auto-tuned managers" begin
managers = Any[
LoopManagers.PlainCPU(),
LoopManagers.VectorizedCPU(),
LoopManagers.MultiThread(),
LoopManagers.MultiThread(VectorizedCPU(8)),
LoopManagers.MultiThread(VectorizedCPU(16)),
LoopManagers.MultiThread(VectorizedCPU(32)),
]
openMP = LoopManagers.MainThread(VectorizedCPU())
let b = randn(Float32, 1023, 1023)
auto = LoopManagers.tune(managers)
for mgr in vcat(managers, openMP, auto)
test(mgr, b)
@test true
end
end
end
test_bc(mgr, a, b, c) = @. mgr[a] = log(exp(b) * exp(c))
println()
@testset "Managed broadcasting" begin
managers = Any[
LoopManagers.PlainCPU(),
LoopManagers.VectorizedCPU(),
LoopManagers.MultiThread(),
LoopManagers.MultiThread(VectorizedCPU()),
]
for dims in (10000, (100, 100), (100, 10, 10), (10, 10, 10, 10))
a, b, c = (randn(Float32, dims) for i = 1:3)
for mgr in managers
test_bc(mgr, a, b, c)
@test true
end
end
end
| LoopManagers | https://github.com/ClimFlows/LoopManagers.jl.git |
|
[
"MIT"
] | 0.1.6 | b347591e31387d63c84e9ca602b813a6dc67582d | docs | 1715 | # LoopManagers
[](https://ClimFlows.github.io/LoopManagers.jl/stable/)
[](https://ClimFlows.github.io/LoopManagers.jl/dev/)
[](https://github.com/ClimFlows/LoopManagers.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/ClimFlows/LoopManagers.jl)
LoopManagers is the companion package of [MangedLoops](https://github.com/ClimFlows/ManagedLoops.jl). It provides managers to execute loops with SIMD, on multiple threads or on GPUs. There is also a meta-manager that selects among a provided set of managers the one with the shortest execution time, on a per-function basis.
## Example
```
# Would belong to a 'provider' module, depending only on ManagedLoops
using ManagedLoops: @loops, @vec
@loops function loop!(_, a, b)
let (irange, jrange) = axes(a)
@vec for i in irange, j in jrange
@inbounds a[i, j] = @fastmath exp(b[i, j])
end
end
end
# Belongs to a 'consumer' module/program, that requires LoopManagers to run
using LoopManagers: PlainCPU, VectorizedCPU, MultiThread
using SIMDMathFunctions # for vectorized exp
using BenchmarkTools
using InteractiveUtils
versioninfo() # check JULIA_EXCLUSIVE and JULIA_NUM_THREADS
scalar = PlainCPU()
simd = VectorizedCPU(8)
threads = MultiThread(simd)
b = randn(1024, 1024);
a = similar(b);
for mgr in (scalar, simd, threads)
@info mgr
display(@benchmark loop!($mgr, $a, $b))
end
```
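The auto-tuning meta-manager mentioned in the overview can be exercised the same way; a minimal sketch reusing the managers above (`tune` is not exported, hence the qualified name; the candidate list is illustrative):
```
auto = LoopManagers.tune([scalar, simd, threads])
loop!(auto, a, b) # each call times one candidate and keeps per-call-signature statistics
```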
| LoopManagers | https://github.com/ClimFlows/LoopManagers.jl.git |
|
[
"MIT"
] | 0.1.6 | b347591e31387d63c84e9ca602b813a6dc67582d | docs | 197 | ```@meta
CurrentModule = LoopManagers
```
# LoopManagers
Documentation for [LoopManagers](https://github.com/ClimFlows/LoopManagers.jl).
```@index
```
```@autodocs
Modules = [LoopManagers]
```
| LoopManagers | https://github.com/ClimFlows/LoopManagers.jl.git |
|
[
"MIT"
] | 0.2.6 | a03658185b723439c24832cc6dd3f0016966e6df | code | 757 | module DifferentiableTrajectoryOptimization
using ChainRulesCore: ChainRulesCore, NoTangent, ProjectTo, @thunk
using Symbolics: Symbolics, @variables, scalarize
using Ipopt: Ipopt
using OSQP: OSQP
using SparseArrays: SparseArrays, findnz, sparse, spzeros
using LinearAlgebra: ColumnNorm, qr, I
using ForwardDiff: ForwardDiff, Dual
using PATHSolver: PATHSolver
include("utils.jl")
include("parametric_optimization_problem.jl")
include("qp_solver.jl")
include("nlp_solver.jl")
include("mcp_solver.jl")
include("autodiff.jl")
include("optimizer.jl")
# Public API
export Optimizer,
ParametricTrajectoryOptimizationProblem,
parameter_dimension,
get_constraints_from_box_bounds,
QPSolver,
MCPSolver,
NLPSolver,
is_thread_safe
end
| DifferentiableTrajectoryOptimization | https://github.com/lassepe/DifferentiableTrajectoryOptimization.jl.git |