# RegularizedLeastSquares.jl source files (MIT license, v0.16.5)
# https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git

# ===== RegularizedLeastSquares.jl (main module) =====
module RegularizedLeastSquares
import Base: length, iterate, findfirst, copy
using LinearAlgebra
import LinearAlgebra.BLAS: gemv, gemv!
import LinearAlgebra: BlasFloat, normalize!, norm, rmul!, lmul!
using SparseArrays
using IterativeSolvers
using Random
using VectorizationBase
using VectorizationBase: shufflevector, zstridedpointer
using FLoops
using LinearOperators: opEye
using StatsBase
using LinearOperatorCollection
using InteractiveUtils
export AbstractLinearSolver, AbstractSolverState, createLinearSolver, init!, deinit, solve!, linearSolverList, linearSolverListReal, applicableSolverList, power_iterations
abstract type AbstractLinearSolver end
abstract type AbstractSolverState{S} end
"""
solve!(solver::AbstractLinearSolver, b; x0 = 0, callbacks = (_, _) -> nothing)
Solves an inverse problem for the data vector `b` using `solver`.
# Required Arguments
* `solver::AbstractLinearSolver` - linear solver (e.g., `ADMM` or `FISTA`), containing forward/normal operator and regularizer
* `b::AbstractVector` - data vector if `A` was supplied to the solver, back-projection of the data otherwise
# Optional Keyword Arguments
* `x0::AbstractVector` - initial guess for the solution; default is zero
* `callbacks` - (optionally a vector of) function or callable struct that takes the two arguments `callback(solver, iteration)` and, e.g., stores, prints, or plots the intermediate solutions or convergence parameters. Be sure not to modify `solver` or `iteration` in the callback function, as this would jeopardize convergence. The default does nothing.
# Examples
The optimization problem
```math
argmin_x ||Ax - b||_2^2 + λ ||x||_1
```
can be solved with the following lines of code:
```jldoctest solveExample
julia> using RegularizedLeastSquares
julia> A = [0.831658 0.96717
0.383056 0.39043
0.820692 0.08118];
julia> x = [0.5932234523399985; 0.2697534345340015];
julia> b = A * x;
julia> S = ADMM(A);
julia> x_approx = solve!(S, b)
2-element Vector{Float64}:
0.5932234523399984
0.26975343453400163
```
Here, we use [`L1Regularization`](@ref), which is default for [`ADMM`](@ref). All regularization options can be found in [API for Regularizers](@ref).
The following example solves the same problem, but stores the solution `x` of each iteration in `tr`:
```jldoctest solveExample
julia> tr = Dict[]
Dict[]
julia> store_trace!(tr, solver, iteration) = push!(tr, Dict("iteration" => iteration, "x" => solver.x, "beta" => solver.β))
store_trace! (generic function with 1 method)
julia> x_approx = solve!(S, b; callbacks=(solver, iteration) -> store_trace!(tr, solver, iteration))
2-element Vector{Float64}:
0.5932234523399984
0.26975343453400163
julia> tr[3]
Dict{String, Any} with 3 entries:
"iteration" => 2
"x" => [0.593223, 0.269753]
"beta" => [1.23152, 0.927611]
```
The last example demonstrates how to plot the solution at every 10th iteration and store the solver's convergence metrics:
```julia
julia> using Plots
julia> conv = StoreConvergenceCallback()
julia> function plot_trace(solver, iteration)
if iteration % 10 == 0
display(scatter(solver.x))
end
end
plot_trace (generic function with 1 method)
julia> x_approx = solve!(S, b; callbacks = [conv, plot_trace]);
```
The keyword `callbacks` allows you to pass a (vector of) callable objects that take the arguments `solver` and `iteration` and print, store, or plot intermediate results.
See also [`StoreSolutionCallback`](@ref), [`StoreConvergenceCallback`](@ref), [`CompareSolutionCallback`](@ref) for a number of provided callback options.
"""
function solve!(solver::AbstractLinearSolver, b; callbacks = (_, _) -> nothing, kwargs...)
if !(callbacks isa Vector)
callbacks = [callbacks]
end
init!(solver, b; kwargs...)
foreach(cb -> cb(solver, 0), callbacks)
for (iteration, _) = enumerate(solver)
foreach(cb -> cb(solver, iteration), callbacks)
end
return solversolution(solver)
end
"""
solve!(cb, solver, b; kwargs...)
Pass `cb` as the callback to `solve!`
# Examples
```julia
julia> x_approx = solve!(solver, b) do solver, iteration
println(iteration)
end
```
"""
solve!(cb, solver::AbstractLinearSolver, b; kwargs...) = solve!(solver, b; kwargs..., callbacks = cb)
include("MultiThreading.jl")
export AbstractRowActionSolver
abstract type AbstractRowActionSolver <: AbstractLinearSolver end
export AbstractDirectSolver
abstract type AbstractDirectSolver <: AbstractLinearSolver end
export AbstractPrimalDualSolver
abstract type AbstractPrimalDualSolver <: AbstractLinearSolver end
export AbstractProximalGradientSolver
abstract type AbstractProximalGradientSolver <: AbstractLinearSolver end
export AbstractKrylovSolver
abstract type AbstractKrylovSolver <: AbstractLinearSolver end
# Fallback function
setlambda(S::AbstractMatrix, λ::Real) = nothing
include("Transforms.jl")
include("Regularization/Regularization.jl")
include("proximalMaps/ProximalMaps.jl")
export solversolution, solverconvergence, solverstate
"""
solversolution(solver::AbstractLinearSolver)
Return the current solution of the solver
"""
solversolution(solver::AbstractLinearSolver) = solversolution(solverstate(solver))
"""
solversolution(state::AbstractSolverState)
Return the current solution of the solver's state
"""
solversolution(state::AbstractSolverState) = state.x
"""
solverconvergence(solver::AbstractLinearSolver)
Return a named tuple of the solvers current convergence metrics
"""
function solverconvergence end
"""
solverstate(solver::AbstractLinearSolver)
Return the current state of the solver
"""
solverstate(solver::AbstractLinearSolver) = solver.state
solverconvergence(solver::AbstractLinearSolver) = solverconvergence(solverstate(solver))
"""
init!(solver::AbstractLinearSolver, b; kwargs...)
Prepare the solver for iteration based on the given data vector `b` and `kwargs`.
"""
init!(solver::AbstractLinearSolver, b; kwargs...) = init!(solver, solverstate(solver), b; kwargs...)
iterate(solver::AbstractLinearSolver) = iterate(solver, solverstate(solver))
include("Utils.jl")
include("Kaczmarz.jl")
#include("DAXKaczmarz.jl")
#include("DAXConstrained.jl")
include("CGNR.jl")
include("Direct.jl")
include("FISTA.jl")
include("OptISTA.jl")
include("POGM.jl")
include("ADMM.jl")
include("SplitBregman.jl")
#include("PrimalDualSolver.jl")
include("Callbacks.jl")
include("deprecated.jl")
"""
Return a list of all available linear solvers
"""
function linearSolverList()
#filter(s -> s ∉ [DaxKaczmarz, DaxConstrained, PrimalDualSolver], linearSolverListReal())
linearSolverListReal()
end
function linearSolverListReal()
union(subtypes.(subtypes(AbstractLinearSolver))...) # For deeper nested type extend this to loop for types with isabstracttype == true
end
export isapplicable
isapplicable(solver::AbstractLinearSolver, args...) = isapplicable(typeof(solver), args...)
isapplicable(x, reg::AbstractRegularization) = isapplicable(x, [reg])
isapplicable(::Type{T}, reg::Vector{<:AbstractRegularization}) where T <: AbstractLinearSolver = false
function isapplicable(::Type{T}, reg::Vector{<:AbstractRegularization}) where T <: AbstractRowActionSolver
applicable = true
applicable &= length(findsinks(AbstractParameterizedRegularization, reg)) <= 2
applicable &= length(findsinks(L2Regularization, reg)) == 1
return applicable
end
function isapplicable(::Type{T}, reg::Vector{<:AbstractRegularization}) where T <: AbstractPrimalDualSolver
# TODO
return true
end
function isapplicable(::Type{T}, reg::Vector{<:AbstractRegularization}) where T <: AbstractProximalGradientSolver
applicable = true
applicable &= length(findsinks(AbstractParameterizedRegularization, reg)) == 1
return applicable
end
function isapplicable(::Type{T}, A, x) where T <: AbstractLinearSolver
# TODO
applicable = true
return applicable
end
"""
isapplicable(solverType::Type{<:AbstractLinearSolver}, A, x, reg)
return `true` if a `solver` of type `solverType` is applicable to system matrix `A`, data `x` and regularization terms `reg`.
"""
isapplicable(::Type{T}, A, x, reg) where T <: AbstractLinearSolver = isapplicable(T, A, x) && isapplicable(T, reg)
"""
applicableSolverList(args...)
list all solvers that are applicable to the given arguments. The arguments are the same as for `isapplicable`, without the solver type.
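# Examples
A hedged sketch; the returned list depends on which solver files are loaded:
```julia
reg = [L2Regularization(0.1), L1Regularization(0.01)]
applicableSolverList(reg) # subset of solvers that accept this regularization setup
```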
See also [`isapplicable`](@ref), [`linearSolverList`](@ref).
"""
applicableSolverList(args...) = filter(solver -> isapplicable(solver, args...), linearSolverListReal())
function filterKwargs(T::Type, kwargWarning, kwargs)
table = methods(T)
keywords = union(Base.kwarg_decl.(table)...)
filtered = filter(in(keywords), keys(kwargs))
if length(filtered) < length(kwargs) && kwargWarning
filteredout = filter(!in(keywords), keys(kwargs))
@warn "The following arguments were passed but filtered out: $(join(filteredout, ", ")). Please watch closely if this introduces unexpected behaviour in your code."
end
return [key=>kwargs[key] for key in filtered]
end
"""
createLinearSolver(solver::AbstractLinearSolver, A; kargs...)
This method creates a solver. The supported solvers are methods typically used for solving
regularized linear systems. All solvers return an approximate solution to Ax = b.
A list of the available solvers is returned by [`linearSolverList`](@ref).
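# Examples
A minimal sketch; assumes `A` and `b` are given and that the chosen solver accepts a `reg` keyword (unsupported keywords are filtered out with a warning):
```julia
S = createLinearSolver(ADMM, A; reg = L1Regularization(0.01))
x_approx = solve!(S, b)
```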
"""
function createLinearSolver(solver::Type{T}, A; kwargWarning::Bool = true, kwargs...) where {T<:AbstractLinearSolver}
return solver(A; filterKwargs(T,kwargWarning,kwargs)...)
end
function createLinearSolver(solver::Type{T}; AHA, kwargWarning::Bool = true, kwargs...) where {T<:AbstractLinearSolver}
return solver(; filterKwargs(T,kwargWarning,kwargs)..., AHA = AHA)
end
end # module RegularizedLeastSquares

# ===== SplitBregman.jl =====
export SplitBregman
mutable struct SplitBregman{matT,opT,R,ropT,P,preconT} <: AbstractPrimalDualSolver
# operators and regularization
A::matT
reg::Vector{R}
regTrafo::Vector{ropT}
proj::Vector{P}
# fields and operators for x update
AHA::opT
# other parameters
precon::preconT
normalizeReg::AbstractRegularizationNormalization
verbose::Bool
iterations::Int64
iterationsInner::Int64
iterationsCG::Int64
state::AbstractSolverState{<:SplitBregman}
end
mutable struct SplitBregmanState{rT <: Real, rvecT <: AbstractVector{rT}, vecT <: Union{AbstractVector{rT}, AbstractVector{Complex{rT}}}} <: AbstractSolverState{SplitBregman}
y::vecT
# fields and operators for x update
β::vecT
β_y::vecT
# fields for primal & dual variables
x::vecT
z::Vector{vecT}
zᵒˡᵈ::Vector{vecT}
u::Vector{vecT}
# other paremters
ρ::rvecT
iteration::Int64
iter_cnt::Int64
# state variables for CG
cgStateVars::CGStateVariables
# convergence parameters
rᵏ::rvecT
sᵏ::rvecT
ɛᵖʳⁱ::rvecT
ɛᵈᵘᵃ::rvecT
σᵃᵇˢ::rT
absTol::rT
relTol::rT
tolInner::rT
end
"""
SplitBregman(A; AHA = A'*A, precon = Identity(), reg = L1Regularization(zero(real(eltype(AHA)))), regTrafo = opEye(eltype(AHA), size(AHA,1)), normalizeReg = NoNormalization(), rho = 1e-1, iterations = 10, iterationsInner = 10, iterationsCG = 10, absTol = eps(real(eltype(AHA))), relTol = eps(real(eltype(AHA))), tolInner = 1e-5, verbose = false)
SplitBregman( ; AHA = , precon = Identity(), reg = L1Regularization(zero(real(eltype(AHA)))), regTrafo = opEye(eltype(AHA), size(AHA,1)), normalizeReg = NoNormalization(), rho = 1e-1, iterations = 10, iterationsInner = 10, iterationsCG = 10, absTol = eps(real(eltype(AHA))), relTol = eps(real(eltype(AHA))), tolInner = 1e-5, verbose = false)
Creates a `SplitBregman` object for the forward operator `A` or normal operator `AHA`.
# Required Arguments
* `A` - forward operator
OR
* `AHA` - normal operator (as a keyword argument)
# Optional Keyword Arguments
* `AHA` - normal operator is optional if `A` is supplied
* `precon` - preconditioner for the internal CG algorithm
* `reg::AbstractParameterizedRegularization` - regularization term; can also be a vector of regularization terms
* `regTrafo` - transformation to a space in which `reg` is applied; if `reg` is a vector, `regTrafo` has to be a vector of the same length. Use `opEye(eltype(AHA), size(AHA,1))` if no transformation is desired.
* `normalizeReg::AbstractRegularizationNormalization` - regularization normalization scheme; options are `NoNormalization()`, `MeasurementBasedNormalization()`, `SystemMatrixBasedNormalization()`
* `rho::Real` - weights for condition on regularized variables; can also be a vector for multiple regularization terms
* `iterations::Int` - maximum number of outer iterations. Set to 1 for unconstrained split Bregman (equivalent to ADMM)
* `iterationsInner::Int` - maximum number of inner iterations
* `iterationsCG::Int` - maximum number of (inner) CG iterations
* `absTol::Real` - absolute tolerance for stopping criterion
* `relTol::Real` - relative tolerance for stopping criterion
* `tolInner::Real` - relative tolerance for CG stopping criterion
* `verbose::Bool` - print residual in each iteration
This algorithm solves the constrained problem (Eq. (4.7) in [Tom Goldstein and Stanley Osher](https://doi.org/10.1137/080725891)), i.e. `||R(x)||₁` such that `||Ax -b||₂² < σ²`. In order to solve the unconstrained problem (Eq. (4.8) in [Tom Goldstein and Stanley Osher](https://doi.org/10.1137/080725891)), i.e. `||Ax -b||₂² + λ ||R(x)||₁`, you can either set `iterations=1` or use ADMM instead, which is equivalent (`iterations=1` in SplitBregman is implied in ADMM, and the SplitBregman variable `iterationsInner` is simply called `iterations` in ADMM).
Like ADMM, SplitBregman differs from ISTA-type algorithms in the sense that the proximal operation is applied separately from the transformation to the space in which the penalty is applied. This is reflected by the interface which has `reg` and `regTrafo` as separate arguments. E.g., for a TV penalty, you should NOT set `reg=TVRegularization`, but instead use `reg=L1Regularization(λ), regTrafo=RegularizedLeastSquares.GradientOp(Float64; shape=(Nx,Ny,Nz))`.
See also [`createLinearSolver`](@ref), [`solve!`](@ref).
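# Examples
A minimal usage sketch; assumes `A` and `b` are given and compatible:
```julia
S = createLinearSolver(SplitBregman, A; reg = L1Regularization(0.1), iterations = 5, iterationsInner = 20)
x_approx = solve!(S, b)
```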
"""
SplitBregman(; AHA, kwargs...) = SplitBregman(nothing; kwargs..., AHA = AHA)
function SplitBregman(A
; AHA = A'*A
, precon = Identity()
, reg = L1Regularization(zero(real(eltype(AHA))))
, regTrafo = opEye(eltype(AHA), size(AHA,1), S = LinearOperators.storage_type(AHA))
, normalizeReg::AbstractRegularizationNormalization = NoNormalization()
, rho = 1e-1
, iterations::Int = 10
, iterationsInner::Int = 10
, iterationsCG::Int = 10
, absTol::Real = eps(real(eltype(AHA)))
, relTol::Real = eps(real(eltype(AHA)))
, tolInner::Real = 1e-5
, verbose = false
)
T = eltype(AHA)
rT = real(T)
reg = isa(reg, AbstractVector) ? reg : [reg]
regTrafo = isa(regTrafo, AbstractVector) ? regTrafo : [regTrafo]
@assert length(reg) == length(regTrafo) "reg and regTrafo must have the same length"
indices = findsinks(AbstractProjectionRegularization, reg)
proj = [reg[i] for i in indices]
proj = identity.(proj)
deleteat!(reg, indices)
deleteat!(regTrafo, indices)
if typeof(rho) <: Number
rho = [rT.(rho) for _ ∈ eachindex(reg)]
else
rho = rT.(rho)
end
x = Vector{T}(undef, size(AHA,2))
y = similar(x)
β = similar(x)
β_y = similar(x)
# fields for primal & dual variables
z = [similar(x, size(regTrafo[i],1)) for i ∈ eachindex(reg)]
zᵒˡᵈ = [similar(z[i]) for i ∈ eachindex(reg)]
u = [similar(z[i]) for i ∈ eachindex(reg)]
# statevariables for CG
# we store them here to prevent CG from allocating new fields at each call
cgStateVars = CGStateVariables(zero(x),similar(x),similar(x))
# convergence parameters
rᵏ = Array{rT}(undef, length(reg))
sᵏ = similar(rᵏ)
ɛᵖʳⁱ = similar(rᵏ)
ɛᵈᵘᵃ = similar(rᵏ)
# normalization parameters
reg = normalize(SplitBregman, normalizeReg, reg, A, nothing)
state = SplitBregmanState(y, β, β_y, x, z, zᵒˡᵈ, u, rho, 1, 1, cgStateVars,rᵏ,sᵏ,ɛᵖʳⁱ,ɛᵈᵘᵃ,rT(0),rT(absTol),rT(relTol),rT(tolInner))
return SplitBregman(A,reg,regTrafo,proj,AHA,precon,normalizeReg,verbose,iterations,iterationsInner,iterationsCG,state)
end
function init!(solver::SplitBregman, state::SplitBregmanState{rT, rvecT, vecT}, b::otherT; kwargs...) where {rT, rvecT, vecT, otherT <: AbstractVector}
y = similar(b, size(state.y)...)
x = similar(b, size(state.x)...)
β = similar(b, size(state.β)...)
β_y = similar(b, size(state.β_y)...)
z = [similar(b, size(state.z[i])...) for i ∈ eachindex(solver.reg)]
zᵒˡᵈ = [similar(b, size(state.zᵒˡᵈ[i])...) for i ∈ eachindex(solver.reg)]
u = [similar(b, size(state.u[i])...) for i ∈ eachindex(solver.reg)]
cgStateVars = CGStateVariables(zero(x),similar(x),similar(x))
state = SplitBregmanState(y, β, β_y, x, z, zᵒˡᵈ, u, state.ρ, state.iteration, state.iter_cnt, cgStateVars,
state.rᵏ, state.sᵏ, state.ɛᵖʳⁱ, state.ɛᵈᵘᵃ, state.σᵃᵇˢ, state.absTol, state.relTol, state.tolInner)
solver.state = state
init!(solver, state, b; kwargs...)
end
"""
init!(solver::SplitBregman, b; x0 = 0)
(re-) initializes the SplitBregman iterator
"""
function init!(solver::SplitBregman, state::SplitBregmanState{rT, rvecT, vecT}, b::vecT; x0 = 0) where {rT, rvecT, vecT <: AbstractVector}
state.x .= x0
# right hand side for the x-update
if solver.A === nothing
state.β_y .= b
else
mul!(state.β_y, adjoint(solver.A), b)
end
state.y .= state.β_y
# primal and dual variables
for i ∈ eachindex(solver.reg)
state.z[i] .= solver.regTrafo[i]*state.x
state.u[i] .= 0
end
# convergence parameter
state.rᵏ .= Inf
state.sᵏ .= Inf
state.ɛᵖʳⁱ .= 0
state.ɛᵈᵘᵃ .= 0
state.σᵃᵇˢ = sqrt(length(b)) * state.absTol
# normalization of regularization parameters
solver.reg = normalize(solver, solver.normalizeReg, solver.reg, solver.A, b)
# reset iteration counter
state.iter_cnt = 1
state.iteration = 1
end
solverconvergence(state::SplitBregmanState) = (; :primal => state.rᵏ, :dual => state.sᵏ)
function iterate(solver::SplitBregman, state::SplitBregmanState)
if done(solver, state) return nothing end
solver.verbose && println("SplitBregman Iteration #$(state.iteration) – Outer iteration $(state.iter_cnt)")
# update x
state.β .= state.β_y
AHA = solver.AHA
for i ∈ eachindex(solver.reg)
mul!(state.β, adjoint(solver.regTrafo[i]), state.z[i], state.ρ[i], 1)
mul!(state.β, adjoint(solver.regTrafo[i]), state.u[i], -state.ρ[i], 1)
AHA += state.ρ[i] * adjoint(solver.regTrafo[i]) * solver.regTrafo[i]
end
solver.verbose && println("conjugate gradients: ")
cg!(state.x, AHA, state.β, Pl = solver.precon, maxiter = solver.iterationsCG, reltol = state.tolInner, statevars = state.cgStateVars, verbose = solver.verbose)
for proj in solver.proj
prox!(proj, state.x)
end
# proximal map for regularization terms
for i ∈ eachindex(solver.reg)
# swap z and zᵒˡᵈ w/o copying data
tmp = state.zᵒˡᵈ[i]
state.zᵒˡᵈ[i] = state.z[i]
state.z[i] = tmp
# 2. update z using the proximal map of 1/ρ*g(x)
mul!(state.z[i], solver.regTrafo[i], state.x)
state.z[i] .+= state.u[i]
if state.ρ[i] != 0
prox!(solver.reg[i], state.z[i], λ(solver.reg[i])/state.ρ[i]) # λ is divided by ρ to match the ISTA-type algorithms
end
# 3. update u
mul!(state.u[i], solver.regTrafo[i], state.x, 1, 1)
state.u[i] .-= state.z[i]
# update convergence criteria (one for each constraint)
state.rᵏ[i] = norm(solver.regTrafo[i] * state.x - state.z[i]) # primal residual (x-z)
state.sᵏ[i] = norm(state.ρ[i] * adjoint(solver.regTrafo[i]) * (state.z[i] .- state.zᵒˡᵈ[i])) # dual residual (concerning f(x))
state.ɛᵖʳⁱ[i] = max(norm(solver.regTrafo[i] * state.x), norm(state.z[i]))
state.ɛᵈᵘᵃ[i] = norm(state.ρ[i] * adjoint(solver.regTrafo[i]) * state.u[i])
if solver.verbose
println("rᵏ[$i]/ɛᵖʳⁱ[$i] = $(state.rᵏ[i]/state.ɛᵖʳⁱ[i])")
println("sᵏ[$i]/ɛᵈᵘᵃ[$i] = $(state.sᵏ[i]/state.ɛᵈᵘᵃ[i])")
flush(stdout)
end
end
if converged(solver, state) || state.iteration >= solver.iterationsInner
state.β_y .+= state.y
mul!(state.β_y, solver.AHA, state.x, -1, 1)
# reset z and b
for i ∈ eachindex(solver.reg)
mul!(state.z[i], solver.regTrafo[i], state.x)
state.u[i] .= 0
end
state.iter_cnt += 1
state.iteration = 0
end
state.iteration += 1
return state.x, state
end
function converged(solver::SplitBregman, state)
for i ∈ eachindex(solver.reg)
(state.rᵏ[i] >= state.σᵃᵇˢ + state.relTol * state.ɛᵖʳⁱ[i]) && return false
(state.sᵏ[i] >= state.σᵃᵇˢ + state.relTol * state.ɛᵈᵘᵃ[i]) && return false
end
return true
end
@inline done(solver::SplitBregman,state) = converged(solver, state) || (state.iteration == 1 && state.iter_cnt > solver.iterations)

# ===== Transforms.jl =====
abstract type AbstractTransform end
# has to implement
# transform(transform::AbstractTransform, x)
abstract type AbstractInveratableTransform <: AbstractTransform end
# has to implement
# inverse_transform(transform::AbstractInveratableTransform, x)
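"""
    MinMaxTransform(x)

Invertible transform that linearly maps the values of `x` onto the interval [0, 1]. A usage sketch:

```julia
t = MinMaxTransform([2.0, 4.0, 6.0])   # min = 2.0, max = 6.0
y = transform(t, [2.0, 4.0, 6.0])      # [0.0, 0.5, 1.0]
inverse_transform(t, y)                # [2.0, 4.0, 6.0]
```
"""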
struct MinMaxTransform <: AbstractInveratableTransform
min
max
end
MinMaxTransform(x)= MinMaxTransform(minimum(x), maximum(x))
function transform(transform::MinMaxTransform, x)
return (x .- transform.min) ./ (transform.max .- transform.min)
end
function inverse_transform(transform::MinMaxTransform, x)
return x .* (transform.max .- transform.min) .+ transform.min
end
struct IdentityTransform <: AbstractInveratableTransform
end
IdentityTransform(x)= IdentityTransform()
function transform(::IdentityTransform, x)
return x
end
function inverse_transform(::IdentityTransform, x)
return x
end
struct ZTransform <: AbstractInveratableTransform
mean
std
end
ZTransform(x)= ZTransform(mean(x), std(x))
function transform(transform::ZTransform, x)
return (x .- transform.mean) ./ transform.std
end
function inverse_transform(transform::ZTransform, x)
return x .* transform.std .+ transform.mean
end
struct ClampedScalingTransform <: AbstractInveratableTransform
v_min
v_max
mask
x
end
function ClampedScalingTransform(x, v_min, v_max)
mask = (x .< v_min) .| (x .>= v_max)
return ClampedScalingTransform(v_min, v_max, mask, x)
end
function transform(transform::ClampedScalingTransform, x)
return (clamp.(x, transform.v_min, transform.v_max) .- transform.v_min) ./ (transform.v_max - transform.v_min)
end
function inverse_transform(transform::ClampedScalingTransform, x)
out = x .* (transform.v_max - transform.v_min) .+ transform.v_min
out[transform.mask] = transform.x[transform.mask]
return out
end

# ===== Utils.jl =====
export rownorm², nrmsd
"""
This function computes the squared 2-norm of a row of a dense matrix.
"""
function rownorm²(B::Transpose{T,S},row::Int64) where {T,S<:DenseMatrix}
A = B.parent
U = real(eltype(A))
res::U = BLAS.nrm2(size(A,1), pointer(A,(LinearIndices(size(A)))[1,row]), 1)^2
return res
end
function rownorm²(A::AbstractMatrix,row::Int64)
T = real(eltype(A))
res = zero(T)
@simd for n=1:size(A,2)
res += abs2(A[row,n])
end
return res
end
rownorm²(A::AbstractLinearOperator,row::Int64) = rownorm²(Matrix(A[row, :]), 1)
rownorm²(A::ProdOp{T, <:WeightingOp, matT}, row::Int64) where {T, matT} = A.A.weights[row]^2*rownorm²(A.B, row)
"""
This function computes the squared 2-norm of a row of a sparse matrix.
"""
function rownorm²(B::Transpose{T,S},row::Int64) where {T,S<:SparseMatrixCSC}
A = B.parent
U = real(eltype(A))
res::U = BLAS.nrm2(A.colptr[row+1]-A.colptr[row], pointer(A.nzval,A.colptr[row]), 1)^2
return res
end
function rownorm²(A, rows)
res = zero(real(eltype(A)))
@simd for row in rows
res += rownorm²(A, row)
end
return res
end
### dot_with_matrix_row ###
# Fallback implementation
#=function dot_with_matrix_row_simd{T<:Complex}(A::AbstractMatrix{T}, x::Vector{T}, k::Int64)
res = zero(T)
@simd for n=1:size(A,2)
@inbounds res += conj(A[k,n])*x[n]
end
return res
end=#
"""
This function calculates ∑ᵢ Aᵢₖxᵢ for dense matrices.
"""
function dot_with_matrix_row(A::DenseMatrix{T}, x::Vector{T}, k::Int64) where {T<:Complex}
BLAS.dotu(length(x), pointer(A, (LinearIndices(size(A)))[k,1]), size(A,1), pointer(x,1), 1)
end
function dot_with_matrix_row(B::Transpose{T,S},
x::Vector{T}, k::Int64) where {T<:Complex,S<:DenseMatrix}
A = B.parent
BLAS.dotu(length(x), pointer(A,(LinearIndices(size(A)))[1,k]), 1, pointer(x,1), 1)
end
"""
This function calculates ∑ᵢ Aᵢₖxᵢ for dense matrices.
"""
function dot_with_matrix_row(A::DenseMatrix{T}, x::Vector{T}, k::Int64) where {T<:Real}
BLAS.dot(length(x), pointer(A,(LinearIndices(size(A)))[k,1]), size(A,1), pointer(x,1), 1)
end
"""
This function calculates ∑ᵢ Aᵢₖxᵢ for dense matrices.
"""
function dot_with_matrix_row(B::Transpose{T,S},
x::Vector{T}, k::Int64) where {T<:Real,S<:DenseMatrix}
A = B.parent
BLAS.dot(length(x), pointer(A,(LinearIndices(size(A)))[1,k]), 1, pointer(x,1), 1)
end
"""
This function calculates ∑ᵢ Aᵢₖxᵢ for sparse matrices.
"""
function dot_with_matrix_row(B::Transpose{T,S},
x::Vector{T}, k::Int64) where {T,S<:SparseMatrixCSC}
A = B.parent
tmp = zero(T)
N = A.colptr[k+1]-A.colptr[k]
for n=A.colptr[k]:N-1+A.colptr[k]
@inbounds tmp += A.nzval[n]*x[A.rowval[n]]
end
tmp
end
function dot_with_matrix_row(prod::ProdOp{T, <:WeightingOp, matT}, x::AbstractVector{T}, k) where {T, matT}
A = prod.B
return prod.A.weights[k]*dot_with_matrix_row(A, x, k)
end
### enfReal! / enfPos! ###
"""
This function enforces the constraint of a real solution.
"""
function enfReal!(x::AbstractArray{T}) where {T<:Complex}
#Returns x as complex vector with imaginary part set to zero
@simd for i in 1:length(x)
@inbounds (x[i] = complex(x[i].re))
end
end
"""
This function enforces the constraint of a real solution.
"""
enfReal!(x::AbstractArray{T}) where {T<:Real} = nothing
"""
This function enforces positivity constraints on its input.
"""
function enfPos!(x::AbstractArray{T}) where {T<:Complex}
#Return x as complex vector with negative parts projected onto 0
@simd for i in 1:length(x)
@inbounds (x[i].re < 0) && (x[i] = im*x[i].im)
end
end
"""
This function enforces positivity constraints on its input.
"""
function enfPos!(x::AbstractArray{T}) where {T<:Real}
#Return x as complex vector with negative parts projected onto 0
@simd for i in 1:length(x)
@inbounds (x[i] < 0) && (x[i] = zero(T))
end
end
function applyConstraints(x, sparseTrafo, enforceReal, enforcePositive, constraintMask=ones(Bool, length(x)) )
mask = (constraintMask != nothing) ? constraintMask : ones(Bool, length(x))
if sparseTrafo != nothing
x[:] = sparseTrafo * x
end
enforceReal && enfReal!(x, mask)
enforcePositive && enfPos!(x, mask)
if sparseTrafo != nothing
x[:] = adjoint(sparseTrafo)*x
end
end
### im2col / col2im ###
"""
This function rearranges distinct image blocks into columns of a matrix.
"""
function im2colDistinct(A::Array{T}, blocksize::NTuple{2,Int64}) where T
nrows = blocksize[1]
ncols = blocksize[2]
nelem = nrows*ncols
# padding for A such that patches can be formed
row_ext = mod(size(A,1),nrows)
col_ext = mod(size(A,2),ncols)
pad_row = (row_ext != 0)*(nrows-row_ext)
pad_col = (col_ext != 0)*(ncols-col_ext)
# rearrange matrix
A1 = zeros(T, size(A,1)+pad_row, size(A,2)+pad_col)
A1[1:size(A,1),1:size(A,2)] = A
t1 = reshape( A1,nrows, floor(Int,size(A1,1)/nrows), size(A,2) )
t2 = reshape( permutedims(t1,[1 3 2]), size(t1,1)*size(t1,3), size(t1,2) )
t3 = permutedims( reshape( t2, nelem, floor(Int,size(t2,1)/nelem), size(t2,2) ),[1 3 2] )
res = reshape(t3,nelem,size(t3,2)*size(t3,3))
return res
end
"""
This function rearranges columns of a matrix into blocks of an image.
"""
function col2imDistinct(A::Array{T}, blocksize::NTuple{2,Int64},
matsize::NTuple{2,Int64}) where T
# size(A) should not be larger than (blocksize[1]*blocksize[2], matsize[1]*matsize[2]),
# otherwise the bottom (right) rows (columns) will be cut.
# matsize should be divisible by blocksize.
if mod(matsize[1],blocksize[1]) != 0 || mod(matsize[2],blocksize[2]) != 0
error("matsize should be divisible by blocksize")
end
blockrows = blocksize[1]
blockcols = blocksize[2]
matrows = matsize[1]
matcols = matsize[2]
mblock = floor(Int,matrows/blockrows) # number of blocks per row
nblock = floor(Int,matcols/blockcols) # number of blocks per column
# padding for A such that patches can be formed and arranged into a matrix of
# adequate size
row_ext = blockrows*blockcols-size(A,1)
col_ext = mblock*nblock-size(A,2)
pad_row = (row_ext > 0 )*row_ext
pad_col = (col_ext > 0 )*col_ext
A1 = zeros(T, size(A,1)+pad_row, size(A,2)+pad_col)
A1[1:blockrows*blockcols, 1:mblock*nblock] = A[1:blockrows*blockcols, 1:mblock*nblock]
# rearrange matrix
t1 = reshape( A1, blockrows,blockcols,mblock*nblock )
t2 = reshape( permutedims(t1,[1 3 2]), matrows,nblock,blockcols )
res = reshape( permutedims(t2,[1 3 2]), matrows,matcols)
end
### NRMS ###
function nrmsd(I,Ireco)
N = length(I)
# This is a little trick. We usually are not interested in simple scalings
# and therefore "calibrate" them away
alpha = norm(Ireco)>0 ? (dot(vec(I),vec(Ireco))+dot(vec(Ireco),vec(I))) /
(2*dot(vec(Ireco),vec(Ireco))) : 1.0
I2 = Ireco.*alpha
RMS = 1.0/sqrt(N)*norm(vec(I)-vec(I2))
NRMS = RMS/(maximum(abs.(I))-minimum(abs.(I)) )
return NRMS
end
"""
power_iterations(AᴴA; rtol=1e-3, maxiter=30, verbose=false)
Power iterations to determine the maximum eigenvalue of a normal operator or square matrix.
For custom AᴴA which are not an abstract array or an `AbstractLinearOperator` one can pass a vector `b` of `size(AᴴA, 2)` to be used during the computation.
# Arguments
* `AᴴA` - operator or matrix; has to be square
* `b` - (optional) vector to be used during the computation
# Keyword Arguments
* `rtol=1e-3` - relative tolerance; the function terminates if the change of the max. eigenvalue is smaller than this value
* `maxiter=30` - maximum number of power iterations
* `verbose=false` - print maximum eigenvalue if `true`
# Output
maximum eigenvalue of the operator
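# Examples
A small sketch on a random matrix; the result approximates the largest eigenvalue of `A'A`:
```julia
A = randn(32, 16)
λ_max = power_iterations(A' * A; rtol = 1e-4)
```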
"""
power_iterations(AᴴA::AbstractArray; kwargs...) = power_iterations(AᴴA, similar(AᴴA, size(AᴴA, 2)); kwargs...)
power_iterations(AᴴA::AbstractLinearOperator; kwargs...) = power_iterations(AᴴA, similar(LinearOperators.storage_type(AᴴA), size(AᴴA, 2)); kwargs...)
function power_iterations(AᴴA, b; rtol=1e-3, maxiter=30, verbose=false)
copyto!(b, randn(eltype(b), size(AᴴA, 2)))
bᵒˡᵈ = similar(b)
λ = Inf
for i = 1:maxiter
b ./= norm(b)
# swap b and bᵒˡᵈ (pointers only, no data is moved or allocated)
bᵗᵐᵖ = bᵒˡᵈ
bᵒˡᵈ = b
b = bᵗᵐᵖ
mul!(b, AᴴA, bᵒˡᵈ)
λᵒˡᵈ = λ
λ = abs(bᵒˡᵈ' * b) # λ is real-valued for Hermitian matrices
verbose && println("iter = $i; λ = $λ; abs(λ/λᵒˡᵈ - 1) = $(abs(λ/λᵒˡᵈ - 1)) <? $rtol")
abs(λ/λᵒˡᵈ - 1) < rtol && return λ
end
return λ
end

# ===== deprecated.jl =====
@deprecate createLinearSolver(solver, A, x; kargs...) createLinearSolver(solver, A; kargs...)
function Base.vec(reg::AbstractRegularization)
Base.depwarn("vec(reg::AbstractRegularization) will be removed in a future release. Use `reg = isa(reg, AbstractVector) ? reg : [reg]` instead.", reg; force=true)
return AbstractRegularization[reg]
end
function Base.vec(reg::AbstractVector{AbstractRegularization})
Base.depwarn("vec(reg::AbstractRegularization) will be removed in a future release. Use reg = `isa(reg, AbstractVector) ? reg : [reg]` instead.", reg; force=true)
return reg
end
export ConstraintTransformedRegularization
function ConstraintTransformedRegularization(args...)
error("ConstraintTransformedRegularization has been removed. ADMM and SplitBregman now take the regularizer and the transform as separate inputs.")
end

# ===== Regularization/MaskedRegularization.jl =====
export MaskedRegularization
"""
MaskedRegularization
Nested regularization term that only applies `prox!` and `norm` to elements of `x` for which the mask is `true`.
# Examples
```julia
julia> positive = PositiveRegularization();
julia> masked = MaskedRegularization(positive, [true, false, true, false]);
julia> prox!(masked, fill(-1, 4))
4-element Vector{Float64}:
0.0
-1.0
0.0
-1.0
```
"""
struct MaskedRegularization{S, R<:AbstractRegularization} <: AbstractNestedRegularization{S}
reg::R
mask::Vector{Bool}
MaskedRegularization(reg::R, mask) where R <: AbstractRegularization = new{R, R}(reg, mask)
MaskedRegularization(reg::R, mask) where {S, R<:AbstractNestedRegularization{S}} = new{S,R}(reg, mask)
end
innerreg(reg::MaskedRegularization) = reg.reg
function prox!(reg::MaskedRegularization, x::AbstractArray, args...)
z = view(x, findall(reg.mask))
prox!(reg.reg, z, args...)
return x
end
function norm(reg::MaskedRegularization, x::AbstractArray, args...)
z = view(x, findall(reg.mask))
result = norm(reg.reg, z, args...)
return result
end

# ===== Regularization/NestedRegularization.jl =====
export AbstractNestedRegularization
abstract type AbstractNestedRegularization{S} <: AbstractRegularization end
"""
innerreg(reg::AbstractNestedRegularization)
return the `inner` regularization term of `reg`. Nested regularization terms also implement the iteration interface.
"""
innerreg(::R) where R<:AbstractNestedRegularization = error("Nested regularization term $R must implement `innerreg`")
"""
sink(reg::AbstractNestedRegularization)
return the innermost regularization term.
"""
sink(reg::AbstractNestedRegularization{S}) where S = last(collect(reg))
"""
sinktype(reg::AbstractNestedRegularization)
return the type of the innermost regularization term.
See also [`sink`](@ref).
"""
sinktype(::AbstractNestedRegularization{S}) where S = S
λ(reg::AbstractNestedRegularization) = λ(innerreg(reg))
prox!(reg::AbstractNestedRegularization{S}, x) where S <: AbstractParameterizedRegularization = prox!(reg, x, λ(reg))
norm(reg::AbstractNestedRegularization{S}, x) where S <: AbstractParameterizedRegularization = norm(reg, x, λ(reg))
prox!(reg::AbstractNestedRegularization, x, args...) = prox!(innerreg(reg), x, args...)
norm(reg::AbstractNestedRegularization, x, args...) = norm(innerreg(reg), x, args...)

# ===== Regularization/NormalizedRegularization.jl =====
export AbstractRegularizationNormalization, NormalizedRegularization, NoNormalization, MeasurementBasedNormalization, SystemMatrixBasedNormalization
abstract type AbstractRegularizationNormalization end
"""
NoNormalization
No normalization to `λ` is applied.
"""
struct NoNormalization <: AbstractRegularizationNormalization end
"""
MeasurementBasedNormalization
`λ` is normalized by the 1-norm of `b` divided by its length.
"""
struct MeasurementBasedNormalization <: AbstractRegularizationNormalization end
"""
SystemMatrixBasedNormalization
`λ` is normalized by the energy of the system matrix rows.
"""
struct SystemMatrixBasedNormalization <: AbstractRegularizationNormalization end
# TODO weighted systemmatrix, maybe weighted measurementbased?
"""
NormalizedRegularization
Nested regularization term that scales `λ` according to the chosen normalization scheme. This term is commonly applied by a solver based on its `normalizeReg` keyword.
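# Examples
A sketch; the scheme is typically selected via a solver's `normalizeReg` keyword, and `A`, `b` are assumed to be given:
```julia
S = createLinearSolver(SplitBregman, A; reg = L1Regularization(0.1), normalizeReg = MeasurementBasedNormalization())
x_approx = solve!(S, b)
```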
See also [`NoNormalization`](@ref), [`MeasurementBasedNormalization`](@ref), [`SystemMatrixBasedNormalization`](@ref).
"""
struct NormalizedRegularization{T, S, R} <: AbstractScaledRegularization{T, S}
reg::R
factor::T
NormalizedRegularization(reg::R, factor) where {T, R <: AbstractParameterizedRegularization{<:AbstractArray{T}}} = new{T, R, R}(reg, factor)
NormalizedRegularization(reg::R, factor) where {T, R <: AbstractParameterizedRegularization{T}} = new{T, R, R}(reg, factor)
NormalizedRegularization(reg::R, factor) where {T, RN <: AbstractParameterizedRegularization{T}, R<:AbstractNestedRegularization{RN}} = new{T, RN, R}(reg, factor)
end
innerreg(reg::NormalizedRegularization) = reg.reg
scalefactor(reg::NormalizedRegularization) = reg.factor
function normalize(::MeasurementBasedNormalization, A, b::AbstractArray)
return norm(b, 1)/length(b)
end
normalize(::MeasurementBasedNormalization, A, b::Nothing) = one(real(eltype(A)))
normalize(::SystemMatrixBasedNormalization, ::Nothing, _) = error("SystemMatrixBasedNormalization requires supplying A to the constructor of the solver")
function normalize(::SystemMatrixBasedNormalization, A, b)
M = size(A, 1)
N = size(A, 2)
energy = zeros(real(eltype(A)), M)
for m=1:M
energy[m] = sqrt(rownorm²(A,m))
end
trace = norm(energy)^2/N
return trace
end
normalize(::NoNormalization, A, b) = nothing
function normalize(norm::AbstractRegularizationNormalization, regs::Vector{R}, A, b) where {R<:AbstractRegularization}
factor = normalize(norm, A, b)
return map(x-> normalize(x, factor), regs)
end
function normalize(norm::AbstractRegularizationNormalization, reg::R, A, b) where {R<:AbstractRegularization}
factor = normalize(norm, A, b)
return normalize(reg, factor)
end
normalize(reg::R, ::Nothing) where {R<:AbstractRegularization} = reg
normalize(reg::AbstractProjectionRegularization, factor::Number) = reg
normalize(reg::NormalizedRegularization, factor::Number) = NormalizedRegularization(reg.reg, factor) # Update normalization
normalize(reg::AbstractParameterizedRegularization, factor::Number) = NormalizedRegularization(reg, factor)
function normalize(reg::AbstractRegularization, factor::Number)
if sink(reg) isa AbstractParameterizedRegularization
return NormalizedRegularization(reg, factor)
end
return reg
end
normalize(solver::AbstractLinearSolver, norm, regs, A, b) = normalize(typeof(solver), norm, regs, A, b)
normalize(solver::Type{T}, norm::AbstractRegularizationNormalization, regs, A, b) where T<:AbstractLinearSolver = normalize(norm, regs, A, b)
# System matrix based normalization is already done in constructor, can just return regs
normalize(solver::AbstractLinearSolver, norm::SystemMatrixBasedNormalization, regs, A, b) = regs
# ===== Regularization/PlugAndPlayRegularization.jl =====
export PnPRegularization, PlugAndPlayRegularization
"""
PlugAndPlayRegularization
Regularization term implementing a given plug-and-play proximal mapping.
The actual regularization term is indirectly defined by the learned proximal mapping and as such there is no `norm` implemented.
# Arguments
* `λ` - regularization parameter
# Keywords
* `model` - model applied to the image
* `shape` - dimensions of the image
* `input_transform` - transform of image before `model`
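# Examples
A construction sketch, where `model` stands for a user-provided (hypothetical) learned denoiser acting on 32×32 images:
```julia
reg = PlugAndPlayRegularization(0.3f0; model = model, shape = [32, 32])
```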
"""
struct PlugAndPlayRegularization{T, M, I} <: AbstractParameterizedRegularization{T}
model::M
λ::T
shape::Vector{Int}
input_transform::I
ignoreIm::Bool
PlugAndPlayRegularization(λ::T; model::M, shape, input_transform::I=RegularizedLeastSquares.MinMaxTransform, ignoreIm = false, kargs...) where {T, M, I} = new{T, M, I}(model, λ, shape, input_transform, ignoreIm)
end
PlugAndPlayRegularization(model, shape; kwargs...) = PlugAndPlayRegularization(one(Float32); kwargs..., model = model, shape = shape)
function prox!(self::PlugAndPlayRegularization, x::AbstractArray{Complex{T}}, λ::T) where {T <: Real}
if self.ignoreIm
copyto!(x, prox!(self, real.(x), λ) + imag.(x) * one(T)im)
else
copyto!(x, prox!(self, real.(x), λ) + prox!(self, imag.(x), λ) * one(T)im)
end
return x
end
function prox!(self::PlugAndPlayRegularization, x::AbstractArray{T}, λ::T) where {T <: Real}
if λ != self.λ && (λ < 0.0 || λ > 1.0)
temp = clamp(λ, zero(T), one(T))
@warn "$(typeof(self)) was given λ with value $λ. Valid range is [0, 1]. λ changed to $temp"
λ = temp
end
out = copy(x)
out = reshape(out, self.shape...)
tf = self.input_transform(out)
out = RegularizedLeastSquares.transform(tf, out)
out = out - λ * (out - self.model(out))
out = RegularizedLeastSquares.inverse_transform(tf, out)
copyto!(x, vec(out))
return x
end
PnPRegularization = PlugAndPlayRegularization

# ===== Regularization/Regularization.jl =====
export AbstractRegularization, AbstractParameterizedRegularization, AbstractProjectionRegularization, prox!, sink, sinktype, λ, findsink, findsinks
abstract type AbstractRegularization end
innerreg(::AbstractRegularization) = nothing
iterate(reg::AbstractRegularization, state = reg) = isnothing(state) ? nothing : (state, innerreg(state))
Base.IteratorSize(::AbstractRegularization) = Base.SizeUnknown()
sink(reg::AbstractRegularization) = reg
sinktype(reg::AbstractRegularization) = typeof(sink(reg))
abstract type AbstractParameterizedRegularization{T} <: AbstractRegularization end
"""
prox!(reg::AbstractParameterizedRegularization, x)
perform the proximal mapping defined by `reg` on `x`. Uses the regularization parameter defined for `reg`.
"""
prox!(reg::AbstractParameterizedRegularization, x::AbstractArray) = prox!(reg, x, λ(reg))
"""
norm(reg::AbstractParameterizedRegularization, x)
returns the value of the `reg` regularization term on `x`. Uses the regularization parameter defined for `reg`.
"""
norm(reg::AbstractParameterizedRegularization, x::AbstractArray) = norm(reg, x, λ(reg))
"""
λ(reg::AbstractParameterizedRegularization)
return the regularization parameter `λ` of `reg`
"""
λ(reg::AbstractParameterizedRegularization) = reg.λ
# Conversion (https://github.com/JuliaLang/julia/issues/52978#issuecomment-1900698430)
prox!(reg::R, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::otherT) where {R<:AbstractParameterizedRegularization, T <: Real, otherT} = prox!(reg, x, convert(T, λ))
norm(reg::R, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::otherT) where {R<:AbstractParameterizedRegularization, T <: Real, otherT} = norm(reg, x, convert(T, λ))
"""
prox!(regType::Type{<:AbstractParameterizedRegularization}, x, λ; kwargs...)
construct a regularization term of type `regType` with given `λ` and `kwargs` and apply its `prox!` on `x`
"""
prox!(regType::Type{<:AbstractParameterizedRegularization}, x, λ; kwargs...) = prox!(regType(λ; kwargs...), x, λ)
"""
norm(regType::Type{<:AbstractParameterizedRegularization}, x, λ; kwargs...)
construct a regularization term of type `regType` with given `λ` and `kwargs` and apply its `norm` on `x`
"""
norm(regType::Type{<:AbstractParameterizedRegularization}, x, λ; kwargs...) = norm(regType(λ; kwargs...), x, λ)
abstract type AbstractProjectionRegularization <: AbstractRegularization end
λ(::AbstractProjectionRegularization) = nothing
"""
prox!(regType::Type{<:AbstractProjectionRegularization}, x; kwargs...)
construct a regularization term of type `regType` with given `kwargs` and apply its `prox!` on `x`
"""
prox!(regType::Type{<:AbstractProjectionRegularization}, x; kwargs...) = prox!(regType(;kwargs...), x)
"""
norm(regType::Type{<:AbstractProjectionRegularization}, x; kwargs...)
construct a regularization term of type `regType` with given `kwargs` and apply its `norm` on `x`
"""
norm(regType::Type{<:AbstractProjectionRegularization}, x; kwargs...) = norm(regType(;kwargs...), x)
include("NestedRegularization.jl")
include("ScaledRegularization.jl")
include("NormalizedRegularization.jl")
include("TransformedRegularization.jl")
include("MaskedRegularization.jl")
include("PlugAndPlayRegularization.jl")
function findfirst(::Type{S}, reg::AbstractRegularization) where S <: AbstractRegularization
regs = collect(reg)
idx = findfirst(x->x isa S, regs)
isnothing(idx) ? nothing : regs[idx]
end
function findsink(::Type{S}, reg::Vector{<:AbstractRegularization}) where S <: AbstractRegularization
all = findall(x -> sinktype(x) <: S, reg)
if isempty(all)
return nothing
elseif length(all) == 1
return first(all)
else
error("Cannot unambiguously retrieve reg term of type $S, found $(length(all)) instances")
end
end
findsinks(::Type{S}, reg::Vector{<:AbstractRegularization}) where S <: AbstractRegularization = findall(x -> sinktype(x) <: S, reg)
"""
RegularizationList()
Returns a list of all available Regularizations
"""
function RegularizationList()
return subtypes(AbstractRegularization) # TODO loop over abstract types and push! to list
end
norm0(x::Array{T}, λ::T; sparseTrafo=nothing, kargs...) where T = 0.0
# ===== Regularization/ScaledRegularization.jl =====
export AbstractScaledRegularization
"""
AbstractScaledRegularization
Nested regularization term that applies a `scalefactor` to the regularization parameter `λ` of its `inner` term.
See also [`scalefactor`](@ref), [`λ`](@ref), [`innerreg`](@ref).
"""
abstract type AbstractScaledRegularization{T, S<:AbstractParameterizedRegularization{<:Union{T, <:AbstractArray{T}}}} <: AbstractNestedRegularization{S} end
"""
scalefactor(reg::AbstractScaledRegularization)
return the scaling `scalefactor` for `λ`
"""
scalefactor(::R) where R <: AbstractScaledRegularization = error("Scaled regularization term $R must implement scalefactor")
"""
λ(reg::AbstractScaledRegularization)
return `λ` of `inner` regularization term scaled by `scalefactor(reg)`.
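For example (sketch, using the `FixedScaledRegularization` defined below):
```julia
core   = L1Regularization(0.2)
scaled = FixedScaledRegularization(core, 0.5)
λ(scaled) # ≈ 0.1
```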
See also [`scalefactor`](@ref), [`innerreg`](@ref).
"""
λ(reg::AbstractScaledRegularization) = λ(innerreg(reg)) .* scalefactor(reg)
export FixedScaledRegularization
struct FixedScaledRegularization{T, S, R} <: AbstractScaledRegularization{T, S}
reg::R
factor::T
FixedScaledRegularization(reg::R, factor) where {T, R <: AbstractParameterizedRegularization{T}} = new{T, R, R}(reg, factor)
FixedScaledRegularization(reg::R, factor) where {T, RN <: AbstractParameterizedRegularization{T}, R<:AbstractNestedRegularization{RN}} = new{T, RN, R}(reg, factor)
end
innerreg(reg::FixedScaledRegularization) = reg.reg
scalefactor(reg::FixedScaledRegularization) = reg.factor
export FixedParameterRegularization
"""
FixedParameterRegularization
Nested regularization term that discards any `λ` passed to it and instead uses `λ` from its inner regularization term. This can be used to selectively disallow normalization.
"""
struct FixedParameterRegularization{T, S, R} <: AbstractScaledRegularization{T, S}
reg::R
FixedParameterRegularization(reg::R) where {T, R <: AbstractParameterizedRegularization{T}} = new{T, R, R}(reg)
FixedParameterRegularization(reg::R) where {T, RN <: AbstractParameterizedRegularization{T}, R<:AbstractNestedRegularization{RN}} = new{T, RN, R}(reg)
end
scalefactor(reg::FixedParameterRegularization) = 1.0
innerreg(reg::FixedParameterRegularization) = reg.reg
# Drop any incoming λ and subsitute inner
prox!(reg::FixedParameterRegularization, x, discard) = prox!(innerreg(reg), x, λ(innerreg(reg)))
norm(reg::FixedParameterRegularization, x, discard) = norm(innerreg(reg), x, λ(innerreg(reg)))
export AutoScaledRegularization
mutable struct AutoScaledRegularization{T, S, R} <: AbstractScaledRegularization{T, S}
reg::R
factor::Union{Nothing, T}
AutoScaledRegularization(reg::R) where {T, R <: AbstractParameterizedRegularization{T}} = new{T, R, R}(reg, nothing)
AutoScaledRegularization(reg::R) where {T, RN <: AbstractParameterizedRegularization{T}, R<:AbstractNestedRegularization{RN}} = new{T, RN, R}(reg, nothing)
end
initFactor!(reg::AutoScaledRegularization, x::AbstractArray) = reg.factor = maximum(abs.(x))
innerreg(reg::AutoScaledRegularization) = reg.reg
# A bit hacky: Factor can only be computed once x is seen, therefore hide factor in λ and silently add it in prox!/norm calls
scalefactor(reg::AutoScaledRegularization) = isnothing(reg.factor) ? 1.0 : reg.factor
function prox!(reg::AutoScaledRegularization, x, λ)
if isnothing(reg.factor)
initFactor!(reg, x)
return prox!(reg.reg, x, λ * reg.factor)
else
return prox!(reg.reg, x, λ)
end
end
function norm(reg::AutoScaledRegularization, x, λ)
if isnothing(reg.factor)
initFactor!(reg, x)
return norm(reg.reg, x, λ * reg.factor)
else
return norm(reg.reg, x, λ)
end
end

# ===== Regularization/TransformedRegularization.jl =====
export TransformedRegularization
"""
TransformedRegularization(reg, trafo)
Nested regularization term that applies `prox!` or `norm` on `z = trafo * x` and returns (inplace) `x = adjoint(trafo) * z`.
# Example
```julia
julia> core = L1Regularization(0.8)
L1Regularization{Float64}(0.8)
julia> wop = WaveletOp(Float32, shape = (32,32));
julia> reg = TransformedRegularization(core, wop);
julia> prox!(reg, randn(32*32)); # Apply soft-thresholding in Wavelet domain
```
"""
struct TransformedRegularization{S, R<:AbstractRegularization, TR} <: AbstractNestedRegularization{S}
reg::R
trafo::TR
TransformedRegularization(reg::R, trafo::TR) where {R<:AbstractRegularization, TR} = new{R, R, TR}(reg, trafo)
TransformedRegularization(reg::R, trafo::TR) where {S, R<:AbstractNestedRegularization{S}, TR} = new{S,R, TR}(reg, trafo)
end
innerreg(reg::TransformedRegularization) = reg.reg
function prox!(reg::TransformedRegularization, x::AbstractArray, args...)
z = reg.trafo * x
result = prox!(reg.reg, z, args...)
copyto!(x, adjoint(reg.trafo) * result)
return x
end
function norm(reg::TransformedRegularization, x::AbstractArray, args...)
z = reg.trafo * x
result = norm(reg.reg, z, args...)
return result
end

# ===== proximal map: L1Regularization =====
export L1Regularization
"""
L1Regularization
Regularization term implementing the proximal map for the Lasso problem.
"""
struct L1Regularization{T} <: AbstractParameterizedRegularization{T}
λ::T
L1Regularization(λ::T; kargs...) where T = new{T}(λ)
end
"""
prox!(reg::L1Regularization, x, λ)
performs soft-thresholding - i.e. proximal map for the Lasso problem.
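For example (sketch):
```julia
prox!(L1Regularization(0.5), [1.0, -0.2, 2.0]) # ≈ [0.5, 0.0, 1.5]
```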
"""
function prox!(::L1Regularization, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::T) where {T <: Real}
ε = eps(T)
x .= max.((abs.(x).-λ),0) .* (x.+ε)./(abs.(x).+ε)
return x
end
"""
norm(reg::L1Regularization, x, λ)
returns the value of the L1-regularization term.
"""
function norm(::L1Regularization, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::T) where {T <: Real}
l1Norm = λ*norm(x,1)
return l1Norm
end
# ===== proximal map: L2Regularization =====
export L2Regularization
"""
L2Regularization
Regularization term implementing the proximal map for Tikhonov regularization.
"""
struct L2Regularization{T} <: AbstractParameterizedRegularization{T}
λ::T
L2Regularization(λ::T; kargs...) where T = new{T}(λ)
end
"""
prox!(reg::L2Regularization, x, λ)
performs the proximal map for Tikhonov regularization.
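For example (sketch); each entry is scaled by `1/(1 + 2λ)`:
```julia
prox!(L2Regularization(0.5), [2.0, 4.0]) # [1.0, 2.0]
```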
"""
function prox!(::L2Regularization, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::T) where {T <: Real}
x[:] .*= 1. ./ (1. .+ 2. .*λ)#*x
return x
end
"""
norm(reg::L2Regularization, x, λ)
returns the value of the L2-regularization term
"""
norm(::L2Regularization, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::T) where {T <: Real} = λ*norm(x,2)^2
function norm(::L2Regularization, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::AbstractArray{T}) where {T <: Real}
res = zero(real(eltype(x)))
for i in eachindex(x)
res+= λ[i]*abs2(x[i])
end
return res
end

# ===== proximal map: L21Regularization =====
export L21Regularization
"""
L21Regularization
Regularization term implementing the proximal map for group-soft-thresholding.
# Arguments
* `λ` - regularization parameter
# Keywords
* `slices=1` - number of elements per group
"""
struct L21Regularization{T} <: AbstractParameterizedRegularization{T}
λ::T
slices::Int64
end
L21Regularization(λ; slices::Int64 = 1, kargs...) = L21Regularization(λ, slices)
"""
prox!(reg::L21Regularization, x, λ)
performs group-soft-thresholding for l1/l2-regularization.
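For example (sketch); with `slices = 2` the element groups `(x[1], x[3])` and `(x[2], x[4])` are thresholded jointly:
```julia
prox!(L21Regularization(1.0; slices = 2), [3.0, 0.0, 4.0, 0.0]) # ≈ [2.4, 0.0, 3.2, 0.0]
```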
"""
function prox!(reg::L21Regularization, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}},λ::T) where {T <: Real}
return proxL21!(x, λ, reg.slices)
end
function proxL21!(x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::T, slices::Int64) where T
sliceLength = div(length(x),slices)
groupNorm = [norm(x[i:sliceLength:end]) for i=1:sliceLength]
copyto!(x, [ x[i]*max( (groupNorm[mod1(i,sliceLength)]-λ)/groupNorm[mod1(i,sliceLength)],0 ) for i=1:length(x)])
return x
end
"""
norm(reg::L21Regularization, x, λ)
return the value of the L21-regularization term.
"""
function norm(reg::L21Regularization, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::T) where {T <: Real}
sliceLength = div(length(x),reg.slices)
groupNorm = [norm(x[i:sliceLength:end]) for i=1:sliceLength]
return λ*norm(groupNorm,1)
end
# ===== proximal map: LLRRegularization =====
export LLRRegularization
"""
LLRRegularization
Regularization term implementing the proximal map for locally low rank (LLR) regularization using singular-value-thresholding.
# Arguments
* `λ` - regularization parameter
# Keywords
* `shape::Tuple{Int}` - dimensions of the image
* `blockSize::Tuple{Int}=(2,2)` - size of patches to perform singular value thresholding on
* `randshift::Bool=true` - randomly shifts the patches to ensure translation invariance
* `fullyOverlapping::Bool=false` - choose between fully overlapping block or non-overlapping blocks
"""
struct LLRRegularization{T, N, TI} <: AbstractParameterizedRegularization{T} where {N, TI<:Integer}
λ::T
shape::NTuple{N,TI}
blockSize::NTuple{N,TI}
randshift::Bool
fullyOverlapping::Bool
L::Int64
end
LLRRegularization(λ; shape::NTuple{N,TI}, blockSize::NTuple{N,TI} = ntuple(_ -> 2, N), randshift::Bool = true, fullyOverlapping::Bool = false, L::Int64 = 1, kargs...) where {N,TI<:Integer} =
LLRRegularization(λ, shape, blockSize, randshift, fullyOverlapping, L)
"""
prox!(reg::LLRRegularization, x, λ)
performs the proximal map for LLR regularization using singular-value-thresholding
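For example (a sketch on eight 32×32 frames, thresholded in non-overlapping 4×4 blocks):
```julia
x   = vec(randn(ComplexF32, 32, 32, 8))
reg = LLRRegularization(0.05f0; shape = (32, 32), blockSize = (4, 4), randshift = false)
prox!(reg, x)
```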
"""
function prox!(reg::LLRRegularization, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::T) where {T <: Real}
reg.fullyOverlapping ? proxLLROverlapping!(reg, x, λ) : proxLLRNonOverlapping!(reg, x, λ)
end
"""
proxLLRNonOverlapping!(reg::LLRRegularization, x, λ)
performs the proximal map for LLR regularization using singular-value-thresholding on non-overlapping blocks
"""
function proxLLRNonOverlapping!(reg::LLRRegularization{TR, N, TI}, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::T) where {TR, N, TI, T}
shape = reg.shape
blockSize = reg.blockSize
randshift = reg.randshift
x = reshape(x, tuple(shape..., length(x) ÷ prod(shape)))
block_idx = CartesianIndices(blockSize)
K = size(x)[end]
if randshift
# Random.seed!(1234)
shift_idx = (Tuple(rand(block_idx))..., 0)
xs = circshift(x, shift_idx)
else
xs = x
end
ext = mod.(shape, blockSize)
pad = mod.(blockSize .- ext, blockSize)
if any(pad .!= 0)
xp = similar(x, eltype(x), (shape .+ pad)..., K)
fill!(xp, zero(eltype(x)))
xp[CartesianIndices(x)] .= xs
else
xp = xs
end
bthreads = BLAS.get_num_threads()
try
BLAS.set_num_threads(1)
blocks = CartesianIndices(StepRange.(TI(0), blockSize, shape .- 1))
xᴸᴸᴿ = [similar(x, prod(blockSize), K) for _ = 1:length(blocks)]
let xp = xp # Avoid boxing error
@floop for (id, i) ∈ enumerate(blocks)
@views xᴸᴸᴿ[id] .= reshape(xp[i.+block_idx, :], :, K)
ub = sqrt(norm(xᴸᴸᴿ[id]' * xᴸᴸᴿ[id], Inf)) #upper bound on singular values given by matrix infinity norm
if λ >= ub #save time by skipping the SVT as recommended by Ong/Lustig, IEEE 2016
xp[i.+block_idx, :] .= 0
else # threshold singular values
SVDec = svd!(xᴸᴸᴿ[id])
prox!(L1Regularization, SVDec.S, λ)
xp[i.+block_idx, :] .= reshape(SVDec.U * Diagonal(SVDec.S) * SVDec.Vt, blockSize..., :)
end
end
end
finally
BLAS.set_num_threads(bthreads)
end
if any(pad .!= 0)
xs .= xp[CartesianIndices(xs)]
end
if randshift
x .= circshift(xs, -1 .* shift_idx)
end
x = vec(x)
return x
end
"""
norm(reg::LLRRegularization, x, λ)
returns the value of the LLR-regularization term. The norm is only implemented for 2D, non-fully overlapping blocks.
"""
function norm(reg::LLRRegularization, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::T) where {T <: Real}
shape = reg.shape
blockSize = reg.blockSize
randshift = reg.randshift
L = reg.L
Nvoxel = prod(shape)
K = floor(Int, length(x) / (Nvoxel * L))
normᴸᴸᴿ = 0.0
for i = 1:L
normᴸᴸᴿ += blockNuclearNorm(
x[(i-1)*Nvoxel*K+1:i*Nvoxel*K],
shape;
blockSize = blockSize,
randshift = randshift,
)
end
return λ * normᴸᴸᴿ
end
function blockNuclearNorm(
x::Vector{T},
shape::NTuple{N,TI};
blockSize::NTuple{N,TI} = ntuple(_ -> 2, N),
randshift::Bool = true,
kargs...,
) where {N,T,TI<:Integer}
x = reshape(x, tuple(shape..., floor(Int64, length(x) / prod(shape))))
Wy = blockSize[1]
Wz = blockSize[2]
if randshift
Random.seed!(1234)
shift_idx = [rand(1:Wy) rand(1:Wz) 0]
x = circshift(x, shift_idx)
end
ny, nz, K = size(x)
# reshape into patches
L = floor(Int, ny * nz / Wy / Wz) # number of patches; assumes that the image dimensions are divisible by the block sizes
xᴸᴸᴿ = zeros(T, Wy * Wz, L, K)
for i = 1:K
xᴸᴸᴿ[:, :, i] = im2colDistinct(x[:, :, i], (Wy, Wz))
end
xᴸᴸᴿ = permutedims(xᴸᴸᴿ, [1 3 2])
# L1-norm of singular values
normᴸᴸᴿ = 0.0
for i = 1:L
SVDec = svd(xᴸᴸᴿ[:, :, i])
normᴸᴸᴿ += norm(SVDec.S, 1)
end
return normᴸᴸᴿ
end
"""
proxLLROverlapping!(reg::LLRRegularization, x, λ)
performs the proximal map for LLR regularization using singular-value-thresholding with fully overlapping blocks
"""
function proxLLROverlapping!(reg::LLRRegularization{TR, N, TI}, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::T) where {TR, N, TI, T}
shape = reg.shape
blockSize = reg.blockSize
x = reshape(x, tuple(shape..., length(x) ÷ prod(shape)))
block_idx = CartesianIndices(blockSize)
K = size(x)[end]
ext = mod.(shape, blockSize)
pad = mod.(blockSize .- ext, blockSize)
if any(pad .!= 0)
xp = zeros(eltype(x), (shape .+ pad)..., K)
xp[CartesianIndices(x)] .= x
else
xp = copy(x)
end
x .= 0 # from here on x is the output
bthreads = BLAS.get_num_threads()
try
BLAS.set_num_threads(1)
for is ∈ block_idx
shift_idx = (Tuple(is)..., 0)
xs = circshift(xp, shift_idx)
proxLLRNonOverlapping!(reg, xs, λ)
x .+= circshift(xs, -1 .* shift_idx)[CartesianIndices(x)]
end
finally
BLAS.set_num_threads(bthreads)
end
x ./= length(block_idx)
return vec(x)
end
| RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 1175 | export NuclearRegularization
"""
NuclearRegularization
Regularization term implementing the proximal map for singular value soft-thresholding.
# Arguments:
* `λ` - regularization parameter
# Keywords
* `svtShape::NTuple` - size of the underlying matrix
"""
struct NuclearRegularization{T} <: AbstractParameterizedRegularization{T}
λ::T
svtShape::NTuple
end
NuclearRegularization(λ; svtShape::NTuple=[], kargs...) = NuclearRegularization(λ, svtShape)
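# Usage sketch (illustrative sizes and λ): soft-threshold the singular values of a
# vectorized 32×32 matrix; `svtShape` tells the prox how to reshape the vector.
#
#   using RegularizedLeastSquares
#   reg = NuclearRegularization(0.1; svtShape = (32, 32))
#   x = vec(rand(ComplexF64, 32, 32))
#   prox!(reg, x, reg.λ)   # x is overwritten with its singular-value-thresholded version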
"""
prox!(reg::NuclearRegularization, x, λ)
performs singular value soft-thresholding - i.e. the proximal map for the nuclear norm regularization.
"""
function prox!(reg::NuclearRegularization, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::T) where {T <: Real}
U,S,V = svd(reshape(x, reg.svtShape))
prox!(L1Regularization, S, λ)
copyto!(x, vec(U*Diagonal(S)*V'))
return x
end
"""
norm(reg::NuclearRegularization, x, λ)
returns the value of the nuclear norm regularization term.
"""
function norm(reg::NuclearRegularization, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::T) where {T <: Real}
U,S,V = svd( reshape(x, reg.svtShape) )
return λ*norm(S,1)
end
| RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 721 | export PositiveRegularization
"""
PositiveRegularization
Regularization term implementing a projection onto positive and real numbers.
"""
struct PositiveRegularization <: AbstractProjectionRegularization
end
"""
prox!(reg::PositiveRegularization, x)
enforce positivity and realness of solution `x`.
"""
function prox!(::PositiveRegularization, x::AbstractArray{T}) where T
enfReal!(x)
enfPos!(x)
return x
end
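# Usage sketch: project a complex vector onto the real, non-negative numbers.
#
#   using RegularizedLeastSquares
#   x = randn(ComplexF64, 8)
#   prox!(PositiveRegularization(), x)   # drops imaginary parts and clips negatives to zero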
"""
norm(reg::PositiveRegularization, x)
returns the value of the characteristic function of real, positive numbers.
"""
function norm(reg::PositiveRegularization, x::AbstractArray{T}) where T
y = copy(x)
prox!(reg, y)
if y != x
return Inf
end
return 0
end
| RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 574 | export ProjectionRegularization
struct ProjectionRegularization <: AbstractProjectionRegularization
projFunc::Function
end
ProjectionRegularization(; projFunc::Function=x->x, kargs...) = ProjectionRegularization(projFunc)
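# Usage sketch: project onto an arbitrary set by supplying `projFunc`; the clamp of the
# real part to [0, 1] below is only an illustrative choice, not a package default.
#
#   using RegularizedLeastSquares
#   reg = ProjectionRegularization(projFunc = x -> clamp.(real.(x), 0, 1))
#   x = randn(ComplexF64, 8)
#   prox!(reg, x)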
function prox!(reg::ProjectionRegularization, x::AbstractArray{Tc}) where {T, Tc <: Union{T, Complex{T}}}
copyto!(x, reg.projFunc(x))
return x
end
function norm(reg::ProjectionRegularization, x::AbstractArray{Tc}) where {T, Tc <: Union{T, Complex{T}}}
y = copy(x)
copyto!(y, prox!(reg, y))
if y != x
return Inf
end
return 0.
end
| RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 635 | export RealRegularization
"""
RealRegularization
Regularization term implementing a projection onto real numbers.
"""
struct RealRegularization <: AbstractProjectionRegularization
end
"""
prox!(reg::RealRegularization, x)
enforce realness of solution `x`.
"""
function prox!(::RealRegularization, x::AbstractArray{T}) where T
enfReal!(x)
return x
end
"""
norm(reg::RealRegularization, x)
returns the value of the characteristic function of real numbers.
"""
function norm(reg::RealRegularization, x::AbstractArray{T}) where T
y = copy(x)
prox!(reg, y)
if y != x
return Inf
end
return 0
end
| RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 5172 | export TVRegularization
mutable struct TVParams{Tc,vecTc <: AbstractVector{Tc}, matT}
pq::vecTc
rs::vecTc
pqOld::vecTc
xTmp::vecTc
∇::matT
end
"""
TVRegularization
Regularization term implementing the proximal map for TV regularization. Calculated with the Condat algorithm if the TV is calculated only along one real-valued dimension and with the Fast Gradient Projection algorithm otherwise.
Reference for the Condat algorithm:
https://lcondat.github.io/publis/Condat-fast_TV-SPL-2013.pdf
Reference for the FGP algorithm:
A. Beck and T. Teboulle, "Fast Gradient-Based Algorithms for Constrained
Total Variation Image Denoising
and Deblurring Problems", IEEE Trans. Image Process. 18(11), 2009
# Arguments
* `λ::T` - regularization parameter
# Keywords
* `shape::NTuple` - size of the underlying image
* `dims` - dimension(s) to perform the TV along. If `dims` is an `Integer`, the Condat algorithm is used, otherwise the FGP algorithm.
* `iterationsTV=20` - number of FGP iterations
"""
mutable struct TVRegularization{T,N,TI} <: AbstractParameterizedRegularization{T} where {N,TI<:Integer}
λ::T
dims
shape::NTuple{N,TI}
iterationsTV::Int64
params::Union{TVParams, Nothing}
end
TVRegularization(λ; shape=(0,), dims=1:length(shape), iterationsTV=10, kargs...) = TVRegularization(λ, dims, shape, iterationsTV, nothing)
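# Usage sketch (illustrative sizes and λ): denoise a vectorized 64×64 image with the TV
# proximal map. With `dims = 1:2` the FGP branch is used; `dims = 1` would instead call
# the 1D Condat algorithm.
#
#   using RegularizedLeastSquares
#   reg = TVRegularization(0.02; shape = (64, 64), dims = 1:2, iterationsTV = 20)
#   x = rand(Float64, 64 * 64)
#   prox!(reg, x, reg.λ)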
function TVParams(shape, T::Type=Float64; dims=1:length(shape))
return TVParams(Vector{T}(undef, prod(shape)); shape=shape, dims=dims)
end
function TVParams(x::AbstractVector{Tc}; shape, dims=1:length(shape)) where {Tc}
∇ = GradientOp(Tc; shape, dims, S = typeof(x))
# allocate storage
xTmp = similar(x)
pq = similar(x, size(∇, 1))
rs = similar(pq)
pqOld = similar(pq)
return TVParams(pq, rs, pqOld, xTmp, ∇)
end
"""
prox!(reg::TVRegularization, x, λ)
Proximal map for TV regularization. Calculated with the Condat algorithm if the TV is calculated only along one dimension and with the Fast Gradient Projection algorithm otherwise.
"""
prox!(reg::TVRegularization, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::T) where {T <: Real} = proxTV!(reg, x, λ, shape=reg.shape, dims=reg.dims, iterationsTV=reg.iterationsTV)
function proxTV!(reg, x, λ; shape, dims=1:length(shape), kwargs...) # use kwargs for shape and dims
return proxTV!(reg, x, λ, shape, dims; kwargs...) # define shape and dims w/o kwargs to enable multiple dispatch on dims
end
proxTV!(reg, x, λ, shape, dims::Integer; kwargs...) = proxTV!(x, λ, shape, dims; kwargs...)
function proxTV!(x::AbstractVector{T}, λ::T, shape, dims::Integer; kwargs...) where {T<:Real}
x_ = reshape(x, shape)
i = CartesianIndices((ones(Int, dims - 1)..., 0:shape[dims]-1, ones(Int, length(shape) - dims)...))
Threads.@threads for j ∈ CartesianIndices((shape[1:dims-1]..., 1, shape[dims+1:end]...))
@views @inbounds tv_denoise_1d_condat!(x_[j.+i], shape[dims], λ)
end
return x
end
# Reuse TVParams if possible
function proxTV!(reg, x::AbstractVector{Tc}, λ::T, shape, dims; iterationsTV=10, kwargs...) where {T<:Real,Tc<:Union{T,Complex{T}}}
if isnothing(reg.params) || length(x) != length(reg.params.xTmp) || typeof(x) != typeof(reg.params.xTmp)
reg.params = TVParams(x; shape = shape, dims = dims)
end
return proxTV!(x, λ, reg.params; iterationsTV=iterationsTV)
end
function proxTV!(x::AbstractVector{Tc}, λ::T, p::TVParams{Tc}; iterationsTV=10, kwargs...) where {T<:Real,Tc<:Union{T,Complex{T}}}
@assert length(p.xTmp) == length(x)
@assert length(p.rs) == length(p.pq)
@assert length(p.rs) == length(p.pq)
# initialize dual variables
p.xTmp .= 0
p.pq .= 0
p.rs .= 0
p.pqOld .= 0
t = one(T)
for _ = 1:iterationsTV
pqTmp = p.pqOld
p.pqOld = p.pq
p.pq = p.rs
# gradient projection step for dual variables
tv_copy!(p.xTmp, x)
mul!(p.xTmp, transpose(p.∇), p.rs, -λ, 1) # xtmp = x-λ*transpose(∇)*rs
mul!(p.pq, p.∇, p.xTmp, 1 / (8λ), 1) # rs = ∇*xTmp/(8λ)
tv_restrictMagnitude!(p.pq)
# form linear combination of old and new estimates
tOld = t
t = (1 + sqrt(1 + 4 * tOld^2)) / 2
t2 = ((tOld - 1) / t)
t3 = 1 + t2
p.rs = pqTmp
tv_linearcomb!(p.rs, t3, p.pq, t2, p.pqOld)
end
mul!(x, transpose(p.∇), p.pq, -λ, one(Tc)) # x .-= λ*transpose(∇)*pq
return x
end
tv_copy!(dest, src) = copyto!(dest, src)
function tv_copy!(dest::Vector{T}, src::Vector{T}) where T
Threads.@threads for i ∈ eachindex(dest, src)
@inbounds dest[i] = src[i]
end
end
# restrict the magnitude of each element of x to at most one
function tv_restrictMagnitude!(x)
Threads.@threads for i in eachindex(x)
@inbounds x[i] /= max(1, abs(x[i]))
end
end
function tv_linearcomb!(rs, t3, pq, t2, pqOld)
Threads.@threads for i ∈ eachindex(rs, pq, pqOld)
@inbounds rs[i] = t3 * pq[i] - t2 * pqOld[i]
end
end
"""
norm(reg::TVRegularization, x, λ)
returns the value of the TV-regularization term.
"""
function norm(reg::TVRegularization, x::Union{AbstractArray{T}, AbstractArray{Complex{T}}}, λ::T) where {T <: Real}
∇ = GradientOp(eltype(x); shape=reg.shape, dims=reg.dims)
return λ * norm(∇ * x, 1)
end
| RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 5723 |
function proxTVCondat!(x::Vector{T}, λ::Float64; shape=[], kargs...) where T
x_ = reshape(x, shape...)
nhood = Int64[1 0 0;0 1 0; 0 0 1]
omega = T[1,1,1]
y = copy(x_)
y[:] .= 0.0
for d=1:length(omega)
y_ = copy(x_)
tv_denoise_3d_condat!(y_, nhood[d,:], λ*omega[d])
y .+= y_ ./ length(omega)
end
copyto!(x, y)
return y
end
mutable struct StartRange <: AbstractArray{CartesianIndices,3}
x::CartesianIndices
y::CartesianIndices
z::CartesianIndices
end
Base.size(R::StartRange) = (Int(3),)
Base.IndexStyle(::Type{StartRange}) = IndexLinear()
Base.getindex(R::StartRange,i::Int) = if i==1 R.x elseif i==2 R.y elseif i==3 R.z end;
"""
This function checks whether the cartesian index `range` lies within `size`.
"""
function inrange(size::Tuple{Int64,Int64,Int64},range::CartesianIndex{3})
if range.I[1] > size[1] || range.I[1] < 1 || range.I[2] > size[2] ||
range.I[2] < 1 || range.I[3] > size[3] || range.I[3] < 1
return false
else
return true
end
end
"""
This function returns a StartRange variable, which contains the start planes for
the 1d tv extraction.
"""
function get_startrange(size::Tuple{Int64,Int64,Int64},step::Array{T,1}) where {T<:Real}
output = StartRange(
CartesianIndices(CartesianIndex((0,0,0))),
CartesianIndices(CartesianIndex((0,0,0))),
CartesianIndices(CartesianIndex((0,0,0)))
)
output.x = CartesianIndices((1:step[1],1:size[2],1:size[3]))
output.y = CartesianIndices((1:size[1],1:step[2],1:size[3]))
output.z = CartesianIndices((1:size[1],1:size[2],1:step[3]))
if step[1] < 0
output.x = CartesianIndices(((size[1]+step[1]):size[1],1:size[2],1:size[3]))
end
if step[2] < 0
output.y = CartesianIndices((1:size[1],(size[2]+step[2]):size[2],1:size[3]))
end
if step[3] < 0
output.z = CartesianIndices((1:size[1],1:size[2],(size[3]+step[3]):size[3]))
end
return output
end
"""
This function extracts the one-dimensional tv problem into `tvOneDim`, starting from the
pixel `neighbor` and walking along the direction `increment`.
"""
function tv_get_onedim_data!(tvData::Array{T,3}, tvOneDim::Array{T,1}, neighbor,
increment, arrayCount::Array{Int64,1}) where {T<:Real}
tvSize = size(tvData)
arrayCount[1] = 0
# While neighbor does not exceed tvSize, add data to the 1d problem
while true
if inrange(tvSize,neighbor)
arrayCount[1] = arrayCount[1]+1
@inbounds tvOneDim[arrayCount[1]] = tvData[neighbor]
neighbor = neighbor + increment
else
break
end
end
end
"""
This function writes the 1d tv result back into the 3d data.
"""
function tv_push_onedim_data!(tvData::Array{T,3},tvOneDim::Array{T,1},
arrayCount::Int64,neighbor,increment) where {T<:Real}
for i=1:arrayCount
@inbounds tvData[neighbor] = tvOneDim[i]
neighbor = neighbor + increment
end
end
"""
This function extracts 1d problems from the 3d data and starts the 1d tv function.
"""
function tv_denoise_3d_condat!(tvData::Array{T,3}, nhood::Array{Int64,1}, lambda) where {T<:Real}
tvSize = size(tvData)
cartRange = get_startrange(tvSize,nhood[:])
increment = CartesianIndex((nhood[1],nhood[2],nhood[3]));
tvOneDim = Array{eltype(tvData)}(undef, Int64(ceil(sqrt(tvSize[1]*tvSize[2]*tvSize[3]))))
arrayCount = Array{Int64}(undef, 1)
for R in cartRange
for k in R
neighbor = k;
tv_get_onedim_data!(tvData,tvOneDim,neighbor,increment,arrayCount)
tv_denoise_1d_condat!(tvOneDim,arrayCount[1],lambda)
neighbor = k
tv_push_onedim_data!(tvData,tvOneDim,arrayCount[1],neighbor,increment)
end
end
end
"""
This function performs the 1d tv algorithm.
"""
function tv_denoise_1d_condat!(c, width, lambda)
cLength = width
k = 1
k0 = 1
umin = lambda
umax = -lambda
vmin = c[1] - lambda
vmax = c[1] + lambda
kplus = 1
kminus = 1
twolambda = 2*lambda
minlambda = -lambda
while true
while k == cLength
if umin < 0
while true
c[k0] = vmin
k0 = k0+1
!(k0<=kminus) && break
end
k=k0
kminus=k
vmin=c[kminus]
umin=lambda
umax = vmin + umin - vmax
elseif umax > 0
while true
c[k0] = vmax
k0 = k0+1
!(k0<=kplus) && break
end
k=k0
kplus=k
vmax=c[kplus]
umax=minlambda
umin = vmax + umax - vmin
else
vmin = vmin + umin/(k-k0+1)
while true
c[k0] = vmin
k0 = k0 + 1
!(k0<=k) && break
end
return
end
end
umin = umin + c[k+1] - vmin
if umin < minlambda
# Inplace soft thresholding
#vmin = vmin>mu ? vmin-mu : vmin<-mu ? vmin+mu : 0.0
while true
c[k0] = vmin
k0 = k0 + 1
!(k0<=kminus) && break
end
k=k0
kminus=k
kplus=kminus
vmin = c[kplus]
vmax = vmin + twolambda
umin = lambda
umax = minlambda
else
umax = umax + c[k+1] - vmax
if umax > lambda
# Inplace soft thresholding
#vmax = vmax>mu ? vmax-mu : vmax<-mu ? vmax+mu : 0.0;
while true
c[k0]=vmax
k0 = k0 + 1
!(k0<=kplus) && break
end
k = k0
kminus = k
kplus = kminus
vmax = c[kplus]
vmin = vmax - twolambda
umin = lambda
umax = minlambda
else
k = k + 1
if umin >= lambda
kminus = k
vmin = vmin + (umin-lambda)/(kminus-k0+1)
umin = lambda
end
if umax <= minlambda
kplus = k
vmax = vmax + (umax+lambda)/(kplus -k0 +1)
umax = minlambda
end
end
end
end
end
| RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 257 | include("ProxL1.jl")
include("ProxL2.jl")
include("ProxL21.jl")
include("ProxLLR.jl")
# include("ProxSLR.jl")
include("ProxPositive.jl")
include("ProxProj.jl")
include("ProxReal.jl")
include("ProxTV.jl")
include("ProxTVCondat.jl")
include("ProxNuclear.jl")
| RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 500 | using RegularizedLeastSquares, LinearAlgebra, RegularizedLeastSquares.LinearOperatorCollection
# Packages for testing only
using Random, Test
using FFTW
using JLArrays
areTypesDefined = @isdefined arrayTypes
arrayTypes = areTypesDefined ? arrayTypes : [Array, JLArray]
@testset "RegularizedLeastSquares" begin
include("testCreation.jl")
include("testKaczmarz.jl")
include("testProxMaps.jl")
include("testSolvers.jl")
include("testRegularization.jl")
include("testMultiThreading.jl")
end | RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 1544 | @testset "Test Callbacks" begin
A = rand(32, 32)
x = rand(32)
b = A * x
iterations = 10
solver = createLinearSolver(CGNR, A; iterations = iterations, relTol = 0.0)
@testset "Store Solution Callback" begin
cbk = StoreSolutionCallback()
x_approx = solve!(solver, b; callbacks = cbk)
@test length(cbk.solutions) == iterations + 1
@test cbk.solutions[end] == x_approx
end
@testset "Compare Solution Callback" begin
cbk = CompareSolutionCallback(x)
x_approx = solve!(solver, b; callbacks = cbk)
@test length(cbk.results) == iterations + 1
@test cbk.results[1] > cbk.results[end]
end
@testset "Store Convergence Callback" begin
cbk = StoreConvergenceCallback()
x_approx = solve!(solver, b; callbacks = cbk)
@test length(first(values(cbk.convMeas))) == iterations + 1
conv = solverconvergence(solver)
@test cbk.convMeas[keys(conv)[1]][end] == conv[1]
end
@testset "Do-Syntax Callback" begin
counter = 0
solve!(solver, b) do solver, it
counter +=1
end
@test counter == iterations + 1
end
@testset "Multiple Callbacks" begin
callbacks = [StoreSolutionCallback(), StoreConvergenceCallback()]
x_approx = solve!(solver, b; callbacks)
cbk = callbacks[1]
@test length(cbk.solutions) == iterations + 1
@test cbk.solutions[end] == x_approx
cbk = callbacks[2]
@test length(first(values(cbk.convMeas))) == iterations + 1
conv = solverconvergence(solver)
@test cbk.convMeas[keys(conv)[1]][end] == conv[1]
end
end | RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 196 | @testset "Creation of solvers" begin
@test_logs (:warn, Regex("The following arguments were passed but filtered out: testKwarg*")) createLinearSolver(Kaczmarz, zeros(42, 42), testKwarg=1337)
end | RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 3790 | Random.seed!(12345)
@testset "Test Kaczmarz" begin
for arrayType in arrayTypes
@testset "$arrayType" begin
for T in [Float32, Float64, ComplexF32, ComplexF64]
@testset "test Kaczmarz update $T" begin
# set up
M = 127
N = 16
A = arrayType(rand(T, M, N))
Aᵀ = transpose(A)
b = arrayType(zeros(T, M))
β = rand(T)
k = rand(1:N)
# end set up
RegularizedLeastSquares.kaczmarz_update!(Aᵀ, b, k, β)
@test Array(b) ≈ β * conj(Array(A[:, k]))
# set up
M = 127
N = 16
A = arrayType(rand(T, N, M))
b = arrayType(zeros(T, M))
β = rand(T)
k = rand(1:N)
# end set up
RegularizedLeastSquares.kaczmarz_update!(A, b, k, β)
@test Array(b) ≈ β * conj(Array(A[k, :]))
end
end
# Test Tikhonov regularization matrix
@testset "Kaczmarz Tikhonov matrix" begin
A = rand(3, 2) + im * rand(3, 2)
x = rand(2) + im * rand(2)
b = A * x
regMatrix = rand(2) # Tikhonov matrix
solver = Kaczmarz
S = createLinearSolver(solver, arrayType(A), iterations=200, reg=[L2Regularization(arrayType(regMatrix))])
x_approx = Array(solve!(S, arrayType(b)))
#@info "Testing solver $solver ...: $x == $x_approx"
@test norm(x - x_approx) / norm(x) ≈ 0 atol = 0.1
## Test spatial regularization
M = 12
N = 8
A = rand(M, N) + im * rand(M, N)
x = rand(N) + im * rand(N)
b = A * x
# regularization
λ = rand(1)
regMatrix = rand(N)
# @show A, x, regMatrix
# use regularization matrix
S = createLinearSolver(solver, arrayType(A), iterations=100, reg=[L2Regularization(arrayType(regMatrix))])
x_matrix = Array(solve!(S, arrayType(b)))
# use standard reconstruction
S = createLinearSolver(solver, arrayType(A * Diagonal(1 ./ sqrt.(regMatrix))), iterations=100)
x_approx = Array(solve!(S, arrayType(b))) ./ sqrt.(regMatrix)
# test
#@info "Testing solver $solver ...: $x_matrix == $x_approx"
@test norm(x_approx - x_matrix) / norm(x_approx) ≈ 0 atol = 0.1
end
@testset "Kaczmarz Weighting Matrix" begin
M = 12
N = 8
A = rand(M, N) + im * rand(M, N)
x = rand(N) + im * rand(N)
b = A * x
w = WeightingOp(rand(M))
d = diagm(w.weights)
reg = L2Regularization(rand())
solver = Kaczmarz
S = createLinearSolver(solver, d * A, iterations=200, reg=reg)
S_weighted = createLinearSolver(solver, *(ProdOp, w, A), iterations=200, reg=reg)
x_approx = solve!(S, d * b)
x_weighted = solve!(S_weighted, d * b)
#@info "Testing solver $solver ...: $x == $x_approx"
@test isapprox(x_approx, x_weighted)
end
# Test Kaczmarz parameters
@testset "Kaczmarz parameters" begin
M = 12
N = 8
A = rand(M, N) + im * rand(M, N)
x = rand(N) + im * rand(N)
b = A * x
solver = Kaczmarz
S = createLinearSolver(solver, arrayType(A), iterations=200)
x_approx = Array(solve!(S, arrayType(b)))
@test norm(x - x_approx) / norm(x) ≈ 0 atol = 0.1
S = createLinearSolver(solver, arrayType(A), iterations=200, shuffleRows=true)
x_approx = Array(solve!(S, arrayType(b)))
@test norm(x - x_approx) / norm(x) ≈ 0 atol = 0.1
S = createLinearSolver(solver, arrayType(A), iterations=2000, randomized=true)
x_approx = Array(solve!(S, arrayType(b)))
@test norm(x - x_approx) / norm(x) ≈ 0 atol = 0.1
end
end
end
end
| RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 1015 | function testMultiThreadingSolver(; arrayType = Array, scheduler = MultiDataState)
A = rand(ComplexF32, 3, 2)
x = rand(ComplexF32, 2, 4)
b = A * x
solvers = linearSolverList()
@testset "$(solvers[i])" for i = 1:length(solvers)
S = createLinearSolver(solvers[i], arrayType(A), iterations = 100)
x_sequential = hcat([Array(solve!(S, arrayType(b[:, j]))) for j = 1:size(b, 2)]...)
@test x_sequential ≈ x rtol = 0.1
x_approx = Array(solve!(S, arrayType(b), scheduler=scheduler))
@test x_approx ≈ x rtol = 0.1
# Check that sequential/normal reconstruction still works after multi-threading
x_vec = Array(solve!(S, arrayType(b[:, 1])))
@test x_vec ≈ x[:, 1] rtol = 0.1
end
end
@testset "Test MultiThreading Support" begin
for arrayType in arrayTypes
@testset "$arrayType" begin
for scheduler in [SequentialState, MultiThreadingState]
@testset "$scheduler" begin
testMultiThreadingSolver(; arrayType, scheduler)
end
end
end
end
end | RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 12169 | # check Thikonov proximal map
function testL2Prox(N=256; numPeaks=5, λ=0.01, arrayType = Array)
@info "test L2-regularization"
Random.seed!(1234)
x = zeros(N)
for i=1:numPeaks
x[rand(1:N)] = rand()
end
# x_l2 = 1. / (1. + 2. *λ)*x
x_l2 = copy(x)
x_l2 = Array(prox!(L2Regularization, arrayType(x_l2), λ))
@test norm(x_l2 - 1.0/(1.0+2.0*λ)*x) / norm(1.0/(1.0+2.0*λ)*x) ≈ 0 atol=0.001
# check decrease of objective function
@test 0.5*norm(x-x_l2)^2 + norm(L2Regularization, x_l2, λ) <= norm(L2Regularization, x, λ)
end
# denoise a signal consisting of a number of delta peaks
function testL1Prox(N=256; numPeaks=5, σ=0.03, arrayType = Array)
@info "test L1-regularization"
Random.seed!(1234)
x = zeros(N)
for i=1:numPeaks
x[rand(1:N)] = (1-2*σ)*rand()+2*σ
end
σ = sum(abs.(x))/length(x)*σ
xNoisy = x .+ σ/sqrt(2.0)*(randn(N)+1im*randn(N))
x_l1 = copy(xNoisy)
x_l1 = Array(prox!(L1Regularization, arrayType(x_l1), 2*σ))
# solution should be better than without denoising
@info "rel. L1 error : $(norm(x - x_l1)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
@test norm(x - x_l1) <= norm(x - xNoisy)
@test norm(x - x_l1) / norm(x) ≈ 0 atol=0.1
# check decrease of objective function
@test 0.5*norm(xNoisy-x_l1)^2+norm(L1Regularization, x_l1, 2*σ) <= norm(L1Regularization, xNoisy, 2*σ)
end
# denoise a signal consisting of multiple slices with delta peaks at the same locations
# only the last slices are noisy.
# Thus, the first slices serve as a reference to enhance denoising
function testL21Prox(N=256; numPeaks=5, numSlices=8, noisySlices=2, σ=0.05, arrayType = Array)
@info "test L21-regularization"
Random.seed!(1234)
x = zeros(ComplexF64,N,numSlices)
for i=1:numPeaks
x[rand(1:N),:] = (1-2*σ)*rand(numSlices) .+ 2*σ
end
x = vec(x)
xNoisy = copy(x)
noise = randn(N*noisySlices)
σ = sum(abs.(x))/length(x)*σ
xNoisy[(numSlices-noisySlices)*N+1:end] .+= σ/sqrt(2.0)*(randn(N*noisySlices)+1im*randn(N*noisySlices)) #noise
x_l1 = copy(xNoisy)
prox!(L1Regularization, x_l1, 2*σ)
x_l21 = copy(xNoisy)
x_l21 = Array(prox!(L21Regularization, arrayType(x_l21), 2*σ,slices=numSlices))
# solution should be better than without denoising and than with l1-denoising
@info "rel. L21 error : $(norm(x - x_l21)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
@test norm(x - x_l21) <= norm(x - xNoisy)
@test norm(x - x_l21) <= norm(x - x_l1)
@test norm(x - x_l21) / norm(x) ≈ 0 atol=0.05
# check decrease of objective function
@test 0.5*norm(xNoisy-x_l21)^2+norm(L21Regularization, x_l21,2*σ,slices=numSlices) <= norm(L21Regularization, xNoisy,2*σ,slices=numSlices)
@test 0.5*norm(xNoisy-x_l21)^2+norm(L21Regularization, x_l21,2*σ,slices=numSlices) <= 0.5*norm(xNoisy-x_l1)^2+norm(L21Regularization, x_l1,2*σ,slices=numSlices)
end
# denoise a piece-wise constant signal using TV regularization
function testTVprox(N=256; numEdges=5, σ=0.05, arrayType = Array)
@info "test TV-regularization"
Random.seed!(1234)
x = zeros(ComplexF64,N,N)
for i=1:numEdges
idx1 = rand(0:N-1)
idx2 = rand(0:N-1)
x[idx1+1:end, idx2+1:end] .+= randn()
end
x= vec(x)
xNoisy = copy(x)
σ = sum(abs.(x))/length(x)*σ
xNoisy[:] += σ/sqrt(2.0)*(randn(N*N)+1im*randn(N*N))
x_l1 = copy(xNoisy)
prox!(L1Regularization, x_l1, 2*σ)
x_tv = copy(xNoisy)
x_tv = Array(prox!(TVRegularization, arrayType(x_tv), 2*σ, shape=(N,N), dims=1:2))
@info "rel. TV error : $(norm(x - x_tv)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
@test norm(x - x_tv) <= norm(x - xNoisy)
@test norm(x - x_tv) <= norm(x - x_l1)
@test norm(x - x_tv) / norm(x) ≈ 0 atol=0.05
# check decrease of objective function
@test 0.5*norm(xNoisy-x_tv)^2+norm(TVRegularization, x_tv,2*σ,shape=(N,N)) <= norm(TVRegularization, xNoisy,2*σ,shape=(N,N))
@test 0.5*norm(xNoisy-x_tv)^2+norm(TVRegularization, x_tv,2*σ,shape=(N,N)) <= 0.5*norm(xNoisy-x_l1)^2+norm(TVRegularization, x_l1,2*σ,shape=(N,N))
end
# denoise a signal that is piecewise constant along a given direction
function testDirectionalTVprox(N=256; numEdges=5, σ=0.05, T=ComplexF64, arrayType = Array)
x = zeros(T,N,N)
for i=1:numEdges
idx = rand(0:N-1)
x[:,idx+1:end,:] .+= randn(T)
end
xNoisy = copy(x)
σ = sum(abs.(x))/length(x)*σ
xNoisy .+= (σ/sqrt(2)) .* randn(T, N, N)
x_tv = copy(xNoisy)
x_tv = Array(reshape(prox!(TVRegularization, arrayType(vec(x_tv)), 2*σ, shape=(N,N), dims=1), N, N))
x_tv2 = copy(xNoisy)
for i=1:N
x_tmp = x_tv2[:,i]
prox!(TVRegularization, x_tmp, 2*σ, shape=(N,), dims=1)
x_tv2[:,i] .= x_tmp
end
# directional TV and 1d TV should yield the same result
@test norm(x_tv-x_tv2) / norm(x) ≈ 0 atol=1e-8
# check decrease of error
@test norm(x - x_tv) <= norm(x-xNoisy)
## cf. Condat and gradient based algorithm
x_tv3 = copy(xNoisy)
x_tv3 = Array(reshape(prox!(TVRegularization, vec(x_tv3), 2*σ, shape=(N,N), dims=(1,)), N, N))
@test norm(x_tv-x_tv3) / norm(x) ≈ 0 atol=1e-2
end
# test enforcement of positivity constraint
function testPositive(N=256; arrayType = Array)
@info "test positivity-constraint"
Random.seed!(1234)
x = randn(N) .+ 1im*randn(N)
xPos = real.(x)
xPos[findall(x->x<0,xPos)] .= 0
xProj = copy(x)
xProj = Array(prox!(PositiveRegularization, arrayType(xProj)))
@test norm(xProj-xPos)/norm(xPos) ≈ 0 atol=1.e-4
# check decrease of objective function
@test 0.5*norm(x-xProj)^2+norm(PositiveRegularization, xProj) <= norm(PositiveRegularization, x)
end
# test enforcement of "realness"-constraint
function testProj(N=1012; arrayType = Array)
@info "test realness-constraint"
Random.seed!(1234)
x = randn(N) .+ 1im*randn(N)
xReal = real.(x)
xProj = copy(x)
xProj = Array(prox!(ProjectionRegularization, arrayType(xProj), projFunc=x->real(x)))
@test norm(xProj-xReal)/norm(xReal) ≈ 0 atol=1.e-4
# check decrease of objective function
@test 0.5*norm(x-xProj)^2+norm(ProjectionRegularization, xProj,projFunc=x->real(x)) <= norm(ProjectionRegularization, x,projFunc=x->real(x))
end
# test denoising of a low-rank matrix
function testNuclear(N=32,rank=2;σ=0.05, arrayType = Array)
@info "test nuclear norm regularization"
Random.seed!(1234)
x = zeros(ComplexF64,N,N);
for i=1:rank
x[:,i] = (0.3+0.7*randn())*cos.(2*pi/N*rand(1:div(N,4))*collect(1:N));
end
for i=rank+1:N
for j=1:rank
x[:,i] .+= rand()*x[:,j];
end
end
x = vec(x)
σ = sum(abs.(x))/length(x)*σ
xNoisy = copy(x)
xNoisy[:] += σ/sqrt(2.0)*(randn(N*N)+1im*randn(N*N))
x_lr = copy(xNoisy)
x_lr = Array(prox!(NuclearRegularization, arrayType(x_lr),5*σ,svtShape=(32,32)))
@test norm(x - x_lr) <= norm(x - xNoisy)
@test norm(x - x_lr) / norm(x) ≈ 0 atol=0.05
@info "rel. LR error : $(norm(x - x_lr)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
# check decrease of objective function
@test 0.5*norm(xNoisy-x_lr)^2+norm(NuclearRegularization, x_lr,5*σ,svtShape=(N,N)) <= norm(NuclearRegularization, xNoisy,5*σ,svtShape=(N,N))
end
function testLLR(shape=(32,32,80),blockSize=(4,4);σ=0.05, arrayType = Array)
@info "test LLR regularization"
Random.seed!(1234)
x = zeros(ComplexF64,shape);
for j=1:div(shape[2],blockSize[2])
for i=1:div(shape[1],blockSize[1])
ampl = rand()
r = rand()
for t=1:shape[3]
x[(i-1)*blockSize[1]+1:i*blockSize[1],(j-1)*blockSize[2]+1:j*blockSize[2],t] .= ampl*exp.(-r*t)
end
end
end
x = vec(x)
xNoisy = copy(x)
σ = sum(abs.(x))/length(x)*σ
xNoisy[:] += σ/sqrt(2.0)*(randn(prod(shape))+1im*randn(prod(shape)))
x_llr = copy(xNoisy)
x_llr = Array(prox!(LLRRegularization, arrayType(x_llr),10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false))
@test norm(x - x_llr) <= norm(x - xNoisy)
@test norm(x - x_llr) / norm(x) ≈ 0 atol=0.05
@info "rel. LLR error : $(norm(x - x_llr)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
# check decrease of objective function
@test 0.5*norm(xNoisy-x_llr)^2+norm(LLRRegularization, x_llr,10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false) <= norm(LLRRegularization, xNoisy,10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false)
end
function testLLROverlapping(shape=(32,32,80),blockSize=(4,4);σ=0.05, arrayType = Array)
@info "test Overlapping LLR regularization"
Random.seed!(1234)
x = zeros(ComplexF64,shape);
for j=1:div(shape[2],blockSize[2])
for i=1:div(shape[1],blockSize[1])
ampl = rand()
r = rand()
for t=1:shape[3]
x[(i-1)*blockSize[1]+1:i*blockSize[1],(j-1)*blockSize[2]+1:j*blockSize[2],t] .= ampl*exp.(-r*t)
end
end
end
x = vec(x)
xNoisy = copy(x)
σ = sum(abs.(x))/length(x)*σ
xNoisy[:] += σ/sqrt(2.0)*(randn(prod(shape))+1im*randn(prod(shape)))
x_llr = copy(xNoisy)
prox!(LLRRegularization, x_llr,10*σ,shape=shape[1:2],blockSize=blockSize, fullyOverlapping = true)
@test norm(x - x_llr) <= norm(x - xNoisy)
@test norm(x - x_llr) / norm(x) ≈ 0 atol=0.05
@info "rel. LLR error : $(norm(x - x_llr)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
# check decrease of objective function
#@test 0.5*norm(xNoisy-x_llr)^2+normLLR(x_llr,10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false) <= normLLR(xNoisy,10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false)
end
function testLLR_3D(shape=(32,32,32,80),blockSize=(4,4,4);σ=0.05, arrayType = Array)
@info "test LLR 3D regularization"
Random.seed!(1234)
x = zeros(ComplexF64,shape)
for k=1:div(shape[3],blockSize[3])
for j=1:div(shape[2],blockSize[2])
for i=1:div(shape[1],blockSize[1])
ampl = rand()
r = rand()
for t=1:shape[4]
x[(i-1)*blockSize[1]+1:i*blockSize[1],(j-1)*blockSize[2]+1:j*blockSize[2],(k-1)*blockSize[3]+1:k*blockSize[3],t] .= ampl*exp.(-r*t)
end
end
end
end
x = vec(x)
xNoisy = copy(x)
σ = sum(abs.(x))/length(x)*σ
xNoisy[:] += σ/sqrt(2.0)*(randn(prod(shape))+1im*randn(prod(shape)))
x_llr = copy(xNoisy)
x_llr = Array(prox!(LLRRegularization, arrayType(x_llr),10*σ,shape=shape[1:end-1],blockSize=blockSize,randshift=false))
@test norm(x - x_llr) <= norm(x - xNoisy)
@test norm(x - x_llr) / norm(x) ≈ 0 atol=0.05
@info "rel. LLR 3D error : $(norm(x - x_llr)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
# check decrease of objective function # TODO: Implement norm as ND
# @test 0.5*norm(xNoisy-x_llr)^2+normLLR(x_llr,10*σ,shape=shape[1:3],blockSize=blockSize,randshift=false) <= normLLR(xNoisy,10*σ,shape=shape[1:3],blockSize=blockSize,randshift=false)
end
function testConversion()
for (xType, lambdaType) in [(Float32, Float64), (Float64, Float32), (Complex{Float32}, Float64), (Complex{Float64}, Float32)]
for prox in [L1Regularization, L21Regularization, L2Regularization, LLRRegularization, NuclearRegularization, TVRegularization]
@info "Test λ conversion for prox!($prox, $xType, $lambdaType)"
@test try prox!(prox, zeros(xType, 10), lambdaType(0.0); shape=(2, 5), svtShape=(2, 5))
true
catch e
false
end skip = in(prox, [LLRRegularization])
@test try norm(prox, zeros(xType, 10), lambdaType(0.0); shape=(2, 5), svtShape=(2, 5))
true
catch e
false
end skip = in(prox, [LLRRegularization])
end
end
end
@testset "Proximal Maps" begin
for arrayType in arrayTypes
@testset "$arrayType" begin
@testset "L2 Prox" testL2Prox(;arrayType)
@testset "L1 Prox" testL1Prox(;arrayType)
@testset "L21 Prox" testL21Prox(;arrayType)
@testset "TV Prox" testTVprox(;arrayType)
@testset "TV Prox Directional" testDirectionalTVprox(;arrayType)
@testset "Positive Prox" testPositive(;arrayType)
@testset "Projection Prox" testProj(;arrayType)
if !areTypesDefined # Don't run these tests on GPUs/buildkite, since svd can fail
@testset "Nuclear Prox" testNuclear(;arrayType)
@testset "LLR Prox: $arrayType" testLLR(;arrayType)
@testset "LLR Prox Overlapping: $arrayType" testLLROverlapping(;arrayType)
@testset "LLR Prox 3D: $arrayType" testLLR_3D(;arrayType)
end
end
end
@testset "Prox Lambda Conversion" testConversion()
end
| RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 2797 |
@testset "PnP Constructor" begin
model(x) = x
# reduced constructor, checking defaults
pnp_reg = PnPRegularization(model, [2])
@test pnp_reg.λ == 1.0
@test pnp_reg.model == model
@test pnp_reg.shape == [2]
@test pnp_reg.input_transform == RegularizedLeastSquares.MinMaxTransform
@test pnp_reg.ignoreIm == false
# full constructor
pnp_reg = PnPRegularization(0.1; model=model, shape=[2], input_transform=x -> x, ignoreIm=true)
# full constructor defaults
pnp_reg = PnPRegularization(0.1; model=model, shape=[2])
@test pnp_reg.input_transform == RegularizedLeastSquares.MinMaxTransform
@test pnp_reg.ignoreIm == false
# unnecessary kwargs are ignored
pnp_reg = PnPRegularization(0.1; model=model, shape=[2], input_transform=x -> x, ignoreIm=true, sMtHeLsE=1)
end
@testset "PnP Compatibility" begin
supported_solvers = [Kaczmarz, ADMM]
A = rand(3, 2)
x = rand(2)
pnp_reg = PnPRegularization(x -> x, [2])
b = A * x
for solver in supported_solvers
@test try
S = createLinearSolver(solver, A, iterations=2; reg=[pnp_reg])
x_approx = solve!(S, b)
@info "PnP Regularization and $solver Compatibility"
true
catch ex
false
end
end
end
@testset "PnP Prox Real" begin
pnp_reg = PnPRegularization(0.1; model=x -> zeros(eltype(x), size(x)), shape=[2], input_transform=RegularizedLeastSquares.IdentityTransform)
out = prox!(pnp_reg, [1.0, 2.0], 0.1)
@info out
@test out == [0.9, 1.8]
end
@testset "PnP Prox Complex" begin
# ignoreIm = false
pnp_reg = PnPRegularization(
0.1; model=x -> zeros(eltype(x), size(x)), shape=[2],
input_transform=RegularizedLeastSquares.IdentityTransform
)
out = prox!(pnp_reg, [1.0 + 1.0im, 2.0 + 2.0im], 0.1)
@test real(out) == [0.9, 1.8]
@test imag(out) == [0.9, 1.8]
# ignoreIm = true
pnp_reg = PnPRegularization(
0.1; model=x -> zeros(eltype(x), size(x)), shape=[2],
input_transform=RegularizedLeastSquares.IdentityTransform,
ignoreIm=true
)
out = prox!(pnp_reg, [1.0 + 1.0im, 2.0 + 2.0im], 0.1)
@test real(out) == [0.9, 1.8]
@test imag(out) == [1.0, 2.0]
end
@testset "PnP Prox λ clipping" begin
pnp_reg = PnPRegularization(0.1; model=x -> zeros(eltype(x), size(x)), shape=[2], input_transform=RegularizedLeastSquares.IdentityTransform)
out = @test_warn "$(typeof(pnp_reg)) was given λ with value 1.5. Valid range is [0, 1]. λ changed to temp" prox!(pnp_reg, [1.0, 2.0], 1.5)
@test out == [0.0, 0.0]
out = @test_warn "$(typeof(pnp_reg)) was given λ with value -1.5. Valid range is [0, 1]. λ changed to temp" prox!(pnp_reg, [1.0, 2.0], -1.5)
@test out == [1.0, 2.0]
end | RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 7954 | Random.seed!(12345)
function testRealLinearSolver(; arrayType = Array, elType = Float32)
A = rand(elType, 3, 2)
x = rand(elType, 2)
b = A * x
solvers = linearSolverListReal()
@testset for solver in solvers
@test try
S = createLinearSolver(solver, arrayType(A), iterations = 200)
x_approx = Array(solve!(S, arrayType(b)))
@info "Testing solver $solver: $x ≈ $x_approx"
@test x_approx ≈ x rtol = 0.1
true
catch e
@error e
false
end skip = arrayType != Array && solver <: AbstractDirectSolver #
end
end
function testComplexLinearSolver(; arrayType = Array, elType = Float32)
A = rand(elType, 3, 2) + im * rand(elType, 3, 2)
x = rand(elType, 2) + im * rand(elType, 2)
b = A * x
solvers = linearSolverList()
@testset for solver in solvers
@test try
S = createLinearSolver(solver, arrayType(A), iterations = 100)
x_approx = Array(solve!(S, arrayType(b)))
@info "Testing solver $solver: $x ≈ $x_approx"
@test x_approx ≈ x rtol = 0.1
true
catch e
@error e
false
end skip = arrayType != Array && solver <: AbstractDirectSolver
end
end
function testComplexLinearAHASolver(; arrayType = Array, elType = Float32)
A = rand(elType, 3, 2) + im * rand(elType, 3, 2)
x = rand(elType, 2) + im * rand(elType, 2)
AHA = A'*A
b = AHA * x
solvers = filter(s -> s ∉ [DirectSolver, PseudoInverse, Kaczmarz], linearSolverListReal())
@testset for solver in solvers
@test try
S = createLinearSolver(solver, nothing; AHA=arrayType(AHA), iterations = 100)
x_approx = Array(solve!(S, arrayType(b)))
@info "Testing solver $solver: $x ≈ $x_approx"
@test x_approx ≈ x rtol = 0.1
true
catch e
@error e
false
end
end
end
function testConvexLinearSolver(; arrayType = Array, elType = Float32)
# fully sampled operator, image and data
N = 256
numPeaks = 5
F = [1 / sqrt(N) * exp(-2im * π * j * k / N) for j = 0:N-1, k = 0:N-1]
x = zeros(N)
for i = 1:3
x[rand(1:N)] = rand()
end
b = 1 / sqrt(N) * fft(x)
# random undersampling
idx = sort(unique(rand(1:N, div(N, 2))))
b = arrayType(b[idx])
F = arrayType(F[idx, :])
for solver in [POGM, OptISTA, FISTA, ADMM]
reg = L1Regularization(elType(1e-3))
S = createLinearSolver(
solver,
F;
reg = reg,
iterations = 200,
normalizeReg = NoNormalization(),
)
x_approx = Array(solve!(S, b))
@info "Testing solver $solver w/o restart: relative error = $(norm(x - x_approx) / norm(x))"
@test x ≈ x_approx rtol = 0.1
#additionally test the gradient restarting scheme
if solver == POGM || solver == FISTA
S = createLinearSolver(
solver,
F;
reg = reg,
iterations = 200,
normalizeReg = NoNormalization(),
restart = :gradient,
)
x_approx = Array(solve!(S, b))
@info "Testing solver $solver w/ gradient restart: relative error = $(norm(x - x_approx) / norm(x))"
@test x ≈ x_approx rtol = 0.1
end
# test invariance to the maximum eigenvalue
reg = L1Regularization(elType(reg.λ * length(b) / norm(b, 1)))
scale_F = 1e3
S = createLinearSolver(
solver,
F .* scale_F;
reg = reg,
iterations = 200,
normalizeReg = MeasurementBasedNormalization(),
)
x_approx = Array(solve!(S, b))
x_approx .*= scale_F
@info "Testing solver $solver w/o restart and after re-scaling: relative error = $(norm(x - x_approx) / norm(x))"
@test x ≈ x_approx rtol = 0.1
end
# test ADMM with option vary_rho
solver = ADMM
reg = L1Regularization(elType(1.e-3))
S = createLinearSolver(
solver,
F;
reg = reg,
iterations = 200,
normalizeReg = NoNormalization(),
rho = 1e6,
vary_rho = :balance,
verbose = false,
)
x_approx = Array(solve!(S, b))
@info "Testing solver $solver: relative error = $(norm(x - x_approx) / norm(x))"
@test x ≈ x_approx rtol = 0.1
S = createLinearSolver(
solver,
F;
reg = reg,
iterations = 200,
normalizeReg = NoNormalization(),
rho = 1e-6,
vary_rho = :balance,
verbose = false,
)
x_approx = Array(solve!(S, b))
@info "Testing solver $solver: relative error = $(norm(x - x_approx) / norm(x))"
@test x ≈ x_approx rtol = 0.1
# the PnP scheme only increases rho, hence we only test it with a small initial rho
S = createLinearSolver(
solver,
F;
reg = reg,
iterations = 200,
normalizeReg = NoNormalization(),
rho = 1e-6,
vary_rho = :PnP,
verbose = false,
)
x_approx = Array(solve!(S, b))
@info "Testing solver $solver: relative error = $(norm(x - x_approx) / norm(x))"
@test x ≈ x_approx rtol = 0.1
##
solver = SplitBregman
reg = L1Regularization(elType(2e-3))
S = createLinearSolver(
solver,
F;
reg = reg,
iterations = 5,
iterationsInner = 40,
rho = 1.0,
normalizeReg = NoNormalization(),
)
x_approx = Array(solve!(S, b))
@info "Testing solver $solver: relative error = $(norm(x - x_approx) / norm(x))"
@test x ≈ x_approx rtol = 0.1
reg = L1Regularization(elType(reg.λ * length(b) / norm(b, 1)))
S = createLinearSolver(
solver,
F;
reg = reg,
iterations = 5,
iterationsInner = 40,
rho = 1.0,
normalizeReg = MeasurementBasedNormalization(),
)
x_approx = Array(solve!(S, b))
@info "Testing solver $solver: relative error = $(norm(x - x_approx) / norm(x))"
@test x ≈ x_approx rtol = 0.1
#=
solver = PrimalDualSolver
reg = [L1Regularization(1.e-4), TVRegularization(1.e-4, shape = (0,0))]
FR = [real.(F ./ norm(F)); imag.(F ./ norm(F))]
bR = [real.(b ./ norm(F)); imag.(b ./ norm(F))]
S = createLinearSolver(
solver,
FR;
reg = reg,
iterations = 1000,
)
x_approx = solve!(S, bR)
@info "Testing solver $solver: relative error = $(norm(x - x_approx) / norm(x))"
@test x ≈ x_approx rtol = 0.1
=#
end
function testVerboseSolvers(; arrayType = Array, elType = Float32)
A = rand(elType, 3, 2)
x = rand(elType, 2)
b = A * x
solvers = [ADMM, FISTA, POGM, OptISTA, SplitBregman]
for solver in solvers
@test try
S = createLinearSolver(solver, arrayType(A), iterations = 3, verbose = true)
solve!(S, arrayType(b))
true
catch e
@error e
false
end
end
end
@testset "Test Solvers" begin
for arrayType in arrayTypes
@testset "$arrayType" begin
for elType in [Float32, Float64]
@testset "Real Linear Solver: $elType" begin
testRealLinearSolver(; arrayType, elType)
end
@testset "Complex Linear Solver: $elType" begin
testComplexLinearSolver(; arrayType, elType)
end
@testset "Complex Linear Solver w/ AHA Interface: $elType" begin
testComplexLinearAHASolver(; arrayType, elType)
end
@testset "General Convex Solver: $elType" begin
testConvexLinearSolver(; arrayType, elType)
end
end
end
end
testVerboseSolvers()
end | RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 86 | using CUDA
arrayTypes = [CuArray]
include(joinpath(@__DIR__(), "..", "runtests.jl")) | RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | code | 89 | using AMDGPU
arrayTypes = [ROCArray]
include(joinpath(@__DIR__(), "..", "runtests.jl")) | RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | docs | 731 | # RegularizedLeastSquares
[](https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl/actions)
[](http://codecov.io/github/JuliaImageRecon/RegularizedLeastSquares.jl?branch=master)
# Documentation
Read the documentation here:
[](https://JuliaImageRecon.github.io/RegularizedLeastSquares.jl/latest)
# Community Standards
This project is part of the Julia community and follows the [Julia community standards](https://julialang.org/community/standards/).
| RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | docs | 2498 | # RegularizedLeastSquares.jl
*Solvers for Linear Inverse Problems using Regularization Techniques*
## Introduction
RegularizedLeastSquares.jl is a Julia package for solving large linear systems using various types of algorithms. Ill-conditioned problems arise in many areas of practical interest. Regularization techniques and nonlinear problem formulations are often used to solve these problems. This package provides implementations for a variety of solvers used in areas such as MPI and MRI. In particular, this package serves as the optimization backend of the Julia packages [MPIReco.jl](https://github.com/MagneticParticleImaging/MPIReco.jl) and [MRIReco.jl](https://github.com/MagneticResonanceImaging/MRIReco.jl).
The implemented methods range from the $l^2_2$-regularized CGNR method to more general optimizers such as the Alternating Direction of Multipliers Method (ADMM) or the Split-Bregman method.
For convenience, implementations of popular regularizers, such as $l_1$-regularization and TV regularization, are provided. On the other hand, hand-crafted regularizers can be used quite easily.
Depending on the problem, it can become infeasible to store the full system matrix. For this purpose, RegularizedLeastSquares.jl allows the use of matrix-free operators. Such operators can be realized using the interface provided by the package [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl).
Other interfaces can be used as well, as long as the product `*(A,x)` and the adjoint `adjoint(A)` are provided. A number of common matrix-free operators are provided by the package [LinearOperatorCollection.jl](https://github.com/JuliaImageRecon/LinearOperatorCollection.jl).
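As a minimal sketch (the matrix, data, and parameter values below are made up for illustration), a plain dense matrix already satisfies this interface and can be handed directly to a solver:
```julia
using RegularizedLeastSquares
A = rand(32, 16); x = rand(16); b = A * x
S = createLinearSolver(CGNR, A; reg = L2Regularization(0.001), iterations = 32)
x_approx = solve!(S, b)
```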
## Features
* Variety of optimization algorithms optimized for least squares problems
* Support for matrix-free operators
* GPU support
## Usage
* See [Getting Started](@ref) for an introduction to using the package
## See also
Packages:
* [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl)
* [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl)
* [Krylov.jl](https://github.com/JuliaSmoothOptimizers/Krylov.jl)
* [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl)
Organizations:
* [JuliaNLSolvers](https://github.com/JuliaNLSolvers)
* [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers)
* [JuliaFirstOrder](https://github.com/JuliaFirstOrder) | RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | docs | 3486 | # Solvers
RegularizedLeastSquares.jl provides a variety of solvers, which are used in fields such as MPI and MRI. The following is a non-exhaustive list of the implemented solvers:
* Kaczmarz algorithm (`Kaczmarz`, also called Algebraic reconstruction technique)
* Conjugate Gradients Normal Residual method (`CGNR`)
* Fast Iterative Shrinkage Thresholding Algorithm (`FISTA`)
* Alternating Direction of Multipliers Method (`ADMM`)
The solvers are organized in a type-hierarchy and inherit from:
```julia
abstract type AbstractLinearSolver end
```
The type hierarchy is further differentiated into solver categories such as `AbstractRowActionSolver`, `AbstractPrimalDualSolver` or `AbstractProximalGradientSolver`. A list of all available solvers can be returned by the `linearSolverList` function.
## Solver Construction
To create a solver, one can invoke the method `createLinearSolver` as in
```julia
solver = createLinearSolver(CGNR, A; reg=reg, kwargs...)
```
Here `A` denotes the operator and `reg` the [Regularization](generated/explanations/regularization.md) terms to be used by the solver. All further solver parameters can be passed as keyword arguments and are solver specific. To make things more compact, it can be useful to collect all parameters
in a `Dict{Symbol,Any}`. In this way, the code snippet above can be written as
```julia
params=Dict{Symbol,Any}()
params[:reg] = ...
...
solver = createLinearSolver(CGNR, A; params...)
```
This notation can be convenient when a large number of parameters are set manually.
It is also possible to construct a solver directly with its specific keyword arguments:
```julia
solver = CGNR(A, reg = reg, ...)
```
## Solver Usage
Once constructed, a solver can be used to approximate a solution to a given measurement vector:
```julia
x_approx = solve!(solver, b; kwargs...)
```
The keyword arguments can be used to supply an initial solution `x0`, one or more `callbacks` to interact with and monitor the solver's state, and more. See the How-To and the API for more information.
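For example, an initial guess and a simple logging callback can be combined as follows (a minimal sketch; `solver`, `A`, and `b` are assumed to be defined as above):
```julia
x0 = zeros(eltype(b), size(A, 2))   # warm start (any prior estimate works)
x_approx = solve!(solver, b; x0 = x0,
                  callbacks = (s, it) -> it % 5 == 0 && println("iteration $it"))
```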
It is also possible to explicitly invoke the solver's iterations using Julia's iterate interface:
```julia
init!(solver, b; kwargs...)
for (iteration, x_approx) in enumerate(solver)
println("Iteration $iteration")
end
```
## Solver Internals
The fields of a solver can be divided into two groups. The first group consists of fields that are not intended to change during the iterations; the second group consists of mutable fields that do change. An example of the first group is the operator itself; examples of the second group are the current solution and the number of the current iteration.
The second group is usually encapsulated in its own state struct:
```julia
mutable struct Solver{matT, ...}
A::matT
# Other "static" fields
state::AbstractSolverState{<:Solver}
end
mutable struct SolverState{T, tempT} <: AbstractSolverState{Solver}
x::tempT
rho::T
# ...
iteration::Int64
end
```
States are subtypes of the parametric `AbstractSolverState{S}` type. The state fields of solvers can be exchanged with different state belonging to the correct solver `S`. This means that the states can be used to realize custom variants of an existing solver:
```julia
mutable struct VariantState{T, tempT} <: AbstractSolverState{Solver}
x::tempT
other::tempT
# ...
iteration::Int64
end
SolverVariant(A; kwargs...) = Solver(A, VariantState(kwargs...))
function iterate(solver::Solver, state::VarianteState)
# Custom iteration
end
``` | RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | docs | 1849 | # API for Regularizers
This page contains documentation of the public API of the RegularizedLeastSquares. In the Julia
REPL one can access this documentation by entering the help mode with `?`
```@docs
RegularizedLeastSquares.L1Regularization
RegularizedLeastSquares.L2Regularization
RegularizedLeastSquares.L21Regularization
RegularizedLeastSquares.LLRRegularization
RegularizedLeastSquares.NuclearRegularization
RegularizedLeastSquares.TVRegularization
```
## Projection Regularization
```@docs
RegularizedLeastSquares.PositiveRegularization
RegularizedLeastSquares.RealRegularization
```
## Nested Regularization
```@docs
RegularizedLeastSquares.innerreg(::AbstractNestedRegularization)
RegularizedLeastSquares.sink(::AbstractNestedRegularization)
RegularizedLeastSquares.sinktype(::AbstractNestedRegularization)
```
## Scaled Regularization
```@docs
RegularizedLeastSquares.AbstractScaledRegularization
RegularizedLeastSquares.scalefactor
RegularizedLeastSquares.NormalizedRegularization
RegularizedLeastSquares.NoNormalization
RegularizedLeastSquares.MeasurementBasedNormalization
RegularizedLeastSquares.SystemMatrixBasedNormalization
RegularizedLeastSquares.FixedParameterRegularization
```
## Misc. Nested Regularization
```@docs
RegularizedLeastSquares.MaskedRegularization
RegularizedLeastSquares.TransformedRegularization
RegularizedLeastSquares.PlugAndPlayRegularization
```
## Miscellaneous Functions
```@docs
RegularizedLeastSquares.prox!(::AbstractParameterizedRegularization, ::AbstractArray)
RegularizedLeastSquares.prox!(::Type{<:AbstractParameterizedRegularization}, ::Any, ::Any)
RegularizedLeastSquares.norm(::AbstractParameterizedRegularization, ::AbstractArray)
RegularizedLeastSquares.λ(::AbstractParameterizedRegularization)
RegularizedLeastSquares.norm(::Type{<:AbstractParameterizedRegularization}, ::Any, ::Any)
``` | RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.16.5 | 7c4d085f436b699e746e19173a3c640a79ceec8a | docs | 1276 | # API for Solvers
This page contains documentation of the public API of the RegularizedLeastSquares. In the Julia
REPL one can access this documentation by entering the help mode with `?`
## solve!
```@docs
RegularizedLeastSquares.solve!(::AbstractLinearSolver, ::Any)
RegularizedLeastSquares.init!(::AbstractLinearSolver, ::Any)
RegularizedLeastSquares.init!(::AbstractLinearSolver, ::AbstractSolverState, ::AbstractMatrix)
```
## ADMM
```@docs
RegularizedLeastSquares.ADMM
```
## CGNR
```@docs
RegularizedLeastSquares.CGNR
```
## Kaczmarz
```@docs
RegularizedLeastSquares.Kaczmarz
```
## FISTA
```@docs
RegularizedLeastSquares.FISTA
```
## OptISTA
```@docs
RegularizedLeastSquares.OptISTA
```
## POGM
```@docs
RegularizedLeastSquares.POGM
```
## SplitBregman
```@docs
RegularizedLeastSquares.SplitBregman
```
## Miscellaneous
```@docs
RegularizedLeastSquares.solverstate
RegularizedLeastSquares.solversolution
RegularizedLeastSquares.solverconvergence
RegularizedLeastSquares.StoreSolutionCallback
RegularizedLeastSquares.StoreConvergenceCallback
RegularizedLeastSquares.CompareSolutionCallback
RegularizedLeastSquares.linearSolverList
RegularizedLeastSquares.createLinearSolver
RegularizedLeastSquares.applicableSolverList
RegularizedLeastSquares.isapplicable
``` | RegularizedLeastSquares | https://github.com/JuliaImageRecon/RegularizedLeastSquares.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 592 | using Documenter, TaylorSeries
makedocs(
modules = [TaylorSeries],
format = Documenter.HTML(prettyurls = get(ENV, "CI", nothing) == "true"),
sitename = "TaylorSeries.jl",
authors = "Luis Benet and David P. Sanders",
pages = [
"Home" => "index.md",
"Background" => "background.md",
"User guide" => "userguide.md",
"Examples" => "examples.md",
"API" => "api.md"
]
)
deploydocs(
repo = "github.com/JuliaDiff/TaylorSeries.jl.git",
target = "build",
deps = nothing,
make = nothing,
push_preview = true
)
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 8396 | module TaylorSeriesIAExt
using TaylorSeries
import Base: ^, log, asin, acos, acosh, atanh, power_by_squaring
import TaylorSeries: evaluate, _evaluate, normalize_taylor, square
isdefined(Base, :get_extension) ? (using IntervalArithmetic) : (using ..IntervalArithmetic)
# Method used for Taylor1{Interval{T}}^n
for T in (:Taylor1, :TaylorN)
@eval begin
function ^(a::$T{Interval{S}}, n::Integer) where {S<:Real}
n == 0 && return one(a)
n == 1 && return copy(a)
n == 2 && return square(a)
n < 0 && return a^float(n)
return power_by_squaring(a, n)
end
^(a::$T{Interval{S}}, r::Rational) where {S<:Real} = a^float(r)
end
end
function ^(a::Taylor1{Interval{T}}, r::S) where {T<:Real, S<:Real}
a0 = constant_term(a) ∩ Interval(zero(T), T(Inf))
aux = one(a0)^r
iszero(r) && return Taylor1(aux, a.order)
aa = one(aux) * a
aa[0] = one(aux) * a0
r == 1 && return aa
r == 2 && return square(aa)
r == 1/2 && return sqrt(aa)
l0 = findfirst(a)
lnull = trunc(Int, r*l0 )
if (a.order-lnull < 0) || (lnull > a.order)
return Taylor1( zero(aux), a.order)
end
c_order = l0 == 0 ? a.order : min(a.order, trunc(Int,r*a.order))
c = Taylor1(zero(aux), c_order)
for k = 0:c_order
TS.pow!(c, aa, c, r, k)
end
return c
end
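# Usage sketch: real powers of interval-valued Taylor1 series. As implemented above, the
# constant term is first intersected with [0, ∞), so the enclosure stays well defined even
# if the input interval dips below zero. The coefficients below are illustrative only.
#
#   using TaylorSeries, IntervalArithmetic
#   ti = Taylor1([Interval(0.9, 1.1), Interval(1.0, 1.0)], 4)
#   ti^0.5   # Taylor1 enclosure of sqrt([0.9, 1.1] + t)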
function ^(a::TaylorN{Interval{T}}, r::S) where {T<:Real, S<:Real}
a0 = constant_term(a) ∩ Interval(zero(T), T(Inf))
a0r = a0^r
aux = one(a0r)
iszero(r) && return TaylorN(aux, a.order)
aa = aux * a
aa[0] = aux * a0
r == 1 && return aa
r == 2 && return square(aa)
r == 1/2 && return sqrt(aa)
isinteger(r) && return aa^round(Int,r)
# @assert !iszero(a0)
iszero(a0) && throw(DomainError(a,
"""The 0-th order TaylorN coefficient must be non-zero
in order to expand `^` around 0."""))
c = TaylorN( a0r, a.order)
for ord in 1:a.order
TS.pow!(c, aa, c, r, ord)
end
return c
end
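# Illustrative sketch (hypothetical helper, not part of the extension) of the
# interval-aware powers defined above: the constant term is first intersected
# with [0, ∞), and fractional exponents dispatch to the methods in this file.
function _demo_interval_pow()
    t = Taylor1(Interval{Float64}, 4)   # Taylor1 variable with interval coefficients
    return (1 + t)^0.5                  # uses ^(::Taylor1{Interval}, ::Real) above
end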
for T in (:Taylor1, :TaylorN)
@eval function log(a::$T{Interval{S}}) where {S<:Real}
iszero(constant_term(a)) && throw(DomainError(a,
"""The 0-th order coefficient must be non-zero in order to expand `log` around 0."""))
a0 = constant_term(a) ∩ Interval(S(0.0), S(Inf))
order = a.order
aux = log(a0)
aa = one(aux) * a
aa[0] = one(aux) * a0
c = $T( aux, order )
for k in eachindex(a)
TS.log!(c, aa, k)
end
return c
end
@eval function asin(a::$T{Interval{S}}) where {S<:Real}
a0 = constant_term(a) ∩ Interval(-one(S), one(S))
a0^2 == one(a0) && throw(DomainError(a,
"""Series expansion of asin(x) diverges at x = ±1."""))
order = a.order
aux = asin(a0)
aa = one(aux) * a
aa[0] = one(aux) * a0
c = $T( aux, order )
r = $T( sqrt(1 - a0^2), order )
for k in eachindex(a)
TS.asin!(c, aa, r, k)
end
return c
end
@eval function acos(a::$T{Interval{S}}) where {S<:Real}
a0 = constant_term(a) ∩ Interval(-one(S), one(S))
a0^2 == one(a0) && throw(DomainError(a,
"""Series expansion of asin(x) diverges at x = ±1."""))
order = a.order
aux = acos(a0)
aa = one(aux) * a
aa[0] = one(aux) * a0
c = $T( aux, order )
r = $T( sqrt(1 - a0^2), order )
for k in eachindex(a)
TS.acos!(c, aa, r, k)
end
return c
end
@eval function acosh(a::$T{Interval{S}}) where {S<:Real}
a0 = constant_term(a) ∩ Interval(one(S), S(Inf))
a0^2 == one(a0) && throw(DomainError(a,
"""Series expansion of acosh(x) diverges at x = ±1."""))
order = a.order
aux = acosh(a0)
aa = one(aux) * a
aa[0] = one(aux) * a0
c = $T( aux, order )
r = $T( sqrt(a0^2 - 1), order )
for k in eachindex(a)
TS.acosh!(c, aa, r, k)
end
return c
end
@eval function atanh(a::$T{Interval{S}}) where {S<:Real}
order = a.order
a0 = constant_term(a) ∩ Interval(-one(S), one(S))
aux = atanh(a0)
aa = one(aux) * a
aa[0] = one(aux) * a0
c = $T( aux, order)
r = $T(one(aux) - a0^2, order)
iszero(constant_term(r)) && throw(DomainError(a,
"""Series expansion of atanh(x) diverges at x = ±1."""))
for k in eachindex(a)
TS.atanh!(c, aa, r, k)
end
return c
end
end
function evaluate(a::Taylor1, dx::Interval{S}) where {S<:Real}
order = a.order
uno = one(dx)
dx2 = dx^2
if iseven(order)
kend = order-2
@inbounds sum_even = a[end]*uno
@inbounds sum_odd = a[end-1]*zero(dx)
else
kend = order-3
@inbounds sum_odd = a[end]*uno
@inbounds sum_even = a[end-1]*uno
end
@inbounds for k in kend:-2:0
sum_odd = sum_odd*dx2 + a[k+1]
sum_even = sum_even*dx2 + a[k]
end
return sum_even + sum_odd*dx
end
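# Illustrative sketch (hypothetical helper): enclosing the range of a polynomial
# over an interval with the `evaluate` method above. Splitting even and odd
# powers and evaluating in dx^2 is intended to reduce interval overestimation
# for symmetric intervals.
function _demo_interval_evaluate()
    t = Taylor1(Float64, 6)
    p = 1 + t + t^2/2                       # truncated expansion of exp(t)
    return evaluate(p, Interval(-0.5, 0.5)) # an Interval enclosing p([-0.5, 0.5])
end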
function evaluate(a::TaylorN, dx::IntervalBox{N,T}) where {T<:Real,N}
@assert N == get_numvars()
a_length = length(a)
suma = zero(constant_term(a)) + Interval{T}(0, 0)
@inbounds for homPol in reverse(eachindex(a))
suma += evaluate(a[homPol], dx)
end
return suma
end
function evaluate(a::HomogeneousPolynomial, dx::IntervalBox{N,T}) where {T<:Real,N}
@assert N == get_numvars()
dx == IntervalBox(-1..1, Val(N)) && return _evaluate(a, dx, Val(true))
dx == IntervalBox( 0..1, Val(N)) && return _evaluate(a, dx, Val(false))
return evaluate(a, dx...)
end
function _evaluate(a::HomogeneousPolynomial, dx::IntervalBox{N,T}, ::Val{true} ) where {T<:Real,N}
a.order == 0 && return a[1] + Interval{T}(0, 0)
ct = TS.coeff_table[a.order+1]
@inbounds suma = a[1]*Interval{T}(0,0)
Ieven = Interval{T}(0,1)
for (i, a_coeff) in enumerate(a.coeffs)
iszero(a_coeff) && continue
if isodd(sum(ct[i]))
tmp = dx[1]
else
tmp = Ieven
for n in eachindex(ct[i])
iseven(ct[i][n]) && continue
tmp *= dx[1]
end
end
suma += a_coeff * tmp
end
return suma
end
function _evaluate(a::HomogeneousPolynomial, dx::IntervalBox{N,T}, ::Val{false} ) where {T<:Real,N}
a.order == 0 && return a[1] + Interval{T}(0, 0)
@inbounds suma = zero(a[1])*dx[1]
@inbounds for homPol in a.coeffs
suma += homPol*dx[1]
end
return suma
end
"""
normalize_taylor(a::Taylor1, I::Interval, symI::Bool=true)
Normalizes `a::Taylor1` such that the interval `I` is mapped
by an affine transformation to the interval `-1..1` (`symI=true`)
or to `0..1` (`symI=false`).
"""
normalize_taylor(a::Taylor1, I::Interval{T}, symI::Bool=true) where {T<:Real} =
_normalize(a, I, Val(symI))
"""
normalize_taylor(a::TaylorN, I::IntervalBox, symI::Bool=true)
Normalize `a::TaylorN` such that the intervals in `I::IntervalBox`
are mapped by an affine transformation to the intervals `-1..1`
(`symI=true`) or to `0..1` (`symI=false`).
"""
normalize_taylor(a::TaylorN, I::IntervalBox{N,T}, symI::Bool=true) where {T<:Real,N} =
_normalize(a, I, Val(symI))
# I -> -1..1
function _normalize(a::Taylor1, I::Interval{T}, ::Val{true}) where {T<:Real}
order = get_order(a)
t = Taylor1(T, order)
tnew = mid(I) + t*radius(I)
return a(tnew)
end
# I -> 0..1
function _normalize(a::Taylor1, I::Interval{T}, ::Val{false}) where {T<:Real}
order = get_order(a)
t = Taylor1(T, order)
tnew = inf(I) + t*diam(I)
return a(tnew)
end
# I -> IntervalBox(-1..1, Val(N))
function _normalize(a::TaylorN, I::IntervalBox{N,T}, ::Val{true}) where {T<:Real,N}
order = get_order(a)
x = Vector{typeof(a)}(undef, N)
for ind in eachindex(x)
x[ind] = mid(I[ind]) + TaylorN(ind, order=order)*radius(I[ind])
end
return a(x)
end
# I -> IntervalBox(0..1, Val(N))
function _normalize(a::TaylorN, I::IntervalBox{N,T}, ::Val{false}) where {T<:Real,N}
order = get_order(a)
x = Vector{typeof(a)}(undef, N)
for ind in eachindex(x)
x[ind] = inf(I[ind]) + TaylorN(ind, order=order)*diam(I[ind])
end
return a(x)
end
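# Illustrative sketch (hypothetical helper) of the affine maps implemented above:
# for I = Interval(1, 3) with symI = true the substitution is t -> mid(I) + radius(I)*t,
# so evaluating the normalized polynomial on Interval(-1, 1) corresponds to
# evaluating the original one on I.
function _demo_normalize_taylor()
    t = Taylor1(Float64, 3)
    p = t^2 + 1
    return normalize_taylor(p, Interval(1.0, 3.0), true)   # == p(2 + t) = 5 + 4t + t^2
end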
end | TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 2378 | module TaylorSeriesJLD2Ext
import Base: convert
using TaylorSeries
if isdefined(Base, :get_extension)
import JLD2: writeas
else
import ..JLD2: writeas
end
@doc raw"""
TaylorNSerialization{T}
Custom serialization struct to save a `TaylorN{T}` to a `.jld2` file.
# Fields
- `vars::Vector{String}`: jet transport variables.
- `varorder::Int`: order of jet transport perturbations.
- `x::Vector{T}`: vector of coefficients.
"""
struct TaylorNSerialization{T}
vars::Vector{String}
varorder::Int
x::Vector{T}
end
# Tell JLD2 to save TaylorN{T} as TaylorNSerialization{T}
writeas(::Type{TaylorN{T}}) where {T} = TaylorNSerialization{T}
# Convert method to write .jld2 files
function convert(::Type{TaylorNSerialization{T}}, eph::TaylorN{T}) where {T}
# Variables
vars = TS.get_variable_names()
# Number of variables
n = length(vars)
# TaylorN order
varorder = eph.order
# Number of coefficients in each TaylorN
L = varorder + 1
# Number of coefficients in each HomogeneousPolynomial
M = binomial(n + varorder, varorder)
# Vector of coefficients
x = Vector{T}(undef, M)
# Save coefficients
i = 1
for i_1 in 0:varorder
# Iterate over i_1 order HomogeneousPolynomial
for i_2 in 1:binomial(n + i_1 - 1, i_1)
x[i] = eph.coeffs[i_1+1].coeffs[i_2]
i += 1
end
end
return TaylorNSerialization{T}(vars, varorder, x)
end
# Convert method to read .jld2 files
function convert(::Type{TaylorN{T}}, eph::TaylorNSerialization{T}) where {T}
# Variables
vars = eph.vars
# Number of variables
n = length(vars)
# TaylorN order
varorder = eph.varorder
# Number of coefficients in each TaylorN
L = varorder + 1
# Number of coefficients in each HomogeneousPolynomial
M = binomial(n + varorder, varorder)
# Set variables
if TS.get_variable_names() != vars
TS.set_variables(T, vars, order = varorder)
end
# Reconstruct TaylorN
i = 1
TaylorN_coeffs = Vector{HomogeneousPolynomial{T}}(undef, L)
for i_1 in 0:varorder
# Reconstruct HomogeneousPolynomials
TaylorN_coeffs[i_1 + 1] = HomogeneousPolynomial(eph.x[i : i + binomial(n + i_1 - 1, i_1)-1], i_1)
i += binomial(n + i_1 - 1, i_1)
end
x = TaylorN{T}(TaylorN_coeffs, varorder)
return x
end
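# Illustrative sketch (hypothetical helper, not part of the extension): the two
# `convert` methods above are inverses of each other, which is what makes a
# TaylorN written through JLD2 come back unchanged.
function _demo_serialization_roundtrip()
    x, y = set_variables("x y", order=3)
    p = 1 + x + 2y^2
    s = convert(TaylorNSerialization{Float64}, p)   # what gets written to disk
    q = convert(TaylorN{Float64}, s)                # what gets read back
    return p == q
end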
end | TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 258 | module TaylorSeriesRATExt
using TaylorSeries
isdefined(Base, :get_extension) ? (import RecursiveArrayTools) : (import ..RecursiveArrayTools)
function RecursiveArrayTools.recursivecopy(a::AbstractArray{<:AbstractSeries, N}) where N
deepcopy(a)
end
end | TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 444 | module TaylorSeriesSAExt
using TaylorSeries
import Base.promote_op
import LinearAlgebra: matprod
isdefined(Base, :get_extension) ? (using StaticArrays) : (using ..StaticArrays)
promote_op(::typeof(adjoint), ::Type{T}) where {T<:AbstractSeries} = T
promote_op(::typeof(matprod), ::Type{T}, ::Type{U}) where {T <: AbstractSeries, U <: AbstractFloat} = T
promote_op(::typeof(matprod), ::Type{T}, ::Type{T}) where {T <: AbstractSeries} = T
end | TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 1692 | using TaylorSeries
const order = 20
const x, y, z, w = set_variables(Int128, "x", numvars=4, order=2order)
function fateman1(degree::Int)
T = Int128
oneH = convert(HomogeneousPolynomial{T}, 1)  # HomogeneousPolynomial(one(T), 0)
# s = 1 + x + y + z + w
s = TaylorN( [oneH, HomogeneousPolynomial([one(T),one(T),one(T),one(T)],1)], degree )
s = s^degree
# s is converted to order 2*ndeg
s = TaylorN(s.coeffs, 2*degree)
s * ( s+1 )
end
function fateman2(degree::Int)
T = Int128
oneH = convert(HomogeneousPolynomial{T}, 1)  # HomogeneousPolynomial(one(T), 0)
# s = 1 + x + y + z + w
s = TaylorN( [oneH, HomogeneousPolynomial([one(T),one(T),one(T),one(T)],1)], degree )
s = s^degree
# s is converted to order 2*ndeg
s = TaylorN(s.coeffs, 2*degree)
return s^2 + s
end
function fateman3(degree::Int)
s = x + y + z + w + 1
s = s^degree
s * (s+1)
end
function fateman4(degree::Int)
s = x + y + z + w + 1
s = s^degree
s^2 + s
end
function run_fateman(N)
results = Any[]
nn = 5
for f in (fateman1, fateman2, fateman3, fateman4)
f(0)
println("Running $f")
@time result = f(N)
# push!(results, result) # This may take a lot of memory
t = Inf
tav = 0.0
for i = 1:nn
ti = @elapsed f(N)
tav += ti
t = min(t,ti)
end
println("\tAverage time of $nn runs: ", tav/nn)
println("\tMinimum time of $nn runs: ", t)
end
results
end
println("Running Fateman with order $order...")
results = run_fateman(order);
println("Done.")
# @assert results[1] == results[2] == results[3] == results[4]
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 2751 | # This file is part of the TaylorSeries.jl Julia package, MIT license
#
# Luis Benet & David P. Sanders
# UNAM
#
# Handles Taylor series of arbitrary but finite order
"""
TaylorSeries
A Julia package for Taylor expansions in one or more independent variables.
The basic constructors are [`Taylor1`](@ref) and [`TaylorN`](@ref);
see also [`HomogeneousPolynomial`](@ref).
"""
module TaylorSeries
using SparseArrays: SparseMatrixCSC
using Markdown
if !isdefined(Base, :get_extension)
using Requires
end
using LinearAlgebra: norm, mul!,
lu, lu!, LinearAlgebra.lutype, LinearAlgebra.copy_oftype,
LinearAlgebra.issuccess
if VERSION >= v"1.7.0-DEV.1188"
using LinearAlgebra: NoPivot, RowMaximum
end
import LinearAlgebra: norm, mul!, lu
import Base: ==, +, -, *, /, ^
import Base: iterate, size, eachindex, firstindex, lastindex,
eltype, length, getindex, setindex!, axes, copyto!
import Base: zero, one, zeros, ones, isinf, isnan, iszero, isless,
convert, promote_rule, promote, show,
real, imag, conj, adjoint,
rem, mod, mod2pi, abs, abs2,
sqrt, exp, expm1, log, log1p,
sin, cos, sincos, sinpi, cospi, sincospi, tan,
asin, acos, atan, sinh, cosh, tanh, atanh, asinh, acosh,
power_by_squaring,
rtoldefault, isfinite, isapprox, rad2deg, deg2rad
import Base.float
export Taylor1, TaylorN, HomogeneousPolynomial, AbstractSeries, TS
export getcoeff, derivative, integrate, differentiate,
evaluate, evaluate!, inverse, inverse_map, set_taylor1_varname,
show_params_TaylorN, show_monomials, displayBigO, use_show_default,
get_order, get_numvars,
set_variables, get_variables,
get_variable_names, get_variable_symbols,
# jacobian, hessian, jacobian!, hessian!,
∇, taylor_expand, update!,
constant_term, linear_polynomial, nonlinear_polynomial,
normalize_taylor, norm
const TS = TaylorSeries
include("parameters.jl")
include("hash_tables.jl")
include("constructors.jl")
include("conversion.jl")
include("auxiliary.jl")
include("arithmetic.jl")
include("power.jl")
include("functions.jl")
include("other_functions.jl")
include("evaluate.jl")
include("calculus.jl")
include("dictmutfunct.jl")
include("broadcasting.jl")
include("printing.jl")
function __init__()
@static if !isdefined(Base, :get_extension)
@require IntervalArithmetic = "d1acc4aa-44c8-5952-acd4-ba5d80a2a253" begin
include("../ext/TaylorSeriesIAExt.jl")
end
@require StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" begin
include("../ext/TaylorSeriesSAExt.jl")
end
@require JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819" begin
include("../ext/TaylorSeriesJLD2Ext.jl")
end
end
end
end # module
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 39000 | # This file is part of the TaylorSeries.jl Julia package, MIT license
#
# Luis Benet & David P. Sanders
# UNAM
#
# MIT Expat license
#
# Arithmetic operations: +, -, *, /
## Equality ##
for T in (:Taylor1, :TaylorN)
@eval begin
==(a::$T{T}, b::$T{S}) where {T<:Number,S<:Number} = ==(promote(a,b)...)
function ==(a::$T{T}, b::$T{T}) where {T<:Number}
if a.order != b.order
a, b = fixorder(a, b)
end
return a.coeffs == b.coeffs
end
end
end
function ==(a::Taylor1{TaylorN{T}}, b::TaylorN{Taylor1{S}}) where {T, S}
R = promote_type(T, S)
return a == convert(Taylor1{TaylorN{R}}, b)
end
==(b::TaylorN{Taylor1{S}}, a::Taylor1{TaylorN{T}}) where {T, S} = a == b
function ==(a::HomogeneousPolynomial, b::HomogeneousPolynomial)
a.order == b.order && return a.coeffs == b.coeffs
return iszero(a.coeffs) && iszero(b.coeffs)
end
## Total ordering ##
for T in (:Taylor1, :TaylorN)
@eval begin
@inline function isless(a::$T{<:Number}, b::Real)
a0 = constant_term(a)
a0 != b && return isless(a0, b)
nz = findfirst(a-b)
if nz == -1
return isless(zero(a0), zero(b))
else
return isless(a[nz], zero(b))
end
end
@inline function isless(b::Real, a::$T{<:Number})
a0 = constant_term(a)
a0 != b && return isless(b, a0)
nz = findfirst(b-a)
if nz == -1
return isless(zero(b), zero(a0))
else
return isless(zero(b), a[nz])
end
end
#
@inline isless(a::$T{T}, b::$T{S}) where {T<:Number, S<:Number} =
isless(promote(a,b)...)
@inline isless(a::$T{T}, b::$T{T}) where {T<:Number} =
isless(a - b, zero(constant_term(a)))
end
end
@inline function isless(a::HomogeneousPolynomial{<:Number}, b::Real)
orda = get_order(a)
if orda == 0
return isless(a[1], b)
else
!iszero(b) && return isless(zero(a[1]), b)
nz = max(findfirst(a), 1)
return isless(a[nz], b)
end
end
@inline function isless(b::Real, a::HomogeneousPolynomial{<:Number})
orda = get_order(a)
if orda == 0
return isless(b, a[1])
else
!iszero(b) && return isless(b, zero(a[1]))
nz = max(findfirst(a),1)
return isless(b, a[nz])
end
end
#
@inline isless(a::HomogeneousPolynomial{T}, b::HomogeneousPolynomial{S}) where
{T<:Number, S<:Number} = isless(promote(a,b)...)
@inline function isless(a::HomogeneousPolynomial{T},
b::HomogeneousPolynomial{T}) where {T<:Number}
orda = get_order(a)
ordb = get_order(b)
if orda == ordb
return isless(a-b, zero(a[1]))
elseif orda < ordb
return isless(a, zero(a[1]))
else
return isless(-b, zero(a[1]))
end
end
# Mixtures
@inline isless(a::Taylor1{TaylorN{T}}, b::Taylor1{TaylorN{T}}) where
{T<:NumberNotSeries} = isless(a - b, zero(T))
@inline function isless(a::HomogeneousPolynomial{Taylor1{T}},
b::HomogeneousPolynomial{Taylor1{T}}) where {T<:NumberNotSeries}
orda = get_order(a)
ordb = get_order(b)
if orda == ordb
return isless(a-b, zero(T))
elseif orda < ordb
return isless(a, zero(T))
else
return isless(-b, zero(T))
end
end
@inline isless(a::TaylorN{Taylor1{T}}, b::TaylorN{Taylor1{T}}) where
{T<:NumberNotSeries} = isless(a - b, zero(T))
#= TODO: Nested Taylor1s; needs careful thinking; iss #326. The following works:
@inline isless(a::Taylor1{Taylor1{T}}, b::Taylor1{Taylor1{T}}) where {T<:Number} = isless(a - b, zero(T))
# Is the following correct?
# ti = Taylor1(3)
# to = Taylor1([zero(ti), one(ti)], 9)
# tito = ti * to
# ti > to > 0 # ok
# to^2 < toti < ti^2 # ok
# ti > ti^2 > to # is this ok?
=#
@doc doc"""
isless(a::Taylor1{<:Real}, b::Real)
isless(a::TaylorN{<:Real}, b::Real)
Compute `isless` by comparing `constant_term(a)` and `b`. If they are equal,
it returns `a[nz] < 0`, where `nz` is the index of the first
non-zero coefficient after the constant term. This defines a total order.
For several variables, the ordering requires a lexicographical convention in order to be
total. We use the simplest one: the variable that appears *first* when the `TaylorN`
variables are defined (e.g., through [`set_variables`](@ref)) is taken as the *larger* one.
Refs:
- M. Berz, AIP Conference Proceedings 177, 275 (1988); https://doi.org/10.1063/1.37800
- M. Berz, "Automatic Differentiation as Nonarchimedean Analysis", Computer Arithmetic and
Enclosure Methods, (1992), Elsevier, 439-450.
---
isless(a::Taylor1{<:Real}, b::Taylor1{<:Real})
    isless(a::TaylorN{<:Real}, b::TaylorN{<:Real})
Returns `isless(a - b, zero(b))`.
""" isless
## zero and one ##
for T in (:Taylor1, :HomogeneousPolynomial, :TaylorN)
@eval iszero(a::$T) = iszero(a.coeffs)
end
for T in (:Taylor1, :TaylorN)
@eval zero(a::$T) = $T(zero.(a.coeffs))
@eval function one(a::$T)
b = zero(a)
b[0] = one(b[0])
return b
end
end
zero(a::HomogeneousPolynomial{T}) where {T<:Number} =
HomogeneousPolynomial(zero.(a.coeffs), a.order)
function zeros(a::HomogeneousPolynomial{T}, order::Int) where {T<:Number}
order == 0 && return [HomogeneousPolynomial([zero(a[1])], 0)]
v = Array{HomogeneousPolynomial{T}}(undef, order+1)
@simd for ord in eachindex(v)
@inbounds v[ord] = HomogeneousPolynomial(zero(a[1]), ord-1)
end
return v
end
zeros(::Type{HomogeneousPolynomial{T}}, order::Int) where {T<:Number} =
zeros( HomogeneousPolynomial([zero(T)], 0), order)
function one(a::HomogeneousPolynomial{T}) where {T<:Number}
v = one.(a.coeffs)
return HomogeneousPolynomial(v, a.order)
end
function ones(a::HomogeneousPolynomial{T}, order::Int) where {T<:Number}
order == 0 && return [HomogeneousPolynomial([one(a[1])], 0)]
v = Array{HomogeneousPolynomial{T}}(undef, order+1)
@simd for ord in eachindex(v)
@inbounds num_coeffs = size_table[ord]
@inbounds v[ord] = HomogeneousPolynomial(ones(T, num_coeffs), ord-1)
end
return v
end
ones(::Type{HomogeneousPolynomial{T}}, order::Int) where {T<:Number} =
ones( HomogeneousPolynomial([one(T)], 0), order)
## Addition and subtraction ##
for (f, fc) in ((:+, :(add!)), (:-, :(subst!)))
for T in (:Taylor1, :TaylorN)
@eval begin
($f)(a::$T{T}, b::$T{S}) where {T<:Number, S<:Number} =
($f)(promote(a, b)...)
function ($f)(a::$T{T}, b::$T{T}) where {T<:Number}
if a.order != b.order
a, b = fixorder(a, b)
end
c = $T( zero(constant_term(a)), a.order)
for k in eachindex(a)
($fc)(c, a, b, k)
end
return c
end
function ($f)(a::$T)
c = $T( zero(constant_term(a)), a.order)
for k in eachindex(a)
($fc)(c, a, k)
end
return c
end
($f)(a::$T{T}, b::S) where {T<:Number, S<:Number} = ($f)(promote(a, b)...)
function ($f)(a::$T{T}, b::T) where {T<:Number}
coeffs = copy(a.coeffs)
@inbounds coeffs[1] = $f(a[0], b)
return $T(coeffs, a.order)
end
# ($f)(b::S, a::$T{T}) where {T<:Number,S<:Number} = $f(promote(b, a)...)
function ($f)(b::T, a::$T{T}) where {T<:Number}
coeffs = similar(a.coeffs)
@__dot__ coeffs = ($f)(a.coeffs)
@inbounds coeffs[1] = $f(b, a[0])
return $T(coeffs, a.order)
end
## add! and subst! ##
function ($fc)(v::$T{T}, a::$T{T}, k::Int) where {T<:Number}
if $T == Taylor1
@inbounds v[k] = ($f)(a[k])
else
@inbounds for l in eachindex(v[k])
v[k][l] = ($f)(a[k][l])
end
end
return nothing
end
function ($fc)(v::$T{T}, a::T, k::Int) where {T<:Number}
@inbounds v[k] = k==0 ? ($f)(a) : zero(a)
return nothing
end
if $T == Taylor1
function ($fc)(v::$T, a::$T, b::$T, k::Int)
@inbounds v[k] = ($f)(a[k], b[k])
return nothing
end
else
function ($fc)(v::$T, a::$T, b::$T, k::Int)
@inbounds for i in eachindex(v[k])
v[k][i] = ($f)(a[k][i], b[k][i])
end
return nothing
end
end
function ($fc)(v::$T, a::$T, b::Number, k::Int)
@inbounds v[k] = k==0 ?
($f)(constant_term(a), b) : ($f)(a[k], zero(b))
return nothing
end
function ($fc)(v::$T, a::Number, b::$T, k::Int)
@inbounds v[k] = k==0 ?
($f)(a, constant_term(b)) : ($f)(zero(a), b[k])
return nothing
end
end
end
@eval ($f)(a::T, b::S) where {T<:Taylor1, S<:TaylorN} = ($f)(promote(a, b)...)
@eval ($f)(a::T, b::S) where {T<:TaylorN, S<:Taylor1} = ($f)(promote(a, b)...)
@eval begin
($f)(a::HomogeneousPolynomial{T}, b::HomogeneousPolynomial{S}) where
{T<:NumberNotSeriesN,S<:NumberNotSeriesN} = ($f)(promote(a,b)...)
function ($f)(a::HomogeneousPolynomial{T}, b::HomogeneousPolynomial{T}) where
{T<:NumberNotSeriesN}
@assert a.order == b.order
v = similar(a.coeffs)
@__dot__ v = ($f)(a.coeffs, b.coeffs)
return HomogeneousPolynomial(v, a.order)
end
# NOTE add! and subst! for HomogeneousPolynomial's act as += or -=
function ($fc)(res::HomogeneousPolynomial{T}, a::HomogeneousPolynomial{T},
b::HomogeneousPolynomial{T}, k::Int) where {T<:NumberNotSeriesN}
res[k] += ($f)(a[k], b[k])
return nothing
end
function ($f)(a::HomogeneousPolynomial)
v = similar(a.coeffs)
@__dot__ v = ($f)(a.coeffs)
return HomogeneousPolynomial(v, a.order)
end
function ($f)(a::TaylorN{Taylor1{T}}, b::Taylor1{S}) where
{T<:NumberNotSeries, S<:NumberNotSeries}
@inbounds aux = $f(a[0][1], b)
R = TS.numtype(aux)
coeffs = Array{HomogeneousPolynomial{Taylor1{R}}}(undef, a.order+1)
coeffs .= a.coeffs
@inbounds coeffs[1] = aux
return TaylorN(coeffs, a.order)
end
function ($f)(b::Taylor1{S}, a::TaylorN{Taylor1{T}}) where
{T<:NumberNotSeries, S<:NumberNotSeries}
@inbounds aux = $f(b, a[0][1])
R = TS.numtype(aux)
coeffs = Array{HomogeneousPolynomial{Taylor1{R}}}(undef, a.order+1)
@__dot__ coeffs = $f(a.coeffs)
@inbounds coeffs[1] = aux
return TaylorN(coeffs, a.order)
end
function ($f)(a::Taylor1{TaylorN{T}}, b::TaylorN{S}) where
{T<:NumberNotSeries,S<:NumberNotSeries}
@inbounds aux = $f(a[0], b)
c = Taylor1( zero(aux), a.order)
for k in eachindex(a)
($fc)(c, a, b, k)
end
return c
end
function ($f)(b::TaylorN{S}, a::Taylor1{TaylorN{T}}) where
{T<:NumberNotSeries,S<:NumberNotSeries}
@inbounds aux = $f(b, a[0])
c = Taylor1( zero(aux), a.order)
for k in eachindex(a)
($fc)(c, a, b, k)
end
return c
end
end
end
for (f, fc) in ((:+, :(add!)), (:-, :(subst!)))
@eval begin
function ($f)(a::Taylor1{TaylorN{T}}, b::Taylor1{TaylorN{T}}) where {T<:NumberNotSeries}
if a.order != b.order || any(get_order.(a.coeffs) .!= get_order.(b.coeffs))
a, b = fixorder(a, b)
end
c = zero(a)
for k in eachindex(a)
($fc)(c, a, b, k)
end
return c
end
function ($fc)(v::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}}, b::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
@inbounds for i in eachindex(v[k])
for j in eachindex(v[k][i])
v[k][i][j] = ($f)(a[k][i][j], b[k][i][j])
end
end
return nothing
end
function ($fc)(v::Taylor1{TaylorN{T}}, a::NumberNotSeries, b::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
@inbounds for i in eachindex(v[k])
for j in eachindex(v[k][i])
v[k][i][j] = ($f)(k==0 && i==0 && j==1 ? a : zero(a), b[k][i][j])
end
end
return nothing
end
function ($fc)(v::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
@inbounds for l in eachindex(v[k])
for m in eachindex(v[k][l])
v[k][l][m] = ($f)(a[k][l][m])
end
end
return nothing
end
end
end
## Multiplication ##
for T in (:Taylor1, :HomogeneousPolynomial, :TaylorN)
@eval begin
function *(a::T, b::$T{S}) where {T<:NumberNotSeries, S<:NumberNotSeries}
@inbounds aux = a * b.coeffs[1]
v = Array{typeof(aux)}(undef, length(b.coeffs))
@__dot__ v = a * b.coeffs
return $T(v, b.order)
end
*(b::$T{S}, a::T) where {T<:NumberNotSeries, S<:NumberNotSeries} = a * b
function *(a::T, b::$T{T}) where {T<:Number}
v = Array{T}(undef, length(b.coeffs))
@__dot__ v = a * b.coeffs
return $T(v, b.order)
end
*(b::$T{T}, a::T) where {T<:Number} = a * b
end
end
for T in (:HomogeneousPolynomial, :TaylorN)
@eval begin
function *(a::Taylor1{T}, b::$T{Taylor1{S}}) where
{T<:NumberNotSeries, S<:NumberNotSeries}
@inbounds aux = a * b.coeffs[1]
R = typeof(aux)
coeffs = Array{R}(undef, length(b.coeffs))
@__dot__ coeffs = a * b.coeffs
return $T(coeffs, b.order)
end
*(b::$T{Taylor1{R}}, a::Taylor1{T}) where
{T<:NumberNotSeries, R<:NumberNotSeries} = a * b
function *(a::$T{T}, b::Taylor1{$T{S}}) where {T<:NumberNotSeries, S<:NumberNotSeries}
@inbounds aux = a * b[0]
R = typeof(aux)
coeffs = Array{R}(undef, length(b.coeffs))
@__dot__ coeffs = a * b.coeffs
return Taylor1(coeffs, b.order)
end
*(b::Taylor1{$T{S}}, a::$T{T}) where {T<:NumberNotSeries, S<:NumberNotSeries} = a * b
end
end
for (T, W) in ((:Taylor1, :Number), (:TaylorN, :NumberNotSeriesN))
@eval function *(a::$T{T}, b::$T{T}) where {T<:$W}
if a.order != b.order
a, b = fixorder(a, b)
end
c = $T(zero(constant_term(a)), a.order)
for ord in eachindex(c)
mul!(c, a, b, ord) # updates c[ord]
end
return c
end
end
*(a::HomogeneousPolynomial{T}, b::HomogeneousPolynomial{S}) where
{T<:NumberNotSeriesN,S<:NumberNotSeriesN} = *(promote(a,b)...)
function *(a::HomogeneousPolynomial{T}, b::HomogeneousPolynomial{T}) where
{T<:NumberNotSeriesN}
order = a.order + b.order
# NOTE: the following returns order 0, but could be get_order(), or get_order(a)
order > get_order() && return HomogeneousPolynomial(zero(a[1]), get_order(a))
res = HomogeneousPolynomial(zero(a[1]), order)
mul!(res, a, b)
return res
end
function *(a::Taylor1{TaylorN{T}}, b::Taylor1{TaylorN{S}}) where
{T<:NumberNotSeries, S<:NumberNotSeries}
R = promote_type(T,S)
return *(convert(Taylor1{TaylorN{R}}, a), convert(Taylor1{TaylorN{R}}, b))
end
function *(a::Taylor1{TaylorN{T}}, b::Taylor1{TaylorN{T}}) where {T<:NumberNotSeries}
if (a.order != b.order) || any(get_order.(a.coeffs) .!= get_order.(b.coeffs))
a, b = fixorder(a, b)
end
res = Taylor1(zero(a[0]), a.order)
for ordT in eachindex(a)
mul!(res, a, b, ordT)
end
return res
end
# Internal multiplication functions
for T in (:Taylor1, :TaylorN)
# NOTE: For $T = TaylorN, `mul!` *accumulates* the result of a * b in c[k]
@eval @inline function mul!(c::$T{T}, a::$T{T}, b::$T{T}, k::Int) where {T<:Number}
if $T == Taylor1
@inbounds c[k] = a[0] * b[k]
@inbounds for i = 1:k
c[k] += a[i] * b[k-i]
end
else
@inbounds mul!(c[k], a[0], b[k])
@inbounds for i = 1:k
mul!(c[k], a[i], b[k-i])
end
end
return nothing
end
@eval @inline function mul_scalar!(c::$T{T}, scalar::NumberNotSeries, a::$T{T}, b::$T{T}, k::Int) where {T<:Number}
if $T == Taylor1
@inbounds c[k] = scalar * a[0] * b[k]
@inbounds for i = 1:k
c[k] += scalar * a[i] * b[k-i]
end
else
@inbounds mul_scalar!(c[k], scalar, a[0], b[k])
@inbounds for i = 1:k
mul_scalar!(c[k], scalar, a[i], b[k-i])
end
end
return nothing
end
@eval begin
if $T == Taylor1
@inline function mul!(v::$T, a::$T, b::NumberNotSeries, k::Int)
@inbounds v[k] = a[k] * b
return nothing
end
@inline function mul!(v::$T, a::NumberNotSeries, b::$T, k::Int)
@inbounds v[k] = a * b[k]
return nothing
end
@inline function muladd!(v::$T, a::$T, b::NumberNotSeries, k::Int)
@inbounds v[k] += a[k] * b
return nothing
end
@inline function muladd!(v::$T, a::NumberNotSeries, b::$T, k::Int)
@inbounds v[k] += a * b[k]
return nothing
end
else
@inline function mul!(v::$T, a::$T, b::NumberNotSeries, k::Int)
@inbounds for i in eachindex(v[k])
v[k][i] = a[k][i] * b
end
return nothing
end
@inline function mul!(v::$T, a::NumberNotSeries, b::$T, k::Int)
@inbounds for i in eachindex(v[k])
v[k][i] = a * b[k][i]
end
return nothing
end
@inline function muladd!(v::$T, a::$T, b::NumberNotSeries, k::Int)
@inbounds for i in eachindex(v[k])
v[k][i] += a[k][i] * b
end
return nothing
end
@inline function muladd!(v::$T, a::NumberNotSeries, b::$T, k::Int)
@inbounds for i in eachindex(v[k])
v[k][i] += a * b[k][i]
end
return nothing
end
end
end
@eval @inline function mul!(v::$T, a::$T, b::NumberNotSeries)
for k in eachindex(v)
mul!(v, a, b, k)
end
return nothing
end
@eval @inline function mul!(v::$T, a::NumberNotSeries, b::$T)
for k in eachindex(v)
mul!(v, a, b, k)
end
return nothing
end
end
# in-place product: `a` <- `a*b`
# this method computes the product `a*b` and saves it back into `a`
# assumes `a` and `b` are of same order
function mul!(a::TaylorN{T}, b::TaylorN{T}) where {T<:Number}
@inbounds for k in reverse(eachindex(a))
mul!(a, a, b[0][1], k)
for l in 1:k
mul!(a[k], a[k-l], b[l])
end
end
return nothing
end
function mul!(a::Taylor1{T}, b::Taylor1{T}) where {T<:Number}
@inbounds for k in reverse(eachindex(a))
# a[k] <- a[k]*b[0]
mul!(a, a, b[0], k)
for l in 1:k
# a[k] <- a[k] + a[k-l] * b[l]
a[k] += a[k-l] * b[l]
end
end
return nothing
end
function mul!(a::Taylor1{TaylorN{T}}, b::Taylor1{TaylorN{T}}) where {T<:NumberNotSeries}
@inbounds for k in reverse(eachindex(a))
mul!(a, a, b[0], k)
for l in 1:k
# a[k] += a[k-l] * b[l]
for m in eachindex(a[k])
mul!(a[k], a[k-l], b[l], m)
end
end
end
return nothing
end
function mul!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}}, b::Taylor1{TaylorN{T}},
ordT::Int) where {T<:NumberNotSeries}
# Sanity
zero!(res, ordT)
for k in 0:ordT
@inbounds for ordQ in eachindex(a[ordT])
mul!(res[ordT], a[k], b[ordT-k], ordQ)
end
end
return nothing
end
@inline function mul!(res::Taylor1{TaylorN{T}}, a::NumberNotSeries,
b::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
for l in eachindex(b[k])
for m in eachindex(b[k][l])
res[k][l][m] = a*b[k][l][m]
end
end
return nothing
end
mul!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}},
b::NumberNotSeries, k::Int) where {T<:NumberNotSeries} = mul!(res, b, a, k)
# in-place product (assumes equal order among TaylorNs)
# NOTE: the result of the product is *accumulated* in c[k]
function mul!(c::TaylorN, a::TaylorN, b::TaylorN)
for k in eachindex(c)
mul!(c, a, b, k)
end
end
function mul_scalar!(c::TaylorN, scalar::NumberNotSeries, a::TaylorN, b::TaylorN)
for k in eachindex(c)
mul_scalar!(c, scalar, a, b, k)
end
end
@doc doc"""
mul!(c, a, b, k::Int) --> nothing
Update the `k`-th expansion coefficient `c[k]` of `c = a * b`,
where all `c`, `a`, and `b` are either `Taylor1` or `TaylorN`.
Note that for `TaylorN` the result of `a * b` is accumulated in `c[k]`.
The coefficients are given by
```math
c_k = \sum_{j=0}^k a_j b_{k-j}.
```
""" mul!
"""
mul!(c, a, b) --> nothing
Accumulates in `c` the result of `a*b` with minimum allocation. Arguments
c, a and b are `HomogeneousPolynomial`.
"""
@inline function mul!(c::HomogeneousPolynomial, a::HomogeneousPolynomial,
b::HomogeneousPolynomial)
(iszero(b) || iszero(a)) && return nothing
@inbounds num_coeffs_a = size_table[a.order+1]
@inbounds num_coeffs_b = size_table[b.order+1]
@inbounds posTb = pos_table[c.order+1]
@inbounds indTa = index_table[a.order+1]
@inbounds indTb = index_table[b.order+1]
@inbounds for na in 1:num_coeffs_a
ca = a[na]
# iszero(ca) && continue
inda = indTa[na]
@inbounds for nb in 1:num_coeffs_b
cb = b[nb]
# iszero(cb) && continue
indb = indTb[nb]
pos = posTb[inda + indb]
c[pos] += ca * cb
end
end
return nothing
end
"""
mul_scalar!(c, scalar, a, b) --> nothing
Accumulates in `c` the result of `scalar*a*b` with minimum allocation. Arguments
c, a and b are `HomogeneousPolynomial`; `scalar` is a NumberNotSeries.
"""
@inline function mul_scalar!(c::HomogeneousPolynomial, scalar::NumberNotSeries, a::HomogeneousPolynomial,
b::HomogeneousPolynomial)
(iszero(b) || iszero(a)) && return nothing
@inbounds num_coeffs_a = size_table[a.order+1]
@inbounds num_coeffs_b = size_table[b.order+1]
@inbounds posTb = pos_table[c.order+1]
@inbounds indTa = index_table[a.order+1]
@inbounds indTb = index_table[b.order+1]
@inbounds for na in 1:num_coeffs_a
ca = a[na]
# iszero(ca) && continue
inda = indTa[na]
@inbounds for nb in 1:num_coeffs_b
cb = b[nb]
# iszero(cb) && continue
indb = indTb[nb]
pos = posTb[inda + indb]
c[pos] += scalar * ca * cb
end
end
return nothing
end
## Division ##
function /(a::Taylor1{Rational{T}}, b::S) where {T<:Integer, S<:NumberNotSeries}
R = typeof( a[0] // b)
v = Array{R}(undef, a.order+1)
@__dot__ v = a.coeffs // b
return Taylor1(v, a.order)
end
for T in (:Taylor1, :HomogeneousPolynomial, :TaylorN)
@eval function /(a::$T{T}, b::S) where {T<:NumberNotSeries, S<:NumberNotSeries}
@inbounds aux = a.coeffs[1] / b
v = Array{typeof(aux)}(undef, length(a.coeffs))
@__dot__ v = a.coeffs / b
return $T(v, a.order)
end
@eval function /(a::$T{T}, b::T) where {T<:Number}
@inbounds aux = a.coeffs[1] / b
# v = Array{typeof(aux)}(undef, length(a.coeffs))
# @__dot__ v = a.coeffs / b
# return $T(v, a.order)
c = $T( zero(aux), a.order )
for ord in eachindex(c)
div!(c, a, b, ord) # updates c[ord]
end
return c
end
end
for T in (:HomogeneousPolynomial, :TaylorN)
@eval function /(b::$T{Taylor1{S}}, a::Taylor1{T}) where
{T<:NumberNotSeries, S<:NumberNotSeries}
@inbounds aux = b.coeffs[1] / a
R = typeof(aux)
coeffs = Array{R}(undef, length(b.coeffs))
@__dot__ coeffs = b.coeffs / a
return $T(coeffs, b.order)
end
@eval function /(b::$T{Taylor1{T}}, a::S) where {T<:NumberNotSeries, S<:NumberNotSeries}
@inbounds aux = b.coeffs[1] / a
R = typeof(aux)
coeffs = Array{R}(undef, length(b.coeffs))
@__dot__ coeffs = b.coeffs / a
return $T(coeffs, b.order)
end
@eval function /(b::Taylor1{$T{S}}, a::$T{T}) where
{T<:NumberNotSeries, S<:NumberNotSeries}
@inbounds aux = b[0] / a
# R = typeof(aux)
# coeffs = Array{R}(undef, length(b.coeffs))
# @__dot__ coeffs = b.coeffs / a
# return Taylor1(coeffs, b.order)
v = Taylor1(zero(aux), b.order)
@inbounds for k in eachindex(b)
v[k] = b[k] / a
end
return v
end
end
/(a::Taylor1{T}, b::Taylor1{S}) where {T<:Number, S<:Number} = /(promote(a,b)...)
function /(a::Taylor1{T}, b::Taylor1{T}) where {T<:Number}
iszero(a) && !iszero(b) && return zero(a)
if a.order != b.order
a, b = fixorder(a, b)
end
# order and coefficient of first factorized term
ordfact, cdivfact = divfactorization(a, b)
c = Taylor1(cdivfact, a.order-ordfact)
for ord in eachindex(c)
div!(c, a, b, ord) # updates c[ord]
end
return c
end
/(a::TaylorN{T}, b::TaylorN{S}) where
{T<:NumberNotSeriesN, S<:NumberNotSeriesN} = /(promote(a,b)...)
function /(a::TaylorN{T}, b::TaylorN{T}) where {T<:NumberNotSeriesN}
@assert !iszero(constant_term(b))
if a.order != b.order
a, b = fixorder(a, b)
end
# first coefficient
@inbounds cdivfact = a[0] / constant_term(b)
c = TaylorN(cdivfact, a.order)
for ord in eachindex(c)
div!(c, a, b, ord) # updates c[ord]
end
return c
end
function /(a::Taylor1{TaylorN{T}}, b::Taylor1{TaylorN{T}}) where {T<:NumberNotSeries}
iszero(a) && !iszero(b) && return zero(a)
if (a.order != b.order) || any(get_order.(a.coeffs) .!= get_order.(b.coeffs))
a, b = fixorder(a, b)
end
# order and coefficient of first factorized term
ordfact, cdivfact = divfactorization(a, b)
res = Taylor1(cdivfact, a.order-ordfact)
for ordT in eachindex(res)
div!(res, a, b, ordT)
end
return res
end
function /(a::S, b::Taylor1{TaylorN{T}}) where {S<:NumberNotSeries, T<:NumberNotSeries}
R = promote_type(TaylorN{S}, TaylorN{T})
res = convert(Taylor1{R}, zero(b))
iszero(a) && !iszero(b) && return res
for ordT in eachindex(res)
div!(res, a, b, ordT)
end
return res
end
function /(a::TaylorN{T}, b::Taylor1{TaylorN{T}}) where {T<:NumberNotSeries}
res = zero(b)
iszero(a) && !iszero(b) && return res
aa = Taylor1(a, b.order)
for ordT in eachindex(res)
div!(res, aa, b, ordT)
end
return res
end
@inline function divfactorization(a1::Taylor1, b1::Taylor1)
# order of first factorized term; a1 and b1 assumed to be of the same order
a1nz = findfirst(a1)
b1nz = findfirst(b1)
a1nz = a1nz ≥ 0 ? a1nz : a1.order
b1nz = b1nz ≥ 0 ? b1nz : a1.order
ordfact = min(a1nz, b1nz)
cdivfact = a1[ordfact] / b1[ordfact]
# Is the polynomial factorizable?
iszero(b1[ordfact]) && throw( ArgumentError(
"""Division does not define a Taylor1 polynomial;
order k=$(ordfact) => coeff[$(ordfact)]=$(cdivfact).""") )
return ordfact, cdivfact
end
## TODO: Implement factorization (divfactorization) for TaylorN polynomials
# Homogeneous coefficient for the division
@doc doc"""
div!(c, a, b, k::Int)
Compute the `k-th` expansion coefficient `c[k]` of `c = a / b`,
where all `c`, `a` and `b` are either `Taylor1` or `TaylorN`.
The coefficients are given by
```math
c_k = \frac{1}{b_0} \big(a_k - \sum_{j=0}^{k-1} c_j b_{k-j}\big).
```
For `Taylor1` polynomials, a similar formula is implemented which
exploits `k_0`, the order of the first non-zero coefficient of `a`.
""" div!
@inline function div!(c::Taylor1, a::Taylor1, b::Taylor1, k::Int)
# order and coefficient of first factorized term
ordfact, cdivfact = divfactorization(a, b)
if k == 0
@inbounds c[0] = cdivfact
return nothing
end
imin = max(0, k+ordfact-b.order)
@inbounds c[k] = c[imin] * b[k+ordfact-imin]
@inbounds for i = imin+1:k-1
c[k] += c[i] * b[k+ordfact-i]
end
if k+ordfact ≤ b.order
@inbounds c[k] = (a[k+ordfact]-c[k]) / b[ordfact]
else
@inbounds c[k] = - c[k] / b[ordfact]
end
return nothing
end
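# Illustrative sketch (hypothetical helper): thanks to `divfactorization`, `/`
# cancels common leading zero coefficients, so e.g. (t^2 + t^3)/t == t + t^2
# (the quotient loses `ordfact` orders with respect to the numerator).
function _demo_div_factorization()
    t = Taylor1(Float64, 5)
    return (t^2 + t^3) / t == t + t^2
end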
@inline function div!(v::Taylor1, a::Taylor1, b::NumberNotSeries, k::Int)
@inbounds v[k] = a[k] / b
return nothing
end
@inline function div!(c::Taylor1{T}, a::NumberNotSeries,
b::Taylor1{T}, k::Int) where {T<:Number}
zero!(c, k)
iszero(a) && !iszero(b) && return nothing
# order and coefficient of first factorized term
# In this case, since a[k]=0 for k>0, we can simplify to:
# ordfact, cdivfact = 0, a/b[0]
if k == 0
@inbounds c[0] = a/b[0]
return nothing
end
@inbounds c[k] = c[0] * b[k]
@inbounds for i = 1:k-1
c[k] += c[i] * b[k-i]
end
@inbounds c[k] = -c[k]/b[0]
return nothing
end
@inline function div!(c::Taylor1{TaylorN{T}}, a::NumberNotSeries,
b::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
zero!(c, k)
iszero(a) && !iszero(b) && return nothing
# order and coefficient of first factorized term
# In this case, since a[k]=0 for k>0, we can simplify to:
# ordfact, cdivfact = 0, a/b[0]
if k == 0
@inbounds div!(c[0], a, b[0])
return nothing
end
@inbounds mul!(c[k], c[0], b[k])
@inbounds for i = 1:k-1
# c[k] += c[i] * b[k-i]
mul!(c[k], c[i], b[k-i])
end
# @inbounds c[k] = -c[k]/b[0]
@inbounds div_scalar!(c[k], -1, b[0])
return nothing
end
# TODO: avoid allocations when T isa Taylor1
@inline function div!(v::HomogeneousPolynomial{T}, a::HomogeneousPolynomial{T}, b::NumberNotSeriesN) where {T <: Number}
@inbounds for k in eachindex(v)
v[k] = a[k] / b
end
return nothing
end
# NOTE: Due to the use of `zero!`, this `div!` method does *not* accumulate the result of a / b in c[k] (k > 0)
@inline function div!(c::TaylorN, a::TaylorN, b::TaylorN, k::Int)
if k==0
@inbounds c[0][1] = constant_term(a) / constant_term(b)
return nothing
end
zero!(c, k)
@inbounds for i = 0:k-1
mul!(c[k], c[i], b[k-i])
end
@inbounds for i in eachindex(c[k])
c[k][i] = (a[k][i] - c[k][i]) / constant_term(b)
end
return nothing
end
# In-place division and assignment: c[k] = (c/a)[k]
# NOTE: Here `div!` *accumulates* the result of (c/a)[k] in c[k] (k > 0)
#
# Recursion algorithm:
#
# k = 0: c[0] <- c[0]/a[0]
# k = 1: c[1] <- c[1] - c[0]*a[1]
# c[1] <- c[1]/a[0]
# k = 2: c[2] <- c[2] - c[0]*a[2] - c[1]*a[1]
# c[2] <- c[2]/a[0]
# etc.
@inline function div!(c::TaylorN, a::TaylorN, k::Int)
if k==0
@inbounds c[0][1] = constant_term(c) / constant_term(a)
return nothing
end
@inbounds for i = 0:k-1
mul_scalar!(c[k], -1, c[i], a[k-i])
end
@inbounds for i in eachindex(c[k])
c[k][i] = c[k][i] / constant_term(a)
end
return nothing
end
# In-place division and assignment: c[k] <- scalar * (c/a)[k]
# NOTE: Here `div!` *accumulates* the result of scalar * (c/a)[k] in c[k] (k > 0)
#
# Recursion algorithm:
#
# k = 0: c[0] <- scalar*c[0]/a[0]
# k = 1: c[1] <- scalar*c[1] - c[0]*a[1]
# c[1] <- c[1]/a[0]
# k = 2: c[2] <- scalar*c[2] - c[0]*a[2] - c[1]*a[1]
# c[2] <- c[2]/a[0]
# etc.
@inline function div_scalar!(c::TaylorN, scalar::NumberNotSeries, a::TaylorN, k::Int)
if k==0
@inbounds c[0][1] = scalar*constant_term(c) / constant_term(a)
return nothing
end
@inbounds mul!(c, scalar, c, k)
@inbounds for i = 0:k-1
mul_scalar!(c[k], -1, c[i], a[k-i])
end
@inbounds for i in eachindex(c[k])
c[k][i] = c[k][i] / constant_term(a)
end
return nothing
end
# NOTE: Here `div!` *accumulates* the result of a[k] / b[k] in c[k] (k > 0)
@inline function div!(c::TaylorN, a::NumberNotSeries, b::TaylorN, k::Int)
if k==0
@inbounds c[0][1] = a / constant_term(b)
return nothing
end
@inbounds for i = 0:k-1
mul!(c[k], c[i], b[k-i])
end
@inbounds for i in eachindex(c[k])
c[k][i] = ( -c[k][i] ) / constant_term(b)
end
return nothing
end
# in-place division c <- c/a (assumes equal order among TaylorNs)
function div!(c::TaylorN, a::TaylorN)
@inbounds for k in eachindex(c)
div!(c, a, k)
end
return nothing
end
# in-place division c <- scalar*c/a (assumes equal order among TaylorNs)
function div_scalar!(c::TaylorN, scalar::NumberNotSeries, a::TaylorN)
@inbounds for k in eachindex(c)
div_scalar!(c, scalar, a, k)
end
return nothing
end
# c[k] <- (a/b)[k]
function div!(c::TaylorN, a::TaylorN, b::TaylorN)
@inbounds for k in eachindex(c)
div!(c, a, b, k)
end
return nothing
end
# c[k] <- (a/b)[k], where a is a scalar
function div!(c::TaylorN, a::NumberNotSeries, b::TaylorN)
@inbounds for k in eachindex(c)
div!(c, a, b, k)
end
return nothing
end
# c[k] <- a[k]/b, where b is a scalar
function div!(c::TaylorN, a::TaylorN, b::NumberNotSeries)
@inbounds for k in eachindex(c)
div!(c[k], a[k], b)
end
return nothing
end
# NOTE: Here `div!` *accumulates* the result of a / b in res[k] (k > 0)
@inline function div!(c::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}},
b::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeriesN}
# order and coefficient of first factorized term
# ordfact, cdivfact = divfactorization(a, b)
anz = findfirst(a)
bnz = findfirst(b)
anz = anz ≥ 0 ? anz : a.order
bnz = bnz ≥ 0 ? bnz : a.order
ordfact = min(anz, bnz)
# Is the polynomial factorizable?
iszero(b[ordfact]) && throw( ArgumentError(
"""Division does not define a Taylor1 polynomial;
order k=$(ordfact) => coeff[$(ordfact)]=$(b[ordfact]).""") )
zero!(c, k)
if k == 0
# @inbounds c[0] = a[ordfact]/b[ordfact]
@inbounds div!(c[0], a[ordfact], b[ordfact])
return nothing
end
imin = max(0, k+ordfact-b.order)
@inbounds mul!(c[k], c[imin], b[k+ordfact-imin])
@inbounds for i = imin+1:k-1
mul!(c[k], c[i], b[k+ordfact-i])
end
if k+ordfact ≤ b.order
# @inbounds c[k] = (a[k+ordfact]-c[k]) / b[ordfact]
@inbounds for l in eachindex(c[k])
subst!(c[k], a[k+ordfact], c[k], l)
end
@inbounds div!(c[k], b[ordfact])
else
# @inbounds c[k] = (-c[k]) / b[ordfact]
@inbounds div_scalar!(c[k], -1, b[ordfact])
end
return nothing
end
@inline function div!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}},
b::NumberNotSeries, k::Int) where {T<:NumberNotSeries}
for l in eachindex(a[k])
for m in eachindex(a[k][l])
res[k][l][m] = a[k][l][m]/b
end
end
return nothing
end
"""
mul!(Y, A, B)
Multiply A*B and save the result in Y.
"""
function mul!(y::Vector{Taylor1{T}},
a::Union{Matrix{T},SparseMatrixCSC{T}},
b::Vector{Taylor1{T}}) where {T<:Number}
n, k = size(a)
@assert (length(y)== n && length(b)== k)
# determine the maximal order of b
# order = maximum([b1.order for b1 in b])
order = maximum(get_order.(b))
# Use matrices of coefficients (of proper size) and mul!
# B = zeros(T, k, order+1)
B = Array{T}(undef, k, order+1)
B = zero.(B)
for i = 1:k
@inbounds ord = b[i].order
@inbounds for j = 1:ord+1
B[i,j] = b[i][j-1]
end
end
Y = Array{T}(undef, n, order+1)
mul!(Y, a, B)
@inbounds for i = 1:n
# y[i] = Taylor1( collect(Y[i,:]), order)
y[i] = Taylor1( Y[i,:], order)
end
return y
end
# Adapted from (Julia v1.2) stdlib/v1.2/LinearAlgebra/src/dense.jl#721-734,
# licensed under MIT "Expat".
# Specialize a method of `inv` for Matrix{Taylor1{T}}. Simply, avoid pivoting,
# since the polynomial field is not an ordered one.
# function Base.inv(A::StridedMatrix{Taylor1{T}}) where T
# checksquare(A)
# S = Taylor1{typeof((one(T)*zero(T) + one(T)*zero(T))/one(T))}
# AA = convert(AbstractArray{S}, A)
# if istriu(AA)
# Ai = triu!(parent(inv(UpperTriangular(AA))))
# elseif istril(AA)
# Ai = tril!(parent(inv(LowerTriangular(AA))))
# else
# # Do not use pivoting !!
# Ai = inv!(lu(AA, Val(false)))
# Ai = convert(typeof(parent(Ai)), Ai)
# end
# return Ai
# end
# see https://github.com/JuliaLang/julia/pull/40623
const LU_RowMaximum = VERSION >= v"1.7.0-DEV.1188" ? RowMaximum() : Val(true)
const LU_NoPivot = VERSION >= v"1.7.0-DEV.1188" ? NoPivot() : Val(false)
# Adapted from (Julia v1.2) stdlib/v1.2/LinearAlgebra/src/lu.jl#240-253
# and (Julia v1.4.0-dev) stdlib/LinearAlgebra/v1.4/src/lu.jl#270-274,
# licensed under MIT "Expat".
# Specialize a method of `lu` for Matrix{Taylor1{T}}, which avoids pivoting,
# since the polynomial field is not an ordered one.
# We can't assume an ordered field so we first try without pivoting
function lu(A::AbstractMatrix{Taylor1{T}}; check::Bool = true) where {T<:Number}
S = Taylor1{lutype(T)}
F = lu!(copy_oftype(A, S), LU_NoPivot; check = false)
if issuccess(F)
return F
else
return lu!(copy_oftype(A, S), LU_RowMaximum; check = check)
end
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 13367 | # This file is part of the TaylorSeries.jl Julia package, MIT license
#
# Luis Benet & David P. Sanders
# UNAM
#
# MIT Expat license
#
## Auxiliary function ##
"""
    resize_coeffs1!(coeffs::Array{T,1}, order::Int) where {T<:Number}
If the length of `coeffs` is smaller than `order+1`, it resizes
`coeffs` appropriately filling it with zeros.
"""
function resize_coeffs1!(coeffs::Array{T,1}, order::Int) where {T<:Number}
lencoef = length(coeffs)
resize!(coeffs, order+1)
c1 = coeffs[1]
if order > lencoef-1
@simd for ord in lencoef+1:order+1
@inbounds coeffs[ord] = zero(c1)
end
end
return nothing
end
"""
    resize_coeffsHP!(coeffs::Array{T,1}, order::Int) where {T<:Number}
If the length of `coeffs` is smaller than the number of coefficients
corresponding to `order` (given by `size_table[order+1]`), it resizes
`coeffs` appropriately filling it with zeros.
"""
function resize_coeffsHP!(coeffs::Array{T,1}, order::Int) where {T<:Number}
lencoef = length( coeffs )
@inbounds num_coeffs = size_table[order+1]
@assert order ≤ get_order() && lencoef ≤ num_coeffs
num_coeffs == lencoef && return nothing
resize!(coeffs, num_coeffs)
c1 = coeffs[1]
@simd for ord in lencoef+1:num_coeffs
@inbounds coeffs[ord] = zero(c1)
end
return nothing
end
## Minimum order of a HomogeneousPolynomial compatible with the vector's length
function orderH(coeffs::Array{T,1}) where {T<:Number}
ord = 0
ll = length(coeffs)
for i = 1:get_order()+1
@inbounds num_coeffs = size_table[i]
ll ≤ num_coeffs && break
ord += 1
end
return ord
end
## Maximum order of a HomogeneousPolynomial vector; used by TaylorN constructor
function maxorderH(v::Array{HomogeneousPolynomial{T},1}) where {T<:Number}
m = 0
@inbounds for i in eachindex(v)
m = max(m, v[i].order)
end
return m
end
## getcoeff ##
"""
getcoeff(a, n)
Return the coefficient of order `n::Int` of a `a::Taylor1` polynomial.
"""
getcoeff(a::Taylor1, n::Int) = (@assert 0 ≤ n ≤ a.order; return a[n])
getindex(a::Taylor1, n::Int) = a.coeffs[n+1]
getindex(a::Taylor1, u::UnitRange{Int}) = view(a.coeffs, u .+ 1 )
getindex(a::Taylor1, c::Colon) = view(a.coeffs, c)
getindex(a::Taylor1{T}, u::StepRange{Int,Int}) where {T<:Number} =
view(a.coeffs, u[:] .+ 1)
setindex!(a::Taylor1{T}, x::T, n::Int) where {T<:Number} = a.coeffs[n+1] = x
setindex!(a::Taylor1{T}, x::T, n::Int) where {T<:AbstractSeries} = setindex!(a.coeffs, deepcopy(x), n+1)
setindex!(a::Taylor1{T}, x::T, u::UnitRange{Int}) where {T<:Number} =
a.coeffs[u .+ 1] .= x
function setindex!(a::Taylor1{T}, x::Array{T,1}, u::UnitRange{Int}) where {T<:Number}
@assert length(u) == length(x)
for ind in eachindex(x)
a.coeffs[u[ind]+1] = x[ind]
end
end
setindex!(a::Taylor1{T}, x::T, c::Colon) where {T<:Number} = a.coeffs[c] .= x
setindex!(a::Taylor1{T}, x::Array{T,1}, c::Colon) where {T<:Number} = a.coeffs[c] .= x
setindex!(a::Taylor1{T}, x::T, u::StepRange{Int,Int}) where {T<:Number} =
a.coeffs[u[:] .+ 1] .= x
function setindex!(a::Taylor1{T}, x::Array{T,1}, u::StepRange{Int,Int}) where {T<:Number}
@assert length(u) == length(x)
for ind in eachindex(x)
a.coeffs[u[ind]+1] = x[ind]
end
end
"""
getcoeff(a, v)
Return the coefficient of `a::HomogeneousPolynomial`, specified by `v`,
which is a tuple (or vector) with the indices of the specific
monomial.
"""
function getcoeff(a::HomogeneousPolynomial, v::NTuple{N,Int}) where {N}
@assert N == get_numvars() && all(v .>= 0)
kdic = in_base(get_order(),v)
@inbounds n = pos_table[a.order+1][kdic]
a[n]
end
getcoeff(a::HomogeneousPolynomial, v::Array{Int,1}) = getcoeff(a, (v...,))
getindex(a::HomogeneousPolynomial, n::Int) = a.coeffs[n]
getindex(a::HomogeneousPolynomial, n::UnitRange{Int}) = view(a.coeffs, n)
getindex(a::HomogeneousPolynomial, c::Colon) = view(a.coeffs, c)
getindex(a::HomogeneousPolynomial, u::StepRange{Int,Int}) = view(a.coeffs, u[:])
setindex!(a::HomogeneousPolynomial{T}, x::T, n::Int) where {T<:Number} =
a.coeffs[n] = x
setindex!(a::HomogeneousPolynomial{T}, x::T, n::UnitRange{Int}) where {T<:Number} =
a.coeffs[n] .= x
setindex!(a::HomogeneousPolynomial{T}, x::Array{T,1}, n::UnitRange{Int}) where {T<:Number} =
a.coeffs[n] .= x
setindex!(a::HomogeneousPolynomial{T}, x::T, c::Colon) where {T<:Number} =
a.coeffs[c] .= x
setindex!(a::HomogeneousPolynomial{T}, x::Array{T,1}, c::Colon) where {T<:Number} =
a.coeffs[c] = x
setindex!(a::HomogeneousPolynomial{T}, x::T, u::StepRange{Int,Int}) where {T<:Number} =
a.coeffs[u[:]] .= x
setindex!(a::HomogeneousPolynomial{T}, x::Array{T,1}, u::StepRange{Int,Int}) where {T<:Number} =
a.coeffs[u[:]] .= x[:]
"""
getcoeff(a, v)
Return the coefficient of `a::TaylorN`, specified by `v`,
which is a tuple (or vector) with the indices of the specific
monomial.
"""
function getcoeff(a::TaylorN, v::NTuple{N,Int}) where {N}
order = sum(v)
@assert order ≤ a.order
getcoeff(a[order], v)
end
getcoeff(a::TaylorN, v::Array{Int,1}) = getcoeff(a, (v...,))
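# Illustrative sketch (hypothetical helper): retrieving the coefficient of the
# monomial x^2*y of a TaylorN through its exponent tuple.
function _demo_getcoeff()
    x, y = set_variables("x y", order=4)
    p = 3 + 5x^2*y
    return getcoeff(p, (2, 1)) == 5.0
end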
getindex(a::TaylorN, n::Int) = a.coeffs[n+1]
getindex(a::TaylorN, u::UnitRange{Int}) = view(a.coeffs, u .+ 1)
getindex(a::TaylorN, c::Colon) = view(a.coeffs, c)
getindex(a::TaylorN, u::StepRange{Int,Int}) = view(a.coeffs, u[:] .+ 1)
function setindex!(a::TaylorN{T}, x::HomogeneousPolynomial{T}, n::Int) where {T<:Number}
@assert x.order == n
a.coeffs[n+1] = x
end
setindex!(a::TaylorN{T}, x::T, n::Int) where {T<:Number} =
a.coeffs[n+1] = HomogeneousPolynomial(x, n)
function setindex!(a::TaylorN{T}, x::T, u::UnitRange{Int}) where {T<:Number}
for ind in u
a[ind] = x
end
a[u]
end
function setindex!(a::TaylorN{T}, x::Array{HomogeneousPolynomial{T},1}, u::UnitRange{Int}) where {T<:Number}
@assert length(u) == length(x)
for ind in eachindex(x)
a[u[ind]] = x[ind]
end
end
function setindex!(a::TaylorN{T}, x::Array{T,1}, u::UnitRange{Int}) where {T<:Number}
@assert length(u) == length(x)
for ind in eachindex(x)
a[u[ind]] = x[ind]
end
end
setindex!(a::TaylorN{T}, x::T, ::Colon) where {T<:Number} =
(a[0:end] = x; a[:])
setindex!(a::TaylorN{T}, x::Array{HomogeneousPolynomial{T},1}, ::Colon) where
{T<:Number} = (a[0:end] = x; a[:])
setindex!(a::TaylorN{T}, x::Array{T,1}, ::Colon) where {T<:Number} =
(a[0:end] = x; a[:])
function setindex!(a::TaylorN{T}, x::T, u::StepRange{Int,Int}) where {T<:Number}
for ind in u
a[ind] = x
end
a[u]
end
function setindex!(a::TaylorN{T}, x::Array{HomogeneousPolynomial{T},1}, u::StepRange{Int,Int}) where {T<:Number}
# a[u[:]] .= x[:]
@assert length(u) == length(x)
for ind in eachindex(x)
a[u[ind]] = x[ind]
end
end
function setindex!(a::TaylorN{T}, x::Array{T,1}, u::StepRange{Int,Int}) where {T<:Number}
@assert length(u) == length(x)
for ind in eachindex(x)
a[u[ind]] = x[ind]
end
end
## eltype, length, get_order, etc ##
for T in (:Taylor1, :HomogeneousPolynomial, :TaylorN)
@eval begin
if $T == HomogeneousPolynomial
@inline iterate(a::$T, state=1) = state > length(a) ? nothing : (a.coeffs[state], state+1)
# Base.iterate(rS::Iterators.Reverse{$T}, state=rS.itr.order) = state < 0 ? nothing : (a.coeffs[state], state-1)
@inline length(a::$T) = size_table[a.order+1]
@inline firstindex(a::$T) = 1
@inline lastindex(a::$T) = length(a)
else
@inline iterate(a::$T, state=0) = state > a.order ? nothing : (a.coeffs[state+1], state+1)
# Base.iterate(rS::Iterators.Reverse{$T}, state=rS.itr.order) = state < 0 ? nothing : (a.coeffs[state], state-1)
@inline length(a::$T) = length(a.coeffs)
@inline firstindex(a::$T) = 0
@inline lastindex(a::$T) = a.order
end
@inline eachindex(a::$T) = firstindex(a):lastindex(a)
@inline numtype(::$T{S}) where {S<:Number} = S
@inline size(a::$T) = size(a.coeffs)
@inline get_order(a::$T) = a.order
@inline axes(a::$T) = ()
end
end
numtype(a) = eltype(a)
@doc doc"""
numtype(a::AbstractSeries)
Returns the type of the elements of the coefficients of `a`.
""" numtype
# Dumb methods included to properly export normalize_taylor (if IntervalArithmetic is loaded)
@inline normalize_taylor(a::AbstractSeries) = a
## _minorder
function _minorder(a, b)
minorder, maxorder = minmax(a.order, b.order)
if minorder ≤ 0
minorder = maxorder
end
return minorder
end
## fixorder ##
for T in (:Taylor1, :TaylorN)
@eval begin
@inline function fixorder(a::$T, b::$T)
a.order == b.order && return a, b
minorder = _minorder(a, b)
return $T(copy(a.coeffs), minorder), $T(copy(b.coeffs), minorder)
end
end
end
function fixorder(a::HomogeneousPolynomial, b::HomogeneousPolynomial)
@assert a.order == b.order
return a, b
end
for T in (:HomogeneousPolynomial, :TaylorN)
@eval function fixorder(a::Taylor1{$T{T}}, b::Taylor1{$T{S}}) where
{T<:NumberNotSeries, S<:NumberNotSeries}
(a.order == b.order) && (all(get_order.(a.coeffs) .== get_order.(b.coeffs))) && return a, b
minordT = _minorder(a, b)
aa = Taylor1(copy(a.coeffs), minordT)
bb = Taylor1(copy(b.coeffs), minordT)
for ind in eachindex(aa)
aa[ind].order == bb[ind].order && continue
minordQ = _minorder(aa[ind], bb[ind])
aa[ind] = TaylorN(aa[ind].coeffs, minordQ)
bb[ind] = TaylorN(bb[ind].coeffs, minordQ)
end
return aa, bb
end
end
# Finds the first non zero entry; extended to Taylor1
function Base.findfirst(a::Taylor1{T}) where {T<:Number}
first = findfirst(x->!iszero(x), a.coeffs)
isnothing(first) && return -1
return first-1
end
# Finds the last non-zero entry; extended to Taylor1
function Base.findlast(a::Taylor1{T}) where {T<:Number}
last = findlast(x->!iszero(x), a.coeffs)
isnothing(last) && return -1
return last-1
end
# Finds the first non zero entry; extended to HomogeneousPolynomial
function Base.findfirst(a::HomogeneousPolynomial{T}) where {T<:Number}
first = findfirst(x->!iszero(x), a.coeffs)
isa(first, Nothing) && return -1
return first
end
function Base.findfirst(a::TaylorN{T}) where {T<:Number}
first = findfirst(x->!iszero(x), a.coeffs)
isa(first, Nothing) && return -1
return first-1
end
# Finds the last non-zero entry; extended to HomogeneousPolynomial
function Base.findlast(a::HomogeneousPolynomial{T}) where {T<:Number}
last = findlast(x->!iszero(x), a.coeffs)
isa(last, Nothing) && return -1
return last
end
# Finds the last non-zero entry; extended to TaylorN
function Base.findlast(a::TaylorN{T}) where {T<:Number}
last = findlast(x->!iszero(x), a.coeffs)
isa(last, Nothing) && return -1
return last-1
end
## copyto! ##
# Inspired from base/abstractarray.jl, line 665
for T in (:Taylor1, :HomogeneousPolynomial, :TaylorN)
@eval function copyto!(dst::$T{T}, src::$T{T}) where {T<:Number}
length(dst) < length(src) && throw(ArgumentError(string("Destination has fewer elements than required; no copy performed")))
destiter = eachindex(dst)
y = iterate(destiter)
for x in src
dst[y[1]] = x
y = iterate(destiter, y[2])
end
return dst
end
end
"""
constant_term(a)
Return the constant value (zero order coefficient) for `Taylor1`
and `TaylorN`. The fallback behavior is to return `a` itself if
`a::Number`, or to broadcast `constant_term` over the entries when `a::Vector`.
"""
constant_term(a::Taylor1) = a[0]
constant_term(a::TaylorN) = a[0][1]
constant_term(a::Vector{T}) where {T<:Number} = constant_term.(a)
constant_term(a::Number) = a
"""
linear_polynomial(a)
Returns the linear part of `a` as a polynomial (`Taylor1` or `TaylorN`),
*without* the constant term. The fallback behavior is to return `a` itself.
"""
linear_polynomial(a::Taylor1) = Taylor1([zero(a[1]), a[1]], a.order)
linear_polynomial(a::HomogeneousPolynomial) = HomogeneousPolynomial(a[1], a.order)
linear_polynomial(a::TaylorN) = TaylorN(a[1], a.order)
linear_polynomial(a::Vector{T}) where {T<:Number} = linear_polynomial.(a)
linear_polynomial(a::Number) = a
"""
nonlinear_polynomial(a)
Returns the nonlinear part of `a`. The fallback behavior is to return `zero(a)`.
"""
nonlinear_polynomial(a::AbstractSeries) = a - constant_term(a) - linear_polynomial(a)
nonlinear_polynomial(a::Vector{T}) where {T<:Number} = nonlinear_polynomial.(a)
nonlinear_polynomial(a::Number) = zero(a)
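# Illustrative sketch (hypothetical helper): the three pieces defined above add
# back up to the original series.
function _demo_polynomial_split()
    t = Taylor1(Float64, 4)
    p = 2 + 3t + t^2
    return constant_term(p) + linear_polynomial(p) + nonlinear_polynomial(p) == p
end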
"""
@isonethread (expr)
Internal macro used to check the number of threads in use, to prevent a data race
that modifies `coeff_table` when using `differentiate` or `integrate`; see
https://github.com/JuliaDiff/TaylorSeries.jl/issues/318.
This macro is inspired by the macro `@threaded`; see https://github.com/trixi-framework/Trixi.jl/blob/main/src/auxiliary/auxiliary.jl;
and https://github.com/trixi-framework/Trixi.jl/pull/426/files.
"""
macro isonethread(expr)
return esc(quote
if Threads.nthreads() == 1
$(expr)
else
copy($(expr))
end
end)
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 6723 | # This file is part of the TaylorSeries.jl Julia package, MIT license
#
# Luis Benet & David P. Sanders
# UNAM
#
# MIT Expat license
#
## Broadcast for Taylor1 and TaylorN
import .Broadcast: BroadcastStyle, Broadcasted, broadcasted
# BroadcastStyle definitions and basic precedence rules
struct Taylor1Style{T} <: Base.Broadcast.AbstractArrayStyle{0} end
Taylor1Style{T}(::Val{N}) where {T,N}= Base.Broadcast.DefaultArrayStyle{N}()
BroadcastStyle(::Type{<:Taylor1{T}}) where {T} = Taylor1Style{T}()
BroadcastStyle(::Taylor1Style{T}, ::Base.Broadcast.DefaultArrayStyle{0}) where {T} = Taylor1Style{T}()
BroadcastStyle(::Taylor1Style{T}, ::Base.Broadcast.DefaultArrayStyle{1}) where {T} = Base.Broadcast.DefaultArrayStyle{1}()
#
struct HomogeneousPolynomialStyle{T} <: Base.Broadcast.AbstractArrayStyle{0} end
HomogeneousPolynomialStyle{T}(::Val{N}) where {T,N}= Base.Broadcast.DefaultArrayStyle{N}()
BroadcastStyle(::Type{<:HomogeneousPolynomial{T}}) where {T} = HomogeneousPolynomialStyle{T}()
BroadcastStyle(::HomogeneousPolynomialStyle{T}, ::Base.Broadcast.DefaultArrayStyle{0}) where {T} = HomogeneousPolynomialStyle{T}()
BroadcastStyle(::HomogeneousPolynomialStyle{T}, ::Base.Broadcast.DefaultArrayStyle{1}) where {T} = Base.Broadcast.DefaultArrayStyle{1}()
#
struct TaylorNStyle{T} <: Base.Broadcast.AbstractArrayStyle{0} end
TaylorNStyle{T}(::Val{N}) where {T, N}= Base.Broadcast.DefaultArrayStyle{N}()
BroadcastStyle(::Type{<:TaylorN{T}}) where {T} = TaylorNStyle{T}()
BroadcastStyle(::TaylorNStyle{T}, ::Base.Broadcast.DefaultArrayStyle{0}) where {T} = TaylorNStyle{T}()
BroadcastStyle(::TaylorNStyle{T}, ::Base.Broadcast.DefaultArrayStyle{1}) where {T} = Base.Broadcast.DefaultArrayStyle{1}()
# Precedence rules for mixtures
BroadcastStyle(::TaylorNStyle{Taylor1{T}}, ::Taylor1Style{T}) where {T} = TaylorNStyle{Taylor1{T}}()
BroadcastStyle(::Taylor1Style{TaylorN{T}}, ::TaylorNStyle{T}) where {T} = Taylor1Style{TaylorN{T}}()
# Extend eltypes so things like [1.0] .+ t work
Base.Broadcast.eltypes(t::Tuple{Taylor1,AbstractArray}) =
Tuple{Base.Broadcast._broadcast_getindex_eltype([t[1]]), Base.Broadcast._broadcast_getindex_eltype(t[2])}
Base.Broadcast.eltypes(t::Tuple{AbstractArray,Taylor1}) =
Tuple{Base.Broadcast._broadcast_getindex_eltype(t[1]), Base.Broadcast._broadcast_getindex_eltype([t[2]])}
Base.Broadcast.eltypes(t::Tuple{HomogeneousPolynomial,AbstractArray}) =
Tuple{Base.Broadcast._broadcast_getindex_eltype([t[1]]), Base.Broadcast._broadcast_getindex_eltype(t[2])}
Base.Broadcast.eltypes(t::Tuple{AbstractArray,HomogeneousPolynomial}) =
Tuple{Base.Broadcast._broadcast_getindex_eltype(t[1]), Base.Broadcast._broadcast_getindex_eltype([t[2]])}
Base.Broadcast.eltypes(t::Tuple{TaylorN,AbstractArray}) =
Tuple{Base.Broadcast._broadcast_getindex_eltype([t[1]]), Base.Broadcast._broadcast_getindex_eltype(t[2])}
Base.Broadcast.eltypes(t::Tuple{AbstractArray,TaylorN}) =
Tuple{Base.Broadcast._broadcast_getindex_eltype(t[1]), Base.Broadcast._broadcast_getindex_eltype([t[2]])}
# # We follow https://docs.julialang.org/en/v1/manual/interfaces/#man-interface-iteration-1
# "`A = find_taylor(As)` returns the first Taylor1 among the arguments."
# find_taylor(bc::Broadcasted) = find_taylor(bc.args)
# find_taylor(args::Tuple) = find_taylor(find_taylor(args[1]), Base.tail(args))
# find_taylor(x) = x
# find_taylor(a::Taylor1, rest) = a
# find_taylor(a::HomogeneousPolynomial, rest) = a
# find_taylor(a::TaylorN, rest) = a
# find_taylor(::AbstractArray, rest) = find_taylor(rest)
#
# # Extend similar
# function similar(bc::Broadcasted{Taylor1Style{S}}, ::Type{T}) where {S, T}
# # Proper promotion
# R = Base.Broadcast.combine_eltypes(bc.f, bc.args)
# # Scan the inputs for the Taylor1:
# A = find_taylor(bc)
# # Create the output
# return Taylor1(similar(A.coeffs, R), A.order)
# end
#
# function similar(bc::Broadcasted{HomogeneousPolynomialStyle{S}}, ::Type{T}) where {S, T}
# # Proper promotion
# # combine_eltypes(f, args::Tuple) = Base._return_type(f, eltypes(args))
# R = Base.Broadcast.combine_eltypes(bc.f, bc.args)
# # Scan the inputs for the HomogeneousPolynomial:
# A = find_taylor(bc)
# # Create the output
# return HomogeneousPolynomial(similar(A.coeffs, R), A.order)
# end
#
# function similar(bc::Broadcasted{TaylorNStyle{S}}, ::Type{T}) where {S, T}
# # Proper promotion
# R = Base.Broadcast.combine_eltypes(bc.f, bc.args)
# # Scan the inputs for the TaylorN:
# A = find_taylor(bc)
# # Create the output
# return TaylorN(similar(A.coeffs, R), A.order)
# end
# Adapted from Base.Broadcast.copyto!, base/broadcasting.jl, line 832
for T in (:Taylor1, :HomogeneousPolynomial, :TaylorN)
@eval begin
@inline function copyto!(dest::$T{T}, bc::Broadcasted) where {T<:Number}
axes(dest) == axes(bc) || Base.Broadcast.throwdm(axes(dest), axes(bc))
# Performance optimization: broadcast!(identity, dest, A) is equivalent to copyto!(dest, A) if indices match
if bc.f === identity && bc.args isa Tuple{$T{T}} # only a single input argument to broadcast!
A = bc.args[1]
if axes(dest) == axes(A)
return copyto!(dest, A)
end
end
bc′ = Base.Broadcast.preprocess(dest, bc)
copyto!(dest, bc′[1])
return dest
end
end
end
# Broadcasted extensions
@inline broadcasted(::Taylor1Style{T}, ::Type{Float32}, a::Taylor1{T}) where {T<:Number} =
Taylor1(Float32.(a.coeffs), a.order)
@inline broadcasted(::TaylorNStyle{T}, ::Type{Float32}, a::TaylorN{T}) where {T<:Number} =
convert(TaylorN{Float32}, a)
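# Illustrative usage (sketch, not from the original file): the method just above
# makes `Float32.(t)` return a Taylor1{Float32} with converted coefficients,
#   julia> t = Taylor1(3);                     # 1.0 t + 𝒪(t⁴), a Taylor1{Float64}
#   julia> Float32.(t) isa Taylor1{Float32}
#   true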
# # This prevents broadcasting being applied to the Taylor1/TaylorN params
# # for the mutating functions, and to act only in `k`
# for (T, TS) in ((:Taylor1, :Taylor1Style), (:TaylorN, :TaylorNStyle))
# for f in (add!, subst!, sqr!, sqrt!, exp!, log!, identity!, zero!,
# one!, abs!, abs2!, deg2rad!, rad2deg!)
# @eval begin
# @inline function broadcasted(::$TS{T}, fn::typeof($f), r::$T{T}, a::$T{T}, k) where {T}
# @inbounds for i in eachindex(k)
# fn(r, a, k[i])
# end
# nothing
# end
# end
# end
# for f in (sincos!, tan!, asin!, acos!, atan!, sinhcosh!, tanh!)
# @eval begin
# @inline function broadcasted(::$TS{T}, fn::typeof($f), r::$T{T}, a::$T{T}, b::Taylor1{T}, k) where {T}
# @inbounds for i in eachindex(k)
# fn(r, a, b, k[i])
# end
# nothing
# end
# end
# end
# end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 11445 | # This file is part of the TaylorSeries.jl Julia package, MIT license
#
# Luis Benet & David P. Sanders
# UNAM
#
# MIT Expat license
#
## Differentiating ##
"""
differentiate(a)
Return the `Taylor1` polynomial of the differential of `a::Taylor1`.
The order of the result is `a.order-1`.
The function `derivative` is an exact synonym of `differentiate`.
"""
function differentiate(a::Taylor1)
res = Taylor1(zero(a[0]), get_order(a)-1)
for ord in eachindex(res)
differentiate!(res, a, ord)
end
return res
end
"""
derivative
An exact synonym of [`differentiate`](@ref).
"""
const derivative = differentiate
"""
differentiate!(res, a) --> nothing
In-place version of `differentiate`. Compute the `Taylor1` polynomial of the
differential of `a::Taylor1` and return it as `res` (order of `res` remains
unchanged).
"""
function differentiate!(res::Taylor1, a::Taylor1)
for ord in eachindex(res)
differentiate!(res, a, ord)
end
return nothing
end
"""
differentiate!(p, a, k) --> nothing
Update in-place the `k-th` expansion coefficient `p[k]` of `p = differentiate(a)`
for both `p` and `a` `Taylor1`.
The coefficients are given by
```math
p_k = (k+1) a_{k+1}.
```
"""
function differentiate!(p::Taylor1, a::Taylor1, k::Int)
k >= a.order && return nothing
@inbounds p[k] = (k+1)*a[k+1]
return nothing
end
function differentiate!(p::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}}, k::Int) where
{T<:NumberNotSeries}
k >= a.order && return nothing
@inbounds p[k] = (k+1)*a[k+1]
return nothing
end
"""
differentiate(a, n)
Compute recursively the `Taylor1` polynomial of the n-th derivative of
`a::Taylor1`. The order of the result is `a.order-n`.
"""
function differentiate(a::Taylor1{T}, n::Int) where {T <: Number}
if n > a.order
return Taylor1(zero(T), 0)
elseif n == a.order
return Taylor1(differentiate(n, a), 0)
elseif n==0
return a
end
res = differentiate(a)
for i = 2:n
differentiate!(res, res)
end
return Taylor1(res.coeffs[1:a.order-n+1])
end
"""
differentiate(n, a)
Return the value of the `n`-th derivative of the polynomial `a`.
"""
function differentiate(n::Int, a::Taylor1{T}) where {T<:Number}
@assert a.order ≥ n ≥ 0
return factorial( widen(n) ) * a[n] :: T
end
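# Illustrative usage (sketch, not from the original file): for p = 1 + 2t + 3t²,
#   differentiate(p)        # 2.0 + 6.0 t          (order p.order-1)
#   differentiate(p, 2)     # constant series 6.0  (order 0)
#   differentiate(2, p)     # 6.0, i.e. 2! * p[2], returned as a plain number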
## Integrating ##
"""
integrate(a, [x])
Return the integral of `a::Taylor1`. The constant of integration
(0-th order coefficient) is set to `x`, which is zero if omitted.
Note that the result keeps the order of `a`, so the contribution of the
highest-order coefficient of `a` (which would appear at degree `a.order+1`) is dropped.
"""
function integrate(a::Taylor1{T}, x::S) where {T<:Number, S<:Number}
order = get_order(a)
aa = a[0]/1 + zero(x)
R = typeof(aa)
coeffs = Array{typeof(aa)}(undef, order+1)
# fill!(coeffs, zero(aa))
@inbounds for i = 1:order
coeffs[i+1] = a[i-1] / i
end
@inbounds coeffs[1] = convert(R, x)
return Taylor1(coeffs, a.order)
end
integrate(a::Taylor1{T}) where {T<:Number} = integrate(a, zero(a[0]))
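# Illustrative usage (sketch, not from the original file): with p = 1 + 2t + 3t²
# (order 2), `integrate(p)` gives t + t² truncated at the same order, so the
# contribution of p[2] (the would-be t³ term) is dropped; `integrate(p, 1.0)`
# additionally sets the integration constant to 1.0.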
## Differentiation ##
"""
differentiate(a, r)
Partial differentiation of `a::HomogeneousPolynomial` series with respect
to the `r`-th variable.
"""
function differentiate(a::HomogeneousPolynomial, r::Int)
@assert 1 ≤ r ≤ get_numvars()
T = TS.numtype(a)
a.order == 0 && return HomogeneousPolynomial(zero(a[1]), 0)
@inbounds num_coeffs = size_table[a.order]
coeffs = zeros(T, num_coeffs)
@inbounds posTb = pos_table[a.order]
@inbounds num_coeffs = size_table[a.order+1]
ct = deepcopy(coeff_table[a.order+1])
@inbounds for i = 1:num_coeffs
# iind = @isonethread coeff_table[a.order+1][i]
iind = ct[i]
n = iind[r]
n == 0 && continue
iind[r] -= 1
kdic = in_base(get_order(), iind)
pos = posTb[kdic]
coeffs[pos] = n * a[i]
iind[r] += 1
end
return HomogeneousPolynomial{T}(coeffs, a.order-1)
end
differentiate(a::HomogeneousPolynomial, s::Symbol) = differentiate(a, lookupvar(s))
"""
differentiate(a, r)
Partial differentiation of `a::TaylorN` series with respect
to the `r`-th variable. The `r`-th variable may be also
specified through its symbol.
"""
function differentiate(a::TaylorN, r=1::Int)
T = TS.numtype(a)
coeffs = Array{HomogeneousPolynomial{T}}(undef, a.order)
@inbounds for ord = 1:a.order
coeffs[ord] = differentiate( a[ord], r)
end
return TaylorN{T}( coeffs, a.order )
end
differentiate(a::TaylorN, s::Symbol) = differentiate(a, lookupvar(s))
"""
differentiate(a::TaylorN{T}, ntup::NTuple{N,Int})
Return a `TaylorN` with the partial derivative of `a` defined
by `ntup::NTuple{N,Int}`, where the first entry is the number
of derivatives with respect to the first variable, the second is
the number of derivatives with respect to the second, and so on.
"""
function differentiate(a::TaylorN, ntup::NTuple{N,Int}) where {N}
@assert N == get_numvars() && all(ntup .>= 0)
sum(ntup) > a.order && return zero(a)
sum(ntup) == 0 && return copy(a)
aa = copy(a)
for nvar in 1:get_numvars()
for _ in 1:ntup[nvar]
aa = differentiate(aa, nvar)
end
end
return aa
end
"""
differentiate(ntup::NTuple{N,Int}, a::TaylorN{T})
Returns the value of the coefficient of `a` specified by
`ntup::NTuple{N,Int}`, multiplied by the corresponding
factorials.
"""
function differentiate(ntup::NTuple{N,Int}, a::TaylorN) where {N}
@assert N == get_numvars() && all(ntup .>= 0)
c = getcoeff(a, [ntup...])
for ind = 1:get_numvars()
c *= factorial(ntup[ind])
end
return c
end
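# Illustrative usage (sketch; assumes the package's `set_variables`, defined elsewhere):
#   julia> x, y = set_variables("x y", order=4);
#   julia> f = x^2*y + y^3;
#   julia> differentiate(f, 2)        # ∂f/∂y = x² + 3y²  (same as differentiate(f, :y))
#   julia> differentiate(f, (1, 1))   # ∂²f/∂x∂y = 2x
#   julia> differentiate((2, 1), f)   # coefficient of x²y times 2!·1! = 2.0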
## Gradient, jacobian and hessian
"""
```
gradient(f)
∇(f)
```
Compute the gradient of the polynomial `f::TaylorN`.
"""
function gradient(f::TaylorN)
T = TS.numtype(f)
numVars = get_numvars()
grad = Array{TaylorN{T}}(undef, numVars)
@inbounds for nv = 1:numVars
grad[nv] = differentiate(f, nv)
end
return grad
end
const ∇ = TS.gradient
"""
```
jacobian(vf)
jacobian(vf, [vals])
```
Compute the jacobian matrix of `vf`, a vector of `TaylorN` polynomials,
evaluated at the vector `vals`. If `vals` is omitted, it is evaluated at zero.
"""
function jacobian(vf::Array{TaylorN{T},1}) where {T<:Number}
numVars = get_numvars()
@assert length(vf) == numVars
jac = Array{T}(undef, numVars, numVars)
@inbounds for comp = 1:numVars
jac[:,comp] = vf[comp][1][1:end]
end
return transpose(jac)
end
function jacobian(vf::Array{TaylorN{T},1}, vals::Array{S,1}) where {T<:Number,S<:Number}
R = promote_type(T,S)
numVars = get_numvars()
@assert length(vf) == numVars == length(vals)
jac = Array{R}(undef, numVars, numVars)
for comp = 1:numVars
@inbounds grad = gradient( vf[comp] )
@inbounds for nv = 1:numVars
jac[nv,comp] = evaluate(grad[nv], vals)
end
end
return transpose(jac)
end
function jacobian(vf::Array{Taylor1{TaylorN{T}},1}) where {T<:Number}
vv = convert(Array{TaylorN{Taylor1{T}},1}, vf)
jacobian(vv)
end
"""
```
jacobian!(jac, vf)
jacobian!(jac, vf, [vals])
```
Compute the jacobian matrix of `vf`, a vector of `TaylorN` polynomials
evaluated at the vector `vals`, and write results to `jac`. If `vals` is omitted,
it is evaluated at zero.
"""
function jacobian!(jac::Array{T,2}, vf::Array{TaylorN{T},1}) where {T<:Number}
numVars = get_numvars()
@assert length(vf) == numVars
@assert (numVars, numVars) == size(jac)
for comp2 = 1:numVars
for comp1 = 1:numVars
@inbounds jac[comp1,comp2] = vf[comp1][1][comp2]
end
end
nothing
end
function jacobian!(jac::Array{T,2}, vf::Array{TaylorN{T},1},
vals::Array{T,1}) where {T<:Number}
numVars = get_numvars()
@assert length(vf) == numVars == length(vals)
@assert (numVars, numVars) == size(jac)
for comp = 1:numVars
@inbounds for nv = 1:numVars
jac[nv,comp] = evaluate(differentiate(vf[nv], comp), vals)
end
end
nothing
end
"""
```
hessian(f)
hessian(f, [vals])
```
Return the hessian matrix (jacobian of the gradient) of `f::TaylorN`,
evaluated at the vector `vals`. If `vals` is omitted, it is evaluated at
zero.
"""
hessian(f::TaylorN{T}, vals::Array{S,1}) where {T<:Number,S<:Number} =
(R = promote_type(T,S); jacobian( gradient(f), vals::Array{R,1}) )
hessian(f::TaylorN{T}) where {T<:Number} = hessian( f, zeros(T, get_numvars()) )
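# Illustrative usage (sketch; assumes the package's `set_variables`, defined elsewhere):
#   julia> x, y = set_variables("x y", order=3);
#   julia> f = x^2 + 3x*y;
#   julia> ∇(f)          # [2x + 3y, 3x]
#   julia> hessian(f)    # [2.0 3.0; 3.0 0.0]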
"""
```
hessian!(hes, f)
hessian!(hes, f, [vals])
```
Return the hessian matrix (jacobian of the gradient) of `f::TaylorN`,
evaluated at the vector `vals`, and write results to `hes`. If `vals` is
omitted, it is evaluated at zero.
"""
hessian!(hes::Array{T,2}, f::TaylorN{T}, vals::Array{T,1}) where {T<:Number} =
jacobian!(hes, gradient(f), vals)
hessian!(hes::Array{T,2}, f::TaylorN{T}) where {T<:Number} =
jacobian!(hes, gradient(f))
## Integration ##
"""
integrate(a, r)
Integrate the `a::HomogeneousPolynomial` with respect to the `r`-th
variable. The returned `HomogeneousPolynomial` has no added constant of
integration. If the order of `a` corresponds to `get_order()`, a zero
`HomogeneousPolynomial` of 0-th order is returned.
"""
function integrate(a::HomogeneousPolynomial, r::Int)
@assert 1 ≤ r ≤ get_numvars()
order_max = get_order()
# NOTE: the following returns order 0, but could be get_order(), or get_order(a)
a.order == order_max && return HomogeneousPolynomial(zero(a[1]/1), 0)
@inbounds posTb = pos_table[a.order+2]
@inbounds num_coeffs = size_table[a.order+1]
T = promote_type(TS.numtype(a), TS.numtype(a[1]/1))
coeffs = zeros(T, size_table[a.order+2])
ct = deepcopy(coeff_table[a.order+1])
@inbounds for i = 1:num_coeffs
# iind = @isonethread coeff_table[a.order+1][i]
iind = ct[i]
n = iind[r]
n == order_max && continue
iind[r] += 1
kdic = in_base(get_order(), iind)
pos = posTb[kdic]
coeffs[pos] = a[i] / (n+1)
iind[r] -= 1
end
return HomogeneousPolynomial(coeffs, a.order+1)
end
integrate(a::HomogeneousPolynomial, s::Symbol) = integrate(a, lookupvar(s))
"""
integrate(a, r, [x0])
Integrate the `a::TaylorN` series with respect to the `r`-th variable,
where `x0` the integration constant and must be independent
of the `r`-th variable; if `x0` is omitted, it is taken as zero.
"""
function integrate(a::TaylorN, r::Int)
T = promote_type(TS.numtype(a), TS.numtype(a[0]/1))
order_max = min(get_order(), a.order+1)
coeffs = zeros(HomogeneousPolynomial{T}, order_max)
@inbounds for ord = 0:order_max-1
coeffs[ord+1] = integrate( a[ord], r)
end
return TaylorN(coeffs)
end
function integrate(a::TaylorN, r::Int, x0::TaylorN)
# Check that the constant of integration is independent of the r-th variable
@assert differentiate(x0, r) == 0.0 """
The integration constant ($x0) must be independent of the
$(_params_TaylorN_.variable_names[r]) variable"""
res = integrate(a, r)
return x0+res
end
integrate(a::TaylorN, r::Int, x0) =
integrate(a,r,TaylorN(HomogeneousPolynomial([convert(TS.numtype(a),x0)], 0)))
integrate(a::TaylorN, s::Symbol) = integrate(a, lookupvar(s))
integrate(a::TaylorN, s::Symbol, x0::TaylorN) = integrate(a, lookupvar(s), x0)
integrate(a::TaylorN, s::Symbol, x0) = integrate(a, lookupvar(s), x0)
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 6785 | # This file is part of the Taylor1Series.jl Julia package, MIT license
#
# Luis Benet & David P. Sanders
# UNAM
#
# MIT Expat license
#
"""
AbstractSeries{T<:Number} <: Number
Parameterized abstract type for [`Taylor1`](@ref),
[`HomogeneousPolynomial`](@ref) and [`TaylorN`](@ref).
"""
abstract type AbstractSeries{T<:Number} <: Number end
## Constructors ##
######################### Taylor1
"""
Taylor1{T<:Number} <: AbstractSeries{T}
DataType for polynomial expansions in one independent variable.
**Fields:**
- `coeffs :: Array{T,1}` Expansion coefficients; the ``i``-th
component is the coefficient of degree ``i-1`` of the expansion.
- `order :: Int` Maximum order (degree) of the polynomial.
Note that `Taylor1` variables are callable. For more information, see
[`evaluate`](@ref).
"""
struct Taylor1{T<:Number} <: AbstractSeries{T}
coeffs :: Array{T,1}
order :: Int
## Inner constructor ##
function Taylor1{T}(coeffs::Array{T,1}, order::Int) where T<:Number
resize_coeffs1!(coeffs, order)
return new{T}(coeffs, order)
end
end
## Outer constructors ##
Taylor1(x::Taylor1{T}) where {T<:Number} = x
Taylor1(coeffs::Array{T,1}, order::Int) where {T<:Number} = Taylor1{T}(coeffs, order)
Taylor1(coeffs::Array{T,1}) where {T<:Number} = Taylor1(coeffs, length(coeffs)-1)
function Taylor1(x::T, order::Int) where {T<:Number}
v = [zero(x) for _ in 1:order+1]
v[1] = deepcopy(x)
return Taylor1(v, order)
end
# Methods using 1-d views to create Taylor1's
Taylor1(a::SubArray{T,1}, order::Int) where {T<:Number} = Taylor1(a.parent[a.indices...], order)
Taylor1(a::SubArray{T,1}) where {T<:Number} = Taylor1(a.parent[a.indices...])
# Shortcut to define Taylor1 independent variables
"""
Taylor1([T::Type=Float64], order::Int)
Shortcut to define the independent variable of a `Taylor1{T}` polynomial of
given `order`. The default type for `T` is `Float64`.
```julia
julia> Taylor1(16)
1.0 t + 𝒪(t¹⁷)
julia> Taylor1(Rational{Int}, 4)
1//1 t + 𝒪(t⁵)
```
"""
Taylor1(::Type{T}, order::Int) where {T<:Number} = Taylor1( [zero(T), one(T)], order)
Taylor1(order::Int) = Taylor1(Float64, order)
######################### HomogeneousPolynomial
"""
HomogeneousPolynomial{T<:Number} <: AbstractSeries{T}
DataType for homogeneous polynomials in many (>1) independent variables.
**Fields:**
- `coeffs :: Array{T,1}` Expansion coefficients of the homogeneous
polynomial; the ``i``-th component is related to a monomial, where the degrees
of the independent variables are specified by `coeff_table[order+1][i]`.
- `order :: Int` order (degree) of the homogeneous polynomial.
Note that `HomogeneousPolynomial` variables are callable. For more information,
see [`evaluate`](@ref).
"""
struct HomogeneousPolynomial{T<:Number} <: AbstractSeries{T}
coeffs :: Array{T,1}
order :: Int
function HomogeneousPolynomial{T}(coeffs::Array{T,1}, order::Int) where T<:Number
resize_coeffsHP!(coeffs, order)
return new{T}(coeffs, order)
end
end
HomogeneousPolynomial(x::HomogeneousPolynomial{T}) where {T<:Number} = x
HomogeneousPolynomial(coeffs::Array{T,1}, order::Int) where {T<:Number} =
HomogeneousPolynomial{T}(coeffs, order)
HomogeneousPolynomial(coeffs::Array{T,1}) where {T<:Number} =
HomogeneousPolynomial(coeffs, orderH(coeffs))
HomogeneousPolynomial(x::T, order::Int) where {T<:Number} =
HomogeneousPolynomial([x], order)
# Shortcut to define HomogeneousPolynomial independent variable
"""
HomogeneousPolynomial([T::Type=Float64], nv::Int)
Shortcut to define the `nv`-th independent `HomogeneousPolynomial{T}`.
The default type for `T` is `Float64`.
```julia
julia> HomogeneousPolynomial(1)
1.0 x₁
julia> HomogeneousPolynomial(Rational{Int}, 2)
1//1 x₂
```
"""
function HomogeneousPolynomial(::Type{T}, nv::Int) where {T<:Number}
@assert 0 < nv ≤ get_numvars()
v = zeros(T, get_numvars())
@inbounds v[nv] = one(T)
return HomogeneousPolynomial(v, 1)
end
HomogeneousPolynomial(nv::Int) = HomogeneousPolynomial(Float64, nv)
######################### TaylorN
"""
TaylorN{T<:Number} <: AbstractSeries{T}
DataType for polynomial expansions in many (>1) independent variables.
**Fields:**
- `coeffs :: Array{HomogeneousPolynomial{T},1}` Vector containing the
`HomogeneousPolynomial` entries. The ``i``-th component corresponds to the
homogeneous polynomial of degree ``i-1``.
- `order :: Int` maximum order of the polynomial expansion.
Note that `TaylorN` variables are callable. For more information, see
[`evaluate`](@ref).
"""
struct TaylorN{T<:Number} <: AbstractSeries{T}
coeffs :: Array{HomogeneousPolynomial{T},1}
order :: Int
function TaylorN{T}(v::Array{HomogeneousPolynomial{T},1}, order::Int) where T<:Number
coeffs = isempty(v) ? zeros(HomogeneousPolynomial{T}, order) : zeros(v[1], order)
@inbounds for i in eachindex(v)
ord = v[i].order
if ord ≤ order
coeffs[ord+1] += v[i]
end
end
new{T}(coeffs, order)
end
end
TaylorN(x::TaylorN{T}) where {T<:Number} = x
function TaylorN(x::Array{HomogeneousPolynomial{T},1}, order::Int) where {T<:Number}
if order == 0
order = maxorderH(x)
end
return TaylorN{T}(x, order)
end
TaylorN(x::Array{HomogeneousPolynomial{T},1}) where {T<:Number} =
TaylorN(x, maxorderH(x))
TaylorN(x::HomogeneousPolynomial{T}, order::Int) where {T<:Number} =
TaylorN( [x], order )
TaylorN(x::HomogeneousPolynomial{T}) where {T<:Number} = TaylorN(x, x.order)
TaylorN(x::T, order::Int) where {T<:Number} =
TaylorN(HomogeneousPolynomial([x], 0), order)
# Shortcut to define TaylorN independent variables
"""
TaylorN([T::Type=Float64], nv::Int; [order::Int=get_order()])
Shortcut to define the `nv`-th independent `TaylorN{T}` variable as a
polynomial. The order is defined through the keyword parameter `order`,
whose default corresponds to `get_order()`. The default type for
`T` is `Float64`.
```julia
julia> TaylorN(1)
1.0 x₁ + 𝒪(‖x‖⁷)
julia> TaylorN(Rational{Int},2)
1//1 x₂ + 𝒪(‖x‖⁷)
```
"""
TaylorN(::Type{T}, nv::Int; order::Int=get_order()) where {T<:Number} =
TaylorN( HomogeneousPolynomial(T, nv), order )
TaylorN(nv::Int; order::Int=get_order()) = TaylorN(Float64, nv, order=order)
# A `Number` which is not an `AbstractSeries`
const NumberNotSeries = Union{Real,Complex}
# A `Number` which is not `TaylorN` nor a `HomogeneousPolynomial`
const NumberNotSeriesN = Union{Real,Complex,Taylor1}
## Additional Taylor1 and TaylorN outer constructor ##
Taylor1{T}(x::S) where {T<:Number,S<:NumberNotSeries} = Taylor1([convert(T,x)], 0)
TaylorN{T}(x::S) where {T<:Number,S<:NumberNotSeries} = TaylorN(convert(T, x), TaylorSeries.get_order())
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 7786 | # This file is part of the Taylor1Series.jl Julia package, MIT license
#
# Luis Benet & David P. Sanders
# UNAM
#
# MIT Expat license
#
## Conversion
convert(::Type{Taylor1{T}}, a::Taylor1) where {T<:Number} =
Taylor1(convert(Array{T,1}, a.coeffs), a.order)
convert(::Type{Taylor1{T}}, a::Taylor1{T}) where {T<:Number} = a
convert(::Type{Taylor1{Rational{T}}}, a::Taylor1{S}) where
{T<:Integer,S<:AbstractFloat} = Taylor1(rationalize.(a[:]), a.order)
convert(::Type{Taylor1{T}}, b::Array{T,1}) where {T<:Number} =
Taylor1(b, length(b)-1)
convert(::Type{Taylor1{T}}, b::Array{S,1}) where {T<:Number,S<:Number} =
Taylor1(convert(Array{T,1},b), length(b)-1)
convert(::Type{Taylor1{T}}, b::S) where {T<:Number,S<:Number} =
Taylor1([convert(T,b)], 0)
convert(::Type{Taylor1{T}}, b::T) where {T<:Number} = Taylor1([b], 0)
convert(::Type{Taylor1}, a::T) where {T<:Number} = Taylor1(a, 0)
convert(::Type{HomogeneousPolynomial{T}}, a::HomogeneousPolynomial) where {T<:Number} =
HomogeneousPolynomial(convert(Array{T,1}, a.coeffs), a.order)
convert(::Type{HomogeneousPolynomial{T}}, a::HomogeneousPolynomial{T}) where {T<:Number} =
a
function convert(::Type{HomogeneousPolynomial{Rational{T}}},
a::HomogeneousPolynomial{S}) where {T<:Integer,S<:AbstractFloat}
la = length(a.coeffs)
v = Array{Rational{T}}(undef, la)
v .= rationalize.(a[1:la], tol=eps(one(S)))
return HomogeneousPolynomial(v, a.order)
end
convert(::Type{HomogeneousPolynomial{T}}, b::Array{S,1}) where {T<:Number,S<:Number} =
HomogeneousPolynomial(convert(Array{T,1}, b), orderH(b))
convert(::Type{HomogeneousPolynomial{T}}, b::S) where {T<:Number,S<:Number}=
HomogeneousPolynomial([convert(T,b)], 0)
convert(::Type{HomogeneousPolynomial{T}}, b::Array{T,1}) where {T<:Number} =
HomogeneousPolynomial(b, orderH(b))
convert(::Type{HomogeneousPolynomial{T}}, b::T) where {T<:Number} =
HomogeneousPolynomial([b], 0)
convert(::Type{HomogeneousPolynomial}, a::Number) = HomogeneousPolynomial([a],0)
convert(::Type{TaylorN{T}}, a::TaylorN) where {T<:Number} =
TaylorN( convert(Array{HomogeneousPolynomial{T},1}, a.coeffs), a.order)
convert(::Type{TaylorN{T}}, a::TaylorN{T}) where {T<:Number} = a
convert(::Type{TaylorN{T}}, b::HomogeneousPolynomial{S}) where {T<:Number,S<:Number} =
TaylorN( [convert(HomogeneousPolynomial{T}, b)], b.order)
convert(::Type{TaylorN{T}}, b::Array{HomogeneousPolynomial{S},1}) where {T<:Number,S<:Number} =
TaylorN( convert(Array{HomogeneousPolynomial{T},1}, b), maxorderH(b))
convert(::Type{TaylorN{T}}, b::S) where {T<:Number,S<:Number} =
TaylorN( [HomogeneousPolynomial([convert(T, b)], 0)], 0)
convert(::Type{TaylorN{T}}, b::HomogeneousPolynomial{T}) where {T<:Number} =
TaylorN( [b], b.order)
convert(::Type{TaylorN{T}}, b::Array{HomogeneousPolynomial{T},1}) where {T<:Number} =
TaylorN( b, maxorderH(b))
convert(::Type{TaylorN{T}}, b::T) where {T<:Number} =
TaylorN( [HomogeneousPolynomial([b], 0)], 0)
convert(::Type{TaylorN}, b::Number) = TaylorN( [HomogeneousPolynomial([b], 0)], 0)
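# Illustrative usage (sketch, not from the original file): conversions promote the
# coefficient type, e.g. `convert(Taylor1{Float64}, Taylor1([1, 2, 3]))` yields a
# Taylor1{Float64} with coefficients [1.0, 2.0, 3.0] and the same order.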
function convert(::Type{TaylorN{Taylor1{T}}}, s::Taylor1{TaylorN{T}}) where {T<:NumberNotSeries}
orderN = maximum(get_order.(s[:]))
r = zeros(HomogeneousPolynomial{Taylor1{T}}, orderN)
v = zeros(T, s.order+1)
@inbounds for ordT in eachindex(s)
v[ordT+1] = one(T)
@inbounds for ordHP in 0:s[ordT].order
@inbounds for ic in eachindex(s[ordT][ordHP].coeffs)
coef = s[ordT][ordHP][ic]
r[ordHP+1][ic] += Taylor1( coef.*v )
end
end
v[ordT+1] = zero(T)
end
return TaylorN(r)
end
function convert(::Type{Taylor1{TaylorN{T}}}, s::TaylorN{Taylor1{T}}) where {T<:NumberNotSeries}
ordert = 0
for ordHP in eachindex(s)
ordert = max(ordert, s[ordHP][1].order)
end
vT = Array{TaylorN{T}}(undef, ordert+1)
@inbounds for ordT in eachindex(vT)
vT[ordT] = TaylorN(zero(T), s.order)
end
@inbounds for ordN in eachindex(s)
vHP = HomogeneousPolynomial(zeros(T, length(s[ordN])))
@inbounds for ihp in eachindex(s[ordN].coeffs)
@inbounds for ind in eachindex(s[ordN][ihp].coeffs)
c = s[ordN][ihp][ind-1]
vHP[ihp] = c
vT[ind] += TaylorN(vHP, s.order)
vHP[ihp] = zero(T)
end
end
end
return Taylor1(vT)
end
# Promotion
promote_rule(::Type{Taylor1{T}}, ::Type{Taylor1{T}}) where {T<:Number} = Taylor1{T}
promote_rule(::Type{Taylor1{T}}, ::Type{Taylor1{S}}) where {T<:Number,S<:Number} =
Taylor1{promote_type(T,S)}
promote_rule(::Type{Taylor1{T}}, ::Type{Array{T,1}}) where {T<:Number} = Taylor1{T}
promote_rule(::Type{Taylor1{T}}, ::Type{Array{S,1}}) where {T<:Number,S<:Number} =
Taylor1{promote_type(T,S)}
promote_rule(::Type{Taylor1{T}}, ::Type{T}) where {T<:Number} = Taylor1{T}
promote_rule(::Type{Taylor1{T}}, ::Type{S}) where {T<:Number,S<:Number} =
Taylor1{promote_type(T,S)}
promote_rule(::Type{Taylor1{Taylor1{T}}}, ::Type{Taylor1{T}}) where {T<:Number} =
Taylor1{Taylor1{T}}
promote_rule(::Type{HomogeneousPolynomial{T}},
::Type{HomogeneousPolynomial{S}}) where {T<:Number,S<:Number} =
HomogeneousPolynomial{promote_type(T,S)}
promote_rule(::Type{HomogeneousPolynomial{T}},
::Type{HomogeneousPolynomial{T}}) where {T<:Number} = HomogeneousPolynomial{T}
promote_rule(::Type{HomogeneousPolynomial{T}},
::Type{Array{S,1}}) where {T<:Number,S<:Number} = HomogeneousPolynomial{promote_type(T,S)}
promote_rule(::Type{HomogeneousPolynomial{T}}, ::Type{S}) where
{T<:Number,S<:NumberNotSeries} = HomogeneousPolynomial{promote_type(T,S)}
promote_rule(::Type{TaylorN{T}}, ::Type{TaylorN{S}}) where {T<:Number,S<:Number} =
TaylorN{promote_type(T,S)}
promote_rule(::Type{TaylorN{T}}, ::Type{HomogeneousPolynomial{S}}) where
{T<:Number,S<:Number} = TaylorN{promote_type(T,S)}
promote_rule(::Type{TaylorN{T}}, ::Type{Array{HomogeneousPolynomial{S},1}}) where
{T<:Number,S<:Number} = TaylorN{promote_type(T,S)}
promote_rule(::Type{TaylorN{T}}, ::Type{S}) where {T<:Number,S<:Number} =
TaylorN{promote_type(T,S)}
promote_rule(::Type{S}, ::Type{T}) where
{S<:AbstractIrrational,T<:AbstractSeries} = promote_rule(T,S)
promote_rule(::Type{Taylor1{T}}, ::Type{TaylorN{S}}) where {T<:NumberNotSeries,S<:NumberNotSeries} =
throw(ArgumentError("There is no reasonable promotion among `Taylor1{$T}` and `TaylorN{$S}` types"))
promote_rule(::Type{Taylor1{TaylorN{T}}}, ::Type{TaylorN{Taylor1{S}}}) where
{T<:NumberNotSeries, S<:NumberNotSeries} = Taylor1{TaylorN{promote_type(T,S)}}
promote_rule(::Type{TaylorN{Taylor1{T}}}, ::Type{Taylor1{TaylorN{S}}}) where
{T<:NumberNotSeries, S<:NumberNotSeries} = Taylor1{TaylorN{promote_type(T,S)}}
# Nested Taylor1's
function promote(a::Taylor1{Taylor1{T}}, b::Taylor1{T}) where {T<:NumberNotSeriesN}
order_a = get_order(a)
order_b = get_order(b)
zb = zero(b)
new_bcoeffs = similar(a.coeffs)
new_bcoeffs[1] = b
@inbounds for ind in 2:order_a+1
new_bcoeffs[ind] = zb
end
return a, Taylor1(b, order_a)
end
promote(b::Taylor1{T}, a::Taylor1{Taylor1{T}}) where {T<:NumberNotSeriesN} =
reverse(promote(a, b))
# float
float(::Type{Taylor1{T}}) where T<:Number = Taylor1{float(T)}
float(::Type{HomogeneousPolynomial{T}}) where T<:Number = HomogeneousPolynomial{float(T)}
float(::Type{TaylorN{T}}) where T<:Number = TaylorN{float(T)}
float(x::Taylor1{T}) where T<:Number = convert(Taylor1{float(T)}, x)
float(x::HomogeneousPolynomial{T}) where T<:Number = convert(HomogeneousPolynomial{float(T)}, x)
float(x::TaylorN{T}) where T<:Number = convert(TaylorN{float(T)}, x)
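# Illustrative usage (sketch, not from the original file): `float` acts both on the
# type and on the values,
#   float(Taylor1{Int}) === Taylor1{Float64}         # true
#   float(Taylor1([1, 2, 3])) isa Taylor1{Float64}   # true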
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 7537 | # This file is part of the TaylorSeries.jl Julia package, MIT license
#
# Luis Benet & David P. Sanders
# UNAM
#
# MIT Expat license
#
#=
This file contains some dictionary and functions to build
the dictionaries `_dict_unary_calls` and `_dict_binary_calls`
which allow to call the internal mutating functions. This
may be used to improve memory usage, e.g., to construct the
jet-coefficients used to integrate ODEs.
=#
"""
`_InternalMutFuncs`
Contains parameters and expressions that allow a simple
programmatic construction for calling the internal mutating
functions.
"""
struct _InternalMutFuncs{N}
namef :: Symbol # internal name of the function
argsf :: NTuple{N,Symbol} # arguments
defexpr :: Expr # defining expr
auxexpr :: Expr # auxiliary expr
end
# Constructor
function _InternalMutFuncs( namef::Tuple )
if length(namef) == 3
return _InternalMutFuncs( namef[1], namef[2], namef[3], Expr(:nothing) )
else
return _InternalMutFuncs( namef...)
end
end
"""
`_dict_binary_ops`
`Dict{Symbol, Tuple}` with the information to
construct the `_InternalMutFuncs` related to binary
operations.
The keys correspond to the function symbols.
The entries of each tuple are the function name (e.g. `add!`), a tuple
with the function arguments, and an `Expr` with the calling pattern. The
convention for the arguments of the functions and the calling pattern
is to use `:_res` for the (mutated) result, `:_arg1` and `:_arg2`
for the required arguments, and `:_k` for the computed order
of `:_res`.
"""
const _dict_binary_ops = Dict(
:+ => (:add!, (:_res, :_arg1, :_arg2, :_k), :(_res = _arg1 + _arg2)),
:- => (:subst!, (:_res, :_arg1, :_arg2, :_k), :(_res = _arg1 - _arg2)),
:* => (:mul!, (:_res, :_arg1, :_arg2, :_k), :(_res = _arg1 * _arg2)),
:/ => (:div!, (:_res, :_arg1, :_arg2, :_k), :(_res = _arg1 / _arg2)),
:^ => (:pow!, (:_res, :_arg1, :_aux, :_arg2, :_k), :(_res = _arg1 ^ float(_arg2)), :(_aux = zero(_arg1))),
);
"""
`_dict_unary_ops`
`Dict{Symbol, Tuple}` with the information to
construct the `_InternalMutFuncs` related to unary
operations.
The keys correspond to the function symbols.
The entries of each tuple are the function name (e.g. `add!`), a tuple
with the function arguments, and an `Expr` with the calling pattern. The
convention for the arguments of the functions and the calling pattern
is to use `:_res` for the (mutated) result, `:_arg1`, for the required
argument, possibly `:_aux` when there is an auxiliary expression
needed, and `:_k` for the computed order of `:_res`. When an auxiliary
expression is required, an `Expr` defining its calling pattern is
added as the last entry of the tuple.
"""
const _dict_unary_ops = Dict(
:+ => (:add!, (:_res, :_arg1, :_k), :(_res = + _arg1)),
:- => (:subst!, (:_res, :_arg1, :_k), :(_res = - _arg1)),
:sqr => (:sqr!, (:_res, :_arg1, :_k), :(_res = sqr(_arg1))),
:sqrt => (:sqrt!, (:_res, :_arg1, :_k), :(_res = sqrt(_arg1))),
:exp => (:exp!, (:_res, :_arg1, :_k), :(_res = exp(_arg1))),
:expm1 => (:expm1!, (:_res, :_arg1, :_k), :(_res = expm1(_arg1))),
:log => (:log!, (:_res, :_arg1, :_k), :(_res = log(_arg1))),
:log1p => (:log1p!, (:_res, :_arg1, :_k), :(_res = log1p(_arg1))),
:identity => (:identity!, (:_res, :_arg1, :_k), :(_res = identity(_arg1))),
:zero => (:zero!, (:_res, :_arg1, :_k), :(_res = zero(_arg1))),
:one => (:one!, (:_res, :_arg1, :_k), :(_res = one(_arg1))),
:abs => (:abs!, (:_res, :_arg1, :_k), :(_res = abs(_arg1))),
:abs2 => (:abs2!, (:_res, :_arg1, :_k), :(_res = abs2(_arg1))),
:deg2rad => (:deg2rad!, (:_res, :_arg1, :_k), :(_res = deg2rad(_arg1))),
:rad2deg => (:rad2deg!, (:_res, :_arg1, :_k), :(_res = rad2deg(_arg1))),
#
:sin => (:sincos!, (:_res, :_aux, :_arg1, :_k), :(_res = sin(_arg1)),
:(_aux = cos(_arg1))),
:cos => (:sincos!, (:_aux, :_res, :_arg1, :_k), :(_res = cos(_arg1)),
:(_aux = sin(_arg1))),
:sinpi => (:sincospi!, (:_res, :_aux, :_arg1, :_k), :(_res = sinpi(_arg1)),
:(_aux = cospi(_arg1))),
:cospi => (:sincospi!, (:_aux, :_res, :_arg1, :_k), :(_res = cospi(_arg1)),
:(_aux = sinpi(_arg1))),
:tan => (:tan!, (:_res, :_arg1, :_aux, :_k), :(_res = tan(_arg1)),
:(_aux = tan(_arg1)^2)),
:asin => (:asin!, (:_res, :_arg1, :_aux, :_k), :(_res = asin(_arg1)),
:(_aux = sqrt(1 - _arg1^2))),
:acos => (:acos!, (:_res, :_arg1, :_aux, :_k), :(_res = acos(_arg1)),
:(_aux = sqrt(1 - _arg1^2))),
:atan => (:atan!, (:_res, :_arg1, :_aux, :_k), :(_res = atan(_arg1)),
:(_aux = 1 + _arg1^2)),
:sinh => (:sinhcosh!, (:_res, :_aux, :_arg1, :_k), :(_res = sinh(_arg1)),
:(_aux = cosh(_arg1))),
:cosh => (:sinhcosh!, (:_aux, :_res, :_arg1, :_k), :(_res = cosh(_arg1)),
:(_aux = sinh(_arg1))),
:tanh => (:tanh!, (:_res, :_arg1, :_aux, :_k), :(_res = tanh(_arg1)),
:(_aux = tanh(_arg1)^2)),
:asinh => (:asinh!, (:_res, :_arg1, :_aux, :_k), :(_res = asinh(_arg1)),
:(_aux = sqrt(_arg1^2 + 1))),
:acosh => (:acosh!, (:_res, :_arg1, :_aux, :_k), :(_res = acosh(_arg1)),
:(_aux = sqrt(_arg1^2 - 1))),
:atanh => (:atanh!, (:_res, :_arg1, :_aux, :_k), :(_res = atanh(_arg1)),
:(_aux = 1 - _arg1^2)),
);
"""
```
_internalmutfunc_call( fn :: _InternalMutFuncs )
```
Creates the appropriate call to the internal mutating
function defined by the `_InternalMutFuncs` object.
This is used to construct [`_dict_unary_calls`](@ref)
and [`_dict_binary_calls`](@ref).
The call contains the prefix `TaylorSeries.`.
"""
_internalmutfunc_call( fn :: _InternalMutFuncs ) = (
Expr( :call, Meta.parse("TaylorSeries.$(fn.namef)"), fn.argsf... ), fn.defexpr, fn.auxexpr )
"""
`_populate_dicts!()`
Function that populates the internal dictionaries [`_dict_unary_calls`](@ref) and
[`_dict_binary_calls`](@ref)
"""
function _populate_dicts!()
#Populates the constant vector `_dict_unary_calls`.
_dict_unary_calls = Dict{Symbol, NTuple{3,Expr}}()
for kk in keys(_dict_unary_ops)
res = _internalmutfunc_call( _InternalMutFuncs(_dict_unary_ops[kk]) )
push!(_dict_unary_calls, kk => res )
end
#Populates the constant vector `_dict_binary_calls`.
_dict_binary_calls = Dict{Symbol, NTuple{3,Expr}}()
for kk in keys(_dict_binary_ops)
res = _internalmutfunc_call( _InternalMutFuncs(_dict_binary_ops[kk]) )
push!(_dict_binary_calls, kk => res )
end
return _dict_unary_calls, _dict_binary_calls
end
const _dict_unary_calls, _dict_binary_calls = _populate_dicts!()
@doc """
`_dict_unary_calls::Dict{Symbol, NTuple{3,Expr}}`
Dictionary with the expressions that define the
internal unary functions and the auxiliary functions,
whenever they exist. The keys correspond to those
functions, passed as symbols, with the defined
internal mutating functions.
Evaluating the entries generates expressions that represent
the actual calls to the internal mutating functions.
""" _dict_unary_calls
@doc """
`_dict_binary_calls::Dict{Symbol, NTuple{3,Expr}}`
Dictionary with the expressions that define the
internal binary functions and the auxiliary functions,
whenever they exist. The keys correspond to those
functions, passed as symbols, with the defined
internal mutating functions.
Evaluating the entries generates expressions that represent
the actual calls to the internal mutating functions.
""" _dict_binary_calls
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 19148 | # This file is part of the TaylorSeries.jl Julia package, MIT license
#
# Luis Benet & David P. Sanders
# UNAM
#
# MIT Expat license
#
## Evaluating ##
"""
evaluate(a, [dx])
Evaluate a `Taylor1` polynomial using Horner's rule (hand coded). If `dx` is
omitted, its value is considered as zero. Note that the syntax `a(dx)` is
equivalent to `evaluate(a,dx)`, and `a()` is equivalent to `evaluate(a)`.
"""
function evaluate(a::Taylor1{T}, dx::T) where {T<:Number}
@inbounds suma = zero(a[end])
@inbounds for k in reverse(eachindex(a))
suma = suma * dx + a[k]
end
return suma
end
function evaluate(a::Taylor1{T}, dx::S) where {T<:Number, S<:Number}
suma = a[end]*zero(dx)
@inbounds for k in reverse(eachindex(a))
suma = suma * dx + a[k]
end
return suma
end
evaluate(a::Taylor1{T}) where {T<:Number} = a[0]
"""
evaluate(x, δt)
Evaluates each element of `x::AbstractArray{Taylor1{T}}`,
representing the dependent variables of an ODE, at *time* δt. Note that the
syntax `x(δt)` is equivalent to `evaluate(x, δt)`, and `x()`
is equivalent to `evaluate(x)`.
"""
evaluate(x::AbstractArray{Taylor1{T}}, δt::S) where
{T<:Number, S<:Number} = evaluate.(x, δt)
evaluate(a::AbstractArray{Taylor1{T}}) where {T<:Number} = getcoeff.(a, 0)
"""
evaluate(a::Taylor1, x::Taylor1)
Substitute `x::Taylor1` as independent variable in a `a::Taylor1` polynomial.
Note that the syntax `a(x)` is equivalent to `evaluate(a, x)`.
"""
evaluate(a::Taylor1{T}, x::Taylor1{S}) where {T<:Number, S<:Number} =
evaluate(promote(a, x)...)
function evaluate(a::Taylor1{T}, x::Taylor1{T}) where {T<:Number}
if a.order != x.order
a, x = fixorder(a, x)
end
@inbounds suma = a[end]*zero(x)
aux = zero(suma)
_horner!(suma, a, x, aux)
return suma
end
function evaluate(a::Taylor1{Taylor1{T}}, x::Taylor1{T}) where {T<:NumberNotSeriesN}
@inbounds suma = a[end]*zero(x)
aux = zero(suma)
_horner!(suma, a, x, aux)
return suma
end
function evaluate(a::Taylor1{T}, x::Taylor1{Taylor1{T}}) where {T<:NumberNotSeriesN}
@inbounds suma = a[end]*zero(x)
aux = zero(suma)
_horner!(suma, a, x, aux)
return suma
end
evaluate(p::Taylor1{T}, x::AbstractArray{S}) where {T<:Number, S<:Number} =
evaluate.(Ref(p), x)
# Substitute a TaylorN into a Taylor1
function evaluate(a::Taylor1{T}, dx::TaylorN{T}) where {T<:NumberNotSeries}
suma = TaylorN( zero(T), dx.order)
aux = TaylorN( zero(T), dx.order)
_horner!(suma, a, dx, aux)
return suma
end
function evaluate(a::Taylor1{T}, dx::Taylor1{TaylorN{T}}) where {T<:NumberNotSeries}
if a.order != dx.order
a, dx = fixorder(a, dx)
end
suma = Taylor1( zero(dx[0]), a.order)
aux = Taylor1( zero(dx[0]), a.order)
_horner!(suma, a, dx, aux)
return suma
end
# Evaluate a Taylor1{TaylorN{T}} on Vector{TaylorN} is interpreted
# as a substitution on the TaylorN vars
function evaluate(a::Taylor1{TaylorN{T}}, dx::Vector{TaylorN{T}}) where {T<:NumberNotSeries}
@assert length(dx) == get_numvars()
suma = Taylor1( zero(a[0]), a.order)
suma.coeffs .= evaluate.(a[:], Ref(dx))
return suma
end
function evaluate(a::Taylor1{TaylorN{T}}, ind::Int, dx::T) where {T<:NumberNotSeries}
@assert (1 ≤ ind ≤ get_numvars()) "Invalid `ind`; it must be between 1 and `get_numvars()`"
suma = Taylor1( zero(a[0]), a.order)
for ord in eachindex(suma)
for ordQ in eachindex(a[0])
_evaluate!(suma[ord], a[ord][ordQ], ind, dx)
end
end
return suma
end
function evaluate(a::Taylor1{TaylorN{T}}, ind::Int, dx::TaylorN{T}) where {T<:NumberNotSeries}
@assert (1 ≤ ind ≤ get_numvars()) "Invalid `ind`; it must be between 1 and `get_numvars()`"
suma = Taylor1( zero(a[0]), a.order)
aux = zero(dx)
for ord in eachindex(suma)
for ordQ in eachindex(a[0])
_evaluate!(suma[ord], a[ord][ordQ], ind, dx, aux)
end
end
return suma
end
#function-like behavior for Taylor1
(p::Taylor1)(x) = evaluate(p, x)
(p::Taylor1)() = evaluate(p)
#function-like behavior for Vector{Taylor1} (assumes Julia version >= 1.6)
(p::Array{Taylor1{T}})(x) where {T<:Number} = evaluate.(p, x)
(p::SubArray{Taylor1{T}})(x) where {T<:Number} = evaluate.(p, x)
(p::Array{Taylor1{T}})() where {T<:Number} = evaluate.(p)
(p::SubArray{Taylor1{T}})() where {T<:Number} = evaluate.(p)
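# Illustrative usage (sketch, not from the original file): Taylor1 objects and
# arrays of them are callable,
#   julia> p = Taylor1([1.0, 2.0, 3.0]);
#   julia> p(0.1)       # same as evaluate(p, 0.1) ≈ 1 + 2(0.1) + 3(0.1)² = 1.23
#   julia> [p, p]()     # vector of constant terms, [1.0, 1.0]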
"""
evaluate(a, [vals])
Evaluate a `HomogeneousPolynomial` polynomial at `vals`. If `vals` is omitted,
it's evaluated at zero. Note that the syntax `a(vals)` is equivalent to
`evaluate(a, vals)`; and `a()` is equivalent to `evaluate(a)`.
"""
function evaluate(a::HomogeneousPolynomial, vals::NTuple{N,<:Number}) where {N}
@assert length(vals) == get_numvars()
return _evaluate(a, vals)
end
evaluate(a::HomogeneousPolynomial{T}, vals::AbstractArray{S,1} ) where
{T<:Number,S<:NumberNotSeriesN} = evaluate(a, (vals...,))
evaluate(a::HomogeneousPolynomial, v, vals::Vararg{Number,N}) where {N} =
evaluate(a, promote(v, vals...,))
evaluate(a::HomogeneousPolynomial, v) = evaluate(a, promote(v...,))
function evaluate(a::HomogeneousPolynomial{T}) where {T}
a.order == 0 && return a[1]
return zero(a[1])
end
# Internal method that avoids checking that the length of `vals` is the appropriate
function _evaluate(a::HomogeneousPolynomial{T}, vals::NTuple) where {T}
# @assert length(vals) == get_numvars()
a.order == 0 && return a[1]*one(vals[1])
ct = coeff_table[a.order+1]
suma = zero(a[1])*vals[1]
vv = vals .^ ct[1]
for (i, a_coeff) in enumerate(a.coeffs)
iszero(a_coeff) && continue
@inbounds vv .= vals .^ ct[i]
tmp = prod( vv )
suma += a_coeff * tmp
end
return suma
end
function _evaluate!(res::TaylorN{T}, a::HomogeneousPolynomial{T}, vals::NTuple{N,<:TaylorN{T}},
valscache::Vector{TaylorN{T}}, aux::TaylorN{T}) where {N,T<:NumberNotSeries}
ct = coeff_table[a.order+1]
for el in eachindex(valscache)
power_by_squaring!(valscache[el], vals[el], aux, ct[1][el])
end
for (i, a_coeff) in enumerate(a.coeffs)
iszero(a_coeff) && continue
# valscache .= vals .^ ct[i]
@inbounds for el in eachindex(valscache)
power_by_squaring!(valscache[el], vals[el], aux, ct[i][el])
end
# aux = one(valscache[1])
for ord in eachindex(aux)
@inbounds one!(aux, valscache[1], ord)
end
for j in eachindex(valscache)
# aux *= valscache[j]
mul!(aux, valscache[j])
end
# res += a_coeff * aux
for ord in eachindex(aux)
muladd!(res, a_coeff, aux, ord)
end
end
return nothing
end
function _evaluate(a::HomogeneousPolynomial{T}, vals::NTuple{N,<:TaylorN{T}}) where
{N,T<:NumberNotSeries}
# @assert length(vals) == get_numvars()
a.order == 0 && return a[1]*one(vals[1])
suma = TaylorN(zero(T), vals[1].order)
valscache = [zero(val) for val in vals]
aux = zero(suma)
_evaluate!(suma, a, vals, valscache, aux)
return suma
end
function _evaluate(a::HomogeneousPolynomial{T}, ind::Int, val::T) where {T<:NumberNotSeries}
suma = TaylorN(zero(T), get_order())
_evaluate!(suma, a, ind, val)
return suma
end
#function-like behavior for HomogeneousPolynomial
(p::HomogeneousPolynomial)(x) = evaluate(p, x)
(p::HomogeneousPolynomial)(x, v::Vararg{Number,N}) where {N} =
evaluate(p, promote(x, v...,))
(p::HomogeneousPolynomial)() = evaluate(p)
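# Illustrative usage (sketch; assumes the package's `set_variables`, defined elsewhere):
#   julia> x, y = set_variables("x y", order=3);
#   julia> hp = (x^2 + 2x*y)[2];   # degree-2 homogeneous part, a HomogeneousPolynomial
#   julia> hp(1.0, 1.0)            # 1 + 2 = 3.0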
"""
evaluate(a, [vals]; sorting::Bool=true)
Evaluate the `TaylorN` polynomial `a` at `vals`.
If `vals` is omitted, it's evaluated at zero. The
keyword parameter `sorting` can be used to avoid
sorting (in increasing order by `abs2`) the
terms that are added.
Note that the syntax `a(vals)` is equivalent to
`evaluate(a, vals)`; and `a()` is equivalent to
`evaluate(a)`; use a(b::Bool, x) corresponds to
evaluate(a, x, sorting=b).
"""
function evaluate(a::TaylorN, vals::NTuple{N,<:Number};
sorting::Bool=true) where {N}
@assert get_numvars() == N
return _evaluate(a, vals, Val(sorting))
end
function evaluate(a::TaylorN, vals::NTuple{N,<:AbstractSeries};
sorting::Bool=false) where {N}
@assert get_numvars() == N
return _evaluate(a, vals, Val(sorting))
end
evaluate(a::TaylorN{T}, vals::AbstractVector{<:Number};
sorting::Bool=true) where {T<:NumberNotSeries} =
evaluate(a, (vals...,); sorting=sorting)
evaluate(a::TaylorN{T}, vals::AbstractVector{<:AbstractSeries};
sorting::Bool=false) where {T<:NumberNotSeries} =
evaluate(a, (vals...,); sorting=sorting)
evaluate(a::TaylorN{Taylor1{T}}, vals::AbstractVector{S};
sorting::Bool=false) where {T, S} =
evaluate(a, (vals...,); sorting=sorting)
function evaluate(a::TaylorN{T}, s::Symbol, val::S) where
{T<:Number, S<:NumberNotSeriesN}
ind = lookupvar(s)
@assert (1 ≤ ind ≤ get_numvars()) "Symbol is not a TaylorN variable; see `get_variable_names()`"
return evaluate(a, ind, val)
end
function evaluate(a::TaylorN{T}, ind::Int, val::S) where
{T<:Number, S<:NumberNotSeriesN}
@assert (1 ≤ ind ≤ get_numvars()) "Invalid `ind`; it must be between 1 and `get_numvars()`"
R = promote_type(T,S)
return _evaluate(convert(TaylorN{R}, a), ind, convert(R, val))
end
function evaluate(a::TaylorN{T}, s::Symbol, val::TaylorN) where {T<:Number}
ind = lookupvar(s)
@assert (1 ≤ ind ≤ get_numvars()) "Symbol is not a TaylorN variable; see `get_variable_names()`"
return evaluate(a, ind, val)
end
function evaluate(a::TaylorN{T}, ind::Int, val::TaylorN) where {T<:Number}
@assert (1 ≤ ind ≤ get_numvars()) "Invalid `ind`; it must be between 1 and `get_numvars()`"
a, val = fixorder(a, val)
a, val = promote(a, val)
return _evaluate(a, ind, val)
end
evaluate(a::TaylorN{T}, x::Pair{Symbol,S}) where {T, S} =
evaluate(a, first(x), last(x))
evaluate(a::TaylorN{T}) where {T<:Number} = constant_term(a)
# _evaluate
_evaluate(a::TaylorN{T}, vals::NTuple, ::Val{true}) where {T<:NumberNotSeries} =
sum( sort!(_evaluate(a, vals), by=abs2) )
_evaluate(a::TaylorN{T}, vals::NTuple, ::Val{false}) where {T<:Number} =
sum( _evaluate(a, vals) )
function _evaluate(a::TaylorN{T}, vals::NTuple{N,<:TaylorN}, ::Val{false}) where {N,T<:Number}
R = promote_type(T, TS.numtype(vals[1]))
res = TaylorN(zero(R), vals[1].order)
valscache = [zero(val) for val in vals]
aux = zero(res)
@inbounds for homPol in eachindex(a)
_evaluate!(res, a[homPol], vals, valscache, aux)
end
return res
end
function _evaluate(a::TaylorN{T}, vals::NTuple{N,<:Number}) where {N,T<:Number}
R = promote_type(T, typeof(vals[1]))
suma = zeros(R, length(a))
@inbounds for homPol in eachindex(a)
suma[homPol+1] = _evaluate(a[homPol], vals)
end
return suma
end
function _evaluate!(res::Vector{TaylorN{T}}, a::TaylorN{T}, vals::NTuple{N,<:TaylorN},
valscache::Vector{TaylorN{T}}, aux::TaylorN{T}) where {N,T<:Number}
@inbounds for homPol in eachindex(a)
_evaluate!(res[homPol+1], a[homPol], vals, valscache, aux)
end
return nothing
end
function _evaluate(a::TaylorN{T}, vals::NTuple{N,<:TaylorN}) where {N,T<:Number}
R = promote_type(T, TS.numtype(vals[1]))
suma = [TaylorN(zero(R), vals[1].order) for _ in eachindex(a)]
valscache = [zero(val) for val in vals]
aux = zero(suma[1])
_evaluate!(suma, a, vals, valscache, aux)
return suma
end
function _evaluate(a::TaylorN{T}, ind::Int, val::T) where {T<:NumberNotSeriesN}
suma = TaylorN(zero(a[0]*val), a.order)
vval = convert(numtype(suma), val)
suma, a = promote(suma, a)
@inbounds for ordQ in eachindex(a)
_evaluate!(suma, a[ordQ], ind, vval)
end
return suma
end
function _evaluate(a::TaylorN{T}, ind::Int, val::TaylorN{T}) where {T<:NumberNotSeriesN}
suma = TaylorN(zero(a[0]), a.order)
aux = zero(suma)
@inbounds for ordQ in eachindex(a)
_evaluate!(suma, a[ordQ], ind, val, aux)
end
return suma
end
function _evaluate!(suma::TaylorN{T}, a::HomogeneousPolynomial{T}, ind::Int, val::T) where
{T<:NumberNotSeriesN}
order = a.order
if order == 0
suma[0] = a[1]*one(val)
return nothing
end
vv = val .^ (0:order)
# ct = @isonethread coeff_table[order+1]
ct = deepcopy(coeff_table[order+1])
for (i, a_coeff) in enumerate(a.coeffs)
iszero(a_coeff) && continue
if ct[i][ind] == 0
suma[order][i] += a_coeff
continue
end
vpow = ct[i][ind]
red_order = order - vpow
ct[i][ind] -= vpow
kdic = in_base(get_order(), ct[i])
ct[i][ind] += vpow
pos = pos_table[red_order+1][kdic]
suma[red_order][pos] += a_coeff * vv[vpow+1]
end
return nothing
end
function _evaluate!(suma::TaylorN{T}, a::HomogeneousPolynomial{T}, ind::Int,
val::TaylorN{T}, aux::TaylorN{T}) where {T<:NumberNotSeriesN}
order = a.order
if order == 0
suma[0] = a[1]
return nothing
end
vv = zero(suma)
ct = coeff_table[order+1]
za = zero(a)
for (i, a_coeff) in enumerate(a.coeffs)
iszero(a_coeff) && continue
if ct[i][ind] == 0
suma[order][i] += a_coeff
continue
end
za[i] = a_coeff
zero!(aux)
_evaluate!(aux, za, ind, one(T))
za[i] = zero(T)
vpow = ct[i][ind]
# vv = val ^ vpow
if constant_term(val) == 0
vv = val ^ vpow
else
for ordQ in eachindex(val)
zero!(vv, ordQ)
pow!(vv, val, vv, vpow, ordQ)
end
end
for ordQ in eachindex(suma)
mul!(suma, vv, aux, ordQ)
end
end
return nothing
end
#High-dim array evaluation
function evaluate(A::AbstractArray{TaylorN{T},N}, δx::Vector{S}) where
{T<:Number, S<:Number, N}
R = promote_type(T,S)
return evaluate(convert(Array{TaylorN{R},N},A), convert(Vector{R},δx))
end
function evaluate(A::Array{TaylorN{T}}, δx::Vector{T}) where {T<:Number}
Anew = Array{T}(undef, size(A)...)
evaluate!(A, δx, Anew)
return Anew
end
evaluate(A::AbstractArray{TaylorN{T}}) where {T<:Number} = evaluate.(A)
#function-like behavior for TaylorN
(p::TaylorN)(x) = evaluate(p, x)
(p::TaylorN)() = evaluate(p)
(p::TaylorN)(s::S, x) where {S<:Union{Symbol, Int}}= evaluate(p, s, x)
(p::TaylorN)(x::Pair) = evaluate(p, first(x), last(x))
(p::TaylorN)(x, v::Vararg{T}) where {T} = evaluate(p, (x, v...,))
(p::TaylorN)(b::Bool, x) = evaluate(p, x, sorting=b)
(p::TaylorN)(b::Bool, x, v::Vararg{T}) where {T} = evaluate(p, (x, v...,), sorting=b)
#function-like behavior for AbstractArray{TaylorN{T}}
(p::Array{TaylorN{T}})(x) where {T<:Number} = evaluate(p, x)
(p::SubArray{TaylorN{T}})(x) where {T<:Number} = evaluate(p, x)
(p::Array{TaylorN{T}})() where {T<:Number} = evaluate(p)
(p::SubArray{TaylorN{T}})() where {T<:Number} = evaluate(p)
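# Illustrative usage (sketch; assumes the package's `set_variables`, defined elsewhere):
#   julia> x, y = set_variables("x y", order=3);
#   julia> f = 1 + x*y + y^2;
#   julia> f([1.0, 2.0])    # evaluate at (x, y) = (1, 2): 1 + 2 + 4 = 7.0
#   julia> f(:y => 2.0)     # partial substitution y = 2: 5.0 + 2.0 x (still a TaylorN)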
"""
evaluate!(x, δt, x0)
Evaluates each element of `x::AbstractArray{Taylor1{T}}`,
representing the Taylor expansion for the dependent variables
of an ODE at *time* `δt`. It updates the vector `x0` with the
computed values.
"""
function evaluate!(x::AbstractArray{Taylor1{T}}, δt::S,
x0::AbstractArray{T}) where {T<:Number, S<:Number}
x0 .= evaluate.( x, δt )
return nothing
end
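# Illustrative usage (sketch, not from the original file): update a preallocated
# vector with the values of the expansions at δt = 0.5,
#   julia> x = [Taylor1([0.0, 1.0]), Taylor1([1.0, -1.0])];   # [t, 1 - t]
#   julia> x0 = zeros(2);
#   julia> evaluate!(x, 0.5, x0);   # x0 is now [0.5, 0.5]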
# function evaluate!(x::AbstractArray{Taylor1{Taylor1{T}}}, δt::Taylor1{T},
# x0::AbstractArray{Taylor1{T}}) where {T<:Number}
# x0 .= evaluate.( x, Ref(δt) )
# # x0 .= evaluate.( x, δt )
# return nothing
# end
## In place evaluation of multivariable arrays
function evaluate!(x::AbstractArray{TaylorN{T}}, δx::Array{T,1},
x0::AbstractArray{T}) where {T<:Number}
x0 .= evaluate.( x, Ref(δx) )
return nothing
end
function evaluate!(x::AbstractArray{TaylorN{T}}, δx::Array{TaylorN{T},1},
x0::AbstractArray{TaylorN{T}}; sorting::Bool=true) where {T<:NumberNotSeriesN}
x0 .= evaluate.( x, Ref(δx), sorting = sorting)
return nothing
end
function evaluate!(a::TaylorN{T}, vals::NTuple{N,TaylorN{T}}, dest::TaylorN{T},
valscache::Vector{TaylorN{T}}, aux::TaylorN{T}) where {N,T<:Number}
@inbounds for homPol in eachindex(a)
_evaluate!(dest, a[homPol], vals, valscache, aux)
end
return nothing
end
function evaluate!(a::AbstractArray{TaylorN{T}}, vals::NTuple{N,TaylorN{T}},
dest::AbstractArray{TaylorN{T}}) where {N,T<:Number}
# initialize evaluation cache
valscache = [zero(val) for val in vals]
aux = zero(dest[1])
# loop over elements of `a`
for i in eachindex(a)
(!iszero(dest[i])) && zero!(dest[i])
evaluate!(a[i], vals, dest[i], valscache, aux)
end
return nothing
end
# In-place Horner methods, used when the result of an evaluation (substitution)
# is Taylor1{}
function _horner!(suma::Taylor1{T}, a::Taylor1{T}, x::Taylor1{T},
aux::Taylor1{T}) where {T<:Number}
@inbounds for k in reverse(eachindex(a))
for ord in eachindex(aux)
mul!(aux, suma, x, ord)
end
for ord in eachindex(aux)
identity!(suma, aux, ord)
end
add!(suma, suma, a[k], 0)
end
return nothing
end
function _horner!(suma::Taylor1{T}, a::Taylor1{Taylor1{T}}, x::Taylor1{T},
aux::Taylor1{T}) where {T<:Number}
@inbounds for k in reverse(eachindex(a))
for ord in eachindex(aux)
mul!(aux, suma, x, ord)
end
for ord in eachindex(aux)
identity!(suma, aux, ord)
add!(suma, suma, a[k], ord)
end
end
return nothing
end
function _horner!(suma::Taylor1{Taylor1{T}}, a::Taylor1{T}, x::Taylor1{Taylor1{T}},
aux::Taylor1{Taylor1{T}}) where {T<:Number}
@inbounds for k in reverse(eachindex(a))
for ord in eachindex(aux)
mul!(aux, suma, x, ord)
end
for ord in eachindex(aux)
identity!(suma, aux, ord)
add!(suma, suma, a[k], ord)
end
end
return nothing
end
function _horner!(suma::TaylorN{T}, a::Taylor1{T}, dx::TaylorN{T},
aux::TaylorN{T}) where {T<:NumberNotSeries}
@inbounds for k in reverse(eachindex(a))
for ordQ in eachindex(suma)
zero!(aux, ordQ)
mul!(aux, suma, dx, ordQ)
end
for ordQ in eachindex(suma)
identity!(suma, aux, ordQ)
end
add!(suma, suma, a[k], 0)
end
return nothing
end
function _horner!(suma::Taylor1{TaylorN{T}}, a::Taylor1{T}, dx::Taylor1{TaylorN{T}},
aux::Taylor1{TaylorN{T}}) where {T<:NumberNotSeries}
@inbounds for k in reverse(eachindex(a))
# aux = suma * dx
for ord in eachindex(aux)
zero!(aux, ord)
mul!(aux, suma, dx, ord)
end
for ord in eachindex(aux)
identity!(suma, aux, ord)
end
add!(suma, suma, a[k], 0)
end
return suma
end | TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 44320 | # This file is part of the TaylorSeries.jl Julia package, MIT license
#
# Luis Benet & David P. Sanders
# UNAM
#
# MIT Expat license
#
# Functions
for T in (:Taylor1, :TaylorN)
@eval begin
function exp(a::$T)
order = a.order
aux = exp(constant_term(a))
aa = one(aux) * a
c = $T( aux, order )
for k in eachindex(a)
exp!(c, aa, k)
end
return c
end
function expm1(a::$T)
order = a.order
aux = expm1(constant_term(a))
aa = one(aux) * a
c = $T( aux, order )
for k in eachindex(a)
expm1!(c, aa, k)
end
return c
end
function log(a::$T)
iszero(constant_term(a)) && throw(DomainError(a,
"""The 0-th order coefficient must be non-zero in order to expand `log` around 0."""))
order = a.order
aux = log(constant_term(a))
aa = one(aux) * a
c = $T( aux, order )
for k in eachindex(a)
log!(c, aa, k)
end
return c
end
function log1p(a::$T)
# constant_term(a) < -one(constant_term(a)) && throw(DomainError(a,
# """The 0-th order coefficient must be larger than -1 in order to expand `log1`."""))
order = a.order
aux = log1p(constant_term(a))
aa = one(aux) * a
c = $T( aux, order )
for k in eachindex(a)
log1p!(c, aa, k)
end
return c
end
sin(a::$T) = sincos(a)[1]
cos(a::$T) = sincos(a)[2]
function sincos(a::$T)
order = a.order
aux = sin(constant_term(a))
aa = one(aux) * a
s = $T( aux, order )
c = $T( cos(constant_term(a)), order )
for k in eachindex(a)
sincos!(s, c, aa, k)
end
return s, c
end
sinpi(a::$T) = sincospi(a)[1]
cospi(a::$T) = sincospi(a)[2]
function sincospi(a::$T)
order = a.order
aux = sinpi(constant_term(a))
aa = one(aux) * a
s = $T( aux, order )
c = $T( cospi(constant_term(a)), order )
for k in eachindex(a)
sincospi!(s, c, aa, k)
end
return s, c
end
function tan(a::$T)
order = a.order
aux = tan(constant_term(a))
aa = one(aux) * a
c = $T(aux, order)
c2 = $T(aux^2, order)
for k in eachindex(a)
tan!(c, aa, c2, k)
end
return c
end
function asin(a::$T)
a0 = constant_term(a)
a0^2 == one(a0) && throw(DomainError(a,
"""Series expansion of asin(x) diverges at x = ±1."""))
order = a.order
aux = asin(a0)
aa = one(aux) * a
c = $T( aux, order )
r = $T( sqrt(1 - a0^2), order )
for k in eachindex(a)
asin!(c, aa, r, k)
end
return c
end
function acos(a::$T)
a0 = constant_term(a)
a0^2 == one(a0) && throw(DomainError(a,
"""Series expansion of asin(x) diverges at x = ±1."""))
order = a.order
aux = acos(a0)
aa = one(aux) * a
c = $T( aux, order )
r = $T( sqrt(1 - a0^2), order )
for k in eachindex(a)
acos!(c, aa, r, k)
end
return c
end
function atan(a::$T)
order = a.order
a0 = constant_term(a)
aux = atan(a0)
aa = one(aux) * a
c = $T( aux, order)
r = $T(one(aux) + a0^2, order)
iszero(constant_term(r)) && throw(DomainError(a,
"""Series expansion of atan(x) diverges at x = ±im."""))
for k in eachindex(a)
atan!(c, aa, r, k)
end
return c
end
function atan(a::$T, b::$T)
c = atan(a/b)
c[0] = atan(constant_term(a), constant_term(b))
return c
end
sinh(a::$T) = sinhcosh(a)[1]
cosh(a::$T) = sinhcosh(a)[2]
function sinhcosh(a::$T)
order = a.order
aux = sinh(constant_term(a))
aa = one(aux) * a
s = $T( aux, order)
c = $T( cosh(constant_term(a)), order)
for k in eachindex(a)
sinhcosh!(s, c, aa, k)
end
return s, c
end
function tanh(a::$T)
order = a.order
aux = tanh( constant_term(a) )
aa = one(aux) * a
c = $T( aux, order)
c2 = $T( aux^2, order)
for k in eachindex(a)
tanh!(c, aa, c2, k)
end
return c
end
function asinh(a::$T)
order = a.order
a0 = constant_term(a)
aux = asinh(a0)
aa = one(aux) * a
c = $T( aux, order )
r = $T( sqrt(a0^2 + 1), order )
iszero(constant_term(r)) && throw(DomainError(a,
"""Series expansion of asinh(x) diverges at x = ±im."""))
for k in eachindex(a)
asinh!(c, aa, r, k)
end
return c
end
function acosh(a::$T)
a0 = constant_term(a)
a0^2 == one(a0) && throw(DomainError(a,
"""Series expansion of acosh(x) diverges at x = ±1."""))
order = a.order
aux = acosh(a0)
aa = one(aux) * a
c = $T( aux, order )
r = $T( sqrt(a0^2 - 1), order )
for k in eachindex(a)
acosh!(c, aa, r, k)
end
return c
end
function atanh(a::$T)
order = a.order
a0 = constant_term(a)
aux = atanh(a0)
aa = one(aux) * a
c = $T( aux, order)
r = $T(one(aux) - a0^2, order)
iszero(constant_term(r)) && throw(DomainError(a,
"""Series expansion of atanh(x) diverges at x = ±1."""))
for k in eachindex(a)
atanh!(c, aa, r, k)
end
return c
end
end
end
# Recursive functions (homogeneous coefficients)
@inline function zero!(a::Taylor1{T}, k::Int) where {T<:NumberNotSeries}
a[k] = zero(a[k])
return nothing
end
@inline function zero!(a::Taylor1{T}) where {T<:NumberNotSeries}
for k in eachindex(a)
zero!(a, k)
end
return nothing
end
@inline function zero!(a::HomogeneousPolynomial{T}, k::Int) where {T<:NumberNotSeries}
a[k] = zero(a[k])
return nothing
end
@inline function zero!(a::HomogeneousPolynomial{T}) where {T<:NumberNotSeries}
for k in eachindex(a)
zero!(a, k)
end
return nothing
end
@inline function zero!(a::TaylorN{T}, k::Int) where {T<:NumberNotSeries}
zero!(a[k])
return nothing
end
@inline function zero!(a::TaylorN{T}) where {T<:NumberNotSeries}
for k in eachindex(a)
zero!(a, k)
end
return nothing
end
@inline function zero!(a::Taylor1{Taylor1{T}}, k::Int) where {T<:NumberNotSeries}
for l in eachindex(a[k])
zero!(a[k], l)
end
return nothing
end
@inline function zero!(a::Taylor1{Taylor1{T}}) where {T<:NumberNotSeries}
for k in eachindex(a)
zero!(a, k)
end
return nothing
end
@inline function zero!(a::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
zero!(a[k])
return nothing
end
@inline function zero!(a::Taylor1{TaylorN{T}}) where {T<:NumberNotSeries}
for k in eachindex(a)
zero!(a, k)
end
return nothing
end
@inline function zero!(a::TaylorN{Taylor1{T}}, k::Int) where {T<:NumberNotSeries}
for l in eachindex(a[k])
zero!(a[k][l])
end
return nothing
end
@inline function zero!(a::TaylorN{Taylor1{T}}) where {T<:NumberNotSeries}
for k in eachindex(a)
zero!(a, k)
end
return nothing
end
@inline function one!(c::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
zero!(c, k)
if k == 0
@inbounds c[0][0][1] = one(constant_term(c[0][0][1]))
end
return nothing
end
@inline function identity!(c::HomogeneousPolynomial{T}, a::HomogeneousPolynomial{T}, k::Int) where {T<:NumberNotSeries}
@inbounds c[k] = identity(a[k])
return nothing
end
@inline function identity!(c::HomogeneousPolynomial{Taylor1{T}}, a::HomogeneousPolynomial{Taylor1{T}}, k::Int) where {T<:NumberNotSeries}
@inbounds for l in eachindex(c[k])
identity!(c[k], a[k], l)
end
return nothing
end
for T in (:Taylor1, :TaylorN)
@eval begin
@inline function identity!(c::$T{T}, a::$T{T}, k::Int) where {T<:NumberNotSeries}
if $T == Taylor1
@inbounds c[k] = identity(a[k])
else
@inbounds for l in eachindex(c[k])
identity!(c[k], a[k], l)
end
end
return nothing
end
@inline function identity!(c::$T{T}, a::$T{T}, k::Int) where {T<:AbstractSeries}
@inbounds for l in eachindex(c[k])
identity!(c[k], a[k], l)
end
return nothing
end
@inline function zero!(c::$T{T}, a::$T{T}, k::Int) where {T<:Number}
@inbounds zero!(c, k)
return nothing
end
if $T == Taylor1
@inline function one!(c::$T{T}, a::$T{T}, k::Int) where {T<:Number}
zero!(c, k)
(k == 0) && (@inbounds c[0] = one(constant_term(a)))
return nothing
end
else
@inline function one!(c::$T{T}, a::$T{T}, k::Int) where {T<:Number}
zero!(c, k)
(k == 0) && (@inbounds c[0][1] = one(constant_term(a)))
return nothing
end
end
@inline function abs!(c::$T{T}, a::$T{T}, k::Int) where {T<:Number}
z = zero(constant_term(a))
if constant_term(constant_term(a)) > constant_term(z)
return add!(c, a, k)
elseif constant_term(constant_term(a)) < constant_term(z)
return subst!(c, a, k)
else
throw(DomainError(a,
"""The 0th order coefficient must be non-zero
(abs(x) is not differentiable at x=0)."""))
end
return nothing
end
@inline abs2!(c::$T{T}, a::$T{T}, k::Int) where {T<:Number} = sqr!(c, a, k)
@inline function exp!(c::$T{T}, a::$T{T}, k::Int) where {T<:Number}
if k == 0
@inbounds c[0] = exp(constant_term(a))
return nothing
end
zero!(c, k)
if $T == Taylor1
@inbounds for i = 0:k-1
c[k] += (k-i) * a[k-i] * c[i]
end
@inbounds div!(c, c, k, k)
else
@inbounds for i = 0:k-1
mul_scalar!(c[k], k-i, a[k-i], c[i])
end
@inbounds div!(c[k], c[k], k)
end
return nothing
end
@inline function expm1!(c::$T{T}, a::$T{T}, k::Int) where {T<:Number}
if k == 0
@inbounds c[0] = expm1(constant_term(a))
return nothing
end
zero!(c, k)
c0 = c[0]+one(c[0])
if $T == Taylor1
@inbounds c[k] = k * a[k] * c0
@inbounds for i = 1:k-1
c[k] += (k-i) * a[k-i] * c[i]
end
@inbounds div!(c, c, k, k)
else
@inbounds mul_scalar!(c[k], k, a[k], c0)
@inbounds for i = 1:k-1
mul_scalar!(c[k], k-i, a[k-i], c[i])
end
@inbounds div!(c[k], c[k], k)
end
return nothing
end
@inline function log!(c::$T{T}, a::$T{T}, k::Int) where {T<:Number}
if k == 0
@inbounds c[0] = log(constant_term(a))
return nothing
elseif k == 1
@inbounds c[1] = a[1] / constant_term(a)
return nothing
end
zero!(c, k)
if $T == Taylor1
@inbounds for i = 1:k-1
c[k] += (k-i) * a[i] * c[k-i]
end
else
@inbounds for i = 1:k-1
mul_scalar!(c[k], k-i, a[i], c[k-i])
end
end
@inbounds c[k] = (a[k] - c[k]/k) / constant_term(a)
return nothing
end
@inline function log1p!(c::$T{T}, a::$T{T}, k::Int) where {T<:Number}
if k == 0
a0 = constant_term(a)
@inbounds c[0] = log1p(a0)
return nothing
elseif k == 1
a0 = constant_term(a)
a0p1 = a0+one(a0)
@inbounds c[1] = a[1] / a0p1
return nothing
end
a0 = constant_term(a)
a0p1 = a0+one(a0)
zero!(c, k)
if $T == Taylor1
@inbounds for i = 1:k-1
c[k] += (k-i) * a[i] * c[k-i]
end
else
@inbounds for i = 1:k-1
mul_scalar!(c[k], k-i, a[i], c[k-i])
end
end
@inbounds c[k] = (a[k] - c[k]/k) / a0p1
return nothing
end
@inline function sincos!(s::$T{T}, c::$T{T}, a::$T{T}, k::Int) where {T<:Number}
if k == 0
a0 = constant_term(a)
if $T == Taylor1
@inbounds s[0], c[0] = sincos( a0 )
else
@inbounds s[0][1], c[0][1] = sincos( a0 )
end
return nothing
end
zero!(s, k)
zero!(c, k)
if $T == Taylor1
@inbounds for i = 1:k
x = i * a[i]
s[k] += x * c[k-i]
c[k] -= x * s[k-i]
end
else
@inbounds for i = 1:k
mul_scalar!(s[k], i, a[i], c[k-i])
mul_scalar!(c[k], -i, a[i], s[k-i])
end
end
if $T == Taylor1
s[k] = s[k] / k
c[k] = c[k] / k
else
@inbounds div!(s[k], s[k], k)
@inbounds div!(c[k], c[k], k)
end
return nothing
end
@inline function sincospi!(s::$T{T}, c::$T{T}, a::$T{T}, k::Int) where {T<:Number}
if k == 0
a0 = constant_term(a)
if $T == Taylor1
@inbounds s[0], c[0] = sincospi( a0 )
else
@inbounds s[0][1], c[0][1] = sincospi( a0 )
end
return nothing
end
mul!(a, pi, a, k)
sincos!(s, c, a, k)
return nothing
end
@inline function tan!(c::$T{T}, a::$T{T}, c2::$T{T}, k::Int) where {T<:Number}
if k == 0
@inbounds aux = tan( constant_term(a) )
if $T == Taylor1
@inbounds c[0] = aux
@inbounds c2[0] = aux^2
else
@inbounds c[0][1] = aux
@inbounds c2[0][1] = aux^2
end
return nothing
end
zero!(c, k)
if $T == Taylor1
@inbounds for i = 0:k-1
c[k] += (k-i) * a[k-i] * c2[i]
end
# c[k] <- c[k]/k
div!(c, c, k, k)
else
@inbounds for i = 0:k-1
mul_scalar!(c[k], k-i, a[k-i], c2[i])
end
# c[k] <- c[k]/k
div!(c[k], c[k], k)
end
# c[k] <- c[k] + a[k]
add!(c, a, c, k)
sqr!(c2, c, k)
return nothing
end
@inline function asin!(c::$T{T}, a::$T{T}, r::$T{T}, k::Int) where {T<:Number}
if k == 0
a0 = constant_term(a)
if $T == Taylor1
@inbounds c[0] = asin( a0 )
@inbounds r[0] = sqrt( 1 - a0^2 )
else
@inbounds c[0][1] = asin( a0 )
@inbounds r[0][1] = sqrt( 1 - a0^2 )
end
return nothing
end
zero!(c, k)
if $T == Taylor1
@inbounds for i in 1:k-1
c[k] += (k-i) * r[i] * c[k-i]
end
else
@inbounds for i in 1:k-1
mul_scalar!(c[k], k-i, r[i], c[k-i])
end
end
# Compute k-th coefficient of auxiliary term s=1-a^2
zero!(r, k) # r[k] <- 0
sqr!(r, a, k) # r[k] <- (a^2)[k]
subst!(r, r, k) # r[k] <- -r[k]
sqrt!(r, r, k) # r[k] <- (sqrt(r))[k]
if $T == Taylor1
@inbounds c[k] = (a[k] - c[k]/k) / constant_term(r)
else
for l in eachindex(c[k])
@inbounds c[k][l] = (a[k][l] - c[k][l]/k) / constant_term(r)
end
end
return nothing
end
@inline function acos!(c::$T{T}, a::$T{T}, r::$T{T}, k::Int) where {T<:Number}
if k == 0
a0 = constant_term(a)
if $T == Taylor1
@inbounds c[0] = acos( a0 )
@inbounds r[0] = sqrt( 1 - a0^2 )
else
@inbounds c[0][1] = acos( a0 )
@inbounds r[0][1] = sqrt( 1 - a0^2 )
end
return nothing
end
zero!(c, k)
if $T == Taylor1
@inbounds for i in 1:k-1
c[k] += (k-i) * r[i] * c[k-i]
end
else
@inbounds for i in 1:k-1
mul_scalar!(c[k], k-i, r[i], c[k-i])
end
end
# Compute k-th coefficient of auxiliary term s=1-a^2
zero!(r, k) # r[k] <- 0
sqr!(r, a, k) # r[k] <- (a^2)[k]
subst!(r, r, k) # r[k] <- -r[k]
sqrt!(r, r, k) # r[k] <- (sqrt(r))[k]
if $T == Taylor1
@inbounds c[k] = -(a[k] + c[k]/k) / constant_term(r)
else
for l in eachindex(c[k])
@inbounds c[k][l] = -(a[k][l] + c[k][l]/k) / constant_term(r)
end
end
return nothing
end
@inline function atan!(c::$T{T}, a::$T{T}, r::$T{T}, k::Int) where {T<:Number}
if k == 0
a0 = constant_term(a)
@inbounds c[0] = atan( a0 )
@inbounds r[0] = 1 + a0^2
return nothing
end
zero!(c, k)
if $T == Taylor1
@inbounds for i in 1:k-1
c[k] += (k-i) * r[i] * c[k-i]
end
@inbounds sqr!(r, a, k)
@inbounds c[k] = (a[k] - c[k]/k) / constant_term(r)
else
@inbounds for i in 1:k-1
mul_scalar!(c[k], k-i, r[i], c[k-i])
end
@inbounds sqr!(r, a, k)
for l in eachindex(c[k])
@inbounds c[k][l] = (a[k][l] - c[k][l]/k) / constant_term(r)
end
end
return nothing
end
@inline function sinhcosh!(s::$T{T}, c::$T{T}, a::$T{T}, k::Int) where {T<:Number}
if k == 0
@inbounds s[0] = sinh( constant_term(a) )
@inbounds c[0] = cosh( constant_term(a) )
return nothing
end
x = a[1]
zero!(s, k)
zero!(c, k)
if $T == Taylor1
@inbounds for i = 1:k
x = i * a[i]
s[k] += x * c[k-i]
c[k] += x * s[k-i]
end
@inbounds div!(s, s, k, k)
@inbounds div!(c, c, k, k)
else
@inbounds for i = 1:k
mul_scalar!(s[k], i, a[i], c[k-i])
mul_scalar!(c[k], i, a[i], s[k-i])
end
@inbounds div!(s[k], s[k], k)
@inbounds div!(c[k], c[k], k)
end
return nothing
end
@inline function tanh!(c::$T{T}, a::$T{T}, c2::$T{T}, k::Int) where {T<:Number}
if k == 0
@inbounds aux = tanh( constant_term(a) )
@inbounds c[0] = aux
@inbounds c2[0] = aux^2
return nothing
end
zero!(c, k)
if $T == Taylor1
@inbounds for i = 0:k-1
c[k] += (k-i) * a[k-i] * c2[i]
end
@inbounds c[k] = a[k] - c[k]/k
else
@inbounds for i = 0:k-1
mul_scalar!(c[k], k-i, a[k-i], c2[i])
end
@inbounds for l in eachindex(c[k])
c[k][l] = a[k][l] - c[k][l]/k
end
end
sqr!(c2, c, k)
return nothing
end
@inline function asinh!(c::$T{T}, a::$T{T}, r::$T{T}, k::Int) where {T<:Number}
if k == 0
a0 = constant_term(a)
@inbounds c[0] = asinh( a0 )
@inbounds r[0] = sqrt( a0^2 + 1 )
return nothing
end
zero!(c, k)
if $T == Taylor1
@inbounds for i in 1:k-1
c[k] += (k-i) * r[i] * c[k-i]
end
else
@inbounds for i in 1:k-1
mul_scalar!(c[k], k-i, r[i], c[k-i])
end
end
sqrt!(r, a^2+1, k)
@inbounds c[k] = (a[k] - c[k]/k) / constant_term(r)
return nothing
end
@inline function acosh!(c::$T{T}, a::$T{T}, r::$T{T}, k::Int) where {T<:Number}
if k == 0
a0 = constant_term(a)
@inbounds c[0] = acosh( a0 )
@inbounds r[0] = sqrt( a0^2 - 1 )
return nothing
end
zero!(c, k)
if $T == Taylor1
@inbounds for i in 1:k-1
c[k] += (k-i) * r[i] * c[k-i]
end
else
@inbounds for i in 1:k-1
mul_scalar!(c[k], k-i, r[i], c[k-i])
end
end
sqrt!(r, a^2-1, k)
@inbounds c[k] = (a[k] - c[k]/k) / constant_term(r)
return nothing
end
@inline function atanh!(c::$T{T}, a::$T{T}, r::$T{T}, k::Int) where {T<:Number}
if k == 0
a0 = constant_term(a)
@inbounds c[0] = atanh( a0 )
@inbounds r[0] = 1 - a0^2
return nothing
end
zero!(c, k)
if $T == Taylor1
@inbounds for i in 1:k-1
c[k] += (k-i) * r[i] * c[k-i]
end
else
@inbounds for i in 1:k-1
mul_scalar!(c[k], k-i, r[i], c[k-i])
end
end
@inbounds sqr!(r, a, k)
@inbounds c[k] = (a[k] + c[k]/k) / constant_term(r)
return nothing
end
end
end
# Mutating functions for Taylor1{TaylorN{T}}
@inline function exp!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}},
k::Int) where {T<:NumberNotSeries}
if k == 0
@inbounds for ordQ in eachindex(a[0])
# zero!(res[0], a[0], ordQ)
exp!(res[0], a[0], ordQ)
end
return nothing
end
# The recursion formula
zero!(res[k])
for i = 0:k-1
@inbounds for ordQ in eachindex(a[0])
mul_scalar!(res[k], k-i, res[i], a[k-i], ordQ)
end
end
div!(res, res, k, k)
return nothing
end
@inline function expm1!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}},
k::Int) where {T<:NumberNotSeries}
if k == 0
@inbounds for ordQ in eachindex(a[0])
# zero!(res[0], a[0], ordQ)
expm1!(res[0], a[0], ordQ)
end
return nothing
end
# The recursion formula
tmp = TaylorN( zero(a[k][0][1]), a[0].order)
zero!(res[k])
# i=0 term of sum
@inbounds for ordQ in eachindex(a[0])
one!(tmp, a[0], ordQ)
add!(tmp, res[0], tmp, ordQ)
tmp[ordQ] = k * tmp[ordQ]
# zero!(res[k], a[0], ordQ)
mul!(res[k], a[k], tmp, ordQ)
end
for i = 1:k-1
@inbounds for ordQ in eachindex(a[0])
tmp[ordQ] = (k-i) * res[i][ordQ]
mul!(res[k], tmp, a[k-i], ordQ)
end
end
div!(res, res, k, k)
return nothing
end
@inline function log!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}},
k::Int) where {T<:NumberNotSeries}
if k == 0
@inbounds for ordQ in eachindex(a[0])
# zero!(res[k], a[0], ordQ)
log!(res[k], a[0], ordQ)
end
return nothing
elseif k == 1
@inbounds for ordQ in eachindex(a[0])
zero!(res[k][ordQ])
div!(res[k], a[1], a[0], ordQ)
end
return nothing
end
# The recursion formula
tmp = TaylorN( zero(a[k][0][1]), a[0].order)
zero!(res[k])
for i = 1:k-1
@inbounds for ordQ in eachindex(a[0])
tmp[ordQ] = (k-i) * res[k-i][ordQ]
mul!(res[k], tmp, a[i], ordQ)
end
end
div!(res, res, k, k)
@inbounds for ordQ in eachindex(a[0])
subst!(tmp, a[k], res[k], ordQ)
zero!(res[k][ordQ])
div!(res[k], tmp, a[0], ordQ)
end
return nothing
end
@inline function log1p!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}},
k::Int) where {T<:NumberNotSeries}
if k == 0
@inbounds for ordQ in eachindex(a[0])
# zero!(res[k], a[0], ordQ)
log1p!(res[k], a[0], ordQ)
end
return nothing
end
tmp1 = TaylorN( zero(a[k][0][1]), a[0].order)
zero!(res[k])
@inbounds for ordQ in eachindex(a[0])
# zero!(res[k], a[0], ordQ)
one!(tmp1, a[0], ordQ)
add!(tmp1, tmp1, a[0], ordQ)
end
if k == 1
@inbounds for ordQ in eachindex(a[0])
div!(res[k], a[1], tmp1, ordQ)
end
return nothing
end
# The recursion formula
tmp = TaylorN( zero(a[k][0][1]), a[0].order)
for i = 1:k-1
@inbounds for ordQ in eachindex(a[0])
tmp[ordQ] = (k-i) * res[k-i][ordQ]
mul!(res[k], tmp, a[i], ordQ)
end
end
div!(res, res, k, k)
@inbounds for ordQ in eachindex(a[0])
subst!(tmp, a[k], res[k], ordQ)
zero!(res[k][ordQ])
div!(res[k], tmp, tmp1, ordQ)
end
return nothing
end
@inline function sincos!(s::Taylor1{TaylorN{T}}, c::Taylor1{TaylorN{T}},
a::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
if k == 0
@inbounds for ordQ in eachindex(a[0])
sincos!(s[0], c[0], a[0], ordQ)
end
return nothing
end
# The recursion formula
# x = TaylorN( a[1][0][1], a[0].order )
zero!(s[k])
zero!(c[k])
@inbounds for i = 1:k
for ordQ in eachindex(a[0])
# x[ordQ].coeffs .= i .* a[i][ordQ].coeffs
mul_scalar!(s[k], i, a[i], c[k-i], ordQ)
mul_scalar!(c[k], i, a[i], s[k-i], ordQ)
end
end
div!(s, s, k, k)
subst!(c, c, k)
div!(c, c, k, k)
return nothing
end
@inline function sincospi!(s::Taylor1{TaylorN{T}}, c::Taylor1{TaylorN{T}},
a::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
if k == 0
@inbounds for ordQ in eachindex(a[0])
sincospi!(s[0], c[0], a[0], ordQ)
end
return nothing
end
# aa = pi * a
aa = Taylor1(zero(a[0]), a.order)
@inbounds for ordT in eachindex(a)
mul!(aa, pi, a, ordT)
end
sincos!(s, c, aa, k)
return nothing
end
@inline function tan!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}},
res2::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
if k == 0
@inbounds res[0] = tan( a[0] )
# zero!(res2, res, 0)
sqr!(res2, res, 0)
return nothing
end
# The recursion formula
zero!(res[k])
for i = 0:k-1
@inbounds for ordQ in eachindex(a[0])
mul_scalar!(res[k], k-i, res2[i], a[k-i], ordQ)
end
end
@inbounds for ordQ in eachindex(a[0])
div!(res[k][ordQ], res[k][ordQ], k)
add!(res[k], a[k], res[k], ordQ)
end
sqr!(res2, res, k)
return nothing
end
@inline function asin!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}},
r::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
if k == 0
@inbounds for ordQ in eachindex(res[0])
asin!(res[0], a[0], r[0], ordQ)
end
return nothing
end
# The recursion formula
@inbounds zero!(res[k])
@inbounds for i in 1:k-1
for ordQ in eachindex(a[0])
mul_scalar!(res[k], k-i, res[k-i], r[i], ordQ)
end
end
@inbounds div!(res, res, k, k)
@inbounds for ordQ in eachindex(a[0])
subst!(res[k], a[k], res[k], ordQ)
div!(res[k], r[0], ordQ)
end
# Compute k-th coefficient of auxiliary term s=1-a^2
@inbounds zero!(r, k) # r[k] <- 0
@inbounds sqr!(r, a, k) # r[k] <- (a^2)[k]
@inbounds subst!(r, r, k) # r[k] <- -r[k]
@inbounds sqrt!(r, r, k) # r[k] <- (sqrt(r))[k]
return nothing
end
@inline function acos!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}},
r::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
if k == 0
@inbounds for ordQ in eachindex(res[0])
acos!(res[0], a[0], r[0], ordQ)
end
return nothing
end
# The recursion formula
zero!(res[k])
for i in 1:k-1
@inbounds for ordQ in eachindex(a[0])
mul_scalar!(res[k], k-i, res[k-i], r[i], ordQ)
end
end
div!(res, res, k, k)
@inbounds for ordQ in eachindex(a[0])
add!(res[k], a[k], res[k], ordQ)
subst!(res[k], res[k], ordQ)
div!(res[k], r[0], ordQ)
end
# Compute k-th coefficient of auxiliary term s=1-a^2
@inbounds zero!(r, k) # r[k] <- 0
@inbounds sqr!(r, a, k) # r[k] <- (a^2)[k]
@inbounds subst!(r, r, k) # r[k] <- -r[k]
@inbounds sqrt!(r, r, k) # r[k] <- (sqrt(r))[k]
return nothing
end
@inline function atan!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}},
r::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
if k == 0
res[0] = atan( a[0] )
# zero!(r, a, 0)
sqr!(r, a, 0)
add!(r, r, one(a[0][0][1]), 0)
return nothing
end
# The recursion formula
zero!(res[k])
for i in 1:k-1
@inbounds for ordQ in eachindex(a[0])
mul_scalar!(res[k], k-i, res[k-i], r[i], ordQ)
end
end
tmp = TaylorN( zero(a[0][0][1]), a[0].order )
@inbounds for ordQ in eachindex(a[0])
# zero!(tmp, res[k], ordQ)
tmp[ordQ] = - res[k][ordQ] / k
add!(tmp, a[k], tmp, ordQ)
zero!(res[k][ordQ])
div!(res[k], tmp, r[0], ordQ)
end
zero!(r[k])
sqr!(r, a, k)
return nothing
end
@inline function sinhcosh!(s::Taylor1{TaylorN{T}}, c::Taylor1{TaylorN{T}},
a::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
if k == 0
@inbounds for ordQ in eachindex(a[0])
sinhcosh!(s[0], c[0], a[0], ordQ)
end
return nothing
end
# The recursion formula
zero!(s[k])
zero!(c[k])
@inbounds for i = 1:k
for ordQ in eachindex(a[0])
mul_scalar!(s[k], i, a[i], c[k-i], ordQ)
mul_scalar!(c[k], i, a[i], s[k-i], ordQ)
end
end
div!(s, s, k, k)
div!(c, c, k, k)
return nothing
end
@inline function tanh!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}},
res2::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
if k == 0
@inbounds res[0] = tanh( a[0] )
# zero!(res2, res, 0)
sqr!(res2, res, 0)
return nothing
end
# The recursion formula
zero!(res[k])
for i = 0:k-1
@inbounds for ordQ in eachindex(a[0])
mul_scalar!(res[k], k-i, res2[i], a[k-i], ordQ)
end
end
tmp = TaylorN( zero(a[0][0][1]), a[0].order)
@inbounds for ordQ in eachindex(a[0])
# zero!(tmp, res[k], ordQ)
tmp[ordQ] = res[k][ordQ] / k
subst!(res[k], a[k], tmp, ordQ)
end
zero!(res2[k])
sqr!(res2, res, k)
return nothing
end
@inline function asinh!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}},
r::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
if k == 0
@inbounds res[0] = asinh( a[0] )
# r[0] = sqrt(1+a[0]^2)
tmp = TaylorN( zero(a[0][0][1]), a[0].order)
r[0] = square(a[0])
for ordQ in eachindex(a[0])
one!(tmp, a[0], ordQ)
add!(tmp, tmp, r[0], ordQ)
# zero!(r[0], tmp, ordQ)
sqrt!(r[0], tmp, ordQ)
end
return nothing
end
# The recursion formula
zero!(res[k])
for i in 1:k-1
@inbounds for ordQ in eachindex(a[0])
mul_scalar!(res[k], k-i, res[k-i], r[i], ordQ)
end
end
div!(res, res, k, k)
tmp = TaylorN( zero(a[0][0][1]), a[0].order)
@inbounds for ordQ in eachindex(a[0])
subst!(tmp, a[k], res[k], ordQ)
zero!(res[k][ordQ])
div!(res[k], tmp, r[0], ordQ)
end
# Compute auxiliary term s=1+a^2
s = Taylor1(zero(a[0]), a.order)
for i = 0:k
sqr!(s, a, i)
if i == 0
# add 1 to the constant term, so that s = 1 + a^2
add!(s, one(s), s, 0)
end
end
# Update aux term r = sqrt(s) = sqrt(1+a^2)
sqrt!(r, s, k)
return nothing
end
@inline function acosh!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}},
r::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
if k == 0
@inbounds res[0] = acosh( a[0] )
# r[0] = sqrt(a[0]^2-1)
tmp = TaylorN( zero(a[0][0][1]), a[0].order)
r[0] = square(a[0])
for ordQ in eachindex(a[0])
one!(tmp, a[0], ordQ)
subst!(tmp, r[0], tmp, ordQ)
# zero!(r[0], tmp, ordQ)
sqrt!(r[0], tmp, ordQ)
end
return nothing
end
# The recursion formula
zero!(res[k])
for i in 1:k-1
@inbounds for ordQ in eachindex(a[0])
mul_scalar!(res[k], k-i, res[k-i], r[i], ordQ)
end
end
div!(res, res, k, k)
tmp = TaylorN( zero(a[0][0][1]), a[0].order)
@inbounds for ordQ in eachindex(a[0])
subst!(tmp, a[k], res[k], ordQ)
zero!(res[k][ordQ])
div!(res[k], tmp, r[0], ordQ)
end
# Compute auxiliary term s=a^2-1
s = Taylor1(zero(a[0]), a.order)
for i = 0:k
sqr!(s, a, i)
if i == 0
# subtract 1 from the constant term, so that s = a^2 - 1
subst!(s, s, one(s), 0)
end
end
# Update aux term r = sqrt(s) = sqrt(a^2-1)
sqrt!(r, s, k)
return nothing
end
@inline function atanh!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}},
r::Taylor1{TaylorN{T}}, k::Int) where {T<:NumberNotSeries}
if k == 0
res[0] = atanh( a[0] )
# zero!(r, a, 0)
sqr!(r, a, 0)
subst!(r, one(a[0][0][1]), r, 0)
return nothing
end
# The recursion formula
tmp = TaylorN( zero(a[0][0][1]), a[0].order )
zero!(res[k])
for i in 1:k-1
@inbounds for ordQ in eachindex(a[0])
mul_scalar!(res[k], k-i, res[k-i], r[i], ordQ)
end
end
@inbounds for ordQ in eachindex(a[0])
# zero!(tmp, res[k], ordQ)
tmp[ordQ] = res[k][ordQ] / k
add!(tmp, a[k], tmp, ordQ)
zero!(res[k][ordQ])
div!(res[k], tmp, r[0], ordQ)
end
zero!(r[k])
sqr!(r, a, k)
return nothing
end
@doc doc"""
inverse(f)
Return the Taylor expansion of ``f^{-1}(t)``, of order `N = f.order`,
for `f::Taylor1` polynomial, assuming the first coefficient of `f` is zero.
Otherwise, a `DomainError` is thrown.
The algorithm implements Lagrange inversion at ``t=0`` if ``f(0)=0``:
```math
\begin{equation*}
f^{-1}(t) = \sum_{n=1}^{N} \frac{t^n}{n!} \left.
\frac{{\rm d}^{n-1}}{{\rm d} z^{n-1}}\left(\frac{z}{f(z)}\right)^n
\right\vert_{z=0}.
\end{equation*}
```
""" inverse
function inverse(f::Taylor1{T}) where {T<:Number}
if !iszero(f[0])
throw(DomainError(f,
"""
Evaluation of Taylor1 series at 0 is non-zero; revert
a Taylor1 series with constant coefficient 0 and re-expand about f(0).
"""))
end
z = Taylor1(T, f.order)
zdivf = z/f
zdivfpown = zdivf
res = Taylor1(zero(TS.numtype(zdivf)), f.order)
@inbounds for ord in 1:f.order
res[ord] = zdivfpown[ord-1]/ord
zdivfpown *= zdivf
end
return res
end
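# Illustrative use of `inverse` (a minimal sketch, assuming `using TaylorSeries`;
# the polynomial chosen here is an arbitrary example):
#
#     julia> t = Taylor1(5);
#
#     julia> f = 2t + t^2;        # f(0) = 0, so Lagrange inversion applies
#
#     julia> g = inverse(f);
#
#     julia> evaluate(f, g)       # ≈ t, up to the truncation order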
@doc doc"""
inverse_map(f)
Return the Taylor expansion of ``f^{-1}(t)``, of order `N = f.order`,
for a `Taylor1` polynomial or a vector of `TaylorN` polynomials, assuming the
constant term of `f` is zero. Otherwise, a `DomainError` is thrown.
This method is based on the algorithm by M. Berz, Modern map methods in
Particle Beam Physics, Academic Press (1999), Sect 2.3.1.
See [`inverse`](@ref) (for `f::Taylor1`).
"""
function inverse_map(p::Taylor1)
if !iszero(constant_term(p))
throw(DomainError(p,
"""
Evaluation of Taylor1 series at 0 is non-zero; revert
a Taylor1 series with constant coefficient 0 and re-expand about f(0).
"""))
end
inv_m_pol = inv(linear_polynomial(p)[1])
n_pol = inv_m_pol * nonlinear_polynomial(p)
scaled_ident = inv_m_pol * Taylor1(p.order)
res = scaled_ident
aux1 = zero(res)
aux2 = zero(res)
for ord in 1:p.order
_horner!(aux2, n_pol, res, aux1)
subst!(res, scaled_ident, aux2, ord)
end
return res
end
function inverse_map(p::Vector{TaylorN{T}}) where {T<:NumberNotSeries}
if !iszero(constant_term(p))
throw(DomainError(p,
"""
Evaluation of Taylor1 series at 0 is non-zero; revert
a Taylor1 series with constant coefficient 0 and re-expand about f(0).
"""))
end
@assert length(p) == get_numvars()
inv_m_pol = inv(jacobian(p))
n_pol = inv_m_pol * nonlinear_polynomial(p)
scaled_ident = inv_m_pol * TaylorN.(1:get_numvars(), order=get_order(p[1]))
res = deepcopy(scaled_ident)
aux = zero.(res)
auxvec = [zero(res[1]) for val in eachindex(res[1])]
valscache = [zero(val) for val in res]
aaux = zero(res[1])
for ord in 1:get_order(p[1])
t_res = (res...,)
for i = 1:get_numvars()
zero!.(auxvec)
_evaluate!(auxvec, n_pol[i], t_res, valscache, aaux)
aux[i] = sum( auxvec )
subst!(res[i], scaled_ident[i], aux[i], ord)
end
end
return res
end
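# Illustrative use of `inverse_map` on a vector of `TaylorN` (a minimal sketch;
# the variable names, order and map below are assumptions, not package defaults):
#
#     julia> set_variables("x y", order=6);
#
#     julia> x, y = get_variables();
#
#     julia> p = [x + y^2, y - x^2];   # zero constant term, invertible linear part
#
#     julia> q = inverse_map(p);
#
#     julia> evaluate.(p, Ref(q))      # ≈ [x, y], up to the truncation order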
# Documentation for the recursion relations
@doc doc"""
exp!(c, a, k) --> nothing
Update the `k-th` expansion coefficient `c[k+1]` of `c = exp(a)`
for both `c` and `a` either `Taylor1` or `TaylorN`.
The coefficients are given by
```math
\begin{equation*}
c_k = \frac{1}{k} \sum_{j=0}^{k-1} (k-j) a_{k-j} c_j.
\end{equation*}
```
""" exp!
@doc doc"""
log!(c, a, k) --> nothing
Update the `k-th` expansion coefficient `c[k+1]` of `c = log(a)`
for both `c` and `a` either `Taylor1` or `TaylorN`.
The coefficients are given by
```math
\begin{equation*}
c_k = \frac{1}{a_0} \big(a_k - \frac{1}{k} \sum_{j=0}^{k-1} j a_{k-j} c_j \big).
\end{equation*}
```
""" log!
@doc doc"""
sincos!(s, c, a, k) --> nothing
Update the `k-th` expansion coefficients `s[k+1]` and `c[k+1]`
of `s = sin(a)` and `c = cos(a)` simultaneously, for `s`, `c` and `a`
either `Taylor1` or `TaylorN`.
The coefficients are given by
```math
\begin{aligned}
s_k &= \frac{1}{k}\sum_{j=0}^{k-1} (k-j) a_{k-j} c_j ,\\
c_k &= -\frac{1}{k}\sum_{j=0}^{k-1} (k-j) a_{k-j} s_j.
\end{aligned}
```
""" sincos!
@doc doc"""
tan!(c, a, p, k::Int) --> nothing
Update the `k-th` expansion coefficients `c[k+1]` of `c = tan(a)`,
for `c` and `a` either `Taylor1` or `TaylorN`; `p = c^2` and
is passed as an argument for efficiency.
The coefficients are given by
```math
\begin{equation*}
c_k = a_k + \frac{1}{k} \sum_{j=0}^{k-1} (k-j) a_{k-j} p_j.
\end{equation*}
```
""" tan!
@doc doc"""
asin!(c, a, r, k)
Update the `k-th` expansion coefficients `c[k+1]` of `c = asin(a)`,
for `c` and `a` either `Taylor1` or `TaylorN`; `r = sqrt(1-a^2)` and
is passed as an argument for efficiency.
```math
\begin{equation*}
c_k = \frac{1}{ r_0 }
\big( a_k - \frac{1}{k} \sum_{j=1}^{k-1} j r_{k-j} c_j \big).
\end{equation*}
```
""" asin!
@doc doc"""
acos!(c, a, r, k)
Update the `k-th` expansion coefficients `c[k+1]` of `c = acos(a)`,
for `c` and `a` either `Taylor1` or `TaylorN`; `r = sqrt(1-a^2)` and
is passed as an argument for efficiency.
```math
\begin{equation*}
c_k = - \frac{1}{ r_0 }
\big( a_k + \frac{1}{k} \sum_{j=1}^{k-1} j r_{k-j} c_j \big).
\end{equation*}
```
""" acos!
@doc doc"""
atan!(c, a, r, k)
Update the `k-th` expansion coefficients `c[k+1]` of `c = atan(a)`,
for `c` and `a` either `Taylor1` or `TaylorN`; `r = 1+a^2` and
is passed as an argument for efficiency.
```math
\begin{equation*}
c_k = \frac{1}{r_0}\big(a_k - \frac{1}{k} \sum_{j=1}^{k-1} j r_{k-j} c_j\big).
\end{equation*}
```
""" atan!
@doc doc"""
sinhcosh!(s, c, a, k)
Update the `k-th` expansion coefficients `s[k+1]` and `c[k+1]`
of `s = sinh(a)` and `c = cosh(a)` simultaneously, for `s`, `c` and `a`
either `Taylor1` or `TaylorN`.
The coefficients are given by
```math
\begin{aligned}
s_k = \frac{1}{k} \sum_{j=0}^{k-1} (k-j) a_{k-j} c_j, \\
c_k = \frac{1}{k} \sum_{j=0}^{k-1} (k-j) a_{k-j} s_j.
\end{aligned}
```
""" sinhcosh!
@doc doc"""
tanh!(c, a, p, k)
Update the `k-th` expansion coefficients `c[k+1]` of `c = tanh(a)`,
for `c` and `a` either `Taylor1` or `TaylorN`; `p = c^2` and
is passed as an argument for efficiency.
```math
\begin{equation*}
c_k = a_k - \frac{1}{k} \sum_{j=0}^{k-1} (k-j) a_{k-j} p_j.
\end{equation*}
```
""" tanh!
@doc doc"""
asinh!(c, a, r, k)
Update the `k-th` expansion coefficients `c[k+1]` of `c = asinh(a)`,
for `c` and `a` either `Taylor1` or `TaylorN`; `r = sqrt(a^2+1)` and
is passed as an argument for efficiency.
```math
\begin{equation*}
c_k = \frac{1}{ r_0 }
\big( a_k - \frac{1}{k} \sum_{j=1}^{k-1} j r_{k-j} c_j \big).
\end{equation*}
```
""" asinh!
@doc doc"""
acosh!(c, a, r, k)
Update the `k-th` expansion coefficients `c[k+1]` of `c = acosh(a)`,
for `c` and `a` either `Taylor1` or `TaylorN`; `r = sqrt(a^2-1)` and
is passed as an argument for efficiency.
```math
\begin{equation*}
c_k = \frac{1}{ r_0 }
\big( a_k - \frac{1}{k} \sum_{j=1}^{k-1} j r_{k-j} c_j \big).
\end{equation*}
```
""" acosh!
@doc doc"""
atanh!(c, a, r, k)
Update the `k-th` expansion coefficients `c[k+1]` of `c = atanh(a)`,
for `c` and `a` either `Taylor1` or `TaylorN`; `r = 1-a^2` and
is passed as an argument for efficiency.
```math
\begin{equation*}
c_k = \frac{1}{r_0}\big(a_k + \frac{1}{k} \sum_{j=1}^{k-1} j r_{k-j} c_j\big).
\end{equation*}
```
""" atanh!

# This file is part of the TaylorSeries.jl Julia package, MIT license
# Hash tables for HomogeneousPolynomial and TaylorN
"""
generate_tables(num_vars, order)
Return the hash tables `coeff_table`, `index_table`, `size_table`
and `pos_table`. Internally, these are treated as `const`.
# Hash tables
coeff_table :: Array{Array{Array{Int,1},1},1}
The ``i+1``-th component contains a vector with the vectors of all the possible
combinations of monomials of a `HomogeneousPolynomial` of order ``i``.
index_table :: Array{Array{Int,1},1}
The ``i+1``-th component contains a vector of (hashed) indices that represent
the distinct monomials of a `HomogeneousPolynomial` of order (degree) ``i``.
size_table :: Array{Int,1}
The ``i+1``-th component contains the number of distinct monomials of the
`HomogeneousPolynomial` of order ``i``, equivalent to `length(coeff_table[i])`.
pos_table :: Array{Dict{Int,Int},1}
The ``i+1``-th component maps the hash index to the (lexicographic) position
of the corresponding monomial in `coeff_table`.
"""
function generate_tables(num_vars, order)
coeff_table = [generate_index_vectors(num_vars, i) for i in 0:order]
index_table = Vector{Int}[map(x->in_base(order, x), coeffs) for coeffs in coeff_table]
# Check uniqueness of labels as "non-collision" test
@assert all(allunique.(index_table))
pos_table = map(make_inverse_dict, index_table)
size_table = map(length, index_table)
# The next line tests the consistency of the number of monomials,
# but it's commented because it may not pass due to the `binomial`
# @assert sum(size_table) == binomial(num_vars+order, min(num_vars,order))
return (coeff_table, index_table, size_table, pos_table)
end
"""
generate_index_vectors(num_vars, degree)
Return a vector with the index (exponent) vectors of all monomials in
`num_vars` variables of total degree `degree`.
"""
function generate_index_vectors(num_vars, degree)
if num_vars == 1
return Vector{Int}[ [degree] ]
end
indices = Vector{Int}[]
for k in degree:-1:0
new_indices = [ [k, x...] for x in generate_index_vectors(num_vars-1, degree-k) ]
append!(indices, new_indices)
end
return indices
end
function make_forward_dict(v::Vector)
Dict(i=>x for (i,x) in enumerate(v))
end
"""
make_inverse_dict(v)
Return a Dict with the enumeration of `v`: the elements of `v` point to
the corresponding index.
It is used to construct `pos_table` from `index_table`.
"""
make_inverse_dict(v::Vector) = Dict(x=>i for (i,x) in enumerate(v))
"""
in_base(order, v)
Interpret the vector `v` of non-negative integers as digits in base `oorder`,
returning the corresponding integer; `oorder` is the smallest odd integer greater than `order`.
"""
function in_base(order, v)
oorder = iseven(order) ? order+1 : order+2 # `oorder` is the next odd integer to `order`
result = 0
all(iszero.(v)) && return result
for i in v
result = result*oorder + i
end
return result
end
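# Worked example: with `order = 2` the base is `oorder = 3` (the next odd
# integer), so the exponent vector [1, 0, 2] is hashed to 1*3^2 + 0*3 + 2 = 11.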
const coeff_table, index_table, size_table, pos_table =
generate_tables(get_numvars(), get_order())
# Garbage-collect here to free memory
GC.gc();
"""
show_monomials(ord::Int) --> nothing
List the indices and corresponding monomials of a `HomogeneousPolynomial`
of degree `ord`.
"""
function show_monomials(ord::Int)
z = zeros(Int, TS.size_table[ord+1])
for (index, value) in enumerate(TS.coeff_table[ord+1])
z[index] = 1
pol = HomogeneousPolynomial(z)
println(" $index --> $(homogPol2str(pol)[4:end])")
z[index] = 0
end
nothing
end

# This file is part of the TaylorSeries.jl Julia package, MIT license
#
# Luis Benet & David P. Sanders
# UNAM
#
# MIT Expat license
#
for T in (:Taylor1, :HomogeneousPolynomial, :TaylorN)
## real, imag, conj and ctranspose ##
for f in (:real, :imag, :conj)
@eval ($f)(a::$T) = $T($f(a.coeffs), a.order)
end
@eval adjoint(a::$T) = conj(a)
## isinf and isnan ##
@eval isinf(a::$T) = any(isinf, a.coeffs)
@eval isnan(a::$T) = any(isnan, a.coeffs)
end
## Division functions: rem and mod ##
for op in (:mod, :rem)
for T in (:Taylor1, :TaylorN)
@eval begin
function ($op)(a::$T{T}, x::T) where {T<:Real}
coeffs = copy(a.coeffs)
@inbounds coeffs[1] = ($op)(constant_term(a), x)
return $T(coeffs, a.order)
end
function ($op)(a::$T{T}, x::S) where {T<:Real,S<:Real}
R = promote_type(T, S)
a = convert($T{R}, a)
return ($op)(a, convert(R,x))
end
end
end
@eval begin
function ($op)(a::TaylorN{Taylor1{T}}, x::T) where {T<:Real}
coeffs = copy(a.coeffs)
@inbounds coeffs[1] = ($op)(constant_term(a), x)
return TaylorN( coeffs, a.order )
end
function ($op)(a::TaylorN{Taylor1{T}}, x::S) where {T<:Real,S<:Real}
R = promote_type(T,S)
a = convert(TaylorN{Taylor1{R}}, a)
return ($op)(a, convert(R,x))
end
function ($op)(a::Taylor1{TaylorN{T}}, x::T) where {T<:Real}
coeffs = copy(a.coeffs)
@inbounds coeffs[1] = ($op)(constant_term(a), x)
return Taylor1( coeffs, a.order )
end
@inbounds function ($op)(a::Taylor1{TaylorN{T}}, x::S) where {T<:Real,S<:Real}
R = promote_type(T,S)
a = convert(Taylor1{TaylorN{R}}, a)
return ($op)(a, convert(R,x))
end
end
end
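# Illustrative behaviour (a minimal sketch, assuming `using TaylorSeries`):
# only the constant term is reduced, higher-order coefficients are untouched.
#
#     julia> a = Taylor1([5.5, 1.0, 2.0]);   # 5.5 + t + 2t²
#
#     julia> mod(a, 2.0)                     # == Taylor1([1.5, 1.0, 2.0])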
## mod2pi and abs ##
for T in (:Taylor1, :TaylorN)
@eval begin
function mod2pi(a::$T{T}) where {T<:Real}
coeffs = copy(a.coeffs)
@inbounds coeffs[1] = mod2pi( constant_term(a) )
return $T( coeffs, a.order)
end
function abs(a::$T{T}) where {T<:Real}
if constant_term(a) > 0
return a
elseif constant_term(a) < 0
return -a
else
throw(DomainError(a,
"""The 0th order Taylor1 coefficient must be non-zero
(abs(x) is not differentiable at x=0)."""))
end
end
abs2(a::$T) = real(a)^2 + imag(a)^2
abs(x::$T{T}) where {T<:Complex} = sqrt(abs2(x))
end
end
function mod2pi(a::TaylorN{Taylor1{T}}) where {T<:Real}
coeffs = copy(a.coeffs)
@inbounds coeffs[1] = mod2pi( constant_term(a) )
return TaylorN( coeffs, a.order )
end
function mod2pi(a::Taylor1{TaylorN{T}}) where {T<:Real}
coeffs = copy(a.coeffs)
@inbounds coeffs[1] = mod2pi( constant_term(a) )
return Taylor1( coeffs, a.order )
end
function abs(a::TaylorN{Taylor1{T}}) where {T<:Real}
if constant_term(a)[0] > 0
return a
elseif constant_term(a)[0] < 0
return -a
else
throw(DomainError(a,
"""The 0th order TaylorN{Taylor1{T}} coefficient must be non-zero
(abs(x) is not differentiable at x=0)."""))
end
end
function abs(a::Taylor1{TaylorN{T}}) where {T<:Real}
if constant_term(a[0]) > 0
return a
elseif constant_term(a[0]) < 0
return -a
else
throw(DomainError(a,
"""The 0th order Taylor1{TaylorN{T}} coefficient must be non-zero
(abs(x) is not differentiable at x=0)."""))
end
end
abs(x::Taylor1{TaylorN{T}}) where {T<:Complex} = sqrt(abs2(x))
abs(x::TaylorN{Taylor1{T}}) where {T<:Complex} = sqrt(abs2(x))
abs(x::Taylor1{Taylor1{T}}) where {T<:Complex} = sqrt(abs2(x))
@doc doc"""
abs(a)
For a `Real` type returns `a` if `constant_term(a) > 0` and `-a` if `constant_term(a) < 0` for
`a <:Union{Taylor1,TaylorN}`.
For a `Complex` type, such as `Taylor1{ComplexF64}`, returns `sqrt(real(a)^2 + imag(a)^2)`.
Notice that `typeof(abs(a)) <: AbstractSeries` and
that for a `Complex` argument a `Real` type is returned (e.g. `typeof(abs(a::Taylor1{ComplexF64})) == Taylor1{Float64}`).
""" abs
#norm
@doc doc"""
norm(x::AbstractSeries, p::Real)
Returns the p-norm of an `x::AbstractSeries`, defined by
```math
\begin{equation*}
\left\Vert x \right\Vert_p = \left( \sum_k | x_k |^p \right)^{\frac{1}{p}},
\end{equation*}
```
which returns a non-negative number.
""" norm
norm(x::AbstractSeries, p::Real=2) = norm( norm.(x.coeffs, p), p)
#norm for Taylor vectors
norm(v::Vector{T}, p::Real=2) where {T<:AbstractSeries} = norm( norm.(v, p), p)
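# Worked example: for x = Taylor1([1.0, -2.0, 3.0]) the 1-norm is
# |1| + |-2| + |3| = 6 and the default 2-norm is sqrt(1 + 4 + 9) = sqrt(14):
#
#     norm(Taylor1([1.0, -2.0, 3.0]), 1)   # == 6.0
#     norm(Taylor1([1.0, -2.0, 3.0]))      # == sqrt(14)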
# rtoldefault
for T in (:Taylor1, :HomogeneousPolynomial, :TaylorN)
@eval rtoldefault(::Type{$T{T}}) where {T<:Number} = rtoldefault(T)
@eval rtoldefault(::$T{T}) where {T<:Number} = rtoldefault(T)
end
# isfinite
"""
isfinite(x::AbstractSeries) -> Bool
Test whether the coefficients of the polynomial `x` are finite.
"""
isfinite(x::AbstractSeries) = !isnan(x) && !isinf(x)
# isapprox; modified from Julia's Base.isapprox
"""
isapprox(x::AbstractSeries, y::AbstractSeries; rtol::Real=sqrt(eps), atol::Real=0, nans::Bool=false)
Inexact equality comparison between polynomials: returns `true` if
`norm(x-y,1) <= atol + rtol*max(norm(x,1), norm(y,1))`, where `x` and `y` are
polynomials. For more details, see [`Base.isapprox`](@ref).
"""
function isapprox(x::T, y::S; rtol::Real=rtoldefault(x,y,0), atol::Real=0.0,
nans::Bool=false) where {T<:AbstractSeries,S<:AbstractSeries}
x == y || (isfinite(x) && isfinite(y) &&
norm(x-y,1) <= atol + rtol*max(norm(x,1), norm(y,1))) ||
(nans && isnan(x) && isnan(y))
end
#isapprox for vectors of Taylors
function isapprox(x::Vector{T}, y::Vector{S}; rtol::Real=rtoldefault(T,S,0), atol::Real=0.0,
nans::Bool=false) where {T<:AbstractSeries,S<:AbstractSeries}
x == y || norm(x-y,1) <= atol + rtol*max(norm(x,1), norm(y,1)) ||
(nans && isnan(x) && isnan(y))
end
#taylor_expand function for Taylor1
"""
taylor_expand(f, x0; order)
Computes the Taylor expansion of the function `f` around the point `x0`.
If `x0` is a scalar, a `Taylor1` expansion will be returned. If `x0` is a vector,
a `TaylorN` expansion will be computed. If the dimension of x0 (`length(x0)`)
is different from the number of variables set for `TaylorN` (`get_numvars()`), an
`AssertionError` will be thrown.
"""
function taylor_expand(f::F; order::Int=15) where {F}
a = Taylor1(order)
return f(a)
end
function taylor_expand(f::F, x0::T; order::Int=15) where {T<:Number, F}
a = Taylor1(T[x0, one(x0)], order)
return f(a)
end
#taylor_expand function for TaylorN
function taylor_expand(f::F, x0::Vector{T}; order::Int=get_order()) where {T<:Number, F}
ll = length(x0)
@assert ll == get_numvars() && order <= get_order()
X = Array{TaylorN{T}}(undef, ll)
for i in eachindex(X)
X[i] = x0[i] + TaylorN(T, i, order=order)
end
return f( X )
end
function taylor_expand(f::F, x1::Vararg{Number,N}; order::Int=get_order()) where {F, N}
x0 = promote(x1...)
ll = length(x0)
T = eltype(x0[1])
@assert ll == get_numvars() && order <= get_order()
X = Array{TaylorN{T}}(undef, ll)
for i in eachindex(X)
X[i] = x0[i] + TaylorN(T, i, order=order)
end
return f( X... )
end
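# Illustrative use (a minimal sketch, assuming `using TaylorSeries` and, for the
# second call, the default setup with two `TaylorN` variables):
#
#     julia> taylor_expand(exp, 1.0, order=4)   # e + e*t + e*t²/2 + e*t³/6 + e*t⁴/24
#
#     julia> taylor_expand(x -> x[1]*x[2], [1.0, 2.0])   # expansion of x*y around (1, 2)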
#update! function for Taylor1
"""
update!(a, x0)
Takes `a <: Union{Taylor1,TaylorN}` and expands it around the coordinate `x0`.
"""
function update!(a::Taylor1{T}, x0::T) where {T<:Number}
a.coeffs .= evaluate(a, Taylor1([x0, one(x0)], a.order) ).coeffs
return nothing
end
function update!(a::Taylor1{T}, x0::S) where {T<:Number, S<:Number}
xx0 = convert(T, x0)
return update!(a, xx0)
end
#update! function for TaylorN
function update!(a::TaylorN{T}, vals::Vector{T}) where {T<:Number}
a.coeffs .= evaluate(a, get_variables(a.order) .+ vals).coeffs
return nothing
end
function update!(a::TaylorN{T}, vals::Vector{S}) where {T<:Number, S<:Number}
vv = convert(Vector{T}, vals)
return update!(a, vv)
end
function update!(a::Union{Taylor1,TaylorN})
#shifting around zero shouldn't change anything...
nothing
end
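# Illustrative use (a minimal sketch, assuming `using TaylorSeries`): `update!`
# re-expands the polynomial about the shifted point, i.e. a(t) <- a(t + x0):
#
#     julia> a = Taylor1([0.0, 1.0, 1.0]);   # t + t²
#
#     julia> update!(a, 1.0);
#
#     julia> a       # == Taylor1([2.0, 3.0, 1.0]) = (t+1) + (t+1)²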
for T in (:Taylor1, :TaylorN)
@eval deg2rad(z::$T{T}) where {T<:AbstractFloat} = z * (convert(T, pi) / 180)
@eval deg2rad(z::$T{T}) where {T<:Real} = z * (convert(float(T), pi) / 180)
@eval rad2deg(z::$T{T}) where {T<:AbstractFloat} = z * (180 / convert(T, pi))
@eval rad2deg(z::$T{T}) where {T<:Real} = z * (180 / convert(float(T), pi))
end
# Internal mutating deg2rad!, rad2deg! functions
for T in (:Taylor1, :TaylorN)
@eval @inline function deg2rad!(v::$T{T}, a::$T{T}, k::Int) where {T<:AbstractFloat}
@inbounds v[k] = a[k] * (convert(T, pi) / 180)
return nothing
end
@eval @inline function deg2rad!(v::$T{S}, a::$T{T}, k::Int) where {S<:AbstractFloat,T<:Real}
@inbounds v[k] = a[k] * (convert(float(T), pi) / 180)
return nothing
end
@eval @inline function rad2deg!(v::$T{T}, a::$T{T}, k::Int) where {T<:AbstractFloat}
@inbounds v[k] = a[k] * (180 / convert(T, pi))
return nothing
end
@eval @inline function rad2deg!(v::$T{S}, a::$T{T}, k::Int) where {S<:AbstractFloat,T<:Real}
@inbounds v[k] = a[k] * (180 / convert(float(T), pi))
return nothing
end
end

# This file is part of the TaylorSeries.jl Julia package, MIT license
#
# Parameters for HomogeneousPolynomial and TaylorN
"""
ParamsTaylor1
DataType holding the current variable name for `Taylor1`.
**Field:**
- `var_name :: String` Name of the `Taylor1` variable
These parameters can be changed using [`set_taylor1_varname`](@ref)
"""
mutable struct ParamsTaylor1
var_name :: String
end
const _params_Taylor1_ = ParamsTaylor1("t")
"""
set_taylor1_varname(var::String)
Change the displayed variable for `Taylor1` objects.
"""
set_taylor1_varname(var::String) = _params_Taylor1_.var_name = strip(var)
"""
ParamsTaylorN
DataType holding the current parameters for `TaylorN` and
`HomogeneousPolynomial`.
**Fields:**
- `order :: Int` Order (degree) of the polynomials
- `num_vars :: Int` Number of variables
- `variable_names :: Vector{String}` Names of the variables
- `variable_symbols :: Vector{Symbol}` Symbols of the variables
These parameters can be changed using [`set_variables`](@ref)
"""
mutable struct ParamsTaylorN
order :: Int
num_vars :: Int
variable_names :: Vector{String}
variable_symbols :: Vector{Symbol}
end
ParamsTaylorN(order, num_vars, variable_names) = ParamsTaylorN(order, num_vars, variable_names, Symbol.(variable_names))
const _params_TaylorN_ = ParamsTaylorN(6, 2, ["x₁", "x₂"])
## Utilities to get the maximum order, number of variables, their names and symbols
get_order() = _params_TaylorN_.order
get_numvars() = _params_TaylorN_.num_vars
get_variable_names() = _params_TaylorN_.variable_names
get_variable_symbols() = _params_TaylorN_.variable_symbols
function lookupvar(s::Symbol)
ind = findfirst(x -> x==s, _params_TaylorN_.variable_symbols)
isa(ind, Nothing) && return 0
return ind
end
"""
get_variables(T::Type, [order::Int=get_order()])
Return a `TaylorN{T}` vector with each entry representing an
independent variable. It takes the default `_params_TaylorN_` values
if `set_variables` hasn't been changed with the exception that `order`
can be explicitly established by the user without changing internal values
for `num_vars` or `variable_names`. Omitting `T` defaults to `Float64`.
"""
get_variables(::Type{T}, order::Int=get_order()) where {T} =
[TaylorN(T, i, order=order) for i in 1:get_numvars()]
get_variables(order::Int=get_order()) =
[TaylorN(Float64, i, order=order) for i in 1:get_numvars()]
"""
set_variables([T::Type], names::String; [order=get_order(), numvars=-1])
Return a `TaylorN{T}` vector with each entry representing an
independent variable. `names` defines the output for each variable
(separated by a space). The default type `T` is `Float64`,
and the default for `order` is the one defined globally.
Changing the `order` or `numvars` resets the hash_tables.
If `numvars` is not specified, it is inferred from `names`. If only
one variable name is defined and `numvars>1`, it uses this name with
subscripts for the different variables.
```julia
julia> set_variables(Int, "x y z", order=4)
3-element Array{TaylorSeries.TaylorN{Int},1}:
1 x + 𝒪(‖x‖⁵)
1 y + 𝒪(‖x‖⁵)
1 z + 𝒪(‖x‖⁵)
julia> set_variables("α", numvars=2)
2-element Array{TaylorSeries.TaylorN{Float64},1}:
1.0 α₁ + 𝒪(‖x‖⁵)
1.0 α₂ + 𝒪(‖x‖⁵)
julia> set_variables("x", order=6, numvars=2)
2-element Array{TaylorSeries.TaylorN{Float64},1}:
1.0 x₁ + 𝒪(‖x‖⁷)
1.0 x₂ + 𝒪(‖x‖⁷)
```
"""
function set_variables(::Type{R}, names::Vector{T}; order=get_order()) where
{R, T<:AbstractString}
order ≥ 1 || error("Order must be at least 1")
num_vars = length(names)
num_vars ≥ 1 || error("Number of variables must be at least 1")
_params_TaylorN_.variable_names = names
_params_TaylorN_.variable_symbols = Symbol.(names)
if !(order == get_order() && num_vars == get_numvars())
# the order or the number of variables changed: regenerate the hash tables
_params_TaylorN_.order = order
_params_TaylorN_.num_vars = num_vars
resize!(coeff_table,order+1)
resize!(index_table,order+1)
resize!(size_table,order+1)
resize!(pos_table,order+1)
coeff_table[:], index_table[:], size_table[:], pos_table[:] =
generate_tables(num_vars, order)
GC.gc();
end
# return a list of the new variables
TaylorN{R}[TaylorN(R,i) for i in 1:get_numvars()]
end
set_variables(::Type{R}, symbs::Vector{T}; order=get_order()) where
{R,T<:Symbol} = set_variables(R, string.(symbs), order=order)
set_variables(names::Vector{T}; order=get_order()) where {T<:AbstractString} =
set_variables(Float64, names, order=order)
set_variables(symbs::Vector{T}; order=get_order()) where {T<:Symbol} =
set_variables(Float64, symbs, order=order)
function set_variables(::Type{R}, names::T; order=get_order(), numvars=-1) where
{R,T<:AbstractString}
variable_names = split(names)
if length(variable_names) == 1 && numvars ≥ 1
name = variable_names[1]
variable_names = [string(name, subscriptify(i)) for i in 1:numvars]
end
set_variables(R, variable_names, order=order)
end
set_variables(::Type{R}, symbs::Symbol; order=get_order(), numvars=-1) where {R} =
set_variables(R, string(symbs), order=order, numvars=numvars)
set_variables(names::T; order=get_order(), numvars=-1) where {T<:AbstractString} =
set_variables(Float64, names, order=order, numvars=numvars)
set_variables(symbs::Symbol; order=get_order(), numvars=-1) =
set_variables(Float64, string(symbs), order=order, numvars=numvars)
"""
show_params_TaylorN()
Display the current parameters for `TaylorN` and `HomogeneousPolynomial` types.
"""
function show_params_TaylorN()
@info( """
Parameters for `TaylorN` and `HomogeneousPolynomial`:
Maximum order = $(get_order())
Number of variables = $(get_numvars())
Variable names = $(get_variable_names())
Variable symbols = $(Symbol.(get_variable_names()))
""")
nothing
end
# Control the display of the big 𝒪 notation
const bigOnotation = Bool[true]
const _show_default = [false]
"""
displayBigO(d::Bool) --> nothing
Set/unset displaying of the big 𝒪 notation in the output
of `Taylor1` and `TaylorN` polynomials. The initial value is
`true`.
"""
displayBigO(d::Bool) = (bigOnotation[end] = d; d)
"""
use_show_default(d::Bool) --> nothing
Use `Base.show_default` method (default `show` method
in Base), or a custom display. The initial value is
`false`, so customized display is used.
"""
use_show_default(d::Bool) = (_show_default[end] = d; d)

# This file is part of the TaylorSeries.jl Julia package, MIT license
#
# Luis Benet & David P. Sanders
# UNAM
#
# MIT Expat license
#
function ^(a::HomogeneousPolynomial, n::Integer)
n == 0 && return one(a)
n == 1 && return copy(a)
n == 2 && return square(a)
n < 0 && throw(DomainError(n, "negative power of a HomogeneousPolynomial is not allowed"))
return power_by_squaring(a, n)
end
#= The following method computes `a^float(n)` (except for cases like
Taylor1{Interval{T}}^n, where `power_by_squaring` is used), so that
`pow!` is used internally.
=#
^(a::Taylor1, n::Integer) = a^float(n)
function ^(a::TaylorN{T}, n::Integer) where {T<:Number}
n == 0 && return one(a)
n == 1 && return copy(a)
n == 2 && return square(a)
n < 0 && return inv( a^(-n) )
return power_by_squaring(a, n)
end
for T in (:Taylor1, :TaylorN)
@eval function ^(a::$T{T}, n::Integer) where {T<:Integer}
n == 0 && return one(a)
n == 1 && return copy(a)
n == 2 && return square(a)
n < 0 && throw(DomainError(n, "negative power is not allowed for integer coefficients"))
return power_by_squaring(a, n)
end
@eval function ^(a::$T{Rational{T}}, n::Integer) where {T<:Integer}
n == 0 && return one(a)
n == 1 && return copy(a)
n == 2 && return square(a)
n < 0 && return inv( a^(-n) )
return power_by_squaring(a, n)
end
@eval ^(a::$T, x::S) where {S<:Rational} = a^(x.num/x.den)
@eval ^(a::$T, b::$T) = exp( b*log(a) )
@eval ^(a::$T, x::T) where {T<:Complex} = exp( x*log(a) )
end
^(a::Taylor1{TaylorN{T}}, n::Integer) where {T<:NumberNotSeries} = a^float(n)
^(a::Taylor1{TaylorN{T}}, r::Rational) where {T<:NumberNotSeries} = a^(r.num/r.den)
# in-place form of power_by_squaring
# this method assumes `y`, `x` and `aux` are of same order
# TODO: add power_by_squaring! method for HomogeneousPolynomial and mixtures
for T in (:Taylor1, :TaylorN)
@eval @inline function power_by_squaring_0!(y::$T{T}, x::$T{T}) where {T<:NumberNotSeries}
for k in eachindex(y)
one!(y, x, k)
end
return nothing
end
@eval @inline function power_by_squaring!(y::$T{T}, x::$T{T}, aux::$T{T},
p::Integer) where {T<:NumberNotSeries}
(p == 0) && return power_by_squaring_0!(y, x)
t = trailing_zeros(p) + 1
p >>= t
# aux = x
for k in eachindex(aux)
identity!(aux, x, k)
end
while (t -= 1) > 0
# aux = square(aux)
for k in reverse(eachindex(aux))
sqr!(aux, k)
end
end
# y = aux
for k in eachindex(y)
identity!(y, aux, k)
end
while p > 0
t = trailing_zeros(p) + 1
p >>= t
while (t -= 1) ≥ 0
# aux = square(aux)
for k in reverse(eachindex(aux))
sqr!(aux, k)
end
end
# y = y * aux
mul!(y, aux)
end
return nothing
end
end
# power_by_squaring; slightly modified from base/intfuncs.jl
# Licensed under MIT "Expat"
for T in (:Taylor1, :HomogeneousPolynomial, :TaylorN)
@eval function power_by_squaring(x::$T, p::Integer)
if p == 0
return one(x)
elseif p == 1
return copy(x)
elseif p == 2
return square(x)
elseif p == 3
return x*square(x)
end
t = trailing_zeros(p) + 1
p >>= t
while (t -= 1) > 0
x = square(x)
end
y = x
while p > 0
t = trailing_zeros(p) + 1
p >>= t
while (t -= 1) ≥ 0
x = square(x)
end
y *= x
end
return y
end
end
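# Worked trace of the squaring scheme above for p = 5 (binary 101): the first
# loop strips the trailing zero bits of p (none here), leaving y = x; the second
# loop then squares twice (x -> x² -> x⁴) and multiplies once, so y = x·x⁴ = x⁵
# is obtained with two squarings and one product instead of four products.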
# power_by_squaring specializations for non-mixed Taylor1 and TaylorN
# uses internally mutating method `power_by_squaring!`
for T in (:Taylor1, :TaylorN)
@eval function power_by_squaring(x::$T{T}, p::Integer) where {T <: NumberNotSeries}
(p == 0) && return one(x)
(p == 1) && return copy(x)
(p == 2) && return square(x)
(p == 3) && return x*square(x)
y = zero(x)
aux = zero(x)
power_by_squaring!(y, x, aux, p)
return y
end
end
## Real power ##
function ^(a::Taylor1{T}, r::S) where {T<:Number, S<:Real}
a0 = constant_term(a)
aux = one(a0)^r
iszero(r) && return Taylor1(aux, a.order)
aa = aux*a
r == 1 && return aa
r == 2 && return square(aa)
r == 0.5 && return sqrt(aa)
l0 = findfirst(a)
lnull = trunc(Int, r*l0 )
(lnull > a.order) && return Taylor1( zero(aux), a.order)
c_order = l0 == 0 ? a.order : min(a.order, trunc(Int,r*a.order))
c = Taylor1(zero(aux), c_order)
aux0 = deepcopy(c)
for k in eachindex(c)
pow!(c, aa, aux0, r, k)
end
return c
end
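# Illustrative use (a minimal sketch, assuming `using TaylorSeries`):
#
#     julia> t = Taylor1(4);
#
#     julia> (1 + t)^0.5   # == 1.0 + 0.5t - 0.125t² + 0.0625t³ - 0.0390625t⁴
#
# which reproduces the binomial series of sqrt(1 + t).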
## Real power ##
# TODO: get rid of allocations
function ^(a::TaylorN, r::S) where {S<:Real}
a0 = constant_term(a)
aux = one(a0^r)
iszero(r) && return TaylorN(aux, a.order)
aa = aux*a
r == 1 && return aa
r == 2 && return square(aa)
r == 0.5 && return sqrt(aa)
isinteger(r) && return aa^round(Int,r) # uses power_by_squaring
iszero(a0) && throw(DomainError(a,
"""The 0-th order TaylorN coefficient must be non-zero
in order to expand `^` around 0."""))
c = TaylorN( zero(aux), a.order)
aux = deepcopy(c)
for ord in eachindex(a)
pow!(c, aa, aux, r, ord)
end
return c
end
function ^(a::Taylor1{TaylorN{T}}, r::S) where {T<:NumberNotSeries, S<:Real}
a0 = constant_term(a)
aux = one(a0)^r
iszero(r) && return Taylor1(aux, a.order)
aa = aux*a
r == 1 && return aa
r == 2 && return square(aa)
r == 0.5 && return sqrt(aa)
# Is the following needed?
# isinteger(r) && r > 0 && iszero(constant_term(a[0])) &&
# return power_by_squaring(aa, round(Int,r))
l0 = findfirst(a)
lnull = trunc(Int, r*l0 )
(lnull > a.order) && return Taylor1( zero(aux), a.order)
c_order = l0 == 0 ? a.order : min(a.order, trunc(Int,r*a.order))
c = Taylor1(zero(aux), c_order)
aux0 = deepcopy(c)
for k in eachindex(c)
pow!(c, aa, aux0, r, k)
end
return c
end
# Homogeneous coefficients for real power
@doc doc"""
pow!(c, a, r::Real, k::Int)
Update the `k`-th expansion coefficient `c[k]` of `c = a^r`, for
both `c` and `a` either `Taylor1` or `TaylorN`.
The coefficients are given by
```math
c_k = \frac{1}{k a_0} \sum_{j=0}^{k-1} \big(r(k-j) -j\big)a_{k-j} c_j.
```
For `Taylor1` polynomials, a similar formula is implemented which
exploits `k_0`, the order of the first non-zero coefficient of `a`.
""" pow!
@inline function pow!(c::Taylor1{T}, a::Taylor1{T}, ::Taylor1{T}, r::S, k::Int) where
{T<:Number, S <: Real}
if r == 0
return one!(c, a, k)
elseif r == 1
return identity!(c, a, k)
elseif r == 2
return sqr!(c, a, k)
elseif r == 0.5
return sqrt!(c, a, k)
end
# Sanity
zero!(c, k)
# First non-zero coefficient
l0 = findfirst(a)
l0 < 0 && return nothing
# The first non-zero coefficient of the result; must be integer
!isinteger(r*l0) && throw(DomainError(a,
"""The 0-th order Taylor1 coefficient must be non-zero
to raise the Taylor1 polynomial to a non-integer exponent."""))
lnull = trunc(Int, r*l0 )
kprime = k-lnull
(kprime < 0 || lnull > a.order) && return nothing
# Relevant for positive integer r, to avoid round-off errors
isinteger(r) && r > 0 && (k > r*findlast(a)) && return nothing
if k == lnull
@inbounds c[k] = (a[l0])^float(r)
return nothing
end
# The recursion formula
if l0+kprime ≤ a.order
@inbounds c[k] = r * kprime * c[lnull] * a[l0+kprime]
end
for i = 1:k-lnull-1
((i+lnull) > a.order || (l0+kprime-i > a.order)) && continue
aux = r*(kprime-i) - i
@inbounds c[k] += aux * c[i+lnull] * a[l0+kprime-i]
end
@inbounds c[k] = c[k] / (kprime * a[l0])
return nothing
end
@inline function pow!(c::TaylorN{T}, a::TaylorN{T}, ::TaylorN{T}, r::S, k::Int) where
{T<:NumberNotSeriesN, S<:Real}
if r == 0
return one!(c, a, k)
elseif r == 1
return identity!(c, a, k)
elseif r == 2
return sqr!(c, a, k)
elseif r == 0.5
return sqrt!(c, a, k)
end
if k == 0
@inbounds c[0][1] = ( constant_term(a) )^r
return nothing
end
# Sanity
zero!(c, k)
# The recursion formula
@inbounds for i = 0:k-1
aux = r*(k-i) - i
# c[k] += a[k-i]*c[i]*aux
mul_scalar!(c[k], aux, a[k-i], c[i])
end
# c[k] <- c[k]/(k * constant_term(a))
@inbounds div!(c[k], c[k], k * constant_term(a))
return nothing
end
@inline function pow!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}}, aux::Taylor1{TaylorN{T}}, r::S,
ordT::Int) where {T<:NumberNotSeries, S<:Real}
if r == 0
return one!(res, a, ordT)
elseif r == 1
return identity!(res, a, ordT)
elseif r == 2
return sqr!(res, a, ordT)
elseif r == 0.5
return sqrt!(res, a, ordT)
end
# Sanity
zero!(res, ordT)
# First non-zero coefficient
l0 = findfirst(a)
l0 < 0 && return nothing
# The first non-zero coefficient of the result; must be integer
!isinteger(r*l0) && throw(DomainError(a,
"""The 0-th order Taylor1 coefficient must be non-zero
to raise the Taylor1 polynomial to a non-integer exponent."""))
lnull = trunc(Int, r*l0 )
kprime = ordT-lnull
(kprime < 0 || lnull > a.order) && return nothing
# Relevant for positive integer r, to avoid round-off errors
isinteger(r) && r > 0 && (ordT > r*findlast(a)) && return nothing
if ordT == lnull
if isinteger(r)
power_by_squaring!(res[ordT], a[l0], aux[0], round(Int,r))
return nothing
end
a0 = constant_term(a[l0])
iszero(a0) && throw(DomainError(a[l0],
"""The 0-th order TaylorN coefficient must be non-zero
in order to expand `^` around 0."""))
for ordQ in eachindex(a[l0])
pow!(res[ordT], a[l0], aux[0], r, ordQ)
end
return nothing
end
# The recursion formula
for i = 0:ordT-lnull-1
((i+lnull) > a.order || (l0+kprime-i > a.order)) && continue
aux = r*(kprime-i) - i
# res[ordT] += aux*res[i+lnull]*a[l0+kprime-i]
@inbounds mul_scalar!(res[ordT], aux, res[i+lnull], a[l0+kprime-i])
end
# res[ordT] /= a[l0]*kprime
@inbounds div_scalar!(res[ordT], 1/kprime, a[l0])
return nothing
end
## Square ##
"""
square(a::AbstractSeries) --> typeof(a)
Return `a^2`; see [`TaylorSeries.sqr!`](@ref).
""" square
for T in (:Taylor1, :TaylorN)
@eval function square(a::$T)
c = zero(a)
for k in eachindex(a)
sqr!(c, a, k)
end
return c
end
end
function square(a::HomogeneousPolynomial)
order = 2*get_order(a)
# NOTE: the following returns order 0, but could be get_order(), or get_order(a)
order > get_order() && return HomogeneousPolynomial(zero(a[1]), 0)
res = HomogeneousPolynomial(zero(a[1]), order)
accsqr!(res, a)
return res
end
function square(a::Taylor1{TaylorN{T}}) where {T<:NumberNotSeries}
res = Taylor1(zero(a[0]), a.order)
for ordT in eachindex(a)
sqr!(res, a, ordT)
end
return res
end
#auxiliary function to avoid allocations
@inline function sqr_orderzero!(c::Taylor1{T}, a::Taylor1{T}) where {T<:NumberNotSeries}
@inbounds c[0] = a[0]^2
return nothing
end
@inline function sqr_orderzero!(c::TaylorN{T}, a::TaylorN{T}) where {T<:NumberNotSeries}
@inbounds c[0][1] = a[0][1]^2
return nothing
end
@inline function sqr_orderzero!(c::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}}) where {T<:NumberNotSeries}
@inbounds for ord in eachindex(c[0])
sqr!(c[0], a[0], ord)
end
return nothing
end
@inline function sqr_orderzero!(c::TaylorN{Taylor1{T}}, a::TaylorN{Taylor1{T}}) where {T<:NumberNotSeries}
@inbounds for ord in eachindex(c[0][1])
sqr!(c[0][1], a[0][1], ord)
end
return nothing
end
@inline function sqr_orderzero!(c::Taylor1{Taylor1{T}}, a::Taylor1{Taylor1{T}}) where {T<:NumberNotSeriesN}
@inbounds for ord in eachindex(c[0])
sqr!(c[0], a[0], ord)
end
return nothing
end
# Homogeneous coefficients for square
@doc doc"""
sqr!(c, a, k::Int) --> nothing
Update the `k-th` expansion coefficient `c[k]` of `c = a^2`, for
both `c` and `a` either `Taylor1` or `TaylorN`.
The coefficients are given by
```math
\begin{aligned}
c_k &= 2 \sum_{j=0}^{(k-1)/2} a_{k-j} a_j,
\text{ if $k$ is odd,} \\
c_k &= 2 \sum_{j=0}^{(k-2)/2} a_{k-j} a_j + (a_{k/2})^2,
\text{ if $k$ is even.}
\end{aligned}
```
""" sqr!
for T = (:Taylor1, :TaylorN)
@eval begin
@inline function sqr!(c::$T{T}, a::$T{T}, k::Int) where {T<:Number}
if k == 0
sqr_orderzero!(c, a)
return nothing
end
# Sanity
zero!(c, k)
# Recursion formula
kodd = k%2
kend = (k - 2 + kodd) >> 1
if $T == Taylor1
@inbounds for i = 0:kend
c[k] += a[i] * a[k-i]
end
@inbounds c[k] = 2 * c[k]
else
@inbounds for i = 0:kend
mul!(c[k], a[i], a[k-i])
end
@inbounds mul!(c, 2, c, k)
end
kodd == 1 && return nothing
if $T == Taylor1
@inbounds c[k] += a[k >> 1]^2
else
accsqr!(c[k], a[k >> 1])
end
return nothing
end
# in-place squaring: given `c`, compute expansion of `c^2` and save back into `c`
@inline function sqr!(c::$T{T}, k::Int) where {T<:NumberNotSeries}
if k == 0
sqr_orderzero!(c, c)
return nothing
end
# Recursion formula
kodd = k%2
kend = (k - 2 + kodd) >> 1
if $T == Taylor1
(kend ≥ 0) && ( @inbounds c[k] = c[0] * c[k] )
@inbounds for i = 1:kend
c[k] += c[i] * c[k-i]
end
@inbounds c[k] = 2 * c[k]
(kodd == 0) && ( @inbounds c[k] += c[k >> 1]^2 )
else
(kend ≥ 0) && ( @inbounds mul!(c, c[0][1], c, k) )
@inbounds for i = 1:kend
mul!(c[k], c[i], c[k-i])
end
@inbounds mul!(c, 2, c, k)
if (kodd == 0)
accsqr!(c[k], c[k >> 1])
end
end
return nothing
end
end
end
@inline function sqr!(res::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}},
ordT::Int) where {T<:NumberNotSeries}
# Sanity
zero!(res, ordT)
if ordT == 0
@inbounds for ordQ in eachindex(a[0])
@inbounds sqr!(res[0], a[0], ordQ)
end
return nothing
end
# Recursion formula
kodd = ordT%2
kend = (ordT - 2 + kodd) >> 1
(kodd == 0) && @inbounds for ordQ in eachindex(a[0])
sqr!(res[ordT], a[ordT >> 1], ordQ)
mul!(res[ordT], 0.5, res[ordT], ordQ)
end
for i = 0:kend
@inbounds for ordQ in eachindex(a[ordT])
# mul! accumulates the result in res[ordT]
mul!(res[ordT], a[i], a[ordT-i], ordQ)
end
end
@inbounds for ordQ in eachindex(a[ordT])
mul!(res[ordT], 2, res[ordT], ordQ)
end
return nothing
end
"""
accsqr!(c, a)
Returns `c += a*a` with no allocation; all parameters are `HomogeneousPolynomial`.
"""
@inline function accsqr!(c::HomogeneousPolynomial{T}, a::HomogeneousPolynomial{T}) where
{T<:NumberNotSeriesN}
iszero(a) && return nothing
@inbounds num_coeffs_a = size_table[a.order+1]
@inbounds posTb = pos_table[c.order+1]
@inbounds idxTb = index_table[a.order+1]
@inbounds for na = 1:num_coeffs_a
ca = a[na]
iszero(ca) && continue
inda = idxTb[na]
pos = posTb[2*inda]
c[pos] += ca^2
@inbounds for nb = na+1:num_coeffs_a
cb = a[nb]
iszero(cb) && continue
indb = idxTb[nb]
pos = posTb[inda+indb]
c[pos] += 2 * ca * cb
end
end
return nothing
end
## Square root ##
function sqrt(a::Taylor1{T}) where {T<:Number}
# First non-zero coefficient
l0nz = findfirst(a)
aux = zero(sqrt( constant_term(a) ))
if l0nz < 0
return Taylor1(aux, a.order)
    elseif isodd(l0nz) # l0nz must be even
throw(DomainError(a,
"""First non-vanishing Taylor1 coefficient must correspond
to an **even power** in order to expand `sqrt` around 0."""))
end
# The last l0nz coefficients are dropped.
lnull = l0nz >> 1 # integer division by 2
c_order = l0nz == 0 ? a.order : a.order >> 1
c = Taylor1( aux, c_order )
aa = convert(Taylor1{eltype(aux)}, a)
for k in eachindex(c)
sqrt!(c, aa, k, lnull)
end
return c
end
function sqrt(a::TaylorN)
@inbounds p0 = sqrt( constant_term(a) )
if iszero(p0)
throw(DomainError(a,
"""The 0-th order TaylorN coefficient must be non-zero
in order to expand `sqrt` around 0."""))
end
c = TaylorN( p0, a.order)
aa = convert(TaylorN{eltype(p0)}, a)
for k in eachindex(c)
sqrt!(c, aa, k)
end
return c
end
function sqrt(a::Taylor1{TaylorN{T}}) where {T<:NumberNotSeries}
# First non-zero coefficient
l0nz = findfirst(a)
aux = TaylorN( zero(sqrt(constant_term(a[0]))), a[0].order )
if l0nz < 0
return Taylor1( aux, a.order )
    elseif isodd(l0nz) # l0nz must be even
throw(DomainError(a,
"""First non-vanishing Taylor1 coefficient must correspond
to an **even power** in order to expand `sqrt` around 0."""))
end
# The last l0nz coefficients are dropped.
lnull = l0nz >> 1 # integer division by 2
c_order = l0nz == 0 ? a.order : a.order >> 1
c = Taylor1( aux, c_order )
aa = convert(Taylor1{eltype(aux)}, a)
for k in eachindex(c)
sqrt!(c, aa, k, lnull)
end
return c
end
# Homogeneous coefficients for the square-root
@doc doc"""
sqrt!(c, a, k::Int, k0::Int=0)
Compute the `k-th` expansion coefficient `c[k]` of `c = sqrt(a)`
for both `c` and `a` either `Taylor1` or `TaylorN`.
The coefficients are given by
```math
\begin{aligned}
c_k &= \frac{1}{2 c_0} \big( a_k - 2 \sum_{j=1}^{(k-1)/2} c_{k-j}c_j\big),
\text{ if $k$ is odd,} \\
c_k &= \frac{1}{2 c_0} \big( a_k - 2 \sum_{j=1}^{(k-2)/2} c_{k-j}c_j
- (c_{k/2})^2\big), \text{ if $k$ is even.}
\end{aligned}
```
For `Taylor1` polynomials, `k0` is the order of the first non-zero
coefficient, which must be even.
""" sqrt!
@inline function sqrt!(c::Taylor1{T}, a::Taylor1{T}, k::Int, k0::Int=0) where {T<:Number}
k < k0 && return nothing
if k == k0
@inbounds c[k] = sqrt(a[2*k0])
return nothing
end
# Recursion formula
kodd = (k - k0)%2
# kend = div(k - k0 - 2 + kodd, 2)
kend = (k - k0 - 2 + kodd) >> 1
imax = min(k0+kend, a.order)
imin = max(k0+1, k+k0-a.order)
if k+k0 ≤ a.order
@inbounds c[k] = a[k+k0]
end
if kodd == 0
@inbounds c[k] -= (c[kend+k0+1])^2
end
imin ≤ imax && ( @inbounds c[k] -= 2 * c[imin] * c[k+k0-imin] )
@inbounds for i = imin+1:imax
c[k] -= 2 * c[i] * c[k+k0-i]
end
@inbounds c[k] = c[k] / (2*c[k0])
return nothing
end
@inline function sqrt!(c::TaylorN{T}, a::TaylorN{T}, k::Int) where {T<:NumberNotSeriesN}
if k == 0
@inbounds c[0][1] = sqrt( constant_term(a) )
return nothing
end
# Recursion formula
kodd = k%2
kend = (k - 2 + kodd) >> 1
# c[k] <- a[k]
@inbounds for i in eachindex(c[k])
c[k][i] = a[k][i]
end
if kodd == 0
# @inbounds c[k] <- c[k] - (c[kend+1])^2
@inbounds mul_scalar!(c[k], -1, c[kend+1], c[kend+1])
end
@inbounds for i = 1:kend
# c[k] <- c[k] - 2*c[i]*c[k-i]
mul_scalar!(c[k], -2, c[i], c[k-i])
end
# @inbounds c[k] <- c[k] / (2*c[0])
div!(c[k], c[k], 2*constant_term(c))
return nothing
end
@inline function sqrt!(c::Taylor1{TaylorN{T}}, a::Taylor1{TaylorN{T}}, k::Int,
k0::Int=0) where {T<:NumberNotSeries}
k < k0 && return nothing
if k == k0
@inbounds for l in eachindex(c[k])
sqrt!(c[k], a[2*k0], l)
end
return nothing
end
# Recursion formula
kodd = (k - k0)%2
# kend = div(k - k0 - 2 + kodd, 2)
kend = (k - k0 - 2 + kodd) >> 1
imax = min(k0+kend, a.order)
imin = max(k0+1, k+k0-a.order)
if k+k0 ≤ a.order
# @inbounds c[k] += a[k+k0]
### TODO: add in-place add! method for Taylor1, TaylorN and mixtures: c[k] += a[k] -> add!(c, a, k)
### and/or add identity! method such that each coeff is copied individually,
### otherwise memory-mixing issues happen
@inbounds for l in eachindex(c[k])
for m in eachindex(c[k][l])
c[k][l][m] = a[k+k0][l][m]
end
end
end
if kodd == 0
# c[k] <- c[k] - c[kend+1]^2
# TODO: use accsqr! here?
@inbounds mul_scalar!(c[k], -1, c[kend+k0+1], c[kend+k0+1])
end
@inbounds for i = imin:imax
# c[k] <- c[k] - 2 * c[i] * c[k+k0-i]
mul_scalar!(c[k], -2, c[i], c[k+k0-i])
end
# @inbounds c[k] <- c[k] / (2*c[k0])
@inbounds div_scalar!(c[k], 0.5, c[k0])
return nothing
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 1056 | # This file is part of the TaylorSeries.jl Julia package, MIT license
#
# Luis Benet & David P. Sanders
# UNAM
#
# MIT Expat license
#
using PrecompileTools
@setup_workload begin
# Putting some things in `@setup_workload` instead of `@compile_workload` can reduce the size of the
# precompile file and potentially make loading faster.
t = Taylor1(20)
δ = set_variables("δ", order=6, numvars=2)
tN = one(δ[1]) + Taylor1(typeof(δ[1]), 20)
# tb = Taylor1(Float128, 20)
# δb = zero(Float128) .+ δ
# tbN = one(δb[1]) + Taylor1(typeof(δb[1]), 20)
#
@compile_workload begin
# all calls in this block will be precompiled, regardless of whether
# they belong to your package or not (on Julia 1.8 and higher)
for x in (:t, :tN)
@eval begin
T = numtype($x)
zero($x)
sin($x)
cos($x)
$x/sqrt($x^2+(2*$x)^2)
evaluate(($x)^3, 0.125)
($x)[2]
end
end
end
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 7624 | # This file is part of the TaylorSeries.jl Julia package, MIT license
# Printing of TaylorSeries objects
# subscriptify is taken from the ValidatedNumerics.jl package, licensed under MIT "Expat".
# superscriptify is a small variation
const subscript_digits = [c for c in "₀₁₂₃₄₅₆₇₈₉"]
const superscript_digits = [c for c in "⁰¹²³⁴⁵⁶⁷⁸⁹"]
function subscriptify(n::Int)
dig = reverse(digits(n))
join([subscript_digits[i+1] for i in dig])
end
function superscriptify(n::Int)
dig = reverse(digits(n))
join([superscript_digits[i+1] for i in dig])
end
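# E.g. (illustrative, not executed here): subscriptify(12) == "₁₂" and
# superscriptify(3) == "³"; these are used below to build variable names such
# as x₁₂ and exponents such as t³.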
# Fallback
function pretty_print(a::Taylor1)
# z = zero(a[0])
var = _params_Taylor1_.var_name
space = string(" ")
bigO = bigOnotation[end] ?
string("+ 𝒪(", var, superscriptify(a.order+1), ")") :
string("")
# iszero(a) && return string(space, z, space, bigO)
strout::String = space
ifirst = true
for i in eachindex(a)
monom::String = i==0 ? string("") : i==1 ? string(" ", var) :
string(" ", var, superscriptify(i))
@inbounds c = a[i]
# c == z && continue
cadena = numbr2str(c, ifirst)
strout = string(strout, cadena, monom, space)
ifirst = false
end
strout = strout * bigO
strout
end
function pretty_print(a::Taylor1{T}) where {T<:NumberNotSeries}
z = zero(a[0])
var = _params_Taylor1_.var_name
space = string(" ")
bigO = bigOnotation[end] ?
string("+ 𝒪(", var, superscriptify(a.order+1), ")") :
string("")
iszero(a) && return string(space, z, space, bigO)
strout::String = space
ifirst = true
for i in eachindex(a)
monom::String = i==0 ? string("") : i==1 ? string(" ", var) :
string(" ", var, superscriptify(i))
@inbounds c = a[i]
iszero(c) && continue
cadena = numbr2str(c, ifirst)
strout = string(strout, cadena, monom, space)
ifirst = false
end
strout = strout * bigO
strout
end
function pretty_print(a::Taylor1{T} where {T <: AbstractSeries{S}}) where {S<:Number}
z = zero(a[0])
var = _params_Taylor1_.var_name
space = string(" ")
bigO = bigOnotation[end] ?
string("+ 𝒪(", var, superscriptify(a.order+1), ")") :
string("")
iszero(a) && return string(space, z, space, bigO)
strout::String = space
ifirst = true
for i in eachindex(a)
monom::String = i==0 ? string("") : i==1 ? string(" ", var) :
string(" ", var, superscriptify(i))
@inbounds c = a[i]
iszero(c) && continue
cadena = numbr2str(c, ifirst)
ccad::String = i==0 ? cadena : ifirst ? string("(", cadena, ")") :
string(cadena[1:2], "(", cadena[3:end], ")")
strout = string(strout, ccad, monom, space)
ifirst = false
end
strout = strout * bigO
strout
end
function pretty_print(a::HomogeneousPolynomial{T}) where {T<:Number}
z = zero(a[1])
space = string(" ")
iszero(a) && return string(space, z)
strout::String = homogPol2str(a)
strout
end
function pretty_print(a::TaylorN{T}) where {T<:Number}
z = zero(a[0])
space = string("")
bigO::String = bigOnotation[end] ?
string(" + 𝒪(‖x‖", superscriptify(a.order+1), ")") :
string("")
iszero(a) && return string(space, z, space, bigO)
strout::String = space
ifirst = true
for ord in eachindex(a)
pol = a[ord]
iszero(pol) && continue
cadena::String = homogPol2str( pol )
strsgn = (ifirst || ord == 0 || cadena[2] == '-') ?
string("") : string(" +")
strout = string( strout, strsgn, cadena)
ifirst = false
end
strout = strout * bigO
strout
end
function homogPol2str(a::HomogeneousPolynomial{T}) where {T<:Number}
numVars = get_numvars()
order = a.order
z = zero(a.coeffs[1])
space = string(" ")
strout::String = space
ifirst = true
iIndices = zeros(Int, numVars)
for pos = 1:size_table[order+1]
monom::String = string("")
@inbounds iIndices[:] = coeff_table[order+1][pos]
for ivar = 1:numVars
powivar = iIndices[ivar]
if powivar == 1
monom = string(monom, name_taylorNvar(ivar))
elseif powivar > 1
monom = string(monom, name_taylorNvar(ivar), superscriptify(powivar))
end
end
@inbounds c = a[pos]
iszero(c) && continue
cadena = numbr2str(c, ifirst)
strout = string(strout, cadena, monom, space)
ifirst = false
end
return strout[1:prevind(strout, end)]
end
function homogPol2str(a::HomogeneousPolynomial{Taylor1{T}}) where {T<:Number}
numVars = get_numvars()
order = a.order
z = zero(a[1])
space = string(" ")
strout::String = space
ifirst = true
iIndices = zeros(Int, numVars)
for pos = 1:size_table[order+1]
monom::String = string("")
@inbounds iIndices[:] = coeff_table[order+1][pos]
for ivar = 1:numVars
powivar = iIndices[ivar]
if powivar == 1
monom = string(monom, name_taylorNvar(ivar))
elseif powivar > 1
monom = string(monom, name_taylorNvar(ivar),
superscriptify(powivar))
end
end
@inbounds c = a[pos]
iszero(c) && continue
cadena = numbr2str(c, ifirst)
ccad::String = (pos==1 || ifirst) ? string("(", cadena, ")") :
string(cadena[1:2], "(", cadena[3:end], ")")
strout = string(strout, ccad, monom, space)
ifirst = false
end
return strout[1:prevind(strout, end)]
end
function numbr2str(zz, ifirst::Bool=false)
plusmin = ifelse( ifirst, string(""), string("+ ") )
return string(plusmin, zz)
end
function numbr2str(zz::T, ifirst::Bool=false) where
{T<:Union{AbstractFloat,Integer,Rational}}
iszero(zz) && return string( zz )
plusmin = ifelse( zz < zero(T), string("- "),
ifelse( ifirst, string(""), string("+ ")) )
return string(plusmin, abs(zz))
end
function numbr2str(zz::Complex, ifirst::Bool=false)
zT = zero(zz.re)
iszero(zz) && return string(zT)
zre, zim = reim(zz)
if zre > zT
if ifirst
cadena = string("( ", zz, " )")
else
cadena = string("+ ( ", zz, " )")
end
elseif zre < zT
cadena = string("- ( ", -zz, " )")
elseif zre == zT
if zim > zT
if ifirst
cadena = string("( ", zz, " )")
else
cadena = string("+ ( ", zz, " )")
end
elseif zim < zT
cadena = string("- ( ", -zz, " )")
else
if ifirst
cadena = string("( ", zz, " )")
else
cadena = string("+ ( ", zz, " )")
end
end
else
if ifirst
cadena = string("( ", zz, " )")
else
cadena = string("+ ( ", zz, " )")
end
end
return cadena
end
name_taylorNvar(i::Int) = string(" ", get_variable_names()[i])
# summary
summary(a::Taylor1{T}) where {T<:Number} =
string(a.order, "-order ", typeof(a), ":")
function summary(a::Union{HomogeneousPolynomial{T}, TaylorN{T}}) where {T<:Number}
string(a.order, "-order ", typeof(a), " in ", get_numvars(), " variables:")
end
# show
function show(io::IO, a::AbstractSeries)
if _show_default[end]
return Base.show_default(IOContext(io, :compact => false), a)
else
return print(io, pretty_print(a))
end
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 1230 | using Test
using TaylorSeries
using Aqua
@testset "Aqua tests (performance)" begin
# This tests that we don't accidentally run into
# https://github.com/JuliaLang/julia/issues/29393
# Aqua.test_unbound_args(TaylorSeries)
ua = Aqua.detect_unbound_args_recursively(TaylorSeries)
@test length(ua) == 0
# See: https://github.com/SciML/OrdinaryDiffEq.jl/issues/1750
# Test that we're not introducing method ambiguities across deps
ambs = Aqua.detect_ambiguities(TaylorSeries; recursive = true)
pkg_match(pkgname, pkdir::Nothing) = false
pkg_match(pkgname, pkdir::AbstractString) = occursin(pkgname, pkdir)
filter!(x -> pkg_match("TaylorSeries", pkgdir(last(x).module)), ambs)
for method_ambiguity in ambs
@show method_ambiguity
end
if VERSION < v"1.10.0-DEV"
@test length(ambs) == 0
end
end
@testset "Aqua tests (additional)" begin
Aqua.test_undefined_exports(TaylorSeries)
Aqua.test_deps_compat(TaylorSeries)
Aqua.test_stale_deps(TaylorSeries; ignore=[:Requires])
Aqua.test_piracies(TaylorSeries)
Aqua.test_unbound_args(TaylorSeries)
Aqua.test_project_extras(TaylorSeries)
Aqua.test_persistent_tasks(TaylorSeries)
end
nothing
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 4621 | # This file is part of TaylorSeries.jl, MIT licensed
#
using TaylorSeries
using Test
@testset "Broadcasting with Taylor1 expansions" begin
t = Taylor1(Int, 5)
# @test t .= t
@test t .== t
@test t .≈ t
@test t .!= (1 + t)
@test 1.0 .+ t == 1.0 + t
@test typeof(1.0 .+ t) == Taylor1{Float64}
@test 1.0 .+ [t] == [1.0 + t]
@test typeof(1.0 .+ [t]) == Vector{Taylor1{Float64}}
@test 1.0 .+ [t, 2t] == [1.0 + t, 1.0 + 2t]
@test [1.0,2.0] .+ [t 2t] == [1.0+t 1.0+2t; 2.0+t 2.0+2t]
@test [1.0] .+ t == t .+ [1.0] == [1.0 + t]
@test 1.0 .* t == t
@test typeof(1.0 .* t) == Taylor1{Float64}
st = sin(t)
@test st .== st
@test st == sin.(t)
@test st.(pi/3) == evaluate(st, pi/3)
@test st(pi/3) == evaluate.(st, pi/3)
@test st.([0.0, pi/3]) == evaluate(st, [0.0, pi/3])
@test typeof(Float32.(t)) == Taylor1{Float32}
@test (Float32.(t))[1] == Float32(1.0)
@test_throws MethodError Float32(t)
# Nested Taylor1 tests
t = Taylor1(Int, 3)
ts = zero(t)
ts .= t
@test ts == t
@. ts = 3 * t^2 - 1
@test ts == 3 * t^2 - 1
# `tt` has to be `Taylor1{Taylor1{Float64}}` (instead of `Taylor1{Taylor1{Int}}`)
    # since the method a^n (n integer) is equivalent to `a^float(n)`.
tt = Taylor1([zero(1.0*t), one(t)], 2)
tts = zero(tt)
@test tt .== tt
@. tts = 3 * tt^2 - 1
@test tts == 3 * tt^2 - 1
ttt = Taylor1([zero(tt), one(tt)])
ttts = zero(ttt)
@test ttt .≈ ttt
@. ttts = 3 * ttt^1 - 1
@test ttts == 3 * ttt^1 - 1
@. ttts = 3 * ttt^3 - 1
@test ttts == - 1.0
end
@testset "Broadcasting with HomogeneousPolynomial and TaylorN" begin
x, y = set_variables("x y", order=3)
xH = x[1]
yH = y[1]
@test xH .== xH
@test yH .≈ yH
@test xH .== xH
@test x[2] .== y[2]
xHs = zero(xH)
xHs .= xH
@test xHs == xH
@. xHs = 2 * xH + yH
@test xHs == 2 * xH + yH
@test 1 .* xH == xH
@test 1 .* [xH] == [xH]
@test [1] .* xH == xH .* [1] == [xH]
@test x .== x
@test y .≈ y
@test x .!= (1 + x)
@test typeof(Float32.(x)) == TaylorN{Float32}
@test (Float32.(x))[1] == HomogeneousPolynomial(Float32[1.0, 0.0])
@test_throws MethodError Float32(x)
p = zero(x)
p .= x
@test p == x
@. p = 1 + 2*x + 3x^2 - x * y
@test p == 1 + 2*x + 3*x^2 - x * y
@test 1.0 .+ x == 1.0 + x
@test y .+ x == y + x
@test typeof(big"1.0" .+ x) == TaylorN{BigFloat}
@test 1.0 .+ [x] == [1.0 + x]
@test typeof(1.0 .+ [y]) == Vector{TaylorN{Float64}}
@test 1.0 .+ [x, 2y] == [1.0 + x, 1.0 + 2y]
@test [1.0,2.0] .+ [x 2y] == [1.0+x 1.0+2y; 2.0+x 2.0+2y]
@test [1.0] .+ x == x .+ [1.0] == [1.0 + x]
@test 1.0 .* y == y
@test typeof(1.0 .* x .* y) == TaylorN{Float64}
end
@testset "Broadcasting with mixtures Taylor1{TalorN{T}}" begin
x, y = set_variables("x", numvars=2, order=6)
tN = Taylor1(TaylorN{Float64}, 3)
@test tN .== tN
@test tN .≈ tN
@test tN .!= (1 + tN)
@test 1.0 .+ tN == 1.0 + tN
@test typeof(1.0 .+ tN) == Taylor1{TaylorN{Float64}}
@test 1.0 .+ [tN] == [1.0 + tN]
@test typeof(1.0 .+ [tN]) == Vector{Taylor1{TaylorN{Float64}}}
@test 1.0 .+ [tN, 2tN] == [1.0 + tN, 1.0 + 2tN]
@test [1.0,2.0] .+ [tN 2tN] == [1.0+tN 1.0+2tN; 2.0+tN 2.0+2tN]
@test [1.0] .+ tN == tN .+ [1.0] == [1.0 + tN]
@test 1.0 .* tN == 1.0 * tN
@test typeof(1.0 .* tN) == Taylor1{TaylorN{Float64}}
tNs = zero(tN)
tNs .= tN
@test tNs == tN
@. tNs = y[1] * tN^2 - 1
@test tNs == y[1] * tN^2 - 1
@. tNs = y * tN^2 - 1
@test tNs == y * tN^2 - 1
end
@testset "Broadcasting with mixtures TaylorN{Talor1{T}}" begin
set_variables("x", numvars=2, order=6)
t = Taylor1(3)
xHt = HomogeneousPolynomial([one(t), zero(t)])
yHt = HomogeneousPolynomial([zero(t), t])
tN1 = TaylorN([HomogeneousPolynomial([t]),xHt,yHt^2])
@test tN1 .== tN1
@test tN1 .≈ tN1
@test tN1 .!= (1 + tN1)
@test 1.0 .+ tN1 == 1.0 + tN1
@test typeof(1.0 .+ tN1) == TaylorN{Taylor1{Float64}}
@test 1.0 .+ [tN1] == [1.0 + tN1]
@test typeof(1.0 .+ [tN1]) == Vector{TaylorN{Taylor1{Float64}}}
@test 1.0 .+ [tN1, 2tN1] == [1.0 + tN1, 1.0 + 2tN1]
@test [1.0, 2.0] .+ [tN1 2tN1] == [1.0+tN1 1.0+2tN1; 2.0+tN1 2.0+2tN1]
@test [1.0] .+ tN1 == tN1 .+ [1.0] == [1.0 + tN1]
@test 1.0 .* tN1 == tN1
@test typeof(1.0 .* tN1) == TaylorN{Taylor1{Float64}}
tN1s = zero(tN1)
tN1s .= tN1
@test tN1s == tN1
@. tN1s = t * tN1^2 - 1
@test tN1s == t * tN1^2 - 1
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 861 | # This file is part of TaylorSeries.jl, MIT licensed
#
using TaylorSeries
using Test
@testset "Test inspired by Fateman (takes a few seconds)" begin
x, y, z, w = set_variables(Int128, "x", numvars=4, order=40)
function fateman2(degree::Int)
T = Int128
oneH = HomogeneousPolynomial([one(T)], 0)
# s = 1 + x + y + z + w
s = TaylorN(
[oneH, HomogeneousPolynomial([one(T),one(T),one(T),one(T)],1)], degree)
s = s^degree
# s is converted to order 2*ndeg
s = TaylorN(s.coeffs, 2*degree)
return s^2 + s
end
function fateman3(degree::Int)
s = x + y + z + w + 1
s = s^degree
s * (s+1)
end
f2 = fateman2(20)
f3 = fateman3(20)
c = getcoeff(f2,[1,6,7,20])
@test c == 128358585324486316800
@test getcoeff(f3,[1,6,7,20]) == c
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 854 | # This file is part of TaylorSeries.jl, MIT licensed
#
using TaylorSeries
using Test
@testset "Testing an identity proved by Euler (8 variables)" begin
make_variable(name, index::Int) = string(name, TaylorSeries.subscriptify(index))
variable_names = String[make_variable("α", i) for i in 1:4]
append!(variable_names, [make_variable("β", i) for i in 1:4])
a1, a2, a3, a4, b1, b2, b3, b4 = set_variables(variable_names, order=4)
lhs1 = a1^2 + a2^2 + a3^2 + a4^2
lhs2 = b1^2 + b2^2 + b3^2 + b4^2
lhs = lhs1 * lhs2
rhs1 = (a1*b1 - a2*b2 - a3*b3 - a4*b4)^2
rhs2 = (a1*b2 + a2*b1 + a3*b4 - a4*b3)^2
rhs3 = (a1*b3 - a2*b4 + a3*b1 + a4*b2)^2
rhs4 = (a1*b4 + a2*b3 - a3*b2 + a4*b1)^2
rhs = rhs1 + rhs2 + rhs3 + rhs4
@test lhs == rhs
v = randn(8)
@test evaluate( rhs, v) == evaluate( lhs, v)
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 5813 | # This file is part of TaylorSeries.jl, MIT licensed
#
using TaylorSeries, IntervalArithmetic
using Test
# eeuler = Base.MathConstants.e
@testset "Tests Taylor1 and TaylorN expansions over Intervals" begin
a = 1..2
b = -1 .. 1
p4(x, a) = x^4 + 4*a*x^3 + 6*a^2*x^2 + 4*a^3*x + a^4
p5(x, a) = x^5 + 5*a*x^4 + 10*a^2*x^3 + 10*a^3*x^2 + 5*a^4*x + a^5
ti = Taylor1(Interval{Float64}, 10)
x, y = set_variables(Interval{Float64}, "x y")
# @test eltype(ti) == Interval{Float64}
# @test eltype(x) == Interval{Float64}
@test eltype(ti) == Taylor1{Interval{Float64}}
@test eltype(x) == TaylorN{Interval{Float64}}
@test TS.numtype(ti) == Interval{Float64}
@test TS.numtype(x) == Interval{Float64}
@test normalize_taylor(ti) == ti
@test normalize_taylor(x) == x
@test p4(ti,-a) == (ti-a)^4
@test p5(ti,-a) == (ti-a)^5
@test p4(ti,-b) == (ti-b)^4
@test all((p5(ti,-b)).coeffs .⊆ ((ti-b)^5).coeffs)
@test p4(x,-y) == (x-y)^4
@test p5(x,-y) == (x-y)^5
@test p4(x,-a) == (x-a)^4
@test p5(x,-a) == (x-a)^5
@test p4(x,-b) == (x-b)^4
for ind in eachindex(p5(x,-b))
@test all((p5(x,-b)[ind]).coeffs .⊆ (((x-b)^5)[ind]).coeffs)
end
# Tests `evaluate`
@test evaluate(p4(x,y), IntervalBox(a,-b)) == p4(a, -b)
@test (p5(x,y))(IntervalBox(a,b)) == p5(a, b)
@test (a-b)^4 ⊆ ((x-y)^4)(a × b)
@test (((x-y)^4)[4])(a × b) == -39 .. 81
p4n = normalize_taylor(p4(x,y), a × b, true)
@test (0..16) ⊆ p4n((-1..1)×(-1..1))
p5n = normalize_taylor(p5(x,y), a × b, true)
@test (-32 .. 32) ⊆ p5n((-1..1)×(-1..1))
p4n = normalize_taylor(p4(x,y), a × b, false)
@test (0..16) ⊆ p4n((0..1)×(0..1))
p5n = normalize_taylor(p5(x,y), a × b, false)
@test (0..32) ⊆ p5n((0..1)×(0..1))
@test evaluate(x*y^3, (-1..1)×(-1..1)) == (-1..1)
@test evaluate(x*y^2, (-1..1)×(-1..1)) == (-1..1)
@test evaluate(x^2*y^2, (-1..1)×(-1..1)) == (0..1)
ii = -1..1
t = Taylor1(1)
@test 0..2 ⊆ (1+t)(ii)
t = Taylor1(2)
@test 0..4 ⊆ ((1+t)^2)(ii)
ii = 0..6
t = Taylor1(4)
f(x) = 0.1 * x^3 - 0.5*x^2 + 1
ft = f(t)
f1 = normalize_taylor(ft, ii, true)
f2 = normalize_taylor(ft, ii, false)
@test Interval(-23/27, f(6)) ⊆ f(ii)
@test Interval(-23/27, f(6)) ⊆ ft(ii)
@test Interval(-23/27, f(6)) ⊆ f1(-1..1)
@test Interval(-23/27, f(6)) ⊆ f2(0..1)
@test f1(-1..1) ⊆ f(ii)
@test diam(f1(-1..1)) < diam(f2(0..1))
# An example from Makino's thesis
ii = 0..1
t = Taylor1(5)
g(x) = 1 - x^4 + x^5
gt = g(t)
g1 = normalize_taylor(gt, 0..1, true)
@test Interval(g(4/5),1) ⊆ g(ii)
@test Interval(g(4/5),1) ⊆ gt(ii)
@test Interval(g(4/5),1) ⊆ g1(-1..1)
@test g1(-1..1) ⊂ g(ii)
@test diam(g1(-1..1)) < diam(gt(ii))
# Test display for Taylor1{Complex{Interval{T}}}
vc = [complex(1.5 .. 2, 0..0 ), complex(-2 .. -1, -1 .. 1 ),
complex( -1 .. 1.5, -1 .. 1.5), complex( 0..0, -1 .. 1.5)]
displayBigO(false)
@test string(Taylor1(vc, 5)) ==
" ( [1.5, 2] + [0, 0]im ) - ( [1, 2] + [-1, 1]im ) t + ( [-1, 1.5] + [-1, 1.5]im ) t² + ( [0, 0] + [-1, 1.5]im ) t³ "
displayBigO(true)
@test string(Taylor1(vc, 5)) ==
" ( [1.5, 2] + [0, 0]im ) - ( [1, 2] + [-1, 1]im ) t + ( [-1, 1.5] + [-1, 1.5]im ) t² + ( [0, 0] + [-1, 1.5]im ) t³ + 𝒪(t⁶)"
# Iss 351 (inspired by a test in ReachabilityAnalysis)
p1 = Taylor1([0 .. 0, (0 .. 0.1) + (0 .. 0.01) * y], 4)
p2 = Taylor1([0 .. 0, (0 .. 0.5) + (0 .. 0.02) * x + (0 .. 0.03) * y], 4)
@test evaluate([p1, p2], 0 .. 1) == [p1[1], p2[1]]
@test typeof(p1(0 .. 1)) == TaylorN{Interval{Float64}}
# Tests related to Iss #311
# `sqrt` and `pow` defined on Interval(0,Inf)
@test_throws DomainError sqrt(ti)
@test sqrt(Interval(0.0, 1.e-15) + ti) == sqrt(Interval(-1.e-15, 1.e-15) + ti)
aa = sqrt(sqrt(Interval(0.0, 1.e-15) + ti))
@test aa == sqrt(sqrt(Interval(-1.e-15, 1.e-15) + ti))
bb = (Interval(0.0, 1.e-15) + ti)^(1/4)
@test bb == (Interval(-1.e-15, 1.e-15) + ti)^(1/4)
@test all(aa.coeffs[2:end] .⊂ bb.coeffs[2:end])
@test_throws DomainError sqrt(x)
@test sqrt(Interval(-1,1)+x) == sqrt(Interval(0,1)+x)
@test (Interval(-1,1)+x)^(1/4) == (Interval(0,1)+x)^(1/4)
# `log` defined on Interval(0,Inf)
@test_throws DomainError log(ti)
@test log(Interval(0.0, 1.e-15) + ti) == log(Interval(-1.e-15, 1.e-15) + ti)
@test_throws DomainError log(y)
@test log(Interval(0.0, 1.e-15) + y) == log(Interval(-1.e-15, 1.e-15) + y)
# `asin` and `acos` defined on Interval(-1,1)
@test_throws DomainError asin(Interval(1.0 .. 2.0) + ti)
@test asin(Interval(-2.0 .. 0.0) + ti) == asin(Interval(-1,0) + ti)
@test_throws DomainError acos(Interval(1.0 .. 2.0) + ti)
@test acos(Interval(-2.0 .. 0.0) + ti) == acos(Interval(-1,0) + ti)
@test_throws DomainError asin(Interval(1.0 .. 2.0) + x)
@test asin(Interval(-2.0 .. 0.0) + x) == asin(Interval(-1,0) + x)
@test_throws DomainError acos(Interval(1.0 .. 2.0) + x)
@test acos(Interval(-2.0 .. 0.0) + x) == acos(Interval(-1,0) + x)
# acosh defined on Interval(1,Inf)
@test_throws DomainError acosh(Interval(0.0 .. 1.0) + ti)
@test acosh(Interval(0.0 .. 2.0) + ti) == acosh(Interval(1.0 .. 2.0) + ti)
@test_throws DomainError acosh(Interval(0.0 .. 1.0) + x)
@test acosh(Interval(0.0 .. 2.0) + x) == acosh(Interval(1.0 .. 2.0) + x)
# atanh defined on Interval(-1,1)
@test_throws DomainError atanh(Interval(1.0 .. 1.0) + ti)
@test atanh(Interval(-2.0 .. 0.0) + ti) == atanh(Interval(-1.0 .. 0.0) + ti)
@test_throws DomainError atanh(Interval(1.0 .. 1.0) + y)
@test atanh(Interval(-2.0 .. 0.0) + y) == atanh(Interval(-1.0 .. 0.0) + y)
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 482 | # This file is part of TaylorSeries.jl, MIT licensed
#
using TaylorSeries, JLD2
using Test
@testset "Test TaylorSeries JLD2 extension" begin
dq = set_variables("q", order=4, numvars=6)
random_TaylorN = [cos(sum(dq .* rand(6))), sin(sum(dq .* rand(6))), tan(sum(dq .* rand(6)))]
jldsave("test.jld2"; random_TaylorN = random_TaylorN)
recovered_taylorN = JLD2.load("test.jld2", "random_TaylorN")
@test recovered_taylorN == random_TaylorN
rm("test.jld2")
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 36144 | # This file is part of TS.jl, MIT licensed
#
using TaylorSeries
using Test
# using LinearAlgebra
@testset "Test hash tables" begin
a = TaylorN{Float64}(9//10)
a isa TaylorN{Float64}
@test constant_term(a) == Float64(9//10)
b = TaylorN{Complex{Float64}}(-4//7im)
@test b isa TaylorN{Complex{Float64}}
@test_throws MethodError TaylorN(-4//7im)
# Issue #85 is solved!
set_variables("x", numvars=66, order=1)
@test TS._params_TaylorN_.order == get_order() == 1
@test TS._params_TaylorN_.num_vars == get_numvars() == 66
@test TS._params_TaylorN_.variable_names[end] == "x₆₆"
@test TS._params_TaylorN_.variable_symbols[6] == :x₆
@test sum(TS.size_table) == 67
@test TS.coeff_table[2][1] == vcat([1], zeros(Int, 65))
@test TS.index_table[2][1] == 3^65
@test TS.pos_table[2][3^64] == 2
#
set_variables("x", numvars=66, order=2)
@test TS._params_TaylorN_.order == get_order() == 2
@test TS._params_TaylorN_.num_vars == get_numvars() == 66
@test sum(TS.size_table) == binomial(66+2, 2)
@test TS.coeff_table[2][1] == vcat([1], zeros(Int, 65))
@test TS.index_table[2][1] == 3^65
@test TS.pos_table[2][3^64] == 2
@test eltype(set_variables(Int, "x", numvars=2, order=6)) == TaylorN{Int}
@test eltype(set_variables("x", numvars=2, order=6)) == TaylorN{Float64}
@test eltype(set_variables(BigInt, "x y", order=6)) == TaylorN{BigInt}
@test eltype(set_variables("x y", order=6)) == TaylorN{Float64}
@test eltype(set_variables(Int, :x, numvars=2, order=6)) == TaylorN{Int}
@test eltype(set_variables(:x, numvars=2, order=6)) == TaylorN{Float64}
@test eltype(set_variables(BigInt, [:x, :y], order=6)) == TaylorN{BigInt}
@test eltype(set_variables([:x, :y], order=6)) == TaylorN{Float64}
@test typeof(show_params_TaylorN()) == Nothing
@test typeof(show_monomials(2)) == Nothing
@test TS.coeff_table[2][1] == [1,0]
@test TS.index_table[2][1] == 7
@test TS.in_base(get_order(), [2,1]) == 15
@test TS.pos_table[4][15] == 2
end
@testset "Tests for HomogeneousPolynomial and TaylorN" begin
eeuler = Base.MathConstants.e
@test HomogeneousPolynomial <: AbstractSeries
@test HomogeneousPolynomial{Int} <: AbstractSeries{Int}
@test TaylorN{Float64} <: AbstractSeries{Float64}
set_variables([:x, :y], order=6)
@test get_order() == 6
@test get_numvars() == 2
@test get_variables()[1].order == get_order()
@test get_variables(2)[1].order == 2
@test get_variables(3)[1] == TaylorN(1, order=3)
@test get_variables(Int, 3)[1] == TaylorN(Int, 1, order=3)
@test length(get_variables()) == get_numvars()
x, y = set_variables("x y", order=6)
@test axes(x) == axes(y) == ()
@test axes(x[1]) == axes(y[2]) == ()
@test size(x) == (7,)
@test size(x[1]) == (2,)
@test size(x[2]) == (3,)
@test firstindex(x) == 0
@test firstindex(x[end]) == 1
@test lastindex(y) == get_order()
@test eachindex(x) == 0:6
@test iterate(x) == (HomogeneousPolynomial([0.0], 0), 1)
@test iterate(y, 1) == (HomogeneousPolynomial([0.0, 1.0], 1), 2)
@test isnothing(iterate(x, 7))
@test x.order == 6
@test TS.name_taylorNvar(1) == " x"
@test TS._params_TaylorN_.variable_names == ["x","y"]
@test TS._params_TaylorN_.variable_symbols == [:x, :y]
@test get_variable_symbols() == [:x, :y]
@test TS.lookupvar(:x) == 1
@test TS.lookupvar(:α) == 0
@test TS.get_variable_names() == ["x", "y"]
@test x == HomogeneousPolynomial(Float64, 1)
@test x == HomogeneousPolynomial(1)
@test y == HomogeneousPolynomial(Float64, 2)
@test y == HomogeneousPolynomial(2)
@test !isnan(x)
set_variables("x", numvars=2, order=17)
v = [1,2]
@test typeof(TS.resize_coeffsHP!(v,2)) == Nothing
@test v == [1,2,0]
@test_throws AssertionError TS.resize_coeffsHP!(v,1)
hpol_v = HomogeneousPolynomial(v)
@test findfirst(hpol_v) == 1
@test findlast(hpol_v) == 2
hpol_v[3] = 3
@test v == [1,2,3]
hpol_v[1:3] = 3
@test v == [3,3,3]
hpol_v[1:2:2] = 0
@test v == [0,3,3]
@test findfirst(hpol_v) == 2
@test findlast(hpol_v) == 3
hpol_v[1:1:2] = [1,2]
@test all(hpol_v[1:1:2] .== [1,2])
@test v == [1,2,3]
hpol_v[:] = zeros(Int, 3)
@test hpol_v == 0
@test findfirst(hpol_v) == -1
@test findlast(hpol_v) == -1
xv = TaylorSeries.get_variables()
@test (xv[1] - 1.0)^3 == -1 + 3xv[1] - 3xv[1]^2 + xv[1]^3
@test (xv[1] - 1.0)^4 == 1 - 4xv[1] + 6xv[1]^2 - 4xv[1]^3 + xv[1]^4
@test (xv[2] - 1.0)^3 == -1 + 3xv[2] - 3xv[2]^2 + xv[2]^3
@test (xv[2] - 1.0)^4 == 1 - 4xv[2] + 6xv[2]^2 - 4xv[2]^3 + xv[2]^4
xN = 5.0 + 3xv[1] - 4.5xv[2] + 6.125xv[1]^2 - 7.25xv[1]*xv[2] - 5.5xv[2]^2
xNcopy = deepcopy(xN)
for k in reverse(eachindex(xNcopy))
TaylorSeries.sqr!(xNcopy, k)
end
@test xNcopy == xN^2
@test xN^2 == Base.power_by_squaring(xN, 2)
@test xN*xN*xN == xN*xNcopy
@test xN^3 == Base.power_by_squaring(xN, 3)
for k in reverse(eachindex(xNcopy))
TaylorSeries.sqr!(xNcopy, k)
end
@test xNcopy == xN^4
@test xN^4 == Base.power_by_squaring(xN, 4)
tn_v = TaylorN(HomogeneousPolynomial(zeros(Int, 3)))
tn_v[0] = 1
@test tn_v == 1
tn_v[0:1] = [0, 1]
@test tn_v[0] == 0 && tn_v[1] == HomogeneousPolynomial(1, 1)
tn_v[0:1] = [HomogeneousPolynomial(0, 0), HomogeneousPolynomial([0,1])]
@test tn_v[0] == 0 && tn_v[1] == HomogeneousPolynomial([0,1], 1)
tn_v[:] = [HomogeneousPolynomial(1, 0), HomogeneousPolynomial(0, 1), hpol_v]
@test tn_v == 1
tn_v[:] = 0
@test tn_v == 0
tn_v[:] = [3,1,0]
@test tn_v == TaylorN([HomogeneousPolynomial(3, 0), HomogeneousPolynomial(1, 1)], 2)
tn_v[0:2] = [HomogeneousPolynomial(3, 0), HomogeneousPolynomial(1, 1), HomogeneousPolynomial(0, 2)]
@test tn_v == TaylorN([HomogeneousPolynomial(3, 0), HomogeneousPolynomial(1, 1)], 2)
tn_v[0:2:2] = [0,0]
@test tn_v == TaylorN(HomogeneousPolynomial(1, 1), 2)
xH = HomogeneousPolynomial([1,0])
yH = HomogeneousPolynomial([0,1],1)
@test xH == convert(HomogeneousPolynomial{Float64},xH)
@test HomogeneousPolynomial(xH) == xH
@test HomogeneousPolynomial(0,0) == 0
@test (@inferred conj(xH)) == (@inferred adjoint(xH))
@test (@inferred real(xH)) == xH
xT = TaylorN(xH, 17)
yT = TaylorN(Int, 2, order=17)
@test findfirst(xT) == 1
@test findlast(yT) == 1
@test (@inferred conj(xT)) == (@inferred adjoint(xT))
@test (@inferred real(xT)) == (xT)
zeroT = zero( TaylorN([xH],1) )
@test findfirst(zeroT) == findlast(zeroT) == -1
@test (@inferred imag(xT)) == (zeroT)
@test zeroT.coeffs == zeros(HomogeneousPolynomial{Int}, 1)
@test size(xH) == (2,)
@test firstindex(xH) == 1
@test lastindex(yH) == 2
@test length(zeros(HomogeneousPolynomial{Int}, 1)) == 2
@test one(HomogeneousPolynomial(1,1)) == HomogeneousPolynomial([1,1])
uT = one(convert(TaylorN{Float64},yT))
@test uT == one(HomogeneousPolynomial)
@test uT == convert(TaylorN{Float64},uT)
@test zeroT[0] == HomogeneousPolynomial(0, 0)
@test uT[0] == HomogeneousPolynomial(1, 0)
@test ones(xH,1) == [1, xH+yH]
@test typeof(ones(xH,2)) == Array{HomogeneousPolynomial{Int},1}
@test length(ones(xH,2)) == 3
@test ones(HomogeneousPolynomial{Complex{Int}},0) ==
[HomogeneousPolynomial([complex(1,0)], 0)]
@test !isnan(uT)
@test TS.fixorder(xH,yH) == (xH,yH)
@test_throws AssertionError TS.fixorder(zeros(xH,0)[1],yH)
@testset "Lexicographic order" begin
@test HomogeneousPolynomial([2.1]) > 0.5 * xH > yH > xH^2
@test -1.0*xH < -yH^2 < 0 < HomogeneousPolynomial([1.0])
@test -3 < HomogeneousPolynomial([-2]) < 0.0
@test !(zero(yH^2) > 0)
@test 1 > 0.5 * xT > yT > xT^2 > 0 > TaylorN(-1.0, 3)
@test -1 < -0.25 * yT < -xT^2 < 0.0 < TaylorN(1, 3)
@test !(zero(xT) > 0)
@test !(zero(yT^2) < 0)
end
@test constant_term(xT) == 0
@test constant_term(uT) == 1.0
@test constant_term(xT) == constant_term(yT)
@test constant_term(xH) == xH
@test linear_polynomial(1+xT) == xT
@test get_order(linear_polynomial(1+xT)) == get_order(xT)
@test linear_polynomial(1+xT+xT*yT) == xT
@test linear_polynomial(uT) == zero(yT)
@test nonlinear_polynomial(1+xT+xT*yT) == xT*yT
@test get_order(zeroT) == 1
@test xT[1][1] == 1
@test yH[2] == 1
@test getcoeff(xT,(1,0)) == getcoeff(xT,[1,0]) == 1
@test getcoeff(yH,(1,0)) == getcoeff(yH,[1,0]) == 0
@test typeof(convert(HomogeneousPolynomial,1im)) ==
HomogeneousPolynomial{Complex{Int}}
@test convert(HomogeneousPolynomial,1im) ==
HomogeneousPolynomial([complex(0,1)], 0)
@test convert(HomogeneousPolynomial{Int},[1,1]) == xH+yH
@test convert(HomogeneousPolynomial{Float64},[2,-1]) == 2.0xH-yH
@test typeof(convert(TaylorN,1im)) == TaylorN{Complex{Int}}
@test convert(TaylorN, 1im) ==
TaylorN([HomogeneousPolynomial([complex(0,1)], 0)], 0)
@test convert(TaylorN{Float64}, yH) == 1.0*yT
@test convert(TaylorN{Float64}, [xH,yH]) == xT+1.0*yT
@test convert(TaylorN{Int}, [xH,yH]) == xT+yT
@test promote(xH, [1,1])[2] == xH+yH
@test promote(xH, yT)[1] == xT
@test promote(xT, [xH,yH])[2] == xT+yT
@test typeof(promote(im*xT,[xH,yH])[2]) == TaylorN{Complex{Int}}
@test iszero(zeroT.coeffs)
@test iszero(zero(xH))
@test !iszero(uT)
@test iszero(zeroT)
@test convert(eltype(xH), xH) === xH
@test eltype(xH) == HomogeneousPolynomial{Int}
@test TS.numtype(xH) == Int
@test normalize_taylor(xH) == xH
@test length(xH) == 2
@test zero(xH) == 0*xH
@test one(yH) == xH+yH
@test xH * true == xH
@test false * yH == zero(yH)
@test get_order(yH) == 1
@test get_order(xT) == 17
@test xT * true == xT
@test false * yT == zero(yT)
@test HomogeneousPolynomial([1.0])*xH == xH
@test xT == TaylorN([xH])
@test one(xT) == TaylorN(1,5)
@test TaylorN(uT) == convert(TaylorN{Complex},1)
@test get_numvars() == 2
@test length(uT) == get_order()+1
@test convert(eltype(xT), xT) === xT
@test eltype(convert(TaylorN{Complex{Float64}},1)) == TaylorN{Complex{Float64}}
@test TS.numtype(convert(TaylorN{Complex{Float64}},1)) == Complex{Float64}
@test normalize_taylor(xT) == xT
@test 1+xT+yT == TaylorN(1,1) + TaylorN([xH,yH],1)
@test xT-yT-1 == TaylorN([-1,xH-yH])
@test xT*yT == TaylorN([HomogeneousPolynomial([0,1,0],2)])
@test (1/(1-xT))[3] == HomogeneousPolynomial([1.0],3)
@test xH^20 == HomogeneousPolynomial([0], get_order())
@test (yT/(1-xT))[4] == xH^3 * yH
@test mod(1+xT,1) == +xT
@test (rem(1+xT,1))[0] == 0
@test mod(1+xT,1.0) == +xT
@test (rem(1+xT,1.0))[0] == 0
@test abs(1-xT) == 1-xT
@test abs(-1-xT) == 1+xT
@test abs2(im*xT) == abs2(xT)
@test abs(im*(1+xT)) == abs(1+xT)
@test isapprox(abs2(exp(im*xT)), one(xT))
@test isapprox(abs(exp(im*xT)), one(xT))
@test differentiate(yH,1) == differentiate(xH, :x₂)
@test differentiate(mod2pi(2pi+yT^3),2) == derivative(yT^3, :x₂)
@test differentiate(yT^3, :x₂) == differentiate(yT^3, (0,1))
@test differentiate(yT) == zeroT == differentiate(yT, (1,0))
@test differentiate((0,1), yT) == 1
@test -xT/3im == im*xT/3
@test (xH/3im)' == im*xH/3
@test xT/BigInt(3) == TaylorN(BigFloat,1)/3
@test xT/complex(0,BigInt(3)) == -im*xT/BigInt(3)
@test (xH/complex(0,BigInt(3)))' ==
im*HomogeneousPolynomial([BigInt(1),0])/3
@test evaluate(xH) == zero(eltype(xH))
@test xH() == zero(TS.numtype(xH))
@test xH([1,1]) == evaluate(xH, [1,1])
@test xH((1,1)) == xH(1, 1.0) == evaluate(xH, (1, 1.0)) == 1
hp = -5.4xH+6.89yH
@test hp([1,1]) == evaluate(hp, [1,1])
vr = rand(2)
@test hp(vr) == evaluate(hp, vr)
ctab = copy(TS.coeff_table)
@test integrate(yH,1) == integrate(xH, :x₂)
p = (xT-yT)^6
@test integrate(differentiate(p, 1), 1, yT^6) == p
@test integrate(differentiate(p, :x₁), :x₁, yT^6) == p
@test differentiate(integrate(p, 2), 2) == p
@test differentiate(integrate(p, :x₂), :x₂) == p
@test differentiate(TaylorN(1.0, get_order())) == TaylorN(0.0, get_order())
@test integrate(TaylorN(6.0, get_order()), 1) == 6xT
@test integrate(TaylorN(0.0, get_order()), 2) == TaylorN(0.0, get_order())
@test integrate(TaylorN(0.0, get_order()), 2, xT) == xT
@test integrate(TaylorN(0.0, get_order()), :x₂, xT) == xT
@test integrate(xT^17, 2) == TaylorN(0.0, get_order())
@test integrate(xT^17, 1, yT) == yT
@test integrate(xT^17, 1, 2.0) == TaylorN(2.0, get_order())
@test integrate(xT^17, :x₁, 2.0) == TaylorN(2.0, get_order())
@test ctab == TS.@isonethread(TS.coeff_table)
@test_throws AssertionError integrate(xT, 1, xT)
@test_throws AssertionError integrate(xT, :x₁, xT)
@test_throws AssertionError differentiate(xT, (1,))
@test_throws AssertionError differentiate(xT, (1,2,3))
@test_throws AssertionError differentiate(xT, (-1,2))
@test_throws AssertionError differentiate((1,), xT)
@test_throws AssertionError differentiate((1,2,3), xT)
@test_throws AssertionError differentiate((-1,2), xT)
@test differentiate(2xT*yT^2, (8,8)) == 0
@test differentiate((8,8), 2xT*yT^2) == 0
@test differentiate(2xT*yT^2, 1) == 2yT^2
@test differentiate((1,0), 2xT*yT^2) == 0
@test differentiate(2xT*yT^2, (1,2)) == 4*one(yT)
@test differentiate((1,2), 2xT*yT^2) == 4
@test xT*xT^3 == xT^4
txy = 1.0 + xT*yT - 0.5*xT^2*yT + (1/3)*xT^3*yT + 0.5*xT^2*yT^2
@test getindex((1+TaylorN(1))^TaylorN(2),0:4) == txy.coeffs[1:5]
@test ( (1+TaylorN(1))^TaylorN(2) )[:] == ( (1+TaylorN(1))^TaylorN(2) ).coeffs[:]
@test txy.coeffs[:] == txy[:]
@test txy.coeffs[:] == txy[0:end]
txy[:] .= ( -1.0 + 3xT*yT - xT^2*yT + (4/3)*xT^3*yT + (1/3)*xT*yT^3 + 0.5*xT^2*yT^2 + 0.5*xT*yT^3 )[:]
@test txy[:] == ( -1.0 + 3xT*yT - xT^2*yT + (4/3)*xT^3*yT + (1/3)*xT*yT^3 + 0.5*xT^2*yT^2 + 0.5*xT*yT^3 )[:]
txy[2:end-1] .= ( 1.0 - xT*yT + 0.5*xT^2*yT - (2/3)*xT*yT^3 - 0.5*xT^2*yT^2 + 7*xT^3*yT )[2:end-1]
@test txy[2:end-1] == ( 1.0 - xT*yT + 0.5*xT^2*yT - (2/3)*xT*yT^3 - 0.5*xT^2*yT^2 + 7*xT^3*yT )[2:end-1]
ident = [xT, yT]
pN = [x+y, x-y]
@test evaluate.(inverse_map(pN), Ref(pN)) == ident
@test evaluate.(pN, Ref(inverse_map(pN))) == ident
pN = [exp(xT)-1, log(1+yT)]
@test inverse_map(pN) ≈ [log(1+xT), exp(yT)-1]
@test evaluate.(pN, Ref(inverse_map(pN))) ≈ ident
pN = [tan(xT), atan(yT)]
@test evaluate.(inverse_map(pN), Ref(pN)) ≈ ident
@test evaluate.(pN, Ref(inverse_map(pN))) ≈ ident
pN = [sin(xT), asin(yT)]
@test evaluate.(inverse_map(pN), Ref(pN)) ≈ ident
@test evaluate.(pN, Ref(inverse_map(pN))) ≈ ident
a = -5.0 + sin(xT+yT^2)
b = deepcopy(a)
@test a[:] == a[0:end]
@test a[:] == b[:]
@test a[1:end] == b[1:end]
@test a[end][:] == b[end][:]
@test a[end][1:end] == b[end][1:end]
a[end][:] .= rand.()
rv = a[end][:]
@test a[end][:] == rv
@test a[end][:] != b[end][:]
a[end][1:end] .= rand.()
rv = a[end][1:end]
@test a[end][1:end] == rv
@test a[end][1:end] != b[end][1:end]
@test a[0:2:end] == a.coeffs[1:2:end]
a[0:1:end] .= 0.0
@test a == zero(a)
hp = HomogeneousPolynomial(1)^8
rv1 = rand( length(hp) )
hp[:] = rv1
@test rv1 == hp[:]
rv2 = rand( length(hp)-2 )
hp[1:end-2] .= rv2
@test hp[1:end-2] == rv2
@test hp[end-1:end] == rv1[end-1:end]
hp[3:4] .= 0.0
@test hp[1:2] == rv2[1:2]
@test hp[3:4] == zeros(2)
@test hp[5:end-2] == rv2[5:end]
@test hp[end-1:end] == rv1[end-1:end]
hp[:] = 0.0
@test hp[:] == zero(rv1)
@test all(hp[end-1:1:end] .== 0.0)
pol = sin(xT+yT*xT)+yT^2-(1-xT)^3
q = deepcopy(pol)
q[:] = 0.0
@test get_order.(q[:]) == collect(0:q.order)
@test q[:] == zero(q[:])
q[:] .= pol.coeffs
@test q == pol
@test q[:] == pol[:]
q[2:end-1] .= 0.0
@test q[2:end-1] == zero.(q[2:end-1])
@test q[1] == pol[1]
@test q[end] == pol[end]
# q[:] = pol.coeffs
# zH0 = zero(HomogeneousPolynomial{Float64})
q[:] = 1.0
@test q[1] == HomogeneousPolynomial([1,0])
@test q[2] == HomogeneousPolynomial([1,0,0])
q[:] .= pol.coeffs
q[2:end-1] = one.(q[2:end-1])
@test q[2:end-1] == one.(q[2:end-1])
@test q[2] == HomogeneousPolynomial([1,1,1])
@test q[1] == pol[1]
@test q[end] == pol[end]
q[:] .= pol.coeffs
zHall = zeros(HomogeneousPolynomial{Float64}, q.order)
q[:] .= zHall
@test q[:] == zHall
q[:] .= pol.coeffs
q[1:end-1] .= zHall[2:end-1]
@test q[1:end-1] == zHall[2:end-1]
q[:] .= pol.coeffs
@test q[:] != zeros(q.order+1)
q[:] .= zeros(q.order+1)
@test q[:] == zeros(q.order+1)
q[:] .= pol.coeffs
q[1:end-1] .= zeros(q.order+1)[2:end-1]
@test q != pol
@test all(q[1:1:end-1] .== 0.0)
@test q[1:end-1] == zeros(q.order+1)[2:end-1]
@test q[0] == pol[0]
@test q[end] == pol[end]
q[:] .= pol.coeffs
pol2 = cos(sin(xT)-yT^3*xT)-3yT^2+sqrt(1-xT)
q[2:end-2] .= pol2.coeffs[3:end-2]
@test q[0:1] == pol[0:1]
@test q[2:end-2] == pol2[2:end-2]
@test q[end-1:end] == pol[end-1:end]
@test q[2:2:end-2] == pol2[2:2:end-2]
@test q[end-1:1:end] == pol[end-1:1:end]
q[end-2:2:end] .= [0.0, 0.0]
@test q[end-2] == 0.0
@test_throws AssertionError q[end-2:2:end] = [0.0, 0.0, 0.0]
q[end-2:2:end] .= pol.coeffs[end-2:2:end]
@test q[end-2] == pol[end-2]
q[end-2:2:end] .= pol.coeffs[end-2:2:end]
@test_throws AssertionError q[end-2:2:end] = pol.coeffs[end-1:2:end]
@test_throws AssertionError yT^(-2)
@test_throws AssertionError yT^(-2.0)
@test (1+xT)^(3//2) == ((1+xT)^0.5)^3
@test real(xH) == xH
@test imag(xH) == zero(xH)
@test (@inferred conj(im*yH)) == (@inferred adjoint(im*yH))
@test (@inferred conj(im*yT)) == (@inferred adjoint(im*yT))
@test real( exp(1im * xT)) == cos(xT)
@test getcoeff(convert(TaylorN{Rational{Int}},cos(xT)),(4,0)) ==
1//factorial(4)
cr = convert(TaylorN{Rational{Int}},cos(xT))
@test getcoeff(cr,(4,0)) == 1//factorial(4)
@test imag((exp(yT))^(-1im)') == sin(yT)
exy = exp( xT+yT )
@test evaluate(exy) == 1
@test exy(0.1im, 0.01im) == exp(0.11im)
@test evaluate(exy,(0.1im, 0.01im)) == exp(0.11im)
@test exy((0.1im, 0.01im)) == exp(0.11im)
@test exy(true, (0.1im, 0.01im)) == exp(0.11im)
@test evaluate(exy, (0.1im, 0.01im), sorting=false) == exy(false, (0.1im, 0.01im))
@test evaluate(exy, (0.1im, 0.01im), sorting=false) == exy(false, 0.1im, 0.01im)
@test evaluate(exy,[0.1im, 0.01im]) == exp(0.11im)
@test exy([0.1im, 0.01im]) == exp(0.11im)
@test isapprox(evaluate(exy, (1,1)), eeuler^2)
@test exy(:x₁, 0.0) == exp(yT)
exym1 = expm1(1.0e-16+xT+yT)
@test exym1[0] == expm1(1.0e-16)
@test evaluate(exym1, [1.0e-16, 0.0]) ≈ expm1(2.0e-16)
txy = tan(xT+yT)
@test getcoeff(txy,(8,7)) == 929569/99225
ptxy = xT + yT + (1/3)*( xT^3 + yT^3 ) + xT^2*yT + xT*yT^2
@test getindex(tan(TaylorN(1)+TaylorN(2)),0:4) == ptxy.coeffs[1:5]
@test tan(1+xT+yT) ≈ sin(1+xT+yT)/cos(1+xT+yT)
@test cot(1+xT+yT) ≈ 1/tan(1+xT+yT)
@test evaluate(xH*yH, 1.0, 2.0) == (xH*yH)(1.0, 2.0) == 2.0
@test evaluate(xH*yH, (1.0, 2.0)) == 2.0
@test evaluate(xH*yH, [1.0, 2.0]) == 2.0
@test ptxy(:x₁, -1.0) == -1 + yT + (-1.0+yT^3)/3 + yT - yT^2
@test ptxy(:x₁ => -1.0) == -1 + yT + (-1.0+yT^3)/3 + yT - yT^2
@test evaluate(ptxy, :x₁ => -1.0) == -1 + yT + (-1.0+yT^3)/3 + yT - yT^2
@test evaluate(ptxy, :x₁, -1.0) == -1 + yT + (-1.0+yT^3)/3 + yT - yT^2
@test isa(evaluate(ptxy, :x₁, 1), TaylorN{Float64})
@test evaluate(ptxy, :x₁, xT) == ptxy
@test evaluate(ptxy, 1, 1-yT) ≈ 4/3 + zero(yT)
v = zeros(Int, 2)
@test isnothing(evaluate!([xT, yT], ones(Int, 2), v))
@test v == ones(2)
@test isnothing(evaluate!([xT, yT][1:2], ones(Int, 2), v))
@test v == ones(2)
A_TN = [xT 2xT 3xT; yT 2yT 3yT]
@test evaluate(A_TN, ones(2)) == [1.0 2.0 3.0; 1.0 2.0 3.0]
@test evaluate(A_TN) == [0.0 0.0 0.0; 0.0 0.0 0.0]
@test A_TN() == [0.0 0.0 0.0; 0.0 0.0 0.0]
@test (view(A_TN,:,:))() == [0.0 0.0 0.0; 0.0 0.0 0.0]
t = Taylor1(10)
@test A_TN([t,t^2]) == [t 2t 3t; t^2 2t^2 3t^2]
@test view(A_TN, :, :)(ones(2)) == A_TN(ones(2))
@test view(A_TN, :, 1)(ones(2)) == A_TN[:,1](ones(2))
@test evaluate(sin(asin(xT+yT)), [1.0,0.5]) == 1.5
@test evaluate(asin(sin(xT+yT)), [1.0,0.5]) == 1.5
@test tan(atan(xT+yT)) == xT+yT
@test atan(tan(xT+yT)) == xT+yT
@test atan(sin(1+xT+yT), cos(1+xT+yT)) == atan(sin(1+xT+yT)/cos(1+xT+yT))
@test constant_term(atan(sin(3pi/4+xT+yT), cos(3pi/4+xT+yT))) == 3pi/4
@test asin(xT+yT) + acos(xT+yT) == pi/2
@test -sinh(xT+yT) + cosh(xT+yT) == exp(-(xT+yT))
@test sinh(xT+yT) + cosh(xT+yT) == exp(xT+yT)
@test evaluate(- sinh(xT+yT)^2 + cosh(xT+yT)^2 , rand(2)) == 1
@test evaluate(- sinh(xT+yT)^2 + cosh(xT+yT)^2 , zeros(2)) == 1
@test tanh(xT + yT + 0im) == -1im * tan((xT+yT)*1im)
@test cosh(xT+yT) == real(cos(im*(xT+yT)))
@test sinh(xT+yT) == imag(sin(im*(xT+yT)))
xx = 1.0*zeroT
TS.add!(xx, 1.0*xT, 2yT, 1)
@test xx[1] == HomogeneousPolynomial([1,2])
TS.add!(xx, 5.0, 0)
@test xx[0] == HomogeneousPolynomial([5.0])
TS.add!(xx, -5.0, 1)
@test xx[1] == zero(xx[1])
TS.subst!(xx, 1.0*xT, yT, 1)
@test xx[1] == HomogeneousPolynomial([1,-1])
TS.subst!(xx, 5.0, 0)
@test xx[0] == HomogeneousPolynomial([-5.0])
TS.subst!(xx, -5.0, 1)
@test xx[1] == zero(xx[end])
TS.div!(xx, 1.0+xT, 1.0+xT, 0)
@test xx[0] == HomogeneousPolynomial([1.0])
TS.pow!(xx, 1.0+xT, xx, 0.5, 1)
@test xx[1] == HomogeneousPolynomial([0.5,0.0])
xx = 1.0*zeroT
TS.pow!(xx, 1.0+xT, xx, 1.5, 0)
@test xx[0] == HomogeneousPolynomial([1.0])
TS.pow!(xx, 1.0+xT, xx, 1.5, 1)
@test xx[1] == HomogeneousPolynomial([1.5,0.0])
xx = 1.0*zeroT
TS.pow!(xx, 1.0+xT, xx, 0, 0)
@test xx[0] == HomogeneousPolynomial([1.0])
TS.pow!(xx, 1.0+xT, xx, 1, 1)
@test xx[1] == HomogeneousPolynomial([1.0,0.0])
xx = 1.0*zeroT
TS.pow!(xx, 1.0+xT, xx, 2, 0)
@test xx[0] == HomogeneousPolynomial([1.0])
TS.pow!(xx, 1.0+xT, xx, 2, 1)
@test xx[1] == HomogeneousPolynomial([2.0,0.0])
xx = 1.0*zeroT
TS.sqrt!(xx, 1.0+xT, 0)
TS.sqrt!(xx, 1.0+xT, 1)
@test xx[0] == 1.0
@test xx[1] == HomogeneousPolynomial([0.5,0.0])
xx = 1.0*zeroT
TS.exp!(xx, 1.0*xT, 0)
TS.exp!(xx, 1.0*xT, 1)
@test xx[0] == 1.0
@test xx[1] == HomogeneousPolynomial([1.0,0.0])
xx = 1.0e-16 + 1.0*zeroT
TS.expm1!(xx, 1.0e-16 + 1.0*xT, 0)
TS.expm1!(xx, 1.0e-16 + 1.0*xT, 1)
@test xx[0] == expm1(1.0e-16)
@test xx[1] == HomogeneousPolynomial([1.0,0.0])
xx = 1.0*zeroT
TS.log!(xx, 1.0+xT, 0)
TS.log!(xx, 1.0+xT, 1)
@test xx[0] == 0.0
@test xx[1] == HomogeneousPolynomial(1.0,1)
xx = 1.0*zeroT
TS.log1p!(xx, 0.25+xT, 0)
TS.log1p!(xx, 0.25+xT, 1)
@test xx[0] == log1p(0.25)
@test xx[1] == HomogeneousPolynomial(1/1.25,1)
xx = 1.0*zeroT
cxx = zero(xx)
TS.sincos!(xx, cxx, 1.0*xT, 0)
TS.sincos!(xx, cxx, 1.0*xT, 1)
@test xx[0] == 0.0
@test xx[1] == HomogeneousPolynomial(1.0,1)
@test cxx[0] == 1.0
@test cxx[1] == HomogeneousPolynomial(0.0,1)
xx = 1.0*zeroT
cxx = zero(xx)
TS.tan!(xx, 1.0*xT, cxx, 0)
TS.tan!(xx, 1.0*xT, cxx, 1)
@test xx[0] == 0.0
@test xx[1] == HomogeneousPolynomial(1.0,1)
@test cxx[0] == 0.0
@test cxx[1] == HomogeneousPolynomial(0.0,1)
xx = 1.0*zeroT
cxx = zero(xx)
TS.asin!(xx, 1.0*xT, cxx, 0)
TS.asin!(xx, 1.0*xT, cxx, 1)
@test xx[0] == 0.0
@test xx[1] == HomogeneousPolynomial(1.0,1)
@test cxx[0] == 1.0
@test cxx[1] == HomogeneousPolynomial(0.0,1)
xx = 1.0*zeroT
cxx = zero(xx)
TS.acos!(xx, 1.0*xT, cxx, 0)
TS.acos!(xx, 1.0*xT, cxx, 1)
@test xx[0] == acos(0.0)
@test xx[1] == HomogeneousPolynomial(-1.0,1)
@test cxx[0] == 1.0
@test cxx[1] == HomogeneousPolynomial(0.0,1)
xx = 1.0*zeroT
cxx = zero(xx)
TS.atan!(xx, 1.0*xT, cxx, 0)
TS.atan!(xx, 1.0*xT, cxx, 1)
@test xx[0] == 0.0
@test xx[1] == HomogeneousPolynomial(1.0,1)
@test cxx[0] == 1.0
@test cxx[1] == HomogeneousPolynomial(0.0,1)
xx = 1.0*zeroT
cxx = zero(xx)
TS.sinhcosh!(xx, cxx, 1.0*xT, 0)
TS.sinhcosh!(xx, cxx, 1.0*xT, 1)
@test xx[0] == 0.0
@test xx[1] == HomogeneousPolynomial(1.0,1)
@test cxx[0] == 1.0
@test cxx[1] == HomogeneousPolynomial(0.0,1)
xx = 1.0*zeroT
cxx = zero(xx)
TS.tanh!(xx, 1.0*xT, cxx, 0)
TS.tanh!(xx, 1.0*xT, cxx, 1)
@test xx[0] == 0.0
@test xx[1] == HomogeneousPolynomial(1.0,1)
@test cxx[0] == 0.0
@test cxx[1] == HomogeneousPolynomial(0.0,1)
g1(x, y) = x^3 + 3y^2 - 2x^2 * y - 7x + 2
g2(x, y) = y + x^2 - x^4
f1 = g1(xT, yT)
f2 = g2(xT, yT)
@test TS.gradient(f1) == [ 3*xT^2-4*xT*yT-TaylorN(7,0), 6*yT-2*xT^2 ]
@test ∇(f2) == [2*xT - 4*xT^3, TaylorN(1,0)]
@test TS.jacobian([f1,f2], [2,1]) == TS.jacobian( [g1(xT+2,yT+1), g2(xT+2,yT+1)] )
jac = Array{Int}(undef, 2, 2)
TS.jacobian!(jac, [g1(xT+2,yT+1), g2(xT+2,yT+1)])
@test jac == TS.jacobian( [g1(xT+2,yT+1), g2(xT+2,yT+1)] )
TS.jacobian!(jac, [f1,f2], [2,1])
@test jac == TS.jacobian([f1,f2], [2,1])
@test TS.hessian( f1*f2 ) ==
[differentiate((2,0), f1*f2) differentiate((1,1), (f1*f2));
differentiate((1,1), f1*f2) differentiate((0,2), (f1*f2))] == [4 -7; -7 0]
@test TS.hessian( f1*f2, [xT, yT] ) ==
[differentiate(f1*f2, (2,0)) differentiate((f1*f2), (1,1));
differentiate(f1*f2, (1,1)) differentiate((f1*f2), (0,2))]
@test [xT yT]*TS.hessian(f1*f2)*[xT, yT] == [ 2*TaylorN((f1*f2)[2]) ]
@test TS.hessian(f1^2)/2 == [ [49,0] [0,12] ]
@test TS.hessian(f1-f2-2*f1*f2) == (TS.hessian(f1-f2-2*f1*f2))'
@test TS.hessian(f1-f2,[1,-1]) == TS.hessian(g1(xT+1,yT-1)-g2(xT+1,yT-1))
hes = Array{Int}(undef, 2, 2)
TS.hessian!(hes, f1*f2)
@test hes == TS.hessian(f1*f2)
@test [xT yT]*hes*[xT, yT] == [ 2*TaylorN((f1*f2)[2]) ]
TS.hessian!(hes, f1^2)
@test hes/2 == [ [49,0] [0,12] ]
TS.hessian!(hes, f1-f2-2*f1*f2)
@test hes == hes'
hes1 = Array{Int}(undef, 2, 2)
TS.hessian!(hes1, f1-f2,[1,-1])
TS.hessian!(hes, g1(xT+1,yT-1)-g2(xT+1,yT-1))
@test hes1 == hes
use_show_default(true)
aa = sqrt(2) * xH
ab = sqrt(2) * TaylorN(2, order=1)
@test string(aa) ==
"HomogeneousPolynomial{Float64}([1.4142135623730951, 0.0], 1)"
@test string(ab) ==
"TaylorN{Float64}(HomogeneousPolynomial{Float64}" *
"[HomogeneousPolynomial{Float64}([0.0], 0), " *
"HomogeneousPolynomial{Float64}([0.0, 1.4142135623730951], 1)], 1)"
@test string([aa, aa]) ==
"HomogeneousPolynomial{Float64}[HomogeneousPolynomial{Float64}" *
"([1.4142135623730951, 0.0], 1), HomogeneousPolynomial{Float64}" *
"([1.4142135623730951, 0.0], 1)]"
@test string([ab, ab]) == "TaylorN{Float64}[TaylorN{Float64}" *
"(HomogeneousPolynomial{Float64}[HomogeneousPolynomial{Float64}([0.0], 0), " *
"HomogeneousPolynomial{Float64}([0.0, 1.4142135623730951], 1)], 1), " *
"TaylorN{Float64}(HomogeneousPolynomial{Float64}[HomogeneousPolynomial{Float64}" *
"([0.0], 0), HomogeneousPolynomial{Float64}([0.0, 1.4142135623730951], 1)], 1)]"
use_show_default(false)
@test string(aa) == " 1.4142135623730951 x₁"
@test string(ab) == " 1.4142135623730951 x₂ + 𝒪(‖x‖²)"
displayBigO(false)
@test string(-xH) == " - 1 x₁"
@test string(xT^2) == " 1 x₁²"
@test string(1im*yT) == " ( 0 + 1im ) x₂"
@test string(xT-im*yT) == " ( 1 + 0im ) x₁ - ( 0 + 1im ) x₂"
@test string([ab, ab]) ==
"TaylorN{Float64}[ 1.4142135623730951 x₂, 1.4142135623730951 x₂]"
displayBigO(true)
@test string(-xH) == " - 1 x₁"
@test string(xT^2) == " 1 x₁² + 𝒪(‖x‖¹⁸)"
@test string(1im*yT) == " ( 0 + 1im ) x₂ + 𝒪(‖x‖¹⁸)"
@test string(xT-im*yT) == " ( 1 + 0im ) x₁ - ( 0 + 1im ) x₂ + 𝒪(‖x‖¹⁸)"
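# Error checks: DomainError, AssertionError and BoundsError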
@test_throws DomainError abs(xT)
@test_throws AssertionError 1/x
@test_throws AssertionError zero(x)/zero(x)
@test_throws DomainError sqrt(x)
@test_throws AssertionError x^(-2)
@test_throws DomainError log(x)
@test_throws DomainError log1p(-2+x)
@test_throws AssertionError cos(x)/sin(y)
@test_throws BoundsError xH[20]
@test_throws BoundsError xT[20]
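# norm, rtoldefault, isfinite and isapprox tests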
a = 3x + 4y +6x^2 + 8x*y
@test typeof( norm(x) ) == Float64
@test norm(x) > 0
@test norm(a) == norm([3,4,6,8.0])
@test norm(a, 4) == sum([3,4,6,8.0].^4)^(1/4.)
@test norm(a, Inf) == 8.0
@test norm((3.0 + 4im)*x) == abs(3.0 + 4im)
@test TS.rtoldefault(TaylorN{Int}) == 0
@test TS.rtoldefault(TaylorN{Float64}) == sqrt(eps(Float64))
@test TS.rtoldefault(TaylorN{BigFloat}) == sqrt(eps(BigFloat))
@test TS.real(TaylorN{Float64}) == TaylorN{Float64}
@test TS.real(TaylorN{Complex{Float64}}) == TaylorN{Float64}
@test isfinite(a)
@test a[0] ≈ a[0]
@test a[1] ≈ a[1]
@test a[2] ≈ a[2]
@test a[3] ≈ a[3]
@test a ≈ a
@test a .≈ a
b = deepcopy(a)
b[2][3] = Inf
@test !isfinite(b)
b[2][3] = NaN
@test !isfinite(b)
b[2][3] = a[2][3]+eps()
@test isapprox(a[2], b[2], rtol=eps())
@test a ≈ b
b[2][2] = a[2][2]+sqrt(eps())
@test a[2] ≈ b[2]
@test a ≈ b
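# taylor_expand and update! tests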
f11(a,b) = (a+b)^a - cos(a*b)*b
f22(a) = (a[1] + a[2])^a[1] - cos(a[1]*a[2])*a[2]
@test taylor_expand(f11, 1.0,2.0) == taylor_expand(f22, [1,2.0])
@test evaluate(taylor_expand(x->x[1] + x[2], [1,2])) == 3.0
f33(x,y) = 3x+y
@test eltype(taylor_expand(f33,1,1)) == TaylorN{eltype(1)}
@test TS.numtype(taylor_expand(f33,1,1)) == eltype(1)
x,y = get_variables()
xysq = x^2 + y^2
update!(xysq,[1.0,-2.0])
@test xysq == (x+1.0)^2 + (y-2.0)^2
update!(xysq,[-1,2])
@test xysq == x^2 + y^2
#test function-like behavior for TaylorN
@test exy() == 1
@test exy([0.1im,0.01im]) == exp(0.11im)
@test isapprox(exy([1,1]), eeuler^2)
@test sin(asin(xT+yT))([1.0,0.5]) == 1.5
@test asin(sin(xT+yT))([1.0,0.5]) == 1.5
@test ( -sinh(xT+yT)^2 + cosh(xT+yT)^2 )(rand(2)) == 1
@test ( -sinh(xT+yT)^2 + cosh(xT+yT)^2 )(zeros(2)) == 1
#number of variables changed to 4...
dx = set_variables("x", numvars=4, order=10)
P = sin.(dx)
v = [1.0,2,3,4]
for i in 1:4
@test P[i](v) == evaluate(P[i], v)
end
@test P.(fill(v, 4)) == fill(P(v), 4)
F(x) = [sin(sin(x[4]+x[3])), sin(cos(x[3]-x[2])), cos(sin(x[1]^2+x[2]^2)), cos(cos(x[2]*x[3]))]
Q = F(v+dx)
@test Q.( fill(v, 4) ) == fill(Q(v), 4)
vr = map(x->rand(4), 1:4)
@test Q.(vr) == map(x->Q(x), vr)
for i in 1:4
@test P[i]() == evaluate(P[i])
@test Q[i]() == evaluate(Q[i])
end
@test P() == evaluate.(P)
@test P() == evaluate(P)
@test Q() == evaluate.(Q)
@test Q() == evaluate(Q)
@test Q[1:3]() == evaluate(Q[1:3])
dx = set_variables("x", numvars=4, order=10)
for i in 1:4
@test deg2rad(180+dx[i]) == pi + deg2rad(1.0)dx[i]
@test rad2deg(pi+dx[i]) == 180.0+rad2deg(1.0)dx[i]
end
p = sin(exp(dx[1]*dx[2])+dx[3]*dx[2])/(1.0+dx[4]^2)
q = zero(p)
TS.deg2rad!(q, p, 0)
@test q[0] == p[0]*(pi/180)
# TS.deg2rad!.(q, p, [1,3,5])
# for i in [0,1,3,5]
# @test q[i] == p[i]*(pi/180)
# end
TS.rad2deg!(q, p, 0)
@test q[0] == p[0]*(180/pi)
# TS.rad2deg!.(q, p, [1,3,5])
# for i in [0,1,3,5]
# @test q[i] == p[i]*(180/pi)
# end
xT = 5+TaylorN(Int, 1, order=10)
yT = TaylorN(2, order=10)
TS.deg2rad!(yT, xT, 0)
@test yT[0] == xT[0]*(pi/180)
TS.rad2deg!(yT, xT, 0)
@test yT[0] == xT[0]*(180/pi)
# Lexicographic tests with 4 vars
@test 1 > dx[1] > dx[2] > dx[3] > dx[4]
@test dx[4]^2 < dx[3]*dx[4] < dx[3]^2 < dx[2]*dx[4] < dx[2]*dx[3] < dx[2]^2 < dx[1]*dx[4] < dx[1]*dx[3] < dx[1]*dx[2] < dx[1]^2
@testset "Test Base.float overloads for HomogeneousPolynomial and TaylorN" begin
@test float(HomogeneousPolynomial(-7, 2)) == HomogeneousPolynomial(-7.0, 2)
@test float(HomogeneousPolynomial(1+im, 2)) == HomogeneousPolynomial(float(1+im), 2)
@test float(TaylorN(Int, 2)) == TaylorN(2)
@test float(TaylorN(Complex{Rational}, 2)) == TaylorN(float(Complex{Rational}), 2)
@test float(HomogeneousPolynomial{Complex{Int}}) == float(HomogeneousPolynomial{Complex{Float64}})
@test float(TaylorN{Complex{Rational}}) == float(TaylorN{Complex{Float64}})
end
@testset "Test evaluate! method for arrays of TaylorN" begin
x = set_variables("x", order=2, numvars=4)
function radntn!(y)
for k in eachindex(y)
for l in eachindex(y[k])
y[k][l] = randn()
end
end
nothing
end
y = zero(x[1])
radntn!(y)
n = 10
v = [zero(x[1]) for _ in 1:n]
r = [zero(x[1]) for _ in 1:n] # output vector
radntn!.(v)
x1 = randn(4) .+ x
# warmup
TaylorSeries.evaluate!(v, (x1...,), r)
# call twice to make sure `r` is reset on second call
TaylorSeries.evaluate!(v, (x1...,), r)
r2 = evaluate.(v, Ref(x1))
@test r == r2
@test iszero(norm(r-r2, Inf))
end
end
@testset "Integrate for several variables" begin
t, x, y = set_variables("t x y")
@test integrate(t, 1) == 0.5*t^2
@test integrate(t, 2) == t * x
@test integrate(t, 3) == t * y
@test integrate(x, 1) == t * x
@test integrate(x, 2) == 0.5*x^2
@test integrate(y, 2) == x * y
end
@testset "Consistency of coeff_table" begin
order = 20
x, y, z, w = set_variables(Int128, "x y z w", numvars=4, order=2order)
ctab = deepcopy(TS.coeff_table);
function fun(degree::Int)
s = x + y + z + w + 1
return s^degree
end
function diffs1(f)
f1 = differentiate(f, 1)
f2 = differentiate(f, 2)
f3 = differentiate(f, 3)
f4 = differentiate(f, 4)
return f1 + f2 + f3 + f4
end
function diffs2(f)
vder = zeros(typeof(f), get_numvars())
Threads.@threads for i = 1:get_numvars()
vder[i] = differentiate(f, i)
end
return sum(vder)
end
f = fun(order);
df_exact = 4*order*fun(order-1);
df1 = diffs1(f);
@test ctab == TS.coeff_table
@test df1 == df_exact
df2 = diffs2(f);
@test ctab == TS.coeff_table
@test df1 == df_exact
function integ1(f)
f1 = integrate(f, 1)
f2 = integrate(f, 2)
f3 = integrate(f, 3)
f4 = integrate(f, 4)
return f1 + f2 + f3 + f4
end
function integ2(f)
T = typeof(integrate(f, 1))
vinteg = zeros(T, get_numvars())
Threads.@threads for i = 1:get_numvars()
vinteg[i] = integrate(f, i)
end
return sum(vinteg)
end
ii1 = integ1(f);
ii2 = integ2(f);
@test ctab == TS.coeff_table
@test ii1 == ii2
function ev1(f)
f1 = f(1, 1.0)
f2 = f(2, 1.0)
f3 = f(3, 1.0)
f4 = f(4, 1.0)
return f1 + f2 + f3 + f4
end
function ev2(f)
T = typeof(evaluate(f, 1, 1.0))
veval = zeros(T, get_numvars())
Threads.@threads for i = 1:get_numvars()
veval[i] = evaluate(f, i, 1.0)
end
return sum(veval)
end
ee1 = ev1(f);
ee2 = ev2(f);
@test ctab == TS.coeff_table
@test ee1 == ee2
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 23465 | # This file is part of TaylorSeries.jl, MIT licensed
#
using TaylorSeries
using Test
# using LinearAlgebra
@testset "Tests with mixtures of Taylor1 and TaylorN" begin
@test TS.NumberNotSeries == Union{Real,Complex}
@test TS.NumberNotSeriesN == Union{Real,Complex,Taylor1}
set_variables("x", numvars=2, order=6)
xH = HomogeneousPolynomial(Int, 1)
yH = HomogeneousPolynomial(Int, 2)
tN = Taylor1(TaylorN{Float64}, 3)
@test findfirst(tN) == 1
@test convert(eltype(tN), tN) == tN
@test eltype(xH) == HomogeneousPolynomial{Int}
@test TS.numtype(xH) == Int
@test eltype(tN) == Taylor1{TaylorN{Float64}}
@test TS.numtype(tN) == TaylorN{Float64}
@test normalize_taylor(tN) == tN
@test tN.order == 3
@testset "Lexicographic order: Taylor1{HomogeneousPolynomial{T}} and Taylor1{TaylorN{T}}" begin
@test HomogeneousPolynomial([1]) > xH > yH > 0.0
@test -xH^2 < -xH*yH < -yH^2 < -xH^3 < -yH^3 < HomogeneousPolynomial([0.0])
@test 1 ≥ tN > 2*tN^2 > 100*tN^3 > 0
@test -2*tN < -tN^2 ≤ 0
end
@test string(zero(tN)) == " 0.0 + 𝒪(‖x‖¹) + 𝒪(t⁴)"
@test string(tN) == " ( 1.0 + 𝒪(‖x‖¹)) t + 𝒪(t⁴)"
@test string(tN + 3Taylor1(Int, 2)) == " ( 4.0 + 𝒪(‖x‖¹)) t + 𝒪(t³)"
@test string(xH * tN) == " ( 1.0 x₁ + 𝒪(‖x‖²)) t + 𝒪(t⁴)"
@test constant_term(xH) == xH
@test constant_term(tN) == zero(TaylorN([xH]))
@test linear_polynomial(xH) == xH
@test linear_polynomial(1+tN+tN^2) == tN
@test nonlinear_polynomial(1+tN+tN^2) == tN^2
tN = Taylor1([zero(TaylorN(Float64,1)), one(TaylorN(Float64,1))], 3)
@test typeof(tN) == Taylor1{TaylorN{Float64}}
@test string(zero(tN)) == " 0.0 + 𝒪(‖x‖⁷) + 𝒪(t⁴)"
@test string(tN) == " ( 1.0 + 𝒪(‖x‖⁷)) t + 𝒪(t⁴)"
@test string(Taylor1([xH+yH])) == " 1 x₁ + 1 x₂ + 𝒪(t¹)"
@test string(Taylor1([zero(xH), xH*yH])) == " ( 1 x₁ x₂) t + 𝒪(t²)"
@test string(tN * Taylor1([0,TaylorN([xH+yH])])) == " 0.0 + 𝒪(‖x‖⁷) + 𝒪(t²)"
t = Taylor1(3)
xHt = HomogeneousPolynomial(typeof(t), 1)
@test findfirst(xHt) == 1
@test convert(eltype(xHt), xHt) === xHt
@test eltype(xHt) == HomogeneousPolynomial{Taylor1{Float64}}
@test TS.numtype(xHt) == Taylor1{Float64}
@test normalize_taylor(xHt) == xHt
@test string(xHt) == " ( 1.0 + 𝒪(t¹)) x₁"
xHt = HomogeneousPolynomial([one(t), zero(t)])
yHt = HomogeneousPolynomial([zero(t), t])
@test findfirst(yHt) == 2
@test string(xHt) == " ( 1.0 + 𝒪(t⁴)) x₁"
@test string(yHt) == " ( 1.0 t + 𝒪(t⁴)) x₂"
@test string(HomogeneousPolynomial([t])) == " ( 1.0 t + 𝒪(t⁴))"
@test 3*xHt == HomogeneousPolynomial([3*one(t), zero(t)])
@test t*xHt == HomogeneousPolynomial([t, zero(t)])
@test complex(0,1)*xHt == HomogeneousPolynomial([1im*one(t), zero(1im*t)])
@test eltype(complex(0,1)*xHt) == HomogeneousPolynomial{Taylor1{Complex{Float64}}}
@test TS.numtype(complex(0,1)*xHt) == Taylor1{Complex{Float64}}
@test (xHt+yHt)(1, 1) == 1+t
@test (xHt+yHt)([1, 1]) == (xHt+yHt)((1, 1))
tN1 = TaylorN([HomogeneousPolynomial([t]), xHt, yHt^2])
@test findfirst(tN1) == 0
@test tN1[0] == HomogeneousPolynomial([t])
@test tN1(t,one(t)) == 2t+t^2
@test findfirst(tN1(t,one(t))) == 1
@test tN1([t,one(t)]) == tN1((t,one(t)))
t1N = convert(Taylor1{TaylorN{Float64}}, tN1)
@test findfirst(zero(tN1)) == -1
@test t1N[0] == HomogeneousPolynomial(1)
ctN1 = convert(TaylorN{Taylor1{Float64}}, t1N)
@test convert(eltype(tN1), tN1) === tN1
@test eltype(tN1) == TaylorN{Taylor1{Float64}}
@test eltype(Taylor1([xH])) == Taylor1{HomogeneousPolynomial{Int}}
@test TS.numtype(xHt) == Taylor1{Float64}
@test TS.numtype(tN1) == Taylor1{Float64}
@test TS.numtype(Taylor1([xH])) == HomogeneousPolynomial{Int}
@test TS.numtype(t1N) == TaylorN{Float64}
@test normalize_taylor(tN1) == tN1
@test get_order(HomogeneousPolynomial([Taylor1(1), 1.0+Taylor1(2)])) == 1
@test 3*tN1 == TaylorN([HomogeneousPolynomial([3t]),3xHt,3yHt^2])
@test t*tN1 == TaylorN([HomogeneousPolynomial([t^2]),xHt*t,t*yHt^2])
@test string(tN1) ==
" ( 1.0 t + 𝒪(t⁴)) + ( 1.0 + 𝒪(t⁴)) x₁ + ( 1.0 t² + 𝒪(t⁴)) x₂² + 𝒪(‖x‖³)"
@test string(t1N) ==
" 1.0 x₁ + 𝒪(‖x‖³) + ( 1.0 + 𝒪(‖x‖³)) t + ( 1.0 x₂² + 𝒪(‖x‖³)) t² + 𝒪(t⁴)"
@test tN1 == ctN1
@test tN1+tN1 == 2*tN1
@test tN1+1im*tN1 == complex(1,1)*tN1
@test tN1+t == t+tN1
@test tN1-t == -t+tN1
zeroN1 = zero(tN1)
oneN1 = one(tN1)
@test tN1-tN1 == zeroN1
@test zero(zeroN1) == zeroN1
@test zeroN1 == zero.(tN1)
@test oneN1[0] == one(tN1[0])
@test oneN1[1:end] == zero.(tN1[1:end])
for i in eachindex(tN1.coeffs)
@test tN1.coeffs[i].order == zeroN1.coeffs[i].order == oneN1.coeffs[i].order
end
@test string(t1N*t1N) ==
" 1.0 x₁² + 𝒪(‖x‖³) + ( 2.0 x₁ + 𝒪(‖x‖³)) t + ( 1.0 + 𝒪(‖x‖³)) t² + ( 2.0 x₂² + 𝒪(‖x‖³)) t³ + 𝒪(t⁴)"
@test !(@inferred isnan(tN1))
@test !(@inferred isinf(tN1))
@testset "Lexicographic order: HomogeneousPolynomial{Taylor1{T}} and TaylorN{Taylor1{T}}" begin
@test 1 > xHt > yHt > xHt^2 > 0
@test -xHt^2 < -xHt*yHt < -yHt^2 < -xHt^3 < -yHt^3 < 0
@test 1 ≥ tN1 > 2*tN1^2 > 100*tN1^3 > 0
@test -2*tN1 < -tN1^2 ≤ 0
end
@test mod(tN1+1,1.0) == 0+tN1
@test mod(tN1-1.125,2) == 0.875+tN1
@test (rem(tN1+1.125,1.0))[0][1] == 0.125 + t
@test (rem(tN1-1.125,2))[0][1] == -1.125 + t
@test mod2pi(-3pi+tN1)[0][1][0] ≈ pi
@test mod2pi(0.125+2pi+tN1)[0][1][0] ≈ 0.125
@test mod(t1N+1.125,1.0) == 0.125+t1N
@test mod(t1N-1.125,2) == 0.875+t1N
@test (rem(t1N+1.125,1.0))[0] == 0.125 + t1N[0]
@test (rem(t1N-1.125,2))[0] == -1.125 + t1N[0]
@test mod2pi(-3pi+t1N)[0][0][1] ≈ pi
@test mod2pi(0.125+2pi+t1N)[0][0][1] ≈ 0.125
@test abs(tN1+1) == 1+tN1
@test abs(tN1-1) == 1-tN1
@test_throws DomainError abs(tN1)
@test_throws DomainError abs(t1N)
@test abs2(im*(tN1+1)) == (1+tN1)^2
@test abs2(im*(tN1-1)) == (1-tN1)^2
@test abs(im*(tN1+1)) == 1+tN1
@test abs(im*(tN1-1)) == 1-tN1
@test convert(Array{Taylor1{TaylorN{Float64}},1}, [tN1, tN1]) == [t1N, t1N]
@test convert(Array{Taylor1{TaylorN{Float64}},2}, [tN1 tN1]) == [t1N t1N]
@test convert(Array{TaylorN{Taylor1{Float64}},1}, [t1N, t1N]) == [tN1, tN1]
@test convert(Array{TaylorN{Taylor1{Float64}},2}, [t1N t1N]) == [tN1 tN1]
@test evaluate(t1N, 0.0) == TaylorN(xH, 2)
@test t1N() == TaylorN(xH, 2)
@test string(evaluate(t1N, 0.0)) == " 1.0 x₁ + 𝒪(‖x‖³)"
@test string(evaluate(t1N^2, 1.0)) == " 1.0 + 2.0 x₁ + 1.0 x₁² + 2.0 x₂² + 𝒪(‖x‖³)"
@test string((t1N^(2//1))(1.0)) == " 1.0 + 2.0 x₁ + 1.0 x₁² + 2.0 x₂² + 𝒪(‖x‖³)"
v = zeros(TaylorN{Float64},2)
@test isnothing(evaluate!([t1N, t1N^2], 0.0, v))
@test v == [TaylorN(1), TaylorN(1)^2]
@test tN1() == t
@test evaluate(tN1, :x₁ => 1.0) == TaylorN([HomogeneousPolynomial([1.0+t]), zero(xHt), yHt^2])
@test evaluate(tN1, 1, 1.0) == TaylorN([HomogeneousPolynomial([1.0+t]), zero(xHt), yHt^2])
@test evaluate(t, t1N) == t1N
@test evaluate(t1N, 0.5) == t1N[0] + t1N[1]/2 + t1N[2]/4
@test evaluate(t1N, [t1N[0], zero(t1N[0])]) == Taylor1([t1N[0], t1N[1]], t.order)
@test evaluate(t1N, 2, 0.0) == Taylor1([t1N[0], t1N[1]], t.order)
@test evaluate(t1N, 1, 0.0) == Taylor1([zero(t1N[0]), t1N[1], t1N[2]], t.order)
# Tests for functions of mixtures
t1N = Taylor1([zero(TaylorN(Float64,1)), one(TaylorN(Float64,1))], 6)
t = Taylor1(3)
xHt = HomogeneousPolynomial([one(t), zero(t)])
yHt = HomogeneousPolynomial([zero(t), t])
x = TaylorN(1, order=2)
y = TaylorN(2, order=2)
xN1 = TaylorN([HomogeneousPolynomial(zero(t), 0), xHt, zero(yHt)], 2)
yN1 = TaylorN([HomogeneousPolynomial(zero(t), 0), zero(xHt), yHt], 2)
for fn in (exp, log, log1p, sin, cos, tan, sinh, cosh, tanh, asin, acos, atan, asinh, acosh, atanh)
if fn == asin || fn == acos || fn == atanh || fn == log1p
cc = 0.5
elseif fn == asinh || fn == acosh
cc = 1.5
else
cc = 1.0
end
@test x*fn(cc+t1N) == fn(cc+t)*xN1
@test t*fn(cc+xN1) == fn(cc+x)*t1N
end
ee = Taylor1(t1N[0:5], 6)
for ord in eachindex(t1N)
TS.differentiate!(ee, exp(t1N), ord)
end
@test iszero(ee[6])
@test getcoeff.(ee, 0:5) == getcoeff.(exp(t1N), 0:5)
ee = differentiate(t1N, get_order(t1N))
@test iszero(ee)
@test iszero(get_order(ee))
vt = zeros(Taylor1{Float64},2)
@test isnothing(evaluate!([tN1, tN1^2], [t, t], vt))
@test vt == [2t, 4t^2]
tint = Taylor1(Int, 10)
t = Taylor1(10)
x = TaylorN( [HomogeneousPolynomial(zero(t), 5), HomogeneousPolynomial([one(t),zero(t)])], 5)
y = TaylorN(typeof(tint), 2, order=5)
@test typeof(x) == TaylorN{Taylor1{Float64}}
@test eltype(y) == TaylorN{Taylor1{Int}}
@test TS.numtype(y) == Taylor1{Int}
@test -x == 0 - x
@test +y == y
@test one(y)/(1+x) == 1 - x + x^2 - x^3 + x^4 - x^5
@test one(y)/(1+y) == 1 - y + y^2 - y^3 + y^4 - y^5
@test (1+y)/one(t) == 1 + y
@test typeof(y+t) == TaylorN{Taylor1{Float64}}
t = Taylor1(4)
xN, yN = get_variables()
@test evaluate(1.0 + t + t^2, xN) == 1.0 + xN + xN^2
v1 = [1.0 + t + t^2 + t^4, 1.0 - t^2 + t^3]
@test v1(yN^2) == [1.0 + yN^2 + yN^4, 1.0 - yN^4 + yN^6]
tN = Taylor1([zero(xN), one(xN)], 4)
q1N = 1 + yN*tN + xN*tN^4
@test q1N(-1.0) == 1.0 - yN + xN
@test q1N(-xN^2) == 1.0 - xN^2*yN
# See #92 and #94
δx, δy = set_variables("δx δy")
xx = 1+Taylor1(δx, 5)
yy = 1+Taylor1(δy, 5)
tt = Taylor1([zero(δx), one(δx)], xx.order)
@test all((xx, yy) .== (TS.fixorder(xx, yy)))
@test typeof(xx) == Taylor1{TaylorN{Float64}}
@test eltype(xx) == Taylor1{TaylorN{Float64}}
@test TS.numtype(tt) == TaylorN{Float64}
@test !(@inferred isnan(xx))
@test !(@inferred isnan(δx))
@test !(@inferred isinf(xx))
@test !(@inferred isinf(δx))
@test +xx == xx
@test -xx == 0 - xx
@test xx/1.0 == 1.0*xx
@test xx + xx == xx*2
@test xx - xx == zero(xx)
@test xx*xx == xx^2
@test xx/xx == one(xx)
@test xx*δx + Taylor1(typeof(δx),5) == δx + δx^2 + Taylor1(typeof(δx),5)
@test xx/(1+δx) == one(xx)
@test 1/(1-tt) == 1 + tt + tt^2 + tt^3 + tt^4 + tt^5
@test xx/(1-tt) == xx * (1 + tt + tt^2 + tt^3 + tt^4 + tt^5)
res = xx * tt
@test 1/(1-xx*tt) == 1 + res + res^2 + res^3 + res^4 + res^5
@test typeof(xx+δx) == Taylor1{TaylorN{Float64}}
res = 1/(1+δx)
@test one(xx)/(xx*(1+tt)) == Taylor1([res, -res, res, -res, res, -res])
res = 1/(1+δx)^2
@test (xx^2 + yy^2)/(xx*yy) == xx/yy + yy/xx
@test ((xx+yy)*tt)^2/((xx+yy)*tt) == (xx+yy)*tt
@test sqrt(xx) == Taylor1(sqrt(xx[0]), xx.order)
@test xx^0.25 == sqrt(sqrt(xx))
@test (xx*yy*tt^2)^0.5 == sqrt(xx*yy)*tt
FF(x,y,t) = (1 + x + y + t)^4
QQ(x,y,t) = x^4.0 + (4*y + (4*t + 4))*x^3 + (6*y^2 + (12*t + 12)*y + (6*t^2 + 12*t + 6))*x^2 +
(4*y^3 + (12*t + 12)*y^2 + (12*t^2 + 24*t + 12)*y + (4*t^3 + 12*t^2 + 12*t + 4))*x +
(y^4 + (4*t + 4)*y^3 + (6*t^2 + 12*t + 6)*y^2 + (4*t^3 + 12*t^2 + 12*t + 4)*y +
(t^4 + 4*t^3 + 6*t^2 + 4*t + 1))
@test FF(xx, yy, tt) == QQ(xx, yy, tt)
@test FF(tt, yy-1, xx-1) == QQ(xx-1, yy-1, tt)
@test (xx+tt)^4 == xx^4 + 4*xx^3*tt + 6*xx^2*tt^2 + 4*xx*tt^3 + tt^4
pp = xx*yy*(1+tt)^4
@test pp^0.25 == sqrt(sqrt(pp))
@test (xx*yy)^(3/2)*(1+tt+tt^2) == (sqrt(xx*yy))^3*(1+tt+tt^2)
@test sqrt((xx+yy+tt)^3) ≈ (xx+yy+tt)^1.5
@test (sqrt(xx*(1+tt)))^4 ≈ xx^2 * (1+tt)^2
@test (sqrt(xx*(1+tt)))^5 ≈ xx^2.5 * (1+tt)^2.5
@test exp(xx) == exp(xx[0]) + zero(tt)
@test exp(xx+tt) ≈ exp(xx)*exp(tt)
@test norm(exp(xx+tt) - exp(xx)*exp(tt), Inf) < 5e-17
@test expm1(xx) == expm1(xx[0]) + zero(tt)
@test expm1(xx+tt) ≈ exp(xx+tt)-1
@test norm(expm1(xx+tt) - (exp(xx+tt)-1), Inf) < 5e-16
@test log(xx) == log(xx[0]) + zero(tt)
@test log((xx+tt)*(yy+tt)) ≈ log(xx+tt)+log(yy+tt)
@test log(pp) ≈ log(xx)+log(yy)+4*log(1+tt)
@test norm(log(pp) - (log(xx)+log(yy)+4*log(1+tt)), Inf) < 1e-15
@test log1p(xx) == log1p(xx[0]) + zero(tt)
@test log1p(xx+tt) == log(1+xx+tt)
@test exp(log(xx+tt)) ≈ log(exp(xx+tt))
@test exp(log(pp)) ≈ log(exp(pp))
@test sincos(δx + xx) == sincos(δx+xx[0]) .+ zero(tt)
qq = sincos(pp)
@test exp(im*pp) ≈ qq[2] + im*qq[1]
@test qq[2]^2 ≈ 1 - qq[1]^2
@test sincospi(δx + xx - 1) == sincospi(δx + xx[0] - 1) .+ zero(tt)
@test all(sincospi(pp) .≈ sincos(pi*pp))
@test tan(xx) == tan(xx[0]) + zero(tt)
@test tan(xx+tt) ≈ sin(xx+tt)/cos(xx+tt)
@test tan(xx*tt) ≈ sin(xx*tt)/cos(xx*tt)
@test asin(xx-1) == asin(xx[0]-1) + zero(tt)
@test asin(sin(xx+tt)) ≈ xx + tt
@test sin(asin(xx*tt)) == xx * tt
@test_throws DomainError asin(2+xx+tt)
@test acos(xx-1) == acos(xx[0]-1) + zero(tt)
@test acos(cos(xx+tt)) ≈ xx + tt
@test cos(acos(xx*tt)) ≈ xx * tt
@test_throws DomainError acos(2+xx+tt)
@test atan(xx) == atan(xx[0]) + zero(tt)
@test atan(tan(xx+tt)) ≈ xx + tt
@test tan(atan(xx*tt)) == xx * tt
@test TS.sinhcosh(xx) == TS.sinhcosh(xx[0]) .+ zero(tt)
qq = TS.sinhcosh(pp)
@test qq[2]^2 - 1 ≈ qq[1]^2
@test 2*qq[1] ≈ exp(pp)-exp(-pp)
@test 2*qq[2] ≈ exp(pp)+exp(-pp)
qq = TS.sinhcosh(xx+tt)
@test tanh(xx) == tanh(xx[0]) + zero(tt)
@test tanh(xx+tt) ≈ qq[1]/qq[2]
qq = TS.sinhcosh(xx*tt)
@test tanh(xx*tt) ≈ qq[1]/qq[2]
@test asinh(xx-1) == asinh(xx[0]-1) + zero(tt)
@test asinh(sinh(xx+tt)) ≈ xx + tt
@test sinh(asinh(xx*tt)) == xx * tt
@test acosh(xx+1) == acosh(xx[0]+1) + zero(tt)
@test acosh(cosh(xx+1+tt)) ≈ xx + 1 + tt
@test cosh(acosh(2+xx*tt)) ≈ 2 + xx * tt
@test_throws DomainError acosh(xx+tt)
@test atanh(xx-1) == atanh(xx[0]-1) + zero(tt)
@test atanh(tanh(-1+xx+tt)) ≈ -1 + xx + tt
@test tanh(atanh(xx*tt)) ≈ xx * tt
@test_throws DomainError atanh(xx+tt)
# pp = xx*yy*(1+tt)^4
@test evaluate(pp, 1, 0.0) == yy*(1+tt)^4
@test evaluate(pp, 2, 0.0) == xx*(1+tt)^4
@test evaluate(t, tt) == tt
@test evaluate(tt, t) == tt
@test evaluate(xx, 2, δy) == xx
@test evaluate(xx, 1, δy) == yy
#testing evaluate and function-like behavior of Taylor1, TaylorN for mixtures:
t = Taylor1(25)
p = cos(t)
q = sin(t)
a = [p,q]
dx = set_variables("x", numvars=4, order=10)
P = sin.(dx)
v = [1.0,2,3,4]
F(x) = [sin(sin(x[4]+x[3])), sin(cos(x[3]-x[2])), cos(sin(x[1]^2+x[2]^2)), cos(cos(x[2]*x[3]))]
Q = F(v+dx)
diff_evals = cos(sin(dx[1]))-p(P[1])
@test norm(diff_evals, Inf) < 1e-15
#evaluate a Taylor1 at a TaylorN
@test p(P) == evaluate(p, P)
@test q(Q) == evaluate(q, Q)
#evaluate an array of Taylor1s at a TaylorN
aT1 = [p,q,p^2,log(1+q)] #an array of Taylor1s
@test aT1(Q[4]) == evaluate(aT1, Q[4])
@test (aT1.^2)(Q[3]) == evaluate(aT1.^2, Q[3])
#evaluate a TaylorN at an array of Taylor1s
@test P[1](aT1) == evaluate(P[1], aT1)
@test P[1](aT1) == evaluate(P[1], (aT1...,))
@test Q[2](aT1) == evaluate(Q[2], [aT1...])
#evaluate an array of TaylorN{Float64} at an array of Taylor1{Float64}
@test P(aT1) == evaluate(P, aT1)
@test Q(aT1) == evaluate(Q, aT1)
#test evaluation of an Array{TaylorN{Taylor1}} at an Array{Taylor1}
aH1 = [
HomogeneousPolynomial([Taylor1(rand(2))]),
HomogeneousPolynomial([Taylor1(rand(2)),Taylor1(rand(2)),
Taylor1(rand(2)),Taylor1(rand(2))])
]
bH1 = [
HomogeneousPolynomial([Taylor1(rand(2))]),
HomogeneousPolynomial([Taylor1(rand(2)),Taylor1(rand(2)),
Taylor1(rand(2)),Taylor1(rand(2))])
]
aTN1 = TaylorN(aH1); bTN1 = TaylorN(bH1)
x = [aTN1, bTN1]
δx = [Taylor1(rand(3)) for i in 1:4]
@test typeof(x) == Array{TaylorN{Taylor1{Float64}},1}
@test typeof(δx) == Array{Taylor1{Float64},1}
x0 = Array{Taylor1{Float64}}(undef, length(x))
eval_x_δx = evaluate(x,δx)
@test x(δx) == eval_x_δx
evaluate!(x,δx,x0)
@test x0 == eval_x_δx
@test typeof(evaluate(x[1],δx)) == Taylor1{Float64}
@test x() == map(y->y[0][1], x)
for i in eachindex(x)
@test evaluate(x[i],δx) == eval_x_δx[i]
@test x[i](δx) == eval_x_δx[i]
end
p11 = Taylor1([sin(t),cos(t)])
@test evaluate(p11,t) == sin(t)+t*cos(t)
@test p11(t) == sin(t)+t*cos(t)
a11 = Taylor1([t,t^2,exp(-t),sin(t),cos(t)])
b11 = t+t*(t^2)+(t^2)*(exp(-t))+(t^3)*sin(t)+(t^4)*cos(t)
diff_a11b11 = a11(t)-b11
@test norm(diff_a11b11.coeffs, Inf) < 1E-19
X, Y = set_variables(Taylor1{Float64}, "x y")
@test typeof( norm(X) ) == Float64
@test norm(X) > 0
@test norm(X+Y) == sqrt(2)
@test norm(-10X+4Y,Inf) == 10.
X,Y = convert(Taylor1{TaylorN{Float64}},X), convert(Taylor1{TaylorN{Float64}},Y)
@test typeof( norm(X) ) == Float64
@test norm(X) > 0
@test norm(X+Y) == sqrt(2)
@test norm(-10X+4Y,Inf) == 10.
@test TS.rtoldefault(TaylorN{Taylor1{Int}}) == 0
@test TS.rtoldefault(Taylor1{TaylorN{Int}}) == 0
for T in (Float64, BigFloat)
@test TS.rtoldefault(TaylorN{Taylor1{T}}) == sqrt(eps(T))
@test TS.rtoldefault(Taylor1{TaylorN{T}}) == sqrt(eps(T))
@test TS.real(TaylorN{Taylor1{T}}) == TaylorN{Taylor1{T}}
@test TS.real(Taylor1{TaylorN{T}}) == Taylor1{TaylorN{T}}
@test TS.real(TaylorN{Taylor1{Complex{T}}}) == TaylorN{Taylor1{T}}
@test TS.real(Taylor1{TaylorN{Complex{T}}}) == Taylor1{TaylorN{T}}
end
rndT1(ord1) = Taylor1(-1 .+ 2rand(ord1+1)) # generates a random Taylor1 with order `ord1`
nmonod(s, d) = binomial(d+s-1, d) #number of monomials in s variables with exact degree d
#rndHP generates a random `ordHP`-th order homog. pol. of Taylor1s, each with order `ord1`
rndHP(ordHP, ord1) = HomogeneousPolynomial( [rndT1(ord1) for i in 1:nmonod(get_numvars(), ordHP)] )
#rndTN generates a random `ordN`-th order TaylorN of Taylor1s, each with order `ord1`
rndTN(ordN, ord1) = TaylorN([rndHP(i, ord1) for i in 0:ordN])
P = rndTN(get_order(), 3)
@test P ≈ P
Q = deepcopy(P)
Q[2][2] = Taylor1([NaN, Inf])
@test (@inferred isnan(Q))
@test (@inferred isinf(Q))
@test !isfinite(Q)
Q[2][2] = P[2][2]+sqrt(eps())/2
@test isapprox(P, Q, rtol=1.0)
Q[2][2] = P[2][2]+10sqrt(eps())
@test !isapprox(P, Q, atol=sqrt(eps()), rtol=0)
@test P ≉ Q^2
Q[2][2] = P[2][2]+eps()/2
@test isapprox(Q, Q, atol=eps(), rtol=0)
@test isapprox(Q, P, atol=eps(), rtol=0)
Q[2][1] = P[2][1]-10eps()
@test !isapprox(Q, P, atol=eps(), rtol=0)
@test P ≉ Q^2
X, Y = set_variables(BigFloat, "x y", numvars=2, order=6)
p1N = Taylor1([X^2,X*Y,Y+X,Y^2])
q1N = Taylor1([X^2,(1.0+sqrt(eps(BigFloat)))*X*Y,Y+X,Y^2])
@test p1N ≈ p1N
@test p1N ≈ q1N
Pv = [rndTN(get_order(), 3), rndTN(get_order(), 3)]
Qv = convert.(Taylor1{TaylorN{Float64}}, Pv)
@test TS.jacobian(Pv) == TS.jacobian(Qv)
@test_throws ArgumentError Taylor1(2) + TaylorN(1)
@test_throws ArgumentError Taylor1(2) - TaylorN(1)
@test_throws ArgumentError Taylor1(2) * TaylorN(1)
@test_throws ArgumentError TaylorN(2) / Taylor1(1)
# Issue #342 and PR #343
z0N = -1.333+get_variables()[1]
z = Taylor1(z0N,20)
z[20][1][1] = 5.0
@test z[0][0][1] == -1.333
@test z[20][1][1] == 5.0
for i in 1:19
for j in eachindex(z[i].coeffs)
@test all(z[i].coeffs[j][:] .== 0.0)
end
end
@test all(z[20][1][2:end] .== 0.0)
intz = integrate(z)
intz[20] = z[0]
@test intz[1] == z[0]
@test intz[20] == z[0]
for i in 2:19
@test iszero(intz[i])
end
a = sum(exp.(get_variables()).^2)
b = Taylor1([a])
bcopy = deepcopy(b)
c = Taylor1(constant_term(b),0)
c[0][0][1] = 0.0
b == bcopy
# Issue #347
a = Taylor1([1.0+X,-X, Y, X-Y,X])
b = deepcopy(a)
b[0] = zero(a[0])
b.coeffs[2:end] .= zero(b.coeffs[1])
@test iszero(b)
@test b.coeffs[2] === b.coeffs[3]
b.coeffs[2:end] .= zero.(b.coeffs[1])
@test !(b.coeffs[2] === b.coeffs[3])
x = Taylor1([1.0+X,-X, Y, X-Y,X])
z = zero(x)
two = 2one(x[0])
@test two/x == 2/x == 2.0/x
@test (2one(x))/x == 2/x
dq = get_variables()
x = Taylor1(exp.(dq), 5)
x[1] = sin(dq[1]*dq[2])
@test x[1] == sin(dq[1]*dq[2])
@test x[1] !== sin(dq[1]*dq[2])
@testset "Test Base.float overloads for Taylor1 and TaylorN mixtures" begin
q = get_variables(Int)
x1N = Taylor1(q)
@test float(x1N) == Taylor1(float.(q))
xN1 = convert(TaylorN{Taylor1{Int}}, x1N)
@test float(xN1) == convert(TaylorN{Taylor1{Float64}}, Taylor1(float.(q)))
@test float(Taylor1{TaylorN{Int}}) == Taylor1{TaylorN{Float64}}
@test float(TaylorN{Taylor1{Int}}) == TaylorN{Taylor1{Float64}}
@test float(TaylorN{Taylor1{Complex{Int}}}) == TaylorN{Taylor1{Complex{Float64}}}
end
end
@testset "Tests with nested Taylor1s" begin
ti = Taylor1(3)
to = Taylor1([zero(ti), one(ti)], 9)
@test findfirst(to) == 1
@test TS.numtype(to) == Taylor1{Float64}
@test normalize_taylor(to) == to
@test normalize_taylor(Taylor1([zero(to), one(to)], 5)) == Taylor1([zero(to), one(to)], 5)
@test convert(eltype(to), to) === to
@test string(to) == " ( 1.0 + 𝒪(t⁴)) t + 𝒪(t¹⁰)"
@test string(to^2) == " ( 1.0 + 𝒪(t⁴)) t² + 𝒪(t¹⁰)"
@test ti + to == Taylor1([ti, one(ti)], 9)
tito = ti * to
# The next tests are related to issue #326
# @test ti > ti^2 > to > 0
# @test to^2 < toti < ti^2
@test tito == Taylor1([zero(ti), ti], 9)
@test tito / to == ti
@test get_order(tito/to) == get_order(to)-1
@test tito / ti == to
@test get_order(tito/ti) == get_order(to)
@test ti^2-to^2 == (ti+to)*(ti-to)
@test findfirst(ti^2-to^2) == 0
@test sin(to) ≈ Taylor1(one(ti) .* sin(Taylor1(10)).coeffs, 9)
@test to(1 + ti) == 1 + ti
@test to(1 + ti) isa Taylor1{Float64}
@test ti(1 + to) == 1 + to
@test constant_term(ti+to) == ti
@test linear_polynomial(ti*to) == Taylor1([zero(ti), ti], 9)
@test get_order(linear_polynomial(to)) == get_order(to)
@test nonlinear_polynomial(to+ti*to^2) == Taylor1([zero(ti), zero(ti), ti], 9)
@test ti(1 + to) isa Taylor1{Taylor1{Float64}}
@test sqrt(tito^2) == tito
@test get_order(sqrt(tito^2)) == get_order(to) >> 1
@test (tito^3)^(1/3) == tito
@test get_order(sqrt(tito^2)) == get_order(to) >> 1
ti2to = ti^2 * to
tti = (ti2to/to)/ti
@test get_order(tti) == get_order(to)-1
@test get_order(tti[0]) == get_order(ti)-1
@test isapprox(abs2(exp(im*to)), one(to))
@test isapprox(abs(exp(im*to)), one(to))
to = Taylor1([1/(1+ti), one(ti)], 9)
@test to(1.0) == 1 + 1/(1+ti)
@test cos(to)(0.0) == cos(to[0])
@test to(ti) == to[0] + ti
@test evaluate(to*ti, ti) == to[0]*ti + ti^2
@testset "Test setindex! method for nested Taylor1s" begin
t = Taylor1(2)
y = one(t)
x = Taylor1([t,2t,t^2,0t,t^3])
x[3] = y
@test x[3] !== y
y[2] = -5.0
@test x[3][2] == 0.0
end
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 2390 | # This file is part of TaylorSeries.jl, MIT licensed
#
using TaylorSeries
using Test
@testset "Mutating functions" begin
t1 = Taylor1(6)
# Dictionaries with calls
@test length(TS._dict_binary_ops) == 5
# @test length(TS._dict_unary_ops) == 22 # why are these tested?
@test all([haskey(TS._dict_binary_ops, op)
for op in [:+, :-, :*, :/, :^]])
@test all([haskey(TS._dict_binary_calls, op)
for op in [:+, :-, :*, :/, :^]])
for kk in keys(TS._dict_binary_ops)
res = TS._internalmutfunc_call(
TS._InternalMutFuncs(TS._dict_binary_ops[kk]) )
@test TS._dict_binary_calls[kk] == res
end
for kk in keys(TS._dict_unary_ops)
res = TS._internalmutfunc_call(
TS._InternalMutFuncs(TS._dict_unary_ops[kk]) )
@test TS._dict_unary_calls[kk] == res
end
# Some examples
t1 = Taylor1(5)
t2 = zero(t1)
t1aux = deepcopy(t1)
TS.pow!(t2, t1, t1aux, 2, 2)
@test t2[2] == 1.0
#
res = zero(t1)
TS.add!(res, t1, t2, 3)
@test res[3] == 0.0
TS.add!(res, 1, t2, 3)
@test res[3] == 0.0
TS.add!(res, t2, 3, 0)
@test res[0] == 3.0
TS.subst!(res, t1, t2, 2)
@test res[2] == -1.0
TS.subst!(res, t1, 1, 0)
@test res[0] == -1.0
@test res[2] == -1.0
TS.subst!(res, 1, t2, 2)
@test res[2] == -1.0
res[3] = rand()
TS.mul!(res, t1, t2, 3)
@test res[3] == 1.0
TS.mul!(res, res, 2, 3)
@test res[3] == 2.0
TS.mul!(res, 0.5, res, 3)
@test res[3] == 1.0
res[0] = rand()
TS.div!(res, t2-1, 1+t1, 0)
res[1] = rand()
TS.div!(res, t2-1, 1+t1, 1)
@test res[0] == (t1-1)[0]
@test res[1] == (t1-1)[1]
TS.div!(res, res, 2, 0)
@test res[0] == -0.5
res = zero(t1)
TS.identity!(res, t1, 0)
@test res[0] == t1[0]
TS.zero!(res, t1, 0)
TS.zero!(res, t1, 1)
@test res[0] == zero(t1[0])
@test res[1] == zero(t1[1])
TS.one!(res, t1, 0)
TS.one!(res, t1, 1)
@test res[0] == one(t1[0])
@test res[1] == zero(t1[1])
res = zero(t1)
TS.abs!(res, -1-t2, 2)
@test res[2] == 1.0
@test_throws DomainError TS.abs!(res, t2, 2)
res = zero(t1)
TS.abs2!(res, 1-t1, 1)
@test res[1] == -2.0
TS.abs2!(res, t1, 2)
@test res[2] == 1.0
t2 = Taylor1(Int,15)
TaylorSeries.zero!(t2)
@test TaylorSeries.iszero(t2)
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 26826 | # This file is part of TaylorSeries.jl, MIT licensed
#
using TaylorSeries
using Test
using LinearAlgebra, SparseArrays
# This is used to check the fallback of pretty_print
struct SymbNumber <: Number
s :: Symbol
end
Base.iszero(::SymbNumber) = false
@testset "Tests for Taylor1 expansions" begin
eeuler = Base.MathConstants.e
ta(a) = Taylor1([a,one(a)],15)
t = Taylor1(Int,15)
tim = im*t
zt = zero(t)
ot = 1.0*one(t)
tol1 = eps(1.0)
@test TS === TaylorSeries
@test Taylor1 <: AbstractSeries
@test Taylor1{Float64} <: AbstractSeries{Float64}
@test TS.numtype(1.0) == eltype(1.0)
@test TS.numtype([1.0]) == eltype([1.0])
@test TS.normalize_taylor(t) == t
@test TS.normalize_taylor(tim) == tim
@test Taylor1([1,2,3,4,5], 2) == Taylor1([1,2,3])
@test Taylor1(t[0:3]) == Taylor1(t[0:get_order(t)], 4)
@test get_order(t) == 15
@test get_order(Taylor1([1,2,3,4,5], 2)) == 2
@test size(t) == (16,)
@test firstindex(t) == 0
@test lastindex(t) == 15
@test eachindex(t) == 0:15
@test iterate(t) == (0.0, 1)
@test iterate(t, 1) == (1.0, 2)
@test iterate(t, 16) == nothing
@test axes(t) == ()
@test axes([t]) == (Base.OneTo(1),)
@testset "Total order" begin
@test 1 + t ≥ 1.0 > 0.5 + t > t^2 ≥ zero(t)
@test -1.0 < -1/1000 - t < -t < -t^2 ≤ 0
end
v = [1,2]
@test typeof(TS.resize_coeffs1!(v,3)) == Nothing
@test v == [1,2,0,0]
TS.resize_coeffs1!(v,0)
@test v == [1]
TS.resize_coeffs1!(v,3)
setindex!(Taylor1(v),3,2)
@test v == [1,0,3,0]
pol_int = Taylor1(v)
@test pol_int[:] == [1,0,3,0]
@test pol_int[:] == pol_int.coeffs[:]
@test pol_int[1:2:3] == pol_int.coeffs[2:2:4]
setindex!(pol_int,0,0:2)
@test v == zero(v)
setindex!(pol_int,1,:)
@test v == ones(Int, 4)
setindex!(pol_int, v, :)
@test v == ones(Int, 4)
setindex!(pol_int, zeros(Int, 4), 0:3)
@test v == zeros(Int, 4)
pol_int[:] .= 0
@test v == zero(v)
pol_int[0:2:end] = 2
@test all(v[1:2:end] .== 2)
pol_int[0:2:3] = [0, 1]
@test all(v[1:2:3] .== [0, 1])
rv = [rand(0:3) for i in 1:4]
@test Taylor1(rv)[:] == rv
y = sin(Taylor1(16))
@test y[:] == y.coeffs
y[:] .= cos(Taylor1(16))[:]
@test y == cos(Taylor1(16))
@test y[:] == cos(Taylor1(16))[:]
y = sin(Taylor1(16))
rv = rand(5)
y[0:4] .= rv
@test y[0:4] == rv
@test y[5:end] == y.coeffs[6:end]
rv = rand( length(y.coeffs) )
y[:] .= rv
@test y[:] == rv
y[:] .= cos(Taylor1(16)).coeffs
@test y == cos(Taylor1(16))
@test y[:] == cos(Taylor1(16))[:]
y[:] .= 0.0
@test y[:] == zero(y[:])
y = sin(Taylor1(16))
rv = rand.(length(0:4))
y[0:4] .= rv
@test y[0:4] == rv
@test y[6:end] == sin(Taylor1(16))[6:end]
rv = rand.(length(y))
y[:] .= rv
@test y[:] == rv
y[0:4:end] .= 1.0
@test all(y.coeffs[1:4:end] .== 1.0)
y[0:2:8] .= rv[1:5]
@test y.coeffs[1:2:9] == rv[1:5]
@test_throws AssertionError y[0:2:3] = rv
@test Taylor1([0,1,0,0]) == Taylor1(3)
@test getcoeff(Taylor1(Complex{Float64},3),1) == complex(1.0,0.0)
@test Taylor1(Complex{Float64},3)[1] == complex(1.0,0.0)
@test getindex(Taylor1(3),1) == 1.0
@inferred convert(Taylor1{Complex{Float64}},ot) == Taylor1{Complex{Float64}}
@test eltype(convert(Taylor1{Complex{Float64}},ot)) == Taylor1{Complex{Float64}}
@test eltype(convert(Taylor1{Complex{Float64}},1)) == Taylor1{Complex{Float64}}
@test eltype(convert(Taylor1, 1im)) == Taylor1{Complex{Int}}
@test TS.numtype(convert(Taylor1{Complex{Float64}},ot)) == Complex{Float64}
@test TS.numtype(convert(Taylor1{Complex{Float64}},1)) == Complex{Float64}
@test TS.numtype(convert(Taylor1, 1im)) == Complex{Int}
@test convert(Taylor1, 1im) == Taylor1(1im, 0)
@test convert(eltype(t), t) === t
@test convert(eltype(ot), ot) === ot
@test convert(Taylor1{Int},[0,2]) == 2*t
@test convert(Taylor1{Complex{Int}},[0,2]) == (2+0im)*t
@test convert(Taylor1{BigFloat},[0.0, 1.0]) == ta(big(0.0))
@test promote(t,Taylor1(1.0,0)) == (ta(0.0),ot)
@test promote(0,Taylor1(1.0,0)) == (zt,ot)
@test eltype(promote(ta(0),zeros(Int,2))[2]) == Taylor1{Int}
@test eltype(promote(ta(0.0),zeros(Int,2))[2]) == Taylor1{Float64}
@test eltype(promote(0,Taylor1(ot))[1]) == Taylor1{Float64}
@test eltype(promote(1.0+im, zt)[1]) == Taylor1{Complex{Float64}}
@test TS.numtype(promote(ta(0),zeros(Int,2))[2]) == Int
@test TS.numtype(promote(ta(0.0),zeros(Int,2))[2]) == Float64
@test TS.numtype(promote(0,Taylor1(ot))[1]) == Float64
@test TS.numtype(promote(1.0+im, zt)[1]) == Complex{Float64}
@test length(Taylor1(10)) == 11
@test length.( TS.fixorder(zt, Taylor1([1])) ) == (16, 16)
@test length.( TS.fixorder(zt, Taylor1([1], 1)) ) == (2, 2)
@test eltype(TS.fixorder(zt,Taylor1([1]))[1]) == Taylor1{Int}
@test TS.numtype(TS.fixorder(zt,Taylor1([1]))[1]) == Int
@test findfirst(t) == 1
@test findfirst(t^2) == 2
@test findfirst(ot) == 0
@test findfirst(zt) == -1
@test iszero(zero(t))
@test !iszero(one(t))
@test @inferred isinf(Taylor1([typemax(1.0)]))
@test @inferred isnan(Taylor1([typemax(1.0), NaN]))
@test constant_term(2.0) == 2.0
@test constant_term(t) == 0
@test constant_term(tim) == complex(0, 0)
@test constant_term([zt, t]) == [0, 0]
@test linear_polynomial(2) == 2
@test linear_polynomial(t) == t
@test linear_polynomial(1+tim^2) == zero(tim)
@test get_order(linear_polynomial(1+tim^2)) == get_order(tim)
@test linear_polynomial([zero(tim), tim, tim^2]) == [zero(tim), tim, zero(tim)]
@test nonlinear_polynomial(2im) == 0im
@test nonlinear_polynomial(1+t) == zero(t)
@test nonlinear_polynomial(1+tim^2) == tim^2
@test nonlinear_polynomial([zero(tim), tim, 1+tim^2]) == [zero(tim), zero(tim), tim^2]
@test ot == 1
@test 0.0 == zt
@test getcoeff(tim,1) == complex(0,1)
@test zt+1.0 == ot
@test 1.0-ot == zt
@test t+t == 2t
@test t-t == zt
@test +t == -(-t)
tsquare = Taylor1([0,0,1],15)
@test t * true == t
@test false * t == zero(t)
@test t^0 == t^0.0 == one(t)
@test t*t == tsquare
@test t*1 == t
@test 0*t == zt
@test (-t)^2 == tsquare
@test t^3 == tsquare*t
@test zero(t)/t == zero(t)
@test get_order(zero(t)/t) == get_order(t)
@test one(t)/one(t) == 1.0
@test tsquare/t == t
@test get_order(tsquare/t) == get_order(tsquare)-1
@test t/(t*3) == (1/3)*ot
@test get_order(t/(t*3)) == get_order(t)-1
@test t/3im == -tim/3
@test 1/(1-t) == Taylor1(ones(t.order+1))
@test Taylor1([0,1,1])/t == t+1
@test get_order(Taylor1([0,1,1])/t) == 1
@test (t+im)^2 == tsquare+2im*t-1
@test (t+im)^3 == Taylor1([-1im,-3,3im,1],15)
@test (t+im)^4 == Taylor1([1,-4im,-6,4im,1],15)
@test imag(tsquare+2im*t-1) == 2t
@test (Rational(1,2)*tsquare)[2] == 1//2
@test t^2/tsquare == ot
@test get_order(t^2/tsquare) == get_order(t)-2
@test ((1+t)^(1/3))[2]+1/9 ≤ tol1
@test (1.0-tsquare)^3 == (1.0-t)^3*(1.0+t)^3
@test (1-tsquare)^2 == (1+t)^2.0 * (1-t)^2.0
@test (sqrt(1+t))[2] == -1/8
@test ((1-tsquare)^(1//2))^2 == 1-tsquare
@test ((1-t)^(1//4))[14] == -4188908511//549755813888
@test abs(((1+t)^3.2)[13] + 5.4021062656e-5) < tol1
@test Taylor1(BigFloat,5)/6 == 1im*Taylor1(5)/complex(0,BigInt(6))
@test Taylor1(BigFloat,5)/(6*Taylor1(3)) == 1/BigInt(6)
@test Taylor1(BigFloat,5)/(6im*Taylor1(3)) == -1im/BigInt(6)
@test isapprox((1+(1.5+t)/4)^(-2), inv(1+(1.5+t)/4)^2, rtol=eps(Float64))
@test isapprox((1+(big(1.5)+t)/4)^(-2), inv(1+(big(1.5)+t)/4)^2, rtol=eps(BigFloat))
@test isapprox((1+(1.5+t)/4)^(-2), inv(1+(1.5+t)/4)^2, rtol=eps(Float64))
@test isapprox((1+(big(1.5)+t)/4)^(-2), inv(1+(big(1.5)+t)/4)^2, rtol=eps(BigFloat))
@test isapprox((1+(1.5+t)/5)^(-2.5), inv(1+(1.5+t)/5)^2.5, rtol=eps(Float64))
@test isapprox((1+(big(1.5)+t)/5)^(-2.5), inv(1+(big(1.5)+t)/5)^2.5, rtol=2eps(BigFloat))
# These tests involve some sort of factorization
@test t/(t+t^2) == 1/(1+t)
@test get_order(t/(t+t^2)) == get_order(1/(1+t))-1
@test sqrt(t^2+t^3) == t*sqrt(1+t)
@test get_order(sqrt(t^2+t^3)) == get_order(t) >> 1
@test get_order(t*sqrt(1+t)) == get_order(t)
@test (t^3+t^4)^(1/3) ≈ t*(1+t)^(1/3)
@test norm((t^3+t^4)^(1/3) - t*(1+t)^(1/3), Inf) < eps()
@test get_order((t^3+t^4)^(1/3)) == 5
@test ((t^3+t^4)^(1/3))[5] == -10/243
trational = ta(0//1)
@inferred ta(0//1) == Taylor1{Rational{Int}}
@test eltype(trational) == Taylor1{Rational{Int}}
@test TS.numtype(trational) == Rational{Int}
@test trational + 1//3 == Taylor1([1//3,1],15)
@test complex(3,1)*trational^2 == Taylor1([0//1,0//1,complex(3,1)//1],15)
@test trational^2/3 == Taylor1([0//1,0//1,1//3],15)
@test trational^3/complex(7,1) == Taylor1([0,0,0,complex(7//50,-1//50)],15)
@test sqrt(zero(t)) == zero(t)
@test isapprox( rem(4.1 + t,4)[0], 0.1 )
@test isapprox( mod(4.1 + t,4)[0], 0.1 )
@test isapprox( rem(1+Taylor1(Int,4),4.0)[0], 1.0 )
@test isapprox( mod(1+Taylor1(Int,4),4.0)[0], 1.0 )
@test isapprox( mod2pi(2pi+0.1+t)[0], 0.1 )
@test abs(ta(1)) == ta(1)
@test abs(ta(-1.0)) == -ta(-1.0)
@test taylor_expand(x->2x,order=10) == 2*Taylor1(10)
@test taylor_expand(x->x^2+1) == Taylor1(15)*Taylor1(15) + 1
@test evaluate(taylor_expand(cos,0.)) == cos(0.)
@test evaluate(taylor_expand(tan,pi/4)) == tan(pi/4)
@test eltype(taylor_expand(x->x^2+1,1)) == Taylor1{Int}
@test TS.numtype(taylor_expand(x->x^2+1,1)) == Int
tsq = t^2
update!(tsq,2.0)
@test tsq == (t+2.0)^2
update!(tsq,-2)
@test tsq == t^2
@test log(exp(tsquare)) == tsquare
@test exp(log(1-tsquare)) == 1-tsquare
@test constant_term(expm1(1.0e-16+t)) == 1.0e-16
@test expm1(1.e-16+t).coeffs[2:end] == (exp(t)-1).coeffs[2:end]
@test log((1-t)^2) == 2*log(1-t)
@test log1p(0.25 + t) == log(1.25+t)
@test log1p(-t^2) == log(1-t^2)
st, ct = sincos(t)
@test real(exp(tim)) == ct
@test imag(exp(tim)) == st
@test exp(conj(tim)) == ct-im*st == exp(tim')
st, ct = sincospi(t)
@test (st, ct) == sincos(pi*t)
@test real(exp(pi*tim)) == cospi(t)
@test imag(exp(pi*tim)) == sinpi(t)
@test exp(pi*conj(tim)) == ct-im*st == exp(pi*tim')
@test abs2(tim) == tsquare
@test abs(tim) == t
@test isapprox(abs2(exp(tim)), ot)
@test isapprox(abs(exp(tim)), ot)
@test (exp(t))^(2im) == cos(2t)+im*sin(2t)
@test (exp(t))^Taylor1([-5.2im]) == cos(5.2t)-im*sin(5.2t)
@test getcoeff(convert(Taylor1{Rational{Int}},cos(t)),8) == 1//factorial(8)
@test abs((tan(t))[7]- 17/315) < tol1
@test abs((tan(t))[13]- 21844/6081075) < tol1
@test tan(1.3+t) ≈ sin(1.3+t)/cos(1.3+t)
@test cot(1.3+t) ≈ 1/tan(1.3+t)
@test evaluate(exp(Taylor1([0,1],17)),1.0) == 1.0*eeuler
@test evaluate(exp(Taylor1([0,1],1))) == 1.0
@test evaluate(exp(t),t^2) == exp(t^2)
@test evaluate(exp(Taylor1(BigFloat, 15)), t^2) == exp(Taylor1(BigFloat, 15)^2)
@test evaluate(exp(Taylor1(BigFloat, 15)), t^2) isa Taylor1{BigFloat}
#Test function-like behavior for Taylor1s
t17 = Taylor1([0,1],17)
myexpfun = exp(t17)
@test myexpfun(1.0) == 1.0*eeuler
@test myexpfun() == 1.0
@test myexpfun(t17^2) == exp(t17^2)
@test exp(t17^2)(t17) == exp(t17^2)
q, p = sincospi(t17)
@test cospi(-im*t)(1)+im*sinpi(-im*t)(1) == exp(-im*pi*t)(im)
@test p(-im*t17)(1)+im*q(-im*t17)(1) ≈ exp(-im*pi*t17)(im)
q, p = sincos(t17)
@test cos(-im*t)(1)+im*sin(-im*t)(1) == exp(-im*t)(im)
@test p(-im*t17)(1)+im*q(-im*t17)(1) == exp(-im*t17)(im)
cossin1 = x->p(q(x))
@test evaluate(p, evaluate(q, pi/4)) == cossin1(pi/4)
cossin2 = p(q)
@test evaluate(evaluate(p,q), pi/4) == cossin2(pi/4)
@test evaluate(p, q) == cossin2
@test p(q)() == evaluate(evaluate(p, q))
@test evaluate(p, q) == p(q)
@test evaluate(q, p) == q(p)
cs = x->cos(sin(x))
csdiff = (cs(t17)-cossin2(t17)).(-2:0.1:2)
@test norm(csdiff, 1) < 5e-15
a = [p,q]
@test a(0.1) == evaluate.([p,q],0.1)
@test a.(0.1) == a(0.1)
@test a.() == evaluate.([p, q])
@test a.() == [p(), q()]
@test a.() == a()
@test view(a, 1:1)() == [a[1]()]
vr = rand(2)
@test p.(vr) == evaluate.([p], vr)
Mr = rand(3,3,3)
@test p.(Mr) == evaluate.([p], Mr)
vr = rand(5)
@test p(vr) == p.(vr)
@test view(a, 1:1)(vr) == evaluate.([p],vr)
@test p(Mr) == p.(Mr)
@test p(Mr) == evaluate.([p], Mr)
taylor_a = Taylor1(Int,10)
taylor_x = exp(Taylor1(Float64,13))
@test taylor_x(taylor_a) == evaluate(taylor_x, taylor_a)
A_T1 = [t 2t 3t; 4t 5t 6t ]
@test evaluate(A_T1,1.0) == [1.0 2.0 3.0; 4.0 5.0 6.0]
@test evaluate(A_T1,1.0) == A_T1(1.0)
@test evaluate(A_T1) == A_T1()
@test A_T1(tsquare) == [tsquare 2tsquare 3tsquare; 4tsquare 5tsquare 6tsquare]
@test view(A_T1, :, :)(1.0) == A_T1(1.0)
@test view(A_T1, :, 1)(1.0) == A_T1[:,1](1.0)
@test sin(asin(tsquare)) == tsquare
@test tan(atan(tsquare)) == tsquare
@test atan(tan(tsquare)) == tsquare
@test atan(sin(tsquare)/cos(tsquare)) == atan(sin(tsquare), cos(tsquare))
@test constant_term(atan(sin(3pi/4+tsquare), cos(3pi/4+tsquare))) == 3pi/4
@test atan(sin(3pi/4+tsquare)/cos(3pi/4+tsquare)) - atan(sin(3pi/4+tsquare), cos(3pi/4+tsquare)) == -pi
@test sinh(asinh(tsquare)) ≈ tsquare
@test tanh(atanh(tsquare)) ≈ tsquare
@test atanh(tanh(tsquare)) ≈ tsquare
@test asinh(t) ≈ log(t + sqrt(t^2 + 1))
@test cosh(asinh(t)) ≈ sqrt(t^2 + 1)
t_complex = Taylor1(Complex{Int}, 15) # for use with acosh, which over the reals is only defined for x ≥ 1
@test cosh(acosh(t_complex)) ≈ t_complex
@test differentiate(acosh(t_complex)) ≈ 1/sqrt(t_complex^2 - 1)
@test acosh(t_complex) ≈ log(t_complex + sqrt(t_complex^2 - 1))
@test sinh(acosh(t_complex)) ≈ sqrt(t_complex^2 - 1)
@test asin(t) + acos(t) == pi/2
@test differentiate(acos(t)) == - 1/sqrt(1-Taylor1(t.order-1)^2)
@test get_order(differentiate(acos(t))) == t.order-1
@test - sinh(t) + cosh(t) == exp(-t)
@test sinh(t) + cosh(t) == exp(t)
@test evaluate(- sinh(t)^2 + cosh(t)^2 , rand()) == 1
@test evaluate(- sinh(t)^2 + cosh(t)^2 , 0) == 1
@test tanh(t + 0im) == -1im * tan(t*1im)
@test evaluate(tanh(t/2),1.5) == evaluate(sinh(t) / (cosh(t) + 1),1.5)
@test cosh(t) == real(cos(im*t))
@test sinh(t) == imag(sin(im*t))
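# Low-level mutating kernels (one!, abs!, add!, subst!, div!, pow!, sqrt!, exp!, log!, trig and hyperbolic variants)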
ut = 1.0*t
tt = zero(ut)
TS.one!(tt, ut, 0)
@test tt[0] == 1.0
TS.one!(tt, ut, 1)
@test tt[1] == 0.0
TS.abs!(tt, 1.0+ut, 0)
@test tt[0] == 1.0
TS.add!(tt, ut, ut, 1)
@test tt[1] == 2.0
TS.add!(tt, -3.0, 0)
@test tt[0] == -3.0
TS.add!(tt, -3.0, 1)
@test tt[1] == 0.0
TS.subst!(tt, ut, ut, 1)
@test tt[1] == 0.0
TS.subst!(tt, -3.0, 0)
@test tt[0] == 3.0
TS.subst!(tt, -2.5, 1)
@test tt[1] == 0.0
iind, cind = TS.divfactorization(ut, ut)
@test iind == 1
@test cind == 1.0
TS.div!(tt, ut, ut, 0)
@test tt[0] == cind
TS.div!(tt, 1+ut, 1+ut, 0)
@test tt[0] == 1.0
TS.div!(tt, 1, 1+ut, 0)
@test tt[0] == 1.0
aux = tt
TS.pow!(tt, 1.0+t, aux, 1.5, 0)
@test tt[0] == 1.0
TS.pow!(tt, 0.0*t, aux, 1.5, 0)
@test tt[0] == 0.0
TS.pow!(tt, 0.0+t, aux, 18, 0)
@test tt[0] == 0.0
TS.pow!(tt, 1.0+t, aux, 1.5, 0)
@test tt[0] == 1.0
TS.pow!(tt, 1.0+t, aux, 0.5, 1)
@test tt[1] == 0.5
TS.pow!(tt, 1.0+t, aux, 0, 0)
@test tt[0] == 1.0
TS.pow!(tt, 1.0+t, aux, 1, 1)
@test tt[1] == 1.0
tt = zero(ut)
aux = tt
TS.pow!(tt, 1.0+t, aux, 2, 0)
@test tt[0] == 1.0
TS.pow!(tt, 1.0+t, aux, 2, 1)
@test tt[1] == 2.0
TS.pow!(tt, 1.0+t, aux, 2, 2)
@test tt[2] == 1.0
TS.sqrt!(tt, 1.0+t, 0, 0)
@test tt[0] == 1.0
TS.sqrt!(tt, 1.0+t, 0)
@test tt[0] == 1.0
TS.exp!(tt, 1.0*t, 0)
@test tt[0] == exp(t[0])
TS.log!(tt, 1.0+t, 0)
@test tt[0] == 0.0
TS.log1p!(tt, 0.25+t, 0)
@test tt[0] == log1p(0.25)
TS.log1p!(tt, 0.25+t, 1)
@test tt[1] == 1/1.25
ct = zero(ut)
TS.sincos!(tt, ct, 1.0*t, 0)
@test tt[0] == sin(t[0])
@test ct[0] == cos(t[0])
TS.tan!(tt, 1.0*t, ct, 0)
@test tt[0] == tan(t[0])
@test ct[0] == tan(t[0])^2
TS.asin!(tt, 1.0*t, ct, 0)
@test tt[0] == asin(t[0])
@test ct[0] == sqrt(1.0-t[0]^2)
TS.acos!(tt, 1.0*t, ct, 0)
@test tt[0] == acos(t[0])
@test ct[0] == sqrt(1.0-t[0]^2)
TS.atan!(tt, ut, ct, 0)
@test tt[0] == atan(t[0])
@test ct[0] == 1.0+t[0]^2
TS.sinhcosh!(tt, ct, ut, 0)
@test tt[0] == sinh(t[0])
@test ct[0] == cosh(t[0])
TS.tanh!(tt, ut, ct, 0)
@test tt[0] == tanh(t[0])
@test ct[0] == tanh(t[0])^2
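# evaluate! for vectors and matrices of Taylor1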
v = [sin(t), exp(-t)]
vv = Vector{Float64}(undef, 2)
@test evaluate!(v, zero(Int), vv) == nothing
@test vv == [0.0,1.0]
@test evaluate!(v, 0.0, vv) == nothing
@test vv == [0.0,1.0]
@test evaluate!(v, 0.0, view(vv, 1:2)) == nothing
@test vv == [0.0,1.0]
@test evaluate(v) == vv
@test isapprox(evaluate(v, complex(0.0,0.2)),
[complex(0.0,sinh(0.2)),complex(cos(0.2),sin(-0.2))], atol=eps(), rtol=0.0)
m = [sin(t) exp(-t); cos(t) exp(t)]
m0 = 0.5
mres = Matrix{Float64}(undef, 2, 2)
mres_expected = [sin(m0) exp(-m0); cos(m0) exp(m0)]
@test evaluate!(m, m0, mres) == nothing
@test mres == mres_expected
ee_ta = exp(ta(1.0))
@test get_order(differentiate(ee_ta, 0)) == 15
@test get_order(differentiate(ee_ta, 1)) == 14
@test get_order(differentiate(ee_ta, 16)) == 0
@test differentiate(ee_ta, 0) == ee_ta
expected_result_approx = Taylor1(ee_ta[0:10])
@test differentiate(exp(ta(1.0)), 5) ≈ expected_result_approx atol=eps() rtol=0.0
expected_result_approx = Taylor1(zero(ee_ta),0)
@test differentiate(ee_ta, 16) == Taylor1(zero(ee_ta),0)
@test eltype(differentiate(ee_ta, 16)) == eltype(ee_ta)
ee_ta = exp(ta(1.0pi))
expected_result_approx = Taylor1(ee_ta[0:12])
@test differentiate(ee_ta, 3) ≈ expected_result_approx atol=eps(16.0) rtol=0.0
expected_result_approx = Taylor1(ee_ta[0:5])
@test differentiate(exp(ta(1.0pi)), 10) ≈ expected_result_approx atol=eps(64.0) rtol=0.0
@test differentiate(exp(ta(1.0)), 5)() == exp(1.0)
@test differentiate(exp(ta(1.0pi)), 3)() == exp(1.0pi)
@test isapprox(derivative(exp(ta(1.0pi)), 10)() , exp(1.0pi) )
@test differentiate(5, exp(ta(1.0))) == exp(1.0)
@test differentiate(3, exp(ta(1.0pi))) == exp(1.0pi)
@test isapprox(differentiate(10, exp(ta(1.0pi))) , exp(1.0pi) )
@test integrate(differentiate(exp(t)),1) == exp(t)
@test integrate(cos(t)) == sin(t)
@test promote(ta(0.0), t) == (ta(0.0),ta(0.0))
@test inverse(exp(t)-1) ≈ log(1+t)
cfs = [(-n)^(n-1)/factorial(n) for n = 1:15]
@test norm(inverse(t*exp(t))[1:end]./cfs .- 1) < 4tol1
@test inverse(tan(t))(tan(t)) ≈ t
@test atan(inverse(atan(t))) ≈ t
@test inverse_map(sin(t))(sin(t)) ≈ t
@test sinh(inverse_map(sinh(t))) ≈ t
@test inverse_map(tanh(t)) ≈ inverse(tanh(t))
@test_throws ArgumentError Taylor1([1,2,3], -2)
@test_throws DomainError abs(ta(big(0)))
@test_throws ArgumentError 1/t
@test_throws ArgumentError zt/zt
@test_throws DomainError t^1.5
@test_throws ArgumentError t^(-2)
@test_throws DomainError sqrt(t)
@test_throws DomainError log(t)
@test_throws ArgumentError cos(t)/sin(t)
@test_throws AssertionError differentiate(30, exp(ta(1.0pi)))
@test_throws DomainError inverse(exp(t))
@test_throws DomainError abs(t)
use_show_default(true)
aa = sqrt(2)+Taylor1(2)
@test string(aa) == "Taylor1{Float64}([1.4142135623730951, 1.0, 0.0], 2)"
@test string([aa, aa]) ==
"Taylor1{Float64}[Taylor1{Float64}([1.4142135623730951, 1.0, 0.0], 2), " *
"Taylor1{Float64}([1.4142135623730951, 1.0, 0.0], 2)]"
use_show_default(false)
@test string(aa) == " 1.4142135623730951 + 1.0 t + 𝒪(t³)"
set_taylor1_varname(" x ")
@test string(aa) == " 1.4142135623730951 + 1.0 x + 𝒪(x³)"
set_taylor1_varname("t")
displayBigO(false)
@test string(ta(-3)) == " - 3 + 1 t "
@test string(ta(0)^3-3) == " - 3 + 1 t³ "
@test TS.pretty_print(ta(3im)) == " ( 0 + 3im ) + ( 1 + 0im ) t "
@test string(Taylor1([1,2,3,4,5], 2)) == string(Taylor1([1,2,3]))
displayBigO(true)
@test string(ta(-3)) == " - 3 + 1 t + 𝒪(t¹⁶)"
@test string(ta(0)^3-3) == " - 3 + 1 t³ + 𝒪(t¹⁶)"
@test TS.pretty_print(ta(3im)) == " ( 0 + 3im ) + ( 1 + 0im ) t + 𝒪(t¹⁶)"
@test string(Taylor1([1,2,3,4,5], 2)) == string(Taylor1([1,2,3]))
a = collect(1:12)
t_a = Taylor1(a,15)
t_C = complex(3.0,4.0) * t_a
rnd = rand(10)
@test typeof( norm(Taylor1(rnd)) ) == Float64
@test norm(Taylor1(rnd)) > 0
@test norm(t_a) == norm(a)
@test norm(Taylor1(a,15), 3) == sum((a.^3))^(1/3)
@test norm(t_a, Inf) == 12
@test norm(t_C) == norm(complex(3.0,4.0)*a)
@test TS.rtoldefault(Taylor1{Int}) == 0
@test TS.rtoldefault(Taylor1{Float64}) == sqrt(eps(Float64))
@test TS.rtoldefault(Taylor1{BigFloat}) == sqrt(eps(BigFloat))
@test TS.real(Taylor1{Float64}) == Taylor1{Float64}
@test TS.real(Taylor1{Complex{Float64}}) == Taylor1{Float64}
@test isfinite(t_C)
@test isfinite(t_a)
@test !isfinite( Taylor1([0, Inf]) )
@test !isfinite( Taylor1([NaN, 0]) )
b = convert(Vector{Float64}, a)
b[3] += eps(10.0)
b[5] -= eps(10.0)
t_b = Taylor1(b,15)
t_C2 = t_C+eps(100.0)
t_C3 = t_C+eps(100.0)*im
@test isapprox(t_C, t_C)
@test t_a ≈ t_a
@test t_a ≈ t_b
@test t_C ≈ t_C2
@test t_C ≈ t_C3
@test t_C3 ≈ t_C2
t = Taylor1(25)
p = sin(t)
q = sin(t+eps())
@test t ≈ t
@test t ≈ t+sqrt(eps())
@test isapprox(p, q, atol=eps())
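# deg2rad and rad2deg tests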
tf = Taylor1(35)
@test Taylor1([180.0, rad2deg(1.0)], 35) == rad2deg(pi+tf)
@test sin(pi/2+deg2rad(1.0)tf) == sin(deg2rad(90+tf))
a = Taylor1(rand(10))
b = Taylor1(rand(10))
c = deepcopy(a)
TS.deg2rad!(b, a, 0)
@test a == c
@test a[0]*(pi/180) == b[0]
# TS.deg2rad!.(b, a, [0,1,2])
# @test a == c
# for i in 0:2
# @test a[i]*(pi/180) == b[i]
# end
a = Taylor1(rand(10))
b = Taylor1(rand(10))
c = deepcopy(a)
TS.rad2deg!(b, a, 0)
@test a == c
@test a[0]*(180/pi) == b[0]
# TS.rad2deg!.(b, a, [0,1,2])
# @test a == c
# for i in 0:2
# @test a[i]*(180/pi) == b[i]
# end
x = Taylor1([5.0,-1.5,3.0,-2.0,-20.0])
@test x*x == x^2
@test x*x*x == x*(x^2) == TaylorSeries.power_by_squaring(x, 3)
@test x*x*x*x == (x^2)*(x^2) == TaylorSeries.power_by_squaring(x, 4)
@test (x - 1.0)^2 == 1 - 2x + x^2
@test (x - 1.0)^3 == -1 + 3x - 3x^2 + x^3
@test (x - 1.0)^4 == 1 - 4x + 6x^2 - 4x^3 + x^4
# Test additional Taylor1 constructors
@test Taylor1{Float64}(true) == Taylor1([1.0])
@test Taylor1{Float64}(false) == Taylor1([0.0])
@test Taylor1{Int}(true) == Taylor1([1])
@test Taylor1{Int}(false) == Taylor1([0])
# Test fallback pretty_print
st = Taylor1([SymbNumber(:x₀), SymbNumber(:x₁)])
@test string(st) == " SymbNumber(:x₀) + SymbNumber(:x₁) t + 𝒪(t²)"
@testset "Test Base.float overloads for Taylor1" begin
@test float(Taylor1(-3, 2)) == Taylor1(-3.0, 2)
@test float(Taylor1(-1//2, 2)) == Taylor1(-0.5, 2)
@test float(Taylor1(3 - 0im, 2)) == Taylor1(3.0 - 0.0im, 2)
x = Taylor1(rand(5))
@test float(x) == x
@test float(Taylor1{Int32}) == Taylor1{Float64}
@test float(Taylor1{Int}) == Taylor1{Float64}
@test float(Taylor1{Complex{Int}}) == Taylor1{ComplexF64}
end
end
@testset "Test inv for Matrix{Taylor1{Float64}}" begin
t = Taylor1(5)
a = Diagonal(rand(0:10,3)) + rand(3, 3)
ainv = inv(a)
b = Taylor1.(a, 5)
binv = inv(b)
c = Symmetric(b)
cinv = inv(c)
tol = 1.0e-11
for its = 1:10
a .= Diagonal(rand(2:12,3)) + rand(3, 3)
ainv .= inv(a)
b .= Taylor1.(a, 5)
binv .= inv(b)
c .= Symmetric(Taylor1.(a, 5))
cinv .= inv(c)
@test norm(binv - ainv, Inf) ≤ tol
@test norm(b*binv - I, Inf) ≤ tol
@test norm(binv*b - I, Inf) ≤ tol
@test norm(triu(b)*inv(UpperTriangular(b)) - I, Inf) ≤ tol
@test norm(inv(LowerTriangular(b))*tril(b) - I, Inf) ≤ tol
ainv .= inv(Symmetric(a))
@test norm(cinv - ainv, Inf) ≤ tol
@test norm(c*cinv - I, Inf) ≤ tol
@test norm(cinv*c - I, Inf) ≤ tol
@test norm(triu(c)*inv(UpperTriangular(c)) - I, Inf) ≤ tol
@test norm(inv(LowerTriangular(c))*tril(c) - I, Inf) ≤ tol
b .= b .+ t
binv .= inv(b)
@test norm(b*binv - I, Inf) ≤ tol
@test norm(binv*b - I, Inf) ≤ tol
@test norm(triu(b)*inv(triu(b)) - I, Inf) ≤ tol
@test norm(inv(tril(b))*tril(b) - I, Inf) ≤ tol
c .= Symmetric(b)
cinv .= inv(c)
@test norm(c*cinv - I, Inf) ≤ tol
@test norm(cinv*c - I, Inf) ≤ tol
end
end
@testset "Matrix multiplication for Taylor1" begin
n1 = 100
k1 = 90
order = max(n1,k1)
B1 = randn(n1,order)
Y1 = randn(k1,order)
A1 = randn(k1,n1)
for A in (A1,sparse(A1))
# B and Y contain elements of different orders
B = Taylor1{Float64}[Taylor1(collect(B1[i,1:i]),i) for i=1:n1]
Y = Taylor1{Float64}[Taylor1(collect(Y1[k,1:k]),k) for k=1:k1]
Bcopy = deepcopy(B)
mul!(Y,A,B)
# do we get the same result when using the `A*B` form?
@test A*B≈Y
# Y should be extended after the multiplication
@test reduce(&, [y1.order for y1 in Y] .== Y[1].order)
# B should be unchanged
@test B==Bcopy
# is the result compatible with the matrix multiplication? We
# only check the zeroth order of the Taylor series.
y1=sum(Y)[0]
Y=A*B1[:,1]
y2=sum(Y)
# There is a small numerical error when comparing the generic
# multiplication and the specialized version
@test abs(y1-y2) < n1*(eps(y1)+eps(y2))
@test_throws DimensionMismatch mul!(Y,A[:,1:end-1],B)
@test_throws DimensionMismatch mul!(Y,A[1:end-1,:],B)
@test_throws DimensionMismatch mul!(Y,A,B[1:end-1])
@test_throws DimensionMismatch mul!(Y[1:end-1],A,B)
end
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 436 | # This file is part of TaylorSeries.jl, MIT licensed
#
using TaylorSeries, RecursiveArrayTools
using Test
@testset "Tests TaylorSeries RecursiveArrayTools extension" begin
dq = get_variables()
x = Taylor1([0.9+2dq[1],-1.1dq[1], 0.7dq[2], 0.5dq[1]-0.45dq[2],0.9dq[1]])
xx = [x,x]
yy = recursivecopy(xx)
@test yy == xx # yy and xx are equal...
@test yy !== xx # ...but they're not the same object in memory
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 430 | # This file is part of TaylorSeries.jl, MIT licensed
#
# Tests for TaylorSeries
testfiles = (
"onevariable.jl",
"manyvariables.jl",
"mixtures.jl",
"mutatingfuncts.jl",
"intervals.jl",
"broadcasting.jl",
"identities_Euler.jl",
"fateman40.jl",
"staticarrays.jl",
"jld2.jl",
"rat.jl",
# Run Aqua tests at the very end
"aqua.jl",
)
for file in testfiles
include(file)
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | code | 599 | # This file is part of TaylorSeries.jl, MIT licensed
#
using TaylorSeries, StaticArrays
using Test
@testset "Tests TaylorSeries operations over StaticArrays types" begin
q = set_variables("q", order=2, numvars=2)
m = @SMatrix fill(Taylor1(rand(2).*q), 3, 3)
mt = m'
@test m isa SMatrix{3, 3, Taylor1{TaylorN{Float64}}, 9}
@test mt isa SMatrix{3, 3, Taylor1{TaylorN{Float64}}, 9}
v = @SVector [-1.1, 3.4, 7.62345e-1]
mtv = mt * v
@test mtv isa SVector{3, Taylor1{TaylorN{Float64}}}
mmt = m * mt
@test mmt isa SMatrix{3, 3, Taylor1{TaylorN{Float64}}, 9}
end
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | docs | 3789 | # TaylorSeries.jl
A [Julia](http://julialang.org) package for Taylor polynomial expansions in one or more
independent variables.
[](https://github.com//JuliaDiff/TaylorSeries.jl/actions)
[](https://coveralls.io/github/JuliaDiff/TaylorSeries.jl?branch=master)
[](https://juliadiff.org/TaylorSeries.jl/stable)
[](https://juliadiff.org/TaylorSeries.jl/latest)
[](https://doi.org/10.21105/joss.01043)
[](https://zenodo.org/record/2601941)
#### Authors
- [Luis Benet](http://www.cicc.unam.mx/~benet/), Instituto de Ciencias Físicas,
Universidad Nacional Autónoma de México (UNAM)
- [David P. Sanders](http://sistemas.fciencias.unam.mx/~dsanders/), Facultad
de Ciencias, Universidad Nacional Autónoma de México (UNAM)
Comments, suggestions and improvements are welcome and appreciated.
#### Examples
Taylor series in one variable
```julia
julia> using TaylorSeries
julia> t = Taylor1(Float64, 5)
1.0 t + 𝒪(t⁶)
julia> exp(t)
1.0 + 1.0 t + 0.5 t² + 0.16666666666666666 t³ + 0.041666666666666664 t⁴ + 0.008333333333333333 t⁵ + 𝒪(t⁶)
julia> log(1 + t)
1.0 t - 0.5 t² + 0.3333333333333333 t³ - 0.25 t⁴ + 0.2 t⁵ + 𝒪(t⁶)
```
Multivariate Taylor series
```julia
julia> x, y = set_variables("x y", order=2);
julia> exp(x + y)
1.0 + 1.0 x + 1.0 y + 0.5 x² + 1.0 x y + 0.5 y² + 𝒪(‖x‖³)
```
Differential and integral calculus on Taylor series:
```julia
julia> x, y = set_variables("x y", order=4);
julia> p = x^3 + 2x^2 * y - 7x + 2
2.0 - 7.0 x + 1.0 x³ + 2.0 x² y + 𝒪(‖x‖⁵)
julia> ∇(p)
2-element Array{TaylorN{Float64},1}:
- 7.0 + 3.0 x² + 4.0 x y + 𝒪(‖x‖⁵)
2.0 x² + 𝒪(‖x‖⁵)
julia> integrate(p, 1)
2.0 x - 3.5 x² + 0.25 x⁴ + 0.6666666666666666 x³ y + 𝒪(‖x‖⁵)
julia> integrate(p, 2)
2.0 y - 7.0 x y + 1.0 x³ y + 1.0 x² y² + 𝒪(‖x‖⁵)
```
For more details, please see the [docs](http://www.juliadiff.org/TaylorSeries.jl/stable).
#### License
`TaylorSeries` is licensed under the [MIT "Expat" license](./LICENSE.md).
#### Installation
`TaylorSeries` can be installed simply with `using Pkg; Pkg.add("TaylorSeries")`.
#### Contributing
There are many ways to contribute to this package:
- Report an issue if you encounter some odd behavior, or if you have suggestions to improve the package.
- Contribute with code addressing some open issues, that add new functionality or that improve the performance.
- When contributing with code, add docstrings and comments, so others may understand the methods implemented.
- Contribute by updating and improving the documentation.
#### References
- W. Tucker, Validated numerics: A short introduction to rigorous
computations, Princeton University Press (2011).
- A. Haro, Automatic differentiation methods in computational dynamical
systems: Invariant manifolds and normal forms of vector fields at fixed points,
[preprint](http://www.maia.ub.es/~alex/admcds/admcds.pdf).
#### Acknowledgments
This project began (using `python`) during a Masters' course in the postgraduate
programs in Physics and in Mathematics at UNAM, during the second half of 2013.
We thank the participants of the course for putting up with the half-baked
material and contributing energy and ideas.
We acknowledge financial support from DGAPA-UNAM PAPIME grants
PE-105911 and PE-107114, and DGAPA-PAPIIT grants IG-101113,
IG-100616, IG-100819 and IG-101122.
LB acknowledges support through a *Cátedra Moshinsky* (2013).
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | docs | 1524 | # Library
---
```@meta
CurrentModule = TaylorSeries
```
## Module
```@docs
TaylorSeries
```
## Types
```@docs
Taylor1
HomogeneousPolynomial
TaylorN
AbstractSeries
```
## Functions and methods
```@docs
Taylor1(::Type{T}, ::Int) where {T<:Number}
HomogeneousPolynomial(::Type{T}, ::Int) where {T<:Number}
TaylorN(::Type{T}, ::Int; ::Int=get_order()) where {T<:Number}
set_variables
get_variables
show_params_TaylorN
show_monomials
getcoeff
evaluate
evaluate!
taylor_expand
update!
differentiate
derivative
integrate
gradient
jacobian
jacobian!
hessian
hessian!
constant_term
linear_polynomial
nonlinear_polynomial
inverse
inverse_map
abs
norm
isapprox
isless
isfinite
displayBigO
use_show_default
set_taylor1_varname
```
## Internals
```@docs
ParamsTaylor1
ParamsTaylorN
_InternalMutFuncs
generate_tables
generate_index_vectors
in_base
make_inverse_dict
resize_coeffs1!
resize_coeffsHP!
numtype
mul!
mul!(::HomogeneousPolynomial, ::HomogeneousPolynomial, ::HomogeneousPolynomial)
mul_scalar!(::HomogeneousPolynomial, ::NumberNotSeries, ::HomogeneousPolynomial, ::HomogeneousPolynomial)
mul!(::Vector{Taylor1{T}}, ::Union{Matrix{T},SparseMatrixCSC{T}},::Vector{Taylor1{T}}) where {T<:Number}
div!
pow!
square
sqr!
accsqr!
sqrt!
exp!
log!
sincos!
tan!
asin!
acos!
atan!
sinhcosh!
tanh!
asinh!
acosh!
atanh!
differentiate!
_internalmutfunc_call
_dict_unary_ops
_dict_binary_calls
_dict_unary_calls
_dict_binary_ops
_populate_dicts!
@isonethread
```
## Index
```@index
Pages = ["api.md"]
Order = [:type, :function]
```
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | docs | 4230 | # Background
---
## Introduction
[TaylorSeries.jl](https://github.com/lbenet/TaylorSeries.jl) is an implementation
of high-order
[automatic differentiation](http://en.wikipedia.org/wiki/Automatic_differentiation),
as presented in the book by W. Tucker [[1]](@ref refs). The general
idea is the following.
The Taylor series expansion of an analytical function
``f(t)`` with *one* independent variable ``t`` around ``t_0`` can be written as
```math
f(t) = f_0 + f_1 (t-t_0) + f_2 (t-t_0)^2 + \cdots + f_k (t-t_0)^k + \cdots,
```
where ``f_0=f(t_0)``, and the Taylor coefficients ``f_k = f_k(t_0)`` are the
``k``-th *normalized derivatives* at ``t_0``:
```math
f_k = \frac{1}{k!} \frac{{\rm d}^k f} {{\rm d} t^k}(t_0).
```
Thus, computing the high-order derivatives of ``f(t)`` is equivalent to computing
its Taylor expansion.
In the case of *many* independent variables the same statements hold, though
things become more subtle. Following Alex Haro's approach
[[2]](@ref refs), the Taylor
expansion is an infinite sum of *homogeneous polynomials* in the ``d`` independent
variables ``x_1, x_2, \dots, x_d``, which takes the form
```math
f_k (\mathbf{x_0}) = \sum_{m_1+\cdots+m_d = k}\, f_{m_1,\dots,m_d} \;\,
(x_1-x_{0_1})^{m_1} \cdots (x_d-x_{0_d})^{m_d} =
\sum_{|\mathbf{m}|=k} f_\mathbf{m}\, (\mathbf{x}-\mathbf{x_0})^\mathbf{m}.
```
Here, ``\mathbf{m}\in \mathbb{N}^d`` is a multi-index of the ``k``-th order
homogeneous polynomial and ``\mathbf{x}=(x_1,x_2,\ldots,x_d)`` are the
``d`` independent variables.
In both cases, a Taylor series expansion can be represented by a
vector containing its coefficients. The difference between the cases of
one or more independent variables is that the
coefficients are real or complex numbers in the former case, but
homogeneous polynomials in the latter case. This motivates
the construction of the [`Taylor1`](@ref) and [`TaylorN`](@ref) types.
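As a minimal sketch of this representation (the types themselves are described in detail in the user guide):
```julia
using TaylorSeries

t = Taylor1([1.0, 2.0, 3.0])          # 1 + 2t + 3t²; a dense vector of coefficients
x, y = set_variables("x y", order=3)  # two independent TaylorN variables
p = 1 + x + 2y^2
t.coeffs                              # [1.0, 2.0, 3.0]
p.coeffs                              # homogeneous polynomials of orders 0, 1, 2 and 3
```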
## Arithmetic operations
Arithmetic operations involving Taylor series can be expressed as
operations on the coefficients:
```math
(f(x) \pm g(x))_k = f_k \pm g_k , \\
(f(x) \cdot g(x))_k = \sum_{i=0}^k f_i \, g_{k-i} , \\
\Big( \frac{f(x)}{g(x)} \Big)_k = \frac{1}{g_0} \Big[ f_k -
\sum_{i=0}^{k-1} \big(\frac{f(x)}{g(x)}\big)_i \, g_{k-i} \Big]. \\
```
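For instance, the product rule above can be written directly in terms of plain
coefficient vectors; the following is only an illustrative sketch, not the
package's internal implementation:
```julia
# Cauchy product of two coefficient vectors a, b (degree k stored at index k+1)
function cauchy_product(a::Vector{T}, b::Vector{T}) where {T<:Number}
    n = min(length(a), length(b))
    c = zeros(T, n)
    for k in 0:n-1, i in 0:k
        c[k+1] += a[i+1] * b[k-i+1]
    end
    return c
end

cauchy_product([1.0, 1.0, 0.5], [1.0, -1.0, 0.5])  # exp(t)*exp(-t) ≈ [1.0, 0.0, 0.0]
```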
## Elementary functions of polynomials
Consider a function ``y(t)`` that satisfies the ordinary differential equation
``\dot{y} = f(y)``, ``y(t_0)=y_0``, where ``t`` is the independent variable.
Writing ``y(t)`` and ``f(t)`` as Taylor polynomials of ``t``, substituting these in the
differential equation and equating equal powers of
the independent variable leads to the recursion relation
```math
y_{n+1} = \frac{f_n}{n+1}.
```
The last equation and the corresponding initial condition
``y(t_0)=y_0`` define a recurrence relation
for the Taylor coefficients of ``y(t)`` around ``t_0``.
The following are examples of such recurrence relations for some
elementary functions:
```math
p(t) =(f(t))^\alpha , \qquad
p_k = \frac{1}{k \, f_0}\sum_{j=0}^{k-1}\big( \alpha(k-j)-j\big)
\, f_{k-j} \, p_j; \\
e(t) = \exp(f(t)) , \qquad
e_k = \frac{1}{k}\sum_{j=0}^{k-1} (k-j) \, f_{k-j} \, e_j; \\
l(t) = \log(f(t)) , \qquad
l_k = \frac{1}{f_0}\big( f_k - \frac{1}{k}\sum_{j=1}^{k-1} j
\, f_{k-j} \, l_j \big); \\
s(t) = \sin(f(t)) , \qquad
s_k = \frac{1}{k}\sum_{j=0}^{k-1} (k-j) \, f_{k-j} \, c_j; \\
c(t) = \cos(f(t)) , \qquad
c_k = -\frac{1}{k}\sum_{j=0}^{k-1} (k-j) \, f_{k-j} \, s_j.
```
The recursion relations for ``s(t) = \sin\big(f(t)\big)`` and
``c(t) = \cos\big(f(t)\big)`` depend
on each other; this reflects the fact that they are solutions of a second-order
differential equation.
All these relations hold for Taylor expansions in one
and more independent variables; in the latter case, the Taylor coefficients
``f_k`` are homogeneous polynomials of degree ``k``;
see [[2]](@ref refs).
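As a concrete illustration, a minimal sketch of the recurrence for
``e(t) = \exp(f(t))`` acting on a plain vector of coefficients (again only
illustrative; the package implements these recurrences internally for its own types):
```julia
# e_k = (1/k) * sum_{j=0}^{k-1} (k-j) * f_{k-j} * e_j, with e_0 = exp(f_0)
function exp_coeffs(f::Vector{Float64})
    e = zeros(length(f))
    e[1] = exp(f[1])
    for k in 1:length(f)-1
        s = 0.0
        for j in 0:k-1
            s += (k - j) * f[k-j+1] * e[j+1]
        end
        e[k+1] = s / k
    end
    return e
end

exp_coeffs([0.0, 1.0, 0.0, 0.0, 0.0])  # ≈ [1.0, 1.0, 0.5, 1/6, 1/24], i.e. exp(t)
```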
## [References](@id refs)
[1] W. Tucker, *Validated Numerics: A Short Introduction to Rigorous
Computations*, Princeton University Press (2011).
[2] A. Haro, *Automatic differentiation methods in computational dynamical
systems: Invariant manifolds and normal forms of vector fields at fixed points*,
[preprint](http://www.maia.ub.es/~alex/admcds/admcds.pdf).
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | docs | 5783 | # [Examples](@id Examples)
---
```@meta
CurrentModule = TaylorSeries
```
## Expanding exp(x) with `taylor_expand()`
The [`taylor_expand`](@ref) function takes the function to expand as its first argument, and the point to expand about as its second argument.
A keyword argument `order` determines which order to expand to:
```@repl 1
using TaylorSeries
taylor_expand(exp, 0, order=2)
```
And voilà! It really is that simple to calculate a Taylor polynomial. The next example is slightly more complicated.
## Expanding exp(x) with a symbolic object
An alternative way to compute the single-variable Taylor expansion of a function is by defining a variable of type `Taylor1`,
and using it in the function you wish to expand. The argument given to the [`Taylor1`](@ref) constructor is the order
to expand to:
```@repl 2
using TaylorSeries
x = Taylor1(2)
exp(x)
```
Let's also turn off printing the big-𝒪 error term for the next few examples, and set the printed independent variable to `x`:
```@repl 2
displayBigO(false)
set_taylor1_varname("x")
exp(x)
```
### Changing point to expand about
A variable constructed with `Taylor1()` automatically expands about the point `x=0`.
But what if you want to use the symbolic object to expand about a point different from zero?
Because expanding `exp(x)` about `x=1` is exactly the same as expanding `exp(x+1)` about `x=0`, simply replace the `x` in your expression with `x+1` to expand about `x=1`:
```@repl 2
p = exp(x+1)
p(0.01)
exp(1.01)
```
### More examples
You can even use custom functions:
```@repl 2
f(a) = 1/(a+1)
f(x)
```
Functions can be nested
```@repl 2
sin(f(x))
```
and complicated further in a modular way
```@repl 2
sin(exp(x+2))/(x+2)+cos(x+2)+f(x+2)
```
## Four-square identity
The first example shows that the four-square identity holds:
```math
(a_1^2 + a_2^2 + a_3^2 + a_4^2)\cdot(b_1^2 + b_2^2 + b_3^2 + b_4^2) = \\
\qquad (a_1 b_1 - a_2 b_2 - a_3 b_3 - a_4 b_4)^2 +
(a_1 b_2 + a_2 b_1 + a_3 b_4 - a_4 b_3)^2 + \\
\qquad (a_1 b_3 - a_2 b_4 + a_3 b_1 + a_4 b_2)^2 +
(a_1 b_4 + a_2 b_3 - a_3 b_2 + a_4 b_1)^2,
```
which was originally proved by Euler. The code can also be found in
[this test](https://github.com/JuliaDiff/TaylorSeries.jl/blob/master/test/identities_Euler.jl) of the package.
First, we reset the maximum degree of the polynomial to 4, since the RHS
of the equation has *a priori* terms of fourth order, and define the 8
independent variables.
```@repl euler
using TaylorSeries
# Define the variables α₁, ..., α₄, β₁, ..., β₄
make_variable(name, index::Int) = string(name, TaylorSeries.subscriptify(index))
variable_names = [make_variable("α", i) for i in 1:4]
append!(variable_names, [make_variable("β", i) for i in 1:4])
# Create the TaylorN variables (order=4, numvars=8)
a1, a2, a3, a4, b1, b2, b3, b4 = set_variables(variable_names, order=4)
a1 # variable a1
```
Now we compute each term appearing in the identity above
```@repl euler
# left-hand side
lhs1 = a1^2 + a2^2 + a3^2 + a4^2 ;
lhs2 = b1^2 + b2^2 + b3^2 + b4^2 ;
lhs = lhs1 * lhs2;
# right-hand side
rhs1 = (a1*b1 - a2*b2 - a3*b3 - a4*b4)^2 ;
rhs2 = (a1*b2 + a2*b1 + a3*b4 - a4*b3)^2 ;
rhs3 = (a1*b3 - a2*b4 + a3*b1 + a4*b2)^2 ;
rhs4 = (a1*b4 + a2*b3 - a3*b2 + a4*b1)^2 ;
rhs = rhs1 + rhs2 + rhs3 + rhs4;
```
We now compare the two sides of the identity,
```@repl euler
lhs == rhs
```
The identity is satisfied. ``\square``
## Fateman test
Richard J. Fateman, from Berkeley, proposed as a stringent test
of polynomial multiplication
the evaluation of ``s\cdot(s+1)``, where ``s = (1+x+y+z+w)^{20}``. This is
implemented in
the function `fateman1` below. We shall also consider the form
``s^2+s`` in `fateman2`,
which involves fewer operations (and makes a fairer comparison to what
Mathematica does).
```@repl fateman
using TaylorSeries
const order = 20
const x, y, z, w = set_variables(Int128, "x", numvars=4, order=2order)
function fateman1(degree::Int)
T = Int128
s = one(T) + x + y + z + w
s = s^degree
s * ( s + one(T) )
end
```
(In the following lines, which are run when the documentation is built,
for some reason the timing appears before the executed command.)
```@repl fateman
@time fateman1(0);
# hide
@time f1 = fateman1(20);
```
Another implementation of the same, but exploiting optimizations
related to `^2` yields:
```@repl fateman
function fateman2(degree::Int)
T = Int128
s = one(T) + x + y + z + w
s = s^degree
s^2 + s
end
fateman2(0);
@time f2 = fateman2(20); # the timing appears above
```
We note that the above functions use expansions in `Int128`. This is actually
required, since some coefficients are larger than `typemax(Int)`:
```@repl fateman
getcoeff(f2, (1,6,7,20)) # coefficient of x y^6 z^7 w^{20}
ans > typemax(Int)
length(f2)
sum(TaylorSeries.size_table)
set_variables("x", numvars=2, order=6) # hide
```
These examples show that
`fateman2` is nearly twice as fast as `fateman1`, and that the series has 135751
monomials in 4 variables.
### Benchmarks
The functions described above have been compared against Mathematica v11.1.
The relevant files used for benchmarking can be found
[here](https://github.com/JuliaDiff/TaylorSeries.jl/tree/master/perf).
Running on a MacPro with Intel-Xeon processors 2.7GHz, we obtain that
Mathematica requires on average (5 runs) 3.075957 seconds for the computation,
while for `fateman1` and `fateman2` above we obtain 2.15408 and 1.08337,
respectively.
Then, with the current version of `TaylorSeries.jl` and using Julia v0.7.0,
our implementation of `fateman1` is about 30%-40% faster.
(The original test by Fateman corresponds to `fateman1` above, which
avoids some optimizations related to squaring; the implementation in Mathematica
is done such that this optimization does not occur.)
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | docs | 2350 | # TaylorSeries.jl
A [Julia](http://julialang.org) package for Taylor expansions in one or more independent variables.
---
### Authors
- [Luis Benet](http://www.cicc.unam.mx/~benet/), Instituto de Ciencias
Físicas, Universidad Nacional Autónoma de México (UNAM).
- [David P. Sanders](http://sistemas.fciencias.unam.mx/~dsanders/), Facultad
de Ciencias, Universidad Nacional Autónoma de México (UNAM).
### Citing
If you find useful this package, please cite the paper:
Benet, L., & Sanders, D. P. (2019). TaylorSeries.jl: Taylor expansions in one and several variables in Julia. Journal of Open Source Software, 4(36), 1–4. https://doi.org/10.5281/zenodo.2601941
### License
TaylorSeries is licensed under the MIT "Expat" license; see
[LICENSE](https://github.com/lbenet/TaylorSeries.jl/blob/master/LICENSE.md) for
the full license text.
### Installation
TaylorSeries.jl is a [registered package](http://pkg.julialang.org), and is
simply installed by running
```julia
pkg> add TaylorSeries
```
### Related packages
- [Polynomials.jl](https://github.com/JuliaMath/Polynomials.jl): Polynomial manipulations
- [PowerSeries.jl](https://github.com/jwmerrill/PowerSeries.jl): Truncated power series for Julia
- [MultivariatePolynomials.jl](https://github.com/JuliaAlgebra/MultivariatePolynomials.jl): Multivariate polynomials interface
- [AbstractAlgebra.jl](https://github.com/Nemocas/AbstractAlgebra.jl): Generic abstract algebra functionality in pure Julia
- [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl): Forward Mode Automatic Differentiation for Julia
- [ReverseDiff.jl](https://github.com/JuliaDiff/ReverseDiff.jl): Reverse Mode Automatic Differentiation for Julia
- [HyperDualNumbers.jl](https://github.com/JuliaDiff/HyperDualNumbers.jl): Julia implementation of HyperDualNumbers
### Acknowledgments
This project began (using Python) during a Masters' course in the postgraduate
programs in Physics and in Mathematics at UNAM, during the second half of 2013.
We thank the participants of the course for putting up with the half-baked
material and contributing energy and ideas.
We acknowledge financial support from DGAPA-UNAM PAPIME grants
PE-105911 and PE-107114, and DGAPA-PAPIIT grants IG-101113,
IG-100616, IG-100819 and IG-101122.
LB acknowledges support through a *Cátedra Marcos Moshinsky* (2013).
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | docs | 22706 | # User guide
---
```@meta
CurrentModule = TaylorSeries
```
For some simple examples, head over to the [examples section](@ref Examples).
For a detailed guide, keep reading.
[TaylorSeries.jl](https://github.com/JuliaDiff/TaylorSeries.jl)
is a basic polynomial algebraic manipulator in one or more
variables; these two cases are treated separately. Three new types are defined,
[`Taylor1`](@ref), [`HomogeneousPolynomial`](@ref) and [`TaylorN`](@ref),
which correspond to
expansions in one independent variable, homogeneous polynomials of various
variables, and the polynomial
series in many independent variables, respectively. These types are subtypes
of `AbstractSeries`, which in turn is a subtype of `Number`, and are defined
parametrically.
The package is loaded as usual:
```@repl userguide
using TaylorSeries
```
## One independent variable
Taylor expansions in one variable are represented by the [`Taylor1`](@ref) type,
which consists of a vector of coefficients (fieldname `coeffs`) and the maximum
order considered for the expansion (fieldname `order`). The
coefficients are arranged in ascending order with respect to the degree of the
monomial, so that
`coeffs[1]` is the constant term, `coeffs[2]` gives the first order term (`t^1`),
etc. Yet, it is possible to have the natural ordering with respect
to the degree; see below. This is a dense representation of the polynomial.
The order of the polynomial can be
omitted in the constructor, which is then fixed by the length of the
vector of coefficients. If the length of the vector does not correspond with
the `order`, then `order` is used, which effectively truncates the polynomial to degree `order`.
```@repl userguide
Taylor1([1, 2, 3],4) # Polynomial of order 4 with coefficients 1, 2, 3
Taylor1([0.0, 1im]) # Also works with complex numbers
Taylor1(ones(8), 2) # Polynomial truncated to order 2
shift_taylor(a) = a + Taylor1(typeof(a),5) ## a + taylor-polynomial of order 5
t = shift_taylor(0.0) # Independent variable `t`
```
!!! warning
The information about the maximum order considered is displayed using a big-𝒪 notation.
The convention followed when different orders are combined, and when certain functions
are used in a way that they reduce the order (like [`differentiate`](@ref)), is to be consistent
with the mathematics and the big-𝒪 notation, i.e., to propagate the lowest order.
In some cases, it is desirable to not display the big-𝒪 notation. The function [`displayBigO`](@ref)
allows controlling whether it is displayed or not.
```@repl userguide
displayBigO(false) # turn-off displaying big O notation
t
displayBigO(true) # turn it on
t
```
Similarly, it is possible to control some aspects of the format of the
displayed series through the function [`use_show_default`](@ref);
`use_show_default(true)` uses the `Base.show_default`, while
`use_show_default(false)` uses the custom display form (default).
```@repl userguide
use_show_default(true) # use Base.show method
t
use_show_default(false) # use custom `show`
t
```
The definition of `shift_taylor(a)` uses the method
[`Taylor1([::Type{Float64}], order::Int)`](@ref), which is a
shortcut to define the independent variable of a Taylor expansion,
of given type and order (the default is `Float64`).
Defining the independent variable in advance is one of the easiest
ways to use the package.
The usual arithmetic operators (`+`, `-`, `*`, `/`, `^`, `==`) have been
extended to work with the [`Taylor1`](@ref) type, including promotions that
involve `Number`s. The operations return a valid Taylor expansion, consistent
with the order of the series. This is apparent in the penultimate example
below, where the first non-zero coefficient is beyond the order of the expansion,
and hence the result is zero.
```@repl userguide
t*(3t+2.5)
1/(1-t)
t*(t^2-4)/(t+2)
tI = im*t
(1-t)^3.2
(1+t)^t
Taylor1(3) + Taylor1(5) == 2Taylor1(3) # big-𝒪 convention applies
t^6 # t is of order 5
t^2 / t # The result is of order 4, instead of 5
```
Note that the last example returns a `Taylor1` series of order 4, instead
of order 5; this is consistent with the number of known coefficients of the
returned series, since the result corresponds to factoring out `t` in the numerator
to cancel the same factor in the denominator.
`Taylor1` is also equipped with a total order, provided by overloading [`isless`](@ref).
The ordering is consistent with the interpretation that there are infinitesimal
elements in the algebra; for details see M. Berz, "Automatic Differentiation as
Nonarchimedean Analysis", Computer Arithmetic and Enclosure Methods, (1992), Elsevier,
439-450. This is illustrated by:
```@repl userguide
1 > t > 2*t^2 > 100*t^3 >= 0
```
If no valid Taylor expansion can be computed an error is thrown, for instance,
when a derivative is not defined at the expansion point, or it simply diverges.
```@repl userguide
1/t
t^3.2
abs(t)
```
Several elementary functions have been implemented; their coefficients
are computed recursively. At the moment of this writing, these functions
are `exp`, `log`, `sqrt`, the trigonometric functions
`sin`, `cos` and `tan`, their inverses, as well as the hyperbolic functions
`sinh`, `cosh` and `tanh` and their inverses;
more functions will be added in the future. Note that this way of obtaining the
Taylor coefficients is not a *lazy* way, in particular for many independent
variables. Yet, the implementation is efficient enough, especially for the
integration of ordinary differential equations, which is among the
applications we have in mind (see
[TaylorIntegration.jl](https://github.com/PerezHz/TaylorIntegration.jl)).
```@repl userguide
exp(t)
log(1-t)
sqrt(1 + t)
imag(exp(tI)')
real(exp(Taylor1([0.0,1im],17))) - cos(Taylor1([0.0,1.0],17)) == 0.0
convert(Taylor1{Rational{Int}}, exp(t))
```
Again, errors are thrown whenever it is necessary.
```@repl userguide
sqrt(t)
log(t)
```
To obtain a specific coefficient, [`getcoeff`](@ref) can be used. Another
alternative is to request the specific degree using the vector notation,
where the index corresponds to the degree of the term.
```@repl userguide
expon = exp(t)
getcoeff(expon, 0) == expon[0]
rationalize(expon[3])
```
Note that certain arithmetic operations, or the application of some functions,
may change the order of the result, as mentioned above.
```@repl userguide
t # order 5 independent variable
t^2/t # returns an order 4 series expansion
sqrt(t^2) # returns an order 2 series expansion
(t^4)^(1/4) # order 1 series
```
Differentiating and integrating is straightforward for polynomial expansions in
one variable, using [`differentiate`](@ref) and [`integrate`](@ref). (The
function [`derivative`](@ref) is a synonym of `differentiate`.) These
functions return the corresponding [`Taylor1`](@ref) expansions.
The order of the derivative of a `Taylor1` is reduced by 1.
For the integral, an integration constant may be
set by the user (the default is zero); the order of the integrated polynomial
for the integral is *kept unchanged*. The *value* of the ``n``-th (``n \ge 0``)
derivative is obtained using `differentiate(n,a)`, where `a` is a Taylor series;
likewise, the `Taylor1` polynomial of the ``n``-th derivative is obtained as
`differentiate(a,n)`; the resulting polynomial is of order `get_order(a)-n`.
```@repl userguide
differentiate(exp(t)) # exp(t) is of order 5; the derivative is of order 4
integrate(exp(t)) # the resulting TaylorSeries is of order 5
integrate( exp(t), 1.0)
integrate( differentiate( exp(-t)), 1.0 ) == exp(-t)
differentiate(1, exp(shift_taylor(1.0))) == exp(1.0)
differentiate(5, exp(shift_taylor(1.0))) == exp(1.0) # 5-th differentiate of `exp(1+t)`
derivative(exp(1+t), 3) # Taylor1 polynomial of the 3-rd derivative of `exp(1+t)`
```
We also have methods to invert a `Taylor1` polynomial, using either [`inverse`](@ref), which uses Lagrange inversion, or [`inverse_map`](@ref), which uses an algorithm developed by M. Berz; the latter can also be used for `TaylorN` polynomials; see below.
To evaluate a Taylor series at a given point, Horner's rule is used via the
function `evaluate(a, dt)`. Here, `dt` is the increment from
the point ``t_0`` around which the Taylor expansion of `a` is calculated,
i.e., the series
is evaluated at ``t = t_0 + dt``. Omitting `dt` corresponds to ``dt = 0``;
see [`evaluate`](@ref).
```@repl userguide
evaluate(exp(shift_taylor(1.0))) - ℯ # exp(t) around t0=1 (order 5), evaluated there (dt=0)
evaluate(exp(t), 1) - ℯ # exp(t) around t0=0 (order 5), evaluated at t=1
evaluate(exp( Taylor1(17) ), 1) - ℯ # exp(t) around t0=0, order 17
tBig = Taylor1(BigFloat, 50) # Independent variable with BigFloats, order 50
eBig = evaluate( exp(tBig), one(BigFloat) )
ℯ - eBig
```
Another way to evaluate the value of a `Taylor1` polynomial `p` at a given value `x`,
is to call `p` as if it was a function, i.e., `p(x)`:
```@repl userguide
t = Taylor1(15)
p = sin(t)
evaluate(p, pi/2) # value of p at pi/2 using `evaluate`
p(pi/2) # value of p at pi/2 by evaluating p as a function
p(pi/2) == evaluate(p, pi/2)
p(0.0)
p() == p(0.0) # p() is a shortcut to obtain the 0-th order coefficient of `p`
```
Note that the syntax `p(x)` is equivalent to `evaluate(p, x)`, whereas `p()` is
equivalent to `evaluate(p)`.
Useful shortcuts are [`taylor_expand`](@ref) and [`update!`](@ref).
The former returns
the expansion of a function around a given value `t0`, mimicking the use
of `shift_taylor` above. In turn, `update!`
provides an in-place update of a given Taylor polynomial, that is, it shifts
it further by the provided amount. Note that the type of the `Taylor1` polynomial and the shifted point must be compatible, in the sense that the latter must be convertible into the parametric type of the former.
```@repl userguide
p = taylor_expand( x -> sin(x), pi/2, order=16) # 16-th order expansion of sin(t) around pi/2
update!(p, 0.025) # updates the expansion given by p, by shifting it further by 0.025
p
```
## Many variables
A polynomial in ``N>1`` variables can be represented in (at least) two ways:
As a vector whose coefficients are homogeneous polynomials of fixed degree, or
as a vector whose coefficients are polynomials in ``N-1`` variables. The
current implementation of `TaylorSeries.jl` corresponds to the first option,
though some infrastructure has been built that permits to develop the second
one. An elegant (lazy) implementation of the second representation
was discussed [here](https://groups.google.com/forum/#!msg/julia-users/AkK_UdST3Ig/sNrtyRJHK0AJ).
The structure [`TaylorN`](@ref) is constructed as a vector of parameterized
homogeneous polynomials
defined by the type [`HomogeneousPolynomial`](@ref), which in turn is an ordered vector of
coefficients of given order (degree). This implementation requires the user
to specify the (maximum) order considered and the number of independent
variables at the beginning, which can be conveniently done using
[`set_variables`](@ref). A vector of the resulting Taylor variables is returned:
```@repl userguide
x, y = set_variables("x y")
typeof(x)
x.order
x.coeffs
```
As shown, the resulting objects are of `TaylorN{Float64}` type.
There is an optional `order` keyword argument in [`set_variables`](@ref),
used to specify the maximum order of the `TaylorN` polynomials. Note that
one can specify the variables using a vector of symbols.
```@repl userguide
set_variables([:x, :y], order=10)
```
Similarly, subindexed variables are also available by specifying a single
variable name and the optional keyword argument `numvars`:
```@repl userguide
set_variables("α", numvars=3)
```
Alternatively to `set_variables`, [`get_variables`](@ref) can be used if one
does not want to change internal dictionaries. `get_variables` returns a vector
of `TaylorN` independent variables of a desired `order`
(less than `get_order()`, so the
internals don't have to change), with the number of variables and the variable
names defined initially by `set_variables`.
```@repl userguide
get_variables(2) # vector of independent variables of order 2
```
!!! warning
An `OverflowError` is thrown when the construction of the internal tables is not
fully consistent, avoiding silent errors; see [issue #85](https://github.com/JuliaDiff/TaylorSeries.jl/issues/85).
The function [`show_params_TaylorN`](@ref) displays the current values of the
parameters, in an info block.
```@repl userguide
show_params_TaylorN()
```
Internally, changing the parameters (maximum order and number of variables)
redefines the hash-tables that
translate the index of the coefficients of a [`HomogeneousPolynomial`](@ref)
of given order into the corresponding
multi-variable monomials, or the other way around.
Fixing these values from the start is imperative; the initial (default) values
are `order = 6` and `num_vars=2`.
The easiest way to construct a [`TaylorN`](@ref) object is by defining
the independent variables. This can be done using `set_variables` as above,
or through the method [`TaylorN{T<:Number}(::Type{T}, nv::Int)`](@ref)
for the `nv` independent `TaylorN{T}` variable;
the order can be also specified using the optional keyword argument `order`.
```@repl userguide
x, y = set_variables("x y", numvars=2, order=6);
x
TaylorN(1, order=4) # variable 1 of order 4
TaylorN(Int, 2) # variable 2, type Int, order=get_order()=6
```
Other ways of constructing [`TaylorN`](@ref) polynomials involve
using [`HomogeneousPolynomial`](@ref)
objects directly, which is less convenient.
```@repl userguide
set_variables(:x, numvars=2); # symbols can be used
HomogeneousPolynomial([1,-1])
TaylorN([HomogeneousPolynomial([1,0]), HomogeneousPolynomial([1,2,3])],4)
```
The Taylor expansions are implemented around 0 for all variables; if the
expansion
is needed around a different value, the trick is a simple translation of
the corresponding independent variable, i.e. ``x \to x+a``.
As before, the usual arithmetic operators (`+`, `-`, `*`, `/`, `^`, `==`)
have been extended to work with [`TaylorN`](@ref) objects, including the
appropriate promotions to deal with the usual numeric types.
Note that some of the arithmetic operations have been extended for
[`HomogeneousPolynomial`](@ref), whenever the result is a
[`HomogeneousPolynomial`](@ref); division, for instance, is not extended.
The same convention used for `Taylor1` objects is used when combining
`TaylorN` polynomials of different order.
Both `HomogeneousPolynomial` and `TaylorN` are equipped with a total *lexicographical*
order, provided by overloading [`isless`](@ref).
The ordering is consistent with the interpretation that there are infinitesimal
elements in the algebra; for details see M. Berz, "Automatic Differentiation as
Nonarchimedean Analysis", Computer Arithmetic and Enclosure Methods, (1992), Elsevier,
439-450.
Essentially, the lexicographic order works as follows: smaller order monomials
are *larger* than higher order monomials; when the order is the same, *larger* monomials
appear earlier in the hash-tables; the function [`show_monomials`](@ref) displays
the monomials of a given order in this internal ordering.
```@repl userguide
x, y = set_variables("x y", order=10);
show_monomials(2)
```
Then the following holds:
```@repl userguide
0 < 1e8 * y^2 < x*y < x^2 < y < x/1e8 < 1.0
```
The elementary functions have also been
implemented, again by computing their coefficients recursively:
```@repl userguide
exy = exp(x+y)
```
The function [`getcoeff`](@ref)
gives the normalized coefficient of the polynomial that corresponds to the
monomial specified by the tuple or vector `v` containing the powers.
For instance, for
the polynomial `exy` above, the coefficient of the monomial ``x^3 y^5`` is
obtained using `getcoeff(exy, (3,5))` or `getcoeff(exy, [3,5])`.
```@repl userguide
getcoeff(exy, (3,5))
rationalize(ans)
```
Similar to `Taylor1`, vector notation can be used to request specific
coefficients of `HomogeneousPolynomial` or `TaylorN` objects. For `TaylorN`
objects, the index refers to the degree of the `HomogeneousPolynomial`.
In the case of `HomogeneousPolynomial` the index refers to the position
of the hash table. The function [`show_monomials`](@ref) can be used to
obtain the coefficient of a specific monomial, given the degree of the
`HomogeneousPolynomial`.
```@repl userguide
exy[8] # get the 8th order term
show_monomials(8)
exy[8][6] # get the 6th coeff of the 8th order term
```
Partial differentiation is also implemented for [`TaylorN`](@ref) objects,
through the function [`differentiate`](@ref), specifying the number
of the variable, or its symbol, as the second argument.
```@repl userguide
p = x^3 + 2x^2 * y - 7x + 2
q = y - x^4
differentiate( p, 1 ) # partial derivative with respect to 1st variable
differentiate( q, :y ) # partial derivative with respect to :y
```
If we ask for the partial derivative with respect to a non-defined variable,
an error is thrown.
```@repl userguide
differentiate( q, 3 ) # error, since we are dealing with 2 variables
```
To obtain more specific partial derivatives we have two specialized methods
that involve a tuple, which represents the number of derivatives with
respect to each variable (so the tuple's length has to be the
same as the actual number of variables). These methods either return
the `TaylorN` object in question, or the coefficient corresponding to
the specified tuple, normalized by the factorials defined by the tuple.
The latter is in essence the 0-th order coefficient of the former.
```@repl userguide
differentiate(p, (2,1)) # two derivatives on :x and one on :y
differentiate((2,1), p) # 0-th order coefficient of the previous expression
differentiate(p, (1,1)) # one derivative on :x and one on :y
differentiate((1,1), p) # 0-th order coefficient of the previous expression
```
Integration with respect to the `r`-th variable for
`HomogeneousPolynomial`s and `TaylorN` objects is obtained
using [`integrate`](@ref). Note that `integrate` for `TaylorN`
objects allows to specify a constant of integration, which must
be independent from the integrated variable. Again, the integration
variable may be specified by its symbol.
```@repl userguide
integrate( differentiate( p, 1 ), 1) # integrate with respect to the first variable
integrate( differentiate( p, 1 ), :x, 2) # integration with respect to :x, constant of integration is 2
integrate( differentiate( q, 2 ), :y, -x^4) == q
integrate( differentiate( q, 2 ), 2, y)
```
[`evaluate`](@ref) can also be used for [`TaylorN`](@ref) objects, using
it on vectors of
numbers (`Real` or `Complex`); the length of the vector must coincide with the
number of independent variables. [`evaluate`](@ref) also allows specifying only
one variable and a value.
```@repl userguide
evaluate(exy, [.1,.02]) == exp(0.12)
evaluate(exy, :x, 0.0) == exp(y) # evaluate `exy` for :x -> 0
```
Analogously to `Taylor1`, another way to obtain the value of a `TaylorN`
polynomial `p` at a given point `x`, is to call it as if it were a function:
the syntax `p(x)` for `p::TaylorN` is equivalent to `evaluate(p,x)`, and
`p()` is equivalent to `evaluate(p)`.
```@repl userguide
exy([.1,.02]) == exp(0.12)
exy(:x, 0.0)
```
Internally, `evaluate` for `TaylorN` considers separately
the contributions of all `HomogeneousPolynomial`s by `order`,
which are finally added up *after* sorting them in place (which is the default)
in increasing order by `abs2`. This is done in order to
use as many significant figures as possible of all terms
in the final sum, which then should yield a more
accurate result. This default can be changed to a non-sorting
sum, though, which may be more performant or useful for
certain subtypes of `Number` which, for instance, do not have `isless`
defined. See
[this issue](https://github.com/JuliaDiff/TaylorSeries.jl/issues/242)
for a motivating example. This can be done using the keyword
`sorting` in `evaluate`, which expects a `Bool`, or by passing
that boolean as the *first* argument in the function-like evaluation.
```@repl userguide
exy([.1,.02]) # default is `sorting=true`
evaluate(exy, [.1,.02]; sorting=false)
exy(false, [.1,.02])
```
In the examples shown above, the first entry corresponds to the
default case (`sorting=true`), which yields the same result as
`exp(0.12)`, and the remaining two illustrate
turning off sorting the terms. Note that the results are not
identical, since [floating point addition is not
associative](https://en.wikipedia.org/wiki/Associative_property#Nonassociativity_of_floating_point_calculation),
which may introduce rounding errors.
The functions `taylor_expand` and `update!` work as well for `TaylorN`.
```@repl userguide
xysq = x^2 + y^2
update!(xysq, [1.0, -2.0]) # expand around (1,-2)
xysq
update!(xysq, [-1.0, 2.0]) # shift-back
xysq == x^2 + y^2
```
Functions to compute the gradient, Jacobian and
Hessian have also been implemented; note that these
functions *are not* exported, so their use requires the
prefix `TaylorSeries`. Using the
polynomials ``p = x^3 + 2x^2 y - 7 x + 2`` and ``q = y-x^4`` defined above,
we may use [`TaylorSeries.gradient`](@ref) (or `∇`); the results are of
type `Array{TaylorN{T},1}`. To compute the Jacobian and Hessian of a vector field
evaluated at a point, we use respectively [`TaylorSeries.jacobian`](@ref) and
[`TaylorSeries.hessian`](@ref):
```@repl userguide
∇(p)
TaylorSeries.gradient( q )
r = p-q-2*p*q
TaylorSeries.hessian(ans)
TaylorSeries.jacobian([p,q], [2,1])
TaylorSeries.hessian(r, [1.0,1.0])
```
Other specific applications are described in the
[Examples](@ref).
## Mixtures
As mentioned above, `Taylor1{T}`, `HomogeneousPolynomial{T}` and `TaylorN{T}`
are parameterized structures such that `T<:AbstractSeries`, the latter
is a subtype of `Number`. Then, we may actually define Taylor expansions in
``N+1`` variables, where one of the variables (the `Taylor1` variable) is
somewhat special.
```@repl userguide
x, y = set_variables("x y", order=3)
t1N = Taylor1([zero(x), one(x)], 5)
```
The last line defines a `Taylor1{TaylorN{Float64}}` variable, which is of order
5 in `t` and order 3 in `x` and `y`. Then, we can evaluate functions involving
such polynomials:
```@repl userguide
cos(2.1+x+t1N)
```
This kind of expansion is of interest when studying the dependence on
parameters, for instance in the context of bifurcation theory or when considering
the dependence of the solution of a differential equation on the initial conditions,
around a given solution. In this case, `x` and `y` represent small variations
around a given value of the parameters, or around some specific initial condition.
Such constructions are exploited in the package [`TaylorIntegration.jl`](https://github.com/PerezHz/TaylorIntegration.jl).
```@meta
CurrentModule = nothing
```
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.18.1 | bb212ead98022455eed514cff0adfa5de8645258 | docs | 6348 | ---
title: 'TaylorSeries.jl: Taylor expansions in one and several variables in Julia'
tags:
- Taylor series
- Automatic differentiation
- Julia
authors:
- name: Luis Benet
orcid: 0000-0002-8470-9054
affiliation: 1
- name: David P. Sanders
orcid: 0000-0001-5593-1564
affiliation: 2
affiliations:
- name: Instituto de Ciencias Físicas, Universidad Nacional Autónoma de México (UNAM)
index: 1
- name: Departamento de Física, Facultad de Ciencias, Universidad Nacional Autónoma de México (UNAM)
index: 2
date: September 20, 2018
bibliography: paper.bib
---
# Summary
The purpose of the `TaylorSeries.jl` package is to provide a framework to exploit Taylor polynomials in one and several variables
in the [Julia programming language](https://julialang.org) [@julia].
It can be thought of as providing a primitive CAS (computer algebra system),
which works numerically and not symbolically.
The package allows the user to define dense polynomials $p(x)$ of one variable and $p(\mathbf{x})$ of several variables with a specified maximum degree, and perform operations on them, including powers and composition, as well as series expansions
for elementary functions of polynomials, for example $\exp[p(x)]$,
where techniques of automatic differentiation are used
[@Tucker:ValidatedNumerics; @HaroEtAl:ParameterizMeth]. Differentiation and
integration are also implemented.
Two basic immutable types are defined, `Taylor1{T}` and `TaylorN{T}`,
which represent polynomials in one and several variables, respectively; the maximum degree is a field of the types. These types are parametrized by the type `T` of the polynomial coefficients; they essentially consist of one-dimensional arrays of coefficients, ordered by increasing degree.
In the case of `TaylorN`, the
coefficients are `HomogeneousPolynomial`s, which in turn are vectors
of coefficients representing all monomials with a given number of variables
and order (total degree), ordered lexicographically. Higher degree
polynomials require more
memory allocation, especially for several variables; while we have not extensively tested the limits of the degree of the polynomials that can be used, `Taylor1` polynomials up to degree 80 and
`TaylorN` polynomials up to degree 60 in 4 variables have been successfully used. Note that the current implementation of multi-variable series builds up extensive tables in memory which allow to speed up the index calculations.
Julia's parametrized type system allows the construction of Taylor series whose coefficient type is any subtype of the `Number` abstract type. Use cases include complex numbers,
arbitrary precision `BigFloat`s [@MPFR],
`Interval`s [@IntervalArithmetic.jl], `ArbFloat`s [@ArbFloats.jl],
as well as `Taylor1` and `TaylorN` objects themselves.
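As a minimal sketch illustrating this flexibility (illustrative only):
```julia
using TaylorSeries

t = Taylor1(BigFloat, 30)   # independent variable with BigFloat coefficients
getcoeff(exp(t), 30)        # 30th Taylor coefficient of exp, i.e. 1/30!, in extended precision
```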
`TaylorSeries.jl` is the main component of
[`TaylorIntegration.jl`](https://github.com/PerezHz/TaylorIntegration.jl)
[@TaylorIntegration.jl], whose aim is to perform accurate integration
of ODEs using the Taylor method, including jet transport techniques,
where a small region around an initial condition is integrated.
It is also a key component of
[`TaylorModels.jl`](https://github.com/JuliaIntervals/TaylorModels.jl)
[@TaylorModels.jl], whose aim is to construct rigorous polynomial
approximations of functions.
# Examples
We present three examples to illustrate the use of `TaylorSeries.jl`. Other
examples, as well as a detailed user guide, can be found in the
[documentation](http://www.juliadiff.org/TaylorSeries.jl/stable).
## Hermite polynomials
As a first example we describe how to generate the
[Hermite polynomials](https://en.wikipedia.org/wiki/Hermite_polynomials)
("physicist's" version) up to a given maximum order. Firstly we directly exploit the recurrence relation satisfied by the polynomials.
{width=110%}
The example above can be slightly modified to compute, for example, the 100th Hermite polynomial.
In this case, the coefficients will be larger than $2^{63}-1$, so the modular
behavior, under overflow of the standard `Int64` type, will not suffice. Rather, the polynomials should
be generated with `hermite_polynomials(BigInt, 100)` to ensure
the use of arbitrary-length integers.
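Since the code is included above as an image, the following is only a sketch of how
such a function could look, using the three-term recurrence
$H_{n+1}(x) = 2x H_n(x) - 2n H_{n-1}(x)$; the exact code shown in the figure may differ.
```julia
using TaylorSeries

function hermite_polynomials(::Type{T}, nmax::Int) where {T<:Integer}
    x = Taylor1(T, nmax)                 # independent variable, truncated at order nmax
    H = [one(x), 2x]                     # H₀ = 1, H₁ = 2x
    for n in 1:nmax-1
        push!(H, 2x*H[n+1] - 2n*H[n])    # Hₙ₊₁ = 2x Hₙ − 2n Hₙ₋₁
    end
    return H
end
hermite_polynomials(nmax::Int) = hermite_polynomials(Int, nmax)

hermite_polynomials(6)[end]              # H₆(x) as a Taylor1 polynomial
```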
## Using a generating function
As a second example, we describe a numerical way of obtaining the
Hermite polynomials from their generating function: the $n$th Hermite polynomial
corresponds to the $n$th derivative of the function $\exp(2t \, x - t^2)$.
{width=110%}
This example shows that the calculations are performed numerically and not
symbolically, using `TaylorSeries.jl` as a polynomial manipulator; this
is manifested by the fact that the last coefficient of `HH(6)` is not
identical to an integer.
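Again only as a sketch (the figure above shows the code actually used, which may differ
in details): $x$ can be introduced as a `TaylorN` variable and $t$ as a `Taylor1`
variable with `TaylorN` coefficients, so that the $n$th Hermite polynomial is obtained
as the value of the $n$th derivative of the generating function with respect to $t$ at $t=0$.
```julia
using TaylorSeries

x  = set_variables("x", numvars=1, order=10)[1]   # TaylorN variable
t  = Taylor1([zero(x), one(x)], 10)               # Taylor1{TaylorN{Float64}} variable
gf = exp(2t*x - t^2)                              # generating function
HH(n::Int) = differentiate(n, gf)                 # value of the n-th derivative at t = 0

HH(6)   # ≈ -120 + 720 x² - 480 x⁴ + 64 x⁶ (last coefficient not exactly an integer)
```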
## Taylor method for integrating ordinary differential equations
As a final example, we give a simple implementation of Picard
iteration to integrate an ordinary differential equation, which is equivalent to
the Taylor method.
We consider the initial-value problem $\dot{x} = x$,
with initial condition $x(0) = 1$. One step of the integration corresponds
to constructing the Taylor series of the solution $x(t)$ in powers of $t$:
{width=110%}
Thus this Taylor expansion of order 20 around $t_0=0$
suffices to obtain the exact solution at $t=1$, while the error at time $t=2$
from the same expansion is $4.53 \times 10^{-14}$.
This indicates that a proper treatment should estimate the size of the time step to be taken as a function of the solution.
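A minimal sketch of such a Picard iteration for $\dot{x} = x$, $x(0)=1$ (illustrative
only; the figure above shows the version used in the paper):
```julia
using TaylorSeries

function picard_exp(order::Int = 20)
    t = Taylor1(Float64, order)
    x = 1.0 + zero(t)              # initial guess: the constant solution
    for _ in 1:order               # each step fixes one more Taylor coefficient
        x = 1.0 + integrate(x)     # x ← x₀ + ∫₀ᵗ x ds
    end
    return x
end

x = picard_exp(20)
x(1.0) - exp(1.0)   # ≈ 0 at t = 1
x(2.0) - exp(2.0)   # small but nonzero truncation error at t = 2
```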
## Acknowledgements
We are thankful for the additions of
[all contributors](https://github.com/JuliaDiff/TaylorSeries.jl/graphs/contributors)
to this project. We acknowledge financial support from PAPIME grants
PE-105911 and PE-107114, and PAPIIT grants IG-101113, IG-100616
and IN-117117. LB and DPS acknowledge support via the Cátedra Marcos Moshinsky (2013 and 2018, respectively).
# References
| TaylorSeries | https://github.com/JuliaDiff/TaylorSeries.jl.git |
|
[
"MIT"
] | 0.2.0 | a0194cdc135937ad2d3016b0af97e119245d11ea | code | 1827 | module CheckedArithmeticCore
export safearg_type, safearg, safeconvert, accumulatortype, acc
"""
newT = CheckedArithmeticCore.safearg_type(::Type{T})
Return a "reasonably safe" type `newT` for computation with numbers of type `T`.
For example, for `UInt8` one might return `UInt128`, because one is much less likely
to overflow with `UInt128`.
"""
function safearg_type end
"""
xsafe = CheckedArithmeticCore.safearg(x)
Return a variant `xsafe` of `x` that is "reasonably safe" for non-overflowing computation.
For numbers, this uses [`CheckedArithmetic.safearg_type`](@ref).
For containers and other non-number types, specialize `safearg` directly.
"""
safearg(x::T) where T = convert(safearg_type(T), x)
"""
xc = safeconvert(T, x)
Convert `x` to type `T`, "safely." This is designed for comparison to results computed by
[`CheckedArithmetic.@check`](@ref), i.e., for arguments converted by
[`CheckedArithmeticCore.safearg`](@ref).
"""
safeconvert(::Type{T}, x) where T = convert(T, x)
"""
Tnew = accumulatortype(op, T1, T2, ...)
Tnew = accumulatortype(T1, T2, ...)
Return a type `Tnew` suitable for accumulation (reduction) of elements of type `T` under
operation `op`.
# Examples
```jldoctest; setup = :(using CheckedArithmetic)
julia> accumulatortype(+, UInt8)
$UInt
```
"""
Base.@pure accumulatortype(op::Function, T1::Type, T2::Type, T3::Type...) =
accumulatortype(op, promote_type(T1, T2, T3...))
Base.@pure accumulatortype(T1::Type, T2::Type, T3::Type...) =
accumulatortype(*, T1, T2, T3...)
accumulatortype(::Type{T}) where T = accumulatortype(*, T)
"""
xacc = acc(x)
Convert `x` to type [`accumulatortype`](@ref)`(typeof(x))`.
"""
acc(x) = convert(accumulatortype(typeof(x)), x)
acc(f::F, x) where F<:Function = convert(accumulatortype(f, typeof(x)), x)
end # module
| CheckedArithmetic | https://github.com/JuliaMath/CheckedArithmetic.jl.git |
|
[
"MIT"
] | 0.2.0 | a0194cdc135937ad2d3016b0af97e119245d11ea | code | 1103 | using CheckedArithmeticCore
using Test
struct MyType end
struct MySafeType end
Base.convert(::Type{MySafeType}, ::MyType) = MySafeType()
CheckedArithmeticCore.safearg_type(::Type{MyType}) = MySafeType
CheckedArithmeticCore.accumulatortype(::typeof(+), ::Type{MyType}) = MyType
CheckedArithmeticCore.accumulatortype(::typeof(+), ::Type{MySafeType}) = MySafeType
CheckedArithmeticCore.accumulatortype(::typeof(*), ::Type{MyType}) = MySafeType
CheckedArithmeticCore.accumulatortype(::typeof(*), ::Type{MySafeType}) = MySafeType
Base.promote_rule(::Type{MyType}, ::Type{MySafeType}) = MySafeType
# fallback
@test safearg(MyType()) === MySafeType()
# fallback
@test safeconvert(UInt16, 0x12) === 0x0012
# fallback
@test accumulatortype(MyType) === MySafeType
@test accumulatortype(MyType, MyType) === MySafeType
@test accumulatortype(+, MyType) === MyType
@test accumulatortype(*, MyType) === MySafeType
@test accumulatortype(+, MyType, MySafeType) === MySafeType
@test accumulatortype(*, MySafeType, MyType) === MySafeType
# acc
@test acc(MyType()) === MySafeType()
@test acc(+, MyType()) === MyType()
| CheckedArithmetic | https://github.com/JuliaMath/CheckedArithmetic.jl.git |
|
[
"MIT"
] | 0.2.0 | a0194cdc135937ad2d3016b0af97e119245d11ea | code | 8596 | module CheckedArithmetic
using CheckedArithmeticCore
import CheckedArithmeticCore: safearg_type, safearg, safeconvert, accumulatortype, acc
using Base.Meta: isexpr
using LinearAlgebra: Factorization, UniformScaling
using Random: AbstractRNG
using Dates
export @checked, @check
export accumulatortype, acc # re-export
const op_checked = Dict(
Symbol("unary-") => :(Base.Checked.checked_neg),
:abs => :(Base.Checked.checked_abs),
:+ => :(Base.Checked.checked_add),
:- => :(Base.Checked.checked_sub),
:* => :(Base.Checked.checked_mul),
:÷ => :(Base.Checked.checked_div),
:div => :(Base.Checked.checked_div),
:% => :(Base.Checked.checked_rem),
:rem => :(Base.Checked.checked_rem),
:fld => :(Base.Checked.checked_fld),
:mod => :(Base.Checked.checked_mod),
:cld => :(Base.Checked.checked_cld),
)
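# Walk an expression tree and replace every call whose operator appears in `op_map`
# (e.g. `+` → `Base.Checked.checked_add`), recursing into nested expressions; unary `+`
# is left untouched, unary `-` is mapped separately, and module-qualified calls such as
# `Base.:-` are handled through the `:.` branch.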
function replace_op!(expr::Expr, op_map::Dict)
if expr.head == :call
f, len = expr.args[1], length(expr.args)
op = isexpr(f, :.) ? f.args[2].value : f # handle module-scoped functions
if op === :+ && len == 2 # unary +
# no action required
elseif op === :- && len == 2 # unary -
op = get(op_map, Symbol("unary-"), op)
if isexpr(f, :.)
f.args[2].value = op
expr.args[1] = f
else
expr.args[1] = op
end
else # arbitrary call
op = get(op_map, op, op)
if isexpr(f, :.)
f.args[2].value = op
expr.args[1] = f
else
expr.args[1] = op
end
end
for a in Iterators.drop(expr.args, 1)
if isa(a, Expr)
replace_op!(a, op_map)
end
end
else
for a in expr.args
if isa(a, Expr)
replace_op!(a, op_map)
end
end
end
return expr
end
"""
@checked expr
Perform all the operations in `expr` using checked arithmetic.
# Examples
```jldoctest
julia> 0xff + 0x10 # operation that overflows
0x0f
julia> @checked 0xff + 0x10
ERROR: OverflowError: 255 + 16 overflowed for type UInt8
```
You can also wrap method definitions (or blocks of code) in `@checked`:
```jldoctest
julia> plus(x, y) = x + y; minus(x, y) = x - y
minus (generic function with 1 method)
julia> @show plus(0xff, 0x10) minus(0x10, 0x20);
plus(0xff, 0x10) = 0x0f
minus(0x10, 0x20) = 0xf0
julia> @checked (plus(x, y) = x + y; minus(x, y) = x - y)
minus (generic function with 1 method)
julia> plus(0xff, 0x10)
ERROR: OverflowError: 255 + 16 overflowed for type UInt8
julia> minus(0x10, 0x20)
ERROR: OverflowError: 16 - 32 overflowed for type UInt8
```
"""
macro checked(expr)
isa(expr, Expr) || return expr
expr = copy(expr)
return esc(replace_op!(expr, op_checked))
end
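# `@check expr [kwargs...]`: evaluate `expr`, then re-evaluate the same call with its
# arguments passed through `safearg` (typically widening the numeric types), convert the
# "safe" result back with `safeconvert`, and compare the two with `==` (or with `isapprox`
# when keyword arguments such as `atol` are supplied), throwing an error if they disagree.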
macro check(expr, kws...)
isexpr(expr, :call) || error("expected :call expression, got ",
isa(expr, Expr) ? QuoteNode(expr.head) : typeof(expr))
safeexpr = copy(expr)
for i = 2:length(expr.args)
safeexpr.args[i] = Expr(:call, :(CheckedArithmetic.safearg), expr.args[i])
end
cmpexpr = isempty(kws) ? :(val == valcmp || error(val, " is not equal to ", valcmp)) :
:(isapprox(val, valcmp; $(esc(kws...))) || error(val, " is not approximately equal to ", valcmp))
return quote
local val = $(esc(expr))
local valcmp = CheckedArithmetic.safeconvert(typeof(val), $(esc(safeexpr)))
if ismissing(val) && ismissing(valcmp)
val
else
$cmpexpr
val
end
end
end
# safearg_type
# ------------
safearg_type(::Type{BigInt}) = BigInt
safearg_type(::Type{Int128}) = Int128
safearg_type(::Type{Int64}) = Int128
safearg_type(::Type{Int32}) = Int128
safearg_type(::Type{Int16}) = Int128
safearg_type(::Type{Int8}) = Int128
safearg_type(::Type{UInt128}) = UInt128
safearg_type(::Type{UInt64}) = UInt128
safearg_type(::Type{UInt32}) = UInt128
safearg_type(::Type{UInt16}) = UInt128
safearg_type(::Type{UInt8}) = UInt128
safearg_type(::Type{Bool}) = Bool # these have a special meaning that must be preserved
safearg_type(::Type{BigFloat}) = BigFloat
safearg_type(::Type{Float64}) = Float64
safearg_type(::Type{Float32}) = Float64
safearg_type(::Type{Float16}) = Float64
safearg_type(::Type{T}) where T<:Base.TwicePrecision = T
safearg_type(::Type{<:Rational}) = Float64
# safearg
#--------
# Containers
safearg(t::Tuple) = map(safearg, t)
safearg(t::NamedTuple) = map(safearg, t)
safearg(p::Pair) = safearg(p.first) => safearg(p.second)
## AbstractArrays and similar
safearg(A::AbstractArray{T}) where T<:Number = convert(AbstractArray{safearg_type(T)}, A)
safearg(A::BitArray) = A
safearg(A::AbstractArray{Bool}) = A
safearg(rng::LinRange) = LinRange(safearg(rng.start), safearg(rng.stop), rng.len)
safearg(rng::StepRangeLen) = StepRangeLen(safearg(rng.ref), safearg(rng.step), rng.len, rng.offset)
safearg(rng::StepRange) = StepRange(safearg(rng.start), safearg(rng.step), safearg(rng.stop))
safearg(rng::AbstractUnitRange{Int}) = rng # AbstractUnitRange{Int} is usually used for indexing, preserve this
safearg(rng::AbstractUnitRange{T}) where T<:Integer = convert(AbstractUnitRange{safearg_type(T)}, rng)
safearg(I::UniformScaling) = UniformScaling(safearg(I.λ))
safearg(F::Factorization{Float64}) = F
safearg(ref::Ref) = Ref(safearg(ref[]))
## AbstractDicts
safearg(d::Dict) = Dict(safearg(p) for p in d)
safearg(d::Base.EnvDict) = d
safearg(d::Base.ImmutableDict) = Base.ImmutableDict(safearg(p) for p in d)
safearg(d::Iterators.Pairs) = Iterators.Pairs(safearg(d.data), d.itr)
safearg(d::IdDict) = IdDict(safearg(p) for p in d)
safearg(d::WeakKeyDict) = WeakKeyDict(k=>safearg(v) for (k, v) in d) # do not convert keys
## AbstractSets
safearg(s::Set) = Set(safearg(x) for x in s)
safearg(s::Base.IdSet) = Base.IdSet(safearg(x) for x in s)
safearg(s::BitSet) = s
# Other common types
safearg(::Nothing) = nothing
safearg(m::Missing) = m
safearg(s::Some) = Some(safearg(s.value))
safearg(rng::AbstractRNG) = rng
safearg(mode::RoundingMode) = mode
safearg(str::AbstractString) = str
safearg(c::AbstractChar) = c
safearg(sym::Symbol) = sym
safearg(mime::MIME) = mime
safearg(rex::Regex) = rex
safearg(rex::RegexMatch) = rex
safearg(chan::AbstractChannel) = chan
safearg(i::Base.AbstractCartesianIndex) = i
safearg(i::Base.UUID) = i
safearg(cmd::Base.AbstractCmd) = cmd
if isdefined(Base, :AbstractLock)
safearg(lock::Base.AbstractLock) = lock
end
safearg(f::Function) = f
safearg(io::IO) = io
safearg(m::Module) = m
safearg(v::Val) = v
safearg(t::Task) = t
safearg(ord::Base.Order.Ordering) = ord
safearg(t::Timer) = t
safearg(d::Dates.AbstractDateTime) = d
safearg(t::Dates.AbstractTime) = t
safearg(t::Dates.AbstractDateToken) = t
# safeconvert
# -----------
safeconvert(::Type{T}, x) where T<:Integer = round(T, x)
safeconvert(::Type{T}, x) where T<:AbstractFloat = T(x)
safeconvert(::Type{AA}, A::AbstractArray) where AA<:AbstractArray{T} where T<:Integer = round.(T, A)
# accumulatortype
# ---------------
const SignPreserving = Union{typeof(+), typeof(*)}
const ArithmeticOp = Union{SignPreserving, typeof(-)}
accumulatortype(::ArithmeticOp, ::Type{BigInt}) = BigInt
accumulatortype(::ArithmeticOp, ::Type{Int128}) = Int128
accumulatortype(::ArithmeticOp, ::Type{Int64}) = Int64
accumulatortype(::ArithmeticOp, ::Type{Int32}) = Int
accumulatortype(::ArithmeticOp, ::Type{Int16}) = Int
accumulatortype(::ArithmeticOp, ::Type{Int8}) = Int
accumulatortype(::ArithmeticOp, ::Type{Bool}) = Int
accumulatortype(::SignPreserving, ::Type{UInt128}) = UInt128
accumulatortype(::SignPreserving, ::Type{UInt64}) = UInt64
accumulatortype(::SignPreserving, ::Type{UInt32}) = UInt
accumulatortype(::SignPreserving, ::Type{UInt16}) = UInt
accumulatortype(::SignPreserving, ::Type{UInt8}) = UInt
accumulatortype(::typeof(-), ::Type{UInt128}) = Int128
accumulatortype(::typeof(-), ::Type{UInt64}) = Int64
accumulatortype(::typeof(-), ::Type{UInt32}) = Int
accumulatortype(::typeof(-), ::Type{UInt16}) = Int
accumulatortype(::typeof(-), ::Type{UInt8}) = Int
accumulatortype(::ArithmeticOp, ::Type{BigFloat}) = BigFloat
accumulatortype(::ArithmeticOp, ::Type{Float64}) = Float64
accumulatortype(::ArithmeticOp, ::Type{Float32}) = Float64
accumulatortype(::ArithmeticOp, ::Type{Float16}) = Float64
end # module
| CheckedArithmetic | https://github.com/JuliaMath/CheckedArithmetic.jl.git |
|
[
"MIT"
] | 0.2.0 | a0194cdc135937ad2d3016b0af97e119245d11ea | code | 4060 |
# Explicitly use `import` instead of `using` to make sure there is no problem with scoping.
import CheckedArithmetic
import CheckedArithmetic: @checked, @check, acc, accumulatortype
using Pkg, Test, Dates
Pkg.test("CheckedArithmeticCore")
@test isempty(detect_ambiguities(CheckedArithmetic, Base, Core))
@checked begin
plus(x, y) = x + y
minus(x, y) = x - y
end
function sumsquares(A::AbstractArray)
s = zero(accumulatortype(eltype(A)))
for a in A
s += acc(a)^2
end
return s
end
@testset "CheckedArithmetic.jl" begin
@testset "@checked" begin
@test @checked(abs(Int8(-2))) === Int8(2)
@test_throws OverflowError @checked(abs(typemin(Int8)))
@test @checked(+2) === 2
@test @checked(+UInt(2)) === UInt(2)
@test @checked(-2) === -2
@test_throws OverflowError @checked(-UInt(2))
@test @checked(0x10 + 0x20) === 0x30
@test_throws OverflowError @checked(0xf0 + 0x20)
@test @checked(0x30 - 0x20) === 0x10
@test_throws OverflowError @checked(0x10 - 0x20)
@test @checked(-7) === -7
@test_throws OverflowError @checked(-UInt(7))
@test @checked(0x10*0x02) === 0x20
@test_throws OverflowError @checked(0x10*0x10)
@test @checked(7 ÷ 2) === 3
@test_throws DivideError @checked(typemin(Int8)÷Int8(-1))
@test @checked(div(0x7, 0x2)) === 0x3
@test_throws DivideError @checked(div(typemin(Int16), Int16(-1)))
@test @checked(rem(typemin(Int8), Int8(-1))) === Int8(0)
@test_throws DivideError @checked(rem(typemax(Int8), Int8(0)))
@test @checked(typemin(Int16) % Int16(-1)) === Int16(0)
@test_throws DivideError @checked(typemax(Int16) % Int16(0))
@test @checked(fld(typemax(Int8), Int8(-1))) === -typemax(Int8)
@test_throws DivideError @checked(fld(typemin(Int8), Int8(-1)))
@test @checked(mod(typemax(Int8), Int8(1))) === Int8(0)
@test_throws DivideError @checked(mod(typemin(Int8), Int8(0)))
@test @checked(cld(typemax(Int8), Int8(-1))) === -typemax(Int8)
@test_throws DivideError @checked(cld(typemin(Int8), Int8(-1)))
@test plus(0x10, 0x20) === 0x30
@test_throws OverflowError plus(0xf0, 0x20)
@test minus(0x30, 0x20) === 0x10
@test_throws OverflowError minus(0x20, 0x30)
end
@testset "@check" begin
@test @check(3+5) == 8
@test_throws InexactError @check(0xf0+0x15)
@test @check([3]+[5]) == [8]
@test_throws InexactError @check([0xf0]+[0x15])
f(t) = t[1] + t[2]
@test @check(f((1,2))) === 3
@test_throws InexactError @check(f((0xf0, 0x20)))
times2(x) = 0x02*x
times2(d::Dict) = Dict(k=>times2(v) for (k,v) in d)
@test @check(times2(Dict("a"=>7))) == Dict("a"=>14)
@test_throws InexactError @check(times2(Dict("a"=>0xf0)))
for item in Any["hi", :hi, (3,), (silly="hi",), trues(3), [true], 1:3, 1:2:5,
LinRange(1, 3, 3), StepRangeLen(1.0, 3.0, 3), 0x01:0x03,
pairs((silly="hi",)), Set([1,3]), BitSet(7), nothing, missing,
Some(nothing), 'c', MIME("text/plain"), IOBuffer(), r"\d+",
Channel(7), CartesianIndex(1, 3), Base.UUID(0), `ls`,
sum, Base, Val(3), Task(()->1), Base.Order.Forward, Timer(0.1),
now()]
@test @check(identity(item)) === item
end
# Roundoff error
function diff2from1(s)
s2 = copy(s + s)
return 1 - s2
end
@test_throws ErrorException @check diff2from1(Rational{Int64}(1, 3))
@test (@check diff2from1(Rational{Int64}(1, 3)) atol=1e-12) == 1//3
end
@testset "acc" begin
@test acc(0x02) === UInt(2)
@test acc(-, 0x02) === 2
@test accumulatortype(-, UInt8) === Int
@test accumulatortype(*, Int16, Float16) === Float64
@test sumsquares(0x00:0xff) == sumsquares(0:255)
end
end
| CheckedArithmetic | https://github.com/JuliaMath/CheckedArithmetic.jl.git |
|
[
"MIT"
] | 0.2.0 | a0194cdc135937ad2d3016b0af97e119245d11ea | docs | 2543 | # CheckedArithmetic
This package aims to make it easier to detect overflow in numeric computations.
It exports two macros, `@check` and `@checked`, as well as functions
`accumulatortype` and `acc`.
Packages can add support for their own types to interact appropriately
with these tools.
## `@checked`
`@checked` converts arithmetic operators to their checked variants.
For example,
```julia
@checked z = x + y
```
rewrites this expression as
```julia
z = Base.Checked.checked_add(x, y)
```
Note that this macro only operates at the level of surface syntax, i.e.,
```julia
@checked z = f(x) + f(y)
```
will not detect overflow caused by `f`.
The [`Base.Checked` module](https://github.com/JuliaLang/julia/blob/master/base/checked.jl) defines numerous checked operations.
These can be specialized for custom types.
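For example, a thin wrapper type can opt into `@checked` by forwarding the relevant checked operation (a minimal sketch; `MyInt` is a hypothetical type, not part of this package):
```julia
using CheckedArithmetic
import Base.Checked: checked_add

struct MyInt
    x::Int
end

# Forward the checked operation to the wrapped Int; overflow in the
# underlying addition still raises an OverflowError.
checked_add(a::MyInt, b::MyInt) = MyInt(checked_add(a.x, b.x))

# `@checked a + b` now dispatches to the method above.
@checked MyInt(1) + MyInt(2)
```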
## `@check`
`@check` performs an operation in two different ways,
checking that both approaches agree.
`@check` is primarily useful in tests.
For example,
```julia
d = @check ssd(a, b)
```
would perform `ssd(a, b)` with the inputs as given, and also compute `ssd(asafe, bsafe)`
where `asafe` and `bsafe` are "safer" variants of `a` and `b`.
It then tests whether the result obtained from the "safe" arguments is consistent with
the result obtained from `a` and `b`.
If the two differ by more than the precision of the "ordinary" (unsafe) result, an
error is thrown.
Optionally, you can supply keywords accepted by `isapprox`:
```julia
@check foo(a) atol=1e-12
```
Packages can specialize `CheckedArithmetic.safearg` to control how `asafe` and `bsafe`
are generated. To guard against oversights, `safearg` must be explicitly defined for
each numeric type---the fallback method for `safearg(::Number)` is to throw an error.
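For example, a package defining a new numeric type might make it participate in `@check` like this (a minimal sketch; `Percent` is a hypothetical type used only for illustration):
```julia
using CheckedArithmetic

struct Percent <: Real
    value::Float64
end

# Build the "safe" variant of a Percent in extended precision so that
# @check can compare the ordinary result against a higher-accuracy one.
CheckedArithmetic.safearg(p::Percent) = BigFloat(p.value)
```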
## `accumulatortype` and `acc`
These functions are useful for writing overflow-safe algorithms.
`accumulatortype(T)` or `accumulatortype(T1, T2...)` takes types as input arguments
and returns a type suitable for limited-risk arithmetic operations.
`acc(x)` is just shorthand for `convert(accumulatortype(typeof(x)), x)`.
You can also specialize on the operation. For example,
```julia
julia> accumulatortype(+, UInt8)
UInt64
julia> accumulatortype(-, UInt8)
Int64
```
If you're computing a sum-of-squares and want to make sure your algorithm is (reasonably)
safe for an input array of `UInt8` numbers, you might want to write that as
```julia
function sumsquares(A::AbstractArray)
s = zero(accumulatortype(eltype(A)))
for a in A
s += acc(a)^2
end
return s
end
```
| CheckedArithmetic | https://github.com/JuliaMath/CheckedArithmetic.jl.git |
|
[
"MIT"
] | 0.2.0 | a0194cdc135937ad2d3016b0af97e119245d11ea | docs | 250 | # CheckedArithmeticCore
CheckedArithmeticCore is a component of
[CheckedArithmetic](https://github.com/JuliaMath/CheckedArithmetic.jl).
This package provides a set of low-level APIs for packages that provide
numeric types and their arithmetic. | CheckedArithmetic | https://github.com/JuliaMath/CheckedArithmetic.jl.git |
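For instance, a package defining its own numeric type would extend the generic functions declared here so that `@check`, `acc`, and friends work for it. The sketch below is hedged: `Fixed8` is a hypothetical type, and it assumes `accumulatortype` and `safearg` are owned by CheckedArithmeticCore.
```julia
using CheckedArithmeticCore

# A hypothetical 8-bit fixed-point type with 1/256 resolution.
struct Fixed8 <: Real
    raw::Int8
end

# Accumulate Fixed8 values in Float64 to limit overflow risk.
CheckedArithmeticCore.accumulatortype(::typeof(+), ::Type{Fixed8}) = Float64

# Use a BigFloat stand-in when @check builds its "safe" arguments.
CheckedArithmeticCore.safearg(x::Fixed8) = BigFloat(x.raw) / 256
```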
|
[
"MIT"
] | 0.1.3 | e21d99c9987d32251cff85204119ac0c07ef399c | code | 973 | using PlotlyDocumenter
using Documenter
using PlotlyBase
using PlotlyLight
using PlotlyJS
DocMeta.setdocmeta!(PlotlyDocumenter, :DocTestSetup, :(using PlotlyDocumenter); recursive=true)
makedocs(;
modules=[PlotlyDocumenter],
authors="Alberto Mengali <[email protected]>",
repo="https://github.com/disberd/PlotlyDocumenter.jl/blob/{commit}{path}#{line}",
sitename="PlotlyDocumenter.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
edit_link="main",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
# This controls whether or not deployment is attempted. It is based on the value
# of the `SHOULD_DEPLOY` ENV variable, which defaults to the `CI` ENV variables or
# false if not present.
should_deploy = get(ENV, "SHOULD_DEPLOY", get(ENV, "CI", "")) === "true"
if should_deploy
@info "Deploying"
deploydocs(
repo = "github.com/disberd/PlotlyDocumenter.jl.git",
)
end | PlotlyDocumenter | https://github.com/disberd/PlotlyDocumenter.jl.git |
|
[
"MIT"
] | 0.1.3 | e21d99c9987d32251cff85204119ac0c07ef399c | code | 329 | module PlotlyBaseExt
using PlotlyDocumenter
using PlotlyBase
function PlotlyDocumenter.to_documenter(p::PlotlyBase.Plot; kwargs...)
data = json(p.data)
layout = json(p.layout)
config = json(p.config)
return PlotlyDocumenter._to_documenter(;kwargs..., data, layout, config)
end
end | PlotlyDocumenter | https://github.com/disberd/PlotlyDocumenter.jl.git |
|
[
"MIT"
] | 0.1.3 | e21d99c9987d32251cff85204119ac0c07ef399c | code | 265 | module PlotlyJSExt
using PlotlyDocumenter
using PlotlyJS
function PlotlyDocumenter.to_documenter(sp::PlotlyJS.SyncPlot; kwargs...)
p = sp.plot
return PlotlyDocumenter.to_documenter(p; kwargs...) # Reuse the PlotlyBase method
end
end | PlotlyDocumenter | https://github.com/disberd/PlotlyDocumenter.jl.git |
|
[
"MIT"
] | 0.1.3 | e21d99c9987d32251cff85204119ac0c07ef399c | code | 655 | module PlotlyLightExt
using HypertextLiteral
using PlotlyDocumenter
using PlotlyLight
import PlotlyLight.JSON3
const settings = if isdefined(PlotlyLight, :DEFAULT_SETTINGS)
# This is for PlotlyLight < v0.8
PlotlyLight.DEFAULT_SETTINGS
else
PlotlyLight.settings
end
function PlotlyDocumenter.to_documenter(p::PlotlyLight.Plot; kwargs...)
data = JSON3.write(p.data)
layout = JSON3.write(merge(settings.layout, p.layout))
config = JSON3.write(merge(settings.config, p.config))
return PlotlyDocumenter._to_documenter(;kwargs..., data, layout, config)
end
end | PlotlyDocumenter | https://github.com/disberd/PlotlyDocumenter.jl.git |
|
[
"MIT"
] | 0.1.3 | e21d99c9987d32251cff85204119ac0c07ef399c | code | 3730 | module PlotlyDocumenter
using PackageExtensionCompat
using HypertextLiteral
using Random
using Downloads: download
export to_documenter
# Taken from PlotlyLight
get_semver(x) = VersionNumber(match(r"v(\d+)\.(\d+)\.(\d+)", x).match[2:end])
# Taken from PlotlyLight
function latest_plotlyjs_version()
file = download("https://github.com/plotly/plotly.js/releases/latest")
get_semver(read(file, String))
end
const DEFAULT_VERSION = v"2.24.2"
const PLOTLY_VERSION = Ref(DEFAULT_VERSION)
"""
change_default_plotly_version(version::String)
Change the plotly version that is used by default to render Plotly plots using [`to_documenter`](@ref)
"""
change_default_plotly_version(v) = PLOTLY_VERSION[] = VersionNumber(string(v))
function _to_documenter(;data, layout, config, version = PLOTLY_VERSION[], id = randstring(10), classes = [], style = (;))
js = HypertextLiteral.JavaScript
v = js(string(version))
plot_obj = (;
data = js(data),
layout = js(layout),
config = js(config),
)
return @htl("""
<div id=$id class=$classes style=$style></div>
<script type="module">
import Plotly from "https://esm.sh/plotly.js-dist-min@$v"
const PLOT = document.getElementById($(id))
const plot_obj = $plot_obj
Plotly.newPlot(PLOT, plot_obj)
// If width is not specified, set it to 100%
PLOT.style.width = plot_obj.layout.width ? "" : "100%"
// For the height we have to also put a fixed value in case the plot is put on a non-fixed-size container (like the default wrapper)
PLOT.style.height = plot_obj.layout.height ? "" : "100%"
</script>
""")
end
# Define the function name. Methods are added in the extension packages
"""
to_documenter(p::P; id, version)
Takes a plot object `p` and returns an output that is showable as HTML inside pages generated by Documenter.jl.
This function currently works correctly inside `@example` blocks from Documenter.jl if called as the last statement/command.
Check the package [documentation](https://disberd.github.io/PlotlyDocumenter.jl) to see it in action.
The object returned as output, when shown as `text/html`, will generate a plotly plot that can be interacted with directly in the documentation page.
This package supports the following types as `P`:
- `Plot` from PlotlyBase
- `SyncPlot` from PlotlyJS
- `Plot` from PlotlyLight
# Keyword Arguments
- `id`: The id to be given to the div containing the plot. Defaults to a random string of 10 alphanumeric characters
- `version`: Version of plotly.js to use. Defaults to version $(DEFAULT_VERSION), but can be overridden by providing `version` as a string. To change the default version, use the unexported `change_default_plotly_version` function.
- `classes`: A Vector of Strings representing classes to assign to the div containing the plot. Defaults to an empty vector
- `style`: An object containing a list of styles that are applied inline to the plot div. Defaults to an empty NamedTuple. Supports the syntax of [HypertextLiteral](https://juliapluto.github.io/HypertextLiteral.jl/stable/attribute/#Pairs-and-Dictionaries)
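# Example
A minimal sketch of the intended use inside a Documenter `@example` block (assumes PlotlyBase is available; the other supported packages work the same way, and the `id`/`style` values are illustrative):
```julia
using PlotlyDocumenter, PlotlyBase
p = Plot(scatter(; y = rand(5)))
to_documenter(p; id = "my-plot", style = (; height = "400px"))
```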
# Notes
- The package does not reexport any plotting packages, so the desired plotting package must be brought into scope independently.
- The package currently only supports fetching the plotly library from CDN, so it does not support local versions of Plotly (even though they are supported by PlotlyLight for example)
"""
function to_documenter(x::P) where P
error("The provided plot type $P does not have an associated method.
Remember to also import the plotly plotting package of your choice.")
end
function __init__()
@require_extensions
end
end
| PlotlyDocumenter | https://github.com/disberd/PlotlyDocumenter.jl.git |
|
[
"MIT"
] | 0.1.3 | e21d99c9987d32251cff85204119ac0c07ef399c | code | 1294 | using PlotlyDocumenter
using PlotlyDocumenter: change_default_plotly_version, DEFAULT_VERSION
using Test
import PlotlyBase
import PlotlyLight
import PlotlyJS
@testset "PlotlyDocumenter.jl" begin
function to_str(o)
io = IOBuffer()
show(io, o)
String(take!(io))
end
y = rand(4)
p_base = PlotlyBase.Plot(PlotlyBase.scatter(;y))
p_js = PlotlyJS.plot(y);
p_light = PlotlyLight.Plot(;y, type="scatter")
for p in (p_base,p_light, p_js)
change_default_plotly_version(DEFAULT_VERSION)
o = to_documenter(p) |> to_str
@test contains(o, "Plotly.newPlot")
@test contains(o, "@$DEFAULT_VERSION")
id = "abcdefghil"
o = to_documenter(p; id) |> to_str
@test contains(o, "id='$id'")
version = "1.6.0"
o = to_documenter(p; version) |> to_str
@test contains(o, "@$version")
o = to_documenter(p; classes = ["asd", "lol"]) |> to_str
@test contains(o, "class='asd lol'")
o = to_documenter(p; style = (;min_height = "500px", color = "red")) |> to_str
@test contains(o, "style='min-height: 500px; color: red;'")
change_default_plotly_version(version)
o = to_documenter(p) |> to_str
@test contains(o, "@$version")
end
end
| PlotlyDocumenter | https://github.com/disberd/PlotlyDocumenter.jl.git |
|
[
"MIT"
] | 0.1.3 | e21d99c9987d32251cff85204119ac0c07ef399c | docs | 935 | # PlotlyDocumenter
[](https://disberd.github.io/PlotlyDocumenter.jl/)
[](https://disberd.github.io/PlotlyDocumenter.jl/dev)
[](https://github.com/disberd/PlotlyDocumenter.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/disberd/PlotlyDocumenter.jl)
This package exports a single function `to_documenter` that takes a Plot object from one of the following packages:
- PlotlyBase
- PlotlyJS
- PlotlyLight
and returns an object that will show an interactive plot on the HTML pages generated by Documenter.jl.
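A minimal usage sketch inside a Documenter `@example` block (shown here with PlotlyBase; the other supported packages work the same way):
```julia
using PlotlyDocumenter, PlotlyBase
p = Plot(scatter(; y = rand(5)))
to_documenter(p)
```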
See the [documentation](https://disberd.github.io/PlotlyDocumenter.jl/) for more details. | PlotlyDocumenter | https://github.com/disberd/PlotlyDocumenter.jl.git |
|
[
"MIT"
] | 0.1.3 | e21d99c9987d32251cff85204119ac0c07ef399c | docs | 606 | ```@meta
CurrentModule = PlotlyDocumenter
```
# PlotlyDocumenter
Documentation for [PlotlyDocumenter](https://github.com/disberd/PlotlyDocumenter.jl).
```@index
```
```@autodocs
Modules = [PlotlyDocumenter]
```
## PlotlyBase
```@example
using PlotlyDocumenter
using PlotlyBase
p = Plot(scatter(;y = rand(5)))
to_documenter(p)
```
## PlotlyJS
```@example
using PlotlyDocumenter
using PlotlyJS
p = plot(scatter(;y = rand(5)), Layout(height = 700))
to_documenter(p)
```
## PlotlyLight
```@example
using PlotlyDocumenter
using PlotlyLight
p = Plot(;y = rand(5), type = "scatter")
to_documenter(p)
``` | PlotlyDocumenter | https://github.com/disberd/PlotlyDocumenter.jl.git |
|
[
"MIT"
] | 0.3.1 | 1467be4e01897dae9b5c739643d2de0d790d25e0 | code | 1434 | #_____________________________________________________________________
# Charlie's code
#
# Here I use `Gnuplot.jl`, but any other package would work...
using Gnuplot
@gp "set size ratio -1" "set grid" "set key bottom right" xr=(-1.5, 2.5) :-
# Methods to plot a Polygon
plotlabel(p::Polygon) = "Polygon (" * string(length(p.x)) * " vert.)"
plotlabel(p::RegularPolygon) = "RegularPolygon (" * string(vertices(p)) * " vert., area=" * string(round(area(p) * 100) / 100) * ")"
plotlabel(p::Named) = name(p) * " (" * string(vertices(p)) * " vert., area=" * string(round(area(p) * 100) / 100) * ")"
function plot(p::AbstractPolygon; dt=1, color="black")
x = coords_x(p); x = [x; x[1]]
y = coords_y(p); y = [y; y[1]]
title = plotlabel(p)
@gp :- x y "w l tit '$title' dt $dt lw 2 lc rgb '$color'"
end
# Finally, let's have fun with the shapes!
line = Polygon([0., 1.], [0., 1.])
triangle = RegularPolygon(3, 1)
square = Named{RegularPolygon}("Square", 4, 1)
plot(line, color="black")
plot(triangle, color="blue")
plot(square, color="red")
rotate!(triangle, 90)
move!(triangle, 1, 1)
scale!(triangle, 0.32)
scale!(square, sqrt(2))
plot(triangle, color="blue", dt=2)
plot(square, color="red", dt=2)
# Increase number of vertices
p = Named{RegularPolygon}("Heptagon", 7, 1)
plot(p, color="orange", dt=4)
circle = Named{RegularPolygon}("Circle", 1000, 1)
plot(circle, color="dark-green", dt=4)
| ReusePatterns | https://github.com/gcalderone/ReusePatterns.jl.git |
|
[
"MIT"
] | 0.3.1 | 1467be4e01897dae9b5c739643d2de0d790d25e0 | code | 2861 | #_____________________________________________________________________
# Alice's code
#
using Statistics
abstract type AbstractPolygon end
mutable struct Polygon <: AbstractPolygon
x::Vector{Float64}
y::Vector{Float64}
end
# Retrieve the number of vertices, and their X and Y coordinates
vertices(p::Polygon) = length(p.x)
coords_x(p::Polygon) = p.x
coords_y(p::Polygon) = p.y
# Move, scale and rotate a polygon
function move!(p::Polygon, dx::Real, dy::Real)
p.x .+= dx
p.y .+= dy
end
function scale!(p::Polygon, scale::Real)
m = mean(p.x); p.x = (p.x .- m) .* scale .+ m
m = mean(p.y); p.y = (p.y .- m) .* scale .+ m
end
function rotate!(p::Polygon, angle_deg::Real)
θ = float(angle_deg) * pi / 180
R = [cos(θ) -sin(θ); sin(θ) cos(θ)]
x = p.x .- mean(p.x)
y = p.y .- mean(p.y)
(x, y) = R * [x, y]
p.x = x .+ mean(p.x)
p.y = y .+ mean(p.y)
end
#_____________________________________________________________________
# Bob's code
#
mutable struct RegularPolygon <: AbstractPolygon
polygon::Polygon
radius::Float64
end
function RegularPolygon(n::Integer, radius::Real)
@assert n >= 3
θ = range(0, stop=2pi-(2pi/n), length=n)
c = radius .* exp.(im .* θ)
return RegularPolygon(Polygon(real(c), imag(c)), radius)
end
# Compute length of a side and the polygon area
side(p::RegularPolygon) = 2 * p.radius * sin(pi / vertices(p))
area(p::RegularPolygon) = side(p)^2 * vertices(p) / 4 / tan(pi / vertices(p))
# Forward methods from `RegularPolygon` to `Polygon`
vertices(p1::RegularPolygon) = vertices(getfield(p1, :polygon))
coords_x(p1::RegularPolygon) = coords_x(getfield(p1, :polygon))
coords_y(p1::RegularPolygon) = coords_y(getfield(p1, :polygon))
move!(p1::RegularPolygon, p2::Real, p3::Real) = move!(getfield(p1, :polygon), p2, p3)
rotate!(p1::RegularPolygon, p2::Real) = rotate!(getfield(p1, :polygon), p2)
function scale!(p::RegularPolygon, scale::Real)
scale!(p.polygon, scale) # call "super" method
p.radius *= scale # update internal state
end
# Attach a label to a polygon
mutable struct Named{T} <: AbstractPolygon
polygon::T
name::String
end
Named{T}(name, args...; kw...) where T = Named{T}(T(args...; kw...), name)
name(p::Named) = p.name
# Forward methods from `Named` to `Polygon`
vertices(p1::Named) = vertices(getfield(p1, :polygon))
coords_x(p1::Named) = coords_x(getfield(p1, :polygon))
coords_y(p1::Named) = coords_y(getfield(p1, :polygon))
move!(p1::Named, p2::Real, p3::Real) = move!(getfield(p1, :polygon), p2, p3)
rotate!(p1::Named, p2::Real) = rotate!(getfield(p1, :polygon), p2)
function scale!(p::Named, scale::Real)
scale!(p.polygon, scale) # call "super" method
end
side(p1::Named) = side(getfield(p1, :polygon))
area(p1::Named) = area(getfield(p1, :polygon))
| ReusePatterns | https://github.com/gcalderone/ReusePatterns.jl.git |
|
[
"MIT"
] | 0.3.1 | 1467be4e01897dae9b5c739643d2de0d790d25e0 | code | 2114 | #_____________________________________________________________________
# Alice's code
#
using Statistics, ReusePatterns
abstract type AbstractPolygon end
mutable struct Polygon <: AbstractPolygon
x::Vector{Float64}
y::Vector{Float64}
end
# Retrieve the number of vertices, and their X and Y coordinates
vertices(p::Polygon) = length(p.x)
coords_x(p::Polygon) = p.x
coords_y(p::Polygon) = p.y
# Move, scale and rotate a polygon
function move!(p::Polygon, dx::Real, dy::Real)
p.x .+= dx
p.y .+= dy
end
function scale!(p::Polygon, scale::Real)
m = mean(p.x); p.x = (p.x .- m) .* scale .+ m
m = mean(p.y); p.y = (p.y .- m) .* scale .+ m
end
function rotate!(p::Polygon, angle_deg::Real)
θ = float(angle_deg) * pi / 180
R = [cos(θ) -sin(θ); sin(θ) cos(θ)]
x = p.x .- mean(p.x)
y = p.y .- mean(p.y)
(x, y) = R * [x, y]
p.x = x .+ mean(p.x)
p.y = y .+ mean(p.y)
end
#_____________________________________________________________________
# Bob's code
#
mutable struct RegularPolygon <: AbstractPolygon
polygon::Polygon
radius::Float64
end
function RegularPolygon(n::Integer, radius::Real)
@assert n >= 3
θ = range(0, stop=2pi-(2pi/n), length=n)
c = radius .* exp.(im .* θ)
return RegularPolygon(Polygon(real(c), imag(c)), radius)
end
# Compute length of a side and the polygon area
side(p::RegularPolygon) = 2 * p.radius * sin(pi / vertices(p))
area(p::RegularPolygon) = side(p)^2 * vertices(p) / 4 / tan(pi / vertices(p))
# Forward methods from `RegularPolygon` to `Polygon`
@forward (RegularPolygon, :polygon) Polygon
function scale!(p::RegularPolygon, scale::Real)
scale!(p.polygon, scale) # call "super" method
p.radius *= scale # update internal state
end
# Attach a label to a polygon
mutable struct Named{T} <: AbstractPolygon
polygon::T
name::String
end
Named{T}(name, args...; kw...) where T = Named{T}(T(args...; kw...), name)
name(p::Named) = p.name
# Forward methods from `Named` to `Polygon`
@forward (Named, :polygon) RegularPolygon
| ReusePatterns | https://github.com/gcalderone/ReusePatterns.jl.git |
|
[
"MIT"
] | 0.3.1 | 1467be4e01897dae9b5c739643d2de0d790d25e0 | code | 873 | using DataFrames, ReusePatterns
struct DataFrameMeta <: AbstractDataFrame
p::DataFrame
meta::Dict{String, Any}
DataFrameMeta(args...; kw...) = new(DataFrame(args...; kw...), Dict{String, Any}())
end
meta(d::DataFrameMeta) = getfield(d,:meta) # <-- new functionality added to DataFrameMeta
@forward((DataFrameMeta, :p), DataFrame) # <-- reuse all existing functionalities
# Use a `DataFrameMeta` object as if it was a common DataFrame object ...
v = ["x","y","z"][rand(1:3, 10)]
df1 = DataFrameMeta(Any[collect(1:10), v, rand(10)], [:A, :B, :C])
df2 = DataFrameMeta(A = 1:10, B = v, C = rand(10))
dump(df1)
dump(df2)
describe(df2)
first(df1, 10)
df1[:, :A] .+ df2[:, :C]
df1[1:4, 1:2]
df1[:, [:A,:C]]
df1[1:2, [:A,:C]]
df1[:, [1,3]]
df1[1:4, :]
df1[1:4, :C]
df1[1:4, :C] = 40. * df1[1:4, :C]
[df1; df2]
size(df1)
meta(df1)["key"] = :value
meta(df1)["key"]
| ReusePatterns | https://github.com/gcalderone/ReusePatterns.jl.git |
|
[
"MIT"
] | 0.3.1 | 1467be4e01897dae9b5c739643d2de0d790d25e0 | code | 2309 | #_____________________________________________________________________
# Alice's code
#
using Statistics, ReusePatterns
abstract type AbstractPolygon end
abstract type Polygon <: AbstractPolygon end
mutable struct Concrete_Polygon <: Polygon
x::Vector{Float64}
y::Vector{Float64}
end
Polygon(args...; kw...) = Concrete_Polygon(args...; kw...)
# Retrieve the number of vertices, and their X and Y coordinates
vertices(p::Polygon) = length(p.x)
coords_x(p::Polygon) = p.x
coords_y(p::Polygon) = p.y
# Move, scale and rotate a polygon
function move!(p::Polygon, dx::Real, dy::Real)
p.x .+= dx
p.y .+= dy
end
function scale!(p::Polygon, scale::Real)
m = mean(p.x); p.x = (p.x .- m) .* scale .+ m
m = mean(p.y); p.y = (p.y .- m) .* scale .+ m
end
function rotate!(p::Polygon, angle_deg::Real)
θ = float(angle_deg) * pi / 180
R = [cos(θ) -sin(θ); sin(θ) cos(θ)]
x = p.x .- mean(p.x)
y = p.y .- mean(p.y)
(x, y) = R * [x, y]
p.x = x .+ mean(p.x)
p.y = y .+ mean(p.y)
end
#_____________________________________________________________________
# Bob's code
#
abstract type RegularPolygon <: Polygon end
mutable struct Concrete_RegularPolygon <: RegularPolygon
x::Vector{Float64}
y::Vector{Float64}
radius::Float64
end
RegularPolygon(args...; kw...) = Concrete_RegularPolygon(args...; kw...)
function RegularPolygon(n::Integer, radius::Real)
@assert n >= 3
θ = range(0, stop=2pi-(2pi/n), length=n)
c = radius .* exp.(im .* θ)
return RegularPolygon(real(c), imag(c), radius)
end
# Compute length of a side and the polygon area
side(p::RegularPolygon) = 2 * p.radius * sin(pi / vertices(p))
area(p::RegularPolygon) = side(p)^2 * vertices(p) / 4 / tan(pi / vertices(p))
function scale!(p::RegularPolygon, scale::Real)
invoke(scale!, Tuple{supertype(RegularPolygon), typeof(scale)}, p, scale) # call "super" method
p.radius *= scale # update internal state
end
# Attach a label to a polygon
mutable struct Named{T} <: AbstractPolygon
polygon::T
name::String
end
Named{T}(name, args...; kw...) where T = Named{T}(T(args...; kw...), name)
name(p::Named) = p.name
# Forward methods from `Named` to `Polygon`
@forward (Named, :polygon) RegularPolygon
| ReusePatterns | https://github.com/gcalderone/ReusePatterns.jl.git |
|
[
"MIT"
] | 0.3.1 | 1467be4e01897dae9b5c739643d2de0d790d25e0 | code | 2061 | #_____________________________________________________________________
# Alice's code
#
using Statistics, ReusePatterns
abstract type AbstractPolygon end
@quasiabstract mutable struct Polygon <: AbstractPolygon
x::Vector{Float64}
y::Vector{Float64}
end
# Retrieve the number of vertices, and their X and Y coordinates
vertices(p::Polygon) = length(p.x)
coords_x(p::Polygon) = p.x
coords_y(p::Polygon) = p.y
# Move, scale and rotate a polygon
function move!(p::Polygon, dx::Real, dy::Real)
p.x .+= dx
p.y .+= dy
end
function scale!(p::Polygon, scale::Real)
m = mean(p.x); p.x = (p.x .- m) .* scale .+ m
m = mean(p.y); p.y = (p.y .- m) .* scale .+ m
end
function rotate!(p::Polygon, angle_deg::Real)
θ = float(angle_deg) * pi / 180
R = [cos(θ) -sin(θ); sin(θ) cos(θ)]
x = p.x .- mean(p.x)
y = p.y .- mean(p.y)
(x, y) = R * [x, y]
p.x = x .+ mean(p.x)
p.y = y .+ mean(p.y)
end
#_____________________________________________________________________
# Bob's code
#
@quasiabstract mutable struct RegularPolygon <: Polygon
radius::Float64
end
function RegularPolygon(n::Integer, radius::Real)
@assert n >= 3
θ = range(0, stop=2pi-(2pi/n), length=n)
c = radius .* exp.(im .* θ)
return RegularPolygon(real(c), imag(c), radius)
end
# Compute length of a side and the polygon area
side(p::RegularPolygon) = 2 * p.radius * sin(pi / vertices(p))
area(p::RegularPolygon) = side(p)^2 * vertices(p) / 4 / tan(pi / vertices(p))
function scale!(p::RegularPolygon, scale::Real)
invoke(scale!, Tuple{supertype(RegularPolygon), typeof(scale)}, p, scale) # call "super" method
p.radius *= scale # update internal state
end
# Attach a label to a polygon
mutable struct Named{T} <: AbstractPolygon
polygon::T
name::String
end
Named{T}(name, args...; kw...) where T = Named{T}(T(args...; kw...), name)
name(p::Named) = p.name
# Forward methods from `Named` to `Polygon`
@forward (Named, :polygon) RegularPolygon
| ReusePatterns | https://github.com/gcalderone/ReusePatterns.jl.git |