licenses (sequence, 1–3 items) | version (string, 677 classes) | tree_hash (string, 40 chars) | path (string, 1 class) | type (string, 2 classes) | size (string, 2–8 chars) | text (string, 25–67.1M chars) | package_name (string, 2–41 chars) | repo (string, 33–86 chars) |
---|---|---|---|---|---|---|---|---|
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 598 |
abstract type CalibrationErrorEstimator end
"""
(estimator::CalibrationErrorEstimator)(predictions, targets)
Estimate the calibration error of a model from the set of `predictions`
and corresponding `targets` using the `estimator`.
"""
(::CalibrationErrorEstimator)(predictions, targets)
function check_nsamples(predictions, targets, min::Int=1)
n = length(predictions)
length(targets) == n ||
throw(DimensionMismatch("number of predictions and targets must be equal"))
n ≥ min || error("there must be at least ", min, min == 1 ? " sample" : " samples")
return n
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 15276 |
@doc raw"""
SKCE(k; unbiased::Bool=true, blocksize=identity)
Estimator of the squared kernel calibration error (SKCE) with kernel `k`.
Kernel `k` on the product space of predictions and targets has to be a `Kernel` from the
Julia package
[KernelFunctions.jl](https://github.com/JuliaGaussianProcesses/KernelFunctions.jl)
that can be evaluated for inputs that are tuples of predictions and targets.
One can choose an unbiased or a biased variant with `unbiased=true` or `unbiased=false`,
respectively (see details below).
The SKCE is estimated as the average of the estimates on non-overlapping blocks of samples.
The number of samples per block is set by `blocksize`:
- If `blocksize` is a function `blocksize(n::Int)`, then the number of samples per block is
set to `blocksize(n)` where `n` is the total number of samples.
- If `blocksize` is an integer, then the number of samples per block is set to `blocksize`,
independent of the total number of samples.
The default setting `blocksize=identity` implies that a single block with all samples is
used.
The number of samples per block must be at least 1 if `unbiased=false` and 2 if
`unbiased=true`. Additionally, it must be at most the total number of samples. Note that the
last block is neglected if it is incomplete (see details below).
# Details
The unbiased estimator is not guaranteed to be non-negative whereas the biased estimator is
always non-negative.
The computational complexity of the estimator is ``O(mn)``, where ``m`` is the block size
and ``n`` is the total number of samples. In particular, with the default setting
`blocksize=identity` the estimator has quadratic computational complexity.
Let ``(P_{X_i}, Y_i)_{i=1,\ldots,n}`` be a data set of predictions and corresponding
targets. The estimator with block size ``m`` is defined as
```math
{\bigg\lfloor \frac{n}{m} \bigg\rfloor}^{-1} \sum_{b=1}^{\lfloor n/m \rfloor}
|B_b|^{-1} \sum_{(i, j) \in B_b} h_k\big((P_{X_i}, Y_i), (P_{X_j}, Y_j)\big),
```
where
```math
\begin{aligned}
h_k\big((μ, y), (μ', y')\big) ={}& k\big((μ, y), (μ', y')\big)
- 𝔼_{Z ∼ μ} k\big((μ, Z), (μ', y')\big) \\
& - 𝔼_{Z' ∼ μ'} k\big((μ, y), (μ', Z')\big)
+ 𝔼_{Z ∼ μ, Z' ∼ μ'} k\big((μ, Z), (μ', Z')\big)
\end{aligned}
```
and blocks ``B_b`` (``b = 1, \ldots, \lfloor n/m \rfloor``) are defined as
```math
B_b = \begin{cases}
\{(i, j): (b - 1) m < i < j \leq bm \} & \text{(unbiased)}, \\
\{(i, j): (b - 1) m < i, j \leq bm \} & \text{(biased)}.
\end{cases}
```
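# Examples
The following sketch shows typical usage with a tensor product kernel, mirroring this
package's test suite; the predictions and targets below are made-up toy data:
```julia
kernel = SqExponentialKernel() ⊗ WhiteKernel()
predictions = [[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]]
targets = [1, 2, 1]
SKCE(kernel)(predictions, targets)                  # unbiased estimate, single block
SKCE(kernel; unbiased=false)(predictions, targets)  # biased estimate
SKCE(kernel; blocksize=2)(predictions, targets)     # average over blocks of two samples
```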
# References
Widmann, D., Lindsten, F., & Zachariah, D. (2019). [Calibration tests in multi-class
classification: A unifying framework](https://proceedings.neurips.cc/paper/2019/hash/1c336b8080f82bcc2cd2499b4c57261d-Abstract.html).
In: Advances in Neural Information Processing Systems (NeurIPS 2019) (pp. 12257–12267).
Widmann, D., Lindsten, F., & Zachariah, D. (2021). [Calibration tests beyond
classification](https://openreview.net/forum?id=-bxf89v3Nx).
"""
struct SKCE{K<:Kernel,B} <: CalibrationErrorEstimator
"""Kernel of estimator."""
kernel::K
"""Whether the unbiased estimator is used."""
unbiased::Bool
"""Number of samples per block."""
blocksize::B
function SKCE{K,B}(kernel::K, unbiased::Bool, blocksize::B) where {K,B}
if blocksize isa Integer
blocksize ≥ 1 + unbiased || throw(
ArgumentError(
"there must be at least $(1 + unbiased) $(unbiased ? "samples" : "sample") per block",
),
)
end
return new{K,B}(kernel, unbiased, blocksize)
end
end
function SKCE(kernel::Kernel; unbiased::Bool=true, blocksize::B=identity) where {B}
return SKCE{typeof(kernel),B}(kernel, unbiased, blocksize)
end
## estimators without blocks
function (skce::SKCE{<:Kernel,typeof(identity)})(
predictions::AbstractVector, targets::AbstractVector
)
@unpack kernel, unbiased = skce
return if unbiased
unbiasedskce(kernel, predictions, targets)
else
biasedskce(kernel, predictions, targets)
end
end
### unbiased estimator (no blocks)
function unbiasedskce(kernel::Kernel, predictions::AbstractVector, targets::AbstractVector)
# obtain number of samples
nsamples = check_nsamples(predictions, targets, 2)
@inbounds begin
# evaluate the kernel function for the first pair of samples
hij = unsafe_skce_eval(
kernel, predictions[1], targets[1], predictions[2], targets[2]
)
# initialize the estimate (dividing by 1 ensures a floating-point type even for
# integer-valued kernels)
estimate = hij / 1
# for all other pairs of samples
n = 1
for j in 3:nsamples
predictionj = predictions[j]
targetj = targets[j]
for i in 1:(j - 1)
predictioni = predictions[i]
targeti = targets[i]
# evaluate the kernel function
hij = unsafe_skce_eval(kernel, predictioni, targeti, predictionj, targetj)
# update the estimate
n += 1
estimate += (hij - estimate) / n
end
end
end
return estimate
end
### biased estimator (no blocks)
function biasedskce(kernel::Kernel, predictions::AbstractVector, targets::AbstractVector)
# obtain number of samples
nsamples = check_nsamples(predictions, targets, 1)
@inbounds begin
# evaluate kernel function for the first sample
prediction = predictions[1]
target = targets[1]
hij = unsafe_skce_eval(kernel, prediction, target, prediction, target)
# initialize the calibration error estimate
estimate = hij / 1
# for all other pairs of samples
n = 1
for i in 2:nsamples
predictioni = predictions[i]
targeti = targets[i]
for j in 1:(i - 1)
predictionj = predictions[j]
targetj = targets[j]
# evaluate the kernel function
hij = unsafe_skce_eval(kernel, predictioni, targeti, predictionj, targetj)
# update the estimate (add two terms due to symmetry!)
n += 2
estimate += 2 * (hij - estimate) / n
end
# evaluate the kernel function
hij = unsafe_skce_eval(kernel, predictioni, targeti, predictioni, targeti)
# update the estimate
n += 1
estimate += (hij - estimate) / n
end
end
return estimate
end
## estimators with blocks
function (skce::SKCE)(predictions::AbstractVector, targets::AbstractVector)
@unpack kernel, unbiased, blocksize = skce
# obtain number of samples
nsamples = check_nsamples(predictions, targets, 1 + unbiased)
# compute number of blocks
_blocksize = blocksize isa Integer ? blocksize : blocksize(nsamples)
(_blocksize isa Integer && _blocksize >= 1 + unbiased) ||
error("number of samples per block must be an integer >= $(1 + unbiased)")
nblocks = nsamples ÷ _blocksize
nblocks >= 1 || error("at least one block of samples is required")
# create iterator of partitions
blocks = Iterators.take(
zip(
Iterators.partition(predictions, _blocksize),
Iterators.partition(targets, _blocksize),
),
nblocks,
)
# compute average estimate
estimator = SKCE(kernel; unbiased=unbiased)
estimate = mean(
estimator(_predictions, _targets) for (_predictions, _targets) in blocks
)
return estimate
end
"""
unsafe_skce_eval(k, p, y, p̃, ỹ)
Evaluate
```math
k((p, y), (p̃, ỹ)) - E_{z ∼ p}[k((p, z), (p̃, ỹ))] - E_{z̃ ∼ p̃}[k((p, y), (p̃, z̃))] + E_{z ∼ p, z̃ ∼ p̃}[k((p, z), (p̃, z̃))]
```
for kernel `k` and predictions `p` and `p̃` with corresponding targets `y` and `ỹ`.
This method assumes that `p`, `p̃`, `y`, and `ỹ` are valid and specified correctly, and
does not perform any checks.
"""
function unsafe_skce_eval end
# default implementation for classification
# we do not use the symmetry of `kernel` since it seems unlikely that `(p, y) == (p̃, ỹ)`
function unsafe_skce_eval(
kernel::Kernel,
p::AbstractVector{<:Real},
y::Integer,
p̃::AbstractVector{<:Real},
ỹ::Integer,
)
# precomputations
n = length(p)
@inbounds py = p[y]
@inbounds p̃ỹ = p̃[ỹ]
pym1 = py - 1
p̃ỹm1 = p̃ỹ - 1
tuple_p_y = (p, y)
tuple_p̃_ỹ = (p̃, ỹ)
# i = y, j = ỹ
result = kernel((p, y), (p̃, ỹ)) * (1 - py - p̃ỹ + py * p̃ỹ)
# i < y
for i in 1:(y - 1)
@inbounds pi = p[i]
tuple_p_i = (p, i)
# j < ỹ
@inbounds for j in 1:(ỹ - 1)
result += kernel(tuple_p_i, (p̃, j)) * pi * p̃[j]
end
# j = ỹ
result += kernel(tuple_p_i, tuple_p̃_ỹ) * pi * p̃ỹm1
# j > ỹ
@inbounds for j in (ỹ + 1):n
result += kernel(tuple_p_i, (p̃, j)) * pi * p̃[j]
end
end
# i = y, j < ỹ
@inbounds for j in 1:(ỹ - 1)
result += kernel(tuple_p_y, (p̃, j)) * pym1 * p̃[j]
end
# i = y, j > ỹ
@inbounds for j in (ỹ + 1):n
result += kernel(tuple_p_y, (p̃, j)) * pym1 * p̃[j]
end
# i > y
for i in (y + 1):n
@inbounds pi = p[i]
tuple_p_i = (p, i)
# j < ỹ
@inbounds for j in 1:(ỹ - 1)
result += kernel(tuple_p_i, (p̃, j)) * pi * p̃[j]
end
# j = ỹ
result += kernel(tuple_p_i, tuple_p̃_ỹ) * pi * p̃ỹm1
# j > ỹ
@inbounds for j in (ỹ + 1):n
result += kernel(tuple_p_i, (p̃, j)) * pi * p̃[j]
end
end
return result
end
# for binary classification with probabilities (corresponding to parameters of Bernoulli
# distributions) and boolean targets the expression simplifies to
# ```math
# h_k((p, y), (p̃, ỹ)) = (y(1-p) + (1-y)p)(ỹ(1-p̃) + (1-ỹ)p̃)(k((p, y), (p̃, ỹ)) - k((p, 1-y), (p̃, ỹ)) - k((p, y), (p̃, 1-ỹ)) + k((p, 1-y), (p̃, 1-ỹ)))
# ```
function unsafe_skce_eval(kernel::Kernel, p::Real, y::Bool, p̃::Real, ỹ::Bool)
noty = !y
notỹ = !ỹ
z =
kernel((p, y), (p̃, ỹ)) - kernel((p, noty), (p̃, ỹ)) -
kernel((p, y), (p̃, notỹ)) + kernel((p, noty), (p̃, notỹ))
return (y ? 1 - p : p) * (ỹ ? 1 - p̃ : p̃) * z
end
# evaluation for tensor product kernels
function unsafe_skce_eval(kernel::KernelTensorProduct, p, y, p̃, ỹ)
κpredictions, κtargets = kernel.kernels
return κpredictions(p, p̃) * unsafe_skce_eval_targets(κtargets, p, y, p̃, ỹ)
end
# resolve method ambiguity
function unsafe_skce_eval(
kernel::KernelTensorProduct,
p::AbstractVector{<:Real},
y::Integer,
p̃::AbstractVector{<:Real},
ỹ::Integer,
)
κpredictions, κtargets = kernel.kernels
return κpredictions(p, p̃) * unsafe_skce_eval_targets(κtargets, p, y, p̃, ỹ)
end
function unsafe_skce_eval(kernel::KernelTensorProduct, p::Real, y::Bool, p̃::Real, ỹ::Bool)
κpredictions, κtargets = kernel.kernels
return κpredictions(p, p̃) * unsafe_skce_eval_targets(κtargets, p, y, p̃, ỹ)
end
function unsafe_skce_eval_targets(
κtargets::Kernel,
p::AbstractVector{<:Real},
y::Integer,
p̃::AbstractVector{<:Real},
ỹ::Integer,
)
# ensure that y ≤ ỹ (simplifies the implementation)
y > ỹ && return unsafe_skce_eval_targets(κtargets, p̃, ỹ, p, y)
# precomputations
n = length(p)
@inbounds begin
py = p[y]
pỹ = p[ỹ]
p̃y = p̃[y]
p̃ỹ = p̃[ỹ]
end
pym1 = py - 1
pỹm1 = pỹ - 1
p̃ym1 = p̃y - 1
p̃ỹm1 = p̃ỹ - 1
# i = y, j = ỹ
result = κtargets(y, ỹ) * (1 - py - p̃ỹ + py * p̃ỹ)
# i < y
for i in 1:(y - 1)
@inbounds pi = p[i]
@inbounds p̃i = p̃[i]
# i = j < y ≤ ỹ
result += κtargets(i, i) * pi * p̃i
# i < j < y ≤ ỹ
@inbounds for j in (i + 1):(y - 1)
result += κtargets(i, j) * (pi * p̃[j] + p[j] * p̃i)
end
# i < y < j < ỹ
@inbounds for j in (y + 1):(ỹ - 1)
result += κtargets(i, j) * (pi * p̃[j] + p[j] * p̃i)
end
# i < y ≤ ỹ < j
@inbounds for j in (ỹ + 1):n
result += κtargets(i, j) * (pi * p̃[j] + p[j] * p̃i)
end
end
# y < i < ỹ
for i in (y + 1):(ỹ - 1)
@inbounds pi = p[i]
@inbounds p̃i = p̃[i]
# y < i = j < ỹ
result += κtargets(i, i) * pi * p̃i
# y < i < j < ỹ
@inbounds for j in (i + 1):(ỹ - 1)
result += κtargets(i, j) * (pi * p̃[j] + p[j] * p̃i)
end
# y < i < ỹ < j
@inbounds for j in (ỹ + 1):n
result += κtargets(i, j) * (pi * p̃[j] + p[j] * p̃i)
end
end
# ỹ < i
for i in (ỹ + 1):n
@inbounds pi = p[i]
@inbounds p̃i = p̃[i]
# ỹ < i = j
result += κtargets(i, i) * pi * p̃i
# ỹ < i < j
@inbounds for j in (i + 1):n
result += κtargets(i, j) * (pi * p̃[j] + p[j] * p̃i)
end
end
# handle special case y = ỹ
if y == ỹ
# i < y = ỹ, j = y = ỹ
@inbounds for i in 1:(y - 1)
result += κtargets(i, y) * (p[i] * p̃ym1 + pym1 * p̃[i])
end
# i = y = ỹ, j > y = ỹ
@inbounds for j in (y + 1):n
result += κtargets(y, j) * (pym1 * p̃[j] + p[j] * p̃ym1)
end
else
# i < y
for i in 1:(y - 1)
@inbounds pi = p[i]
@inbounds p̃i = p̃[i]
# j = y < ỹ
result += κtargets(i, y) * (pi * p̃y + pym1 * p̃i)
# y < j = ỹ
result += κtargets(i, ỹ) * (pi * p̃ỹm1 + pỹ * p̃i)
end
# i = y = j < ỹ
result += κtargets(y, y) * pym1 * p̃y
# i = y < j < ỹ and y < i < j = ỹ
for ij in (y + 1):(ỹ - 1)
@inbounds pij = p[ij]
@inbounds p̃ij = p̃[ij]
# i = y < j < ỹ
result += κtargets(y, ij) * (pym1 * p̃ij + pij * p̃y)
# y < i < j = ỹ
result += κtargets(ij, ỹ) * (pij * p̃ỹm1 + pỹ * p̃ij)
end
# i = ỹ = j
result += κtargets(ỹ, ỹ) * pỹ * (p̃ỹ - 1)
# i = y < ỹ < j and i = ỹ < j
for j in (ỹ + 1):n
@inbounds pj = p[j]
@inbounds p̃j = p̃[j]
# i = y < ỹ < j
result += κtargets(y, j) * (pym1 * p̃j + pj * p̃y)
# i = ỹ < j
result += κtargets(ỹ, j) * (p̃ỹm1 * pj + p̃j * pỹ)
end
end
return result
end
function unsafe_skce_eval_targets(
::WhiteKernel,
p::AbstractVector{<:Real},
y::Integer,
p̃::AbstractVector{<:Real},
ỹ::Integer,
)
@inbounds res = (y == ỹ) - p[ỹ] - p̃[y] + dot(p, p̃)
return res
end
function unsafe_skce_eval_targets(κtargets::Kernel, p::Real, y::Bool, p̃::Real, ỹ::Bool)
noty = !y
notỹ = !ỹ
z = κtargets(y, ỹ) - κtargets(noty, ỹ) - κtargets(y, notỹ) + κtargets(noty, notỹ)
return (y ? 1 - p : p) * (ỹ ? 1 - p̃ : p̃) * z
end
function unsafe_skce_eval_targets(::WhiteKernel, p::Real, y::Bool, p̃::Real, ỹ::Bool)
return 2 * (y - p) * (ỹ - p̃)
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 4819 |
@doc raw"""
UCME(k, testpredictions, testtargets)
Estimator of the unnormalized calibration mean embedding (UCME) with kernel `k` and sets of
`testpredictions` and `testtargets`.
Kernel `k` on the product space of predictions and targets has to be a `Kernel` from the
Julia package
[KernelFunctions.jl](https://github.com/JuliaGaussianProcesses/KernelFunctions.jl)
that can be evaluated for inputs that are tuples of predictions and targets.
The numbers of test predictions and test targets must be equal and at least one.
# Details
The estimator is biased and guaranteed to be non-negative. Its computational complexity
is ``O(mn)``, where ``m`` is the number of test locations and ``n`` is the total number of
samples.
Let ``(T_i)_{i=1,\ldots,m}`` be the set of test locations, i.e., test predictions and
corresponding targets, and let ``(P_{X_j}, Y_j)_{j=1,\ldots,n}`` be a data set of
predictions and corresponding targets. The plug-in estimator of ``\mathrm{UCME}_{k,m}^2``
is defined as
```math
m^{-1} \sum_{i=1}^{m} {\bigg(n^{-1} \sum_{j=1}^n \Big(k\big(T_i, (P_{X_j}, Y_j)\big)
- \mathbb{E}_{Z \sim P_{X_j}} k\big(T_i, (P_{X_j}, Z)\big)\Big)\bigg)}^2.
```
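# Examples
A sketch of typical usage with a tensor product kernel, mirroring the test suite; the
test locations, predictions, and targets below are made-up toy data:
```julia
kernel = SqExponentialKernel() ⊗ WhiteKernel()
ucme = UCME(kernel, [[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]], [1, 1, 2])
ucme([[0.9, 0.1], [0.2, 0.8]], [1, 2])
```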
# References
Widmann, D., Lindsten, F., & Zachariah, D. (2021).
[Calibration tests beyond classification](https://openreview.net/forum?id=-bxf89v3Nx).
To be presented at *ICLR 2021*.
"""
struct UCME{K<:Kernel,TP,TT} <: CalibrationErrorEstimator
"""Kernel."""
kernel::K
"""Test predictions."""
testpredictions::TP
"""Test targets."""
testtargets::TT
function UCME{K,TP,TT}(kernel::K, testpredictions::TP, testtargets::TT) where {K,TP,TT}
check_nsamples(testpredictions, testtargets)
return new{K,TP,TT}(kernel, testpredictions, testtargets)
end
end
function UCME(kernel::Kernel, testpredictions, testtargets)
return UCME{typeof(kernel),typeof(testpredictions),typeof(testtargets)}(
kernel, testpredictions, testtargets
)
end
function (estimator::UCME)(predictions::AbstractVector, targets::AbstractVector)
@unpack kernel, testpredictions, testtargets = estimator
# obtain number of samples
nsamples = check_nsamples(predictions, targets)
# compute average over test locations
estimate = mean(zip(testpredictions, testtargets)) do (tp, ty)
unsafe_ucme_eval_testlocation(kernel, predictions, targets, tp, ty)
end
return estimate
end
function unsafe_ucme_eval_testlocation(
kernel::Kernel,
predictions::AbstractVector,
targets::AbstractVector,
testprediction,
testtarget,
)
# compute average over predictions and targets for the given test location
estimate = mean(zip(predictions, targets)) do (p, y)
unsafe_ucme_eval(kernel, p, y, testprediction, testtarget)
end
return estimate^2
end
function unsafe_ucme_eval(
kernel::Kernel,
p::AbstractVector{<:Real},
y::Integer,
testp::AbstractVector{<:Real},
testy::Integer,
)
res = sum(((z == y) - pz) * kernel((p, z), (testp, testy)) for (z, pz) in enumerate(p))
return res
end
function unsafe_ucme_eval(kernel::Kernel, p::Real, y::Bool, testp::Real, testy::Bool)
return (kernel((p, true), (testp, testy)) - kernel((p, false), (testp, testy))) *
(y - p)
end
function unsafe_ucme_eval(kernel::KernelTensorProduct, p, y, testp, testy)
κpredictions, κtargets = kernel.kernels
return unsafe_ucme_eval_targets(κtargets, p, y, testp, testy) * κpredictions(p, testp)
end
# resolve method ambiguity
function unsafe_ucme_eval(
kernel::KernelTensorProduct,
p::AbstractVector{<:Real},
y::Integer,
testp::AbstractVector{<:Real},
testy::Integer,
)
κpredictions, κtargets = kernel.kernels
return unsafe_ucme_eval_targets(κtargets, p, y, testp, testy) * κpredictions(p, testp)
end
function unsafe_ucme_eval(
kernel::KernelTensorProduct, p::Real, y::Bool, testp::Real, testy::Bool
)
κpredictions, κtargets = kernel.kernels
return unsafe_ucme_eval_targets(κtargets, p, y, testp, testy) * κpredictions(p, testp)
end
function unsafe_ucme_eval_targets(
kernel::Kernel,
p::AbstractVector{<:Real},
y::Integer,
testp::AbstractVector{<:Real},
testy::Integer,
)
return sum(((z == y) - pz) * kernel(z, testy) for (z, pz) in enumerate(p))
end
function unsafe_ucme_eval_targets(
kernel::Kernel, p::Real, y::Bool, testp::Real, testy::Bool
)
return (kernel(true, testy) - kernel(false, testy)) * (y - p)
end
function unsafe_ucme_eval_targets(
κtargets::WhiteKernel,
p::AbstractVector{<:Real},
y::Integer,
testp::AbstractVector{<:Real},
testy::Integer,
)
return @inbounds (y == testy) - p[testy]
end
function unsafe_ucme_eval_targets(
kernel::WhiteKernel, p::Real, y::Bool, testp::Real, testy::Bool
)
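# `testy - !testy` is 1 if `testy` is true and -1 otherwise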
return (testy - !testy) * (y - p)
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 4050 |
abstract type AbstractBinningAlgorithm end
mutable struct Bin{T}
"""Number of samples."""
nsamples::Int
"""Mean of predictions."""
mean_predictions::T
"""Proportions of targets."""
proportions_targets::T
function Bin{T}(
nsamples::Int, mean_predictions::T, proportions_targets::T
) where {T<:Real}
nsamples ≥ 0 || throw(ArgumentError("the number of samples must be non-negative"))
return new{T}(nsamples, mean_predictions, proportions_targets)
end
function Bin{T}(
nsamples::Int, mean_predictions::T, proportions_targets::T
) where {T<:AbstractVector{<:Real}}
nsamples ≥ 0 || throw(ArgumentError("the number of samples must be non-negative"))
nclasses = length(mean_predictions)
nclasses > 1 || throw(ArgumentError("the number of classes must be greater than 1"))
nclasses == length(proportions_targets) || throw(
DimensionMismatch(
"the number of predicted classes has to be equal to the number of classes",
),
)
return new{T}(nsamples, mean_predictions, proportions_targets)
end
end
function Bin(nsamples::Int, mean_predictions::T, proportions_targets::T) where {T}
return Bin{T}(nsamples, mean_predictions, proportions_targets)
end
"""
Bin(predictions, targets)
Create bin of `predictions` and corresponding `targets`.
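A minimal sketch with made-up values:
```julia
bin = Bin([0.3, 0.5, 0.7, 0.9], [false, true, true, true])
# bin.nsamples == 4, bin.mean_predictions == 0.6, bin.proportions_targets == 0.75
```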
"""
function Bin(predictions::AbstractVector{<:Real}, targets::AbstractVector{Bool})
# compute mean of predictions
mean_predictions = mean(predictions)
# compute proportion of targets
proportions_targets = mean(targets)
return Bin(length(predictions), mean_predictions, proportions_targets)
end
function Bin(
predictions::AbstractVector{<:AbstractVector{<:Real}},
targets::AbstractVector{<:Integer},
)
# compute mean of predictions
mean_predictions = mean(predictions)
# compute proportion of targets
nclasses = length(predictions[1])
proportions_targets = StatsBase.proportions(targets, nclasses)
return Bin(length(predictions), mean_predictions, proportions_targets)
end
"""
Bin(prediction, target)
Create bin of a single `prediction` and corresponding `target`.
"""
function Bin(prediction::Real, target::Bool)
# compute mean of predictions
mean_predictions = prediction / 1
# compute proportion of targets
proportions_targets = target / 1
return Bin(1, mean_predictions, proportions_targets)
end
function Bin(prediction::AbstractVector{<:Real}, target::Integer)
# compute mean of predictions
mean_predictions = prediction ./ 1
# compute proportion of targets
proportions_targets = similar(mean_predictions)
for i in 1:length(proportions_targets)
proportions_targets[i] = i == target ? 1 : 0
end
return Bin(1, mean_predictions, proportions_targets)
end
"""
adddata!(bin::Bin, prediction, target)
Update the running statistics of the `bin` by adding one additional pair of `prediction`
and `target`.
"""
function adddata!(bin::Bin, prediction::Real, target::Bool)
@unpack mean_predictions, proportions_targets = bin
# update number of samples
nsamples = (bin.nsamples += 1)
# update mean of predictions
mean_predictions += (prediction - mean_predictions) / nsamples
bin.mean_predictions = mean_predictions
# update proportions of targets
proportions_targets += (target - proportions_targets) / nsamples
bin.proportions_targets = proportions_targets
return nothing
end
function adddata!(bin::Bin, prediction::AbstractVector{<:Real}, target::Integer)
@unpack mean_predictions, proportions_targets = bin
# update number of samples
nsamples = (bin.nsamples += 1)
# update mean of predictions
@. mean_predictions += (prediction - mean_predictions) / nsamples
# update proportions of targets
nclasses = length(proportions_targets)
@. proportions_targets += ((1:nclasses == target) - proportions_targets) / nsamples
return nothing
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 6085 |
"""
MedianVarianceBinning([minsize::Int = 10, maxbins::Int = typemax(Int)])
Dynamic binning scheme of the probability simplex with at most `maxbins` bins that each
contain at least `minsize` samples.
The data set is split recursively as long as it is possible to split the bins while
satisfying these conditions. In each step, the bin with the maximum variance of predicted
probabilities for any component is selected and split at the median of the predicted
probability of the component with the largest variance.
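As a sketch of typical usage (mirroring the test suite, where the binning scheme is
passed to the binning-based `ECE` estimator defined elsewhere in this package):
```julia
ece = ECE(MedianVarianceBinning(10))
ece(predictions, targets)  # predictions: probability vectors, targets: class indices
```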
"""
struct MedianVarianceBinning <: AbstractBinningAlgorithm
minsize::Int
maxbins::Int
function MedianVarianceBinning(minsize, maxbins)
minsize ≥ 1 || error("minimum number of samples must be positive")
maxbins ≥ 1 || error("maximum number of bins must be positive")
return new(minsize, maxbins)
end
end
MedianVarianceBinning(minsize::Int=10) = MedianVarianceBinning(minsize, typemax(Int))
function perform(
alg::MedianVarianceBinning,
predictions::AbstractVector{<:AbstractVector{<:Real}},
targets::AbstractVector{<:Integer},
)
@unpack minsize, maxbins = alg
# check if binning is not possible
nsamples = length(predictions)
nsamples < minsize && error("at least $minsize samples are required")
# check if only trivial binning is possible
minsplit = 2 * minsize
(nsamples < minsplit || maxbins == 1) && return [Bin(predictions, targets)]
# find dimension with maximum variance
idxs_predictions = collect(1:nsamples)
GC.@preserve idxs_predictions begin
max_var_predictions, argmax_var_predictions = max_argmax_var(
predictions, idxs_predictions
)
# create priority queue and empty set of bins
queue = PriorityQueue(
(idxs_predictions, argmax_var_predictions) => max_var_predictions,
Base.Order.Reverse,
)
bins = Vector{typeof(Bin(predictions, targets))}(undef, 0)
nbins = 1
while nbins < maxbins && !isempty(queue)
# pick the set with the largest variance
idxs, argmax_var = dequeue!(queue)
# compute indices of the two subsets when splitting at the median
idxsbelow, idxsabove = unsafe_median_split!(idxs, predictions, argmax_var)
# add a bin of all indices if one of the subsets is too small
# can happen if there are many samples that are equal to the median
if length(idxsbelow) < minsize || length(idxsabove) < minsize
push!(bins, Bin(predictions[idxs], targets[idxs]))
continue
end
for newidxs in (idxsbelow, idxsabove)
if length(newidxs) < minsplit
# add a new bin if the subset cannot be split further
push!(bins, Bin(predictions[newidxs], targets[newidxs]))
else
# otherwise update the queue with the new subsets
max_var_newidxs, argmax_var_newidxs = max_argmax_var(
predictions, newidxs
)
enqueue!(queue, (newidxs, argmax_var_newidxs), max_var_newidxs)
end
end
# in total one additional bin was created
nbins += 1
end
# add remaining bins
while !isempty(queue)
# pop queue
idxs, _ = dequeue!(queue)
# create bin
push!(bins, Bin(predictions[idxs], targets[idxs]))
end
end
return bins
end
function max_argmax_var(x::AbstractVector{<:AbstractVector{<:Real}}, idxs)
# compute variance along the first dimension
maxvar = unsafe_variance_welford(x, idxs, 1)
maxdim = 1
for d in 2:length(x[1])
# compute variance along the d-th dimension
vard = unsafe_variance_welford(x, idxs, d)
# update current optimum if required
if vard > maxvar
maxvar = vard
maxdim = d
end
end
return maxvar, maxdim
end
# use Welford algorithm to compute the unbiased sample variance
# taken from: https://github.com/JuliaLang/Statistics.jl/blob/da6057baf849cbc803b952ef7adf979ae3a9f9d2/src/Statistics.jl#L184-L199
# this function is unsafe since it does not perform any bounds checking
function unsafe_variance_welford(
x::AbstractVector{<:AbstractVector{<:Real}}, idxs::Vector{Int}, dim::Int
)
n = length(idxs)
@inbounds begin
M = x[idxs[1]][dim] / 1
S = zero(M)
for i in 2:n
value = x[idxs[i]][dim]
new_M = M + (value - M) / i
S += (value - M) * (value - new_M)
M = new_M
end
end
return S / (n - 1)
end
# this function is unsafe since it leads to undefined behaviour if the
# outputs are accessed after `idxs` has been garbage collected
function unsafe_median_split!(
idxs::Vector{Int}, x::AbstractVector{<:AbstractVector{<:Real}}, dim::Int
)
n = length(idxs)
if length(idxs) < 2
cutoff = 0
else
# partially sort the indices `idxs` according to the corresponding values in the
# `dim`th component of `x`
m = div(n, 2) + 1
f = let x = x, dim = dim
idx -> x[idx][dim]
end
partialsort!(idxs, 1:m; by=f)
# figure out all values < median
# the median is `x[idxs[m]][dim]` for vectors of odd length
# and `(x[idxs[m - 1]][dim] + x[idxs[m]][dim]) / 2` for vectors of even length
if x[idxs[m - 1]][dim] < x[idxs[m]][dim]
cutoff = m - 1
else
# otherwise obtain the last value < median
firstidxs = unsafe_wrap(Array, pointer(idxs, 1), m - 1)
cutoff = searchsortedfirst(firstidxs, idxs[m]; by=f) - 1
end
end
# create two new arrays that refer to the two subsets of indices
idxsbelow = unsafe_wrap(Array, pointer(idxs, 1), cutoff)
idxsabove = unsafe_wrap(Array, pointer(idxs, cutoff + 1), n - cutoff)
return idxsbelow, idxsabove
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 3021 |
"""
UniformBinning(nbins::Int)
Binning scheme of the probability simplex with `nbins` bins of uniform width for
each component.
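For example, `UniformBinning(10)` partitions each component into the bins
(0, 0.1], (0.1, 0.2], ..., (0.9, 1], where predictions of 0 are assigned to the first bin.
As a sketch of typical usage (mirroring the test suite, where the binning scheme is
passed to the `ECE` estimator):
```julia
ece = ECE(UniformBinning(10))
ece(predictions, targets)  # predictions: probability vectors, targets: class indices
```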
"""
struct UniformBinning <: AbstractBinningAlgorithm
nbins::Int
function UniformBinning(nbins::Int)
nbins > 0 || error("number of bins must be positive")
return new(nbins)
end
end
function perform(
binning::UniformBinning,
predictions::AbstractVector{<:Real},
targets::AbstractVector{Bool},
)
@unpack nbins = binning
# create dictionary of bins
T = eltype(float(zero(eltype(predictions))))
bins = Dict{Int,Bin{T}}()
# reserve some memory (very rough guess)
nsamples = length(predictions)
sizehint!(bins, min(nbins, nsamples))
# for all other samples
@inbounds for (prediction, target) in zip(predictions, targets)
# compute index of bin
index = binindex(prediction, nbins)
# create new bin or update existing one
bin = get(bins, index, nothing)
if bin === nothing
bins[index] = Bin(prediction, target)
else
adddata!(bin, prediction, target)
end
end
return values(bins)
end
function perform(
binning::UniformBinning,
predictions::AbstractVector{<:AbstractVector{<:Real}},
targets::AbstractVector{<:Integer},
)
return _perform(binning, predictions, targets, Val(length(predictions[1])))
end
function _perform(
binning::UniformBinning,
predictions::AbstractVector{<:AbstractVector{T}},
targets::AbstractVector{<:Integer},
nclasses::Val{N},
) where {T<:Real,N}
@unpack nbins = binning
# create bin for the initial sample
binindices = NTuple{N,Int}[binindex(predictions[1], nbins, nclasses)]
bins = [Bin(predictions[1], targets[1])]
# reserve some memory (very rough guess)
nsamples = length(predictions)
guess = min(nbins, nsamples)
sizehint!(bins, guess)
sizehint!(binindices, guess)
# for all other samples
@inbounds for i in 2:nsamples
# obtain prediction and corresponding target
prediction = predictions[i]
target = targets[i]
# compute index of bin
index = binindex(prediction, nbins, nclasses)
# create new bin or update existing one
j = searchsortedfirst(binindices, index)
if j > length(binindices) || (binindices[j] !== index)
insert!(binindices, j, index)
insert!(bins, j, Bin(prediction, target))
else
bin = bins[j]
adddata!(bin, prediction, target)
end
end
return bins
end
function binindex(
probs::AbstractVector{<:Real}, nbins::Int, ::Val{N}
)::NTuple{N,Int} where {N}
ntuple(N) do i
binindex(probs[i], nbins)
end
end
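# for example: binindex(0.0, 10) == 1, binindex(0.05, 10) == 1, binindex(0.1, 10) == 1,
# binindex(0.11, 10) == 2, and binindex(1.0, 10) == 10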
function binindex(p::Real, nbins::Int)
# check argument
zero(p) ≤ p ≤ one(p) || throw(ArgumentError("predictions must be between 0 and 1"))
# handle special case p = 0
iszero(p) && return 1
return ceil(Int, nbins * p)
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 299 |
abstract type DistributionsPreMetric <: Distances.PreMetric end
abstract type DistributionsSemiMetric <: Distances.SemiMetric end
abstract type DistributionsMetric <: Distances.Metric end
const DistributionsDistance = Union{
DistributionsPreMetric,DistributionsSemiMetric,DistributionsMetric
}
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 2374 |
struct SqWasserstein <: DistributionsSemiMetric end
# result type (e.g., for pairwise computations)
function Distances.result_type(
::SqWasserstein, ::Type{T1}, ::Type{T2}
) where {T1<:Real,T2<:Real}
return promote_type(T1, T2)
end
# evaluations for normal distributions
function (::SqWasserstein)(a::Normal, b::Normal)
μa, σa = params(a)
μb, σb = params(b)
return abs2(μa - μb) + abs2(σa - σb)
end
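# e.g., SqWasserstein()(Normal(0.0, 1.0), Normal(1.0, 2.0)) == abs2(0 - 1) + abs2(1 - 2) == 2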
function (::SqWasserstein)(a::AbstractMvNormal, b::AbstractMvNormal)
μ1 = mean(a)
μ2 = mean(b)
Σ1 = cov(a)
Σ2 = cov(b)
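# `OT.sqbures` computes the squared Bures metric between the covariance matrices,
# tr(Σ1) + tr(Σ2) - 2 tr((√Σ1 Σ2 √Σ1)^(1/2))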
return Distances.sqeuclidean(μ1, μ2) + OT.sqbures(Σ1, Σ2)
end
function (::SqWasserstein)(a::MvNormal, b::MvNormal)
μa, Σa = params(a)
μb, Σb = params(b)
return Distances.sqeuclidean(μa, μb) + OT.sqbures(Σa, Σb)
end
# evaluations for Laplace distributions
function (::SqWasserstein)(a::Laplace, b::Laplace)
μa, βa = params(a)
μb, βb = params(b)
return abs2(μa - μb) + 2 * abs2(βa - βb)
end
# Wasserstein 2 distance
struct Wasserstein <: DistributionsMetric end
# result type (e.g., for pairwise computations)
function Distances.result_type(
::Wasserstein, ::Type{T1}, ::Type{T2}
) where {T1<:Real,T2<:Real}
return float(promote_type(T1, T2))
end
function (::Wasserstein)(a::Distribution, b::Distribution)
return sqrt(SqWasserstein()(a, b))
end
# Mixture Wasserstein distances
struct SqMixtureWasserstein{S} <: DistributionsSemiMetric
lpsolver::S
end
struct MixtureWasserstein{S} <: DistributionsMetric
lpsolver::S
end
SqMixtureWasserstein() = SqMixtureWasserstein(Tulip.Optimizer())
MixtureWasserstein() = MixtureWasserstein(Tulip.Optimizer())
# result type (e.g., for pairwise computations)
function Distances.result_type(
::SqMixtureWasserstein, ::Type{T1}, ::Type{T2}
) where {T1<:Real,T2<:Real}
return promote_type(T1, T2)
end
function Distances.result_type(
::MixtureWasserstein, ::Type{T1}, ::Type{T2}
) where {T1<:Real,T2<:Real}
return float(promote_type(T1, T2))
end
function (s::SqMixtureWasserstein)(a::AbstractMixtureModel, b::AbstractMixtureModel)
C = Distances.pairwise(SqWasserstein(), components(a), components(b))
return OT.emd2(probs(a), probs(b), C, deepcopy(s.lpsolver))
end
function (m::MixtureWasserstein)(a::AbstractMixtureModel, b::AbstractMixtureModel)
return sqrt(SqMixtureWasserstein(m.lpsolver)(a, b))
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 2823 |
# SKCE
# predicted Laplace distributions with exponential kernel for the targets
function CalibrationErrors.unsafe_skce_eval_targets(
kernel::ExponentialKernel{Euclidean}, p::Laplace, y::Real, p̃::Laplace, ỹ::Real
)
# extract the parameters
μ = p.μ
β = p.θ
μ̃ = p̃.μ
β̃ = p̃.θ
res =
kernel(y, ỹ) - laplace_laplacian_kernel(β, abs(μ - ỹ)) -
laplace_laplacian_kernel(β̃, abs(μ̃ - y)) +
laplace_laplacian_kernel(β, β̃, abs(μ - μ̃))
return res
end
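# the `laplace_laplacian_kernel` methods below evaluate the analytic expectations of the
# exponential kernel exp(-abs(x - x̃)) that appear in `h_k`: over one Laplace distribution
# (two arguments) and over two independent Laplace distributions (three arguments)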
# z = abs(μ - y)
function laplace_laplacian_kernel(β, z)
if isone(β)
(1 + z) * exp(-z) / 2
else
(β * exp(-z / β) - exp(-z)) / (β^2 - 1)
end
end
# z = abs(μ - μ̃)
function laplace_laplacian_kernel(β, β̃, z)
if isone(β)
if isone(β̃)
return (3 + 3 * z + z^2) * exp(-z) / 8
else
c = β̃^2 - 1
csq = c^2
return β̃^3 * exp(-z / β̃) / csq - ((1 + z) / (2 * c) + β̃^2 / csq) * exp(-z)
end
end
if isone(β̃)
c = β^2 - 1
csq = c^2
return β^3 * exp(-z / β) / csq - ((1 + z) / (2 * c) + β^2 / csq) * exp(-z)
elseif β̃ == β
c = β^2 - 1
csq = c^2
return exp(-z) / csq + ((β + z) / (2 * c) - β / csq) * exp(-z / β)
else
c1 = β^2 - 1
c2 = β̃^2 - 1
c3 = β^2 - β̃^2
return β^3 * exp(-z / β) / (c1 * c3) - β̃^3 * exp(-z / β̃) / (c2 * c3) +
exp(-z) / (c1 * c2)
end
end
# UCME
function CalibrationErrors.unsafe_ucme_eval_targets(
kernel::ExponentialKernel{Euclidean}, p::Laplace, y::Real, ::Laplace, testy::Real
)
return kernel(y, testy) - laplace_laplacian_kernel(p.θ, abs(p.μ - testy))
end
# kernels with input transformations
# TODO: scale upfront?
function CalibrationErrors.unsafe_skce_eval_targets(
kernel::TransformedKernel{
ExponentialKernel{Euclidean},<:Union{ScaleTransform,ARDTransform}
},
p::Laplace,
y::Real,
p̃::Laplace,
ỹ::Real,
)
# obtain the transform
t = kernel.transform
return CalibrationErrors.unsafe_skce_eval_targets(
ExponentialKernel(), apply(t, p), t(y), apply(t, p̃), t(ỹ)
)
end
function CalibrationErrors.unsafe_ucme_eval_targets(
kernel::TransformedKernel{
ExponentialKernel{Euclidean},<:Union{ScaleTransform,ARDTransform}
},
p::Laplace,
y::Real,
testp::Laplace,
testy::Real,
)
# obtain the transform
t = kernel.transform
# `testp` is irrelevant for the evaluation and therefore not transformed
return CalibrationErrors.unsafe_ucme_eval_targets(
ExponentialKernel(), apply(t, p), t(y), testp, t(testy)
)
end
# utilities
# internal `apply` avoids type piracy of transforms
apply(t::Union{ScaleTransform,ARDTransform}, d::Laplace) = Laplace(t(d.μ), t(d.θ))
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 1115 |
function CalibrationErrors.unsafe_skce_eval_targets(
κtargets::Kernel, p::AbstractMixtureModel, y, p̃::AbstractMixtureModel, ỹ
)
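# `h_k` is affine in each of the expectations over `p` and `p̃`, so its value for mixture
# models is the probability-weighted sum of the values for all pairs of components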
p_components = components(p)
p_probs = probs(p)
p̃_components = components(p̃)
p̃_probs = probs(p̃)
probsi = p_probs[1]
componentsi = p_components[1]
s =
probsi *
p̃_probs[1] *
CalibrationErrors.unsafe_skce_eval_targets(
κtargets, componentsi, y, p̃_components[1], ỹ
)
for j in 2:length(p̃_components)
s +=
probsi *
p̃_probs[j] *
CalibrationErrors.unsafe_skce_eval_targets(
κtargets, componentsi, y, p̃_components[j], ỹ
)
end
for i in 2:length(p_components)
probsi = p_probs[i]
componentsi = p_components[i]
for j in 1:length(p̃_components)
s +=
probsi *
p̃_probs[j] *
CalibrationErrors.unsafe_skce_eval_targets(
κtargets, componentsi, y, p̃_components[j], ỹ
)
end
end
return s
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 3477 |
# SKCE
function CalibrationErrors.unsafe_skce_eval_targets(
kernel::SqExponentialKernel{Euclidean},
p::MvNormal,
y::AbstractVector{<:Real},
p̃::MvNormal,
ỹ::AbstractVector{<:Real},
)
# extract the parameters
μ = p.μ
Σ = p.Σ
μ̃ = p̃.μ
Σ̃ = p̃.Σ
# compute inverse scaling matrices
invA = LinearAlgebra.I + Σ
invB = LinearAlgebra.I + Σ̃
invC = invA + Σ̃
res =
kernel(y, ỹ) - mvnormal_gaussian_kernel(invA, μ, ỹ) -
mvnormal_gaussian_kernel(invB, μ̃, y) + mvnormal_gaussian_kernel(invC, μ, μ̃)
return res
end
function mvnormal_gaussian_kernel(invA, y, ỹ)
return exp(-(LinearAlgebra.logdet(invA) + invquad_diff(invA, y, ỹ)) / 2)
end
# UCME
# predicted normal distributions with squared exponential kernel for the targets
function CalibrationErrors.unsafe_ucme_eval_targets(
kernel::SqExponentialKernel{Euclidean},
p::MvNormal,
y::AbstractVector{<:Real},
testp::MvNormal,
testy::AbstractVector{<:Real},
)
# compute inverse scaling matrix
invA = LinearAlgebra.I + p.Σ
return kernel(y, testy) - mvnormal_gaussian_kernel(invA, p.μ, testy)
end
# kernels with input transformations
# TODO: scale upfront?
function CalibrationErrors.unsafe_skce_eval_targets(
kernel::TransformedKernel{
SqExponentialKernel{Euclidean},<:Union{ScaleTransform,ARDTransform,LinearTransform}
},
p::MvNormal,
y::AbstractVector{<:Real},
p̃::MvNormal,
ỹ::AbstractVector{<:Real},
)
# obtain the transform
t = kernel.transform
return CalibrationErrors.unsafe_skce_eval_targets(
SqExponentialKernel(), apply(t, p), t(y), apply(t, p̃), t(ỹ)
)
end
function CalibrationErrors.unsafe_ucme_eval_targets(
kernel::TransformedKernel{
SqExponentialKernel{Euclidean},<:Union{ScaleTransform,ARDTransform,LinearTransform}
},
p::MvNormal,
y::AbstractVector{<:Real},
testp::MvNormal,
testy::AbstractVector{<:Real},
)
# obtain the transform
t = kernel.transform
# `testp` is irrelevant for the evaluation and therefore not transformed
return CalibrationErrors.unsafe_ucme_eval_targets(
SqExponentialKernel(), apply(t, p), t(y), testp, t(testy)
)
end
## utilities
# internal `apply` avoids type piracy of transforms
function apply(t::ScaleTransform, d::MvNormal)
s = first(t.s)
return MvNormal(t(d.μ), s^2 * d.Σ)
end
function apply(t::ARDTransform, d::MvNormal)
return MvNormal(t(d.μ), scale_cov(d.Σ, t.v))
end
# `X_A_Xt` only works with StridedMatrix: https://github.com/JuliaStats/PDMats.jl/issues/96
apply(t::LinearTransform, d::MvNormal) = Matrix(t.A) * d
# scale covariance matrix
# TODO: improve efficiency
scale_cov(Σ::PDMats.ScalMat, v) = PDMats.PDiagMat(v .^ 2 .* Σ.value)
scale_cov(Σ::PDMats.PDiagMat, v) = PDMats.PDiagMat(v .^ 2 .* Σ.diag)
scale_cov(Σ::PDMats.AbstractPDMat, v) = PDMats.X_A_Xt(Σ, LinearAlgebra.diagm(v))
invquad_diff(A::PDMats.ScalMat, x, y) = Distances.sqeuclidean(x, y) / A.value
function invquad_diff(
A::PDMats.PDiagMat, x::AbstractVector{<:Real}, y::AbstractVector{<:Real}
)
n = length(x)
length(y) == n || throw(DimensionMismatch("x and y must be of the same length"))
size(A) == (n, n) ||
throw(DimensionMismatch("size of A is not consistent with x and y"))
return sum(abs2(xi - yi) / wi for (xi, yi, wi) in zip(x, y, A.diag))
end
invquad_diff(A::PDMats.AbstractPDMat, x, y) = PDMats.invquad(A, x .- y)
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 2024 |
# SKCE
# predicted normal distributions with squared exponential kernel for the targets
function CalibrationErrors.unsafe_skce_eval_targets(
kernel::SqExponentialKernel{Euclidean}, p::Normal, y::Real, p̃::Normal, ỹ::Real
)
# extract parameters
μ = p.μ
σ = p.σ
μ̃ = p̃.μ
σ̃ = p̃.σ
# compute scaling factors
# TODO: use `hypot`?
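# the scaling factors arise from the Gaussian integral
# E_{Z ∼ Normal(μ, σ²)} exp(-(Z - y)² / 2) = exp(-(μ - y)² / (2 * (1 + σ²))) / sqrt(1 + σ²),
# i.e., α * kernel(α * μ, α * y) with α = 1 / sqrt(1 + σ²)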
sqσ = σ^2
sqσ̃ = σ̃^2
α = inv(sqrt(1 + sqσ))
β = inv(sqrt(1 + sqσ̃))
γ = inv(sqrt(1 + sqσ + sqσ̃))
return kernel(y, ỹ) - α * kernel(α * μ, α * ỹ) - β * kernel(β * y, β * μ̃) +
γ * kernel(γ * μ, γ * μ̃)
end
# UCME
function CalibrationErrors.unsafe_ucme_eval_targets(
kernel::SqExponentialKernel{Euclidean}, p::Normal, y::Real, ::Normal, testy::Real
)
# compute scaling factor
# TODO: use `hypot`?
α = inv(sqrt(1 + p.σ^2))
return kernel(y, testy) - α * kernel(α * p.μ, α * testy)
end
# kernels with input transformations
# TODO: scale upfront?
function CalibrationErrors.unsafe_skce_eval_targets(
kernel::TransformedKernel{
SqExponentialKernel{Euclidean},<:Union{ScaleTransform,ARDTransform}
},
p::Normal,
y::Real,
p̃::Normal,
ỹ::Real,
)
# obtain the transform
t = kernel.transform
return CalibrationErrors.unsafe_skce_eval_targets(
SqExponentialKernel(), apply(t, p), t(y), apply(t, p̃), t(ỹ)
)
end
function CalibrationErrors.unsafe_ucme_eval_targets(
kernel::TransformedKernel{
SqExponentialKernel{Euclidean},<:Union{ScaleTransform,ARDTransform}
},
p::Normal,
y::Real,
testp::Normal,
testy::Real,
)
# obtain the transform
t = kernel.transform
# `testp` is irrelevant for the evaluation and therefore not transformed
return CalibrationErrors.unsafe_ucme_eval_targets(
SqExponentialKernel(), apply(t, p), t(y), testp, t(testy)
)
end
# utilities
# internal `apply` avoids type piracy of transforms
apply(t::Union{ScaleTransform,ARDTransform}, d::Normal) = Normal(t(d.μ), t(d.σ))
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 498 |
@testset "Aqua" begin
# Test ambiguities separately without Base and Core
# Ref: https://github.com/JuliaTesting/Aqua.jl/issues/77
# Only test Project.toml formatting on Julia > 1.6 when running GitHub Actions
# Ref: https://github.com/JuliaTesting/Aqua.jl/issues/105
Aqua.test_all(
CalibrationErrors;
ambiguities=false,
project_toml_formatting=VERSION >= v"1.7" || !haskey(ENV, "GITHUB_ACTIONS"),
)
Aqua.test_ambiguities([CalibrationErrors])
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 2563 |
@testset "ece.jl" begin
@testset "Trivial tests" begin
ece = ECE(UniformBinning(10))
# categorical distributions
for predictions in ([[0, 1], [1, 0]], ColVecs([0 1; 1 0]), RowVecs([0 1; 1 0]))
@test iszero(@inferred(ece(predictions, [2, 1])))
end
for predictions in (
[[0, 1], [0.5, 0.5], [0.5, 0.5], [1, 0]],
ColVecs([0 0.5 0.5 1; 1 0.5 0.5 0]),
RowVecs([0 1; 0.5 0.5; 0.5 0.5; 1 0]),
)
@test iszero(@inferred(ece(predictions, [2, 2, 1, 1])))
end
# probabilities
for predictions in ([0, 1], [0.0, 1.0])
@test iszero(@inferred(ece(predictions, [false, true])))
end
@test iszero(@inferred(ece([0, 0.5, 0.5, 1], [false, false, true, true])))
end
@testset "Uniform binning: Basic properties" begin
ece = ECE(UniformBinning(10))
estimates = Vector{Float64}(undef, 1_000)
# categorical distributions
for nclasses in (2, 10, 100)
dist = Dirichlet(nclasses, 1.0)
predictions = [Vector{Float64}(undef, nclasses) for _ in 1:20]
targets = Vector{Int}(undef, 20)
for i in 1:length(estimates)
rand!.(Ref(dist), predictions)
targets .= rand.(Categorical.(predictions))
estimates[i] = ece(predictions, targets)
end
@test all(x -> zero(x) < x < one(x), estimates)
end
# probabilities
predictions = Vector{Float64}(undef, 20)
targets = Vector{Bool}(undef, 20)
for i in 1:length(estimates)
rand!(predictions)
map!(targets, predictions) do p
rand() < p
end
estimates[i] = ece(predictions, targets)
end
@test all(x -> zero(x) < x < one(x), estimates)
end
@testset "Median variance binning: Basic properties" begin
ece = ECE(MedianVarianceBinning(10))
estimates = Vector{Float64}(undef, 1_000)
for nclasses in (2, 10, 100)
dist = Dirichlet(nclasses, 1.0)
predictions = [Vector{Float64}(undef, nclasses) for _ in 1:20]
targets = Vector{Int}(undef, 20)
for i in 1:length(estimates)
rand!.(Ref(dist), predictions)
targets .= rand.(Categorical.(predictions))
estimates[i] = ece(predictions, targets)
end
@test all(x -> zero(x) < x < one(x), estimates)
end
end
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 906 |
using CalibrationErrors
using Aqua
using Distances
using Distributions
using PDMats
using StatsBase
using LinearAlgebra
using Random
using Statistics
using Test
using CalibrationErrors: unsafe_skce_eval, unsafe_ucme_eval
Random.seed!(1234)
@testset "CalibrationErrors" begin
@testset "General" begin
include("aqua.jl")
end
@testset "binning" begin
include("binning/generic.jl")
include("binning/uniform.jl")
include("binning/medianvariance.jl")
end
@testset "distances" begin
include("distances/wasserstein.jl")
end
@testset "ECE" begin
include("ece.jl")
end
@testset "SKCE" begin
include("skce.jl")
end
@testset "UCME" begin
include("ucme.jl")
end
@testset "distributions" begin
include("distributions/normal.jl")
include("distributions/mvnormal.jl")
end
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 10828 |
@testset "skce.jl" begin
# alternative implementation of white kernel
struct WhiteKernel2 <: Kernel end
(::WhiteKernel2)(x, y) = x == y
# alternative implementation of a tensor product kernel
struct TensorProduct2{K1<:Kernel,K2<:Kernel} <: Kernel
kernel1::K1
kernel2::K2
end
function (kernel::TensorProduct2)((x1, x2), (y1, y2))
return kernel.kernel1(x1, y1) * kernel.kernel2(x2, y2)
end
@testset "unsafe_skce_eval" begin
@testset "binary classification" begin
# probabilities and boolean targets
p, p̃ = rand(2)
y, ỹ = rand(Bool, 2)
scale = rand()
kernel = SqExponentialKernel() ∘ ScaleTransform(scale)
val = unsafe_skce_eval(kernel ⊗ WhiteKernel(), p, y, p̃, ỹ)
@test unsafe_skce_eval(kernel ⊗ WhiteKernel2(), p, y, p̃, ỹ) ≈ val
@test unsafe_skce_eval(TensorProduct2(kernel, WhiteKernel()), p, y, p̃, ỹ) ≈
val
@test unsafe_skce_eval(TensorProduct2(kernel, WhiteKernel2()), p, y, p̃, ỹ) ≈
val
# corresponding values and kernel for full categorical distribution
pfull = [p, 1 - p]
yint = 2 - y
p̃full = [p̃, 1 - p̃]
ỹint = 2 - ỹ
kernelfull = SqExponentialKernel() ∘ ScaleTransform(scale / sqrt(2))
@test unsafe_skce_eval(kernelfull ⊗ WhiteKernel(), pfull, yint, p̃full, ỹint) ≈
val
@test unsafe_skce_eval(
kernelfull ⊗ WhiteKernel2(), pfull, yint, p̃full, ỹint
) ≈ val
@test unsafe_skce_eval(
TensorProduct2(kernelfull, WhiteKernel()), pfull, yint, p̃full, ỹint
) ≈ val
@test unsafe_skce_eval(
TensorProduct2(kernelfull, WhiteKernel2()), pfull, yint, p̃full, ỹint
) ≈ val
end
@testset "multi-class classification" begin
n = 10
p = rand(n)
p ./= sum(p)
y = rand(1:n)
p̃ = rand(n)
p̃ ./= sum(p̃)
ỹ = rand(1:n)
kernel = SqExponentialKernel() ∘ ScaleTransform(rand())
val = unsafe_skce_eval(kernel ⊗ WhiteKernel(), p, y, p̃, ỹ)
@test unsafe_skce_eval(kernel ⊗ WhiteKernel2(), p, y, p̃, ỹ) ≈ val
@test unsafe_skce_eval(TensorProduct2(kernel, WhiteKernel()), p, y, p̃, ỹ) ≈
val
@test unsafe_skce_eval(TensorProduct2(kernel, WhiteKernel2()), p, y, p̃, ỹ) ≈
val
end
end
@testset "Unbiased: Two-dimensional example" begin
# categorical distributions
skce = SKCE(SqExponentialKernel() ⊗ WhiteKernel())
for predictions in ([[1, 0], [0, 1]], ColVecs([1 0; 0 1]), RowVecs([1 0; 0 1]))
@test iszero(@inferred(skce(predictions, [1, 2])))
@test iszero(@inferred(skce(predictions, [1, 1])))
@test @inferred(skce(predictions, [2, 1])) ≈ -2 * exp(-1)
@test iszero(@inferred(skce(predictions, [2, 2])))
end
# probabilities
skce = SKCE((SqExponentialKernel() ∘ ScaleTransform(sqrt(2))) ⊗ WhiteKernel())
@test iszero(@inferred(skce([1, 0], [true, false])))
@test iszero(@inferred(skce([1, 0], [true, true])))
@test @inferred(skce([1, 0], [false, true])) ≈ -2 * exp(-1)
@test iszero(@inferred(skce([1, 0], [false, false])))
end
@testset "Unbiased: Basic properties" begin
skce = SKCE((ExponentialKernel() ∘ ScaleTransform(0.1)) ⊗ WhiteKernel())
estimates = Vector{Float64}(undef, 1_000)
# categorical distributions
for nclasses in (2, 10, 100)
dist = Dirichlet(nclasses, 1.0)
predictions = [Vector{Float64}(undef, nclasses) for _ in 1:20]
targets = Vector{Int}(undef, 20)
for i in 1:length(estimates)
rand!.(Ref(dist), predictions)
targets .= rand.(Categorical.(predictions))
estimates[i] = skce(predictions, targets)
end
@test any(x -> x > zero(x), estimates)
@test any(x -> x < zero(x), estimates)
@test mean(estimates) ≈ 0 atol = 1e-3
end
# probabilities
predictions = Vector{Float64}(undef, 20)
targets = Vector{Bool}(undef, 20)
for i in 1:length(estimates)
rand!(predictions)
map!(targets, predictions) do p
rand() < p
end
estimates[i] = skce(predictions, targets)
end
@test any(x -> x > zero(x), estimates)
@test any(x -> x < zero(x), estimates)
@test mean(estimates) ≈ 0 atol = 1e-3
end
@testset "Biased: Two-dimensional example" begin
# categorical distributions
skce = SKCE(SqExponentialKernel() ⊗ WhiteKernel(); unbiased=false)
for predictions in ([[1, 0], [0, 1]], ColVecs([1 0; 0 1]), RowVecs([1 0; 0 1]))
@test iszero(@inferred(skce(predictions, [1, 2])))
@test @inferred(skce(predictions, [1, 1])) ≈ 0.5
@test @inferred(skce(predictions, [2, 1])) ≈ 1 - exp(-1)
@test @inferred(skce(predictions, [2, 2])) ≈ 0.5
end
# probabilities
skce = SKCE(
(SqExponentialKernel() ∘ ScaleTransform(sqrt(2))) ⊗ WhiteKernel();
unbiased=false,
)
@test iszero(@inferred(skce([1, 0], [true, false])))
@test @inferred(skce([1, 0], [true, true])) ≈ 0.5
@test @inferred(skce([1, 0], [false, true])) ≈ 1 - exp(-1)
@test @inferred(skce([1, 0], [false, false])) ≈ 0.5
end
@testset "Biased: Basic properties" begin
skce = SKCE(
(ExponentialKernel() ∘ ScaleTransform(0.1)) ⊗ WhiteKernel(); unbiased=false
)
estimates = Vector{Float64}(undef, 1_000)
# categorical distributions
for nclasses in (2, 10, 100)
dist = Dirichlet(nclasses, 1.0)
predictions = [Vector{Float64}(undef, nclasses) for _ in 1:20]
targets = Vector{Int}(undef, 20)
for i in 1:length(estimates)
rand!.(Ref(dist), predictions)
targets .= rand.(Categorical.(predictions))
estimates[i] = skce(predictions, targets)
end
@test all(x -> x > zero(x), estimates)
end
# probabilities
predictions = Vector{Float64}(undef, 20)
targets = Vector{Bool}(undef, 20)
for i in 1:length(estimates)
rand!(predictions)
map!(targets, predictions) do p
rand() < p
end
estimates[i] = skce(predictions, targets)
end
@test all(x -> x > zero(x), estimates)
end
@testset "Block: Two-dimensional example" begin
# categorical distributions
skce = SKCE(SqExponentialKernel() ⊗ WhiteKernel(); blocksize=2)
for predictions in ([[1, 0], [0, 1]], ColVecs([1 0; 0 1]), RowVecs([1 0; 0 1]))
@test iszero(@inferred(skce(predictions, [1, 2])))
@test iszero(@inferred(skce(predictions, [1, 1])))
@test @inferred(skce(predictions, [2, 1])) ≈ -2 * exp(-1)
@test iszero(@inferred(skce(predictions, [2, 2])))
end
# two predictions, ten times replicated
for predictions in (
repeat([[1, 0], [0, 1]], 10),
ColVecs(repeat([1 0; 0 1], 1, 10)),
RowVecs(repeat([1 0; 0 1], 10, 1)),
)
@test iszero(@inferred(skce(predictions, repeat([1, 2], 10))))
@test iszero(@inferred(skce(predictions, repeat([1, 1], 10))))
@test @inferred(skce(predictions, repeat([2, 1], 10))) ≈ -2 * exp(-1)
@test iszero(@inferred(skce(predictions, repeat([2, 2], 10))))
end
# probabilities
skce = SKCE(
(SqExponentialKernel() ∘ ScaleTransform(sqrt(2))) ⊗ WhiteKernel(); blocksize=2
)
@test iszero(@inferred(skce([1, 0], [true, false])))
@test iszero(@inferred(skce([1, 0], [true, true])))
@test @inferred(skce([1, 0], [false, true])) ≈ -2 * exp(-1)
@test iszero(@inferred(skce([1, 0], [false, false])))
# two predictions, ten times replicated
@test iszero(@inferred(skce(repeat([1, 0], 10), repeat([true, false], 10))))
@test iszero(@inferred(skce(repeat([1, 0], 10), repeat([true, true], 10))))
@test @inferred(skce(repeat([1, 0], 10), repeat([false, true], 10))) ≈ -2 * exp(-1)
@test iszero(@inferred(skce(repeat([1, 0], 10), repeat([false, false], 10))))
end
@testset "Block: Basic properties" begin
nsamples = 20
kernel = (ExponentialKernel() ∘ ScaleTransform(0.1)) ⊗ WhiteKernel()
skce = SKCE(kernel)
blockskce = SKCE(kernel; blocksize=2)
blockskce_all = SKCE(kernel; blocksize=nsamples)
estimates = Vector{Float64}(undef, 1_000)
# categorical distributions
for nclasses in (2, 10, 100)
dist = Dirichlet(nclasses, 1.0)
predictions = [Vector{Float64}(undef, nclasses) for _ in 1:nsamples]
targets = Vector{Int}(undef, nsamples)
for i in 1:length(estimates)
rand!.(Ref(dist), predictions)
targets .= rand.(Categorical.(predictions))
estimates[i] = blockskce(predictions, targets)
# consistency checks
@test estimates[i] ≈ mean(
skce(predictions[(2 * i - 1):(2 * i)], targets[(2 * i - 1):(2 * i)]) for
i in 1:(nsamples ÷ 2)
)
@test skce(predictions, targets) == blockskce_all(predictions, targets)
end
@test any(x -> x > zero(x), estimates)
@test any(x -> x < zero(x), estimates)
@test mean(estimates) ≈ 0 atol = 5e-3
end
# probabilities
predictions = Vector{Float64}(undef, nsamples)
targets = Vector{Bool}(undef, nsamples)
for i in 1:length(estimates)
rand!(predictions)
map!(targets, predictions) do p
return rand() < p
end
estimates[i] = blockskce(predictions, targets)
# consistency checks
@test estimates[i] ≈ mean(
skce(predictions[(2 * i - 1):(2 * i)], targets[(2 * i - 1):(2 * i)]) for
i in 1:(nsamples ÷ 2)
)
@test skce(predictions, targets) == blockskce_all(predictions, targets)
end
@test any(x -> x > zero(x), estimates)
@test any(x -> x < zero(x), estimates)
@test mean(estimates) ≈ 0 atol = 5e-3
end
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
["MIT"] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 5728 |
@testset "ucme.jl" begin
@testset "UCME: Binary examples" begin
# categorical distributions
ucme = UCME(
SqExponentialKernel() ⊗ WhiteKernel(),
[[1.0, 0], [0.5, 0.5], [0.0, 1]],
[1, 1, 2],
)
for predictions in ([[1, 0], [0, 1]], ColVecs([1 0; 0 1]), RowVecs([1 0; 0 1]))
@test iszero(@inferred(ucme(predictions, [1, 2])))
@test @inferred(ucme(predictions, [1, 1])) ≈ (exp(-2) + exp(-0.5) + 1) / 12
@test @inferred(ucme(predictions, [2, 1])) ≈ (1 - exp(-1))^2 / 6
@test @inferred(ucme(predictions, [2, 2])) ≈ (exp(-2) + exp(-0.5) + 1) / 12
end
# probabilities
ucme = UCME(
(SqExponentialKernel() ∘ ScaleTransform(sqrt(2))) ⊗ WhiteKernel(),
[1.0, 0.5, 0.0],
[true, true, false],
)
@test iszero(@inferred(ucme([1, 0], [true, false])))
@test @inferred(ucme([1, 0], [true, true])) ≈ (exp(-2) + exp(-0.5) + 1) / 12
@test @inferred(ucme([1, 0], [false, true])) ≈ (1 - exp(-1))^2 / 6
@test @inferred(ucme([1, 0], [false, false])) ≈ (exp(-2) + exp(-0.5) + 1) / 12
end
@testset "UCME: Basic properties" begin
estimates = Vector{Float64}(undef, 1_000)
for ntest in (1, 5, 10)
# categorical distributions
for nclasses in (2, 10, 100)
dist = Dirichlet(nclasses, 1.0)
testpredictions = [rand(dist) for _ in 1:ntest]
testtargets = rand(1:nclasses, ntest)
ucme = UCME(
(ExponentialKernel() ∘ ScaleTransform(0.1)) ⊗ WhiteKernel(),
testpredictions,
testtargets,
)
predictions = [Vector{Float64}(undef, nclasses) for _ in 1:20]
targets = Vector{Int}(undef, 20)
for i in 1:length(estimates)
rand!.(Ref(dist), predictions)
targets .= rand.(Categorical.(predictions))
estimates[i] = ucme(predictions, targets)
end
@test all(x > zero(x) for x in estimates)
end
# probabilities
testpredictions = rand(ntest)
testtargets = rand(Bool, ntest)
ucme = UCME(
(ExponentialKernel() ∘ ScaleTransform(0.1)) ⊗ WhiteKernel(),
testpredictions,
testtargets,
)
predictions = Vector{Float64}(undef, 20)
targets = Vector{Bool}(undef, 20)
for i in 1:length(estimates)
rand!(predictions)
map!(targets, predictions) do p
return rand() < p
end
estimates[i] = ucme(predictions, targets)
end
@test all(x > zero(x) for x in estimates)
end
end
# alternative implementation of white kernel
struct WhiteKernel2 <: Kernel end
(::WhiteKernel2)(x, y) = x == y
# alternative implementation of a tensor product kernel
struct TensorProduct2{K1<:Kernel,K2<:Kernel} <: Kernel
kernel1::K1
kernel2::K2
end
function (kernel::TensorProduct2)((x1, x2), (y1, y2))
return kernel.kernel1(x1, y1) * kernel.kernel2(x2, y2)
end
@testset "binary classification" begin
# probabilities and corresponding full categorical distribution
p, testp = rand(2)
pfull = [p, 1 - p]
testpfull = [testp, 1 - testp]
# kernel for probabilities and corresponding one for full categorical distributions
scale = rand()
kernel = SqExponentialKernel() ∘ ScaleTransform(scale)
kernelfull = SqExponentialKernel() ∘ ScaleTransform(scale / sqrt(2))
# for different targets
for y in (true, false), testy in (true, false)
# check values for probabilities
val = unsafe_ucme_eval(kernel ⊗ WhiteKernel(), p, y, testp, testy)
@test unsafe_ucme_eval(kernel ⊗ WhiteKernel2(), p, y, testp, testy) ≈ val
@test unsafe_ucme_eval(
TensorProduct2(kernel, WhiteKernel()), p, y, testp, testy
) ≈ val
@test unsafe_ucme_eval(
TensorProduct2(kernel, WhiteKernel2()), p, y, testp, testy
) ≈ val
# check values for categorical distributions
yint = 2 - y
testyint = 2 - testy
@test unsafe_ucme_eval(
kernelfull ⊗ WhiteKernel(), pfull, yint, testpfull, testyint
) ≈ val
@test unsafe_ucme_eval(
kernelfull ⊗ WhiteKernel2(), pfull, yint, testpfull, testyint
) ≈ val
@test unsafe_ucme_eval(
TensorProduct2(kernelfull, WhiteKernel()), pfull, yint, testpfull, testyint
) ≈ val
@test unsafe_ucme_eval(
TensorProduct2(kernelfull, WhiteKernel2()), pfull, yint, testpfull, testyint
) ≈ val
end
end
@testset "multi-class classification" begin
n = 10
p = rand(n)
p ./= sum(p)
y = rand(1:n)
testp = rand(n)
testp ./= sum(testp)
testy = rand(1:n)
kernel = SqExponentialKernel() ∘ ScaleTransform(rand())
val = unsafe_ucme_eval(kernel ⊗ WhiteKernel(), p, y, testp, testy)
@test unsafe_ucme_eval(kernel ⊗ WhiteKernel2(), p, y, testp, testy) ≈ val
@test unsafe_ucme_eval(TensorProduct2(kernel, WhiteKernel()), p, y, testp, testy) ≈
val
@test unsafe_ucme_eval(TensorProduct2(kernel, WhiteKernel2()), p, y, testp, testy) ≈
val
end
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
|
[
"MIT"
] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 1971 | @testset "generic.jl" begin
@testset "Simple example" begin
# sample predictions and outcomes
nsamples = 1_000
predictions = rand(nsamples)
outcomes = rand(Bool, nsamples)
# create bin with all predictions and outcomes
bin = CalibrationErrors.Bin(predictions, outcomes)
# check statistics
@test bin.nsamples == nsamples
@test bin.mean_predictions ≈ mean(predictions)
@test bin.proportions_targets == mean(outcomes)
# compare with adding data
bin2 = CalibrationErrors.Bin(predictions[1], outcomes[1])
for i in 2:nsamples
CalibrationErrors.adddata!(bin2, predictions[i], outcomes[i])
end
@test bin2.nsamples == bin.nsamples
@test bin2.mean_predictions ≈ bin.mean_predictions
@test bin2.proportions_targets ≈ bin.proportions_targets
end
@testset "Simple example ($nclasses classes)" for nclasses in (2, 10, 100)
# sample predictions and targets
nsamples = 1_000
dist = Dirichlet(nclasses, 1.0)
predictions = [rand(dist) for _ in 1:nsamples]
targets = rand(1:nclasses, nsamples)
# create bin with all predictions and targets
bin = CalibrationErrors.Bin(predictions, targets)
# check statistics
@test bin.nsamples == nsamples
@test bin.mean_predictions ≈ mean(predictions)
@test bin.proportions_targets == proportions(targets, nclasses)
@test sum(bin.mean_predictions) ≈ 1
@test sum(bin.proportions_targets) ≈ 1
# compare with adding data
bin2 = CalibrationErrors.Bin(predictions[1], targets[1])
for i in 2:nsamples
CalibrationErrors.adddata!(bin2, predictions[i], targets[i])
end
@test bin2.nsamples == bin.nsamples
@test bin2.mean_predictions ≈ bin.mean_predictions
@test bin2.proportions_targets ≈ bin.proportions_targets
end
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
|
[
"MIT"
] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 4864 | @testset "medianvariance.jl" begin
@testset "Constructors" begin
@test_throws ErrorException MedianVarianceBinning(-1)
@test_throws ErrorException MedianVarianceBinning(0)
@test_throws ErrorException MedianVarianceBinning(10, -1)
@test_throws ErrorException MedianVarianceBinning(10, 0)
end
@testset "Basic tests ($nclasses classes)" for nclasses in (2, 10, 100)
nsamples = 1_000
dist = Dirichlet(nclasses, 1.0)
predictions = [rand(dist) for _ in 1:nsamples]
targets = rand(1:nclasses, nsamples)
# set minimum number of samples
for minsize in (1, 10, 100, 500, 1_000)
bins = @inferred(
CalibrationErrors.perform(
MedianVarianceBinning(minsize), predictions, targets
)
)
@test all(bin -> bin.nsamples ≥ minsize, bins)
@test all(bin -> sum(bin.mean_predictions) ≈ 1, bins)
@test all(bin -> sum(bin.proportions_targets) ≈ 1, bins)
@test sum(bin -> bin.nsamples, bins) == nsamples
@test sum(bin -> bin.nsamples .* bin.mean_predictions, bins) ≈ sum(predictions)
@test sum(bin -> bin.nsamples .* bin.proportions_targets, bins) ≈
counts(targets, nclasses)
end
# set maximum number of bins
for maxbins in (1, 10, 100, 500, 1_000)
bins = @inferred(
CalibrationErrors.perform(
MedianVarianceBinning(1, maxbins), predictions, targets
)
)
@test length(bins) ≤ maxbins
@test all(bin -> bin.nsamples ≥ 1, bins)
@test all(bin -> sum(bin.mean_predictions) ≈ 1, bins)
@test all(bin -> sum(bin.proportions_targets) ≈ 1, bins)
@test sum(bin -> bin.nsamples, bins) == nsamples
@test sum(bin -> bin.nsamples .* bin.mean_predictions, bins) ≈ sum(predictions)
@test sum(bin -> bin.nsamples .* bin.proportions_targets, bins) ≈
counts(targets, nclasses)
end
end
@testset "Simple example" begin
predictions = [[0.4, 0.1, 0.5], [0.5, 0.3, 0.2], [0.3, 0.7, 0.0]]
targets = [1, 2, 3]
# maximum possible steps in the order they might occur:
# first step: [1], [2, 3] -> create bin with [1]
# second step: [2], [3] -> create bins with [2] and [3]
bins = CalibrationErrors.perform(MedianVarianceBinning(1), predictions, targets)
@test length(bins) == 3
@test all(bin -> bin.nsamples == 1, bins)
for (i, idx) in enumerate((1, 2, 3))
@test bins[i].mean_predictions == predictions[idx]
@test bins[i].proportions_targets == Matrix{Float64}(I, 3, 3)[:, targets[idx]]
end
bins = CalibrationErrors.perform(MedianVarianceBinning(2), predictions, targets)
@test length(bins) == 1
@test bins[1].nsamples == 3
@test bins[1].mean_predictions == mean(predictions)
@test bins[1].proportions_targets == [1 / 3, 1 / 3, 1 / 3]
predictions = [
[0.4, 0.1, 0.5],
[0.5, 0.3, 0.2],
[0.3, 0.7, 0.0],
[0.1, 0.0, 0.9],
[0.8, 0.1, 0.1],
]
targets = [1, 2, 3, 1, 2]
# maximum possible steps in the order they might occur:
# first step: [3, 5], [1, 2, 4]
# second step: [5], [3], [1, 2, 4] -> create bins with [5] and [3]
# third step: [2], [1, 4] -> create bin with [2]
# fourth step: [1], [4] -> create bins with [1] and [4]
bins = CalibrationErrors.perform(MedianVarianceBinning(1), predictions, targets)
@test length(bins) == 5
@test all(bin -> bin.nsamples == 1, bins)
for (i, idx) in enumerate((5, 3, 2, 1, 4))
@test bins[i].mean_predictions == predictions[idx]
@test bins[i].proportions_targets == Matrix{Float64}(I, 3, 3)[:, targets[idx]]
end
bins = CalibrationErrors.perform(MedianVarianceBinning(2), predictions, targets)
@test length(bins) == 2
@test all(bin -> sum(bin.mean_predictions) ≈ 1, bins)
@test all(bin -> sum(bin.proportions_targets) ≈ 1, bins)
for (i, idxs) in enumerate(([3, 5], [1, 2, 4]))
@test bins[i].nsamples == length(idxs)
@test bins[i].mean_predictions ≈ mean(predictions[idxs])
@test bins[i].proportions_targets ==
vec(mean(Matrix{Float64}(I, 3, 3)[:, targets[idxs]]; dims=2))
end
bins = CalibrationErrors.perform(MedianVarianceBinning(3), predictions, targets)
@test length(bins) == 1
@test bins[1].nsamples == 5
@test bins[1].mean_predictions == mean(predictions)
@test bins[1].proportions_targets == [0.4, 0.4, 0.2]
end
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
|
[
"MIT"
] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 6011 | @testset "uniform.jl" begin
@testset "Constructor" begin
@test_throws ErrorException UniformBinning(-1)
@test_throws ErrorException UniformBinning(0)
end
@testset "Binning indices" begin
# scalars
@test_throws ArgumentError CalibrationErrors.binindex(-0.5, 10)
@test CalibrationErrors.binindex(0, 10) == 1
@test CalibrationErrors.binindex(0.1, 10) == 1
@test CalibrationErrors.binindex(0.45, 10) == 5
@test CalibrationErrors.binindex(1, 10) == 10
@test_throws ArgumentError CalibrationErrors.binindex(1.5, 10)
# vectors
@test_throws ArgumentError CalibrationErrors.binindex([-0.5, 0.5], 10, Val(2))
@test @inferred(CalibrationErrors.binindex([0, 0], 10, Val(2))) == (1, 1)
@test @inferred(CalibrationErrors.binindex([0.1, 0], 10, Val(2))) == (1, 1)
@test @inferred(CalibrationErrors.binindex([0.45, 0.55], 10, Val(2))) == (5, 6)
@test @inferred(CalibrationErrors.binindex([1, 1], 10, Val(2))) == (10, 10)
@test_throws ArgumentError CalibrationErrors.binindex([1.5, 0.5], 10, Val(2))
end
@testset "Basic tests" begin
# sample predictions and targets
nsamples = 1_000
predictions = rand(nsamples)
targets = rand(Bool, nsamples)
for nbins in (1, 10, 100, 500, 1_000)
# bin data in bins of uniform width
bins = @inferred(
CalibrationErrors.perform(UniformBinning(nbins), predictions, targets)
)
# check all bins
for bin in bins
# compute index of bin from average prediction
idx = CalibrationErrors.binindex(bin.mean_predictions, nbins)
# compute indices of all predictions in the same bin
idxs = filter(
i -> idx == CalibrationErrors.binindex(predictions[i], nbins),
1:nsamples,
)
@test bin.nsamples == length(idxs)
@test bin.mean_predictions ≈ mean(predictions[idxs])
@test bin.proportions_targets ≈ mean(targets[idxs])
end
end
end
@testset "Basic tests ($nclasses classes)" for nclasses in (2, 10, 100)
# sample predictions and targets
nsamples = 1_000
dist = Dirichlet(nclasses, 1.0)
predictions = [rand(dist) for _ in 1:nsamples]
targets = rand(1:nclasses, nsamples)
for nbins in (1, 10, 100, 500, 1_000)
# bin data in bins of uniform width
bins = @inferred(
CalibrationErrors.perform(UniformBinning(nbins), predictions, targets)
)
# check all bins
for bin in bins
# compute index of bin from average prediction
idx = CalibrationErrors.binindex(bin.mean_predictions, nbins, Val(nclasses))
# compute indices of all predictions in the same bin
idxs = filter(
i ->
idx ==
CalibrationErrors.binindex(predictions[i], nbins, Val(nclasses)),
1:nsamples,
)
@test bin.nsamples == length(idxs)
@test bin.mean_predictions ≈ mean(predictions[idxs])
@test bin.proportions_targets ≈ proportions(targets[idxs], 1:nclasses)
end
end
end
@testset "Simple example" begin
predictions = [[0.4, 0.1, 0.5], [0.5, 0.3, 0.2], [0.3, 0.7, 0.0]]
targets = [1, 2, 3]
bins = CalibrationErrors.perform(UniformBinning(2), predictions, targets)
@test length(bins) == 2
sort!(bins; by=x -> x.nsamples)
@test all(bin -> sum(bin.mean_predictions) == 1, bins)
@test all(bin -> sum(bin.proportions_targets) == 1, bins)
for (i, idxs) in enumerate(([3], [1, 2]))
@test bins[i].nsamples == length(idxs)
@test bins[i].mean_predictions == mean(predictions[idxs])
@test bins[i].proportions_targets ==
vec(mean(Matrix{Float64}(I, 3, 3)[:, targets[idxs]]; dims=2))
end
bins = CalibrationErrors.perform(UniformBinning(1), predictions, targets)
@test length(bins) == 1
@test bins[1].nsamples == 3
@test bins[1].mean_predictions ≈ mean(predictions)
@test bins[1].proportions_targets ≈ [1 / 3, 1 / 3, 1 / 3]
predictions = [
[0.4, 0.1, 0.5],
[0.5, 0.3, 0.2],
[0.3, 0.7, 0.0],
[0.1, 0.0, 0.9],
[0.8, 0.1, 0.1],
]
targets = [1, 2, 3, 1, 2]
bins = CalibrationErrors.perform(UniformBinning(3), predictions, targets)
sort!(bins; by=x -> x.mean_predictions[1])
@test length(bins) == 5
@test all(bin -> bin.nsamples == 1, bins)
for (i, idx) in enumerate((4, 3, 1, 2, 5))
@test bins[i].mean_predictions == predictions[idx]
@test bins[i].proportions_targets == Matrix{Float64}(I, 3, 3)[:, targets[idx]]
end
bins = CalibrationErrors.perform(UniformBinning(2), predictions, targets)
sort!(bins; by=x -> x.mean_predictions[1])
@test length(bins) == 4
@test all(bin -> sum(bin.mean_predictions) == 1, bins)
@test all(bin -> sum(bin.proportions_targets) == 1, bins)
for (i, idxs) in enumerate(([4], [3], [1, 2], [5]))
@test bins[i].nsamples == length(idxs)
@test bins[i].mean_predictions == mean(predictions[idxs])
@test bins[i].proportions_targets ==
vec(mean(Matrix{Float64}(I, 3, 3)[:, targets[idxs]]; dims=2))
end
bins = CalibrationErrors.perform(UniformBinning(1), predictions, targets)
@test length(bins) == 1
@test bins[1].nsamples == 5
@test bins[1].mean_predictions ≈ mean(predictions)
@test bins[1].proportions_targets == [0.4, 0.4, 0.2]
end
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
|
[
"MIT"
] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 6997 | @testset "wasserstein.jl" begin
@testset "SqWasserstein" begin
μ1, μ2 = randn(2)
σ1, σ2 = rand(2)
normal1 = Normal(μ1, σ1)
normal2 = Normal(μ2, σ2)
@test iszero(SqWasserstein()(normal1, normal1))
@test iszero(SqWasserstein()(normal2, normal2))
@test SqWasserstein()(normal1, normal2) == (μ1 - μ2)^2 + (σ1 - σ2)^2
for (d1, d2) in Iterators.product((normal1, normal2), (normal1, normal2))
mvnormal1 = MvNormal([mean(d1)], fill(var(d1), 1, 1))
mvnormal2 = MvNormal([mean(d2)], fill(var(d2), 1, 1))
@test SqWasserstein()(mvnormal1, mvnormal2) == SqWasserstein()(d1, d2)
mvnormal_fill1 = MvNormal(fill(mean(d1), 10), Diagonal(fill(var(d1), 10)))
mvnormal_fill2 = MvNormal(fill(mean(d2), 10), Diagonal(fill(var(d2), 10)))
@test SqWasserstein()(mvnormal_fill1, mvnormal_fill2) ≈
10 * SqWasserstein()(d1, d2)
end
laplace1 = Laplace(μ1, σ1)
laplace2 = Laplace(μ2, σ2)
@test iszero(SqWasserstein()(laplace1, laplace1))
@test iszero(SqWasserstein()(laplace2, laplace2))
@test SqWasserstein()(laplace1, laplace2) == (μ1 - μ2)^2 + 2 * (σ1 - σ2)^2
# pairwise computations
for (m, n) in ((1, 10), (10, 1), (10, 10))
dists1 = [Normal(randn(), rand()) for _ in 1:m]
dists2 = [Normal(randn(), rand()) for _ in 1:n]
# compute distance matrix
distmat = [SqWasserstein()(x, y) for x in dists1, y in dists2]
# out-of-place
@test pairwise(SqWasserstein(), dists1, dists2) ≈ distmat
# in-place
z = similar(distmat)
pairwise!(z, SqWasserstein(), dists1, dists2)
@test z ≈ distmat
end
end
@testset "Wasserstein" begin
μ1, μ2 = randn(2)
σ1, σ2 = rand(2)
normal1 = Normal(μ1, σ1)
normal2 = Normal(μ2, σ2)
@test iszero(Wasserstein()(normal1, normal1))
@test iszero(Wasserstein()(normal2, normal2))
@test Wasserstein()(normal1, normal2) == sqrt(SqWasserstein()(normal1, normal2))
for (d1, d2) in Iterators.product((normal1, normal2), (normal1, normal2))
mvnormal1 = MvNormal([mean(d1)], fill(var(d1), 1, 1))
mvnormal2 = MvNormal([mean(d2)], fill(var(d2), 1, 1))
@test Wasserstein()(mvnormal1, mvnormal2) == Wasserstein()(d1, d2)
@test Wasserstein()(mvnormal1, mvnormal2) == sqrt(SqWasserstein()(d1, d2))
mvnormal_fill1 = MvNormal(fill(mean(d1), 10), Diagonal(fill(var(d1), 10)))
mvnormal_fill2 = MvNormal(fill(mean(d2), 10), Diagonal(fill(var(d2), 10)))
@test Wasserstein()(mvnormal_fill1, mvnormal_fill2) ≈
sqrt(10) * Wasserstein()(d1, d2)
@test Wasserstein()(mvnormal_fill1, mvnormal_fill2) ==
sqrt(SqWasserstein()(mvnormal_fill1, mvnormal_fill2))
end
laplace1 = Laplace(μ1, σ1)
laplace2 = Laplace(μ2, σ2)
@test iszero(Wasserstein()(laplace1, laplace1))
@test iszero(Wasserstein()(laplace2, laplace2))
@test Wasserstein()(laplace1, laplace2) == sqrt(SqWasserstein()(laplace1, laplace2))
# pairwise computations
for (m, n) in ((1, 10), (10, 1), (10, 10))
dists1 = [Normal(randn(), rand()) for _ in 1:m]
dists2 = [Normal(randn(), rand()) for _ in 1:n]
# compute distance matrix
distmat = [Wasserstein()(x, y) for x in dists1, y in dists2]
# out-of-place
@test pairwise(Wasserstein(), dists1, dists2) ≈ distmat
# in-place
z = similar(distmat)
pairwise!(z, Wasserstein(), dists1, dists2)
@test z ≈ distmat
end
end
@testset "SqMixtureWasserstein" begin
for T in (Normal, Laplace)
mixture1 = MixtureModel(T, [(randn(), rand())], [1.0])
mixture2 = MixtureModel(T, [(randn(), rand())], [1.0])
@test SqMixtureWasserstein()(mixture1, mixture2) ≈
SqWasserstein()(first(components(mixture1)), first(components(mixture2)))
mixture1 = MixtureModel(T, [(randn(), rand()), (randn(), rand())], [1.0, 0.0])
mixture2 = MixtureModel(T, [(randn(), rand()), (randn(), rand())], [0.0, 1.0])
@test SqMixtureWasserstein()(mixture1, mixture2) ≈
SqWasserstein()(first(components(mixture1)), last(components(mixture2)))
mixture1 = MixtureModel(T, fill((randn(), rand()), 10))
mixture2 = MixtureModel(T, fill((randn(), rand()), 10))
@test SqMixtureWasserstein()(mixture1, mixture2) ≈
SqWasserstein()(first(components(mixture1)), first(components(mixture2))) rtol =
10 * sqrt(eps())
mixture1 = MixtureModel(T, fill((randn(), rand()), 10))
mixture2 = MixtureModel(T, [(randn(), rand())])
@test SqMixtureWasserstein()(mixture1, mixture2) ≈
SqWasserstein()(first(components(mixture1)), first(components(mixture2)))
end
end
@testset "MixtureWasserstein" begin
for T in (Normal, Laplace)
mixture1 = MixtureModel(T, [(randn(), rand())], [1.0])
mixture2 = MixtureModel(T, [(randn(), rand())], [1.0])
@test MixtureWasserstein()(mixture1, mixture2) ≈
Wasserstein()(first(components(mixture1)), first(components(mixture2)))
@test MixtureWasserstein()(mixture1, mixture2) ≈
sqrt(SqMixtureWasserstein()(mixture1, mixture2))
mixture1 = MixtureModel(T, [(randn(), rand()), (randn(), rand())], [1.0, 0.0])
mixture2 = MixtureModel(T, [(randn(), rand()), (randn(), rand())], [0.0, 1.0])
@test MixtureWasserstein()(mixture1, mixture2) ≈
Wasserstein()(first(components(mixture1)), last(components(mixture2)))
@test MixtureWasserstein()(mixture1, mixture2) ≈
sqrt(SqMixtureWasserstein()(mixture1, mixture2))
mixture1 = MixtureModel(T, fill((randn(), rand()), 10))
mixture2 = MixtureModel(T, fill((randn(), rand()), 10))
@test MixtureWasserstein()(mixture1, mixture2) ≈
Wasserstein()(first(components(mixture1)), first(components(mixture2))) rtol =
10 * sqrt(eps())
@test MixtureWasserstein()(mixture1, mixture2) ≈
sqrt(SqMixtureWasserstein()(mixture1, mixture2))
mixture1 = MixtureModel(T, fill((randn(), rand()), 10))
mixture2 = MixtureModel(T, [(randn(), rand())])
@test MixtureWasserstein()(mixture1, mixture2) ≈
Wasserstein()(first(components(mixture1)), first(components(mixture2)))
@test MixtureWasserstein()(mixture1, mixture2) ≈
sqrt(SqMixtureWasserstein()(mixture1, mixture2))
end
end
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
|
[
"MIT"
] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 6458 | @testset "mvnormal.jl" begin
@testset "consistency with Normal" begin
nsamples = 1_000
ntestsamples = 5
# create predictions
predictions_μ = randn(nsamples)
predictions_σ = rand(nsamples)
predictions_normal = map(Normal, predictions_μ, predictions_σ)
predictions_mvnormal = map(predictions_μ, predictions_σ) do μ, σ
MvNormal([μ], σ^2 * I)
end
# create targets
targets_normal = randn(nsamples)
targets_mvnormal = map(vcat, targets_normal)
# create test locations
testpredictions_μ = randn(ntestsamples)
testpredictions_σ = rand(ntestsamples)
testpredictions_normal = map(Normal, testpredictions_μ, testpredictions_σ)
testpredictions_mvnormal = map(testpredictions_μ, testpredictions_σ) do μ, σ
MvNormal([μ], σ^2 * I)
end
testtargets_normal = randn(ntestsamples)
testtargets_mvnormal = map(vcat, testtargets_normal)
for kernel in (
ExponentialKernel(; metric=Wasserstein()) ⊗ SqExponentialKernel(),
ExponentialKernel(; metric=Wasserstein()) ⊗
(SqExponentialKernel() ∘ ScaleTransform(rand())),
ExponentialKernel(; metric=Wasserstein()) ⊗
(SqExponentialKernel() ∘ ARDTransform([rand()])),
)
for estimator in
(SKCE(kernel), SKCE(kernel; unbiased=false), SKCE(kernel; blocksize=5))
skce_mvnormal = estimator(predictions_mvnormal, targets_mvnormal)
skce_normal = estimator(predictions_normal, targets_normal)
@test skce_mvnormal ≈ skce_normal
end
ucme_mvnormal = UCME(kernel, testpredictions_mvnormal, testtargets_mvnormal)(
predictions_mvnormal, targets_mvnormal
)
ucme_normal = UCME(kernel, testpredictions_normal, testtargets_normal)(
predictions_normal, targets_normal
)
@test ucme_mvnormal ≈ ucme_normal
end
end
@testset "kernels with input transformations" begin
nsamples = 100
ntestsamples = 5
for dim in (1, 10)
# create predictions and targets
predictions = [MvNormal(randn(dim), rand() * I) for _ in 1:nsamples]
targets = [randn(dim) for _ in 1:nsamples]
# create random test locations
testpredictions = [MvNormal(randn(dim), rand() * I) for _ in 1:ntestsamples]
testtargets = [randn(dim) for _ in 1:ntestsamples]
for γ in (1.0, rand())
kernel1 =
ExponentialKernel(; metric=Wasserstein()) ⊗
(SqExponentialKernel() ∘ ScaleTransform(γ))
kernel2 =
ExponentialKernel(; metric=Wasserstein()) ⊗
(SqExponentialKernel() ∘ ARDTransform(fill(γ, dim)))
kernel3 =
ExponentialKernel(; metric=Wasserstein()) ⊗
(SqExponentialKernel() ∘ LinearTransform(diagm(fill(γ, dim))))
# check evaluation of the first two observations
p1 = predictions[1]
p2 = predictions[2]
t1 = targets[1]
t2 = targets[2]
for f in (
CalibrationErrors.unsafe_skce_eval_targets,
CalibrationErrors.unsafe_ucme_eval_targets,
)
out1 = f(kernel1.kernels[2], p1, t1, p2, t2)
out2 = f(kernel2.kernels[2], p1, t1, p2, t2)
out3 = f(kernel3.kernels[2], p1, t1, p2, t2)
@test out2 ≈ out1
@test out3 ≈ out1
if isone(γ)
@test f(SqExponentialKernel(), p1, t1, p2, t2) ≈ out1
end
end
# check estimates
for estimator in (SKCE, x -> UCME(x, testpredictions, testtargets))
estimate1 = estimator(kernel1)(predictions, targets)
estimate2 = estimator(kernel2)(predictions, targets)
estimate3 = estimator(kernel3)(predictions, targets)
@test estimate2 ≈ estimate1
@test estimate3 ≈ estimate1
if isone(γ)
@test estimator(
ExponentialKernel(; metric=Wasserstein()) ⊗
SqExponentialKernel(),
)(
predictions, targets
) ≈ estimate1
end
end
end
end
end
@testset "apply" begin
dim = 10
μ = randn(dim)
A = randn(dim, dim)
for d in (
MvNormal(μ, rand() * I),
MvNormal(μ, Diagonal(rand(dim))),
MvNormal(μ, Symmetric(I + A' * A)),
)
# unscaled transformation
for t in (
ScaleTransform(1.0),
ARDTransform(ones(dim)),
LinearTransform(Diagonal(ones(dim))),
)
out = CalibrationErrors.apply(t, d)
@test mean(out) ≈ mean(d)
@test cov(out) ≈ cov(d)
end
# scaling
scale = rand()
d_scaled = diagm(fill(scale, dim)) * d
for t in (
ScaleTransform(scale),
ARDTransform(fill(scale, dim)),
LinearTransform(Diagonal(fill(scale, dim))),
)
out = CalibrationErrors.apply(t, d)
@test mean(out) ≈ mean(d_scaled)
@test cov(out) ≈ cov(d_scaled)
end
end
end
@testset "scale_cov" begin
dim = 10
v = rand(dim)
γ = rand()
X = diagm(γ .* v .^ 2)
for A in (ScalMat(dim, γ), PDiagMat(fill(γ, dim)), PDMat(diagm(fill(γ, dim))))
Y = CalibrationErrors.scale_cov(A, v)
@test Matrix(Y) ≈ X
end
end
@testset "invquad_diff" begin
dim = 10
x = rand(dim)
y = rand(dim)
γ = rand()
u = sum(abs2, x - y) / γ
for A in (ScalMat(dim, γ), PDiagMat(fill(γ, dim)), PDMat(diagm(fill(γ, dim))))
v = CalibrationErrors.invquad_diff(A, x, y)
@test v ≈ u
end
end
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
|
[
"MIT"
] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | code | 5249 | @testset "normal.jl" begin
@testset "SKCE: basic example" begin
skce = SKCE(ExponentialKernel(; metric=Wasserstein()) ⊗ SqExponentialKernel())
# only two predictions, i.e., one term in the estimator
normal1 = Normal(0, 1)
normal2 = Normal(1, 2)
@test @inferred(skce([normal1, normal1], [0, 0])) ≈ 1 - sqrt(2) + 1 / sqrt(3)
@test @inferred(skce([normal1, normal2], [1, 0])) ≈
exp(-sqrt(2)) *
(exp(-1 / 2) - 1 / sqrt(2) - 1 / sqrt(5) + exp(-1 / 12) / sqrt(6))
@test @inferred(skce([normal1, normal2], [0, 1])) ≈
exp(-sqrt(2)) * (
exp(-1 / 2) - exp(-1 / 4) / sqrt(2) - exp(-1 / 10) / sqrt(5) +
exp(-1 / 12) / sqrt(6)
)
end
@testset "SKCE: basic example (transformed)" begin
skce = SKCE(
ExponentialKernel(; metric=Wasserstein()) ⊗
(SqExponentialKernel() ∘ ScaleTransform(0.5)),
)
# only two predictions, i.e., one term in the estimator
normal1 = Normal(0, 1)
normal2 = Normal(1, 2)
@test @inferred(skce([normal1, normal1], [0, 0])) ≈
1 - 2 / sqrt(1.25) + 1 / sqrt(1.5)
@test @inferred(skce([normal1, normal2], [1, 0])) ≈
exp(-sqrt(2)) *
(exp(-1 / 8) - 1 / sqrt(1.25) - 1 / sqrt(2) + exp(-1 / 18) / sqrt(2.25))
@test @inferred(skce([normal1, normal2], [0, 1])) ≈
exp(-sqrt(2)) * (
exp(-1 / 8) - exp(-1 / 10) / sqrt(1.25) - exp(-1 / 16) / sqrt(2) +
exp(-1 / 18) / sqrt(2.25)
)
end
@testset "SKCE: basic properties" begin
skce = SKCE(ExponentialKernel(; metric=Wasserstein()) ⊗ SqExponentialKernel())
estimates = map(1:10_000) do _
predictions = map(Normal, randn(20), rand(20))
targets = map(rand, predictions)
return skce(predictions, targets)
end
@test any(x -> x > zero(x), estimates)
@test any(x -> x < zero(x), estimates)
@test mean(estimates) ≈ 0 atol = 1e-4
end
@testset "UCME: basic example" begin
# one test location
ucme = UCME(
ExponentialKernel(; metric=Wasserstein()) ⊗ SqExponentialKernel(),
[Normal(0.5, 0.5)],
[1],
)
# two predictions
normal1 = Normal(0, 1)
normal2 = Normal(1, 2)
@test @inferred(ucme([normal1, normal2], [0, 0.5])) ≈
(
exp(-1 / sqrt(2)) * (exp(-1 / 2) - exp(-1 / 4) / sqrt(2)) +
exp(-sqrt(5 / 2)) * (exp(-1 / 8) - 1 / sqrt(5))
)^2 / 4
# two test locations
ucme = UCME(
ExponentialKernel(; metric=Wasserstein()) ⊗ SqExponentialKernel(),
[Normal(0.5, 0.5), Normal(-1, 1.5)],
[1, -0.5],
)
@test @inferred(ucme([normal1, normal2], [0, 0.5])) ≈
(
(
exp(-1 / sqrt(2)) * (exp(-1 / 2) - exp(-1 / 4) / sqrt(2)) +
exp(-sqrt(5 / 2)) * (exp(-1 / 8) - 1 / sqrt(5))
)^2 +
(
exp(-sqrt(5) / 2) * (exp(-1 / 8) - exp(-1 / 16) / sqrt(2)) +
exp(-sqrt(17) / 2) * (exp(-1 / 2) - exp(-9 / 40) / sqrt(5))
)^2
) / 8
end
@testset "kernels with input transformations" begin
nsamples = 100
ntestsamples = 5
# create predictions and targets
predictions = map(Normal, randn(nsamples), rand(nsamples))
targets = randn(nsamples)
# create random test locations
testpredictions = map(Normal, randn(ntestsamples), rand(ntestsamples))
testtargets = randn(ntestsamples)
for γ in (1.0, rand())
kernel1 =
ExponentialKernel(; metric=Wasserstein()) ⊗
(SqExponentialKernel() ∘ ScaleTransform(γ))
kernel2 =
ExponentialKernel(; metric=Wasserstein()) ⊗
(SqExponentialKernel() ∘ ARDTransform([γ]))
# check evaluation of the first two observations
p1 = predictions[1]
p2 = predictions[2]
t1 = targets[1]
t2 = targets[2]
for f in (
CalibrationErrors.unsafe_skce_eval_targets,
CalibrationErrors.unsafe_ucme_eval_targets,
)
out1 = f(kernel1.kernels[2], p1, t1, p2, t2)
out2 = f(kernel2.kernels[2], p1, t1, p2, t2)
@test out2 ≈ out1
if isone(γ)
@test f(SqExponentialKernel(), p1, t1, p2, t2) ≈ out1
end
end
# check estimates
for estimator in (SKCE, x -> UCME(x, testpredictions, testtargets))
estimate1 = estimator(kernel1)(predictions, targets)
estimate2 = estimator(kernel2)(predictions, targets)
@test estimate2 ≈ estimate1
if isone(γ)
@test estimator(
ExponentialKernel(; metric=Wasserstein()) ⊗ SqExponentialKernel()
)(
predictions, targets
) ≈ estimate1
end
end
end
end
end
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
|
[
"MIT"
] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | docs | 4387 | # CalibrationErrors.jl
Estimation of calibration errors.
[](https://devmotion.github.io/CalibrationErrors.jl/stable)
[](https://devmotion.github.io/CalibrationErrors.jl/dev)
[](https://github.com/devmotion/CalibrationErrors.jl/actions?query=workflow%3ACI+branch%3Amain)
[](https://zenodo.org/badge/latestdoi/188981243)
[](https://codecov.io/gh/devmotion/CalibrationErrors.jl)
[](https://coveralls.io/github/devmotion/CalibrationErrors.jl?branch=main)
[](https://github.com/invenia/BlueStyle)
[](https://github.com/JuliaTesting/Aqua.jl)
**There are also [Python](https://github.com/devmotion/pycalibration) and [R](https://github.com/devmotion/rcalibration) interfaces for this package**
## Overview
This package implements different estimators of the expected calibration error
(ECE), the squared kernel calibration error (SKCE), and the
unnormalized calibration mean embedding (UCME) in the Julia language.
This package supports calibration error estimation of classification models that output vectors of class probabilities. In addition, SKCE and UCME can be estimated for more general probabilistic predictive models that output probability distributions defined in [Distributions.jl](https://github.com/JuliaStats/Distributions.jl) such as normal and Laplace distributions.
## Example
Calibration errors can be estimated from a data set of predicted probability distributions
and a set of corresponding observed targets by executing
```julia
estimator(predictions, targets)
```
The sets of predictions and targets have to be provided as vectors.
This package implements the estimator `ECE` of the ECE, the estimator `SKCE` for the SKCE
(unbiased and biased variants with different sample complexity), and `UCME` for the UCME.
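For instance, a minimal sketch of estimating the SKCE for a binary classifier could look as follows (the tensor product kernel on predictions and targets is an arbitrary choice for illustration, and the sketch assumes that the kernels from [KernelFunctions.jl](https://github.com/JuliaGaussianProcesses/KernelFunctions.jl) are in scope):

```julia
using CalibrationErrors

# predicted probabilities of the positive class and corresponding observed targets
predictions = [0.1, 0.8, 0.6, 0.3]
targets = [false, true, true, false]

# unbiased estimator of the squared kernel calibration error (SKCE)
skce = SKCE(SqExponentialKernel() ⊗ WhiteKernel())
skce(predictions, targets)
```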
## Related packages
[CalibrationTests.jl](https://github.com/devmotion/CalibrationTests.jl) implements
statistical hypothesis tests of calibration.
[pycalibration](https://github.com/devmotion/pycalibration) is a Python interface for CalibrationErrors.jl and CalibrationTests.jl.
[rcalibration](https://github.com/devmotion/rcalibration) is an R interface for CalibrationErrors.jl and CalibrationTests.jl.
## Talk at JuliaCon 2021
[](http://www.youtube.com/watch?v=PrLsXFvwzuA)
The slides of the talk are available as [Pluto notebook](https://talks.widmann.dev/2021/07/calibration/).
## Citing
If you use CalibrationErrors.jl as part of your research, teaching, or other activities, please consider citing the following publications:
Widmann, D., Lindsten, F., & Zachariah, D. (2019). [Calibration tests in multi-class
classification: A unifying framework](https://proceedings.neurips.cc/paper/2019/hash/1c336b8080f82bcc2cd2499b4c57261d-Abstract.html). In
*Advances in Neural Information Processing Systems 32 (NeurIPS 2019)* (pp. 12257–12267).
Widmann, D., Lindsten, F., & Zachariah, D. (2021).
[Calibration tests beyond classification](https://openreview.net/forum?id=-bxf89v3Nx).
*International Conference on Learning Representations (ICLR 2021)*.
## Acknowledgements
This work was financially supported by the Swedish Research Council via the projects *Learning of Large-Scale Probabilistic Dynamical Models* (contract number: 2016-04278), *Counterfactual Prediction Methods for Heterogeneous Populations* (contract number: 2018-05040), and *Handling Uncertainty in Machine Learning Systems* (contract number: 2020-04122), by the Swedish Foundation for Strategic Research via the project *Probabilistic Modeling and Inference for Machine Learning* (contract number: ICA16-0015), by the Wallenberg AI, Autonomous Systems and Software Program (WASP) funded by the Knut and Alice Wallenberg Foundation, and by ELLIIT. | CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
|
[
"MIT"
] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | docs | 2452 | # [Expected calibration error (ECE)](@id ece)
## Definition
A common calibration measure is the so-called expected calibration error (ECE).
In its most general form, the ECE with respect to distance measure $d(p, p')$
is defined[^WLZ21] as
```math
\mathrm{ECE}_d := \mathbb{E} d\big(P_X, \mathrm{law}(Y \,|\, P_X)\big).
```
As implied by its name, the ECE is the expected distance between the left and
right hand side of the calibration definition with respect to $d$.
Usually, the ECE is used to analyze classification models.[^GPSW17][^VWALRS19]
In this case, $P_X$ and $\mathrm{law}(Y \,|\, P_X)$ can be identified with vectors
in the probability simplex and $d$ can be chosen as the cityblock distance,
the total variation distance, or the squared Euclidean distance.
For other probabilistic predictive models such as regression models, one has to
choose a more general distance measure $d$ between probability distributions on the
target space since the conditional distributions $\mathrm{law}(Y \,|\, P_X)$ can be
arbitrarily complex in general.
[^GPSW17]: Guo, C., et al. (2017). [On calibration of modern neural networks](http://proceedings.mlr.press/v70/guo17a.html). In *Proceedings of the 34th International Conference on Machine Learning* (pp. 1321-1330).
[^VWALRS19]: Vaicenavicius, J., et al. (2019). [Evaluating model calibration in classification](http://proceedings.mlr.press/v89/vaicenavicius19a.html). In *Proceedings of Machine Learning Research (AISTATS 2019)* (pp. 3459-3467).
[^WLZ21]: Widmann, D., Lindsten, F., & Zachariah, D. (2021). [Calibration tests beyond classification](https://openreview.net/forum?id=-bxf89v3Nx). To be presented at *ICLR 2021*.
## Estimators
The main challenge in the estimation of the ECE is the estimation of the conditional
distribution $\mathrm{law}(Y \,|\, P_X)$ from a finite data set of predictions and
corresponding targets. Typically, predictions are binned and empirical estimates of
the conditional distributions are calculated for each bin. You can construct such
estimators with [`ECE`](@ref).
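For instance, a minimal sketch of estimating the ECE of a classification model with ten
bins of uniform size could look as follows (the distance measure is left at the
constructor's default; see the docstring below):

```julia
using CalibrationErrors

# predicted class probabilities and corresponding observed classes
predictions = [[0.9, 0.1], [0.2, 0.8], [0.7, 0.3], [0.4, 0.6]]
targets = [1, 2, 1, 1]

ece = ECE(UniformBinning(10))
ece(predictions, targets)
```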
```@docs
ECE
```
### Binning algorithms
Currently, two binning algorithms are supported. [`UniformBinning`](@ref) is a binning
scheme with bins of fixed uniform size, whereas [`MedianVarianceBinning`](@ref)
splits the validation data set of predictions and targets dynamically to reduce the
variance of the predictions.
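For instance, both algorithms could be instantiated as follows (the parameters are
arbitrary choices for illustration; see the docstrings below):

```julia
UniformBinning(10)        # bins of uniform size, 10 per dimension
MedianVarianceBinning(5)  # dynamic splitting with at least 5 samples per bin
```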
```@docs
UniformBinning
MedianVarianceBinning
```
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
|
[
"MIT"
] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | docs | 2334 | # CalibrationErrors.jl
*Estimation of calibration errors.*
A package for estimating calibration errors from data sets of predictions and targets.
## Related packages
[CalibrationTests.jl](https://github.com/devmotion/CalibrationTests.jl) implements
statistical hypothesis tests of calibration.
[pycalibration](https://github.com/devmotion/pycalibration) is a Python interface for CalibrationErrors.jl and CalibrationTests.jl.
[rcalibration](https://github.com/devmotion/rcalibration) is an R interface for CalibrationErrors.jl and CalibrationTests.jl.
## Talk at JuliaCon 2021
```@raw html
<center>
<iframe width="560" style="height:315px" src="https://www.youtube-nocookie.com/embed/PrLsXFvwzuA" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
</center>
```
The slides of the talk are available as [Pluto notebook](https://talks.widmann.dev/2021/07/calibration/).
## Citing
If you use CalibrationErrors.jl as part of your research, teaching, or other activities, please consider citing the following publications:
Widmann, D., Lindsten, F., & Zachariah, D. (2019). [Calibration tests in multi-class
classification: A unifying framework](https://proceedings.neurips.cc/paper/2019/hash/1c336b8080f82bcc2cd2499b4c57261d-Abstract.html). In
*Advances in Neural Information Processing Systems 32 (NeurIPS 2019)* (pp. 12257–12267).
Widmann, D., Lindsten, F., & Zachariah, D. (2021).
[Calibration tests beyond classification](https://openreview.net/forum?id=-bxf89v3Nx).
*International Conference on Learning Representations (ICLR 2021)*.
## Acknowledgements
This work was financially supported by the Swedish Research Council via the projects *Learning of Large-Scale Probabilistic Dynamical Models* (contract number: 2016-04278), *Counterfactual Prediction Methods for Heterogeneous Populations* (contract number: 2018-05040), and *Handling Uncertainty in Machine Learning Systems* (contract number: 2020-04122), by the Swedish Foundation for Strategic Research via the project *Probabilistic Modeling and Inference for Machine Learning* (contract number: ICA16-0015), by the Wallenberg AI, Autonomous Systems and Software Program (WASP) funded by the Knut and Alice Wallenberg Foundation, and by ELLIIT. | CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
|
[
"MIT"
] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | docs | 5528 | # Introduction
## Probabilistic predictive models
A probabilistic predictive model predicts a probability distribution over a set
of targets for a given feature. By predicting a distribution, one can express
the uncertainty in the prediction, which might be inherent to the prediction
task (e.g., if the feature does not contain enough information to determine the
target with absolute certainty) or caused by insufficient knowledge of the
underlying relation between feature and target (e.g., if only a small number
of observations of features and corresponding targets are available).[^1]
In the [classification example](../examples/classification) we study the
[Palmer penguins dataset](https://github.com/allisonhorst/palmerpenguins) with
measurements of three different penguin species and consider the task of predicting
the probability of a penguin species (*target*) given the bill and flipper length
(*feature*). For this classification task there exist many different probabilistic
predictive models. We denote the feature by $X$ and the target by $Y$, and let
$P_X$ be the prediction of a specific model $P$ for a feature $X$. Ideally, we would
like that
```math
P_X = \mathrm{law}(Y \,|\, X) \qquad \text{almost surely},
```
i.e., the model should predict the law of target $Y$ given features $X$.[^2] Of course,
usually it is not possible to achieve this in practice.
A very simple class of models consists of models that yield the same prediction for all features,
i.e., they return the same probabilities for the penguin species regardless of the bill and
flipper length. More complicated models also take the features into account and might output
different predictions for different features.
In contrast to probabilistic predictive models, non-probabilistic predictive models
predict a single target instead of a distribution over targets. In fact, such models can be
viewed as a special class of probabilistic predictive models that output only Dirac
distributions, i.e., that always predict 100% probability for one penguin species and
0% probability for all others.
Some other prediction models output a single target together with a confidence score
between 0 and 1. Even these models can be reformulated as probabilistic predictive models,
arguably in a slightly unconventional way: they correspond to a probabilistic model for
a binary classification problem whose feature space is extended with the predicted target
and whose target is the predicted confidence score.
[^1]: It does not matter how the model is obtained. In particular, both Bayesian and frequentist approaches can be used.
[^2]: In classification problems, the law $\mathrm{law}(Y \,|\, X)$ can be identified with a vector in the probability simplex. Therefore often we just consider this equivalent formulation, both for the predictions $P_X$ and the law $\mathrm{law}(Y \,|\, X)$.
## Calibration
The main motivation for using a probabilistic model is that it provides additional
information about the uncertainty of the predictions, which is valuable for decision
making. A [classic example are weather forecasts](https://www.jstor.org/stable/2987588)
that also report the "probability of rain" instead of only if it will rain or not.
Therefore it is not sufficient if the model predicts an arbitrary distribution.
Instead the predictions should actually express the involved uncertainties "correctly".
One desired property is that the predictions are consistent: if the forecasts predict
an 80% probability of rain for an infinite sequence of days, then ideally on 80% of the
days it rains.
More generally, mathematically we would like
```math
P_X = \mathrm{law}(Y \,|\, P_X) \quad \text{almost surely},
```
i.e., the predicted distribution of targets should be equal to the distribution of targets
conditioned on the predicted distribution.[^3]
This statistical property is called *calibration*. If it is satisfied, a model is
*calibrated*.
Obviously, the ideal model $P_X = \mathrm{law}(Y \,|\, X)$ is calibrated. However,
the naive model $P_X = \mathrm{law}(Y)$, which always predicts the marginal
distribution of $Y$ independently of the features, is also calibrated.[^4] In fact, any model of
the form
```math
P_X = \mathrm{law}(Y \,|\, \phi(X)) \quad \text{almost surely},
```
where $\phi$ is some measurable function, is calibrated.
[^3]: The general formulation applies not only to classification but to any prediction task with arbitrary target spaces, including regression.
[^4]: In meteorology, this model is called the *climatology*.
## Calibration error
Calibration errors such as the [expected calibration error](@ref ece) and the
[kernel calibration error](@ref kce) measure the calibration, or rather the
degree of miscalibration, of probabilistic predictive models. They allow a more
fine-tuned analysis of calibration and enable comparisons of calibration of
different models. Intuitively, calibration measures quantify the deviation of
$P_X$ and $\mathrm{law}(Y \,|\, P_X)$, i.e., the left and right hand side in
the calibration definition.
### Estimation
The calibration error of a model depends on the true conditional distribution of
targets which is unknown in practice. Therefore calibration errors have to be
estimated from a validation data set.
Various estimators of different calibration errors such as the
[expected calibration error](@ref ece) and the [kernel calibration error](@ref kce)
are implemented in CalibrationErrors.
```@docs
CalibrationErrors.CalibrationErrorEstimator
```
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
|
[
"MIT"
] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | docs | 4462 | # [Kernel calibration error (KCE)](@id kce)
## Definition
The kernel calibration error (KCE) is another calibration error. It is based on real-valued
kernels on the product space $\mathcal{P} \times \mathcal{Y}$ of predictions and targets.
The KCE with respect to a real-valued kernel
$k \colon (\mathcal{P} \times \mathcal{Y}) \times (\mathcal{P} \times \mathcal{Y}) \to \mathbb{R}$
is defined[^WLZ21] as
```math
\mathrm{KCE}_k := \sup_{f \in \mathcal{B}_k} \bigg| \mathbb{E}_{Y,P_X} f(P_X, Y) - \mathbb{E}_{Z_X,P_X} f(P_X, Z_X)\bigg|,
```
where $\mathcal{B}_{k}$ is the unit ball in the
[reproducing kernel Hilbert space (RKHS)](https://en.wikipedia.org/wiki/Reproducing_kernel_Hilbert_space)
to $k$ and $Z_X$ is an artificial random variable on the target space $\mathcal{Y}$ whose
conditional law is given by
```math
Z_X \,|\, P_X = \mu \sim \mu.
```
The RKHS to kernel $k$, and hence also the unit ball $\mathcal{B}_k$, consists of
real-valued functions of the form $f \colon \mathcal{P} \times \mathcal{Y} \to \mathbb{R}$.
For classification models with $m$ classes, there exists an equivalent formulation of the
KCE based on matrix-valued kernel
$\tilde{k} \colon \mathcal{P} \times \mathcal{P} \to \mathbb{R}^{m \times m}$ on
the space $\mathcal{P}$ of predictions.[^WLZ19] The definition above can be rewritten as
```math
\mathrm{KCE}_{\tilde{k}} := \sup_{f \in \mathcal{B}_{\tilde{k}}} \bigg| \mathbb{E}_{P_X} \big(\mathrm{law}(Y \,|\, P_X) - P_X\big)^\mathsf{T} f(P_X) \bigg|,
```
where the matrix-valued kernel $\tilde{k}$ is given by
```math
\tilde{k}_{i,j}(p, q) = k((p, i), (q, j)) \quad (i,j=1,\ldots,m),
```
and $\mathcal{B}_{\tilde{k}}$ is the unit ball in the RKHS of $\tilde{k}$, consisting
of vector-valued functions $f \colon \mathcal{P} \to \mathbb{R}^m$. However,
this formulation applies only to classification models whereas the general
definition above covers all probabilistic predictive models.
For a large class of kernels the KCE is zero if and only if the model is
calibrated.[^WLZ21] Moreover, the squared KCE (SKCE) can be formulated in
terms of the kernel $k$ as
```math
\begin{aligned}
\mathrm{SKCE}_{k} := \mathrm{KCE}_k^2 &= \int k(u, v) \, \big(\mathrm{law}(P_X, Y) - \mathrm{law}(P_X, Z_X)\big)(u) \big(\mathrm{law}(P_X, Y) - \mathrm{law}(P_X, Z_X)\big)(v) \\
&= \mathbb{E} h_k\big((P_X, Y), (P_{X'}, Y')\big),
\end{aligned}
```
where $(X',Y')$ is an independent copy of $(X,Y)$ and
```math
\begin{aligned}
h_k\big((\mu, y), (\mu', y')\big) :={}& k\big((\mu, y), (\mu', y')\big) - \mathbb{E}_{Z \sim \mu} k\big((\mu, Z), (\mu', y')\big) \\
&- \mathbb{E}_{Z' \sim \mu'} k\big((\mu, y), (\mu', Z')\big) + \mathbb{E}_{Z \sim \mu, Z' \sim \mu'} k\big((\mu, Z), (\mu', Z')\big).
\end{aligned}
```
The KCE is actually a special case of calibration errors that are formulated as integral
probability metrics of the form
```math
\sup_{f \in \mathcal{F}} \big| \mathbb{E}_{Y,P_X} f(P_X, Y) - \mathbb{E}_{Z_X,P_X} f(P_X, Z_X)\big|,
```
where $\mathcal{F}$ is a space of real-valued functions of the form
$f \colon \mathcal{P} \times \mathcal{Y} \to \mathbb{R}$.[^WLZ21] For classification models,
the [ECE](@ref ece) with respect to common distances such as the total variation distance
or the squared Euclidean distance can be formulated in this way.[^WLZ19]
The maximum mean calibration error (MMCE)[^KSJ] can be viewed as a special case of the KCE, in
which only the most-confident predictions are considered.[^WLZ19]
[^KSJ]: Kumar, A., Sarawagi, S., & Jain, U. (2018). [Trainable calibration measures for neural networks from kernel mean embeddings](http://proceedings.mlr.press/v80/kumar18a.html). In *Proceedings of the 35th International Conference on Machine Learning* (pp. 2805-2814).
[^WLZ19]: Widmann, D., Lindsten, F., & Zachariah, D. (2019). [Calibration tests in multi-class classification: A unifying framework](https://proceedings.neurips.cc/paper/2019/hash/1c336b8080f82bcc2cd2499b4c57261d-Abstract.html). In *Advances in Neural Information Processing Systems 32 (NeurIPS 2019)* (pp. 12257–12267).
[^WLZ21]: Widmann, D., Lindsten, F., & Zachariah, D. (2021). [Calibration tests beyond classification](https://openreview.net/forum?id=-bxf89v3Nx). To be presented at *ICLR 2021*.
## Estimator
Both biased and unbiased estimators of the SKCE exist. In CalibrationErrors.jl,
[`SKCE`](@ref) lets you construct unbiased and biased estimators with quadratic
and sub-quadratic sample complexity.
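For instance, the different variants could be constructed as follows (a minimal sketch;
the tensor product kernel on predictions and targets is an arbitrary choice for
illustration, and the sketch assumes that the kernels from KernelFunctions.jl are in
scope):

```julia
using CalibrationErrors

predictions = [[0.9, 0.1], [0.2, 0.8], [0.7, 0.3], [0.4, 0.6]]
targets = [1, 2, 1, 1]

kernel = SqExponentialKernel() ⊗ WhiteKernel()
SKCE(kernel)(predictions, targets)                  # unbiased, quadratic sample complexity
SKCE(kernel; unbiased=false)(predictions, targets)  # biased, always non-negative
SKCE(kernel; blocksize=2)(predictions, targets)     # unbiased, linear sample complexity
```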
```@docs
SKCE
```
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
|
[
"MIT"
] | 0.6.4 | b87cd13d6d99575da6c0e6ff6727c2fe643d327d | docs | 1118 | # Other calibration errors
## Unnormalized calibration mean embedding (UCME)
Instead of the formulation of the calibration error as an integral
probability metric, one can consider the unnormalized calibration
mean embedding (UCME).
Let $\mathcal{P} \times \mathcal{Y}$ be the product space of
predictions and targets. The UCME for a real-valued kernel
$k \colon (\mathcal{P} \times \mathcal{Y}) \times (\mathcal{P} \times \mathcal{Y}) \to \mathbb{R}$
and $m$ test locations is defined[^WLZ] as
```math
\mathrm{UCME}_{k,m}^2 := m^{-1} \sum_{i=1}^m \Big(\mathbb{E}_{Y,P_X} k\big(T_i, (P_X, Y)\big) - \mathbb{E}_{Z_X,P_X} k\big(T_i, (P_X, Z_X)\big)\Big)^2,
```
where test locations $T_1, \ldots, T_m$ are i.i.d. random variables whose
law is absolutely continuous with respect to the Lebesgue measure on
$\mathcal{P} \times \mathcal{Y}$.
The plug-in estimator of $\mathrm{UCME}_{k,m}^2$ is available as [`UCME`](@ref).
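For instance, a minimal sketch with three manually chosen test locations could look as
follows (in practice the test locations should be drawn at random, as in the definition
above; the sketch assumes that the kernels from KernelFunctions.jl are in scope):

```julia
using CalibrationErrors

# predicted class probabilities and corresponding observed classes
predictions = [[0.9, 0.1], [0.2, 0.8], [0.7, 0.3], [0.4, 0.6]]
targets = [1, 2, 1, 1]

# test locations: pairs of predictions and targets
testpredictions = [[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]]
testtargets = [1, 1, 2]

ucme = UCME(SqExponentialKernel() ⊗ WhiteKernel(), testpredictions, testtargets)
ucme(predictions, targets)
```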
```@docs
UCME
```
[^WLZ]: Widmann, D., Lindsten, F., & Zachariah, D. (2021). [Calibration tests beyond classification](https://openreview.net/forum?id=-bxf89v3Nx). To be presented at *ICLR 2021*.
| CalibrationErrors | https://github.com/devmotion/CalibrationErrors.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 2146 | using Documenter
using IntervalLinearAlgebra
using Literate
using Plots
const litdir = joinpath(@__DIR__, "literate")
for (root, _, files) in walkdir(litdir)
for file in files
if endswith(file, ".jl")
subfolder = splitpath(root)[end]
input = joinpath(root, file)
output = joinpath(@__DIR__, "src", subfolder)
Literate.markdown(input, output; credit=false, mdstrings=true)
end
end
end
DocMeta.setdocmeta!(IntervalLinearAlgebra, :DocTestSetup, :(using IntervalLinearAlgebra); recursive=true)
makedocs(;
modules=[IntervalLinearAlgebra],
authors="Luca Ferranti",
repo="https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl/blob/{commit}{path}#{line}",
sitename="IntervalLinearAlgebra.jl",
warnonly=true,
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://juliaintervals.github.io/IntervalLinearAlgebra.jl",
assets=String[],
collapselevel=1,
),
pages=[
"Home" => "index.md",
"Tutorials" => [
"Linear systems" => "tutorials/linear_systems.md",
"Eigenvalue computations" => "tutorials/eigenvalues.md"
],
"Applications" => ["Interval FEM" => "applications/FEM_example.md"],
"Explanations" => [
"Interval system solution set" => "explanations/solution_set.md",
"Preconditioning" => "explanations/preconditioning.md"
],
"API" => [
"Interval matrices classification" => "api/classify.md",
"Solver interface" => "api/solve.md",
"Interval linear systems" => "api/algorithms.md",
"Preconditioners" => "api/precondition.md",
"Verified real linear systems" => "api/epsilon_inflation.md",
"Eigenvalues" => "api/eigenvalues.md",
"Miscellaneous" => "api/misc.md"
],
"References" => "references.md",
"Contributing" => "CONTRIBUTING.md"
],
)
deploydocs(;
repo="github.com/JuliaIntervals/IntervalLinearAlgebra.jl",
devbranch = "main",
push_preview=true
)
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 15288 | # # Application of Interval Linear Algebra to FEM analysis
# The Finite Element Method is widely used to solve PDEs in Engineering applications and particularly in Structural Analysis problems [[BAT14]](@ref). The procedure consists in discretizing the domain into _elements_ and constructing (_assembling_) a system of balance equations. For linear problems, this system can be usually written as
# ```math
# K \cdot d = f
# \qquad
# K = \sum_{e=1}^{n_e} K_e
# ```
# where $n_e$ is the number of elements of the domain, $f$ is the vector of external loads, $K_e$ is the stiffness matrix of element $e$ in global coordinates, $K$ is the assembled stiffness matrix and $d$
# is the vector of unknown displacements. This tutorial shows how IntervalLinearAlgebra can
# be used to solve structural mechanics problems with uncertainty in the parameters. Particularly,
# it highlights the importance of parametric interval linear systems.
#
# ## Simple truss structure
#
# A frequent and simple type of structure is the _truss structure_, formed by bars that are connected but not welded. Truss models are usually considered during the conceptual design of bridges or other structures.
#
# ### Stiffness equations
# The stiffness matrix of a truss element in the local coordinate system is given by
# ```math
# K_L = s
# \left(
# \begin{matrix}
# 1 & 0 & -1 & 0 \\
# 0 & 0 & 0 & 0 \\
# -1 & 0 & 1 & 0 \\
# 0 & 0 & 0 & 0
# \end{matrix}
# \right),
# ```
#
# where $s =\frac{E A}{L}$ is the stiffness, $E$ is the Young modulus, $A$ is the area of the cross-section and $L$ is the length of that truss element.
#
# The change-of-basis matrix is given by
# ```math
# _G(Q)_L = Q =
# \left(
# \begin{matrix}
# \cos(\alpha) & -\sin(\alpha) & 0 & 0 \\
# \sin(\alpha) & \cos(\alpha) & 0 & 0 \\
# 0 & 0 & \cos(\alpha) & -\sin(\alpha) \\
# 0 & 0 & \sin(\alpha) & \cos(\alpha)
# \end{matrix}
# \right).
# ```
#
# The system of equations for each element is written in local coordinates as
# ```math
# K_L d_L = f_L
# ```
# and using the change-of-basis we obtain the equations for that element in the global systems of coordinates
# ```math
# K_G d_G = f_G \qquad K_G = Q K_L Q^T.
# ```
# After the system of equations for each element is in global coordinates, the whole system is assembled.
#
# The unitary stiffness matrix (for $s=1$) can be computed using the following function.
function unitaryStiffnessMatrix( coordFirstNode, coordSecondNode )
    diff     = (coordSecondNode - coordFirstNode)
    length   = sqrt( diff' * diff )                          # length of the element
    c        = diff[1] / length                              # direction cosine
    s        = diff[2] / length                              # direction sine
    Qloc2glo = [ c -s 0 0 ; s c 0 0 ; 0 0 c -s ; 0 0 s c ]   # change-of-basis matrix Q
    Kloc     = [ 1 0 -1 0 ; 0 0 0 0 ; -1 0 1 0 ; 0 0 0 0 ]   # local stiffness matrix for s = 1
    Kglo     = Qloc2glo * Kloc * transpose(Qloc2glo)         # stiffness matrix in global coordinates
    return Kglo, length
end
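# For instance, for a bar going from the origin to the point $(1, 1)$, the function
# returns the unitary global stiffness matrix and the length $\sqrt{2}$ (the variable
# names below are only illustrative):
Kexample, lengthExample = unitaryStiffnessMatrix( [0.0, 0.0], [1.0, 1.0] )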
#
# ### Example problem
#
# A problem based on Example 4.1 from [[SKA06]](@ref) is considered. The following diagram shows the truss structure.
#
# ```@raw html
# <img src="../../assets/trussDiagram.svg" style="width: 100%" alt="truss diagram"/>
# ```
#
# #### Case with fixed parameters
#
# The scalar parameters considered are given by
E = 2e11 ; # Young modulus
A = 5e-3 ; # Cross-section area
# The coordinate matrix is given by
nodesCMatrix = [ 0.0 0.0 ;
1.0 1.0 ;
2.0 0.0 ;
3.0 1.0 ;
4.0 0.0 ];
# the connectivity matrix is given by
connecMatrix = [ 1 2 ;
1 3 ;
2 3 ;
2 4 ;
3 4 ;
3 5 ;
4 5 ];
# and the fixed degrees of freedom (supports) are defined by the vector
fixedDofs = [ 2 9 10 ];
# The numbers of nodes and elements are computed, as well as the free degrees of freedom.
numNodes = size( nodesCMatrix )[1] # compute the number of nodes
numElems = size( connecMatrix )[1] # compute the number of elements
freeDofs = setdiff( 1:(2*numNodes), fixedDofs ) # dofs with unknown displacements
# The global stiffness equations are computed for the unknown displacements (free dofs)
KG = zeros( 2*numNodes, 2*numNodes )
FG = zeros( 2*numNodes )
for elem in 1:numElems
indexFirstNode = connecMatrix[ elem, 1 ]
indexSecondNode = connecMatrix[ elem, 2 ]
dofsElem = [2*indexFirstNode-1 2*indexFirstNode 2*indexSecondNode-1 2*indexSecondNode ]
KGelem, lengthElem = unitaryStiffnessMatrix( nodesCMatrix[ indexSecondNode, : ], nodesCMatrix[ indexFirstNode, : ] )
stiffnessParam = E * A / lengthElem
for i in 1:4
for j in 1:4
KG[ dofsElem[i], dofsElem[j] ] = KG[ dofsElem[i], dofsElem[j] ] + stiffnessParam * KGelem[i,j]
end
end
end
FG[4] = -1e4 ;
KG = KG[ freeDofs, : ]
KG = KG[ :, freeDofs ]
FG = FG[ freeDofs ]
# and the system is solved.
u = KG \ FG
UG = zeros( 2*numNodes )
UG[ freeDofs ] = u
#
# The reference (dashed blue line) and deformed (solid red line) configurations of the structure are plotted.
# Since the displacements are very small, a `scaleFactor` is used to amplify
# the deformation and ease the visualization.
#
using Plots
scaleFactor = 2e3
plot();
for elem in 1:numElems
indexFirstNode = connecMatrix[ elem, 1 ];
indexSecondNode = connecMatrix[ elem, 2 ];
## plot reference element
plot!( nodesCMatrix[ [indexFirstNode, indexSecondNode], 1 ],
nodesCMatrix[ [indexFirstNode, indexSecondNode], 2 ],
linestyle = :dash, aspect_ratio = :equal,
linecolor = "blue", legend = false)
## plot deformed element
plot!( nodesCMatrix[ [indexFirstNode, indexSecondNode], 1 ]
+ scaleFactor* [ UG[indexFirstNode*2-1], UG[indexSecondNode*2-1]] ,
nodesCMatrix[ [indexFirstNode, indexSecondNode], 2 ]
+ scaleFactor* [ UG[indexFirstNode*2 ], UG[indexSecondNode*2 ]] , markershape = :circle, aspect_ratio = :equal, linecolor = "red",
linewidth=1.5, legend = false )
end
xlabel!("x (m)") # hide
ylabel!("y (m)") # hide
title!( "Deformed with scale factor " * string(scaleFactor) ) # hide
savefig("deformed.png") # hide
#
# 
#
#=
#### Problem with interval parameters
Suppose now we have a 10% uncertainty for the stiffness $s_{23}$ associated with the third
element. To model the problem, we introduce the symbolic variable `s23` using the
IntervalLinearAlgebra macro [`@affinevars`](@ref).
=#
using IntervalLinearAlgebra
@affinevars s23
# now we can construct the matrix as before
KGp = zeros(AffineExpression{Float64}, 2*numNodes, 2*numNodes );
for elem in 1:numElems
print(" assembling stiffness matrix of element ", elem , "\n")
indexFirstNode = connecMatrix[ elem, 1 ]
indexSecondNode = connecMatrix[ elem, 2 ]
dofsElem = [2*indexFirstNode-1 2*indexFirstNode 2*indexSecondNode-1 2*indexSecondNode ]
KGelem, lengthElem = unitaryStiffnessMatrix( nodesCMatrix[ indexSecondNode, : ], nodesCMatrix[ indexFirstNode, : ] )
if elem == 3
stiffnessParam = s23
else
stiffnessParam = E * A / lengthElem
end
for i in 1:4
for j in 1:4
KGp[ dofsElem[i], dofsElem[j] ] = KGp[ dofsElem[i], dofsElem[j] ] + stiffnessParam * KGelem[i,j]
end
end
end
KGp = KGp[ freeDofs, : ]
KGp = KGp[ :, freeDofs ]
# Now we can construct the [`AffineParametricArray`](@ref)
KGp = AffineParametricArray(KGp)
# The range of the stiffness is
srange = E * A / sqrt(2) ± 0.1 * E * A / sqrt(2)
# To solve the system, we could of course just substitute `srange` into the parametric matrix
# `KGp` and solve the "normal" interval linear system (naive approach)
usimple = solve(KGp(srange), Interval.(FG))
# This approach, however, suffers from the [dependency problem](https://en.wikipedia.org/wiki/Interval_arithmetic#Dependency_problem)
# and hence the computed displacements will be an overestimation of the true displacements.
# To mitigate this issue, algorithms to solve linear systems with parameters have been developed.
# In this case we use the algorithm presented in [[SKA06]](@ref)
uparam = solve(KGp, FG, srange)
# We can now compare the naive and parametric solutions
hcat(usimple, uparam)/1e-6
#=
As you can see, the naive non-parametric approach significantly overestimates the
displacements. For this very simple and small structure both enclosures are still usable,
but as the number of nodes increases, the effect of the dependency problem also increases
and the non-parametric approach will fail to give useful results. This is demonstrated in
the next section.
## A continuum mechanics problem
In this example a simple plane solid problem is considered. The solid is fixed on its bottom edge and loaded with a shear tension on the top edge.
First, we set the geometry and construct a regular grid of points
=#
L = [1.0, 4.0] # dimension in each direction
t = 0.2 # thickness
nx = 10 # number of divisions in direction x
ny = 20 # number of divisions in direction y
nel = [nx, ny]
neltot = 2 * nx * ny; # total number of elements
nnos = nel .+ 1 # number of nodes in each direction
nnosx, nnosy = nnos
nnostot = nnosx * nnosy ; # total number of nodes
# we compute the vector of indexes of the loaded nodes (the ones on the top edge)
startloadnode = (nnosy - 1) * nnosx + 1 # index of the first loaded node
endinloadnode = nnosx * nnosy           # index of the last loaded node
LoadNodes = startloadnode:endinloadnode
lins1 = range(0, L[1], length=nnosx)
lins2 = range(0, L[2], length=nnosy)
# and construct the matrix of coordinates of the nodes
nodes = zeros(nnostot, 2) # nodes: first column x-coord, second column y-coord
for i = 1:nnosy # first discretize along y-coord
idx = (nnosx * (i-1) + 1) : (nnosx*i)
nodes[idx, 1] = lins1
nodes[idx, 2] = fill(lins2[i], nnosx)
end
# The connectivity matrix Mcon is computed, considering 3-node triangular elements
Mcon = Matrix{Int64}(undef, neltot, 3); # connectivity matrix
for j = 1:ny
for i = 1:nx
intri1 = 2*(i-1)+1+2*(j-1)*nx
intri2 = intri1 + 1
Mcon[intri1, :] = [j*nnosx+i, (j-1)*nnosx+i, j*nnosx+i+1 ]
Mcon[intri2, :] = [j*nnosx+i+1, (j-1)*nnosx+i, (j-1)*nnosx+i+1]
end
end
# the undeformed mesh is plotted as follows
Xel = Matrix{Float64}(undef, 3, neltot); Yel = Matrix{Float64}(undef, 3, neltot)
for i = 1:neltot
    Xel[:, i] = nodes[Mcon[i, :], 1] # the i-th column has the x coordinates of the i-th element
    Yel[:, i] = nodes[Mcon[i, :], 2] # the i-th column has the y coordinates of the i-th element
end
fig = plot(ratio=1, xlimits=(-1, 3), title="Undeformed mesh", xlabel="x", ylabel="y")
plot!(fig, [Xel[:, 1]; Xel[1, 1]], [Yel[:, 1]; Yel[1, 1]], linecolor=:blue, linewidth=1.4, label="")
for i = 2:neltot
plot!(fig, [Xel[:, i]; Xel[1, i]], [Yel[:, i]; Yel[1, i]], linecolor=:blue, linewidth=1.4, label="")
end
savefig("undeformed2.png") # hide
# 
# Let us now define the material parameters. We assume a 10% uncertainty on the Young
# modulus, while the Poisson ratio and the density are fixed. This can represent, for
# instance, a steel plate of unknown composition, and thus of unknown exact Young modulus.
ν = 0.25 # Poisson
ρ = 8e3 # density
@affinevars E # Young modulus, defined as symbolic variable
En = 200e9 # nominal value of the Young modulus
Erange = En ± 0.1 * En # uncertainty range of the Young modulus
# We can now assemble the global stiffness matrix.
# We set the constitutive matrix for a plane stress state.
C = E / (1-ν^2) * [ 1 ν 0 ;
ν 1 0 ;
0 0 (1-ν)/2 ]
# We compute the free and fixed degrees of freedom
function nodes2dofs(u)
v = Vector{Int64}(undef, 2*length(u))
for i in 1:length(u)
v[2i-1] = 2u[i] - 1; v[2i] = 2u[i]
end
return v
end
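# For example, the degrees of freedom associated with nodes 1 and 3 are
# (the call below is purely illustrative)
nodes2dofs([1, 3]) # returns [1, 2, 5, 6]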
FixNodes = 1:nnosx
FixDofs = nodes2dofs(FixNodes) # first add all dofs of the nodes
deleteat!(FixDofs, 3:2:(length(FixDofs)-2)) # then remove the free dofs of the nodes
LibDofs = Vector(1:2*nnostot) # free degrees of freedom
deleteat!(LibDofs, FixDofs)
# and we assemble the matrix
function stiffness_matrix(x, y, C, t)
A = det([ones(1, 3); x'; y']) / 2 # element area
B = 1 / (2*A) * [y[2]-y[3] 0 y[3]-y[1] 0 y[1]-y[2] 0 ;
0 x[3]-x[2] 0 x[1]-x[3] 0 x[2]-x[1] ;
x[3]-x[2] y[2]-y[3] x[1]-x[3] y[3]-y[1] x[2]-x[1] y[1]-y[2] ]
K = B' * C * B * A * t ;
return K
end
KG = zeros(AffineExpression{Float64}, 2*nnostot, 2*nnostot);
for i = 1:neltot
Ke = stiffness_matrix(Xel[:, i], Yel[:, i], C, t)
aux = nodes2dofs(Mcon[i, :])
KG[aux, aux] .+= Ke
end
K = AffineParametricArray(KG[LibDofs, LibDofs])
# Finally, we assemble the loads vector
areaelemsup = L[1] / nx * t
f = zeros(2*nnostot);
f[2*LoadNodes[1]-1] = 0.5 * areaelemsup; # horizontal force, halved at the first edge node
for i in 2:(length(LoadNodes)-1)
idx = 2 * LoadNodes[i]
f[idx-1] = 1 * areaelemsup # horizontal force
end
f[2*LoadNodes[end]-1] = 0.5 * areaelemsup # horizontal force, halved at the last edge node
q = 1e9 # distributed load on the top edge
F = q * f;
FLib = F[LibDofs]
nothing # hide
# now we can solve the displacements from the parametric interval linear system and plot
# minimum and maximum displacement.
u = solve(K, FLib, Erange) # solving
# plotting
U = zeros(Interval, 2*nnostot)
U[LibDofs] .= u
Ux = U[1:2:2*nnostot-1]
Uy = U[2:2:2*nnostot]
nodesdef = hcat(nodes[:, 1] + Ux, nodes[:, 2] + Uy);
Xeld = Interval.(copy(Xel))
Yeld = Interval.(copy(Yel))
# build elements coordinate vectors
for i = 1:neltot
    Xeld[:, i] = nodesdef[Mcon[i, :], 1] # the i-th column has the x coordinates of the i-th element
    Yeld[:, i] = nodesdef[Mcon[i, :], 2] # the i-th column has the y coordinates of the i-th element
end
plot!(fig, [inf.(Xeld[:, 1]); inf.(Xeld[1, 1])], [inf.(Yeld[:, 1]); inf.(Yeld[1, 1])], linecolor=:green, linewidth=1.4, label="", title="Displacements")
for i = 2:neltot
plot!(fig, [inf.(Xeld[:, i]); inf.(Xeld[1, i])], [inf.(Yeld[:, i]); inf.(Yeld[1, i])], linecolor=:green, linewidth=1.4, label="")
end
plot!(fig, [sup.(Xeld[:, 1]); sup.(Xeld[1, 1])], [sup.(Yeld[:, 1]); sup.(Yeld[1, 1])], linecolor=:red, linewidth=1.4, label="", title="Displacements")
for i = 2:neltot
plot!(fig, [sup.(Xeld[:, i]); sup.(Xeld[1, i])], [sup.(Yeld[:, i]); sup.(Yeld[1, i])], linecolor=:red, linewidth=1.4, label="")
end
savefig("displacement2.png") # hide
# 
#=
In this case, ignoring the dependency and treating the problem as a "normal" interval linear
system would fail. The reason for this is that the matrix is not strongly regular, which is
a necessary condition for the implemented algorithms to work.
=#
is_strongly_regular(K(Erange))
#=
## Conclusions
This tutorial showed how interval methods can be useful in engineering applications dealing
with uncertainty. Since in most applications the matrix entries depend on some common
parameters, neglecting the parametric structure leads, because of the dependency problem,
to overly pessimistic results. This highlights the importance of parametric interval methods
in engineering applications.
=#
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 2200 | using Luxor, IntervalLinearAlgebra, LazySets
A = [2..4 -2..1;-1..2 2..4]
b = [-2..2, -2..2]
polytopes = solve(A, b, LinearOettliPrager())
# convert HPolytopes to vectors of Luxor points
function luxify(P::HPolytope)
    # convert to vertex representation (VPolygon orders the vertices)
    V = convert(VPolygon, P)
    out = vertices_list(V)
    out_luxor = [Luxor.Point(Tuple(p)) for p in out]
    return out_luxor
end
luxor_polytopes = luxify.(polytopes)
# draw logo
function logo(polytopes, fname)
Drawing(500, 500, fname)
origin()
# transparent sethue("grey30")
# squircle(O, 248, 248, :fill, rt=0.1)
cols = [Luxor.julia_green, Luxor.julia_blue, Luxor.julia_purple, Luxor.julia_red]
sizes = [1, 2.5, 4]
Luxor.scale(1, -1)
for i in 1:4
sethue("black")
poly(polytopes[i]*60, :stroke, close=true)
sethue(cols[i])
poly(polytopes[i]*60, :fill, close=true)
#setline(rescale(sizes[i], 1, 4, 18, 18))
end
finish()
preview()
end
function lockup(logo, fname)
Drawing(1000, 300, fname)
# background("grey90") # otherwise transparent
origin()
logosvg = readsvg(logo) # this requires Luxor#master
panes = Table([300], [300, 700])
# place SVG
@layer begin
Luxor.translate(panes[1])
Luxor.scale(0.5)
placeimage(logosvg, O, centered=true)
end
# text
@layer begin
sethue("black")#sethue("rebeccapurple")
setline(0.75)
Luxor.translate(boxmiddleleft(BoundingBox(box(panes, 2))))
fontsize(50) #80
fontface("JuliaMono Bold")
titlex = -40
Luxor.text("IntervalLinearAlgebra.jl", Point(titlex, 0))
@layer begin
sethue("grey60")
textoutlines("IntervalLinearAlgebra.jl", Point(titlex, 0), :stroke)
end
fontsize(40) #45
fontface("JuliaMono")
subx = -38
Luxor.text("Linear algebra done rigorously", Point(subx, 45))
@layer begin
sethue("grey60")
textoutlines("Linear algebra done rigorously", Point(subx, 45), :stroke)
end
end
finish()
preview()
end
logo(luxor_polytopes, "logo.svg")
lockup("logo.svg","logo-text.svg")
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 287 | using IntervalLinearAlgebra, LazySets, Plots
A = [2..4 -1..1;-1..1 2..4]
b = [-2..2, -1..1]
Xenclose = solve(A, b)
polytopes = solve(A, b, LinearOettliPrager())
plot(UnionSetArray(polytopes), ratio=1, label="solution set", legend=:top)
plot!(IntervalBox(Xenclose), label="enclosure")
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 751 | using IntervalArithmetic, StaticArrays, IntervalLinearAlgebra
using IntervalRootFinding: gauss_seidel_interval, gauss_elimination_interval, gauss_seidel_contractor # not to overload \ in base
A = @SMatrix [4..6 -1..1 -1..1 -1..1;-1..1 -6.. -4 -1..1 -1..1;-1..1 -1..1 9..11 -1..1;-1..1 -1..1 -1..1 -11.. -9]
b = @SVector [-2..4, 1..8, -4..10, 2..12]
jac = Jacobi()
gs = GaussSeidel()
hbr = HansenBliekRohn()
@btime solve($A, $b, $gs)
@btime solve($A, $b, $jac)
@btime solve($A, $b, $hbr)
# comparison with IntervalRootFinding and base
@btime gauss_seidel_interval($A, $b)
@btime gauss_seidel_contractor($A, $b) #NOTE: THIS IS THE JACOBI method
@btime gauss_elimination_interval($A, $b) # this is the one IRF.jl uses to overload \
@btime $A\$b | IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 3267 | using Random, IntervalLinearAlgebra, Plots
sizes = Iterators.flatten((2:9, 10:10:90, 100:100:1000)) |> collect
times = zeros(length(sizes), 4)
seed = 42
Random.seed!(seed)
random_interval_matrix(N) = Interval.(randn(N, N) .± abs.(randn(N, N)))
for (j, mult_mode) in enumerate((:fast, :rank1, :slow))
set_multiplication_mode(mult_mode)
Random.seed!(seed)
for (i, N) in enumerate(sizes)
A = random_interval_matrix(N)
B = random_interval_matrix(N)
t = @benchmark $A * $B
times[i, j] = minimum(t.times)
@show N
end
end
# normal multiplication with accurate rounding mode
set_multiplication_mode(:slow)
setrounding(Interval, :accurate)
Random.seed!(seed)
for (i, N) in enumerate(sizes)
A = random_interval_matrix(N)
B = random_interval_matrix(N)
t = @benchmark $A * $B
times[i, 4] = minimum(t.times)
@show N
end
# back to default settings
setrounding(Interval, :tight)
set_multiplication_mode(:fast)
# floating point multiplication
times_float = zeros(size(sizes))
for (i, N) in enumerate(sizes)
A = randn(N, N)
B = randn(N, N)
t = @benchmark $A * $B
times_float[i] = minimum(t.times)
@show N
end
# plotting
labels = ["fast" "rank1" "slow-tight" "slow-accurate"]
plot(sizes, times/1e6; label=labels, axis=:log, m=:auto, legend=:topleft)
plot!(sizes, times_float/1e6, label="Float64", m=:auto)
xlabel!("size")
ylabel!("time [ms]")
yticks!(10.0 .^(-3:5))
ratios = times ./ times_float
labels = ["fast" "rank1" "slow-tight" "slow-accurate"]
plot(sizes, ratios; label=labels, axis=:log, m=:auto, legend=:topleft)
xlabel!("size")
ylabel!("ratio")
yticks!(10.0 .^(-3:5))
## overestimate benchmarks
radii = 10.0 .^ (-15:15)
sizes = [10, 50, 100, 200]
Nsamples = 10
min_overestimate = zeros(length(radii), length(sizes))
max_overestimate = zeros(length(radii), length(sizes))
mean_overestimate = zeros(length(radii), length(sizes))
# function random_interval_matrix(N, r)
# Ac = randn(N, N)
# return Ac .± (r * abs.(Ac))
# end
random_interval_matrix(N, r) = randn(N, N) .± r*abs.(randn(N, N))
Random.seed!(seed)
for (i, N) in enumerate(sizes)
for (j, r) in enumerate(radii)
tmp_statistics = zeros(3)
for _ in 1:Nsamples
A = random_interval_matrix(N, r)
B = random_interval_matrix(N, r)
set_multiplication_mode(:fast)
Cfast = A * B
set_multiplication_mode(:slow)
Cslow = A * B
ratios = (diam.(Cfast)./diam.(Cslow) .- 1) * 100
tmp_statistics .+= (minimum(ratios), mean(ratios), maximum(ratios))
end
min_overestimate[j, i] = tmp_statistics[1]/Nsamples
mean_overestimate[j, i] = tmp_statistics[2]/Nsamples
max_overestimate[j, i] = tmp_statistics[3]/Nsamples
println("N=$(i)/$(length(sizes)) r=$(j)/$(length(radii))")
end
end
for (i, N) in enumerate(sizes)
plot(radii, max_overestimate[:, i], xaxis=:log, m=:o, label="max")
plot!(radii, min_overestimate[:, i], m=:o, label="min")
plot!(radii, mean_overestimate[:, i], m=:o, label="mean")
xlabel!("radius")
ylabel!("%-overestimate")
title!("matrix size N=$N")
savefig("accurate_overestimate_N$(N)_random_radius.pdf")
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 3099 | module IntervalLinearAlgebra
using StaticArrays, Requires, Reexport
using LinearAlgebra: checksquare
import Base: +, -, *, /, \, ==,
show, convert, promote_rule, zero, one,
getindex, IndexStyle, setindex!, size
import CommonSolve: solve
@reexport using LinearAlgebra, IntervalArithmetic
const IA = IntervalArithmetic
export
set_multiplication_mode, get_multiplication_mode,
LinearKrawczyk, Jacobi, GaussSeidel, GaussianElimination, HansenBliekRohn, NonLinearOettliPrager, LinearOettliPrager,
NoPrecondition, InverseMidpoint, InverseDiagonalMidpoint,
solve, enclose, epsilon_inflation,
comparison_matrix, interval_norm, interval_isapprox, Orthants,
is_H_matrix, is_strongly_regular, is_strictly_diagonally_dominant, is_Z_matrix, is_M_matrix,
rref,
eigenbox, Rohn, Hertz, verify_eigen, bound_perron_frobenius_eigenvalue,
AffineExpression, @affinevars,
AffineParametricArray, AffineParametricMatrix, AffineParametricVector,
Skalna06
include("linear_systems/enclosures.jl")
include("linear_systems/precondition.jl")
include("linear_systems/solve.jl")
include("linear_systems/verify.jl")
include("linear_systems/oettli.jl")
include("multiplication.jl")
include("utils.jl")
include("classify.jl")
include("rref.jl")
include("pils/affine_expressions.jl")
include("pils/affine_parametric_array.jl")
include("pils/pils_solvers.jl")
include("eigenvalues/interval_eigenvalues.jl")
include("eigenvalues/verify_eigs.jl")
include("numerical_test/multithread.jl")
using LinearAlgebra
if Sys.ARCH == :x86_64
using OpenBLASConsistentFPCSR_jll
else
@warn "The behaviour of multithreaded OpenBlas on this architecture is unclear,
we will import MKL"
end
function __init__()
@require IntervalConstraintProgramming = "138f1668-1576-5ad7-91b9-7425abbf3153" include("linear_systems/oettli_nonlinear.jl")
@require LazySets = "b4f0291d-fe17-52bc-9479-3d1a343d9043" include("linear_systems/oettli_linear.jl")
if Sys.ARCH == :x86_64
@info "Switching to OpenBLAS with ConsistentFPCSR = 1 flag enabled, guarantees
correct floating point rounding mode over all threads."
BLAS.lbt_forward(OpenBLASConsistentFPCSR_jll.libopenblas_path; verbose = true)
N = BLAS.get_num_threads()
K = 1024
if NumericalTest.rounding_test(N, K)
@info "OpenBLAS is giving correct rounding on a ($K,$K) test matrix on $N threads"
else
@warn "OpenBLAS is not rounding correctly on the test matrix"
@warn "The number of BLAS threads was set to 1 to ensure rounding mode is consistent"
if !NumericalTest.rounding_test(1, K)
@warn "The rounding test failed on 1 thread"
end
end
else
BLAS.set_num_threads(1)
@warn "The number of BLAS threads was set to 1 to ensure rounding mode is consistent"
if !NumericalTest.rounding_test(1, 1024)
@warn "The rounding test failed on 1 thread"
end
end
end
set_multiplication_mode(config[:multiplication])
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 4234 | # Routines to classify interval matrices
"""
is_strongly_regular(A::AbstractMatrix{T}) where {T<:Interval}
Tests whether the square interval matrix ``A`` is strongly regular, i.e. if ``A_c^{-1}A`` is
an H-matrix, where ``A_c`` is the midpoint matrix of ``A``.
For more details see section 4.6 of [[HOR19]](@ref).
### Examples
```jldoctest
julia> A = [2..4 -2..1; -1..2 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-2, 1]
[-1, 2] [2, 4]
julia> is_strongly_regular(A)
true
julia> A = [0..2 1..1;-1.. -1 0..2]
2×2 Matrix{Interval{Float64}}:
[0, 2] [1, 1]
[-1, -1] [0, 2]
julia> is_strongly_regular(A)
false
```
"""
function is_strongly_regular(A::AbstractMatrix{T}) where {T<:Interval}
m, n = size(A)
m == n || return false
Ac = mid.(A)
rank(Ac) == n || return false
return is_H_matrix(inv(Ac)*A)
end
"""
is_H_matrix(A::AbstractMatrix{T}) where {T<:Interval}
Tests whether the square interval matrix A is an H-matrix, by testing that ``⟨A⟩^{-1}e>0``,
where ``e=[1, 1, …, 1]ᵀ``. Note that in practice it tests that a
_floating point approximation_ of ``⟨A⟩^{-1}e`` satisfies the condition.
For more details see section 4.4 of [[HOR19]](@ref).
### Examples
```jldoctest
julia> A = [2..4 -1..1; -1..1 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-1, 1]
[-1, 1] [2, 4]
julia> is_H_matrix(A)
true
julia> A = [2..4 -2..1; -1..2 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-2, 1]
[-1, 2] [2, 4]
julia> is_H_matrix(A)
false
```
"""
function is_H_matrix(A::AbstractMatrix{T}) where {T<:Interval}
m, n = size(A)
m == n || return false
compA = comparison_matrix(A)
F = lu(compA; check=false)
issuccess(F) || return false
return all(>(0), F\ones(n))
end
"""
is_strictly_diagonally_dominant(A::AbstractMatrix{T}) where {T<:Interval}
Checks whether the square interval matrix ``A`` of order ``n`` is strictly diagonally
dominant, that is if ``mig(Aᵢᵢ) > ∑_{k ≠ i} mag(Aᵢₖ)`` for ``i=1,…,n``.
For more details see section 4.5 of [[HOR19]](@ref).
### Examples
```jldoctest
julia> A = [2..4 -1..1; -1..1 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-1, 1]
[-1, 1] [2, 4]
julia> is_strictly_diagonally_dominant(A)
true
julia> A = [2..4 -2..1; -1..2 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-2, 1]
[-1, 2] [2, 4]
julia> is_strictly_diagonally_dominant(A)
false
```
"""
function is_strictly_diagonally_dominant(A::AbstractMatrix{T}) where {T<:Interval}
m, n = size(A)
m == n || return false
@inbounds for i=1:m
sum_mag = sum(Interval(mag(A[i, k])) for k=1:n if k ≠ i)
mig(A[i, i]) ≤ inf(sum_mag) && return false
end
return true
end
"""
is_Z_matrix(A::AbstractMatrix{T}) where {T<:Interval}
Checks whether the square interval matrix ``A`` is a Z-matrix, that is whether ``Aᵢⱼ≤0``
for all ``i≠j``. For more details see section 4.2 of [[HOR19]](@ref).
### Examples
```jldoctest
julia> A = [2..4 -2.. -1; -2.. -1 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-2, -1]
[-2, -1] [2, 4]
julia> is_Z_matrix(A)
true
julia> A = [2..4 -2..1; -1..2 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-2, 1]
[-1, 2] [2, 4]
julia> is_Z_matrix(A)
false
```
"""
function is_Z_matrix(A::AbstractMatrix{T}) where {T<:Interval}
m, n = size(A)
m == n || return false
@inbounds for j in 1:n
for i in 1:m
if i != j && sup(A[i, j]) > 0
return false
end
end
end
return true
end
"""
is_M_matrix(A::AbstractMatrix{T}) where {T<:Interval}
Checks whether the square interval matrix ``A`` is an M-matrix, that is a Z-matrix with
non-negative inverse. For more details see section 4.2 of [[HOR19]](@ref).
### Examples
```jldoctest
julia> A = [2..2 -1..0; -1..0 2..2]
2×2 Matrix{Interval{Float64}}:
[2, 2] [-1, 0]
[-1, 0] [2, 2]
julia> is_M_matrix(A)
true
julia> A = [2..4 -2..1; -1..2 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-2, 1]
[-1, 2] [2, 4]
julia> is_M_matrix(A)
false
```
"""
function is_M_matrix(A::AbstractMatrix{T}) where {T<:Interval}
is_Z_matrix(A) || return false
Ainf = inf.(A)
e = ones(size(A, 1))
u = Ainf\e
all(u .> 0) || return false
return all(A*u .> 0)
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 4858 | const config = Dict(:multiplication => :fast)
struct MultiplicationType{T} end
get_multiplication_mode() = config
"""
set_multiplication_mode(multype)
Sets the algorithm used to perform matrix multiplication with interval matrices.
### Input
- `multype` -- symbol describing the algorithm used
- `:slow` -- uses traditional matrix multiplication algorithm.
- `:rank1` -- uses rank1 update
- `:fast` -- computes an enclosure of the matrix product using the midpoint-radius
notation of the matrix [[RUM10]](@ref).
### Notes
- By default, `:fast` is used.
- Using `:fast` is generally significantly faster, but it may return larger intervals,
  especially if midpoint and radius have the same order of magnitude
  (50% overestimate at most) [[RUM99]](@ref).
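
### Example

The snippet below sketches typical usage; `A` and `B` stand for any two interval matrices.

```julia
set_multiplication_mode(:rank1) # switch to the rank-1 update algorithm
A * B                           # interval matrix products now use :rank1
set_multiplication_mode(:fast)  # restore the default
```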
"""
function set_multiplication_mode(multype)
type = MultiplicationType{multype}()
@eval *(A::AbstractMatrix{Interval{T}}, B::AbstractMatrix{Interval{T}}) where T =
*($type, A, B)
@eval *(A::AbstractMatrix{T}, B::AbstractMatrix{Interval{T}}) where T = *($type, A, B)
@eval *(A::AbstractMatrix{Interval{T}}, B::AbstractMatrix{T}) where T = *($type, A, B)
@eval *(A::Diagonal, B::AbstractMatrix{Interval{T}}) where T = *($type, A, B)
@eval *(A::AbstractMatrix{Interval{T}}, B::Diagonal) where T = *($type, A, B)
config[:multiplication] = multype
end
function *(A::AbstractMatrix{Complex{Interval{T}}}, B::AbstractMatrix) where T
return real(A)*B+im*imag(A)*B
end
function *(A::AbstractMatrix, B::AbstractMatrix{Complex{Interval{T}}}) where T
return A*real(B)+im*A*imag(B)
end
function *(A::AbstractMatrix{Complex{Interval{T}}}, B::AbstractMatrix{Complex{Interval{T}}}) where T
rA, iA = real(A), imag(A)
rB, iB = real(B), imag(B)
return rA*rB-iA*iB+im*(iA*rB+rA*iB)
end
function *(A::AbstractMatrix{Complex{T}}, B::AbstractMatrix{Interval{T}}) where {T}
return real(A)*B+im*imag(A)*B
end
function *(A::AbstractMatrix{Interval{T}}, B::AbstractMatrix{Complex{T}}) where {T}
return A*real(B)+im*A*imag(B)
end
function *(::MultiplicationType{:slow}, A, B)
TS = promote_type(eltype(A), eltype(B))
return mul!(similar(B, TS, (size(A,1), size(B,2))), A, B)
end
function *(::MultiplicationType{:fast},
            A::AbstractMatrix{Interval{T}},
            B::AbstractMatrix{Interval{T}}) where {T<:Real}
    Ainf = inf.(A)
    Asup = sup.(A)
    Binf = inf.(B)
    Bsup = sup.(B)

    mA, mB, R, Csup = setrounding(T, RoundUp) do
        # midpoints and radii computed with upward rounding, so that
        # [mA - rA, mA + rA] and [mB - rB, mB + rB] are guaranteed enclosures
        mA = Ainf + 0.5 * (Asup - Ainf)
        mB = Binf + 0.5 * (Bsup - Binf)

        rA = mA - Ainf
        rB = mB - Binf

        # rigorous bound on the radius of the product (midpoint-radius formula)
        R = abs.(mA) * rB + rA * (abs.(mB) + rB)
        Csup = mA * mB + R
        return mA, mB, R, Csup
    end

    Cinf = setrounding(T, RoundDown) do
        mA * mB - R
    end

    return Interval.(Cinf, Csup)
end
function *(::MultiplicationType{:fast},
A::AbstractMatrix{T},
B::AbstractMatrix{Interval{T}}) where {T<:Real}
Binf = inf.(B)
Bsup = sup.(B)
mB, R, Csup = setrounding(T, RoundUp) do
mB = Binf + 0.5 * (Bsup - Binf)
rB = mB - Binf
R = abs.(A) * rB
Csup = A * mB + R
return mB, R, Csup
end
Cinf = setrounding(T, RoundDown) do
A * mB - R
end
return Interval.(Cinf, Csup)
end
function *(::MultiplicationType{:fast},
A::AbstractMatrix{Interval{T}},
B::AbstractMatrix{T}) where {T<:Real}
Ainf = inf.(A)
Asup = sup.(A)
mA, R, Csup = setrounding(T, RoundUp) do
mA = Ainf + 0.5 * (Asup - Ainf)
rA = mA - Ainf
R = rA * abs.(B)
Csup = mA * B + R
return mA, R, Csup
end
Cinf = setrounding(T, RoundDown) do
mA * B - R
end
return Interval.(Cinf, Csup)
end
function *(::MultiplicationType{:rank1},
A::AbstractMatrix{Interval{T}},
B::AbstractMatrix{Interval{T}}) where {T<:Real}
Ainf = inf.(A)
Asup = sup.(A)
Binf = inf.(B)
Bsup = sup.(B)
n = size(A, 2)
Csup = zeros(T, (size(A,1), size(B,2)))
Cinf = zeros(T, size(A, 1), size(B, 2))
Cinf = setrounding(T, RoundDown) do
for i in 1:n
Cinf .+= min.(view(Ainf, :, i) * view(Binf, i, :)',
view(Ainf, :, i) * view(Bsup, i, :)',
view(Asup, :, i) * view(Binf, i, :)',
view(Asup, :, i) * view(Bsup, i, :)')
end
return Cinf
end
Csup = setrounding(T, RoundUp) do
for i in 1:n
Csup .+= max.(view(Ainf, :, i) * view(Binf, i, :)',
view(Ainf, :, i) * view(Bsup, i, :)',
view(Asup, :, i) * view(Binf, i, :)',
view(Asup, :, i) * view(Bsup, i, :)')
end
return Csup
end
return Interval.(Cinf, Csup)
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 2103 | """
rref(A::AbstractMatrix{T}) where {T<:Interval}
Computes the reduced row echelon form of the interval matrix `A` using maximum
mignitude as pivoting strategy.
### Examples
```jldoctest
julia> A = [2..4 -1..1; -1..1 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-1, 1]
[-1, 1] [2, 4]
julia> rref(A)
2×2 Matrix{Interval{Float64}}:
[2, 4] [-1, 1]
[0, 0] [1.5, 4.5]
```
"""
function rref(A::AbstractMatrix{T}) where {T<:Interval}
A1 = copy(A)
return rref!(A1)
end
"""
rref!(A::AbstractMatrix{T}) where {T<:Interval}
In-place version of [`rref`](@ref).
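
### Example

A usage sketch with the same matrix as in the [`rref`](@ref) docstring.

```julia
A = [2..4 -1..1; -1..1 2..4]
rref!(A) # A is overwritten with its reduced row echelon form
```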
"""
function rref!(A::AbstractMatrix{T}) where {T<:Interval}
m, n = size(A)
minmn = min(m,n)
@inbounds for k = 1:minmn
if k < m
# find maximum index
migmax, kp = _findmax_mig(view(A, k:m, k))
iszero(migmax) && throw(ArgumentError("Could not find a pivot with non-zero mignitude in column $k."))
kp += k - 1
# Swap rows k and kp if needed
k != kp && _swap!(A, k, kp)
end
# Scale first column
_scale!(A, k)
# Update the rest
_eliminate!(A, k)
end
return A
end
@inline function _findmax_mig(v)
@inbounds begin
migmax = mig(first(v))
kp = firstindex(v)
for (i, vi) in enumerate(v)
migi = mig(vi)
if migi > migmax
kp = i
migmax = migi
end
end
end
return migmax, kp
end
@inline function _swap!(A, k, kp)
@inbounds for i = 1:size(A, 2)
tmp = A[k,i]
A[k,i] = A[kp,i]
A[kp,i] = tmp
end
end
@inline function _scale!(A, k)
@inbounds begin
Akkinv = inv(A[k,k])
for i = k+1:size(A, 1)
A[i,k] *= Akkinv
end
end
end
@inline function _eliminate!(A, k)
m, n = size(A)
@inbounds begin
for j = k+1:n
for i = k+1:m
A[i,j] -= A[i,k]*A[k,j]
end
end
for i = k+1:m
A[i, k] = zero(eltype(A))
end
end
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 3967 | """
interval_isapprox(a::Interval, b::Interval; kwargs)
Checks whether the intervals ``a`` and ``b`` are approximate equal, that is both their
lower and upper bound are approximately equal.
### Keywords
Same of `Base.isapprox`
### Example
```jldoctest
julia> a = 1..2
[1, 2]
julia> b = a + 1e-10
[1, 2.00001]
julia> interval_isapprox(a, b)
true
julia> interval_isapprox(a, b; atol=1e-15)
false
```
"""
interval_isapprox(a::Interval, b::Interval; kwargs...) = isapprox(a.lo, b.lo; kwargs...) && isapprox(a.hi, b.hi; kwargs...)
"""
interval_norm(A::AbstractMatrix{T}) where {T<:Interval}
computes the infinity norm of interval matrix ``A``.
### Examples
```jldoctest
julia> A = [2..4 -1..1; -1..1 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-1, 1]
[-1, 1] [2, 4]
julia> interval_norm(A)
5.0
```
"""
interval_norm(A::AbstractMatrix{T}) where {T<:Interval} = opnorm(mag.(A), Inf) # TODO: properly extend the norm function and add the 1-norm
"""
    interval_norm(v::AbstractVector{T}) where {T<:Interval}
computes the infinity norm of the interval vector ``v``.
### Examples
```jldoctest
julia> b = [-2..2, -3..2]
2-element Vector{Interval{Float64}}:
[-2, 2]
[-3, 2]
julia> interval_norm(b)
3.0
```
"""
interval_norm(v::AbstractVector{T}) where {T<:Interval} = maximum(mag.(v))
# ? use manual loops instead
"""
enclose(A::AbstractMatrix{T}, b::AbstractVector{T}) where {T<:Interval}
Computes an enclosure of the solution of the interval linear system ``Ax=b`` using the
algorithm described in sec. 5.7.1 of [[HOR19]](@ref).
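
### Example

An illustrative call; the displayed bounds are approximate.

```julia
A = [2..4 -1..1; -1..1 2..4]
b = [-2..2, -1..1]
enclose(A, b) # ≈ [-2, 2] in each component
```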
"""
function enclose(A::StaticMatrix{N, N, T}, b::StaticVector{N, T}) where {N, T<:Interval}
C = inv(mid.(A))
A1 = Diagonal(ones(N)) - C*A
e = interval_norm(C*b)/(1 - interval_norm(A1))
x0 = MVector{N, T}(ntuple(_ -> -e..e, Val(N)))
return x0
end
function enclose(A::AbstractMatrix{T}, b::AbstractVector{T}) where {T<:Interval}
n = length(b)
C = inv(mid.(A))
A1 = Diagonal(ones(n)) - C*A
e = interval_norm(C*b)/(1 - interval_norm(A1))
x0 = fill(-e..e, n)
return x0
end
"""
comparison_matrix(A::AbstractMatrix{T}) where {T<:Interval}
Computes the comparison matrix ``⟨A⟩`` of the given interval matrix ``A`` according to the
definition ``⟨A⟩ᵢᵢ = mig(Aᵢᵢ)`` and ``⟨A⟩ᵢⱼ = -mag(Aᵢⱼ)`` if ``i≠j``.
### Examples
```jldoctest
julia> A = [2..4 -1..1; -1..1 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-1, 1]
[-1, 1] [2, 4]
julia> comparison_matrix(A)
2×2 Matrix{Float64}:
2.0 -1.0
-1.0 2.0
```
"""
function comparison_matrix(A::SMatrix{N, N, T, M}) where {N, M, T<:Interval}
n = size(A, 1)
compA = -mag.(A)
@inbounds for (i, idx) in enumerate(diagind(A))
compA = setindex(compA, mig(A[i, i]), idx)
end
return compA
end
function comparison_matrix(A::AbstractMatrix{T}) where {T<:Interval}
n = size(A, 1)
compA = -mag.(A)
@inbounds for i in 1:n
compA[i, i] = mig(A[i, i])
end
return compA
end
"""
Orthants
Iterator to go through all the ``2ⁿ`` vectors of length ``n`` with elements ``±1``.
This is equivalent to going through the orthants of an ``n``-dimensional Euclidean space.
### Fields
n::Int -- dimension of the vector space
### Example
```jldoctest
julia> for or in Orthants(2)
@show or
end
or = [1, 1]
or = [-1, 1]
or = [1, -1]
or = [-1, -1]
```
"""
struct Orthants
n::Int
end
Base.eltype(::Type{Orthants}) = Vector{Int}
Base.length(O::Orthants) = 2^(O.n)
function Base.iterate(O::Orthants, state=1)
state > 2 ^ O.n && return nothing
vec = -2*digits(state-1, base=2, pad=O.n) .+ 1
return (vec, state+1)
end
function Base.getindex(O::Orthants, i::Int)
1 <= i <= length(O) || throw(BoundsError(O, i))
return -2*digits(i-1, base=2, pad=O.n) .+ 1
end
Base.firstindex(O::Orthants) = 1
Base.lastindex(O::Orthants) = length(O)
_unchecked_interval(x::Real) = Interval(x)
_unchecked_interval(x::Complex) = Interval(real(x)) + Interval(imag(x)) * im
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 3527 | abstract type AbstractIntervalEigenSolver end
struct Hertz <: AbstractIntervalEigenSolver end
struct Rohn <: AbstractIntervalEigenSolver end
"""
eigenbox(A[, method=Rohn()])
Returns an enclosure of all the eigenvalues of `A`. If `A` is symmetric, then the
output is a real interval, otherwise it is a complex interval.
### Input
- `A` -- square interval matrix
- `method` -- method used to solve the symmetric interval eigenvalue problem (bounding
eigenvalues of general matrices is also reduced to the symmetric case).
Possible values are
- `Rohn` -- (default) fast method to compute an enclosure of the eigenvalues of
a symmetric interval matrix
- `Hertz` -- finds the exact hull of the eigenvalues of a symmetric interval
matrix, but has exponential complexity.
### Algorithm
The algorithms used by the function are described in [[HLA13]](@ref).
### Notes
The enclosure is not rigorous, meaning that the real eigenvalue problems solved internally
utilize normal floating point computations.
### Examples
```jldoctest
julia> A = [0 -1 -1; 2 -1.399.. -0.001 0; 1 0.5 -1]
3×3 Matrix{Interval{Float64}}:
[0, 0] [-1, -1] [-1, -1]
[2, 2] [-1.39901, -0.000999999] [0, 0]
[1, 1] [0.5, 0.5] [-1, -1]
julia> eigenbox(A)
[-1.90679, 0.970154] + [-2.51903, 2.51903]im
julia> eigenbox(A, Hertz())
[-1.64732, 0.520456] + [-2.1112, 2.1112]im
```
"""
function eigenbox(A::Symmetric{Interval{T}, Matrix{Interval{T}}}, ::Rohn) where {T}
    AΔ = Symmetric(IntervalArithmetic.radius.(A))
    Ac = Symmetric(mid.(A))

    # Rohn's enclosure [λmin(Ac) - ρ(AΔ), λmax(Ac) + ρ(AΔ)], where ρ(AΔ) is the
    # spectral radius of the radius matrix (its largest eigenvalue, since AΔ ≥ 0)
    ρ = eigmax(AΔ)
    λmax = eigmax(Ac)
    λmin = eigmin(Ac)

    return Interval(λmin - ρ, λmax + ρ)
end
function eigenbox(A::Symmetric{Interval{T}, Matrix{Interval{T}}}, ::Hertz) where {T}
n = checksquare(A)
Amax = Matrix{T}(undef, n, n)
Amin = Matrix{T}(undef, n, n)
λmin = Inf
λmax = -Inf
    @inbounds for z in Orthants(n)
        # by symmetry, only half of the sign patterns need to be checked
        first(z) < 0 && continue
for j in 1:n
for i in 1:j
if z[i] == z[j]
Amax[i, j] = sup(A[i, j])
Amin[i, j] = inf(A[i, j])
else
Amax[i, j] = inf(A[i, j])
Amin[i, j] = sup(A[i, j])
end
end
end
candmax = eigmax(Symmetric(Amax))
candmin = eigmin(Symmetric(Amin))
λmin = min(λmin, candmin)
λmax = max(λmax, candmax)
end
return IA.Interval(λmin, λmax)
end
function eigenbox(A::AbstractMatrix{Interval{T}},
method::AbstractIntervalEigenSolver) where {T}
λ = eigenbox(Symmetric(0.5*(A + A')), method)
n = size(A, 1)
μ = eigenbox(Symmetric([zeros(n, n) 0.5*(A - A');
0.5*(A' - A) zeros(n, n)]), method)
return λ + μ*im
end
function eigenbox(M::AbstractMatrix{Complex{Interval{T}}},
method::AbstractIntervalEigenSolver) where {T}
A = real.(M)
B = imag.(M)
λ = eigenbox(Symmetric(0.5*[A+A' B'-B;
B-B' A+A']), method)
μ = eigenbox(Symmetric(0.5*[B+B' A-A';
A'-A B+B']), method)
return λ + μ*im
end
function eigenbox(M::Hermitian{Complex{Interval{T}}, Matrix{Complex{Interval{T}}}},
method::AbstractIntervalEigenSolver) where {T}
A = real(M)
B = imag(M)
return eigenbox(Symmetric([A B';B A]), method)
end
# default
eigenbox(A) = eigenbox(A, Rohn())
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 4196 | """
    verify_eigen(A[, λ, X0]; w=0.1, ϵ=floatmin(), maxiter=10)
Finds a rigorous bound for the eigenvalues and eigenvectors of `A`. Eigenvalues are treated
as simple.
### Input
- `A` -- matrix
- `λ` -- (optional) approximate value for an eigenvalue of `A`
- `X0` -- (optional) eigenvector associated to `λ`
- `w` -- relative inflation parameter
- `ϵ` -- absolute inflation parameter
- `maxiter` -- maximum number of iterations
### Output
- Interval bounds on eigenvalues and eigenvectors.
- A boolean certificate (or a vector of booleans if all eigenvalues are computed) `cert`.
  If `cert[i]==true`, then the bounds for the ith eigenvalue and eigenvector are rigorous,
  otherwise not.
### Algorithm
The algorithm for this function is described in [[RUM01]](@ref).
### Example
```julia
julia> A = Symmetric([1 2;2 3])
2×2 Symmetric{Int64, Matrix{Int64}}:
1 2
2 3
julia> evals, evecs, cert = verify_eigen(A);
julia> evals
2-element Vector{Interval{Float64}}:
[-0.236068, -0.236067]
[4.23606, 4.23607]
julia> evecs
2×2 Matrix{Interval{Float64}}:
[-0.850651, -0.85065] [0.525731, 0.525732]
[0.525731, 0.525732] [0.85065, 0.850651]
julia> cert
2-element Vector{Bool}:
1
1
```
"""
function verify_eigen(A; kwargs...)
evals, evecs = eigen(mid.(A))
T = interval_eigtype(A, evals[1])
evalues = similar(evals, T)
evectors = similar(evecs, T)
cert = Vector{Bool}(undef, length(evals))
@inbounds for (i, λ₀) in enumerate(evals)
λ, v, flag = verify_eigen(A, λ₀, view(evecs, :,i); kwargs...)
evalues[i] = λ
evectors[:, i] .= v
cert[i] = flag
end
return evalues, evectors, cert
end
function verify_eigen(A, λ, X0; kwargs...)
ρ, X, cert = _verify_eigen(A, λ, X0; kwargs...)
return (real(λ) ± ρ) + (imag(λ) ± ρ) * im, X0 + X, cert
end
function verify_eigen(A::Symmetric, λ, X0; kwargs...)
ρ, X, cert = _verify_eigen(A, λ, X0; kwargs...)
return λ ± ρ, X0 + real.(X), cert
end
function _verify_eigen(A, λ::Number, X0::AbstractVector;
w=0.1, ϵ=floatmin(), maxiter=10)
_, v = findmax(abs.(X0))
R = mid.(A) - λ * I
R[:, v] .= -X0
R = inv(R)
C = IA.Interval.(A) - λ * I
Z = -R * (C * X0)
C[:, v] .= -X0
C = I - R * C
Zinfl = w * IA.Interval.(-mag.(Z), mag.(Z)) .+ IA.Interval(-ϵ, ϵ)
X = Complex.(Z)
cert = false
@inbounds for _ in 1:maxiter
Y = (real.(X) + Zinfl) + (imag.(X) + Zinfl) * im
Ytmp = Y * Y[v]
Ytmp[v] = 0
X = Z + C * Y + R * Ytmp
cert = all(isinterior.(X, Y))
cert && break
end
ρ = mag(X[v])
X[v] = 0
return ρ, X, cert
end
"""
bound_perron_frobenius_eigenvalue(A, max_iter=10)
Finds an upper bound for the Perron-Frobenius eigenvalue of the **non-negative** matrix `A`.
### Input
- `A` -- square real non-negative matrix
- `max_iter` -- maximum number of iterations of the power method used internally to compute
an initial approximation of the Perron-Frobenius eigenvector
### Example
```julia-repl
julia> A = [1 2;3 4]
2×2 Matrix{Int64}:
1 2
3 4
julia> bound_perron_frobenius_eigenvalue(A)
5.372281323275249
```
"""
function bound_perron_frobenius_eigenvalue(A::AbstractMatrix{T}, max_iter=10) where {T<:Real}
any(A .< 0) && throw(ArgumentError("Matrix contains negative entries"))
return _bound_perron_frobenius_eigenvalue(A, max_iter)
end
function _bound_perron_frobenius_eigenvalue(M, max_iter=10)
size(M, 1) == 1 && return M[1]
xpf = IA.Interval.(_power_iteration(M, max_iter))
Mxpf = M * xpf
ρ = zero(eltype(M))
@inbounds for (i, xi) in enumerate(xpf)
iszero(xi) && continue
tmp = Mxpf[i] / xi
ρ = max(ρ, tmp.hi)
end
return ρ
end
function _power_iteration(A, max_iter)
n = size(A,1)
xp = rand(n)
@inbounds for _ in 1:max_iter
xp .= A*xp
xp ./= norm(xp)
end
return xp
end
interval_eigtype(::Symmetric, ::T) where {T<:Real} = Interval{T}
interval_eigtype(::AbstractMatrix, ::T) where {T<:Real} = Complex{Interval{T}}
interval_eigtype(::AbstractMatrix, ::Complex{T}) where {T<:Real} = Complex{Interval{T}}
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 10164 | """
AbstractLinearSolver
Abstract type for solvers of interval linear systems.
"""
abstract type AbstractLinearSolver end
"""
AbstractDirectSolver <: AbstractLinearSolver
Abstract type for direct solvers of interval linear systems, such as Gaussian elimination
and Hansen-Bliek-Rohn.
"""
abstract type AbstractDirectSolver <: AbstractLinearSolver end
"""
AbstractIterativeSolver <: AbstractLinearSolver
Abstract type for iterative solvers of interval linear systems, such as Jacobi or
Gauss-Seidel.
"""
abstract type AbstractIterativeSolver <: AbstractLinearSolver end
"""
HansenBliekRohn <: AbstractDirectSolver
Type for the `HansenBliekRohn` solver of the square interval linear system ``Ax=b``.
For more details see section 5.6.2 of [[HOR19]](@ref)
### Notes
- Hansen-Bliek-Rohn works with H-matrices without precondition and with strongly regular
matrices using [`InverseMidpoint`](@ref) precondition
- If the midpoint of ``A`` is a diagonal matrix, then the algorithm returns the exact hull.
- An object of type Hansen-Bliek-Rohn is a callable function with method
(hbr::HansenBliekRohn)(A::AbstractMatrix{T},
b::AbstractVector{T}) where {T<:Interval}
### Examples
```jldoctest
julia> A = [2..4 -1..1;-1..1 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-1, 1]
[-1, 1] [2, 4]
julia> b = [-2..2, -1..1]
2-element Vector{Interval{Float64}}:
[-2, 2]
[-1, 1]
julia> hbr = HansenBliekRohn()
HansenBliekRohn linear solver
julia> hbr(A, b)
2-element Vector{Interval{Float64}}:
[-1.66667, 1.66667]
[-1.33334, 1.33334]
```
"""
struct HansenBliekRohn <: AbstractDirectSolver end
function (hbr::HansenBliekRohn)(A::AbstractMatrix{T},
b::AbstractVector{T}) where {T<:Interval}
n = length(b)
compA = comparison_matrix(A)
compA_inv, cert = epsilon_inflation(compA, Diagonal(ones(n)))
cert || @warn "Could not find a verified enclosure of ⟨A⟩⁻¹"
    all(sum(compA_inv; dims=2) .> 0) || throw(ArgumentError("applying Hansen-Bliek-Rohn to a non-H-matrix."))
u = compA_inv * mag.(b)
d = diag(compA_inv)
_α = sup.(diag(compA) .- 1 ./ d)
α = Interval.(-_α, _α)
_β = @. sup(u/d - mag(b))
β = Interval.(-_β, _β)
return (b .+ β) ./ (diag(A) .+ α)
end
"""
GaussianElimination <: AbstractDirectSolver
Type for the Gaussian elimination solver of the square interval linear system ``Ax=b``.
For more details see section 5.6.1 of [[HOR19]](@ref)
### Notes
- An object of type `GaussianElimination` is a callable function with method
(ge::GaussianElimination)(A::AbstractMatrix{T},
b::AbstractVector{T}) where {T<:Interval}
### Examples
```jldoctest
julia> A = [2..4 -1..1;-1..1 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-1, 1]
[-1, 1] [2, 4]
julia> b = [-2..2, -1..1]
2-element Vector{Interval{Float64}}:
[-2, 2]
[-1, 1]
julia> ge = GaussianElimination()
GaussianElimination linear solver
julia> ge(A, b)
2-element Vector{Interval{Float64}}:
[-1.66667, 1.66667]
[-1.33334, 1.33334]
```
"""
struct GaussianElimination <: AbstractDirectSolver end
function (ge::GaussianElimination)(A::AbstractMatrix{T},
b::AbstractVector{T}) where {T<:Interval}
n = length(b)
Abrref = rref([A b])
# backsubstitution
x = similar(b)
x[end] = Abrref[n, n+1]/Abrref[n, n]
@inbounds for i = n-1:-1:1
x[i] = (Abrref[i, n+1] - sum(Abrref[i, j]*x[j] for j in i+1:n))/Abrref[i, i]
end
return x
end
## JACOBI
"""
Jacobi <: AbstractIterativeSolver
Type for the Jacobi solver of the interval linear system ``Ax=b``.
For details see Section 5.7.4 of [[HOR19]](@ref)
### Fields
- `max_iterations` -- maximum number of iterations (default 20)
- `atol` -- absolute tolerance (default 0), if at some point ``|xₖ - xₖ₊₁| < atol``
(elementwise), then stop and return ``xₖ₊₁``.
If `atol=0`, then `min(diam(A))*1e-5` is used.
### Notes
- An object of type `Jacobi` is a function with method
(jac::Jacobi)(A::AbstractMatrix{T},
b::AbstractVector{T},
[x]::AbstractVector{T}=enclose(A, b)) where {T<:Interval}
#### Input
- `A` -- N×N interval matrix
- `b` -- interval vector of length N
- `x` -- (optional) initial enclosure for the solution of ``Ax = b``. If not given,
it is automatically computed using [`enclose`](@ref enclose)
### Examples
```jldoctest
julia> A = [2..4 -1..1;-1..1 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-1, 1]
[-1, 1] [2, 4]
julia> b = [-2..2, -1..1]
2-element Vector{Interval{Float64}}:
[-2, 2]
[-1, 1]
julia> jac = Jacobi()
Jacobi linear solver
max_iterations = 20
atol = 0.0
julia> jac(A, b)
2-element Vector{Interval{Float64}}:
[-1.66668, 1.66668]
[-1.33335, 1.33335]
```
"""
struct Jacobi <: AbstractIterativeSolver
max_iterations::Int
atol::Float64
end
Jacobi() = Jacobi(20, 0.0)
function (jac::Jacobi)(A::AbstractMatrix{T},
b::AbstractVector{T},
x::AbstractVector{T}=enclose(A, b)) where {T<:Interval}
n = length(b)
atol = iszero(jac.atol) ? minimum(diam.(A))*1e-5 : jac.atol
for _ in 1:jac.max_iterations
xold = copy(x)
@inbounds @simd for i in 1:n
x[i] = b[i]
for j in 1:n
(i == j) || (x[i] -= A[i, j] * xold[j])
end
x[i] = (x[i]/A[i, i]) ∩ xold[i]
end
all(interval_isapprox.(x, xold; atol=atol)) && break
end
return x
end
## GAUSS SEIDEL
"""
GaussSeidel <: AbstractIterativeSolver
Type for the Gauss-Seidel solver of the interval linear system ``Ax=b``.
For details see Section 5.7.4 of [[HOR19]](@ref)
### Fields
- `max_iterations` -- maximum number of iterations (default 20)
- `atol` -- absolute tolerance (default 0), if at some point ``|xₖ - xₖ₊₁| < atol``
(elementwise), then stop and return ``xₖ₊₁``.
If `atol=0`, then `min(diam(A))*1e-5` is used.
### Notes
- An object of type `GaussSeidel` is a function with method
(gs::GaussSeidel)(A::AbstractMatrix{T},
b::AbstractVector{T},
[x]::AbstractVector{T}=enclose(A, b)) where {T<:Interval}
#### Input
- `A` -- N×N interval matrix
- `b` -- interval vector of length N
- `x` -- (optional) initial enclosure for the solution of ``Ax = b``. If not given,
it is automatically computed using [`enclose`](@ref enclose)
### Examples
```jldoctest
julia> A = [2..4 -1..1;-1..1 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-1, 1]
[-1, 1] [2, 4]
julia> b = [-2..2, -1..1]
2-element Vector{Interval{Float64}}:
[-2, 2]
[-1, 1]
julia> gs = GaussSeidel()
GaussSeidel linear solver
max_iterations = 20
atol = 0.0
julia> gs(A, b)
2-element Vector{Interval{Float64}}:
[-1.66668, 1.66668]
[-1.33334, 1.33334]
```
"""
struct GaussSeidel <: AbstractIterativeSolver
max_iterations::Int
atol::Float64
end
GaussSeidel() = GaussSeidel(20, 0.0)
function (gs::GaussSeidel)(A::AbstractMatrix{T},
b::AbstractVector{T},
x::AbstractVector{T}=enclose(A, b)) where {T<:Interval}
n = length(b)
atol = iszero(gs.atol) ? minimum(diam.(A))*1e-5 : gs.atol
@inbounds for _ in 1:gs.max_iterations
xold = copy(x)
@inbounds for i in 1:n
x[i] = b[i]
@inbounds for j in 1:n
(i == j) || (x[i] -= A[i, j] * x[j])
end
x[i] = (x[i]/A[i, i]) .∩ xold[i]
end
all(interval_isapprox.(x, xold; atol=atol)) && break
end
return x
end
## KRAWCZYK
"""
LinearKrawczyk <: AbstractIterativeSolver
Type for the Krawczyk solver of the interval linear system ``Ax=b``.
For details see Section 5.7.3 of [[HOR19]](@ref)
### Fields
- `max_iterations` -- maximum number of iterations (default 20)
- `atol` -- absolute tolerance (default 0), if at some point ``|xₖ - xₖ₊₁| < atol``
(elementwise), then stop and return ``xₖ₊₁``.
If `atol=0`, then `min(diam(A))*1e-5` is used.
### Notes
- An object of type `LinearKrawczyk` is a function with method
(kra::LinearKrawczyk)(A::AbstractMatrix{T},
b::AbstractVector{T},
[x]::AbstractVector{T}=enclose(A, b)) where {T<:Interval}
#### Input
- `A` -- N×N interval matrix
- `b` -- interval vector of length N
- `x` -- (optional) initial enclosure for the solution of ``Ax = b``. If not given,
it is automatically computed using [`enclose`](@ref enclose)
### Examples
```jldoctest
julia> A = [2..4 -1..1;-1..1 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-1, 1]
[-1, 1] [2, 4]
julia> b = [-2..2, -1..1]
2-element Vector{Interval{Float64}}:
[-2, 2]
[-1, 1]
julia> kra = LinearKrawczyk()
LinearKrawczyk linear solver
max_iterations = 20
atol = 0.0
julia> kra(A, b)
2-element Vector{Interval{Float64}}:
[-2, 2]
[-2, 2]
```
"""
struct LinearKrawczyk <: AbstractIterativeSolver
max_iterations::Int
atol::Float64
end
LinearKrawczyk() = LinearKrawczyk(20, 0.0)
function (kra::LinearKrawczyk)(A::AbstractMatrix{T},
b::AbstractVector{T},
x::AbstractVector{T}=enclose(A, b)) where {T<:Interval}
atol = iszero(kra.atol) ? minimum(diam.(A))*1e-5 : kra.atol
C = inv(mid.(A))
for i = 1:kra.max_iterations
xnew = (C*b - C*(A*x) + x) .∩ x
all(interval_isapprox.(x, xnew; atol=atol)) && return xnew
x = xnew
end
return x
end
# custom printing for solvers
function Base.string(s::AbstractLinearSolver)
str="""$(typeof(s)) linear solver
"""
fields = fieldnames(typeof(s))
for field in fields
str *= """$field = $(getfield(s, field))
"""
end
return str
end
Base.show(io::IO, s::AbstractLinearSolver) = print(io, string(s))
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 2945 | """
LinearOettliPrager <: AbstractDirectSolver
Type for the OettliPrager solver of the interval linear system ``Ax=b``. The solver first
converts the system of interval equalities into a system of real inequalities using
Oettli-Präger theorem [[OET64]](@ref) and then finds the feasible set by solving a LP
problem in each orthant using `LazySets.jl`.
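Writing ``A = [A_c - A_Δ, A_c + A_Δ]`` and ``b = [b_c - b_Δ, b_c + b_Δ]``, the solution set
is characterized by the Oettli-Präger inequality ``|A_c x - b_c| ≤ A_Δ|x| + b_Δ``, which
becomes a system of linear inequalities once the orthant (i.e. the signs of the components
of ``x``) is fixed.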
### Notes
- You need to import `LazySets.jl` to use this functionality.
- An object of type `LinearOettliPrager` is a function with methods
(op::LinearOettliPrager)(A::AbstractMatrix{T},
b::AbstractVector{T}) where {T<:Interval}
#### Input
- `A` -- N×N interval matrix
- `b` -- interval vector of length N
### Examples
```julia-repl
julia> A = [2..4 -2..1;-1..2 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-2, 1]
[-1, 2] [2, 4]
julia> b = [-2..2, -2..2]
2-element Vector{Interval{Float64}}:
[-2, 2]
[-2, 2]
julia> polytopes = solve(A, b, LinearOettliPrager());
julia> typeof(polytopes)
Vector{HPolytope{Float64, SparseArrays.SparseVector{Float64, Int64}}}
```
"""
struct LinearOettliPrager <: AbstractDirectSolver end
"""
NonLinearOettliPrager <: AbstractIterativeSolver
Type for the OettliPrager solver of the interval linear system ``Ax=b``. The solver first
converts the system of interval equalities into a system of real inequalities using
Oettli-Präger theorem [[OET64]](@ref) and then finds the feasible set using
the forward-backward contractor method [[JAU14]](@ref) implemented in
`IntervalConstraintProgramming.jl`.
### Fields
- `tol` -- tolerance for the paving, default 0.01.
### Notes
- You need to import `IntervalConstraintProgramming.jl` to use this functionality.
- An object of type `NonLinearOettliPrager` is a function with methods
(op::NonLinearOettliPrager)(A::AbstractMatrix{T},
b::AbstractVector{T},
[X]::AbstractVector{T}=enclose(A, b)) where {T<:Interval}
(op::NonLinearOettliPrager)(A::AbstractMatrix{T},
b::AbstractVector{T},
X::IntervalBox) where {T<:Interval}
#### Input
- `A` -- N×N interval matrix
- `b` -- interval vector of length N
- `X` -- (optional) initial enclosure for the solution of ``Ax = b``. If not given,
it is automatically computed using [`enclose`](@ref enclose)
### Examples
```julia-repl
julia> A = [2..4 -2..1;-1..2 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-2, 1]
[-1, 2] [2, 4]
julia> b = [-2..2, -2..2]
2-element Vector{Interval{Float64}}:
[-2, 2]
[-2, 2]
julia> solve(A, b, NonLinearOettliPrager(0.1))
Paving:
- tolerance ϵ = 0.1
- inner approx. of length 1195
- boundary approx. of length 823
```
"""
struct NonLinearOettliPrager <: AbstractIterativeSolver
tol::Float64
end
NonLinearOettliPrager() = NonLinearOettliPrager(0.01)
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 616 | using .LazySets
function (opl::LinearOettliPrager)(A, b)
    n = length(b)
    Ac = mid.(A)
    bc = mid.(b)
    Ar = IntervalArithmetic.radius.(A)
    br = IntervalArithmetic.radius.(b)

    polytopes = Vector{HPolytope}(undef, 2^n)
    orthants = DiagDirections(n)

    # In the orthant with sign pattern d we have |x| = Dx with D = Diagonal(d), so the
    # Oettli-Präger inequality |Ac*x - bc| ≤ Ar*|x| + br becomes linear:
    # (±Ac - Ar*D)x ≤ br ± bc, together with -Dx ≤ 0 restricting x to the orthant.
    @inbounds for (i, d) in enumerate(orthants)
        D = Diagonal(d)
        Ard = -Ar*D
        A1 = [Ac + Ard; -Ac + Ard; -D]
        b1 = [br + bc; br - bc; zeros(n)]
        polytopes[i] = HPolytope(A1, b1)
    end
    return identity.(filter!(!isempty, polytopes))
end
_default_precondition(_, ::LinearOettliPrager) = NoPrecondition()
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 1741 | using .IntervalConstraintProgramming
"""
Returns the unrolled expression for \$|a ⋅ x - b|\$.
\$a\$ and \$x\$ must be vectors of the same length and \$b\$ is a scalar.
The absolute value in the equation is taken elementwise.
"""
function oettli_lhs(a, b, x)
ex = :( $(a[1])*$(x[1]))
for i = 2:length(x)
ex = :( $ex + $(a[i])*$(x[i]))
end
return :(abs($ex - $b))
end
"""
Returns the unrolled expression for \$a ⋅ |x| + b\$.
\$a\$ and \$x\$ must be vectors of the same length and \$b\$ is a scalar.
The absolute value in the equation is taken elementwise.
"""
function oettli_rhs(a, b, x)
ex = :( $(a[1])*abs($(x[1])))
for i = 2:length(x)
ex = :( $ex + $(a[i])*abs($(x[i])))
end
return :($ex + $b)
end
"""
Returns the separator for the constraint `|a_c ⋅ x - b_c| - a_r ⋅ |x| - b_r <= 0`.
`a` and `x` must be vectors of the same length and `b` is a scalar.
The absolute values in the equation are taken elementwise.
`a_c` and `a_r` are vectors containing the midpoints and radii of the intervals in `a`;
similarly for `b_c` and `b_r`.
"""
function oettli_eq(a, b, x)
ac = mid.(a)
ar = IntervalArithmetic.radius.(a)
bc = mid(b)
br = IntervalArithmetic.radius(b)
lhs = oettli_lhs(ac, bc, x)
rhs = oettli_rhs(ar, br, x)
ex = :(@constraint $lhs - $rhs <= 0)
@eval $ex
end
function (op::NonLinearOettliPrager)(A, b, X::IntervalBox)
vars = ntuple(i -> Symbol(:x, i), length(b))
separators = [oettli_eq(A[i,:], b[i], vars) for i in 1:length(b)]
S = reduce(∩, separators)
return Base.invokelatest(pave, S, X, op.tol)
end
(op::NonLinearOettliPrager)(A, b, X=enclose(A, b)) = op(A, b, IntervalBox(X))
_default_precondition(_, ::NonLinearOettliPrager) = NoPrecondition()
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 3193 | """
AbstractPrecondition
Abstract type for preconditioners of interval linear systems.
"""
abstract type AbstractPrecondition end
"""
NoPrecondition <: AbstractPrecondition
Type of the trivial preconditioner which does nothing.
### Notes
- An object of type `NoPrecondition` is a function with method
(np::NoPrecondition)(A::AbstractMatrix{T},
b::AbstractVector{T}) where {T<:Interval}
### Example
```jldoctest
julia> A = [2..4 -2..1; -1..2 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-2, 1]
[-1, 2] [2, 4]
julia> b = [-2..2, -2..2]
2-element Vector{Interval{Float64}}:
[-2, 2]
[-2, 2]
julia> np = NoPrecondition()
NoPrecondition()
julia> np(A, b)
(Interval{Float64}[[2, 4] [-2, 1]; [-1, 2] [2, 4]], Interval{Float64}[[-2, 2], [-2, 2]])
```
"""
struct NoPrecondition <: AbstractPrecondition end
(np::NoPrecondition)(A::AbstractMatrix{T}, b::AbstractVector{T}) where {T<:Interval} = A, b
"""
InverseMidpoint <: AbstractPrecondition
Preconditioner that preconditions the linear system ``Ax=b`` with ``A_c^{-1}``,
where ``A_c`` is the midpoint matrix of ``A``.
### Notes
- An object of type `InverseMidpoint` is a function with method
(imp::InverseMidpoint)(A::AbstractMatrix{T},
b::AbstractVector{T}) where {T<:Interval}
### Examples
```jldoctest
julia> A = [2..4 -2..1; -1..2 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-2, 1]
[-1, 2] [2, 4]
julia> b = [-2..2, -2..2]
2-element Vector{Interval{Float64}}:
[-2, 2]
[-2, 2]
julia> imp = InverseMidpoint()
InverseMidpoint()
julia> imp(A, b)
(Interval{Float64}[[0.594594, 1.40541] [-0.540541, 0.540541]; [-0.540541, 0.540541] [0.594594, 1.40541]], Interval{Float64}[[-0.756757, 0.756757], [-0.756757, 0.756757]])
```
"""
struct InverseMidpoint <: AbstractPrecondition end
function (imp::InverseMidpoint)(A::AbstractMatrix{T},
b::AbstractVector{T}) where {T<:Interval}
R = inv(mid.(A))
return R*A, R*b
end
"""
InverseDiagonalMidpoint <: AbstractPrecondition
Preconditioner that preconditions the linear system ``Ax=b`` with the diagonal matrix of
``A_c^{-1}``, where ``A_c`` is the midpoint matrix of ``A``.
### Notes
- An object of type `InverseDiagonalMidpoint` is a function with method
(idmp::InverseDiagonalMidpoint)(A::AbstractMatrix{T},
b::AbstractVector{T}) where {T<:Interval}
### Example
```jldoctest
julia> A = [2..4 -2..1; -1..2 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-2, 1]
[-1, 2] [2, 4]
julia> b = [-2..2, -2..2]
2-element Vector{Interval{Float64}}:
[-2, 2]
[-2, 2]
julia> idmp = InverseDiagonalMidpoint()
InverseDiagonalMidpoint()
julia> idmp(A, b)
(Interval{Float64}[[0.666666, 1.33334] [-0.666667, 0.333334]; [-0.333334, 0.666667] [0.666666, 1.33334]], Interval{Float64}[[-0.666667, 0.666667], [-0.666667, 0.666667]])
```
"""
struct InverseDiagonalMidpoint <: AbstractPrecondition end
function (idmp::InverseDiagonalMidpoint)(A::AbstractMatrix{T},
b::AbstractVector{T}) where {T<:Interval}
R = inv(Diagonal(mid.(A)))
return R*A, R*b
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 4786 | """
solve(A::AbstractMatrix{T},
b::AbstractVector{T},
solver::AbstractIterativeSolver,
[precondition]::AbstractPrecondition=_default_precondition(A, solver),
[X]::AbstractVector{T}=enclose(A, b)) where {T<:Interval}
Solves the square interval system ``Ax=b`` using the given algorithm, preconditioner
and initial enclosure
### Input
- `A` -- square interval matrix
- `b` -- interval vector
- `solver` -- algorithm used to solve the linear system
- `precondition` -- preconditioner used. If not given, it is automatically computed based on
the matrix `A` and the solver.
- `X` -- initial enclosure.
if not given, it is automatically computed using [`enclose`](@ref)
### Examples
```jldoctest
julia> A = [2..4 -1..1;-1..1 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-1, 1]
[-1, 1] [2, 4]
julia> b = [-2..2, -1..1]
2-element Vector{Interval{Float64}}:
[-2, 2]
[-1, 1]
julia> solve(A, b, GaussSeidel(), NoPrecondition(), [-10..10, -10..10])
2-element Vector{Interval{Float64}}:
[-1.66668, 1.66668]
[-1.33334, 1.33334]
julia> solve(A, b, GaussSeidel())
2-element Vector{Interval{Float64}}:
[-1.66667, 1.66667]
[-1.33334, 1.33334]
```
"""
function solve(A::AbstractMatrix{T},
b::AbstractVector{T},
solver::AbstractIterativeSolver,
precondition::AbstractPrecondition=_default_precondition(A, solver),
X::AbstractVector{T}=enclose(A, b)) where {T<:Interval}
checksquare(A) == length(b) == length(X) || throw(DimensionMismatch())
A, b = precondition(A, b)
return solver(A, b, X)
end
"""
solve(A::AbstractMatrix{T},
b::AbstractVector{T},
solver::AbstractDirectSolver,
[precondition]::AbstractPrecondition=_default_precondition(A, solver)) where
{T<:Interval}
Solves the square interval system ``Ax=b`` using the given direct algorithm and
preconditioner.
### Input
- `A` -- square interval matrix
- `b` -- interval vector
- `solver` -- algorithm used to solve the linear system
- `precondition` -- preconditioner used. If not given, it is automatically computed based on
the matrix `A` and the solver.
### Examples
```jldoctest
julia> A = [2..4 -1..1;-1..1 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-1, 1]
[-1, 1] [2, 4]
julia> b = [-2..2, -1..1]
2-element Vector{Interval{Float64}}:
[-2, 2]
[-1, 1]
julia> solve(A, b, HansenBliekRohn(), InverseMidpoint())
2-element Vector{Interval{Float64}}:
[-1.66667, 1.66667]
[-1.33334, 1.33334]
julia> solve(A, b, HansenBliekRohn())
2-element Vector{Interval{Float64}}:
[-1.66667, 1.66667]
[-1.33334, 1.33334]
```
"""
function solve(A::AbstractMatrix{T},
b::AbstractVector{T},
solver::AbstractDirectSolver,
precondition::AbstractPrecondition=_default_precondition(A, solver)) where
{T<:Interval}
checksquare(A) == length(b) || throw(DimensionMismatch())
A, b = precondition(A, b)
return solver(A, b)
end
# fallback
"""
solve(A::AbstractMatrix{T},
b::AbstractVector{T},
[solver]::AbstractLinearSolver,
[precondition]::AbstractPrecondition=_default_precondition(A, solver)) where
{T<:Interval}
Solves the square interval system ``Ax=b`` using the given algorithm and preconditioner
### Input
- `A` -- square interval matrix
- `b` -- interval vector
- `solver` -- algorithm used to solve the linear system. If not given,
[`GaussianElimination`](@ref) is used.
- `precondition` -- preconditioner used. If not given, it is automatically computed based on
the matrix `A` and the solver.
### Examples
```jldoctest
julia> A = [2..4 -1..1;-1..1 2..4]
2×2 Matrix{Interval{Float64}}:
[2, 4] [-1, 1]
[-1, 1] [2, 4]
julia> b = [-2..2, -1..1]
2-element Vector{Interval{Float64}}:
[-2, 2]
[-1, 1]
julia> solve(A, b)
2-element Vector{Interval{Float64}}:
[-1.66667, 1.66667]
[-1.33334, 1.33334]
```
"""
function solve(A::AbstractMatrix{T},
b::AbstractVector{T},
solver::AbstractLinearSolver=_default_solver(),
precondition::AbstractPrecondition=_default_precondition(A, solver)) where {T<:Interval}
checksquare(A) == length(b) || throw(DimensionMismatch())
A, b = precondition(A, b)
return solver(A, b)
end
## Default settings
_default_solver() = GaussianElimination()
function _default_precondition(A, ::AbstractDirectSolver)
if is_strictly_diagonally_dominant(A) || is_M_matrix(A)
return NoPrecondition()
else
return InverseMidpoint()
end
end
# fallback
_default_precondition(_, ::AbstractLinearSolver) = InverseMidpoint()
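# Sketch of how the defaults above resolve (commented out; the matrix is the
# one used in the docstring examples of this file):
#
#   A = [2..4 -1..1; -1..1 2..4]                  # strictly diagonally dominant
#   _default_precondition(A, HansenBliekRohn())   # -> NoPrecondition()
#   _default_precondition(A, GaussSeidel())       # -> InverseMidpoint() (fallback)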
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 2854 | ##
# routines to give a rigorous solution of the real linear system Ax=B
"""
epsilon_inflation(A::AbstractMatrix{T}, b::AbstractArray{S, N};
r=0.1, ϵ=1e-20, iter_max=20) where {T<:Real, S<:Real, N}
epsilon_inflation(A::AbstractMatrix{T}, b::AbstractArray{S, N};
r=0.1, ϵ=1e-20, iter_max=20) where {T<:Interval, S<:Interval, N}
Gives an enclosure of the solution of the square linear system ``Ax=b``
using the ϵ-inflation algorithm, see algorithm 10.7 of [[RUM10]](@ref)
### Input
* `A` -- square matrix of size n × n
* `b` -- vector of length n or matrix of size n × m
* `r` -- relative inflation, default 10%
* `ϵ` -- absolute inflation, default 1e-20
* `iter_max` -- maximum number of iterations
### Output
* `x` -- enclosure of the solution of the linear system
* `cert` -- Boolean flag; if `cert==true`, then `x` is *certified* to contain the true
solution of the linear system; if `cert==false`, then the algorithm could not prove that `x`
actually contains the true solution.
### Algorithm
Given the real system ``Ax=b`` and an approximate solution ``x̃``, we initialize
``x₀ = [x̃, x̃]``. At each iteration the algorithm computes the inflation
``y = xₖ * [1 - r, 1 + r] .+ [-ϵ, ϵ]``
and the update
``xₖ₊₁ = Z + (I - CA)y``,
where ``Z = C(b - Ax̃)`` and ``C`` is an approximate inverse of ``A``. If the condition
``xₖ₊₁ ⊂ y`` is met, then ``x̃ + xₖ₊₁`` is a proved enclosure of ``A⁻¹b`` and `cert` is set to
true. If the condition is not met within the maximum number of iterations, the
latest computed enclosure is returned, but `cert` is set to false, meaning the algorithm
could not prove that the enclosure contains the true solution. For interval systems,
``x̃`` is obtained considering the midpoints of ``A`` and ``b``.
### Notes
- This algorithm is meant for *real* linear systems, or interval systems with
very tiny intervals. For interval linear systems with wider intervals, see the
[`solve`](@ref) function.
### Examples
```jldoctest
julia> A = [1 2;3 4]
2×2 Matrix{Int64}:
1 2
3 4
julia> b = A * ones(2)
2-element Vector{Float64}:
3.0
7.0
julia> x, cert = epsilon_inflation(A, b)
(Interval{Float64}[[0.999999, 1.00001], [0.999999, 1.00001]], true)
julia> ones(2) .∈ x
2-element BitVector:
1
1
julia> cert
true
```
"""
function epsilon_inflation(A::AbstractMatrix{T}, b::AbstractArray{S, N};
r=0.1, ϵ=1e-20, iter_max=20) where {T<:Real, S<:Real, N}
    r1 = Interval(1 - r, 1 + r)           # relative inflation interval
    ϵ1 = Interval(-ϵ, ϵ)                  # absolute inflation interval
    R = inv(mid.(A))                      # approximate inverse of the midpoint matrix
    C = I - R * A                         # iteration matrix, contracting if R ≈ A⁻¹
    xs = R * mid.(b)                      # approximate floating point solution x̃
    z = R * (b - (A * Interval.(xs)))     # enclosure of the residual R(b - Ax̃)
    x = z
    for _ in 1:iter_max
        y = r1 * x .+ ϵ1                  # ϵ-inflation of the current enclosure
        x = z + C * y
        if all(isinterior.(x, y))         # x in the interior of y certifies the enclosure
            return xs + x, true
        end
    end
    return xs + x, false                  # could not certify within iter_max iterations
end
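# Sketch for interval input (commented out): as described in the docstring,
# the approximate solution is computed from the midpoints of `A` and `b`.
#
#   A = [1..1 2..2; 3..3 4..4]          # thin interval matrix
#   b = A * ones(2)
#   x, cert = epsilon_inflation(A, b)   # expected to certify for such tiny intervals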
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 660 | module NumericalTest
export rounding_test
function test_matrix(k)
A = zeros(Float64, (k, k))
A[:, end] = fill(2^(-53), k)
for i in 1:k-1
A[i,i] = 1.0
end
return A
end
using LinearAlgebra
"""
rounding_test(n, k)
Let `u=fill(2^(-53), k-1)` and let A be the matrix
[I u;
0 2^(-53)]
This test computes `A*A'` with the rounding mode set to `RoundUp`, running BLAS on
`n` threads, and returns `true` if the first `k-1` diagonal entries are rounded up to `nextfloat(1.0)`.
"""
function rounding_test(n, k)
    BLAS.set_num_threads(n)
    A = test_matrix(k)
    B = setrounding(Float64, RoundUp) do
        BLAS.gemm('N', 'T', 1.0, A, A)
    end
    return all(B[i, i] == nextfloat(1.0) for i in 1:k-1)
end
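# Sketch (commented out): with upward rounding each of the first k-1 diagonal
# entries of A*A' is computed as 1 + 2^(-106) rounded up, i.e. nextfloat(1.0),
# so the call below is expected to return true whenever BLAS honours the
# rounding mode:
#
#   rounding_test(1, 4)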
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 4928 | const _vars_dict = Dict(:vars => Symbol[])
"""
AffineExpression{T}
Data structure to represent affine expressions, such as ``x+2y+z+4``.
### Examples
```jldoctest
julia> @affinevars x y z
3-element Vector{AffineExpression{Int64}}:
x
y
z
julia> p1 = x + 2y + z + 4
x+2y+z+4
```
"""
struct AffineExpression{T}
coeffs::Vector{T}
end
function AffineExpression(x::T) where {T<:Number}
c = zeros(promote_type(T, Int), length(_vars_dict[:vars]) + 1)
c[end] = x
return AffineExpression(c)
end
## VARIABLES CONSTRUCTION
_get_vars(x::Symbol) = [x]    # single variable, e.g. @affinevars x
function _get_vars(x...)
    if length(x) == 1    # ranged form, e.g. @affinevars x[1:4]
        s = x[1].args[1]                  # base symbol, e.g. :x
        start = x[1].args[2].args[2]      # range start
        stop = x[1].args[2].args[3]       # range stop
        return [Symbol(s, i) for i in start:stop]
    else                 # several symbols, e.g. @affinevars x y z
        return collect(x)
    end
end
"""
@affinevars(x...)
Macro to construct the variables used to represent [`AffineExpression`](@ref).
### Examples
```jldoctest
julia> @affinevars x
1-element Vector{AffineExpression{Int64}}:
x
julia> @affinevars x y z
3-element Vector{AffineExpression{Int64}}:
x
y
z
julia> @affinevars x[1:4]
4-element Vector{AffineExpression{Int64}}:
x1
x2
x3
x4
```
"""
macro affinevars(x...)
vars = _get_vars(x...)
_vars_dict[:vars] = vars
ex = quote end
vars_ex = Expr(:vect)
for (i, s) in enumerate(vars)
c = zeros(Int, length(vars) + 1)
c[i] = 1
push!(ex.args, :($(esc(s)) = AffineExpression($c)))
push!(vars_ex.args, s)
end
push!(ex.args, esc(vars_ex))
return ex
end
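# Sketch (commented out): the macro above assigns one `AffineExpression` per
# variable and records the symbols in `_vars_dict`; e.g. after `@affinevars x y`,
#
#   x.coeffs == [1, 0, 0]    # coefficient of x, coefficient of y, constant term
#   y.coeffs == [0, 1, 0]
#   _vars_dict[:vars] == [:x, :y]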
(ae::AffineExpression)(p::Vector{<:Number}) = dot(ae.coeffs[1:end-1], p) + ae.coeffs[end]
function show(io::IO, ae::AffineExpression)
first_printed = false
if iszero(ae.coeffs)
print(io, 0)
else
@inbounds for (i, x) in enumerate(_vars_dict[:vars])
c = ae.coeffs[i]
iszero(c) && continue
if c > 0
if first_printed
print(io, "+")
end
else
print(io, "-")
end
if abs(c) != 1
print(io, abs(c))
end
print(io, x)
first_printed = true
end
c = last(ae.coeffs)
if !iszero(c)
if c > 0 && first_printed
print(io, "+")
end
print(io, c)
end
end
end
#########################
# BASIC FUNCTIONS #
#########################
function zero(::AffineExpression{T}) where {T}
return AffineExpression(zeros(T, length(_vars_dict[:vars]) + 1))
end
function zero(::Type{AffineExpression{T}}) where {T}
return AffineExpression(zeros(T, length(_vars_dict[:vars]) + 1))
end
one(::Type{AffineExpression{T}}) where {T} = zero(AffineExpression{T}) + 1
one(::AffineExpression{T}) where {T} = zero(AffineExpression{T}) + 1
# coefficients(ae::AffineExpression) = ae.coeffs[1:end-1]
# coefficient(ae::AffineExpression, i) = ae.coeffs[i]
# offset(ae::AffineExpression) = ae.coeffs[end]
#########################
# ARITHMETIC OPERATIONS #
#########################
for op in (:+, :-)
@eval function $op(ae1::AffineExpression, ae2::AffineExpression)
return AffineExpression($op(ae1.coeffs, ae2.coeffs))
end
@eval function $op(ae::AffineExpression{T}, n::S) where {T<:Number, S<:Number}
TS = promote_type(T, S)
c = similar(ae.coeffs, TS)
c .= ae.coeffs
c[end] = $op(c[end], n)
return AffineExpression(c)
end
end
+(ae::AffineExpression) = ae
-(ae::AffineExpression) = AffineExpression(-ae.coeffs)
function Base.:(*)(ae1::AffineExpression, n::Number)
return AffineExpression(ae1.coeffs * n)
end
function Base.:(/)(ae1::AffineExpression, n::Number)
return AffineExpression(ae1.coeffs / n)
end
for op in (:+, :*)
@eval $op(n::Number, ae::AffineExpression) = $op(ae, n)
end
-(n::Number, ae::AffineExpression) = n + (-ae)
==(ae1::AffineExpression, ae2::AffineExpression) = ae1.coeffs == ae2.coeffs
==(ae1::AffineExpression, n::Number) = iszero(ae1.coeffs[1:end-1]) && ae1.coeffs[end] == n
function *(ae::AffineExpression, A::AbstractArray{T, N}) where {T<:Number, N}
return map(x -> x * ae, A)
end
function *(A::AbstractArray{T, N}, ae::AffineExpression) where {T<:Number, N}
return map(x -> x * ae, A)
end
## Conversion and promotion
function promote_rule(::Type{AffineExpression{T}}, ::Type{S}) where {T<:Number, S<:Number}
AffineExpression{promote_type(T, S)}
end
function promote_rule(::Type{AffineExpression{T}}, ::Type{AffineExpression{S}}) where {T<:Number, S<:Number}
AffineExpression{promote_type(T, S)}
end
convert(::Type{AffineExpression}, x::Number) = AffineExpression(x)
convert(::Type{AffineExpression{T}}, x::Number) where T = AffineExpression(convert(T, x))
convert(::Type{AffineExpression{T}}, ae::AffineExpression) where {T<:Number} = AffineExpression{T}(ae.coeffs)
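# Sketch (commented out): the promotion rules above let affine expressions mix
# with numbers of a different coefficient type, e.g.
#
#   @affinevars x y
#   a, b = promote(x + y + 1, 1.5)   # both become AffineExpression{Float64}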
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 4087 | """
AffineParametricArray{T, N, MT<:AbstractArray{T, N}}
Array whose elements have an affine dependency on a set of parameters ``p₁, p₂, …, pₙ``.
### Fields
- `coeffs::Vector{MT}` -- vector of arrays; the `i`-th array holds the coefficients of the `i`-th variable, while the last array holds the constant term.
### Example
```jldoctest
julia> @affinevars x y z
3-element Vector{AffineExpression{Int64}}:
x
y
z
julia> A = AffineParametricArray([x+y x-1;x+y+z 1])
2×2 AffineParametricMatrix{Int64, Matrix{Int64}}:
x+y x-1
x+y+z 1
```
"""
struct AffineParametricArray{T, N, MT<:AbstractArray{T, N}} <: AbstractArray{T, N}
coeffs::Vector{MT}
end
const AffineParametricMatrix{T, MT} = AffineParametricArray{T, 2, MT} where {T, MT<:AbstractMatrix{T}}
const AffineParametricVector{T, VT} = AffineParametricArray{T, 1, VT} where {T, VT <: AbstractVector{T}}
# apas are callable
function (apa::AffineParametricArray)(p)
length(p) + 1 == length(apa.coeffs) || throw(ArgumentError("dimension mismatch"))
return sum(apa.coeffs[i] * p[i] for i in eachindex(p)) + apa.coeffs[end]
end
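# Sketch (commented out): evaluating an `AffineParametricArray` at a vector of
# parameter values substitutes them into every entry; with the docstring matrix
# A = AffineParametricArray([x+y x-1; x+y+z 1]),
#
#   A([1, 2, 3]) == [3 0; 6 1]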
# Array interface
IndexStyle(::Type{<:AffineParametricArray}) = IndexLinear()
size(apa::AffineParametricArray) = size(apa.coeffs[1])
function getindex(apa::AffineParametricArray, idx...)
    nvars = length(_vars_dict[:vars])
    # unit affine expressions, one for each currently defined variable
    vars = [AffineExpression(Vector(c)) for c in eachcol(Matrix(I, nvars + 1, nvars))]
    tmp = sum(getindex(c, idx...) * v for (v, c) in zip(vars, apa.coeffs[1:end-1]))
    return getindex(apa.coeffs[end], idx...) + tmp
end
function setindex!(apa::AffineParametricArray, ae::AffineExpression, idx...)
@inbounds for i in eachindex(ae.coeffs)
setindex!(apa.coeffs[i], ae.coeffs[i], idx...)
end
end
function setindex!(apa::AffineParametricArray, num::Number, idx...)
setindex!(apa.coeffs[end], num, idx...)
end
==(apa1::AffineParametricArray, apa2::AffineParametricArray) = apa1.coeffs == apa2.coeffs
# unary operations
+(apa::AffineParametricArray) = apa
-(apa::AffineParametricArray) = AffineParametricArray(-apa.coeffs)
# addition subtraction
for op in (:+, :-)
@eval function $op(apa1::AffineParametricArray, apa2::AffineParametricArray)
return AffineParametricArray($op(apa1.coeffs, apa2.coeffs))
end
@eval function $op(apa::AffineParametricArray{T, N, MT}, B::MS) where {T, N, MT, S, MS<:AbstractArray{S, N}}
MTS = promote_type(MT, MS)
coeffs = similar(apa.coeffs, MTS)
coeffs .= apa.coeffs
coeffs[end] = $op(coeffs[end], B)
return AffineParametricArray(coeffs)
end
@eval function $op(B::MS, apa::AffineParametricArray{T, N, MT}) where {T, N, MT, S, MS<:AbstractArray{S, N}}
MTS = promote_type(MT, MS)
coeffs = similar(apa.coeffs, MTS)
coeffs .= $op(apa.coeffs)
coeffs[end] = $op(B, apa.coeffs[end])
return AffineParametricArray(coeffs)
end
end
# multiplication, backslash
for op in (:*, :\)
@eval function $op(A::AbstractMatrix, apa::AffineParametricArray)
coeffs = [$op(A, coeff) for coeff in apa.coeffs]
return AffineParametricArray(coeffs)
end
end
function *(apa::AffineParametricMatrix, B::AbstractMatrix)
coeffs = [coeff*B for coeff in apa.coeffs]
return AffineParametricArray(coeffs)
end
function *(apa::AffineParametricMatrix, v::AbstractVector)
coeffs = [coeff*v for coeff in apa.coeffs]
return AffineParametricArray(coeffs)
end
*(apa::AffineParametricArray, n::Number) = AffineParametricArray(apa.coeffs * n)
*(n::Number, apa::AffineParametricArray) = AffineParametricArray(apa.coeffs * n)
/(apa::AffineParametricArray, n::Number) = AffineParametricArray(apa.coeffs / n)
# with AffineExpression
function AffineParametricArray(A::AbstractArray{<:AffineExpression})
ncoeffs = length(first(A).coeffs)
coeffs = [map(a -> a.coeffs[i], A) for i in 1:ncoeffs]
return AffineParametricArray(coeffs)
end
function AffineParametricArray(A::AbstractArray{<:Number})
ncoeffs = length(_vars_dict[:vars]) + 1
coeffs = [zero(A) for _ in 1:ncoeffs]
coeffs[end] = A
return AffineParametricArray(coeffs)
end
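# Sketch (commented out): both constructors above produce the same coefficient
# layout, one array per variable plus a trailing constant term, e.g.
#
#   @affinevars x y
#   AffineParametricArray([x 1; 1 y]).coeffs == [[1 0; 0 0], [0 0; 0 1], [0 1; 1 0]]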
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 1038 | abstract type ParametricIntervalLinearSolver end
"""
Skalna06
Direct solver for interval linear systems with affine-parametric dependency. For more
information see [[SKA06]](@ref).
"""
struct Skalna06 <: ParametricIntervalLinearSolver end
_eval_vec(b::AffineParametricVector, mp) = b(mp)
_eval_vec(b::AbstractVector, _) = b
function (sk::Skalna06)(A::AffineParametricMatrix,
            b::AbstractVector,
            p)
    mp = map(mid, p)              # midpoints of the parameters
    R = inv(A(mp))                # approximate inverse of the midpoint matrix
    bp = _eval_vec(b, mp)
    x0 = R * bp                   # approximate midpoint solution
    D = (R * A)(p)                # preconditioned parametric matrix evaluated over p
    is_H_matrix(D) || throw(ArgumentError("Could not find an enclosure of given parametric system"))
    z = (R * (b - A * x0))(p)     # enclosure of the preconditioned residual
    Δ = comparison_matrix(D) \ mag.(z)    # error bound via the comparison matrix of D
    return x0 + IntervalArithmetic.Interval.(-Δ, Δ)
end
function solve(A::AffineParametricMatrix,
b::AbstractVector,
p,
solver::ParametricIntervalLinearSolver=Skalna06())
checksquare(A) == length(b) || throw(DimensionMismatch())
return solver(A, b, p)
end
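# Sketch (commented out): a single-parameter system, assuming `@affinevars` is
# in scope and the parameter t ranges over [2, 3]:
#
#   @affinevars t
#   A = AffineParametricArray([t 1; 1 t])
#   b = [1..1, 1..1]
#   solve(A, b, [2..3])   # enclosure of all solutions for t ∈ [2, 3]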
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 688 | using IntervalLinearAlgebra, StaticArrays, LazySets, IntervalConstraintProgramming
using Test
const IA = IntervalArithmetic
include("test_classify.jl")
include("test_multiplication.jl")
include("test_utils.jl")
include("test_numerical_test/test_numerical_test.jl")
include("test_eigenvalues/test_interval_eigenvalues.jl")
include("test_eigenvalues/test_verify_eigs.jl")
include("test_solvers/test_enclosures.jl")
include("test_solvers/test_epsilon_inflation.jl")
include("test_solvers/test_precondition.jl")
include("test_solvers/test_oettli_prager.jl")
include("test_pils/test_linexpr.jl")
include("test_pils/test_affine_parametic_array.jl")
include("test_pils/test_pils_solvers.jl")
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 631 | @testset "classify matrices" begin
A = [0..2 1..1;-1.. -1 0..2]
@test !is_H_matrix(A)
@test !is_strongly_regular(A)
B = [-2.. -2 1..1; 5..6 -2.. -2]
@test is_strongly_regular(B)
@test !is_Z_matrix(B)
@test !is_M_matrix(B)
C = [2..2 1..1; 0..2 2..2]
@test is_H_matrix(C)
@test !is_strictly_diagonally_dominant(C)
D = [2..2 -1..0; -1..0 2..2]
@test is_strictly_diagonally_dominant(D)
@test is_Z_matrix(D)
@test is_M_matrix(D)
E = [2..4 -2..1;-1..2 2..4]
@test !is_Z_matrix(E)
@test !is_M_matrix(E)
@test !is_H_matrix(E)
@test is_strongly_regular(E)
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 869 | @testset "Matrix multiplication" begin
# test default settings
@test get_multiplication_mode() == Dict(:multiplication => :fast)
A = [2..4 -2..1; -1..2 2..4]
imA = im*A
set_multiplication_mode(:slow)
@test A * A == [0..18 -16..8; -8..16 0..18]
@test imA * imA == -1*[0..18 -16..8; -8..16 0..18]
set_multiplication_mode(:rank1)
@test A * A == [0..18 -16..8; -8..16 0..18]
@test imA * imA == -1*[0..18 -16..8; -8..16 0..18]
set_multiplication_mode(:fast)
@test A * A == [-2..19.5 -16..10; -10..16 -2..19.5]
@test A * mid.(A) == [5..12.5 -8..2; -2..8 5..12.5]
@test mid.(A) * A == [5..12.5 -8..2; -2..8 5..12.5]
@test imA * imA == -1*[-2..19.5 -16..10; -10..16 -2..19.5]
@test mid.(A) * imA == im*[5..12.5 -8..2; -2..8 5..12.5]
@test imA * mid.(A) == im*[5..12.5 -8..2; -2..8 5..12.5]
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 240 | @testset "Utils" begin
orthants = Orthants(3)
@test length(orthants) == 8
@test eltype(Orthants) == Vector{Int}
@test first(orthants) == [1, 1, 1]
@test last(orthants) == [-1, -1, -1]
@test orthants[4] == [-1, -1, 1]
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 1349 | @testset "Eigenvalues of interval matrices" begin
# symmetrix matrix
A = Symmetric([-1 0 -1..1;
0 -1 -1..1;
-1..1 -1..1 0.1])
evrohn = eigenbox(A)
@test interval_isapprox(evrohn, -2.4143..1.5143; atol=1e-3)
evhertz = eigenbox(A, Hertz())
@test interval_isapprox(evhertz, -1.9674..1.0674; atol=1e-3)
# real matrix
A = [-3.. -2 4..5 4..6 -1..1.5;
-4.. -3 -4.. -3 -4.. -3 1..2;
-5.. -4 2..3 -5.. -4 -1..0;
-1..0.1 0..1 1..2 -4..2.5]
ev = eigenbox(A)
@test interval_isapprox(real(ev), -8.8221..3.4408; atol=1e-3)
@test interval_isapprox(imag(ev), -10.7497..10.7497; atol=1e-3)
evhertz = eigenbox(A, Hertz())
@test interval_isapprox(real(evhertz), -7.3691..3.2742; atol=1e-3)
@test interval_isapprox(imag(evhertz), -8.794..8.794; atol=1e-3)
# hermitian matrix
A = Hermitian([1..2 (5..9)+(2..5)*im (3..5)+(2..4)im;
(5..9)+(-5.. -2)*im 2..3 (7..8)+(6..10)im;
(3..5)+(-4.. -2)*im (7..8)+(-10.. -6)*im 3..4])
ev = eigenbox(A)
@test interval_isapprox(ev, -15.4447..24.3359; atol=1e-3)
# complex matrix
A = [(1..2)+(3..4)*im 3..4;1+(2..3)*im 4..5]
ev = eigenbox(A)
@test interval_isapprox(real(ev), -1.28812..7.28812; atol=1e-3)
@test interval_isapprox(imag(ev), -2.04649..5.54649; atol=1e-3)
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 939 | @testset "verify perron frobenius " begin
A = [1 2;3 4]
ρ = bound_perron_frobenius_eigenvalue(A)
@test (5+sqrt(big(5)))/2 ≤ ρ
end
@testset "verified eigenvalues" begin
n = 5 # matrix size
# symmetric case
ev = sort(randn(n))
D = Diagonal(ev)
Q, _ = qr(rand(n, n))
A = Symmetric(IA.Interval.(Matrix(Q)) * D * IA.Interval.(Matrix(Q')))
evals, evecs, cert = verify_eigen(A)
@test all(cert)
@test all(ev .∈ evals)
# real eigenvalues case
P = rand(n, n)
Pinv, _ = epsilon_inflation(P, Diagonal(ones(n)))
A = IA.Interval.(P) * D * Pinv
evals, evecs, cert = verify_eigen(A)
@test all(cert)
@test all(ev .∈ evals)
# test complex eigenvalues
ev = sort(rand(Complex{Float64}, n), by = x -> (real(x), imag(x)))
A = IA.Interval.(P) * Matrix(Diagonal(ev)) * Pinv
evals, evecs, cert = verify_eigen(A)
@test all(cert)
@test all(ev .∈ evals)
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 409 | @testset "Test Numerical Test" begin
A = IntervalLinearAlgebra.NumericalTest.test_matrix(4)
@test A == [1.0 0 0 2^(-53);
0 1.0 0 2^(-53);
0 0 1.0 2^(-53);
0 0 0 2^(-53)]
# we test the singlethread version, so that CI points
# out if setting rounding modes is broken
@test IntervalLinearAlgebra.NumericalTest.rounding_test(1, 2) == true
end | IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 1686 | @testset "Affine Parametric Array construction" begin
@affinevars x y z
vars = [x, y, z]
A = AffineParametricArray([x+1 y+2;x+y+z+1 x-z])
A1 = AffineParametricArray([1 2;3 4])
B = AffineParametricArray([x+1 y+2;x+y+z+1 x-z+1])
@test A.coeffs == [[1 0;1 1], [0 1;1 0], [0 0;1 -1], [1 2;1 0]]
@test A1.coeffs == [[0 0;0 0], [0 0;0 0], [0 0;0 0], [1 2;3 4]]
@test A[1, 2] == y + 2
@test A[:, 1] == [x+1, x+y+z+1]
A1[1, 2] = x
@test A1 == AffineParametricArray([1 x;3 4])
A1[:, 1] = [x+1, z-1]
@test A1 == AffineParametricArray([x+1 x;z-1 4])
@test A([1..2, 2..3, 3..4]) == [2..3 4..5; 7..10 -3.. -1]
end
@testset "Affine parametric array operations" begin
@affinevars x y z
vars = [x, y, z]
A = AffineParametricArray([x+1 y+2;x+y+z+1 x-z])
B = AffineParametricArray([x+1 y+2;x+y+z+1 x-z+1])
@test A == A
@test A != B
@test +A == A
@test -A == AffineParametricArray([-x-1 -y-2; -x-y-z-1 -x+z])
@test A + B == AffineParametricArray([2x+2 2y+4; 2x+2y+2z+2 2x-2z+1])
@test A - B == AffineParametricArray([0 0;0 -1])
C = [2 1;1 1]
@test A + C == AffineParametricArray([x+3 y+3;x+y+z+2 x-z+1])
@test C + A == AffineParametricArray([x+3 y+3;x+y+z+2 x-z+1])
@test A * C == AffineParametricArray([2x+y+4 x+y+3;3x+2y+z+2 2x+y+1])
@test C * A == AffineParametricArray([3x+y+z+3 x+2y-z+4;2x+y+z+2 x+y-z+2])
@test C \ A == inv(C) * A
@test A * 2 == AffineParametricArray([2x+2 2y+4;2x+2y+2z+2 2x-2z])
@test 2 * A == AffineParametricArray([2x+2 2y+4;2x+2y+2z+2 2x-2z])
@test A / 2 == AffineParametricArray([0.5x+0.5 0.5y+1;0.5x+0.5y+0.5z+0.5 0.5x-0.5z])
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 2361 | @testset "linear expressions variables" begin
vars = @affinevars x y z
@test vars == [x, y, z]
@test x isa AffineExpression{Int}
@test x.coeffs == [1, 0, 0, 0]
@test y.coeffs == [0, 1, 0, 0]
@test z.coeffs == [0, 0, 1, 0]
vars2 = @affinevars x[1:5]
@test vars2 == [x1, x2, x3, x4, x5]
@test x1 isa AffineExpression{Int}
@test x1.coeffs == [1, 0, 0, 0, 0, 0]
@test x5.coeffs == [0, 0, 0, 0, 1, 0]
vars3 = @affinevars x
@test vars3 == [x]
@test x isa AffineExpression{Int}
@test x.coeffs == [1, 0]
end
@testset "linear expressions operations" begin
@affinevars x y z
p1 = x - y + 3z
@test string(p1) == "x-y+3z"
p2 = y + x - z - 2
@test string(p2) == "x+y-z-2"
@test string(p1 - p1) == "0"
@test +p1 == p1
@test -p1 == -x + y - 3z
@test p2([1, 1, 1]) == -1
psum = p1 + p2
pdiff = p1 - p2
psumnumleft = 1 + p1
psumnumright = p1 + 1
pdiffnumright = p1 - 1
pdiffnumleft = 1 - p1
pprodleft = 2 * p1
pprodright = p1 * 2
pdiv = p1 / 2
@test psum.coeffs == [2, 0, 2, -2]
@test pdiff.coeffs == [0, -2, 4, 2]
@test psumnumleft.coeffs == [1, -1, 3, 1]
@test psumnumright.coeffs == [1, -1, 3, 1]
@test pdiffnumright.coeffs == [1, -1, 3, -1]
@test pdiffnumleft.coeffs == [-1, 1, -3, 1]
@test pprodleft.coeffs == [2, -2, 6, 0]
@test pprodright.coeffs == [2, -2, 6, 0]
@test pdiv.coeffs == [0.5, -0.5, 1.5, 0]
@test p2 + 0.5 == x + y - z - 1.5
A = [1 2;3 4]
@test x * A == [x 2x;3x 4x]
@test A * x == [x 2x;3x 4x]
@test zero(p1) == AffineExpression(0)
@test one(p1) == AffineExpression(1)
@test zero(AffineExpression{Int}) == AffineExpression(0)
@test one(AffineExpression{Int}) == AffineExpression(1)
end
@testset "linear expressions conversions" begin
@test promote_type(AffineExpression{Int}, Float64) == AffineExpression{Float64}
@test promote_type(AffineExpression{Int}, AffineExpression{Float64}) == AffineExpression{Float64}
@affinevars x y
p1 = x + y + 1
a, b = promote(p1, 1.5)
@test a isa AffineExpression{Float64}
@test b isa AffineExpression{Float64}
@test a == x + y + 1
@test b == 1.5
@test convert(AffineExpression, 1.2) isa AffineExpression{Float64}
@test convert(AffineExpression, 1.2) == 1.2
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 912 | @testset "test skalna06" begin
E = 2e11
σ = 0.005
s12 = s21 = s34 = s43 = s45 = s54 = E*σ/sqrt(2)
s13 = s31 = s24 = s42 = s35 = s53 = E*σ/2
# dummy variable
@affinevars s23
K = AffineParametricArray([s12/2+s13 -s12/2 -s12/2 -s13 0 0 0;
-s21/2 (s21+s23)/2+s24 (s21-s23)/2 -s23/2 s23/2 -s24 0;
-s21/2 (s21-s23)/2 (s21+s23)/2 s23/2 -s23/2 0 0;
-s31 -s23/2 s23/2 s31+(s23+s34)/2+s35 (s34-s23)/2 -s34/2 -s34/2;
0 s23/2 -s23/2 (s34 - s23)/2 (s34+s23)/2 -s34/2 -s34/2;
0 -s42 0 -s43/2 -s43/2 s42+(s43+s45)/2 0;
0 0 0 -s43/2 -s43/2 0 (s43+s45)/2])
q = [0, 0, -10.0^4, 0, 0, 0, 0]
s = E*σ/sqrt(2) ± 0.1 * E*σ/sqrt(2)
_x = [-20, -2.7.. -2.3, -38.91.. -38.52, -5, -34.53.. -33.75, -12.7.. -12.3, -19.77.. -19.37]
x = solve(K, q, s)/1e-6
@test all(interval_isapprox(xi, _xi; atol=1e-2) for (xi, _xi) in zip(x, _x))
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 1870 | @testset "Test linear solvers" begin
As = @SMatrix [4..6 -1..1 -1..1 -1..1;-1..1 -6.. -4 -1..1 -1..1;-1..1 -1..1 9..11 -1..1;-1..1 -1..1 -1..1 -11.. -9]
bs = @SVector [-2..4, 1..8, -4..10, 2..12]
Am = Matrix(As)
bm = Vector(bs)
jac = Jacobi()
gs = GaussSeidel()
hbr = HansenBliekRohn()
kra = LinearKrawczyk()
for (A, b) in zip([As, Am], [bs, bm])
xgs = solve(A, b, gs)
xjac = solve(A, b, jac)
xhbr = solve(A, b, hbr)
xkra = solve(A, b, kra)
@test all(interval_isapprox.(xgs, [-2.6..3.1, -3.9..1.65, -1.48..2.15, -2.35..0.79]; atol=0.01))
@test all(interval_isapprox.(xjac, [-2.6..3.1, -3.9..1.65, -1.48..2.15, -2.35..0.79]; atol=0.01))
@test all(interval_isapprox.(xhbr, [-2.5..3.1, -3.9..1.2, -1.4..2.15, -2.35..0.6]; atol=0.01))
@test all(interval_isapprox.(xkra, [-8..8, -8..8, -8..8, -8..8]; atol=0.01))
end
ge = GaussianElimination()
xge = solve(Am, bm, ge)
@test all(interval_isapprox.(xge, [-2.6..3.1, -3.9..1.5, -1.43..2.15, -2.35..0.6]; atol=0.01))
xdef = solve(Am, bm)
@test all(interval_isapprox.(xdef, [-2.6..3.1, -3.9..1.5, -1.43..2.15, -2.35..0.6]; atol=0.01))
A = [2..4 -2..1; -1..2 2..4]
b = [-2..2, -2..2]
x1 = solve(A, b)
@test all(interval_isapprox.(x1, [-14..14, -14..14]))
x2 = solve(A, b, HansenBliekRohn())
@test all(interval_isapprox.(x2, [-14..14, -14..14]))
# test exceptions
@test_throws DimensionMismatch solve(Am, bm[1:end-1])
@test_throws DimensionMismatch solve(Am[:, 1:end-1], bm, hbr)
@test_throws DimensionMismatch solve(Am, bm, gs, NoPrecondition(), [1..2, 3..4])
end
@testset "Reduced Row Echelon Form" begin
A1 = [1..2 1..2;2..2 3..3]
@test rref(A1) == [2..2 3..3; 0..0 -2..0.5]
A2 = fill(0..0, 2, 2)
@test_throws ArgumentError rref(A2)
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 840 | @testset "Verified linear solver" begin
n = 4
Arat = reshape(1 .//(1:n^2), n, n)
brat = Arat*fill(1//1, n)
Afloat = float(Arat)
bfloat = float(brat)
x, cert = epsilon_inflation(Afloat, bfloat)
@test all(ones(n) .∈ x)
@test cert
Ain = convert.(IA.Interval{Float64}, IA.Interval.(Arat, Arat))
bin = convert.(IA.Interval{Float64}, IA.Interval.(brat, brat))
x, cert = epsilon_inflation(Ain, bin)
@test all(ones(n) .∈ x)
@test cert
# big float test
Abig = BigFloat.(Arat)
bbig = BigFloat.(brat)
x, cert = epsilon_inflation(Abig, bbig)
@test cert
@test all(diam.(x) .< 1e-50)
@test all(ones(n) .∈ x)
# case when should not be possible to certify
A = [1..2 1..4;0..1 0..1]
b = A*[1; 1]
_, cert = epsilon_inflation(A, b)
@test !cert
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 470 | @testset "oettli-präger method" begin
A = [2..4 -2..1; -1..2 2..4]
b = [-2..2, -2..2]
p = solve(A, b, NonLinearOettliPrager())
polyhedra = solve(A, b, LinearOettliPrager())
for pnt in [[-4, -3], [3, -4], [4, 3], [-3, 4]]
@test any(pnt ∈ x for x in p.boundary)
@test sum(pnt ∈ pol for pol in polyhedra) == 1
end
for pnt in [[-5, 5], [5, 5], [5, -5], [-5, -5]]
@test all(pnt ∉ pol for pol in polyhedra)
end
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | code | 670 | @testset "precondition" begin
A = [2..4 -2..1; -1..2 2..4]
b = [-2..2, -2..2]
np = NoPrecondition()
idp = InverseDiagonalMidpoint()
imp = InverseMidpoint()
A1, b1 = np(A, b)
@test A1 == A && b1 == b
A2, b2 = idp(A, b)
Acorrect = [2/3..4/3 -2/3..1/3; -1/3..2/3 2/3..4/3]
bcorrect = [-2/3..2/3, -2/3..2/3]
@test all(interval_isapprox.(A2, Acorrect)) && all(interval_isapprox.(b2, bcorrect))
A3, b3 = imp(A, b)
Acorrect = [22/37..52/37 -20/37..20/37;-20/37..20/37 22/37..52/37]
bcorrect = [-28/37..28/37, -28/37..28/37]
@test all(interval_isapprox.(A3, Acorrect)) && all(interval_isapprox.(b3, bcorrect))
end
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 6344 | 
| **Pkg Info** | **Build status** | **Documentation** | **Citation** | **Contributing** |
|:------------:|:----------------:|:-----------------:|:------------:|:----------------:|
|![version][ver-img][![license: MIT][mit-img]](LICENSE)|[![CI][ci-img]][ci-url][![codecov][cov-img]][cov-url]|[![docs-stable][stable-img]][stable-url][![docs-dev][dev-img]][dev-url]|[![bibtex][bib-img]][bib-url][![zenodo][doi-img]][doi-url]| [![contributions guidelines][contrib-img]][contrib-url]|
## Overview
This package contains routines to perform numerical linear algebra using interval arithmetic. This can be used both for rigorous computations and uncertainty propagation.
If you use this package in your work, please cite it as
```
@software{ferranti2021interval,
author = {
Luca Ferranti and
Marcelo Forets and
David P. Sanders
},
title = {IntervalLinearAlgebra.jl: linear algebra done rigorously},
month = {9},
year = {2021},
doi = {10.5281/zenodo.5363563},
url = {https://github.com/juliaintervals/IntervalLinearAlgebra.jl}
}
```
## Features
**Note**: The package is still under active development and things evolve quickly (or at least should)
- enclosure of the solution of interval linear systems
- exact characterization of the solution set of interval linear systems using Oettli-Präger
- verified solution of floating point linear systems
- enclosure of eigenvalues of interval matrices
- verified computation of eigenvalues and eigenvectors of floating point matrices
## Installation
Open a Julia session and enter
```julia
using Pkg; Pkg.add("IntervalLinearAlgebra")
```
this will download the package and all the necessary dependencies for you. Next you can import the package with
```julia
using IntervalLinearAlgebra
```
and you are ready to go.
## Documentation
- [**STABLE**][stable-url] -- Documentation of the latest release
- [**DEV**][dev-url] -- Documentation of the current version on main (work in progress)
The package was also presented at JuliaCon 2021! The video is available [here](https://youtu.be/fre0TKgLJwg) and the slides [here](https://github.com/lucaferranti/ILAjuliacon2021)
[](https://youtu.be/fre0TKgLJwg)
## Quickstart
Here is a quick demo about solving an interval linear system.
```julia
using IntervalLinearAlgebra, LazySets, Plots
A = [2..4 -1..1;-1..1 2..4]
b = [-2..2, -1..1]
Xenclose = solve(A, b)
polytopes = solve(A, b, LinearOettliPrager())
plot(UnionSetArray(polytopes), ratio=1, label="solution set", legend=:top)
plot!(IntervalBox(Xenclose), label="enclosure")
```
<p align="center">
<img src="docs/src/assets/quickstart.png" alt="IntervalMatrices.jl" width="450"/>
</p>
## Contributing
If you spot something strange in the software (something doesn't work or doesn't behave as expected) do not hesitate to open a [bug issue](https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl/issues/new?assignees=&labels=bug&template=bug_report.md&title=%5BBUG%5D).
If you have an idea of how to make the package better (a new feature, a new piece of documentation, an idea to improve some existing feature), you can open an [enhancement issue](https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl/issues/new?assignees=&labels=enhancement&template=feature_request.md&title=%5Bfeature+request%5D%3A+).
If you feel like your issue does not fit any of the above mentioned templates (e.g. you just want to ask something), you can also open a [blank issue](https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl/issues/new).
Pull requests are also very welcome! More details in the [contributing guidelines](https://juliaintervals.github.io/IntervalLinearAlgebra.jl/stable/CONTRIBUTING/)
The core developers of the package can be found in the `#intervals` channel in the Julia slack or zulip, links to join the platforms can be found [here](https://julialang.org/community/). Come to chat with us!
## References
An excellent introduction to interval linear algebra is
J. Horáček, _Interval Linear and Nonlinear Systems_, 2019, available [here](https://kam.mff.cuni.cz/~horacek/source/horacek_phdthesis.pdf)
See also the complete list of [references](https://juliaintervals.github.io/IntervalLinearAlgebra.jl/dev/references) for the concepts and algorithms used in this package.
## Related packages
- [IntervalArithmetic.jl](https://github.com/juliaintervals/IntervalArithmetic.jl) -- Interval computations in Julia
- [IntervalMatrices.jl](https://github.com/JuliaReach/IntervalMatrices.jl) -- Matrices with interval coefficients in Julia.
## Acknowledgment
The development of this package started during the Google Summer of Code (GSoC) 2021 program for the Julia organisation. The author wishes to thank his mentors [David Sanders](https://github.com/dpsanders) and [Marcelo Forets](https://github.com/mforets) for the constant guidance and feedback. During the GSoC program, this project was financially supported by Google.
[ver-img]: https://img.shields.io/github/v/release/juliaintervals/IntervalLinearAlgebra.jl
[mit-img]: https://img.shields.io/badge/license-MIT-yellow.svg
[ci-img]: https://github.com/juliaintervals/IntervalLinearAlgebra.jl/workflows/CI/badge.svg
[ci-url]: https://github.com/juliaintervals/IntervalLinearAlgebra.jl/actions
[cov-img]: https://codecov.io/gh/juliaintervals/IntervalLinearAlgebra.jl/branch/main/graph/badge.svg?token=mgCzKMPiwK
[cov-url]: https://codecov.io/gh/juliaintervals/IntervalLinearAlgebra.jl
[stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[stable-url]: https://juliaintervals.github.io/IntervalLinearAlgebra.jl/stable
[dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[dev-url]: https://juliaintervals.github.io/IntervalLinearAlgebra.jl/dev
[bib-img]: https://img.shields.io/badge/bibtex-citation-green
[bib-url]: ./CITATION.bib
[doi-img]: https://img.shields.io/badge/zenodo-DOI-blue
[doi-url]: https://doi.org/10.5281/zenodo.5363563
[contrib-img]: https://img.shields.io/badge/contributing-guidelines-orange
[contrib-url]: https://juliaintervals.github.io/IntervalLinearAlgebra.jl/stable/CONTRIBUTING/
[style-img]: https://img.shields.io/badge/code%20style-blue-4495d1.svg
[style-url]: https://github.com/invenia/BlueStyle
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 14951 | # IntervalLinearAlgebra.jl contribution guidelines
First of all, huge thanks for your interest in the package! ✨
This page has some hopefully useful guidelines. If this is your first time contributing, please read the [pull request-workflow](#Pull-request-workflow) section, mainly to make sure everything works smoothly and you don't get stuck with some nasty technicalities.
You are also encouraged to read the coding and documentation guidelines, but you don't need to deeply study and memorize those. Core developers are here to help you. Most importantly, relax and have fun!
The core developers of the package can be found in the `#intervals` channel in the Julia slack or zulip, links to join the platforms can be found [here](https://julialang.org/community/)
## Opening issues
If you spot something strange in the software (something doesn't work or doesn't behave as expected) do not hesitate to open a [bug issue](https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl/issues/new?assignees=&labels=bug&template=bug_report.md&title=%5BBUG%5D).
If you have an idea of how to make the package better (a new feature, a new piece of documentation, an idea to improve some existing feature), you can open an [enhancement issue](https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl/issues/new?assignees=&labels=enhancement&template=feature_request.md&title=%5Bfeature+request%5D%3A+).
In both cases, try to follow the template, but do not worry if you don't know how to fill something.
If you feel like your issue does not fit any of the above mentioned templates (e.g. you just want to ask something), you can also open a [blank issue](https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl/issues/new).
## Pull request workflow
Pull requests are also warmly welcome. For small fixes/additions, feel free to directly open a PR. For bigger, more ambitious PRs, it is preferable to open an issue first to discuss it. As a rule of thumb, every pull request should be as atomic as possible (fix one bug, add one feature, address one issue).
### Setup
!!! note
This is just one way, you can do differently (e.g. clone your fork and add the original repo as `upstream`). In that case, make sure to use the correct remote names
This is something that needs to be done only once, the first time you start contributing
**1.** From the Julia REPL in package mode (you can enter package mode by typing `]`) do
```julia
pkg> dev IntervalLinearAlgebra
```
this will clone the repository into `.julia/dev/IntervalLinearAlgebra`. When you `dev` the package, Julia will use the code in the `dev` folder instead of the official released one. If you want to go back to use the released version, you can do `free IntervalLinearAlgebra`.
**2.** [Fork the repository](https://github.com/juliaintervals/IntervalLinearAlgebra.jl).
**3.** Navigate to `.julia/dev/IntervalLinearAlgebra` where you cloned the original repository before. Now you need to add your fork as remote. This can be done with
```
git remote add $your_remote_name $your_fork_url
```
`your_remote_name` can be whatever you want. `your_fork_url` is the url you would use to clone your fork repository. For example if your github username is `lucaferranti` and you want to call the remote `lucaferranti` then the previous command would be
```
git remote add lucaferranti https://github.com/lucaferranti/IntervalLinearAlgebra.jl.git
```
you can verify that you have the correct remotes with `git remote -v` the output should be similar to
```
lucaferranti https://github.com/lucaferranti/IntervalLinearAlgebra.jl.git (fetch)
lucaferranti https://github.com/lucaferranti/IntervalLinearAlgebra.jl.git (push)
origin https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git (fetch)
origin https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git (push)
```
Now everything is set!
### Contribution workflow
**0.** Navigate to `.julia/dev/IntervalLinearAlgebra` and make sure you are on the main branch. You can check with `git branch` and if needed use `git switch main` to switch to the main branch. **The next steps assume you are in the `IntervalLinearAlgebra` folder**.
**1.** Before you start modifying, it's good to make sure that your local main branch is synchronized with the main branch in the package repo. To do so, run
```
git fetch origin
git merge origin/main
```
Since you should **never** directly modify the main branch locally, this should not cause any conflicts. If you didn't follow the previous setup instructions, you may need to change `origin` with the appropriate remote name.
**2.** Now create a new branch for the new feature you want to develop. If possible, the branch should start with your name/initials and have a short but descriptive name of what you are doing (no strict rules). For example, if I (Luca Ferranti) want to fix the code that computes the eigenvalues of a symmetric matrix, I would call the branch `lf-symmetric-eigvals` or something like that. You can create a new branch and switch to it with
```
git switch -c lf-symmetric-eigvals
```
If you are targeting a specific issue, you can also name the branch after the issue number, e.g. `lf-42`.
**3.** Now let the fun begin! Fix bugs, add the new features, modify the docs, whatever you do, it's gonna be awesome! Check also the [coding guidelines](#Coding-guideline) and [documentation guidelines](#Documentation-guideline). Do not worry if it feels like a lot of rules, the core developers are here to help and guide.
**4.** It is important to run the tests of the package locally, to check that you haven't accidentally broken anything. You can run the tests with
```
julia --project test/runtests.jl
```
If you have changed the documentation, you can build it locally with
```
julia --project=docs docs/make.jl
```
This will build the docs in the `docs/build` folder, you can open `docs/build/index.html` and check that everything looks nice. Check also in the terminal that you don't have error messages (no broken links, doctests pass).
**5.** When you are ready, commit your changes. If example you want to commit src/file1.jl, src/file2.jl
```
git add src/file1.jl src/file2.jl
git commit -m "short description of what you did"
```
You can also add and commit all changes at once with
```
git commit -a -m "short description of what you did"
```
finally you are ready to push to your fork. If your fork remote is called `lucaferranti` and your branch is called `lf-symmetric-eigvals`, do
```
git push -u lucaferranti lf-symmetric-eigvals
```
The `-u` flag sets the upstream, so next time you want to push to the same branch you can just do `git push`.
**6.** Next, go to the [package repository](https://github.com/juliaintervals/IntervalLinearAlgebra.jl); you should see a message inviting you to open a pull request, do it! Make sure you are opening the PR to `origin/main`. Try to fill the blanks in the pull request template, but do not worry if you don't know how to fill something. Also, your work need not be polished and perfect to open the pull request! You are also very welcome to open it as a draft and request feedback, assistance, etc.
**7.** If nothing happens within 7 working days, feel free to ping Luca Ferranti (@lucaferranti) every 1-2 days until you get his attention.
## Coding guideline
* Try to roughly follow the [bluestyle](https://github.com/invenia/BlueStyle) style guideline.
* If you add new functionalities, they should also be tested. Exported functions should also have a docstring.
* The test folder should roughly follow the structure of the src folder. That is if you create `src/file1.jl` there should also be `test/test_file1.jl`. There can be exceptions, the main point being that both `test` and `src` should have a logical structure and should be easy to find the tests for a given function.
* The `runtests.jl` file should contain only `include` statements.
### Package version
Generally, if the pull request changes the source code, a new version of the package should be released. This means, that if you change the source code, you should also update the `version` entry in the `Project.toml`. Since the package is below version 1, the version update rules are
* update minor version for breaking changes, e.g. `0.3.5` => `0.4.0`
* update patch version for non breaking changes, e.g. `0.3.5` => `0.3.6`
* It is perfectly fine if you are not sure how to update the version. Just mention it in the PR and you will receive guidance
* The person who merges the PR also registers the new version.
### Add dependency
If the function you are adding needs an external package (say `Example.jl`), this should be added as dependency, to do so
1. Go to `IntervalLinearAlgebra.jl` and start a Julia session and activate the current environment with `julia --project`
2. Enter the package mode (press `]`) and add the package you want to add, e.g `]add Example`.
3. You can verify that the package was added by typing `st` while in package mode. You can exit the package mode by pressing backspace
4. Open the `Project.toml` file, your package should now be listed in the `[deps]` section.
5. In the `[compat]` section, specify the compatibility requirements. Packages are listed alphabetically. More details about specifying compatibility can be found [here](https://pkgdocs.julialang.org/v1/compatibility/)
6. In the `IntervalLinearAlgebra.jl` file, add the line `using Example` together with the other using statements, or `import Example: fun1, fun2` if you are planning to extend those functions.
If the dependency is quite heavy and used only by some functionalities, you may consider adding it as an optional dependency. To do so,
1. Repeat the steps 1-5 above
2. In the `[deps]` section of `Project.toml` locate the package you want to make an optional dependency and move the corresponding line to `[extras]`, keep alphabetical ordering.
3. Add the dependency name to the `test` entry in the `[targets]` section
4. In the `IntervalLinearAlgebra.jl` file, locate the `__init__` function and add the line
```julia
@require Example = "7876af07-990d-54b4-ab0e-23690620f79a" include("file.jl")
```
where `file.jl` is the file containing the functions needing `Example.jl`. The line `Example = "7876af07-990d-54b4-ab0e-23690620f79a"` is the same as in the `Project.toml`
5. In `file.jl` the first line should be `using .Example` (or `import .Example: fun1, fun2`), note the dot before the package name. Then write the functions in the file normally, as in the sketch below.
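For instance, here is a minimal sketch of what `file.jl` could look like (the function defined below is just a hypothetical placeholder):
```julia
# file.jl -- loaded only when Example.jl is available
using .Example   # note the leading dot: the module is injected by Requires

# hypothetical function of this package relying on Example.jl
greet(who) = Example.hello(who)
```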
## Documentation guideline
* Documentation is written with [Documenter.jl](https://github.com/JuliaDocs/Documenter.jl). Documentation files are in `docs/src`, generally as markdown file.
* If you want to include a Julia code example that is **not** executed in the markdown file, use ````` ```julia ````` blocks, e.g.
````
```julia
a = 1
b = 2
```
````
* Julia code that **is** executed should use ````` ```@example ````` blocks, e.g.
````
```@example
a = 1
b = 2
```
````
* If you want to reuse variables between `@example` blocks, they should be named, for example
````
```@example filename
a = 1
b = 2
```
... some text ...
```@example filename
c = a + b
```
````
* If you want to run a Julia code block but don't want the output to be displayed, add `nothing # hide` as the last line of the code block.
* You can plot and include figures as follows
````
```@example
# code for plotting
savefig("figname.png") # hide
```
![][figname.png]
````
* Use single ticks for inline code ``` `A` ``` and double ticks for maths ``` ``A`` ```.
* For single-line equations, use ````` ```math ````` blocks, e.g.
````
```math
|A_cx-b_c| \le A_\Delta|x| + b_\Delta,
```
````
* You can refer to functions in the package with ``` [`func_name`](@ref) ```
* You can quote references with `[[REF01]](@ref)`
* If you want to add references, you can use the following template
````
#### [REF01]
```@raw html
<ul><li>
```
Author(s), [*Paper name in italic*](link_to_pdf_if_available), other info (publisher, year, etc.)
```@raw html
<li style="list-style: none"><details>
<summary>bibtex</summary>
```
```
INSERT BIBTEX HERE
```
```@raw html
</details></li></ul>
```
---
````
* If the pdf of the paper is freely (and legally!) available online, make the title of the paper a link to it.
* The reference code should be the first 3 letters of the first author's surname + the last two digits of the year, e.g. `[FER87]`. To disambiguate duplicates, use a letter, e.g. `[FER87a]`, `[FER87b]`.
### Docstrings
* Each exported function should have a docstring. The docstring should roughly follow the following structure
```julia
"""
funname(param1, param2[, optional_param])
A short description (1-2 lines) of what the function does
### Input
List of inputs. Not needed if clear from description and signature.
### Output
List of outputs. Not needed if clear from description and signature.
### Notes
Anything else which is important.
### Algorithm
What algorithms the function uses, preferably with references.
### Example
At least one example, formatted as julia REPL, of what the function does.
Preferably, as a doctest.
"""
```
* Optional parameters in the function signature go around brackets.
* List of inputs and outputs can be omitted if the function has few parameters and they are already clearly explained by the function signature and description.
* Examples should be [doctests](https://juliadocs.github.io/Documenter.jl/stable/man/doctests/). Exceptions to this can occur if e.g. the function is not deterministic (random initialization) or requires a heavy optional dependency.
Here is an example
````julia
"""
something(A::Matrix{T}, b::Vector{T}[, tol=1e-10]) where {T<:Interval}
This function computes the something product between the interval matrix ``A`` and
interval vector ``b``.
### Input
`A` -- interval matrix
`b` -- interval vector
`tol` -- (optional), tolerance to compute the something product, default 1e-10
### Output
The interval vector representing the something product.
### Notes
If `A` and `b` are real, use the [`somethingelse`](@ref) function instead.
### Algorithm
The function uses the *something sometimes somewhere* algorithm proposed by Someone in [[SOM42]](@ref).
### Example
```jldoctest
julia> A = [1..2 3..4;5..6 7..8]
2×2 Matrix{Interval{Float64}}:
[1, 2] [3, 4]
[5, 6] [7, 8]
julia> b = [-2..2, -2..2]
2-element Vector{Interval{Float64}}:
[-2, 2]
[-2, 2]
julia> something(A, b)
2-element Vector{Interval{Float64}}:
[-1, 1]
[-7, 8]
```
"""
````
## Acknowledgments
Here is a list of useful resources from which this guideline was inspired
* [JuliaReach developers docs](https://github.com/JuliaReach/JuliaReachDevDocs)
* [Making a first Julia pull request](https://kshyatt.github.io/post/firstjuliapr/)
* [ColPrac](https://github.com/SciML/ColPrac)
* [Julia contributing guideline](https://github.com/JuliaLang/julia/blob/master/CONTRIBUTING.md)
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 830 | ## PR description
<!-- A short description of what is done in this PR. -->
## Before
<!-- Small example showing the functionality before this PR.
Needed only if you are changing existing source code (e.g. bug fix) -->
## After
<!-- Small example showing the functionality added/changed in this PR.
Needed only if you change the source code. -->
## Related issues
<!--
List the issues related to this PR. E.g.
- #01
- #02
If you are closing some issues add "fixes" before the issue number, e.g.
- fixes #01
- #02
-->
## Checklist
<!-- Needed only if you change the source code.
You don't need to get everything done before opening the PR :) -->
- [ ] Updated/added tests
- [ ] Updated/added docstring (needed only for exported functions)
- [ ] Updated Project.toml
## Other
<!-- Add here any other relevant information. --> | IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 972 | ---
name: Bug report
about: Report a bug for this project
title: "[bug]"
labels: bug
assignees: ''
---
## Bug description
<!-- A clear and concise description of what the bug is. -->
## Minimum (non-)working example
<!-- A short code snippet demonstrating the bug, e.g. a snapshot from the REPL (make sure to include the whole error stracktrace) or a link to a notebook/script to reproduce the bug -->
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
## Version info
<!-- you can get the package version with `]st IntervalLinearAlgebra` from the REPL. For the system information, enter `versioninfo()` in the Julia REPL and copy-paste the output. -->
- IntervalLinearAlgebra.jl version:
- System information:
## Related issues
<!-- if you already know or suspect some existing issues are related to this, please mention those here, otherwise leave blank -->
## Additional information
Add any other useful information
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 521 | ---
name: Feature request
about: Suggest an idea for this project
title: "[enhancement]: "
labels: enhancement
assignees: ''
---
## Feature description
<!-- A clear and concise description of the feature you would like to be added. Try to also give motivation why it would be a nice addition. -->
## Minimum working example
<!-- If you already have in mind what the code for your feature could look like, paste an example code snippet here -->
## Additional information
<!-- Add any other useful information here -->
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 15320 | # IntervalLinearAlgebra.jl contribution guidelines
First of all, huge thanks for your interest in the package! ✨
This page has some hopefully useful guidelines. If this is your first time contributing, please read the [pull request-workflow](#Pull-request-workflow) section, mainly to make sure everything works smoothly and you don't get stuck with some nasty technicalities.
You are also encouraged to read the coding and documentation guidelines, but you don't need to deeply study and memorize those. Core developers are here to help you. Most importantly, relax and have fun!
The core developers of the package can be found in the `#intervals` channel in the Julia slack or zulip, links to join the platforms can be found [here](https://julialang.org/community/)
## Opening issues
If you spot something strange in the software (something doesn't work or doesn't behave as expected) do not hesitate to open a [bug issue](https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl/issues/new?assignees=&labels=bug&template=bug_report.md&title=%5BBUG%5D).
If you have an idea of how to make the package better (a new feature, a new piece of documentation, an idea to improve some existing feature), you can open an [enhancement issue](https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl/issues/new?assignees=&labels=enhancement&template=feature_request.md&title=%5Bfeature+request%5D%3A+).
In both cases, try to follow the template, but do not worry if you don't know how to fill something.
If you feel like your issue does not fit any of the above mentioned templates (e.g. you just want to ask something), you can also open a [blank issue](https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl/issues/new).
## Pull request workflow
Pull requests are also warmly welcome. For small fixes/additions, feel free to directly open a PR. For bigger, more ambitious PRs, it is preferable to open an issue first to discuss it. As a rule of thumb, every pull request should be as atomic as possible (fix one bug, add one feature, address one issue).
### Setup
!!! note
This is just one way, you can do differently (e.g. clone your fork and add the original repo as `upstream`). In that case, make sure to use the correct remote names
This is something that needs to be done only once, the first time you start contributing
**1.** From the Julia REPL in package mode (you can enter package mode by typing `]`) do
```julia
pkg> dev IntervalLinearAlgebra
```
this will clone the repository into `.julia/dev/IntervalLinearAlgebra`. When you `dev` the package, Julia will use the code in the `dev` folder instead of the official released one. If you want to go back to use the released version, you can do `free IntervalLinearAlgebra`.
**2.** [Fork the repository](https://github.com/juliainterval/IntervalLinearAlgebra.jl).
**3.** Navigate to `.julia/dev/IntervalLinearAlgebra` where you cloned the original repository before. Now you need to add your fork as remote. This can be done with
```
git remote add $your_remote_name $your_fork_url
```
`your_remote_name` can be whatever you want. `your_fork_url` is the url you would use to clone your fork repository. For example if your github username is `lucaferranti` and you want to call the remote `lucaferranti` then the previous command would be
```
git remote add lucaferranti https://github.com/lucaferranti/IntervalLinearAlgebra.jl.git
```
you can verify that you have the correct remotes with `git remote -v`; the output should be similar to
```
lucaferranti https://github.com/lucaferranti/IntervalLinearAlgebra.jl.git (fetch)
lucaferranti https://github.com/lucaferranti/IntervalLinearAlgebra.jl.git (push)
origin https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git (fetch)
origin https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git (push)
```
Now everything is set!
### Contribution workflow
**0.** Navigate to `.julia/dev/IntervalLinearAlgebra` and make sure you are on the main branch. You can check with `git branch` and if needed use `git switch main` to switch to the main branch. **The next steps assume you are in the `IntervalLinearAlgebra` folder**.
**1.** Before you start modifying, it's good to make sure that your local main branch is synchronized with the main branch in the package repo. To do so, run
```
git fetch origin
git merge origin/main
```
Since you should **never** directly modify the main branch locally, this should not cause any conflicts. If you didn't follow the previous setup instructions, you may need to replace `origin` with the appropriate remote name.
**2.** Now create a new branch for the new feature you want to develop. If possible, the branch should start with your name/initials and have a short but descriptive name of what you are doing (no strict rules). For example, if I (Luca Ferranti) want to fix the code that computes the eigenvalues of a symmetric matrix, I would call the branch `lf-symmetric-eigvals` or something like that. You can create a new branch and switch to it with
```
git switch -c lf-symmetric-eigvals
```
If you are targeting a specific issue, you can also name the branch after the issue number, e.g. `lf-42`.
**3.** Now let the fun begin! Fix bugs, add the new features, modify the docs, whatever you do, it's gonna be awesome! Check also the [coding guidelines](#Coding-guideline) and [documentation guidelines](#Documentation-guideline). Do not worry if it feels like a lot of rules, the core developers are here to help and guide.
**4.** It is important to run the tests of the package locally, to check that you haven't accidentally broken anything. You can run the tests with
```
julia --project test/runtests.jl
```
If you have changed the documentation, you can build it locally with
```
julia --project=docs docs/make.jl
```
This will build the docs in the `docs/build` folder, you can open `docs/build/index.html` and check that everything looks nice. Check also in the terminal that you don't have error messages (no broken links, doctests pass).
**5.** When you are ready, commit your changes. For example, if you want to commit `src/file1.jl` and `src/file2.jl`:
```
git add src/file1.jl src/file2.jl
git commit -m "short description of what you did"
```
You can also add and commit all changes at once with
```
git commit -a -m "short description of what you did"
```
Finally, you are ready to push to your fork. If your fork remote is called `lucaferranti` and your branch is called `lf-symmetric-eigvals`, do
```
git push -u lucaferranti lf-symmetric-eigvals
```
The `-u` flag sets the upstream, so next time you want to push to the same branch you can just do `git push`.
**6.** Next, go to the [package repository](https://github.com/juliaintervals/IntervalLinearAlgebra.jl), you should see a message inviting you to open a pull request, do it! Make sure you are opening the PR to `origin/main`. Try to fill the blanks in the pull request template, but do not worry if you don't know how to fill something. Also, your work need not be polished and perfect to open the pull request! You are also very welcome to open it as a draft and request feedback, assistance, etc.
**7.** If nothing happens within 7 working days, feel free to ping Luca Ferranti (@lucaferranti) every 1-2 days until you get his attention.
## Coding guideline
* Try to roughly follow the [bluestyle](https://github.com/invenia/BlueStyle) style guideline.
* If you add new functionalities, they should also be tested. Exported functions should also have a docstring.
* The test folder should roughly follow the structure of the src folder. That is, if you create `src/file1.jl` there should also be `test/test_file1.jl`. There can be exceptions; the main point is that both `test` and `src` should have a logical structure and that it should be easy to find the tests for a given function.
* The `runtests.jl` should have only include statements.
### Package version
Generally, if the pull request changes the source code, a new version of the package should be released. This means, that if you change the source code, you should also update the `version` entry in the `Project.toml`. Since the package is below version 1, the version update rules are
* update minor version for breaking changes, e.g. `0.3.5` => `0.4.0`
* update patch version for non breaking changes, e.g. `0.3.5` => `0.3.6`
* It is perfectly fine that you are not sure how to update the version. Just mention it in the PR and you will receive guidance
* The person who merges the PR also registers the new version.
### Add dependency
If the function you are adding needs an external package (say `Example.jl`), this should be added as a dependency. To do so
1. Go to the `IntervalLinearAlgebra.jl` folder and start a Julia session with the current environment activated using `julia --project`
2. Enter the package mode (press `]`) and add the package you want to add, e.g. `add Example`.
3. You can verify that the package was added by typing `st` while in package mode. You can exit the package mode by pressing backspace.
4. Open the `Project.toml` file, your package should now be listed in the `[deps]` section.
5. In the `[compat]` section, specify the compatibility requirements. Packages are listed alphabetically. More details about specifying compatibility can be found [here](https://pkgdocs.julialang.org/v1/compatibility/)
6. In the `IntervalLinearAlgebra.jl` file, add the line `using Example` together with the other using statements, or `import Example: fun1, fun2` if you are planning to extend those functions.
If the dependency is quite heavy and used only by some functionalities, you may consider adding that as optional dependency. To do so,
1. Repeat the steps 1-5 above
2. In the `[deps]` section of `Project.toml` locate the package you want to make an optional dependency and move the corresponding line to `[extras]`, keeping alphabetical ordering.
3. Add the dependency name to the `test` entry in the `[targets]` section
4. In the `IntervalLinearAlgebra.jl` file, locate the `__init__` function and add the line
```julia
@require Example="7876af07-990d-54b4-ab0e-23690620f79a" include("file.jl")
```
where `file.jl` is the file containing the functions needing `Example.jl`. The entry `Example = "7876af07-990d-54b4-ab0e-23690620f79a"` is the same as in the `Project.toml`.
5. In `file.jl` the first line should be `using .Example` (or `import .Example: fun1, fun2`); note the dot before the package name. Then write the functions in the file normally.
## Documentation guideline
* Documentation is written with [Documenter.jl](https://github.com/JuliaDocs/Documenter.jl). Documentation files are in `docs/src`, generally as markdown files.
* If you want to modify an existing file, open it and start writing. If you want to add a new page, create a new markdown file in the appropriate subfolder of `docs/src` and add the line `"mytitle" => "path/to/file.md"` to the page structure in the `docs/make.jl` file [here](https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl/blob/main/docs/make.jl#L31-L53).
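For example, a new explanation page could be registered with an entry like the following (title and path are placeholders):
```julia
pages = [
    "Home" => "index.md",
    "Explanations" => [
        "My new page" => "explanations/my_new_page.md",
    ],
]
```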
* If you want to include a Julia code example that is **not** executed in the markdown file, use ````` ```julia ````` blocks, e.g.
````
```julia
a = 1
b = 2
```
````
* Julia code that **is** executed should use ````` ```@example ````` blocks, e.g.
````
```@example
a = 1
b = 2
```
````
* If you want to reuse variables between `@example` blocks, they should be named, for example
````
```@example filename
a = 1
b = 2
```
... some text ...
```@example filename
c = a + b
```
````
* If you want to run a Julia code block but don't want the output to be displayed, add `nothing # hide` as last line of the code block.
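For instance:
````
```@example
x = rand(3)  # computed, but its value is not displayed
nothing # hide
```
````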
* You can plot and include figures as follows
````
```@example
# code for plotting
savefig("figname.png") # hide
```
![](figname.png)
````
* Use single ticks for inline code ``` `A` ``` and double ticks for inline maths ``` ``A`` ```.
* For single-line equations, use ````` ```math ````` blocks, e.g.
````
```math
|A_cx-b_c| \le A_\Delta|x| + b_\Delta,
```
````
* You can refer to functions in the package with ``` [`func_name`](@ref) ```
* You can quote references with `[[REF01]](@ref)`
* If you want to add references, you can use the following template
````
#### [REF01]
```@raw html
<ul><li>
```
Author(s), [*Paper name in italic*](link_to_pdf_if_available), other info (publisher, year, etc.)
```@raw html
<li style="list-style: none"><details>
<summary>bibtex</summary>
```
```
INSERT BIBTEX HERE
```
```@raw html
</details></li></ul>
```
---
````
* If the pdf of the paper is freely (and legally!) available online, make the title of the paper a link to it.
* The reference code should be the first 3 letters of the first author's surname + the last two digits of the year, e.g. `[FER87]`. To disambiguate duplicates, append a letter, e.g. `[FER87a]`, `[FER87b]`.
### Docstrings
* Each exported function should have a docstring. The docstring should roughly follow this structure
```julia
"""
funname(param1, param2[, optional_param])
A short description (1-2 lines) of what the function does
### Input
List of inputs. Not needed if clear from description and signature.
### Output
List of outputs. Not needed if clear from description and signature.
### Notes
Anything else which is important.
### Algorithm
What algorithms the function uses, preferably with references.
### Example
At least one example, formatted as a Julia REPL session, of what the function does.
Preferably, as a doctest.
"""
```
* Optional parameters in the function signature go inside square brackets.
* List of inputs and outputs can be omitted if the function has few parameters and they are already clearly explained by the function signature and description.
* Examples should be [doctests](https://juliadocs.github.io/Documenter.jl/stable/man/doctests/). Exceptions to this can occur if e.g. the function is not deterministic (random initialization) or requires a heavy optional dependency.
Here is an example
````julia
"""
something(A::Matrix{T}, b::Vector{T}[, tol=1e-10]) where {T<:Interval}
This function computes the something product between the interval matrix ``A`` and
interval vector ``b``.
### Input
`A` -- interval matrix
`b` -- interval vector
`tol` -- (optional), tolerance to compute the something product, default 1e-10
### Output
The interval vector representing the something product.
### Notes
If `A` and `b` are real, use the [`somethingelse`](@ref) function instead.
### Algorithm
The function uses the *something sometimes somewhere* algorithm proposed by Someone in [[SOM42]](@ref).
### Example
```jldoctest
julia> A = [1..2 3..4;5..6 7..8]
2×2 Matrix{Interval{Float64}}:
[1, 2] [3, 4]
[5, 6] [7, 8]
julia> b = [-2..2, -2..2]
2-element Vector{Interval{Float64}}:
[-2, 2]
[-2, 2]
julia> something(A, b)
2-element Vector{Interval{Float64}}:
[-1, 1]
[-7, 8]
```
"""
````
## Acknowledgments
Here is a list of useful resources from which this guideline was inspired
* [JuliaReach developers docs](https://github.com/JuliaReach/JuliaReachDevDocs)
* [Making a first Julia pull request](https://kshyatt.github.io/post/firstjuliapr/)
* [ColPrac](https://github.com/SciML/ColPrac)
* [Julia contributing guideline](https://github.com/JuliaLang/julia/blob/master/CONTRIBUTING.md)
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 2939 | 
[](https://github.com/lucaferranti/IntervalLinearAlgebra.jl/blob/main/LICENSE)[](https://github.com/juliaintervals/IntervalLinearAlgebra.jl/actions)[](https://codecov.io/gh/juliaintervals/IntervalLinearAlgebra.jl)[](#Citation)[](https://doi.org/10.5281/zenodo.5363563)
## Overview
This package contains routines to perform numerical linear algebra using interval arithmetic. This can be used both for rigorous computations and uncertainty propagation.
A first overview of the package was given at JuliaCon 2021; the slides are available [here](https://github.com/lucaferranti/ILAjuliacon2021).
```@raw html
<iframe style="width:560px; height:315px" src="https://www.youtube.com/embed/fre0TKgLJwg" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
```
## Features
!!! note
The package is still under active development and things evolve quickly (or at least should)
- enclosure of the solution of interval linear systems
- exact characterization of the solution set of interval linear systems using Oettli-Präger
- verified solution of floating point linear systems
- enclosure of eigenvalues of interval matrices
- verified computation of eigenvalues and eigenvectors of floating point matrices
## Installation
Open a Julia session and enter
```julia
using Pkg; Pkg.add("IntervalLinearAlgebra")
```
this will download the package and all the necessary dependencies for you. Next you can import the package with
```julia
using IntervalLinearAlgebra
```
and you are ready to go.
## Quickstart
```julia
using IntervalLinearAlgebra, LazySets, Plots
A = [2..4 -1..1; -1..1 2..4]
b = [-2..2, -1..1]
Xenclose = solve(A, b)
polytopes = solve(A, b, LinearOettliPrager())
plot(UnionSetArray(polytopes), ratio=1, label="solution set", legend=:top)
plot!(IntervalBox(Xenclose), label="enclosure")
```

## Citation
If you use this package in your work, please cite it as
```
@software{ferranti2021interval,
author = {
Luca Ferranti and
Marcelo Forets and
David P. Sanders
},
title = {IntervalLinearAlgebra.jl: linear algebra done rigorously},
month = {9},
year = {2021},
doi = {10.5281/zenodo.5363563},
url = {https://github.com/juliaintervals/IntervalLinearAlgebra.jl}
}
```
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 7411 | # [References](@id all_ref)
#### [BAT14]
```@raw html
<ul><li>
```
K.-J. Bathe, Finite Element Procedures, Watertown, USA, 2014.
```@raw html
<li style="list-style: none"><details>
<summary>bibtex</summary>
```
```
@book{Bathe2014,
address = {Watertown, USA},
author = {Bathe, Klaus-J{\"{u}}rgen},
edition = {2},
title = {{Finite Element Procedures}},
year = {2014},
url = {https://web.mit.edu/kjb/www/Books/FEP_2nd_Edition_4th_Printing.pdf}
}
```
```@raw html
</details></li></ul>
```
---
#### [HLA13]
```@raw html
<ul><li>
```
M. Hladík. Bounds on eigenvalues of real and complex interval matrices. Appl. Math. Comput., 219(10):5584–5591, 2013.
```@raw html
<li style="list-style: none"><details>
<summary>bibtex</summary>
```
```
@article{Hla2013a,
author = "Milan Hlad\'{\i}k",
title = "Bounds on eigenvalues of real and complex interval matrices",
journal = "Appl. Math. Comput.",
fjournal = "Applied Mathematics and Computation",
volume = "219",
number = "10",
pages = "5584-5591",
year = "2013",
issn = "0096-3003",
doi = "10.1016/j.amc.2012.11.075",
}
```
```@raw html
</details></li></ul>
```
---
#### [HOR19]
```@raw html
<ul><li>
```
J. Horáček, [*Interval Linear and Nonlinear Systems*](https://kam.mff.cuni.cz/~horacek/source/horacek_phdthesis.pdf), PhD dissertation, 2019
```@raw html
<li style="list-style: none"><details>
<summary>bibtex</summary>
```
```
@article{horavcek2019interval,
title={Interval linear and nonlinear systems},
author={Hor{\'a}{\v{c}}ek, Jaroslav},
year={2019},
publisher={Univerzita Karlova, Matematicko-fyzik{\'a}ln{\'\i} fakulta}
}
```
```@raw html
</details></li></ul>
```
---
#### [JAU14]
```@raw html
<ul><li>
```
L. Jaulin and B. Desrochers, [*Introduction to the algebra of separators with application to path planning*](https://www.ensta-bretagne.fr/jaulin/paper_seppath.pdf), Engineering Applications of Artificial Intelligence 33 (2014): 141-147
```@raw html
<li style="list-style: none"><details>
<summary>bibtex</summary>
```
```
@article{jaulin2014introduction,
title={Introduction to the algebra of separators with application to path planning},
author={Jaulin, Luc and Desrochers, Beno{\^\i}t},
journal={Engineering Applications of Artificial Intelligence},
volume={33},
pages={141--147},
year={2014},
publisher={Elsevier}
}
```
```@raw html
</details></li></ul>
```
---
#### [NEU90]
```@raw html
<ul><li>
```
A. Neumaier, *Interval methods for systems of equations*, Cambridge university press, 1990
```@raw html
<li style="list-style: none"><details>
<summary>bibtex</summary>
```
```
@book{neumaier1990interval,
title={Interval methods for systems of equations},
author={Neumaier, Arnold and Neumaier, Arnold},
number={37},
year={1990},
publisher={Cambridge university press}
}
```
```@raw html
</details></li></ul>
```
---
#### [OET64]
```@raw html
<ul><li>
```
W. Oettli and W. Prager, *Compatibility of approximate solution of linear equations with given error bounds for coefficients and right-hand sides*, Numerische
Mathematik, 6(1):405–409, 1964.
```@raw html
<li style="list-style: none"><details>
<summary>bibtex</summary>
```
```
@article{oettli1964compatibility,
title={Compatibility of approximate solution of linear equations with given error bounds for coefficients and right-hand sides},
author={Oettli, Werner and Prager, William},
journal={Numerische Mathematik},
volume={6},
number={1},
pages={405--409},
year={1964},
publisher={Springer}
}
```
```@raw html
</details></li></ul>
```
---
#### [ROH06]
```@raw html
<ul><li>
```
J. Rohn. *Solvability of systems of interval linear equations and inequalities*, Linear optimization problems with inexact data, pages 35–77. Springer, 2006
```@raw html
<li style="list-style: none"><details>
<summary>bibtex</summary>
```
```
@incollection{rohn2006solvability,
title={Solvability of systems of interval linear equations and inequalities},
author={Rohn, Jir{\i}},
booktitle={Linear optimization problems with inexact data},
pages={35--77},
year={2006},
publisher={Springer}
}
```
```@raw html
</details></li></ul>
```
---
#### [ROH95]
```@raw html
<ul><li>
```
J. Rohn and V. Kreinovich. [*Computing exact componentwise bounds on solutions of linear systems with interval data is NP-hard*](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.88.8810&rep=rep1&type=pdf). SIAM Journal on Matrix
Analysis and Applications, 16(2):415–420, 1995.
```@raw html
<li style="list-style: none"><details>
<summary>bibtex</summary>
```
```
@article{rohn1995computing,
title={Computing exact componentwise bounds on solutions of linear systems with interval data is NP-hard},
author={Rohn, Jiri and Kreinovich, Vladik},
journal={SIAM Journal on Matrix Analysis and Applications},
volume={16},
number={2},
pages={415--420},
year={1995},
publisher={SIAM}
}
```
```@raw html
</details></li></ul>
```
---
#### [RUM10]
```@raw html
<ul><li>
```
S.M. Rump, [*Verification methods: Rigorous results using floating-point arithmetic*](https://www.tuhh.de/ti3/paper/rump/Ru10.pdf), Acta Numerica, 19:287–449, 2010
```@raw html
<li style="list-style: none"><details>
<summary>bibtex</summary>
```
```
@article{rump2010verification,
title={Verification methods: Rigorous results using floating-point arithmetic},
author={Rump, Siegfried M},
journal={Acta Numerica},
volume={19},
pages={287--449},
year={2010},
publisher={Cambridge University Press}
}
```
```@raw html
</details></li></ul>
```
---
#### [RUM01]
```@raw html
<ul><li>
```
Rump, Siegfried M. [*Computational error bounds for multiple or nearly multiple eigenvalues*](https://www.tuhh.de/ti3/paper/rump/Ru99c.pdf), Linear algebra and its applications 324.1-3 (2001): 209-226.
```@raw html
<li style="list-style: none"><details>
<summary>bibtex</summary>
```
```
@article{rump2001computational,
title={Computational error bounds for multiple or nearly multiple eigenvalues},
author={Rump, Siegfried M},
journal={Linear algebra and its applications},
volume={324},
number={1-3},
pages={209--226},
year={2001},
publisher={Elsevier}
}
```
```@raw html
</details></li></ul>
```
---
#### [RUM99]
```@raw html
<ul><li>
```
Rump, Siegfried M. [*Fast and parallel interval arithmetic*](https://www.tuhh.de/ti3/paper/rump/Ru99b.pdf), BIT Numerical Mathematics 39.3, 534-554, 1999
```@raw html
<li style="list-style: none"><details>
<summary>bibtex</summary>
```
```
@article{rump1999fast,
title={Fast and parallel interval arithmetic},
author={Rump, Siegfried M},
journal={BIT Numerical Mathematics},
volume={39},
number={3},
pages={534--554},
year={1999},
publisher={Springer}
}
```
```@raw html
</details></li></ul>
```
---
#### [SKA06]
```@raw html
<ul><li>
```
Skalna, Iwona [*A Method for Outer Interval Solution of Systems of Linear Equations Depending Linearly on Interval Parameters*](https://doi.org/10.1007/s11155-006-4878-y),
Reliable Computing, 12.2, 107-120, 2006
```@raw html
<li style="list-style: none"><details>
<summary>bibtex</summary>
```
```
@article{skalna2006,
title={A Method for Outer Interval Solution of Systems of Linear Equations Depending Linearly on Interval Parameters},
author={Skalna, Iwona},
journal={Reliable Computing},
volume={12},
number={2},
pages={107--120},
year={2006},
publisher={Springer}
}
```
```@raw html
</details></li></ul>
```
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 416 | # Algorithms
Algorithms used to solve interval linear systems.
```@index
Pages=["algorithms.md"]
```
## Enclosure computation
### Direct methods
```@docs
GaussianElimination
HansenBliekRohn
```
### Iterative methods
```@docs
GaussSeidel
Jacobi
LinearKrawczyk
```
## Exact characterization
```@docs
LinearOettliPrager
NonLinearOettliPrager
```
## Parametric Solvers
### Direct Methods
```@docs
Skalna06
``` | IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 147 | # Interval matrices classification
```@index
Pages = ["classify.md"]
```
```@autodocs
Modules=[IntervalLinearAlgebra]
Pages = ["classify.jl"]
``` | IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 333 | # Eigenvalues computations
```@index
Pages = ["eigenvalues.md"]
```
## Interval matrices eigenvalues
```@autodocs
Modules=[IntervalLinearAlgebra]
Pages=["interval_eigenvalues.jl"]
Private=true
```
## Floating point eigenvalues verification
```@autodocs
Modules=[IntervalLinearAlgebra]
Pages=["verify_eigs.jl"]
Private=true
```
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 168 | # Verified real linear systems
```@index
Pages = ["epsilon_inflation.md"]
```
```@autodocs
Modules = [IntervalLinearAlgebra]
Pages = ["linear_systems/verify.jl"]
```
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 340 | # Miscellaneous
Other possibly useful functionalities.
```@index
Pages = ["misc.md"]
```
## Matrix multiplication API
```@docs
set_multiplication_mode
```
## Symbolic Interface
```@docs
@affinevars
AffineExpression
AffineParametricArray
```
## Others
```@autodocs
Modules = [IntervalLinearAlgebra]
Pages = ["utils.jl", "rref.jl"]
```
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 150 | # Preconditioners
```@index
Pages = ["precondition.md"]
```
```@autodocs
Modules=[IntervalLinearAlgebra]
Pages=["precondition.jl"]
Private=true
```
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 74 | # General inteface for solving interval linear systems
```@docs
solve
``` | IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 4466 | # Preconditioning interval linear systems
```@contents
Pages = ["preconditioning.md"]
```
## Basic concepts
Consider the square interval linear system
```math
\mathbf{Ax}=\mathbf{b},
```
*preconditioning* the interval linear system by a *real* matrix ``C`` means multiplying both sides of the equation by ``C``, obtaining the new system
```math
C\mathbf{Ax}=C\mathbf{b},
```
which is called *preconditioned system*. Let us denote by ``A_c`` the *midpoint matrix* of ``\mathbf{A}``. Popular choices for ``C`` are
- Inverse midpoint preconditioning: ``C\approx A_c^{-1}``
- Inverse diagonal midpoint preconditioning: ``C\approx D_{A_c}^{-1}`` where ``D_{A_c}`` is the diagonal matrix containing the main diagonal of ``A_c``.
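To make this concrete, here is a minimal sketch of inverse midpoint preconditioning done by hand (the matrix and vector are illustrative; `mid` comes from IntervalArithmetic.jl, which the package re-exports):
```julia
using IntervalLinearAlgebra

A = [2..4 -1..1; -1..1 2..4]
b = [-2..2, -1..1]

C = inv(mid.(A))  # approximate inverse of the midpoint matrix
CA = C * A        # preconditioned matrix
Cb = C * b        # preconditioned right-hand side
```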
## Advantages of preconditioning
Using preconditioning to solve an interval linear system can have mainly two advantages.
### Extend usability of algorithms
Some algorithms require the matrix to have a specific structure in order to be used. For example, the Hansen-Bliek-Rohn algorithm requires ``\mathbf{A}`` to be an H-matrix. However, the algorithm can be extended to work with strongly regular matrices using inverse midpoint preconditioning. (Recall that an interval matrix is strongly regular if ``A_c^{-1}\mathbf{A}`` is an H-matrix.)
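In code, strong regularity can be checked directly from this definition; the sketch below assumes the classification predicate `is_H_matrix` provided by the package:
```julia
using IntervalLinearAlgebra

A = [2..4 -1..1; -1..1 2..4]
B = inv(mid.(A)) * A  # A is strongly regular iff B is an H-matrix
is_H_matrix(B)
```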
### Improve numerical stability
Even if the algorithms theoretically work, they can be prone to numerical instability without preconditioning. This is demonstrated with the following example; a deeper theoretical analysis can be found in [[NEU90]](@ref).
Let ``\mathbf{A}`` be an interval lower triangular matrix whose lower part is filled with ``[1, 1]``, for example
```@example precondition
using IntervalLinearAlgebra
N = 5 # problem dimension
A = tril(fill(1..1, N, N))
```
and let ``\mathbf{b}`` have ``[-2, 2]`` as its first element and all other elements set to zero
```@example precondition
b = vcat(-2..2, fill(0, N-1))
```
the "pen and paper" solution would be ``[[-2, 2], [-2, 2], [0, 0], [0, 0], [0, 0]]^\mathsf{T}``, that is a vector with ``[-2, 2]`` as first two elements and all other elements set to zero. Now, let us try to solve without preconditioning.
```@example precondition
solve(A, b, GaussianElimination(), NoPrecondition())
```
```@example precondition
solve(A, b, HansenBliekRohn(), NoPrecondition())
```
It can be seen that the width of the intervals grows exponentially; this gets worse with bigger matrices.
```@example precondition
N = 100 # problem dimension
A1 = tril(fill(1..1, N, N))
b1 = [-2..2, fill(0..0, N-1)...]
solve(A1, b1, GaussianElimination(), NoPrecondition())
```
```@example precondition
solve(A1, b1, HansenBliekRohn(), NoPrecondition())
```
However, this numerical stability issue is solved by using inverse midpoint preconditioning.
```@example precondition
solve(A, b, GaussianElimination(), InverseMidpoint())
```
```@example precondition
solve(A, b, HansenBliekRohn(), InverseMidpoint())
```
## Disadvantages of preconditioning
While preconditioning is useful, sometimes even necessary, to solve interval linear systems, it comes at a price. It is important to understand that *the preconditioned interval linear system is **not** equivalent to the original one*; in particular, the preconditioned problem can have a larger solution set.
Let us consider the following linear system
```@example precondition
A = [2..4 -2..1;-1..2 2..4]
```
```@example precondition
b = [-2..2, -2..2]
```
Now we plot the solution set of the original and preconditioned problem using [Oettli-Präger](solution_set.md)
```@example precondition
using LazySets, Plots
polytopes = solve(A, b, LinearOettliPrager())
polytopes_precondition = solve(A, b, LinearOettliPrager(), InverseMidpoint())
plot(UnionSetArray(polytopes_precondition), ratio=1, label="preconditioned", legend=:right)
plot!(UnionSetArray(polytopes), label="original", α=1)
xlabel!("x")
ylabel!("y")
savefig("solution_set_precondition.png") # hide
```

## Take-home lessons
- Preconditioning an interval linear system can enlarge the solution set
- Preconditioning is sometimes needed to achieve numerical stability
- A rough rule of thumb (the same used by `IntervalLinearAlgebra.jl` if no preconditioning is specified)
- not needed for M-matrices and strictly diagonal dominant matrices
- might be needed for H-matrices (IntervalLinearAlgebra.jl uses inverse midpoint by default with H-matrices)
- must be used for strongly regular matrices | IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 5753 | # Solution set of interval linear system
```@contents
Pages=["solution_set.md"]
```
## Interval linear systems
An interval linear system is defined as
```math
\mathbf{A}\mathbf{x}=\mathbf{b}
```
where ``\mathbf{A}\in\mathbb{I}\mathbb{R}^{n\times n}`` and ``\mathbf{b}\in\mathbb{I}\mathbb{R}^n`` are an interval matrix and vector, respectively.
The solution set ``\mathbf{x}`` is defined as
```math
\mathbf{x} = \{x \in \mathbb{R}^n | Ax=b \text{ for some } A\in\mathbf{A}, b\in\mathbf{b} \}.
```
In other words, ``\mathbf{x}`` is the set of solutions of the real linear systems ``Ax=b`` for some ``A\in\mathbf{A}`` and ``b\in\mathbf{b}``.
If the interval matrix ``\mathbf{A}`` is *regular*, that is all ``A\in\mathbf{A}`` are invertible, then the solution set ``\mathbf{x}`` will be non-empty and bounded.
In general, checking for regularity of an interval matrix has exponential complexity.
## Solution by Monte-Carlo
A naive approach to solving an interval linear system would be to use Monte-Carlo, i.e. to randomly sample elements from the intervals and solve several random real systems.
Suppose we want to solve the linear system
```math
\begin{bmatrix}
[2, 4]&[-2,1]\\
[-1, 2]&[2, 4]
\end{bmatrix}\mathbf{x} =
\begin{bmatrix}
[-2, 2]\\
[-2, 2]
\end{bmatrix}
```
Since we are planning to solve several thousand instances of the interval problem and we are working with small arrays, we can use [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl) to speed up the computations.
```@example solution_set
using IntervalLinearAlgebra, StaticArrays
A = @SMatrix [2..4 -2..1; -1..2 2..4]
b = @SVector [-2..2, -2..2]
nothing # hide
```
To perform Monte-Carlo, we need to sample from the intervals. This can be achieved using the `rand` function, for example
```@example solution_set
rand(1..2)
```
we are now ready for our Monte-Carlo simulation; let us solve ``100000`` random instances
```@example solution_set
N = 100000
xs = [rand.(A)\rand.(b) for _ in 1:N]
nothing # hide
```
Now we plot a 2D histogram to inspect the distribution of the solutions.
```@example solution_set
using Plots
x = [xs[i][1] for i in 1:N]
y = [xs[i][2] for i in 1:N]
histogram2d(x, y, ratio=1)
xlabel!("x")
ylabel!("y")
savefig("histogram-2d.png") # hide
```

As we can see, most of the solutions seem to be concentrated close to the origin, but repeating the experiment enough times we also got some solutions farther away, obtaining a star-shaped area.
Now the question is, **have we captured the whole solution set?**
## Oettli-Präger theorem
The solution set ``\mathbf{x}`` is exactly characterized by the **Oettli-Präger** theorem [[OET64]](@ref), which says that an interval linear system $\mathbf{A}\mathbf{x}=\mathbf{b}$
is equivalent to the set of real inequalities
```math
|A_cx-b_c| \le A_\Delta|x| + b_\Delta,
```
where ``A_c`` and ``A_\Delta`` are the midpoint and radius matrices of ``\mathbf{A}``; ``b_c`` and ``b_\Delta`` are defined similarly. The absolute values are taken elementwise.
We have now transformed the set of interval equalities into a set of real inequalities. We can easily get rid of the absolute value on the left obtaining the system
```math
\begin{cases}
A_cx-b_c \le A_\Delta|x| + b_\Delta\\
-(A_cx-b_c) \le A_\Delta|x| + b_\Delta
\end{cases}
```
We can remove the absolute value on the right by considering each orthant separately, obtaining ``2^n`` systems of linear inequalities, where ``n`` is the dimension of the problem.
Practically, this means rewriting ``|x|=D_ex``, where ``e\in\{\pm 1\}^n`` and ``D_e`` is the diagonal matrix with ``e`` on the main diagonal. As there are ``2^n`` possible instances of ``e``, we obtain ``2^n`` systems of linear inequalities of the form
```math
\begin{bmatrix}
A_c-A_\Delta D_e\\
-A_c-A_\Delta D_e
\end{bmatrix}x\le \begin{bmatrix}b_\Delta+b_c\\b_\Delta-b_c\end{bmatrix}
```
as this inequality is in the form ``\tilde{A}x\le \tilde{b}``, its solution set will be a convex polytope. This also has an important theoretical consequence: the solution set of any interval linear system is composed of the union of ``2^n`` convex polytopes (some possibly empty), each lying entirely in one orthant.
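To make the construction concrete, the following sketch assembles the polytope of a single orthant by hand (`mid` and `radius` come from IntervalArithmetic.jl, `HPolytope` from LazySets.jl; the appended rows ``-D_e x \le 0`` restrict the polytope to the chosen orthant). The built-in solver shown next automates this over all orthants.
```julia
using IntervalLinearAlgebra, LazySets, LinearAlgebra

A = [2..4 -2..1; -1..2 2..4]
b = [-2..2, -2..2]

Ac, AΔ = mid.(A), radius.(A)
bc, bΔ = mid.(b), radius.(b)

e = [1, 1]               # the orthant x₁ ≥ 0, x₂ ≥ 0
De = Matrix(Diagonal(e))
Ã = [Ac - AΔ*De; -Ac - AΔ*De; -De]
b̃ = [bΔ + bc; bΔ - bc; zeros(2)]

P = HPolytope(Ã, b̃)     # piece of the solution set in this orthant
```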
In `IntervalLinearAlgebra.jl` the polytopes composing the solution set can be found using the `LinearOettliPrager()` solver. Note that to use it you need to import `LazySets.jl` first.
```@example solution_set
using LazySets
polytopes = solve(A, b, LinearOettliPrager())
plot(polytopes, ratio=1, legend=:none)
histogram2d!(x, y)
xlabel!("x")
ylabel!("y")
savefig("oettli.png") # hide
```

As we can see, the original Monte-Carlo approximation, despite the high number of iterations, could not cover the whole solution set.
Note also that the solution set is non-convex but is composed of ``4`` convex polygons, one in each orthant. This is a general property of interval linear systems. For example, let us consider the interval linear system
```math
\begin{bmatrix}
[4.5, 4.5]&[0, 2]&[0, 2]\\
[0, 2]&[4.5, 4.5]&[0, 2]\\
[0, 2]&[0, 2]& [4.5, 4.5]
\end{bmatrix}\mathbf{x}=\begin{bmatrix}[-1, 1]\\
[-1, 1]\\
[-1, 1]\end{bmatrix}
```
its solution set is depicted in the next picture.

## Disadvantages of Oettli-Präger
As the number of orthants grows exponentially with the dimension ``n``, applying Oettli-Präger has exponential complexity and is thus practically infeasible in higher dimensions. Moreover, even computing the interval hull of the solution set is NP-hard [[ROH95]](@ref). For this reason, in practical applications polynomial-time algorithms that return an interval enclosure of the solution set are used, although these may return an interval box strictly larger than the interval hull.
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 7057 | # Eigenvalue computations
## Eigenvalues of interval matrices
Given a (real or complex) interval matrix ``\mathbf{A}\in\mathbb{IC}^{n\times n}``, we define the eigenvalue set
```math
\mathbf{\Lambda}=\{\lambda\in\mathbb{C}: \lambda\text{ is an eigenvalue of }A\text{ for some }A\in\mathbf{A}\}.
```
While characterizing the solution set ``\mathbf{\Lambda}`` (or even its hull) is computationally challenging, the package offers the function [`eigenbox`](@ref), which computes an interval box containing ``\mathbf{\Lambda}``.
!!! note
At the moment, `eigenbox` is not rigorous, that is the computations for the non-interval eigenvalue problem solved internally are carried out using normal non-verified floating point computations.
To demonstrate the functionality, let us consider the following interval matrix
```@example eigs
using IntervalLinearAlgebra
A = [-3.. -2 4..5 4..6 -1..1.5;
-4.. -3 -4.. -3 -4.. -3 1..2;
-5.. -4 2..3 -5.. -4 -1..0;
-1..0.1 0..1 1..2 -4..2.5]
```
Now we can bound the eigenvalue set
```@example eigs
ebox = eigenbox(A)
```
To get a qualitative evaluation of the enclosure, we can simulate the eigenvalue set of ``\mathbf{A}`` using Monte-Carlo, as is done in the following example
```@example eigs
using Random; # hide
Random.seed!(42) # hide
using Plots
N = 1000
evalues = zeros(ComplexF64, 4, N)
for i in 1:N
evalues[:, i] = eigvals(rand.(A))
end
rpart = real.(evalues)
ipart = imag.(evalues)
plot(IntervalBox(real(ebox), imag(ebox)); ratio=1, label="enclosure")
scatter!(rpart[1, :], ipart[1, :]; label="λ₁")
scatter!(rpart[2, :], ipart[2, :]; label="λ₂")
scatter!(rpart[3, :], ipart[3, :]; label="λ₃")
scatter!(rpart[4, :], ipart[4, :]; label="λ₄")
xlabel!("real")
ylabel!("imag")
savefig("eigs.png") # hide
```

Internally, the general interval eigenvalue problem is reduced to a real symmetric interval eigenvalue problem, as described in [[HLA13]](@ref). Recall that a real symmetric matrix has only real eigenvalues. The real symmetric interval eigenvalue problem can be solved in two ways
- Rohn method -- (the default) computes an enclosure of the eigenvalue set of the symmetric interval matrix. This is fast, but the enclosure can be strictly larger than the hull
- Hertz method -- computes the exact hull of the eigenvalues of the symmetric interval matrix. Generally, this leads to tighter bounds, but it has exponential complexity, so it will be infeasible for big matrices.
The function `eigenbox` can take a second optional parameter (`Rohn()` by default) to specify which algorithm to use for the real symmetric interval eigenvalue problem. The following example bounds the eigenvalues of the previous matrix using `Hertz()`; as the figure below shows, the Hertz method gives a tighter bound on the eigenvalue set.
```@example eigs
eboxhertz = eigenbox(A, Hertz())
```
```@example eigs
plot(IntervalBox(real(ebox), imag(ebox)); ratio=1, label="enclosure")
plot!(IntervalBox(real(eboxhertz), imag(eboxhertz)); label="Hertz enclosure", color="#00FF00") # hide
scatter!(rpart[1, :], ipart[1, :]; label="λ₁") # hide
scatter!(rpart[2, :], ipart[2, :]; label="λ₂") # hide
scatter!(rpart[3, :], ipart[3, :]; label="λ₃") # hide
scatter!(rpart[4, :], ipart[4, :]; label="λ₄") # hide
xlabel!("real")
ylabel!("imag")
savefig("eigs2.png") # hide
```

## Verified floating point computations of eigenvalues
In the previous section we considered the problem of finding the eigenvalue set (or an enclosure of it) of an interval matrix. In this section, we consider the problem of computing eigenvalues and eigenvectors of a floating point matrix *rigorously*, that is we want to find an enclosure of the true eigenvalues and eigenvectors of the matrix. In `IntervalLinearAlgebra.jl` this is achieved using the [`verify_eigen`](@ref) function, as the following example demonstrates.
```@example eigs
A = [1 2; 3 4]
evals, evecs, cert = verify_eigen(A)
evals
```
```@example eigs
evecs
```
```@example eigs
cert
```
If called with only one input, `verify_eigen` will first compute an approximate solution for the eigenvalues and eigenvectors of ``A`` and use that to find a rigorous bound on the true eigenvalues and eigenvectors of the matrix. It is also possible to give the function an eigenvalue ``\lambda`` and eigenvector ``\vec{v}``, in which case it will compute a rigorous bound only for the specified eigenvalue ``\lambda`` and eigenvector ``\vec{v}``. The last output of the function is a vector of boolean certificates: if the ``i``th element is set to true, then the enclosure of the ``i``th eigenvalue and eigenvector is rigorous, that is, the algorithm could prove that that enclosure contains the true eigenvalue and eigenvector of ``A``. If the certificate is false, then the algorithm could not prove the validity of the enclosure.
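For instance, one could verify a single eigenpair starting from an approximate decomposition computed in ordinary floating point arithmetic; this sketch assumes the multi-argument form accepts the approximate pair as described above:
```julia
using IntervalLinearAlgebra, LinearAlgebra

A = [1 2; 3 4]
F = eigen(A)                         # approximate eigendecomposition
λ, v = F.values[1], F.vectors[:, 1]
eval1, evec1, cert1 = verify_eigen(A, λ, v)
```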
The function also accepts interval inputs. This is handy if the input matrix elements cannot be represented exactly as floating point numbers. Note however that this is meant only for interval matrices with very small intervals. If you have larger intervals, you should use the function of the previous section.
To test the function, let us consider the following example. First we generate random eigenvalues and eigenvectors
```@example eigs
ev = sort(randn(5))
D = Diagonal(ev)
P = randn(5, 5)
Pinv, _ = epsilon_inflation(P, Diagonal(ones(5)))
A = interval.(P) * D * Pinv
```
Now we have obtained an interval matrix ``\mathbf{A}`` such that `ev` and `P` are eigenvalues and eigenvectors of some ``A\in\mathbf{A}``. Note that ``P^{-1}`` had to be computed rigorously using [`epsilon_inflation`](@ref). Now we can compute its eigenvalues and eigenvectors and verify that the enclosures contain the true values.
```@example eigs
evals, evecs, cert = verify_eigen(A)
evals
```
```@example eigs
evecs
```
```@example eigs
cert
```
```@example eigs
ev .∈ evals
```
Note also that even though the original eigenvalues and eigenvectors were real, the returned enclosures are complex. This is because any infinitesimally small perturbation in the elements of ``A`` may cause the eigenvalues to move away from the real line. For this reason, unless the matrix has some special structure that guarantees the eigenvalues are real (e.g. symmetric matrices), a valid enclosure should always be complex.
Finally, the concept of enclosure of eigenvector may feel confusing, since eigenvectors are unique up to scale.
This scale ambiguity is resolved by starting with the approximate eigenvector computed by normal linear algebra routines and fixing the element with the highest magnitude.
```@example eigs
complex_diam(x) = max(diam(real(x)), diam(imag(x)))
complex_diam.(evecs)
```
As can be seen, for each eigenvector there's an interval with zero width, since to resolve scale ambiguity one non-zero element can be freely chosen (assuming eigenvalues have algebraic multiplicity ``1``). After that, the eigenvector is fixed and it makes sense to talk about enclosures of the other elements.
| IntervalLinearAlgebra | https://github.com/JuliaIntervals/IntervalLinearAlgebra.jl.git |
|
[
"MIT"
] | 0.1.6 | 20fe817cbd2677717e6c97ab98204770286245e8 | docs | 4819 | # Linear systems
```@contents
Pages = ["linear_systems.md"]
```
This tutorial will show you how to solve linear systems rigorously using `IntervalLinearAlgebra.jl`.
## Solve interval linear systems
An interval linear system ``\mathbf{Ax}=\mathbf{b}`` is a linear system where ``\mathbf{A}`` and ``\mathbf{b}`` contain intervals. In general, the solution set ``\mathbf{x}`` can have a complex non-convex shape and can thus be hard to characterize exactly (see [this article](../explanations/solution_set.md) for more details). Hence we are interested in finding an interval box containing ``\mathbf{x}``. In `IntervalLinearAlgebra.jl`, this is achieved through the `solve` function, which gives a handy interface to choose the algorithm and preconditioning mechanism. The syntax to call solve is
```julia
solve(A, b, method, precondition)
```
- ``A`` is an interval matrix
- ``b`` is an interval vector
- `method` is an optional parameter to choose the algorithm used to solve the interval linear system, see below for more details
- `precondition` is an optional parameter to choose the preconditioning for the problem. More details about preconditioning can be found [here](../explanations/preconditioning.md)
### Methods
The supported methods are
- Direct solvers
- [`GaussianElimination`](@ref)
- [`HansenBliekRohn`](@ref)
- [`LinearOettliPrager`](@ref) (requires importing LazySets.jl)
- Iterative solvers
- [`LinearKrawczyk`](@ref)
- [`Jacobi`](@ref)
- [`GaussSeidel`](@ref)
- [`NonLinearOettliPrager`](@ref) (requires importing IntervalConstraintProgramming.jl)
`LinearOettliPrager` and `NonLinearOettliPrager` are "special" in the sense that they try to exactly characterize the solution set using Oettli-Präger and are not considered in this tutorial. More information about them can be found [here](../explanations/solution_set.md). The other solvers return a vector of intervals, representing an interval enclosure of the solution set. If the method is not specified, Gaussian elimination is used by default.
### Preconditioning
The supported preconditioning mechanisms are
- [`NoPrecondition`](@ref)
- [`InverseMidpoint`](@ref)
- [`InverseDiagonalMidpoint`](@ref)
If preconditioning is not specified, then a heuristic strategy based on the type of matrix and solver is used to choose the preconditioning. The strategy is discussed at the end of the [preconditioning tutorial](../explanations/preconditioning.md).
### Examples
We now demonstrate a few examples using the solve function, these examples are taken from [[HOR19]](@ref).
```@example ils
using IntervalLinearAlgebra
A = [4..6 -1..1 -1..1 -1..1;-1..1 -6.. -4 -1..1 -1..1;-1..1 -1..1 9..11 -1..1;-1..1 -1..1 -1..1 -11.. -9]
```
```@example ils
b = [-2..4, 1..8, -4..10, 2..12]
```
```@example ils
solve(A, b, HansenBliekRohn())
```
```@example ils
solve(A, b, GaussianElimination())
```
```@example ils
solve(A, b, GaussSeidel())
```
For iterative methods, an additional optional parameter `X0` representing an initial guess for the solution's enclosure can be given. If not given, a rough initial enclosure is computed using the [`enclose`](@ref) function.
```@example ils
X0 = fill(-5..5, 4)
solve(A, b, GaussSeidel(), InverseMidpoint(), X0)
```
## Verify real linear systems
`IntervalLinearAlgebra.jl` also offers functionalities to solve real linear systems rigorously. It is of course possible to just convert the real system to an interval system and use the methods described above. In this situation, however, the intervals in the system will have very small diameters (zero or a few floating point units). To solve these kinds of systems, it can be more efficient to use the *epsilon inflation* method [[RUM10]](@ref), especially for bigger matrices. Here is an example
```@example ils
A = [1.0 2;3 4]
```
```@example ils
b = [3, 7]
```
the real linear system ``Ax=b`` can now be solved *rigorously* using the [`epsilon_inflation`](@ref) function.
```@example ils
x, cert = epsilon_inflation(A, b)
@show cert
x
```
This function returns two values: an interval vector `x` and a boolean certificate `cert`. If `cert==true` then `x` is guaranteed to be an enclosure of the real linear system `Ax=b`. If `cert == false` then the algorithm could not verify that the enclosure is rigorous, i.e. it may or may not contain the true solution.
In the following example the epsilon inflation method returns a non-rigorous bound
```@example ils
A1 = [1..1+1e-16 2;3 6]
x1, cert = epsilon_inflation(A1, b)
@show cert
x1
```
Since the matrix `A1` is non-regular (it contains the matrix ``\begin{bmatrix}1&2\\3&6\end{bmatrix}``, which is singular), the solution set is unbounded, hence the algorithm (rightly) could not prove that `x1` is an enclosure of the true solution.
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 599 | using Documenter
using MLJBase
const REPO="github.com/JuliaAI/MLJBase.jl"
makedocs(;
modules=[MLJBase],
format=Documenter.HTML(prettyurls = get(ENV, "CI", nothing) == "true"),
pages=[
"Home" => "index.md",
"Resampling" => "resampling.md",
"Composition" => "composition.md",
"Datasets" => "datasets.md",
"Distributions" => "distributions.md",
"Utilities" => "utilities.md"
],
repo="https://$REPO/blob/{commit}{path}#L{line}",
sitename="MLJBase.jl"
)
deploydocs(;
repo=REPO,
devbranch="dev",
push_preview=false,
)
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 672 | module DefaultMeasuresExt
using MLJBase
import MLJBase: default_measure, ProbabilisticDetector, DeterministicDetector
using StatisticalMeasures
using StatisticalMeasures.ScientificTypesBase
default_measure(::Deterministic, ::Type{<:Union{Continuous,Count}}) = l2
default_measure(::Deterministic, ::Type{<:Finite}) = misclassification_rate
default_measure(::Probabilistic, ::Type{<:Union{Finite,Count}}) = log_loss
default_measure(::Probabilistic, ::Type{<:Continuous}) = log_loss
default_measure(::ProbabilisticDetector, ::Type{<:OrderedFactor{2}}) = area_under_curve
default_measure(::DeterministicDetector, ::Type{<:OrderedFactor{2}}) = balanced_accuracy
end # module
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 9382 | module MLJBase
# ===================================================================
# IMPORTS
using Reexport
import Base: ==, precision, getindex, setindex!
import Base.+, Base.*, Base./
# Scitype
using ScientificTypes
# Traits for models and measures (which are being overloaded):
using StatisticalTraits
for trait in StatisticalTraits.TRAITS
eval(:(import StatisticalTraits.$trait))
end
import LearnAPI
import StatisticalTraits.snakecase
import StatisticalTraits.info
# Interface
# HACK: When https://github.com/JuliaAI/MLJModelInterface.jl/issues/124 and
# https://github.com/JuliaAI/MLJModelInterface.jl/issues/131 are resolved:
# Uncomment next line and delete "Hack Block"
# using MLJModelInterface
#####################
# Hack Block begins #
#####################
exported_names(m::Module) =
filter!(x -> Base.isexported(m, x),
Base.names(m; all=true, imported=true))
import MLJModelInterface
for name in exported_names(MLJModelInterface)
name in [
:UnivariateFinite,
:augmented_transform,
:info,
:scitype # Needed to avoid clashing with `ScientificTypes.scitype`
] && continue
quote
import MLJModelInterface.$name
end |> eval
end
###################
# Hack Block ends #
###################
import MLJModelInterface: ProbabilisticDetector, DeterministicDetector
import MLJModelInterface: fit, update, update_data, transform,
inverse_transform, fitted_params, predict, predict_mode,
predict_mean, predict_median, predict_joint,
evaluate, clean!, is_same_except,
save, restore, is_same_except, istransparent,
params, training_losses, feature_importances
# Macros
using Parameters
# Containers & data manipulation
using Serialization
using Tables
import PrettyTables
using DelimitedFiles
using OrderedCollections
using CategoricalArrays
import CategoricalArrays.DataAPI.unwrap
import InvertedIndices: Not
import Dates
# Distributed computing
using Distributed
using ComputationalResources
import ComputationalResources: CPU1, CPUProcesses, CPUThreads
using ProgressMeter
import .Threads
# Operations & extensions
import StatsBase
import StatsBase: fit!, mode, countmap
import Missings: levels
using Missings
import Distributions
using CategoricalDistributions
import Distributions: pdf, logpdf, sampler
const Dist = Distributions
# Measures
import StatisticalMeasuresBase
# Plots
using RecipesBase: RecipesBase, @recipe
# from Standard Library:
using Statistics, LinearAlgebra, Random, InteractiveUtils
# ===================================================================
## CONSTANTS
# for variable global constants, see src/init.jl
const PREDICT_OPERATIONS = (:predict,
:predict_mean,
:predict_mode,
:predict_median,
:predict_joint)
const OPERATIONS = (PREDICT_OPERATIONS..., :transform, :inverse_transform)
# the directory containing this file: (.../src/)
const MODULE_DIR = dirname(@__FILE__)
# horizontal space for field names in `MLJType` object display:
const COLUMN_WIDTH = 24
# how deep to display fields of `MLJType` objects:
const DEFAULT_SHOW_DEPTH = 0
const DEFAULT_AS_CONSTRUCTED_SHOW_DEPTH = 2
const INDENT = 2
const Arr = AbstractArray
const Vec = AbstractVector
# Note the following are existential (union) types. In particular,
# ArrMissing{Integer} is not the same as Arr{Union{Missing,Integer}},
# etc.
const ArrMissing{T,N} = Arr{<:Union{Missing,T},N}
const VecMissing{T} = ArrMissing{T,1}
const CatArrMissing{T,N} = ArrMissing{CategoricalValue{T},N}
const MMI = MLJModelInterface
const FI = MLJModelInterface.FullInterface
# ===================================================================
# Computational Resource
# default_resource allows to switch the mode of parallelization
default_resource() = DEFAULT_RESOURCE[]
default_resource(res) = (DEFAULT_RESOURCE[] = res;)
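# For instance (illustrative): `default_resource(CPUProcesses())` switches the
# default parallelization mode to distributed processes.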
# ===================================================================
# Includes
include("init.jl")
include("utilities.jl")
include("show.jl")
include("interface/data_utils.jl")
include("interface/model_api.jl")
include("models.jl")
include("sources.jl")
include("machines.jl")
include("composition/learning_networks/nodes.jl")
include("composition/learning_networks/inspection.jl")
include("composition/learning_networks/signatures.jl")
include("composition/learning_networks/replace.jl")
include("composition/models/network_composite_types.jl")
include("composition/models/network_composite.jl")
include("composition/models/pipelines.jl")
include("composition/models/transformed_target_model.jl")
include("operations.jl")
include("resampling.jl")
include("hyperparam/one_dimensional_ranges.jl")
include("hyperparam/one_dimensional_range_methods.jl")
include("data/data.jl")
include("data/datasets.jl")
include("data/datasets_synthetic.jl")
include("default_measures.jl")
include("plots.jl")
include("composition/models/stacking.jl")
const EXTENDED_ABSTRACT_MODEL_TYPES = vcat(
MLJBase.MLJModelInterface.ABSTRACT_MODEL_SUBTYPES,
MLJBase.NETWORK_COMPOSITE_TYPES, # src/composition/models/network_composite_types.jl
[:MLJType, :Model, :NetworkComposite],
)
# ===================================================================
## EXPORTS
# -------------------------------------------------------------------
# re-exports from MLJModelInterface, ScientificTypes
# NOTE: MLJBase does **not** re-export UnivariateFinite to avoid
# ambiguities between the raw constructor (MLJBase.UnivariateFinite)
# and the general method (MLJModelInterface.UnivariateFinite)
# traits for measures and models:
using StatisticalTraits
for trait in StatisticalTraits.TRAITS
eval(:(export $trait))
end
export implemented_methods # defined here and not in StatisticalTraits
export UnivariateFinite
# MLJType equality
export is_same_except
# model constructor + metadata
export @mlj_model, metadata_pkg, metadata_model
# model api
export fit, update, update_data, transform, inverse_transform,
fitted_params, predict, predict_mode, predict_mean,
predict_median, predict_joint,
evaluate, clean!, training_losses, feature_importances
# data operations
export matrix, int, classes, decoder, table,
nrows, selectrows, selectcols, select
# re-export from ComputationalResources.jl:
export CPU1, CPUProcesses, CPUThreads
# re-exports from ScientificTypes
export Unknown, Known, Finite, Infinite,
OrderedFactor, Multiclass, Count, Continuous, Textual,
Binary, ColorImage, GrayImage, Image, Table
export scitype, scitype_union, elscitype, nonmissing
export coerce, coerce!, autotype, schema, info
# re-exports from CategoricalDistributions:
export UnivariateFiniteArray, UnivariateFiniteVector
# -----------------------------------------------------------------------
# re-export from MLJModelInterface.jl
#abstract model types defined in MLJModelInterface.jl and extended here:
for T in EXTENDED_ABSTRACT_MODEL_TYPES
@eval(export $T)
end
export params
# -------------------------------------------------------------------
# exports from this module, MLJBase
# get/set global constants:
export default_logger
export default_resource
# one_dimensional_ranges.jl:
export ParamRange, NumericRange, NominalRange, iterator, scale
# data.jl:
export partition, unpack, complement, restrict, corestrict
# utilities.jl:
export flat_values, recursive_setproperty!,
recursive_getproperty, pretty, unwind
# show.jl
export HANDLE_GIVEN_ID, @more, @constant, color_on, color_off
# datasets.jl:
export load_boston, load_ames, load_iris, load_sunspots,
load_reduced_ames, load_crabs, load_smarket,
@load_boston, @load_ames, @load_iris, @load_sunspots,
@load_reduced_ames, @load_crabs, @load_smarket
# sources.jl:
export source, Source, CallableReturning
# machines.jl:
export machine, Machine, fit!, report, fit_only!, default_scitype_check_level,
serializable, last_model, restore!
# datasets_synthetics.jl
export make_blobs, make_moons, make_circles, make_regression
# composition
export machines, sources, Stack,
glb, @tuple, node, @node, sources, origins, return!,
nrows_at_source, machine, rebind!, nodes, freeze!, thaw!,
Node, AbstractNode, Pipeline,
ProbabilisticPipeline, DeterministicPipeline, UnsupervisedPipeline,
StaticPipeline, IntervalPipeline
export TransformedTargetModel
# resampling.jl:
export ResamplingStrategy, InSample, Holdout, CV, StratifiedCV, TimeSeriesCV,
evaluate!, Resampler, PerformanceEvaluation, CompactPerformanceEvaluation
# `MLJType` and the abstract `Model` subtypes are exported from within
# src/composition/abstract_types.jl
# -------------------------------------------------------------------
# exports from MLJBase specific to measures
# measure/measures.jl (excluding traits):
export default_measure
# -------------------------------------------------------------------
# re-export from Random, StatsBase, Statistics, Distributions,
# OrderedCollections, CategoricalArrays, InvertedIndices:
export pdf, sampler, mode, median, mean, shuffle!, categorical, shuffle,
levels, levels!, std, Not, support, logpdf, LittleDict
# for julia < 1.9
if !isdefined(Base, :get_extension)
include(joinpath("..","ext", "DefaultMeasuresExt.jl"))
@reexport using .DefaultMeasuresExt.StatisticalMeasures
end
end # module
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 731 | # # DEFAULT MEASURES
"""
default_measure(model)
Return a measure that should work with `model`, or return `nothing` if none can be
reliably inferred.
For Julia 1.9 and higher, `nothing` is returned, unless StatisticalMeasures.jl is
loaded.
# New implementations
This method dispatches `default_measure(model, observation_scitype)`, which has
`nothing` as the fallback return value. Extend `default_measure` by overloading this
version of the method. See for example the MLJBase.jl package extension,
DefaultMeasuresExt.jl.
"""
default_measure(m) = nothing
default_measure(m::Union{Supervised,Annotator}) =
default_measure(m, nonmissingtype(guess_model_target_observation_scitype(m)))
default_measure(m, S) = nothing
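# Sketch of an overload, as it might appear in a measures-providing package or
# extension (`MyRegressor` and the measure `rms` are illustrative, not part of
# this codebase):
#
#   MLJBase.default_measure(::MyRegressor, ::Type{<:Continuous}) = rms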
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 593 | function __init__()
global HANDLE_GIVEN_ID = Dict{UInt64,Symbol}()
global DEFAULT_RESOURCE = Ref{AbstractResource}(CPU1())
global DEFAULT_SCITYPE_CHECK_LEVEL = Ref{Int}(1)
global SHOW_COLOR = Ref{Bool}(true)
global DEFAULT_LOGGER = Ref{Any}(nothing)
# for testing asynchronous training of learning networks:
global TESTING = parse(Bool, get(ENV, "TEST_MLJBASE", "false"))
if TESTING
global MACHINE_CHANNEL =
RemoteChannel(() -> Channel(100), myid())
end
MLJModelInterface.set_interface_mode(MLJModelInterface.FullInterface())
end
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 35944 | ## SCITYPE CHECK LEVEL
"""
default_scitype_check_level()
Return the current global default value for scientific type checking
when constructing machines.
default_scitype_check_level(i::Integer)
Set the global default value for scientific type checking to `i`.
The effect of the `scitype_check_level` option in calls of the form
`machine(model, data, scitype_check_level=...)` is summarized below:
| `scitype_check_level` | Inspect scitypes? | If `Unknown` in scitypes | If other scitype mismatch |
|:----------------------|:-----------------:|:------------------------:|:-------------------------:|
| 0                     | ×                 |                          |                           |
| 1 (value at startup)  | ✓                 |                          | warning                   |
| 2                     | ✓                 | warning                  | warning                   |
| 3                     | ✓                 | warning                  | error                     |
| 4                     | ✓                 | error                    | error                     |
See also [`machine`](@ref)
"""
function default_scitype_check_level end
default_scitype_check_level() = DEFAULT_SCITYPE_CHECK_LEVEL[]
default_scitype_check_level(i) = (DEFAULT_SCITYPE_CHECK_LEVEL[] = i;)
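# Usage sketch (the value `1` is the documented startup default):
#
#   julia> default_scitype_check_level()
#   1
#
#   julia> default_scitype_check_level(3)  # machines now error on scitype mismatch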
## MACHINE TYPE
struct NotTrainedError{M} <: Exception
mach::M
operation::Symbol
end
Base.showerror(io::IO, e::NotTrainedError) =
print(io, "$(e.mach) has not been trained. "*
"Call `fit!` on the machine, or, "*
"if you meant to create a "*
"learning network `Node`, "*
"use the syntax `node($(e.operation), mach::Machine)`. ")
caches_data_by_default(m) = caches_data_by_default(typeof(m))
caches_data_by_default(::Type) = true
caches_data_by_default(::Type{<:Symbol}) = false
mutable struct Machine{M,OM,C} <: MLJType
model::M
old_model::OM # for remembering the model used in last call to `fit!`
    # the next two refer to objects returned by `MLJModelInterface.fit(::M, ...)`.
fitresult
cache # relevant to `MLJModelInterface.update`, not to be confused with type param `C`
# training arguments (`Node`s or user-specified data wrapped in
# `Source`s):
args::Tuple{Vararg{AbstractNode}}
# cached model-specific reformatting of args (for C=true):
data
# cached subsample of data (for C=true):
resampled_data
# dictionary of named tuples keyed on method (:fit, :predict, etc):
report
frozen::Bool
old_rows
state::Int
old_upstream_state
# cleared by fit!(::Node) calls; put! by `fit_only!(machine, true)` calls:
fit_okay::Channel{Bool}
function Machine(
model::M, args::AbstractNode...;
cache=caches_data_by_default(model),
) where M
# In the case of symbolic model, machine cannot know the type of model to be fit
# at time of construction:
OM = M == Symbol ? Any : M
mach = new{M,OM,cache}(model) # (this `cache` is not the *field* `cache`)
mach.frozen = false
mach.state = 0
mach.args = args
mach.old_upstream_state = upstream(mach)
mach.fit_okay = Channel{Bool}(1)
return mach
end
end
caches_data(::Machine{<:Any, <:Any, C}) where C = C
"""
age(mach::Machine)
Return an integer representing the number of times `mach` has been trained or updated. For
more detail, see the discussion of training logic at [`fit_only!`](@ref).
"""
age(mach::Machine) = mach.state
"""
replace(mach::Machine, field1 => value1, field2 => value2, ...)
**Private method.**
Return a shallow copy of the machine `mach` with the specified field
replacements. Undefined field values are preserved. Unspecified fields have identically
equal values, with the exception of `mach.fit_okay`, which is always a new instance
`Channel{Bool}(1)`.
The following example returns a machine with no traces of training data (but also removes
any upstream dependencies in a learning network):
```julia
replace(mach, :args => (), :data => (), :resampled_data => (), :cache => nothing)
```
"""
function Base.replace(mach::Machine{<:Any,<:Any,C}, field_value_pairs::Pair...) where C
# determined new `model` and `args` and build replacement dictionary:
newfield_given_old = Dict(field_value_pairs) # to be extended
fields_to_be_replaced = keys(newfield_given_old)
:fit_okay in fields_to_be_replaced && error("Cannot replace `:fit_okay` field. ")
newmodel = :model in fields_to_be_replaced ? newfield_given_old[:model] : mach.model
newargs = :args in fields_to_be_replaced ? newfield_given_old[:args] : mach.args
# instantiate a new machine and make field replacements:
clone = Machine(newmodel, newargs...; cache=C)
for field in fieldnames(typeof(mach))
if !(field in fields_to_be_replaced || isdefined(mach, field)) ||
field in [:model, :args, :fit_okay]
continue
end
value = field in fields_to_be_replaced ? newfield_given_old[field] :
getproperty(mach, field)
setproperty!(clone, field, value)
end
return clone
end
Base.copy(mach::Machine) = replace(mach)
upstream(mach::Machine) = Tuple(m.state for m in ancestors(mach))
"""
ancestors(mach::Machine; self=false)
All ancestors of `mach`, including `mach` if `self=true`.
"""
function ancestors(mach::Machine; self=false)
ret = Machine[]
self && push!(ret, mach)
return vcat(ret, (machines(N) for N in mach.args)...) |> unique
end
# # CONSTRUCTORS
# In the checks `args` is expected to be `Vector{<:AbstractNode}` (eg, a vector of source
# nodes) not raw data.
# # Helpers
# Here `F` is some fit_data_scitype, and so is tuple of scitypes, or a
# union of such tuples:
_contains_unknown(F) = false
_contains_unknown(F::Type{Unknown}) = true
_contains_unknown(F::Union) = any(_contains_unknown, Base.uniontypes(F))
function _contains_unknown(F::Type{<:Tuple})
# the first line seems necessary; see https://discourse.julialang.org/t/a-union-of-tuple-types-isa-tuple-type/75339?u=ablaom
F isa Union && return any(_contains_unknown, Base.uniontypes(F))
return any(_contains_unknown, F.parameters)
end
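# Illustrative behaviour of the helper above (a sketch; note that only a
# literal `Unknown` parameter counts — `AbstractVector{Unknown}` falls through
# to the `false` fallback):
#
#   _contains_unknown(Tuple{Table(Continuous), Unknown})               # true
#   _contains_unknown(Tuple{Table(Continuous), AbstractVector{Count}}) # false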
alert_generic_scitype_mismatch(S, F, T) =
"""
The number and/or types of data arguments do not match what the specified model
supports. Suppress this type check by specifying `scitype_check_level=0`.
Run `@doc $(package_name(T)).$(name(T))` to learn more about your model's requirements.
Commonly, but not exclusively, supervised models are constructed using the syntax
`machine(model, X, y)` or `machine(model, X, y, w)` while most other models are
constructed with `machine(model, X)`. Here `X` are features, `y` a target, and `w`
sample or class weights.
In general, data in `machine(model, data...)` is expected to satisfy
scitype(data) <: MLJ.fit_data_scitype(model)
In the present case:
scitype(data) = $S
fit_data_scitype(model) = $F
"""
const WARN_UNKNOWN_SCITYPE =
"Some data contains `Unknown` scitypes, which might lead to model-data mismatches. "
err_length_mismatch(model) = DimensionMismatch(
"Differing number of observations in input and target. ")
function check(model::Model, scitype_check_level, args...)
check_ismodel(model)
is_okay = true
scitype_check_level >= 1 || return is_okay
F = fit_data_scitype(model)
if _contains_unknown(F)
scitype_check_level in [2, 3] && @warn WARN_UNKNOWN_SCITYPE
scitype_check_level >= 4 && throw(ArgumentError(WARN_UNKNOWN_SCITYPE))
return is_okay
end
    # Sometimes `(X,)` is a table when `X` is a table, which leads to
    # `scitype((X,)) = Table(...)` where `Tuple{scitype(X)}` is wanted. Also, we use
    # `elscitype` here instead of `scitype` because the data is wrapped in source nodes:
S = Tuple{elscitype.(args)...}
if !(S <: F)
is_okay = false
message = alert_generic_scitype_mismatch(S, F, typeof(model))
if scitype_check_level >= 3
throw(ArgumentError(message))
else
@warn message
end
end
if length(args) > 1 && is_supervised(model)
X, y = args[1:2]
# checks on dimension matching:
scitype(X) == CallableReturning{Nothing} || nrows(X()) == nrows(y()) ||
throw(err_length_mismatch(model))
end
return is_okay
end
# # Constructors
"""
machine(model, args...; cache=true, scitype_check_level=1)
Construct a `Machine` object binding a `model`, storing
hyper-parameters of some machine learning algorithm, to some data,
`args`. Calling [`fit!`](@ref) on a `Machine` instance `mach` stores
outcomes of applying the algorithm in `mach`, which can be inspected
using `fitted_params(mach)` (learned parameters) and `report(mach)`
(other outcomes). This in turn enables generalization to new data
using operations such as `predict` or `transform`:
```julia
using MLJModels
X, y = make_regression()
PCA = @load PCA pkg=MultivariateStats
model = PCA()
mach = machine(model, X)
fit!(mach, rows=1:50)
transform(mach, selectrows(X, 51:100)) # or transform(mach, rows=51:100)
DecisionTreeRegressor = @load DecisionTreeRegressor pkg=DecisionTree
model = DecisionTreeRegressor()
mach = machine(model, X, y)
fit!(mach, rows=1:50)
predict(mach, selectrows(X, 51:100)) # or predict(mach, rows=51:100)
```
Specify `cache=false` to prioritize memory management over speed.
When building a learning network, `Node` objects can be substituted
for the concrete data but no type or dimension checks are applied.
### Checks on the types of training data
A model articulates its data requirements using [scientific
types](https://juliaai.github.io/ScientificTypes.jl/dev/), i.e.,
using the [`scitype`](@ref) function instead of the `typeof` function.
If `scitype_check_level > 0` then the scitype of each `arg` in `args`
is computed, and this is compared with the scitypes expected by the
model, unless `args` contains `Unknown` scitypes and
`scitype_check_level < 4`, in which case no further action is
taken. Whether warnings are issued or errors thrown depends on the
level. For details, see [`default_scitype_check_level`](@ref), a method
to inspect or change the default level (`1` at startup).
### Machines with model placeholders
A symbol can be substituted for a model in machine constructors to act as a placeholder
for a model specified at training time. The symbol must be the field name for a struct
whose corresponding value is a model, as shown in the following example:
```julia
mutable struct MyComposite
transformer
classifier
end
my_composite = MyComposite(Standardizer(), ConstantClassifier)
X, y = make_blobs()
mach = machine(:classifier, X, y)
fit!(mach, composite=my_composite)
```
The last two lines are equivalent to
```julia
mach = machine(ConstantClassifier(), X, y)
fit!(mach)
```
Delaying model specification is used when exporting learning networks as new stand-alone
model types. See [`prefit`](@ref) and the MLJ documentation on learning networks.
See also [`fit!`](@ref), [`default_scitype_check_level`](@ref),
[`MLJBase.save`](@ref), [`serializable`](@ref).
"""
function machine end
const ERR_STATIC_ARGUMENTS = ArgumentError(
"A `Static` transformer "*
"has no training arguments. "*
"Use `machine(model)`. "
)
machine(T::Type{<:Model}, args...; kwargs...) =
throw(ArgumentError("Model *type* provided where "*
"model *instance* expected. "))
function machine(model::Static, args...; cache=false, kwargs...)
isempty(args) || throw(ERR_STATIC_ARGUMENTS)
return Machine(model; cache=false, kwargs...)
end
function machine(
model::Static,
args::AbstractNode...;
cache=false,
kwargs...,
)
isempty(args) || model isa Symbol || throw(ERR_STATIC_ARGUMENTS)
mach = Machine(model; cache=false, kwargs...)
return mach
end
machine(model::Symbol; cache=false, kwargs...) =
Machine(model; cache, kwargs...)
machine(model::Union{Model,Symbol}, raw_arg1, arg2::AbstractNode, args::AbstractNode...;
kwargs...) =
error("Mixing concrete data with `Node` training arguments "*
"is not allowed. ")
function machine(
model::Union{Model,Symbol},
raw_arg1,
raw_args...;
scitype_check_level=default_scitype_check_level(),
kwargs...,
)
args = source.((raw_arg1, raw_args...))
model isa Symbol || check(model, scitype_check_level, args...;)
return Machine(model, args...; kwargs...)
end
function machine(model::Union{Model,Symbol}, arg1::AbstractNode, args::AbstractNode...;
kwargs...)
return Machine(model, arg1, args...; kwargs...)
end
function machine(model::Symbol, arg1::AbstractNode, args::AbstractNode...;
kwargs...)
return Machine(model, arg1, args...; kwargs...)
end
warn_bad_deserialization(state) =
"Deserialized machine state is not -1 (got $state). "*
"This means that the machine has not been saved by a conventional MLJ routine.\n"
"For example, it's possible original training data is accessible from the deserialised object. "
"""
machine(file::Union{String, IO})
Rebuild from a file a machine that has been serialized using the default
Serialization module.
"""
function machine(file::Union{String, IO})
smach = deserialize(file)
smach.state == -1 ||
@warn warn_bad_deserialization(smach.state)
restore!(smach)
return smach
end
## INSPECTION AND MINOR MANIPULATION OF FIELDS
# Note: freeze! and thaw! are possibly not used within MLJ itself.
"""
freeze!(mach)
Freeze the machine `mach` so that it will never be retrained (unless
thawed).
See also [`thaw!`](@ref).
"""
function freeze!(machine::Machine)
machine.frozen = true
end
"""
thaw!(mach)
Unfreeze the machine `mach` so that it can be retrained.
See also [`freeze!`](@ref).
"""
function thaw!(machine::Machine)
machine.frozen = false
end
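# Sketch of the freeze/thaw cycle (the skip is logged by `fitlog`, defined below):
#
#   freeze!(mach); fit!(mach)   # no-op, warns "... not trained as it is frozen."
#   thaw!(mach);   fit!(mach)   # trains (or updates) as usual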
params(mach::Machine) = params(mach.model)
machines(::Source) = Machine[]
## DISPLAY
_cache_status(::Machine{<:Any,<:Any,true}) = "caches model-specific representations of data"
_cache_status(::Machine{<:Any,<:Any,false}) = "does not cache data"
function Base.show(io::IO, mach::Machine)
model = mach.model
m = model isa Symbol ? ":$model" : model
print(io, "machine($m, …)")
end
function Base.show(io::IO, ::MIME"text/plain", mach::Machine{M}) where M
header =
mach.state == -1 ? "serializable " :
mach.state == 0 ? "untrained " :
"trained "
header *= "Machine"
mach.state >= 0 && (header *= "; "*_cache_status(mach))
println(io, header)
println(io, " model: $(mach.model)")
println(io, " args: ")
for i in eachindex(mach.args)
arg = mach.args[i]
print(io, " $i:\t$arg")
if arg isa Source
println(io, " \u23CE $(elscitype(arg))")
else
println(io)
end
end
end
## FITTING
# Not one, but *two*, fit methods are defined for machines here,
# `fit!` and `fit_only!`.
# - `fit_only!`: trains a machine without touching the learned parameters (`fitresult`) of
# any other machine. It may error if another machine on which it depends (through its node
# training arguments `N1, N2, ...`) has not been trained. It's possible that a dependent
# machine `mach` may have it's report mutated if `reporting_operations(mach.model)` is
# non-empty.
# - `fit!`: trains a machine after first progressively training all
# machines on which the machine depends. Implicitly this involves
# making `fit_only!` calls on those machines, scheduled by the node
#   `glb(N1, N2, ... )`, where `glb` means greatest lower bound.
function fitlog(mach, action::Symbol, verbosity)
if verbosity < -1000
put!(MACHINE_CHANNEL, (action, mach))
elseif verbosity > -1 && action == :frozen
@warn "$mach not trained as it is frozen."
elseif verbosity > 0
action == :train && (@info "Training $mach."; return)
action == :update && (@info "Updating $mach."; return)
action == :skip && begin
@info "Not retraining $mach. Use `force=true` to force."
return
end
end
end
# for getting model specific representation of the row-restricted
# training data from a machine, according to the value of the machine
# type parameter `C` (`true` or `false`):
_resampled_data(mach::Machine{<:Any,<:Any,true}, model, rows) = mach.resampled_data
function _resampled_data(mach::Machine{<:Any,<:Any,false}, model, rows)
raw_args = map(N -> N(), mach.args)
data = MMI.reformat(model, raw_args...)
return selectrows(model, rows, data...)
end
err_no_real_model(mach) = ErrorException(
"""
Cannot train or use $mach, which has a `Symbol` as model. Perhaps you
forgot to specify `composite=... ` in a `fit!` call?
"""
)
err_missing_model(model) = ErrorException(
"Specified `composite` model does not have `:$(model)` as a field."
)
"""
last_model(mach::Machine)
Return the last model used to train the machine `mach`. This is a bona fide model, even if
`mach.model` is a symbol.
Returns `nothing` if `mach` has not been trained.
"""
last_model(mach) = isdefined(mach, :old_model) ? mach.old_model : nothing
"""
MLJBase.fit_only!(
mach::Machine;
rows=nothing,
verbosity=1,
force=false,
composite=nothing,
)
Without mutating any other machine on which it may depend, perform one of the following
actions to the machine `mach`, using the data and model bound to it, and restricting the
data to `rows` if specified:
- *Ab initio training.* Ignoring any previous learned parameters and
cache, compute and store new learned parameters. Increment `mach.state`.
- *Training update.* Making use of previous learned parameters and/or
cache, replace or mutate existing learned parameters. The effect is
the same (or nearly the same) as in ab initio training, but may be
faster or use less memory, assuming the model supports an update
option (implements `MLJBase.update`). Increment `mach.state`.
- *No-operation.* Leave existing learned parameters untouched. Do not
increment `mach.state`.
If the model, `model`, bound to `mach` is a symbol, then instead perform the action using
the true model given by `getproperty(composite, model)`. See also [`machine`](@ref).
### Training action logic
For the action to be a no-operation, either `mach.frozen == true` or
none of the following apply:
1. `mach` has never been trained (`mach.state == 0`).
2. `force == true`.
3. The `state` of some other machine on which `mach` depends has
changed since the last time `mach` was trained (ie, the last time
`mach.state` was last incremented).
4. The specified `rows` have changed since the last retraining and
`mach.model` does not have `Static` type.
5. `mach.model` is a model and different from the last model used for training, but has
the same type.
6. `mach.model` is a model but has a type different from the last model used for
training.
7. `mach.model` is a symbol and `(composite, mach.model)` is different from the last
model used for training, but has the same type.
8. `mach.model` is a symbol and `(composite, mach.model)` has a different type from
the last model used for training.
In any of the cases (1) - (4), (6), or (8), `mach` is trained ab initio.
If (5) or (7) is true, then a training update is applied.
To freeze or unfreeze `mach`, use `freeze!(mach)` or `thaw!(mach)`.
### Implementation details
The data to which a machine is bound is stored in `mach.args`. Each
element of `args` is either a `Node` object, or, in the case that
concrete data was bound to the machine, it is concrete data wrapped in
a `Source` node. In all cases, to obtain concrete data for actual
training, each argument `N` is called, as in `N()` or `N(rows=rows)`,
and either `MLJBase.fit` (ab initio training) or `MLJBase.update`
(training update) is dispatched on `mach.model` and this data. See the
"Adding models for general use" section of the MLJ documentation for
more on these lower-level training methods.
"""
function fit_only!(
mach::Machine{<:Any,<:Any,cache_data};
rows=nothing,
verbosity=1,
force=false,
composite=nothing,
) where cache_data
if mach.frozen
# no-op; do not increment `state`.
fitlog(mach, :frozen, verbosity)
return mach
end
# catch deserialized machines not bound to data:
if isempty(mach.args) && !(mach.model isa Static) && !(mach.model isa Symbol)
error("This machine is not bound to any data and so "*
"cannot be trained. ")
end
    # If `mach.model` is a symbol, then we want to replace it with the bona fide model
# `getproperty(composite, mach.model)`:
model = if mach.model isa Symbol
isnothing(composite) && throw(err_no_real_model(mach))
mach.model in propertynames(composite) ||
            throw(err_missing_model(mach.model))
getproperty(composite, mach.model)
else
mach.model
end
modeltype_changed = !isdefined(mach, :old_model) ? true :
typeof(model) === typeof(mach.old_model) ? false :
true
# take action if model has been mutated illegally:
warning = clean!(model)
isempty(warning) || verbosity < 0 || @warn warning
upstream_state = upstream(mach)
rows === nothing && (rows = (:))
rows_is_new = !isdefined(mach, :old_rows) || rows != mach.old_rows
condition_4 = rows_is_new && !(mach.model isa Static)
upstream_has_changed = mach.old_upstream_state != upstream_state
data_is_valid = isdefined(mach, :data) && !upstream_has_changed
# build or update cached `data` if necessary:
if cache_data && !data_is_valid
raw_args = map(N -> N(), mach.args)
mach.data = MMI.reformat(model, raw_args...)
end
# build or update cached `resampled_data` if necessary (`mach.data` is already defined
# above if needed here):
if cache_data && (!data_is_valid || condition_4)
mach.resampled_data = selectrows(model, rows, mach.data...)
end
# `fit`, `update`, or return untouched:
if mach.state == 0 || # condition (1)
force == true || # condition (2)
upstream_has_changed || # condition (3)
condition_4 || # condition (4)
        modeltype_changed       # conditions (6) or (8)
isdefined(mach, :report) || (mach.report = LittleDict{Symbol,Any}())
# fit the model:
fitlog(mach, :train, verbosity)
mach.fitresult, mach.cache, mach.report[:fit] =
try
fit(model, verbosity, _resampled_data(mach, model, rows)...)
catch exception
@error "Problem fitting the machine $mach. "
_sources = sources(glb(mach.args...))
length(_sources) > 2 ||
all((!isempty).(_sources)) ||
@warn "Some learning network source nodes are empty. "
@info "Running type checks... "
raw_args = map(N -> N(), mach.args)
scitype_check_level = 1
if check(model, scitype_check_level, source.(raw_args)...)
@info "Type checks okay. "
else
@info "It seems an upstream node in a learning "*
"network is providing data of incompatible scitype. See "*
"above. "
end
rethrow()
end
    elseif model != mach.old_model # conditions (5) or (7)
# update the model:
fitlog(mach, :update, verbosity)
mach.fitresult, mach.cache, mach.report[:fit] =
update(model,
verbosity,
mach.fitresult,
mach.cache,
_resampled_data(mach, model, rows)...)
else
# don't fit the model and return without incrementing `state`:
fitlog(mach, :skip, verbosity)
return mach
end
# If we get to here it's because we have run `fit` or `update`!
if rows_is_new
mach.old_rows = deepcopy(rows)
end
mach.old_model = deepcopy(model)
mach.old_upstream_state = upstream_state
mach.state = mach.state + 1
return mach
end
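# Sketch of the training logic above for a hypothetical mutable model type
# `MyModel` supporting `MLJBase.update` (names are illustrative):
#
#   mach = machine(MyModel(epochs=10), X, y)
#   fit!(mach)              # condition (1): trained ab initio
#   fit!(mach)              # nothing changed => no-op ("Not retraining ...")
#   mach.model.epochs = 20
#   fit!(mach)              # condition (5): same type, mutated => training *update*
#   fit!(mach, rows=1:50)   # condition (4): new rows => retrained ab initio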
# version of fit_only! for calling by scheduler (a node), which waits on all upstream
# `machines` to fit:
function fit_only!(mach::Machine, wait_on_upstream::Bool; kwargs...)
    wait_on_upstream || return fit_only!(mach; kwargs...)
upstream_machines = machines(glb(mach.args...))
# waiting on upstream machines to fit:
for m in upstream_machines
fit_okay = fetch(m.fit_okay)
if !fit_okay
put!(mach.fit_okay, false)
return mach
end
end
# try to fit this machine:
try
fit_only!(mach; kwargs...)
catch e
put!(mach.fit_okay, false)
@error "Problem fitting $mach"
throw(e)
end
put!(mach.fit_okay, true)
return mach
end
"""
fit!(mach::Machine, rows=nothing, verbosity=1, force=false, composite=nothing)
Fit the machine `mach`. In the case that `mach` has `Node` arguments,
first train all other machines on which `mach` depends.
To attempt to fit a machine without touching any other machine, use
`fit_only!`. For more on options and the internal logic of fitting see
[`fit_only!`](@ref)
"""
function fit!(mach::Machine; kwargs...)
glb_node = glb(mach.args...) # greatest lower bound node of arguments
fit!(glb_node; kwargs...)
fit_only!(mach; kwargs...)
end
## INSPECTION OF TRAINING OUTCOMES
"""
fitted_params(mach)
Return the learned parameters for a machine `mach` that has been
`fit!`, for example the coefficients in a linear model.
This is a named tuple and human-readable if possible.
If `mach` is a machine for a composite model, such as a model constructed using the
pipeline syntax `model1 |> model2 |> ...`, then the returned named tuple has the composite
type's field names as keys. The corresponding value is the fitted parameters for the
machine in the underlying learning network bound to that model. (If multiple machines
share the same model, then the value is a vector.)
```julia-repl
julia> using MLJ
julia> @load LogisticClassifier pkg=MLJLinearModels
julia> X, y = @load_crabs;
julia> pipe = Standardizer() |> LogisticClassifier();
julia> mach = machine(pipe, X, y) |> fit!;
julia> fitted_params(mach).logistic_classifier
(classes = CategoricalArrays.CategoricalValue{String,UInt32}["B", "O"],
coefs = Pair{Symbol,Float64}[:FL => 3.7095037897680405, :RW => 0.1135739140854546, :CL => -1.6036892745322038, :CW => -4.415667573486482, :BD => 3.238476051092471],
intercept = 0.0883301599726305,)
```
See also [`report`](@ref)
"""
function fitted_params(mach::Machine)
if isdefined(mach, :fitresult)
return fitted_params(last_model(mach), mach.fitresult)
else
throw(NotTrainedError(mach, :fitted_params))
end
end
"""
report(mach)
Return the report for a machine `mach` that has been
`fit!`, for example the coefficients in a linear model.
This is a named tuple and human-readable if possible.
If `mach` is a machine for a composite model, such as a model constructed using the
pipeline syntax `model1 |> model2 |> ...`, then the returned named tuple has the composite
type's field names as keys. The corresponding value is the report for the machine in the
underlying learning network bound to that model. (If multiple machines share the same
model, then the value is a vector.)
```julia-repl
julia> using MLJ
julia> @load LinearBinaryClassifier pkg=GLM
julia> X, y = @load_crabs;
julia> pipe = Standardizer() |> LinearBinaryClassifier();
julia> mach = machine(pipe, X, y) |> fit!;
julia> report(mach).linear_binary_classifier
(deviance = 3.8893386087844543e-7,
dof_residual = 195.0,
stderror = [18954.83496713119, 6502.845740757159, 48484.240246060406, 34971.131004997274, 20654.82322484894, 2111.1294584763386],
vcov = [3.592857686311793e8 9.122732393971942e6 … -8.454645589364915e7 5.38856837634321e6; 9.122732393971942e6 4.228700272808351e7 … -4.978433790526467e7 -8.442545425533723e6; … ; -8.454645589364915e7 -4.978433790526467e7 … 4.2662172244975924e8 2.1799125705781363e7; 5.38856837634321e6 -8.442545425533723e6 … 2.1799125705781363e7 4.456867590446599e6],)
```
See also [`fitted_params`](@ref)
"""
function report(mach::Machine)
if isdefined(mach, :report)
return MMI.report(last_model(mach), mach.report)
else
throw(NotTrainedError(mach, :report))
end
end
"""
report_given_method(mach::Machine)
Same as `report(mach)` but broken down by the method (`fit`, `predict`, etc) that
contributed the report.
A specialized method intended for learning network applications.
The return value is a dictionary keyed on the symbol representing the method (`:fit`,
`:predict`, etc) and the values report contributed by that method.
"""
report_given_method(mach::Machine) = mach.report
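# Sketch: for a model with `:predict` in `reporting_operations(model)`, after
# fitting and predicting one might see
#
#   report_given_method(mach)
#   # LittleDict{Symbol,Any}(:fit => (...), :predict => (...))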
"""
training_losses(mach::Machine)
Return a list of training losses, for models that make these
available. Otherwise, return `nothing`.
"""
function training_losses(mach::Machine)
if isdefined(mach, :report)
return training_losses(last_model(mach), report_given_method(mach)[:fit])
else
throw(NotTrainedError(mach, :training_losses))
end
end
"""
feature_importances(mach::Machine)
Return a list of `feature => importance` pairs for a fitted machine, `mach`, for supported
models. Otherwise return `nothing`.
"""
function feature_importances(mach::Machine)
if isdefined(mach, :report) && isdefined(mach, :fitresult)
return _feature_importances(
last_model(mach),
mach.fitresult,
report_given_method(mach)[:fit],
)
else
throw(NotTrainedError(mach, :feature_importances))
end
end
function _feature_importances(model, fitresult, report)
if reports_feature_importances(model)
return MMI.feature_importances(model, fitresult, report)
else
return nothing
end
end
###############################################################################
##### SERIALIZABLE, RESTORE!, SAVE AND A FEW UTILITY FUNCTIONS #####
###############################################################################
const ERR_SERIALIZING_UNTRAINED = ArgumentError(
"`serializable` called on untrained machine. "
)
"""
serializable(mach::Machine)
Returns a shallow copy of the machine to make it serializable. In particular,
all training data is removed and, if necessary, learned parameters are replaced
with persistent representations.
Any general purpose Julia serializer may be applied to the output of
`serializable` (eg, JLSO, BSON, JLD) but you must call
`restore!(mach)` on the deserialised object `mach` before using
it. See the example below.
If using Julia's standard Serialization library, a shorter workflow is
available using the [`MLJBase.save`](@ref) (or `MLJ.save`) method.
A machine returned by `serializable` is characterized by the property
`mach.state == -1`.
### Example using [JLSO](https://invenia.github.io/JLSO.jl/stable/)
```julia
using MLJ
using JLSO
Tree = @load DecisionTreeClassifier
tree = Tree()
X, y = @load_iris
mach = fit!(machine(tree, X, y))
# This machine can now be serialized
smach = serializable(mach)
JLSO.save("machine.jlso", :machine => smach)
# Deserialize and restore learned parameters to useable form:
loaded_mach = JLSO.load("machine.jlso")[:machine]
restore!(loaded_mach)
predict(loaded_mach, X)
predict(mach, X)
```
See also [`restore!`](@ref), [`MLJBase.save`](@ref).
"""
function serializable(mach::Machine{<:Any,<:Any,C}, model=mach.model; verbosity=1) where C
isdefined(mach, :fitresult) || throw(ERR_SERIALIZING_UNTRAINED)
mach.state == -1 && return mach
# The next line of code makes `serializable` recursive, in the case that `mach.model`
# is a `Composite` model: `save` duplicates the underlying learning network, which
# involves calls to `serializable` on the old machines in the network to create the
# new ones.
serializable_fitresult = save(model, mach.fitresult)
    # Duplication currently needs to happen in two steps for this to work in case of
# `Composite` models.
clone = replace(
mach,
:state => -1,
:args => (),
:fitresult => serializable_fitresult,
:old_rows => nothing,
:data => nothing,
:resampled_data => nothing,
:cache => nothing,
)
report = report_for_serialization(clone)
return replace(clone, :report => report)
end
"""
restore!(mach::Machine)
Restore the state of a machine that is currently serializable but
which may not be otherwise usable. For such a machine, `mach`, one has
`mach.state == -1`. Intended for restoring deserialized machine objects to a
usable form.
For an example see [`serializable`](@ref).
"""
function restore!(mach::Machine, model=mach.model)
mach.state != -1 && return mach
mach.fitresult = restore(model, mach.fitresult)
mach.state = 1
return mach
end
"""
MLJ.save(filename, mach::Machine)
MLJ.save(io, mach::Machine)
MLJBase.save(filename, mach::Machine)
MLJBase.save(io, mach::Machine)
Serialize the machine `mach` to a file with path `filename`, or to an
input/output stream `io` (at least `IOBuffer` instances are
supported) using the Serialization module.
To serialise using a different format, see [`serializable`](@ref).
Machines are deserialized using the `machine` constructor as shown in
the example below.
!!! note
The implementation of `save` for machines changed in MLJ 0.18
(MLJBase 0.20). You can only restore a machine saved using older
versions of MLJ using an older version.
### Example
```julia
using MLJ
Tree = @load DecisionTreeClassifier
X, y = @load_iris
mach = fit!(machine(Tree(), X, y))
MLJ.save("tree.jls", mach)
mach_predict_only = machine("tree.jls")
predict(mach_predict_only, X)
# using a buffer:
io = IOBuffer()
MLJ.save(io, mach)
seekstart(io)
predict_only_mach = machine(io)
predict(predict_only_mach, X)
```
!!! warning "Only load files from trusted sources"
Maliciously constructed JLS files, like pickles, and most other
general purpose serialization formats, can allow for arbitrary code
execution during loading. This means it is possible for someone
to use a JLS file that looks like a serialized MLJ machine as a
[Trojan horse](https://en.wikipedia.org/wiki/Trojan_horse_(computing)).
See also [`serializable`](@ref), [`machine`](@ref).
"""
function save(file::Union{String,IO}, mach::Machine)
isdefined(mach, :fitresult) ||
error("Cannot save an untrained machine. ")
smach = serializable(mach)
serialize(file, smach)
end
const ERR_INVALID_DEFAULT_LOGGER = ArgumentError(
"You have attempted to save a machine to the default logger "*
"but `default_logger()` is currently `nothing`. "*
"Either specify an explicit logger, path or stream to save to, "*
"or use `default_logger(logger)` "*
"to change the default logger. "
)
"""
MLJ.save(mach)
MLJBase.save(mach)
Save the current machine as an artifact at the location associated with
[`default_logger`](@ref).
"""
MLJBase.save(mach::Machine) = MLJBase.save(default_logger(), mach)
MLJBase.save(::Nothing, ::Machine) = throw(ERR_INVALID_DEFAULT_LOGGER)
report_for_serialization(mach) = mach.report
# NOTE. there is also a specialization of `report_for_serialization` for `Composite`
# models, defined in /src/composition/learning_networks/machines/
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 836 | # # EXCEPTIONS
function message_expecting_model(X; spelling=false)
message = "Expected a model instance but got `$X`"
if X isa Type{<:Model}
message *= ", which is a model *type*"
else
spelling && (message *= ". Perhaps you misspelled a keyword argument")
end
message *= ". "
return message
end
err_expecting_model(X; kwargs...) = ArgumentError(message_expecting_model(X; kwargs...))
"""
MLJBase.check_ismodel(model; spelling=false)
Return `nothing` if `model` is a model, throwing `err_expecting_model(X; spelling)`
otherwise. Specify `spelling=true` if there is a chance that the user misspelled a keyword
argument that is being interpreted as a model.
**Private method.**
"""
check_ismodel(model; kwargs...) = model isa Model ? nothing :
throw(err_expecting_model(model; kwargs...))
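# For example (a sketch; `ConstantClassifier` assumed from MLJModels):
#
#   check_ismodel(ConstantClassifier())  # nothing
#   check_ismodel(ConstantClassifier)    # throws: "... which is a model *type*"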
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 7775 | # We wish to extend operations to identically named methods dispatched
# on `Machine`s. For example, we have from the model API
#
# `predict(model::M, fitresult, X) where M<:Supervised`
#
# but want also want to define
#
# 1. `predict(machine::Machine, X)` where `X` is concrete data
#
# and we would like the syntactic sugar (for `X` a node):
#
# 2. `predict(machine::Machine, X::Node) = node(predict, machine, X)`
#
# Finally, for a `model` that is `ProbabilisticComposite`,
# `DeterministicComposite`, or `UnsupervisedComposite`, we want
#
# 3. `predict(model, fitresult, X) = fitresult.predict(X)`
#
# which makes sense because `fitresult` in those cases is a named
# tuple keyed on supported operations and with nodes as values.
## TODO: need to add checks on the arguments of
## predict(::Machine, ) and transform(::Machine, )
const ERR_ROWS_NOT_ALLOWED = ArgumentError(
"Calling `transform(mach, rows=...)` or "*
"`predict(mach, rows=...)` when "*
"`mach.model isa Static` is not allowed, as no data "*
"is bound to `mach` in this case. Specify an explicit "*
"data or node, as in `transform(mach, X)`, or "*
"`transform(mach, X1, X2, ...)`. "
)
err_serialized(operation) = ArgumentError(
"Calling $operation on a "*
"deserialized machine with no data "*
"bound to it. "
)
const err_untrained(mach) = ErrorException("$mach has not been trained. ")
const WARN_SERIALIZABLE_MACH = "You are attempting to use a "*
"deserialised machine whose learned parameters "*
"may be unusable. To be sure they are usable, "*
"first run restore!(mach)."
_scrub(x::NamedTuple) = isempty(x) ? nothing : x
_scrub(something_else) = something_else
# Given return value `ret` of an operation with symbol `operation` (eg, `:predict`) return
# `ret` in the ordinary case that the operation does not include an "report" component ;
# otherwise update `mach.report` with that component and return the non-report part of
# `ret`:
function get!(ret, operation, mach)
model = last_model(mach)
if operation in reporting_operations(model)
report = _scrub(last(ret))
# mach.report will always be a dictionary:
if isempty(mach.report)
mach.report = LittleDict{Symbol,Any}(operation => report)
else
mach.report[operation] = report
end
return first(ret)
end
return ret
end
# 0. operations on machine, given rows=...:
for operation in OPERATIONS
quoted_operation = QuoteNode(operation) # eg, :(:predict)
operation == :inverse_transform && continue
ex = quote
function $(operation)(mach::Machine{<:Model,<:Any,false}; rows=:)
# catch deserialized machine with no data:
isempty(mach.args) && throw(err_serialized($operation))
return ($operation)(mach, mach.args[1](rows=rows))
end
function $(operation)(mach::Machine{<:Model,<:Any,true}; rows=:)
# catch deserialized machine with no data:
isempty(mach.args) && throw(err_serialized($operation))
model = last_model(mach)
ret = ($operation)(
model,
mach.fitresult,
selectrows(model, rows, mach.data[1])...,
)
return get!(ret, $quoted_operation, mach)
end
# special case of Static models (no training arguments):
$operation(mach::Machine{<:Static,<:Any,true}; rows=:) =
throw(ERR_ROWS_NOT_ALLOWED)
$operation(mach::Machine{<:Static,<:Any,false}; rows=:) =
throw(ERR_ROWS_NOT_ALLOWED)
end
eval(ex)
end
inverse_transform(mach::Machine; rows=:) =
throw(ArgumentError("`inverse_transform(mach)` and "*
"`inverse_transform(mach, rows=...)` are "*
"not supported. Data or nodes "*
"must be explictly specified, "*
"as in `inverse_transform(mach, X)`. "))
_symbol(f) = Base.Core.Typeof(f).name.mt.name
# catches improperly deserialized machines and silently fits the machine if it is
# untrained and has no training arguments:
function _check_and_fit_if_warranted!(mach)
mach.state == -1 && @warn WARN_SERIALIZABLE_MACH
if mach.state == 0
if isempty(mach.args)
fit!(mach, verbosity=0)
else
throw(err_untrained(mach))
end
end
end
for operation in OPERATIONS
quoted_operation = QuoteNode(operation) # eg, :(:predict)
ex = quote
# 1. operations on machines, given *concrete* data:
function $operation(mach::Machine, Xraw)
_check_and_fit_if_warranted!(mach)
model = last_model(mach)
ret = $(operation)(
model,
mach.fitresult,
reformat(model, Xraw)[1],
)
get!(ret, $quoted_operation, mach)
end
function $operation(mach::Machine, Xraw, Xraw_more...)
_check_and_fit_if_warranted!(mach)
ret = $(operation)(
last_model(mach),
mach.fitresult,
Xraw,
Xraw_more...,
)
get!(ret, $quoted_operation, mach)
end
# 2. operations on machines, given *dynamic* data (nodes):
$operation(mach::Machine, X::AbstractNode) =
node($(operation), mach, X)
$operation(
mach::Machine,
X::AbstractNode,
Xmore::AbstractNode...,
) = node($(operation), mach, X, Xmore...)
end
eval(ex)
end
const err_unsupported_operation(operation) = ErrorException(
"The `$operation` operation has been applied to a composite model or learning "*
"network machine that does not support it. "
)
## NETWORK COMPOSITE MODELS
# In the case of `NetworkComposite` models, the `fitresult` is a learning network
# signature. If we call a node in the signature (eg, do `fitresult.predict()`) then we may
# mutate the underlying learning network (and hence `fitresult`). This is because some
# nodes in the network may be attached to machines whose reports are mutated when an
# operation is called on them (the associated model has a non-empty `reporting_operations`
# trait). For this reason we must first duplicate `fitresult`.
# The function `output_and_report(signature, operation, Xnew)` called below (and defined
# in signatures.jl) duplicates `signature`, applies `operation` with data `Xnew`, and
# returns the output and signature report.
for operation in [:predict,
:predict_joint,
:transform,
:inverse_transform]
quote
function $operation(model::NetworkComposite, fitresult, Xnew...)
if $(QuoteNode(operation)) in MLJBase.operations(fitresult)
return output_and_report(fitresult, $(QuoteNode(operation)), Xnew...)
end
throw(err_unsupported_operation($operation))
end
end |> eval
end
for (operation, fallback) in [(:predict_mode, :mode),
(:predict_mean, :mean),
(:predict_median, :median)]
quote
function $(operation)(m::ProbabilisticNetworkComposite,
fitresult,
Xnew)
if $(QuoteNode(operation)) in MLJBase.operations(fitresult)
return output_and_report(fitresult, $(QuoteNode(operation)), Xnew)
end
            # The following line returns a `Tuple` since `m` is a `NetworkComposite`
predictions, report = predict(m, fitresult, Xnew)
return $(fallback).(predictions), report
end
end |> eval
end
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 170 | @recipe function default_machine_plot(mach::Machine)
# Allow downstream packages to define plotting recipes
# for their own machine types.
mach.fitresult
end
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 61077 | # TYPE ALIASES
const AbstractRow = Union{AbstractVector{<:Integer}, Colon}
const TrainTestPair = Tuple{AbstractRow,AbstractRow}
const TrainTestPairs = AbstractVector{<:TrainTestPair}
# # ERROR MESSAGES
const PREDICT_OPERATIONS_STRING = begin
strings = map(PREDICT_OPERATIONS) do op
"`"*string(op)*"`"
end
join(strings, ", ", ", or ")
end
const PROG_METER_DT = 0.1
const ERR_WEIGHTS_LENGTH =
DimensionMismatch("`weights` and target have different lengths. ")
const ERR_WEIGHTS_DICT =
ArgumentError("`class_weights` must be a "*
"dictionary with `Real` values. ")
const ERR_WEIGHTS_CLASSES =
DimensionMismatch("The keys of `class_weights` "*
"are not the same as the levels of the "*
"target, `y`. Do `levels(y)` to check levels. ")
const ERR_OPERATION_MEASURE_MISMATCH = DimensionMismatch(
"The number of operations and the number of measures are different. ")
const ERR_INVALID_OPERATION = ArgumentError(
"Invalid `operation` or `operations`. "*
"An operation must be one of these: $PREDICT_OPERATIONS_STRING. ")
_ambiguous_operation(model, measure) =
"`$measure` does not support a `model` with "*
"`prediction_type(model) == :$(prediction_type(model))`. "
err_incompatible_prediction_types(model, measure) = ArgumentError(
_ambiguous_operation(model, measure)*
"If your model is truly making probabilistic predictions, try explicitly "*
"specifiying operations. For example, for "*
"`measures = [area_under_curve, accuracy]`, try "*
"`operations=[predict, predict_mode]`. ")
const LOG_AVOID = "\nTo override measure checks, set check_measure=false. "
const LOG_SUGGESTION1 =
"\nPerhaps you want to set `operation="*
"predict_mode` or need to "*
"specify multiple operations, "*
"one for each measure. "
const LOG_SUGGESTION2 =
"\nPerhaps you want to set `operation="*
"predict_mean` or `operation=predict_median`, or "*
"specify multiple operations, "*
"one for each measure. "
ERR_MEASURES_OBSERVATION_SCITYPE(measure, T_measure, T) = ArgumentError(
"\nobservation scitype of target = `$T` but ($measure) only supports "*
"`$T_measure`."*LOG_AVOID
)
ERR_MEASURES_PROBABILISTIC(measure, suggestion) = ArgumentError(
"The model subtypes `Probabilistic`, and so is not supported by "*
"`$measure`. $suggestion"*LOG_AVOID
)
ERR_MEASURES_DETERMINISTIC(measure) = ArgumentError(
"The model subtypes `Deterministic`, "*
"and so is not supported by `$measure`. "*LOG_AVOID
)
err_ambiguous_operation(model, measure) = ArgumentError(
_ambiguous_operation(model, measure)*
"\nUnable to infer an appropriate operation for `$measure`. "*
"Explicitly specify `operation=...` or `operations=...`. "*
"Possible value(s) are: $PREDICT_OPERATIONS_STRING. "
)
const ERR_UNSUPPORTED_PREDICTION_TYPE = ArgumentError(
"""
The `prediction_type` of your model needs to be one of: `:deterministic`,
`:probabilistic`, or `:interval`. Does your model implement one of these operations:
$PREDICT_OPERATIONS_STRING? If so, you can try explicitly specifying `operation=...`
or `operations=...` (and consider posting an issue to have the model review it's
definition of `MLJModelInterface.prediction_type`). Otherwise, performance
evaluation is not supported.
"""
)
const ERR_NEED_TARGET = ArgumentError(
"""
To evaluate a model's performance you must provide a target variable `y`, as in
`evaluate(model, X, y; options...)` or
mach = machine(model, X, y)
evaluate!(mach; options...)
"""
)
# ==================================================================
## RESAMPLING STRATEGIES
abstract type ResamplingStrategy <: MLJType end
show_as_constructed(::Type{<:ResamplingStrategy}) = true
# resampling strategies are `==` if they have the same type and their
# field values are `==`:
function ==(s1::S, s2::S) where S <: ResamplingStrategy
return all(getfield(s1, fld) == getfield(s2, fld) for fld in fieldnames(S))
end
# fallbacks for method to be implemented by each new strategy:
train_test_pairs(s::ResamplingStrategy, rows, X, y, w) =
train_test_pairs(s, rows, X, y)
train_test_pairs(s::ResamplingStrategy, rows, X, y) =
train_test_pairs(s, rows, y)
train_test_pairs(s::ResamplingStrategy, rows, y) =
train_test_pairs(s, rows)
# Helper to interpret rng, shuffle in case either is `nothing` or if
# `rng` is an integer:
function shuffle_and_rng(shuffle, rng)
if rng isa Integer
rng = MersenneTwister(rng)
end
if shuffle === nothing
shuffle = ifelse(rng===nothing, false, true)
end
if rng === nothing
rng = Random.GLOBAL_RNG
end
return shuffle, rng
end
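# Behaviour of the helper above (follows directly from its definition):
#
#   shuffle_and_rng(nothing, 123)      # (true, MersenneTwister(123))
#   shuffle_and_rng(nothing, nothing)  # (false, Random.GLOBAL_RNG)
#   shuffle_and_rng(false, 123)        # (false, MersenneTwister(123))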
# ----------------------------------------------------------------
# InSample
"""
in_sample = InSample()
Instantiate an `InSample` resampling strategy, for use in `evaluate!`, `evaluate` and in
tuning. In this strategy the train and test sets are the same, and consist of all
observations specified by the `rows` keyword argument. If `rows` is not specified, all
supplied rows are used.
# Example
```julia
using MLJBase, MLJModels
X, y = make_blobs() # a table and a vector
model = ConstantClassifier()
train, test = partition(eachindex(y), 0.7) # train:test = 70:30
```
Compute in-sample (training) loss:
```julia
evaluate(model, X, y, resampling=InSample(), rows=train, measure=brier_loss)
```
Compute the out-of-sample loss:
```julia
evaluate(model, X, y, resampling=[(train, test),], measure=brier_loss)
```
Or equivalently:
```julia
evaluate(model, X, y, resampling=Holdout(fraction_train=0.7), measure=brier_loss)
```
"""
struct InSample <: ResamplingStrategy end
train_test_pairs(::InSample, rows) = [(rows, rows),]
# ----------------------------------------------------------------
# Holdout
"""
holdout = Holdout(; fraction_train=0.7, shuffle=nothing, rng=nothing)
Instantiate a `Holdout` resampling strategy, for use in `evaluate!`, `evaluate` and in
tuning.
```julia
train_test_pairs(holdout, rows)
```
Returns the pair `[(train, test)]`, where `train` and `test` are
vectors such that `rows=vcat(train, test)` and
`length(train)/length(rows)` is approximatey equal to fraction_train`.
Pre-shuffling of `rows` is controlled by `rng` and `shuffle`. If `rng`
is an integer, then the `Holdout` keyword constructor resets it to
`MersenneTwister(rng)`. Otherwise some `AbstractRNG` object is
expected.
If `rng` is left unspecified, `rng` is reset to `Random.GLOBAL_RNG`,
in which case rows are only pre-shuffled if `shuffle=true` is
specified.
"""
struct Holdout <: ResamplingStrategy
fraction_train::Float64
shuffle::Bool
rng::Union{Int,AbstractRNG}
function Holdout(fraction_train, shuffle, rng)
0 < fraction_train < 1 ||
error("`fraction_train` must be between 0 and 1.")
return new(fraction_train, shuffle, rng)
end
end
# Keyword Constructor:
Holdout(; fraction_train::Float64=0.7, shuffle=nothing, rng=nothing) =
Holdout(fraction_train, shuffle_and_rng(shuffle, rng)...)
function train_test_pairs(holdout::Holdout, rows)
train, test = partition(rows, holdout.fraction_train,
shuffle=holdout.shuffle, rng=holdout.rng)
return [(train, test),]
end
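# For example, without pre-shuffling the first 70% of rows become the train
# set (a sketch; exact container types depend on `partition`):
#
#   train, test = only(train_test_pairs(Holdout(fraction_train=0.7), 1:10))
#   # train holds 1, ..., 7 and test holds 8, 9, 10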
# ----------------------------------------------------------------
# Cross-validation (vanilla)
"""
cv = CV(; nfolds=6, shuffle=nothing, rng=nothing)
Cross-validation resampling strategy, for use in `evaluate!`,
`evaluate` and tuning.
```julia
train_test_pairs(cv, rows)
```
Returns an `nfolds`-length iterator of `(train, test)` pairs of
vectors (row indices), where each `train` and `test` is a sub-vector
of `rows`. The `test` vectors are mutually exclusive and exhaust
`rows`. Each `train` vector is the complement of the corresponding
`test` vector. With no row pre-shuffling, the order of `rows` is
preserved, in the sense that `rows` coincides precisely with the
concatenation of the `test` vectors, in the order they are
generated. The first `r` test vectors have length `n + 1`, where `n, r
= divrem(length(rows), nfolds)`, and the remaining test vectors have
length `n`.
Pre-shuffling of `rows` is controlled by `rng` and `shuffle`. If `rng`
is an integer, then the `CV` keyword constructor resets it to
`MersenneTwister(rng)`. Otherwise some `AbstractRNG` object is
expected.
If `rng` is left unspecified, `rng` is reset to `Random.GLOBAL_RNG`,
in which case rows are only pre-shuffled if `shuffle=true` is
explicitly specified.
"""
struct CV <: ResamplingStrategy
nfolds::Int
shuffle::Bool
rng::Union{Int,AbstractRNG}
function CV(nfolds, shuffle, rng)
nfolds > 1 || throw(ArgumentError("Must have nfolds > 1. "))
return new(nfolds, shuffle, rng)
end
end
# Constructor with keywords
CV(; nfolds::Int=6, shuffle=nothing, rng=nothing) =
CV(nfolds, shuffle_and_rng(shuffle, rng)...)
function train_test_pairs(cv::CV, rows)
n_obs = length(rows)
n_folds = cv.nfolds
if cv.shuffle
rows=shuffle!(cv.rng, collect(rows))
end
n, r = divrem(n_obs, n_folds)
if n < 1
throw(ArgumentError(
"""Inusufficient data for $n_folds-fold cross-validation.
Try reducing nfolds. """
))
end
m = n + 1 # number of observations in first r folds
itr1 = Iterators.partition( 1 : m*r , m)
itr2 = Iterators.partition( m*r+1 : n_obs , n)
test_folds = Iterators.flatten((itr1, itr2))
return map(test_folds) do test_indices
test_rows = rows[test_indices]
train_rows = vcat(
rows[ 1 : first(test_indices)-1 ],
rows[ last(test_indices)+1 : end ]
)
(train_rows, test_rows)
end
end
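# Worked example of the fold construction above, with n, r = divrem(7, 3)
# = (2, 1), so the first test fold has length n + 1 = 3 and the rest length n = 2:
#
#   julia> [(collect(train), collect(test)) for
#              (train, test) in MLJBase.train_test_pairs(CV(nfolds=3), 1:7)]
#   3-element Vector{Tuple{Vector{Int64}, Vector{Int64}}}:
#    ([4, 5, 6, 7], [1, 2, 3])
#    ([1, 2, 3, 6, 7], [4, 5])
#    ([1, 2, 3, 4, 5], [6, 7])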
# ----------------------------------------------------------------
# Cross-validation (TimeSeriesCV)
"""
tscv = TimeSeriesCV(; nfolds=4)
Cross-validation resampling strategy, for use in `evaluate!`,
`evaluate` and tuning, when observations are chronological and not
expected to be independent.
```julia
train_test_pairs(tscv, rows)
```
Returns an `nfolds`-length iterator of `(train, test)` pairs of
vectors (row indices), where each `train` and `test` is a sub-vector
of `rows`. The rows are partitioned sequentially into `nfolds + 1`
approximately equal length partitions, where the first partition is the first
train set, and the second partition is the first test set. The second
train set consists of the first two partitions, and the second test set
consists of the third partition, and so on for each fold.
The first partition (which is the first train set) has length `n + r`,
where `n, r = divrem(length(rows), nfolds + 1)`, and the remaining partitions
(all of the test folds) have length `n`.
# Examples
```julia-repl
julia> MLJBase.train_test_pairs(TimeSeriesCV(nfolds=3), 1:10)
3-element Vector{Tuple{UnitRange{Int64}, UnitRange{Int64}}}:
(1:4, 5:6)
(1:6, 7:8)
(1:8, 9:10)
julia> model = (@load RidgeRegressor pkg=MultivariateStats verbosity=0)();
julia> data = @load_sunspots;
julia> X = (lag1 = data.sunspot_number[2:end-1],
lag2 = data.sunspot_number[1:end-2]);
julia> y = data.sunspot_number[3:end];
julia> tscv = TimeSeriesCV(nfolds=3);
julia> evaluate(model, X, y, resampling=tscv, measure=rmse, verbosity=0)
┌───────────────────────────┬───────────────┬────────────────────┐
│ _.measure │ _.measurement │ _.per_fold │
├───────────────────────────┼───────────────┼────────────────────┤
│ RootMeanSquaredError @753 │ 21.7 │ [25.4, 16.3, 22.4] │
└───────────────────────────┴───────────────┴────────────────────┘
_.per_observation = [missing]
_.fitted_params_per_fold = [ … ]
_.report_per_fold = [ … ]
_.train_test_rows = [ … ]
```
"""
struct TimeSeriesCV <: ResamplingStrategy
nfolds::Int
function TimeSeriesCV(nfolds)
nfolds > 0 || throw(ArgumentError("Must have nfolds > 0. "))
return new(nfolds)
end
end
# Constructor with keywords
TimeSeriesCV(; nfolds::Int=4) = TimeSeriesCV(nfolds)
function train_test_pairs(tscv::TimeSeriesCV, rows)
if rows != sort(rows)
@warn "TimeSeriesCV is being applied to `rows` not in sequence. "
end
n_obs = length(rows)
n_folds = tscv.nfolds
m, r = divrem(n_obs, n_folds + 1)
if m < 1
throw(ArgumentError(
"Inusufficient data for $n_folds-fold " *
"time-series cross-validation.\n" *
"Try reducing nfolds. "
))
end
test_folds = Iterators.partition( m+r+1 : n_obs , m)
return map(test_folds) do test_indices
train_indices = 1 : first(test_indices)-1
rows[train_indices], rows[test_indices]
end
end
# ----------------------------------------------------------------
# Cross-validation (stratified; for `Finite` targets)
"""
stratified_cv = StratifiedCV(; nfolds=6,
shuffle=false,
rng=Random.GLOBAL_RNG)
Stratified cross-validation resampling strategy, for use in
`evaluate!`, `evaluate` and in tuning. Applies only to classification
problems (`OrderedFactor` or `Multiclass` targets).
```julia
train_test_pairs(stratified_cv, rows, y)
```
Returns an `nfolds`-length iterator of `(train, test)` pairs of
vectors (row indices) where each `train` and `test` is a sub-vector of
`rows`. The `test` vectors are mutually exclusive and exhaust
`rows`. Each `train` vector is the complement of the corresponding
`test` vector.
Unlike regular cross-validation, the distribution of the levels of the
target `y` corresponding to each `train` and `test` is constrained, as
far as possible, to replicate that of `y[rows]` as a whole.
The stratified `train_test_pairs` algorithm is invariant to label renaming.
For example, if you run `replace!(y, 'a' => 'b', 'b' => 'a')` and then re-run
`train_test_pairs`, the returned `(train, test)` pairs will be the same.
Pre-shuffling of `rows` is controlled by `rng` and `shuffle`. If `rng`
is an integer, then the `StratifiedCV` keyword constructor resets it to
`MersenneTwister(rng)`. Otherwise some `AbstractRNG` object is
expected.
If `rng` is left unspecified, `rng` is reset to `Random.GLOBAL_RNG`,
in which case rows are only pre-shuffled if `shuffle=true` is
explicitly specified.
"""
struct StratifiedCV <: ResamplingStrategy
nfolds::Int
shuffle::Bool
rng::Union{Int,AbstractRNG}
function StratifiedCV(nfolds, shuffle, rng)
nfolds > 1 || throw(ArgumentError("Must have nfolds > 1. "))
return new(nfolds, shuffle, rng)
end
end
# Constructor with keywords
StratifiedCV(; nfolds::Int=6, shuffle=nothing, rng=nothing) =
StratifiedCV(nfolds, shuffle_and_rng(shuffle, rng)...)
# Description of the stratified CV algorithm:
#
# There are algorithms that are conceptually somewhat simpler than this
# algorithm, but this algorithm is O(n) and is invariant to relabelling
# of the target vector.
#
# 1) Use countmap() to get the count for each level.
#
# 2) Use unique() to get the order in which the levels appear. (Steps 1
# and 2 could be combined if countmap() used an OrderedDict.)
#
# 3) For y = ['b', 'c', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'a', 'a', 'a'],
# the levels occur in the order ['b', 'c', 'a'], and each level has a count
# of 4. So imagine a table like this:
#
# b b b b c c c c a a a a
# 1 2 3 1 2 3 1 2 3 1 2 3
#
# This table ensures that the levels are smoothly spread across the test folds.
# In other words, where one level leaves off, the next level picks up. So,
# for example, as the 'c' levels are encountered, the corresponding row indices
# are added to folds [2, 3, 1, 2], in that order. The table above is
# partitioned by y-level and put into a dictionary `fold_lookup` that maps
# levels to the corresponding array of fold indices.
#
# 4) Iterate i from 1 to length(rows). For each i, look up the corresponding
# level, i.e. `level = y[rows[i]]`. Then use `popfirst!(fold_lookup[level])`
# to find the test fold in which to put the i-th element of `rows`.
#
# 5) Concatenate the appropriate test folds together to get the train
# indices for each `(train, test)` pair.
function train_test_pairs(stratified_cv::StratifiedCV, rows, y)
st = scitype(y)
if stratified_cv.shuffle
rows = shuffle!(stratified_cv.rng, collect(rows))
end
n_folds = stratified_cv.nfolds
n_obs = length(rows)
obs_per_fold = div(n_obs, n_folds)
y_included = y[rows]
level_count_dict = countmap(y_included)
# unique() preserves the order of appearance of the levels.
# We need this so that the results are invariant to renaming of the levels.
y_levels = unique(y_included)
level_count = [level_count_dict[level] for level in y_levels]
fold_cycle = collect(Iterators.take(Iterators.cycle(1:n_folds), n_obs))
lasts = cumsum(level_count)
firsts = [1; lasts[1:end-1] .+ 1]
level_fold_indices = (fold_cycle[f:l] for (f, l) in zip(firsts, lasts))
fold_lookup = Dict(y_levels .=> level_fold_indices)
folds = [Int[] for _ in 1:n_folds]
for fold in folds
sizehint!(fold, obs_per_fold)
end
for i in 1:n_obs
level = y_included[i]
fold_index = popfirst!(fold_lookup[level])
push!(folds[fold_index], rows[i])
end
[(complement(folds, i), folds[i]) for i in 1:n_folds]
end
# ================================================================
## EVALUATION RESULT TYPE
abstract type AbstractPerformanceEvaluation <: MLJType end
"""
PerformanceEvaluation <: AbstractPerformanceEvaluation
Type of object returned by [`evaluate`](@ref) (for models plus data) or
[`evaluate!`](@ref) (for machines). Such objects encode estimates of the performance
(generalization error) of a supervised model or outlier detection model, and store other
information ancillary to the computation.
If [`evaluate`](@ref) or [`evaluate!`](@ref) is called with the `compact=true` option,
then a [`CompactPerformanceEvaluation`](@ref) object is returned instead.
When `evaluate`/`evaluate!` is called, a number of train/test pairs ("folds") of row
indices are generated, according to the options provided, which are discussed in the
[`evaluate!`](@ref) doc-string. Rows correspond to observations. The generated train/test
pairs are recorded in the `train_test_rows` field of the `PerformanceEvaluation` struct,
and the corresponding estimates, aggregated over all train/test pairs, are recorded in
`measurement`, a vector with one entry for each measure (metric) recorded in `measure`.
When displayed, a `PerformanceEvaluation` object includes a value under the heading
`1.96*SE`, derived from the standard error of the `per_fold` entries. This value is
suitable for constructing a formal 95% confidence interval for the given
`measurement`. Such intervals should be interpreted with caution. See, for example, [Bates
et al. (2021)](https://arxiv.org/abs/2104.00673).
### Fields
These fields are part of the public API of the `PerformanceEvaluation` struct.
- `model`: model used to create the performance evaluation. In the case of a
tuning model, this is the best model found.
- `measure`: vector of measures (metrics) used to evaluate performance
- `measurement`: vector of measurements - one for each element of `measure` - aggregating
the performance measurements over all train/test pairs (folds). The aggregation method
applied for a given measure `m` is
`StatisticalMeasuresBase.external_aggregation_mode(m)` (commonly `Mean()` or `Sum()`)
- `operation` (e.g., `predict_mode`): the operations applied for each measure to generate
predictions to be evaluated. Possibilities are: $PREDICT_OPERATIONS_STRING.
- `per_fold`: a vector of vectors of individual test fold evaluations (one vector per
measure). Useful for obtaining a rough estimate of the variance of the performance
estimate.
- `per_observation`: a vector of vectors of vectors containing individual per-observation
measurements: for an evaluation `e`, `e.per_observation[m][f][i]` is the measurement for
the `i`th observation in the `f`th test fold, evaluated using the `m`th measure. Useful
for some forms of hyper-parameter optimization (see also the example following
this list). Note that an aggregated measurement
for some measure `measure` is repeated across all observations in a fold if
`StatisticalMeasures.can_report_unaggregated(measure) == false`. If `e` has been computed
with the `per_observation=false` option, then `e.per_observation` is a vector of
`missings`.
- `fitted_params_per_fold`: a vector containing `fitted_params(mach)` for each machine
`mach` trained during resampling - one machine per train/test pair. Use this to extract
the learned parameters for each individual training event.
- `report_per_fold`: a vector containing `report(mach)` for each machine `mach`
trained during resampling - one machine per train/test pair.
- `train_test_rows`: a vector of tuples, each of the form `(train, test)`, where `train`
and `test` are vectors of row (observation) indices for training and evaluation
respectively.
- `resampling`: the user-specified resampling strategy to generate the train/test pairs
(or literal train/test pairs if that was directly specified).
- `repeats`: the number of times the resampling strategy was repeated.
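For example, for an evaluation `e` obtained with two measures and three
train/test pairs, results are accessed as in the following sketch (names
are illustrative only):
```julia
e.measure                  # 2-element vector of measures, e.g. [log_loss, accuracy]
e.measurement[1]           # aggregate of the first measure over all folds
e.per_fold[1]              # 3-element vector: first measure, fold by fold
e.per_observation[1][2][3] # first measure, 3rd observation in 2nd test fold
```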
See also [`CompactPerformanceEvaluation`](@ref).
"""
struct PerformanceEvaluation{M,
Measure,
Measurement,
Operation,
PerFold,
PerObservation,
FittedParamsPerFold,
ReportPerFold,
R} <: AbstractPerformanceEvaluation
model::M
measure::Measure
measurement::Measurement
operation::Operation
per_fold::PerFold
per_observation::PerObservation
fitted_params_per_fold::FittedParamsPerFold
report_per_fold::ReportPerFold
train_test_rows::TrainTestPairs
resampling::R
repeats::Int
end
"""
CompactPerformanceEvaluation <: AbstractPerformanceEvaluation
Type of object returned by [`evaluate`](@ref) (for models plus data) or
[`evaluate!`](@ref) (for machines) when called with the option `compact = true`. Such
objects have the same structure as the [`PerformanceEvaluation`](@ref) objects returned by
default, except that the following fields are omitted to save memory:
`fitted_params_per_fold`, `report_per_fold`, `train_test_rows`.
For more on the remaining fields, see [`PerformanceEvaluation`](@ref).
"""
struct CompactPerformanceEvaluation{M,
Measure,
Measurement,
Operation,
PerFold,
PerObservation,
R} <: AbstractPerformanceEvaluation
model::M
measure::Measure
measurement::Measurement
operation::Operation
per_fold::PerFold
per_observation::PerObservation
resampling::R
repeats::Int
end
compactify(e::CompactPerformanceEvaluation) = e
compactify(e::PerformanceEvaluation) = CompactPerformanceEvaluation(
e.model,
e.measure,
e.measurement,
e.operation,
e.per_fold,
e.per_observation,
e.resampling,
e.repeats,
)
# pretty printing:
round3(x) = x
round3(x::AbstractFloat) = round(x, sigdigits=3)
const SE_FACTOR = 1.96 # For a 95% confidence interval.
_standard_error(v::AbstractVector{<:Real}) = SE_FACTOR*std(v) / sqrt(length(v) - 1)
_standard_error(v) = "N/A"
function _standard_errors(e::AbstractPerformanceEvaluation)
measure = e.measure
length(e.per_fold[1]) == 1 && return [nothing]
std_errors = map(_standard_error, e.per_fold)
return std_errors
end
# to address #874, while preserving the display worked out in #757:
_repr_(f::Function) = repr(f)
_repr_(x) = repr("text/plain", x)
# helper for row labels: _label(1) ="A", _label(2) = "B", _label(27) = "BA", etc
const alphabet = Char.(65:90)
_label(i) = map(digits(i - 1, base=26)) do d alphabet[d + 1] end |> join |> reverse
function Base.show(io::IO, ::MIME"text/plain", e::AbstractPerformanceEvaluation)
_measure = [_repr_(m) for m in e.measure]
_measurement = round3.(e.measurement)
_per_fold = [round3.(v) for v in e.per_fold]
_sterr = round3.(_standard_errors(e))
row_labels = _label.(eachindex(e.measure))
# Define header and data for main table
data = hcat(_measure, e.operation, _measurement)
header = ["measure", "operation", "measurement"]
if length(row_labels) > 1
data = hcat(row_labels, data)
header = ["", header...]
end
if e isa PerformanceEvaluation
println(io, "PerformanceEvaluation object "*
"with these fields:")
println(io, " model, measure, operation,\n"*
" measurement, per_fold, per_observation,\n"*
" fitted_params_per_fold, report_per_fold,\n"*
" train_test_rows, resampling, repeats")
else
println(io, "CompactPerformanceEvaluation object "*
"with these fields:")
println(io, " model, measure, operation,\n"*
" measurement, per_fold, per_observation,\n"*
" train_test_rows, resampling, repeats")
end
println(io, "Extract:")
show_color = MLJBase.SHOW_COLOR[]
color_off()
PrettyTables.pretty_table(
io,
data;
header,
header_crayon=PrettyTables.Crayon(bold=false),
alignment=:l,
linebreaks=true,
)
# Show the per-fold table if needed:
if length(first(e.per_fold)) > 1
show_sterr = any(!isnothing, _sterr)
data2 = hcat(_per_fold, _sterr)
header2 = ["per_fold", "1.96*SE"]
if length(row_labels) > 1
data2 = hcat(row_labels, data2)
header2 = ["", header2...]
end
PrettyTables.pretty_table(
io,
data2;
header=header2,
header_crayon=PrettyTables.Crayon(bold=false),
alignment=:l,
linebreaks=true,
)
end
show_color ? color_on() : color_off()
end
_summary(e) = Tuple(round3.(e.measurement))
Base.show(io::IO, e::PerformanceEvaluation) =
print(io, "PerformanceEvaluation$(_summary(e))")
Base.show(io::IO, e::CompactPerformanceEvaluation) =
print(io, "CompactPerformanceEvaluation$(_summary(e))")
# ===============================================================
## USER CONTROL OF DEFAULT LOGGING
const DOC_DEFAULT_LOGGER =
"""
The default logger is used in calls to [`evaluate!`](@ref) and [`evaluate`](@ref), and
in the constructors `TunedModel` and `IteratedModel`, unless the `logger` keyword is
explicitly specified.
!!! note
Prior to MLJ v0.20.7 (and MLJBase 1.5) the default logger was always `nothing`.
"""
"""
default_logger()
Return the current value of the default logger for use with supported machine learning
tracking platforms, such as [MLflow](https://mlflow.org/docs/latest/index.html).
$DOC_DEFAULT_LOGGER
When MLJBase is first loaded, the default logger is `nothing`.
"""
default_logger() = DEFAULT_LOGGER[]
"""
default_logger(logger)
Reset the default logger.
# Example
Suppose an [MLflow](https://mlflow.org/docs/latest/index.html) tracking service is running
on a local server at `http://127.0.0.1:5000`. Then in every `evaluate` call in which
`logger` is not specified, the performance evaluation is
automatically logged to the service, as here:
```julia
using MLJ
logger = MLJFlow.Logger("http://127.0.0.1:5000/api")
default_logger(logger)
X, y = make_moons()
model = ConstantClassifier()
evaluate(model, X, y, measures=[log_loss, accuracy])
```
"""
function default_logger(logger)
DEFAULT_LOGGER[] = logger
end
# ===============================================================
## EVALUATION METHODS
# ---------------------------------------------------------------
# Helpers
function actual_rows(rows, N, verbosity)
unspecified_rows = (rows === nothing)
_rows = unspecified_rows ? (1:N) : rows
if !unspecified_rows && verbosity > 0
@info "Creating subsamples from a subset of all rows. "
end
return _rows
end
function _check_measure(measure, operation, model, y)
# get observation scitype:
T = MLJBase.guess_observation_scitype(y)
# get type supported by measure:
T_measure = StatisticalMeasuresBase.observation_scitype(measure)
T == Unknown && (return true)
T_measure == Union{} && (return true)
isnothing(StatisticalMeasuresBase.kind_of_proxy(measure)) && (return true)
T <: T_measure || throw(ERR_MEASURES_OBSERVATION_SCITYPE(measure, T_measure, T))
incompatible = model isa Probabilistic &&
operation == predict &&
StatisticalMeasuresBase.kind_of_proxy(measure) != LearnAPI.Distribution()
if incompatible
if T <: Union{Missing,Finite}
suggestion = LOG_SUGGESTION1
elseif T <: Union{Missing,Infinite}
suggestion = LOG_SUGGESTION2
else
suggestion = ""
end
throw(ERR_MEASURES_PROBABILISTIC(measure, suggestion))
end
model isa Deterministic &&
StatisticalMeasuresBase.kind_of_proxy(measure) != LearnAPI.LiteralTarget() &&
throw(ERR_MEASURES_DETERMINISTIC(measure))
return true
end
function _check_measures(measures, operations, model, y)
all(eachindex(measures)) do j
_check_measure(measures[j], operations[j], model, y)
end
end
function _actual_measures(measures, model)
if measures === nothing
candidate = default_measure(model)
candidate === nothing && error("You need to specify measure=... ")
_measures = [candidate, ]
elseif !(measures isa AbstractVector)
_measures = [measures, ]
else
_measures = measures
end
# wrap in `robust_measure` to allow unsupported weights to be silently treated as
# uniform when invoked; `_check_measure` will throw appropriate warnings unless
# explicitly suppressed.
return StatisticalMeasuresBase.robust_measure.(_measures)
end
function _check_weights(weights, nrows)
length(weights) == nrows ||
throw(ERR_WEIGHTS_LENGTH)
return true
end
function _check_class_weights(weights, levels)
weights isa AbstractDict{<:Any,<:Real} ||
throw(ERR_WEIGHTS_DICT)
Set(levels) == Set(keys(weights)) ||
throw(ERR_WEIGHTS_CLASSES)
return true
end
function _check_weights_measures(weights,
class_weights,
measures,
mach,
operations,
verbosity,
check_measure)
if check_measure || !(weights isa Nothing) || !(class_weights isa Nothing)
y = mach.args[2]()
end
check_measure && _check_measures(measures, operations, mach.model, y)
weights isa Nothing || _check_weights(weights, nrows(y))
class_weights isa Nothing ||
_check_class_weights(class_weights, levels(y))
end
# here `operation` is what the user has specified, and `nothing` if
# not specified:
_actual_operations(operation, measures, args...) =
_actual_operations(fill(operation, length(measures)), measures, args...)
function _actual_operations(operation::AbstractVector, measures, args...)
length(measures) === length(operation) ||
throw(ERR_OPERATION_MEASURE_MISMATCH)
all(operation) do op
op in eval.(PREDICT_OPERATIONS)
end || throw(ERR_INVALID_OPERATION)
return operation
end
function _actual_operations(operation::Nothing,
measures, # vector of measures
model,
verbosity)
map(measures) do m
# `kind_of_proxy` is the measure trait corresponding to `prediction_type` model
# trait. But its values are instances of LearnAPI.KindOfProxy, instead of
# symbols:
#
# `LearnAPI.LiteralTarget()` ~ `:deterministic` (`model isa Deterministic`)
# `LearnAPI.Distribution()` ~ `:probabilistic` (`model isa Probabilistic`)
#
kind_of_proxy = StatisticalMeasuresBase.kind_of_proxy(m)
# `observation_type` is the measure trait which we need to match the model
# `target_scitype` but the latter refers to the whole target `y`, not a single
# observation.
#
# One day, models will have their own `observation_scitype`
observation_scitype = StatisticalMeasuresBase.observation_scitype(m)
# One day, models will implement LearnAPI and will get their own `kind_of_proxy`
# trait replacing `prediction_type` and `observation_scitype` trait replacing
# `target_scitype`.
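# For example (illustrative): for a probabilistic classifier and a measure
# like `accuracy`, which consumes literal `Finite` targets
# (`LearnAPI.LiteralTarget()`), the logic below returns `predict_mode`; for
# `log_loss`, which consumes distributions (`LearnAPI.Distribution()`), it
# returns `predict`.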
isnothing(kind_of_proxy) && (return predict)
if MLJBase.prediction_type(model) === :probabilistic
if kind_of_proxy === LearnAPI.Distribution()
return predict
elseif kind_of_proxy === LearnAPI.LiteralTarget()
if observation_scitype <: Union{Missing,Finite}
return predict_mode
elseif observation_scitype <: Union{Missing,Infinite}
return predict_mean
else
throw(err_ambiguous_operation(model, m))
end
else
throw(err_ambiguous_operation(model, m))
end
elseif MLJBase.prediction_type(model) === :deterministic
if kind_of_proxy === LearnAPI.Distribution()
throw(err_incompatible_prediction_types(model, m))
elseif kind_of_proxy === LearnAPI.LiteralTarget()
return predict
else
throw(err_ambiguous_operation(model, m))
end
elseif MLJBase.prediction_type(model) === :interval
if kind_of_proxy === LearnAPI.ConfidenceInterval()
return predict
else
throw(err_ambiguous_operation(model, m))
end
else
throw(ERR_UNSUPPORTED_PREDICTION_TYPE)
end
end
end
function _warn_about_unsupported(trait, str, measures, weights, verbosity)
if verbosity >= 0 && weights !== nothing
unsupported = filter(measures) do m
!trait(m)
end
if !isempty(unsupported)
unsupported_as_string = string(unsupported[1])
unsupported_as_string *=
reduce(*, [string(", ", m) for m in unsupported[2:end]])
@warn "$str weights ignored in evaluations of the following"*
" measures, as unsupported: \n$unsupported_as_string "
end
end
end
function _process_accel_settings(accel::CPUThreads)
if accel.settings === nothing
nthreads = Threads.nthreads()
_accel = CPUThreads(nthreads)
else
typeof(accel.settings) <: Signed ||
throw(ArgumentError("`n`used in `acceleration = CPUThreads(n)`must" *
"be an instance of type `T<:Signed`"))
accel.settings > 0 ||
throw(error("Can't create $(accel.settings) tasks)"))
_accel = accel
end
return _accel
end
_process_accel_settings(accel::Union{CPU1, CPUProcesses}) = accel
#fallback
_process_accel_settings(accel) = throw(ArgumentError("unsupported " *
"acceleration parameter `acceleration = $accel` "))
# --------------------------------------------------------------
# User interface points: `evaluate!` and `evaluate`
const RESAMPLING_STRATEGIES = subtypes(ResamplingStrategy)
const RESAMPLING_STRATEGIES_LIST =
join(
map(RESAMPLING_STRATEGIES) do s
name = split(string(s), ".") |> last
"`$name`"
end,
", ",
" and ",
)
"""
log_evaluation(logger, performance_evaluation)
Log a performance evaluation to `logger`, an object specific to some logging platform,
such as mlflow. If `logger=nothing` then no logging is performed. The method is called at
the end of every call to `evaluate/evaluate!` using the logger provided by the `logger`
keyword argument.
# Implementations for new logging platforms
Julia interfaces to workflow logging platforms, such as mlflow (provided by the
MLFlowClient.jl interface) should overload `log_evaluation(logger::LoggerType,
performance_evaluation)`, where `LoggerType` is a platform-specific type for logger
objects. For an example, see the implementation provided by the MLJFlow.jl package.
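A minimal sketch of such an overloading (`MyLogger` and `send_to_service` are
hypothetical stand-ins) might read:
```julia
struct MyLogger
    service_url::String
end

function MLJBase.log_evaluation(logger::MyLogger, performance_evaluation)
    # forward, e.g., the aggregated measurements to the tracking service:
    send_to_service(logger.service_url, performance_evaluation.measurement)
end
```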
"""
log_evaluation(logger, performance_evaluation) = nothing
"""
evaluate!(mach; resampling=CV(), measure=nothing, options...)
Estimate the performance of a machine `mach` wrapping a supervised model in data, using
the specified `resampling` strategy (defaulting to 6-fold cross-validation) and `measure`,
which can be a single measure or vector. Returns a [`PerformanceEvaluation`](@ref)
object.
Available resampling strategies are $RESAMPLING_STRATEGIES_LIST. If `resampling` is not an
instance of one of these, then a vector of tuples of the form `(train_rows, test_rows)`
is expected. For example, setting
```julia
resampling = [(1:100, 101:200),
(101:200, 1:100)]
```
gives two-fold cross-validation using the first 200 rows of data.
Any measure conforming to the
[StatisticalMeasuresBase.jl](https://juliaai.github.io/StatisticalMeasuresBase.jl/dev/)
API can be provided, assuming it can consume multiple observations.
Although `evaluate!` is mutating, `mach.model` and `mach.args` are not mutated.
# Additional keyword options
- `rows` - vector of observation indices from which both train and test folds are
constructed (default is all observations)
- `operation`/`operations=nothing` - One of $PREDICT_OPERATIONS_STRING, or a vector of
these of the same length as `measure`/`measures`. Automatically inferred if left
unspecified. For example, `predict_mode` will be used for a `Multiclass` target, if
`model` is a probabilistic predictor, but `measure` expects literal (point) target
predictions. Operations actually applied can be inspected from the `operation` field of
the object returned.
- `weights` - per-sample `Real` weights for measures that support them (not to be confused
with weights used in training, such as the `w` in `mach = machine(model, X, y, w)`).
- `class_weights` - dictionary of `Real` per-class weights for use with measures that
support these, in classification problems (not to be confused
with weights used in training, such as the `w` in `mach = machine(model, X, y, w)`).
- `repeats::Int=1`: set to a higher value for repeated (Monte Carlo)
resampling. For example, if `repeats = 10`, then `resampling = CV(nfolds=5,
shuffle=true)`, generates a total of 50 `(train, test)` pairs for evaluation and
subsequent aggregation.
- `acceleration=CPU1()`: acceleration/parallelization option; can be any instance of
`CPU1`, (single-threaded computation), `CPUThreads` (multi-threaded computation) or
`CPUProcesses` (multi-process computation); default is `default_resource()`. These types
are owned by ComputationalResources.jl.
- `force=false`: set to `true` to force cold-restart
of each training event
- `verbosity::Int=1` logging level; can be negative
- `check_measure=true`: whether to screen measures for possible incompatibility with the
model. Will not catch all incompatibilities.
- `per_observation=true`: whether to calculate estimates for individual observations; if
`false` the `per_observation` field of the returned object is populated with
`missing`s. Setting to `false` may reduce compute time and allocations.
- `logger=default_logger()` - a logger object for forwarding results to a machine learning
tracking platform; see [`default_logger`](@ref) for details.
- `compact=false` - if `true`, the returned evaluation object excludes these fields:
`fitted_params_per_fold`, `report_per_fold`, `train_test_rows`.
See also [`evaluate`](@ref), [`PerformanceEvaluation`](@ref),
[`CompactPerformanceEvaluation`](@ref).
"""
function evaluate!(
mach::Machine;
resampling=CV(),
measures=nothing,
measure=measures,
weights=nothing,
class_weights=nothing,
operations=nothing,
operation=operations,
acceleration=default_resource(),
rows=nothing,
repeats=1,
force=false,
check_measure=true,
per_observation=true,
verbosity=1,
logger=default_logger(),
compact=false,
)
# this method just checks validity of options, preprocess the
# weights, measures, operations, and dispatches a
# strategy-specific `evaluate!`
length(mach.args) > 1 || throw(ERR_NEED_TARGET)
repeats > 0 || error("Need `repeats > 0`. ")
if resampling isa TrainTestPairs
if rows !== nothing
error("You cannot specify `rows` unless `resampling "*
"isa MLJ.ResamplingStrategy` is true. ")
end
if repeats != 1 && verbosity > 0
@warn "repeats > 1 not supported unless "*
"`resampling <: ResamplingStrategy. "
end
end
_measures = _actual_measures(measure, mach.model)
_operations = _actual_operations(operation,
_measures,
mach.model,
verbosity)
_check_weights_measures(weights,
class_weights,
_measures,
mach,
_operations,
verbosity,
check_measure)
_warn_about_unsupported(
StatisticalMeasuresBase.supports_weights,
"Sample",
_measures,
weights,
verbosity,
)
_warn_about_unsupported(
StatisticalMeasuresBase.supports_class_weights,
"Class",
_measures,
class_weights,
verbosity,
)
_acceleration = _process_accel_settings(acceleration)
evaluate!(
mach,
resampling,
weights,
class_weights,
rows,
verbosity,
repeats,
_measures,
_operations,
_acceleration,
force,
per_observation,
logger,
resampling,
compact,
)
end
"""
evaluate(model, data...; cache=true, options...)
Equivalent to `evaluate!(machine(model, data..., cache=cache); options...)`.
See the machine version `evaluate!` for the complete list of options.
Returns a [`PerformanceEvaluation`](@ref) object.
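For example (a sketch only, assuming `using MLJ` to provide the model and
measure):
```julia
X, y = make_moons()
model = ConstantClassifier()
evaluate(model, X, y, resampling=CV(nfolds=3), measure=log_loss)
```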
See also [`evaluate!`](@ref).
"""
evaluate(model::Model, args...; cache=true, kwargs...) =
evaluate!(machine(model, args...; cache=cache); kwargs...)
# -------------------------------------------------------------------
# Resource-specific methods to distribute a function parameterized by
# fold number `k` over processes/threads.
# Here `func` is always going to be `fit_and_extract_on_fold`; see later
function _next!(p)
p.counter += 1
ProgressMeter.updateProgress!(p)
end
function _evaluate!(func, mach, ::CPU1, nfolds, verbosity)
if verbosity > 0
p = Progress(
nfolds,
dt = PROG_METER_DT,
desc = "Evaluating over $nfolds folds: ",
barglyphs = BarGlyphs("[=> ]"),
barlen = 25,
color = :yellow
)
end
ret = mapreduce(vcat, 1:nfolds) do k
r = func(mach, k)
verbosity < 1 || _next!(p)
return [r, ]
end
return zip(ret...) |> collect
end
function _evaluate!(func, mach, ::CPUProcesses, nfolds, verbosity)
local ret
@sync begin
if verbosity > 0
p = Progress(
nfolds,
dt = PROG_METER_DT,
desc = "Evaluating over $nfolds folds: ",
barglyphs = BarGlyphs("[=> ]"),
barlen = 25,
color = :yellow
)
channel = RemoteChannel(()->Channel{Bool}(), 1)
end
# printing the progress bar
verbosity < 1 || @async begin
while take!(channel)
_next!(p)
end
end
ret = @distributed vcat for k in 1:nfolds
r = func(mach, k)
verbosity < 1 || put!(channel, true)
[r, ]
end
verbosity < 1 || put!(channel, false)
end
return zip(ret...) |> collect
end
@static if VERSION >= v"1.3.0-DEV.573"
# determines if an instantiated machine caches data:
_caches_data(::Machine{<:Any,<:Any,C}) where C = C
function _evaluate!(func, mach, accel::CPUThreads, nfolds, verbosity)
nthreads = Threads.nthreads()
if nthreads == 1
return _evaluate!(func, mach, CPU1(), nfolds, verbosity)
end
ntasks = accel.settings
partitions = chunks(1:nfolds, ntasks)
if verbosity > 0
p = Progress(
nfolds,
dt = PROG_METER_DT,
desc = "Evaluating over $nfolds folds: ",
barglyphs = BarGlyphs("[=> ]"),
barlen = 25,
color = :yellow
)
ch = Channel{Bool}()
end
results = Vector(undef, length(partitions))
@sync begin
# printing the progress bar
verbosity < 1 || @async begin
while take!(ch)
_next!(p)
end
end
clean!(mach.model)
# One tmach for each task:
machines = vcat(
mach,
[
machine(mach.model, mach.args...; cache = _caches_data(mach))
for _ in 2:length(partitions)
]
)
@sync for (i, parts) in enumerate(partitions)
Threads.@spawn begin
results[i] = mapreduce(vcat, parts) do k
r = func(machines[i], k)
verbosity < 1 || put!(ch, true)
[r, ]
end
end
end
verbosity < 1 || put!(ch, false)
end
ret = reduce(vcat, results)
return zip(ret...) |> collect
end
end
# ------------------------------------------------------------
# Core `evaluation` method, operating on train-test pairs
const AbstractRow = Union{AbstractVector{<:Integer}, Colon}
const TrainTestPair = Tuple{AbstractRow, AbstractRow}
const TrainTestPairs = AbstractVector{<:TrainTestPair}
_view(::Nothing, rows) = nothing
_view(weights, rows) = view(weights, rows)
# Evaluation when `resampling` is a TrainTestPairs (CORE EVALUATOR):
function evaluate!(
mach::Machine,
resampling,
weights,
class_weights,
rows,
verbosity,
repeats,
measures,
operations,
acceleration,
force,
per_observation_flag,
logger,
user_resampling,
compact,
)
# Note: `user_resampling` keyword argument is the user-defined resampling strategy,
# while `resampling` is always a `TrainTestPairs`.
# Note: `rows` and `repeats` are only passed to the final `PerformanceEvaluation`
# object to be returned and are not otherwise used here.
if !(resampling isa TrainTestPairs)
error("`resampling` must be an "*
"`MLJ.ResamplingStrategy` or tuple of rows "*
"of the form `(train_rows, test_rows)`")
end
X = mach.args[1]()
y = mach.args[2]()
nrows = MLJBase.nrows(y)
nfolds = length(resampling)
test_fold_sizes = map(resampling) do train_test_pair
test = last(train_test_pair)
test isa Colon && (return nrows)
length(test)
end
# weights used to aggregate per-fold measurements, which depend on a measure's
# external mode of aggregation:
fold_weights(mode) = nfolds .* test_fold_sizes ./ sum(test_fold_sizes)
fold_weights(::StatisticalMeasuresBase.Sum) = nothing
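# For example (illustrative): with `nfolds == 2` and test folds of sizes 30
# and 10, `fold_weights(StatisticalMeasuresBase.Mean())` is `[1.5, 0.5]`, so
# the larger fold carries proportionally more weight in the aggregate; for
# `Sum()` aggregation no fold weights are applied.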
nmeasures = length(measures)
function fit_and_extract_on_fold(mach, k)
train, test = resampling[k]
fit!(mach; rows=train, verbosity=verbosity - 1, force=force)
# build a dictionary of predictions keyed on the operations
# that appear (`predict`, `predict_mode`, etc):
yhat_given_operation =
Dict(op=>op(mach, rows=test) for op in unique(operations))
ytest = selectrows(y, test)
if per_observation_flag
measurements = map(measures, operations) do m, op
StatisticalMeasuresBase.measurements(
m,
yhat_given_operation[op],
ytest,
_view(weights, test),
class_weights,
)
end
else
measurements = map(measures, operations) do m, op
m(
yhat_given_operation[op],
ytest,
_view(weights, test),
class_weights,
)
end
end
fp = fitted_params(mach)
r = report(mach)
return (measurements, fp, r)
end
if acceleration isa CPUProcesses
if verbosity > 0
@info "Distributing evaluations " *
"among $(nworkers()) workers."
end
end
if acceleration isa CPUThreads
if verbosity > 0
nthreads = Threads.nthreads()
@info "Performing evaluations " *
"using $(nthreads) thread" * ifelse(nthreads == 1, ".", "s.")
end
end
measurements_vector_of_vectors, fitted_params_per_fold, report_per_fold =
_evaluate!(
fit_and_extract_on_fold,
mach,
acceleration,
nfolds,
verbosity
)
measurements_flat = vcat(measurements_vector_of_vectors...)
# In the `measurements_matrix` below, rows=folds, columns=measures; each element of
# the matrix is:
#
# - a vector of measurements, one per observation within a fold, if
#   `per_observation_flag = true`; or
#
# - a single measurement for the whole fold, if `per_observation_flag = false`.
#
measurements_matrix = permutedims(
reshape(collect(measurements_flat), (nmeasures, nfolds))
)
# measurements for each observation:
per_observation = if per_observation_flag
map(1:nmeasures) do k
measurements_matrix[:,k]
end
else
fill(missing, nmeasures)
end
# measurements for each fold:
per_fold = if per_observation_flag
map(1:nmeasures) do k
m = measures[k]
mode = StatisticalMeasuresBase.external_aggregation_mode(m)
map(per_observation[k]) do v
StatisticalMeasuresBase.aggregate(v; mode)
end
end
else
map(1:nmeasures) do k
measurements_matrix[:,k]
end
end
# overall aggregates:
per_measure = map(1:nmeasures) do k
m = measures[k]
mode = StatisticalMeasuresBase.external_aggregation_mode(m)
StatisticalMeasuresBase.aggregate(
per_fold[k];
mode,
weights=fold_weights(mode),
)
end
evaluation = PerformanceEvaluation(
mach.model,
measures,
per_measure,
operations,
per_fold,
per_observation,
fitted_params_per_fold |> collect,
report_per_fold |> collect,
resampling,
user_resampling,
repeats
)
log_evaluation(logger, evaluation)
compact && return compactify(evaluation)
return evaluation
end
# ----------------------------------------------------------------
# Evaluation when `resampling` is a ResamplingStrategy
function evaluate!(mach::Machine, resampling::ResamplingStrategy,
weights, class_weights, rows, verbosity, repeats, args...)
train_args = Tuple(a() for a in mach.args)
y = train_args[2]
_rows = actual_rows(rows, nrows(y), verbosity)
repeated_train_test_pairs =
vcat(
[train_test_pairs(resampling, _rows, train_args...) for i in 1:repeats]...
)
evaluate!(
mach,
repeated_train_test_pairs,
weights,
class_weights,
nothing,
verbosity,
repeats,
args...
)
end
# ====================================================================
## RESAMPLER - A MODEL WRAPPER WITH `evaluate` OPERATION
"""
resampler = Resampler(
model=ConstantRegressor(),
resampling=CV(),
measure=nothing,
weights=nothing,
class_weights=nothing,
operation=predict,
repeats = 1,
acceleration=default_resource(),
check_measure=true,
per_observation=true,
logger=default_logger(),
compact=false,
)
*Private method.* Use at own risk.
Resampling model wrapper, used internally by the `fit` method of `TunedModel` instances
and `IteratedModel` instances. See [`evaluate!`](@ref) for meaning of the options. Not
intended for use by general user, who will ordinarily use [`evaluate!`](@ref) directly.
Given a machine `mach = machine(resampler, args...)` one obtains a performance evaluation
of the specified `model`, performed according to the prescribed `resampling` strategy and
other parameters, using data `args...`, by calling `fit!(mach)` followed by
`evaluate(mach)`.
On subsequent calls to `fit!(mach)` new train/test pairs of row indices are only
regenerated if `resampling`, `repeats` or `cache` fields of `resampler` have changed. The
evolution of an RNG field of `resampler` does *not* constitute a change (`==` for
`MLJType` objects is not sensitive to such changes; see [`is_same_except`](@ref)).
If there is a single train/test pair, then warm-restart behavior of the wrapped model
`resampler.model` will extend to warm-restart behaviour of the wrapper `resampler`, with
respect to mutations of the wrapped model.
The sample `weights` are passed to the specified performance measures that support weights
for evaluation. These weights are not to be confused with any weights bound to a
`Resampler` instance in a machine, used for training the wrapped `model` when supported.
The sample `class_weights` are passed to the specified performance measures that support
per-class weights for evaluation. These weights are not to be confused with any weights
bound to a `Resampler` instance in a machine, used for training the wrapped `model` when
supported.
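For example (a sketch, assuming suitable regression data `X`, `y`, and that
`ConstantRegressor` is loaded):
```julia
resampler = Resampler(model=ConstantRegressor(), resampling=CV(nfolds=3))
mach = machine(resampler, X, y)
fit!(mach)
e = evaluate(mach)   # a `PerformanceEvaluation` object
```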
"""
mutable struct Resampler{S, L} <: Model
model
resampling::S # resampling strategy
measure
weights::Union{Nothing,AbstractVector{<:Real}}
class_weights::Union{Nothing, AbstractDict{<:Any, <:Real}}
operation
acceleration::AbstractResource
check_measure::Bool
repeats::Int
cache::Bool
per_observation::Bool
logger::L
compact::Bool
end
function MLJModelInterface.clean!(resampler::Resampler)
warning = ""
if resampler.measure === nothing && resampler.model !== nothing
measure = default_measure(resampler.model)
if measure === nothing
error("No default measure known for $(resampler.model). "*
"You must specify measure=... ")
else
warning *= "No `measure` specified. "*
"Setting `measure=$measure`. "
end
end
return warning
end
function Resampler(
;model=nothing,
resampling=CV(),
measures=nothing,
measure=measures,
weights=nothing,
class_weights=nothing,
operations=predict,
operation=operations,
acceleration=default_resource(),
check_measure=true,
repeats=1,
cache=true,
per_observation=true,
logger=default_logger(),
compact=false,
)
resampler = Resampler(
model,
resampling,
measure,
weights,
class_weights,
operation,
acceleration,
check_measure,
repeats,
cache,
per_observation,
logger,
compact,
)
message = MLJModelInterface.clean!(resampler)
isempty(message) || @warn message
return resampler
end
function MLJModelInterface.fit(resampler::Resampler, verbosity::Int, args...)
mach = machine(resampler.model, args...; cache=resampler.cache)
_measures = _actual_measures(resampler.measure, resampler.model)
_operations = _actual_operations(
resampler.operation,
_measures,
resampler.model,
verbosity
)
_check_weights_measures(
resampler.weights,
resampler.class_weights,
_measures,
mach,
_operations,
verbosity,
resampler.check_measure
)
_acceleration = _process_accel_settings(resampler.acceleration)
# the value of `compact` below is always `false`, because we need
# `e.train_test_rows` in `update`. (If `resampler.compact=true`, then
# `evaluate(resampler, ...)` returns the compactified version of the current
# `PerformanceEvaluation` object.)
e = evaluate!(
mach,
resampler.resampling,
resampler.weights,
resampler.class_weights,
nothing,
verbosity - 1,
resampler.repeats,
_measures,
_operations,
_acceleration,
false,
resampler.per_observation,
resampler.logger,
resampler.resampling,
false, # compact
)
fitresult = (machine = mach, evaluation = e)
cache = (
resampler = deepcopy(resampler),
acceleration = _acceleration
)
report = (evaluation = e, )
return fitresult, cache, report
end
# helper to update the model in a machine
# when the machine's existing model and the new model have same type:
function _update!(mach::Machine{M}, model::M) where M
mach.model = model
return mach
end
# when the types are different, we need a new machine:
_update!(mach, model) = machine(model, mach.args...)
function MLJModelInterface.update(
resampler::Resampler,
verbosity::Int,
fitresult,
cache,
args...
)
old_resampler, acceleration = cache
# if we need to generate new train/test pairs, or data caching
# option has changed, then fit from scratch:
if resampler.resampling != old_resampler.resampling ||
resampler.repeats != old_resampler.repeats ||
resampler.cache != old_resampler.cache
return MLJModelInterface.fit(resampler, verbosity, args...)
end
mach, e = fitresult
train_test_rows = e.train_test_rows
# since `resampler.model` could have changed, so might the actual measures and
# operations that should be passed to the (low level) `evaluate!`:
measures = _actual_measures(resampler.measure, resampler.model)
operations = _actual_operations(
resampler.operation,
measures,
resampler.model,
verbosity
)
# update the model:
mach2 = _update!(mach, resampler.model)
# re-evaluate:
e = evaluate!(
mach2,
train_test_rows,
resampler.weights,
resampler.class_weights,
nothing,
verbosity - 1,
resampler.repeats,
measures,
operations,
acceleration,
false,
resampler.per_observation,
resampler.logger,
resampler.resampling,
false # we use `compact=false`; see comment in `fit` above
)
report = (evaluation = e, )
fitresult = (machine=mach2, evaluation=e)
cache = (
resampler = deepcopy(resampler),
acceleration = acceleration
)
return fitresult, cache, report
end
# Some traits are marked as `missing` because we cannot determine
# them from the type alone: we have removed `M` (for "model") as
# a `Resampler` type parameter. See
# https://github.com/JuliaAI/MLJTuning.jl/issues/141#issue-951221466
StatisticalTraits.is_wrapper(::Type{<:Resampler}) = true
StatisticalTraits.supports_weights(::Type{<:Resampler}) = missing
StatisticalTraits.supports_class_weights(::Type{<:Resampler}) = missing
StatisticalTraits.is_pure_julia(::Type{<:Resampler}) = true
StatisticalTraits.constructor(::Type{<:Resampler}) = Resampler
StatisticalTraits.input_scitype(::Type{<:Resampler}) = Unknown
StatisticalTraits.target_scitype(::Type{<:Resampler}) = Unknown
StatisticalTraits.package_name(::Type{<:Resampler}) = "MLJBase"
StatisticalTraits.load_path(::Type{<:Resampler}) = "MLJBase.Resampler"
fitted_params(::Resampler, fitresult) = fitresult
evaluate(resampler::Resampler, fitresult) = resampler.compact ?
compactify(fitresult.evaluation) : fitresult.evaluation
function evaluate(machine::Machine{<:Resampler})
if isdefined(machine, :fitresult)
return evaluate(machine.model, machine.fitresult)
else
error("$machine has not been trained.")
end
end
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 11223 | """
color_on()
Enable color and bold output at the REPL, for enhanced display of MLJ objects.
"""
color_on() = (SHOW_COLOR[] = true;)
"""
color_off()
Suppress color and bold output at the REPL for displaying MLJ objects.
"""
color_off() = (SHOW_COLOR[] = false;)
macro colon(p)
Expr(:quote, p)
end
## REGISTERING LABELS OF OBJECTS DURING ASSIGNMENT
"""
@constant x = value
Private method (used in testing).
Equivalent to `const x = value` but registers the binding thus:
```julia
MLJBase.HANDLE_GIVEN_ID[objectid(value)] = :x
```
Registered objects get displayed using the variable name to which they
were bound in calls to `show(x)`, etc.
!!! warning
As with any `const` declaration, binding `x` to new value of
the same type is not prevented and the registration will not be updated.
"""
macro constant(ex)
ex.head == :(=) || throw(error("Expression must be an assignment."))
handle = ex.args[1]
value = ex.args[2]
quote
const $(esc(handle)) = $(esc(value))
id = objectid($(esc(handle)))
HANDLE_GIVEN_ID[id] = @colon $handle
$(esc(handle))
end
end
"""
abbreviated(n)
Display abbreviated versions of integers.
"""
function abbreviated(n)
as_string = string(n)
return "@"*as_string[end-2:end]
end
"""
handle(X)
Return the abbreviated object id (as string), or its registered handle
(as string) if this exists.
"""
function handle(X)
id = objectid(X)
if id in keys(HANDLE_GIVEN_ID)
return string(HANDLE_GIVEN_ID[id])
else
return abbreviated(id)
end
end
## SHOW METHOD FOR NAMED TUPLES
# long version of showing a named tuple:
Base.show(stream::IO, ::MIME"text/plain", t::NamedTuple) = fancy_nt(stream, t)
fancy_nt(t) = fancy_nt(stdout, t) # is this used?
fancy_nt(stream, t::NamedTuple{(), Tuple{}}) = print(stream, "NamedTuple()")
fancy_nt(stream, t) = fancy_nt(stream, t, 0)
fancy_nt(stream, t, n) = show(stream, t)
function fancy_nt(stream, t::NamedTuple, n)
print(stream, "(")
first_item = true
for k in keys(t)
value = getproperty(t, k)
if !first_item
print(stream, crind(n + 1))
else
first_item = false
end
print(stream, "$k = ")
fancy_nt(stream, value, n + length("$k = ") + 1)
print(stream, ",")
end
print(stream, ")")
end
## OTHER EXPOSED SHOW METHODS
# string consisting of carriage return followed by indentation of length n:
crind(n) = "\n"*repeat(' ', max(n, 0))
# trait to tag those objects to be displayed as constructed:
show_as_constructed(::Type) = false
show_as_constructed(::Type{<:Model}) = true
show_compact(::Type) = false
show_as_constructed(object) = show_as_constructed(typeof(object))
show_compact(object) = show_compact(typeof(object))
show_handle(object) = false
# simplified string rep of an Type:
function simple_repr(T)
repr = string(T.name.name)
parameters = T.parameters
# # add abbreviated type parameters:
# p_string = ""
# if length(parameters) > 0
# p = parameters[1]
# if p isa DataType
# p_string = simple_repr(p)
# elseif p isa Symbol
# p_string = string(":", p)
# end
# if length(parameters) > 1
# p_string *= ",…"
# end
# end
# isempty(p_string) || (repr *= "{"*p_string*"}")
return repr
end
# short version of showing a `MLJType` object:
function Base.show(stream::IO, object::MLJType)
str = simple_repr(typeof(object))
L = length(propertynames(object))
if L > 0
first_name = propertynames(object) |> first
value = getproperty(object, first_name)
str *= "($first_name = $value"
L > 1 && (str *= ", …")
str *= ")"
else
str *= "()"
end
show_handle(object) && (str *= " $(handle(object))")
if false # !isempty(propertynames(object))
printstyled(IOContext(stream, :color=> SHOW_COLOR[]),
str, bold=false, color=:blue)
else
print(stream, str)
end
return nothing
end
# longer versions of showing objects
function Base.show(stream::IO, T::MIME"text/plain", object::MLJType)
show(stream, T, object, Val(show_as_constructed(typeof(object))))
end
# fallback:
function Base.show(stream::IO, ::MIME"text/plain", object, ::Val{false})
show(stream, MIME("text/plain"), object)
end
# fallback for MLJType:
function Base.show(stream::IO, ::MIME"text/plain",
object::MLJType, ::Val{false})
_recursive_show(stream, object, 1, DEFAULT_SHOW_DEPTH)
end
function Base.show(stream::IO, ::MIME"text/plain", object, ::Val{true})
fancy(stream, object)
end
fancy(stream::IO, object) = fancy(stream, object, 0,
DEFAULT_AS_CONSTRUCTED_SHOW_DEPTH, 0)
fancy(stream, object, current_depth, depth, n) = show(stream, object)
function fancy(stream, object::MLJType, current_depth, depth, n)
if current_depth == depth
show(stream, object)
else
prefix = MLJModelInterface.name(object)
anti = max(length(prefix) - INDENT)
print(stream, prefix, "(")
names = propertynames(object)
n_names = length(names)
for k in eachindex(names)
value = getproperty(object, names[k])
show_compact(object) ||
print(stream, crind(n + length(prefix) - anti))
print(stream, "$(names[k]) = ")
if show_compact(object)
show(stream, value)
else
fancy(stream, value, current_depth + 1, depth, n + length(prefix)
- anti + length("$k = "))
end
k == n_names || print(stream, ", ")
end
print(stream, ")")
if current_depth == 0 && show_handle(object)
description = " $(handle(object))"
printstyled(IOContext(stream, :color=> SHOW_COLOR[]),
description, bold=false, color=:blue)
end
end
return nothing
end
# version showing a `MLJType` object to arbitrary depth:
Base.show(stream::IO, object::M, depth::Int) where M<:MLJType =
show(stream, object, depth, Val(show_as_constructed(M)))
Base.show(stream::IO, object::MLJType, depth::Int, ::Val{false}) =
_recursive_show(stream, object, 1, depth)
Base.show(stream::IO, object::MLJType, depth::Int, ::Val{true}) =
fancy(stream, object, 0, 100, 0)
# for convenience:
Base.show(object::MLJType, depth::Int) = show(stdout, object, depth)
"""
@more
Entered at the REPL, equivalent to `show(ans, 100)`. Use to get a
recursive description of all properties of the last REPL value.
"""
macro more()
esc(quote
show(Main.ans, 100)
end)
end
## METHODS TO SUPPRESS THE DISPLAY OF LARGE NON-BASETYPE OBJECTS
istoobig(::Any) = true
istoobig(::DataType) = false
istoobig(::UnionAll) = false
istoobig(::Union) = false
istoobig(::Number) = false
istoobig(::Char) = false
istoobig(::Function) = false
istoobig(::Symbol) = false
istoobig(::Distributions.Distribution) = false
istoobig(str::AbstractString) = length(str) > 50
## THE `_show` METHOD
# Note: The `_show` method controls how properties are displayed in
# the table generated by `_recursive_show`. See top of file.
# _show fallback:
function _show(stream::IO, object)
if !istoobig(object)
show(stream, MIME("text/plain"), object)
println(stream)
else
println(stream, "(omitted ", typeof(object), ")")
end
end
_show(stream::IO, object::MLJType) = println(stream, object)
# _show for other types:
istoobig(t::Tuple{Vararg{T}}) where T<:Union{Number,Symbol,Char,MLJType} =
length(t) > 5
function _show(stream::IO, t::Tuple)
if !istoobig(t)
show(stream, MIME("text/plain"), t)
println(stream)
else
println(stream, "(omitted $(typeof(t)) of length $(length(t)))")
end
end
istoobig(A::AbstractArray{T}) where T<:Union{Number,Symbol,Char,MLJType} =
maximum(size(A)) > 5
function _show(stream::IO, A::AbstractArray)
if !istoobig(A)
show(stream, MIME("text/plain"), A)
println(stream)
else
println(stream, "(omitted $(typeof(A)) of size $(size(A)))")
end
end
istoobig(d::Dict{T,Any}) where T <: Union{Number,Symbol,Char,MLJType} =
length(keys(d)) > 5
function _show(stream::IO, d::Dict{T, Any}) where T <: Union{Number,Symbol}
if isempty(d)
println(stream, "empty $(typeof(d))")
elseif !istoobig(d)
println(stream, "omitted $(typeof(d)) with keys: ")
show(stream, MIME("text/plain"), collect(keys(d)))
println(stream)
else
println(stream, "(omitted $(typeof(d)))")
end
end
function _show(stream::IO, v::Array{T, 1}) where T
if !istoobig(v)
show(stream, MIME("text/plain"), v)
println(stream)
else
println(stream, "(omitted Vector{$T} of length $(length(v)))")
end
end
_show(stream::IO, T::DataType) = println(stream, T)
_show(stream::IO, ::Nothing) = println(stream, "nothing")
## THE RECURSIVE SHOW METHOD
"""
_recursive_show(stream, object, current_depth, depth)
**Private method.**
Generate a table of the properties of the `MLJType` object, displaying
each property value by calling the method `_show` on it. The behaviour
of `_show(stream, f)` is as follows:
1. If `f` is itself a `MLJType` object, then its short form is shown
and `_recursive_show` generates a separate table for each of its
properties (and so on, up to a depth of argument `depth`).
2. Otherwise `f` is displayed as "(omitted T)" where `T = typeof(f)`,
unless `istoobig(f)` is false (the `istoobig` fall-back for arbitrary
types being `true`). In the latter case, the long (ie,
MIME"plain/text") form of `f` is shown. To override this behaviour,
overload the `_show` method for the type in question.
"""
function _recursive_show(stream::IO, object::MLJType, current_depth, depth)
if depth == 0 || isempty(propertynames(object))
println(stream, object)
elseif current_depth <= depth
fields = propertynames(object)
print(stream, "#"^current_depth, " ")
show(stream, object)
println(stream, ": ")
# println(stream)
if isempty(fields)
println(stream)
return
end
for fld in fields
fld_string = string(fld)*
" "^(max(0,COLUMN_WIDTH - length(string(fld))))*"=> "
print(stream, fld_string)
if isdefined(object, fld)
_show(stream, getproperty(object, fld))
# println(stream)
else
println(stream, "(undefined)")
# println(stream)
end
end
println(stream)
for fld in fields
if isdefined(object, fld)
subobject = getproperty(object, fld)
if isa(subobject, MLJType) &&
!isempty(propertynames(subobject))
_recursive_show(stream, getproperty(object, fld),
current_depth + 1, depth)
end
end
end
end
return nothing
end
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 4182 | # Machines store data for training as *arguments*. Eg, in the
# supervised case, the first two arguments are always `X` and `y`. An
# argument could be raw data (such as a table) or, in the case of a
# learning network, a "promise" of data (aka "dynamic" data) -
# formally a `Node` object, which is *callable*. For uniformity of
# interface, even raw data is stored in a wrapper that can be called
# to return the object wrapped. The name of the wrapper type is
# `Source`, as these constitute the source nodes of learning networks.
# Here we define the `Source` wrapper, as well as some methods they
# will share with the `Node` type for use in learning networks.
## SOURCE TYPE
abstract type AbstractNode <: MLJType end
abstract type CallableReturning{K} end # scitype for sources and nodes
"""
Source
Type for a learning network source node. Constructed using
[`source`](@ref), as in `source()` or `source(rand(2,3))`.
See also [`source`](@ref), [`Node`](@ref).
"""
mutable struct Source <: AbstractNode
data # training data
scitype::DataType
end
"""
Xs = source(X=nothing)
Define a learning network `Source` object, wrapping some input data
`X`, which can be `nothing` for purposes of exporting the network as a
stand-alone model. For training and testing the unexported
network, appropriate vectors, tables, or other data containers are
expected.
The calling behaviour of a `Source` object is this:
```julia
Xs() = X
Xs(rows=r) = selectrows(X, r) # eg, X[r,:] for a DataFrame
Xs(Xnew) = Xnew
```
See also: [`MLJBase.prefit`](@ref), [`sources`](@ref),
[`origins`](@ref), [`node`](@ref).
"""
source(X) = Source(X, scitype(X))
source() = source(nothing)
source(Xs::Source; args...) = Xs
ScientificTypes.scitype(X::Source) = CallableReturning{X.scitype}
ScientificTypes.elscitype(X::Source) = X.scitype
nodes(X::Source) = [X, ]
Base.isempty(X::Source) = X.data === nothing
nrows_at_source(X::Source) = nrows(X.data)
color(::Source) = :yellow
# make source nodes callable:
function (X::Source)(; rows=:)
rows == (:) && return X.data
return selectrows(X.data, rows)
end
function (X::Source)(Xnew)
return Xnew
end
# return a string of diagnostics for the call `X(input...; kwargs...)`
diagnostic_table_sources(X::AbstractNode) =
"Learning network sources:\n"*
"source\tscitype\n"*
"-------------------------------------------\n"*
reduce(*, ("$s\t$(scitype(s()))\n" for s in sources(X)))
function diagnostics(X::AbstractNode, input...; kwargs...)
raw_args = map(X.args) do arg
arg(input...; kwargs...)
end
_sources = sources(X)
scitypes = scitype.(raw_args)
mach = X.machine
table0 = if !isnothing(mach)
model = mach.model
_input = input_scitype(model)
_target = target_scitype(model)
_output = output_scitype(model)
"""
Model ($model):
input_scitype = $_input
target_scitype = $_target
output_scitype = $_output
"""
else
""
end
table1 = "Incoming data:\n"*
"arg of $(X.operation)\tscitype\n"*
"-------------------------------------------\n"*
reduce(*, ("$(X.args[j])\t$(scitypes[j])\n" for j in eachindex(X.args)))
table2 = diagnostic_table_sources(X)
return """
$table0
$table1
$table2"""
end
"""
rebind!(s, X)
Attach new data `X` to an existing source node `s`. Not a public
method.
"""
function rebind!(s::Source, X)
s.data = X
s.scitype = scitype(X)
return s
end
origins(s::Source) = [s,]
## DISPLAY FOR SOURCES AND OTHER ABSTRACT NODES
# show within other objects:
function Base.show(stream::IO, object::AbstractNode)
str = simple_repr(typeof(object))
show_handle(object) && (str *= " $(handle(object))")
if false
printstyled(IOContext(stream, :color=> SHOW_COLOR[]),
str, bold=false, color=:blue)
else
print(stream, str)
end
return nothing
end
show_handle(::Source) = true
# show when alone:
function Base.show(stream::IO, ::MIME"text/plain", source::Source)
show(stream, source)
print(stream, " \u23CE `$(elscitype(source))`")
return nothing
end
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 14958 | function finaltypes(T::Type)
s = InteractiveUtils.subtypes(T)
if isempty(s)
return [T, ]
else
return reduce(vcat, [finaltypes(S) for S in s])
end
end
"""
flat_values(t::NamedTuple)
View a nested named tuple `t` as a tree and return, as a tuple, the values
at the leaves, in the order they appear in the original tuple.
```julia-repl
julia> t = (X = (x = 1, y = 2), Y = 3);
julia> flat_values(t)
(1, 2, 3)
```
"""
function flat_values(params::NamedTuple)
values = []
for k in keys(params)
value = getproperty(params, k)
if value isa NamedTuple
append!(values, flat_values(value))
else
push!(values, value)
end
end
return Tuple(values)
end
## RECURSIVE VERSIONS OF getproperty and setproperty!
# applying the following to `:(a.b.c)` returns `(:(a.b), :c)`
function reduce_nested_field(ex)
ex.head == :. || throw(ArgumentError("Expected a nested field expression, as in `:(a.b.c)`. "))
tail = ex.args[2]
tail isa QuoteNode || throw(ArgumentError("Expected a nested field expression, as in `:(a.b.c)`. "))
field = tail.value
field isa Symbol || throw(ArgumentError("Expected a nested field expression, as in `:(a.b.c)`. "))
subex = ex.args[1]
return (subex, field)
end
"""
MLJBase.prepend(::Symbol, ::Union{Symbol,Expr,Nothing})
For prepending symbols in expressions like `:(y.w)` and `:(x1.x2.x3)`.
```julia-repl
julia> prepend(:x, :y)
:(x.y)
julia> prepend(:x, :(y.z))
:(x.y.z)
julia> prepend(:w, ans)
:(w.x.y.z)
```
If the second argument is `nothing`, then `nothing` is returned.
"""
prepend(s::Symbol, ::Nothing) = nothing
prepend(s::Symbol, t::Symbol) = Expr(:(.), s, QuoteNode(t))
prepend(s::Symbol, ex::Expr) = Expr(:(.), prepend(s, ex.args[1]), ex.args[2])
"""
recursive_getproperty(object, nested_name::Expr)
Call getproperty recursively on `object` to extract the value of some
nested property, as in the following example:
```julia-repl
julia> object = (X = (x = 1, y = 2), Y = 3);
julia> recursive_getproperty(object, :(X.y))
2
```
"""
recursive_getproperty(obj, property::Symbol) = getproperty(obj, property)
function recursive_getproperty(obj, ex::Expr)
subex, field = reduce_nested_field(ex)
return recursive_getproperty(recursive_getproperty(obj, subex), field)
end
recursive_getpropertytype(obj, property::Symbol) = typeof(getproperty(obj, property))
recursive_getpropertytype(obj::T, property::Symbol) where T <: Model = begin
model_type = typeof(obj)
property_names = fieldnames(model_type)
property_types = model_type.types
for (t, n) in zip(property_types, property_names)
n == property && return t
end
error("Property $property not found")
end
function recursive_getpropertytype(obj, ex::Expr)
subex, field = reduce_nested_field(ex)
return recursive_getpropertytype(recursive_getproperty(obj, subex), field)
end
"""
recursively_setproperty!(object, nested_name::Expr, value)
Set a nested property of an `object` to `value`, as in the following example:
```julia-repl
julia> mutable struct Foo
X
Y
end
julia> mutable struct Bar
x
y
end
julia> object = Foo(Bar(1, 2), 3)
Foo(Bar(1, 2), 3)
julia> recursively_setproperty!(object, :(X.y), 42)
42
julia> object
Foo(Bar(1, 42), 3)
```
"""
recursive_setproperty!(obj, property::Symbol, value) =
setproperty!(obj, property, value)
function recursive_setproperty!(obj, ex::Expr, value)
subex, field = reduce_nested_field(ex)
last_obj = recursive_getproperty(obj, subex)
return recursive_setproperty!(last_obj, field, value)
end
"""
check_same_nrows(X, Y)
Internal function to check two objects, each a vector or a matrix,
have the same number of rows.
"""
@inline function check_same_nrows(X, Y)
size(X, 1) == size(Y, 1) ||
throw(DimensionMismatch("The two objects don't have the same " *
"number of rows."))
return nothing
end
"""
_permute_rows(obj, perm)
Internal function to return a vector or matrix with permuted rows given
the permutation `perm`.
"""
function _permute_rows(obj::AbstractVecOrMat, perm::Vector{Int})
check_same_nrows(obj, perm)
obj isa AbstractVector && return obj[perm]
obj[perm, :]
end
"""
shuffle_rows(X::AbstractVecOrMat,
Y::AbstractVecOrMat;
rng::AbstractRNG=Random.GLOBAL_RNG)
Return copies of `X` and `Y` with rows shuffled according to a common random
permutation. An optional random number generator can be specified using the
`rng` argument.
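For example (the exact permutation obtained depends on the RNG state):
```julia-repl
julia> X = [1 2; 3 4; 5 6]; Y = [:a, :b, :c];

julia> Xs, Ys = shuffle_rows(X, Y; rng=MersenneTwister(0));
```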
"""
function shuffle_rows(
X::AbstractVecOrMat, Y::AbstractVecOrMat;
rng::AbstractRNG=Random.GLOBAL_RNG
)
check_same_nrows(X, Y)
perm_length = size(X, 1)
perm = randperm(rng, perm_length)
return _permute_rows(X, perm), _permute_rows(Y, perm)
end
"""
init_rng(rng)
Create an `AbstractRNG` from `rng`. If `rng` is a non-negative `Integer`, return
a `MersenneTwister` random number generator seeded with `rng`; if `rng` is an
`AbstractRNG` object, return `rng`; otherwise throw an error.
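For example:
```julia-repl
julia> init_rng(123) isa MersenneTwister
true

julia> rng = MersenneTwister(42); init_rng(rng) === rng
true
```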
"""
function init_rng(rng)
    if (rng isa Integer && rng >= 0)
return Random.MersenneTwister(rng)
elseif !(rng isa AbstractRNG)
throw(
ArgumentError(
"`rng` must either be a non-negative `Integer`, "*
"or an `AbstractRNG` object."
)
)
end
return rng
end
## FOR PRETTY PRINTING
# of columns:
function pretty(io::IO, X; showtypes=true, alignment=:l, kwargs...)
names = schema(X).names |> collect
if showtypes
types = schema(X).types |> collect
scitypes = schema(X).scitypes |> collect
header = (names, types, scitypes)
else
header = (names, )
end
show_color = MLJBase.SHOW_COLOR[]
color_off()
try
PrettyTables.pretty_table(io, MLJBase.matrix(X),
header=header;
alignment=alignment,
kwargs...)
catch
println("Trouble displaying table.")
end
show_color ? color_on() : color_off()
return nothing
end
pretty(X; kwargs...) = pretty(stdout, X; kwargs...)
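# For example, `pretty((x = [1, 2], y = ["a", "b"]))` prints the named tuple as
# a table to `stdout`, with a three-row header (column names, types, scitypes).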
# of long vectors (returns a compact string version of a vector):
function short_string(v::Vector)
L = length(v)
if L <= 3
middle = join(v, ", ")
else
middle = string(round3(v[1]), ", ", round3(v[2]),
", ..., ", round3(v[end]))
end
return "[$middle]"
end
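# For example (`round3` is assumed to be defined elsewhere in this file):
#
#     short_string([1, 2, 3])       # "[1, 2, 3]"
#     short_string(collect(1:100))  # "[1, 2, ..., 100]" (endpoints passed through round3)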
"""
sequence_string(itr, n=3)
Return a "sequence" string from the first `n` elements generated
by `itr`.
```julia-repl
julia> MLJBase.sequence_string(1:10, 4)
"1, 2, 3, 4, ..."
```
**Private method.**
"""
function sequence_string(itr::Itr, n=3) where Itr
n > 1 || throw(ArgumentError("Cutoff must be at least 2. "))
I = Base.IteratorSize(Itr)
I isa Base.HasLength ||
I isa Base.HasShape ||
        I isa Base.IsInfinite ||
        throw(ArgumentError("Unsupported iterator. "))
vals = String[]
i = 0
earlystop = false
for x in itr
i += 1
if i === n + 1
earlystop = true
break
else
push!(vals, string(x))
end
end
ret = join(vals, ", ")
earlystop && (ret *= ", ...")
return ret
end
## UNWINDING ITERATORS
"""
unwind(iterators...)
Represent all possible combinations of values generated by `iterators`
as rows of a matrix `A`. In more detail, `A` has one column for each
iterator in `iterators` and one row for each distinct possible
combination of values taken on by the iterators. Elements in the first
column cycle fastest, those in the last column slowest.
### Example
```julia-repl
julia> iterators = ([1, 2], ["a","b"], ["x", "y", "z"]);
julia> MLJTuning.unwind(iterators...)
12×3 Matrix{Any}:
1 "a" "x"
2 "a" "x"
1 "b" "x"
2 "b" "x"
1 "a" "y"
2 "a" "y"
1 "b" "y"
2 "b" "y"
1 "a" "z"
2 "a" "z"
1 "b" "z"
2 "b" "z"
```
"""
function unwind(iterators...)
n_iterators = length(iterators)
iterator_lengths = map(length, iterators)
# product of iterator lengths:
L = reduce(*, iterator_lengths)
L != 0 || error("Parameter iterator of length zero encountered.")
A = Array{Any}(undef, L, n_iterators) ## TODO: this can be done better
n_iterators != 0 || return A
inner = 1
outer = L
for j in 1:n_iterators
outer = outer ÷ iterator_lengths[j]
A[:,j] = repeat(iterators[j], inner=inner, outer=outer)
inner *= iterator_lengths[j]
end
return A
end
"""
chunks(range, n)
Split an `AbstractRange` into `n` subranges of approximately equal length.
### Example
```julia-repl
julia> collect(chunks(1:5, 2))
2-element Vector{UnitRange{Int64}}:
1:3
4:5
```
**Private method**
"""
function chunks(c::AbstractRange, n::Integer)
n < 1 && throw(ArgumentError("cannot split range into $n subranges"))
return Chunks(c, divrem(length(c), Int(n))...)
end
struct Chunks{T <: AbstractRange}
range::T
div::Int
rem::Int
end
Base.eltype(::Type{Chunks{T}}) where {T <: AbstractRange} = T
function Base.length(itr::Chunks{<:AbstractRange})
l = length(itr.range)
return itr.div == 0 ? l : div(l - itr.rem, itr.div)
end
function Base.iterate(itr::Chunks{<:AbstractRange}, state=(1,itr.rem))
first(state) > length(itr.range) && return nothing
rem = last(state)
r = min(first(state) + itr.div - (rem > 0 ? 0 : 1),
length(itr.range))
return @inbounds itr.range[first(state):r], (r + 1, rem-1)
end
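# For example, a split that does not divide evenly front-loads the remainder:
#
#     julia> collect(chunks(1:7, 3))
#     3-element Vector{UnitRange{Int64}}:
#      1:3
#      4:5
#      6:7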
"""
available_name(modl::Module, name::Symbol)
Function to replace, if necessary, a given `name` with a modified one
that ensures it is not the name of any existing object in the global
scope of `modl`. Modifications are created with numerical suffixes.
"""
function available_name(modl, name)
new_name = name
i = 1
while isdefined(modl, Symbol(new_name))
i += 1
new_name = string(name, i) |> Symbol
end
return new_name
end
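# A hypothetical sketch: in a module `M` where `foo` is already defined but
# `foo2` is not, `available_name(M, :foo)` returns `:foo2`.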
"""
    generate_name!(M, existing_names; only=Any, substitute=:f)
Given a type `M` (e.g., `MyEvenInteger{N}`) return a symbolic,
snake-case, representation of the type name (such as
`my_even_integer`). The symbol is pushed to `existing_names`, which
must be an `AbstractVector` to which a `Symbol` can be pushed.
If the snake-case representation already exists in `existing_names` a
suitable integer is appended to the name.
If `only` is specified, then the operation is restricted to those `M`
for which `M isa only`. In all other cases the symbolic name is
generated using `substitute` as the base symbol.
```julia-repl
julia> existing_names = [];
julia> generate_name!(Vector{Int}, existing_names)
:vector
julia> generate_name!(Vector{Int}, existing_names)
:vector2
julia> generate_name!(AbstractFloat, existing_names)
:abstract_float
julia> generate_name!(Int, existing_names, only=Array, substitute=:not_array)
:not_array
julia> generate_name!(Int, existing_names, only=Array, substitute=:not_array)
:not_array2
```
"""
function generate_name!(M::DataType,
existing_names;
only=Any,
substitute=:f)
if M <: only
str = split(string(M), '{') |> first
candidate = split(str, '.') |> last |> snakecase |> Symbol
else
candidate = substitute
end
candidate in existing_names ||
(push!(existing_names, candidate); return candidate)
n = 2
new_candidate = candidate
while true
new_candidate = string(candidate, n) |> Symbol
new_candidate in existing_names || break
n += 1
end
push!(existing_names, new_candidate)
return new_candidate
end
generate_name!(model, existing_names; kwargs...) =
generate_name!(typeof(model), existing_names; kwargs...)
# # OBSERVATION VS CONTAINER HACKING TOOLS

# The following tools are used to bridge the gap between the old paradigm of
# prescribing the scitype of containers of observations, and the LearnAPI.jl
# paradigm of prescribing only the scitype of the observations themselves. This
# is needed because measures are now taken from StatisticalMeasures.jl, which
# follows the LearnAPI.jl paradigm, but model `target_scitype` refers to
# containers.
"""
observation(S)
*Private method.*
Tries to infer the per-observation scitype from the scitype of `S`, when `S` is
known to be the scitype of some container with multiple observations. The
observation scitype for a table is here understood as the scitype of a row
converted to a vector. Return `Unknown` if unable to draw a reliable inference.
"""
observation(::Type) = Unknown
observation(::Type{AbstractVector{S}}) where S = S
observation(::Type{AbstractArray{S,N}}) where {S,N} = AbstractArray{S,N-1}
for T in [:Continuous, :Count, :Finite, :Infinite, :Multiclass, :OrderedFactor]
TM = "Union{Missing,$T}" |> Meta.parse
for S in [T, TM]
quote
observation(::Type{AbstractVector{<:$S}}) = $S
observation(::Type{AbstractArray{<:$S,N}}) where N = AbstractArray{<:$S,N-1}
observation(::Type{Table{<:AbstractVector{<:$S}}}) = AbstractVector{<:$S}
end |> eval
end
end
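# For example:
#
#     observation(AbstractVector{<:Count})          # Count
#     observation(AbstractMatrix{<:Continuous})     # AbstractVector{<:Continuous}
#     observation(Table{<:AbstractVector{<:Count}}) # AbstractVector{<:Count}
#     observation(Int)                              # Unknown (fallback)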
# note that in Julia `f(::Type{AbstractVector{<:T}}) where T = T` does not have
# a well-formed left-hand side
"""
guess_observation_scitype(y)
*Private method.*
If `y` is an `AbstractArray`, return the scitype of `y[:, :, ..., :, 1]`. If `y`
is a table, return the scitype of the first row, converted to a vector, unless
this row has `missing` elements, in which case return `Unknown`.
In all other cases, return `Unknown`.
```julia-repl
julia> guess_observation_scitype([missing, 1, 2, 3])
Union{Missing, Count}
julia> guess_observation_scitype(rand(3, 2))
AbstractVector{Continuous}
julia> guess_observation_scitype((x=rand(3), y=rand(Bool, 3)))
AbstractVector{Union{Continuous, Count}}
julia> guess_observation_scitype((x=[missing, 1, 2], y=[1, 2, 3]))
Unknown
```
"""
guess_observation_scitype(y) = guess_observation_scitype(y, Val(Tables.istable(y)))
guess_observation_scitype(y, ::Any) = Unknown
guess_observation_scitype(y::AbstractArray, ::Val{false}) = observation(scitype(y))
function guess_observation_scitype(table, ::Val{true})
row = Tables.subset(table, 1, viewhint=false) |> collect
E = eltype(row)
nonmissingtype(E) == E || return Unknown
scitype(row)
end
"""
    guess_model_target_observation_scitype(model)
*Private method*
Try to infer a lowest upper bound on the scitype of target observations
acceptable to `model`, by inspecting `target_scitype(model)`. Return `Unknown`
if unable to draw a reliable inference.
The observation scitype for a table is here understood as the scitype of a row
converted to a vector.
"""
guess_model_target_observation_scitype(model) = observation(target_scitype(model))
## INSPECTING LEARNING NETWORKS
"""
tree(N)
Return a named-tuple representation of the ancestor tree of `N`
(including training edges).
"""
function tree(W::Node)
mach = W.machine
if mach === nothing
value2 = nothing
endkeys = []
endvalues = []
else
value2 = mach.model
endkeys = (Symbol("train_arg", i) for i in eachindex(mach.args))
endvalues = (tree(arg) for arg in mach.args)
end
keys = tuple(:operation, :model,
(Symbol("arg", i) for i in eachindex(W.args))...,
endkeys...)
values = tuple(W.operation, value2,
(tree(arg) for arg in W.args)...,
endvalues...)
return NamedTuple{keys}(values)
end
tree(s::Source) = (source = s,)
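# A minimal sketch (assuming the `source` and `node` constructors from this
# package): for `W = node(sum, source([1, 2, 3]))`, which has no machine
# attached, `tree(W)` has the form
# `(operation = sum, model = nothing, arg1 = (source = Source @...,))`.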
# """
# args(tree; train=false)
# Return a vector of the top level args of the tree associated with a node.
# If `train=true`, return the `train_args`.
# """
# function args(tree; train=false)
# keys_ = filter(keys(tree) |> collect) do key
# match(Regex("^$("train_"^train)arg[0-9]*"), string(key)) !== nothing
# end
# return [getproperty(tree, key) for key in keys_]
# end
"""
MLJBase.models(N::AbstractNode)
A vector of all models referenced by a node `N`, each model appearing
exactly once.
"""
function models(W::AbstractNode)
models_ = filter(flat_values(tree(W)) |> collect) do model
model isa Union{Model,Symbol}
end
return unique(models_)
end
"""
sources(N::AbstractNode)
A vector of all sources referenced by calls `N()` and `fit!(N)`. These
are the sources of the ancestor graph of `N` when including training
edges.
Not to be confused with `origins(N)`, in which training edges are
excluded.
See also: [`origins`](@ref), [`source`](@ref).
"""
function sources(W::AbstractNode; kind=:any)
if kind == :any
sources_ = filter(flat_values(tree(W)) |> collect) do value
value isa Source
end
else
sources_ = filter(flat_values(tree(W)) |> collect) do value
value isa Source && value.kind == kind
end
end
return unique(sources_)
end
"""
machines(N::AbstractNode [, model::Model])
List all machines in the ancestor graph of node `N`, optionally
restricting to those machines whose corresponding model matches the
specified `model`.
Here two models *match* if they have the same, possibly nested
hyperparameters, or, more precisely, if
`MLJModelInterface.is_same_except(m1, m2)` is `true`.
See also `MLJModelInterface.is_same_except`.
"""
function machines(W::Node, model=nothing)
if W.machine === nothing
machs = vcat((machines(arg) for arg in W.args)...) |> unique
else
machs = vcat(Machine[W.machine, ],
(machines(arg) for arg in W.args)...,
(machines(arg) for arg in W.machine.args)...) |> unique
end
model === nothing && return machs
return filter(machs) do mach
mach.model == model
end
end
args(::Source) = []
args(N::Node) = N.args
train_args(::Source) = []
train_args(N::Node{<:Machine}) = N.machine.args
train_args(N::Node{Nothing}) = []
"""
children(N::AbstractNode, y::AbstractNode)
List all (immediate) children of node `N` in the ancestor graph of `y`
(training edges included).
"""
children(N::AbstractNode, y::AbstractNode) = filter(nodes(y)) do W
N in args(W) || N in train_args(W)
end |> unique
"""
lower_bound(type_itr)
Return the minimum type in the collection `type_itr` if one exists
(minimum in the sense of `<:`). If `type_itr` is empty, return `Any`;
if it is non-empty but has no minimum, return the universal lower bound `Union{}`.
"""
function lower_bound(Ts)
isempty(Ts) && return Any
sorted = sort(collect(Ts), lt=<:)
candidate = first(sorted)
all(T -> candidate <: T, sorted[2:end]) && return candidate
return Union{}
end
function _lower_bound(Ts)
Unknown in Ts && return Unknown
return lower_bound(Ts)
end
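# For example:
#
#     lower_bound([Int, Integer, Real]) # Int
#     lower_bound([Int, String])        # Union{} (the collection has no minimum)
#     lower_bound(Type[])               # Any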
MLJModelInterface.input_scitype(N::Node) = Unknown
MLJModelInterface.input_scitype(N::Node{<:Machine}) =
input_scitype(N.machine.model)