licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 662 |
# Root type for uncertain values estimated empirically from a sample.
# Subtypes are expected to carry `distribution` and `values` fields
# (both are read by `summarise` below).
abstract type AbstractEmpiricalValue <: AbstractUncertainValue end

"""
    summarise(ed::AbstractEmpiricalValue) -> String

Return a one-line summary of `ed`: its concrete type, the type of the
fitted `distribution`, and the number of sample `values` it was
estimated from.
"""
function summarise(ed::AbstractEmpiricalValue)
    _type = typeof(ed)
    disttype = typeof(ed.distribution)
    l = length(ed.values)
    summary = "$_type estimated as a $disttype from $l values"
    return summary
end

# Display via the summary; note `println` appends a trailing newline
# (other `show` methods in this package use `print`).
Base.show(io::IO, ed::AbstractEmpiricalValue) = println(io, summarise(ed))
# Plots.jl recipe: render an empirical uncertain value as a histogram of
# `n_samples` random draws from its fitted distribution, using `nbins` bins.
@recipe function plot_empiricalval(empval::AbstractEmpiricalValue;
        nbins = 200, n_samples = 10000)
    dist = empval.distribution
    @series begin
        seriestype := :histogram
        # `fit(Histogram, ...)` — presumably StatsBase's histogram fit;
        # TODO(review) confirm which `fit` is in scope here.
        fit(Histogram, rand(dist, n_samples), nbins = nbins)
    end
end
export
AbstractEmpiricalValue
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 178 | """
AbstractPopulation
An abstract type for population-based uncertain values.
"""
abstract type AbstractPopulation <: AbstractUncertainValue end
export AbstractPopulation | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 3196 |
import IntervalArithmetic: interval
import Distributions
import StatsBase
"""
AbstractScalarPopulation
An abstract type for population-based uncertain scalar values.
"""
abstract type AbstractScalarPopulation{T, PW} <: AbstractPopulation end
# Make a scalar population behave as a 1-based, iterable collection by
# delegating to its `values` field.
Base.length(p::AbstractScalarPopulation) = length(p.values)
Base.getindex(p::AbstractScalarPopulation, i) = p.values[i]
Base.firstindex(p::AbstractScalarPopulation) = 1
Base.lastindex(p::AbstractScalarPopulation) = length(p.values)
Base.eachindex(p::AbstractScalarPopulation) = Base.OneTo(lastindex(p))
# Iteration state is forwarded directly to the underlying vector.
Base.iterate(p::AbstractScalarPopulation, state = 1) = iterate(p.values, state)
"""
    summarise(p::AbstractScalarPopulation) -> String

Return a short description of `p`: its concrete type and how many
member values it holds.
"""
function summarise(p::AbstractScalarPopulation)
    n_members = length(p.values)
    return string(typeof(p), " containing ", n_members, " values")
end

# Compact display via the one-line summary (no trailing newline).
Base.show(io::IO, p::AbstractScalarPopulation) = print(io, summarise(p))
# Extrema of a population.
#
# BUG FIX: the original generic fallbacks were
# `Base.minimum(p::AbstractScalarPopulation) = minimum(p)` — a direct
# self-call that overflows the stack for any member type not covered by
# a more specific method below. They now delegate to the underlying
# `values` vector, which is identical behaviour for the numeric case.
Base.minimum(p::AbstractScalarPopulation) = minimum(p.values)
Base.maximum(p::AbstractScalarPopulation) = maximum(p.values)

# Numeric members: extrema are the extrema of the raw values.
Base.minimum(pop::AbstractScalarPopulation{T, PW} where {T <: Number, PW}) =
    minimum(pop.values)
Base.maximum(pop::AbstractScalarPopulation{T, PW} where {T <: Number, PW}) =
    maximum(pop.values)

# Uncertain-value members: take the extrema over each member's own
# minimum/maximum (i.e. the outer hull of the member supports).
Base.minimum(pop::AbstractScalarPopulation{T, PW} where {T <: AbstractUncertainValue, PW}) =
    minimum([minimum(uv) for uv in pop])
Base.maximum(pop::AbstractScalarPopulation{T, PW} where {T <: AbstractUncertainValue, PW}) =
    maximum([maximum(uv) for uv in pop])

# The support of a population is the interval spanned by its extrema.
Distributions.support(p::AbstractScalarPopulation) = interval(minimum(p), maximum(p))
# Numeric population: a draw is a weighted sample of the raw values.
function Base.rand(pop::AbstractScalarPopulation{T, PW}) where {T <: Number, PW}
    StatsBase.sample(pop.values, pop.probs)
end

# `n` weighted samples of the raw values.
function Base.rand(pop::AbstractScalarPopulation{T, PW}, n::Int) where {T <: Number, PW}
    StatsBase.sample(pop.values, pop.probs, n)
end

# Population whose members are themselves uncertain values: first pick a
# member (weighted by `pop.probs`), then draw one realisation from it.
function Base.rand(pop::AbstractScalarPopulation{T, PW}) where {T <: AbstractUncertainValue, PW}
    # Sample one of the populations, then draw a random number from it
    popmember_idx = StatsBase.sample(1:length(pop), pop.probs)
    rand(pop[popmember_idx])
end

# `n` draws, each from an independently chosen member.
function Base.rand(pop::AbstractScalarPopulation{T, PW}, n::Int) where {T <: AbstractUncertainValue, PW}
    n_members = length(pop)
    # Draws are collected as Float64 — assumes members realise to reals;
    # TODO(review) confirm for non-numeric member types.
    draws = zeros(Float64, n)
    for i = 1:n
        # Sample one of the populations, then draw a random number from it
        sample_pop_idx = StatsBase.sample(1:n_members, pop.probs)
        draws[i] = rand(pop[sample_pop_idx])
    end
    return draws
end
# Generic fallbacks for member types not covered by the more specific
# methods above (the `T <: Number` and `T <: AbstractUncertainValue`
# methods take dispatch precedence).
# NOTE(review): these duplicate the `T <: AbstractUncertainValue` bodies
# verbatim — the trailing comments show the signatures they were relaxed
# from; consider consolidating.
function Base.rand(pop::AbstractScalarPopulation{T, PW}) where {T <: Any, PW}#{T <: AbstractUncertainValue, PW}
    # Sample one of the populations, then draw a random number from it
    popmember_idx = StatsBase.sample(1:length(pop), pop.probs)
    rand(pop[popmember_idx])
end

function Base.rand(pop::AbstractScalarPopulation{T, PW}, n::Int) where {T <: Any, PW}#{T <: AbstractUncertainValue, PW}
    n_members = length(pop)
    # Results are coerced into a Float64 buffer — assumes `rand` on a
    # member yields a real number; TODO(review) confirm.
    draws = zeros(Float64, n)
    for i = 1:n
        # Sample one of the populations, then draw a random number from it
        sample_pop_idx = StatsBase.sample(1:n_members, pop.probs)
        draws[i] = rand(pop[sample_pop_idx])
    end
    return draws
end
export AbstractScalarPopulation | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1519 | import Base.rand
import Distributions, StatsBase
import IntervalArithmetic: interval
import Statistics
# Uncertain scalar backed by a known theoretical distribution; subtypes
# are expected to carry a `distribution` field.
abstract type TheoreticalDistributionScalarValue <: AbstractUncertainValue end

# Sampling delegates to the wrapped distribution.
Base.rand(uv::TheoreticalDistributionScalarValue) = rand(uv.distribution)
Base.rand(uv::TheoreticalDistributionScalarValue, n::Int) = rand(uv.distribution, n)

# Support as an IntervalArithmetic interval; assumes the distribution's
# support exposes `lb`/`ub` fields (Distributions.RealInterval).
function Distributions.support(fd::TheoreticalDistributionScalarValue)
    s = support(fd.distribution)
    interval(s.lb, s.ub)
end

# All summary statistics forward to the underlying distribution.
Distributions.pdf(fd::TheoreticalDistributionScalarValue, x) = pdf(fd.distribution, x)
Statistics.mean(fd::TheoreticalDistributionScalarValue) = mean(fd.distribution)
Statistics.median(fd::TheoreticalDistributionScalarValue) = median(fd.distribution)
Statistics.middle(fd::TheoreticalDistributionScalarValue) = middle(fd.distribution)
Statistics.quantile(fd::TheoreticalDistributionScalarValue, q) = quantile(fd.distribution, q)
Statistics.std(fd::TheoreticalDistributionScalarValue) = std(fd.distribution)
Statistics.var(fd::TheoreticalDistributionScalarValue) = var(fd.distribution)
StatsBase.mode(fd::TheoreticalDistributionScalarValue) = mode(fd.distribution)

# Abstract parents keyed by the number of distribution parameters;
# `S` is the Distributions.jl value support (Discrete/Continuous) and
# `T1`..`T3` are the parameter types.
abstract type AbstractUncertainOneParameterScalarValue{S <: ValueSupport, T1} <: TheoreticalDistributionScalarValue end
abstract type AbstractUncertainTwoParameterScalarValue{S <: ValueSupport, T1, T2} <: TheoreticalDistributionScalarValue end
abstract type AbstractUncertainThreeParameterScalarValue{S <: ValueSupport, T1, T2, T3} <: TheoreticalDistributionScalarValue end
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 75 |
abstract type AbstractUncertainScalarKDE{T} <: AbstractEmpiricalValue end
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 2789 | using Distributions
using StaticArrays
import Printf.@sprintf
import Distributions.RealInterval
abstract type AbstractUncertainValue end
Broadcast.broadcastable(d::AbstractUncertainValue) = Ref(d)
##############################
# Support of an observation
##############################
import Distributions.support
"""
support(o::AbstractUncertainValue) -> IntervalArithmetic.Interval
Find the support of the uncertain observation.
"""
function support(o::AbstractUncertainValue)
supp = support(o.distribution)
lowerbound = supp.lb
upperbound = supp.ub
@interval(lowerbound, upperbound)
end
export support
##############################
# Intersection of the supports
##############################
"""
support_overlap(o1::AbstractUncertainValue, o2::AbstractUncertainValue)
Compute the overlap in the supports of two uncertain observations.
"""
"""
    support_overlap(uval1::AbstractUncertainValue, uval2::AbstractUncertainValue)

Return the intersection of the supports of the two uncertain values.
"""
function support_overlap(uval1::AbstractUncertainValue, uval2::AbstractUncertainValue)
    # Normalise a `Distributions.RealInterval` support to an
    # IntervalArithmetic interval so `∩` is well-defined.
    as_interval(s) = s isa RealInterval ? interval(s...) : s
    return as_interval(support(uval1)) ∩ as_interval(support(uval2))
end
export support_overlap
##################################################################
# Intersection of two UncertainValues as a mixture model
##################################################################
import Base.intersect, Base.∩
"""
intersect(o1::AbstractUncertainValue, o2::AbstractUncertainValue)
Compute the intersection between two uncertain observations probabilistically.
The intersection is represented as a mixture model of the distributions
furnishing the observations.
"""
function intersect(o1::AbstractUncertainValue, o2::AbstractUncertainValue)
    # Create a mixture model representing the intersection.
    # `∅` is the empty interval; disjoint supports mean the observations
    # cannot overlap, so a mixture makes no sense.
    if support_overlap(o1, o2) == ∅
        throw(DomainError((o1, o2), "intersect(o1, o2) == ∅. Cannot compute mixture model."))
    end
    # Equal-weight mixture of the two underlying distributions.
    MixtureModel([o1.distribution, o2.distribution])
end
export intersect
###################
# Pretty printing
###################
"""
    summarise(o::AbstractUncertainValue) -> String

Return the name of `o`'s concrete type as its summary.
"""
summarise(o::AbstractUncertainValue) = string(typeof(o))

# Compact display via the summary.
Base.show(io::IO, q::AbstractUncertainValue) = print(io, summarise(q))

# Uncertain scalar values are one-dimensional.
dimension(usv::AbstractUncertainValue) = 1
# Extremes of a single uncertain value are those of its distribution.
Base.max(uv::AbstractUncertainValue) = maximum(uv.distribution)
Base.min(uv::AbstractUncertainValue) = minimum(uv.distribution)
Base.minimum(uv::AbstractUncertainValue) = minimum(uv.distribution)
Base.maximum(uv::AbstractUncertainValue) = maximum(uv.distribution)

# BUG FIX: use `<:` so vectors of *concrete* uncertain-value subtypes
# (e.g. `Vector{SomeConcreteUV}`) dispatch here too. The original
# `AbstractVector{AbstractUncertainValue}` is invariant and only matched
# vectors whose element type was exactly the abstract type.
Base.maximum(uvs::AbstractVector{<:AbstractUncertainValue}) =
    maximum([maximum(uv) for uv in uvs])
Base.minimum(uvs::AbstractVector{<:AbstractUncertainValue}) =
    minimum([minimum(uv) for uv in uvs])
export
AbstractUncertainValue
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 287 | using Distributions
include("assigndist_normal.jl")
include("assigndist_uniform.jl")
include("assigndist_beta.jl")
include("assigndist_betaprime.jl")
include("assigndist_betabinomial.jl")
include("assigndist_binomial.jl")
include("assigndist_gamma.jl")
include("assigndist_frechet.jl")
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 336 | """
assigndist_beta(α, β; trunc_lower = -Inf, trunc_upper = Inf)
Assign parameters to a Beta distribution with parameters `α` and `β`, optionally
truncating the distribution.
"""
function assigndist_beta(α, β; trunc_lower = -Inf, trunc_upper = Inf)
    # Build the Beta distribution first, then truncate it to the requested
    # bounds (the infinite defaults leave it effectively untruncated).
    base_dist = Beta(α, β)
    return truncated(base_dist, trunc_lower, trunc_upper)
end
export assigndist_beta
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 405 | """
assigndist_betabinomial(n, α, β; trunc_lower = -Inf, trunc_upper = Inf)
Assign parameters to a beta binomial distribution with `n` trials and
shape parameters `α` and `β`, optionally truncating the distribution.
"""
function assigndist_betabinomial(n, α, β; trunc_lower = -Inf, trunc_upper = Inf)
    # BetaBinomial(n, α, β) truncated to [trunc_lower, trunc_upper];
    # the infinite defaults leave it effectively untruncated.
    truncated(BetaBinomial(n, α, β), trunc_lower, trunc_upper)
end
export assigndist_betabinomial
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 361 | """
assigndist_betaprime(α, β; trunc_lower = -Inf, trunc_upper = Inf)
Assign parameters to a Beta prime distribution with parameters `α` and `β`, optionally
truncating the distribution.
"""
function assigndist_betaprime(α, β; trunc_lower = -Inf, trunc_upper = Inf)
    # BetaPrime(α, β) truncated to [trunc_lower, trunc_upper];
    # the infinite defaults leave it effectively untruncated.
    truncated(BetaPrime(α, β), trunc_lower, trunc_upper)
end
export assigndist_betaprime
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 394 | """
assigndist_binomial(n, p; trunc_lower = -Inf, trunc_upper = Inf)
Assign parameters to a binomial distribution with `n` trials and probability `p`
of success in individual trials, optionally truncating the distribution.
"""
function assigndist_binomial(n, p; trunc_lower = -Inf, trunc_upper = Inf)
    # Binomial with `n` trials and per-trial success probability `p`,
    # truncated to the requested bounds (untruncated by default).
    dist = Binomial(n, p)
    return truncated(dist, trunc_lower, trunc_upper)
end
export assigndist_binomial
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 350 | """
assigndist_frechet(α, θ; trunc_lower = -Inf, trunc_upper = Inf)
Assign parameters to a Fréchet distribution with parameters `α` and `θ`, optionally
truncating the distribution.
"""
function assigndist_frechet(α, θ; trunc_lower = -Inf, trunc_upper = Inf)
    # Frechet(α, θ) truncated to [trunc_lower, trunc_upper];
    # the infinite defaults leave it effectively untruncated.
    truncated(Frechet(α, θ), trunc_lower, trunc_upper)
end
export assigndist_frechet
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 340 | """
assigndist_gamma(α, θ; trunc_lower = -Inf, trunc_upper = Inf)
Assign parameters to a Gamma distribution with parameters `α` and `θ`, optionally
truncating the distribution.
"""
function assigndist_gamma(α, θ; trunc_lower = -Inf, trunc_upper = Inf)
    # Gamma(α, θ) truncated to [trunc_lower, trunc_upper];
    # the infinite defaults leave it effectively untruncated.
    truncated(Gamma(α, θ), trunc_lower, trunc_upper)
end
export assigndist_gamma
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1235 | """
assigndist_normal(μ, lower, upper; nσ = 1,
trunc_lower = -Inf, trunc_upper = Inf,
tolerance = 1e-7)
Assign parameters to a Normal distribution based on location
parameter `μ`, together with `lower` and `upper` uncertainty
bounds. `nσ` indicates how many standard deviations away
from `μ` `lower`/`upper` are (they must be equidistant
from `μ`). `trunc_lower` and `trunc_upper` truncated the
distribution if specified (defaults to `-Inf` and `Inf`).
"""
function assigndist_normal(μ, lower, upper; nσ = 1,
        trunc_lower = -Inf, trunc_upper = Inf,
        tolerance = 1e-7)
    # Distances from the location parameter to each bound; a symmetric
    # normal only makes sense when these agree to within `tolerance`.
    dist_from_μ_upper = upper - μ
    dist_from_μ_lower = μ - lower
    if abs(dist_from_μ_upper - dist_from_μ_lower) > tolerance
        # BUG FIX: use the two-argument DomainError(value, msg) form — the
        # original passed the message string as the offending *value*,
        # leaving the error without a message (cf. the two-arg usage in
        # `intersect` elsewhere in this package).
        throw(DomainError((lower, upper),
            "(μ - lower, upper - μ) = ($dist_from_μ_lower, $dist_from_μ_upper): lower and upper bounds are not equidistant-ish from μ. Cannot create normal distribution."))
    end
    # The bounds lie `nσ` standard deviations from `μ`.
    σ_estim = dist_from_μ_upper/nσ
    truncated(Normal(μ, σ_estim), trunc_lower, trunc_upper)
end

"""
    assigndist_normal(μ, σ; nσ = 1, trunc_lower = -Inf, trunc_upper = Inf,
        tolerance = 1e-7)

Assign parameters to a (possibly truncated) Normal distribution from a
location `μ` and an uncertainty `σ`, where `σ` is interpreted as `nσ`
standard deviations.
"""
function assigndist_normal(μ, σ; nσ = 1, trunc_lower = -Inf, trunc_upper = Inf,
        tolerance = 1e-7)
    # `σ` covers `nσ` standard deviations, so scale it down accordingly.
    σ_estim = σ/nσ
    truncated(Normal(μ, σ_estim), trunc_lower, trunc_upper)
end
export assigndist_normal
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 234 | """
assigndist_uniform(lower, upper)
Assign parameters to a uniform distribution with `lower` and `upper` uncertainty
bounds.
"""
function assigndist_uniform(lower, upper)
    # A flat distribution over [lower, upper]; no truncation machinery is
    # needed since the bounds already define the support exactly.
    return Uniform(lower, upper)
end
export assigndist_uniform
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1328 | import Distributions.Distribution
import Distributions.rand
import Distributions.support
import StatsBase.quantile
import StatsBase.mean
import StatsBase.median
import StatsBase.middle
import StatsBase.quantile
import StatsBase.std
import Distributions.pdf
import Base: minimum, maximum
# Root type for probability distributions estimated from data.
abstract type AbstractEmpiricalDistribution end

"""
    FittedDistribution(distribution)

Wrapper around a `Distributions.Distribution` that has been fitted to
empirical data.
"""
struct FittedDistribution{D <: Distribution} <: AbstractEmpiricalDistribution
    distribution::D
end

# Broadcast over the wrapped distribution, not the wrapper itself.
Broadcast.broadcastable(fd::FittedDistribution) = Ref(fd.distribution)

# Sampling and all summary statistics delegate to the wrapped distribution.
Distributions.rand(fd::FittedDistribution) = rand(fd.distribution)
Distributions.rand(fd::FittedDistribution, n::Int) = rand(fd.distribution, n)
Distributions.support(fd::FittedDistribution) = support(fd.distribution)
Distributions.pdf(fd::FittedDistribution, x) = pdf(fd.distribution, x)
StatsBase.mean(fd::FittedDistribution) = mean(fd.distribution)
StatsBase.median(fd::FittedDistribution) = median(fd.distribution)
StatsBase.middle(fd::FittedDistribution) = middle(fd.distribution)
StatsBase.quantile(fd::FittedDistribution, q) = quantile(fd.distribution, q)
StatsBase.std(fd::FittedDistribution) = std(fd.distribution)
Base.minimum(uv::FittedDistribution) = minimum(uv.distribution)
Base.maximum(uv::FittedDistribution) = maximum(uv.distribution)
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 502 | import Base.<
import Base.isapprox
# Ordering and approximate comparison between plain reals and
# `CertainValue`s delegate to the wrapped `value` field.
Base.:<(x::T1, y::CertainValue{T2}) where {T1 <: Real, T2 <: Real} = x < y.value
Base.:<(x::CertainValue{T1}, y::T2) where {T1 <: Real, T2 <: Real} = x.value < y
Base.isless(x::CertainValue{T1}, y::CertainValue{T2}) where {T1 <: Real, T2 <: Real} = isless(x.value, y.value)
# FIX: forward keyword arguments (`atol`, `rtol`, ...) to `isapprox` so
# callers can control tolerance; the original methods silently ignored
# any such keywords (they would have raised a MethodError).
Base.isapprox(x::CertainValue{T1}, y::T2; kwargs...) where {T1 <: Real, T2 <: Real} = isapprox(x.value, y; kwargs...)
Base.isapprox(x::T1, y::CertainValue{T2}; kwargs...) where {T1 <: Real, T2 <: Real} = isapprox(x, y.value; kwargs...)
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 10789 | """
combine(uvals::Vector{AbstractUncertainValue}; n = 10000*length(uvals),
bw::Union{Nothing, Real} = nothing)
Combine multiple uncertain values into a single uncertain value. This is
done by resampling each uncertain value in `uvals`, `n` times each,
then pooling these draws together. Finally, a kernel density estimate to the final
distribution is computed over those draws.
The KDE bandwidth is controlled by `bw`. By default, `bw = nothing`; in this case,
the bandwidth is determined using the `KernelDensity.default_bandwidth` function.
!!! tip
For very wide, close-to-normal distributions, the default bandwidth may work well.
If you're combining very peaked distributions or discrete populations, however,
you may want to lower the bandwidth significantly.
# Example
```julia
v1 = UncertainValue(Normal, 1, 0.3)
v2 = UncertainValue(Normal, 0.8, 0.4)
v3 = UncertainValue([rand() for i = 1:3], [0.3, 0.3, 0.4])
v4 = UncertainValue(Normal, 3.7, 0.8)
uvals = [v1, v2, v3, v4];
combine(uvals)
combine(uvals, n = 20000) # adjust number of total draws
```
"""
function combine(uvals::Vector{AbstractUncertainValue}; n = 10000*length(uvals),
        bw::Union{Nothing, Real} = nothing)
    N = length(uvals)
    # Pool `n` draws from every value into a single sample vector of
    # length N*n.
    draws = zeros(Float64, N*n)
    for (i, uval) in enumerate(uvals)
        # The i-th value fills the i-th contiguous slice of length n.
        draws[(i-1)*n+1:i*n] = resample(uval, n)
    end
    # Fit a KDE to the pooled draws; fall back to KernelDensity's default
    # bandwidth when `bw` is not supplied.
    return UncertainValue(UnivariateKDE, draws,
        bandwidth = bw isa Real ? bw : default_bandwidth(draws))
end
"""
combine(uvals::Vector{AbstractUncertainValue}, weights::ProbabilityWeights;
n = 10000*length(uvals),
bw::Union{Nothing, Real} = nothing)
Combine multiple uncertain values into a single uncertain value. This is
done by resampling each uncertain value in `uvals` proportionally to the provided
relative analytic `weights` indicating their relative importance (these are normalised by
default, so don't need to sum to 1), then pooling these draws together. Finally, a kernel
density estimate to the final distribution is computed over the `n` total draws.
Providing `ProbabilityWeights` leads to the exact same behaviour as for `AnalyticWeights`,
but may be more appropriate when, for example, weights have been determined
quantitatively.
The KDE bandwidth is controlled by `bw`. By default, `bw = nothing`; in this case,
the bandwidth is determined using the `KernelDensity.default_bandwidth` function.
!!! tip
For very wide, close-to-normal distributions, the default bandwidth may work well.
If you're combining very peaked distributions or discrete populations, however,
you may want to lower the bandwidth significantly.
# Example
```julia
v1 = UncertainValue(Normal, 1, 0.3)
v2 = UncertainValue(Normal, 0.8, 0.4)
v3 = UncertainValue([rand() for i = 1:3], [0.3, 0.3, 0.4])
v4 = UncertainValue(Normal, 3.7, 0.8)
uvals = [v1, v2, v3, v4];
# Two different syntax options
combine(uvals, ProbabilityWeights([0.2, 0.1, 0.3, 0.2]))
combine(uvals, pweights([0.2, 0.1, 0.3, 0.2]), n = 20000) # adjust number of total draws
```
"""
function combine(uvals::Vector{AbstractUncertainValue}, weights::ProbabilityWeights;
        n = 10000*length(uvals),
        bw::Union{Nothing, Real} = nothing)
    # BUG FIX: the original check was a no-op ternary that evaluated (and
    # discarded) the message string; actually enforce one weight per value.
    length(uvals) == length(weights) ||
        throw(ArgumentError("Number of values != number of weights"))
    # Scale the number of draws of each value according to their relative probability
    L = length(uvals)
    wts = weights ./ weights.sum
    # `ceil` guarantees every value contributes at least one draw.
    Ns = [ceil(Int, n*wts[i]) for i = 1:L]
    N = sum(Ns)
    draws = zeros(Float64, N)
    for (i, uval) in enumerate(uvals)
        # Fill the i-th contiguous slice of `draws` with draws from `uval`.
        lo = i == 1 ? 1 : sum(Ns[1:(i-1)]) + 1
        draws[lo:sum(Ns[1:i])] = resample(uval, Ns[i])
    end
    # KDE over the pooled draws; default bandwidth unless `bw` is given.
    return UncertainValue(UnivariateKDE, draws,
        bandwidth = bw isa Real ? bw : default_bandwidth(draws))
end
"""
combine(uvals::Vector{AbstractUncertainValue}, weights::AnalyticWeights;
n = 10000*length(uvals),
bw::Union{Nothing, Real} = nothing)
Combine multiple uncertain values into a single uncertain value. This is
done by resampling each uncertain value in `uvals` proportionally to the provided
relative probability `weights` (these are normalised by default, so don't need
to sum to 1), then pooling these draws together. Finally, a kernel density
estimate to the final distribution is computed over the `n` total draws.
Providing `AnalyticWeights` leads to the exact same behaviour as for `ProbabilityWeights`,
but may be more appropriate when relative importance weights are assigned subjectively,
and not based on quantitative evidence.
The KDE bandwidth is controlled by `bw`. By default, `bw = nothing`; in this case,
the bandwidth is determined using the `KernelDensity.default_bandwidth` function.
!!! tip
For very wide, close-to-normal distributions, the default bandwidth may work well.
If you're combining very peaked distributions or discrete populations, however,
you may want to lower the bandwidth significantly.
# Example
```julia
v1 = UncertainValue(Normal, 1, 0.3)
v2 = UncertainValue(Normal, 0.8, 0.4)
v3 = UncertainValue([rand() for i = 1:3], [0.3, 0.3, 0.4])
v4 = UncertainValue(Normal, 3.7, 0.8)
uvals = [v1, v2, v3, v4];
# Two different syntax options
combine(uvals, AnalyticWeights([0.2, 0.1, 0.3, 0.2]))
combine(uvals, aweights([0.2, 0.1, 0.3, 0.2]), n = 20000) # adjust number of total draws
```
"""
function combine(uvals::Vector{AbstractUncertainValue}, weights::AnalyticWeights;
        n = 10000*length(uvals),
        bw::Union{Nothing, Real} = nothing)
    # BUG FIX: the original check was a no-op ternary that evaluated (and
    # discarded) the message string; actually enforce one weight per value.
    length(uvals) == length(weights) ||
        throw(ArgumentError("Number of values != number of weights"))
    # Scale the number of draws of each value according to their relative probability
    L = length(uvals)
    wts = weights ./ weights.sum
    # `ceil` guarantees every value contributes at least one draw.
    Ns = [ceil(Int, n*wts[i]) for i = 1:L]
    N = sum(Ns)
    draws = zeros(Float64, N)
    for (i, uval) in enumerate(uvals)
        # Fill the i-th contiguous slice of `draws` with draws from `uval`.
        lo = i == 1 ? 1 : sum(Ns[1:(i-1)]) + 1
        draws[lo:sum(Ns[1:i])] = resample(uval, Ns[i])
    end
    # KDE over the pooled draws; default bandwidth unless `bw` is given.
    return UncertainValue(UnivariateKDE, draws,
        bandwidth = bw isa Real ? bw : default_bandwidth(draws))
end
"""
combine(uvals::Vector{AbstractUncertainValue}, weights::Weights;
n = 10000*length(uvals),
bw::Union{Nothing, Real} = nothing)
Combine multiple uncertain values into a single uncertain value. This is
done by resampling each uncertain value in `uvals` proportionally to the provided `weights`
(these are normalised by default, so don't need to sum to 1), then pooling these draws
together. Finally, a kernel density estimate to the final distribution is computed over
the `n` total draws.
Providing `Weights` leads to the exact same behaviour as for `ProbabilityWeights` and
`AnalyticWeights`.
The KDE bandwidth is controlled by `bw`. By default, `bw = nothing`; in this case,
the bandwidth is determined using the `KernelDensity.default_bandwidth` function.
!!! tip
For very wide, close-to-normal distributions, the default bandwidth may work well.
If you're combining very peaked distributions or discrete populations, however,
you may want to lower the bandwidth significantly.
# Example
```julia
v1 = UncertainValue(Normal, 1, 0.3)
v2 = UncertainValue(Normal, 0.8, 0.4)
v3 = UncertainValue([rand() for i = 1:3], [0.3, 0.3, 0.4])
v4 = UncertainValue(Normal, 3.7, 0.8)
uvals = [v1, v2, v3, v4];
# Two different syntax options
combine(uvals, Weights([0.2, 0.1, 0.3, 0.2]))
combine(uvals, weights([0.2, 0.1, 0.3, 0.2]), n = 20000) # adjust number of total draws
```
"""
function combine(uvals::Vector{AbstractUncertainValue}, weights::Weights;
        n = 10000*length(uvals),
        bw::Union{Nothing, Real} = nothing)
    # BUG FIX: the original check was a no-op ternary that evaluated (and
    # discarded) the message string; actually enforce one weight per value.
    length(uvals) == length(weights) ||
        throw(ArgumentError("Number of values != number of weights"))
    # Scale the number of draws of each value according to their relative probability
    L = length(uvals)
    wts = weights ./ weights.sum
    # `ceil` guarantees every value contributes at least one draw.
    Ns = [ceil(Int, n*wts[i]) for i = 1:L]
    N = sum(Ns)
    draws = zeros(Float64, N)
    for (i, uval) in enumerate(uvals)
        # Fill the i-th contiguous slice of `draws` with draws from `uval`.
        lo = i == 1 ? 1 : sum(Ns[1:(i-1)]) + 1
        draws[lo:sum(Ns[1:i])] = resample(uval, Ns[i])
    end
    # KDE over the pooled draws; default bandwidth unless `bw` is given.
    return UncertainValue(UnivariateKDE, draws,
        bandwidth = bw isa Real ? bw : default_bandwidth(draws))
end
"""
combine(uvals::Vector{AbstractUncertainValue}, weights::FrequencyWeights;
bw::Union{Nothing, Real} = nothing)
Combine multiple uncertain values into a single uncertain value. This is
done by resampling each uncertain value in `uvals` according to their relative
frequencies (the absolute number of draws provided by `weights`). Finally, a kernel density
estimate to the final distribution is computed over the `sum(weights)` total draws.
The KDE bandwidth is controlled by `bw`. By default, `bw = nothing`; in this case,
the bandwidth is determined using the `KernelDensity.default_bandwidth` function.
!!! tip
For very wide and close-to-normal distributions, the default bandwidth may work well.
If you're combining very peaked distributions or discrete populations, however,
you may want to lower the bandwidth significantly.
# Example
```julia
v1 = UncertainValue(Normal, 1, 0.3)
v2 = UncertainValue(Normal, 0.8, 0.4)
v3 = UncertainValue([rand() for i = 1:3], [0.3, 0.3, 0.4])
v4 = UncertainValue(Normal, 3.7, 0.8)
uvals = [v1, v2, v3, v4];
# Two different syntax options
combine(uvals, FrequencyWeights([100, 500, 343, 7000]))
combine(uvals, pweights([1410, 550, 223, 801]))
```
"""
function combine(uvals::Vector{AbstractUncertainValue}, weights::FrequencyWeights;
        bw::Union{Nothing, Real} = nothing)
    # BUG FIX: the original check was a no-op ternary that evaluated (and
    # discarded) the message string; actually enforce one weight per value.
    length(uvals) == length(weights) ||
        throw(ArgumentError("Number of values != number of weights"))
    # Frequency weights give the absolute number of draws per value.
    Ns = weights
    N = sum(Ns)
    draws = zeros(Float64, N)
    for (i, uval) in enumerate(uvals)
        # Fill the i-th contiguous slice of `draws` with draws from `uval`.
        lo = i == 1 ? 1 : sum(Ns[1:(i-1)]) + 1
        draws[lo:sum(Ns[1:i])] = resample(uval, Ns[i])
    end
    # KDE over the pooled draws; default bandwidth unless `bw` is given.
    return UncertainValue(UnivariateKDE, draws,
        bandwidth = bw isa Real ? bw : default_bandwidth(draws))
end
export combine,
ProbabilityWeights, pweights,
Weights, weights,
AnalyticWeights, aweights,
FrequencyWeights, fweights | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 8449 | using Test
using UncertainData
using Distributions
using StaticArrays
using StatsBase
using KernelDensity
#############################################
# UncertainValues module
#############################################
@testset "Uncertain values" begin
@testset "Assign distributions" begin
include("uncertain_values/test_assign_distributions.jl")
@testset "Uncertain values" begin
include("uncertain_values/test_uncertain_values.jl")
end
@testset "minmax" begin
include("uncertain_values/test_minmax.jl")
end
@testset "CertainValue" begin
include("uncertain_values/test_certain_values.jl")
end
@testset "Merging" begin
include("uncertain_values/test_merging.jl")
end
end
end
@testset "Populations" begin
include("uncertain_values/populations/test_UncertainScalarPopulation.jl")
include("uncertain_values/populations/test_ConstrainedUncertainScalarPopulation.jl")
end
#############################################
# UncertainDatasets module
#############################################
@testset "Uncertain datasets" begin
include("uncertain_datasets/test_uncertain_datasets.jl")
end
#############################################
# Resampling uncertain values
#############################################
@testset "Uncertain values" begin
include("resampling/test_resampling_uncertain_values.jl")
end
#############################################
# Resampling uncertain tuples
#############################################
@testset "Uncertain tuples" begin
include("resampling/test_resampling_uncertain_tuples.jl")
end
#############################################
# Resampling uncertain datasets
#############################################
# Resampling schemes
@testset "Resampling" begin
@testset "BinnedResampling" begin
include("resampling/resampling_schemes/test_BinnedResampling.jl")
end
@testset "BinnedWeightedResampling" begin
include("resampling/resampling_schemes/test_BinnedWeightedResampling.jl")
end
@testset "BinnedMeanResampling" begin
include("resampling/resampling_schemes/test_BinnedMeanResampling.jl")
end
@testset "BinnedMeanWeightedResampling" begin
include("resampling/resampling_schemes/test_BinnedMeanWeightedResampling.jl")
end
@testset "ConstrainedIndexValueResampling" begin
include("resampling/resampling_schemes/test_ConstrainedIndexValueResampling.jl")
end
@testset "ContrainedValueResampling" begin
include("resampling/resampling_schemes/test_ConstrainedValueResampling.jl")
end
@testset "SequentialResampling" begin
include("resampling/resampling_schemes/test_SequentialResampling.jl")
end
@testset "SequentialInterpolatedResampling" begin
include("resampling/resampling_schemes/test_SequentialInterpolatedResampling.jl")
end
@testset "RandomSequences" begin
include("resampling/resampling_schemes/test_RandomSequences.jl")
end
# Define an array of uncertain values `uvals` that we can construct datasets from.
include("resampling/define_uncertain_values.jl")
@testset "Vectors of uncertain values" begin
# Resampling vectors of uncertain values
#---------------------------------------
include("resampling/uncertain_vectors/test_resampling_vectors.jl")
include("resampling/uncertain_vectors/test_resampling_vectors_apply_funcs.jl")
include("resampling/uncertain_vectors/test_resampling_vectors_constraints.jl")
end
# Resampling uncertain datasets
#-------------------------------
@testset "UncertainDataset" begin
include("resampling/uncertain_datasets/test_resampling_datasets.jl")
end
# Resampling uncertain value datasets
#-------------------------------------
@testset "UncertainValueDataset" begin
include("resampling/uncertain_datasets/test_resampling_datasets_uncertainvaluedataset.jl")
include("resampling/uncertain_datasets/test_resampling_datasets_uncertainvaluedataset_apply_funcs.jl")
include("resampling/uncertain_datasets/test_resampling_datasets_uncertainvaluedataset_constraints.jl")
end
# Resampling uncertain index datasets
#-------------------------------------
@testset "UncertainIndexDataset" begin
include("resampling/uncertain_datasets/test_resampling_datasets_uncertainindexdataset.jl")
include("resampling/uncertain_datasets/test_resampling_datasets_uncertainindexdataset_apply_funcs.jl")
include("resampling/uncertain_datasets/test_resampling_datasets_uncertainindexdataset_constraints.jl")
end
# Resampling uncertain index-value datasets
#-------------------------------------
@testset "UncertainIndexValueDataset" begin
include("resampling/uncertain_datasets/test_resampling_uncertainindexvaluedataset.jl")
include("resampling/uncertain_datasets/test_resampling_with_schemes.jl")
end
# Special resampling constraints
#-----------------------------
@testset "Special resampling constraints" begin
@testset "Sequential" begin
include("resampling/uncertain_datasets/sequential/test_resampling_sequential_increasing.jl")
include("resampling/uncertain_datasets/sequential/test_resampling_sequential_decreasing.jl")
end
end
# Resampling inplace.
#-----------------------------
@testset "Inplace resampling" begin
include("resampling/test_resampling_inplace.jl")
end
#############################################
# Resampling uncertain datasets element-wise
#############################################
@testset "Element-wise" begin
include("resampling/uncertain_datasets/test_resampling_abstractuncertainvaluedataset_elwise.jl")
end
end
############################################
# Interpolation and binning
#############################################
@testset "Interpolation/binning" begin
    include("generic_interpolation/test_findall_nan_chunks.jl")
    include("generic_interpolation/test_interpolate_nans.jl")
    include("resampling/uncertain_datasets/test_interpolation.jl")
    include("binning/test_binning.jl")

    # Interpolation with resampling schemes
    @testset "Intp w/ resampling scheme" begin
        include("resampling/binning/test_bin_BinnedResampling.jl")
        include("resampling/binning/test_bin_BinnedWeightedResampling.jl")
    end
end

#############################
# Mathematics
#############################
@testset "Mathematics" begin
    include("mathematics/test_mathematics.jl")
end

############################################
# Uncertain statistics
#############################################
@testset "Statistics" begin
    include("uncertain_statistics/uncertain_values/test_core_stats_values_point_estimates.jl")
    include("uncertain_statistics/uncertain_values/test_core_stats_values_pair_estimates.jl")
    include("uncertain_statistics/uncertain_datasets/test_core_stats_datasets_single_dataset_estimates.jl")
    include("uncertain_statistics/uncertain_datasets/test_core_stats_datasets_pairwise_estimates.jl")
    include("uncertain_statistics/test_hypothesistests.jl")
end

# Currently disabled; presumably slow or flaky — confirm before re-enabling.
#include("uncertain_statistics/test_hypothesistests_timeseries.jl")

#########################################
# Resampling with constraints and models
#########################################
# #####################
# # Resampling schemes
# #####################
# include("resampling/resampling_schemes/test_ConstrainedResampling.jl")
# #######################
# # Sampling constraints
# #######################
@testset "Resampling with constraints" begin
    include("sampling_constraints/test_sampling_constraints.jl")
    include("sampling_constraints/test_constrain_certainvalue.jl")
    include("sampling_constraints/test_constrain_population.jl")
    include("sampling_constraints/test_constrain_uncertainvalues.jl")
    include("sampling_constraints/test_constrain_uncertainvalues_kde.jl")
    include("sampling_constraints/test_constrain_uncertaindatasets.jl")
    include("sampling_constraints/test_constrain_uncertainvaluedatasets.jl")
    include("sampling_constraints/test_constrain_uncertainindexdatasets.jl")
    include("sampling_constraints/test_constrain_with_schemes.jl")
end
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 400 | using StatsBase
# Bin a synthetic series onto a regular grid with `bin`/`bin_mean` and verify
# the output length and element type.
xs = sort(rand(1000))     # ordered sample positions in [0, 1]
ys = diff(rand(1001))     # 1000 values to be binned
T = eltype(ys)
g = -0.3:0.025:1.2        # bin edges; the grid extends beyond the data range
nbins = length(g) - 1

# Summarise each bin by its median, then by a quantile (extra summary-function
# arguments are forwarded by `bin`).
for ybinned in (bin(median, g, xs, ys), bin(quantile, g, xs, ys, [0.5]))
    @test length(ybinned) == nbins
    @test ybinned isa Vector{T}
end

# `bin_mean` is the dedicated mean-per-bin shortcut.
ybinned = bin_mean(g, xs, ys)
@test ybinned isa Vector
@test length(ybinned) == nbins
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1160 |
# Tests for `findall_nan_chunks`/`findall_nan_chunks!`, which locate
# contiguous runs of NaNs as (first, last) index tuples.
#
# Several comparisons below were previously not wrapped in `@test`, so they
# evaluated to a Bool and silently asserted nothing; they are now real tests.

# 1 NaN chunk, no NaNs at edges
x = [1.0, 2.0, NaN, NaN, 2.3, 5.6]
fs = findall_nan_chunks(x)
@test length(fs) == 1
@test fs[1] == (3, 4)

# > 1 NaN chunk, no NaNs at edges
x = [1.0, 2.0, NaN, NaN, 2.3, 5.6, NaN, NaN, NaN, 2.0]
fs = findall_nan_chunks(x)
@test length(fs) == 2
@test fs[1] == (3, 4)
@test fs[2] == (7, 9)

# NaN chunk at left boundary
x = [NaN, 1.0, 2.0, NaN, NaN, 2.3, 5.6, NaN, NaN, NaN, 2.0]
fs = findall_nan_chunks(x)
@test fs[1] == (1, 1)
@test fs[2] == (4, 5)
@test fs[3] == (8, 10)

# NaN chunk at right boundary
x = [2.3, 5.6, NaN, NaN, NaN]
fs = findall_nan_chunks(x)
@test fs[1] == (3, 5)

# NaN chunks at both boundaries and isolated single-NaN chunks
x = [NaN, 1.0, 2.0, NaN, NaN, 2.3, NaN, 5.6, 8.7, NaN]
fc = findall_nan_chunks(x)
@test fc[1] == (1, 1)
@test fc[2] == (4, 5)
@test fc[3] == (7, 7)
@test fc[4] == (10, 10)
@test length(fc) == 4

# The number of NaNs in the ranges matches the total number of NaNs
x = rand(10000)
# NOTE(review): only indices 1:1000 of the 10000-element vector can receive
# NaNs here — possibly `1:10000` was intended; confirm.
x[rand(1:1000, 1000)] .= NaN
tupdiff(t::Tuple{Int, Int}) = t[2] - t[1]
@test sum(tupdiff.(findall_nan_chunks(x)) .+ 1) == count(isnan, x)

# In-place method matches regular method
v = zeros(Bool, length(x));
@test findall_nan_chunks!(v, x) == findall_nan_chunks(x)
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 516 | using Interpolations
# Tests for `interpolate_nans`/`fill_nans`: interior NaN gaps are filled by
# linear interpolation; the extrapolation argument controls the boundaries.
x = [NaN, NaN, 2.0, 1.0, 2.0, NaN, 2.5, 2.3, NaN, 5.6, 8.7, NaN]
g = 1:length(x)

# With `NaN` extrapolation, boundary NaNs are left as NaN while interior
# gaps are filled.
intp = interpolate_nans(g, x, Linear(), NaN)
@test isnan(intp[1])
@test isnan(intp[end])
@test any(isnan.(intp[3:end-1])) == false

# With flat on-grid extrapolation, boundary entries take the value of the
# nearest non-NaN data point.
intp = interpolate_nans(g, x, Linear(), Flat(OnGrid()))
@test intp[1] == intp[3]
@test intp[end] == intp[end-1]
@test any(isnan.(intp[3:end-1])) == false

# `fill_nans` must agree with `interpolate_nans` on the non-NaN entries.
# Fixed: the grid was `1:20`, which does not match `length(x) == 12` — use
# the matching grid `g`. The final comparison was also missing `@test`.
x1 = interpolate_nans(g, x, Linear(), NaN)
x2 = fill_nans(x, Linear())
@test all(x1[.!(isnan.(x1))] .≈ x2[.!(isnan.(x2))])
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 341 | include("uncertain_values/test_elementary_maths_uncertainvalues.jl")
include("uncertain_datasets/test_elementary_maths_uncertaindataset.jl")
include("uncertain_datasets/test_elementary_maths_uncertainvaluedataset.jl")
include("uncertain_datasets/test_elementary_maths_uncertainindexdataset.jl")
include("test_trig_funcs_uncertainvalues.jl") | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 4188 | using Distributions, UncertainData
# Exercise the trigonometric functions on uncertain values of several
# representations: a theoretical distribution, a KDE estimate of a mixture
# sample, and a distribution fitted to a sample.
M = MixtureModel([Normal(3, 0.2), Normal(2, 1)])
r1 = UncertainValue(Normal, rand(), rand())
r2 = UncertainValue(rand(M, 10000))
r3 = UncertainValue(Normal, rand(Normal(4, 3.2), 10000))
uvals = [r1; r2; r3]

# Number of draws when deviating from the default.
n = 5

# Each of these maps an uncertain value to a `Vector{Float64}` of draws.
# The inverse functions (asin, acos, atan, ...) are deliberately not tested:
# draws may fall outside their restricted domains.
trig_funcs = (
    sin, sind, sinh,    # sine variants
    cos, cosd, cosh,    # cosine variants
    tan, tand, tanh,    # tangent variants
    csc, cscd, csch,    # cosecant variants
    sec, secd, sech,    # secant variants
    cot, cotd, coth,    # cotangent variants
)

# Related trig functions, also vector-valued.
related_funcs = (sinc, sinpi, cosc, cospi)

for uval in uvals
    for f in trig_funcs
        @test f(uval) isa Vector{Float64}
        @test f(uval, n) isa Vector{Float64}
    end

    # `sincos` draws (sin, cos) pairs.
    @test sincos(uval) isa Vector{Tuple{Float64, Float64}}
    @test sincos(uval, n) isa Vector{Tuple{Float64, Float64}}

    for f in related_funcs
        @test f(uval) isa Vector{Float64}
        @test f(uval, n) isa Vector{Float64}
    end
end
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 2100 | using Distributions, UncertainData, Combinatorics
# Elementary arithmetic for `UncertainDataset`s: dataset-dataset,
# scalar-dataset and vector-dataset combinations, mixing different
# uncertain value representations.
M = MixtureModel([Normal(3, 0.2), Normal(2, 1)])
r1 = UncertainValue(Normal, rand(), rand())
r2 = UncertainValue(rand(M, 10000))
r3 = UncertainValue(Normal, rand(Normal(4, 3.2), 10000))
uvals = [r1; r2; r3]
a = UncertainDataset(uvals)
b = UncertainDataset(uvals[end:-1:1])
N = length(a)

###########
# Addition
###########
@test a + b isa UncertainDataset
@test 2 + b isa UncertainDataset
@test [2 for i = 1:N] + b isa UncertainDataset
@test 2.2 + b isa UncertainDataset
@test [2.2 for i = 1:N] + b isa UncertainDataset
@test a + [2 for i = 1:N] isa UncertainDataset
@test a + [2.2 for i = 1:N] isa UncertainDataset

#############
# Subtraction
#############
@test a - b isa UncertainDataset
@test 2 - b isa UncertainDataset
@test [2 for i = 1:N] - b isa UncertainDataset
@test 2.2 - b isa UncertainDataset
@test [2.2 for i = 1:N] - b isa UncertainDataset
@test a - [2 for i = 1:N] isa UncertainDataset
@test a - [2.2 for i = 1:N] isa UncertainDataset

################
# Multiplication
################
@test a * b isa UncertainDataset
@test 2 * b isa UncertainDataset
@test [2 for i = 1:N] * b isa UncertainDataset
@test 2.2 * b isa UncertainDataset
@test [2.2 for i = 1:N] * b isa UncertainDataset
@test a * [2 for i = 1:N] isa UncertainDataset
@test a * [2.2 for i = 1:N] isa UncertainDataset

################
# Division
################
# Fixed: these previously re-ran the multiplication tests (copy-paste
# error) and never exercised `/`.
@test a / b isa UncertainDataset
@test 2 / b isa UncertainDataset
@test [2 for i = 1:N] / b isa UncertainDataset
@test 2.2 / b isa UncertainDataset
@test [2.2 for i = 1:N] / b isa UncertainDataset
@test a / [2 for i = 1:N] isa UncertainDataset
@test a / [2.2 for i = 1:N] isa UncertainDataset

################
# Exponentiation
################
# NOTE(review): these duplicate the multiplication tests. `^` appears not
# to be implemented for uncertain types yet (cf. the commented-out `^` test
# in test_elementary_maths_uncertainvalues.jl) — switch `*` to `^` once it is.
@test a * b isa UncertainDataset
@test 2 * b isa UncertainDataset
@test [2 for i = 1:N] * b isa UncertainDataset
@test 2.2 * b isa UncertainDataset
@test [2.2 for i = 1:N] * b isa UncertainDataset
@test a * [2 for i = 1:N] isa UncertainDataset
@test a * [2.2 for i = 1:N] isa UncertainDataset
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 2285 | using Distributions, UncertainData, Combinatorics
# Elementary arithmetic for `UncertainIndexDataset`s: dataset-dataset,
# scalar-dataset and vector-dataset combinations, mixing different
# uncertain value representations.
M = MixtureModel([Normal(3, 0.2), Normal(2, 1)])
r1 = UncertainValue(Normal, rand(), rand())
r2 = UncertainValue(rand(M, 10000))
r3 = UncertainValue(Normal, rand(Normal(4, 3.2), 10000))
uvals = [r1; r2; r3]
a = UncertainIndexDataset(uvals)
b = UncertainIndexDataset(uvals[end:-1:1])
N = length(a)

###########
# Addition
###########
@test a + b isa UncertainIndexDataset
@test 2 + b isa UncertainIndexDataset
@test [2 for i = 1:N] + b isa UncertainIndexDataset
@test 2.2 + b isa UncertainIndexDataset
@test [2.2 for i = 1:N] + b isa UncertainIndexDataset
@test a + [2 for i = 1:N] isa UncertainIndexDataset
@test a + [2.2 for i = 1:N] isa UncertainIndexDataset

#############
# Subtraction
#############
@test a - b isa UncertainIndexDataset
@test 2 - b isa UncertainIndexDataset
@test [2 for i = 1:N] - b isa UncertainIndexDataset
@test 2.2 - b isa UncertainIndexDataset
@test [2.2 for i = 1:N] - b isa UncertainIndexDataset
@test a - [2 for i = 1:N] isa UncertainIndexDataset
@test a - [2.2 for i = 1:N] isa UncertainIndexDataset

################
# Multiplication
################
@test a * b isa UncertainIndexDataset
@test 2 * b isa UncertainIndexDataset
@test [2 for i = 1:N] * b isa UncertainIndexDataset
@test 2.2 * b isa UncertainIndexDataset
@test [2.2 for i = 1:N] * b isa UncertainIndexDataset
@test a * [2 for i = 1:N] isa UncertainIndexDataset
@test a * [2.2 for i = 1:N] isa UncertainIndexDataset

################
# Division
################
# Fixed: these previously re-ran the multiplication tests (copy-paste
# error) and never exercised `/`.
@test a / b isa UncertainIndexDataset
@test 2 / b isa UncertainIndexDataset
@test [2 for i = 1:N] / b isa UncertainIndexDataset
@test 2.2 / b isa UncertainIndexDataset
@test [2.2 for i = 1:N] / b isa UncertainIndexDataset
@test a / [2 for i = 1:N] isa UncertainIndexDataset
@test a / [2.2 for i = 1:N] isa UncertainIndexDataset

################
# Exponentiation
################
# NOTE(review): these duplicate the multiplication tests. `^` appears not
# to be implemented for uncertain types yet (cf. the commented-out `^` test
# in test_elementary_maths_uncertainvalues.jl) — switch `*` to `^` once it is.
@test a * b isa UncertainIndexDataset
@test 2 * b isa UncertainIndexDataset
@test [2 for i = 1:N] * b isa UncertainIndexDataset
@test 2.2 * b isa UncertainIndexDataset
@test [2.2 for i = 1:N] * b isa UncertainIndexDataset
@test a * [2 for i = 1:N] isa UncertainIndexDataset
@test a * [2.2 for i = 1:N] isa UncertainIndexDataset
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 2285 | using Distributions, UncertainData, Combinatorics
# Elementary arithmetic for `UncertainValueDataset`s: dataset-dataset,
# scalar-dataset and vector-dataset combinations, mixing different
# uncertain value representations.
M = MixtureModel([Normal(3, 0.2), Normal(2, 1)])
r1 = UncertainValue(Normal, rand(), rand())
r2 = UncertainValue(rand(M, 10000))
r3 = UncertainValue(Normal, rand(Normal(4, 3.2), 10000))
uvals = [r1; r2; r3]
a = UncertainValueDataset(uvals)
b = UncertainValueDataset(uvals[end:-1:1])
N = length(a)

###########
# Addition
###########
@test a + b isa UncertainValueDataset
@test 2 + b isa UncertainValueDataset
@test [2 for i = 1:N] + b isa UncertainValueDataset
@test 2.2 + b isa UncertainValueDataset
@test [2.2 for i = 1:N] + b isa UncertainValueDataset
@test a + [2 for i = 1:N] isa UncertainValueDataset
@test a + [2.2 for i = 1:N] isa UncertainValueDataset

#############
# Subtraction
#############
@test a - b isa UncertainValueDataset
@test 2 - b isa UncertainValueDataset
@test [2 for i = 1:N] - b isa UncertainValueDataset
@test 2.2 - b isa UncertainValueDataset
@test [2.2 for i = 1:N] - b isa UncertainValueDataset
@test a - [2 for i = 1:N] isa UncertainValueDataset
@test a - [2.2 for i = 1:N] isa UncertainValueDataset

################
# Multiplication
################
@test a * b isa UncertainValueDataset
@test 2 * b isa UncertainValueDataset
@test [2 for i = 1:N] * b isa UncertainValueDataset
@test 2.2 * b isa UncertainValueDataset
@test [2.2 for i = 1:N] * b isa UncertainValueDataset
@test a * [2 for i = 1:N] isa UncertainValueDataset
@test a * [2.2 for i = 1:N] isa UncertainValueDataset

################
# Division
################
# Fixed: these previously re-ran the multiplication tests (copy-paste
# error) and never exercised `/`.
@test a / b isa UncertainValueDataset
@test 2 / b isa UncertainValueDataset
@test [2 for i = 1:N] / b isa UncertainValueDataset
@test 2.2 / b isa UncertainValueDataset
@test [2.2 for i = 1:N] / b isa UncertainValueDataset
@test a / [2 for i = 1:N] isa UncertainValueDataset
@test a / [2.2 for i = 1:N] isa UncertainValueDataset

################
# Exponentiation
################
# NOTE(review): these duplicate the multiplication tests. `^` appears not
# to be implemented for uncertain types yet (cf. the commented-out `^` test
# in test_elementary_maths_uncertainvalues.jl) — switch `*` to `^` once it is.
@test a * b isa UncertainValueDataset
@test 2 * b isa UncertainValueDataset
@test [2 for i = 1:N] * b isa UncertainValueDataset
@test 2.2 * b isa UncertainValueDataset
@test [2.2 for i = 1:N] * b isa UncertainValueDataset
@test a * [2 for i = 1:N] isa UncertainValueDataset
@test a * [2.2 for i = 1:N] isa UncertainValueDataset
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 2408 | using Distributions, UncertainData, Combinatorics
# Elementary arithmetic between pairs of uncertain values, and between
# uncertain values and scalars/vectors of uncertain values.

# Test all combinations of different types of uncertain values
M = MixtureModel([Normal(3, 0.2), Normal(2, 1)])
r1 = UncertainValue(Normal, rand(), rand())
r2 = UncertainValue(rand(M, 10000))
r3 = UncertainValue(Normal, rand(Normal(4, 3.2), 10000))
# NOTE(review): r4 and r5 are defined but never exercised below —
# presumably intended for CertainValue arithmetic tests; confirm.
r4 = CertainValue(2.2)
r5 = CertainValue(2)
uvals = [r1; r2; r3]

# Find all the different ways of combining two uncertain values in `uvals`.
# We let the order matter, so that if the order of operations for some reason causes
# things to fail when using different types of uncertain values, the errors are caught.
perms = permutations(1:3, 2) |> collect

# A random number
x = rand()

# Number of draws when deviating from default n = 10000
n = 10

for perm in perms
    r1, r2 = uvals[perm[1]], uvals[perm[2]]

    # Addition
    @test r1 + r2 isa AbstractUncertainValue
    @test x + r2 isa AbstractUncertainValue
    @test r1 + x isa AbstractUncertainValue
    @test +(r1, r2, n) isa AbstractUncertainValue
    @test +(x, r2, n) isa AbstractUncertainValue
    @test +(r1, x, n) isa AbstractUncertainValue

    # Subtraction
    @test r1 - r2 isa AbstractUncertainValue
    @test x - r2 isa AbstractUncertainValue
    @test r1 - x isa AbstractUncertainValue
    @test -(r1, r2, n) isa AbstractUncertainValue
    @test -(x, r2, n) isa AbstractUncertainValue
    @test -(r1, x, n) isa AbstractUncertainValue

    # Multiplication
    @test r1 * r2 isa AbstractUncertainValue
    @test x * r2 isa AbstractUncertainValue
    @test r1 * x isa AbstractUncertainValue
    @test *(r1, r2, n) isa AbstractUncertainValue
    @test *(x, r2, n) isa AbstractUncertainValue
    @test *(r1, x, n) isa AbstractUncertainValue

    # Division
    @test r1 / r2 isa AbstractUncertainValue
    @test x / r2 isa AbstractUncertainValue
    @test r1 / x isa AbstractUncertainValue
    @test /(r1, r2, n) isa AbstractUncertainValue
    @test /(x, r2, n) isa AbstractUncertainValue
    @test /(r1, x, n) isa AbstractUncertainValue

    # Exponentiation
    #@test r1 ^ r2 isa AbstractUncertainValue # implement for complex numbers.
end

# Elementary operations when one or both arguments is a vector of uncertain values
for val in uvals
    @test val + uvals isa Vector{<:AbstractUncertainValue}
    @test uvals + val isa Vector{<:AbstractUncertainValue}
    @test uvals + uvals isa Vector{<:AbstractUncertainValue}
end
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1902 | using StatsBase
# Definitions of example uncertain values spanning all representations
# (theoretical distributions, KDE estimates, fitted distributions,
# populations). `uvals`/`uidxs` are used by other included test files.

##########################################
# Uncertain theoretical distributions
##########################################
uncertain_theoretical_distributions = [
    UncertainValue(Uniform, -1, 1),
    UncertainValue(Normal, 0, 1),
    UncertainValue(Gamma, 1, 2),
    UncertainValue(Beta, 1, 2),
    UncertainValue(BetaPrime, 4, 5),
    UncertainValue(Frechet, 1, 1),
    UncertainValue(Binomial, 10, 0.3),
    UncertainValue(BetaBinomial, 100, 2, 3)
]

##########################################
# Kernel density estimates
##########################################
# NOTE(review): `n` is set here but the KDE sample below uses `rand(100)` —
# `n` is only used by the fitted distributions further down.
n = 10
uncertain_kde_estimates = [
    UncertainValue(rand(100))
]

##########################################
# Fitted theoretical distributions
##########################################
n = 10
uncertain_fitted_distributions = [
    UncertainValue(Uniform, rand(Uniform(-2, 2), n)),
    UncertainValue(Normal, rand(Normal(0, 1), n))
]

########################
# Uncertain populations
########################
# Population weights are relative; presumably normalised internally — confirm.
pop1 = UncertainValue(
    [3.0, UncertainValue(Normal, 0, 1),
    UncertainValue(Gamma, 2, 3),
    UncertainValue(Uniform, rand(1000))],
    [0.5, 0.5, 0.5, 0.5]
)
pop2 = UncertainValue([1, 2, 3], rand(3))
pop3 = UncertainValue([1.0, 2.0, 3.0], Weights(rand(3)))

# Uncertain population consisting of uncertain populations and other stuff
pop4 = UncertainValue([pop1, pop2], [0.1, 0.5])
pop5 = UncertainValue([pop1, pop2, 2, UncertainValue(Normal, -2, 3)], Weights(rand(4)));

uncertain_scalar_populations = [pop1, pop2, pop3, pop4, pop5]

##########################################
# Gather all examples
##########################################
uvals = [
    uncertain_scalar_populations;
    uncertain_theoretical_distributions;
    uncertain_kde_estimates;
    uncertain_fitted_distributions
];

# One uncertain index per example value, centred on the value's position.
uidxs = [UncertainValue(Normal, i, rand()*2) for i = 1:length(uvals)]
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 3882 | val = UncertainValue([1, 2, 3], [0.3, 0.3, 0.3])
# In-place resampling with `resample!`, into plain vectors/arrays and into
# StaticArrays containers (`val` is defined just above this block).
val2 = UncertainValue([1, 2, 3], [0.3, 0.3, 0.3])
vals = UncertainValue([val2, val], [1, 2])
val3 = UncertainValue(Normal, 0, 1)

###################################
# Single values
###################################
x = zeros(Float64, 10)

# There should be no zero entries after resampling in place
resample!(x, val)
@test any(x .== 0) == false

resample!(x, vals)
@test any(x .== 0) == false

resample!(x, val3)
@test any(x .== 0) == false

###################################
# Collections into vectors
###################################
# One draw per uncertain value; all NaN placeholders must be overwritten.
v = fill(NaN, length(example_uvals))
resample!(v, example_uvals)
@test any(isnan.(v)) == false

###################################
# Collections into arrays
###################################
# One row per draw, one column per uncertain value.
arr = fill(NaN, 100, length(example_uvals))
resample!(arr, example_uvals)
@test any(isnan.(arr)) == false

#################################################
# Uncertain index-value collections into vectors
#################################################
ids = UncertainIndexDataset(example_uidxs)
vals = UncertainValueDataset(example_uvals)
U = UncertainIndexValueDataset(ids, vals)
idxs = fill(NaN, length(U))
vals = fill(NaN, length(U))
resample!(idxs, vals, U)
@test any(isnan.(idxs)) == false
@test any(isnan.(vals)) == false

#################################################
# Uncertain index-value collections into arrays
#################################################
ids = UncertainIndexDataset(example_uidxs)
vals = UncertainValueDataset(example_uvals)
U = UncertainIndexValueDataset(ids, vals)
n_draws = 10
idxs = fill(NaN, n_draws, length(U))
vals = fill(NaN, n_draws, length(U))
resample!(idxs, vals, U)
@test any(isnan.(idxs)) == false
@test any(isnan.(vals)) == false

###################################################################################
# Draw N realisations of an uncertain value into vector-like containers of length N
###################################################################################
# A single uncertain value resampled multiple times into a N-element vector
N = 10
x = fill(NaN, 10)
resample!(x, UncertainValue(Normal(0, 1)))
@test any(isnan.(x)) == false

# A single uncertain value resampled multiple times into a N-element MVector
N = 10
x = MVector{N, Float64}(repeat([NaN], N))
resample!(x, UncertainValue(Normal(0, 1)))
@test any(isnan.(x)) == false

# A single uncertain value resampled multiple times into a 3-element FieldVector-type
mutable struct Vector3DType <: FieldVector{3, Float64}
    x::Float64
    y::Float64
    z::Float64
end

x = Vector3DType(NaN, NaN, NaN)
resample!(x, UncertainValue(Normal(0, 1)))
@test any(isnan.(x)) == false

#########################################################################################
# Draw single realisations of N uncertain values into vector-like containers of length N
#########################################################################################
# Three uncertain values resampled element-wise into a 3-element vector
x = repeat([NaN], 3)
resample!(x, (val, val, val))
@test any(isnan.(x)) == false

# Three uncertain values resampled element-wise into a 3-element MVector
N = 3
x = MVector{N, Float64}(repeat([NaN], N))
uvals = [UncertainValue([0], [1]), UncertainValue([1], [1]), UncertainValue([2], [1])]
resample!(x, uvals)
@test any(isnan.(x)) == false

# when the number of elements does not match the number of uncertain values
# NOTE(review): no assertion follows this call — presumably it should
# either @test the fill or @test_throws a size mismatch; confirm intent.
x = MVector{2, Float64}(repeat([NaN], 2))
uval = UncertainValue(Normal(0, 1))
resample!(x, uval)

# Two uncertain values resampled elementwise into a 2-element vector-like type
mutable struct VectorLikeType <: FieldVector{2, Float64}
    x::Float64
    y::Float64
end

x = VectorLikeType(NaN, NaN)
uvals = [UncertainValue([0], [1]), UncertainValue([1], [1])]
resample!(x, uvals)
@test any(isnan.(x)) == false
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 144 |
#############################
# Resampling uncertain tuples
#############################
include("uncertain_tuples/test_uncertain_tuples.jl")
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 355 |
#############################
# Resampling uncertain values
#############################
include("uncertain_values/test_resampling_certain_value.jl")
include("uncertain_values/test_resampling_uncertainvalues.jl")
include("uncertain_values/test_resampling_uncertainvalues_kde.jl")
include("uncertain_values/test_resampling_UncertainScalarPopulation.jl")
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1495 | using DynamicalSystemsBase
# One iteration of a unidirectionally coupled (x drives y) pair of AR(1)
# processes with additive Gaussian noise of standard deviation σ.
function eom_ar1_unidir(x, p, n)
    a₁, b₁, c_xy, σ = p
    x₁, x₂ = x
    dx = a₁ * x₁ + rand(Normal(0, σ))
    dy = b₁ * x₂ + c_xy * x₁ + rand(Normal(0, σ))
    return SVector{2}(dx, dy)
end
# A discrete dynamical system of two unidirectionally coupled AR(1)
# processes; `c_xy` controls the coupling strength from x to y.
function ar1_unidir(;uᵢ = rand(2), a₁ = 0.90693, b₁ = 0.40693, c_xy = 0.5, σ = 0.40662)
    return DiscreteDynamicalSystem(eom_ar1_unidir, uᵢ, [a₁, b₁, c_xy, σ])
end
# Generate an example uncertain index-value dataset from the AR(1) system
# and bin it with each of the three `BinnedResampling` bin representations.
vars = (1, 2)
npts, tstep = 50, 50
d_xind = Uniform(2.5, 5.5)
d_yind = Uniform(2.5, 5.5)
d_xval = Uniform(0.01, 0.2)
d_yval = Uniform(0.01, 0.2)

X, Y = example_uncertain_indexvalue_datasets(ar1_unidir(c_xy = 0.5), npts, vars, tstep = tstep,
    d_xind = d_xind, d_yind = d_yind,
    d_xval = d_xval, d_yval = d_yval);

time_grid = -20:100:2540
n_draws = 10000 # draws per uncertain value
n_bins = length(time_grid) - 1

# Values in each bin represented as RawValues: `bin` returns the bin
# centres and a vector of per-bin draw vectors.
b = BinnedResampling(RawValues, time_grid, n_draws)
bc, vs = bin(Y, b);
@test vs isa Vector{Vector{T}} where T
@test length(vs) == n_bins
@test sum(length.(vs)) == length(Y)*n_draws

# Values in each bin represented as UncertainScalarKDE
b_kde = BinnedResampling(UncertainScalarKDE, time_grid, n_draws)
Y_binned = bin(Y, b_kde);
@test Y_binned isa AbstractUncertainIndexValueDataset

# Values in each bin represented as UncertainScalarPopulation
b_pop = BinnedResampling(UncertainScalarPopulation, time_grid, n_draws)
Y_binned = bin(Y, b_pop);
@test Y_binned isa AbstractUncertainIndexValueDataset
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1512 | using DynamicalSystemsBase
# One iteration of a unidirectionally coupled (x drives y) pair of AR(1)
# processes with additive Gaussian noise of standard deviation σ.
# (Duplicated from test_bin_BinnedResampling.jl so this file is standalone.)
function eom_ar1_unidir(x, p, n)
    a₁, b₁, c_xy, σ = (p...,)
    x, y = (x...,)
    ξ₁ = rand(Normal(0, σ))
    ξ₂ = rand(Normal(0, σ))
    dx = a₁*x + ξ₁
    dy = b₁*y + c_xy*x + ξ₂
    return SVector{2}(dx, dy)
end
# A discrete dynamical system of two unidirectionally coupled AR(1)
# processes; `c_xy` controls the coupling strength from x to y.
function ar1_unidir(;uᵢ = rand(2), a₁ = 0.90693, b₁ = 0.40693, c_xy = 0.5, σ = 0.40662)
    p = [a₁, b₁, c_xy, σ]
    DiscreteDynamicalSystem(eom_ar1_unidir, uᵢ, p)
end
# As for BinnedResampling, but each point carries a sampling weight
# (`BinnedWeightedResampling`), tested with all three bin representations.
vars = (1, 2)
npts, tstep = 50, 50
d_xind = Uniform(2.5, 5.5)
d_yind = Uniform(2.5, 5.5)
d_xval = Uniform(0.01, 0.2)
d_yval = Uniform(0.01, 0.2)

X, Y = example_uncertain_indexvalue_datasets(ar1_unidir(c_xy = 0.5), npts, vars, tstep = tstep,
    d_xind = d_xind, d_yind = d_yind,
    d_xval = d_xval, d_yval = d_yval);

time_grid = -20:100:2540
n_draws = 10000 # draws per uncertain value
n_bins = length(time_grid) - 1
wts = rand(length(X))   # one sampling weight per point

# Values in each bin represented as RawValues
b = BinnedWeightedResampling(RawValues, time_grid, wts, n_draws)
bc, vs = bin(Y, b);
@test vs isa Vector{Vector{T}} where T
@test length(vs) == n_bins

# Values in each bin represented as UncertainScalarKDE
b_kde = BinnedWeightedResampling(UncertainScalarKDE, time_grid, wts, n_draws)
Y_binned = bin(Y, b_kde);
@test Y_binned isa AbstractUncertainIndexValueDataset

# Values in each bin represented as UncertainScalarPopulation
b_pop = BinnedWeightedResampling(UncertainScalarPopulation, time_grid, wts, n_draws)
Y_binned = bin(Y, b_pop);
@test Y_binned isa AbstractUncertainIndexValueDataset
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 184 | n_draws = 100
grid = 0:10:500

# A binned-mean scheme summarises each bin by a single value, so it must
# subtype AbstractBinnedSummarisedResampling.
scheme = BinnedMeanResampling(grid, n_draws)
@test scheme isa BinnedMeanResampling
@test scheme isa AbstractBinnedSummarisedResampling
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 220 | n_draws = 100
grid = 0:10:500
wts = rand(10)

# The weighted binned-mean scheme is also a summarised binned resampling.
scheme = BinnedMeanWeightedResampling(grid, wts, n_draws)
@test scheme isa BinnedMeanWeightedResampling
@test scheme isa AbstractBinnedSummarisedResampling
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1257 | n_draws = 100
grid = 0:10:500

# Default construction (no bin representation specified).
scheme = BinnedResampling(grid, n_draws)
@test scheme isa BinnedResampling
@test scheme isa AbstractBinnedUncertainValueResampling

# Each bin may be represented as a kernel density estimate, an equiprobable
# population, or the raw resampled values (RawValues avoids storing weights).
# Both the keyword and the positional constructor must parameterise the
# scheme accordingly.
for bin_representation in (UncertainScalarKDE, UncertainScalarPopulation, RawValues)
    @test BinnedResampling(grid, n_draws, bin_repr = bin_representation) isa
        BinnedResampling{bin_representation}
    @test BinnedResampling(bin_representation, grid, n_draws) isa
        BinnedResampling{bin_representation}
end
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1428 | n_draws = 100
grid = 0:10:500
wts = Weights(rand(10))

# Default construction (no bin representation specified).
scheme = BinnedWeightedResampling(grid, wts, n_draws)
@test scheme isa BinnedWeightedResampling
@test scheme isa AbstractBinnedUncertainValueResampling

# As for BinnedResampling: each bin may be represented as a KDE estimate,
# an equiprobable population, or the raw resampled values, via either the
# keyword or the positional constructor.
for bin_representation in (UncertainScalarKDE, UncertainScalarPopulation, RawValues)
    @test BinnedWeightedResampling(grid, wts, n_draws, bin_repr = bin_representation) isa
        BinnedWeightedResampling{bin_representation}
    @test BinnedWeightedResampling(bin_representation, grid, wts, n_draws) isa
        BinnedWeightedResampling{bin_representation}
end
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1467 | N = 10
#################################
# ConstrainedIndexValueResampling
#################################
# Truncate each of the indices for x at 0.8 their standard deviation around the mean
constraints_x_inds = TruncateStd(0.8)
# Truncate each of the indices for y at 1.5 their standard deviation around the mean
constraints_y_inds = TruncateStd(1.5)
# Truncate each of the values of x at the 20th percentile range
constraints_x_vals = [TruncateQuantiles(0.4, 0.6) for i = 1:N];
# Truncate each of the values of x at the 80th percentile range
constraints_y_vals = [TruncateQuantiles(0.1, 0.9) for i = 1:N];
cs_x = (constraints_x_inds, constraints_x_vals)
cs_y = (constraints_y_inds, constraints_y_vals)
resampling_idxval_scheme = ConstrainedIndexValueResampling(cs_x, cs_y, cs_x)
@test length(resampling_idxval_scheme) == 3
# There should be two
@test length(resampling_idxval_scheme[1]) == 2
@test length(resampling_idxval_scheme[2]) == 2
@test resampling_idxval_scheme[1][1] isa SamplingConstraint
@test resampling_idxval_scheme[1][2] isa Vector{<:SamplingConstraint}
@test length(resampling_idxval_scheme[1][2]) == N
@test resampling_idxval_scheme[2][1] isa SamplingConstraint
@test resampling_idxval_scheme[2][2] isa Vector{<:SamplingConstraint}
@test length(resampling_idxval_scheme[2][2]) == N
@test resampling_idxval_scheme.n == 1
resampling_idxval_scheme = ConstrainedIndexValueResampling(105, cs_x, cs_y)
@test resampling_idxval_scheme.n == 105 | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 257 | v1 = ConstrainedResampling(
TruncateStd(1.5),
NoConstraint(),
[i % 2 == 0 ? TruncateQuantiles(0.1, 0.1*i) : TruncateStd(0.1*i) for i = 2:10])
@test v1 isa ConstrainedResampling
@test length(v1) == 3
@test v1[1] |> typeof <: SamplingConstraint | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 311 | c1 = ConstrainedValueResampling(TruncateStd(1), TruncateStd(1), TruncateQuantiles(0.1, 0.2))
# `c2` passes an explicit number of draws (100) as the leading argument;
# `c1` (constructed above) omits it and falls back to a single draw.
c2 = ConstrainedValueResampling(100, TruncateStd(1), TruncateStd(1), TruncateQuantiles(0.1, 0.2))
# Three constraints were supplied, so both schemes should carry the type
# parameter 3; only the draw count `n` differs between them.
@test c1 isa ConstrainedValueResampling{3}
@test c1.n == 1
@test c2 isa ConstrainedValueResampling{3}
@test c2.n == 100 | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 55 | r = RandomSequences(10, 10)
@test r isa RandomSequences | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 282 | seq_intp = SequentialInterpolatedResampling(StrictlyIncreasing(), RegularGrid(0, N, 2))
seq_intp2 = SequentialInterpolatedResampling(StrictlyIncreasing(), RegularGrid(1:1:100))
@test seq_intp isa SequentialInterpolatedResampling
@test seq_intp2 isa SequentialInterpolatedResampling | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 85 | seq = SequentialResampling(StrictlyIncreasing())
@test seq isa SequentialResampling
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 703 | # Test interpolation methods
x = 1:10
y = rand(10)
xgrid = 0:0.5:10
# Test that interpolation functions work
y_intp = linear_interpolation(x, y)
@test length(y_intp(xgrid)) == length(xgrid)
@test y_intp(xgrid) isa Vector{Float64}
# Test that interpolation work for uncertain index-value datasets
timeinds = [UncertainValue(Normal, i, 0.1) for i = 1:5]
measurements = [UncertainValue(Gamma, i, i + rand(1:2)) for i = 1:5]
d = UncertainIndexValueDataset(timeinds, measurements)
grid = RegularGrid(0, 5, 0.4)
@test length(resample(d, grid)) == 2
@test length(resample(d, StrictlyIncreasing(), grid)) == 2
@test create_interp_scheme(1:10, rand(10), RegularGrid(0, 1, 0.1)) isa InterpolationScheme1D | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 3042 | # Define some example data
import KernelDensity.UnivariateKDE
o1 = UncertainValue(Normal, 0, 0.5)
o2 = UncertainValue(Normal, 2.0, 0.1)
o3 = UncertainValue(Uniform, 0, 4)
o4 = UncertainValue(Uniform, rand(100))
o5 = UncertainValue(Beta, 4, 5)
o6 = UncertainValue(Gamma, 4, 5)
o7 = UncertainValue(Frechet, 1, 2)
o8 = UncertainValue(BetaPrime, 1, 2)
o9 = UncertainValue(BetaBinomial, 10, 3, 2)
o10 = UncertainValue(Binomial, 10, 0.3)
o11 = UncertainValue(rand(100), rand(100))
o12 = UncertainValue(2)
o13 = UncertainValue(2.3)
o14 = UncertainValue([2, 3, 4], [0.3, 0.4, 0.3])
o15 = UncertainValue([rand() for i = 1:100])
uvals1 = [o1, o2, o3, o4, o5, o6, o7, o8, o9, o11, o12, o13, o14, o1, o2, o3, o15, o4, o5,
o6, o7, o8, o9, o11, o12, o13, o14, o1, o2, o3, o4, o5, o6, o7, o8, o9, o11, o12, o13,
o14, o1, o2, o3, o4, o5, o6, o7, o8, o9, o11, o12, o13, o14]
uvals2 = [uvals1[i] for i in [rand(1:length(uvals1)) for i = 1:length(uvals1)]]
idxs = UncertainIndexDataset([UncertainValue(Normal, i, 0.8) for i = 1:length(uvals1)])
UVD = UncertainValueDataset(uvals1)
UD = UncertainDataset(uvals2)
UIDX = UncertainIndexDataset(idxs)
UV = UncertainValueDataset(uvals1)
# Resampling interface should work for all subtypes of AbstractUncertainValueDataset,
# both with and without sampling constraints.
# We need to check UncertainDataset, UncertainIndexDataset, and UncertainValueDataset.
n = 3
#resample_elwise(uvd::AbstractUncertainValueDataset, n::Int)
@test length(resample_elwise(UIDX, n)) == length(UIDX)
@test length(resample_elwise(UIDX, n)[1]) == n
@test length(resample_elwise(UD, n)) == length(UD)
@test length(resample_elwise(UD, n)[1]) == n
@test length(resample_elwise(UVD, n)) == length(UVD)
@test length(resample_elwise(UVD, n)[1]) == n
@test length(resample_elwise(UVD)[1]) == 1
@test length(resample_elwise(UVD)) == length(UVD)
n = 5
# Element-wise resampling with one constraint per element: the result must have
# one entry per dataset element, and each entry must hold `n` draws.
@test length(resample_elwise(UD, [TruncateQuantiles(0.1, 0.9) for i = 1:length(UD)], n)) == length(UD)
@test length(resample_elwise(UD, [TruncateQuantiles(0.1, 0.9) for i = 1:length(UD)], n)[1]) == n
# NOTE(fix): the outer-length checks for UVD and UIDX previously compared
# against `length(UD)`. The datasets happen to be equally long here, so the
# tests passed, but each result should be checked against the length of the
# dataset that was actually resampled.
@test length(resample_elwise(UVD, [TruncateQuantiles(0.1, 0.9) for i = 1:length(UVD)], n)) == length(UVD)
@test length(resample_elwise(UVD, [TruncateQuantiles(0.1, 0.9) for i = 1:length(UVD)], n)[1]) == n
@test length(resample_elwise(UIDX, [TruncateQuantiles(0.1, 0.9) for i = 1:length(UIDX)], n)) == length(UIDX)
@test length(resample_elwise(UIDX, [TruncateQuantiles(0.1, 0.9) for i = 1:length(UIDX)], n)[1]) == n
# resample_elwise(uvd::AbstractUncertainValueDataset, constraint::SamplingConstraint, n::Int)
@test length(resample_elwise(UIDX, TruncateQuantiles(0.1, 0.9), n)) == length(UIDX)
@test length(resample_elwise(UIDX, TruncateQuantiles(0.1, 0.9), n)[1]) == n
@test length(resample_elwise(UD, TruncateQuantiles(0.1, 0.9), n)) == length(UD)
@test length(resample_elwise(UD, TruncateQuantiles(0.1, 0.9), n)[1]) == n
@test length(resample_elwise(UVD, TruncateQuantiles(0.1, 0.9), n)) == length(UVD)
@test length(resample_elwise(UVD, TruncateQuantiles(0.1, 0.9), n)[1]) == n | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 3181 | import KernelDensity.UnivariateKDE
o1 = UncertainValue(Normal, 0, 0.5)
o2 = UncertainValue(Normal, 2.0, 0.1)
o3 = UncertainValue(Uniform, 0, 4)
o4 = UncertainValue(Uniform, rand(100))
o5 = UncertainValue(Beta, 4, 5)
o6 = UncertainValue(Gamma, 4, 5)
o7 = UncertainValue(Frechet, 1, 2)
o8 = UncertainValue(BetaPrime, 1, 2)
o9 = UncertainValue(BetaBinomial, 10, 3, 2)
o10 = UncertainValue(Binomial, 10, 0.3)
o11 = UncertainValue(rand(100), rand(100))
o12 = UncertainValue(2)
o13 = UncertainValue(2.3)
o14 = UncertainValue([2, 3, 4], [0.3, 0.4, 0.1])
uvals = [o1, o2, o3, o4, o5, o6, o7, o8, o9, o11, o12, o13, o14]
D = UncertainDataset(uvals)
UV = UncertainValueDataset(uvals)
UIDX = UncertainIndexDataset(uvals)
UIV = UncertainIndexValueDataset(UIDX, UV)
n = length(D)
@test resample(D) isa Vector
@test resample(UV) isa Vector
@test resample(UIDX) isa Vector
@test resample(D, 10) isa Vector
@test resample(UV, 10) isa Vector
#@test resample(UIV, 10) isa Vector
######################################################################
# Resampling datasets consisting of uncertain values furnished by
# theoretical distributions
######################################################################
measurements = [UncertainValue(Normal, 0, 0.1) for i = 1:5]
d = UncertainDataset(measurements)
@test resample(d) isa Vector{T} where T <: Real
@test resample(d, 10) isa Vector{Vector{T}} where T <: Real
##########################################################################
# Resampling datasets consisting of uncertain values furnished by
# theoretical distributions with parameters estimated from empirical data.
##########################################################################
measurements = [UncertainValue(Normal, rand(Normal(), 1000)) for i = 1:5]
d = UncertainDataset(measurements)
@test resample(d) isa Vector{T} where T <: Real
@test resample(d, 10) isa Vector{Vector{T}} where T <: Real
##########################################################################
# Resampling datasets consisting of uncertain values furnished by
# kernel density estimations to the distributions.
##########################################################################
measurements = [UncertainValue(UnivariateKDE, rand(Normal(), 1000)) for i = 1:5]
d = UncertainDataset(measurements)
@test resample(d) isa Vector{T} where T <: Real
@test resample(d, 10) isa Vector{Vector{T}} where T <: Real
##########################################################################
# Resampling datasets consisting of a mixture of different types of
# uncertain values
##########################################################################
measurements = [UncertainValue(rand(100));
UncertainValue(Normal, rand(Normal(), 100));
UncertainValue(Normal, 0, 2)]
d = UncertainDataset(measurements)
@test resample(d) isa Vector{T} where T <: Real
@test resample(d, 10) isa Vector{Vector{T}} where T <: Real
iv = UncertainIndexValueDataset(measurements, measurements)
@test resample(iv) isa Tuple{Vector{T}, Vector{T}} where T <: Real
@test resample(iv, 5) isa Vector{Tuple{Vector{T}, Vector{T}}} where T <: Real
@test length(resample(iv, 5)) == 5
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 147 |
UI = UncertainIndexDataset(example_uidxs)
n = 3
@test resample(UI) isa Vector{<:Real}
@test resample(UI, n) isa Vector{Vector{T}} where T <: Real | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 401 | ##########################################
# Some uncertain datasets
##########################################
UI = UncertainIndexDataset(uidxs)
##########################################
# Apply functions to datasets `n` times
##########################################
n = 3
@test resample(median, UI, n) isa Vector{T} where T <: Real
@test resample(cor, UI, UI, n) isa Vector{T} where T <: Real
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1718 |
constraints = [i % 2 == 0 ? TruncateStd(1 + rand()) : TruncateQuantiles(0.1, 0.9)
for i in 1:length(example_uidxs)]
UI = UncertainIndexDataset(example_uidxs)
# A single constraint applied to each element of the dataset
n = 3
@test resample(UI, NoConstraint()) isa Vector{T} where {T <: Real}
@test resample(UI, TruncateLowerQuantile(0.2)) isa Vector{T} where {T <: Real}
@test resample(UI, TruncateUpperQuantile(0.2)) isa Vector{T} where {T <: Real}
@test resample(UI, TruncateQuantiles(0.2, 0.8)) isa Vector{T} where {T <: Real}
@test resample(UI, TruncateMaximum(0.2)) isa Vector{T} where {T <: Real}
@test resample(UI, TruncateMinimum(0.2)) isa Vector{T} where {T <: Real}
@test resample(UI, TruncateRange(-40, 10)) isa Vector{T} where {T <: Real}
@test resample(UI, TruncateStd(1)) isa Vector{T} where {T <: Real}
@test resample(UI, NoConstraint(), n) isa Vector{Vector{T}} where {T <: Real}
@test resample(UI, TruncateLowerQuantile(0.2), n) isa Vector{Vector{T}} where {T <: Real}
@test resample(UI, TruncateUpperQuantile(0.2), n) isa Vector{Vector{T}} where {T <: Real}
@test resample(UI, TruncateQuantiles(0.2, 0.8), n) isa Vector{Vector{T}} where {T <: Real}
@test resample(UI, TruncateMaximum(0.2), n) isa Vector{Vector{T}} where {T <: Real}
@test resample(UI, TruncateMinimum(0.2), n) isa Vector{Vector{T}} where {T <: Real}
@test resample(UI, TruncateRange(-40, 10), n) isa Vector{Vector{T}} where {T <: Real}
@test resample(UI, TruncateStd(1), n) isa Vector{Vector{T}} where {T <: Real}
# Different constraints applied to each element of the dataset
n = 3
@test resample(UI, constraints) isa Vector{T} where {T <: Real}
@test resample(UI, constraints, n) isa Vector{Vector{T}} where {T <: Real}
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 218 | UI = UncertainValueDataset(example_uidxs)
UV = UncertainValueDataset(example_uvals)
UIV = UncertainIndexValueDataset(UI, UV)
n = 3
@test resample(UIV) isa Tuple{Vector}
@test resample(UIV, n) isa Vector{Tuple{Vector}} | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 157 | UV = UncertainValueDataset(example_uvals)
n = 3
@test resample(UV) isa Vector{T} where T <: Real
@test resample(UV, n) isa Vector{Vector{T}} where T <: Real | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 411 |
##########################################
# Some uncertain datasets
##########################################
UV = UncertainValueDataset(example_uvals)
##########################################
# Apply functions to datasets `n` times
##########################################
n = 3
@test resample(median, UV, n) isa Vector{T} where T <: Real
@test resample(cor, UV, UV, n) isa Vector{T} where T <: Real
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1391 | constraints = [TruncateStd(1 + rand()) for i in 1:length(example_uvals)]
UV = UncertainValueDataset(example_uvals)
# A single constraint applied to each element of the dataset
n = 5
@test resample(UV, NoConstraint()) isa Vector{Real}
@test resample(UV, TruncateLowerQuantile(0.2)) isa Vector{Real}
@test resample(UV, TruncateUpperQuantile(0.2)) isa Vector{Real}
@test resample(UV, TruncateQuantiles(0.2, 0.8)) isa Vector{Real}
@test resample(UV, TruncateMaximum(2)) isa Vector{Real}
@test resample(UV, TruncateMinimum(0.0)) isa Vector{Real}
@test resample(UV, TruncateRange(-40, 10)) isa Vector{Real}
@test resample(UV, TruncateStd(1)) isa Vector{Real}
@test resample(UV, NoConstraint(), n) isa Vector{Vector{Real}}
@test resample(UV, TruncateLowerQuantile(0.2), n) isa Vector{Vector{Real}}
@test resample(UV, TruncateUpperQuantile(0.2), n) isa Vector{Vector{Real}}
@test resample(UV, TruncateQuantiles(0.2, 0.8), n) isa Vector{Vector{Real}}
@test resample(UV, TruncateMaximum(2), n) isa Vector{Vector{Real}}
@test resample(UV, TruncateMinimum(0), n) isa Vector{Vector{Real}}
@test resample(UV, TruncateRange(-40, 10), n) isa Vector{Vector{Real}}
@test resample(UV, TruncateStd(1), n) isa Vector{Vector{Real}}
# Different constraints applied to each element of the dataset
n = 3
@test resample(UV, constraints) isa Vector{Real}
@test resample(UV, constraints, n) isa Vector{Vector{Real}} | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 2089 | a = [UncertainValue(Normal, 1, 0.5) for i = 1:10]
b = UncertainValue(rand(1000))
c = UncertainValue(Uniform, rand(10000))
s = rand(-20:20, 100)
e = UncertainValue(s, rand(length(s)))
f = UncertainValue(2)
g = UncertainValue(3.1)
uidxs = UncertainIndexDataset([a; b; c; e; f; g])
uvals = UncertainValueDataset([a; b; c; e; f; g])
iv = UncertainIndexValueDataset(uidxs, uvals)
@test resample(iv, NoConstraint()) isa Tuple{Vector{Float64}, Vector{Float64}}
@test resample(iv, TruncateLowerQuantile(0.2)) isa Tuple{Vector{Float64}, Vector{Float64}}
@test resample(iv, TruncateUpperQuantile(0.2)) isa Tuple{Vector{Float64}, Vector{Float64}}
@test resample(iv, TruncateQuantiles(0.2, 0.8)) isa Tuple{Vector{Float64}, Vector{Float64}}
@test resample(iv, TruncateMaximum(10)) isa Tuple{Vector{Float64}, Vector{Float64}}
@test resample(iv, TruncateMinimum(-20)) isa Tuple{Vector{Float64}, Vector{Float64}}
@test resample(iv, TruncateRange(-20, 20)) isa Tuple{Vector{Float64}, Vector{Float64}}
@test resample(iv, NoConstraint(), 5) isa Vector{Tuple{Vector{Float64}, Vector{Float64}}}
@test resample(iv, TruncateLowerQuantile(0.2), 5) isa Vector{Tuple{Vector{Float64}, Vector{Float64}}}
@test resample(iv, TruncateUpperQuantile(0.2), 5) isa Vector{Tuple{Vector{Float64}, Vector{Float64}}}
@test resample(iv, TruncateQuantiles(0.2, 0.8), 5) isa Vector{Tuple{Vector{Float64}, Vector{Float64}}}
@test resample(iv, TruncateMaximum(10), 5) isa Vector{Tuple{Vector{Float64}, Vector{Float64}}}
@test resample(iv, TruncateMinimum(-20), 5) isa Vector{Tuple{Vector{Float64}, Vector{Float64}}}
@test resample(iv, TruncateRange(-20, 20), 5) isa Vector{Tuple{Vector{Float64}, Vector{Float64}}}
@test length(resample(iv, NoConstraint(), 5)) == 5
@test length(resample(iv, TruncateLowerQuantile(0.2), 5)) == 5
@test length(resample(iv, TruncateUpperQuantile(0.2), 5)) == 5
@test length(resample(iv, TruncateQuantiles(0.2, 0.8), 5)) == 5
@test length(resample(iv, TruncateMaximum(10), 5)) == 5
@test length(resample(iv, TruncateMinimum(-20), 5)) == 5
@test length(resample(iv, TruncateRange(-20, 20), 5)) == 5
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1506 |
#Vectors of uncertain values
uvals_x = [UncertainValue(Normal, rand(Normal(0, 5)), abs(rand(Normal(0, 3)))) for i = 1:100]
uvals_y = [UncertainValue(Normal, rand(Normal(0, 5)), abs(rand(Normal(0, 3)))) for i = 1:100];
# UncertainIndexDataset
UVX = UncertainValueDataset(uvals_x)
UVY = UncertainValueDataset(uvals_y)
constraints = [[TruncateStd(0.3) for x in 1:50]; [TruncateQuantiles(0.3, 0.7) for x in 1:50]]
@test resample(UVX, constraints) isa Vector{<:Real}
@test resample(UVY, constraints) isa Vector{<:Real}
# Test for some more complicated uncertain values
#----------------------------------------------
a = [UncertainValue(Normal, 1, 0.5) for i = 1:10]
b = UncertainValue(rand(1000))
c = UncertainValue(Uniform, rand(10000))
s = rand(-20:20, 100)
e = UncertainValue(s, rand(length(s)))
f = UncertainValue(2)
g = UncertainValue(3.1)
uvals = [a; b; c; e; f; g]
udata = UncertainValueDataset(uvals)
@test resample(udata, NoConstraint(), 10) isa Vector{Vector{Real}}
@test resample(udata, TruncateLowerQuantile(0.2), 10) isa Vector{Vector{Real}}
@test resample(udata, TruncateUpperQuantile(0.2), 10) isa Vector{Vector{Real}}
@test resample(udata, TruncateQuantiles(0.2, 0.8), 10) isa Vector{Vector{Real}}
@test resample(udata, TruncateMaximum(0.2), 10) isa Vector{Vector{Real}}
@test resample(udata, TruncateMinimum(0.2), 10) isa Vector{Vector{Real}}
@test resample(udata, TruncateRange(-40, 10), 10) isa Vector{Vector{Real}}
@test resample(udata, TruncateStd(1), 10) isa Vector{Vector{Real}}
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 2685 | ################
# Example data
################
N = 50
x_uncertain = [UncertainValue(Normal, x, rand(Uniform(0.1, 0.8))) for x in rand(N)]
y_uncertain = [UncertainValue(Normal, y, rand(Uniform(0.1, 0.8))) for y in rand(N)]
x = UncertainValueDataset(x_uncertain)
y = UncertainValueDataset(y_uncertain)
time_uncertain = [UncertainValue(Normal, i, 1) for i = 1:length(x)];
time_certain = [CertainValue(i) for i = 1:length(x)];
timeinds_x = UncertainIndexDataset(time_uncertain)
timeinds_y = UncertainIndexDataset(time_certain)
X = UncertainIndexValueDataset(timeinds_x, x)
Y = UncertainIndexValueDataset(timeinds_y, y);
###########################################################
# SequentialResampling
###########################################################
seq = SequentialResampling(StrictlyIncreasing())
@test resample(X, seq) isa NTuple{2, Vector{<:Real}}
@test resample(X, seq)[1] |> length == N
@test resample(X, seq)[2] |> length == N
@test resample(Y, seq) isa NTuple{2, Vector{<:Real}}
@test resample(Y, seq)[1] |> length == N
@test resample(Y, seq)[2] |> length == N
###########################################################
# SequentialInterpolatedResampling
###########################################################
seq_intp = SequentialInterpolatedResampling(StrictlyIncreasing(), RegularGrid(0, N, 2))
@test resample(X, seq_intp) isa Tuple{AbstractRange, Vector{<:Real}}
@test resample(X, seq_intp)[1] |> length == length(0:2:N)
@test resample(X, seq_intp)[2] |> length == length(0:2:N)
@test resample(Y, seq_intp) isa Tuple{AbstractRange, Vector{<:Real}}
@test resample(Y, seq_intp)[1] |> length == length(0:2:N)
@test resample(Y, seq_intp)[2] |> length == length(0:2:N)
#################################
# ConstrainedValueResampling
#################################
c = ConstrainedValueResampling(10, TruncateStd(1))
r = resample(timeinds_y, c)
@test length(r) == c.n
@test length(r[1]) == N
r = resample(timeinds_x, c)
@test length(r) == c.n
@test length(r[1]) == N
r = resample(x, c)
@test length(r) == c.n
@test length(r[1]) == N
r = resample(y, c)
@test length(r) == c.n
@test length(r[1]) == N
#################################
# ConstrainedIndexValueResampling
#################################
c = ConstrainedIndexValueResampling(10, (TruncateStd(1), TruncateStd(1)))
rx = resample(X, c)
ry = resample(Y, c)
@test rx isa Vector{Tuple{Vector{Float64},Vector{Float64}}}
@test ry isa Vector{Tuple{Vector{Float64},Vector{Float64}}}
@test length(rx) == c.n
@test length(ry) == c.n
@test length(rx[1]) == 2
@test length(ry[1]) == 2
@test length(rx[1][1]) == N
@test length(ry[1][2]) == N
@test length(rx[2][1]) == N
@test length(ry[2][2]) == N | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 2776 | using Test, UncertainData
@testset "StrictlyDecreasing" begin
# Create some uncertain data with decreasing magnitude and zero overlap between values,
# so we're guaranteed that a strictly decreasing sequence through the dataset exists.
N = 10
t = [i <= N/2 ? CertainValue(float(i)) : UncertainValue(Normal, i, 1) for i = N:-1:1]
T = UncertainIndexDataset(t)
iv = UncertainIndexValueDataset(t, t)
test_cs = [
NoConstraint(),
TruncateLowerQuantile(0.2),
TruncateUpperQuantile(0.2),
TruncateQuantiles(0.2, 0.8),
TruncateMaximum(50),
TruncateMinimum(-50),
TruncateRange(-50, 50),
TruncateStd(1)
]
test_seqs = [
StrictlyDecreasing(StartToEnd())
]
@testset "$(test_seqs[i])" for i in 1:length(test_seqs)
@test resample(t, StrictlyDecreasing()) isa Vector{Float64}
@test resample(T, StrictlyDecreasing()) isa Vector{Float64}
iv_draw = resample(iv, StrictlyDecreasing())
@test iv_draw isa Tuple{Vector{Float64}, Vector{Float64}}
@testset "$(test_cs[i])" for i in 1:length(test_cs)
c = test_cs[i] # sequential + single constraint
cs = sample(test_cs, N) # sequential + multiple constraints
@test resample(t, StrictlyDecreasing(), c) isa Vector{Float64}
@test resample(t, StrictlyDecreasing(), cs) isa Vector{Float64}
@test resample(T, StrictlyDecreasing(), c) isa Vector{Float64}
@test resample(T, StrictlyDecreasing(), cs) isa Vector{Float64}
# Single extra constraint
iv_draw = resample(iv, StrictlyDecreasing(), c)
@test iv_draw isa Tuple{Vector{Float64}, Vector{Float64}}
@test all(diff(iv_draw[1]) .< 0)
iv_draw = resample(iv, StrictlyDecreasing(), c, c)
@test iv_draw isa Tuple{Vector{Float64}, Vector{Float64}}
@test all(diff(iv_draw[1]) .< 0)
# Multiple extra constraints
iv_draw = resample(iv, StrictlyDecreasing(), cs)
#@test iv_draw isa Tuple{Vector{Float64}, Vector{Float64}}
#@test all(diff(iv_draw[1]) .< 0)
# iv_draw = resample(iv, StrictlyDecreasing(), cs, cs)
# @test iv_draw isa Tuple{Vector{Float64}, Vector{Float64}}
# @test all(diff(iv_draw[1]) .< 0)
# iv_draw = resample(iv, StrictlyDecreasing(), c, cs)
# @test iv_draw isa Tuple{Vector{Float64}, Vector{Float64}}
# @test all(diff(iv_draw[1]) .< 0)
# iv_draw = resample(iv, StrictlyDecreasing(), cs, c)
# @test iv_draw isa Tuple{Vector{Float64}, Vector{Float64}}
            # @test all(diff(iv_draw[1]) .< 0)
end
end
end | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 2801 | using Test, UncertainData
using StatsBase
@testset "StrictlyIncreasing" begin
# Create some uncertain data with decreasing magnitude and zero overlap between values,
# so we're guaranteed that a strictly decreasing sequence through the dataset exists.
N = 10
t = [ i <= N/2 ? CertainValue(float(i)) : UncertainValue(Normal, i, 1) for i = 1:N]
T = UncertainIndexDataset(t)
iv = UncertainIndexValueDataset(t, t)
test_cs = [
NoConstraint(),
TruncateLowerQuantile(0.2),
TruncateUpperQuantile(0.2),
TruncateQuantiles(0.2, 0.8),
TruncateMaximum(50),
TruncateMinimum(-50),
TruncateRange(-50, 50),
TruncateStd(1)
]
test_seqs = [
StrictlyIncreasing(StartToEnd())
]
@testset "$(test_seqs[i])" for i in 1:length(test_seqs)
@test resample(t, StrictlyIncreasing()) isa Vector{Float64}
@test resample(T, StrictlyIncreasing()) isa Vector{Float64}
iv_draw = resample(iv, StrictlyIncreasing())
@test iv_draw isa Tuple{Vector{Float64}, Vector{Float64}}
@testset "$(test_cs[i])" for i in 1:length(test_cs)
c = test_cs[i] # sequential + single constraint
cs = sample(test_cs, N) # sequential + multiple constraints
@test resample(t, StrictlyIncreasing(), c) isa Vector{Float64}
@test resample(t, StrictlyIncreasing(), cs) isa Vector{Float64}
@test resample(T, StrictlyIncreasing(), c) isa Vector{Float64}
@test resample(T, StrictlyIncreasing(), cs) isa Vector{Float64}
# Single extra constraint
iv_draw = resample(iv, StrictlyIncreasing(), c)
@test iv_draw isa Tuple{Vector{Float64}, Vector{Float64}}
@test all(diff(iv_draw[1]) .> 0)
# Multiple extra constraints
iv_draw = resample(iv, StrictlyIncreasing(), cs)
@test iv_draw isa Tuple{Vector{Float64}, Vector{Float64}}
@test all(diff(iv_draw[1]) .> 0)
#iv_draw = resample(iv, StrictlyIncreasing(), c, c)
#@test iv_draw isa Tuple{Vector{Float64}, Vector{Float64}}
#@test all(diff(iv_draw[1]) .> 0)
# iv_draw = resample(iv, StrictlyIncreasing(), cs, cs)
# @test iv_draw isa Tuple{Vector{Float64}, Vector{Float64}}
# @test all(diff(iv_draw[1]) .> 0)
# iv_draw = resample(iv, StrictlyIncreasing(), c, cs)
# @test iv_draw isa Tuple{Vector{Float64}, Vector{Float64}}
# @test all(diff(iv_draw[1]) .> 0)
# iv_draw = resample(iv, StrictlyIncreasing(), cs, c)
# @test iv_draw isa Tuple{Vector{Float64}, Vector{Float64}}
# @test all(diff(iv_draw[1]) .> 0)
end
end
end | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 531 | using Test
o4 = UncertainValue(Uniform, rand(100))
o5 = UncertainValue(Beta, 4, 5)
o6 = UncertainValue(Gamma, 4, 5)
o7 = UncertainValue(rand(1000))
o8 = UncertainValue(Normal, 0, 2)
# 2-tuples
t1 = (o4, o5)
# n-tuples (a 5-tuple, including scalars)
t2 = (o4, o5, o6, o7, o8, 6);
@test resample(t1) isa NTuple{2, T} where T <: Number
@test resample(t2) isa NTuple{6, T} where T <: Number
@test resample(t1, 5) isa Vector{NTuple{2, T}} where {N, T <: Number}
@test resample(t2, 5) isa Vector{NTuple{6, T}} where {N, T <: Number} | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1162 | u1 = UncertainValue(Normal, 0, 0.1)
u2 = UncertainValue(Uniform, rand(100))
u3 = UncertainValue(rand(100))
u4 = UncertainValue(Normal, 5, 0.1)
u5 = UncertainValue(Normal, 10, 1.5)
vals = [u1, u2, u3, u4, u5, 6]
wts = [1, 3, 3, 1, 0.5, 0.1]
pop = UncertainScalarPopulation(vals, wts)
@test resample(pop) isa Real
@test resample(pop, 10) isa Vector{T} where {T <: Real}
x = UncertainScalarPopulation([u1, u2, u3, u4, u5], rand(5))
vals = [pop, u5, x, u4, u5, 6]
wts = rand(length(vals))
pop2 = UncertainScalarPopulation(vals, wts)
@test resample(pop2) isa Real
@test resample(pop2, 100000) isa Vector{T} where {T <: Real}
# Create a population consisting of a mixture of different types of uncertain values,
# both theoretical, fitted from empirical distributions, and discrete populations.
u1 = UncertainValue(Normal, 0, 0.1)
u2 = UncertainValue(Uniform, rand(100))
u3 = UncertainValue(rand(100))
u4 = UncertainValue(Normal, 5, 0.1)
u5 = UncertainValue(Normal, 10, 1.5)
wts = [1, 3, 3, 1, 0.5]
vals = [u1, u2, u3, u4, u5]
pop3 = UncertainScalarPopulation(vals, wts)
@test resample(pop3) isa Real
@test resample(pop3, 100) isa Vector{T} where {T <: Real}
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 609 | x = CertainValue(2.0)
test_constraints = [
NoConstraint(),
TruncateLowerQuantile(0.2),
TruncateUpperQuantile(0.2),
TruncateQuantiles(0.2, 0.8),
TruncateMaximum(50),
TruncateMinimum(-50),
TruncateRange(-50, 50),
TruncateStd(1)
]
T = eltype(x)
@test rand(x) isa T
@test rand(x, 10) isa Vector
@test resample(x) isa T
@test resample(x, 10) isa Vector
@test all(resample(x, 10) .== x.value)
for constraint in test_constraints
@test resample(x, constraint) isa T
@test resample(x, constraint, 10) isa Vector
@test all(resample(x, constraint, 10) .== x.value)
end | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 2457 |
########################################################################
# Uncertain values constructed from distributions with known parameters
########################################################################
o1 = UncertainValue(Normal, 0, 0.5)
o2 = UncertainValue(Normal, 2.0, 0.1)
o3 = UncertainValue(Uniform, 0, 4)
o4 = UncertainValue(Uniform, rand(100))
o5 = UncertainValue(Beta, 4, 5)
o6 = UncertainValue(Gamma, 4, 5)
o7 = UncertainValue(Frechet, 1, 2)
o8 = UncertainValue(BetaPrime, 1, 2)
o9 = UncertainValue(BetaBinomial, 10, 3, 2)
o10 = UncertainValue(Binomial, 10, 0.3)
@test resample(o1) isa Float64
@test resample(o2) isa Float64
@test resample(o3) isa Float64
@test resample(o4) isa Float64
@test resample(o5) isa Float64
@test resample(o6) isa Float64
@test resample(o7) isa Float64
@test resample(o8) isa Float64
@test resample(o9) isa Int
@test resample(o10) isa Int
########################################################################
# Resampling uncertain values with constraints
########################################################################
u = UncertainValue(Normal, 0, 1)
# Resampling many times.
# NOTE(fix): the lower-bound checks below previously used `maximum`, which is
# trivially satisfied whenever any draw exceeds the bound. The binding check
# for a lower truncation is that the *smallest* draw respects the bound.
@test minimum(resample(u, TruncateLowerQuantile(0.2), 1000)) >= quantile(u.distribution, 0.2)
@test maximum(resample(u, TruncateUpperQuantile(0.7), 1000)) <= quantile(u.distribution, 0.7)
@test minimum(resample(u, TruncateMinimum(-0.1), 10)) >= -0.1
@test maximum(resample(u, TruncateMaximum(0.1), 10)) <= 0.1
# quantile truncation
c = resample(u, TruncateQuantiles(0.3, 0.9), 1000)
@test minimum(c) >= quantile(u.distribution, 0.3)
@test maximum(c) <= quantile(u.distribution, 0.9)
# standard deviation truncation (single draw, so `c` is a scalar; in Julia the
# extrema of a scalar are the scalar itself, so these checks are well-defined)
c = resample(u, TruncateStd(1))
@test maximum(c) <= 0 + std(u)
@test minimum(c) >= 0 - std(u)
# range truncation, single draw
c = resample(u, TruncateRange(-0.2, 0.2))
@test maximum(c) <= 0.2
@test minimum(c) >= -0.2
# range truncation, 100 draws
c = resample(u, TruncateRange(-0.2, 0.2), 100)
@test maximum(c) <= 0.2
@test minimum(c) >= -0.2
# Resampling once (single draws: minimum == maximum == the draw, but `minimum`
# states the intended lower-bound check correctly)
using StatsBase
@test minimum(resample(u, TruncateLowerQuantile(0.2))) >= quantile(u.distribution, 0.2)
@test maximum(resample(u, TruncateUpperQuantile(0.7))) <= quantile(u.distribution, 0.7)
@test minimum(resample(u, TruncateMinimum(-0.1))) >= -0.1
@test maximum(resample(u, TruncateMaximum(0.1))) <= 0.1
c = resample(u, TruncateQuantiles(0.3, 0.9))
@test minimum(c) >= quantile(u.distribution, 0.3)
@test maximum(c) <= quantile(u.distribution, 0.9)
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 2131 | tol = 1e-7
################################################
# Uncertain values represented by KDE estimates
################################################
# Create a random sample
d = Normal()
some_sample = rand(d, 2000)
# Create an uncertain value from the random sample by kernel density
# estimation.
uv = UncertainValue(some_sample)
@test resample(uv) isa Float64
@test resample(uv, 10) isa Vector{Float64}
@test resample(uv, NoConstraint()) isa Float64
@test resample(uv, NoConstraint(), 10) isa Vector{Float64}
r1a = resample(uv, TruncateLowerQuantile(0.2))
r1b = resample(uv, TruncateLowerQuantile(0.2), 10)
@test r1a isa Float64
@test r1b isa Vector{Float64}
#@test quantile(uv, 0.2) <= r1a + tol
##@test all(quantile(uv, 0.2) .<= r1b .+ tol)
r1a = resample(uv, TruncateUpperQuantile(0.8))
r1b = resample(uv, TruncateUpperQuantile(0.8), 10)
@test r1a isa Float64
@test r1b isa Vector{Float64}
#@test r1a <= quantile(uv, 0.8) + tol
#@test all(r1b .<= quantile(uv, 0.8) + tol)
r1a = resample(uv, TruncateQuantiles(0.2, 0.8))
r1b = resample(uv, TruncateQuantiles(0.2, 0.8), 10)
@test r1a isa Float64
@test r1b isa Vector{Float64}
#@test quantile(uv, 0.2) - tol <= r1a <= quantile(uv, .8) + tol
#@test all(quantile(uv, 0.2) - tol .<= r1b .<= quantile(uv, .8) + tol)
r1a = resample(uv, TruncateMinimum(-0.5))
r1b = resample(uv, TruncateMinimum(-0.5), 10)
@test r1a isa Float64
@test r1b isa Vector{Float64}
#@test -0.5 <= r1a + tol
#@test all(-0.5 .<= r1b .+ tol)
r1a = resample(uv, TruncateMaximum(0.5))
r1b = resample(uv, TruncateMaximum(0.5), 10)
@test r1a isa Float64
@test r1b isa Vector{Float64}
#@test r1a <= 0.5 + tol
#@test all(r1b .<= 0.5 + tol)
r1a = resample(uv, TruncateRange(-0.5, 0.5))
r1b = resample(uv, TruncateRange(-0.5, 0.5), 10)
@test r1a isa Float64
@test r1b isa Vector{Float64}
#@test -0.5 - tol <= r1a <= 0.5 + tol
#@test all(-0.5 - tol .<= r1b .<= 0.5 + tol)
# TruncateStd will not work, but with the default fallback we should still be able
# to resample.
r1a = resample(uv, TruncateStd(1))
r1b = resample(uv, TruncateStd(1), 10)
@test r1a isa Float64
@test r1b isa Vector{Float64}
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 692 |
# resample(uvals::Vector{AbstractUncertainValue}): one draw per value.
@test resample(uvals) isa Vector
@test length(resample(uvals)) == length(uvals)
# resample(uvals::Vector{AbstractUncertainValue}, n::Int): `n` independent
# draws of the entire collection.
n = 2
@test resample(uvals, n) isa Vector{Vector{T}} where T<:Real
# TODO: make sure all values are promoted to floats, otherwise performance loss is
# significant
#@test resample(uvals, n) isa Vector{Vector{Float}}
@test length(resample(uvals, n)) == n
# resample_elwise(uvals::Vector{AbstractUncertainValue}, n::Int): `n` draws
# per element — outer length equals the number of values, inner length is `n`.
n = 2
@test resample_elwise(uvals, n) isa Vector{Vector{T}} where T<:Real
@test length(resample_elwise(uvals, n)) == length(uvals)
@test length(resample_elwise(uvals, n)[1]) == n
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 2029 | using StatsBase
##########################################
# Uncertain theoretical distributions
##########################################
uncertain_theoretical_distributions = [
UncertainValue(Uniform, -1, 1),
UncertainValue(Normal, 0, 1),
UncertainValue(Gamma, 1, 2),
UncertainValue(Beta, 1, 2),
UncertainValue(BetaPrime, 4, 5),
UncertainValue(Frechet, 1, 1),
UncertainValue(Binomial, 10, 0.3),
UncertainValue(BetaBinomial, 100, 2, 3)
]
##########################################
# Kernel density estimates
##########################################
n = 10
uncertain_kde_estimates = [
UncertainValue(rand(100))
]
##########################################
# Fitted theoretical distributions
##########################################
n = 10
uncertain_fitted_distributions = [
UncertainValue(Uniform, rand(Uniform(-2, 2), n)),
UncertainValue(Normal, rand(Normal(0, 1), n))
]
########################
# Uncertain populations
########################
pop1 = UncertainValue(
[3.0, UncertainValue(Normal, 0, 1),
UncertainValue(Gamma, 2, 3),
UncertainValue(Uniform, rand(1000))],
[0.5, 0.5, 0.5, 0.5]
)
pop2 = UncertainValue([1, 2, 3], rand(3))
pop3 = UncertainValue([1.0, 2.0, 3.0], Weights(rand(3)))
# Uncertain population consisting of uncertain populations and other stuff
pop4 = UncertainValue([pop1, pop2], [0.1, 0.5])
pop5 = UncertainValue([pop1, pop2, 2, UncertainValue(Normal, -2, 3)], Weights(rand(4)));
uncertain_scalar_populations = [pop1, pop2, pop3, pop4, pop5]
##########################################
# Gather all examples
##########################################
uvals = [
uncertain_scalar_populations;
uncertain_theoretical_distributions;
uncertain_kde_estimates;
uncertain_fitted_distributions
];
##########################################
# Apply functions to datasets `n` times
##########################################
@test x1 = resample(median, uvals, 1000) isa Vector{T} where T <: Real
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 583 |
# One constraint per element of `example_uvals` (defined in a sibling test
# file); every element gets the same central-quantile truncation.
constraints = [TruncateQuantiles(0.1, 0.9)
for i in 1:length(example_uvals)] #i % 2 == 0 ? TruncateStd(0.3) :
# Draw one realisation: unconstrained, with a single shared constraint, and
# with one constraint per element.
@test resample(example_uvals) isa Vector{<:Real}
@test resample(example_uvals, constraints[1]) isa Vector{<:Real}
@test resample(example_uvals, constraints) isa Vector{<:Real}
# Draw multiple realisations: each of 5 draws is a vector of reals.
@test resample(example_uvals, 5) isa Vector{Vector{T}} where T<:Real
@test resample(example_uvals, constraints, 5) isa Vector{Vector{T}} where T<:Real
@test resample(example_uvals, constraints[1], 5) isa Vector{Vector{T}} where T<:Real
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 355 | x = CertainValue(2.0)
test_constraints = [
NoConstraint(),
TruncateLowerQuantile(0.2),
TruncateUpperQuantile(0.2),
TruncateQuantiles(0.2, 0.8),
TruncateMaximum(50),
TruncateMinimum(-50),
TruncateRange(-50, 50),
TruncateStd(1)
]
for constraint in test_constraints
@test constrain(x, constraint) isa CertainValue
end | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 11030 | pop1 = UncertainValue(
[3.0, UncertainValue(Normal, 0, 1),
UncertainValue(Gamma, 2, 3),
UncertainValue(Uniform, rand(1000))],
[0.5, 0.5, 0.5, 0.5]
)
pop2 = UncertainValue(1:10 |> collect, rand(10))
pop3 = UncertainValue(1.0:10.0 |> collect, Weights(rand(10)))
# Uncertain population consisting of uncertain populations and other stuff
pop4 = UncertainValue([pop1, pop2], [0.1, 0.5])
pop5 = UncertainValue([pop1, pop2, 2, UncertainValue(Normal, -2, 3)], Weights(rand(4)));
uncertain_scalar_populations = [pop1, pop2, pop3, pop4, pop5];
T = Union{Nothing, ConstrainedUncertainScalarPopulation}
@test constrain(pop1, TruncateMinimum(0.2)) isa T
@test constrain(pop1, TruncateMaximum(3.0)) isa T
@test constrain(pop1, TruncateRange(0.0, 3.0)) isa T
@test constrain(pop1, TruncateLowerQuantile(0.2)) isa T
@test constrain(pop1, TruncateUpperQuantile(0.8)) isa T
@test constrain(pop1, TruncateQuantiles(0.2, 0.8)) isa T
@test constrain(pop1, TruncateStd(1)) isa T
@test constrain(pop1, TruncateStd(0.5)) isa T
# pop2.values = [1, 2, 3]
@test constrain(pop2, TruncateMinimum(0.2)) isa T
@test constrain(pop2, TruncateMaximum(2.5)) isa T
@test constrain(pop2, TruncateRange(0.5, 2.5)) isa T
@test constrain(pop2, TruncateLowerQuantile(0.2)) isa T
@test constrain(pop2, TruncateUpperQuantile(0.8)) isa T
@test constrain(pop2, TruncateQuantiles(0.2, 0.8)) isa T
@test constrain(pop2, TruncateStd(1)) isa T
@test constrain(pop2, TruncateStd(0.5)) isa T
# pop3.values = [1.0, 2.0, 3.0]
@test constrain(pop3, TruncateMinimum(0.2)) isa T
@test constrain(pop3, TruncateMaximum(2.5)) isa T
@test constrain(pop3, TruncateRange(0.5, 2.5)) isa T
@test constrain(pop3, TruncateLowerQuantile(0.2)) isa T
@test constrain(pop3, TruncateUpperQuantile(0.8)) isa T
@test constrain(pop3, TruncateQuantiles(0.2, 0.8)) isa T
@test constrain(pop3, TruncateStd(1)) isa T
@test constrain(pop3, TruncateStd(0.5)) isa T
@test constrain(pop4, TruncateMinimum(-10)) isa T
@test constrain(pop4, TruncateMaximum(10)) isa T
@test constrain(pop4, TruncateRange(-10, 10)) isa T
@test constrain(pop4, TruncateLowerQuantile(0.2)) isa T
@test constrain(pop4, TruncateUpperQuantile(0.8)) isa T
@test constrain(pop4, TruncateQuantiles(0.2, 0.8)) isa T
@test constrain(pop4, TruncateStd(1)) isa T
@test constrain(pop4, TruncateStd(0.5)) isa T
@test constrain(pop5, TruncateMinimum(-10)) isa T
@test constrain(pop5, TruncateMaximum(10)) isa T
@test constrain(pop5, TruncateRange(-10, 10)) isa T
@test constrain(pop5, TruncateLowerQuantile(0.2)) isa T
@test constrain(pop5, TruncateUpperQuantile(0.8)) isa T
@test constrain(pop5, TruncateQuantiles(0.2, 0.8)) isa T
@test constrain(pop5, TruncateStd(1)) isa T
@test constrain(pop5, TruncateStd(0.5)) isa T
# Subpopulations mixing plain scalars and theoretical distributions.
subpop1_members = [UncertainValue(Normal, 0, 1), UncertainValue(Uniform, -2, 2), -5]
subpop2_members = [
    UncertainValue(Normal, -2, 1),
    UncertainValue(Uniform, -6, -1),
    -3,
    UncertainValue(Gamma, 1, 0.4)]
# Relative sampling weights within each subpopulation. Weights are
# normalised internally, so any positive numbers indicating relative
# importance will do.
subpop1_probs = [1, 2, 1]
subpop2_probs = [0.1, 0.2, 0.3, 0.1]
pop1 = UncertainValue(subpop1_members, subpop1_probs)
pop2 = UncertainValue(subpop2_members, subpop2_probs)
# Probabilities of drawing each subpopulation from the overall population.
pop_probs = [0.3, 0.7]
pop_mixed = UncertainValue([pop1, pop2], pop_probs)
# Every constraint kind must be applicable to the nested mixed population.
for c in (TruncateMinimum(1), TruncateMaximum(2), TruncateRange(1, 2),
          TruncateQuantiles(0.1, 0.9), TruncateLowerQuantile(0.1),
          TruncateUpperQuantile(0.9), TruncateStd(1), TruncateStd(0.5))
    @test constrain(pop_mixed, c) isa T
end
# Truncation
#------------------------------------------
# Rebuild the example populations (pop1/pop2 were overwritten above).
pop1 = UncertainValue(
    [3.0, UncertainValue(Normal, 0, 1),
    UncertainValue(Gamma, 2, 3),
    UncertainValue(Uniform, rand(1000))],
    [0.5, 0.5, 0.5, 0.5]
)
pop2 = UncertainValue([1, 2, 3], rand(3))
pop3 = UncertainValue([1.0, 2.0, 3.0], Weights(rand(3)))
# Uncertain population consisting of uncertain populations and other stuff
pop4 = UncertainValue([pop1, pop2], [0.1, 0.5])
pop5 = UncertainValue([pop1, pop2, 2, UncertainValue(Normal, -2, 3)], Weights(rand(4)));
# `truncate` must accept every constraint kind for every population flavour.
# Flat populations (pop1-pop3) use bounds of ±1, the nested ones (pop4,
# pop5) ±2, mirroring the original per-population parameters.
n = 2000
for (p, lo, hi) in ((pop1, -1, 1), (pop2, -1, 1), (pop3, -1, 1),
                    (pop4, -2, 2), (pop5, -2, 2))
    sb = resample(p, n)  # smoke-test plain resampling before truncating
    ql, qh = 0.2, 0.8
    for c in (TruncateMaximum(hi), TruncateMinimum(lo), TruncateRange(lo, hi),
              TruncateLowerQuantile(ql), TruncateUpperQuantile(qh),
              TruncateQuantiles(ql, qh), TruncateStd(1.0))
        @test truncate(p, c) isa T
    end
end
# Rebuild the populations, this time fitting pop1's Uniform member to a
# larger (10000-point) sample.
pop1 = UncertainValue(
    [3.0, UncertainValue(Normal, 0, 1),
    UncertainValue(Gamma, 2, 3),
    UncertainValue(Uniform, rand(10000))],
    [0.5, 0.5, 0.5, 0.5]
)
pop2 = UncertainValue([1, 2, 3], rand(3))
pop3 = UncertainValue([1.0, 2.0, 3.0], Weights(rand(3)))
# Uncertain population consisting of uncertain populations and other stuff
pop4 = UncertainValue([pop1, pop2], [0.1, 0.5])
pop5 = UncertainValue([pop1, pop2, 2, UncertainValue(Normal, -2, 3)], Weights(rand(4)));
uncertain_scalar_populations = [pop1, pop2, pop3, pop4, pop5];
# Same constrain checks as the first round, minus the TruncateStd(0.5) case.
for (p, c_lo, c_hi, c_rng) in (
        (pop1, TruncateMinimum(0.2), TruncateMaximum(3.0), TruncateRange(0.0, 3.0)),
        (pop2, TruncateMinimum(0.2), TruncateMaximum(2.5), TruncateRange(0.5, 2.5)),
        (pop3, TruncateMinimum(0.2), TruncateMaximum(2.5), TruncateRange(0.5, 2.5)),
        (pop4, TruncateMinimum(-10), TruncateMaximum(10), TruncateRange(-10, 10)),
        (pop5, TruncateMinimum(-10), TruncateMaximum(10), TruncateRange(-10, 10)))
    for c in (c_lo, c_hi, c_rng,
              TruncateLowerQuantile(0.2), TruncateUpperQuantile(0.8),
              TruncateQuantiles(0.2, 0.8), TruncateStd(1))
        @test constrain(p, c) isa T
    end
end
# Mixed population of subpopulations (scalars + distributions), as above.
subpop1_members = [UncertainValue(Normal, 0, 1), UncertainValue(Uniform, -2, 2), -5]
subpop2_members = [
    UncertainValue(Normal, -2, 1),
    UncertainValue(Uniform, -6, -1),
    -3,
    UncertainValue(Gamma, 1, 0.4)]
# Relative sampling weights within the subpopulations (normalised
# internally).
subpop1_probs = [1, 2, 1]
subpop2_probs = [0.1, 0.2, 0.3, 0.1]
pop1 = UncertainValue(subpop1_members, subpop1_probs)
pop2 = UncertainValue(subpop2_members, subpop2_probs)
# Probabilities of sampling the two subpopulations in the overall population.
pop_probs = [0.3, 0.7]
pop_mixed = UncertainValue([pop1, pop2], pop_probs)
for c in (TruncateMinimum(1), TruncateMaximum(2), TruncateRange(1, 2),
          TruncateQuantiles(0.1, 0.9), TruncateLowerQuantile(0.1),
          TruncateUpperQuantile(0.9), TruncateStd(1))
    @test constrain(pop_mixed, c) isa T
end
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1444 | import Distributions
using Distributions, UncertainData
# Create an uncertain dataset containing both theoretical values with known parameters,
# theoretical values with fitted parameters and kernel density estimated distributions.
u1 = UncertainValue(Gamma, rand(Gamma(), 500))
u2 = UncertainValue(rand(MixtureModel([Normal(1, 0.3), Normal(-3, 3)]), 500))
uvals3 = [UncertainValue(Normal, rand(), rand()) for i = 1:11]
measurements = [u1; u2; uvals3]
d = UncertainDataset(measurements)
# Test all available constraints
constraints = [
NoConstraint(),
TruncateLowerQuantile(0.2),
TruncateUpperQuantile(0.3),
TruncateQuantiles(0.2, 0.6),
TruncateMaximum(0.2),
TruncateMinimum(0.2),
TruncateRange(0.2, 0.6),
TruncateStd(1)
]
for i = 1:length(constraints)
# A single constraint applied to all values in the dataset
@test constrain(d, constraints[i]) isa ConstrainedUncertainDataset
# Element-wise application of different constraints to the values in the dataset
@test constrain(d, [constraints[i] for k = 1:length(d)]) isa ConstrainedUncertainDataset
# Constraining constrained datasets (might be nested several times)
constrained_dataset = constrain(d, constraints[i])
@test constrain(constrained_dataset, constraints[i]) isa ConstrainedUncertainDataset
@test constrain(constrained_dataset, [constraints[i] for k = 1:length(d)]) isa ConstrainedUncertainDataset
end | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1469 | import Distributions
using Distributions, UncertainData
# Create an uncertain dataset containing both theoretical values with known parameters,
# theoretical values with fitted parameters and kernel density estimated distributions.
u1 = UncertainValue(Gamma, rand(Gamma(), 500))
u2 = UncertainValue(rand(MixtureModel([Normal(1, 0.3), Normal(-3, 3)]), 500))
uvals3 = [UncertainValue(Normal, rand(), rand()) for i = 1:11]
measurements = [u1; u2; uvals3]
d = UncertainIndexDataset(measurements)
# Test all available constraints
constraints = [
NoConstraint(),
TruncateLowerQuantile(0.2),
TruncateUpperQuantile(0.3),
TruncateQuantiles(0.2, 0.6),
TruncateMaximum(0.2),
TruncateMinimum(0.2),
TruncateRange(0.2, 0.6),
TruncateStd(1)
]
for i = 1:length(constraints)
# A single constraint applied to all values in the dataset
@test constrain(d, constraints[i]) isa ConstrainedUncertainIndexDataset
# Element-wise application of different constraints to the values in the dataset
@test constrain(d, [constraints[i] for k = 1:length(d)]) isa ConstrainedUncertainIndexDataset
# Constraining constrained datasets (might be nested several times)
constrained_dataset = constrain(d, constraints[i])
@test constrain(constrained_dataset, constraints[i]) isa ConstrainedUncertainIndexDataset
@test constrain(constrained_dataset, [constraints[i] for k = 1:length(d)]) isa ConstrainedUncertainIndexDataset
end | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1469 | import Distributions
using Distributions, UncertainData
# Create an uncertain dataset containing both theoretical values with known parameters,
# theoretical values with fitted parameters and kernel density estimated distributions.
u1 = UncertainValue(Gamma, rand(Gamma(), 500))
u2 = UncertainValue(rand(MixtureModel([Normal(1, 0.3), Normal(-3, 3)]), 500))
uvals3 = [UncertainValue(Normal, rand(), rand()) for i = 1:11]
measurements = [u1; u2; uvals3]
d = UncertainValueDataset(measurements)
# Test all available constraints
constraints = [
NoConstraint(),
TruncateLowerQuantile(0.2),
TruncateUpperQuantile(0.3),
TruncateQuantiles(0.2, 0.6),
TruncateMaximum(0.2),
TruncateMinimum(0.2),
TruncateRange(0.2, 0.6),
TruncateStd(1)
]
for i = 1:length(constraints)
# A single constraint applied to all values in the dataset
@test constrain(d, constraints[i]) isa ConstrainedUncertainValueDataset
# Element-wise application of different constraints to the values in the dataset
@test constrain(d, [constraints[i] for k = 1:length(d)]) isa ConstrainedUncertainValueDataset
# Constraining constrained datasets (might be nested several times)
constrained_dataset = constrain(d, constraints[i])
@test constrain(constrained_dataset, constraints[i]) isa ConstrainedUncertainValueDataset
@test constrain(constrained_dataset, [constraints[i] for k = 1:length(d)]) isa ConstrainedUncertainValueDataset
end | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 3550 |
import KernelDensity.UnivariateKDE
######################################################################
# Resampling datasets consisting of uncertain values furnished by
# theoretical distributions
######################################################################
tol = 1e-7
uv = UncertainValue(Normal, 1, 0.5)
# One constrained copy of `uv` per constraint type.
uvc_lq = constrain(uv, TruncateLowerQuantile(0.2))
uvc_uq = constrain(uv, TruncateUpperQuantile(0.8))
uvc_q = constrain(uv, TruncateQuantiles(0.2, 0.8))
uvc_min = constrain(uv, TruncateMinimum(0.5))
uvc_max = constrain(uv, TruncateMaximum(1.5))
uvc_range = constrain(uv, TruncateRange(0.5, 1.5))
# 1000 draws from each constrained value must respect the truncation bounds
# (to within `tol`).
@test minimum(resample(uvc_lq, 1000)) >= quantile(uv, 0.2) - tol
@test maximum(resample(uvc_uq, 1000)) <= quantile(uv, 0.8) + tol
@test all(quantile(uv, 0.2) - tol .<= resample(uvc_q, 1000) .<= quantile(uv, 0.8) + tol)
@test minimum(resample(uvc_min, 1000)) >= 0.5 - tol
@test maximum(resample(uvc_max, 1000)) <= 1.5 + tol
@test all(0.5 - tol .<= resample(uvc_range, 1000) .<= 1.5 + tol)
##########################################################################
# Resampling datasets consisting of uncertain values furnished by
# theoretical distributions with parameters estimated from empirical data
##########################################################################
tol = 1e-7
uv = UncertainValue(Normal, rand(Normal(-1, 0.2), 1000))
uvc_lq = constrain(uv, TruncateLowerQuantile(0.2))
uvc_uq = constrain(uv, TruncateUpperQuantile(0.8))
uvc_q = constrain(uv, TruncateQuantiles(0.2, 0.8))
# NOTE(review): the 0.5/1.5 bounds sit far in the upper tail of a value
# fitted to Normal(-1, 0.2) samples — presumably intentional, but worth
# confirming.
uvc_min = constrain(uv, TruncateMinimum(0.5))
uvc_max = constrain(uv, TruncateMaximum(1.5))
uvc_range = constrain(uv, TruncateRange(0.5, 1.5))
@test minimum(resample(uvc_lq, 1000)) >= quantile(uv, 0.2) - tol
@test maximum(resample(uvc_uq, 1000)) <= quantile(uv, 0.8) + tol
@test all(quantile(uv, 0.2) - tol .<= resample(uvc_q, 1000) .<= quantile(uv, 0.8) + tol)
@test minimum(resample(uvc_min, 1000)) >= 0.5 - tol
@test maximum(resample(uvc_max, 1000)) <= 1.5 + tol
@test all(0.5 - tol .<= resample(uvc_range, 1000) .<= 1.5 + tol)
##########################################################################
# Resampling datasets consisting of uncertain values furnished by
# kernel density estimations to the distributions.
##########################################################################
# Quantile estimates are not precise for KDE estimates, so we need to
# lower the tolerance.
tol = 1e-2
uv = UncertainValue(UnivariateKDE, rand(Uniform(10, 15), 1000))
#uv = UncertainValue(rand(Normal(15, 2), 1000)) # does the same
# Verify that we get errors if trying to sample outside the support of the
# distribution furnishing the data point.
@test_throws ArgumentError constrain(uv, TruncateMaximum(-100))
@test_throws ArgumentError constrain(uv, TruncateMinimum(100))
@test_throws DomainError constrain(uv, TruncateRange(100, -100))
uvc_lq = constrain(uv, TruncateLowerQuantile(0.2))
uvc_uq = constrain(uv, TruncateUpperQuantile(0.8))
uvc_q = constrain(uv, TruncateQuantiles(0.2, 0.8))
uvc_min = constrain(uv, TruncateMinimum(13))
uvc_max = constrain(uv, TruncateMaximum(13))
uvc_range = constrain(uv, TruncateRange(11, 12))
@test minimum(resample(uvc_lq, 1000)) >= quantile(uv, 0.2) - tol
@test maximum(resample(uvc_uq, 1000)) <= quantile(uv, 0.8) + tol
@test all(quantile(uv, 0.2) - tol .<= resample(uvc_q, 1000) .<= quantile(uv, 0.8) + tol)
@test minimum(resample(uvc_min, 1000)) >= 13 - tol
@test maximum(resample(uvc_max, 1000)) <= 13 + tol
@test all(11 - tol .<= resample(uvc_range, 1000) .<= 12 + tol)
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1087 |
# Constrained resampling of a KDE-estimated uncertain value: 1000 draws from
# each constrained copy must respect the truncation bounds within `tol`.
uv = UncertainValue(rand(1000))
# Constrain under `c`, then draw 1000 samples.
draws(c) = resample(constrain(uv, c), 1000)
# Quantile-based truncations
@test minimum(draws(TruncateLowerQuantile(0.2))) >= quantile(uv, 0.2) - tol
@test maximum(draws(TruncateUpperQuantile(0.8))) <= quantile(uv, 0.8) + tol
@test all(quantile(uv, 0.2) - tol .<= draws(TruncateQuantiles(0.2, 0.8)) .<= quantile(uv, 0.8) + tol)
# Value-based truncations
@test minimum(draws(TruncateMinimum(0.5))) >= 0.5 - tol
@test maximum(draws(TruncateMaximum(1.5))) <= 1.5 + tol
@test all(0.5 - tol .<= draws(TruncateRange(0.5, 1.5)) .<= 1.5 + tol)
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1884 | # Note: these function live in the Resampling module, which extends the SamplingConstraints methods
# Add uncertainties to the time series values
n_points = 40
x_uncertain = [UncertainValue(Normal, x, rand(Uniform(0.1, 0.8))) for x in rand(n_points)]
y_uncertain = [UncertainValue(Normal, y, rand(Uniform(0.1, 0.8))) for y in rand(n_points)]
x = UncertainValueDataset(x_uncertain)
y = UncertainValueDataset(y_uncertain)
time_uncertain = [UncertainValue(Normal, i, 1) for i = 1:length(x)];
time_certain = [CertainValue(i) for i = 1:length(x)];
timeinds_x = UncertainIndexDataset(time_uncertain)
timeinds_y = UncertainIndexDataset(time_certain)
X = UncertainIndexValueDataset(timeinds_x, x)
Y = UncertainIndexValueDataset(timeinds_y, y);
# Truncate each of the indices for x at 0.8 their standard deviation around the mean
constraints_x_inds = TruncateStd(0.8)
# Truncate each of the indices for y at 1.5 their standard deviation around the mean
constraints_y_inds = TruncateStd(1.5)
# Truncate each of the values of x at the 20th percentile range
constraints_x_vals = [TruncateQuantiles(0.4, 0.6) for i = 1:length(x)];
# Truncate each of the values of x at the 80th percentile range
constraints_y_vals = [TruncateQuantiles(0.1, 0.9) for i = 1:length(x)];
@test constrain(X.indices, ConstrainedValueResampling(constraints_x_inds)) isa ConstrainedUncertainIndexDataset
@test constrain(X.values, ConstrainedValueResampling(constraints_x_vals)) isa ConstrainedUncertainValueDataset
@test constrain(X, ConstrainedValueResampling(constraints_x_inds), ConstrainedValueResampling(constraints_x_vals)) isa UncertainIndexValueDataset
idxval_resampling = ConstrainedIndexValueResampling((constraints_x_inds, constraints_x_vals))
@test constrain(X, idxval_resampling) isa UncertainIndexValueDataset
@test constrain(X, constraints_x_inds, constraints_x_vals) isa UncertainIndexValueDataset | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 877 | # Test that input validation works where possible
# Constructor input validation for the truncation constraint types.
@test TruncateStd(1) isa TruncateStd{<:Int}
@test TruncateStd(1.0) isa TruncateStd{<:Real}
# Non-positive standard-deviation multiples are invalid.
# BUG FIX: the trailing `isa TruncateStd{...}` on the original @test_throws
# lines was dead code — the constructor throws before `isa` is evaluated.
@test_throws DomainError TruncateStd(0)
@test_throws DomainError TruncateStd(0.0)
@test_throws DomainError TruncateStd(-1)
@test_throws DomainError TruncateStd(-1.2)
# Quantiles must lie in [0, 1] and be ordered low < high.
@test TruncateQuantiles(0.1, 0.9) isa TruncateQuantiles{<:Real, <:Real}
@test TruncateQuantiles(0.0, 1.0) isa TruncateQuantiles{<:Real, <:Real}
@test_throws DomainError TruncateQuantiles(-0.1, 0.9)
@test_throws DomainError TruncateQuantiles(0.2, 1.9)
@test_throws DomainError TruncateQuantiles(0.3, 0.1)
# Ranges must be ordered min <= max; mixed numeric types are allowed.
@test TruncateRange(-10, 10) isa TruncateRange{<:Int, <:Int}
@test TruncateRange(-10.0, 10) isa TruncateRange{<:Real, <:Int}
@test_throws DomainError TruncateRange(5, 3)
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1590 | o1 = UncertainValue(Normal, 0, 0.5)
o2 = UncertainValue(Normal, 2, 0.3)
o3 = UncertainValue(Uniform, 0, 4)
o4 = UncertainValue(Uniform, rand(100))
#####################
# UncertainDataset
####################
# Iteration
D = UncertainDataset([o1, o2, o3])
@test length(D) == 3
@test length([x for x in D]) == 3
# Indexing
@test D[1] isa AbstractUncertainValue
@test D[end] isa AbstractUncertainValue
@test D[1:end] isa AbstractVector{<:AbstractUncertainValue}
@test D[[1, 2]] isa AbstractVector{<:AbstractUncertainValue}
@test D[:] isa AbstractVector{<:AbstractUncertainValue}
########################
# UncertainIndexDataset
########################
# Construction
UV = UncertainIndexDataset(D.values)
@test UV isa UncertainIndexDataset
# Iteration
@test length(UV) == 3
@test length([x for x in UV]) == 3
# Indexing
@test UV[1] isa AbstractUncertainValue
@test UV[end] isa AbstractUncertainValue
@test UV[1:end] isa AbstractVector{<:AbstractUncertainValue}
@test UV[[1, 2]] isa AbstractVector{<:AbstractUncertainValue}
@test UV[:] isa AbstractVector{<:AbstractUncertainValue}
########################
# UncertainValueDataset
########################
# Construction
UV = UncertainValueDataset(D.values)
@test UV isa UncertainValueDataset
# Iteration
@test length(UV) == 3
@test length([x for x in UV]) == 3
# Indexing
@test UV[1] isa AbstractUncertainValue
@test UV[end] isa AbstractUncertainValue
@test UV[1:end] isa AbstractVector{<:AbstractUncertainValue}
@test UV[[1, 2]] isa AbstractVector{<:AbstractUncertainValue}
@test UV[:] isa AbstractVector{<:AbstractUncertainValue} | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 64 | @test UncertainIndexDataset(rand(30)) isa UncertainIndexDataset
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1905 |
##############
# Constructors
##############
# Create some uncertain data of different types (theoretical distributions,
# KDE estimates, a certain value, and populations).
o1 = UncertainValue(Normal, 0, 0.5)
o2 = UncertainValue(Normal, 2, 0.3)
o3 = UncertainValue(Uniform, 0, 4)
o4 = UncertainValue(Uniform, rand(100))
o5 = UncertainValue(rand(400))
o7 = CertainValue(2)
o8 = UncertainValue([2, 3, 4], [4, 5, 2])
o9 = UncertainValue([2, 4, 5, 2], rand(4))
uvals = [o1, o2, o3, o4, o5, o7, o8, o9]
UV = UncertainValueDataset(uvals)
UI = UncertainIndexDataset(uvals)
CUV = constrain(UV, TruncateQuantiles(0.1, 0.9))
# Bug fix: the constrained *index* dataset was previously built from `UV`;
# build it from `UI` so the name matches the content.
CUI = constrain(UI, TruncateQuantiles(0.1, 0.9))
# From scalar vectors
@test UncertainIndexValueDataset(rand(30), rand(30)) isa UncertainIndexValueDataset
# Vectors
@test UncertainIndexValueDataset(uvals, uvals) isa UncertainIndexValueDataset
# Non-constrained datasets
@test UncertainIndexValueDataset(uvals, UV) isa UncertainIndexValueDataset
@test UncertainIndexValueDataset(UI, uvals) isa UncertainIndexValueDataset
@test UncertainIndexValueDataset(UI, UV) isa UncertainIndexValueDataset
# Constrained datasets
@test UncertainIndexValueDataset(uvals, CUV) isa UncertainIndexValueDataset
@test UncertainIndexValueDataset(CUI, uvals) isa UncertainIndexValueDataset
@test UncertainIndexValueDataset(CUI, CUV) isa UncertainIndexValueDataset
UIV = UncertainIndexValueDataset(UI, UV)
###########
# Iteration
###########
# Bug fix: `UIV` holds one index/value pair per element of `uvals` (8 of them),
# but these length checks were hard-coded to 3 (copy-paste from a 3-element
# test). Compare against `length(uvals)` instead.
@test length(UIV) == length(uvals)
@test length([x for x in UIV]) == length(uvals)
@test UIV[1] isa Tuple{<:AbstractUncertainValue, <:AbstractUncertainValue}
###########
# Indexing
###########
# Indexing an index-value dataset yields (index, value) tuples.
@test UIV[1] isa Tuple{<:AbstractUncertainValue, <:AbstractUncertainValue}
@test UIV[end] isa Tuple{<:AbstractUncertainValue, <:AbstractUncertainValue}
@test UIV[1:end] isa AbstractVector
@test UIV[[1, 2]] isa AbstractVector
@test UIV[:] isa AbstractVector
# `index` accesses only the index component of the dataset.
@test index(UIV, 1) isa AbstractUncertainValue
@test index(UIV, 1:2) isa AbstractVector{<:AbstractUncertainValue}
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 64 | @test UncertainValueDataset(rand(30)) isa UncertainValueDataset
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 931 | o1 = UncertainValue(Normal, 0, 0.2)
# Uncertain values and datasets used for the statistics tests below.
# NOTE(review): `o1` is defined earlier in this file.
o2 = UncertainValue(Normal, 1, 0.3)
o3 = UncertainValue(Uniform, 0, 4)
o4 = UncertainValue(Uniform, rand(100))
o5 = UncertainValue(rand(Normal(), 1000))
D1 = UncertainDataset([o1, o2, o3, o5])
D2 = UncertainDataset([o1, o2, o4, o5])
##################
# Uncertain values
##################
# Point statistics on a single uncertain value return scalars.
@test mean(o1) isa Float64
@test median(o1) isa Float64
@test quantile(o1, 0.86) isa Float64
@test std(o1) isa Float64
@test var(o1) isa Float64
#####################
## Uncertain datasets
#####################
# With a trailing integer argument (10), statistics are computed by resampling
# and return one estimate per dataset element.
@test mean(D1, 10) isa Vector{Float64}
@test median(D1, 10) isa Vector{Float64}
@test middle(D1, 10) isa Vector{Float64}
@test std(D1, 10) isa Vector{Float64}
@test var(D1, 10) isa Vector{Float64}
@test quantile(D1, 0.4, 10) isa Vector{Float64}
@test cor(D1, D2, 10) isa Vector{Float64}
@test cov(D1, D2, 10) isa Vector{Float64}
# Without the draw count, pairwise statistics collapse to a single scalar.
@test cor(D1, D2) isa Float64
@test cov(D1, D2) isa Float64
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 3850 | using HypothesisTests, UncertainData, Distributions
# Hypothesis tests on uncertain values and datasets. Each call draws samples
# from the uncertain value(s) and applies the corresponding HypothesisTests.jl
# test; the trailing integer argument (10) presumably sets the number of
# draws — see the resampling API.
# Single uncertain values
uval1 = UncertainValue(Normal, 0, 0.2)
uval2 = UncertainValue(Beta, 1, 2)
xs = [UncertainValue(Normal, 0, rand()) for i = 1:10]
ys = [UncertainValue(Gamma, rand(Uniform(2, 3)), rand(Uniform(4, 5))) for i = 1:10]
x = UncertainDataset(xs)
y = UncertainDataset(ys)
#######################
# On uncertain values
######################
@test pvalue(MannWhitneyUTest(uval1, uval2)) isa Float64
@test pvalue(MannWhitneyUTest(uval1, uval2, 10)) isa Float64
@test pvalue(OneSampleTTest(uval1)) isa Float64
@test pvalue(OneSampleTTest(uval1, μ0 = 0.0)) isa Float64
@test pvalue(OneSampleTTest(uval1, uval2)) isa Float64
@test pvalue(OneSampleTTest(uval1, uval2, μ0 = 0.0)) isa Float64
# NOTE(review): an exact duplicate of the following line was removed.
@test pvalue(OneSampleTTest(uval1, 10)) isa Float64
@test pvalue(OneSampleTTest(uval1, uval2, 10)) isa Float64
@test pvalue(OneSampleTTest(uval1, uval2, 10, μ0 = 0.0)) isa Float64
@test pvalue(EqualVarianceTTest(uval1, uval2)) isa Float64
@test pvalue(EqualVarianceTTest(uval1, uval2, 10)) isa Float64
@test pvalue(EqualVarianceTTest(uval1, uval2, μ0 = 0.0)) isa Float64
@test pvalue(EqualVarianceTTest(uval1, uval2, 10, μ0 = 0.0)) isa Float64
@test pvalue(UnequalVarianceTTest(uval1, uval2)) isa Float64
@test pvalue(UnequalVarianceTTest(uval1, uval2, 10)) isa Float64
@test pvalue(UnequalVarianceTTest(uval1, uval2, μ0 = 0.0)) isa Float64
@test pvalue(UnequalVarianceTTest(uval1, uval2, 10, μ0 = 0.0)) isa Float64
@test pvalue(ApproximateTwoSampleKSTest(uval1, uval2)) isa Float64
@test pvalue(ApproximateTwoSampleKSTest(uval1, uval2, 10)) isa Float64
@test pvalue(ExactOneSampleKSTest(uval1, Normal())) isa Float64
@test pvalue(OneSampleADTest(uval1, Normal())) isa Float64
#######################
# On uncertain datasets
######################
# Pooled variants return a single test; element-wise variants return one test
# per dataset element (hence the broadcasts `pvalue.(...)`).
@test pvalue(MannWhitneyUTestPooled(x, y)) isa Float64
@test pvalue(MannWhitneyUTestPooled(x, y, 10)) isa Float64
@test pvalue.(MannWhitneyUTestElementWise(x, y)) isa Vector{Float64}
@test pvalue.(MannWhitneyUTestElementWise(x, y, 10)) isa Vector{Float64}
@test pvalue(OneSampleTTestPooled(x)) isa Float64
@test pvalue(OneSampleTTestPooled(x, y)) isa Float64
@test pvalue(OneSampleTTestPooled(x, y, 10)) isa Float64
@test pvalue.(OneSampleTTestElementWise(x, y)) isa Vector{Float64}
@test pvalue.(OneSampleTTestElementWise(x, y, 10)) isa Vector{Float64}
@test pvalue(OneSampleTTestPooled(x, μ0 = 0.0)) isa Float64
@test pvalue(OneSampleTTestPooled(x, y, μ0 = 0.0)) isa Float64
@test pvalue(OneSampleTTestPooled(x, y, 10, μ0 = 0.0)) isa Float64
@test pvalue.(OneSampleTTestElementWise(x, y, μ0 = 0.0)) isa Vector{Float64}
@test pvalue.(OneSampleTTestElementWise(x, y, 10, μ0 = 0.0)) isa Vector{Float64}
@test pvalue(EqualVarianceTTestPooled(x, y)) isa Float64
@test pvalue(EqualVarianceTTestPooled(x, y, 10)) isa Float64
@test pvalue.(EqualVarianceTTestElementWise(x, y)) isa Vector{Float64}
@test pvalue.(EqualVarianceTTestElementWise(x, y, 10)) isa Vector{Float64}
@test pvalue(UnequalVarianceTTestPooled(x, y)) isa Float64
@test pvalue(UnequalVarianceTTestPooled(x, y, 10)) isa Float64
@test pvalue.(UnequalVarianceTTestElementWise(x, y)) isa Vector{Float64}
@test pvalue.(UnequalVarianceTTestElementWise(x, y, 10)) isa Vector{Float64}
@test pvalue(ApproximateTwoSampleKSTestPooled(x, y)) isa Float64
@test pvalue.(ApproximateTwoSampleKSTestElementWise(x, y)) isa Vector{Float64}
@test pvalue(ExactOneSampleKSTestPooled(x, Normal())) isa Float64
@test pvalue.(ExactOneSampleKSTestElementWise(x, Normal())) isa Vector{Float64}
@test pvalue(OneSampleADTestPooled(x, Normal())) isa Float64
@test pvalue.(OneSampleADTestElementWise(x, Normal())) isa Vector{Float64}
@test pvalue(JarqueBeraTestPooled(x)) isa Float64
@test pvalue.(JarqueBeraTestElementWise(x)) isa Vector{Float64}
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 508 | using HypothesisTests, UncertainData, Distributions
# Build a dataset of uncertain values for the (currently disabled) time-series
# hypothesis tests below.
ts_vals = [UncertainValue(Normal, rand(Uniform(-10, 10)), rand()) for i = 1:10]
# Bug fix: the dataset was previously constructed from `xs`, a variable not
# defined in this file (it only resolved by leaking from another test file's
# scope), leaving `ts_vals` unused. Use the locally defined `ts_vals`.
ts = UncertainDataset(ts_vals)
#
# @show size(LjungBoxTest(ts))
# @show size(LjungBoxTest(ts, 10))
# @show size(BoxPierceTest(ts))
# @show size(BoxPierceTest(ts, 10))   # fixed typo: was `BoxPiercexTest`
#
# @test pvalue(LjungBoxTest(ts)) isa Float64
# @test pvalue.(LjungBoxTest(ts, 10)) isa Vector{Float64}
#
# @test pvalue(BoxPierceTest(ts)) isa Float64
# @test pvalue.(BoxPierceTest(ts, 10)) isa Vector{Float64}
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 2638 | n = 10
using Test
# Pairwise statistics from StatsBase, estimated on pairs of uncertain
# containers by resampling (`n` draws; `n` is defined earlier in this file).
pairwise_funcs = [
    StatsBase.cov,
    StatsBase.cor,
    StatsBase.countne,
    StatsBase.counteq,
    StatsBase.corkendall,
    StatsBase.corspearman,
    StatsBase.maxad,
    StatsBase.meanad,
    StatsBase.msd,
    StatsBase.psnr,
    StatsBase.rmsd,
    StatsBase.sqL2dist,
    StatsBase.crosscor,
    StatsBase.crosscov
]
# `example_uvals` is a collection of uncertain values provided by the test setup.
uvals = example_uvals
# NOTE(review): the three @testset blocks below previously shared the exact
# same name ("Pairwise statistics on datasets"), which made failure reports
# ambiguous; each now names the container type it exercises.
@testset "Pairwise statistics on vectors of uncertain values" begin
    @testset "$(pairwise_funcs[i])" for i = 1:length(pairwise_funcs)
        f = pairwise_funcs[i]
        if f == StatsBase.psnr
            # psnr needs a maximum signal value as an extra argument.
            maxv = 100
            @test f(uvals, uvals, maxv, n) isa Vector{T} where T <: Real
        elseif f ∈ [StatsBase.crosscor, StatsBase.crosscov]
            # Cross-correlation/covariance return one vector (over lags) per element.
            @test f(uvals, uvals, n) isa Vector{Vector{T}} where T <: Real
            @test f(uvals, uvals, 1:5, n) isa Vector{Vector{T}} where T <: Real
        else
            @test f(uvals, uvals, n) isa Vector{T} where T <: Real
        end
    end
end
UV = UncertainValueDataset(example_uvals)
@testset "Pairwise statistics on UncertainValueDataset" begin
    @testset "$(pairwise_funcs[i])" for i = 1:length(pairwise_funcs)
        f = pairwise_funcs[i]
        if f == StatsBase.psnr
            maxv = 100
            @test f(UV, UV, maxv, n) isa Vector{T} where T <: Real
        elseif f ∈ [StatsBase.crosscor, StatsBase.crosscov]
            @test f(UV, UV, n) isa Vector{Vector{T}} where T <: Real
            @test f(UV, UV, 1:5, n) isa Vector{Vector{T}} where T <: Real
        else
            @test f(UV, UV, n) isa Vector{T} where T <: Real
        end
    end
end
UI = UncertainIndexDataset(example_uvals)
@testset "Pairwise statistics on UncertainIndexDataset" begin
    @testset "$(pairwise_funcs[i])" for i = 1:length(pairwise_funcs)
        f = pairwise_funcs[i]
        if f == StatsBase.psnr
            maxv = 100
            @test f(UI, UI, maxv, n) isa Vector{T} where T <: Real
        elseif f ∈ [StatsBase.crosscor, StatsBase.crosscov]
            @test f(UI, UI, n) isa Vector{Vector{T}} where T <: Real
            @test f(UI, UI, 1:5, n) isa Vector{Vector{T}} where T <: Real
        else
            @test f(UI, UI, n) isa Vector{T} where T <: Real
        end
    end
end
# Functions that under the hood use functions with strictly positive domains
# special_pairwise_funcs = [
#     StatsBase.gkldiv,
#     StatsBase.kldivergence,
# ]
# @testset "$(pairwise_funcs[i])" for i = 1:length(pairwise_funcs)
#     f = pairwise_funcs[i]
#     @testset for (i, uval) in enumerate(example_uvals)
#         @test f(uval, uval, n) isa T where T <: Real
#     end
# end; | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1917 | import StatsBase
# Single-estimate statistics from StatsBase: each function is applied to an
# uncertain dataset via resampling (`n` draws) and should return one estimate
# (or one summary object) per element of the dataset. Functions with extra
# required arguments (percentile levels, moment order, etc.) are special-cased
# in the branches below.
single_estimate_funcs = [
    StatsBase.var,
    StatsBase.std,
    StatsBase.middle,
    StatsBase.median,
    StatsBase.mean,
    StatsBase.genmean,
    StatsBase.genvar,
    StatsBase.harmmean,
    StatsBase.mode,
    StatsBase.percentile,
    StatsBase.quantile,
    StatsBase.rle,
    StatsBase.sem,
    StatsBase.span,
    StatsBase.summarystats,
    StatsBase.totalvar,
    StatsBase.kurtosis,
    StatsBase.moment,
    StatsBase.skewness,
    StatsBase.renyientropy
]
n = 10
# `example_uvals` is a collection of uncertain values provided by the test setup.
udata = UncertainValueDataset(example_uvals)
@testset "Single-estimate statistic for dataset" begin
    @testset "$(single_estimate_funcs[i])" for i = 1:length(single_estimate_funcs)
        f = single_estimate_funcs[i]
        if f == StatsBase.summarystats
            @test f(udata, n) isa Vector{StatsBase.SummaryStats{T}} where T
        elseif f == StatsBase.percentile
            # Scalar level -> one value per element; vector of levels -> one
            # vector per element.
            @test f(udata, 10, n) isa Vector{T} where T <: Real
            @test f(udata, [10, 20], n) isa Vector{Vector{T}} where T <: Real
        elseif f == StatsBase.quantile
            @test f(udata, 0.1, n) isa Vector{T} where T <: Real
            @test f(udata, [0.2, 0.5], n) isa Vector{Vector{T}} where T <: Real
        elseif f == StatsBase.moment
            @test f(udata, 1, n) isa Vector{T} where T <: Real
            @test f(udata, 2, n) isa Vector{T} where T <: Real
        elseif f == StatsBase.genmean
            @test f(udata, 4, n) isa Vector{T} where T <: Real
        elseif f == StatsBase.rle
            @test rle(udata, n) isa Vector{Tuple{Vector{T1}, Vector{T2}}} where {T1, T2}
        elseif f == StatsBase.span
            @test f(udata, n) isa Vector{T} where {T <: AbstractRange{S}} where S
        elseif f == StatsBase.renyientropy
            @test f(udata, 1, n) isa Vector{T} where T <: Real
        else
            @test f(udata, n) isa Vector{T} where T <: Real
        end
    end;
end
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1444 | n = 10
using Test
# Pairwise statistics from StatsBase, applied to pairs of individual uncertain
# values via resampling (`n` draws; `n` is defined earlier in this file).
pairwise_funcs = [
    StatsBase.cov,
    StatsBase.cor,
    StatsBase.countne,
    StatsBase.counteq,
    StatsBase.corkendall,
    StatsBase.corspearman,
    StatsBase.maxad,
    StatsBase.meanad,
    StatsBase.msd,
    StatsBase.psnr,
    StatsBase.rmsd,
    StatsBase.sqL2dist,
    StatsBase.crosscor,
    StatsBase.crosscov
]
@testset "Pairwise statistics on uncertain values" begin
    @testset "$(pairwise_funcs[i])" for i = 1:length(pairwise_funcs)
        f = pairwise_funcs[i]
        # `example_uvals` is a collection of uncertain values provided by the
        # test setup; every pairwise function is tried on each of them.
        @testset for (i, uval) in enumerate(example_uvals)
            if f == StatsBase.psnr
                # psnr needs a maximum signal value as an extra argument.
                maxv = 100
                @test f(uval, uval, maxv, n) isa T where T <: Real
            elseif f ∈ [StatsBase.crosscor, StatsBase.crosscov]
                # Cross-correlation/covariance return a vector over lags.
                @test f(uval, uval, n) isa AbstractVector{T} where T <: Real
                @test f(uval, uval, 1:5, n) isa AbstractVector{T} where T <: Real
            else
                @test f(uval, uval, n) isa T where T <: Real
            end
        end
    end;
end
# Functions that under the hood use functions with strictly positive domains
# special_pairwise_funcs = [
#     StatsBase.gkldiv,
#     StatsBase.kldivergence,
# ]
# @testset "$(pairwise_funcs[i])" for i = 1:length(pairwise_funcs)
#     f = pairwise_funcs[i]
#     @testset for (i, uval) in enumerate(example_uvals)
#         @test f(uval, uval, n) isa T where T <: Real
#     end
# end; | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1922 | import StatsBase
# Point-estimate statistics from StatsBase: each function is applied to a
# single uncertain value via resampling (`n` draws) and should collapse to a
# single value/object. Functions with extra required arguments (percentile
# levels, moment order, etc.) are special-cased in the branches below.
pointestimate_funcs = [
    StatsBase.var,
    StatsBase.std,
    StatsBase.middle,
    StatsBase.median,
    StatsBase.mean,
    StatsBase.genmean,
    StatsBase.genvar,
    StatsBase.harmmean,
    StatsBase.mode,
    StatsBase.percentile,
    StatsBase.quantile,
    StatsBase.rle,
    StatsBase.sem,
    StatsBase.span,
    StatsBase.summarystats,
    StatsBase.totalvar,
    StatsBase.kurtosis,
    StatsBase.moment,
    StatsBase.skewness,
    StatsBase.renyientropy
]
n = 10
@testset "Point-estimate statistics" begin
    @testset "point-estimate statistic: $(pointestimate_funcs[i])" for i = 1:length(pointestimate_funcs)
        f = pointestimate_funcs[i]
        # `example_uvals` is a collection of uncertain values provided by the
        # test setup; every function is tried on each of them.
        @testset for (i, uval) in enumerate(example_uvals)
            if f == StatsBase.summarystats
                @test f(uval, n) isa StatsBase.SummaryStats{T} where T
            elseif f == StatsBase.percentile
                # Scalar level -> scalar; vector of levels -> vector.
                @test f(uval, 10, n) isa T where T <: Real
                @test f(uval, [10, 20], n) isa Vector{T} where T <: Real
            elseif f == StatsBase.quantile
                @test f(uval, 0.1, n) isa T where T <: Real
                @test f(uval, [0.2, 0.5], n) isa Vector{T} where T <: Real
            elseif f == StatsBase.moment
                @test f(uval, 1, n) isa T where T <: Real
                @test f(uval, 2, n) isa T where T <: Real
            elseif f == StatsBase.genmean
                @test f(uval, 4, n) isa T where T <: Real
            elseif f == StatsBase.rle
                @test rle(uval, n) isa Tuple{Vector{T} where T, Vector{T2} where T2}
            elseif f == StatsBase.span
                @test f(uval, n) isa AbstractRange{T} where T <: Real
            elseif f == StatsBase.renyientropy
                @test f(uval, 1, n) isa T where T <: Real
            else
                @test f(uval, n) isa T where T <: Real
            end
        end
    end;
end
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 660 | using UncertainData
using Test
using Distributions
# assigndist_uniform(lo, hi) wraps an interval in a Uniform distribution.
@test assigndist_uniform(-3, 3) isa Uniform
@test assigndist_uniform(0, 0.2) isa Uniform
@test assigndist_uniform(-2, 1) isa Uniform
# assigndist_normal(μ, σ; ...) builds a truncated Normal; `nσ` and the
# `trunc_lower`/`trunc_upper` keywords control the truncation bounds.
@test assigndist_normal(0, 0.3) isa Truncated
@test assigndist_normal(2, 0.2, nσ = 2) isa Truncated
@test assigndist_normal(10.0, 2, trunc_lower = -2) isa Truncated
@test assigndist_normal(5, 0.2, trunc_upper = 2) isa Truncated
@test assigndist_normal(0, 0.2) isa Truncated
@test assigndist_normal(2, 0.1, trunc_lower = -3) isa Truncated
@test assigndist_normal(-2, 0.1, trunc_upper = 3) isa Truncated
@test assigndist_normal(0, 0.3, trunc_upper = 3, nσ = 3) isa Truncated
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 98 |
# Plain numbers (Int and Float64 alike) are wrapped as CertainValue by the
# UncertainValue constructor.
x = 3
y = 3.3
@test UncertainValue(x) isa CertainValue
@test UncertainValue(y) isa CertainValue | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1196 | import StatsBase: ProbabilityWeights, pweights
# Uncertain values of mixed representations (KDE, theoretical distributions,
# population) to be merged into a single uncertain value with `combine`.
v1 = UncertainValue(UnivariateKDE, rand(4:0.25:6, 1000), bandwidth = 0.01)
v2 = UncertainValue(Normal, 0.8, 0.4)
v3 = UncertainValue([rand() for i = 1:3], [0.3, 0.3, 0.4])
v4 = UncertainValue(Normal, 3.7, 0.8)
uvals = [v1, v2, v3, v4];
# Combining without weights
r1 = combine(uvals)
r2 = combine(uvals, n = 10000)
@test r1 isa AbstractUncertainValue
@test r2 isa AbstractUncertainValue
# Combining with ProbabilityWeights
r1 = combine(uvals, ProbabilityWeights([0.2, 0.1, 0.3, 0.2]))
r2 = combine(uvals, pweights([0.2, 0.1, 0.3, 0.2]))
@test r1 isa AbstractUncertainValue
@test r2 isa AbstractUncertainValue
# Combining with AnalyticWeights
# Bug fix: these results were previously discarded, so the asserts below
# re-tested the stale ProbabilityWeights results. Assign them so the checks
# actually exercise the AnalyticWeights path.
r1 = combine(uvals, AnalyticWeights([0.2, 0.1, 0.3, 0.2]))
r2 = combine(uvals, aweights([0.2, 0.1, 0.3, 0.2]))
@test r1 isa AbstractUncertainValue
@test r2 isa AbstractUncertainValue
# Combining with FrequencyWeights (same fix as above)
r1 = combine(uvals, FrequencyWeights([100, 200, 300, 400]))
r2 = combine(uvals, fweights([500, 700, 800, 124]))
@test r1 isa AbstractUncertainValue
@test r2 isa AbstractUncertainValue
# Combining with generic Weights
r1 = combine(uvals, Weights([0.2, 0.1, 0.3, 0.2]))
@test r1 isa AbstractUncertainValue
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 404 | uval1 = UncertainValue(Normal, 1, 5)
uval2 = UncertainValue(Uniform, rand(1000))
uval3 = UncertainValue(rand(10000))
v = [uval1, uval2, uval3]
@test minimum(uval1) isa Float64
@test minimum(uval2) isa Float64
@test minimum(uval3) isa Float64
@test maximum(uval1) isa Float64
@test maximum(uval2) isa Float64
@test maximum(uval3) isa Float64
@test minimum(v) isa Float64
@test maximum(v) isa Float64
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 4992 | ##################################################
# Uncertain populations
################################################
# A (values, weights) pair yields a population; the first type parameter
# reflects the member eltype (plain numbers vs. uncertain values).
x = UncertainValue([1, 2, 3], rand(3))
y = UncertainValue([1, UncertainValue(Normal, 0, 1), 3], rand(3))
@test x isa UncertainScalarPopulation{T1, T2} where {T1 <: Number, T2}
@test y isa UncertainScalarPopulation{T1, T2} where {T1 <: AbstractUncertainValue, T2}
##################################################
# Uncertain theoretical distributions
################################################
# Constructing from an instantiated distribution.
# NOTE(review): an exact duplicate of the following line was removed.
@test UncertainValue(Normal(1, 2)) isa UncertainScalarNormallyDistributed
@test UncertainValue(truncated(Normal(1, 2), 0.2, 0.8)) isa ConstrainedUncertainScalarValueTwoParameter
# Uncertain normally distributed values
@test UncertainValue(Normal, 1, 0.2, nσ = 2, trunc_lower = -5) isa UncertainScalarNormallyDistributed
@test UncertainValue(Normal, -3, 0.2) isa UncertainScalarNormallyDistributed
@test UncertainValue(Normal, 0, 2) isa UncertainScalarNormallyDistributed
@test UncertainValue(Normal, -1, 0.1) isa UncertainScalarNormallyDistributed
@test UncertainValue(Normal, -1, 0.1, nσ = 2, trunc_upper = 4) isa UncertainScalarNormallyDistributed
@test UncertainValue(Normal, 5.0, 0.2) isa UncertainScalarNormallyDistributed
# Uncertain uniformly distributed values
@test UncertainValue(Uniform, 1, 7) isa UncertainScalarUniformlyDistributed
@test UncertainValue(Uniform, -1, 7) isa UncertainScalarUniformlyDistributed
@test UncertainValue(Uniform, -6, -2) isa UncertainScalarUniformlyDistributed
# Uncertain beta-distributed values (negative shape parameters are invalid and
# currently marked broken).
@test UncertainValue(Beta, 1, 7) isa UncertainScalarBetaDistributed
@test_broken UncertainValue(Beta, -1, 7) isa UncertainScalarBetaDistributed
@test_broken UncertainValue(Beta, -1, -2) isa UncertainScalarBetaDistributed
@test_broken UncertainValue(Beta, 4, -2) isa UncertainScalarBetaDistributed
# Uncertain beta-binomial-distributed values
@test UncertainValue(BetaBinomial, 10, 1, 7) isa UncertainScalarBetaBinomialDistributed
@test_broken UncertainValue(BetaBinomial, 10, -1, 7) isa UncertainScalarBetaBinomialDistributed
@test_broken UncertainValue(BetaBinomial, 10, -1, -2) isa UncertainScalarBetaBinomialDistributed
@test_broken UncertainValue(BetaBinomial, 1, 2) isa UncertainScalarBetaBinomialDistributed
# Uncertain beta prime-distributed values
@test UncertainValue(BetaPrime, 1, 7) isa UncertainScalarBetaPrimeDistributed
@test_broken UncertainValue(BetaPrime, -1, 7) isa UncertainScalarBetaPrimeDistributed
@test_broken UncertainValue(BetaPrime, -1, -2) isa UncertainScalarBetaPrimeDistributed
@test_broken UncertainValue(BetaPrime, 2, -2) isa UncertainScalarBetaPrimeDistributed
# Uncertain gamma-distributed values
@test UncertainValue(Gamma, 1, 7) isa UncertainScalarGammaDistributed
@test UncertainValue(Gamma, 1, 7, trunc_upper = 4, trunc_lower = 1) isa UncertainScalarGammaDistributed
@test_broken UncertainValue(Gamma, -1, 7) isa UncertainScalarGammaDistributed
# NOTE(review): an exact duplicate of the following line was removed.
@test_broken UncertainValue(Gamma, -1, -2) isa UncertainScalarGammaDistributed
# Uncertain Fréchet-distributed values
@test UncertainValue(Frechet, 1, 7) isa UncertainScalarFrechetDistributed
# Uncertain Binomial-distributed values
@test UncertainValue(Binomial, 50, 0.4) isa UncertainScalarBinomialDistributed
################################################
# Uncertain values from empirical distributions
################################################
empirical_uniform = rand(Uniform(), 100)
empirical_normal = rand(Normal(), 100)
empirical_beta = rand(Beta(), 100)
# Providing a distribution type plus a sample fits that distribution to the data.
@test UncertainValue(Uniform, empirical_uniform) isa UncertainScalarTheoreticalFit
@test UncertainValue(Normal, empirical_normal) isa UncertainScalarTheoreticalFit
@test UncertainValue(Beta, empirical_beta) isa UncertainScalarTheoreticalFit
##################################################
# Uncertain values from kernel density estimates
################################################
# Implicit constructor: a bare sample defaults to a KDE representation.
@test UncertainValue(empirical_uniform) isa UncertainScalarKDE
@test UncertainValue(empirical_normal) isa UncertainScalarKDE
@test UncertainValue(empirical_beta) isa UncertainScalarKDE
# Explicit constructor
@test UncertainValue(UnivariateKDE, empirical_uniform) isa UncertainScalarKDE
@test UncertainValue(UnivariateKDE, empirical_normal) isa UncertainScalarKDE
@test UncertainValue(UnivariateKDE, empirical_beta) isa UncertainScalarKDE
# Empirical cumulative distribution function: values must stay in [0, 1]
# up to a small numerical tolerance.
d = Normal()
some_sample = rand(d, 1000)
uv = UncertainValue(some_sample)
uv_ecdf = UncertainData.UncertainValues.ecdf(uv)
tol = 1e-7
@test all(uv_ecdf .>= 0.0 - tol)
@test all(uv_ecdf .<= 1.0 + tol)
# Quantiles (empirical and true quantiles should be close for large samples)
large_sample = rand(d, Int(1e6))
uv = UncertainValue(UnivariateKDE, large_sample)
@test abs(quantile(uv, 0.8) - quantile(d, 0.8)) < 1e-2
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1290 | # Uncertain population consisting of CertainValues (scalars get promoted to CertainValue)s
# theoretical distributions and KDE distributions
# p1 mixes a plain scalar (promoted to CertainValue) with theoretical and
# KDE-based uncertain values, each with an explicit weight.
p1 = ConstrainedUncertainScalarPopulation(
    [3.0, UncertainValue(Normal, 0, 1),
    UncertainValue(Gamma, 2, 3),
    UncertainValue(Uniform, rand(1000))],
    [0.5, 0.5, 0.5, 0.5]
)
# Uncertain population consisting of scalar values
# Weights may be given as a raw vector or as a StatsBase AbstractWeights.
p2 = ConstrainedUncertainScalarPopulation([1, 2, 3], rand(3))
p3 = ConstrainedUncertainScalarPopulation([1, 2, 3], Weights(rand(3)))
# Uncertain population consisting of uncertain populations
p4 = ConstrainedUncertainScalarPopulation([p1, p2], [0.1, 0.5])
p5 = ConstrainedUncertainScalarPopulation([p1, p2], Weights([0.1, 0.5]));
# The first type parameter reflects the member eltype; weights are always
# normalized to an AbstractWeights.
@test p1 isa ConstrainedUncertainScalarPopulation{T1, T2} where {T1 <: AbstractUncertainValue, T2 <: AbstractWeights}
@test p2 isa ConstrainedUncertainScalarPopulation{T1, T2} where {T1 <: Number, T2 <: AbstractWeights}
@test p3 isa ConstrainedUncertainScalarPopulation{T1, T2} where {T1 <: Number, T2 <: AbstractWeights}
@test p4 isa ConstrainedUncertainScalarPopulation{T1, T2} where {T1 <: AbstractUncertainValue, T2 <: AbstractWeights}
@test p5 isa ConstrainedUncertainScalarPopulation{T1, T2} where {T1 <: AbstractUncertainValue, T2 <: AbstractWeights} | UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | code | 1216 | import StatsBase: AbstractWeights
# Uncertain population consisting of CertainValues (scalars get promoted to CertainValue)s
# theoretical distributions and KDE distributions
# p1 mixes a plain scalar with theoretical and KDE-based uncertain values,
# each with an explicit weight.
p1 = UncertainScalarPopulation(
    [3.0, UncertainValue(Normal, 0, 1),
    UncertainValue(Gamma, 2, 3),
    UncertainValue(Uniform, rand(1000))],
    [0.5, 0.5, 0.5, 0.5]
)
# Uncertain population consisting of scalar values
# Weights may be given as a raw vector or as a StatsBase AbstractWeights.
p2 = UncertainScalarPopulation([1, 2, 3], rand(3))
p3 = UncertainScalarPopulation([1, 2, 3], Weights(rand(3)))
# Uncertain population consisting of uncertain populations
p4 = UncertainScalarPopulation([p1, p2], [0.1, 0.5])
p5 = UncertainScalarPopulation([p1, p2], Weights([0.1, 0.5]));
# The first type parameter reflects the member eltype; weights are always
# normalized to an AbstractWeights.
@test p1 isa UncertainScalarPopulation{T1, T2} where {T1 <: AbstractUncertainValue, T2 <: AbstractWeights}
@test p2 isa UncertainScalarPopulation{T1, T2} where {T1 <: Number, T2 <: AbstractWeights}
@test p3 isa UncertainScalarPopulation{T1, T2} where {T1 <: Number, T2 <: AbstractWeights}
@test p4 isa UncertainScalarPopulation{T1, T2} where {T1 <: AbstractUncertainValue, T2 <: AbstractWeights}
@test p5 isa UncertainScalarPopulation{T1, T2} where {T1 <: AbstractUncertainValue, T2 <: AbstractWeights}
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | docs | 889 | # UncertainData changelog
## v.0.14
### Breaking changes
- `sequence_exists` replaces `strictly_increasing_sequence_exists`/`strictly_decreasing_sequence_exists`.
- When resampling using sequential constraints, the quantiles used to truncate distributions now have to be given
to the constructors of the sequential constraints, i.e. `resample(x, StrictlyIncreasing(lq = 0.1, uq = 0.9)` instead of `resample(x, StrictlyIncreasing(), 0.1, 0.9)`.
### Bug fixes
- Fixed bug that could occasionally occur for certain types of data when performing resampling with the `StrictlyIncreasing`/`StrictlyDecreasing` sequential constraints.
## v0.12.0
### Bug fixes
- Fixed bug where indices were sampled instead of values for the method
`resample(x::UncertainIndexValueDataset, constraint::SamplingConstraint, sequential_constraint::Union{StrictlyIncreasing, StrictlyDecreasing}`.
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | docs | 1668 | [](https://github.com/kahaaga/UncertainData.jl/actions)
[](https://kahaaga.github.io/UncertainData.jl/stable/)
[-blue.svg)](https://kahaaga.github.io/UncertainData.jl/dev/)
[](https://doi.org/10.21105/joss.01666)
[](https://zenodo.org/badge/latestdoi/160108056)
# UncertainData.jl
A Julia package for dealing with data values with associated uncertainties and
datasets consisting of uncertain values.
## Goals
1. Systematic and intuitive ways of representing uncertain data.
2. Easy and robust resampling of uncertain data, given pre-defined or
custom user-defined constraints.
3. Provide a framework for robust computation of ensemble statistics for
uncertain data.
Please check out the
[documentation](https://kahaaga.github.io/UncertainData.jl/dev) for more
information.
# Installation
UncertainData.jl is a registered Julia package. Install it by opening a Julia console and run
```julia
using Pkg
Pkg.add("UncertainData")
```
# Citing
If you use UncertainData.jl for any of your projects or scientific publications, please cite [this small Journal of Open Source Software (JOSS) publication](https://joss.theoj.org/papers/10.21105/joss.01666) as follows
> Haaga, (2019). UncertainData.jl: a Julia package for working with measurements and datasets with uncertainties.. Journal of Open Source Software, 4(43), 1666, https://doi.org/10.21105/joss.01666
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | docs | 26975 |
# Changelog
## UncertainData.jl v.0.14
### Breaking changes
- `sequence_exists` replaces `strictly_increasing_sequence_exists`/`strictly_decreasing_sequence_exists`.
### Bug fixes
- Fixed bug that could occasionally occur for certain types of data when performing resampling with the `StrictlyIncreasing`/`StrictlyDecreasing` sequential constraints.
## UncertainData.jl v0.10.4
### Documentation
- Changed to regular documentation template.
### Bug fixes
- Fixed type error for test.
## UncertainData.jl v0.10.3
### Improvements
- The user can now control how each bin is represented when using `BinnedWeightedResampling`.
One can now provide `BinnedWeightedResampling{UncertainScalarKDE}`,
`BinnedWeightedResampling{UncertainScalarPopulation}` or
`BinnedWeightedResampling{RawValues}`. Corresponding `bin` methods are also implemented.
### Documentation
- Fixed missing doc string for `bin!`.
## UncertainData.jl v0.10.2
### Improvements
- The user can now control how each bin is represented when using `BinnedResampling`. One can now
provide `BinnedResampling{UncertainScalarKDE}`, `BinnedResampling{UncertainScalarPopulation}` or
`BinnedResampling{RawValues}`.
- Explicit `bin` methods for binning both scalar valued data and uncertain data.
### Documentation
- Added documentation for binning methods.
- Improved documentation for `UncertainScalarKDE`.
## UncertainData.jl v0.10.0
### Improvements
- The `resample` family of methods for vectors now dispatches on `AbstractVector`s, which allows more
flexibility. Now, for example `LArray`s from `LabelledArrays.jl` also can be resampled.
- Relax `resample(x::Real)` to `resample(x::Number)`.
## UncertainData.jl v0.9.3
- `dimension` is no longer exported.
## UncertainData.jl v0.9.2
### New features
- Added `SensitivityTests` module defining the abstract type `SensitivityTest`.
## UncertainData.jl v0.9.1
### Bug fixes
- Missing import of `interpolate_and_bin` between sub-packages fixed.
## Uncertaindata.jl v0.9.0
### New features
- Added `interpolate_and_bin` function.
- Added `InterpolateAndBin` type.
- Added `resample(inds, vals, resampling::InterpolateAndBin{Linear})` method, which interpolates and bins `inds` and `vals` onto an interpolation grid, then bins and summarises the bins. Returns the binned values.
- Added `resample(x::AbstractUncertainIndexValueDataset, resampling::InterpolateAndBin{Linear})` method.
Draws a single realisation of both the indices and values of `x` and orders them sequentially according
to the indices (assuming independent points). Then, interpolate, bin and summarise bins.
- Added `bin` and `bin!` functions.
- Added `bin_mean` function.
- Added `fill_nans`, `fill_nans!` and `interpolate_nans` functions for dealing with data containing `NaN`s.
- Added `findall_nan_chunks` function for identifying consecutive `NaN`s in a dataset.
- Added `RandomSequences` resampling scheme.
## Uncertaindata.jl v0.8.2
### New features
- Added `resample` method for `BinnedWeightedResampling` scheme.
- Added `AbstractBinnedResampling` for binned resamplings.
- Added `AbstractBinnedUncertainValueResampling` abstract type for binnings where
the values in each bin is represented by an uncertain value. `BinnedResampling`
and `BinnedWeightedResampling` are subtypes of `AbstractBinnedUncertainValueResampling`.
- Added `AbstractBinnedSummarisedResampling` abstract type for binnings where the values
in each bin are summarised to a single value. `BinnedMeanResampling` and
`BinnedMeanWeightedResampling` are subtypes of `AbstractBinnedSummarisedResampling`.
### Improvements
- Added more tests for binned resampling schemes.
## Uncertaindata.jl v0.8.1
### New features
- Added `UncertainValueDataset`, `UncertainIndexDataset`, and `UncertainIndexValueDataset` constructors for vectors of numbers (they get converted to `CertainValue`s).
### Bug fixes
- `rand(x::CertainValue, n::Int)` now returns a length-`n` array with `x` repeated `n` times.
## Uncertaindata.jl v0.8.0
### New functionality
- Added binned resampling methods that uses [`BinnedResampling`](@ref) and [`BinnedMeanResampling`](@ref) schemes.
- [`resample(::AbstractUncertainIndexValueDataset, ::BinnedResampling`](@ref)
- [`resample(::AbstractUncertainIndexValueDataset, ::BinnedMeanResampling`](@ref)
### Bug fixes
- Fixed bug where `resample!` method for vectors and tuples of uncertain values didn't return the
expected result.
### Improvements
- Improved `resample!` docs.
## Uncertaindata.jl v0.7.0
### New functionality
- Added `resample!` for in-place resampling into pre-allocated containers.
## UncertainData.jl v0.5.1
### Bug fixes
- Strictly increasing or decreasing sequences were not always possible to construct
when using `CertainValue`s, because `TruncateRange` instances with equal
minimum and maximum was constructed (not possible). It is now possible to
resample with sequential constraints even with the `StrictlyIncreasing`
and `StrictlyDecreasing` constraints.
## UncertainData.jl v0.5.0
### Breaking changes
- To allow easier multiple dispatch, the `indices` field of a `UncertainIndexValueDataset` is
now *always* an instance of a subtype of `AbstractUncertainIndexDataset`. The `values` field
of a `UncertainIndexValueDataset` is now *always* an instance of a subtype of
`AbstractUncertainValueDataset`.
### New functionality
- Experimental support for nested populations.
- Added point-estimators for single uncertain values:
1. `harmmean(x::AbstractUncertainValue, n::Int)`
2. `geomean(x::AbstractUncertainValue, n::Int)`
3. `kurtosis(x::AbstractUncertainValue, n::Int; m = mean(x))`
4. `moment(x::AbstractUncertainValue, k, n::Int, m = mean(x))`
5. `percentile(x::AbstractUncertainValue, p, n::Int)`
6. `renyientropy(x::AbstractUncertainValue, α, n::Int)`
7. `rle(x::AbstractUncertainValue, n::Int)`
8. `sem(x::AbstractUncertainValue, n::Int)`
9. `skewness(x::AbstractUncertainValue, n::Int; m = mean(x))`
10. `span(x::AbstractUncertainValue, n::Int)`
11. `summarystats(x::AbstractUncertainValue, n::Int)`
12. `totalvar(x::AbstractUncertainValue, n::Int)`
- Added statistical estimators for pairs of uncertain values:
1. `cov(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int; corrected::Bool = true)`
1. `cor(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int)`
1. `countne(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int)`
1. `counteq(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int)`
1. `corkendall(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int)`
1. `corspearman(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int)`
1. `maxad(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int)`
1. `meanad(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int)`
1. `msd(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int)`
1. `psnr(x::AbstractUncertainValue, y::AbstractUncertainValue, maxv, n::Int)`
1. `rmsd(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int; normalize = false)`
1. `sqL2dist(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int)`
1. `crosscor(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int; demean = true)`
1. `crosscov(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int; demean = true)`
1. `gkldiv(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int)`
1. `kldivergence(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int)`
- Added `UncertainValue` constructor for distribution instances.
- Added `UncertainValue` constructor for (potentially nested) truncated distribution instances.
- Implemented `resample` methods for `NTuple`s of uncertain values.
- Added `resample(f::Function, n::Int, x::AbstractUncertainValue, args...; kwargs...)`method for
easy evaluation of point-estimates for single uncertain values.
- Added support for `Measurement` instances from
[Measurements.jl](https://github.com/JuliaPhysics/Measurements.jl).
These are treated as uncertain values represented by normal distributions.
Hence, they are given no extra treatment and error propagation is done by
resampling, not by exact methods.
- The uncertain value type `UncertainScalarPopulation` may now not only have real-valued scalars
as elements of the population. It can now have uncertain values as members of the population!
- Resampling implemented for `UncertainScalarPopulation` so that we can also sample population
members that are uncertain values.
- Implemented iteration for `UncertainScalarPopulation`.
### Improvements
- Improved subtyping for theoretical distributions.
- Removed redundant `resample` methods for the `UncertainDataset` type. `UncertainDataset`
is a subtype of `AbstractUncertainValueDataset` and has no special behaviour beyond
that implemented for the abstract type, so now we just rely on multiple dispatch here.
### Documentation
- Improved documentation statistical methods.
- Other minor documentation improvements.
- Improved documentation for `TruncateStd`.
### Bug fixes
- Fixed error in `show` method for `AbstractUncertainValue`. Not subtypes of `AbstractUncertainValue` has the `distributions` field, so that is now removed from the `show` method.
## UncertainData.jl v0.4.0
### New functionality
- Introduce an abstract resampling type `AbstractUncertainDataResampling` for this
package pending the implementation of `AbstractResampling` in StatsBase.jl.
- Added `ConstrainedResampling` resampling scheme.
- Resample vectors of uncertain values without constraints. Syntax:
1. `resample(::Vector{<:AbstractUncertainValue}` for single draws.
2. `resample(::Vector{<:AbstractUncertainValue}, ::Int}` for multiple draws.
- Resample vectors of uncertain values with constraint(s) multiple times. Syntax:
1. `resample(::Vector{<:AbstractUncertainValue}, ::Union{SamplingConstraint, Vector{<:SamplingConstraint}}` for single draws.
2. `resample(::Vector{<:AbstractUncertainValue}, ::Union{SamplingConstraint, Vector{<:SamplingConstraint}}, ::Int` for multiple draws.
## UncertainData.jl v0.3.0
### New functionality
- Added additional resampling methods for uncertain index and uncertain value datasets,
allowing passing vectors of constraints that are mapped to each value in the dataset. The
syntax is `resample(::AbstractUncertainValueDataset, ::Vector{<:SamplingConstraint}` for a
single draw, and `resample(::AbstractUncertainValueDataset, ::Vector{<:SamplingConstraint}, n::Int`
for `n` draws.
## UncertainData.jl v0.2.3
### Improvements
- Added input validation when initialising `TruncateQuantiles`, `TruncateRange` and
`TruncateStd`.
- Separate parameters types for `TruncateQuantiles` and `TruncateRange`, so one can do for
example `TruncateRange(1, 8.0)`, instead of having to promote to `Float64`.
- Added validation for distribution truncation when resampling.
## UncertainData.jl v0.2.2
### New functionality and syntax changes
#### Resampling vectors consisting of uncertain values (done in #61)
- `resample(uvals::Vector{AbstractUncertainValue}, n::Int)` is now interpreted as "treat
`uvals` as a dataset and sample it `n` times". Thus, it now behaves as
`resample(AbstractUncertainDataset, n::Int)`, returning `n` vectors of length
`length(uvals)`, where the i-th element is a unique draw of `uvals[i]`.
- `resample_elwise(uvals::Vector{AbstractUncertainValue}, n::Int)` takes over the role as
"sample `uvals` element-wise and `n` times for each element". Returns a vector of
length `length(uvals)`, where the i-th element is a `n`-element vector of unique draws
of `uvals[i]`.
#### Resampling with subtypes of `AbstractUncertainValueDataset`
Currently, this affects the generic `UncertainDataset`s, as well as the specialized
`UncertainIndexDataset`s and `UncertainValueDataset`s.
- `resample_elwise(uvd::AbstractUncertainValueDataset, n::Int)` is now interpreted as
"draw `n` realisations of each value in `uvd`". Returns a vector of length `length(uvals)`
where the i-th element is a `n`-element vector of unique draws of `uvals[i]`. This works
for `UncertainDataset`s, `UncertainIndexDataset`s, and `UncertainValueDataset`s.
- `resample_elwise(uvd::AbstractUncertainValueDataset, constraint::Union{SamplingConstraint, Vector{SamplingConstraint}}, n::Int)`
is now interpreted as "draw `n` realisations of each value in `uvd`, subjecting each value
in `uvd` to some sampling `constraint`(s) during resampling". Returns a vector of
length `length(uvals)` where the i-th element is a `n`-element vector of unique draws
of `uvals[i]`, where the support of `uvals[i]` has been truncated by the provided
`constraint`(s).
### Bug fixes
- Removed extra blank line from print method for `AbstractUncertainPopulation`.
## UncertainData.jl v0.2.1
### New functionality
- `merge(uvals::Vector{<:AbstractUncertainValue}; n = 1000)` now makes it possible to
combine many uncertain values of different types into one uncertain value represented by a
kernel density estimate. This is achieved by resampling each of the values `n` times,
then pooling the draws and estimating a total distribution using KDE.
- `merge(uvals::Vector{<:AbstractUncertainValue}; weights::Weights n = 1000)`,
`merge(uvals::Vector{<:AbstractUncertainValue}; weights::AnalyticalWeights n = 1000)`
and
`merge(uvals::Vector{<:AbstractUncertainValue}; weights::ProbabilityWeights n = 1000)`
merges uncertain values by resampling them proportionally to `weights`, then pooling
the draws and performing KDE. These are all functionally equivalent, but implementations
for different weights are provided for compatibility with StatsBase.
- `merge(uvals::Vector{<:AbstractUncertainValue}; weights::FrequencyWeights n = 1000)`
merges uncertain values by sampling them according to the number of samples provided
with `weights`.
### Bug fixes
- `resample` didn't work for `UncertainIndexDataset`s due to the data being stored in the
`indices` field, not the `values` field as for other subtypes of
`AbstractUncertainValueDataset`. This is now fixed.
## UncertainData.jl v0.2.0
### Notes
- Julia 1.1 is required for version > v.0.2.0.
### New functionality
- Spline interpolation on a regular grid.
- Linear interpolation on an irregular grid.
### Improvements
- `support_overlap` now returns an interval (from `IntervalArithmetic`), in line with
what `support` returns.
## UncertainData.jl v0.1.8
### Bug fixes
- Added missing package dependencies which were not caught by CI.
## UncertainData.jl v0.1.7
### New functionality
- `UncertainIndexValueDataset`s can now be constructed from vectors of uncertain values.
To do so, provide a vector of uncertain values for the indices, and the same for the
values, e.g. `UncertainIndexValueDataset([idx1, idx2], [val1, val2])`.
- Index-value dataset realizations can now be
[interpolated on a regular grid](resampling/interpolation/gridded.md).
### Bug fixes
- `minima` and `maxima` now returns the global minimum for a dataset instead of a vector
of elementwise minima and maxima.
- Implemented the option to linearly interpolate index-value dataset realizations.
To do so, provide `resample` with a `RegularGrid` instance.
- Merged redundant methods for assigning some distributions.
- Fixed non-critical indexing bug for uncertain index-value datasets.
- Removed redundant method definitions and multiple imports of the same files causing
definitions to be overwritten and printing warnings statements when loading the package.
## UncertainData.jl v0.1.6
### New functionality
- Implemented sequential sampling constraints `StrictlyIncreasing` and `StrictlyDecreasing`
for `UncertainIndexValueDataset`s.
- Added [UncertainScalarPopulation](uncertain_values/populations.md) type, representing
vectors of values that should be sampled according to a vector of probabilities.
### Improvements
- Improved documentation for `CertainValue`s.
- Added documentation for `UncertainScalarPopulation`.
- Added `UncertainScalarPopulation` to uncertain value overview list in the documentation.
- Fixed duplicate docs for `cot`, `cotd`, `coth` and added missing `acot`, `acotd`, `acoth`
docs.
- Shortened and updated main documentation page with more links.
### Bug fixes
- Import `Base` functions properly when defining `CertainValue`, so that no unexpected
behaviour is introduced.
- Fixed links in documentation that pointed to the wrong locations.
- Remove model resampling docs which was not supposed to be published until the
functionality is properly implemented.
## UncertainData.jl v0.1.5
### New functionality
- Added [CertainValue](uncertain_values/certainvalue.md) type to represent scalars without
any uncertainty. Even though a scalar is not uncertain, we'll define it as subtype of
`AbstractUncertainValue` to treat certain values alongside uncertain values in datasets.
- Added plot recipe for `CertainValue`s. They are just plotted as regular points.
- Added method `resample(Vector{AbstractUncertainValue})` for resampling vectors of
uncertain values. Operates element-wise, just as for an uncertain dataset.
- Added an abstract type `SequentialSamplingConstraint` to separate sequential constraints
from general constraints that might be applied *before* resampling according to
the sequential constraints.
- Added abstract type (`OrderedSamplingAlgorithm`) and composite types
(`StartToEnd`, `EndToStart`, `MidpointOutwards`, `ChunksForwards`, `ChunksBackwards`)
which indicates how to sample sequential realizations when resampling an uncertain
dataset. Only `StartToEnd` is used at the moment.
- Added abstract type `SequentialSamplingConstraint` which is the supertype for all
sequential constraints.
- Added function to check if strictly increasing sequences through an uncertain dataset
exist: `strictly_increasing_sequence_exists(udata::AbstractUncertainValueDataset)`.
- Added function to check if strictly decreasing sequences through an uncertain dataset
exist: `strictly_decreasing_sequence_exists(udata::AbstractUncertainValueDataset)`.
- Added the `StrictlyIncreasing{T} where {T<:OrderedSamplingAlgorithm}` sequential
constraint for resampling uncertain datasets.
- Added the `StrictlyDecreasing{T} where {T<:OrderedSamplingAlgorithm}` sequential
constraint for resampling uncertain datasets.
- Added resampling methods
1. `resample(udata, sequential_constraint::StrictlyIncreasing{T} where {T <: StartToEnd}`
2. `resample(udata, sequential_constraint::StrictlyDecreasing{T} where {T <: StartToEnd}`
3. `resample(udata, constraint::SamplingConstraint, sequential_constraint::StrictlyIncreasing{T} where {T <: StartToEnd}`
4. `resample(udata, constraint::SamplingConstraint, sequential_constraint::StrictlyDecreasing{T} where {T <: StartToEnd}`
5. `resample(udata, constraint::Vector{SamplingConstraint}, sequential_constraint::StrictlyIncreasing{T} where {T <: StartToEnd}`
6. `resample(udata, constraint::Vector{SamplingConstraint}, sequential_constraint::StrictlyDecreasing{T} where {T <: StartToEnd}`
### Improvements
- Added [documentation on sequential constraints]("sampling_constraints/sequential_constraints.md"), clearly separating it from the general constraints.
## UncertainData.jl v0.1.4
### Breaking changes
- Elementary operations for `(scalar, uncertain_value)`, `(uncertain_value, scalar)` and
`(uncertain_value, uncertain_value)` pairs now returns an uncertain value instead of
a vector of resampled realizations. The default behaviour is to perform a kernel
density estimate over the vector of results of the element-wise operations (which
was previously returned without representing it as an uncertain value).
### New functionality
- Implemented constraints for datasets that have already been constrained.
`constrain(udata::ConstrainedDataset, s::SamplingConstraint)` will now return another
`ConstrainedDataset`. The same applies for `ConstrainedIndexDataset` and
`ConstrainedValueDataset`.
- Added `maximum(Vector{AbstractUncertainValue})` and
`minimum(Vector{AbstractUncertainValue})` methods.
- Added plot recipe for `Vector{AbstractUncertainValue}`s. Behaves just as plotting an
uncertain dataset, assuming an implicit indices `1:length(v)`. Error bars may be
tuned by providing a second argument of quantiles to `plot`, e.g. `plot(v, [0.2, 0.8])`
gives error bars covering the 20th to 80th percentile range of the data.
### Improvements
- Added documentation for `StrictlyIncreasing` and `StrictlyDecreasing` sampling
constraints.
- Added `show` function for `AbstractUncertainIndexDataset`. `show` errored previously,
because it assumed the default behaviour of `AbstractUncertainValueDataset`, which
does not have the `indices` field.
### Bug fixes
- Fixed bug when resampling an uncertain dataset using the `NoConstraint` constraint,
which did not work to due to a reference to a non-existing variable.
- Fixed test bug where when resampling an uncertain value with the `TruncateStd` sampling
constraint, the test compared the result to a fixed scalar, not the standard deviation
of the value. This sometimes made the travis build fail.
## UncertainData.jl v0.1.3
### New functionality
- Allow both the `indices` and `values` fields of `UncertainIndexValueDataset` to be any
subtype of `AbstractUncertainValueDataset`. This way, you don't **have** to use an
index dataset type for the indices if not necessary.
### Improvements
- Improved documentation for `UncertainIndexDataset`, `UncertainValueDataset`,
`UncertainDataset` and `UncertainIndexValueDataset` types and added an
[overview page](uncertain_datasets/uncertain_datasets_overview.md) in the documentation
to explain the difference between these types.
- Added an [overview](resampling/resampling_overview.md) section for the resampling
documentation.
- Cleaned and improved [documentation for uncertain values](uncertain_values/uncertainvalues_overview.md).
- Added separate [documentation for the uncertain index dataset type](uncertain_datasets/uncertain_index_dataset.md).
- Added separate [documentation for the uncertain value dataset type](uncertain_datasets/uncertain_value_dataset.md).
- Improved [documentation for the generic uncertain dataset type](uncertain_datasets/uncertain_dataset.md)
- Merged documentation for sampling constraints and resampling.
- Added missing documentation for the `sinc`, `sincos`, `sinpi`, `cosc` and `cospi` trig
functions.
## UncertainData.jl v0.1.2
### New functionality
- Support [elementary mathematical operations](mathematics/elementary_operations.md)
(`+`, `-`, `*` and `/`) between arbitrary
uncertain values of different types. Also works with the combination of scalars and
uncertain values. Because elementary operations should work on arbitrary uncertain
values, a resampling approach is used to perform the mathematical operations. This
means that all mathematical operations return a vector containing the results of
repeated element-wise operations (where each element is a resampled draw from the
furnishing distribution(s) of the uncertain value(s)). The default number of
realizations is set to `10000`. This allows calling `uval1 + uval2` for two uncertain
values `uval1` and `uval2`. If you need to tune the number of resample draws to `n`,
you need to use the `+(uval1, uval2, n)` syntax (similar for the operators). In the
future, elementary operations might be improved for certain combinations of uncertain
values where exact expressions for error propagation are known, for example using the
machinery in `Measurements.jl` for normally distributed values.
- Support for [trigonometric functions](mathematics/trig_functions.md) added (`sin`, `sind`, `sinh`, `cos`,
`cosd`, `cosh`, `tan`, `tand`, `tanh`, `csc`, `cscd`, `csch`, `csc`, `cscd`, `csch`,
`sec`, `secd`, `sech`, `cot`, `cotd`, `coth`, `sincos`, `sinc`, `sinpi`, `cosc`,
`cospi`). Inverses are also defined (`asin`, `asind`, `asinh`, `acos`,
`acosd`, `acosh`, `atan`, `atand`, `atanh`, `acsc`, `acscd`, `acsch`, `acsc`, `acscd`,
`acsch`, `asec`, `asecd`, `asech`, `acot`, `acotd`, `acoth`).
Beware: if the support of the furnishing distribution for an uncertain value lies partly
outside the domain of the function, you risk encountering errors.
These also use a resampling approach, using `10000` realizations by default.
Use either the `sin(uval)` syntax for the default, and `sin(uval, n::Int)` to tune the
number of samples.
- Support non-integer multiples of the standard deviation in the `TruncateStd` sampling
constraint.
### Fixes
- Fixed bug in resampling of index-value datasets, where the `n` argument wasn't used.
- Bugfix: due to `StatsBase.std` not being defined for `FittedDistribution` instances,
uncertain values represented by `UncertainScalarTheoreticalFit` instances were not
compatible with the `TruncateStd` sampling constraint. Now fixed!
- Added missing `resample(uv::AbstractUncertainValue, constraint::TruncateRange, n::Int)`
method.
### Improvements
- Improved resampling documentation for `UncertainIndexValueDataset`s. Now shows
the documentation for the main methods, as well as examples of how to use different
sampling constraints for each individual index and data value.
- Improved resampling documentation for `UncertainDataset`s. Now shows
the documentation for the main methods.
## UncertainData.jl v0.1.1
### New functionality
- Indexing implemented for `UncertainIndexValueDataset`.
- Resampling implemented for `UncertainIndexValueDataset`.
- Uncertain values and uncertain datasets now support `minimum` and `maximum`.
- `support(uv::AbstractUncertainValue)` now always returns an interval from
[IntervalArithmetic.jl](https://github.com/JuliaIntervals/IntervalArithmetic.jl/)
- `support_overlap` now computes overlaps also for fitted theoretical distributions.
- Added more plotting recipes.
- All implemented uncertain data types now support resampling.
### Improvements
- Improved general documentation. Added a reference to [
Measurements.jl](https://github.com/JuliaPhysics/Measurements.jl) and an explanation
for the differences between the packages.
- Improved resampling documentation with detailed explanation and plots.
## UncertainData.jl v0.1.0
- Basic functionality in place.
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | docs | 428 | # Citing
If you use UncertainData.jl for any of your projects or scientific publications, please cite [this small Journal of Open Source Software (JOSS) publication](https://joss.theoj.org/papers/10.21105/joss.01666) as follows
> Haaga, (2019). UncertainData.jl: a Julia package for working with measurements and datasets with uncertainties. Journal of Open Source Software, 4(43), 1666, https://doi.org/10.21105/joss.01666
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | docs | 1134 | # Extending existing algorithms for uncertain data types
Do you already have an algorithm computing some statistic that you want to obtain uncertainty estimates for? Simply use Julia's multiple dispatch and create a version of the algorithm function that accepts the `AbstractUncertainValue` and `AbstractUncertainDataset` types, along with a `SamplingConstraints` specifying how the uncertain values are should be resampled.
A basic function skeleton could be
```julia
# Some algorithm computing a statistic for a scalar-valued vector
function myalgorithm(dataset::Vector{T}; kwargs...) where T
# some algorithm returning a single-valued statistic
end
# Applying the algorithm to an ensemble of realisations from
# an uncertain dataset, given a sampling constraint.
function myalgorithm(d::UncertainDataset, constraint::C;
        n_ensemble_realisations = 100, kwargs...) where {C <: SamplingConstraint}
ensemble_stats = zeros(n_ensemble_realisations)
for i in 1:n_ensemble_realisations
ensemble_stats[i] = myalgorithm(resample(d, constraint); kwargs...)
end
return ensemble_stats
end
```
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
|
[
"MIT"
] | 0.16.0 | df107bbf91afba419309adb9daa486b0457c693c | docs | 5280 | # UncertainData.jl
## Motivation
UncertainData.jl was born to systematically deal with uncertain data, and to
[sample](resampling/resampling_overview.md) from
[uncertain datasets](uncertain_datasets/uncertain_datasets_overview.md) more rigorously.
It makes workflows involving uncertain data of
[different types](uncertain_values/uncertainvalues_overview.md)
and from different sources significantly easier.

## Package philosophy
Way too often in data analysis the uncertainties in observational data are ignored or not
dealt with in a systematic manner. The core concept of the package is that uncertain data
should live in the probability domain, not as single value representations of the data
(e.g. the mean).
In this package, uncertain data values are thus
[stored as probability distributions or populations](uncertain_values/uncertainvalues_overview.md).
Only when performing a computation or plotting, the uncertain values are realized by
resampling the probability distributions furnishing them.
## Organising uncertain data
Individual uncertain observations of different types are seamlessly mixed and can
be organised in [collections of uncertain values](uncertain_datasets/uncertain_datasets_overview.md).
## Mathematical operations
Several [elementary mathematical operations](mathematics/elementary_operations.md) and
[trigonometric functions](mathematics/trig_functions.md) are supported
for uncertain values. Computations are done using a
[resampling approach](resampling/resampling_overview.md).
## Statistics on uncertain datasets
Statistics on uncertain datasets are computed using a resampling approach:
- [Core statistics](uncertain_statistics/core_stats/core_statistics.md)
- [Hypothesis tests](uncertain_statistics/hypothesistests/hypothesis_tests_overview.md)
## Resampling
[Resampling](resampling/resampling_overview.md) is done by drawing random numbers from the furnishing distributions/populations of the uncertain value(s), using one of the [`resample`](@ref) methods.
- [Individual uncertain values](resampling/resampling_uncertain_values.md) may be sampled as they
are, or after first applying [sampling constraints](sampling_constraints/available_constraints.md) on the underlying distributions/populations.
- [Collections of uncertain values](resampling/resampling_uncertain_datasets.md) can be
resampled by either assuming no sequential dependence for your data, or by applying sequential sampling models. During this process [sampling constraints](sampling_constraints/available_constraints.md) can be applied element-wise or on entire collections.
## Basic workflow
1. [**Define uncertain values**](uncertain_values/uncertainvalues_overview.md) by probability distributions.
2. [**Define uncertain datasets**](uncertain_datasets/uncertain_datasets_overview.md) by gathering uncertain values.
3. [**Use sampling constraints**](sampling_constraints/available_constraints.md) to [constrain the support of the distributions furnishing the uncertain values](sampling_constraints/constrain_uncertain_values.md) (i.e. apply subjective criteria to decide what is acceptable data and what is not).
4. [**Resample the uncertain values**](resampling/resampling_uncertain_values.md) or [uncertain datasets](resampling/resampling_uncertain_datasets.md).
5. [**Extend existing algorithm**](implementing_algorithms_for_uncertaindata.md) to accept uncertain values/datasets.
6. [**Quantify the uncertainty**](uncertain_statistics/core_stats/core_statistics.md) in your dataset or on whatever measure your algorithm computes.
## Related software
A related package is [Measurements.jl](https://github.com/JuliaPhysics/Measurements.jl),
which propagates errors exactly and handles correlated uncertainties. However,
Measurements.jl accepts only normally distributed values. This package serves a slightly
different purpose: it was born to provide an easy way of handling uncertainties of
[many different types](uncertain_values/uncertainvalues_overview.md),
using a [resampling](resampling/resampling_overview.md) approach to obtain
[statistics](uncertain_statistics/core_stats/core_statistics.md)
when needed, and providing a rich set of
[sampling constraints](sampling_constraints/available_constraints.md) that makes it easy
for the user to reason about and plot their uncertain data under different assumptions.
Depending on your needs, [Measurements.jl](https://github.com/JuliaPhysics/Measurements.jl)
may be a better (and faster) choice if your data satisfies the requirements for the package
(normally distributed) and if your uncertainties are correlated.
## Contributing
If you have questions, or a good idea for new functionality that could be useful to have in
the package, please submit an issue, or even better - a pull request.
## Citing
If you use UncertainData.jl for any of your projects or scientific publications, please cite [this small Journal of Open Source Software (JOSS) publication](https://joss.theoj.org/papers/10.21105/joss.01666) as follows
> Haaga, (2019). UncertainData.jl: a Julia package for working with measurements and datasets with uncertainties. Journal of Open Source Software, 4(43), 1666, https://doi.org/10.21105/joss.01666
| UncertainData | https://github.com/kahaaga/UncertainData.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.